hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 248 | max_stars_repo_name stringlengths 5 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 248 | max_issues_repo_name stringlengths 5 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringdate 2015-01-01 00:00:47 2022-03-31 23:42:18 ⌀ | max_issues_repo_issues_event_max_datetime stringdate 2015-01-01 17:43:30 2022-03-31 23:59:58 ⌀ | max_forks_repo_path stringlengths 3 248 | max_forks_repo_name stringlengths 5 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 5 2.06M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.03M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
2510cbb743f1f29e0bb13e1ae7ff3435645c3b4d | 184 | py | Python | libs/db_check.py | redpeacock78/bach_bot | 086efe4d6eef05fbdff6af34534e54e43fd9af88 | [
"MIT"
] | null | null | null | libs/db_check.py | redpeacock78/bach_bot | 086efe4d6eef05fbdff6af34534e54e43fd9af88 | [
"MIT"
] | null | null | null | libs/db_check.py | redpeacock78/bach_bot | 086efe4d6eef05fbdff6af34534e54e43fd9af88 | [
"MIT"
] | null | null | null | import mysql.connector as mydb
# Open a connection to the MySQL server reachable at host `mysql_container`
# (fixed Docker development credentials).
conn = mydb.connect(
    host='mysql_container',
    port='3306',
    user='docker',
    password='docker',
    database='my_db'
)
# NOTE(review): the boolean result of is_connected() is discarded --
# presumably this script only exists to verify the server is reachable.
conn.is_connected()
| 15.333333 | 30 | 0.657609 |
2511753f88ea48953fbf7d9fff0197ffc5356c2e | 752 | py | Python | students/models/exams.py | samitnuk/studentsdb | 659c82f7bdc0d6a14074da14252384b9443e286c | [
"MIT"
] | null | null | null | students/models/exams.py | samitnuk/studentsdb | 659c82f7bdc0d6a14074da14252384b9443e286c | [
"MIT"
] | null | null | null | students/models/exams.py | samitnuk/studentsdb | 659c82f7bdc0d6a14074da14252384b9443e286c | [
"MIT"
] | null | null | null | from django.db import models
class Exam(models.Model):
    """Exam Model.

    One exam sitting: subject title, date/time, examiner and the student
    group that takes it (verbose names are in Ukrainian for the admin UI).
    """

    class Meta(object):
        verbose_name = 'Іспит'
        verbose_name_plural = 'Іспити'

    # Subject name shown in listings.
    title = models.CharField(
        max_length=256,
        blank=False,
        verbose_name='Назва предмету')

    # When the exam takes place.
    datetime = models.DateTimeField(
        blank=False,
        verbose_name='Дата і час проведення')

    # Full name of the examiner.
    teacher = models.CharField(
        max_length=256,
        blank=False,
        verbose_name='ПІБ викладача')

    # Group taking the exam; PROTECT prevents deleting a group that still
    # has exams attached.
    exam_group = models.ForeignKey(
        'Group',
        verbose_name='Група',
        blank=False,
        null=True,
        on_delete=models.PROTECT)

    def __str__(self):
        # Bug fix: the original format string lacked the closing
        # parenthesis after the teacher name.
        return '%s (приймає %s)' % (self.title, self.teacher)
| 22.117647 | 60 | 0.599734 |
25132e1264d30cca913fe293f3805c8d79177d9b | 2,201 | py | Python | club_crm/api/clubtour.py | VivekChamp/clubcrm | 82036360d867d3dc5406bc71445a98841b5bffbf | [
"MIT"
] | null | null | null | club_crm/api/clubtour.py | VivekChamp/clubcrm | 82036360d867d3dc5406bc71445a98841b5bffbf | [
"MIT"
] | null | null | null | club_crm/api/clubtour.py | VivekChamp/clubcrm | 82036360d867d3dc5406bc71445a98841b5bffbf | [
"MIT"
] | null | null | null | from __future__ import unicode_literals
import frappe
from frappe import _
from datetime import datetime, timedelta, date, time
from frappe.utils import getdate, get_time, flt, now_datetime
from frappe.utils import escape_html
from frappe import throw, msgprint, _
@frappe.whitelist()
def get_schedule():
    """Publish the club-tour time slots from 'Club Settings'.

    Each slot in the child table `club_tour_schedule` is formatted as a
    12-hour "HH:MM AM - HH:MM PM" label and returned under
    frappe.response["message"]["Preferred Time"].
    """
    settings = frappe.get_doc('Club Settings')

    def to_12_hour(value):
        # "HH:MM:SS" (as stored) -> "HH:MM AM/PM" for display.
        return datetime.strptime(str(value), "%H:%M:%S").strftime("%I:%M %p")

    slots = [
        {"name": _('{0} - {1}').format(to_12_hour(row.from_time),
                                       to_12_hour(row.to_time))}
        for row in settings.club_tour_schedule
    ]
    frappe.response["message"] = {
        "Preferred Time": slots
    }
@frappe.whitelist()
def get_status(client_id):
    """Report the logged-in client's club-tour booking status.

    NOTE(review): `client_id` is unused -- the client is resolved from the
    session user's email instead; confirm whether that is intentional.
    """
    client = frappe.db.get("Client", {"email": frappe.session.user})
    doc= frappe.get_all('Club Tour', filters={'client_id':client.name,'tour_status': "Pending"}, fields=["*"])
    if doc:
        # A tour request exists but has not been scheduled yet.
        frappe.response["message"] = {
            "Status": 0,
            "Status Message": "Pending"
        }
    else:
        doc= frappe.get_all('Club Tour', filters={'client_id':client.name,'tour_status': "Scheduled"}, fields=["*"])
        if doc:
            doc_1= doc[0]
            # A scheduled tour: also return its time window.
            frappe.response["message"] = {
                "Status":1,
                "Status Message": "Scheduled",
                "From Time": doc_1.start_time,
                "To Time": doc_1.end_time
            }
        # If no pending or scheduled tour exists, no message is set at all.
@frappe.whitelist()
def create_clubtour(client_id,date,time):
    """Create a 'Club Tour' request for the logged-in client.

    NOTE(review): `client_id` is unused (the client comes from the session
    user), and `date`/`time` shadow names imported at module level.
    """
    client = frappe.db.get("Client", {"email": frappe.session.user})
    doc = frappe.get_doc({
        'doctype': 'Club Tour',
        'client_id': client.name,
        'preferred_date': date,
        'preferred_time_between': time
    })
    doc.save()
    frappe.response["message"] = {
        "Status":1,
        "Status Message": "Club Tour booking submitted"
    }
| 33.348485 | 116 | 0.606997 |
2513a6b22c946cb8b820c0695cdd317c638f6bf0 | 647 | py | Python | goalboost/model/__init__.py | JohnLockwood/Goalboost | 1556a15f766ab762243e5d198b00ee7239b20411 | [
"RSA-MD"
] | null | null | null | goalboost/model/__init__.py | JohnLockwood/Goalboost | 1556a15f766ab762243e5d198b00ee7239b20411 | [
"RSA-MD"
] | 10 | 2021-07-30T14:39:05.000Z | 2021-07-30T14:39:07.000Z | goalboost/model/__init__.py | JohnLockwood/Goalboost | 1556a15f766ab762243e5d198b00ee7239b20411 | [
"RSA-MD"
] | null | null | null | '''
goalboost.model package
The goalboost model package consists of MongoEngine models along with
Marshmallow schemas. MongoEngine is our database ORM to MongoDB,
and Marshmallow is a serialization library that helps us validate, consume,
and expose these Orm objects for clients that need it at the API layer.
For MongoEngine, see http://mongoengine.org/
For Marshmallow and the MongoEngine integration piece, see:
https://marshmallow.readthedocs.org/en/latest/
https://github.com/touilleMan/marshmallow-mongoengine
'''
from flask.ext.mongoengine import MongoEngine

# Package-wide MongoEngine instance; bound to the Flask app via init_db().
db = MongoEngine()

def init_db(app):
    """Bind the shared MongoEngine instance to the given Flask app."""
    # NOTE(review): `global` is a no-op here because `db` is only mutated,
    # never rebound.
    global db
    db.init_app(app)
| 26.958333 | 75 | 0.789799 |
2513a8a764760e74306e494219df1291ea86952f | 3,290 | py | Python | examples/block_store/snapshots.py | IamFive/sdk-python | 223b04f90477f7de0f00b3e652d8672ba73271c8 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | examples/block_store/snapshots.py | IamFive/sdk-python | 223b04f90477f7de0f00b3e652d8672ba73271c8 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | examples/block_store/snapshots.py | IamFive/sdk-python | 223b04f90477f7de0f00b3e652d8672ba73271c8 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # Copyright 2018 Huawei Technologies Co.,Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use
# this file except in compliance with the License. You may obtain a copy of the
# License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
import logging
def snapshots_detail(conn):
    """List up to 10 block-store snapshots and log them.

    conn: SDK connection object exposing a `block_store` proxy.
    """
    query = {
        'limit': 10
    }
    details = list(conn.block_store.snapshots(**query))
    logging.info(details)
def create_snapshot(conn):
    """Create a snapshot of a fixed demo volume and log the result."""
    attr = {
        'name': 'snap-001',
        'description': 'Daily backup',
        'volume_id': '5aa119a8-d25b-45a7-8d1b-88e127885635',
        'force': False,  # do not snapshot an attached volume
        'metadata': {}
    }
    snapshot = conn.block_store.create_snapshot(**attr)
    logging.info(snapshot)
def rollback_snapshot(conn):
    """Roll a volume back to a snapshot (placeholder ids) and log the result."""
    snapshot_id = 'snapshot-id'
    volume_id = 'volume-id'
    volume_name = 'volume-name'
    snapshot_rollback = conn.block_store.rollback_snapshot(volume_id,
                                                           volume_name,
                                                           snapshot_id)
    logging.info(snapshot_rollback)
def update_snapshot(conn):
    """Rename/re-describe a snapshot (placeholder id) and log the result."""
    snapshot_id = 'snapshot-id'
    attrs = {
        'name': 'name_xx3',
        'description': 'hello'
    }
    snapshot = conn.block_store.update_snapshot(snapshot_id, **attrs)
    logging.info(snapshot)
def create_snapshot_metadata(conn):
    """Attach two metadata keys to a snapshot and log the stored metadata."""
    snapshot_id = 'snapshot-id'
    metadata = {
        'metadata': {
            'key1': 'value1',
            'key2': 'value2'
        }
    }
    new_metadata = conn.block_store.create_snapshot_metadata(snapshot_id,
                                                             **metadata)
    logging.info(new_metadata)
def update_snapshot_metadata(conn):
    """Replace a snapshot's metadata with two keys and log the result."""
    snapshot_id = 'snapshot-id'
    metadata = {
        'metadata': {
            'key1': 'value1',
            'key2': 'value2'
        }
    }
    updated_metadata = conn.block_store.update_snapshot_metadata(snapshot_id, **metadata)
    logging.info(updated_metadata)
def update_snapshot_metadata_with_key(conn):
    """Update a single metadata key ('key1') on a snapshot and log it.

    Note the payload uses 'meta' (single-key form) rather than 'metadata'.
    """
    snapshot_id = 'snapshot-id'
    key = 'key1'
    metadata = {
        'meta': {
            'key1': 'value1',
        }
    }
    updated_metadata = conn.block_store.update_snapshot_metadata(snapshot_id,
                                                                 key=key,
                                                                 **metadata)
    logging.info(updated_metadata)
def delete_snapshot_metadata(conn):
    """Delete metadata key 'key1' from a snapshot (placeholder id)."""
    snapshot_id = 'snapshot-id'
    key = 'key1'
    conn.block_store.delete_snapshot_metadata(snapshot_id, key)
def get_snapshot_metadata(conn):
    """Fetch all metadata of a snapshot (placeholder id) and log it."""
    snapshot_id = 'snapshot-id'
    metadata = conn.block_store.get_snapshot_metadata(snapshot_id)
    logging.info(metadata)
def get_snapshot_metadata_with_key(conn):
    """Fetch one metadata key ('key1') of a snapshot and log it."""
    key = 'key1'
    snapshot_id = 'snapshot-id'
    metadata = conn.block_store.get_snapshot_metadata(snapshot_id, key)
    logging.info(metadata)
| 29.115044 | 89 | 0.614286 |
2515ebed6d44cdb6e775f2b149da71a36b8ce3fa | 6,270 | py | Python | lambda_upload.py | elbursto/aws_lambda_upload | 62215a1efd7037cad2d099489c16fab905ccf2d3 | [
"Apache-2.0"
] | null | null | null | lambda_upload.py | elbursto/aws_lambda_upload | 62215a1efd7037cad2d099489c16fab905ccf2d3 | [
"Apache-2.0"
] | null | null | null | lambda_upload.py | elbursto/aws_lambda_upload | 62215a1efd7037cad2d099489c16fab905ccf2d3 | [
"Apache-2.0"
] | null | null | null |
import boto3
from zipfile import ZipFile
import argparse
import json
import os
import shutil
class LambdaMaker(object):
    """Builds an AWS Lambda deployment zip, uploads it to S3, then creates
    or updates the Lambda function described by a JSON config file.

    All function settings (bucket, key, runtime, role, handler, etc.) come
    from the config file read in process_config_file().
    """

    def __init__(self, config_file, working_dir):
        """Load the config and chdir into the working directory."""
        # const vars
        self.creator='TomLambdaCreator_v1.0.0'
        os.chdir(working_dir)
        self.process_config_file(config_file)

    def process_config_file(self, fname):
        """Read the JSON config file and cache every setting as an attribute."""
        # read config file
        with open(fname, 'r') as f:
            self.contents = json.load(f)
            # NOTE(review): redundant -- the `with` block closes the file.
            f.close()
        self.lambda_bucket = self.contents['S3Bucket']
        self.key = self.contents['S3Key']
        self.fname = self.contents['ZipLocalFname']
        self.basename = self.contents['ZipBaseName']
        self.buildDir = self.contents['BuildDir']
        self.functionName=self.contents['FunctionName']
        self.runTime=self.contents['Runtime']
        self.iamRole=self.contents['Role']
        self.handler=self.contents['Handler']
        self.desc=self.contents['Description']
        self.timeout=self.contents['Timeout']
        self.memory=self.contents['MemorySize']
        self.publish=self.contents['Publish']
        self.vpnconfig = {}
        self.vpnconfig['SubnetIds'] = self.contents['SubnetIds']
        self.vpnconfig['SecurityGroupIds'] = self.contents['SecurityGroupIds']
        self.targetArn = self.contents['DeadLetterTargetArn']
        self.env = self.contents['EnvironmentVariables']
        self.tracingConfig = self.contents['TracingConfigMode']
        self.keyarn = self.contents['KeyArn']

    def install_python_dependancies(self):
        """pip-install each configured dependency into the current dir
        (expected to be the build dir) for zip packaging."""
        deps = self.contents['dependancies']
        for dep in deps:
            cmd = (("pip install {0} -t .").format(dep))
            os.system(cmd)

    def install_node_dependancies(self):
        """npm-install each configured dependency into node_modules."""
        deps = self.contents['dependancies']
        deplen = len(deps)
        if deplen > 0:
            os.mkdir("node_modules")
            for dep in deps:
                cmd = (("npm install -s {0}").format(dep))
                print(cmd)
                os.system(cmd)

    def make_zip_file(self):
        """Recreate the build dir, copy the source file and dependencies in,
        and zip it up via shutil.make_archive."""
        if (os.path.exists(self.buildDir)):
            # remove old build director
            shutil.rmtree(self.buildDir)
        # make the build dir
        os.mkdir(self.buildDir)
        #copy the source file
        source = self.contents['sourceFile']
        shutil.copy(source, self.buildDir)
        source_files = []
        source_files.append(source)
        # NOTE(review): cwd changes here and is never restored.
        os.chdir(self.buildDir)
        if 'node' in self.runTime:
            self.install_node_dependancies()
        else:
            self.install_python_dependancies()
        shutil.make_archive(self.basename, "zip")
        #with ZipFile(self.fname, 'w') as myzip:
        #    for zipit in source_files:
        #        print(("adding {0} to {1}").format(zipit, self.fname))
        #        myzip.write(zipit)

    def push_function_code_to_s3(self):
        """Build the zip, upload it to the configured S3 bucket/key, and
        record the resulting S3 object version."""
        self.make_zip_file()
        client = boto3.client('s3')
        response = client.put_object(
            Bucket=self.lambda_bucket,
            Body=open(self.fname, 'rb'),
            Key=self.key)
        metadata=response['ResponseMetadata']
        print(("s3 code metadata = {0}").format(metadata))
        self.s3version = response['VersionId']
        print(('version = {0}').format(self.s3version))
        # now that we pushed the code we can setup the S3
        # info.
        self.setup_function_vars()
        print("pushed code to s3")

    def setup_function_vars(self):
        """Assemble the argument dicts for create_function from cached config.

        NOTE(review): this overwrites self.desc with the literal
        "Get Location", discarding the configured Description -- confirm
        whether that is intentional.
        """
        self.code = {}
        self.code['S3Bucket'] = self.lambda_bucket
        self.code['S3Key'] = self.key
        self.code['S3ObjectVersion'] = self.s3version
        self.desc="Get Location"
        self.deadcfg = {}
        self.deadcfg['TargetArn'] = self.targetArn
        self.variables = {}
        self.variables['Variables'] = self.env
        self.tracingMode = {}
        self.tracingMode['Mode'] = self.tracingConfig
        # Active needs special permissions
        #self.tracingConfig['Mode'] = 'Active'
        self.tags = {}
        self.tags['FunctionName'] = self.functionName
        self.tags['RunTime'] = self.runTime
        self.tags['Creator'] = self.creator

    def make_new_function(self):
        """Create a brand-new Lambda function from the assembled settings."""
        response = self.lambda_client.create_function(
            FunctionName=self.functionName,
            Runtime=self.runTime,
            Role=self.iamRole,
            Handler=self.handler,
            Code=self.code,
            Description=self.desc,
            Timeout=self.timeout,
            MemorySize=self.memory,
            Publish=self.publish,
            VpcConfig=self.vpnconfig,
            DeadLetterConfig=self.deadcfg,
            Environment=self.variables,
            #KMSKeyArn=self.keyarn,
            TracingConfig=self.tracingMode,
            Tags=self.tags
        )
        print(("lambda create response = {0}").format(response))

    def update_function_code(self):
        """Point the existing Lambda function at the new S3 object version."""
        response = self.lambda_client.update_function_code(
            FunctionName=self.functionName,
            S3Bucket=self.lambda_bucket,
            S3Key=self.key,
            S3ObjectVersion=self.s3version,
            Publish=True,
            DryRun=False)
        print(("update_function_code response: {0}").format(response))

    def push_code(self):
        """Entry point: upload the code, then create the function if it does
        not exist yet, otherwise update its code."""
        self.lambda_client = boto3.client('lambda')
        newFunction = False
        try:
            # Existence probe: get_function raises if the function is absent.
            response = self.lambda_client.get_function(
                FunctionName=self.functionName)
            #print(response['ResponseMetadata'])
        except Exception:
            newFunction = True
        # push the new code to S3
        self.push_function_code_to_s3()
        if newFunction:
            # new function so make it
            self.make_new_function();
        else:
            # function exists so just update code
            self.update_function_code()
def main():
    """Parse CLI arguments and build/upload the configured Lambda function."""
    parser = argparse.ArgumentParser(description='aws lambda function creator')
    parser.add_argument('-f', required=True, help='json file')
    parser.add_argument('-w', required=True, help='working directory')
    args = parser.parse_args()
    config_file = args.f
    wdir = args.w
    # Bug fix: the working directory was parsed but never passed on --
    # LambdaMaker requires (config_file, working_dir), so the original
    # one-argument call raised a TypeError.
    LambdaMaker(config_file, wdir).push_code()

if __name__ == "__main__":
    main()
| 33 | 79 | 0.601435 |
25166ab3132cfb837c187df9b62bcf91450b7109 | 6,260 | py | Python | official/vision/image_classification/callbacks.py | arayabrain/models | ceaa23c0ebecdb445d14f002cc66a39c50ac92e3 | [
"Apache-2.0"
] | null | null | null | official/vision/image_classification/callbacks.py | arayabrain/models | ceaa23c0ebecdb445d14f002cc66a39c50ac92e3 | [
"Apache-2.0"
] | 3 | 2020-08-12T06:16:40.000Z | 2020-08-17T05:44:26.000Z | official/vision/image_classification/callbacks.py | arayabrain/models | ceaa23c0ebecdb445d14f002cc66a39c50ac92e3 | [
"Apache-2.0"
] | 1 | 2020-08-04T01:56:03.000Z | 2020-08-04T01:56:03.000Z | # Lint as: python3
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Common modules for callbacks."""
from __future__ import absolute_import
from __future__ import division
# from __future__ import google_type_annotations
from __future__ import print_function
import functools
import os
from absl import logging
import numpy as np
import tensorflow as tf
from typing import Any, List, Optional, MutableMapping
from official.utils.misc import keras_utils
from official.vision.image_classification.pruning.pruning_base_configs import ModelPruningConfig
from tensorflow_model_optimization.python.core.keras import compat
from tensorflow_model_optimization.python.core.sparsity.keras.cprune_registry import ConstraintRegistry
def get_callbacks(model_checkpoint: bool = True,
                  include_tensorboard: bool = True,
                  time_history: bool = True,
                  track_lr: bool = True,
                  model_pruning_config: Optional[ModelPruningConfig] = None,
                  write_model_weights: bool = True,
                  batch_size: int = 0,
                  log_steps: int = 0,
                  model_dir: str = None) -> List[tf.keras.callbacks.Callback]:
    """Assemble the list of Keras callbacks for a training run.

    Args:
        model_checkpoint: if True, save per-epoch weight checkpoints under
            `model_dir`.
        include_tensorboard: if True, add the CustomTensorBoard callback.
        time_history: if True, add a TimeHistory callback for step timing.
        track_lr: forwarded to CustomTensorBoard to log the learning rate.
        model_pruning_config: optional pruning config forwarded to
            CustomTensorBoard.
        write_model_weights: if True, TensorBoard also writes weight images.
        batch_size: global batch size, used by TimeHistory.
        log_steps: interval (in steps) between TimeHistory log lines.
        model_dir: base directory for checkpoints and logs ('' if None).

    Returns:
        A list of configured tf.keras callbacks.
    """
    model_dir = model_dir or ''
    callbacks = []
    if model_checkpoint:
        ckpt_full_path = os.path.join(model_dir, 'model.ckpt-{epoch:04d}')
        callbacks.append(
            tf.keras.callbacks.ModelCheckpoint(
                ckpt_full_path, save_weights_only=True, verbose=1))
    if include_tensorboard:
        callbacks.append(
            CustomTensorBoard(
                log_dir=model_dir,
                track_lr=track_lr,
                model_pruning_config=model_pruning_config,
                write_images=write_model_weights))
    if time_history:
        callbacks.append(
            keras_utils.TimeHistory(
                batch_size,
                log_steps,
                # TimeHistory only writes TensorBoard summaries when a
                # TensorBoard callback is also active.
                logdir=model_dir if include_tensorboard else None))
    return callbacks
def get_scalar_from_tensor(t: tf.Tensor) -> int:
    """Utility function to convert a Tensor to a scalar.

    Resolves the tensor via Keras backend; if the resolved value is itself
    a callable, it is invoked to obtain the final scalar.
    """
    value = tf.keras.backend.get_value(t)
    return value() if callable(value) else value
class CustomTensorBoard(tf.keras.callbacks.TensorBoard):
    """A customized TensorBoard callback that tracks additional datapoints.

    Metrics tracked:
    - Global learning rate
    - Per-layer pruning mask sparsity and thresholds (when a
      model_pruning_config is supplied)

    Attributes:
        log_dir: the path of the directory where to save the log files to be
            parsed by TensorBoard.
        track_lr: `bool`, whether or not to track the global learning rate.
        **kwargs: Additional arguments for backwards compatibility. Possible
            key is `period`.
    """

    # TODO(b/146499062): track params, flops, log lr, l2 loss,
    # classification loss

    def __init__(self,
                 log_dir: str,
                 track_lr: bool = False,
                 model_pruning_config: Optional[ModelPruningConfig] = None,
                 **kwargs):
        super(CustomTensorBoard, self).__init__(log_dir=log_dir, **kwargs)
        self._track_lr = track_lr
        self._model_pruning_config = model_pruning_config

    def _collect_learning_rate(self, logs):
        """Add the current learning rate to `logs` when it is a schedule."""
        logs = logs or {}
        lr_schedule = getattr(self.model.optimizer, "lr", None)
        if isinstance(lr_schedule, tf.keras.optimizers.schedules.LearningRateSchedule):
            logs["learning_rate"] = tf.keras.backend.get_value(
                lr_schedule(self.model.optimizer.iterations)
            )
            # Some schedules resolve to a partial; call it to get the value.
            if isinstance(logs["learning_rate"], functools.partial):
                logs["learning_rate"] = logs["learning_rate"]()
        return logs

    def _log_metrics(self, logs, prefix, step):
        # NOTE(review): metrics are only forwarded when track_lr is set.
        if self._track_lr:
            super()._log_metrics(self._collect_learning_rate(logs), prefix, step)

    def _log_pruning_metrics(self, logs, prefix, step):
        """Write pruning scalars, dispatching on TF 1.X vs 2.X APIs."""
        if compat.is_v1_apis():
            # Safely depend on TF 1.X private API given
            # no more 1.X releases.
            self._write_custom_summaries(step, logs)
        else:  # TF 2.X
            log_dir = self.log_dir + '/metrics'
            file_writer = tf.summary.create_file_writer(log_dir)
            file_writer.set_as_default()
            for name, value in logs.items():
                tf.summary.scalar(name, value, step=step)
            file_writer.flush()

    def on_epoch_begin(self, epoch, logs=None):
        if logs is not None:
            super(CustomTensorBoard, self).on_epoch_begin(epoch, logs)
        if self._model_pruning_config:
            pruning_logs = {}
            # params alternates [mask, threshold, mask, threshold, ...]
            # per configured (layer, weight) pair.
            params = []
            postfixes = []
            for layer_pruning_config in self._model_pruning_config.pruning:
                layer_name = layer_pruning_config.layer_name
                layer = self.model.get_layer(layer_name)
                for weight_pruning_config in layer_pruning_config.pruning:
                    weight_name = weight_pruning_config.weight_name
                    constraint_name = ConstraintRegistry.get_constraint_from_weight(weight_name)
                    constraint = getattr(layer, constraint_name)
                    params.append(constraint.mask)
                    params.append(constraint.threshold)
                    postfixes.append('/' + layer_name + '/' + weight_name)
            # Fetch everything in one batched call; the optimizer iteration
            # counter rides along as the last element.
            params.append(self.model.optimizer.iterations)
            values = tf.keras.backend.batch_get_value(params)
            iteration = values[-1]
            del values[-1]
            del params[-1]
            param_value_pairs = list(zip(params, values))
            # Even indices are masks, odd indices are thresholds.
            for (mask, mask_value), postfix in zip(param_value_pairs[::2], postfixes):
                pruning_logs.update({
                    'mask_sparsity' + postfix: 1 - np.mean(mask_value)
                })
            for (threshold, threshold_value), postfix in zip(param_value_pairs[1::2], postfixes):
                pruning_logs.update({'threshold' + postfix: threshold_value})
            self._log_pruning_metrics(pruning_logs, '', iteration)
| 36.184971 | 103 | 0.684824 |
2516f01f8f44e4e51781ce4ffc642a90318eac4f | 129 | py | Python | Lib/site-packages/git/index/__init__.py | nemarugommula/ecommerce | 60185e79655fbaf0fcad9e877a886fe9eb3c4451 | [
"bzip2-1.0.6"
] | 10 | 2021-05-31T07:18:08.000Z | 2022-03-19T09:20:11.000Z | Lib/site-packages/git/index/__init__.py | nemarugommula/ecommerce | 60185e79655fbaf0fcad9e877a886fe9eb3c4451 | [
"bzip2-1.0.6"
] | 10 | 2017-05-10T08:10:23.000Z | 2020-03-23T10:23:37.000Z | Lib/site-packages/git/index/__init__.py | nemarugommula/ecommerce | 60185e79655fbaf0fcad9e877a886fe9eb3c4451 | [
"bzip2-1.0.6"
] | 38 | 2017-04-26T14:13:37.000Z | 2021-06-24T11:36:38.000Z | """Initialize the index package"""
# flake8: noqa
from __future__ import absolute_import
from .base import *
from .typ import *
| 18.428571 | 38 | 0.751938 |
2519a94caf6b2f931b487b3397703da9ddf2b842 | 885 | py | Python | EDyA_II/4_tree/python/4_default_parameter.py | jrg-sln/academy | 498c11dcfeab78dbbbb77045a13d7d6675c0d150 | [
"MIT"
] | null | null | null | EDyA_II/4_tree/python/4_default_parameter.py | jrg-sln/academy | 498c11dcfeab78dbbbb77045a13d7d6675c0d150 | [
"MIT"
] | null | null | null | EDyA_II/4_tree/python/4_default_parameter.py | jrg-sln/academy | 498c11dcfeab78dbbbb77045a13d7d6675c0d150 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
class Saucer(object):
    """Represents a dish on a menu.

    The string form is "<name><*>: <price><(description)>", where '*' marks
    a vegetarian dish and the optional description is appended in
    parentheses.
    """
    def __init__(self, cadNombre, realPrecio, cadDescription=None,
                 cadImagen=None, boolVegetariano=False, entCoccion=1):
        self.nombre = cadNombre
        self.precio = realPrecio
        self.descripcion = cadDescription
        self.imagen = cadImagen
        self.esVegetariano = boolVegetariano
        self.coccion = entCoccion

    def __str__(self):
        veg_mark = '*' if self.esVegetariano else ''
        extra = ' (' + self.descripcion + ')' if self.descripcion else ''
        return "{}{}: {:.2f}{}".format(self.nombre, veg_mark,
                                       self.precio, extra)
# Demo: build a sample dish (printed by the fused line below).
burgerPython = Saucer("Hamburguesa de Python", 0.13,
                      cadDescription="Barely an eigth of a byte")
print(burgerPython) | 34.038462 | 75 | 0.59661 |
2519e01a81d1d3e2c4f4e4fede4c19c82e764391 | 9,768 | py | Python | model/bdrar.py | Mhaiyang/iccv | 04a8ee52c2323d7ff5cdf03c0be1466e8180d2eb | [
"MIT"
] | 2 | 2019-01-10T03:44:03.000Z | 2019-05-24T08:50:14.000Z | model/bdrar.py | Mhaiyang/iccv | 04a8ee52c2323d7ff5cdf03c0be1466e8180d2eb | [
"MIT"
] | null | null | null | model/bdrar.py | Mhaiyang/iccv | 04a8ee52c2323d7ff5cdf03c0be1466e8180d2eb | [
"MIT"
] | null | null | null | import torch
import torch.nn.functional as F
from torch import nn
from resnext.resnext101_regular import ResNeXt101
class _AttentionModule(nn.Module):
    """Residual attention block producing a 32-channel sigmoid map.

    Three grouped dilated-conv branches (dilations 2, 3, 4) are applied with
    residual connections; the final map is the sigmoid of the last branch
    plus a 1x1 projection of its input.
    """

    def __init__(self):
        super(_AttentionModule, self).__init__()

        def dilated_branch(dilation, out_channels):
            # conv1x1 -> grouped dilated conv3x3 -> conv1x1, BN after each.
            return nn.Sequential(
                nn.Conv2d(64, 64, 1, bias=False), nn.BatchNorm2d(64), nn.ReLU(),
                nn.Conv2d(64, 64, 3, dilation=dilation, padding=dilation,
                          groups=32, bias=False), nn.BatchNorm2d(64), nn.ReLU(),
                nn.Conv2d(64, out_channels, 1, bias=False),
                nn.BatchNorm2d(out_channels)
            )

        self.block1 = dilated_branch(2, 64)
        self.block2 = dilated_branch(3, 64)
        self.block3 = dilated_branch(4, 32)
        self.down = nn.Sequential(
            nn.Conv2d(64, 32, 1, bias=False), nn.BatchNorm2d(32)
        )

    def forward(self, x):
        residual1 = F.relu(self.block1(x) + x, True)
        residual2 = F.relu(self.block2(residual1) + residual1, True)
        return F.sigmoid(self.block3(residual2) + self.down(residual2))
class BDRAR(nn.Module):
    """Bidirectional feature-pyramid network with recurrent attention
    residual modules over a ResNeXt-101 backbone.

    Two refinement streams run over the four backbone stages: high-to-low
    (hl, starting from layer4) and low-to-high (lh, starting from layer1).
    Each step applies a shared refine conv twice, gated by an
    _AttentionModule. Per-stream predictions are fused by a learned
    2-channel attention map. In training mode forward() returns the fused
    map plus all eight intermediate predictions (logits); in eval mode it
    returns the sigmoid of the fused map only.
    """
    def __init__(self):
        super(BDRAR, self).__init__()
        resnext = ResNeXt101()
        self.layer0 = resnext.layer0
        self.layer1 = resnext.layer1
        self.layer2 = resnext.layer2
        self.layer3 = resnext.layer3
        self.layer4 = resnext.layer4

        # 1x1 lateral convs squeezing each backbone stage to 32 channels.
        self.down4 = nn.Sequential(
            nn.Conv2d(2048, 32, 1, bias=False), nn.BatchNorm2d(32), nn.ReLU()
        )
        self.down3 = nn.Sequential(
            nn.Conv2d(1024, 32, 1, bias=False), nn.BatchNorm2d(32), nn.ReLU()
        )
        self.down2 = nn.Sequential(
            nn.Conv2d(512, 32, 1, bias=False), nn.BatchNorm2d(32), nn.ReLU()
        )
        self.down1 = nn.Sequential(
            nn.Conv2d(256, 32, 1, bias=False), nn.BatchNorm2d(32), nn.ReLU()
        )

        # High-to-low refinement convs (64 -> 32 channels) and attentions.
        self.refine3_hl = nn.Sequential(
            nn.Conv2d(64, 32, 1, bias=False), nn.BatchNorm2d(32), nn.ReLU(),
            nn.Conv2d(32, 32, 3, padding=1, groups=32, bias=False), nn.BatchNorm2d(32), nn.ReLU(),
            nn.Conv2d(32, 32, 1, bias=False), nn.BatchNorm2d(32)
        )
        self.refine2_hl = nn.Sequential(
            nn.Conv2d(64, 32, 1, bias=False), nn.BatchNorm2d(32), nn.ReLU(),
            nn.Conv2d(32, 32, 3, padding=1, groups=32, bias=False), nn.BatchNorm2d(32), nn.ReLU(),
            nn.Conv2d(32, 32, 1, bias=False), nn.BatchNorm2d(32)
        )
        self.refine1_hl = nn.Sequential(
            nn.Conv2d(64, 32, 1, bias=False), nn.BatchNorm2d(32), nn.ReLU(),
            nn.Conv2d(32, 32, 3, padding=1, groups=32, bias=False), nn.BatchNorm2d(32), nn.ReLU(),
            nn.Conv2d(32, 32, 1, bias=False), nn.BatchNorm2d(32)
        )
        self.attention3_hl = _AttentionModule()
        self.attention2_hl = _AttentionModule()
        self.attention1_hl = _AttentionModule()

        # Low-to-high refinement convs and attentions.
        self.refine2_lh = nn.Sequential(
            nn.Conv2d(64, 32, 1, bias=False), nn.BatchNorm2d(32), nn.ReLU(),
            nn.Conv2d(32, 32, 3, padding=1, groups=32, bias=False), nn.BatchNorm2d(32), nn.ReLU(),
            nn.Conv2d(32, 32, 1, bias=False), nn.BatchNorm2d(32)
        )
        self.refine4_lh = nn.Sequential(
            nn.Conv2d(64, 32, 1, bias=False), nn.BatchNorm2d(32), nn.ReLU(),
            nn.Conv2d(32, 32, 3, padding=1, groups=32, bias=False), nn.BatchNorm2d(32), nn.ReLU(),
            nn.Conv2d(32, 32, 1, bias=False), nn.BatchNorm2d(32)
        )
        self.refine3_lh = nn.Sequential(
            nn.Conv2d(64, 32, 1, bias=False), nn.BatchNorm2d(32), nn.ReLU(),
            nn.Conv2d(32, 32, 3, padding=1, groups=32, bias=False), nn.BatchNorm2d(32), nn.ReLU(),
            nn.Conv2d(32, 32, 1, bias=False), nn.BatchNorm2d(32)
        )
        self.attention2_lh = _AttentionModule()
        self.attention3_lh = _AttentionModule()
        self.attention4_lh = _AttentionModule()

        # 2-channel softmax-free attention used to fuse the two streams.
        self.fuse_attention = nn.Sequential(
            nn.Conv2d(64, 16, 3, padding=1, bias=False), nn.BatchNorm2d(16), nn.ReLU(),
            nn.Conv2d(16, 2, 1)
        )
        # Shared prediction head (32 -> 1 channel) used for every scale.
        self.predict = nn.Sequential(
            nn.Conv2d(32, 8, 3, padding=1, bias=False), nn.BatchNorm2d(8), nn.ReLU(),
            nn.Dropout(0.1), nn.Conv2d(8, 1, 1)
        )

        # for m in self.modules():
        #     if isinstance(m, nn.ReLU) or isinstance(m, nn.Dropout):
        #         m.inplace = True
        for m in self.modules():
            if isinstance(m, nn.ReLU):
                m.inplace = True

    def forward(self, x):
        # Backbone feature pyramid.
        layer0 = self.layer0(x)
        layer1 = self.layer1(layer0)
        layer2 = self.layer2(layer1)
        layer3 = self.layer3(layer2)
        layer4 = self.layer4(layer3)

        # Squeeze every stage to 32 channels.
        down4 = self.down4(layer4)
        down3 = self.down3(layer3)
        down2 = self.down2(layer2)
        down1 = self.down1(layer1)

        # High-to-low stream: refine twice per step, attention-gated
        # ("1 + attention" is the residual attention formulation).
        down4 = F.upsample(down4, size=down3.size()[2:], mode='bilinear')
        refine3_hl_0 = F.relu(self.refine3_hl(torch.cat((down4, down3), 1)) + down4, True)
        refine3_hl_0 = (1 + self.attention3_hl(torch.cat((down4, down3), 1))) * refine3_hl_0
        refine3_hl_1 = F.relu(self.refine3_hl(torch.cat((refine3_hl_0, down3), 1)) + refine3_hl_0, True)
        refine3_hl_1 = (1 + self.attention3_hl(torch.cat((refine3_hl_0, down3), 1))) * refine3_hl_1

        refine3_hl_1 = F.upsample(refine3_hl_1, size=down2.size()[2:], mode='bilinear')
        refine2_hl_0 = F.relu(self.refine2_hl(torch.cat((refine3_hl_1, down2), 1)) + refine3_hl_1, True)
        refine2_hl_0 = (1 + self.attention2_hl(torch.cat((refine3_hl_1, down2), 1))) * refine2_hl_0
        refine2_hl_1 = F.relu(self.refine2_hl(torch.cat((refine2_hl_0, down2), 1)) + refine2_hl_0, True)
        refine2_hl_1 = (1 + self.attention2_hl(torch.cat((refine2_hl_0, down2), 1))) * refine2_hl_1

        refine2_hl_1 = F.upsample(refine2_hl_1, size=down1.size()[2:], mode='bilinear')
        refine1_hl_0 = F.relu(self.refine1_hl(torch.cat((refine2_hl_1, down1), 1)) + refine2_hl_1, True)
        refine1_hl_0 = (1 + self.attention1_hl(torch.cat((refine2_hl_1, down1), 1))) * refine1_hl_0
        refine1_hl_1 = F.relu(self.refine1_hl(torch.cat((refine1_hl_0, down1), 1)) + refine1_hl_0, True)
        refine1_hl_1 = (1 + self.attention1_hl(torch.cat((refine1_hl_0, down1), 1))) * refine1_hl_1

        # Low-to-high stream (everything at down1's spatial size).
        down2 = F.upsample(down2, size=down1.size()[2:], mode='bilinear')
        refine2_lh_0 = F.relu(self.refine2_lh(torch.cat((down1, down2), 1)) + down1, True)
        refine2_lh_0 = (1 + self.attention2_lh(torch.cat((down1, down2), 1))) * refine2_lh_0
        refine2_lh_1 = F.relu(self.refine2_lh(torch.cat((refine2_lh_0, down2), 1)) + refine2_lh_0, True)
        refine2_lh_1 = (1 + self.attention2_lh(torch.cat((refine2_lh_0, down2), 1))) * refine2_lh_1

        down3 = F.upsample(down3, size=down1.size()[2:], mode='bilinear')
        refine3_lh_0 = F.relu(self.refine3_lh(torch.cat((refine2_lh_1, down3), 1)) + refine2_lh_1, True)
        refine3_lh_0 = (1 + self.attention3_lh(torch.cat((refine2_lh_1, down3), 1))) * refine3_lh_0
        refine3_lh_1 = F.relu(self.refine3_lh(torch.cat((refine3_lh_0, down3), 1)) + refine3_lh_0, True)
        refine3_lh_1 = (1 + self.attention3_lh(torch.cat((refine3_lh_0, down3), 1))) * refine3_lh_1

        down4 = F.upsample(down4, size=down1.size()[2:], mode='bilinear')
        refine4_lh_0 = F.relu(self.refine4_lh(torch.cat((refine3_lh_1, down4), 1)) + refine3_lh_1, True)
        refine4_lh_0 = (1 + self.attention4_lh(torch.cat((refine3_lh_1, down4), 1))) * refine4_lh_0
        refine4_lh_1 = F.relu(self.refine4_lh(torch.cat((refine4_lh_0, down4), 1)) + refine4_lh_0, True)
        refine4_lh_1 = (1 + self.attention4_lh(torch.cat((refine4_lh_0, down4), 1))) * refine4_lh_1

        # Per-scale predictions from the shared head.
        refine3_hl_1 = F.upsample(refine3_hl_1, size=down1.size()[2:], mode='bilinear')
        predict4_hl = self.predict(down4)
        predict3_hl = self.predict(refine3_hl_1)
        predict2_hl = self.predict(refine2_hl_1)
        predict1_hl = self.predict(refine1_hl_1)

        predict1_lh = self.predict(down1)
        predict2_lh = self.predict(refine2_lh_1)
        predict3_lh = self.predict(refine3_lh_1)
        predict4_lh = self.predict(refine4_lh_1)

        # Learned fusion of the final prediction of each stream.
        fuse_attention = F.sigmoid(self.fuse_attention(torch.cat((refine1_hl_1, refine4_lh_1), 1)))
        fuse_predict = torch.sum(fuse_attention * torch.cat((predict1_hl, predict4_lh), 1), 1, True)

        # Upsample every output back to the input resolution.
        predict4_hl = F.upsample(predict4_hl, size=x.size()[2:], mode='bilinear')
        predict3_hl = F.upsample(predict3_hl, size=x.size()[2:], mode='bilinear')
        predict2_hl = F.upsample(predict2_hl, size=x.size()[2:], mode='bilinear')
        predict1_hl = F.upsample(predict1_hl, size=x.size()[2:], mode='bilinear')
        predict1_lh = F.upsample(predict1_lh, size=x.size()[2:], mode='bilinear')
        predict2_lh = F.upsample(predict2_lh, size=x.size()[2:], mode='bilinear')
        predict3_lh = F.upsample(predict3_lh, size=x.size()[2:], mode='bilinear')
        predict4_lh = F.upsample(predict4_lh, size=x.size()[2:], mode='bilinear')
        fuse_predict = F.upsample(fuse_predict, size=x.size()[2:], mode='bilinear')

        if self.training:
            return fuse_predict, predict1_hl, predict2_hl, predict3_hl, predict4_hl, predict1_lh, predict2_lh, predict3_lh, predict4_lh
        return F.sigmoid(fuse_predict)
| 51.141361 | 135 | 0.619676 |
251a755eafd6983caca29826a579cc38212144dd | 7,413 | py | Python | pgeng/font.py | Bouncehball/pgeng | 6f88991e16cfd744c8565b68b6348f313b4d75c0 | [
"MIT"
] | null | null | null | pgeng/font.py | Bouncehball/pgeng | 6f88991e16cfd744c8565b68b6348f313b4d75c0 | [
"MIT"
] | null | null | null | pgeng/font.py | Bouncehball/pgeng | 6f88991e16cfd744c8565b68b6348f313b4d75c0 | [
"MIT"
] | null | null | null | 'Classes and functions for creating fonts and text buttons'
#IMPORTS
import pygame
from pathlib import Path
from .core import clip_surface, load_image
from .colour import palette_swap
#IMPORTS
#VARIALBES
__all__ = ['create_font', 'TextButton']
path = Path(__file__).resolve().parent
#VARIABLES
#CREATE_FONT
def create_font(colour):
'''A function to create small and large Font objects
colour will be the colour of the text
The first value in the returned tuple is the small font and the second value is the large font
Returns: tuple'''
if tuple(colour[:3]) == (0, 0, 0):
small_font_image = palette_swap(load_image(path.joinpath('font/small.png')), {(255, 0, 0): colour[:3], tuple(colour[:3]): (255, 255, 255)})
large_font_image = palette_swap(load_image(path.joinpath('font/large.png')), {(255, 0, 0): colour[:3], tuple(colour[:3]): (255, 255, 255)})
return Font(small_font_image, background_colour=255), Font(large_font_image, background_colour=255)
if tuple(colour[:3]) == (127, 127, 127):
small_font_image = palette_swap(load_image(path.joinpath('font/small.png')), {(255, 0, 0): colour[:3], tuple(colour[:3]): (128, 128, 128)})
large_font_image = palette_swap(load_image(path.joinpath('font/large.png')), {(255, 0, 0): colour[:3], tuple(colour[:3]): (128, 128, 128)})
return Font(small_font_image, 128), Font(large_font_image, 128)
small_font_image = palette_swap(load_image(path.joinpath('font/small.png')), {(255, 0, 0): colour[:3]})
large_font_image = palette_swap(load_image(path.joinpath('font/large.png')), {(255, 0, 0): colour[:3]})
return Font(small_font_image), Font(large_font_image)
#CREATE_FONT
#FONT
class Font:
'''A class to create a pixel art font
It will get all the letters out of the image and render them
The border between letters is usually (127, 127, 127) and the background is usually (0, 0, 0) change them if it is necessary
The font is made by DaFluffyPotato
Attributes:
character_height
characters
font_image
space_width'''
#__INIT__
def __init__(self, font_image, border_colour=127, background_colour=0):
'Initialising a font object'
self.font_image = font_image
self.font_image.set_colorkey((0, 0, 0) if not background_colour else [background_colour for i in range(3)])
self.characters = {}
current_width, character_count = 0, 0
character_order = ['A','B','C','D','E','F','G','H','I','J','K','L','M','N','O','P','Q','R','S','T','U','V','W','X','Y','Z','a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z','.','-',',',':','+','\'','!','?','0','1','2','3','4','5','6','7','8','9','(',')','/','_','=','\\','[',']','*','"','<','>',';']
for x in range(self.font_image.get_width()):
colour = self.font_image.get_at((x, 0))
if colour[:3] == (border_colour, border_colour, border_colour): #IF THE TEXT COLOR IS (127, 127, 127), CHANGE BORDER_COLOR
character_image = clip_surface(self.font_image, (x - current_width, 0), (current_width, self.font_image.get_height())) #CLIP EVERY CHARACTER OUT OF THE FONT IMAGE
self.characters[character_order[character_count]] = character_image
character_count += 1
current_width = 0
else:
current_width += 1
self.space_width, self.character_height = self.characters['A'].get_size()
#__INIT__
#__REPR__
def __repr__(self):
'''Returns a string representation of the object
Returns: str'''
return 'pgeng.Font'
#__REPR__
#GET_SIZE
def get_size(self, text):
'''Get the size that that a rendered string would use
It will return the width and height
Returns: tuple'''
if type(text) is not str:
raise TypeError('text has to be a string')
width, height = 0, 0
for character in text:
if character not in ('\n', ' ') and character in self.characters:
width += self.characters[character].get_width() + 1 #+ 1 FOR SPACING
elif character == ' ' or character not in ['\n']:
width += self.space_width + 1 #+ 1 FOR SPACING
else:
width = 0
height += self.character_height + 1 #+ 1 FOR SPACING
return width, height
#GET_SIZE
#RENDER
def render(self, surface, text, location, scroll=pygame.Vector2()):
'Render a string on a surface at a location'
if type(text) is not str:
raise TypeError('text has to be a string')
x_offset, y_offset = 0, 0
for character in text:
if character not in ('\n', ' ') and character in self.characters:
surface.blit(self.characters[character], (location[0] + x_offset - scroll[0], location[1] + y_offset - scroll[1]))
x_offset += self.characters[character].get_width() + 1 #+ 1 FOR SPACING
elif character == ' ' or character not in ['\n']:
x_offset += self.space_width + 1 #+ 1 FOR SPACING
else:
x_offset = 0
y_offset += self.character_height + 1 #+ 1 FOR SPACING
#RENDER
#FONT
#TEXTBUTTON
class TextButton:
'''A string of text that is also a button
The collide function is to collide with the mouse and clicks
It also needs a font size, it has to be either 'small' or 'large'
Use the location variable instead of the rect values
Attributes:
location
rect
size
test_font
text'''
#__INIT__
def __init__(self, text, location, font_size):
'Initialising a TextButton object'
if font_size != 'small' and font_size != 'large':
raise ValueError('font_size is not \'small\' or \'large\'')
if type(text) is not str:
raise TypeError('text is not a string')
self.text = text
self.location = pygame.Vector2(location)
self.test_font = Font(load_image(path.joinpath(f'font/{font_size}.png')))
self.size = self.test_font.get_size(text)
#__INIT__
#__REPR__
def __repr__(self):
'''Returns a string representation of the object
Returns: str'''
return f'pgeng.TextButton({tuple(self.location)})'
#__REPR__
#RECT
@property
def rect(self):
'''Returns the pygame.Rect object of the TextButton
Returns: pygame.Rect'''
self.location = pygame.Vector2(self.location)
return pygame.Rect(self.location, (self.size[0] - 1, self.size[1] + self.test_font.character_height)) #- 1 FOR THE EXTRA SPACING
#RECT
#SET_TEXT
def set_text(self, text):
'''Sets a new string as the text
All the variables will be updated, so the functions can be used the same'''
if type(text) is not str:
raise TypeError('text is not a string')
self.text = text
self.size = self.test_font.get_size(text)
#SET_TEXT
#COLLIDE
def collide(self, click, check_location=None):
'''This will check collision with the mouse location and also if click is True with it
A custom location can be set with location if pygame.mouse.get_pos() is not wished to be used
The first value returns True if the mouse has collided with the button, the second one is if the mouse clicked on it
Returns: tuple'''
check_location = pygame.mouse.get_pos() if check_location is None else check_location
if self.rect.collidepoint(check_location):
if click:
return True, True
return True, False
return False, False
#COLLIDE
#RENDER
def render(self, surface, font, scroll=pygame.Vector2()):
'Renders the text from the button'
if not isinstance(font, Font):
raise TypeError('font is not a Font object')
font.render(surface, self.text, self.location, scroll)
#RENDER
#TEXTBUTTON | 37.439394 | 356 | 0.673816 |
251ac80cf768d166a984daeae7c4d2c5d7422487 | 1,814 | py | Python | pyguetzli/pil_image.py | wanadev/pyguetzli | 765cc89137e2f5fca80e5f894f4ec95c38995d96 | [
"Apache-2.0"
] | 28 | 2017-05-03T17:48:21.000Z | 2022-02-14T13:40:24.000Z | pyguetzli/pil_image.py | wanadev/pyguetzli | 765cc89137e2f5fca80e5f894f4ec95c38995d96 | [
"Apache-2.0"
] | 6 | 2017-08-21T07:52:18.000Z | 2020-07-17T16:41:44.000Z | pyguetzli/pil_image.py | wanadev/pyguetzli | 765cc89137e2f5fca80e5f894f4ec95c38995d96 | [
"Apache-2.0"
] | 3 | 2018-03-13T23:33:10.000Z | 2021-09-09T02:33:07.000Z | """
This modules contain helper function to deal with PIL / Pillow Images.
.. note::
Please note that the ``[PIL]`` (pillow) extra dependency must be installed
to allow functions from this module to work.
"""
from . import guetzli
def _to_pil_rgb_image(image):
"""Returns an PIL Image converted to the RGB color space. If the image has
an alpha channel (transparency), it will be overlaid on a black background.
:param image: the PIL image to convert
:returns: The input image if it was already in RGB mode, or a new RGB image
if converted.
:raises ImportError: PIL / Pillow cannot be imported.
"""
if image.mode == "RGB":
return image
from PIL import Image
image.load()
rgb_image = Image.new("RGB", image.size, (0x00, 0x00, 0x00))
mask = None
if image.mode == "RGBA":
mask = image.split()[3] # bands: R=0, G=1, B=2, 1=3
rgb_image.paste(image, mask=mask)
return rgb_image
def process_pil_image(image, quality=guetzli.DEFAULT_JPEG_QUALITY):
"""Generates an optimized JPEG from a PIL image. If the image has an alpha
channel (transparency), it will be overlaid on a black background.
:param image: the PIL image
:param quality: the output JPEG quality (default 95)
:returns: Optimized JPEG bytes
:rtype: bytes
:raises ImportError: PIL / Pillow cannot be imported.
.. code:: python
import pyguetzli
from PIL import Image
image = Image.open("./test/image.jpg")
optimized_jpeg = pyguetzli.process_pil_image(image)
"""
image_rgb = _to_pil_rgb_image(image)
image_rgb_bytes = image_rgb.tobytes()
return guetzli.process_rgb_bytes(
image_rgb_bytes,
*image.size,
quality=quality
)
| 26.676471 | 79 | 0.656009 |
251c85ca611047b1b27da7153669dd51f78397d6 | 1,034 | py | Python | 201.bitwise-and-of-numbers-range.py | windard/leeeeee | 0107a5f95746592ca4fe78d2b5875cf65b1910e7 | [
"MIT"
] | null | null | null | 201.bitwise-and-of-numbers-range.py | windard/leeeeee | 0107a5f95746592ca4fe78d2b5875cf65b1910e7 | [
"MIT"
] | null | null | null | 201.bitwise-and-of-numbers-range.py | windard/leeeeee | 0107a5f95746592ca4fe78d2b5875cf65b1910e7 | [
"MIT"
] | null | null | null | #
# @lc app=leetcode id=201 lang=python
#
# [201] Bitwise AND of Numbers Range
#
# https://leetcode.com/problems/bitwise-and-of-numbers-range/description/
#
# algorithms
# Medium (35.44%)
# Total Accepted: 77.3K
# Total Submissions: 217.7K
# Testcase Example: '5\n7'
#
# Given a range [m, n] where 0 <= m <= n <= 2147483647, return the bitwise AND
# of all numbers in this range, inclusive.
#
# Example 1:
#
#
# Input: [5,7]
# Output: 4
#
#
# Example 2:
#
#
# Input: [0,1]
# Output: 0
#
class Solution(object):
def rangeBitwiseAnd(self, m, n):
"""
:type m: int
:type n: int
:rtype: int
"""
# Extraordinary Solution
max_num = 2 ** 31 - 1
while (max_num & m != max_num & n):
max_num = max_num << 1
print max_num
return max_num & m
def _rangeBitwiseAnd(self, m, n):
"""
:type m: int
:type n: int
:rtype: int
"""
# memory error
return reduce(lambda x,y: x & y, range(m, n+1))
| 19.509434 | 78 | 0.548356 |
251cba64cfe05ed7cdba8439be4d154984b803ea | 12,053 | py | Python | src/dip_main.py | BardiaMojra/dip | 201bd14c13052b81967e051444f4e5c08c72631a | [
"MIT"
] | null | null | null | src/dip_main.py | BardiaMojra/dip | 201bd14c13052b81967e051444f4e5c08c72631a | [
"MIT"
] | null | null | null | src/dip_main.py | BardiaMojra/dip | 201bd14c13052b81967e051444f4e5c08c72631a | [
"MIT"
] | null | null | null | ''' dip
@author Bardia Mojra - 1000766739
@brief ee-5323 - project -
@date 10/31/21
code based on below YouTube tutorial and Pymotw.com documentation for socket mod.
@link https://www.youtube.com/watch?v=3QiPPX-KeSc
@link https://pymotw.com/2/socket/tcp.html
python socket module documentation
@link https://docs.python.org/3/library/socket.html
@link https://docs.python.org/3/howto/sockets.html
'''
import csv
import math
import numpy as np
import os
import pygame
import pyglet
from pyglet.window import key
import pymunk
import pymunk.constraints
import pymunk.pygame_util
import pandas as pd
import pyglet.gl as gl
''' custom libs
'''
import dvm
import tcm
''' NBUG
'''
from nbug import *
''' TEST CONFIG
'''
TEST_ID = 'Test 903'
SIM_DUR = 30.0 # in seconds
OUT_DIR = '../out/'
OUT_DATA = OUT_DIR+TEST_ID+'_data.csv'
CONF_DIR = '../config/'
# cart
m_c = 0.5
all_friction = 0.2
''' pendulum 1 '''
l_1 = 0.4 # 6, 5, 4, 7 -- 4 ->
m_1 = 0.2 # 2, 3, 4 -- 1 -> stable
m_1_moment = 0.01
m_1_radius = 0.05
''' pendulum 2 '''
l_2 = 0.7 # 6, 5, 7 -- 3 -> unstable
m_2 = 0.3 # 2, 3, 4 -- 2 -> unstable
m_2_moment = 0.001
m_2_radius = 0.05
# other config
output_labels=['t', 'x', 'dx', 'th_1', 'dth_1', 'th_2', 'dth_2']
# control config
# K gain matrix and Nbar found from modelling via Jupyter
# K = [16.91887353, 21.12423935, 137.96378003, -3.20040325, -259.72220049, -50.48383455]
# Nbar = 17.0
K = [51.43763708,
54.12690472,
157.5467596,
-21.67111679,
-429.11603909,
-88.73125241]
Nbar = 51.5
tConfig = tcm.test_configuration(TEST_ID=TEST_ID,
OUT_DIR=OUT_DIR,
OUT_DATA=OUT_DATA,
CONF_DIR=CONF_DIR,
SIM_DUR=SIM_DUR,
output_labels=output_labels,
all_friction=all_friction,
cart_mass=m_c,
pend_1_length=l_1,
pend_1_mass=m_1,
pend_1_moment=m_1_moment,
pend_2_length=l_2,
pend_2_mass=m_2,
pend_2_moment=m_2_moment,
K=K,
Nbar=Nbar)
# log test config
tcm.pkl(tConfig)
''' MOD CONFIG
'''
SCREEN_WIDTH = 700
SCREEN_HEIGHT = 500
# sim config
MAX_FORCE = 25
DT = 1 / 60.0
PPM = 200.0 # pxls per meter
END_ = 1000 # samples used for plotting and analysis
SHOW_ = True
cart_size = 0.3, 0.2
white_color = (0,0,0,0)
black_color = (255,255,255,255)
green_color = (0,135,0,255)
red_color = (135,0,0,255)
blue_color = (0,0,135,255)
''' main
'''
pygame.init()
# screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))
surface = pygame.Surface((SCREEN_WIDTH, SCREEN_HEIGHT))
# clock = pygame.time.Clock()
window = pyglet.window.Window(SCREEN_WIDTH, SCREEN_HEIGHT, vsync=False, caption='Double Inverted Pendulum Simulation')
gl.glClearColor(255,255,255,255)
# setup the space
space = pymunk.Space()
# options = pymunk.pygame_util.DrawOptions(surface)
# space.debug_draw(options)
space.gravity = 0, -9.81
# space.debug_draw(options)
fil = pymunk.ShapeFilter(group=1)
# screen.fill(pygame.Color("white"))
# options = pymunk.pygame_util.DrawOptions(screen)
# space.debug_draw(options)
# ground
ground = pymunk.Segment(space.static_body, (-4, -0.1), (4, -0.1), 0.1)
# ground.color = pygame.Color("pink")
ground.friction = all_friction
ground.filter = fil
space.add(ground)
# space.debug_draw(options)
# cart
cart_moment = pymunk.moment_for_box(m_c, cart_size)
cart_body = pymunk.Body(mass=m_c, moment=cart_moment)
cart_body.position = 0.0, cart_size[1] / 2
cart_shape = pymunk.Poly.create_box(cart_body, cart_size)
cart_shape.color = black_color
# cart_shape.color = red_color
# cart_shape.fill_color = red_color
# cart_shape.color = black_color
cart_shape.friction = ground.friction
space.add(cart_body, cart_shape)
# space.debug_draw(options)
# pendulum 1
pend_1_body = pymunk.Body(mass=m_1, moment=m_1_moment)
pend_1_body.position = cart_body.position[0], cart_body.position[1] + cart_size[1] / 2 + l_1
pend_shape = pymunk.Circle(pend_1_body, m_1_radius)
pend_shape.filter = fil
space.add(pend_1_body, pend_shape)
# joint
joint = pymunk.constraints.PivotJoint(cart_body, pend_1_body, cart_body.position + (0, cart_size[1] / 2))
joint.collide_bodies = False
space.add(joint)
# pendulum 2
pend_2_body = pymunk.Body(mass=m_2, moment=m_2_moment)
pend_2_body.position = cart_body.position[0], cart_body.position[1] + cart_size[1] / 2 + (2 * l_2)
pend_shape2 = pymunk.Circle(pend_2_body, m_2_radius)
pend_shape2.filter = fil
space.add(pend_2_body, pend_shape2)
# joint 2
joint2 = pymunk.constraints.PivotJoint(pend_1_body, pend_2_body, cart_body.position + (0, cart_size[1] / 2 + l_2))
joint2.collide_bodies = False
space.add(joint2)
# space.debug_draw(options)
print(f"cart mass = {cart_body.mass:0.1f} kg")
print(f"pendulum 1 mass = {pend_1_body.mass:0.1f} kg, pendulum moment = {pend_1_body.moment:0.3f} kg*m^2")
print(f"pendulum 2 mass = {pend_2_body.mass:0.1f} kg, pendulum moment = {pend_2_body.moment:0.3f} kg*m^2")
force = 0.0
ref = 0.0
color = (200, 200, 200, 200)
label_x = pyglet.text.Label(text='', font_size=12, color=color, x=10, y=SCREEN_HEIGHT - 28)
label_th_1 = pyglet.text.Label(text='', font_size=12, color=color, x=10, y=SCREEN_HEIGHT - 58)
label_th_2 = pyglet.text.Label(text='', font_size=12, color=color, x=10, y=SCREEN_HEIGHT - 88)
label_force = pyglet.text.Label(text='', font_size=12, color=color, x=10, y=SCREEN_HEIGHT - 118)
labels = [label_x, label_th_1, label_th_2, label_force]
# data recorder so we can compare our results to our predictions
if os.path.exists(OUT_DATA):
os.remove(OUT_DATA)
with open(OUT_DATA, 'w') as f:
output_header = str()
for i, s in enumerate(output_labels):
if i == 0:
output_header = s
else:
output_header += ', '+s
output_header += '\n'
f.write(output_header)
f.close()
currtime = 0.0
record_data = True
def draw_body(offset, body):
for shape in body.shapes:
if isinstance(shape, pymunk.Circle):
vertices = []
num_points = 10
for ii in range(num_points):
angle = ii / num_points * 2 * math.pi
vertices.append(body.position + (shape.radius * math.cos(angle), shape.radius * math.sin(angle)))
points = []
for v in vertices:
points.append(int(v[0] * PPM) + offset[0])
points.append(int(v[1] * PPM) + offset[1])
data = ('v2i', tuple(points))
gl.glColor3b(255,255,255)
pyglet.graphics.draw(len(vertices), pyglet.gl.GL_LINE_LOOP, data)
elif isinstance(shape, pymunk.Poly):
# get vertices in world coordinates
vertices = [v.rotated(body.angle) + body.position for v in shape.get_vertices()]
# convert vertices to pixel coordinates
points = []
for v in vertices:
points.append(int(v[0] * PPM) + offset[0])
points.append(int(v[1] * PPM) + offset[1])
data = ('v2i', tuple(points))
gl.glColor3b(255,255,255)
pyglet.graphics.draw(len(vertices), pyglet.gl.GL_LINE_LOOP, data)
def draw_line_between(offset, pos1, pos2):
vertices = [pos1, pos2]
points = []
for v in vertices:
points.append(int(v[0] * PPM) + offset[0])
points.append(int(v[1] * PPM) + offset[1])
data = ('v2i', tuple(points))
gl.glColor3b(255,255,255)
pyglet.graphics.draw(len(vertices), pyglet.gl.GL_LINE_STRIP, data)
def draw_ground(offset):
vertices = [v + (0, ground.radius) for v in (ground.a, ground.b)]
# convert vertices to pixel coordinates
points = []
for v in vertices:
points.append(int(v[0] * PPM) + offset[0])
points.append(int(v[1] * PPM) + offset[1])
data = ('v2i', tuple(points))
pyglet.graphics.draw(len(vertices), pyglet.gl.GL_LINES, data)
@window.event
def on_draw():
window.clear()
# center view x around 0
offset = (250, 5)
draw_body(offset, cart_body)
draw_body(offset, pend_1_body)
draw_line_between(offset, cart_body.position + (0, cart_size[1] / 2), pend_1_body.position)
draw_body(offset, pend_2_body)
draw_line_between(offset, pend_1_body.position, pend_2_body.position)
draw_ground(offset)
for label in labels:
label.draw()
@window.event
def on_key_press(symbol, modifiers):
# Symbolic names:
if symbol == key.ESCAPE:
window.close()
def simulate(_):
global currtime
if currtime > SIM_DUR:
window.close()
# nprint('_',_)
# ensure we get a consistent simulation step - ignore the input dt value
dt = DT
# simulate the world
# NOTE: using substeps will mess up gains
space.step(dt)
# populate the current state
posx = cart_body.position[0]
velx = cart_body.velocity[0]
th_1 = pend_1_body.angle
th_1v = pend_1_body.angular_velocity
th_2 = pend_2_body.angle
th_2v = pend_2_body.angular_velocity
# dump our data so we can plot
if record_data:
with open(OUT_DATA, 'a+') as f:
f.write(f"{currtime:0.5f}, {posx:0.5f}, {velx:0.5f}, {th_1:0.5f}, {th_1v:0.5f}, {th_2:0.5f}, {th_2v:0.5f} \n")
f.close()
currtime += dt
# calculate our gain based on the current state
gain = K[0] * posx + K[1] * velx + K[2] * th_1 + K[3] * th_1v + K[4] * th_2 + K[5] * th_2v
# calculate the force required
global force
force = ref * Nbar - gain
# kill our motors if our angles get out of control
if math.fabs(pend_1_body.angle) > 1.0 or math.fabs(pend_2_body.angle) > 1.0:
force = 0.0
# cap our maximum force so it doesn't go crazy
if math.fabs(force) > MAX_FORCE:
force = math.copysign(MAX_FORCE, force)
# apply force to cart center of mass
cart_body.apply_force_at_local_point((force, 0.0), (0, 0))
def update_state_label(_):
'''
function to store the current state to draw on screen
'''
label_x.text = f'x: {cart_body.position[0]:0.3f} m'
label_th_1.text = f'theta_1: {pend_1_body.angle:0.3f} rad'
label_th_2.text = f'theta_2: {pend_2_body.angle:0.3f} rad'
label_force.text = f'force: {force:0.1f} N'
def update_reference(_, newref):
global ref
ref = newref
# callback for simulation
pyglet.clock.schedule_interval(simulate, DT)
pyglet.clock.schedule_interval(update_state_label, 0.25)
# schedule some small movements by updating our reference
pyglet.clock.schedule_once(update_reference, 2, 0.2)
pyglet.clock.schedule_once(update_reference, 7, 0.6)
pyglet.clock.schedule_once(update_reference, 12, 0.2)
pyglet.clock.schedule_once(update_reference, 17, 0.0)
pyglet.app.run()
f.close()
# data recorder so we can compare our results to our predictions
# f = open(OUT_DATA, 'r')
# ['t', 'x', 'dx', 'th_1', 'dth_1', 'th_2', 'dth_2', 'L1', 'L2']
# for i in test_IDs:
tConfig = tcm.unpkl(TEST_ID, CONF_DIR)
df = pd.read_csv(tConfig.out_data)
df = dvm.get_losses(df,
dataPath=tConfig.data_path,
lossPath=tConfig.loss_path)
# plot pose
# ['t', 'x', 'dx', 'th_1', 'dth_1', 'th_2', 'dth_2', 'L1', 'L2']
cols = [0, 1, 3, 5]
xy_df = df.iloc[:,cols].copy()
dvm.plot_df(xy_df,
plot_title='State Position',
labels=xy_df.columns,
test_id=tConfig.id,
out_dir=tConfig.out_dir,
end=END_,
show=SHOW_)
# plot vel
# ['t', 'x', 'dx', 'th_1', 'dth_1', 'th_2', 'dth_2', 'L1', 'L2']
cols = [0, 2, 4, 6]
xy_df = df.iloc[:,cols].copy()
dvm.plot_df(xy_df,
plot_title='State Velocity',
labels=xy_df.columns,
test_id=tConfig.id,
out_dir=tConfig.out_dir,
end=END_,
show=SHOW_)
# plot losses
# ['t', 'x', 'dx', 'th_1', 'dth_1', 'th_2', 'dth_2', 'L1', 'L2']
cols = [0, 7, 8]
xy_df = df.iloc[:,cols].copy()
dvm.plot_df(xy_df,
plot_title='State Losses',
labels=xy_df.columns,
test_id=tConfig.id,
out_dir=tConfig.out_dir,
end=END_,
show=SHOW_)
# print losses
dvm.print_losses(df)
| 31.064433 | 118 | 0.652867 |
251d295ac1daf4f6c0aa7d07697c6e03ea7c9186 | 1,128 | py | Python | generator/apigen/CommandParser.py | grbd/GBD.Build.BlackJack | 3e8d027625b7528af3674a373fd9931e3feaaab4 | [
"Apache-2.0"
] | 1 | 2017-05-26T00:18:26.000Z | 2017-05-26T00:18:26.000Z | generator/apigen/CommandParser.py | grbd/GBD.Build.BlackJack | 3e8d027625b7528af3674a373fd9931e3feaaab4 | [
"Apache-2.0"
] | null | null | null | generator/apigen/CommandParser.py | grbd/GBD.Build.BlackJack | 3e8d027625b7528af3674a373fd9931e3feaaab4 | [
"Apache-2.0"
] | null | null | null | """
A Command parser to parse over each jinja template for a given cmake command
"""
import os
from apigen.Logger import Logger
from jinja2 import Environment, PackageLoader, FileSystemLoader
class CommandParser(object):
def __init__(self, cmdfile: str, env: Environment, outdir: str):
super().__init__()
self.__log = Logger.getlogger()
self.CommandFilePath = cmdfile
self.__env = env
self.OutputDirectory = outdir
def ParseFile(self):
cmd_basefilename = os.path.basename(self.CommandFilePath)
self.__log.info("Parsing File: " + cmd_basefilename)
cmd_name = os.path.splitext(cmd_basefilename)[0]
cmd_outfile = os.path.join(self.OutputDirectory, cmd_basefilename)
#if (cmd_basefilename != "add_executable.py"):
# return
# Render the command output from the template
template = self.__env.get_template(cmd_basefilename)
tmpl_output = template.render(CmdName=cmd_name)
# Save the File
with open(cmd_outfile, "w") as text_file:
text_file.write(tmpl_output)
return
| 31.333333 | 76 | 0.675532 |
251d599be91a9d5e66da8bf669765945fc72709e | 299 | py | Python | 1_Sys_Module/sysIO.py | ericchou1/Top5PythonModulesForNetworkEngineers | c6aa92c3b7bf6668f049acc6d3ba295634b56027 | [
"Apache-2.0"
] | 5 | 2016-08-21T16:24:03.000Z | 2021-01-11T23:04:21.000Z | 1_Sys_Module/sysIO.py | ericchou1/Top5PythonModulesForNetworkEngineers | c6aa92c3b7bf6668f049acc6d3ba295634b56027 | [
"Apache-2.0"
] | null | null | null | 1_Sys_Module/sysIO.py | ericchou1/Top5PythonModulesForNetworkEngineers | c6aa92c3b7bf6668f049acc6d3ba295634b56027 | [
"Apache-2.0"
] | 5 | 2016-11-05T17:05:39.000Z | 2022-01-31T20:19:12.000Z | #!/usr/bin/env python
import sys
print("Please tell me your favorite color: ")
color = sys.stdin.readline()
animal = raw_input("Please tell me your favorite animal: ")
print(animal)
sys.stdout.write("Your favorite color is: " + color + " favorite animal is: " + animal + "\n")
print("*" * 10)
| 19.933333 | 94 | 0.67893 |
251e7d6fbbff67cb94790461d92eb77f3f88ed53 | 111 | py | Python | comet/handler/__init__.py | shinybrar/Comet | 4229092fca74c130a7d4ecd4dbd22ae85f7e6308 | [
"BSD-2-Clause"
] | 15 | 2015-11-29T18:53:58.000Z | 2022-03-09T15:47:30.000Z | comet/handler/__init__.py | shinybrar/Comet | 4229092fca74c130a7d4ecd4dbd22ae85f7e6308 | [
"BSD-2-Clause"
] | 29 | 2016-01-21T18:10:45.000Z | 2021-10-01T16:41:12.000Z | comet/handler/__init__.py | shinybrar/Comet | 4229092fca74c130a7d4ecd4dbd22ae85f7e6308 | [
"BSD-2-Clause"
] | 11 | 2016-01-22T14:05:51.000Z | 2022-03-09T17:49:56.000Z | # Comet VOEvent Broker.
# Event handlers.
from comet.handler.relay import *
from comet.handler.spawn import *
| 18.5 | 33 | 0.765766 |
251efaea3581632f73c0223d75becaac1ffc7162 | 954 | py | Python | measure_mate/tests/api/test_template.py | niche-tester/measure-mate | c3acba57747bcb89fe0c6b9509ec90f04a581506 | [
"MIT"
] | 15 | 2015-12-14T02:20:31.000Z | 2022-01-30T04:36:39.000Z | measure_mate/tests/api/test_template.py | rloomans/measure-mate | e89f9c8e1faa1920496f1c997f6d87ec0f9bd7c2 | [
"MIT"
] | 1,403 | 2017-02-16T01:00:04.000Z | 2022-03-15T21:12:13.000Z | measure_mate/tests/api/test_template.py | rloomans/measure-mate | e89f9c8e1faa1920496f1c997f6d87ec0f9bd7c2 | [
"MIT"
] | 10 | 2015-12-18T01:30:46.000Z | 2022-01-30T04:36:41.000Z | from rest_framework import status
from rest_framework.reverse import reverse
from rest_framework.test import APITestCase
from measure_mate.models import Template
from measure_mate.tests.factories import TemplateFactory
class TemplateAPITestCases(APITestCase):
def test_list_template(self):
"""
List all templates and check that all fields are returned
"""
template = TemplateFactory()
template2 = TemplateFactory()
url = reverse('template-list')
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data[0]['id'], template.id)
self.assertEqual(response.data[0]['name'], template.name)
self.assertEqual(response.data[0]['taggable'], template.taggable)
self.assertEqual(response.data[0]['enabled'], template.enabled)
self.assertEqual(len(response.data), Template.objects.count())
| 38.16 | 73 | 0.715933 |
251f5df96375dbae57ea9bdc6db0a3e28bc73439 | 658 | py | Python | ecommerce/shop_management/migrations/0003_shop_created_at_shop_last_updated.py | mhdirajabi/django-drf-e-commerce | 526044a728f9f073a21386ff7f67ac570f4755c6 | [
"MIT"
] | null | null | null | ecommerce/shop_management/migrations/0003_shop_created_at_shop_last_updated.py | mhdirajabi/django-drf-e-commerce | 526044a728f9f073a21386ff7f67ac570f4755c6 | [
"MIT"
] | null | null | null | ecommerce/shop_management/migrations/0003_shop_created_at_shop_last_updated.py | mhdirajabi/django-drf-e-commerce | 526044a728f9f073a21386ff7f67ac570f4755c6 | [
"MIT"
] | null | null | null | # Generated by Django 4.0 on 2021-12-29 11:36
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('shop_management', '0002_alter_shop_type_alter_shoptype_name'),
]
operations = [
migrations.AddField(
model_name='shop',
name='created_at',
field=models.DateTimeField(auto_now_add=True, null=True, verbose_name='تاریخ ایجاد'),
),
migrations.AddField(
model_name='shop',
name='last_updated',
field=models.DateTimeField(auto_now=True, null=True, verbose_name='آخرین بروزرسانی'),
),
]
| 27.416667 | 97 | 0.62462 |
252023af22ef5365f5e3d2b2d4c333240848fc36 | 4,085 | py | Python | lib/model.py | lanseyege/rl_algorithms | 5bdc5211b84fa4e9f16e68e1407825fdcacacec0 | [
"MIT"
] | null | null | null | lib/model.py | lanseyege/rl_algorithms | 5bdc5211b84fa4e9f16e68e1407825fdcacacec0 | [
"MIT"
] | null | null | null | lib/model.py | lanseyege/rl_algorithms | 5bdc5211b84fa4e9f16e68e1407825fdcacacec0 | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
from lib.util import normal_log_density
class ModelActor(nn.Module):
def __init__(self, obs_size, act_size, active='tanh', hidden_size=128, lstd=-0.0):
super(ModelActor, self).__init__()
if active == 'tanh':
self.active = torch.tanh
else:
pass
self.linear1 = nn.Linear(obs_size, hidden_size)
self.linear2 = nn.Linear(hidden_size, hidden_size)
self.linear3 = nn.Linear(hidden_size, act_size)
self.linear3.weight.data.mul_(0.1)
self.linear3.bias.data.mul_(0.0)
self.logstd = nn.Parameter(torch.ones(1, act_size) * lstd)
def forward(self, x):
x = self.linear1(x)
x = self.active(x)
x = self.linear2(x)
x = self.active(x)
mean = self.linear3(x)
logstd = self.logstd.expand_as(mean)
std = torch.exp(logstd)
return mean, logstd, std
def select_action(self, x):
mean, _, std = self.forward(x)
#action = mean + std * torch.normal(mean=torch.zeros_like(mean), std=torch.ones_like(std))
action = torch.normal(mean, std)
#print(mean)
#print(std)
#print(action)
#print(torch.normal(mean, std))
#torch.normal(mean, std)
return action, mean, std
def get_log_prob(self, x, action):
mean, logstd, std = self.forward(x)
return normal_log_density(action, mean, logstd, std)
def get_kl(self, x, model):
mean, logstd, std = self.forward(x)
#mean_, logstd_, std_ = model(x)
mean_, logstd_, std_ = mean.detach(), logstd.detach(), std.detach()
kl = logstd - logstd_ + (std_ ** 2 + (mean_ - mean) ** 2) / (2.0 * std ** 2) - 0.5
return kl.sum(1, keepdim=True)
def get_ent(self, x ):
mean, logstd, std = self.forward(x)
return (logstd + 0.5 * np.log(2.0*np.pi*np.e)).sum(-1)
class ModelCritic(nn.Module):
def __init__(self, obs_size, hidden_size = 128):
super(ModelCritic, self).__init__()
self.active = torch.tanh
self.linear1 = nn.Linear(obs_size, hidden_size)
self.linear2 = nn.Linear(hidden_size, hidden_size)
self.linear3 = nn.Linear(hidden_size, 1)
self.linear3.weight.data.mul_(0.1)
self.linear3.bias.data.mul_(0.0)
def forward(self, x):
x = self.linear1(x)
x = self.active(x)
x - self.linear2(x)
x - self.active(x)
value = self.linear3(x)
return value
class ModelDCritic(nn.Module):
def __init__(self, obs_size, act_size, hidden_size=128, ):
super(ModelDCritic, self).__init__()
self.active = torch.tanh
self.linear1 = nn.Linear(obs_size+act_size, hidden_size)
self.linear2 = nn.Linear(hidden_size, hidden_size)
self.linear3 = nn.Linear(hidden_size, 1)
self.linear3.weight.data.mul_(0.1)
self.linear3.bias.data.mul_(0.0)
def forward(self, x):
x = self.linear1(x)
x = self.active(x)
x = self.linear2(x)
x = self.active(x)
value = torch.sigmoid(self.linear3(x))
return value
class DDPGActor(nn.Module):
def __init__(self, obs_size, act_size, active='tanh', hidden_size=128):
super(ModelActor, self).__init__()
self.active = torch.tanh
self.linear1 = nn.Linear(obs_size, hidden_size)
self.linear2 = nn.Linear(hidden_size, hidden_size)
self.linear3 = nn.Linear(hidden_size, act_size)
self.linear3.weight.data.mul_(0.1)
self.linear3.bias.data.mul_(0.0)
def forward(self, x):
x = self.linear1(x)
x = self.active(x)
x = self.linear2(x)
x = self.active(x)
x = self.linear3(x)
return self.active(x)
def select_action(self, x):
x = self.forward(x)
def get_log_prob(self, ):
pass
def get_kl(self, ):
pass
def get_ent(self,):
pass
class DDPGCritic(nn.Module):
def __init__(self, ):
pass
def forward(self, ):
pass
| 31.183206 | 98 | 0.59388 |
252143e0b4bc8782465cc8f472bab67d3793cee0 | 1,129 | py | Python | python/test_2020_04_2.py | wensby/advent-of-code | 50cd7fa2d35674d868a79ac8c75be24a43267e2b | [
"MIT"
] | null | null | null | python/test_2020_04_2.py | wensby/advent-of-code | 50cd7fa2d35674d868a79ac8c75be24a43267e2b | [
"MIT"
] | null | null | null | python/test_2020_04_2.py | wensby/advent-of-code | 50cd7fa2d35674d868a79ac8c75be24a43267e2b | [
"MIT"
] | null | null | null | import importlib
import unittest
solution = importlib.import_module('2020_04_2')
class Test2020Day4Part1(unittest.TestCase):
    """Tests for the 2020 day 4 part 2 passport-validation solution.

    NOTE(review): the class name says Part1 but the module under test is
    2020_04_2 (part 2); name kept unchanged so test discovery/CI references
    still resolve.
    """

    def test_example1(self):
        # Official part-2 example: four invalid passports followed by four
        # valid ones; the solution must count exactly 4 valid passports.
        # Renamed the local from `input` to avoid shadowing the builtin.
        passport_batch = (
            'eyr:1972 cid:100\n'
            'hcl:#18171d ecl:amb hgt:170 pid:186cm iyr:2018 byr:1926\n'
            '\n'
            'iyr:2019\n'
            'hcl:#602927 eyr:1967 hgt:170cm\n'
            'ecl:grn pid:012533040 byr:1946\n'
            '\n'
            'hcl:dab227 iyr:2012\n'
            'ecl:brn hgt:182cm pid:021572410 eyr:2020 byr:1992 cid:277\n'
            '\n'
            'hgt:59cm ecl:zzz\n'
            'eyr:2038 hcl:74454a iyr:2023\n'
            'pid:3556412378 byr:2007\n'
            '\n'
            'pid:087499704 hgt:74in ecl:grn iyr:2012 eyr:2030 byr:1980\n'
            'hcl:#623a2f\n'
            '\n'
            'eyr:2029 ecl:blu cid:129 byr:1989\n'
            'iyr:2014 pid:896056539 hcl:#a97842 hgt:165cm\n'
            '\n'
            'hcl:#888785\n'
            'hgt:164cm byr:2001 iyr:2015 cid:88\n'
            'pid:545766238 ecl:hzl\n'
            'eyr:2022\n'
            '\n'
            'iyr:2010 hgt:158cm hcl:#b6652a ecl:blu byr:1944 eyr:2021 pid:093154719\n'
        )
        self.assertEqual(solution.run(passport_batch), 4)
| 29.710526 | 82 | 0.578388 |
252147a24fb71425db336b4bd835e50e021bad1a | 1,649 | py | Python | acme/agents/jax/ail/__init__.py | Tsaousis/acme | 14278693bcc5fef0839ac60792d452d3d80acfd7 | [
"Apache-2.0"
] | 2,650 | 2020-06-01T16:31:25.000Z | 2022-03-31T07:32:41.000Z | acme/agents/jax/ail/__init__.py | Tsaousis/acme | 14278693bcc5fef0839ac60792d452d3d80acfd7 | [
"Apache-2.0"
] | 199 | 2020-06-02T01:09:09.000Z | 2022-03-31T17:11:20.000Z | acme/agents/jax/ail/__init__.py | Tsaousis/acme | 14278693bcc5fef0839ac60792d452d3d80acfd7 | [
"Apache-2.0"
] | 344 | 2020-06-01T16:45:21.000Z | 2022-03-30T11:15:09.000Z | # Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementations of a AIL agent."""
from acme.agents.jax.ail import losses
from acme.agents.jax.ail import rewards
from acme.agents.jax.ail.agents import AIL
from acme.agents.jax.ail.agents import DistributedAIL
from acme.agents.jax.ail.builder import AILBuilder
from acme.agents.jax.ail.config import AILConfig
from acme.agents.jax.ail.dac_agents import DAC
from acme.agents.jax.ail.dac_agents import DACConfig
from acme.agents.jax.ail.dac_agents import DistributedDAC
from acme.agents.jax.ail.gail_agents import DistributedGAIL
from acme.agents.jax.ail.gail_agents import GAIL
from acme.agents.jax.ail.gail_agents import GAILConfig
from acme.agents.jax.ail.learning import AILLearner
from acme.agents.jax.ail.networks import AILNetworks
from acme.agents.jax.ail.networks import AIRLModule
from acme.agents.jax.ail.networks import compute_ail_reward
from acme.agents.jax.ail.networks import DiscriminatorMLP
from acme.agents.jax.ail.networks import DiscriminatorModule
from acme.agents.jax.ail.networks import make_discriminator
| 45.805556 | 74 | 0.814433 |
2521a1ac6de3b8964ba83ce10e729714793f678d | 2,578 | py | Python | cineapp/push.py | ptitoliv/cineapp | 4b6a8c68144436c5497353135a013ea783cfd224 | [
"MIT"
] | 2 | 2016-12-02T02:29:01.000Z | 2019-03-03T15:48:50.000Z | cineapp/push.py | ptitoliv/cineapp | 4b6a8c68144436c5497353135a013ea783cfd224 | [
"MIT"
] | 128 | 2016-05-22T21:44:20.000Z | 2022-03-11T23:14:18.000Z | cineapp/push.py | ptitoliv/cineapp | 4b6a8c68144436c5497353135a013ea783cfd224 | [
"MIT"
] | 1 | 2017-08-20T14:14:52.000Z | 2017-08-20T14:14:52.000Z | from __future__ import print_function
from cineapp import app, db, lm
from flask_login import login_required
from flask import jsonify, session, g, url_for, request
from pywebpush import webpush, WebPushException
from cineapp.models import PushNotification
import json, traceback, sys, datetime, time
from cineapp.auth import guest_control
@app.route('/notifications/subscribe', methods=['POST'])
@login_required
@guest_control
def notification_subscribe():
    """Register the browser's Web Push subscription for the logged-in user.

    Reads a PushSubscription JSON object from the POST body (expects
    "endpoint" and "keys" with "p256dh"/"auth" entries — the standard Push
    API shape) and persists it alongside the session id and user id.

    Returns:
        JSON with "status"/"message"; "failure" if the database write fails.
    """
    app.logger.info('New user subscription !!')
    subscription = request.get_json()
    app.logger.info('User id: %s, Subscription data: %s' % (g.user.id,subscription))
    # Build the subscription row from the Push API payload.
    # NOTE(review): assumes the client always sends well-formed subscription
    # JSON; a missing key here raises before the try block — confirm callers.
    push_notification = PushNotification(endpoint_id=subscription["endpoint"], public_key=subscription["keys"]["p256dh"], auth_token=subscription["keys"]["auth"], session_id=session.sid, user_id=g.user.id)
    # Store the subscription data into the database.
    try:
        db.session.add(push_notification)
        db.session.commit()
        app.logger.info('User subscription correctly stored into database')
    except Exception as e:
        # Broad catch is deliberate: any persistence failure maps to a JSON error.
        app.logger.error('Unable to store subscription user in database %s', repr(e))
        return jsonify({ "status": "failure", "message": u"Unable to store subscription object into database" })
    return jsonify({ "status": "success", "message": u"Endpoint enregistray" })
def notification_send(text, active_subscriptions):
    """Send a web-push chat notification to every active subscription.

    Subscriptions rejected by the push service (WebPushException) are logged
    and removed from the database so they are not retried.

    Args:
        text: Message body to show in the notification.
        active_subscriptions: Iterable of PushNotification rows exposing
            ``serialize()``.
    """
    # The 12-hour expiry claim and the payload are loop-invariant: build once.
    expiration_date = time.mktime((datetime.datetime.now() + datetime.timedelta(hours=12)).timetuple())
    payload = json.dumps({ "url": url_for('chat'), "message_title": "Message depuis le chat", "message": text })
    for cur_active_sub in active_subscriptions:
        try:
            webpush(cur_active_sub.serialize(),
                    data=payload,
                    vapid_private_key=app.config["NOTIF_PRIVATE_KEY_PATH"],
                    vapid_claims={
                        "sub": "mailto:ptitoliv@gmail.com",
                        "exp": expiration_date
                    }
                    )
        except WebPushException as ex:
            # The push service refused this endpoint: drop the stale subscription.
            app.logger.error("Subscription for endpoint %s is incorrect ==> Delete it", cur_active_sub)
            # Bug fix: the original wrapped print_exc() in print(), which also
            # printed its None return value.
            traceback.print_exc(file=sys.stdout)
            notification_unsubscribe(cur_active_sub)
            # Bug fix: the original printed a (format-string, args) tuple
            # instead of formatting it.
            print("I'm sorry, Dave, but I can't do that: {}".format(repr(ex)))
            # pywebpush can raise without an HTTP response attached; guard
            # before trying to decode it.
            if ex.response is not None:
                print(ex.response.json())
def notification_unsubscribe(sub):
    """Delete a stored push subscription row from the database.

    Args:
        sub: PushNotification row to remove.

    Returns:
        True if the delete committed, False if any database error occurred
        (the error is logged, never raised to the caller).
    """
    try:
        db.session.delete(sub)
        db.session.commit()
        app.logger.info('User subscription correctly delete from database')
        return True
    except Exception as e:
        # Best-effort cleanup: swallow and report failure via the return value.
        app.logger.error('Unable to remove subscription user in database %s', repr(e))
        return False
| 39.060606 | 202 | 0.747867 |
252492e17fae91abe1251ab7bb4d09c4949ed235 | 37,380 | py | Python | pacu/models/awsapi/iotanalytics.py | RyanJarv/Pacu2 | 27df4bcf296fc8f467d3dc671a47bf9519ce7a24 | [
"MIT"
] | 1 | 2022-03-09T14:51:54.000Z | 2022-03-09T14:51:54.000Z | pacu/models/awsapi/iotanalytics.py | RyanJarv/Pacu2 | 27df4bcf296fc8f467d3dc671a47bf9519ce7a24 | [
"MIT"
] | null | null | null | pacu/models/awsapi/iotanalytics.py | RyanJarv/Pacu2 | 27df4bcf296fc8f467d3dc671a47bf9519ce7a24 | [
"MIT"
] | null | null | null | # generated by datamodel-codegen:
# filename: openapi.yaml
# timestamp: 2021-12-31T02:50:50+00:00
from __future__ import annotations
from datetime import datetime
from enum import Enum
from typing import Annotated, Any, List, Optional
from pydantic import BaseModel, Extra, Field
class ResourceNotFoundException(BaseModel):
__root__: Any
class InvalidRequestException(ResourceNotFoundException):
pass
class InternalFailureException(ResourceNotFoundException):
pass
class ServiceUnavailableException(ResourceNotFoundException):
pass
class ThrottlingException(ResourceNotFoundException):
pass
class CancelPipelineReprocessingResponse(BaseModel):
pass
class ServiceManagedChannelS3Storage(CancelPipelineReprocessingResponse):
"""
Used to store channel data in an S3 bucket managed by IoT Analytics. You can't change the choice of S3 storage after the data store is created.
"""
pass
class UnlimitedRetentionPeriod(BaseModel):
__root__: bool
class RetentionPeriodInDays(BaseModel):
__root__: Annotated[int, Field(ge=1.0)]
class ResourceAlreadyExistsException(ResourceNotFoundException):
pass
class LimitExceededException(ResourceNotFoundException):
pass
class UnlimitedVersioning(UnlimitedRetentionPeriod):
pass
class MaxVersions(BaseModel):
__root__: Annotated[int, Field(ge=1.0, le=1000.0)]
class ServiceManagedDatastoreS3Storage(CancelPipelineReprocessingResponse):
"""
Used to store data in an Amazon S3 bucket managed by IoT Analytics. You can't change the choice of Amazon S3 storage after your data store is created.
"""
pass
class JsonConfiguration(CancelPipelineReprocessingResponse):
"""
Contains the configuration information of the JSON format.
"""
pass
class RoleArn(BaseModel):
__root__: Annotated[str, Field(max_length=2048, min_length=20)]
class LoggingLevel(Enum):
ERROR = 'ERROR'
class LoggingEnabled(UnlimitedRetentionPeriod):
pass
class MessagePayload(BaseModel):
__root__: str
class TagResourceResponse(CancelPipelineReprocessingResponse):
pass
class UntagResourceResponse(CancelPipelineReprocessingResponse):
pass
class TagKey(BaseModel):
__root__: Annotated[str, Field(max_length=256, min_length=1)]
class ActivityBatchSize(MaxVersions):
pass
class ActivityName(BaseModel):
__root__: Annotated[str, Field(max_length=128, min_length=1)]
class AttributeNameMapping(BaseModel):
pass
class Config:
extra = Extra.allow
class AttributeName(TagKey):
pass
class AttributeNames(BaseModel):
__root__: Annotated[List[AttributeName], Field(max_items=50, min_items=1)]
class MessageId(BaseModel):
__root__: Annotated[str, Field(max_length=128, min_length=1, regex='\\p{ASCII}*')]
class ErrorCode(MessagePayload):
pass
class ErrorMessage(MessagePayload):
pass
class ChannelName(BaseModel):
__root__: Annotated[
str, Field(max_length=128, min_length=1, regex='(^(?!_{2}))(^[a-zA-Z0-9_]+$)')
]
class BucketKeyExpression(BaseModel):
__root__: Annotated[
str, Field(max_length=255, min_length=1, regex="^[a-zA-Z0-9!_.*'()/{}:-]*$")
]
class BucketName(BaseModel):
__root__: Annotated[
str, Field(max_length=255, min_length=3, regex='^[a-zA-Z0-9.\\-_]*$')
]
class PipelineName(ChannelName):
pass
class ReprocessingId(MessagePayload):
pass
class CancelPipelineReprocessingRequest(BaseModel):
pass
class ChannelArn(MessagePayload):
pass
class ChannelStatus(Enum):
CREATING = 'CREATING'
ACTIVE = 'ACTIVE'
DELETING = 'DELETING'
class RetentionPeriod(BaseModel):
"""
How long, in days, message data is kept.
"""
unlimited: Optional[UnlimitedRetentionPeriod] = None
numberOfDays: Optional[RetentionPeriodInDays] = None
class Timestamp(BaseModel):
__root__: datetime
class ServiceManagedChannelS3StorageSummary(CancelPipelineReprocessingResponse):
"""
Used to store channel data in an S3 bucket managed by IoT Analytics.
"""
pass
class ColumnName(BaseModel):
__root__: Annotated[
str,
Field(
max_length=255,
min_length=1,
regex='^[A-Za-z_]([A-Za-z0-9]*|[A-Za-z0-9][A-Za-z0-9_]*)$',
),
]
class ColumnDataType(BaseModel):
__root__: Annotated[str, Field(max_length=131072, min_length=1)]
class Column(BaseModel):
"""
Contains information about a column that stores your data.
"""
name: ColumnName
type: ColumnDataType
class Columns(BaseModel):
__root__: List[Column]
class ComputeType(Enum):
ACU_1 = 'ACU_1'
ACU_2 = 'ACU_2'
class Image(BaseModel):
__root__: Annotated[str, Field(max_length=255)]
class DatasetName(ChannelName):
pass
class DatasetContentVersion(BaseModel):
__root__: Annotated[str, Field(max_length=36, min_length=7)]
class CreateDatasetContentRequest(BaseModel):
versionId: Optional[DatasetContentVersion] = None
class VersioningConfiguration(BaseModel):
"""
Information about the versioning of dataset contents.
"""
unlimited: Optional[UnlimitedVersioning] = None
maxVersions: Optional[MaxVersions] = None
class DatasetArn(MessagePayload):
pass
class DatastoreName(ChannelName):
pass
class DatastoreArn(MessagePayload):
pass
class PipelineArn(MessagePayload):
pass
class S3KeyPrefix(BaseModel):
__root__: Annotated[
str, Field(max_length=255, min_length=1, regex="^[a-zA-Z0-9!_.*'()/{}:-]*/$")
]
class CustomerManagedDatastoreS3StorageSummary(BaseModel):
"""
Contains information about the data store that you manage.
"""
bucket: Optional[BucketName] = None
keyPrefix: Optional[S3KeyPrefix] = None
roleArn: Optional[RoleArn] = None
class DatasetActionName(BaseModel):
__root__: Annotated[
str, Field(max_length=128, min_length=1, regex='^[a-zA-Z0-9_]+$')
]
class DatasetActionType(Enum):
QUERY = 'QUERY'
CONTAINER = 'CONTAINER'
class EntryName(MessagePayload):
pass
class DatasetContentState(Enum):
CREATING = 'CREATING'
SUCCEEDED = 'SUCCEEDED'
FAILED = 'FAILED'
class Reason(MessagePayload):
pass
class DatasetContentStatus(BaseModel):
"""
The state of the dataset contents and the reason they are in this state.
"""
state: Optional[DatasetContentState] = None
reason: Optional[Reason] = None
class DatasetContentSummary(BaseModel):
"""
Summary information about dataset contents.
"""
version: Optional[DatasetContentVersion] = None
status: Optional[DatasetContentStatus] = None
creationTime: Optional[Timestamp] = None
scheduleTime: Optional[Timestamp] = None
completionTime: Optional[Timestamp] = None
class DatasetContentSummaries(BaseModel):
__root__: List[DatasetContentSummary]
class DatasetContentVersionValue(BaseModel):
"""
The dataset whose latest contents are used as input to the notebook or application.
"""
datasetName: DatasetName
class PresignedURI(MessagePayload):
pass
class TriggeringDataset(BaseModel):
"""
Information about the dataset whose content generation triggers the new dataset content generation.
"""
name: DatasetName
class IotSiteWiseCustomerManagedDatastoreS3Storage(BaseModel):
"""
Used to store data used by IoT SiteWise in an Amazon S3 bucket that you manage. You can't change the choice of Amazon S3 storage after your data store is created.
"""
bucket: BucketName
keyPrefix: Optional[S3KeyPrefix] = None
class IotSiteWiseCustomerManagedDatastoreS3StorageSummary(BaseModel):
"""
Contains information about the data store that you manage, which stores data used by IoT SiteWise.
"""
bucket: Optional[BucketName] = None
keyPrefix: Optional[S3KeyPrefix] = None
class DatastoreIotSiteWiseMultiLayerStorageSummary(BaseModel):
"""
Contains information about the data store that you manage, which stores data used by IoT SiteWise.
"""
customerManagedS3Storage: Optional[
IotSiteWiseCustomerManagedDatastoreS3StorageSummary
] = None
class ServiceManagedDatastoreS3StorageSummary(CancelPipelineReprocessingResponse):
"""
Contains information about the data store that is managed by IoT Analytics.
"""
pass
class DatastoreStorageSummary(BaseModel):
"""
Contains information about your data store.
"""
serviceManagedS3: Optional[ServiceManagedDatastoreS3StorageSummary] = None
customerManagedS3: Optional[CustomerManagedDatastoreS3StorageSummary] = None
iotSiteWiseMultiLayerStorage: Optional[
DatastoreIotSiteWiseMultiLayerStorageSummary
] = None
class FileFormatType(Enum):
JSON = 'JSON'
PARQUET = 'PARQUET'
class DeleteChannelRequest(BaseModel):
pass
class DeleteDatasetContentRequest(BaseModel):
pass
class DeleteDatasetRequest(BaseModel):
pass
class DeleteDatastoreRequest(BaseModel):
pass
class DeletePipelineRequest(BaseModel):
pass
class OffsetSeconds(BaseModel):
__root__: int
class TimeExpression(MessagePayload):
pass
class DeltaTime(BaseModel):
"""
Used to limit data to that which has arrived since the last execution of the action.
"""
offsetSeconds: OffsetSeconds
timeExpression: TimeExpression
class SessionTimeoutInMinutes(BaseModel):
__root__: Annotated[int, Field(ge=1.0, le=60.0)]
class DeltaTimeSessionWindowConfiguration(BaseModel):
"""
<p>A structure that contains the configuration information of a delta time session window.</p> <p> <a href="https://docs.aws.amazon.com/iotanalytics/latest/APIReference/API_DeltaTime.html"> <code>DeltaTime</code> </a> specifies a time interval. You can use <code>DeltaTime</code> to create dataset contents with data that has arrived in the data store since the last execution. For an example of <code>DeltaTime</code>, see <a href="https://docs.aws.amazon.com/iotanalytics/latest/userguide/automate-create-dataset.html#automate-example6"> Creating a SQL dataset with a delta window (CLI)</a> in the <i>IoT Analytics User Guide</i>.</p>
"""
timeoutInMinutes: SessionTimeoutInMinutes
class IncludeStatisticsFlag(UnlimitedRetentionPeriod):
pass
class DescribeChannelRequest(BaseModel):
pass
class DescribeDatasetRequest(BaseModel):
pass
class DescribeDatastoreRequest(BaseModel):
pass
class DescribeLoggingOptionsRequest(BaseModel):
pass
class LoggingOptions(BaseModel):
"""
Information about logging options.
"""
roleArn: RoleArn
level: LoggingLevel
enabled: LoggingEnabled
class DescribePipelineRequest(BaseModel):
pass
class DoubleValue(BaseModel):
__root__: float
class EndTime(Timestamp):
pass
class SizeInBytes(DoubleValue):
pass
class FilterExpression(TagKey):
pass
class GetDatasetContentRequest(BaseModel):
pass
class GlueTableName(BaseModel):
__root__: Annotated[str, Field(max_length=150, min_length=1)]
class GlueDatabaseName(GlueTableName):
pass
class GlueConfiguration(BaseModel):
"""
Configuration information for coordination with Glue, a fully managed extract, transform and load (ETL) service.
"""
tableName: GlueTableName
databaseName: GlueDatabaseName
class IotEventsInputName(BaseModel):
__root__: Annotated[
str, Field(max_length=128, min_length=1, regex='^[a-zA-Z][a-zA-Z0-9_]*$')
]
class LambdaName(BaseModel):
__root__: Annotated[
str, Field(max_length=64, min_length=1, regex='^[a-zA-Z0-9_-]+$')
]
class LateDataRuleName(DatasetActionName):
pass
class LateDataRuleConfiguration(BaseModel):
"""
The information needed to configure a delta time session window.
"""
deltaTimeSessionWindowConfiguration: Optional[
DeltaTimeSessionWindowConfiguration
] = None
class NextToken(MessagePayload):
pass
class MaxResults(BaseModel):
__root__: Annotated[int, Field(ge=1.0, le=250.0)]
class ListChannelsRequest(BaseModel):
pass
class ListDatasetContentsRequest(BaseModel):
pass
class ListDatasetsRequest(BaseModel):
pass
class ListDatastoresRequest(BaseModel):
pass
class ListPipelinesRequest(BaseModel):
pass
class ResourceArn(RoleArn):
pass
class ListTagsForResourceRequest(BaseModel):
pass
class LogResult(MessagePayload):
pass
class MathExpression(TagKey):
pass
class MaxMessages(BaseModel):
__root__: Annotated[int, Field(ge=1.0, le=10.0)]
class MessagePayloads(BaseModel):
__root__: Annotated[List[MessagePayload], Field(max_items=10, min_items=1)]
class OutputFileName(BaseModel):
__root__: Annotated[str, Field(regex='[\\w\\.-]{1,255}')]
class OutputFileUriValue(BaseModel):
"""
The value of the variable as a structure that specifies an output file URI.
"""
fileName: OutputFileName
class SchemaDefinition(BaseModel):
"""
Information needed to define a schema.
"""
columns: Optional[Columns] = None
class PartitionAttributeName(DatasetActionName):
pass
class PutLoggingOptionsRequest(BaseModel):
loggingOptions: LoggingOptions
class QueryFilter(BaseModel):
"""
Information that is used to filter message data, to segregate it according to the timeframe in which it arrives.
"""
deltaTime: Optional[DeltaTime] = None
class QueryFilters(BaseModel):
__root__: Annotated[List[QueryFilter], Field(max_items=1, min_items=0)]
class ReprocessingStatus(Enum):
RUNNING = 'RUNNING'
SUCCEEDED = 'SUCCEEDED'
CANCELLED = 'CANCELLED'
FAILED = 'FAILED'
class ReprocessingSummary(BaseModel):
"""
Information about pipeline reprocessing.
"""
id: Optional[ReprocessingId] = None
status: Optional[ReprocessingStatus] = None
creationTime: Optional[Timestamp] = None
class VolumeSizeInGB(BaseModel):
__root__: Annotated[int, Field(ge=1.0, le=50.0)]
class S3PathChannelMessage(BaseModel):
__root__: Annotated[
str,
Field(
max_length=1024,
min_length=1,
regex="^[a-zA-Z0-9/_!'(){}\\*\\s\\.\\-\\=\\:]+$",
),
]
class StartTime(Timestamp):
pass
class SampleChannelDataRequest(BaseModel):
pass
class ScheduleExpression(MessagePayload):
pass
class SqlQuery(MessagePayload):
pass
class StringValue(BaseModel):
__root__: Annotated[str, Field(max_length=1024, min_length=0)]
class TagValue(TagKey):
pass
class TagKeyList(BaseModel):
__root__: Annotated[List[TagKey], Field(max_items=50, min_items=1)]
class TimestampFormat(BaseModel):
__root__: Annotated[
str, Field(max_length=50, min_length=1, regex="^[a-zA-Z0-9\\s\\[\\]_,.'/:-]*$")
]
class UntagResourceRequest(BaseModel):
pass
class VariableName(TagKey):
pass
class Variable(BaseModel):
"""
An instance of a variable to be passed to the <code>containerAction</code> execution. Each variable must have a name and a value given by one of <code>stringValue</code>, <code>datasetContentVersionValue</code>, or <code>outputFileUriValue</code>.
"""
name: VariableName
stringValue: Optional[StringValue] = None
doubleValue: Optional[DoubleValue] = None
datasetContentVersionValue: Optional[DatasetContentVersionValue] = None
outputFileUriValue: Optional[OutputFileUriValue] = None
class Message(BaseModel):
"""
Information about a message.
"""
messageId: MessageId
payload: MessagePayload
class CreateChannelResponse(BaseModel):
channelName: Optional[ChannelName] = None
channelArn: Optional[ChannelArn] = None
retentionPeriod: Optional[RetentionPeriod] = None
class CustomerManagedChannelS3Storage(BaseModel):
"""
Used to store channel data in an S3 bucket that you manage. If customer-managed storage is selected, the <code>retentionPeriod</code> parameter is ignored. You can't change the choice of S3 storage after the data store is created.
"""
bucket: BucketName
keyPrefix: Optional[S3KeyPrefix] = None
roleArn: RoleArn
class Tag(BaseModel):
"""
A set of key-value pairs that are used to manage the resource.
"""
key: TagKey
value: TagValue
class CreateDatasetResponse(BaseModel):
datasetName: Optional[DatasetName] = None
datasetArn: Optional[DatasetArn] = None
retentionPeriod: Optional[RetentionPeriod] = None
class LateDataRule(BaseModel):
"""
A structure that contains the name and configuration information of a late data rule.
"""
ruleName: Optional[LateDataRuleName] = None
ruleConfiguration: LateDataRuleConfiguration
class CreateDatasetContentResponse(BaseModel):
versionId: Optional[DatasetContentVersion] = None
class CreateDatastoreResponse(BaseModel):
datastoreName: Optional[DatastoreName] = None
datastoreArn: Optional[DatastoreArn] = None
retentionPeriod: Optional[RetentionPeriod] = None
class CustomerManagedDatastoreS3Storage(CustomerManagedChannelS3Storage):
"""
S3-customer-managed; When you choose customer-managed storage, the <code>retentionPeriod</code> parameter is ignored. You can't change the choice of Amazon S3 storage after your data store is created.
"""
pass
class DatastoreIotSiteWiseMultiLayerStorage(BaseModel):
"""
Used to store data used by IoT SiteWise in an Amazon S3 bucket that you manage. You can't change the choice of Amazon S3 storage after your data store is created.
"""
customerManagedS3Storage: IotSiteWiseCustomerManagedDatastoreS3Storage
class ParquetConfiguration(BaseModel):
"""
Contains the configuration information of the Parquet format.
"""
schemaDefinition: Optional[SchemaDefinition] = None
class CreatePipelineResponse(BaseModel):
pipelineName: Optional[PipelineName] = None
pipelineArn: Optional[PipelineArn] = None
class DescribeLoggingOptionsResponse(BaseModel):
loggingOptions: Optional[LoggingOptions] = None
class ListDatasetContentsResponse(BaseModel):
datasetContentSummaries: Optional[DatasetContentSummaries] = None
nextToken: Optional[NextToken] = None
class RunPipelineActivityResponse(BaseModel):
payloads: Optional[MessagePayloads] = None
logResult: Optional[LogResult] = None
class ChannelActivity(BaseModel):
"""
The activity that determines the source of the messages to be processed.
"""
name: ActivityName
channelName: ChannelName
next: Optional[ActivityName] = None
class LambdaActivity(BaseModel):
"""
An activity that runs a Lambda function to modify the message.
"""
name: ActivityName
lambdaName: LambdaName
batchSize: ActivityBatchSize
next: Optional[ActivityName] = None
class DatastoreActivity(BaseModel):
"""
The datastore activity that specifies where to store the processed data.
"""
name: ActivityName
datastoreName: DatastoreName
class AddAttributesActivity(BaseModel):
"""
An activity that adds other attributes based on existing attributes in the message.
"""
name: ActivityName
attributes: AttributeNameMapping
next: Optional[ActivityName] = None
class RemoveAttributesActivity(BaseModel):
"""
An activity that removes attributes from a message.
"""
name: ActivityName
attributes: AttributeNames
next: Optional[ActivityName] = None
class SelectAttributesActivity(RemoveAttributesActivity):
"""
Used to create a new message using only the specified attributes from the original message.
"""
pass
class FilterActivity(BaseModel):
"""
An activity that filters a message based on its attributes.
"""
name: ActivityName
filter: FilterExpression
next: Optional[ActivityName] = None
class MathActivity(BaseModel):
"""
An activity that computes an arithmetic expression using the message's attributes.
"""
name: ActivityName
attribute: AttributeName
math: MathExpression
next: Optional[ActivityName] = None
class DeviceRegistryEnrichActivity(BaseModel):
"""
An activity that adds data from the IoT device registry to your message.
"""
name: ActivityName
attribute: AttributeName
thingName: AttributeName
roleArn: RoleArn
next: Optional[ActivityName] = None
class DeviceShadowEnrichActivity(DeviceRegistryEnrichActivity):
"""
An activity that adds information from the IoT Device Shadow service to a message.
"""
pass
class SampleChannelDataResponse(BaseModel):
payloads: Optional[MessagePayloads] = None
class StartPipelineReprocessingResponse(BaseModel):
reprocessingId: Optional[ReprocessingId] = None
class S3PathChannelMessages(BaseModel):
__root__: Annotated[List[S3PathChannelMessage], Field(max_items=100, min_items=1)]
class BatchPutMessageErrorEntry(BaseModel):
"""
Contains informations about errors.
"""
messageId: Optional[MessageId] = None
errorCode: Optional[ErrorCode] = None
errorMessage: Optional[ErrorMessage] = None
class BatchPutMessageErrorEntries(BaseModel):
__root__: List[BatchPutMessageErrorEntry]
class Messages(BaseModel):
__root__: List[Message]
class BatchPutMessageRequest(BaseModel):
channelName: ChannelName
messages: Messages
class ChannelStorage(BaseModel):
"""
Where channel data is stored. You may choose one of <code>serviceManagedS3</code>, <code>customerManagedS3</code> storage. If not specified, the default is <code>serviceManagedS3</code>. This can't be changed after creation of the channel.
"""
serviceManagedS3: Optional[ServiceManagedChannelS3Storage] = None
customerManagedS3: Optional[CustomerManagedChannelS3Storage] = None
class Channel(BaseModel):
"""
A collection of data from an MQTT topic. Channels archive the raw, unprocessed messages before publishing the data to a pipeline.
"""
name: Optional[ChannelName] = None
storage: Optional[ChannelStorage] = None
arn: Optional[ChannelArn] = None
status: Optional[ChannelStatus] = None
retentionPeriod: Optional[RetentionPeriod] = None
creationTime: Optional[Timestamp] = None
lastUpdateTime: Optional[Timestamp] = None
lastMessageArrivalTime: Optional[Timestamp] = None
class ChannelMessages(BaseModel):
"""
Specifies one or more sets of channel messages.
"""
s3Paths: Optional[S3PathChannelMessages] = None
class EstimatedResourceSize(BaseModel):
"""
The estimated size of the resource.
"""
estimatedSizeInBytes: Optional[SizeInBytes] = None
estimatedOn: Optional[Timestamp] = None
class ChannelStatistics(BaseModel):
"""
Statistics information about the channel.
"""
size: Optional[EstimatedResourceSize] = None
class CustomerManagedChannelS3StorageSummary(CustomerManagedDatastoreS3StorageSummary):
"""
Used to store channel data in an S3 bucket that you manage.
"""
pass
class ChannelStorageSummary(BaseModel):
"""
Where channel data is stored.
"""
serviceManagedS3: Optional[ServiceManagedChannelS3StorageSummary] = None
customerManagedS3: Optional[CustomerManagedChannelS3StorageSummary] = None
class ChannelSummary(BaseModel):
"""
A summary of information about a channel.
"""
channelName: Optional[ChannelName] = None
channelStorage: Optional[ChannelStorageSummary] = None
status: Optional[ChannelStatus] = None
creationTime: Optional[Timestamp] = None
lastUpdateTime: Optional[Timestamp] = None
lastMessageArrivalTime: Optional[Timestamp] = None
class ChannelSummaries(BaseModel):
__root__: List[ChannelSummary]
class ResourceConfiguration(BaseModel):
"""
The configuration of the resource used to execute the <code>containerAction</code>.
"""
computeType: ComputeType
volumeSizeInGB: VolumeSizeInGB
class Variables(BaseModel):
__root__: Annotated[List[Variable], Field(max_items=50, min_items=0)]
class ContainerDatasetAction(BaseModel):
"""
Information required to run the <code>containerAction</code> to produce dataset contents.
"""
image: Image
executionRoleArn: RoleArn
resourceConfiguration: ResourceConfiguration
variables: Optional[Variables] = None
class TagList(BaseModel):
__root__: Annotated[List[Tag], Field(max_items=50, min_items=1)]
class CreateChannelRequest(BaseModel):
channelName: ChannelName
channelStorage: Optional[ChannelStorage] = None
retentionPeriod: Optional[RetentionPeriod] = None
tags: Optional[TagList] = None
class LateDataRules(BaseModel):
__root__: Annotated[List[LateDataRule], Field(max_items=1, min_items=1)]
class DatastoreStorage(BaseModel):
"""
Where data in a data store is stored.. You can choose <code>serviceManagedS3</code> storage, <code>customerManagedS3</code> storage, or <code>iotSiteWiseMultiLayerStorage</code> storage. The default is <code>serviceManagedS3</code>. You can't change the choice of Amazon S3 storage after your data store is created.
"""
serviceManagedS3: Optional[ServiceManagedDatastoreS3Storage] = None
customerManagedS3: Optional[CustomerManagedDatastoreS3Storage] = None
iotSiteWiseMultiLayerStorage: Optional[DatastoreIotSiteWiseMultiLayerStorage] = None
class FileFormatConfiguration(BaseModel):
"""
<p>Contains the configuration information of file formats. IoT Analytics data stores support JSON and <a href="https://parquet.apache.org/">Parquet</a>.</p> <p>The default file format is JSON. You can specify only one format.</p> <p>You can't change the file format after you create the data store.</p>
"""
jsonConfiguration: Optional[JsonConfiguration] = None
parquetConfiguration: Optional[ParquetConfiguration] = None
class SqlQueryDatasetAction(BaseModel):
    """
    The SQL query to modify the message.
    """
    sqlQuery: SqlQuery
    filters: Optional[QueryFilters] = None
class DatasetActionSummary(BaseModel):
    """
    Information about the action that automatically creates the dataset's contents.
    """
    actionName: Optional[DatasetActionName] = None
    actionType: Optional[DatasetActionType] = None
class DatasetActionSummaries(BaseModel):
    """A list containing exactly one DatasetActionSummary."""
    __root__: Annotated[List[DatasetActionSummary], Field(max_items=1, min_items=1)]
class IotEventsDestinationConfiguration(BaseModel):
    """
    Configuration information for delivery of dataset contents to IoT Events.
    """
    inputName: IotEventsInputName
    roleArn: RoleArn
class S3DestinationConfiguration(BaseModel):
    """
    Configuration information for delivery of dataset contents to Amazon Simple Storage Service (Amazon S3).
    """
    bucket: BucketName
    key: BucketKeyExpression
    glueConfiguration: Optional[GlueConfiguration] = None
    roleArn: RoleArn
class DatasetContentDeliveryDestination(BaseModel):
    """
    The destination to which dataset contents are delivered.
    """
    iotEventsDestinationConfiguration: Optional[
        IotEventsDestinationConfiguration
    ] = None
    s3DestinationConfiguration: Optional[S3DestinationConfiguration] = None
class DatasetEntry(BaseModel):
    """
    The reference to a dataset entry.
    """
    entryName: Optional[EntryName] = None
    dataURI: Optional[PresignedURI] = None
class DatasetEntries(BaseModel):
    """A list of DatasetEntry references."""
    __root__: List[DatasetEntry]
class Schedule(BaseModel):
    """
    The schedule for when to trigger an update.
    """
    expression: Optional[ScheduleExpression] = None
class Partition(BaseModel):
    """
    A partition dimension defined by an attribute.
    """
    attributeName: PartitionAttributeName
class TimestampPartition(BaseModel):
    """
    A partition dimension defined by a timestamp attribute.
    """
    attributeName: PartitionAttributeName
    timestampFormat: Optional[TimestampFormat] = None
class DatastorePartition(BaseModel):
    """
    A single dimension to partition a data store. The dimension must be an <code>AttributePartition</code> or a <code>TimestampPartition</code>.
    """
    attributePartition: Optional[Partition] = None
    timestampPartition: Optional[TimestampPartition] = None
class DatastoreStatistics(ChannelStatistics):
    """
    Statistical information about the data store.
    """
    pass
class ReprocessingSummaries(BaseModel):
    """A list of ReprocessingSummary entries."""
    __root__: List[ReprocessingSummary]
class PipelineSummary(BaseModel):
    """
    A summary of information about a pipeline.
    """
    pipelineName: Optional[PipelineName] = None
    reprocessingSummaries: Optional[ReprocessingSummaries] = None
    creationTime: Optional[Timestamp] = None
    lastUpdateTime: Optional[Timestamp] = None
class StartPipelineReprocessingRequest(BaseModel):
    """Request body for starting pipeline reprocessing."""
    startTime: Optional[StartTime] = None
    endTime: Optional[EndTime] = None
    channelMessages: Optional[ChannelMessages] = None
class TagResourceRequest(BaseModel):
    """Request body for adding tags to a resource."""
    tags: TagList
class UpdateChannelRequest(BaseModel):
    """Request body for updating a channel's storage and retention settings."""
    channelStorage: Optional[ChannelStorage] = None
    retentionPeriod: Optional[RetentionPeriod] = None
class UpdateDatastoreRequest(BaseModel):
    """Request body for updating a data store's settings."""
    retentionPeriod: Optional[RetentionPeriod] = None
    datastoreStorage: Optional[DatastoreStorage] = None
    fileFormatConfiguration: Optional[FileFormatConfiguration] = None
class BatchPutMessageResponse(BaseModel):
    """Response for a batch message put, listing any per-message errors."""
    batchPutMessageErrorEntries: Optional[BatchPutMessageErrorEntries] = None
class DatasetAction(BaseModel):
    """
    A <code>DatasetAction</code> object that specifies how dataset contents are automatically created.
    """
    actionName: Optional[DatasetActionName] = None
    queryAction: Optional[SqlQueryDatasetAction] = None
    containerAction: Optional[ContainerDatasetAction] = None
class DatasetTrigger(BaseModel):
    """
    The <code>DatasetTrigger</code> that specifies when the dataset is automatically updated.
    """
    schedule: Optional[Schedule] = None
    dataset: Optional[TriggeringDataset] = None
class DatasetContentDeliveryRule(BaseModel):
    """
    When dataset contents are created, they are delivered to destination specified here.
    """
    entryName: Optional[EntryName] = None
    destination: DatasetContentDeliveryDestination
class Partitions(BaseModel):
    """A list of up to 25 DatastorePartition dimensions."""
    __root__: Annotated[List[DatastorePartition], Field(max_items=25, min_items=0)]
class PipelineActivity(BaseModel):
    """
    An activity that performs a transformation on a message.
    """
    channel: Optional[ChannelActivity] = None
    # aliased because ``lambda`` is a reserved word in Python
    lambda_: Annotated[Optional[LambdaActivity], Field(alias='lambda')] = None
    datastore: Optional[DatastoreActivity] = None
    addAttributes: Optional[AddAttributesActivity] = None
    removeAttributes: Optional[RemoveAttributesActivity] = None
    selectAttributes: Optional[SelectAttributesActivity] = None
    filter: Optional[FilterActivity] = None
    math: Optional[MathActivity] = None
    deviceRegistryEnrich: Optional[DeviceRegistryEnrichActivity] = None
    deviceShadowEnrich: Optional[DeviceShadowEnrichActivity] = None
class DescribeChannelResponse(BaseModel):
    """Response describing a channel and its statistics."""
    channel: Optional[Channel] = None
    statistics: Optional[ChannelStatistics] = None
class GetDatasetContentResponse(BaseModel):
    """Response containing dataset content entries and their status."""
    entries: Optional[DatasetEntries] = None
    timestamp: Optional[Timestamp] = None
    status: Optional[DatasetContentStatus] = None
class ListChannelsResponse(BaseModel):
    """Paginated list of channel summaries."""
    channelSummaries: Optional[ChannelSummaries] = None
    nextToken: Optional[NextToken] = None
class ListTagsForResourceResponse(BaseModel):
    """Response listing the tags attached to a resource."""
    tags: Optional[TagList] = None
class DatasetActions(BaseModel):
    """A list containing exactly one DatasetAction."""
    __root__: Annotated[List[DatasetAction], Field(max_items=1, min_items=1)]
class DatasetTriggers(BaseModel):
    """A list of up to five DatasetTrigger entries."""
    __root__: Annotated[List[DatasetTrigger], Field(max_items=5, min_items=0)]
class DatasetContentDeliveryRules(BaseModel):
    """A list of up to 20 DatasetContentDeliveryRule entries."""
    __root__: Annotated[
        List[DatasetContentDeliveryRule], Field(max_items=20, min_items=0)
    ]
class CreateDatasetRequest(BaseModel):
    """Request body for creating a dataset."""
    datasetName: DatasetName
    actions: DatasetActions
    triggers: Optional[DatasetTriggers] = None
    contentDeliveryRules: Optional[DatasetContentDeliveryRules] = None
    retentionPeriod: Optional[RetentionPeriod] = None
    versioningConfiguration: Optional[VersioningConfiguration] = None
    tags: Optional[TagList] = None
    lateDataRules: Optional[LateDataRules] = None
class DatastorePartitions(BaseModel):
    """
    Contains information about the partition dimensions in a data store.
    """
    partitions: Optional[Partitions] = None
class CreateDatastoreRequest(BaseModel):
    """Request body for creating a data store."""
    datastoreName: DatastoreName
    datastoreStorage: Optional[DatastoreStorage] = None
    retentionPeriod: Optional[RetentionPeriod] = None
    tags: Optional[TagList] = None
    fileFormatConfiguration: Optional[FileFormatConfiguration] = None
    datastorePartitions: Optional[DatastorePartitions] = None
class PipelineActivities(BaseModel):
    """A list of 1 to 25 PipelineActivity entries."""
    __root__: Annotated[List[PipelineActivity], Field(max_items=25, min_items=1)]
class CreatePipelineRequest(BaseModel):
    """Request body for creating a pipeline."""
    pipelineName: PipelineName
    pipelineActivities: PipelineActivities
    tags: Optional[TagList] = None
class Dataset(BaseModel):
    """
    Information about a dataset.
    """
    name: Optional[DatasetName] = None
    arn: Optional[DatasetArn] = None
    actions: Optional[DatasetActions] = None
    triggers: Optional[DatasetTriggers] = None
    contentDeliveryRules: Optional[DatasetContentDeliveryRules] = None
    status: Optional[ChannelStatus] = None
    creationTime: Optional[Timestamp] = None
    lastUpdateTime: Optional[Timestamp] = None
    retentionPeriod: Optional[RetentionPeriod] = None
    versioningConfiguration: Optional[VersioningConfiguration] = None
    lateDataRules: Optional[LateDataRules] = None
class DatasetSummary(BaseModel):
    """
    A summary of information about a dataset.
    """
    datasetName: Optional[DatasetName] = None
    status: Optional[ChannelStatus] = None
    creationTime: Optional[Timestamp] = None
    lastUpdateTime: Optional[Timestamp] = None
    triggers: Optional[DatasetTriggers] = None
    actions: Optional[DatasetActionSummaries] = None
class DatasetSummaries(BaseModel):
    """A list of DatasetSummary entries."""
    __root__: List[DatasetSummary]
class Datastore(BaseModel):
    """
    Information about a data store.
    """
    name: Optional[DatastoreName] = None
    storage: Optional[DatastoreStorage] = None
    arn: Optional[DatastoreArn] = None
    status: Optional[ChannelStatus] = None
    retentionPeriod: Optional[RetentionPeriod] = None
    creationTime: Optional[Timestamp] = None
    lastUpdateTime: Optional[Timestamp] = None
    lastMessageArrivalTime: Optional[Timestamp] = None
    fileFormatConfiguration: Optional[FileFormatConfiguration] = None
    datastorePartitions: Optional[DatastorePartitions] = None
class DatastoreSummary(BaseModel):
    """
    A summary of information about a data store.
    """
    datastoreName: Optional[DatastoreName] = None
    datastoreStorage: Optional[DatastoreStorageSummary] = None
    status: Optional[ChannelStatus] = None
    creationTime: Optional[Timestamp] = None
    lastUpdateTime: Optional[Timestamp] = None
    lastMessageArrivalTime: Optional[Timestamp] = None
    fileFormatType: Optional[FileFormatType] = None
    datastorePartitions: Optional[DatastorePartitions] = None
class DatastoreSummaries(BaseModel):
    """A list of DatastoreSummary entries."""
    __root__: List[DatastoreSummary]
class Pipeline(BaseModel):
    """
    Contains information about a pipeline.
    """
    name: Optional[PipelineName] = None
    arn: Optional[PipelineArn] = None
    activities: Optional[PipelineActivities] = None
    reprocessingSummaries: Optional[ReprocessingSummaries] = None
    creationTime: Optional[Timestamp] = None
    lastUpdateTime: Optional[Timestamp] = None
class PipelineSummaries(BaseModel):
    """A list of PipelineSummary entries."""
    __root__: List[PipelineSummary]
class RunPipelineActivityRequest(BaseModel):
    """Request body for running a single pipeline activity over sample payloads."""
    pipelineActivity: PipelineActivity
    payloads: MessagePayloads
class UpdateDatasetRequest(BaseModel):
    """Request body for updating a dataset's configuration."""
    actions: DatasetActions
    triggers: Optional[DatasetTriggers] = None
    contentDeliveryRules: Optional[DatasetContentDeliveryRules] = None
    retentionPeriod: Optional[RetentionPeriod] = None
    versioningConfiguration: Optional[VersioningConfiguration] = None
    lateDataRules: Optional[LateDataRules] = None
class UpdatePipelineRequest(BaseModel):
    """Request body for replacing a pipeline's activities."""
    pipelineActivities: PipelineActivities
class DescribeDatasetResponse(BaseModel):
    """Response describing a dataset."""
    dataset: Optional[Dataset] = None
class DescribeDatastoreResponse(BaseModel):
    """Response describing a data store and its statistics."""
    datastore: Optional[Datastore] = None
    statistics: Optional[DatastoreStatistics] = None
class DescribePipelineResponse(BaseModel):
    """Response describing a pipeline."""
    pipeline: Optional[Pipeline] = None
class ListDatasetsResponse(BaseModel):
    """Paginated list of dataset summaries."""
    datasetSummaries: Optional[DatasetSummaries] = None
    nextToken: Optional[NextToken] = None
class ListDatastoresResponse(BaseModel):
    """Paginated list of data store summaries."""
    datastoreSummaries: Optional[DatastoreSummaries] = None
    nextToken: Optional[NextToken] = None
class ListPipelinesResponse(BaseModel):
    """Paginated list of pipeline summaries."""
    pipelineSummaries: Optional[PipelineSummaries] = None
    nextToken: Optional[NextToken] = None
| 24.511475 | 640 | 0.736811 |
25249f6ffc68bd327fd5d0540e42e061ccc8880f | 4,577 | py | Python | Codes/trreemap.py | Pepeisadog/Project | 49d77b1590723f87111a0e3a64bd94fa4bb65986 | [
"Unlicense"
] | null | null | null | Codes/trreemap.py | Pepeisadog/Project | 49d77b1590723f87111a0e3a64bd94fa4bb65986 | [
"Unlicense"
] | 3 | 2015-01-12T09:33:30.000Z | 2015-01-29T22:56:47.000Z | Codes/trreemap.py | Pepeisadog/Project | 49d77b1590723f87111a0e3a64bd94fa4bb65986 | [
"Unlicense"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Sun Jan 25 15:48:52 2015
@author: Sofia
"""
import csv
import json
import os
sourceEncoding = "iso-8859-1"
targetEncoding = "utf-8"
# encode files to utf8 (source: http://stackoverflow.com/questions/191359/how-to-convert-a-file-to-utf-8-in-python)
csvfile = open('..\Data\AMFI.csv',"r")
csvfile_encoded = open("..\Data\AMFI_encoded.csv", "w")
csvfile_encoded.write(unicode(csvfile.read(), sourceEncoding).encode(targetEncoding))
csvfile_encoded.close()
csvfile = open('..\Data\AMFI_categories.csv',"r")
csvfile_encoded = open("..\Data\AMFIcategories_encoded.csv", "w")
csvfile_encoded.write(unicode(csvfile.read(), sourceEncoding).encode(targetEncoding))
csvfile_encoded.close()
csvfile = open('..\Data\AMFI_domains.csv',"r")
csvfile_encoded = open("..\Data\AMFIdomains_encoded.csv", "w")
csvfile_encoded.write(unicode(csvfile.read(), sourceEncoding).encode(targetEncoding))
csvfile_encoded.close()
# open files
AMFI_books = open("..\Data\AMFI_encoded.csv","r")
AMFI_categories = open("..\Data\AMFIcategories_encoded.csv","r")
AMFI_domains = open("..\Data\AMFIdomains_encoded.csv","r")
# define fieldnames
fieldnames_books = ("Callnumber","Barcode","Title","Year","Location")
fieldnames_categories = ("Barcode","Category")
# put data in reader
reader_books = csv.DictReader(AMFI_books, fieldnames_books, delimiter=';')
reader_categories = csv.DictReader(AMFI_categories, fieldnames_categories, delimiter = ';')
reader_domains = csv.DictReader(AMFI_domains, delimiter = ';')
output = {"name": "Library of the University of Applied Sciences", "type":"parent", "total":5605, "value":50, "children": []}
# get data from reader_books
barcode_books = []
names_books = []
tags_books = []
copies = []
for books in reader_books:
barcode_books.append(books["Callnumber"])
names_books.append(books["Title"])
tags_books.append(books["Barcode"])
tags = []
size_books = len(barcode_books)
# Modify data books
for k in range(0, len(names_books), 1):
# count copies
count = names_books.count(names_books[k])
copies.append(count)
# collect unique ids
indeces = [i for i, x in enumerate(names_books) if x == names_books[k]]
if len(indeces) == 1:
tags.append(tags_books[indeces[0]])
else:
list_tags = []
for w in range(0,len(indeces),1):
tag = tags_books[indeces[w]]
list_tags.append(tag)
tags.append(list_tags)
# set copies to NaN
for t in range(1,len(indeces),1):
names_books[indeces[t]] = "NaN"
# Enter domains
barcode_domain = []
for domain in reader_domains:
output["children"].append({
"type": "domain",
"name": domain["Domain"],
"barcode": domain["Barcode"],
"value": 6,
"children": []
})
barcode_domain.append(domain["Barcode"])
# get category data
barcode_category = []
names_category = []
for category in reader_categories:
barcode_category.append(category["Barcode"])
names_category.append(category["Category"])
# Enter categories
for i in range(0,len(barcode_domain),1):
barcode_domain_values = output["children"][i]["barcode"]
for j in range(0,len(barcode_category),1):
if barcode_category[j] < barcode_domain_values:
if names_category[j] != "NaN":
output["children"][i]["children"].append({
"type":"category",
"barcode": barcode_category[j],
"value": 5,
"name": names_category[j],
"children": []
})
names_category[j] = "NaN"
# append data to output
lengths = []
codes_categories =[]
for i in range(0,len(barcode_domain),1):
lengths.append(len(output["children"][i]["children"]))
for k in range(0, lengths[i], 1):
#counter = 0
codes_categories = output["children"][i]["children"][k]["barcode"]
for j in range(0,len(names_books),1):
if barcode_books[j] < codes_categories:
if names_books[j] != "NaN":
output["children"][i]["children"][k]["children"].append({
"type":"book",
"barcode": barcode_books[j],
"tags": tags[j],
"value": 2,
"name": names_books[j],
"copies": copies[j]
})
names_books[j] = "NaN"
# write data to file
with open('../Data/tree.json', 'w') as f:
json.dump(output, f, indent=True)
| 31.136054 | 125 | 0.622023 |
2526119172205dbcc83b912e56e47b1cfd9d139b | 3,751 | py | Python | test_haystack/whoosh_tests/test_whoosh_management_commands.py | cbows/django-haystack | 80c154b7b11fdcf99dd2ef0e82342ed13e26053a | [
"BSD-3-Clause"
] | 2,021 | 2015-02-06T07:45:08.000Z | 2022-03-30T12:26:39.000Z | test_haystack/whoosh_tests/test_whoosh_management_commands.py | cbows/django-haystack | 80c154b7b11fdcf99dd2ef0e82342ed13e26053a | [
"BSD-3-Clause"
] | 787 | 2015-02-03T20:06:04.000Z | 2022-03-30T09:00:38.000Z | test_haystack/whoosh_tests/test_whoosh_management_commands.py | cbows/django-haystack | 80c154b7b11fdcf99dd2ef0e82342ed13e26053a | [
"BSD-3-Clause"
] | 878 | 2015-02-04T15:29:50.000Z | 2022-03-28T16:51:44.000Z | import datetime
import os
import unittest
from io import StringIO
from tempfile import mkdtemp
from unittest.mock import patch
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.management import call_command as real_call_command
from django.core.management.base import CommandError
from django.test import TestCase
from whoosh.qparser import QueryParser
from haystack import connections, constants, indexes
from haystack.utils.loading import UnifiedIndex
from ..core.models import MockModel
from .test_whoosh_backend import WhooshMockSearchIndex
from .testcases import WhooshTestCase
def call_command(*args, **kwargs):
    """Invoke Django's ``call_command`` pinned to the 'whoosh' connection.

    Forces ``using=['whoosh']`` so every management command issued by this
    test module operates on the Whoosh backend only, then delegates to
    :func:`django.core.management.call_command`.
    """
    # Fix: removed a leftover debug ``print(args, kwargs)`` that polluted
    # test output on every management-command invocation.
    kwargs["using"] = ["whoosh"]
    real_call_command(*args, **kwargs)
class ManagementCommandTestCase(WhooshTestCase):
    """Exercise the search management commands against the Whoosh backend."""

    fixtures = ["bulk_data"]

    def setUp(self):
        super().setUp()
        # Swap in a UnifiedIndex that only contains the mock index, so the
        # management commands run against a known, minimal schema.
        self.old_ui = connections["whoosh"].get_unified_index()
        self.ui = UnifiedIndex()
        self.wmmi = WhooshMockSearchIndex()
        self.ui.build(indexes=[self.wmmi])
        self.sb = connections["whoosh"].get_backend()
        connections["whoosh"]._index = self.ui
        self.sb.setup()
        self.raw_whoosh = self.sb.index
        self.parser = QueryParser(self.sb.content_field_name, schema=self.sb.schema)
        # Start every test from an empty index.
        self.sb.delete_index()
        self.sample_objs = MockModel.objects.all()

    def tearDown(self):
        # Restore the original UnifiedIndex so other test cases are unaffected.
        connections["whoosh"]._index = self.old_ui
        super().tearDown()

    def verify_indexed_document_count(self, expected):
        # Assert the raw Whoosh index holds exactly ``expected`` documents.
        with self.raw_whoosh.searcher() as searcher:
            count = searcher.doc_count()
            self.assertEqual(count, expected)

    def verify_indexed_documents(self):
        """Confirm that the documents in the search index match the database"""
        with self.raw_whoosh.searcher() as searcher:
            count = searcher.doc_count()
            # The bulk_data fixture yields 23 MockModel rows.
            self.assertEqual(count, 23)
            indexed_doc_ids = set(i["id"] for i in searcher.documents())
            expected_doc_ids = set(
                "core.mockmodel.%d" % i
                for i in MockModel.objects.values_list("pk", flat=True)
            )
            self.assertSetEqual(indexed_doc_ids, expected_doc_ids)

    def test_basic_commands(self):
        # clear -> update -> clear -> rebuild round trip.
        call_command("clear_index", interactive=False, verbosity=0)
        self.verify_indexed_document_count(0)
        call_command("update_index", verbosity=0)
        self.verify_indexed_documents()
        call_command("clear_index", interactive=False, verbosity=0)
        self.verify_indexed_document_count(0)
        call_command("rebuild_index", interactive=False, verbosity=0)
        self.verify_indexed_documents()

    def test_remove(self):
        call_command("clear_index", interactive=False, verbosity=0)
        self.verify_indexed_document_count(0)
        call_command("update_index", verbosity=0)
        self.verify_indexed_documents()
        # Remove several instances.
        MockModel.objects.get(pk=1).delete()
        MockModel.objects.get(pk=2).delete()
        MockModel.objects.get(pk=8).delete()
        self.verify_indexed_document_count(23)
        # Plain ``update_index`` doesn't fix it.
        call_command("update_index", verbosity=0)
        self.verify_indexed_document_count(23)
        # … but remove does:
        call_command("update_index", remove=True, verbosity=0)
        self.verify_indexed_document_count(20)

    def test_multiprocessing(self):
        # Same as the basic update, but with worker processes and batching.
        call_command("clear_index", interactive=False, verbosity=0)
        self.verify_indexed_document_count(0)
        call_command("update_index", verbosity=2, workers=2, batchsize=5)
        self.verify_indexed_documents()
| 33.491071 | 84 | 0.691816 |
25282fa8805725b2acc31f9c959840083384e1e2 | 2,977 | py | Python | src/server.py | tyler-fishbone/http_server | 93a49090d356b31522acd5bc3a25a1c8a3b604e3 | [
"MIT"
] | null | null | null | src/server.py | tyler-fishbone/http_server | 93a49090d356b31522acd5bc3a25a1c8a3b604e3 | [
"MIT"
] | null | null | null | src/server.py | tyler-fishbone/http_server | 93a49090d356b31522acd5bc3a25a1c8a3b604e3 | [
"MIT"
] | null | null | null | from http.server import HTTPServer, BaseHTTPRequestHandler
from urllib.parse import urlparse, parse_qs
from cowpy import cow
import json
import sys
class SimpleHTTPRequestHandler(BaseHTTPRequestHandler):
def do_GET(self):
parsed_path = urlparse(self.path)
parsed_qs = parse_qs(parsed_path.query)
# import pdb; pdb.set_trace()
if parsed_path.path == '/':
self.send_response(200)
self.end_headers()
self.wfile.write(return_html_string())
return
elif parsed_path.path == '/cowsay':
self.send_response(200)
self.end_headers()
self.wfile.write(b'Helpful instructions about this application')
return
elif parsed_path.path == '/cow':
try:
# import pdb; pdb.set_trace()
msg = parsed_qs['msg'][0]
print(msg)
except (KeyError, json.decoder.JSONDecodeError):
self.send_response(400)
self.end_headers()
self.wfile.write(b'You did a bad thing')
return
cheese = cow.Moose(thoughts=True)
message = cheese.milk(msg)
self.send_response(200)
self.end_headers()
self.wfile.write(message.encode('utf8'))
return
else:
self.send_response(404)
self.end_headers()
self.wfile.write(b'Not Found')
def do_POST(self):
parsed_path = urlparse(self.path)
parsed_qs = parse_qs(parsed_path.query)
if parsed_path.path == '/cow':
try:
msg = parsed_qs['msg'][0]
cheese = cow.Moose(thoughts=True)
message = cheese.milk(msg)
post_dict = {}
post_dict['content'] = message
self.send_response(200)
self.end_headers()
self.wfile.write(json.dumps(post_dict).encode('utf8'))
return
except (KeyError, json.decoder.JSONDecodeError):
self.send_response(400)
self.end_headers()
self.wfile.write(b'You did a bad thing')
return
def create_server(host='127.0.0.1', port=3000):
    """Build the HTTP server bound to ``(host, port)``.

    The defaults preserve the original behavior (localhost:3000); the
    parameters generalize the helper so tests can bind an ephemeral port.
    """
    return HTTPServer((host, port), SimpleHTTPRequestHandler)
def run_forever():
    """Create the server and serve requests until interrupted (Ctrl-C)."""
    server = create_server()
    try:
        print('Starting server on port 3000')
        server.serve_forever()
    except KeyboardInterrupt:
        # Shut down the serve loop and release the listening socket.
        server.shutdown()
        server.server_close()
def return_html_string():
    """Return the landing page markup as UTF-8-ready bytes."""
    landing_page = b'''<!DOCTYPE html>
    <html>
    <head>
    <title> cowsay </title>
    </head>
    <body>
    <header>
    <nav>
    <ul>
    <li><a href="/cowsay">cowsay</a></li>
    </ul>
    </nav>
    <header>
    <main>
    <!-- project description -->
    </main>
    </body>
    </html>'''
    return landing_page
# Script entry point: start serving on 127.0.0.1:3000 until interrupted.
if __name__ == '__main__':
    run_forever()
| 26.114035 | 76 | 0.543164 |
2529f17c13ced51c4629d6195cff0d46c5800cac | 7,033 | py | Python | Chapter06/6B_TrendFollowings/6B_3_RunCNN.py | uyenphuong18406/Hands-On-Artificial-Intelligence-for-Banking | 3a10a14194368478bb8b78d3d17e9c6a7b7253db | [
"MIT"
] | 115 | 2020-06-18T15:00:58.000Z | 2022-03-02T10:13:19.000Z | Chapter06/6B_TrendFollowings/6B_3_RunCNN.py | uyenphuong18406/Hands-On-Artificial-Intelligence-for-Banking | 3a10a14194368478bb8b78d3d17e9c6a7b7253db | [
"MIT"
] | 2 | 2020-11-06T11:02:31.000Z | 2021-01-22T12:44:35.000Z | Chapter06/6B_TrendFollowings/6B_3_RunCNN.py | uyenphuong18406/Hands-On-Artificial-Intelligence-for-Banking | 3a10a14194368478bb8b78d3d17e9c6a7b7253db | [
"MIT"
] | 60 | 2020-07-22T14:53:10.000Z | 2022-03-23T10:17:59.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 30 00:58:34 2018
@author: jeff
"""
'''*************************************
#1. Import libraries and key varable values
'''
import os
import quandl
import pandas as pd
import numpy as np
import keras
from PIL import Image
#folder path
folder_path = os.path.dirname(__file__)
#date range for full dataset
str_dte = '2003-01-01'
end_dte = '2018-7-31'
date_dict = {'gte':str_dte, 'lte':end_dte}
#Dates for back-testing
start_dte = '2015-1-1'
#Create list of dates
datelist = pd.date_range(start_dte, periods=365*2).tolist()
#API key for quandl
quandl.ApiConfig.api_key = '[quandl id]'
#Parameters for the image generation
col_num_mid = 10
col_num_dte = 9
pixel_size = 100
window_size = 60
pred_window_size = 1
#model path
model_path = "model2_2DCov.h5"
model = keras.models.load_model(model_path)
#number of channel for the image
num_channel=1
#strategies parameters
curr_pnl = 10000
curr_pnl_0=curr_pnl
curr_pnl_1=curr_pnl
curr_pnl_2=curr_pnl
quant_trans_0 = 0
quant_trans_1 = 0
quant_trans_2 = 0
min_pnl = 0.0005
trading_cost = 0
trade_limit = 0.5
'''*************************************
#2. Define functions
'''
def rescale(input_X, pixel, min_x, max_x):
    """Map prices onto a vertical pixel grid.

    ``input_X`` is a price (or series of prices); the value is expressed as a
    pixel index on a grid of ``pixel`` rows spanning ``[min_x, max_x]``.
    Returns ``(pixelized_value, unit)`` where ``unit`` is the price span of
    one pixel row.
    """
    per_pixel = (max_x - min_x) / pixel
    pixelized = round((input_X - min_x) / per_pixel, 0)
    return pixelized, per_pixel
'''*************************************
#3. Running the test
'''
#Get the data
tkr = 'VTV'
df =quandl.get_table('SHARADAR/SFP',date=date_dict,ticker=tkr)
df = df.sort_values(by=['date'])
df=df.reset_index(drop=True)
#write header for the log of the strategy back-testing
f = open('log.txt','w+')
f.write('strategy\tBuySell\t' + 'dte' +'\t'+ 'cost' +'\t'+ 'T+1_actual' +'\t'+ 'T+1_pred'+'\t'+ 'Quantity'+'\t'+ 'PnL'+'\n')
#loop through the dates
for pred_dte in datelist:
    df_i = df.index[df['date']==pred_dte]
    #make sure both start and end dates are valid
    if df_i.empty:
        print('no data')
        continue
    df_i = df_i[0]
    print(pred_dte)
    df_start = df_i-(window_size) #starts at zero
    if df_start < 0: #in case the date inputted is not valid
        print('later date')
        continue
    #prepare the input data: mid price over the trailing window, rescaled
    #to the pixel grid
    df['mid'] = (df['high'] + df['low'])/2
    df_plot = df.iloc[df_start:df_i,:]
    min_p = min(df_plot['mid'])
    max_p = max(df_plot['mid'])
    output_pixel,unit = rescale(df_plot['mid'],pixel_size,min_p,max_p)
    #if no trend, then drop this data point (also avoids division by zero
    #inside rescale's unit when min == max)
    if min_p ==max_p:
        print('no trend')
        continue
    #stack up for a numpy for Image Recognition
    #print the historical data
    img_ar = np.zeros((1,pixel_size,window_size,num_channel))
    img_display = np.zeros((pixel_size,window_size,num_channel))
    k=0
    pix_p=0
    for pix in output_pixel:
        y_pos = int(pix)-1
        img_ar[0][y_pos][k][num_channel-1] = 255
        img_display[y_pos][k][num_channel-1] = 255
        pix_p=y_pos
        k+=1
    img_row = img_ar/255
    #price implied by the last pixel of the window (today's cost basis)
    last_actual_p = pix_p * unit + min_p
    #make prediction
    pred_y = model.predict(img_row)
    max_y_val = max(pred_y[0])
    pred_y_img = np.zeros((pixel_size,1))
    #Obtain predicted price
    pred_pixel = 0
    expected_p = 0
    #calculate expected values
    for i in range(pixel_size):
        # NOTE(review): this reads pred_y_img[i,0], which is still 0 when
        # accumulated (it is only set to 255 afterwards), so expected_p
        # appears to stay 0 and pred_exp_p collapses to min_p — verify
        # whether pred_y[0,i] was intended here.
        expected_p += pred_y_img[i,0] * i
        if pred_y[0,i] == max_y_val:
            pred_y_img[i,0] = 255
            pred_pixel = i
    pred_p = pred_pixel * unit + min_p
    print('cost at ' + str(last_actual_p))
    print('predict p be ' + str(pred_p) + ' and probability of ' + str(max_y_val))
    pred_exp_p = expected_p * unit + min_p
    print('expected predict p be ' + str(pred_exp_p))
    y_actual_p = df.iloc[df_i+1,:]['mid']
    print('actual p be '+str(y_actual_p))
    #Strategy Back-Testing
    #Benchmark - Strategy 0 - buy and hold
    if quant_trans_0 == 0:
        quant_trans_0 = curr_pnl/y_actual_p
        pnl = 0-trading_cost
    else:
        pnl = (y_actual_p/last_actual_p-1) * quant_trans_0
    curr_pnl_0 += pnl
    f.write('B0\tNA\t' + str(pred_dte) +'\t'+ str(last_actual_p) +'\t'+ str(y_actual_p) +'\t'+ str(y_actual_p)+'\t'+ str(1)+'\t'+ str(last_actual_p-y_actual_p)+'\n')
    #Testing of strategy1: trade on the expectation-weighted prediction
    order_type = ""
    quant_trans_1 = int(curr_pnl_1/last_actual_p*0.5)
    if abs(pred_exp_p/last_actual_p-1)>min_pnl:
        if pred_exp_p>last_actual_p:
            #buy one now / long one unit
            #stock_unit_1+=quant_trans_1
            pnl = (y_actual_p-last_actual_p) * quant_trans_1-trading_cost
            order_type = "B"
            curr_pnl_1 += pnl
            f.write('S1\tBuy\t' + str(pred_dte) +'\t'+ str(last_actual_p) +'\t'+ str(y_actual_p) +'\t'+ str(pred_exp_p)+'\t'+ str(quant_trans_1)+'\t'+ str(y_actual_p-last_actual_p)+'\n')
        elif pred_exp_p<last_actual_p:
            #sell one now / short one unit
            #stock_unit_1-=quant_trans_1
            pnl = (last_actual_p-y_actual_p) * quant_trans_1-trading_cost
            order_type = "S"
            curr_pnl_1 += pnl
            f.write('S1\tSell\t' + str(pred_dte) +'\t'+ str(last_actual_p) +'\t'+ str(y_actual_p) +'\t'+ str(pred_exp_p)+'\t'+ str(quant_trans_1)+'\t'+ str(last_actual_p-y_actual_p)+'\n')
    else: #no trade: mark the existing position to market
        if order_type == "B":
            pnl = (y_actual_p-last_actual_p) * quant_trans_1
        else:
            pnl = (last_actual_p-y_actual_p) * quant_trans_1
        curr_pnl_1 += pnl
    #Testing of strategy2: trade only on high-confidence argmax predictions
    if max_y_val > 0.99 and abs(pred_p/last_actual_p-1)>min_pnl:
        quant_trans_2 = int(curr_pnl_2/last_actual_p*0.5)
        if pred_p>last_actual_p:
            #buy one now / long one unit
            #stock_unit_2+=quant_trans_2
            order_type = "B"
            curr_pnl_2 += (y_actual_p-last_actual_p) * quant_trans_2-trading_cost
            f.write('S2\tBuy\t' + str(pred_dte) +'\t'+ str(last_actual_p) +'\t'+ str(y_actual_p) +'\t'+ str(pred_p) +'\t'+str(quant_trans_2)+'\t'+ str(y_actual_p-last_actual_p)+'\n')
        elif pred_p<last_actual_p:
            #sell one now / short one unit
            #stock_unit_2-=quant_trans_2
            order_type = "S"
            curr_pnl_2 += (last_actual_p-y_actual_p) * quant_trans_2-trading_cost
            f.write('S2\tSell\t' + str(pred_dte) +'\t'+ str(last_actual_p) +'\t'+ str(y_actual_p) +'\t'+ str(pred_p)+'\t'+ str(quant_trans_2)+'\t'+ str(last_actual_p-y_actual_p)+'\n')
    else: #no trade: mark the existing position to market
        if order_type == "B":
            pnl = (y_actual_p-last_actual_p) * quant_trans_2
        else:
            pnl = (last_actual_p-y_actual_p) * quant_trans_2
        curr_pnl_2 += pnl
#print the final result of the strategies
print(curr_pnl_0)
print(curr_pnl_1)
print(curr_pnl_2)
f.close()
'''
export CUDA_VISIBLE_DEVICES=''
tensorboard --logdir AI_Finance_book/6B_TrendFollowings/Graph/ --host localhost --port 6006
'''
| 32.560185 | 187 | 0.624485 |
252ac1c22921db6597accc034da434758be4405a | 2,589 | py | Python | lichee/dataset/field_parser/image_local_path.py | Tencent/Lichee | 7653becd6fbf8b0715f788af3c0507c012be08b4 | [
"Apache-2.0"
] | 91 | 2021-10-30T02:25:05.000Z | 2022-03-28T06:51:52.000Z | lichee/dataset/field_parser/image_local_path.py | zhaijunyu/Lichee | 7653becd6fbf8b0715f788af3c0507c012be08b4 | [
"Apache-2.0"
] | 1 | 2021-12-17T09:30:25.000Z | 2022-03-05T12:30:13.000Z | lichee/dataset/field_parser/image_local_path.py | zhaijunyu/Lichee | 7653becd6fbf8b0715f788af3c0507c012be08b4 | [
"Apache-2.0"
] | 17 | 2021-11-04T07:50:23.000Z | 2022-03-24T14:24:11.000Z | # -*- coding: utf-8 -*-
from lichee import plugin
from .field_parser_base import BaseFieldParser
import os
from PIL import Image
from torchvision import transforms
import torch
from lichee.utils import storage
@plugin.register_plugin(plugin.PluginType.FIELD_PARSER, "image_local_path")
class ImgDataFieldParser(BaseFieldParser):
"""The field parser for local image. Read the image data from the path provided,
transforms through ToSensor, Resize and Normalize.
Attributes
----------
transformer: transforms.Compose
compose the transforms(ToSensor, Resize and Normalize)
"""
def __init__(self):
super().__init__()
self.transformer = None
def init(self, cfg):
self.cfg = cfg
resolution = [int(x) for x in self.global_config.DATASET.CONFIG.IMAGE_RESOLUTION]
self.transformer = transforms.Compose([
transforms.ToTensor(),
transforms.Resize(resolution),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
def parse(self, row, training=False):
"""Parse the row and obtain the path of image, invoke prepare_img_data to transform the image data to tensor.
Parameters
----------
row: memoryview
Object contained in a single record
training: bool
inherited from parent, not used here.
Returns
-------
record: torch.Tensor
the tensor of image data
"""
record = {}
if self.key not in row:
raise Exception("Cannot find key %s in row by image_local_path" % self.key)
img_path = bytes(row[self.key]).decode("utf-8")
if img_path[0] != "/":
img_path = os.path.join(self.global_config.DATASET.DATA_BASE_DIR, img_path)
record[self.alias] = self.prepare_img_data(img_path)
return record
def prepare_img_data(self, img_path):
"""Read and process the image from image_path
Parameters
----------
img_path: str
path of image
Returns
------
torch.Tensor
the tensor transformed from image data.
"""
with open(storage.get_storage_file(img_path), 'rb') as f:
img = Image.open(f)
img = img.convert('RGB')
return self.transformer(img)
def collate(self, batch):
record = {}
imgs = [instance[self.alias] for instance in batch]
imgs = torch.stack(imgs)
record[self.alias] = imgs
return record
| 30.821429 | 117 | 0.611047 |
252b421527774d5fb18e906562e999ce4cef4de4 | 2,054 | py | Python | models/inception.py | ildoonet/kaggle-human-protein-atlas-image-classification | 9faedaf6e480712492ccfb36c7bdf5e9f7db8b41 | [
"Apache-2.0"
] | 35 | 2019-01-11T00:55:19.000Z | 2021-07-14T11:44:10.000Z | models/inception.py | ildoonet/kaggle-human-protein-atlas-image-classification | 9faedaf6e480712492ccfb36c7bdf5e9f7db8b41 | [
"Apache-2.0"
] | null | null | null | models/inception.py | ildoonet/kaggle-human-protein-atlas-image-classification | 9faedaf6e480712492ccfb36c7bdf5e9f7db8b41 | [
"Apache-2.0"
] | 9 | 2019-01-11T01:42:14.000Z | 2020-03-02T05:47:18.000Z | import torch
from torch import nn
import torch.nn.functional as F
import torchvision
from torchvision.models.inception import BasicConv2d, InceptionAux
import pretrainedmodels
from common import num_class
class InceptionV3(nn.Module):
    """Inception-v3 adapted to 4-channel input for multi-label classification.

    The stem convolution is replaced by a 4-input-channel variant; when
    pretrained, the 4th channel's weights are initialized as the mean of the
    1st and 3rd pretrained channels. The auxiliary head and the final fc are
    re-sized to ``num_class()`` outputs.
    """

    def __init__(self, pre=True):
        super().__init__()
        self.encoder = torchvision.models.inception_v3(pretrained=pre)
        conv1 = BasicConv2d(4, 32, kernel_size=3, stride=2)
        if pre:
            # Reuse the pretrained RGB weights; the extra channel gets the
            # average of the R and B channel weights.
            w = self.encoder.Conv2d_1a_3x3.conv.weight
            conv1.conv.weight = nn.Parameter(torch.cat((w, 0.5 * (w[:, :1, :, :] + w[:, 2:, :, :])), dim=1))
        self.encoder.Conv2d_1a_3x3 = conv1
        self.encoder.AuxLogits = InceptionAux(768, num_class())
        self.encoder.fc = nn.Linear(2048, num_class())

    def forward(self, x):
        # Inception-v3 expects 299x299 input.
        x = torch.nn.functional.interpolate(x, size=(299, 299), mode='bilinear')  # resize
        if self.training:
            # In training mode the encoder also returns aux logits; average
            # the two sigmoid outputs.
            x, x_aux, feat = self.encoder(x)
            x = (torch.sigmoid(x) + torch.sigmoid(x_aux)) * 0.5
        else:
            x, feat = self.encoder(x)
            x = torch.sigmoid(x)
        return {'logit': x, 'feat': feat}
class InceptionV4(nn.Module):
    """Inception-v4 adapted to 4-channel input for multi-label classification.

    The stem convolution is replaced by a 4-input-channel variant; when
    pretrained, the 4th channel's weights are initialized as the mean of the
    1st and 3rd pretrained channels. A fresh ``last_linear`` head maps the
    1536-d pooled features to ``num_class()`` sigmoid outputs.
    """
    # Fix: removed a dead trailing ``pass`` statement in __init__ and a
    # commented-out resize line in forward.

    def __init__(self, pre=True):
        super().__init__()
        self.encoder = pretrainedmodels.__dict__['inceptionv4'](num_classes=1000, pretrained='imagenet')
        conv1 = BasicConv2d(4, 32, kernel_size=3, stride=2)
        if pre:
            # Reuse the pretrained RGB weights; the extra channel gets the
            # average of the R and B channel weights.
            w = self.encoder.features[0].conv.weight
            conv1.conv.weight = nn.Parameter(torch.cat((w, 0.5 * (w[:, :1, :, :] + w[:, 2:, :, :])), dim=1))
        self.encoder.features[0].conv = conv1
        self.last_linear = nn.Linear(1536, num_class())

    def forward(self, x):
        x = self.encoder.features(x)
        # Global average pool to a 1536-d feature vector.
        x = F.adaptive_avg_pool2d(x, (1, 1))
        x = x.view(x.size(0), -1)
        feat = x
        x = self.last_linear(x)
        x = torch.sigmoid(x)
        return {'logit': x, 'feat': feat}
| 36.035088 | 108 | 0.595424 |
252c453ec6e9dc3416a26d47c38bcfb973477454 | 74 | py | Python | python/sequences.py | saedyousef/Python-scratch | ba4bf88d1ad86beddc8c7c5e2f43c4e837e2861e | [
"MIT"
] | 5 | 2020-07-20T17:47:08.000Z | 2021-08-17T18:26:25.000Z | python/sequences.py | saedyousef/CS-50 | ba4bf88d1ad86beddc8c7c5e2f43c4e837e2861e | [
"MIT"
] | null | null | null | python/sequences.py | saedyousef/CS-50 | ba4bf88d1ad86beddc8c7c5e2f43c4e837e2861e | [
"MIT"
] | 1 | 2021-06-29T19:49:46.000Z | 2021-06-29T19:49:46.000Z | name = "Saeed"
# (x, y) coordinate pair; tuples are immutable sequences.
coordinates = (10.0, 20.0)
# Backward-compatible alias preserving the original (misspelled) name.
cordinates = coordinates
# A mutable, ordered sequence of names.
names = ["Saeed", "Bob", "Mousa"]
252d0e1541c6bce0edda34974ac8e4c3861ecde4 | 2,622 | py | Python | scripts/create_fluseverity_figs/Supp_zOR_totalAR.py | eclee25/flu-SDI-exploratory-age | 2f5a4d97b84d2116e179e85fe334edf4556aa946 | [
"MIT"
] | 3 | 2018-03-29T23:02:43.000Z | 2020-08-10T12:01:50.000Z | scripts/create_fluseverity_figs/Supp_zOR_totalAR.py | eclee25/flu-SDI-exploratory-age | 2f5a4d97b84d2116e179e85fe334edf4556aa946 | [
"MIT"
] | null | null | null | scripts/create_fluseverity_figs/Supp_zOR_totalAR.py | eclee25/flu-SDI-exploratory-age | 2f5a4d97b84d2116e179e85fe334edf4556aa946 | [
"MIT"
] | null | null | null | #!/usr/bin/python
##############################################
###Python template
###Author: Elizabeth Lee
###Date: 9/2/14
###Function: mean peak-based retro zOR metric vs. total attack rate
###Import data: SQL_export/OR_allweeks_outpatient.csv, SQL_export/OR_allweeks.csv
###Command Line: python Supp_zOR_totalAR.py
##############################################
### notes ###
# NOTE(review): this is Python 2 code (print statements); it will not run under Python 3.
### packages/modules ###
import csv
import matplotlib.pyplot as plt
import numpy as np
## local modules ##
import functions as fxn
### data structures ###
### functions ###
### data files ###
# NOTE(review): the three file handles opened below are consumed lazily through
# csv.reader by the fxn.* helpers and are never closed. That is acceptable for
# a one-shot analysis script, but consider `with` blocks if this is reused.
zORin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/Py_export/SDI_national_classifications.csv','r')
zORin.readline() # rm header
zOR = csv.reader(zORin, delimiter=',')
allincidin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/SQL_export/OR_allweeks.csv','r')
allincid = csv.reader(allincidin, delimiter=',')
popin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/SQL_export/totalpop_age.csv', 'r')
pop = csv.reader(popin, delimiter=',')
### called/local plotting parameters ###
ps = fxn.pseasons        # season identifiers to plot (defined in local module)
sl = fxn.gp_seasonlabels # human-readable labels for each season
fs = 24                  # axis-label font size
fssml = 16               # tick/annotation font size
### program ###
## import severity index ##
# d_nat_classif[season] = (mean retro zOR, mean early zOR)
d_nat_classif = fxn.readNationalClassifFile(zOR)
## import attack rate ##
# dict_wk[week] = seasonnum, dict_incid[week] = ILI cases per 10,000 in US population in second calendar year of flu season, dict_OR[week] = OR
d_wk, d_incid = fxn.week_incidCA_processing(allincid, pop)
# dict_tot_attack[seasonnum] = total attack rate for weeks 40 to 20 by 100,000
_, d_tot_attack = fxn.contributions_CAO_to_attack(d_wk, d_incid)
# plot values: one (attack rate, zOR) pair per season
AR = [d_tot_attack[s] for s in ps]
retrozOR = [d_nat_classif[s][0] for s in ps]
earlyzOR = [d_nat_classif[s][1] for s in ps]
# report correlation between attack rate and each zOR metric
print 'retro corr coef', np.corrcoef(AR, retrozOR)
print 'early corr coef', np.corrcoef(AR, earlyzOR)
# draw plots
fig1 = plt.figure()
ax1 = fig1.add_subplot(1,1,1)
# mean retro zOR vs. attack rate
ax1.plot(AR, retrozOR, marker = 'o', color = 'black', linestyle = 'None')
for s, x, y in zip(sl, AR, retrozOR):
	ax1.annotate(s, xy=(x,y), xytext=(-10,5), textcoords='offset points', fontsize=fssml)
ax1.set_ylabel(fxn.gp_sigma_r, fontsize=fs)
ax1.set_xlabel(fxn.gp_attackrate, fontsize=fs)
ax1.tick_params(axis='both', labelsize=fssml)
ax1.set_ylim([-10,20])
# inverted y-axis: presumably so that more severe seasons (lower zOR) appear
# higher on the plot -- confirm against the fxn module's conventions.
ax1.invert_yaxis()
plt.savefig('/home/elee/Dropbox/Elizabeth_Bansal_Lab/Manuscripts/Age_Severity/fluseverity_figs/Supp/zOR_totalAR.png', transparent=False, bbox_inches='tight', pad_inches=0)
plt.close()
# plt.show()
| 32.775 | 171 | 0.71167 |
252f0a3cb8c24df7cf5db2bc1599071146727275 | 1,238 | py | Python | Problem001.py | DimitrisMantas/ProjectEuler | 69b647232729a2d2a38ea08d1214616a861046cf | [
"Apache-2.0"
] | null | null | null | Problem001.py | DimitrisMantas/ProjectEuler | 69b647232729a2d2a38ea08d1214616a861046cf | [
"Apache-2.0"
] | null | null | null | Problem001.py | DimitrisMantas/ProjectEuler | 69b647232729a2d2a38ea08d1214616a861046cf | [
"Apache-2.0"
] | null | null | null | """This is the solution to Problem 1 of Project Euler."""
"""Copyright 2021 Dimitris Mantas"""
import time
def compute_all_multiples(of_number, below_number):
    """Return all natural numbers below ``below_number`` that are multiples of ``of_number``.

    :param of_number: the (non-zero) natural number whose multiples are wanted
    :param below_number: exclusive upper bound for the multiples
    :return: list of multiples in increasing order (empty if there are none)
    """
    # A comprehension replaces the manual append loop; `i % of_number == 0`
    # makes the divisibility test explicit.
    return [i for i in range(1, below_number) if i % of_number == 0]
# These lines are for debugging purposes.
# print(compute_all_multiples(3,10))
# print(compute_all_multiples(5,10))
if __name__ == "__main__":
    # Record the wall-clock start time so the total runtime can be reported.
    start_time = time.time()

    # The union of the two multiple lists removes the duplicates: every
    # multiple of 15 appears in both the multiples-of-3 and multiples-of-5
    # lists, and a set keeps only one copy of each.
    ans = set(compute_all_multiples(3, 1000)) | set(compute_all_multiples(5, 1000))
    print(ans)
    print(sum(ans))

    # These lines are for debugging purposes.
    print("This problem was solved in {0} seconds.".format(time.time() - start_time))
252f723efb0474d342e7055aa1aa0011f4760543 | 3,731 | py | Python | fuentes/Colecciones.py | victorricardo/tutorial-python | 5a49407e98c371b39d53993a8d5f63ed9f266353 | [
"OLDAP-2.5"
] | null | null | null | fuentes/Colecciones.py | victorricardo/tutorial-python | 5a49407e98c371b39d53993a8d5f63ed9f266353 | [
"OLDAP-2.5"
] | null | null | null | fuentes/Colecciones.py | victorricardo/tutorial-python | 5a49407e98c371b39d53993a8d5f63ed9f266353 | [
"OLDAP-2.5"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
# Tutorial script (exported from a Jupyter notebook) on Python collection
# types.  The string literals printed below are intentionally left in Spanish:
# they are runtime data, not comments.

# # Collections - Lists

# In[1]:

# Lists are heterogeneous: they can mix numbers, booleans, strings, even lists.
l = [22, True, "una lista", [1, 2]]
mi_var = l[0] # mi_var is 22
mi_var1 = l[3] # mi_var1 is [1, 2]
print (mi_var)
print (mi_var1)

# In[50]:

# To access an element of a list nested inside another list we have to use
# this operator twice
l = ["una lista", [1, 2]]
mi_var = l[1][0] # mi_var is 1
print (mi_var)

# In[51]:

# We can also use this operator to modify an element of the list if we place
# it on the left-hand side of an assignment
l = [22, True]
l[0] = 99 # After this, l is [99, True]
print (l)

# In[52]:

# A curiosity about Python's [] operator is that we can also use negative
# numbers. If a negative number is used as an index, the index starts counting
# from the end, towards the left; that is, [-1] accesses the last element of
# the list, [-2] the next-to-last, [-3] the one before that, and so on.
l = [22, True, "una lista", [1, 2], "antepenúltimo", "penúltimo", "último"]
print (l[-1]) # last
print (l[-2]) # next-to-last
print (l[-3]) # third from the end

# In[53]:

# Another unusual feature is what Python calls slicing, which extends this
# mechanism to select portions of the list. If instead of one number we write
# two numbers start and end separated by a colon (start:end), Python returns a
# list going from position start to position end, not including the latter. If
# we write three numbers (start:end:step) instead of two, the third determines
# every how many positions an element is added to the list.
l = [99, True, "una lista", [1, 2]]
mi_var = l[0:2] # mi_var is [99, True]
print (mi_var)
mi_var = l[0:4:2] # mi_var is [99, "una lista"]
print (mi_var)

# In[54]:

# Note as well that neither the start nor the end of the slice is required:
# if omitted, the start and end positions of the list are used by default.
l = [99, True, "una lista"]
mi_var = l[1:] # mi_var is [True, "una lista"]
print (mi_var)
mi_var = l[:2] # mi_var is [99, True]
print (mi_var)
mi_var = l[:] # mi_var is [99, True, "una lista"]
print (mi_var)
mi_var = l[::2] # mi_var is [99, "una lista"]
print (mi_var)

# In[55]:

# We can also use this mechanism to modify the list:
l = [99, True, "una lista", [1, 2]]
l[0:2] = [0, 1] # l is [0, 1, "una lista", [1, 2]]
print (l)

# # Collections - Tuples

# In[57]:

# Everything explained about lists also applies to tuples, except for the way
# they are defined, which uses parentheses instead of square brackets.
t = (1, 2, True, "python")
print (t)

# # Collections - Dictionaries

# In[59]:

# Dictionaries, also called associative arrays, owe their name to being
# collections that map a key to a value. For example, a dictionary of films
# and their directors:
d = {"Love Actually": "Richard Curtis", "Kill Bill": "Tarantino", "Amélie": "Jean-Pierre Jeunet"}
# The main difference between dictionaries and lists or tuples is that values
# stored in a dictionary are accessed not by their index -- in fact they have
# no order -- but by their key, again using the [] operator.
mi_var = d["Love Actually"] # returns "Richard Curtis"
print (mi_var)
# As with lists and tuples, this operator can also be used to reassign values.
d["Kill Bill"] = "Quentin Tarantino"
mi_var = d["Kill Bill"] # returns "Quentin Tarantino"
print (mi_var)
| 31.091667 | 123 | 0.703029 |
2530a05e38dc4778931bafbbddc794641c581d85 | 28,045 | py | Python | tests/test_subnetlaplace.py | georgezefko/Laplace | c488f7bf739297bab5d771f65635352a07716ca0 | [
"MIT"
] | null | null | null | tests/test_subnetlaplace.py | georgezefko/Laplace | c488f7bf739297bab5d771f65635352a07716ca0 | [
"MIT"
] | null | null | null | tests/test_subnetlaplace.py | georgezefko/Laplace | c488f7bf739297bab5d771f65635352a07716ca0 | [
"MIT"
] | null | null | null | import pytest
from itertools import product
import torch
from torch import nn
from torch.nn.utils import parameters_to_vector
from torch.utils.data import DataLoader, TensorDataset
from torchvision.models import wide_resnet50_2
from laplace import Laplace, SubnetLaplace, FullSubnetLaplace, DiagSubnetLaplace
from laplace.baselaplace import DiagLaplace
from laplace.utils import (SubnetMask, RandomSubnetMask, LargestMagnitudeSubnetMask,
LargestVarianceDiagLaplaceSubnetMask, LargestVarianceSWAGSubnetMask,
ParamNameSubnetMask, ModuleNameSubnetMask, LastLayerSubnetMask)
# Fix the RNG so fixture data and random subnet masks are reproducible.
torch.manual_seed(240)
# Run everything in float64 for tight allclose/equal comparisons.
# NOTE(review): set_default_tensor_type is deprecated in recent PyTorch;
# confirm whether torch.set_default_dtype(torch.double) should be used instead.
torch.set_default_tensor_type(torch.DoubleTensor)

# Subnet masks that pick the top-k parameters according to some score.
score_based_subnet_masks = [RandomSubnetMask, LargestMagnitudeSubnetMask,
                            LargestVarianceDiagLaplaceSubnetMask, LargestVarianceSWAGSubnetMask]
# Subnet masks defined by (parts of) the network architecture.
layer_subnet_masks = [ParamNameSubnetMask, ModuleNameSubnetMask, LastLayerSubnetMask]
all_subnet_masks = score_based_subnet_masks + layer_subnet_masks
# Parametrization grids shared by most tests below.
likelihoods = ['classification', 'regression']
hessian_structures = ['full', 'diag']
@pytest.fixture
def model():
    """Small two-layer MLP (3 -> 20 -> 2) with an `n_params` attribute attached."""
    net = torch.nn.Sequential(nn.Linear(3, 20), nn.Linear(20, 2))
    setattr(net, 'n_params', len(parameters_to_vector(list(net.parameters()))))
    return net
@pytest.fixture
def large_model():
    """Large CNN used to exercise subnet selection over many parameters."""
    return wide_resnet50_2()
@pytest.fixture
def class_loader():
    """Tiny classification data set: 10 examples, 3 features, 2 classes."""
    inputs = torch.randn(10, 3)
    labels = torch.randint(2, (10,))
    return DataLoader(TensorDataset(inputs, labels), batch_size=3)
@pytest.fixture
def reg_loader():
    """Tiny regression data set: 10 examples, 3 features, 2 targets."""
    inputs = torch.randn(10, 3)
    targets = torch.randn(10, 2)
    return DataLoader(TensorDataset(inputs, targets), batch_size=3)
@pytest.mark.parametrize('likelihood', likelihoods)
def test_subnet_laplace_init(model, likelihood):
    """Construction paths: full/diag Hessians succeed; missing indices and
    unsupported Hessian structures fail."""
    # this test only needs *some* valid subnetwork, so pick a random one
    mask = RandomSubnetMask(model=model, n_params_subnet=10)
    mask.select()

    def make_lap(structure):
        return Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork',
                       subnetwork_indices=mask.indices, hessian_structure=structure)

    # full-Hessian subnetwork Laplace is supported...
    assert isinstance(make_lap('full'), FullSubnetLaplace)
    # ...and so is the diagonal-Hessian variant
    assert isinstance(make_lap('diag'), DiagSubnetLaplace)

    # omitting the subnetwork indices entirely is an error
    with pytest.raises(TypeError):
        Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork',
                hessian_structure='diag')

    # Kronecker-factored and low-rank Hessians are not available for subnetworks
    for unsupported in ('kron', 'lowrank'):
        with pytest.raises(ValueError):
            make_lap(unsupported)
@pytest.mark.parametrize('likelihood,hessian_structure', product(likelihoods, hessian_structures))
def test_subnet_laplace_large_init(large_model, likelihood, hessian_structure):
    """On a large model, H is allocated for the (tiny) subnet only."""
    n_subnet = 10
    mask = RandomSubnetMask(model=large_model, n_params_subnet=n_subnet)
    mask.select()

    lap = Laplace(large_model, likelihood=likelihood, subset_of_weights='subnetwork',
                  subnetwork_indices=mask.indices, hessian_structure=hessian_structure)
    assert lap.n_params_subnet == n_subnet

    # H has subnet-sized shape: a matrix for 'full', a vector for 'diag'
    expected_shape = (n_subnet, n_subnet) if hessian_structure == 'full' else (n_subnet,)
    assert lap.H.shape == expected_shape

    # re-initializing H must reproduce the same initial Hessian
    before = lap.H.clone()
    lap._init_H()
    assert torch.allclose(before, lap.H)
@pytest.mark.parametrize('likelihood,hessian_structure', product(likelihoods, hessian_structures))
def test_custom_subnetwork_indices(model, likelihood, class_loader, reg_loader, hessian_structure):
    """Validate checking of the `subnetwork_indices` argument.

    Every malformed value (wrong container, wrong shape, wrong dtype,
    out-of-range or duplicate entries) must raise a ValueError; a non-empty
    1-D LongTensor of valid, unique indices must be accepted.
    """
    loader = class_loader if likelihood == 'classification' else reg_loader

    # subnetwork indices that are None should raise an error
    subnetwork_indices = None
    with pytest.raises(ValueError):
        lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork',
                      subnetwork_indices=subnetwork_indices, hessian_structure=hessian_structure)

    # subnetwork indices that are not PyTorch tensors should raise an error
    subnetwork_indices = [0, 5, 11, 42]
    with pytest.raises(ValueError):
        lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork',
                      subnetwork_indices=subnetwork_indices, hessian_structure=hessian_structure)

    # subnetwork indices that are empty tensors should raise an error
    subnetwork_indices = torch.LongTensor([])
    with pytest.raises(ValueError):
        lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork',
                      subnetwork_indices=subnetwork_indices, hessian_structure=hessian_structure)

    # subnetwork indices that are scalar (0-dim) tensors should raise an error.
    # BUGFIX: the previous `torch.LongTensor(11)` created an *uninitialized
    # 1-D tensor of length 11* (not a scalar), so the expected ValueError was
    # raised -- if at all -- for the wrong reason and non-deterministically.
    subnetwork_indices = torch.tensor(11)
    with pytest.raises(ValueError):
        lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork',
                      subnetwork_indices=subnetwork_indices, hessian_structure=hessian_structure)

    # subnetwork indices that are not 1D PyTorch tensors should raise an error
    subnetwork_indices = torch.LongTensor([[0, 5], [11, 42]])
    with pytest.raises(ValueError):
        lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork',
                      subnetwork_indices=subnetwork_indices, hessian_structure=hessian_structure)

    # subnetwork indices that are double tensors should raise an error
    subnetwork_indices = torch.DoubleTensor([0.0, 5.0, 11.0, 42.0])
    with pytest.raises(ValueError):
        lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork',
                      subnetwork_indices=subnetwork_indices, hessian_structure=hessian_structure)

    # subnetwork indices that are float tensors should raise an error
    subnetwork_indices = torch.FloatTensor([0.0, 5.0, 11.0, 42.0])
    with pytest.raises(ValueError):
        lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork',
                      subnetwork_indices=subnetwork_indices, hessian_structure=hessian_structure)

    # subnetwork indices that are half tensors should raise an error
    subnetwork_indices = torch.HalfTensor([0.0, 5.0, 11.0, 42.0])
    with pytest.raises(ValueError):
        lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork',
                      subnetwork_indices=subnetwork_indices, hessian_structure=hessian_structure)

    # subnetwork indices that are int tensors should raise an error
    subnetwork_indices = torch.IntTensor([0, 5, 11, 42])
    with pytest.raises(ValueError):
        lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork',
                      subnetwork_indices=subnetwork_indices, hessian_structure=hessian_structure)

    # subnetwork indices that are short tensors should raise an error
    subnetwork_indices = torch.ShortTensor([0, 5, 11, 42])
    with pytest.raises(ValueError):
        lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork',
                      subnetwork_indices=subnetwork_indices, hessian_structure=hessian_structure)

    # subnetwork indices that are char tensors should raise an error
    subnetwork_indices = torch.CharTensor([0, 5, 11, 42])
    with pytest.raises(ValueError):
        lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork',
                      subnetwork_indices=subnetwork_indices, hessian_structure=hessian_structure)

    # subnetwork indices that are bool tensors should raise an error
    subnetwork_indices = torch.BoolTensor([0, 5, 11, 42])
    with pytest.raises(ValueError):
        lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork',
                      subnetwork_indices=subnetwork_indices, hessian_structure=hessian_structure)

    # subnetwork indices that contain elements smaller than zero should raise an error
    subnetwork_indices = torch.LongTensor([0, -1, -11])
    with pytest.raises(ValueError):
        lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork',
                      subnetwork_indices=subnetwork_indices, hessian_structure=hessian_structure)

    # subnetwork indices that contain elements larger than n_params should raise an error
    subnetwork_indices = torch.LongTensor([model.n_params + 1, model.n_params + 42])
    with pytest.raises(ValueError):
        lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork',
                      subnetwork_indices=subnetwork_indices, hessian_structure=hessian_structure)

    # subnetwork indices that contain duplicate entries should raise an error
    subnetwork_indices = torch.LongTensor([0, 0, 5, 11, 11, 42])
    with pytest.raises(ValueError):
        lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork',
                      subnetwork_indices=subnetwork_indices, hessian_structure=hessian_structure)

    # Non-empty, 1-dimensional torch.LongTensor with valid entries should work
    subnetwork_indices = torch.LongTensor([0, 5, 11, 42])
    lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork',
                  subnetwork_indices=subnetwork_indices, hessian_structure=hessian_structure)
    lap.fit(loader)
    assert isinstance(lap, SubnetLaplace)
    assert lap.n_params_subnet == 4
    if hessian_structure == 'full':
        assert lap.H.shape == (4, 4)
    else:
        assert lap.H.shape == (4,)
    assert lap.backend.subnetwork_indices.equal(subnetwork_indices)
@pytest.mark.parametrize('subnetwork_mask,likelihood,hessian_structure',
                         product(score_based_subnet_masks, likelihoods, hessian_structures))
def test_score_based_subnet_masks(model, likelihood, subnetwork_mask, class_loader, reg_loader, hessian_structure):
    """Lifecycle of score-based masks: invalid `n_params_subnet` values are
    rejected, indices are only available after a single `select()`, and the
    resulting subnet Laplace has subnet-sized H and prior precision."""
    loader = class_loader if likelihood == 'classification' else reg_loader
    # snapshot of the model weights -- fitting must not modify them
    model_params = parameters_to_vector(model.parameters())
    # set subnetwork mask arguments (some mask types need extra constructor args)
    if subnetwork_mask == LargestVarianceDiagLaplaceSubnetMask:
        diag_laplace_model = DiagLaplace(model, likelihood)
        subnetmask_kwargs = dict(model=model, diag_laplace_model=diag_laplace_model)
    elif subnetwork_mask == LargestVarianceSWAGSubnetMask:
        subnetmask_kwargs = dict(model=model, likelihood=likelihood)
    else:
        subnetmask_kwargs = dict(model=model)
    # should raise error if we don't pass number of subnet parameters within the subnetmask_kwargs
    with pytest.raises(TypeError):
        subnetmask = subnetwork_mask(**subnetmask_kwargs)
        subnetmask.select(loader)
    # should raise error if we set number of subnet parameters to None
    subnetmask_kwargs.update(n_params_subnet=None)
    with pytest.raises(ValueError):
        subnetmask = subnetwork_mask(**subnetmask_kwargs)
        subnetmask.select(loader)
    # should raise error if number of subnet parameters is larger than number of model parameters
    subnetmask_kwargs.update(n_params_subnet=99999)
    with pytest.raises(ValueError):
        subnetmask = subnetwork_mask(**subnetmask_kwargs)
        subnetmask.select(loader)
    # define subnetwork mask
    n_params_subnet = 32
    subnetmask_kwargs.update(n_params_subnet=n_params_subnet)
    subnetmask = subnetwork_mask(**subnetmask_kwargs)
    # should raise error if we try to access the subnet indices before the subnet has been selected
    with pytest.raises(AttributeError):
        subnetmask.indices
    # select subnet mask
    subnetmask.select(loader)
    # should raise error if we try to select the subnet again
    with pytest.raises(ValueError):
        subnetmask.select(loader)
    # define valid subnet Laplace model
    lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork',
                  subnetwork_indices=subnetmask.indices, hessian_structure=hessian_structure)
    assert isinstance(lap, SubnetLaplace)
    # fit Laplace model
    lap.fit(loader)
    # check some parameters
    assert subnetmask.indices.equal(lap.backend.subnetwork_indices)
    assert subnetmask.n_params_subnet == n_params_subnet
    assert lap.n_params_subnet == n_params_subnet
    # fitting must leave the model weights untouched
    assert parameters_to_vector(model.parameters()).equal(model_params)
    # check that Hessian and prior precision is of correct shape
    if hessian_structure == 'full':
        assert lap.H.shape == (n_params_subnet, n_params_subnet)
    else:
        assert lap.H.shape == (n_params_subnet,)
    assert lap.prior_precision_diag.shape == (n_params_subnet,)
@pytest.mark.parametrize('subnetwork_mask,likelihood,hessian_structure',
                         product(layer_subnet_masks, likelihoods, hessian_structures))
def test_layer_subnet_masks(model, likelihood, subnetwork_mask, class_loader, reg_loader, hessian_structure):
    """Layer-based masks: invalid constructor arguments are rejected; a mask
    that selects exactly the last layer reproduces the last-layer Laplace
    Hessian; and a valid mask yields a subnet Laplace of the expected size.

    Expected subnet sizes for the 3->20->2 fixture model:
    '0.weight' (60) + '1.bias' (2) = 62; module '0' = 60 + 20 = 80;
    last layer = 40 + 2 = 42.
    """
    loader = class_loader if likelihood == 'classification' else reg_loader
    subnetmask_kwargs = dict(model=model)
    # fit last-layer Laplace model (reference for the Hessian-equality checks)
    lllap = Laplace(model, likelihood=likelihood, subset_of_weights='last_layer',
                    hessian_structure=hessian_structure)
    lllap.fit(loader)
    # should raise error if we pass number of subnet parameters
    subnetmask_kwargs.update(n_params_subnet=32)
    with pytest.raises(TypeError):
        subnetmask = subnetwork_mask(**subnetmask_kwargs)
        subnetmask.select(loader)
    subnetmask_kwargs = dict(model=model)
    if subnetwork_mask == ParamNameSubnetMask:
        # should raise error if we pass no parameter name list
        subnetmask_kwargs.update()
        with pytest.raises(TypeError):
            subnetmask = subnetwork_mask(**subnetmask_kwargs)
            subnetmask.select(loader)
        # should raise error if we pass an empty parameter name list
        subnetmask_kwargs.update(parameter_names=[])
        with pytest.raises(ValueError):
            subnetmask = subnetwork_mask(**subnetmask_kwargs)
            subnetmask.select(loader)
        # should raise error if we pass a parameter name list with invalid parameter names
        subnetmask_kwargs.update(parameter_names=['123'])
        with pytest.raises(ValueError):
            subnetmask = subnetwork_mask(**subnetmask_kwargs)
            subnetmask.select(loader)
        # define last-layer Laplace model by parameter names and check that
        # Hessian is identical to that of a full LLLaplace model
        subnetmask_kwargs.update(parameter_names=['1.weight', '1.bias'])
        subnetmask = subnetwork_mask(**subnetmask_kwargs)
        subnetmask.select(loader)
        lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork',
                      subnetwork_indices=subnetmask.indices, hessian_structure=hessian_structure)
        lap.fit(loader)
        assert lllap.H.equal(lap.H)
        # define valid parameter name subnet mask
        subnetmask_kwargs.update(parameter_names=['0.weight', '1.bias'])
        subnetmask = subnetwork_mask(**subnetmask_kwargs)
        # should raise error if we access number of subnet parameters before selecting the subnet
        # (n_params_subnet keeps the pre-assigned expected value, 60 + 2)
        n_params_subnet = 62
        with pytest.raises(AttributeError):
            n_params_subnet = subnetmask.n_params_subnet
        # select subnet mask and fit Laplace model
        subnetmask.select(loader)
        lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork',
                      subnetwork_indices=subnetmask.indices, hessian_structure=hessian_structure)
        lap.fit(loader)
        assert isinstance(lap, SubnetLaplace)
    elif subnetwork_mask == ModuleNameSubnetMask:
        # should raise error if we pass no module name list
        subnetmask_kwargs.update()
        with pytest.raises(TypeError):
            subnetmask = subnetwork_mask(**subnetmask_kwargs)
            subnetmask.select(loader)
        # should raise error if we pass an empty module name list
        subnetmask_kwargs.update(module_names=[])
        with pytest.raises(ValueError):
            subnetmask = subnetwork_mask(**subnetmask_kwargs)
            subnetmask.select(loader)
        # should raise error if we pass a module name list with invalid module names
        subnetmask_kwargs.update(module_names=['123'])
        with pytest.raises(ValueError):
            subnetmask = subnetwork_mask(**subnetmask_kwargs)
            subnetmask.select(loader)
        # define last-layer Laplace model by module name and check that
        # Hessian is identical to that of a full LLLaplace model
        subnetmask_kwargs.update(module_names=['1'])
        subnetmask = subnetwork_mask(**subnetmask_kwargs)
        subnetmask.select(loader)
        lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork',
                      subnetwork_indices=subnetmask.indices, hessian_structure=hessian_structure)
        lap.fit(loader)
        assert lllap.H.equal(lap.H)
        # define valid parameter name subnet mask
        subnetmask_kwargs.update(module_names=['0'])
        subnetmask = subnetwork_mask(**subnetmask_kwargs)
        # should raise error if we access number of subnet parameters before selecting the subnet
        # (n_params_subnet keeps the pre-assigned expected value, 60 + 20)
        n_params_subnet = 80
        with pytest.raises(AttributeError):
            n_params_subnet = subnetmask.n_params_subnet
        # select subnet mask and fit Laplace model
        subnetmask.select(loader)
        lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork',
                      subnetwork_indices=subnetmask.indices, hessian_structure=hessian_structure)
        lap.fit(loader)
        assert isinstance(lap, SubnetLaplace)
    elif subnetwork_mask == LastLayerSubnetMask:
        # should raise error if we pass invalid last-layer name
        subnetmask_kwargs.update(last_layer_name='123')
        with pytest.raises(KeyError):
            subnetmask = subnetwork_mask(**subnetmask_kwargs)
            subnetmask.select(loader)
        # define valid last-layer subnet mask (without passing the last-layer name)
        subnetmask_kwargs = dict(model=model)
        subnetmask = subnetwork_mask(**subnetmask_kwargs)
        # should raise error if we access number of subnet parameters before selecting the subnet
        with pytest.raises(AttributeError):
            n_params_subnet = subnetmask.n_params_subnet
        # select subnet mask and fit Laplace model
        subnetmask.select(loader)
        lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork',
                      subnetwork_indices=subnetmask.indices, hessian_structure=hessian_structure)
        lap.fit(loader)
        assert isinstance(lap, SubnetLaplace)
        # check that Hessian is identical to that of a full LLLaplace model
        assert lllap.H.equal(lap.H)
        # define valid last-layer subnet mask (with passing the last-layer name)
        subnetmask_kwargs.update(last_layer_name='1')
        subnetmask = subnetwork_mask(**subnetmask_kwargs)
        # should raise error if we access number of subnet parameters before selecting the subnet
        # (n_params_subnet keeps the pre-assigned expected value, 40 + 2)
        n_params_subnet = 42
        with pytest.raises(AttributeError):
            n_params_subnet = subnetmask.n_params_subnet
        # select subnet mask and fit Laplace model
        subnetmask.select(loader)
        lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork',
                      subnetwork_indices=subnetmask.indices, hessian_structure=hessian_structure)
        lap.fit(loader)
        assert isinstance(lap, SubnetLaplace)
        # check that Hessian is identical to that of a full LLLaplace model
        assert lllap.H.equal(lap.H)
    # check some parameters (uses the last `lap`/`n_params_subnet` of the branch taken)
    assert subnetmask.indices.equal(lap.backend.subnetwork_indices)
    assert subnetmask.n_params_subnet == n_params_subnet
    assert lap.n_params_subnet == n_params_subnet
    # check that Hessian and prior precision is of correct shape
    if hessian_structure == 'full':
        assert lap.H.shape == (n_params_subnet, n_params_subnet)
    else:
        assert lap.H.shape == (n_params_subnet,)
    assert lap.prior_precision_diag.shape == (n_params_subnet,)
@pytest.mark.parametrize('likelihood,hessian_structure', product(likelihoods, hessian_structures))
def test_full_subnet_mask(model, likelihood, class_loader, reg_loader, hessian_structure):
    """A 'subnet' covering every weight must reproduce the all-weights Laplace."""
    loader = reg_loader if likelihood == 'regression' else class_loader

    # mask class that marks every single parameter as part of the subnetwork
    class FullSubnetMask(SubnetMask):
        def get_subnet_mask(self, train_loader):
            return torch.ones(model.n_params).byte()

    # select the trivial mask and fit a subnet Laplace over it
    mask = FullSubnetMask(model=model)
    mask.select(loader)
    lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork',
                  subnetwork_indices=mask.indices, hessian_structure=hessian_structure)
    lap.fit(loader)
    assert isinstance(lap, SubnetLaplace)

    # the subnet spans all parameter indices
    assert mask.indices.equal(torch.tensor(list(range(model.n_params))))
    assert mask.n_params_subnet == model.n_params
    assert lap.n_params_subnet == model.n_params

    # its Hessian must equal the one of a Laplace over all weights
    full_lap = Laplace(model, likelihood=likelihood, subset_of_weights='all',
                       hessian_structure=hessian_structure)
    full_lap.fit(loader)
    assert full_lap.H.equal(lap.H)
@pytest.mark.parametrize('subnetwork_mask,hessian_structure', product(all_subnet_masks, hessian_structures))
def test_regression_predictive(model, reg_loader, subnetwork_mask, hessian_structure):
    """GLM and NN predictives of a fitted regression subnet Laplace have the expected shapes."""
    # assemble the constructor arguments each mask type requires
    kwargs = dict(model=model)
    if subnetwork_mask in score_based_subnet_masks:
        kwargs['n_params_subnet'] = 32
        if subnetwork_mask == LargestVarianceSWAGSubnetMask:
            kwargs['likelihood'] = 'regression'
        elif subnetwork_mask == LargestVarianceDiagLaplaceSubnetMask:
            kwargs['diag_laplace_model'] = DiagLaplace(model, 'regression')
    elif subnetwork_mask == ParamNameSubnetMask:
        kwargs['parameter_names'] = ['0.weight', '1.bias']
    elif subnetwork_mask == ModuleNameSubnetMask:
        kwargs['module_names'] = ['0']

    subnetmask = subnetwork_mask(**kwargs)
    subnetmask.select(reg_loader)
    lap = Laplace(model, likelihood='regression', subset_of_weights='subnetwork',
                  subnetwork_indices=subnetmask.indices, hessian_structure=hessian_structure)
    assert isinstance(lap, SubnetLaplace)
    lap.fit(reg_loader)

    X, _ = reg_loader.dataset.tensors
    point_pred = model(X)

    # unknown prediction types are rejected
    with pytest.raises(ValueError):
        lap(X, pred_type='linear')

    # GLM predictive: mean equals the MAP output, full output covariance per example
    f_mu, f_var = lap(X, pred_type='glm')
    assert torch.allclose(f_mu, point_pred)
    assert f_var.shape == torch.Size([f_mu.shape[0], f_mu.shape[1], f_mu.shape[1]])
    assert len(f_mu) == len(X)

    # NN (sampling) predictive: only a diagonal variance estimate
    f_mu, f_var = lap(X, pred_type='nn')
    assert f_mu.shape == f_var.shape
    assert f_var.shape == torch.Size([f_mu.shape[0], f_mu.shape[1]])
    assert len(f_mu) == len(X)
@pytest.mark.parametrize('subnetwork_mask,hessian_structure', product(all_subnet_masks, hessian_structures))
def test_classification_predictive(model, class_loader, subnetwork_mask, hessian_structure):
    """Classification predictives return proper probability vectors for every link approximation."""
    # assemble the constructor arguments each mask type requires
    kwargs = dict(model=model)
    if subnetwork_mask in score_based_subnet_masks:
        kwargs['n_params_subnet'] = 32
        if subnetwork_mask == LargestVarianceSWAGSubnetMask:
            kwargs['likelihood'] = 'classification'
        elif subnetwork_mask == LargestVarianceDiagLaplaceSubnetMask:
            kwargs['diag_laplace_model'] = DiagLaplace(model, 'classification')
    elif subnetwork_mask == ParamNameSubnetMask:
        kwargs['parameter_names'] = ['0.weight', '1.bias']
    elif subnetwork_mask == ModuleNameSubnetMask:
        kwargs['module_names'] = ['0']

    subnetmask = subnetwork_mask(**kwargs)
    subnetmask.select(class_loader)
    lap = Laplace(model, likelihood='classification', subset_of_weights='subnetwork',
                  subnetwork_indices=subnetmask.indices, hessian_structure=hessian_structure)
    assert isinstance(lap, SubnetLaplace)
    lap.fit(class_loader)

    X, _ = class_loader.dataset.tensors
    probs_map = torch.softmax(model(X), dim=-1)

    # unknown prediction types are rejected
    with pytest.raises(ValueError):
        lap(X, pred_type='linear')

    def check(pred):
        # every predictive must match the MAP shape and have rows summing to one
        assert pred.shape == probs_map.shape
        assert torch.allclose(pred.sum(), torch.tensor(len(pred), dtype=torch.double))

    # GLM predictive under the three supported link approximations
    check(lap(X, pred_type='glm', link_approx='mc', n_samples=100))
    check(lap(X, pred_type='glm', link_approx='probit'))
    check(lap(X, pred_type='glm', link_approx='bridge'))
    # MC-sampled NN predictive
    check(lap(X, pred_type='nn', n_samples=100))
@pytest.mark.parametrize('subnetwork_mask,likelihood,hessian_structure',
                         product(all_subnet_masks, likelihoods, hessian_structures))
def test_subnet_marginal_likelihood(model, subnetwork_mask, likelihood, hessian_structure, class_loader, reg_loader):
    """log_marginal_likelihood() runs for every mask/likelihood/structure combination."""
    extra_kwargs = {}
    if subnetwork_mask in score_based_subnet_masks:
        extra_kwargs['n_params_subnet'] = 32
        if subnetwork_mask == LargestVarianceSWAGSubnetMask:
            extra_kwargs['likelihood'] = likelihood
        elif subnetwork_mask == LargestVarianceDiagLaplaceSubnetMask:
            extra_kwargs['diag_laplace_model'] = DiagLaplace(model, likelihood)
    elif subnetwork_mask == ParamNameSubnetMask:
        extra_kwargs['parameter_names'] = ['0.weight', '1.bias']
    elif subnetwork_mask == ModuleNameSubnetMask:
        extra_kwargs['module_names'] = ['0']
    subnetmask = subnetwork_mask(model=model, **extra_kwargs)

    # Pick the data loader matching the likelihood under test.
    loader = class_loader if likelihood == 'classification' else reg_loader
    subnetmask.select(loader)

    lap = Laplace(model, likelihood=likelihood, subset_of_weights='subnetwork',
                  subnetwork_indices=subnetmask.indices,
                  hessian_structure=hessian_structure)
    assert isinstance(lap, SubnetLaplace)
    lap.fit(loader)
    # Smoke test only: the call must succeed without raising.
    lap.log_marginal_likelihood()
253330ec00e3e989dc1286f84a83a5c56cf85fc5 | 332 | py | Python | rylog/__init__.py | Ryan-Holben/rylog | 0f81fc8031b5c008f87ce367ebeabd443ef341f8 | [
"MIT"
] | null | null | null | rylog/__init__.py | Ryan-Holben/rylog | 0f81fc8031b5c008f87ce367ebeabd443ef341f8 | [
"MIT"
] | null | null | null | rylog/__init__.py | Ryan-Holben/rylog | 0f81fc8031b5c008f87ce367ebeabd443ef341f8 | [
"MIT"
] | null | null | null | """
rylog
Logging happening in a 3-dimensional Cartesian product of:
1. The logging level: [debug, info, warn, error]
2. The logging category: e.g. software event, action, output
3. The detected function/method: e.g. my_class.class_method or foo
"""
from .misc import *
from .server import *
from .client import *
| 25.538462 | 70 | 0.701807 |
253360f89cf58d0a39abb0d2f777c0a588b4ec22 | 243 | py | Python | typic/constraints/error.py | wyfo/typical | 5fc5326b3509d0b9d35c15dae9590d6cf37a0354 | [
"MIT"
] | 157 | 2019-03-20T19:12:28.000Z | 2022-03-25T08:57:53.000Z | typic/constraints/error.py | wyfo/typical | 5fc5326b3509d0b9d35c15dae9590d6cf37a0354 | [
"MIT"
] | 147 | 2019-07-03T20:00:52.000Z | 2022-02-10T11:38:39.000Z | typic/constraints/error.py | wyfo/typical | 5fc5326b3509d0b9d35c15dae9590d6cf37a0354 | [
"MIT"
] | 15 | 2019-03-21T11:01:03.000Z | 2022-01-08T10:38:15.000Z | class ConstraintSyntaxError(SyntaxError):
    """A generic error indicating an improperly defined constraint."""
    # Subclasses SyntaxError so callers can catch either this specific
    # error or the builtin type.
    pass
class ConstraintValueError(ValueError):
    """Raised when a value violates a constraint.

    Subclasses :class:`ValueError`, so either type can be caught.
    """
| 22.090909 | 70 | 0.740741 |
2533ae4893b1c779f4471ef4511dd0dbc0e4068c | 3,701 | py | Python | 03_queue/queue_xrh.py | Xinrihui/Data-Structure-and-Algrithms | fa3a455f64878e42d033c1fd8d612f108c71fb72 | [
"Apache-2.0"
] | 1 | 2021-08-13T10:55:33.000Z | 2021-08-13T10:55:33.000Z | 03_queue/queue_xrh.py | Xinrihui/Data-Structure-and-Algrithms | fa3a455f64878e42d033c1fd8d612f108c71fb72 | [
"Apache-2.0"
] | null | null | null | 03_queue/queue_xrh.py | Xinrihui/Data-Structure-and-Algrithms | fa3a455f64878e42d033c1fd8d612f108c71fb72 | [
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
# -*- coding: UTF-8 -*-
import timeit
import numpy as np
import sys
import random as rand
class Queue_array:
    """Array-backed FIFO queue of fixed capacity.

    Live elements occupy ``self._items[self._head:self._tail]``.  When the
    tail reaches the end of the backing array, the live window is shifted
    back to the front; the queue is genuinely full only when no space is
    left anywhere (at most ``capacity`` items).
    """
    def __init__(self, capacity):
        # One extra slot so _tail can sit one past the last usable index.
        self._items = [None] * (capacity + 1)
        self._capacity = capacity
        self._head = 0
        self._tail = 0

    def enqueue(self, item):
        """Append *item* at the tail; return True on success, False if full."""
        if self._tail == self._capacity:
            if self._head == 0:
                # Window already starts at 0: no free slot anywhere.
                print('the Queue is full!')
                return False
            # Compact: move the live window back to the start of the array.
            size = self._tail - self._head
            self._items[0:size] = self._items[self._head:self._tail]
            self._head, self._tail = 0, size
        self._items[self._tail] = item
        self._tail += 1
        return True

    def dequeue(self):
        """Remove and return the head item, or None if the queue is empty."""
        if self._head == self._tail:
            print('the Queue is empty!')
            return None
        item = self._items[self._head]
        self._items[self._head] = None
        self._head += 1
        return item

    def __repr__(self):
        # Assumes string items (as in the demo below); joins the live window.
        return ','.join(self._items[self._head : self._tail])
class CircularQueue:
    """Ring-buffer FIFO queue.

    One slot of the backing array is deliberately left empty so that
    ``head == tail`` unambiguously means "empty" while
    ``(tail + 1) % capacity == head`` means "full".  A queue built with
    ``capacity`` slots therefore stores at most ``capacity - 1`` items,
    but enqueue/dequeue never have to move data around.
    """
    def __init__(self, capacity):
        self._items = [None] * capacity
        self._capacity = capacity
        self._head = 0
        self._tail = 0

    def enqueue(self, item):
        """Store *item* at the tail; return True on success, False if full."""
        nxt = (self._tail + 1) % self._capacity
        if nxt == self._head:
            print('the Queue is full!')
            return False
        self._items[self._tail] = item
        self._tail = nxt
        return True

    def dequeue(self):
        """Remove and return the head item, or None if the queue is empty."""
        if self._head == self._tail:
            print('the Queue is empty!')
            return None
        item = self._items[self._head]
        self._items[self._head] = None
        self._head = (self._head + 1) % self._capacity
        return item
class BlockingQueue:
    """Bounded FIFO queue for a producer/consumer pair.

    Stores at most ``capacity`` items.  ``producer`` rejects items when
    full; ``consumer`` returns None when empty.
    """
    def __init__(self, capacity):
        self._items = []
        self._capacity = capacity
    def producer(self, item):  # TODO: called from multiple threads - add a lock around the queue
        # Bug fix: was ``<=``, which let the queue grow to capacity + 1 items.
        if len(self._items) < self._capacity:
            self._items.append(item)
            return True
        else:
            print('the Queue is full!')
            return False
    def consumer(self):
        if len(self._items) > 0:
            # Bug fix: pop from the head.  ``pop()`` removed the newest item,
            # which made this a LIFO stack rather than a FIFO queue.
            res = self._items.pop(0)
            return res
        else:
            print('the Queue is empty!')
            return None
if __name__ == '__main__':
    # 1. Array-backed queue demo (kept for reference, currently disabled)
    # queue=Queue_array(8)
    # string_list=['a','b','c','d','e','f','g','h']
    #
    # for ele in string_list:
    #     queue.enqueue(ele)
    #
    # print(queue._items)
    #
    # queue.enqueue('i')
    #
    # print('pop:',queue.dequeue())
    # print('pop:', queue.dequeue())
    # print('pop:', queue.dequeue())
    # print(queue._items)
    #
    # queue.enqueue('i')
    # print(queue)
    # 2. Circular queue demo: fill, pop three, then refill to show wrap-around
    queue = CircularQueue(8)
    string_list=['e','f','g','h','i','j']
    for ele in string_list:
        queue.enqueue(ele)
    print(queue._items)
    for i in range(3):
        print('pop:',queue.dequeue())
    print(queue._items)
    queue.enqueue('a')
    queue.enqueue('b')
    print(queue._items)
    queue.enqueue('c')
    queue.enqueue('d')
    print(queue._items)
    # With 7 items stored this hits the "full" case (capacity 8 holds 7).
    queue.enqueue('e')
| 19.276042 | 87 | 0.518779 |
253438c9cde5237ab336b6ebc0e8e1089525b6e7 | 1,703 | py | Python | domains/gym_craft/tests/plotting.py | AndrewPaulChester/sage-code | 9fe676bfbcbc6f642eca29b30a1027fba2a426a0 | [
"MIT"
] | null | null | null | domains/gym_craft/tests/plotting.py | AndrewPaulChester/sage-code | 9fe676bfbcbc6f642eca29b30a1027fba2a426a0 | [
"MIT"
] | null | null | null | domains/gym_craft/tests/plotting.py | AndrewPaulChester/sage-code | 9fe676bfbcbc6f642eca29b30a1027fba2a426a0 | [
"MIT"
] | null | null | null | import numpy as np
from matplotlib import pyplot as plt
import math
MAX_SPEED = 2
ACCELERATION = 0.5
DRAG = 0.3
TURN_SPEED=5
IMAGE = np.array([
[0,0,0,1,0,0,0],
[0,0,1,1,1,0,0],
[0,1,1,1,1,1,0],
[1,1,1,1,1,1,1],
[0,1,1,1,1,1,0],
[0,0,1,1,1,0,0],
[0,0,0,1,0,0,0]])
def main():
    """Interactive demo: drive the sprite with random steering for 1000 steps."""
    position=(42 ,42)
    speed=0
    bearing=0
    acc=0
    turn=0
    plt.ion()  # interactive mode so the figure updates inside the loop
    fig, ax = plt.subplots()
    img = np.zeros((420,420))
    img[207:214,207:214]=IMAGE  # place the 7x7 sprite at the centre to start
    im = ax.imshow(img)
    for i in range(1000):
        # Random walk on the two controls, each clamped to [-1, 1].
        acc+=np.random.rand()-0.5
        turn+=np.random.rand()-0.5
        acc=np.clip(acc,-1,1)
        turn=np.clip(turn,-1,1)
        (position,bearing,speed) = update_coords(position,bearing,speed,acc,turn)
        print(acc,turn)
        print(position)
        render(ax,im,position,bearing,speed)
def update_coords(position, bearing, speed, acceleration, turning):
    """Advance one physics step: update speed, turn, then move along the new bearing."""
    x_pos, y_pos = position
    new_speed = update_speed(speed, acceleration)
    # Bearing is kept in degrees, wrapped to [0, 360).
    new_bearing = (bearing + TURN_SPEED * turning) % 360
    heading = new_bearing * 2 * math.pi / 360
    x_pos = x_pos + new_speed * math.sin(heading)
    y_pos = y_pos + new_speed * math.cos(heading)
    return ((x_pos, y_pos), new_bearing, new_speed)
def update_speed(speed, acceleration):
    """Apply drag and thrust, then clamp the result to [-MAX_SPEED, MAX_SPEED]."""
    new_speed = speed * DRAG + acceleration * ACCELERATION
    if new_speed > 0:
        return min(new_speed, MAX_SPEED)
    return max(new_speed, -MAX_SPEED)
def render(ax,im,position,bearing,speed):
    """Redraw the frame: blit the sprite at position*5 and refresh the title."""
    x_pos,y_pos = position
    img = np.zeros((420,420))
    # World units are mapped to the 420x420 canvas at 5 pixels per unit.
    x = int(x_pos*5)
    y = int(y_pos*5)
    # NOTE(review): out-of-range positions silently clip (or wrap, for
    # negative indices) with numpy slicing - confirm the sprite is meant
    # to stay on the canvas.
    img[x:x+7,y:y+7]=IMAGE
    # plt.scatter(x,y)
    # plt.show()
    im.set_data(img)
    ax.set_title(f"bearing : {bearing}, speed: {speed}")
    plt.pause(0.001)
    plt.draw()
# Entry point: launch the interactive random-drive visualisation.
if __name__ == "__main__":
    main() | 22.706667 | 81 | 0.613623 |
2536ce2ad28b7718b5111d981d1c1217ff573d5d | 1,868 | py | Python | 11_ContainerWithMostWater/container_with_most_water.py | xiaowei1118/leetcode-python | 24d4ccbbf9643100dd2de91afd5d30dca9b7ffe1 | [
"MIT"
] | 2 | 2017-10-09T16:59:01.000Z | 2017-10-10T08:38:08.000Z | 11_ContainerWithMostWater/container_with_most_water.py | xiaowei1118/leetcode-python | 24d4ccbbf9643100dd2de91afd5d30dca9b7ffe1 | [
"MIT"
] | null | null | null | 11_ContainerWithMostWater/container_with_most_water.py | xiaowei1118/leetcode-python | 24d4ccbbf9643100dd2de91afd5d30dca9b7ffe1 | [
"MIT"
] | null | null | null | # coding: utf-8
# 给定 n 个非负整数 a1,a2,...,an,每个数代表坐标中的一个点 (i, ai) 。
# 在坐标内画 n 条垂直线,垂直线 i 的两个端点分别为 (i, ai) 和 (i, 0)。
# 找出其中的两条线,使得它们与 x 轴共同构成的容器可以容纳最多的水。
#
# 来源:力扣(LeetCode)
# 链接:https://leetcode-cn.com/problems/container-with-most-water
class Solution(object):
    """Container With Most Water (LeetCode 11).

    Given heights a1..an of vertical lines, find the two lines that,
    together with the x axis, contain the most water.  Three versions are
    kept for comparison: ``maxArea1`` (recursive), ``maxArea2`` (brute
    force, O(n^2)) and ``maxArea`` (two pointers, O(n)).
    """
    # Recursive approach: best of "without the last line" vs "the last
    # line paired with every other line".
    def maxArea1(self, height):
        """
        :type height: List[int]
        :rtype: int
        """
        n = len(height)
        if n == 2:
            return height[0] if height[0] < height[1] else height[1]
        # Bug fix: recurse into maxArea1 itself.  It previously called the
        # two-pointer maxArea, so this method was never actually recursive.
        maxArea = self.maxArea1(height[0:n - 1])
        for j, item in enumerate(height):
            x = n - 1 - j  # distance along the x axis
            y = min(item, height[n - 1])  # limiting height
            area = x * y
            if maxArea < area:
                maxArea = area
        return maxArea
    # Brute force: evaluate every pair (i, j).
    def maxArea2(self, height):
        """
        :type height: List[int]
        :rtype: int
        """
        n = len(height)
        maxArea = 0
        j = 1
        while j < n:
            i = 0
            while i < j:
                x = j - i  # distance along the x axis
                y = min(height[i], height[j])  # limiting height
                area = x * y
                if maxArea < area:
                    maxArea = area
                i = i + 1
            j = j + 1
        return maxArea
    # Two pointers: always move the shorter side inward.
    def maxArea(self, height):
        """
        :type height: List[int]
        :rtype: int
        """
        n = len(height)
        maxArea = 0
        i = 0
        j = n - 1
        while i < j:
            x = j - i  # distance along the x axis
            y = min(height[i], height[j])  # limiting height
            area = x * y
            if maxArea < area:
                maxArea = area
            if height[j] > height[i]:
                i = i + 1
            else:
                j = j - 1
        return maxArea
# Smoke test for the sample input from the problem statement (expected: 49).
# Fixes: the Python-2 ``print`` statement (a SyntaxError on Python 3) and
# the shadowing of the builtin ``list``.
values = [1, 8, 6, 2, 5, 4, 8, 3, 7]
print(Solution().maxArea(values))
| 20.527473 | 68 | 0.4197 |
253841f648fa7d855056a9bf18031761bbedfe7c | 561 | py | Python | app/system/migrations/0003_auto_20181206_1042.py | TennaGraph/TennaGraph | 002998d94300ee67168f1a8164c0e6bc86836e1f | [
"Apache-2.0"
] | 7 | 2018-11-13T17:39:15.000Z | 2019-03-27T04:55:24.000Z | app/system/migrations/0003_auto_20181206_1042.py | TennaGraph/TennaGraph | 002998d94300ee67168f1a8164c0e6bc86836e1f | [
"Apache-2.0"
] | 72 | 2018-11-09T14:20:25.000Z | 2020-06-05T19:28:19.000Z | app/system/migrations/0003_auto_20181206_1042.py | TennaGraph/TennaGraph | 002998d94300ee67168f1a8164c0e6bc86836e1f | [
"Apache-2.0"
] | 3 | 2018-11-19T19:10:39.000Z | 2019-08-23T20:52:23.000Z | # Generated by Django 2.1.3 on 2018-12-06 10:42
from django.db import migrations, models
import system.models.system_settings
class Migration(migrations.Migration):
    # Auto-generated schema migration: relaxes ``contract_vot_manager_address``
    # on SystemSettings so the field may be blank/NULL.  max_length=42 matches
    # a 0x-prefixed hex address (presumably an Ethereum contract address -
    # confirm against ``validate_address``), enforced by the custom validator.
    dependencies = [
        ('system', '0002_systemsettings_contract_vot_manager_address'),
    ]
    operations = [
        migrations.AlterField(
            model_name='systemsettings',
            name='contract_vot_manager_address',
            field=models.CharField(blank=True, max_length=42, null=True, validators=[system.models.system_settings.validate_address]),
        ),
    ]
| 28.05 | 134 | 0.691622 |
25388135b2590bec6c24b4f712d9da835c81c62b | 4,338 | py | Python | pysplit/clusgroup.py | haochiche/pysplit | df6f8ebe93dd81ff8925529b8dfaaea2f446f2e5 | [
"BSD-3-Clause"
] | 110 | 2015-07-12T15:13:18.000Z | 2022-03-28T00:58:59.000Z | pysplit/clusgroup.py | haochiche/pysplit | df6f8ebe93dd81ff8925529b8dfaaea2f446f2e5 | [
"BSD-3-Clause"
] | 70 | 2016-02-23T03:19:55.000Z | 2022-03-14T09:12:43.000Z | pysplit/clusgroup.py | haochiche/pysplit | df6f8ebe93dd81ff8925529b8dfaaea2f446f2e5 | [
"BSD-3-Clause"
] | 66 | 2015-07-10T20:43:30.000Z | 2022-02-18T01:00:33.000Z | from __future__ import division, print_function
from .trajgroup import TrajectoryGroup
from .hypath import HyPath
from .hygroup import HyGroup
def print_clusterprocedure():
    """Print clustering guide."""
    # Step-by-step recipe for running a HYSPLIT trajectory cluster analysis
    # from PySPLIT.  Purely informational: writes to stdout, returns None.
    print("""
In ``PySPLIT``
1. Create ``TrajectoryGroup`` with desired set of trajectories
2. ``TrajectoryGroup.make_infile()``
In ``HYSPLIT``
3. Trajectory --> Special Runs --> Clustering --> Standard
4. Adjust clustering parameters and working folder
   (where output will be stored, where INFILE lives)
5. ``Run cluster analysis``
6. Determine and set appropriate number of clusters
7. Assign trajectories to clusters (``Run``)
8. ``Display Means``, ``Quit``
In ``PySPLIT``
9. ``spawn_clusters()``""")
class Cluster(HyPath, HyGroup):
    """
    A special subclass of both ``HyGroup`` and ``HyPath``.

    A ``Cluster`` carries the trajectories assigned to it as well as the
    mean-path information; both are determined by ``HySPLIT``.
    """

    def __init__(self, clusterdata, pathdata, datetime, clusterheader,
                 trajectories, cluster_number):
        """
        Initialize ``Cluster`` object.

        Parameters
        ----------
        trajectories : list of ``Trajectory`` objects
            Trajectories that belong in the cluster.
        cluster_number : int
            The ``Cluster`` identification number; distinguishes this
            ``Cluster`` from the others in its ``ClusterGroup``.
        """
        # Initialize both parents explicitly: HyPath receives the mean-path
        # data, HyGroup the member trajectories.
        HyPath.__init__(self, clusterdata, pathdata, datetime,
                        clusterheader)
        HyGroup.__init__(self, trajectories)

        self.start_longitude = self.trajectories[0].data.loc[0, 'geometry'].x
        self.clusternumber = cluster_number
        self.multitraj = False

    def __getitem__(self, index):
        """
        Get ``Trajectory`` or ``TrajectoryGroup``.

        An integer index yields a single ``Trajectory``; a slice yields a
        ``TrajectoryGroup``.  Never returns a ``Cluster``, because those
        are specially constructed.
        """
        selection = self.trajectories[index]
        if isinstance(selection, list):
            return TrajectoryGroup(selection)
        return selection

    def __add__(self, other):
        """
        Add a ``HyGroup`` to this ``Cluster`` instance.

        Parameters
        ----------
        other : ``HyGroup``
            Another ``TrajectoryGroup`` or ``Cluster``; may share some
            ``Trajectory`` instances with this one.

        Returns
        -------
        A new ``TrajectoryGroup`` holding the union of the two sets of
        ``Trajectory`` instances.
        """
        union = HyGroup.__add__(self, other)
        return TrajectoryGroup(union)

    def __sub__(self, other):
        """
        Subtract a ``HyGroup`` from this ``Cluster`` instance.

        Parameters
        ----------
        other : ``HyGroup``
            Another ``Cluster`` or ``TrajectoryGroup``.

        Returns
        -------
        A new ``TrajectoryGroup`` holding the set difference between the
        two sets of ``Trajectory`` instances.
        """
        difference = HyGroup.__sub__(self, other)
        return TrajectoryGroup(difference)
class ClusterGroup(object):
    """
    Group of ``Cluster`` instances.
    Contains all the ``Cluster``s produced in one ``HYSPLIT`` cluster analysis.
    """
    def __init__(self, clusters):
        """
        Initialize ``ClusterGroup`` object.
        Parameters
        ----------
        clusters : list of ``Cluster`` instances
            ``Cluster`` instances from the same HYSPLIT clustering run.
        """
        self.clusters = clusters
        self.clustercount = len(clusters)
        # Total trajectory count summed over the clusters (a trajectory
        # present in several clusters would be counted once per cluster).
        self.trajcount = sum([c.trajcount for c in self.clusters])
    def __getitem__(self, index):
        """
        Get ``Cluster`` or ``ClusterGroup``.
        Index or slice ``self.clusters`` to get a ``Cluster`` or
        ``ClusterGroup``, respectively.
        """
        newthing = self.clusters[index]
        # A slice of a list is a list; only then re-wrap as a ClusterGroup.
        # Replaces the previous bare ``try/except: pass``, which silently
        # swallowed *any* error raised while rebuilding the group, and
        # mirrors ``Cluster.__getitem__``.
        if isinstance(newthing, list):
            newthing = ClusterGroup(newthing)
        return newthing
| 27.807692 | 79 | 0.574919 |
253a183c509b499df726c22fb7b3ee45b370c6ff | 2,424 | py | Python | bin/lkft_notify_developer.py | roxell/lkft-tools | bd1981b1f616114cb260878fe7319753107e581b | [
"MIT"
] | 3 | 2018-12-14T02:37:10.000Z | 2020-04-30T19:07:01.000Z | bin/lkft_notify_developer.py | roxell/lkft-tools | bd1981b1f616114cb260878fe7319753107e581b | [
"MIT"
] | 25 | 2018-07-27T13:38:17.000Z | 2021-10-05T13:01:36.000Z | bin/lkft_notify_developer.py | roxell/lkft-tools | bd1981b1f616114cb260878fe7319753107e581b | [
"MIT"
] | 12 | 2018-07-09T22:52:32.000Z | 2021-11-29T19:45:33.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import argparse
import os
import re
import requests
import sys
sys.path.append(os.path.join(sys.path[0], "../", "lib"))
import lkft_squad_client # noqa: E402
def get_branch_from_make_kernelversion(make_kernelversion):
    """Extract the "major.minor" branch from a full kernel version string.

    IN: "4.4.118"     -> OUT: "4.4"
    IN: "4.9.118-rc1" -> OUT: "4.9"
    """
    branch_pattern = re.compile(r"^(\d+\.\d+).*$")
    return branch_pattern.match(make_kernelversion).group(1)
def get_most_recent_release(builds_url):
    """
    Given builds listed newest-first, return the most recent finished build.

    Falls back to the newest build when none are finished, and to None
    when there are no builds at all.
    """
    newest = None
    for build in lkft_squad_client.Builds(builds_url):
        if not newest:
            newest = build  # remember the first (newest) build seen
        if build["finished"]:
            return build
    return newest
def get_build_report(build_url):
    """Build the (recipient, subject, body) email triple for a developer build.

    The build is compared against the most recent finished build of the
    same kernel branch, using the server-side email template "9".
    """
    build = lkft_squad_client.Build(build_url)
    baseline_branch = get_branch_from_make_kernelversion(
        build.build_metadata["make_kernelversion"]
    )
    # Get baseline
    baseline_project_url = lkft_squad_client.get_projects_by_branch()[baseline_branch]
    baseline_builds_url = baseline_project_url + "builds"
    baseline_build = get_most_recent_release(baseline_builds_url)
    template_url = build_url + "email"
    # "baseline" is the build this report diffs against; "template" selects
    # the server-side report layout.
    parameters = {"baseline": baseline_build["id"], "template": "9"}
    result = requests.get(template_url, parameters)
    # Recipient comes from the build's own metadata; empty string if unset.
    email = build.build_metadata.get("email-notification", "")
    if "No regressions" in result.text:
        subject = "{}: no regressions found".format(build.build["version"])
    else:
        subject = "{}: regressions detected".format(build.build["version"])
    return (email, subject, result.text)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("build_url", help="API URL to developer build")
    args = parser.parse_args()
    (email_destination, email_subject, email_body) = get_build_report(args.build_url)
    # Persist the three parts to files (presumably consumed by a later
    # mailer step - confirm with the CI job definition) ...
    with open("email.to", "w") as f:
        f.write(email_destination)
    with open("email.subject", "w") as f:
        f.write(email_subject)
    with open("email.body", "w") as f:
        f.write(email_body)
    # ... and echo them to the job log.
    print("TO: {}".format(email_destination))
    print("SUBJECT: {}".format(email_subject))
    print("\n{}\n".format(email_body))
| 28.186047 | 86 | 0.664604 |
253c3b4e7dd3233e756d0a0d7809bcec3e7f9d2a | 1,507 | py | Python | day_3.py | bastoche/adventofcode2017 | a93ecff1de78376b03d4c922c82dff96574f2466 | [
"MIT"
] | null | null | null | day_3.py | bastoche/adventofcode2017 | a93ecff1de78376b03d4c922c82dff96574f2466 | [
"MIT"
] | null | null | null | day_3.py | bastoche/adventofcode2017 | a93ecff1de78376b03d4c922c82dff96574f2466 | [
"MIT"
] | null | null | null | from math import ceil, sqrt
def part_one(input):
    """Manhattan distance from square *input* back to the centre of the spiral.

    The distance is the ring index plus the offset of the square from the
    nearest midpoint (cardinal point) of its ring's side.
    """
    ring = get_circle_index(input)
    ring_zero = get_circle_zero(ring)
    midpoints = get_cardinal_points(ring, ring_zero)
    offset = compute_distance_to_closest_cardinal_point(input, midpoints)
    return ring + offset

def get_circle_index(input):
    """Index of the ring containing *input* (0 for the centre square)."""
    return ceil(sqrt(input)) // 2

def get_circle_zero(circle_index):
    """Largest value of the previous ring, i.e. the odd square (2k - 1)^2."""
    return pow(circle_index * 2 - 1, 2)

def get_cardinal_points(circle_index, circle_zero):
    """Values sitting at the four side midpoints of the ring."""
    return [circle_zero + step * circle_index for step in (1, 3, 5, 7)]

def compute_distance_to_closest_cardinal_point(input, cardinal_points):
    """Distance from *input* to its nearest side midpoint."""
    return min(abs(input - point) for point in cardinal_points)
def part_two(input):
    """First value written to the sum-spiral that is strictly larger than *input*.

    Each square receives the sum of all already-filled neighbours
    (including diagonals); squares are filled in spiral order starting
    from 1 at the origin (OEIS A141481).
    """
    spiral = {}
    x = 0
    y = 0
    spiral[(0, 0)] = 1
    # Bug fix: use <= so the loop stops at the first value STRICTLY larger
    # than the input.  With < it returned the input itself whenever the
    # input happened to appear in the sequence.
    while spiral[(x, y)] <= input:
        x, y = get_next_coordinates(x, y)
        coordinates_offsets = [-1, 0, 1]
        # Sum the 8 neighbours; the (0, 0) offset reads the still-unset
        # current square, which defaults to 0.
        spiral[(x, y)] = sum([spiral.get((x + i, y + j), 0) for i in coordinates_offsets for j in coordinates_offsets])
    return spiral[(x, y)]

def get_next_coordinates(x, y):
    """Next square after (x, y) in counter-clockwise spiral order."""
    if x == y == 0:
        return (1, 0)
    if y > -x and x > y:
        return (x, y + 1)   # right edge: move up
    if y > -x and y >= x:
        return (x - 1, y)   # top edge: move left
    if y <= -x and x < y:
        return (x, y - 1)   # left edge: move down
    if y <= -x and x >= y:
        return (x + 1, y)   # bottom edge: move right
if __name__ == "__main__":
    # Puzzle input for Advent of Code 2017, day 3.
    input = 325489
    print(part_one(input))
    print(part_two(input))
| 25.542373 | 119 | 0.639681 |
253ce464d772dd296d3b8fca083e60adbb02df3d | 423 | py | Python | CeV - Gustavo Guanabara/exerc022.py | us19861229c/Meu-aprendizado-Python | 575c0714ac5377ff3122f4cb57952969e07ba89b | [
"Unlicense"
] | 1 | 2021-12-11T19:53:41.000Z | 2021-12-11T19:53:41.000Z | CeV - Gustavo Guanabara/exerc022.py | us19861229c/Meu-aprendizado-Python | 575c0714ac5377ff3122f4cb57952969e07ba89b | [
"Unlicense"
] | null | null | null | CeV - Gustavo Guanabara/exerc022.py | us19861229c/Meu-aprendizado-Python | 575c0714ac5377ff3122f4cb57952969e07ba89b | [
"Unlicense"
] | null | null | null | #022: Crie um programa que leia o nome completo de uma pessoa e mostre:
# - The name in all upper-case and all lower-case letters.
# - How many letters in total (not counting spaces).
# - How many letters the first name has.
nome = input("Qual é o seu nome? ")
print(">>",nome.upper())
print(">>",nome.lower())
jnome = nome.strip()
# Letter count without spaces: trim the ends, then subtract the internal spaces.
v = jnome.count(" ")
print(">>",len(jnome)- v)
pnome = nome.split()
print(">>",len(pnome[0]))
| 28.2 | 71 | 0.664303 |
253e8b5989062bd43d076499f35aace1547716ff | 2,395 | py | Python | src/pysqldump/domain/manager.py | tongyeouki/sql-converter | 28039fe16b43f443925447d06d682f6aa8c3a909 | [
"MIT"
] | 1 | 2020-06-12T03:32:35.000Z | 2020-06-12T03:32:35.000Z | src/pysqldump/domain/manager.py | tongyeouki/sql-converter | 28039fe16b43f443925447d06d682f6aa8c3a909 | [
"MIT"
] | null | null | null | src/pysqldump/domain/manager.py | tongyeouki/sql-converter | 28039fe16b43f443925447d06d682f6aa8c3a909 | [
"MIT"
] | 1 | 2020-06-12T03:32:15.000Z | 2020-06-12T03:32:15.000Z | from typing import Optional
from pysqldump.domain.formatters import (
CSVFormatter,
DictFormatter,
JsonFormatter,
ConsoleFormatter,
)
from pysqldump.settings.base import get_config
config = get_config()
class File:
    """Thin wrapper around an optional output filename."""
    def __init__(self, filename):
        # May be None when no export target was requested.
        self.filename = filename
    def get_extension(self):
        """Return the final extension ("csv" for "a.csv"), or None when the
        name is missing or has no dot.

        Bug fix: use the LAST dot rather than the first, so multi-dot names
        such as "backup.2020.csv" report "csv" instead of "2020".
        """
        try:
            return self.filename.rsplit(".", 1)[1]
        except (IndexError, AttributeError):
            return None
    def get_filename(self):
        """Return the filename, or "" when none was given."""
        if self.filename is None:
            return ""
        return self.filename
class OutputManager:
    """Dispatch tabular results to a formatter chosen from the export filename."""
    # Maps a filename extension to a formatter key; anything unknown (or no
    # filename at all) falls back to "console".
    formats = {"csv": "csv", "json": "json", "console": "console"}
    def __init__(self, data: tuple, headers: list, export_to: Optional[str] = None):
        self._filename = File(filename=export_to)
        self.headers = headers
        self.data = data
    @property
    def filename(self) -> str:
        # Empty string when no export target was given.
        return self._filename.get_filename()
    @property
    def formatter(self) -> str:
        extension = self._filename.get_extension()
        return self.formats.get(extension, "console")
    def run(self, pprint: bool = False, json: bool = False):
        # Route by target: CSV file, JSON (file or console when *json* is
        # set), otherwise plain console output.
        if self.formatter == "csv":
            return self.__to_csv(pprint=pprint)
        elif self.formatter == "console" and json or self.formatter == "json":
            return self.__to_json(pprint=pprint, json=json)
        elif self.formatter == "console":
            return self.__to_console(pprint=pprint)
    def __to_console(self, pprint: bool = False):
        if pprint:
            return ConsoleFormatter(
                headers=self.headers, data=self.data, export_to=self.filename
            ).print()
        return DictFormatter(
            headers=self.headers, data=self.data, export_to=self.filename
        ).export()
    def __to_csv(self, pprint: bool = False):
        formatter = CSVFormatter(
            headers=self.headers, data=self.data, export_to=self.filename
        )
        if pprint:
            return formatter.print()
        return formatter.export()
    def __to_json(self, pprint: bool = False, json: bool = False):
        formatter = JsonFormatter(
            headers=self.headers, data=self.data, export_to=self.filename
        )
        if pprint:
            formatter.print()
        # NOTE(review): ``self._filename`` is a File instance and therefore
        # always truthy; this probably means to test ``self.filename`` (the
        # string).  Unreachable in practice because an empty filename routes
        # to "console" above - confirm intent.
        if self._filename and not json:
            return formatter.export()
        return formatter.use()
| 29.567901 | 84 | 0.617954 |
25405166ea1f14ffbb145a0fad72cb35236d7ab6 | 605 | py | Python | Mortgage Calculator.py | BokijonovM/Projects | 7c032f872aaa4bdf0fba100385019c6058c3c8fb | [
"BSD-2-Clause"
] | 1 | 2021-03-18T08:12:15.000Z | 2021-03-18T08:12:15.000Z | Mortgage Calculator.py | BokijonovM/Python_Projects | 7c032f872aaa4bdf0fba100385019c6058c3c8fb | [
"BSD-2-Clause"
] | null | null | null | Mortgage Calculator.py | BokijonovM/Python_Projects | 7c032f872aaa4bdf0fba100385019c6058c3c8fb | [
"BSD-2-Clause"
] | null | null | null | """**Mortgage Calculator** -
Calculate the monthly payments of a fixed term mortgage
over given Nth terms at a given interest rate. Also figure
out how long it will take the user to pay back the loan."""
# --- user inputs ---
months = int(input("Enter mortgage term (in months): "))
rate = float(input("Enter interest rate (in %): "))  # annual percentage rate
loan = float(input("Enter loan value: "))
# Annual percentage rate -> monthly fractional rate.
monthly_rate = rate / 100 / 12
# Standard amortisation formula: P * r / (1 - (1 + r)^-n).
# NOTE(review): divides by zero when rate == 0 - confirm whether a 0% rate
# needs to be supported.
payment = (monthly_rate / (1 - (1 + monthly_rate)**(-months))) * loan
print("Monthly payment for a $%.2f %s year mortgage at %.2f%% interest rate is: $%.2f" % (loan, (months / 12), rate, payment))
| 37.8125 | 127 | 0.661157 |
2540e4b774668ff785e806c6ddc07e0e515e0f5f | 172 | py | Python | math_lib.py | cu-swe4s-fall-2020/version-control-rezgarshakeri | 859f863a71dbab5714a1f24e54933a0b4398790b | [
"MIT"
] | null | null | null | math_lib.py | cu-swe4s-fall-2020/version-control-rezgarshakeri | 859f863a71dbab5714a1f24e54933a0b4398790b | [
"MIT"
] | null | null | null | math_lib.py | cu-swe4s-fall-2020/version-control-rezgarshakeri | 859f863a71dbab5714a1f24e54933a0b4398790b | [
"MIT"
] | null | null | null | import numpy as np
def div(a, b):
    """Return a / b; on a zero denominator, warn and return np.inf."""
    if b != 0:
        return a / b
    print("denominator iz zero!!!")
    return np.inf
def add(a, b):
    """Return the sum of a and b."""
    return a + b
25428819afdb8bcef5f733f483e2dfff517079e7 | 956 | py | Python | configs.py | rudyn2/visual-odometry | 1ee37ac6669e1429461f23ccc02d5ae9a349409c | [
"MIT"
] | null | null | null | configs.py | rudyn2/visual-odometry | 1ee37ac6669e1429461f23ccc02d5ae9a349409c | [
"MIT"
] | null | null | null | configs.py | rudyn2/visual-odometry | 1ee37ac6669e1429461f23ccc02d5ae9a349409c | [
"MIT"
] | null | null | null | import cv2
class StereoSGBMConfig:
    # Parameter set for OpenCV's semi-global block matcher
    # (cv2.StereoSGBM_create).
    min_disparity = 0
    num_disparities = 16*10  # OpenCV requires a multiple of 16
    sad_window_size = 3  # matched block size
    uniqueness_ratio = 5
    # Smoothness penalties.  NOTE(review): scaled 16x/96x per blockSize^2
    # rather than the 8*channels / 32*channels convention from the OpenCV
    # sample - confirm this tuning is intentional.
    p1 = 16*sad_window_size*sad_window_size
    p2 = 96*sad_window_size*sad_window_size
    pre_filter_cap = 63
    speckle_window_size = 0  # 0 disables speckle filtering
    speckle_range = 0
    disp_max_diff = 1
    mode = cv2.STEREO_SGBM_MODE_SGBM
class StereoSGBMConfig2:
    # Alternative StereoSGBM tuning, using the faster 3-way mode and
    # speckle filtering enabled.
    pre_filter_cap = 63
    sad_window_size = 3
    p1 = sad_window_size * sad_window_size * 4
    p2 = sad_window_size * sad_window_size * 32
    min_disparity = 0
    num_disparities = 128
    uniqueness_ratio = 10
    speckle_window_size = 100
    speckle_range = 32
    disp_max_diff = 1
    # NOTE(review): ``full_dp`` is not an obvious cv2.StereoSGBM_create
    # parameter name - confirm where this value is consumed.
    full_dp = 1
    mode = cv2.STEREO_SGBM_MODE_SGBM_3WAY
class MatcherConfig:
    # Settings for the feature-matching stage.
    # RANSAC: few iterations with a loose error threshold.
    ransac = {
        'max_iterations': 5,
        'error_threshold': 50,
        'min_consensus': 5
    }
    # Hough-voting bins.  Key names appear to be Spanish
    # ('umbralvotos' ~ vote threshold) - confirm semantics in the matcher code.
    hough = {
        'dxbin': 100,
        'dangbin': 50,
        'umbralvotos': 10
    }
| 21.727273 | 47 | 0.65272 |
25446e5536422db53c3887d8fec73e5ede336aa7 | 5,460 | py | Python | test/test_tensor_reorganization.py | entn-at/BrnoLM | 9f8c62523382098809c1c0967f62a67d151eafe0 | [
"MIT"
] | 17 | 2020-02-04T16:42:40.000Z | 2021-11-11T14:37:32.000Z | test/test_tensor_reorganization.py | entn-at/BrnoLM | 9f8c62523382098809c1c0967f62a67d151eafe0 | [
"MIT"
] | null | null | null | test/test_tensor_reorganization.py | entn-at/BrnoLM | 9f8c62523382098809c1c0967f62a67d151eafe0 | [
"MIT"
] | 4 | 2020-02-04T12:59:04.000Z | 2021-05-30T14:10:54.000Z | from brnolm.runtime.tensor_reorganization import TensorReorganizer
import torch
from torch.autograd import Variable
from .common import TestCase
class Dummy_lstm():
    """Minimal LSTM stand-in: only provides ``init_hidden``."""

    def __init__(self, nb_hidden):
        self._nb_hidden = nb_hidden

    def init_hidden(self, batch_size):
        """Return an all-zero (h, c) state pair, each of shape (2, batch_size, nb_hidden)."""
        h0 = torch.zeros(2, batch_size, self._nb_hidden)
        c0 = torch.zeros(2, batch_size, self._nb_hidden)
        return (h0, c0)
class TensorReorganizerTests(TestCase):
    """Behaviour of TensorReorganizer for (h, c) LSTM state tuples."""

    def setUp(self):
        self.lm = Dummy_lstm(nb_hidden=2)
        self.reorganizer = TensorReorganizer(self.lm.init_hidden)

    def _state(self, h_rows, c_rows):
        """Build an (h, c) pair, each tensor repeating its rows over 2 layers."""
        return (
            torch.FloatTensor([h_rows] * 2),
            torch.FloatTensor([c_rows] * 2),
        )

    def _full_state(self):
        """Reference batch-3 state used by every test."""
        return self._state([[0.1, 0.1], [0.2, 0.2], [0.3, 0.3]],
                           [[1, 1], [2, 2], [3, 3]])

    def test_passing(self):
        """Identity mask with matching batch size returns the state unchanged."""
        last_h = self._full_state()
        new_h = self.reorganizer(last_h, torch.LongTensor([0, 1, 2]), 3)
        self.assertEqual(new_h, last_h)

    def test_shrinks(self):
        """A sub-mask keeps only the selected batch rows."""
        new_h = self.reorganizer(self._full_state(), torch.LongTensor([0, 2]), 2)
        expected = self._state([[0.1, 0.1], [0.3, 0.3]], [[1, 1], [3, 3]])
        self.assertEqual(new_h, expected)

    def test_requires_bsz_greater_than_mask(self):
        """A batch size smaller than the mask length is rejected."""
        self.assertRaises(ValueError, self.reorganizer,
                          self._full_state(), torch.LongTensor([0, 1, 2]), 2)

    def test_on_empty_mask_zeros(self):
        """An empty mask yields a freshly zero-initialised state."""
        new_h = self.reorganizer(self._full_state(), torch.LongTensor([]), 2)
        self.assertEqual(new_h, self.lm.init_hidden(2))

    def test_completion_by_zeros(self):
        """Rows beyond the mask are padded with zeros."""
        new_h = self.reorganizer(self._full_state(), torch.LongTensor([1]), 2)
        expected = self._state([[0.2, 0.2], [0.0, 0.0]], [[2.0, 2.0], [0.0, 0.0]])
        self.assertEqual(new_h, expected)

    def test_bug_regression_single_addition(self):
        """Regression test: growing by exactly one row pads a single zero row."""
        new_h = self.reorganizer(self._full_state(), torch.LongTensor([1, 2]), 3)
        expected = self._state([[0.2, 0.2], [0.3, 0.3], [0.0, 0.0]],
                               [[2.0, 2.0], [3.0, 3.0], [0.0, 0.0]])
        self.assertEqual(new_h, expected)
class Dummy_srn():
    """Minimal simple-RNN stand-in: only provides ``init_hidden``."""

    def __init__(self, nb_hidden):
        self._nb_hidden = nb_hidden
        self._nb_layers = 1

    def init_hidden(self, batch_size):
        """Return an all-zero state of shape (nb_layers, batch_size, nb_hidden)."""
        return torch.zeros(self._nb_layers, batch_size, self._nb_hidden)
class TensorReorganizerTests_SRN(TestCase):
    """Behaviour of TensorReorganizer for single-tensor (SRN) hidden states."""

    def setUp(self):
        lm = Dummy_srn(nb_hidden=2)
        self.reorganizer = TensorReorganizer(lm.init_hidden)

    def _state(self, rows):
        return torch.FloatTensor([rows])

    def _full_state(self):
        """Reference batch-3 state used by every test."""
        return self._state([[0.1, 0.1], [0.2, 0.2], [0.3, 0.3]])

    def test_passing(self):
        """Identity mask with matching batch size returns the state unchanged."""
        last_h = self._full_state()
        new_h = self.reorganizer(last_h, torch.LongTensor([0, 1, 2]), 3)
        self.assertEqual(new_h, last_h)

    def test_passing_variables(self):
        """Same as test_passing, but with Variable-wrapped inputs."""
        last_h = Variable(self._full_state())
        mask = Variable(torch.LongTensor([0, 1, 2]))
        new_h = self.reorganizer(last_h, mask, 3)
        self.assertEqual(new_h, last_h)

    def test_shrinks(self):
        """A sub-mask keeps only the selected batch rows."""
        new_h = self.reorganizer(self._full_state(), torch.LongTensor([0, 2]), 2)
        self.assertEqual(new_h, self._state([[0.1, 0.1], [0.3, 0.3]]))

    def test_requires_bsz_greater_than_mask(self):
        """A batch size smaller than the mask length is rejected."""
        self.assertRaises(ValueError, self.reorganizer,
                          self._full_state(), torch.LongTensor([0, 1, 2]), 2)

    def test_on_empty_mask_zeros(self):
        """An empty mask yields a freshly zero-initialised state."""
        new_h = self.reorganizer(self._full_state(), torch.LongTensor([]), 2)
        self.assertEqual(new_h, self._state([[0.0, 0.0], [0.0, 0.0]]))

    def test_completion_by_zeros(self):
        """Rows beyond the mask are padded with zeros."""
        new_h = self.reorganizer(self._full_state(), torch.LongTensor([1]), 2)
        self.assertEqual(new_h, self._state([[0.2, 0.2], [0.0, 0.0]]))
| 30.502793 | 86 | 0.540842 |
254476669d60a9c34049aba84879aa1422202a6a | 25 | py | Python | latex.py | akdir/BachelorThesis | 07b5fe8b92c0b0eb21c1031e6415ac268ba27e7c | [
"MIT"
] | null | null | null | latex.py | akdir/BachelorThesis | 07b5fe8b92c0b0eb21c1031e6415ac268ba27e7c | [
"MIT"
] | null | null | null | latex.py | akdir/BachelorThesis | 07b5fe8b92c0b0eb21c1031e6415ac268ba27e7c | [
"MIT"
] | null | null | null | jobname="BachelorThesis"
| 12.5 | 24 | 0.84 |
25456b38415fb42cb49ec1c612d93c5272eac7b9 | 331 | py | Python | quantdsl/infrastructure/event_sourced_repos/contract_specification_repo.py | johnbywater/quantdsl | 81c1c69f27e094a6ed0542b28cf1ac8fcce5494a | [
"BSD-3-Clause"
] | 269 | 2015-01-09T00:56:41.000Z | 2022-03-30T17:09:46.000Z | quantdsl/infrastructure/event_sourced_repos/contract_specification_repo.py | johnbywater/quantdsl | 81c1c69f27e094a6ed0542b28cf1ac8fcce5494a | [
"BSD-3-Clause"
] | 22 | 2017-04-01T13:44:56.000Z | 2018-09-10T11:48:56.000Z | quantdsl/infrastructure/event_sourced_repos/contract_specification_repo.py | johnbywater/quantdsl | 81c1c69f27e094a6ed0542b28cf1ac8fcce5494a | [
"BSD-3-Clause"
] | 59 | 2015-01-09T00:56:50.000Z | 2022-03-13T23:52:27.000Z | from eventsourcing.infrastructure.event_sourced_repo import EventSourcedRepository
from quantdsl.domain.model.contract_specification import ContractSpecification, ContractSpecificationRepository
class ContractSpecificationRepo(ContractSpecificationRepository, EventSourcedRepository):
    """Event-sourced repository for ContractSpecification aggregates."""
    domain_class = ContractSpecification  # aggregate type rebuilt from stored events
| 33.1 | 111 | 0.89426 |
2545a6ce4bad291b2182fea9564fd36668358b01 | 660 | py | Python | scrapingData/scraping.py | karumo10/coursesel-helper | deb7e52a7bfe1fc41cd630d5a2cbe96fa089d986 | [
"MIT"
] | null | null | null | scrapingData/scraping.py | karumo10/coursesel-helper | deb7e52a7bfe1fc41cd630d5a2cbe96fa089d986 | [
"MIT"
] | null | null | null | scrapingData/scraping.py | karumo10/coursesel-helper | deb7e52a7bfe1fc41cd630d5a2cbe96fa089d986 | [
"MIT"
] | null | null | null | from requests_html import HTMLSession
import os
import sys
writeFileName = "courseLinks.out"
writeFileStream = open(writeFileName,'w',encoding='utf-8')
session = HTMLSession()
url = 'https://www.ji.sjtu.edu.cn/academics/courses/courses-by-number/'
r = session.get(url)
for i in range(2,100):
sel = '#Faculty-information > li:nth-child(' + str(i) + ') > a'
# print(sel)
results = r.html.find(sel)
if len(results) == 0:
break;
else:
for result in results:
writeFileStream.write(result.absolute_links.pop()+'\n')
writeFileStream.close()
# #Faculty-information > li:nth-child(3) > a
| 24.444444 | 72 | 0.636364 |
254a5b1fda824a925564dbbe740873888025ca2b | 7,655 | py | Python | jukebot/cogs/gametime.py | Kommotion/Jukebot | 4e50342b914ff6b91fd78802900d1e24bee946db | [
"MIT"
] | 1 | 2021-07-26T02:44:00.000Z | 2021-07-26T02:44:00.000Z | jukebot/cogs/gametime.py | Kommotion/Jukebot | 4e50342b914ff6b91fd78802900d1e24bee946db | [
"MIT"
] | null | null | null | jukebot/cogs/gametime.py | Kommotion/Jukebot | 4e50342b914ff6b91fd78802900d1e24bee946db | [
"MIT"
] | null | null | null | import logging
import discord
from datetime import datetime
from discord.ext import tasks, commands
from discord.ext.commands import Cog
from cogs.utils.utils import json_io_dump, json_io_load
log = logging.getLogger(__name__)
STATUS = 'status'
TIME_STARTED = 'time_started'
NAME = 'name'
GAMES = 'games'
NONE = 'none'
# Reference JSON
# {
# "player_id1": {
# "status": "a string of status",
# "time_started": "time_started_current_status",
# "games":{
# "COD MW2": "time_played",
# "Poop": "time_played"
# }
# },
# "player_id2": {
# "status": "a string of status",
# "time_started": "time_started_current_status",
# "games":{
# "COD MW2": "time_played",
# "Poop": "time_played"
# }
# }
# }
class TimePlayed(Cog):
    """Discord cog that tracks, per member, total seconds spent in each 'playing' status.

    State is persisted to ``gametime.json`` every two minutes by the
    ``update_time`` background loop.  Schema (see module header comment):
    ``{member_id: {"name": ..., "status": ..., "time_started": ..., "games": {game: seconds}}}``.
    """

    def __init__(self, bot):
        self.bot = bot
        self.log = logging.getLogger()
        self.gametime_file = 'gametime.json'
        # In-memory copy of the JSON file; (re)loaded on every loop tick.
        self.gametime = None
        self.update_time.start()

    async def game_load(self):
        """Load the persisted game-time table from JSON."""
        self.gametime = json_io_load(self.gametime_file)

    async def game_dump(self):
        """Persist the game-time table to JSON, logging loudly on failure."""
        if not json_io_dump(self.gametime_file, self.gametime):
            self.log.critical('Unable to dump JSON file for TimePlayed!')

    def calculate_addition(self, time_started):
        """Return the seconds to credit since *time_started*, capped at one tick.

        *time_started* is a ``str(datetime)`` like ``2020-01-01 12:00:00``.
        The 120s cap matches the ``update_time`` loop interval so a session
        accrues at most one interval per tick.
        """
        converted_time = datetime.strptime(time_started, '%Y-%m-%d %H:%M:%S')
        delta = (datetime.utcnow().replace(microsecond=0) - converted_time).total_seconds()
        return int(delta) if delta < 120 else 120

    async def get_current_gametime(self):
        """Return a fresh snapshot ``{member_id: {...}}`` of what everyone is playing now."""
        current_gametime = dict()
        for member in set(self.bot.get_all_members()):
            # Initialize this member's entry with "not playing" defaults.
            entry = {
                NAME: member.name,
                STATUS: NONE,
                TIME_STARTED: NONE,
                GAMES: dict(),
            }
            current_gametime[str(member.id)] = entry
            if not member.activities:
                continue
            for activity in member.activities:
                if activity.type != discord.ActivityType.playing:
                    continue
                # More than one 'playing' activity per member is unexpected.
                if entry[STATUS] != NONE:
                    self.log.critical('There are multiple games playing right now in Gametime for single member!')
                    self.log.critical('{} had status {} instead of none as expected.'.format(
                        entry[NAME], entry[STATUS]))
                entry[STATUS] = activity.name
                # BUG FIX: use the matched activity's start time; the original
                # read member.activity.start, which can be a *different*
                # activity (and raises if member.activity is None).
                date = activity.start.replace(microsecond=0) if activity.start else datetime.utcnow().replace(microsecond=0)
                entry[TIME_STARTED] = str(date)
                entry[GAMES][activity.name] = 0
        return current_gametime

    async def compare_and_update(self, current_gametime):
        """Merge a snapshot into the persisted table, crediting elapsed time."""
        for member_id in current_gametime:
            if member_id not in self.gametime:
                self.gametime[member_id] = current_gametime[member_id]
            current_status = current_gametime[member_id][STATUS]
            # Credit time on the game currently being played.
            if current_status != NONE:
                if current_status not in self.gametime[member_id][GAMES]:
                    self.gametime[member_id][GAMES][current_status] = 0
                result = self.calculate_addition(current_gametime[member_id][TIME_STARTED])
                self.gametime[member_id][GAMES][current_status] += result
            # Member switched games since the last tick: close out the old
            # game with one full interval (2 minutes).
            if current_status != self.gametime[member_id][STATUS] and self.gametime[member_id][STATUS] != NONE:
                self.gametime[member_id][GAMES][self.gametime[member_id][STATUS]] += 120
            # Record the new status/start regardless of what happened above.
            self.gametime[member_id][STATUS] = current_gametime[member_id][STATUS]
            self.gametime[member_id][TIME_STARTED] = current_gametime[member_id][TIME_STARTED]

    def calculate_days_minutes_seconds(self, seconds):
        """Format *seconds* as 'DD Days, HH Hours, MM Minutes, SS Seconds' (plus easter eggs)."""
        days, seconds = divmod(seconds, 86400)
        hours, seconds = divmod(seconds, 3600)
        minutes, seconds = divmod(seconds, 60)
        msg = '{:02d} Days, {:02d} Hours, {:02d} Minutes, {:02d} Seconds'.format(days, hours, minutes, seconds)
        if days > 9000:
            msg += ' ITS OVER 9000!'
        if days == 69:
            msg += ' Hah, nice... 69'
        return msg

    @commands.command()
    async def played(self, ctx, *, member: discord.Member = None):
        """Returns the amount of time played for every game

        If Member is not specified, then returns the played information for member that sent command
        """
        if member is None:
            member = ctx.author
        if not self.gametime or str(member.id) not in self.gametime:
            await ctx.send('ERROR!: Unable to find {} in gametime list... looks like a bug'.format(member.mention))
            # BUG FIX: the original fell through here and raised KeyError below.
            return
        msg = 'Time played for {}\n'.format(member.mention)
        games = self.gametime[str(member.id)][GAMES]
        if not games:
            msg += '`Looks like {} hasn\'t played any games!`'.format(member.display_name)
        for game, played_seconds in games.items():
            msg += '`{:<30}: {}`\n'.format(game, self.calculate_days_minutes_seconds(played_seconds))
        await ctx.send('{}'.format(msg))

    @tasks.loop(minutes=2)
    async def update_time(self):
        """Background loop: load table, snapshot activities, merge, persist.

        Steps:
        1. Load list
        2. Get current list of people playing
        3. Compare new with old list and update the gametime table as needed
        4. Write list
        """
        self.log.debug('Starting gametime save loop')
        await self.game_load()
        current_gametime = await self.get_current_gametime()
        await self.compare_and_update(current_gametime)
        await self.game_dump()
        self.log.debug('End gametime save loop')

    @update_time.before_loop
    async def before_update_time(self):
        """Delay the first tick until the bot is fully connected."""
        await self.bot.wait_until_ready()

    @update_time.after_loop
    async def after_update_time(self):
        """Persist once more when the loop stops so no credited time is lost."""
        await self.game_dump()
def setup(bot):
    """discord.py extension entry point: register the TimePlayed cog on *bot*."""
    bot.add_cog(TimePlayed(bot))
| 40.502646 | 142 | 0.617897 |
254ca1af527eda83d904a3bb25f7ec725799bb3b | 2,578 | py | Python | transformy/conversion/_pyqtgraph.py | AllenInstitute/transformy | 17c769857d0cb05ad252ab684dec9eadb61a7c59 | [
"BSD-3-Clause"
] | 1 | 2021-06-22T18:06:06.000Z | 2021-06-22T18:06:06.000Z | transformy/conversion/_pyqtgraph.py | AllenInstitute/transformy | 17c769857d0cb05ad252ab684dec9eadb61a7c59 | [
"BSD-3-Clause"
] | null | null | null | transformy/conversion/_pyqtgraph.py | AllenInstitute/transformy | 17c769857d0cb05ad252ab684dec9eadb61a7c59 | [
"BSD-3-Clause"
] | null | null | null | import numpy as np
from .converter import TransformConverter
from .. import linear
class PyqtgraphTransformConverter(TransformConverter):
    """Converts transforms between this package's `linear` types and pyqtgraph/Qt types."""
    name = 'pyqtgraph'
    def __init__(self):
        try:
            import pyqtgraph
            self._import_error = None
        except ImportError as exc:
            # pyqtgraph unavailable: remember why and leave the converter
            # inert (the dispatch tables below are deliberately not built).
            self._import_error = str(exc)
            return
        # linear -> pyqtgraph dispatch table.
        self._to_classes = {
            linear.STTransform: self._STTransform_to_pg,
            linear.AffineTransform: self._AffineTransform_to_pg,
        }
        # pyqtgraph/Qt -> linear dispatch table.
        self._from_classes = {
            # pyqtgraph.SRTTransform: self._from_SRTTransform,
            # pyqtgraph.SRTTransform3D: self._from_SRTTransform,
            pyqtgraph.QtGui.QTransform: self._from_QTransform,
            pyqtgraph.QtGui.QMatrix4x4: self._from_QMatrix4x4,
            pyqtgraph.Transform3D: self._from_QMatrix4x4,
        }
    def _STTransform_to_pg(self, tr):
        # Scale/translate -> SRTTransform (2D) or SRTTransform3D (3D).
        import pyqtgraph
        if tr.dims == (2, 2):
            ptr = pyqtgraph.SRTTransform()
            ptr.setScale(tr.scale)
            ptr.setTranslate(tr.offset)
            return ptr
        elif tr.dims == (3, 3):
            ptr = pyqtgraph.SRTTransform3D()
            ptr.setScale(tr.scale)
            ptr.setTranslate(tr.offset)
            return ptr
        else:
            raise TypeError("Converting STTransform of dimension %r to pyqtgraph is not supported." % tr.dims)
    def _AffineTransform_to_pg(self, tr):
        # Affine -> QTransform (2D) or homogeneous Transform3D (3D).
        import pyqtgraph
        if tr.dims == (2, 2):
            m = tr.matrix
            o = tr.offset
            # NOTE(review): element order (m[0,0], m[1,0], ...) assumes the
            # QTransform(m11, m12, m13, m21, ...) layout transposes our
            # matrix convention -- confirm against Qt docs.
            ptr = pyqtgraph.QtGui.QTransform(m[0,0], m[1,0], 0.0, m[0,1], m[1,1], 0.0, o[0], o[1], 1.0)
            return ptr
        elif tr.dims == (3, 3):
            # Embed the 3x3 matrix plus offset into a homogeneous 4x4 matrix.
            m = np.eye(4)
            m[:3, :3] = tr.matrix
            m[:3, 3] = tr.offset
            ptr = pyqtgraph.Transform3D(m)
            return ptr
        else:
            raise TypeError("Converting AffineTransform of dimension %r to pyqtgraph is not supported." % tr.dims)
    def _from_SRTTransform(self, tr):
        # Currently unused (see the commented entries in _from_classes).
        return linear.STTransform(offset=tr.getTranslation(), scale=tr.getScale())
    def _from_QTransform(self, tr):
        # NOTE(review): the (m11, m21) / (m12, m22) arrangement mirrors the
        # transposition used in _AffineTransform_to_pg -- confirm round-trip.
        m = np.array([
            [tr.m11(), tr.m21()],
            [tr.m12(), tr.m22()],
        ])
        o = np.array([tr.m31(), tr.m32()])
        return linear.AffineTransform(matrix=m, offset=o)
    def _from_QMatrix4x4(self, tr):
        # copyDataTo() yields 16 floats; the top-left 3x3 is the linear part,
        # the last column the offset.
        m = np.array(tr.copyDataTo()).reshape(4,4)
        return linear.AffineTransform(matrix=m[:3, :3], offset=m[:3, 3])
| 34.837838 | 114 | 0.564779 |
254d3022845aae3d1a9293a0181f060be7c09b6f | 28 | py | Python | pyqt/utils/__init__.py | TaoYang526/qt | 81ed776c67f2df0d07d8b7e964e6a25b9271b28b | [
"Apache-2.0"
] | null | null | null | pyqt/utils/__init__.py | TaoYang526/qt | 81ed776c67f2df0d07d8b7e964e6a25b9271b28b | [
"Apache-2.0"
] | null | null | null | pyqt/utils/__init__.py | TaoYang526/qt | 81ed776c67f2df0d07d8b7e964e6a25b9271b28b | [
"Apache-2.0"
] | null | null | null | from pyqt.utils import time
| 14 | 27 | 0.821429 |
254d4a38068cbf495fc111202fbf1797b204e7fd | 491 | py | Python | recipe/exceptions.py | juiceinc/recipe | ef3c5af58e2d68892d54285a24b78565f6401ef4 | [
"MIT"
] | 5 | 2017-10-26T10:44:07.000Z | 2021-08-30T16:35:55.000Z | recipe/exceptions.py | juiceinc/recipe | ef3c5af58e2d68892d54285a24b78565f6401ef4 | [
"MIT"
] | 56 | 2017-10-23T14:01:37.000Z | 2022-02-17T17:07:41.000Z | recipe/exceptions.py | juiceinc/recipe | ef3c5af58e2d68892d54285a24b78565f6401ef4 | [
"MIT"
] | null | null | null | class BadIngredient(Exception):
""" Something is wrong with an ingredient """
class BadRecipe(Exception):
    """Raised when a recipe is malformed or cannot be assembled."""
class InvalidColumnError(Exception):
    """Error for a column reference that cannot be resolved.

    Accepts an optional ``column_name`` keyword; when no positional message
    is supplied, a default message naming the column is synthesized.
    """

    def __init__(self, *args, **kwargs):
        self.column_name = kwargs.pop("column_name", None)
        message_args = args if args else ['Invalid column "{}"'.format(self.column_name)]
        super().__init__(*message_args, **kwargs)
| 30.6875 | 67 | 0.651731 |
254f90068c187cfc444d126472019f1e35637c92 | 1,373 | py | Python | jack/io/read_semeval2017Task10.py | elyase/jack | a4f43a4012a540d55d2e05d8a904e6f8cc3002f1 | [
"MIT"
] | 192 | 2017-10-19T18:04:56.000Z | 2019-09-21T23:29:03.000Z | jack/io/read_semeval2017Task10.py | elyase/jack | a4f43a4012a540d55d2e05d8a904e6f8cc3002f1 | [
"MIT"
] | 120 | 2017-10-16T09:46:07.000Z | 2019-06-20T18:34:24.000Z | jack/io/read_semeval2017Task10.py | elyase/jack | a4f43a4012a540d55d2e05d8a904e6f8cc3002f1 | [
"MIT"
] | 50 | 2017-10-19T09:57:45.000Z | 2019-07-24T13:46:26.000Z | import os
def readAnn(textfolder="../data/SemEval2017Task10/"):
    """Cross-check .ann span annotations against their .txt paragraphs.

    For every ``*.ann`` file in *textfolder*, the matching ``*.txt`` file is
    read and each annotated span ``(start, end)`` is looked up in the text.
    A message is printed for every annotation whose recorded surface string
    does not match the text at that span.

    Args:
        textfolder: directory containing paired ``.ann``/``.txt`` files.
    """
    for fname in os.listdir(textfolder):
        if not fname.endswith(".ann"):
            continue
        txt_path = os.path.join(textfolder, fname.replace(".ann", ".txt"))
        # BUG FIX: mode "rU" was removed in Python 3.11; plain "r" already
        # performs universal-newline translation.  `with` also guarantees the
        # handles are closed, which the original never did.
        with open(txt_path, "r") as f_text:
            # Each .txt file is a single paragraph, so keep the last line.
            # Initializing per file also prevents a previous file's text
            # from leaking in when a .txt file is empty.
            text = ""
            for line in f_text:
                text = line
        with open(os.path.join(textfolder, fname), "r") as f_anno:
            # @TODO: collect all keyphrase and relation annotations, create
            # pairs of all keyphrases in the same sentence for USchema-style RE.
            for line in f_anno:
                anno_inst = line.strip().split("\t")
                if len(anno_inst) != 3:
                    continue
                keytype, start, end = anno_inst[1].split(" ")
                if keytype.endswith("-of"):
                    continue
                # Look up the span in the text; report when it disagrees
                # with the surface string recorded in the .ann file.
                keyphr_text_lookup = text[int(start):int(end)]
                keyphr_ann = anno_inst[2]
                if keyphr_text_lookup != keyphr_ann:
                    print("Spans don't match for anno " + line.strip() + " in file " + fname)
if __name__ == '__main__':
readAnn() | 32.690476 | 143 | 0.554261 |
25507a35dbe62df6d608b962eb29203e902472af | 5,018 | py | Python | src/means/io/sbml.py | nicktimko/means | fe164916a1d84ab2a4fa039871d38ccdf638b1db | [
"MIT"
] | 10 | 2016-05-25T08:28:39.000Z | 2020-06-04T03:19:50.000Z | src/means/io/sbml.py | nicktimko/means | fe164916a1d84ab2a4fa039871d38ccdf638b1db | [
"MIT"
] | 5 | 2015-12-08T14:01:15.000Z | 2020-01-10T22:42:18.000Z | src/means/io/sbml.py | nicktimko/means | fe164916a1d84ab2a4fa039871d38ccdf638b1db | [
"MIT"
] | 6 | 2015-12-10T17:24:11.000Z | 2021-03-22T16:12:17.000Z | from collections import namedtuple
import os
import sympy
import numpy as np
from means.core.model import Model
_Reaction = namedtuple('_REACTION', ['id', 'reactants', 'products', 'propensity', 'parameters'])
def _sbml_like_piecewise(*args):
if len(args) % 2 == 1:
# Add a final True element you can skip in SBML
args += (True,)
sympy_args = []
for i in range(len(args)/2):
# We need to group args into tuples of form
# (value, condition)
# SBML usually outputs them in form (value, condition, value, condition, value ...)
sympy_args.append((args[i*2], args[i*2+1]))
return sympy.Piecewise(*sympy_args)
def _sympify_kinetic_law_formula(formula):
# We need to define some namespace hints for sympy to deal with certain functions in SBML formulae
# For instance, `eq` in formula should map to `sympy.Eq`
namespace = {'eq': sympy.Eq,
'neq': sympy.Ne,
'floor': sympy.floor,
'ceiling': sympy.ceiling,
'gt': sympy.Gt,
'lt': sympy.Lt,
'geq': sympy.Ge,
'leq': sympy.Le,
'pow': sympy.Pow,
'piecewise': _sbml_like_piecewise}
return sympy.sympify(formula, locals=namespace)
def _parse_reaction(libsbml_reaction):
id_ = libsbml_reaction.getId()
reactants = {sympy.Symbol(r.getSpecies()): r.getStoichiometry() for r in libsbml_reaction.getListOfReactants()}
products = {sympy.Symbol(p.getSpecies()): p.getStoichiometry() for p in libsbml_reaction.getListOfProducts()}
kinetic_law = _sympify_kinetic_law_formula(libsbml_reaction.getKineticLaw().getFormula())
# This would only work for SBML Level 3, prior levels do not have parameters within kinetic law
parameters = [(sympy.Symbol(p.getId()), p.getValue())
for p in libsbml_reaction.getKineticLaw().getListOfParameters()]
return _Reaction(id_, reactants, products, kinetic_law, parameters)
def read_sbml(filename):
"""
Read the model from a SBML file.
:param filename: SBML filename to read the model from
:return: A tuple, consisting of :class:`~means.core.model.Model` instance,
set of parameter values, and set of initial conditions variables.
"""
import libsbml
if not os.path.exists(filename):
raise IOError('File {0!r} does not exist'.format(filename))
reader = libsbml.SBMLReader()
document = reader.readSBML(filename)
sbml_model = document.getModel()
if not sbml_model:
raise ValueError('Cannot parse SBML model from {0!r}'.format(filename))
species = sympy.symbols([s.getId() for s in sbml_model.getListOfSpecies()])
initial_conditions = [s.getInitialConcentration() for s in sbml_model.getListOfSpecies()]
compartments = sympy.symbols([s.getId() for s in sbml_model.getListOfCompartments()])
compartment_sizes = [s.getSize() for s in sbml_model.getListOfCompartments()]
reactions = map(_parse_reaction, sbml_model.getListOfReactions())
# getListOfParameters is an attribute of the model for SBML Level 1&2
parameters_with_values = [(sympy.Symbol(p.getId()), p.getValue())
for p in sbml_model.getListOfParameters()]
parameter_values = dict(parameters_with_values)
parameters = map(lambda x: x[0], parameters_with_values)
if not parameters:
track_local_parameters = True
parameters = set()
parameter_values = {}
else:
track_local_parameters = False
stoichiometry_matrix = np.zeros((len(species), len(reactions)), dtype=int)
propensities = []
for reaction_index, reaction in enumerate(reactions):
if track_local_parameters:
for param, value in reaction.parameters:
parameters.add(param)
parameter_values[param] = value
reactants = reaction.reactants
products = reaction.products
propensities.append(reaction.propensity)
for species_index, species_id in enumerate(species):
net_stoichiometry = products.get(species_id, 0) - reactants.get(species_id, 0)
stoichiometry_matrix[species_index, reaction_index] = net_stoichiometry
if track_local_parameters:
# sympy does not allow sorting its parameter lists by default,
# explicitly tell to sort by str representation
sorted_parameters = sorted(parameters, key=str)
else:
sorted_parameters = parameters
parameter_values_list = [parameter_values[p] for p in sorted_parameters]
# We need to concatenate compartment names and parameters as in our framework we cannot differentiate the two
compartments_and_parameters = compartments + sorted_parameters
parameter_values_list = compartment_sizes + parameter_values_list
model = Model(species, compartments_and_parameters, propensities, stoichiometry_matrix)
return model, parameter_values_list, initial_conditions | 40.144 | 115 | 0.682742 |
2551cc7f888a7265ce1f8beeca110b9348759577 | 1,123 | py | Python | clrenv/tests/test_path.py | color/clrenv | e11b67fcce129a4c828b6d7b421d9f2eac58785b | [
"MIT"
] | 2 | 2019-12-04T05:38:17.000Z | 2022-02-17T06:24:23.000Z | clrenv/tests/test_path.py | color/clrenv | e11b67fcce129a4c828b6d7b421d9f2eac58785b | [
"MIT"
] | 9 | 2019-11-11T20:01:11.000Z | 2021-09-30T00:41:52.000Z | clrenv/tests/test_path.py | color/clrenv | e11b67fcce129a4c828b6d7b421d9f2eac58785b | [
"MIT"
] | 4 | 2017-08-24T00:00:34.000Z | 2021-06-25T16:41:20.000Z | import pytest
import clrenv
@pytest.fixture(autouse=True)
def clear_overlay_path(monkeypatch):
monkeypatch.setenv("CLRENV_OVERLAY_PATH", "")
def test_custom_base(tmp_path, monkeypatch):
custom_path = tmp_path / "custom/path"
custom_path.parent.mkdir()
custom_path.write_text("data")
monkeypatch.setenv("CLRENV_PATH", str(custom_path))
assert clrenv.path.environment_paths() == (custom_path,)
def test_missing_base(tmp_path, monkeypatch):
monkeypatch.setenv("CLRENV_PATH", str(tmp_path / "aaa"))
with pytest.raises(ValueError):
clrenv.path.environment_paths()
def test_overlay(tmp_path, monkeypatch):
env_path = tmp_path / "env"
monkeypatch.setenv("CLRENV_PATH", str(env_path))
env_path.write_text("")
overlay_path1 = tmp_path / "overlay1"
overlay_path2 = tmp_path / "overlay2"
overlay_path1.write_text("data")
overlay_path2.write_text("data2")
monkeypatch.setenv("CLRENV_OVERLAY_PATH", f"{overlay_path1}:{overlay_path2}")
assert clrenv.path.environment_paths() == (
overlay_path1,
overlay_path2,
env_path,
)
| 27.390244 | 81 | 0.715049 |
25536ba36fdcd55ea907e174eeadb755910513a2 | 2,583 | py | Python | utils/convert_codah.py | Longday0923/CODAH_Baseline | e9e331452a12c85e35969833cbfc824d6c0256c1 | [
"MIT"
] | null | null | null | utils/convert_codah.py | Longday0923/CODAH_Baseline | e9e331452a12c85e35969833cbfc824d6c0256c1 | [
"MIT"
] | null | null | null | utils/convert_codah.py | Longday0923/CODAH_Baseline | e9e331452a12c85e35969833cbfc824d6c0256c1 | [
"MIT"
] | null | null | null | import random
import pandas as pd
import numpy as np
import json
from tqdm import *
def split(full_list, shuffle=False, ratio=0.2):
    """Split *full_list* into three parts: two of size ``len * ratio`` and the rest.

    Args:
        full_list: sequence to partition (shuffled in place when *shuffle*).
        shuffle: when True, randomize the order first (mutates *full_list*).
        ratio: fraction of the list used for each of the two small slices.

    Returns:
        Tuple ``(part1, part2, rest)``.  When the list is empty or the ratio
        yields a slice smaller than one element, the first two parts are empty.
    """
    n_total = len(full_list)
    offset = int(n_total * ratio)
    if n_total == 0 or offset < 1:
        # BUG FIX: the original returned only two values here, breaking
        # callers that unpack three.
        return [], [], full_list
    if shuffle:
        random.shuffle(full_list)
    sublist_1 = full_list[:offset]
    sublist_2 = full_list[offset:2 * offset]
    sublist_3 = full_list[2 * offset:]
    return sublist_1, sublist_2, sublist_3
def convert_to_codah_statement(input_file: str, output_file1: str):
    """Convert a CODAH csv file into a jsonl entailment dataset, one row per line."""
    print(f'converting {input_file} to entailment dataset...')
    rows = pd.read_csv(input_file).to_numpy()
    total = sum(1 for _ in rows)
    with open(output_file1, 'w') as out_handle:
        # enumerate replaces the original hand-rolled id counter.
        for sample_id, sample in enumerate(tqdm(rows, total=total)):
            entry = convert_sample_to_entailment(sample, sample_id)
            out_handle.write(json.dumps(entry) + "\n")
    print(f'converted statements saved to {output_file1}')
    print()
# Convert the QA file json to output dictionary containing premise and hypothesis
# Convert one CODAH row (numpy/list) into the output dictionary with
# question stem, choices, and per-choice entailment statements.
def convert_sample_to_entailment(sample: list, id: int):
    question_text = sample[1]
    # The first choice is treated as the correct one (answer_key 0 /
    # statement label True).
    entry = {'id': id, 'question': {'stem': question_text}, 'answer_key': 0}
    entry["statements"] = []
    choice_list = []
    for label, choice in enumerate(sample[3:7]):
        statement = question_text + ' ' + choice
        entry["statements"].append({"label": label == 0, "statement": statement})
        choice_list.append({'text': choice, 'label': label})
    entry['question']['choices'] = choice_list
    return entry
# Create the output json dictionary from the input json, premise and hypothesis statement
def create_output_dict(input_json: dict, statement: str, label: bool) -> dict:
if "statements" not in input_json:
input_json["statements"] = []
input_json["statements"].append({"label": label, "statement": statement})
return input_json
if __name__ == "__main__":
    # Convert a single fold; the train/dev/test splitting below is disabled.
    convert_to_codah_statement('../data/codah/fold_0/train.csv', './data/codah/fold_0/train.jsonl')
    # train, dev, test = split(full_list, shuffle=True, ratio=0.2)
    # convert_to_codah_statement(train, 'train.statement.jsonl')
    # convert_to_codah_statement(dev, 'train.statement.jsonl')
    # convert_to_codah_statement(test, 'train.statement.jsonl')
    print('Hey, there!')
| 37.434783 | 99 | 0.684863 |
25541a58e6ade5999bf8649b87e0a951c63912f5 | 3,237 | py | Python | new_imgt_scraping/new_imgt/new_imgt/spiders/new_imgt_spider.py | yaosichao0915/DeepImmuno | a2a7832f6cded9296735475c2e8fa5c9b62b3f8d | [
"MIT"
] | 20 | 2020-12-28T03:34:34.000Z | 2022-03-14T01:36:52.000Z | new_imgt_scraping/new_imgt/new_imgt/spiders/new_imgt_spider.py | zhangjiahuan17/DeepImmuno | 5ab182429bc3276fd43be2ec8d86b72e773992ef | [
"MIT"
] | 3 | 2021-04-23T19:21:11.000Z | 2021-08-22T00:39:01.000Z | new_imgt_scraping/new_imgt/new_imgt/spiders/new_imgt_spider.py | zhangjiahuan17/DeepImmuno | 5ab182429bc3276fd43be2ec8d86b72e773992ef | [
"MIT"
] | 11 | 2021-04-23T16:46:29.000Z | 2022-03-18T15:53:55.000Z | '''
pip install Scrapy
pip install selenium
In a folder:
scrapy startproject imgt
when running:
scrapy crawl new_imgt -o out.json
when using scrapy shell:
scrapy shell 'url'
in Ipython, you can use response.xpath or response.css to try out
object:
1. selectorlist if css('a') and there are a lot of 'a'
2. selector it will have css and xpath method
3. reponse
conda activate selenium
remember make change to the python scirpt under spider folder
'''
'''
If encounter robot blockage error:
open setting.py and change the robot setting to False
you can specify hla in __init__, and then when call:
scrapy crawl new_imgt -a hla="HLA-A*0101" -o out.json
When encounter dynamic page, use selenium to get the page and pass it to scrapy response object
Double check using both 'inspect' and 'see source code' in a webpage, they can be different
'''
'''
cat inventory_compliant.txt | while read line; do scrapy crawl new_imgt -a hla="$line" -o "./hla_paratope/$line.json"; done
'''
import scrapy
from scrapy.crawler import CrawlerProcess
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
class imgtSpider(scrapy.Spider):
    """Scrapy spider that collects pMHC-I epitope sequences from IMGT/3Dstructure-DB.

    The search results are rendered dynamically, so Selenium drives the form
    submission and hands the rendered HTML to Scrapy for parsing.  The HLA
    allele of interest is supplied on the command line:
    ``scrapy crawl new_imgt -a hla="HLA-A*0101" -o out.json``.
    """
    name = 'new_imgt'
    start_urls = ['http://www.imgt.org/3Dstructure-DB/']
    def __init__(self,hla):
        self.hla = hla
        # NOTE(review): hard-coded chromedriver path; breaks on other machines.
        path_to_chromedriver = '/Users/ligk2e/Downloads/chromedriver'
        self.driver = webdriver.Chrome(executable_path=path_to_chromedriver)
        self.driver.implicitly_wait(5)
    def get_selenium(self,url):
        """Submit the search form (species=Homo sapiens, type=pMHC-I) and return rendered HTML bytes."""
        self.driver.get(url)
        self.driver.find_element_by_xpath('//*[@id="species"]/option[27]').click() # choose Homo sapiens (select drop down)
        self.driver.find_element_by_xpath('//*[@id="radio_pMH1"]').click() # choose pMHCI (input)
        self.driver.find_element_by_xpath('//*[@id="datas"]/p[2]/input[1]').click() # click submit (button)
        return self.driver.page_source.encode('utf-8')
    def parse(self,response): # for parsing 550 entry page
        """Walk the result table; for rows matching self.hla, follow the epitope detail page."""
        # Replace scrapy's response with a selector over the Selenium-rendered page.
        response = scrapy.Selector(text=self.get_selenium(imgtSpider.start_urls[0]))
        for row in response.css('body#result div#data table.Results tbody tr')[1:]: # [1:] skips the header row
            mhc = row.css('td')[2].css('td::text').get()
            if self.hla in mhc:
                url_suffix = row.css('td')[1].css('a::attr(href)').get() # details.cgi?pdbcode=2CLR
                # what we need is: http://www.imgt.org/3Dstructure-DB/cgi/details.cgi?pdbcode=2CLR&Part=Epitope
                url_next = 'http://www.imgt.org/3Dstructure-DB/cgi/' + url_suffix + '&Part=Epitope'
                yield scrapy.Request(url_next,callback=self.parse_paratope)
    def parse_paratope(self,response):
        """Concatenate the epitope's per-residue letters into one string, keyed by page URL."""
        url_next = response.url
        paratope = ''
        # Each <a> in the span holds one amino-acid letter of the epitope.
        for i in response.css('body#result div#mybody div#main table')[0].css('tr')[2].css('td')[1].css('span a'):
            aa = i.css('a::text').get()
            paratope += aa
        yield {'{}'.format(url_next):paratope}
# if using process, you can just run a python new_imgt_spider.py
# process = CrawlerProcess()
# process.crawl(imgtSpider)
# process.start() | 33.030612 | 139 | 0.666976 |
25582433fc1391299fee92277e679b595fa40a57 | 1,667 | py | Python | ooobuild/lo/script/provider/script_framework_error_type.py | Amourspirit/ooo_uno_tmpl | 64e0c86fd68f24794acc22d63d8d32ae05dd12b8 | [
"Apache-2.0"
] | null | null | null | ooobuild/lo/script/provider/script_framework_error_type.py | Amourspirit/ooo_uno_tmpl | 64e0c86fd68f24794acc22d63d8d32ae05dd12b8 | [
"Apache-2.0"
] | null | null | null | ooobuild/lo/script/provider/script_framework_error_type.py | Amourspirit/ooo_uno_tmpl | 64e0c86fd68f24794acc22d63d8d32ae05dd12b8 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
#
# Copyright 2022 :Barry-Thomas-Paul: Moss
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http: // www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Const Class
# this is a auto generated file generated by Cheetah
# Libre Office Version: 7.3
# Namespace: com.sun.star.script.provider
class ScriptFrameworkErrorType(object):
    """
    Const Class
    is a checked exception that represents an error encountered by the Scripting Framework whilst executing a script

    See Also:
        `API ScriptFrameworkErrorType <https://api.libreoffice.org/docs/idl/ref/namespacecom_1_1sun_1_1star_1_1script_1_1provider_1_1ScriptFrameworkErrorType.html>`_
    """
    # NOTE: auto-generated (Cheetah); constant values mirror the UNO IDL.
    __ooo_ns__: str = 'com.sun.star.script.provider'
    __ooo_full_ns__: str = 'com.sun.star.script.provider.ScriptFrameworkErrorType'
    __ooo_type_name__: str = 'const'

    UNKNOWN = 0
    """
    Unknown.
    """
    NOTSUPPORTED = 1
    """
    ProviderNotSupported.
    """
    NO_SUCH_SCRIPT = 2
    """
    the requested method, and/or with the requested signature, does not exist
    """
    MALFORMED_URL = 3
    """
    the requested method, with the requested signature, does not exist
    """

__all__ = ['ScriptFrameworkErrorType']
| 30.87037 | 165 | 0.721656 |
25582a95ad549fbb53f7bc9394341328228fcce8 | 38,786 | py | Python | Base/opcode_tab.py | robertmuth/Cwerg | fdf30b06c93b4620c0a45b448b6d92acb81c35f0 | [
"Apache-2.0"
] | 171 | 2020-01-30T16:58:07.000Z | 2022-03-27T22:12:17.000Z | Base/opcode_tab.py | robertmuth/Cwerg | fdf30b06c93b4620c0a45b448b6d92acb81c35f0 | [
"Apache-2.0"
] | 14 | 2021-05-15T02:12:09.000Z | 2022-03-16T04:16:18.000Z | Base/opcode_tab.py | robertmuth/Cwerg | fdf30b06c93b4620c0a45b448b6d92acb81c35f0 | [
"Apache-2.0"
] | 5 | 2021-03-01T20:52:13.000Z | 2022-03-07T06:35:03.000Z | #!/usr/bin/python3
# (c) Robert Muth - see LICENSE for more info
from typing import List, Dict
import enum
from Util import cgen
# maximum number of operands a single instruction may carry
MAX_OPERANDS = 5

# maximum number of function parameters (or results) a signature may declare
MAX_PARAMETERS = 64
############################################################
# Opcode Families [OF.]
#
# Each Opcode belongs to one of the families below.
# Within each family the order and kind of the operands is similar
############################################################


@enum.unique
class OPC_KIND(enum.Enum):
    """Opcode family.

    The family determines the number and purpose of an Opcode's operands
    (see _OF_TO_PURPOSE below for the per-family operand names).
    """
    INVALID = 0
    ALU = 1         # three-operand arithmetic/logic: dst, src1, src2
    ALU1 = 2        # two-operand (unary) ALU: dst, src
    MOV = 3
    LEA = 4         # load effective address with offset
    LEA1 = 5        # load effective address without offset
    COND_BRA = 6    # conditional branch
    BRA = 7         # unconditional branch
    BSR = 8         # direct call
    JSR = 9         # indirect call through a register
    SWITCH = 10     # computed jump via jump table
    RET = 11
    SYSCALL = 12
    ST = 13         # store
    LD = 14         # load
    PUSHARG = 15
    POPARG = 16
    NOP = 17
    NOP1 = 18
    CONV = 19       # numeric conversion / bitcast
    CMP = 20        # conditional move (5 operands)
    BCOPY = 21
    BZERO = 22
    DIRECTIVE = 23  # not a real instruction
# Per-family operand names (the "purpose" of each operand slot).
# Opcode.__init__ copies the matching entry into Opcode.purpose and asserts
# that the operand count agrees.
_OF_TO_PURPOSE = {
    OPC_KIND.ALU: ["dst", "src1", "src2"],
    OPC_KIND.ALU1: ["dst", "src"],
    OPC_KIND.COND_BRA: ["op1", "op2", "target_bbl"],
    OPC_KIND.SWITCH: ["index", "table"],
    OPC_KIND.BRA: ["target_bbl"],
    OPC_KIND.RET: [],
    OPC_KIND.BSR: ["target_fun"],
    OPC_KIND.JSR: ["target_fun_addr", "target_fun_sig"],
    OPC_KIND.SYSCALL: ["target_fun_sig", "syscall_no"],
    OPC_KIND.LEA: ["dst", "base", "offset"],
    OPC_KIND.LEA1: ["dst", "base"],
    OPC_KIND.LD: ["dst", "base", "offset"],
    OPC_KIND.ST: ["base", "offset", "src"],
    OPC_KIND.NOP: [],
    OPC_KIND.NOP1: ["src_and_dst"],
    OPC_KIND.BZERO: ["dst_addr", "width"],
    OPC_KIND.BCOPY: ["dst_addr", "src_addr", "width"],
    OPC_KIND.POPARG: ["dst"],
    OPC_KIND.PUSHARG: ["src"],
    OPC_KIND.CONV: ["dst", "src"],
    OPC_KIND.MOV: ["dst", "src"],
    OPC_KIND.CMP: ["dst", "src1", "src2", "cmp1", "cmp2"],
}
# Families that affect control flow
_OFS_CFG = {OPC_KIND.BSR, OPC_KIND.JSR, OPC_KIND.SYSCALL, OPC_KIND.SWITCH,
            OPC_KIND.BRA, OPC_KIND.COND_BRA, OPC_KIND.RET}

# These instructions do not have a written register
_OFS_NO_DEF = _OFS_CFG | {OPC_KIND.ST, OPC_KIND.BCOPY, OPC_KIND.BZERO,
                          OPC_KIND.PUSHARG, OPC_KIND.NOP}

# These instructions have a written register (always the first operand)
_OFS_WRITING_REGS = {
    OPC_KIND.LEA, OPC_KIND.LEA1, OPC_KIND.ALU, OPC_KIND.ALU1, OPC_KIND.CMP,
    OPC_KIND.MOV, OPC_KIND.CONV, OPC_KIND.LD,
    OPC_KIND.POPARG, OPC_KIND.NOP1}
@enum.unique
class OA(enum.Flag):
    """Opcode Attributes (bit flags, combinable with `|`)"""
    BBL_TERMINATOR = 1 << 0   # instruction ends a basic block
    NO_FALL_THROUGH = 1 << 1  # control never continues to the next instruction
    CALL = 1 << 2             # transfers control to a function
    COMMUTATIVE = 1 << 3      # src1/src2 (or cmp1/cmp2) may be swapped
    MEM_RD = 1 << 4           # reads memory
    MEM_WR = 1 << 5           # writes memory
    SPECIAL = 1 << 6          # placement-sensitive (pusharg/poparg/nop1)


# Attributes implying a control-flow effect
OAS_CFG = OA.CALL | OA.BBL_TERMINATOR

# Attributes implying the instruction must not be removed/reordered freely
OAS_SIDE_EFFECT = OA.CALL | OA.BBL_TERMINATOR | OA.MEM_RD | OA.MEM_WR | OA.SPECIAL
############################################################
# Operand Kinds [OK.]
#
# Each instruction operates on a list of operands. Since we mimic a
# three address machine, ALU instructions usually have 3 operands,
# the destination being the first one.
# There is a large variety of operands denoting registers or immediates
# which enable some basic typing on a per operand basis.
# Additional typing constraints across the operands are enforced by "rules".
############################################################


@enum.unique
class OP_KIND(enum.Enum):
    """Kind of a single instruction/directive operand"""
    INVALID = 0
    REG = 1
    CONST = 2
    REG_OR_CONST = 3
    # bbl immediates ref to a bbl in the current function
    # Note: bbls can be referred to before they are defined
    BBL = 4
    # mem immediates refer to a global memory or stack region
    MEM = 5
    # stk immediates refer to a stack region in the current function
    STK = 6
    # fun immediates ref to a function in global function table
    # Note: funs can be referred to before they are defined
    FUN = 7
    JTB = 8
    # the kinds below (>= 20) only occur in directives
    TYPE_LIST = 20
    DATA_KIND = 21  # one of the RK_
    MEM_KIND = 23  # one of the MK_
    FUN_KIND = 24  # one of the FK_
    FIELD = 25
    NAME = 26
    NAME_LIST = 27
    INT = 28
    BBL_TAB = 29
    BYTES = 30
############################################################
# Type Constraints
############################################################


@enum.unique
class TC(enum.Enum):
    """Type Constraint for a register/const operand.

    Absolute constraints restrict the operand's DK flavor; the
    SAME_* constraints are relative to the preceding operand
    (see CheckTypeConstraint).
    """
    INVALID = 0
    ANY = 1
    ADDR_NUM = 2   # any flavor except code addresses
    ADDR_INT = 3   # data address or integer
    NUM = 4        # integer or float
    FLT = 5
    INT = 6
    ADDR = 7       # data address
    CODE = 8       # code address
    UINT = 9
    SINT = 10
    OFFSET = 11    # integer used as an address offset
    #
    SAME_AS_PREV = 20
    # for bitcast
    SAME_SIZE_AS_PREV = 22
############################################################
# DataType Flavors
#
# A DK value is flavor + width-code, so flavor and bit-width can be
# recovered by masking (see DK.flavor / DK.bitwidth).
############################################################
DK_FLAVOR_S = 0x20  # signed int
DK_FLAVOR_U = 0x40  # unsigned int
DK_FLAVOR_F = 0x60  # ieee floating point
DK_FLAVOR_A = 0x80  # data address
DK_FLAVOR_C = 0xa0  # code address

# width codes: bitwidth = 8 << code
_DK_WIDTH_8 = 0
_DK_WIDTH_16 = 1
_DK_WIDTH_32 = 2
_DK_WIDTH_64 = 3
_DK_WIDTH_128 = 4
class DK(enum.Enum):
    """Data Kind - primarily used to associate a type with Const and Reg"""
    INVALID = 0
    # signed
    S8 = DK_FLAVOR_S + _DK_WIDTH_8
    S16 = DK_FLAVOR_S + _DK_WIDTH_16
    S32 = DK_FLAVOR_S + _DK_WIDTH_32
    S64 = DK_FLAVOR_S + _DK_WIDTH_64
    # S128 = _RK_S + _RK_128
    # unsigned
    U8 = DK_FLAVOR_U + _DK_WIDTH_8
    U16 = DK_FLAVOR_U + _DK_WIDTH_16
    U32 = DK_FLAVOR_U + _DK_WIDTH_32
    U64 = DK_FLAVOR_U + _DK_WIDTH_64
    # U128 = _RK_U + _RK_128
    # float
    F8 = DK_FLAVOR_F + _DK_WIDTH_8
    F16 = DK_FLAVOR_F + _DK_WIDTH_16
    F32 = DK_FLAVOR_F + _DK_WIDTH_32
    F64 = DK_FLAVOR_F + _DK_WIDTH_64
    # F128 = _RK_F + _RK_128
    # data address
    A32 = DK_FLAVOR_A + _DK_WIDTH_32
    A64 = DK_FLAVOR_A + _DK_WIDTH_64
    # code address
    C32 = DK_FLAVOR_C + _DK_WIDTH_32
    C64 = DK_FLAVOR_C + _DK_WIDTH_64

    def flavor(self) -> int:
        """Return the flavor bits (one of the DK_FLAVOR_* constants)."""
        return self.value & 0xe0

    def bitwidth(self) -> int:
        """Return the width in bits (8, 16, 32, 64, or 128)."""
        return 8 << (self.value & 0x7)
# Lookup from short type name (e.g. "S32") to DK member.
SHORT_STR_TO_RK = {x.name: x for x in DK}  # this does contain the aliases
def RegIsAddrInt(rk: DK):
    """True iff `rk` is a data address kind (A32/A64) or an integer kind."""
    flavor = rk.flavor()
    return flavor == DK_FLAVOR_A or flavor == DK_FLAVOR_S or flavor == DK_FLAVOR_U
def RegIsInt(rk: DK):
    """True iff `rk` is a signed or unsigned integer kind (S8..S64, U8..U64)."""
    return rk.flavor() in (DK_FLAVOR_S, DK_FLAVOR_U)
# Predicate for each *absolute* type constraint: maps TC -> (DK -> bool).
# The relative constraints (SAME_AS_PREV / SAME_SIZE_AS_PREV) are handled
# separately in CheckTypeConstraint.
TC_TO_CHECKER = {
    TC.ANY: lambda x: True,
    TC.ADDR_NUM: lambda x: x.flavor() != DK_FLAVOR_C,
    TC.NUM: lambda x: x.flavor() in {DK_FLAVOR_U, DK_FLAVOR_S, DK_FLAVOR_F},
    TC.INT: lambda x: x.flavor() in {DK_FLAVOR_U, DK_FLAVOR_S},
    TC.ADDR: lambda x: x.flavor() == DK_FLAVOR_A,
    TC.CODE: lambda x: x.flavor() == DK_FLAVOR_C,
    TC.SINT: lambda x: x.flavor() == DK_FLAVOR_S,
    TC.UINT: lambda x: x.flavor() == DK_FLAVOR_U,
    TC.ADDR_INT: RegIsAddrInt,
    TC.FLT: lambda x: x.flavor() == DK_FLAVOR_F,
    TC.OFFSET: lambda x: x.flavor() in {DK_FLAVOR_U, DK_FLAVOR_S},
    # maybe change this to just U or S
}
def CheckTypeConstraint(last_type: DK, constraint: TC, this_type: DK) -> bool:
    """Check that `this_type` satisfies `constraint`.

    `last_type` is the kind of the preceding operand; it is only consulted
    for the relative constraints TC.SAME_AS_PREV and TC.SAME_SIZE_AS_PREV.
    Asserts on TC.INVALID and other unhandled constraints.
    """
    checker = TC_TO_CHECKER.get(constraint)
    if checker is not None:  # explicit None test - checkers are always truthy
        return checker(this_type)
    if constraint == TC.SAME_AS_PREV:
        return last_type == this_type
    elif constraint == TC.SAME_SIZE_AS_PREV:
        return last_type.bitwidth() == this_type.bitwidth()
    else:
        # fixed typo in assertion message ("contraint" -> "constraint")
        assert False, f"unknown constraint {constraint.name}"
@enum.unique
class MEM_KIND(enum.Enum):
    """Represents Allocation Type of Global Memory """
    INVALID = 0
    RO = 1       # read-only
    RW = 2       # read-write
    TLS = 3      # thread-local storage
    FIX = 4  # a fixed address provide via
    EXTERN = 5  # forward declaration must be defined before code emission
    BUILTIN = 6  # linker defined


# Lookup from short name (e.g. "RO") to MEM_KIND member.
SHORT_STR_TO_MK = {x.name: x for x in MEM_KIND}
@enum.unique
class FUN_KIND(enum.Enum):
    """Function Kinds"""
    INVALID = 0
    BUILTIN = 1  # linker defined
    EXTERN = 2  # forward declaration must be defined before code emission
    NORMAL = 3
    SIGNATURE = 4  # signature only (used by jsr/syscall)


# Lookup from short name (e.g. "NORMAL") to FUN_KIND member.
SHORT_STR_TO_FK = {x.name: x for x in FUN_KIND}
############################################################
# Operand Value Kind Sets
############################################################

# Operand kinds whose value is a list (only allowed in directives).
OKS_LIST = {OP_KIND.BYTES, OP_KIND.NAME_LIST, OP_KIND.BBL_TAB,
            OP_KIND.TYPE_LIST}

OKS_ALLOWED_FOR_INSTRUCTIONS = {OP_KIND.REG, OP_KIND.CONST,
                                OP_KIND.REG_OR_CONST,
                                OP_KIND.FUN, OP_KIND.BBL, OP_KIND.JTB,
                                OP_KIND.MEM, OP_KIND.STK, OP_KIND.FIELD}

# we do not want non-scalar operands in instructions as they
# increase memory usage and complicate the code
assert not (OKS_LIST & OKS_ALLOWED_FOR_INSTRUCTIONS)

OKS_ALLOWED_FOR_DIRECTIVES = {OP_KIND.INT, OP_KIND.MEM_KIND, OP_KIND.BYTES,
                              OP_KIND.NAME, OP_KIND.BBL_TAB,
                              OP_KIND.FUN_KIND, OP_KIND.TYPE_LIST,
                              OP_KIND.NAME_LIST, OP_KIND.DATA_KIND, OP_KIND.FUN,
                              OP_KIND.MEM, OP_KIND.BBL
                              }

OKS_ALL = OKS_ALLOWED_FOR_INSTRUCTIONS | OKS_ALLOWED_FOR_DIRECTIVES
############################################################
# Opcode Groups
############################################################


@enum.unique
class OPC_GENUS(enum.Enum):
    """Maturity group of an Opcode; only BASE opcodes are rendered/emitted."""
    INVALID = 0
    BASE = 1
    TBD = 2  # experimental / not yet finalized
# Per-directive operand names (analog of _OF_TO_PURPOSE for directives).
_DIR_TO_PURPOSE = {
    ".mem": ["name", "alignment", "mem_kind"],
    ".data": ["repeat", "data"],
    ".addr.fun": ["width", "fun"],
    ".addr.mem": ["width", "mem", "offset"],
    ".fun": ["name", "fun_kind", "out_params", "in_params"],
    ".bbl": ["name"],
    ".reg": ["reg_kind", "names"],
    ".stk": ["name", "alignment", "size"],
    ".jtb": ["name", "size", "default_bbl", "map"],
    # NOTE(review): the four entries below have no matching Directive()
    # registration further down - presumably reserved for future use.
    ".struct": ["name"],
    ".field": ["name", "alignment", "size"],
    ".endstruct": [],
    ".stk.s": ["name", "name"],  # NOTE(review): duplicate purpose "name" - confirm
}
############################################################
# Opcode
############################################################


class Opcode:
    """Opcodes are templates for instructions similar to what you would
    find in assembly language manual for a processor.

    Note, the main purpose of instantiating an opcode instance is to
    populate the Table/TableByNo class member
    """
    # global registries: by mnemonic and by opcode number
    Table: Dict[str, "Opcode"] = {}
    TableByNo: Dict[int, "Opcode"] = {}

    def __init__(self, no, name: str, kind: OPC_KIND,
                 operand_kinds: List[OP_KIND],
                 constraints: List[TC], group: OPC_GENUS, desc,
                 attributes=OA(0)):
        """Register a new opcode template.

        Asserts that mnemonic and number are unique, that the operand
        kinds/constraints are consistent with the family (`kind`), and
        records the instance in Table/TableByNo.
        """
        assert name not in Opcode.Table, f"duplicate opcode {name}"
        assert len(operand_kinds) <= MAX_OPERANDS, name
        Opcode.Table[name] = self
        assert no not in Opcode.TableByNo, f"duplicate no: {no} {name}"
        Opcode.TableByNo[no] = self
        self.no = no
        self.name = name
        self.kind: OPC_KIND = kind
        self.operand_kinds: List[OP_KIND] = operand_kinds
        self.constraints: List[TC] = constraints
        self.group = group
        self.desc = desc
        self.attributes = attributes
        assert kind != OPC_KIND.INVALID, f"unknown {kind}"
        is_directive = kind == OPC_KIND.DIRECTIVE
        if is_directive:
            # directives have per-name operand purposes
            assert name.startswith(".")
            self.purpose = _DIR_TO_PURPOSE[name]
        else:
            # instructions share per-family operand purposes
            self.purpose = _OF_TO_PURPOSE[kind]
        assert len(self.purpose) == len(
            operand_kinds), f"{name} {operand_kinds}"
        assert len(operand_kinds) == len(constraints), f"{no} {name}"
        for ok, tc in zip(operand_kinds, constraints):
            # self.operands_tab[o] = op
            assert ok in OKS_ALL, f"unexpected operand: {ok}"
            # only register/const operands carry a type constraint
            if ok in {OP_KIND.REG, OP_KIND.CONST, OP_KIND.REG_OR_CONST}:
                assert tc != TC.INVALID, f"{no} {name}"
            else:
                assert tc == TC.INVALID, f"{no} {name}"
            if is_directive:
                assert ok in OKS_ALLOWED_FOR_DIRECTIVES, f"bad ins op [{ok}]"
            else:
                assert ok in OKS_ALLOWED_FOR_INSTRUCTIONS, f"bad ins op [{ok}]"

    def is_call(self):
        """True if this opcode transfers control to a function."""
        return OA.CALL in self.attributes

    def is_bbl_terminator(self):
        """True if this opcode ends a basic block."""
        return OA.BBL_TERMINATOR in self.attributes

    def has_fallthrough(self):
        """True if control may continue to the next instruction."""
        return OA.NO_FALL_THROUGH not in self.attributes

    def has_side_effect(self):
        """Truthy if this opcode must not be removed/reordered freely."""
        return OAS_SIDE_EFFECT & self.attributes

    def def_ops_count(self):
        """Number of leading operands that are register writes (0 or 1)."""
        if self.kind in {OPC_KIND.INVALID,
                         OPC_KIND.DIRECTIVE} or self.kind in _OFS_NO_DEF:
            return 0
        else:
            return 1

    @classmethod
    def Lookup(cls, name: str) -> "Opcode":
        """Return the Opcode registered under `name` (KeyError if absent)."""
        return cls.Table[name]

    def __str__(self):
        return f"[OPCODE: {self.name}]"
############################################################
# ARITHMETIC ALU 0x10
# FLOAT + INT

# note: limited address arithmetic allowed
ADD = Opcode(0x10, "add", OPC_KIND.ALU,
             [OP_KIND.REG, OP_KIND.REG_OR_CONST, OP_KIND.REG_OR_CONST],
             [TC.NUM, TC.SAME_AS_PREV, TC.SAME_AS_PREV], OPC_GENUS.BASE,
             "Addition: dst := src1 + src2",
             OA.COMMUTATIVE)

# note: limited address arithmetic allowed
SUB = Opcode(0x11, "sub", OPC_KIND.ALU,
             [OP_KIND.REG, OP_KIND.REG_OR_CONST, OP_KIND.REG_OR_CONST],
             [TC.NUM, TC.SAME_AS_PREV, TC.SAME_AS_PREV], OPC_GENUS.BASE,
             """Subtraction: dst := src1 - src2

Note: `sub dst = 0 src` can be used to emulate `neg` for integers.
(for floating point use `dst = mul src -1.0`)
""")
# ^ fixed typo in the description above: "dat" -> "dst"

# needs more work wrt to size
MUL = Opcode(0x12, "mul", OPC_KIND.ALU,
             [OP_KIND.REG, OP_KIND.REG_OR_CONST, OP_KIND.REG_OR_CONST],
             [TC.NUM, TC.SAME_AS_PREV, TC.SAME_AS_PREV], OPC_GENUS.BASE,
             "Multiplication: dst := src1 \\* src2",
             OA.COMMUTATIVE)

DIV = Opcode(0x13, "div", OPC_KIND.ALU,
             [OP_KIND.REG, OP_KIND.REG_OR_CONST, OP_KIND.REG_OR_CONST],
             [TC.NUM, TC.SAME_AS_PREV, TC.SAME_AS_PREV], OPC_GENUS.BASE,
             """Division: dst := src1 / src2

Some day the operation might be more strictly defined as:
dst := 0 if src2 == 0 else src1 / src2""")
# cf.:
# https://www.gingerbill.org/article/2020/01/25/a-reply-to-lets-stop-copying-c/

REM = Opcode(0x14, "rem", OPC_KIND.ALU,
             [OP_KIND.REG, OP_KIND.REG_OR_CONST, OP_KIND.REG_OR_CONST],
             [TC.INT, TC.SAME_AS_PREV, TC.SAME_AS_PREV], OPC_GENUS.BASE,
             """Modulo: dst := a % b

Some day the sign of the result might be more strictly defined.
Note: does not apply to floating point numbers""")

COPYSIGN = Opcode(0x15, "copysign", OPC_KIND.ALU, [OP_KIND.REG, OP_KIND.REG_OR_CONST, OP_KIND.REG_OR_CONST],
                  [TC.FLT, TC.SAME_AS_PREV, TC.SAME_AS_PREV], OPC_GENUS.BASE,
                  """Set the sign of src1 to match src2 (floating point only)

Note: `copysign dst src1 0.0` can be used to emulate `abs`""")
############################################################
# LOGIC ALU 0x30
# INT ONLY (all regs are treated as unsigned except for shr/rshr
XOR = Opcode(0x18, "xor", OPC_KIND.ALU,
[OP_KIND.REG, OP_KIND.REG_OR_CONST, OP_KIND.REG_OR_CONST],
[TC.INT, TC.SAME_AS_PREV, TC.SAME_AS_PREV], OPC_GENUS.BASE,
"""Bitwise exclusive or: dst := src1 ^ src2
Note: `xor dst = src1 0b111...1` can be used to emulate `not`""",
OA.COMMUTATIVE)
# note: limited address arithmetic allowed
AND = Opcode(0x19, "and", OPC_KIND.ALU,
[OP_KIND.REG, OP_KIND.REG_OR_CONST, OP_KIND.REG_OR_CONST],
[TC.INT, TC.SAME_AS_PREV, TC.SAME_AS_PREV], OPC_GENUS.BASE,
"Bitwise and: dst := src1 & src2",
OA.COMMUTATIVE)
# note: limited address arithmetic allowed
OR = Opcode(0x1a, "or", OPC_KIND.ALU,
[OP_KIND.REG, OP_KIND.REG_OR_CONST, OP_KIND.REG_OR_CONST],
[TC.INT, TC.SAME_AS_PREV, TC.SAME_AS_PREV], OPC_GENUS.BASE,
"Bitwise or: dst := src1 | src2",
OA.COMMUTATIVE)
# shift amount is determined as follows:
# use the log2(width(dst)) low order bits of src2
# e.g. for a dst of kind s8 the low order 3 bits of
# src2 will be used.
# src2 is treated as an unsigned register
SHL = Opcode(0x1b, "shl", OPC_KIND.ALU,
[OP_KIND.REG, OP_KIND.REG_OR_CONST, OP_KIND.REG_OR_CONST],
[TC.INT, TC.SAME_AS_PREV, TC.SAME_AS_PREV], OPC_GENUS.BASE,
"""Shift left: dst := src1 << src2
dst: = src1 << (src2 % bitwidth(src1))""")
SHR = Opcode(0x1c, "shr", OPC_KIND.ALU,
[OP_KIND.REG, OP_KIND.REG_OR_CONST, OP_KIND.REG_OR_CONST],
[TC.INT, TC.SAME_AS_PREV, TC.SAME_AS_PREV], OPC_GENUS.BASE,
"""Shift right: dst := src1 >> src2
dst: = src1 >> (src2 % bitwidth(src1))""")
# do we need both directions, do we need a reverse version?
# should we rather use a funnel shift?
# ROTL = Opcode(0x1d, "rotl", OPC_KIND.ALU,
# [OP_KIND.REG, OP_KIND.REG_OR_CONST, OP_KIND.REG_OR_CONST],
# [TC.INT, TC.SAME_AS_PREV, TC.SAME_AS_PREV], OPC_GENUS.TBD,
# "Rotation Left")
############################################################
# CONDITIONAL BRANCHES 0x20

# do we need unordered variants for floating point?
# not beq/bne is the only operation for c_regs
BEQ = Opcode(0x20, "beq", OPC_KIND.COND_BRA,
             [OP_KIND.REG_OR_CONST, OP_KIND.REG_OR_CONST, OP_KIND.BBL],
             [TC.ANY, TC.SAME_AS_PREV, TC.INVALID], OPC_GENUS.BASE,
             "Conditional branch if equal.",
             OA.COMMUTATIVE | OA.BBL_TERMINATOR)

BNE = Opcode(0x21, "bne", OPC_KIND.COND_BRA,
             [OP_KIND.REG_OR_CONST, OP_KIND.REG_OR_CONST, OP_KIND.BBL],
             [TC.ANY, TC.SAME_AS_PREV, TC.INVALID], OPC_GENUS.BASE,
             "Conditional branch if not equal.",
             OA.COMMUTATIVE | OA.BBL_TERMINATOR)

# fixed description below: "blt" branches if *less than*, the old text
# wrongly said "greater than" (compare BLE which says "less or equal")
BLT = Opcode(0x22, "blt", OPC_KIND.COND_BRA,
             [OP_KIND.REG_OR_CONST, OP_KIND.REG_OR_CONST, OP_KIND.BBL],
             [TC.ADDR_NUM, TC.SAME_AS_PREV, TC.INVALID], OPC_GENUS.BASE,
             "Conditional branch if less than.",
             OA.BBL_TERMINATOR)

BLE = Opcode(0x23, "ble", OPC_KIND.COND_BRA,
             [OP_KIND.REG_OR_CONST, OP_KIND.REG_OR_CONST, OP_KIND.BBL],
             [TC.ADDR_NUM, TC.SAME_AS_PREV, TC.INVALID], OPC_GENUS.BASE,
             "Conditional branch if less or equal.",
             OA.BBL_TERMINATOR)
############################################################
# More Control Flow 0x28
SWITCH = Opcode(0x28, "switch", OPC_KIND.SWITCH, [OP_KIND.REG, OP_KIND.JTB],
[TC.UINT, TC.INVALID], OPC_GENUS.BASE,
"""Multi target computed jump.
The register argument must be less than the jtb `size`.
The jtb symbol must have been previously defined with the `.jtb` directive.
""",
OA.BBL_TERMINATOR | OA.NO_FALL_THROUGH)
BRA = Opcode(0x29, "bra", OPC_KIND.BRA, [OP_KIND.BBL],
[TC.INVALID], OPC_GENUS.BASE,
"Unconditional branch.",
OA.BBL_TERMINATOR | OA.NO_FALL_THROUGH)
RET = Opcode(0x2a, "ret", OPC_KIND.RET, [],
[], OPC_GENUS.BASE,
"Return from subroutine.",
OA.BBL_TERMINATOR | OA.NO_FALL_THROUGH)
BSR = Opcode(0x2b, "bsr", OPC_KIND.BSR, [OP_KIND.FUN],
[TC.INVALID], OPC_GENUS.BASE,
"Branch to subroutine fun",
OA.CALL)
JSR = Opcode(0x2c, "jsr", OPC_KIND.JSR, [OP_KIND.REG, OP_KIND.FUN],
[TC.CODE, TC.INVALID], OPC_GENUS.BASE,
"""Jump indirectly to subroutine through register (fun describes the signature).
The signature must have been previously defined with the `.fun` directive.""",
OA.CALL)
SYSCALL = Opcode(0x2d, "syscall", OPC_KIND.SYSCALL,
[OP_KIND.FUN, OP_KIND.CONST],
[TC.INVALID, TC.UINT], OPC_GENUS.BASE,
"""Syscall to `syscall_no`. (fun describes the signature).
The signature must have been previously defined with the `.fun` directive.""",
OA.CALL)
TRAP = Opcode(0x2e, "trap", OPC_KIND.RET, [],
[], OPC_GENUS.BASE,
"Abort program.",
OA.BBL_TERMINATOR | OA.NO_FALL_THROUGH)
############################################################
# Misc 0x30
PUSHARG = Opcode(0x30, "pusharg", OPC_KIND.PUSHARG, [OP_KIND.REG_OR_CONST],
[TC.ANY], OPC_GENUS.BASE,
"push a call or return arg - must immediately precede bsr/jsr or ret.",
OA.SPECIAL)
POPARG = Opcode(0x31, "poparg", OPC_KIND.POPARG, [OP_KIND.REG],
[TC.ANY], OPC_GENUS.BASE,
"pop a call or return arg - must immediately follow fun entry or bsr/jsr.",
OA.SPECIAL)
CONV = Opcode(0x32, "conv", OPC_KIND.CONV, [OP_KIND.REG, OP_KIND.REG_OR_CONST],
[TC.NUM, TC.NUM], OPC_GENUS.BASE,
# TODO: specify rounding and overflow for float <-> int conversions
"""Conversion of numerical regs which do not have to be of same size. Bits may change.
If the conversion involves both a widening and a change of type, the widening is performed
first. """)
BITCAST = Opcode(0x33, "bitcast", OPC_KIND.CONV,
[OP_KIND.REG, OP_KIND.REG_OR_CONST],
[TC.ANY, TC.SAME_SIZE_AS_PREV], OPC_GENUS.BASE,
"""Cast between regs of same size. Bits will be re-interpreted but do not change.
This is useful for manipulating addresses in unusual ways or
looking at the binary representation of floats.""")
MOV = Opcode(0x34, "mov", OPC_KIND.MOV, [OP_KIND.REG, OP_KIND.REG_OR_CONST],
[TC.ANY, TC.SAME_AS_PREV], OPC_GENUS.BASE,
"""Move between registers.
While a mov can be emulated via a `add dst = src 0`,
having a dedicated instruction makes some optimizations easier to
implement when combined with a canonicalization.""")
CMPEQ = Opcode(0x35, "cmpeq", OPC_KIND.CMP,
[OP_KIND.REG, OP_KIND.REG_OR_CONST, OP_KIND.REG_OR_CONST,
OP_KIND.REG_OR_CONST, OP_KIND.REG_OR_CONST],
[TC.ANY, TC.SAME_AS_PREV, TC.SAME_AS_PREV, TC.ANY,
TC.SAME_AS_PREV],
OPC_GENUS.BASE,
"""Conditional move (compare equal). dst := (cmp1 == cmp2) ? src1 : src2
Note: dst/cmp1/cmp2 may be of a different type than src1/src2.""",
OA.COMMUTATIVE)
CMPLT = Opcode(0x36, "cmplt", OPC_KIND.CMP,
[OP_KIND.REG, OP_KIND.REG_OR_CONST, OP_KIND.REG_OR_CONST,
OP_KIND.REG_OR_CONST, OP_KIND.REG_OR_CONST],
[TC.ANY, TC.SAME_AS_PREV, TC.SAME_AS_PREV, TC.ADDR_NUM,
TC.SAME_AS_PREV],
OPC_GENUS.BASE,
"""Conditional move (compare less than). dst := (cmp1 < cmp2) ? src1 : src2
Note: dst/cmp1/cmp2 may be of a different type than src1/src2.""")
# materialize addresses in a register
LEA = Opcode(0x38, "lea", OPC_KIND.LEA,
[OP_KIND.REG, OP_KIND.REG_OR_CONST, OP_KIND.REG_OR_CONST],
[TC.ADDR, TC.SAME_AS_PREV, TC.OFFSET], OPC_GENUS.BASE,
"""Load effective Address. dst := base + offset
Note: dst and base are addresses but offset is not.""")
LEA_MEM = Opcode(0x39, "lea.mem", OPC_KIND.LEA,
[OP_KIND.REG, OP_KIND.MEM, OP_KIND.REG_OR_CONST],
[TC.ADDR, TC.INVALID, TC.OFFSET], OPC_GENUS.BASE,
"Load effective memory address with offset, dst := base + offset")
LEA_STK = Opcode(0x3a, "lea.stk", OPC_KIND.LEA,
[OP_KIND.REG, OP_KIND.STK, OP_KIND.REG_OR_CONST],
[TC.ADDR, TC.INVALID, TC.OFFSET], OPC_GENUS.BASE,
"Load effective stack address with offset. dst := base + offset")
LEA_FUN = Opcode(0x3b, "lea.fun", OPC_KIND.LEA1, [OP_KIND.REG, OP_KIND.FUN],
[TC.CODE, TC.INVALID], OPC_GENUS.BASE,
"Load effective function address: dst := base (note: no offset).")
############################################################
# LOAD STORE 0x40
# ld/st base address is in register, offset is immediate
# ld/st base address is register

LD = Opcode(0x40, "ld", OPC_KIND.LD,
            [OP_KIND.REG, OP_KIND.REG_OR_CONST, OP_KIND.REG_OR_CONST],
            [TC.ANY, TC.ADDR, TC.OFFSET], OPC_GENUS.BASE,
            "Load from register base with offset. dst := RAM[base + offset]",
            OA.MEM_RD)

# note: signedness of offset may matter here
LD_MEM = Opcode(0x41, "ld.mem", OPC_KIND.LD,
                [OP_KIND.REG, OP_KIND.MEM, OP_KIND.REG_OR_CONST],
                [TC.ANY, TC.INVALID, TC.OFFSET], OPC_GENUS.BASE,
                "Load from memory base with offset. dst := RAM[base + offset] ",
                OA.MEM_RD)

LD_STK = Opcode(0x42, "ld.stk", OPC_KIND.LD,
                [OP_KIND.REG, OP_KIND.STK, OP_KIND.REG_OR_CONST],
                [TC.ANY, TC.INVALID, TC.OFFSET], OPC_GENUS.BASE,
                "Load from stack base with offset. dst := RAM[base + offset]",
                OA.MEM_RD)

ST = Opcode(0x48, "st", OPC_KIND.ST,
            [OP_KIND.REG, OP_KIND.REG_OR_CONST, OP_KIND.REG_OR_CONST],
            [TC.ADDR, TC.OFFSET, TC.ANY], OPC_GENUS.BASE,
            "Store to register base with offset. RAM[base + offset] := src",
            OA.MEM_WR)

ST_MEM = Opcode(0x49, "st.mem", OPC_KIND.ST,
                [OP_KIND.MEM, OP_KIND.REG_OR_CONST, OP_KIND.REG_OR_CONST],
                [TC.INVALID, TC.OFFSET, TC.ANY], OPC_GENUS.BASE,
                "Store to memory base with offset. RAM[base + offset] := src",
                OA.MEM_WR)

ST_STK = Opcode(0x4a, "st.stk", OPC_KIND.ST,
                [OP_KIND.STK, OP_KIND.REG_OR_CONST, OP_KIND.REG_OR_CONST],
                [TC.INVALID, TC.OFFSET, TC.ANY], OPC_GENUS.BASE,
                "Store to stack base with offset. RAM[base + offset] := src",
                OA.MEM_WR)
############################################################
# FLOAT ALU OPERAND: 0x50
CEIL = Opcode(0x50, "ceil", OPC_KIND.ALU1, [OP_KIND.REG, OP_KIND.REG_OR_CONST],
[TC.FLT, TC.SAME_AS_PREV], OPC_GENUS.BASE,
"Round float to integral, toward positive infinity")
FLOOR = Opcode(0x51, "floor", OPC_KIND.ALU1,
[OP_KIND.REG, OP_KIND.REG_OR_CONST],
[TC.FLT, TC.SAME_AS_PREV], OPC_GENUS.BASE,
"Round float to integral, toward negative infinity")
ROUND = Opcode(0x52, "round", OPC_KIND.ALU1,
[OP_KIND.REG, OP_KIND.REG_OR_CONST],
[TC.FLT, TC.SAME_AS_PREV], OPC_GENUS.BASE,
"Round float to integral, to nearest with ties to away")
TRUNC = Opcode(0x53, "trunc", OPC_KIND.ALU1,
[OP_KIND.REG, OP_KIND.REG_OR_CONST],
[TC.FLT, TC.SAME_AS_PREV], OPC_GENUS.BASE,
"""
Round float to integral, toward zero.
Note, frac(val) = val - trunc(val)""")
SQRT = Opcode(0x54, "sqrt", OPC_KIND.ALU1, [OP_KIND.REG, OP_KIND.REG_OR_CONST],
[TC.FLT, TC.SAME_AS_PREV], OPC_GENUS.BASE,
"Compute the sqrt of floating point value")
# do we need all these?
Opcode(0x58, "sin", OPC_KIND.ALU1, [OP_KIND.REG, OP_KIND.REG_OR_CONST],
[TC.FLT, TC.SAME_AS_PREV], OPC_GENUS.TBD,
"TBD")
Opcode(0x59, "cos", OPC_KIND.ALU1, [OP_KIND.REG, OP_KIND.REG_OR_CONST],
[TC.FLT, TC.SAME_AS_PREV], OPC_GENUS.TBD,
"TBD")
Opcode(0x5a, "tan", OPC_KIND.ALU1, [OP_KIND.REG, OP_KIND.REG_OR_CONST],
[TC.FLT, TC.SAME_AS_PREV], OPC_GENUS.TBD,
"TBD")
Opcode(0x5b, "asin", OPC_KIND.ALU1, [OP_KIND.REG, OP_KIND.REG_OR_CONST],
[TC.FLT, TC.SAME_AS_PREV], OPC_GENUS.TBD,
"TBD")
Opcode(0x5c, "acos", OPC_KIND.ALU1, [OP_KIND.REG, OP_KIND.REG_OR_CONST],
[TC.FLT, TC.SAME_AS_PREV], OPC_GENUS.TBD,
"TBD")
Opcode(0x5d, "atan", OPC_KIND.ALU1, [OP_KIND.REG, OP_KIND.REG_OR_CONST],
[TC.FLT, TC.SAME_AS_PREV], OPC_GENUS.TBD,
"TBD")
Opcode(0x5e, "exp", OPC_KIND.ALU1, [OP_KIND.REG, OP_KIND.REG_OR_CONST],
[TC.FLT, TC.SAME_AS_PREV], OPC_GENUS.TBD,
"TBD")
Opcode(0x5f, "log", OPC_KIND.ALU1, [OP_KIND.REG, OP_KIND.REG_OR_CONST],
[TC.FLT, TC.SAME_AS_PREV], OPC_GENUS.TBD,
"TBD")
############################################################
# Advanced ALU
############################################################

CNTLZ = Opcode(0x60, "cntlz", OPC_KIND.ALU1, [OP_KIND.REG, OP_KIND.REG_OR_CONST],
               [TC.INT, TC.SAME_AS_PREV], OPC_GENUS.BASE,
               "Count leading zeros.")

CNTTZ = Opcode(0x61, "cnttz", OPC_KIND.ALU1, [OP_KIND.REG, OP_KIND.REG_OR_CONST],
               [TC.INT, TC.SAME_AS_PREV], OPC_GENUS.BASE,
               "Count trailing zeros.")

# INT SINGLE OPERAND 0xb0
# the src reg is treated as an unsigned reg
# (population count - experimental, hence OPC_GENUS.TBD and no binding)
Opcode(0x62, "cntpop", OPC_KIND.ALU1, [OP_KIND.REG, OP_KIND.REG_OR_CONST],
       [TC.INT, TC.SAME_AS_PREV], OPC_GENUS.TBD,
       "TBD")
############################################################
# Annotations
############################################################

NOP = Opcode(0x70, "nop", OPC_KIND.NOP, [],
             [], OPC_GENUS.BASE,
             "nop - internal use.")

NOP1 = Opcode(0x71, "nop1", OPC_KIND.NOP1, [OP_KIND.REG],
              [TC.ANY], OPC_GENUS.BASE,
              "nop with one reg - internal use. Can be used to `reserve` a reg for code generation.",
              OA.SPECIAL)

# LINE = Opcode(0x78, "line", OPC_KIND., [OP_KIND.NAME, OP_KIND.CONST],
#               [TC.ANY], OPC_GENUS.BASE,
#               "",
#               OA.SPECIAL)
############################################################
# Misc Experimental
############################################################

# Note, negative lengths copy downwards
Opcode(0xb8, "bcopy", OPC_KIND.BCOPY,
       [OP_KIND.REG, OP_KIND.REG, OP_KIND.REG_OR_CONST],
       [TC.ADDR, TC.SAME_AS_PREV, TC.OFFSET], OPC_GENUS.TBD,
       "TBD",
       OA.MEM_WR | OA.MEM_RD)

# Note, negative lengths copy downwards
Opcode(0xba, "bzero", OPC_KIND.BZERO, [OP_KIND.REG, OP_KIND.REG_OR_CONST],
       [TC.ADDR, TC.OFFSET], OPC_GENUS.TBD,
       "TBD",
       OA.MEM_WR)
############################################################
# Directives 0xd
#
# do not correspond to instructions
############################################################


def Directive(no: int, name: str, operands, desc,
              group=OPC_GENUS.BASE):
    """Register a directive pseudo-opcode; directive operands carry no
    type constraints, so every slot gets TC.INVALID."""
    no_constraints = [TC.INVALID for _ in operands]
    return Opcode(no, name, OPC_KIND.DIRECTIVE, operands,
                  constraints=no_constraints, desc=desc, group=group)
# Register the directives actually supported by the IR reader/writer.
# (Numbers 0x01..0x09 are disjoint from the instruction opcode space.)
Directive(0x01, ".mem", [OP_KIND.NAME, OP_KIND.INT, OP_KIND.MEM_KIND],
          "Add new memory region to unit")
Directive(0x02, ".data", [OP_KIND.INT, OP_KIND.BYTES],
          "Add content to current memory region: multiple bytes")
Directive(0x03, ".addr.fun", [OP_KIND.INT, OP_KIND.FUN],
          "Add content to current memory region: code address")
Directive(0x04, ".addr.mem", [OP_KIND.INT, OP_KIND.MEM, OP_KIND.INT],
          "Add content to current memory region: "
          "memory address with offset")
Directive(0x05, ".fun", [OP_KIND.NAME, OP_KIND.FUN_KIND, OP_KIND.TYPE_LIST,
                         OP_KIND.TYPE_LIST],
          "Add new function to unit")
Directive(0x06, ".bbl", [OP_KIND.NAME],
          "Add new basic block to current function")
Directive(0x07, ".reg", [OP_KIND.DATA_KIND, OP_KIND.NAME_LIST],
          "Add new registers to current function")
Directive(0x08, ".stk", [OP_KIND.NAME, OP_KIND.INT, OP_KIND.INT],
          "Add stack region to current function")
Directive(0x09, ".jtb",
          [OP_KIND.NAME, OP_KIND.INT, OP_KIND.BBL, OP_KIND.BBL_TAB],
          "bbl jump table: <name> <size> <default-bbl> <sparse-table>")
############################################################
# experimental/unimplemented
############################################################
# add/sub/rotate with carry for legalizing say 64bit regs into pairs of 32bit regs
# unreachable
# swap
# unordered comparison
# https://stackoverflow.com/questions/8627331/what-does-ordered-unordered-comparison-mean
# conv int - flt (urgent)
# conv int - int (urgent)
# extract (urgent)
# insert (urgent)
# ld_l, st_C, cmpxch, cmpswp
# pow, pow2 powi
# log
# crc32c (supported by x86-64 and arm64 - using 0x1EDC6F41)
# aes ???
# ld.scaled /st.scaled: base_reg + index_reg * scale imm + offset_imm
# copysign
# prefetch
# other built-ins: cf.:
# https://github.com/llvm-mirror/compiler-rt/tree/master/lib/builtins

# Section headings emitted before the opcode with the given number when
# rendering the markdown documentation (see _render_documentation).
_GROUPS = {
    0x01: "## Directives\n",
    0x10: "## Basic ALU\n",
    0x20: "## Conditional Branches\n",
    0x28: "## Other Control Flow\n",
    0x30: "## Move/Conversion\n",
    0x38: "## Address Arithmetic\n",
    0x40: "## Load\n",
    0x48: "## Store\n",
    0x50: "## Float ALU\n",
    0x60: "## Advanced ALU\n",
    0x70: "## Annotation\n",
    0xf1: "## Misc\n",
}
def _render_operand_desc(purpose: str, kind: OP_KIND, constraint: TC, mod1="",
                         mod2="") -> str:
    """Render one operand as markdown: `*purpose* <mod1>KIND[:CONSTRAINT]<mod2>`."""
    label = kind.name.replace("REG_OR_CONST", "REG/CONST")
    if constraint != TC.INVALID:
        label = f"{label}:{constraint.name}"
    return f"*{purpose}* {mod1}{label}{mod2}"
def _render_directive_doc(o: Opcode, fout):
    """Write the markdown documentation entry for one directive to fout."""
    rendered = [_render_operand_desc(p, k, c, mod1="<sub>[", mod2="]</sub>")
                for p, k, c in zip(o.purpose, o.operand_kinds, o.constraints)]
    print(f"#### [{o.no:02x}] {o.name} {' '.join(rendered)}", file=fout)
    print(o.desc, file=fout)
def _render_opcode_doc(o: Opcode, fout):
    """Write the markdown documentation entry for one instruction to fout.

    An "=" separator is inserted after the written register (for families
    with a def) or before the stored value (for plain stores).
    """
    print_ops = [_render_operand_desc(*t, mod1="<sub>[", mod2="]</sub>")
                 for t in zip(o.purpose, o.operand_kinds, o.constraints)]
    if o.kind in _OFS_WRITING_REGS:
        print_ops.insert(1, "=")
    if o.kind in {OPC_KIND.ST}:
        print_ops.insert(-1, "=")
    print(f"#### [{o.no:02x}] {o.name} {' '.join(print_ops)}", file=fout)
    print(o.desc, file=fout)
    # print("* constraints:", ' '.join(ops))

    # print(f"{name:15.15}, // {' '.join(ops)} [{'
    # '.join(cons)}]"
def _render_documentation(fout):
    """Write markdown documentation for all BASE opcodes/directives to fout."""
    for opc in Opcode.Table.values():
        if opc.group != OPC_GENUS.BASE:
            continue
        if opc.no in _GROUPS:
            print(_GROUPS[opc.no], file=fout)
        if opc.kind == OPC_KIND.DIRECTIVE:
            _render_directive_doc(opc, fout)
        else:
            _render_opcode_doc(opc, fout)
        # bug fix: the separating blank line previously went to stdout
        # instead of fout (every other print here writes to fout)
        print(file=fout)
def _render_h(fout):
    """Write the C++ header fragment (OPC enum plus supporting enums) to fout."""
    print("enum class OPC : uint8_t {", file=fout)
    last = 0
    print(f" INVALID = 0x00,", file=fout)
    for opc in Opcode.Table.values():
        if opc.group != OPC_GENUS.BASE:
            continue
        # blank line between groups of 16 opcode numbers
        if (opc.no & 0xff0) != last & 0xff0:
            print("", file=fout)
        last = opc.no
        name = opc.name.upper().replace(".", "_")
        if opc.kind == OPC_KIND.DIRECTIVE:
            # ".mem" -> "DIR_MEM" etc.
            name = "DIR_" + name[1:]
        print(f" {name} = 0x{opc.no:02x},", file=fout)
    print("};", file=fout)
    # _render_enum("OpcodeFamily", ["OF.INVALID", "OF.DIRECTIVE"] +
    #              list(OFS_ALL))
    # _render_enum("OperandKind", ["OK.INVALID"] +
    #              [x.upper() for x in OKS_ALL])
    for cls in [OPC_GENUS, FUN_KIND, MEM_KIND, TC, OPC_KIND, DK, OP_KIND]:
        cgen.RenderEnum(cgen.NameValues(cls), f"class {cls.__name__} : uint8_t",
                        fout)
    # OA needs 16 bits because it is a flag set
    cgen.RenderEnum(cgen.NameValues(OA), f"{OA.__name__} : uint16_t", fout)
def _render_c(fout):
    """Write the C++ implementation fragment to fout: enum<->string maps
    and the dense 256-entry GlobalOpcodes table (gaps filled with dummies).
    """
    def render(cls, both_ways=True):
        # emit enum->string map/function and optionally string->enum map
        cgen.RenderEnumToStringMap(cgen.NameValues(cls), cls.__name__, fout)
        cgen.RenderEnumToStringFun(cls.__name__, fout)
        if both_ways:
            cgen.RenderStringToEnumMap(cgen.NameValues(cls),
                                       cls.__name__ + "FromStringMap",
                                       cls.__name__ + "Jumper", fout)

    render(OPC_GENUS)
    render(FUN_KIND)
    render(MEM_KIND)
    render(TC)
    render(DK)
    render(OP_KIND, False)
    alpha = [(opc.name, opc.no) for opc in Opcode.Table.values()]
    cgen.RenderStringToEnumMap(alpha, "OPCFromStringMap", "OPCJumper", fout)

    # bug fix: the GlobalOpcodes table previously went to stdout via bare
    # print() calls while everything else was written to `fout`; this only
    # worked when fout happened to be sys.stdout
    print("const Opcode GlobalOpcodes[256] = {", file=fout)
    opcodes = sorted([(o.no, o) for o in Opcode.Table.values()])
    last = -1
    # placeholder used for unassigned opcode numbers
    dummy_opc = Opcode(0, "", OPC_KIND.RET, [], [], OPC_GENUS.INVALID, "")
    dummy_opc.name = ""
    dummy_opc.kind = OPC_KIND.INVALID

    def emit_one(opc: Opcode):
        # emit a single C++ initializer for one table slot
        kinds_str = [f"OP_KIND::{x.name}" for x in opc.operand_kinds]
        constraints_str = [f"TC::{x.name}" for x in opc.constraints]
        attributes = [f"OA::{x.name}" for x in OA if x in opc.attributes]
        if not attributes:
            attributes = ["0"]
        print(" { // %2x %s" % (opc.no, opc.name), file=fout)
        print(' {%s}, ' % ", ".join(kinds_str), file=fout)
        print(' OPC_KIND::%s, OPC_GENUS::%s, %d, %d,' %
              (opc.kind.name, opc.group.name, len(opc.operand_kinds),
               opc.def_ops_count()), file=fout)
        print(' {%s}, ' % ", ".join(constraints_str), file=fout)
        print(' "%s", %s },' % (opc.name, '|'.join(attributes)), file=fout)

    for n, o in opcodes:
        if o.group != OPC_GENUS.BASE:
            continue
        last += 1
        while last < n:
            # fill the gap up to the next real opcode with dummies
            dummy_opc.no = last
            emit_one(dummy_opc)
            last += 1
        emit_one(o)
    print("};\n", file=fout)
def Dump():
    """Print a human-readable summary of all registered opcodes to stdout."""
    prev_kind = None
    for opc in Opcode.Table.values():
        # blank line whenever the opcode family changes
        if opc.kind != prev_kind:
            print()
            prev_kind = opc.kind
        rendered = [_render_operand_desc(p, k, c)
                    for p, k, c in zip(opc.purpose, opc.operand_kinds,
                                       opc.constraints)]
        print(f"{opc.kind.name} {opc.name} {' '.join(rendered)}")
    print("total opcodes: %d" % len(Opcode.Table))
if __name__ == "__main__":
    import sys

    # CLI modes:
    #   documentation - rewrite markdown docs read from stdin
    #   gen_h / gen_c - rewrite C++ header/impl fragments read from stdin
    #   (no argument) - dump the opcode table to stdout
    if len(sys.argv) > 1:
        if sys.argv[1] == "documentation":
            cgen.ReplaceContent(_render_documentation, sys.stdin, sys.stdout)
        elif sys.argv[1] == "gen_h":
            cgen.ReplaceContent(_render_h, sys.stdin, sys.stdout)
        elif sys.argv[1] == "gen_c":
            cgen.ReplaceContent(_render_c, sys.stdin, sys.stdout)
    else:
        Dump()
| 36.113594 | 108 | 0.571443 |
255944c391999d6773e34c522056f9b52e9a85c5 | 784 | py | Python | hexa/plugins/connector_airflow/migrations/0013_dag_run_states.py | qgerome/openhexa-app | 8c9377b2ad972121d8e9575f5d52420212b52ed4 | [
"MIT"
] | 4 | 2021-07-19T12:53:21.000Z | 2022-01-26T17:45:02.000Z | hexa/plugins/connector_airflow/migrations/0013_dag_run_states.py | qgerome/openhexa-app | 8c9377b2ad972121d8e9575f5d52420212b52ed4 | [
"MIT"
] | 20 | 2021-05-17T12:27:06.000Z | 2022-03-30T11:35:26.000Z | hexa/plugins/connector_airflow/migrations/0013_dag_run_states.py | qgerome/openhexa-app | 8c9377b2ad972121d8e9575f5d52420212b52ed4 | [
"MIT"
] | 2 | 2021-09-07T04:19:59.000Z | 2022-02-08T15:33:29.000Z | # Generated by Django 3.2.7 on 2021-10-27 09:40
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration (see header): orders DagRun rows
    # newest-first by execution_date and restricts DagRun.state to a fixed
    # set of four choices matching Airflow run states.
    dependencies = [
        ("connector_airflow", "0012_remove_airflow_run_message"),
    ]
    operations = [
        migrations.AlterModelOptions(
            name="dagrun",
            options={"ordering": ("-execution_date",)},
        ),
        migrations.AlterField(
            model_name="dagrun",
            name="state",
            field=models.CharField(
                # (stored value, human-readable label) pairs
                choices=[
                    ("success", "Success"),
                    ("running", "Running"),
                    ("failed", "Failed"),
                    ("queued", "Queued"),
                ],
                max_length=200,
            ),
        ),
    ]
| 25.290323 | 65 | 0.477041 |
2559a4a18ffbdf9ee00369efeb0eacf72905221a | 3,447 | py | Python | LAMARCK_ML/data_util/TypeShape_pb2.py | JonasDHomburg/LAMARCK | 0e372c908ff59effc6fd68e6477d04c4d89e6c26 | [
"Apache-2.0",
"BSD-3-Clause"
] | 3 | 2019-09-20T08:03:47.000Z | 2021-05-10T11:02:09.000Z | LAMARCK_ML/data_util/TypeShape_pb2.py | JonasDHomburg/LAMARCK_ML | 0e372c908ff59effc6fd68e6477d04c4d89e6c26 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | LAMARCK_ML/data_util/TypeShape_pb2.py | JonasDHomburg/LAMARCK_ML | 0e372c908ff59effc6fd68e6477d04c4d89e6c26 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: LAMARCK_ML/data_util/TypeShape.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from LAMARCK_ML.data_util import Shape_pb2 as LAMARCK__ML_dot_data__util_dot_Shape__pb2
from LAMARCK_ML.data_util import DType_pb2 as LAMARCK__ML_dot_data__util_dot_DType__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='LAMARCK_ML/data_util/TypeShape.proto',
package='LAMARCK_ML',
syntax='proto3',
serialized_pb=_b('\n$LAMARCK_ML/data_util/TypeShape.proto\x12\nLAMARCK_ML\x1a LAMARCK_ML/data_util/Shape.proto\x1a LAMARCK_ML/data_util/DType.proto\"w\n\x0eTypeShapeProto\x12\x0f\n\x07id_name\x18\x01 \x01(\t\x12)\n\tdtype_val\x18\x02 \x01(\x0b\x32\x16.LAMARCK_ML.DTypeProto\x12)\n\tshape_val\x18\x03 \x01(\x0b\x32\x16.LAMARCK_ML.ShapeProtob\x06proto3')
,
dependencies=[LAMARCK__ML_dot_data__util_dot_Shape__pb2.DESCRIPTOR,LAMARCK__ML_dot_data__util_dot_DType__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_TYPESHAPEPROTO = _descriptor.Descriptor(
name='TypeShapeProto',
full_name='LAMARCK_ML.TypeShapeProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id_name', full_name='LAMARCK_ML.TypeShapeProto.id_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='dtype_val', full_name='LAMARCK_ML.TypeShapeProto.dtype_val', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='shape_val', full_name='LAMARCK_ML.TypeShapeProto.shape_val', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=120,
serialized_end=239,
)
_TYPESHAPEPROTO.fields_by_name['dtype_val'].message_type = LAMARCK__ML_dot_data__util_dot_DType__pb2._DTYPEPROTO
_TYPESHAPEPROTO.fields_by_name['shape_val'].message_type = LAMARCK__ML_dot_data__util_dot_Shape__pb2._SHAPEPROTO
DESCRIPTOR.message_types_by_name['TypeShapeProto'] = _TYPESHAPEPROTO
TypeShapeProto = _reflection.GeneratedProtocolMessageType('TypeShapeProto', (_message.Message,), dict(
DESCRIPTOR = _TYPESHAPEPROTO,
__module__ = 'LAMARCK_ML.data_util.TypeShape_pb2'
# @@protoc_insertion_point(class_scope:LAMARCK_ML.TypeShapeProto)
))
_sym_db.RegisterMessage(TypeShapeProto)
# @@protoc_insertion_point(module_scope)
| 38.730337 | 354 | 0.78387 |
255a4a642a2b2e33a26ec84bb18d2413e8e4b098 | 31,149 | py | Python | main/staff.py | YukiGao7718/Airline-Reservation-System | ecc75316ccbc6aa2db4d0378b938c0275fddb6d3 | [
"MIT"
] | null | null | null | main/staff.py | YukiGao7718/Airline-Reservation-System | ecc75316ccbc6aa2db4d0378b938c0275fddb6d3 | [
"MIT"
] | null | null | null | main/staff.py | YukiGao7718/Airline-Reservation-System | ecc75316ccbc6aa2db4d0378b938c0275fddb6d3 | [
"MIT"
] | null | null | null | from flask import Flask, render_template, request, session, redirect, url_for
import pymysql.cursors
import datetime
from pyecharts import options as opts
from pyecharts.charts import Pie,Bar
from appdef import *
#Get the airline the staff member works for
#Get the airline the staff member works for
def getStaffAirline():
    """Return the airline_name of the currently logged-in staff member."""
    staff_username = session['username']
    cursor = conn.cursor()
    # username is the primary key of airline_staff, so at most one row matches
    cursor.execute('select airline_name from airline_staff where username = %s',
                   (staff_username))
    # fetchall returns a list of dict rows; take the single match
    rows = cursor.fetchall()
    airline_name = rows[0]['airline_name']
    cursor.close()
    return airline_name
#Make sure that the user is actually staff before performing any operations
#Make sure that the user is actually staff before performing any operations
def authenticateStaff():
    """Return True iff the session user exists in airline_staff.

    If a user is logged in but is not a staff member, the session username
    is popped (logout) before returning False.
    """
    try:
        # the session may not contain a logged-in user at all
        username = session['username']
    except KeyError:  # was a bare `except:`, which hid unrelated errors
        return False
    cursor = conn.cursor()
    query = 'select * from airline_staff where username=%s'
    cursor.execute(query, (username))
    data = cursor.fetchall()
    cursor.close()
    if data:
        return True
    else:
        #Logout before returning error message
        session.pop('username')
        return False
@app.route('/staffHome')
def staffHome():
    """Staff landing page: greeting plus top-3 destination cities for the
    past 3 months and the past year."""
    if not authenticateStaff():
        error = 'Invalid Credentials'
        return redirect(url_for('errorpage', error=error))
    username = session['username']
    message = request.args.get('message')
    cursor = conn.cursor()
    queryGetairline = "SELECT airline_name FROM airline_staff WHERE username= %s"
    cursor.execute(queryGetairline, username)
    airline_name = cursor.fetchone()['airline_name']
    # the query already caps results at 3 (limit 3), so no extra trimming
    data1 = _top_destination_cities(cursor, airline_name, "3 MONTH")
    data2 = _top_destination_cities(cursor, airline_name, "1 YEAR")
    cursor.close()
    return render_template('staff.html', username=username,
                           message=message,
                           destination1=data1,
                           destination2=data2,
                           range1=range(len(data1)),
                           range2=range(len(data2)))


def _top_destination_cities(cursor, airline_name, interval):
    """Return up to 3 most-popular arrival cities for one time interval.

    `interval` is a trusted hard-coded SQL literal ('3 MONTH' or '1 YEAR'),
    never user input — it is spliced into the query text because MySQL
    cannot bind an INTERVAL unit as a parameter.
    """
    query = "select count(ticket.ticket_id) as cnt, airport.airport_city as city\
                from airport,flight,ticket,purchases\
                where airport.airport_name = flight.arrival_airport\
                and flight.flight_num = ticket.flight_num\
                and flight.airline_name = %s\
                and purchases.ticket_id = ticket.ticket_id\
                and purchases.purchase_date between DATE_SUB(curdate(), INTERVAL " + interval + ") and curdate()\
                group by city \
                order by cnt DESC limit 3"
    cursor.execute(query, airline_name)
    # most popular first, as ordered by the query
    return [row['city'] for row in cursor.fetchall()]
@app.route('/staffHome/searchFlights')
def searchFlightsPage():
    """Render the staff flight-search page.

    The default listing shows this airline's flights departing or
    arriving within the next 30 days.
    """
    if authenticateStaff():
        cursor = conn.cursor()
        airline = getStaffAirline()
        query = "select * from flight where airline_name = %s \
            and ((departure_time between curdate() and date_add(curdate(), interval 30 day)) \
            or (arrival_time between curdate() and date_add(curdate(), interval 30 day)))"
        cursor.execute(query, (airline))
        data = cursor.fetchall()
        cursor.close()
        # a failed search redirects back here with an error in the query string
        error = request.args.get('error')
        return render_template('searchStaff.html', error=error, results=data)
    else:
        error = 'Invalid Credentials'
        return redirect(url_for('errorpage', error=error))
@app.route('/staffHome/searchFlights/city', methods=['POST'])
def searchFlightsCity():
    """Search this airline's flights that depart from or arrive at a city."""
    if authenticateStaff():
        cursor = conn.cursor()
        city = request.form['citysearchbox']
        airline = getStaffAirline()
        # match the city against both endpoints of each flight
        query = "select * from flight,airport \
            where (airport.airport_name=flight.departure_airport or airport.airport_name=flight.arrival_airport) \
            and airport.airport_city=%s and airline_name=%s"
        cursor.execute(query, (city, airline))
        data = cursor.fetchall()
        cursor.close()
        error = None
        if data:
            return render_template('searchStaffResults.html', results=data)
        else:
            #returns an error message to the html page
            error = 'No results found'
            return redirect(url_for('searchFlightsPage', error=error))
    else:
        error = 'Invalid Credentials'
        return redirect(url_for('errorpage', error=error))
@app.route('/staffHome/searchFlights/airport', methods=['POST'])
def searchFlightsAirport():
    """Search this airline's flights touching a given airport code."""
    if authenticateStaff():
        cursor = conn.cursor()
        airport = request.form['airportsearchbox']
        airline = getStaffAirline()
        query = 'select * from flight where (departure_airport = %s or arrival_airport = %s) and airline_name=%s'
        cursor.execute(query, (airport, airport, airline))
        data = cursor.fetchall()
        cursor.close()
        error = None
        if data:
            return render_template('searchStaffResults.html', results=data)
        else:
            #returns an error message to the html page
            error = 'No results found'
            return redirect(url_for('searchFlightsPage', error=error))
    else:
        error = 'Invalid Credentials'
        return redirect(url_for('errorpage', error=error))
@app.route('/staffHome/searchFlights/date', methods=['POST'])
def searchFlightsDate():
    """Search this airline's flights departing or arriving in a date range."""
    if authenticateStaff():
        begintime = request.form['begintime']
        endtime = request.form['endtime']
        # validateDates is a shared helper (defined elsewhere in this app)
        if not validateDates(begintime, endtime):
            error = 'Invalid date range'
            return redirect(url_for('searchFlightsPage', error=error))
        airline = getStaffAirline()
        cursor = conn.cursor()
        query = "select * from flight \
            where ((departure_time between %s and %s) \
            or (arrival_time between %s and %s)) and airline_name=%s"
        cursor.execute(query, (begintime, endtime, begintime, endtime, airline))
        data = cursor.fetchall()
        cursor.close()
        error = None
        if data:
            return render_template('searchStaffResults.html', results=data)
        else:
            #returns an error message to the html page
            error = 'No results found'
            return redirect(url_for('searchFlightsPage', error=error))
    else:
        error = 'Invalid Credentials'
        return redirect(url_for('errorpage', error=error))
@app.route('/staffHome/searchFlights/customers', methods=['POST'])
def searchFlightsCustomer():
    """List the customers who bought tickets on one of this airline's flights."""
    if authenticateStaff():
        flightnum = request.form['flightsearchbox']
        airline = getStaffAirline()
        query = "select customer_email from purchases natural join ticket\
            where flight_num = %s and airline_name=%s"
        cursor = conn.cursor()
        cursor.execute(query, (flightnum, airline))
        data = cursor.fetchall()
        cursor.close()
        if data:
            return render_template('searchStaffResults.html', customerresults=data, flightnum=flightnum)
        else:
            #returns an error message to the html page
            error = 'No results found'
            return redirect(url_for('searchFlightsPage', error=error))
    else:
        error = 'Invalid Credentials'
        return redirect(url_for('errorpage', error=error))
@app.route('/staffHome/createFlight')
def createFlightPage():
    """Render the create-flight form plus the airline's next-30-day flights.

    Also supplies the known airports and the airline's airplanes for the
    form's dropdowns.
    """
    if not authenticateStaff():
        error = 'Invalid Credentials'
        return redirect(url_for('errorpage', error=error))
    airline = getStaffAirline()  # was fetched twice; once is enough
    cursor = conn.cursor()
    query = "select * from flight where airline_name = %s \
        and ((departure_time between curdate() and date_add(curdate(), interval 30 day)) \
        or (arrival_time between curdate() and date_add(curdate(), interval 30 day)))"
    cursor.execute(query, (airline))
    data = cursor.fetchall()
    # reuse the same cursor (the original opened a second one and leaked the first)
    query = 'select distinct airport_name from airport'
    cursor.execute(query)
    airportdata = cursor.fetchall()
    query = 'select distinct airplane_id from airplane where airline_name=%s'
    cursor.execute(query, (airline))
    airplanedata = cursor.fetchall()
    cursor.close()
    error = request.args.get('error')
    return render_template('createFlight.html', error=error,
                           airportdata=airportdata,
                           airplanedata=airplanedata,
                           results=data)
@app.route('/staffHome/createFlight/Auth', methods=['POST'])
def createFlight():
    """Validate the submitted flight form and insert the new flight.

    Validation order (unchanged): unique flight number, departure airport
    exists, arrival airport exists, date range is valid, airplane exists.
    Any failure redirects back to the form with a specific error message;
    success commits the insert and redirects to the staff home page.
    """
    # prevent unauthorized users from doing this action
    if not authenticateStaff():
        error = 'Invalid Credentials'
        return redirect(url_for('errorpage', error=error))
    flightnum = request.form['flightnum']
    departport = request.form['departport']
    departtime = request.form['departtime']
    arriveport = request.form['arriveport']
    arrivetime = request.form['arrivetime']
    price = request.form['price']
    status = "Upcoming"
    airplaneid = request.form['airplanenum']
    airline = getStaffAirline()
    cursor = conn.cursor()
    try:
        # the (airline, flight number) pair must be new
        cursor.execute('select * from flight where airline_name = %s and flight_num = %s',
                       (airline, flightnum))
        if cursor.fetchall():
            error = "The flight number already exists, please enter another one."
            return redirect(url_for('createFlightPage', error=error))
        # both airports must already be registered
        cursor.execute('select * from airport where airport_name = %s ', (departport))
        if not cursor.fetchall():
            error = "The Departure Airport does not exist, please add the airport first."
            return redirect(url_for('createFlightPage', error=error))
        cursor.execute('select * from airport where airport_name = %s ', (arriveport))
        if not cursor.fetchall():
            error = "The Arrival Airport does not exist, please add the airport first."
            return redirect(url_for('createFlightPage', error=error))
        if not validateDates(departtime, arrivetime):
            error = 'Invalid date range'
            return redirect(url_for('createFlightPage', error=error))
        # Check that airplane is valid
        cursor.execute('select * from airplane where airplane_id = %s', (airplaneid))
        if not cursor.fetchall():
            error = 'Invalid Airplane ID'
            return redirect(url_for('createFlightPage', error=error))
        cursor.execute('insert into flight values (%s, %s, %s, %s, %s, %s, %s, %s, %s)',
                       (airline, flightnum, departport, departtime, arriveport,
                        arrivetime, price, status, airplaneid))
        conn.commit()
    finally:
        # close the cursor on every exit path (the original leaked it on errors)
        cursor.close()
    return redirect(url_for('staffHome', message="Operation Successful"))
@app.route('/staffHome/changeFlight')
def changeFlightStatusPage():
    """Render the change-flight-status form."""
    if not authenticateStaff():
        return redirect(url_for('errorpage', error='Invalid Credentials'))
    # a failed submission bounces back here with an error in the query string
    return render_template('changeFlight.html', error=request.args.get('error'))
@app.route('/staffHome/changeFlight/Auth', methods=['POST'])
def changeFlightStatus():
    """Update the status of a flight belonging to the staff member's airline."""
    # prevent unauthorized users from doing this action
    if not authenticateStaff():
        error = 'Invalid Credentials'
        return redirect(url_for('errorpage', error=error))
    flightnum = request.form['flightnum']
    status = request.form['status']
    if not status:
        # validate before touching the database (the original opened — and
        # leaked — a cursor before this check)
        error = 'Did not select new status'
        return redirect(url_for('changeFlightStatusPage', error=error))
    airline = getStaffAirline()
    cursor = conn.cursor()
    #Check that the flight is from the same airline as the staff
    query = 'select * from flight where flight_num = %s and airline_name = %s'
    cursor.execute(query, (flightnum, airline))
    data = cursor.fetchall()
    if not data:
        cursor.close()  # close on the error path too
        error = 'Incorrect enter - flight number is not in your airline '
        return redirect(url_for('changeFlightStatusPage', error=error))
    #Update the specified flight
    query = 'update flight set status=%s where flight_num=%s and airline_name = %s'
    cursor.execute(query, (status, flightnum, airline))
    conn.commit()
    cursor.close()
    return redirect(url_for('staffHome', message="Operation Successful"))
@app.route('/staffHome/addAirplane')
def addAirplanePage():
    """Render the add-airplane form."""
    if not authenticateStaff():
        return redirect(url_for('errorpage', error='Invalid Credentials'))
    return render_template('addAirplane.html', error=request.args.get('error'))
@app.route('/staffHome/addAirplane/confirm', methods=['POST'])
def addAirplane():
    """Register a new airplane for the staff member's airline.

    On success renders a confirmation page listing every airplane the
    airline now owns; on a duplicate ID redirects back with an error.
    """
    # prevent unauthorized users from doing this action
    if not authenticateStaff():
        error = 'Invalid Credentials'
        return redirect(url_for('errorpage', error=error))
    planeid = request.form['id']
    seats = request.form['seats']
    airline = getStaffAirline()
    cursor = conn.cursor()
    #Check if planeid is not taken
    cursor.execute('select * from airplane where airplane_id = %s', (planeid))
    if cursor.fetchall():
        cursor.close()  # the original leaked the cursor on this path
        error = "Airplane ID already taken"
        return redirect(url_for('addAirplanePage', error=error))
    #Insert the airplane
    cursor.execute('insert into airplane values (%s, %s, %s)', (airline, planeid, seats))
    conn.commit()
    #Get a full list of airplanes for the confirmation page
    cursor.execute('select * from airplane where airline_name = %s', (airline))
    data = cursor.fetchall()
    cursor.close()
    return render_template('addAirplaneConfirm.html', results=data)
@app.route('/staffHome/addAirport')
def addAirportPage():
    """Render the add-airport form."""
    if not authenticateStaff():
        return redirect(url_for('errorpage', error='Invalid Credentials'))
    return render_template('addAirport.html', error=request.args.get('error'))
@app.route('/staffHome/addAirport/Auth', methods=['POST'])
def addAirport():
    """Insert a new airport after checking the code length and uniqueness."""
    # prevent unauthorized users from doing this action
    if not authenticateStaff():
        return redirect(url_for('errorpage', error='Invalid Credentials'))
    name = request.form['name']
    city = request.form['city']
    # airport codes are stored as short abbreviations (at most 3 characters)
    if len(name) > 3:
        error = "Please enter the abbreviation of airport."
        return redirect(url_for('addAirportPage', error=error))
    cursor = conn.cursor()
    cursor.execute("select * from airport where airport_name = %s and airport_city = %s",
                   (name, city))
    existing = cursor.fetchall()
    cursor.close()
    if existing:
        error = "Airport Already exits."
        return redirect(url_for('addAirportPage', error=error))
    cursor = conn.cursor()
    cursor.execute('insert into airport values (%s, %s)', (name, city))
    conn.commit()
    cursor.close()
    return redirect(url_for('staffHome', message="Operation Successful"))
@app.route('/staffHome/viewAgents')
def viewAgentsPage():
    """Render the booking-agent reports landing page."""
    if not authenticateStaff():
        return redirect(url_for('errorpage', error="Invalid Credentials"))
    return render_template('viewAgents.html', error=request.args.get('error'))
@app.route('/staffHome/viewAgents/sales', methods=['POST'])
def viewAgentsSales():
    """Show the top five booking agents by ticket sales (past month or year)."""
    if not authenticateStaff():
        error = "Invalid Credentials"
        return redirect(url_for('errorpage', error=error))
    daterange = request.form['range']
    # `daterange` is spliced into the SQL text (MySQL cannot bind an INTERVAL
    # unit as a parameter), so whitelist it to block SQL injection from the form
    if daterange not in ('month', 'year'):
        return redirect(url_for('viewAgentsPage', error='Invalid date range'))
    airline = getStaffAirline()
    #datrange specify the past 1 month or year
    cursor = conn.cursor()
    query = 'select email,count(ticket_id) as sales \
        from booking_agent natural join purchases natural join ticket \
        where purchase_date >= date_sub(curdate(), interval 1 ' + daterange + ') \
        and airline_name=%s group by email order by sales DESC limit 5'
    cursor.execute(query, (airline))
    data = cursor.fetchall()
    cursor.close()
    #Use only the top 5 sellers
    #Python will not break if we try to access a range that extends beyond the end of the array
    return render_template('viewAgentsSales.html', results=data[0:5], date=daterange)
@app.route('/staffHome/viewAgents/commission')
def viewAgentsCommission():
    """Show the top five booking agents by commission over the past year.

    Commission is computed as 10% of the ticket prices each agent sold.
    """
    if authenticateStaff():
        airline = getStaffAirline()
        cursor = conn.cursor()
        query = "select email,sum(flight.price)*0.1 as commission \
            from booking_agent natural join purchases natural join ticket natural join flight \
            where purchase_date >= date_sub(curdate(), interval 1 year) and airline_name=%s\
            group by email order by commission DESC limit 5"
        cursor.execute(query, (airline))
        data = cursor.fetchall()
        cursor.close()
        #Use only the top 5 sellers
        #Python will not break if we try to access a range that extends beyond the end of the array
        return render_template('viewAgentsCommission.html', results = data[0:5])
    else:
        error = "Invalid Credentials"
        return redirect(url_for('errorpage', error=error))
@app.route('/staffHome/viewCustomers')
def viewCustomersPage():
    """Show the airline's most frequent customer(s) over the past year.

    The `having ... >= all (...)` subquery keeps only customers whose
    purchase count ties the maximum, so several rows can be returned.
    """
    if authenticateStaff():
        airline = getStaffAirline()
        cursor = conn.cursor()
        query = 'select customer_email, count(ticket_id) as customerpurchases \
            from purchases natural join ticket \
            where airline_name= %s \
            and purchase_date >= date_sub(curdate(), interval 1 year) group by customer_email \
            having customerpurchases \
            >= all (select count(ticket_id) \
            from purchases natural join ticket \
            where airline_name = %s \
            and purchase_date >= date_sub(curdate(), interval 1 year) GROUP by customer_email)'
        cursor.execute(query, (airline, airline))
        data = cursor.fetchall()
        cursor.close()
        error = request.args.get('error')
        return render_template('viewCustomers.html', error=error, results=data)
    else:
        error = "Invalid Credentials"
        return redirect(url_for('errorpage', error=error))
@app.route('/staffHome/viewCustomers/results', methods=['POST'])
def viewCustomers():
    """List the flights a given customer has flown on with this airline.

    Rejects the request with an error message when the submitted email
    does not belong to any registered customer.
    """
    if authenticateStaff():
        airline = getStaffAirline()
        customer = request.form['email']
        cursor = conn.cursor()
        query1 = "select * from customer where email = %s"
        cursor.execute(query1,customer)
        data1 = cursor.fetchone()
        error = request.args.get('error')
        cursor.close()
        if not data1:
            error = "Not a customer email, please enter a customer email."
            return redirect(url_for('viewCustomersPage',error = error))
        else:
            cursor = conn.cursor()
            query = 'select distinct flight_num from purchases natural join ticket where airline_name = %s and customer_email=%s'
            cursor.execute(query, (airline, customer))
            data = cursor.fetchall()
            cursor.close()
            return render_template('viewCustomersResults.html', results=data, customer=customer)
    else:
        error = "Invalid Credentials"
        return redirect(url_for('errorpage', error=error))
@app.route('/staffHome/viewReports')
def viewReportsPage():
    """Render a bar chart of monthly ticket sales for the past 12 months.

    Builds one query per month offset. The interpolated value is `str(i)`
    for an internal loop integer, never user input, so the string splice
    is safe here.
    """
    if authenticateStaff():
        airline = getStaffAirline()
        currentmonth = datetime.datetime.now().month
        monthtickets = []
        cursor = conn.cursor()
        for i in range(0, 12):
            query = 'select count(ticket_id) as sales \
                from purchases natural join ticket \
                where year(purchase_date) = year(curdate() - interval ' + str(i) + ' month) \
                and month(purchase_date) = month(curdate() - interval ' + str(i) + ' month) \
                and airline_name=%s'
            cursor.execute(query, (airline))
            data = cursor.fetchall()
            # calendar month number (1-12) that this offset corresponds to
            salemonth = ((currentmonth - (i+1)) % 12) + 1
            # print (data[0]['sales'])
            monthtickets.append([data[0]['sales'], salemonth])
        cursor.close()
        # pyecharts bar chart: x axis = month number, y axis = ticket count
        c1 = (
            Bar()
            .add_xaxis([d[1] for d in monthtickets])
            .add_yaxis('total ticket number',[d[0] for d in monthtickets])
            .set_global_opts(xaxis_opts=opts.AxisOpts(axislabel_opts=opts.LabelOpts(rotate=0)),
                            title_opts=opts.TitleOpts(title="Ticket Amount in the Past",
                            subtitle= "In the past 1 year"),
                            legend_opts=opts.LegendOpts(pos_right="15%"))
        )
        error = request.args.get('error')
        return render_template('viewReports.html',
                                bar_options1=c1.dump_options(),error = error)
    else:
        error = "Invalid Credentials"
        return redirect(url_for('errorpage', error=error))
@app.route('/staffHome/viewReports/dates', methods=['POST'])
def viewReportsDates():
    """Report total ticket sales between two user-supplied dates."""
    if authenticateStaff():
        airline = getStaffAirline()
        begintime = request.form['begintime']
        endtime = request.form['endtime']
        # validateDates is a shared helper (defined elsewhere in this app)
        if not validateDates(begintime, endtime):
            error = 'Invalid date range'
            return redirect(url_for('viewReportsPage', error=error))
        cursor = conn.cursor()
        query = 'select count(ticket_id) as sales \
            from purchases natural join ticket where airline_name=%s\
            and purchase_date between %s and %s'
        cursor.execute(query, (airline, begintime, endtime))
        data = cursor.fetchall()
        cursor.close()
        return render_template('viewReportsDate.html', sales=data[0]['sales'], begintime=begintime, endtime=endtime)
    else:
        error = "Invalid Credentials"
        return render_template('error.html',error=error)
@app.route('/staffHome/viewReports/past', methods=['POST'])
def viewReportsPast():
    """Report total ticket sales for the past month or year."""
    if not authenticateStaff():
        error = "Invalid Credentials"
        return render_template('error.html',error=error)
    daterange = request.form['range']
    # `daterange` is spliced into the SQL text (MySQL cannot bind an INTERVAL
    # unit as a parameter), so whitelist it to block SQL injection from the form
    if daterange not in ('month', 'year'):
        return redirect(url_for('viewReportsPage', error='Invalid date range'))
    airline = getStaffAirline()
    cursor = conn.cursor()
    query = 'select count(ticket_id) as sales \
        from purchases natural join ticket where airline_name=%s \
        and purchase_date >= date_sub(curdate(), interval 1 ' + daterange + ')'
    cursor.execute(query, (airline))
    data = cursor.fetchall()
    cursor.close()
    return render_template('viewReportsPast.html', sales=data[0]['sales'], datetime=daterange)
@app.route('/staffHome/ComparisonRevenue')
def ComparisonRevenue():
    """Render pie charts comparing direct vs. agent-booked ticket revenue.

    Builds two pyecharts pies: one for the last month and one for the last
    year. Direct revenue means purchases with no booking_agent_id; indirect
    means purchases made through an agent. If either last-month figure is
    NULL (no matching sales), an apology message is rendered instead.
    """
    if authenticateStaff():
        username = session['username']
        error = None
        # query for airline_name the staff works for
        cursor = conn.cursor()
        queryGetairline = "SELECT airline_name FROM airline_staff WHERE username= %s"
        cursor.execute(queryGetairline, username)
        airline_name = cursor.fetchone()['airline_name']
        # query for direct purchase revenue (last month)
        query1 = "select sum(flight.price) as rev\
                from purchases, ticket, flight\
                where purchases.ticket_id = ticket.ticket_id \
                and ticket.flight_num = flight.flight_num\
                and ticket.airline_name = flight.airline_name\
                and flight.airline_name = %s\
                and purchases.purchase_date between DATE_SUB(curdate(), INTERVAL 1 MONTH) and curdate()\
                and purchases.booking_agent_id is null"
        cursor.execute(query1,str(airline_name))
        direct_revenue = cursor.fetchone()['rev']
        # query for indirect purchase revenue (last month)
        query2 = "select sum(flight.price) as rev\
                from purchases, ticket, flight\
                where purchases.ticket_id = ticket.ticket_id \
                and ticket.flight_num = flight.flight_num\
                and ticket.airline_name = flight.airline_name\
                and flight.airline_name = %s\
                and purchases.purchase_date between DATE_SUB(curdate(), INTERVAL 1 MONTH) and curdate()\
                and purchases.booking_agent_id is not null"
        cursor.execute(query2,str(airline_name))
        indirect_revenue = cursor.fetchone()['rev']
        #draw the pie chart (last month)
        x_data = ['Direct Revenue','Indirect Revenue']
        y_data = [direct_revenue,indirect_revenue]
        data_pair = [list(z) for z in zip(x_data, y_data)]
        c1 = (
            Pie()
            .add('',[d for d in data_pair])
            .set_global_opts(title_opts=opts.TitleOpts(title="Revenue Comparison",
                                                       subtitle = "Last Month"),
                            legend_opts=opts.LegendOpts(pos_right="15%"))
            .set_series_opts(label_opts=opts.LabelOpts(formatter="{b}: {c}"))
        )
        #Customized pie (a fancier version pie chart)
        # c1 = (
        #     Pie()
        #     .add(
        #         series_name="Revenue Source",
        #         data_pair=data_pair,
        #         rosetype="radius",
        #         radius="55%",
        #         center=["50%", "50%"],
        #         label_opts=opts.LabelOpts(is_show=False, position="center"),
        #     )
        #     .set_global_opts(
        #         title_opts=opts.TitleOpts(
        #             title="Revenue Source (last month)",
        #             pos_left="center",
        #             pos_top="20",
        #             title_textstyle_opts=opts.TextStyleOpts(color="black"),
        #         ),
        #         legend_opts=opts.LegendOpts(is_show=False),
        #     )
        #     .set_series_opts(
        #         tooltip_opts=opts.TooltipOpts(
        #             trigger="item", formatter="{a} <br/>{b}: {c} ({d}%)"
        #         ),
        #         label_opts=opts.LabelOpts(color="rgba(0,0,0,255)"),
        #     )
        # )
        # query for direct purchase revenue (last year)
        query1_ = "select sum(flight.price) as rev\
                from purchases, ticket, flight\
                where purchases.ticket_id = ticket.ticket_id \
                and ticket.flight_num = flight.flight_num\
                and ticket.airline_name = flight.airline_name\
                and flight.airline_name = %s\
                and purchases.purchase_date between DATE_SUB(curdate(), INTERVAL 1 YEAR) and curdate()\
                and purchases.booking_agent_id is null"
        cursor.execute(query1_,str(airline_name))
        direct_revenue_ = cursor.fetchone()['rev']
        # query for indirect purchase revenue (last month)
        query2_ = "select sum(flight.price) as rev\
                from purchases, ticket, flight\
                where purchases.ticket_id = ticket.ticket_id \
                and ticket.flight_num = flight.flight_num\
                and ticket.airline_name = flight.airline_name\
                and flight.airline_name = %s\
                and purchases.purchase_date between DATE_SUB(curdate(), INTERVAL 1 YEAR) and curdate()\
                and purchases.booking_agent_id is not null"
        cursor.execute(query2_,str(airline_name))
        indirect_revenue_ = cursor.fetchone()['rev']
        cursor.close()
        #draw the pie chart (last month)
        x_data_ = ['Direct Revenue','Indirect Revenue']
        y_data_ = [direct_revenue_,indirect_revenue_]
        data_pair_ = [list(z) for z in zip(x_data_, y_data_)]
        c2 = (
            Pie()
            .add('',[d for d in data_pair_])
            .set_global_opts(title_opts=opts.TitleOpts(title="Revenue Comparison",
                                                       subtitle = "Last Year"),
                            legend_opts=opts.LegendOpts(pos_right="15%"))
            .set_series_opts(label_opts=opts.LabelOpts(formatter="{b}: {c}"))
        )
        # NOTE(review): the guard only checks the last-month figures; a month
        # with zero direct or indirect sales hides the yearly chart as well
        if direct_revenue and indirect_revenue:
            return render_template('ComparisonRevenue.html',
                                    pie_options1 = c1.dump_options(),
                                    pie_options2 = c2.dump_options())
        else:
            error = 'Sorry! No data available Right Now.'
            return render_template('ComparisonRevenue.html',error = error)
    else:
        error = "Invalid Credentials"
        return render_template('error.html',error=error)
255a812a0890850fc537c0377c504771edd7d281 | 261 | py | Python | app/main.py | athul/jimbru | bc22449dbfbea19d9605e6271a154dbc7037bafb | [
"MIT"
] | 42 | 2020-11-12T11:34:29.000Z | 2022-01-17T11:40:29.000Z | app/main.py | athul/jimbru | bc22449dbfbea19d9605e6271a154dbc7037bafb | [
"MIT"
] | 1 | 2021-06-09T11:41:49.000Z | 2021-06-09T11:41:49.000Z | app/main.py | athul/jimbru | bc22449dbfbea19d9605e6271a154dbc7037bafb | [
"MIT"
] | 2 | 2021-03-17T18:16:15.000Z | 2021-06-08T17:29:38.000Z | from fastapi import FastAPI
try:
    # flat import when the app is run as a top-level script
    from routes import analytics, templates, auth
except ImportError:  # was a bare `except:`, which hid unrelated failures
    # package-relative import when the app is imported as a package module
    from .routes import analytics, templates, auth
app = FastAPI()
app.include_router(analytics.router)
app.include_router(templates.router)
app.include_router(auth.authr) | 21.75 | 48 | 0.800766 |
255c0c175a4464c1c647d5f7bc88a0e78a8ca610 | 873 | py | Python | tests/test_query_releases.py | hysds/hysds-framework | 701fdf39f1fdb71bcb5c2f6fb6a81da2778fccc0 | [
"Apache-2.0"
] | 1 | 2020-02-09T14:15:11.000Z | 2020-02-09T14:15:11.000Z | tests/test_query_releases.py | hysds/hysds-framework | 701fdf39f1fdb71bcb5c2f6fb6a81da2778fccc0 | [
"Apache-2.0"
] | 12 | 2018-04-16T09:09:40.000Z | 2020-04-15T07:09:15.000Z | tests/test_query_releases.py | hysds/hysds-framework | 701fdf39f1fdb71bcb5c2f6fb6a81da2778fccc0 | [
"Apache-2.0"
] | 7 | 2018-04-07T01:43:48.000Z | 2020-07-23T08:12:37.000Z | from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from builtins import str
from future import standard_library
standard_library.install_aliases()
import query_releases
import unittest
class TestQueryReleases(unittest.TestCase):
    def test_parse_url(self):
        """
        Tests the parse_url function
        """
        base_url = "https://test.url"
        # a URL without credentials yields no token and the URL unchanged
        token, url = query_releases.parse_url(base_url)
        self.assertIsNone(token)
        self.assertEqual(url, base_url)
        tokenized_url = "https://token@test.url"  # credential-bearing variant of base_url
        # the embedded token is extracted and stripped from the URL
        token, url = query_releases.parse_url(tokenized_url)
        self.assertEqual(token, "token")
        self.assertEqual(url, base_url)
# Run the unit tests when this file is executed directly
if __name__ == "__main__":
    unittest.main()
| 27.28125 | 86 | 0.719359 |
255d364d93e590ec6297742e59e58cf8fe8ad6e3 | 1,211 | py | Python | products/Introduction_to_Computational_Science_Modules/02_System_Dynamics/iPythonSysDyn/simplePendulum.py | wmmurrah/computationalScience | a4d7df6b50f2ead22878ff68bfe39c5adb88bbbb | [
"W3C"
] | null | null | null | products/Introduction_to_Computational_Science_Modules/02_System_Dynamics/iPythonSysDyn/simplePendulum.py | wmmurrah/computationalScience | a4d7df6b50f2ead22878ff68bfe39c5adb88bbbb | [
"W3C"
] | null | null | null | products/Introduction_to_Computational_Science_Modules/02_System_Dynamics/iPythonSysDyn/simplePendulum.py | wmmurrah/computationalScience | a4d7df6b50f2ead22878ff68bfe39c5adb88bbbb | [
"W3C"
] | null | null | null | # simplePendulum.py
# Model of a simple pendulum
import math
def simplePendulum(length = 1, angle = math.pi/4, angular_velocity = 0, DT = 0.0001, simLength = 12):
    """
    Simulate a simple (undamped) pendulum by Euler-style time stepping.

    Args:
        length: pendulum length (SI units assumed; g = 9.81 m/s^2 below).
        angle: initial angular displacement in radians.
        angular_velocity: initial angular velocity in rad/s.
        DT: integration time step in seconds.
        simLength: total simulated time in seconds.

    Returns:
        Four parallel lists sampled every DT seconds:
        (times, angles, angular velocities, angular accelerations).
    """
    numIterations = int(simLength/DT) + 1
    # gravitational acceleration (m/s^2)
    g = 9.81
    angle_change = angular_velocity
    # equation of motion: theta'' = -(g/L) * sin(theta)
    angular_acceleration = -g * math.sin(angle)/length
    t = 0
    timeLst = [0]
    angleLst = [angle]
    angular_velocityLst = [angular_velocity]
    angular_accelerationLst = [angular_acceleration]
    for i in range(1, numIterations):
        t = i * DT
        timeLst.append(t)
        # advance angle with the velocity from the previous step,
        # then velocity with the previous acceleration (explicit update)
        angle = angle + (angle_change) * DT
        angleLst.append(angle)
        angular_velocity = angular_velocity + (angular_acceleration) * DT
        angular_velocityLst.append(angular_velocity)
        angle_change = angular_velocity
        angular_acceleration = -g * math.sin(angle)/length
        angular_accelerationLst.append(angular_acceleration)
    return timeLst, angleLst, angular_velocityLst, angular_accelerationLst | 1,211 | 1,211 | 0.560694 |
255d6b11cbe644a12928787786f04d1940067a84 | 303 | py | Python | setup.py | StrykerKKD/dropbox-backup | 8ee692ef1de5be1e3257a627dc268b331694b2b8 | [
"MIT"
] | null | null | null | setup.py | StrykerKKD/dropbox-backup | 8ee692ef1de5be1e3257a627dc268b331694b2b8 | [
"MIT"
] | null | null | null | setup.py | StrykerKKD/dropbox-backup | 8ee692ef1de5be1e3257a627dc268b331694b2b8 | [
"MIT"
] | null | null | null | from setuptools import setup
setup(
name='dropboxbackup',
version='0.1',
py_modules=['dropboxbackup'],
install_requires=[
'click',
'dropbox',
'simple-crypt'
],
entry_points='''
[console_scripts]
dropboxbackup=dropboxbackup:cli
''',
)
| 17.823529 | 39 | 0.574257 |
255e4c40128d8dd3b9bb2375467740bbfa0ffbee | 6,896 | py | Python | scripts/example_tvm_tune.py | AndrewZhaoLuo/CenterFaceTVMDemo | 4c9d63d502b33b7b13666258a7da97e909de4b36 | [
"MIT"
] | 5 | 2021-12-25T10:18:07.000Z | 2022-02-20T00:24:41.000Z | scripts/example_tvm_tune.py | AndrewZhaoLuo/CenterFaceTVMDemo | 4c9d63d502b33b7b13666258a7da97e909de4b36 | [
"MIT"
] | 2 | 2022-01-16T10:12:07.000Z | 2022-03-22T00:34:26.000Z | scripts/example_tvm_tune.py | AndrewZhaoLuo/CenterFaceTVMDemo | 4c9d63d502b33b7b13666258a7da97e909de4b36 | [
"MIT"
] | null | null | null | from os import path
from shutil import copyfile
import tvm
from tvm import relay
from tvm.driver import tvmc
from tvm.driver.tvmc.model import TVMCModel
from tvm.relay.transform import InferType, ToMixedPrecision
"""Copy pasted mostly from:
https://github.com/AndrewZhaoLuo/TVM-Sandbox/blob/bb209e8845440ed9f40af1b2580618196c939745/fp16_pass/benchmark_fp16.py#L1
Creates centerface autoscheduler log files, which are included in this repo so you
don't have to spend 24 hrs running the tuning script!
Run on a 2020, 13-inch macbook pro (m1 mac)
FP32:
Processing centerface_autoscheduler_30000kt_fp32_llvm
Execution time summary:
mean (ms) median (ms) max (ms) min (ms) std (ms)
33.8869 33.6213 35.0154 33.1292 0.7192
Output Names:
['output_0', 'output_1', 'output_2', 'output_3']
FP16:
Processing centerface_autoscheduler_10000kt_fp16_llvm
Execution time summary:
mean (ms) median (ms) max (ms) min (ms) std (ms)
22.3274 22.2959 23.4356 21.7442 0.4560
Output Names:
['output_0', 'output_1', 'output_2', 'output_3']
"""
def load_model(name, **kwargs):
    """Load a model file from the local ``models`` directory via TVMC."""
    model_path = path.join("models", name)
    return tvmc.load(model_path, **kwargs)
def graph_optimize(
    tvmc_model, run_fp16_pass, run_other_opts, try_nhwc_layout=False, fast_math=True
):
    """
    Apply Relay graph-level optimizations to a TVMC model.

    Args:
        tvmc_model: TVMCModel whose ``mod``/``params`` are optimized.
        run_fp16_pass: if True, run the ToMixedPrecision (FP16) pass.
        run_other_opts: if True, run CSE / constant folding / param binding /
            parallel batch-matmul combining (and FastMath when enabled).
        try_nhwc_layout: if True, convert conv/resize/roi ops to NHWC layout.
        fast_math: if True, allow the FastMath approximation pass.

    Returns:
        A new TVMCModel wrapping the optimized module and the original params.
    """
    mod, params = tvmc_model.mod, tvmc_model.params
    # Rebuild the module from "main" so downstream passes see a fresh IRModule.
    mod = tvm.IRModule.from_expr(mod["main"])
    # nhwc is typically better for autoscheduler -- more schedules available
    # also winograd is only available for nhwc
    if try_nhwc_layout:
        desired_layouts = {
            "nn.conv2d": ["NHWC", "default"],
            "nn.conv2d_transpose": ["NHWC", "default"],
            "nn.upsampling": ["NHWC", "default"],
            "image.resize2d": ["NHWC", "default"],
            "vision.roi_align": ["NHWC", "default"],
        }
        with tvm.transform.PassContext(
            opt_level=3, config={"relay.backend.use_auto_scheduler": True}
        ):
            # Types must be inferred before ConvertLayout can run.
            mod = relay.transform.InferType()(mod)
            mod = relay.transform.ConvertLayout(desired_layouts)(mod)
            mod = tvm.relay.transform.EliminateCommonSubexpr()(mod)
            mod = tvm.relay.transform.FoldConstant()(mod)
    if run_other_opts:
        mod = tvm.relay.transform.FastMath()(mod) if fast_math else mod
        mod = tvm.relay.transform.EliminateCommonSubexpr()(mod)
        # Bind the weights into the graph so constant folding can see them.
        BindPass = tvm.relay.transform.function_pass(
            lambda fn, new_mod, ctx: tvm.relay.build_module.bind_params_by_name(
                fn, params
            ),
            opt_level=1,
        )
        mod = BindPass(mod)
        mod = tvm.relay.transform.FoldConstant()(mod)
        mod = tvm.relay.transform.CombineParallelBatchMatmul()(mod)
        mod = tvm.relay.transform.FoldConstant()(mod)
    if run_fp16_pass:
        mod = InferType()(mod)
        mod = ToMixedPrecision()(mod)
    if run_other_opts and run_fp16_pass:
        # run one more round of cleanup passes on the new FP16 subgraph
        mod = tvm.relay.transform.EliminateCommonSubexpr()(mod)
        mod = tvm.relay.transform.FoldConstant()(mod)
        mod = tvm.relay.transform.CombineParallelBatchMatmul()(mod)
        mod = tvm.relay.transform.FoldConstant()(mod)
        mod = tvm.relay.transform.FastMath()(mod) if fast_math else mod
    return TVMCModel(mod, params)
def benchmark_model(
    model_func,
    name,
    run_fp16_pass=True,
    run_other_opts=True,
    enable_autoscheduler=False,
    try_nhwc_layout=False,
    target="llvm",
    target_host="llvm",
    tuning_trials=10000,
    tuning_repeat_trials=5,
    measure_number=100,
    measure_repeats=10,
):
    """
    Tune, compile and benchmark one model variant, saving the tuning log.

    Args:
        model_func: callable returning a TVMCModel (e.g. get_centerface);
            called with run_pass / run_opts / try_nhwc_layout keywords.
        name: label used for printing and for the tuning_logs/<name>.log file.
        run_fp16_pass / run_other_opts / try_nhwc_layout: forwarded to model_func.
        enable_autoscheduler: use the auto-scheduler instead of AutoTVM tuning.
        target / target_host: TVM target strings for compilation and tuning.
        tuning_trials / tuning_repeat_trials: tuning search budget.
        measure_number / measure_repeats: benchmark measurement settings.
    """
    print("*" * 30, name, "*" * 30)
    print("FP16 pass" if run_fp16_pass else "FP32 pass")
    """Get Module"""
    tvmc_model = model_func(
        run_pass=run_fp16_pass, run_opts=run_other_opts, try_nhwc_layout=try_nhwc_layout
    )
    print("Final relay mod:")
    print(tvmc_model.mod)
    tvmc_model = tvmc_model
    tuning_records = tvmc.tune(
        tvmc_model,
        target=target,
        enable_autoscheduler=enable_autoscheduler,
        trials=tuning_trials,
        repeat=tuning_repeat_trials,
        tuner="xgb_knob",
        target_host=target_host,
    )
    # Persist the tuning log so later compile-only runs can reuse it.
    copyfile(tuning_records, f"tuning_logs/{name}.log")
    # Create package artifacts
    package = tvmc.compile(tvmc_model, target=target, tuning_records=tuning_records)
    result = tvmc.run(
        package,
        device="cpu" if "llvm" in target else target,
        # NOTE(review): measure_number feeds `repeat` and measure_repeats feeds
        # `number` — likely swapped (compare benchmark_and_compile_so_and_whl);
        # confirm intent before changing.
        repeat=measure_number,
        number=measure_repeats,
    )
    print(result)
    print()
def get_centerface(run_pass=True, run_opts=True, try_nhwc_layout=False):
    """Load the CenterFace ONNX model and apply the Relay graph optimizations."""
    model = load_model("centerface.onnx")
    optimized = graph_optimize(model, run_pass, run_opts, try_nhwc_layout=try_nhwc_layout)
    return optimized
def benchmark_and_compile_so_and_whl(
    model_func,
    name,
    run_fp16_pass=True,
    run_other_opts=True,
    try_nhwc_layout=False,
    target="llvm",
):
    """Compile a model from a saved tuning log, benchmark it and archive the package."""
    print(f"Processing {name}")
    model = model_func(
        run_pass=run_fp16_pass,
        run_opts=run_other_opts,
        try_nhwc_layout=try_nhwc_layout,
    )
    # Reuse the tuning log produced by a previous autoscheduler run.
    log_path = f"tuning_logs/{name}.log"
    compiled = tvmc.compile(model, target=target, tuning_records=log_path)
    run_device = "cpu" if "llvm" in target else target
    benchmark = tvmc.run(
        compiled,
        device=run_device,
        repeat=10,
        number=100,
    )
    print(benchmark)
    print()
    copyfile(compiled.package_path, f"compiled_packages/{name}.tar")
# Script entry point: autoschedule + benchmark CenterFace in FP16 and FP32,
# then re-compile both variants from the saved tuning logs and archive them.
if __name__ == "__main__":
    # Tune and benchmark the FP16 variant (30k tuning trials).
    benchmark_model(
        get_centerface,
        "centerface_autoscheduler_30000kt_fp16_llvm",
        run_fp16_pass=True,
        run_other_opts=True,
        enable_autoscheduler=True,
        try_nhwc_layout=True,
        tuning_trials=30000,
        target="llvm -mcpu=apple-latest -mtriple=arm64-apple-macos",
        target_host="llvm -mcpu=apple-latest -mtriple=arm64-apple-macos",
    )
    # Tune and benchmark the FP32 baseline with the same budget.
    benchmark_model(
        get_centerface,
        "centerface_autoscheduler_30000kt_fp32_llvm",
        run_fp16_pass=False,
        run_other_opts=True,
        enable_autoscheduler=True,
        try_nhwc_layout=True,
        tuning_trials=30000,
        target="llvm -mcpu=apple-latest -mtriple=arm64-apple-macos",
        target_host="llvm -mcpu=apple-latest -mtriple=arm64-apple-macos",
    )
    # Re-compile both variants from the logs saved above and archive the packages.
    benchmark_and_compile_so_and_whl(
        get_centerface,
        "centerface_autoscheduler_30000kt_fp16_llvm",
        run_fp16_pass=True,
        run_other_opts=True,
        try_nhwc_layout=True,
        target="llvm -mcpu=apple-latest -mtriple=arm64-apple-macos",
    )
    benchmark_and_compile_so_and_whl(
        get_centerface,
        "centerface_autoscheduler_30000kt_fp32_llvm",
        run_fp16_pass=False,
        run_other_opts=True,
        try_nhwc_layout=True,
        target="llvm -mcpu=apple-latest -mtriple=arm64-apple-macos",
    )
| 32.074419 | 121 | 0.664588 |
255ead5625498b81a0e784e802611ba152b63d6e | 1,547 | py | Python | mod_flan_doodle.py | AndrewWayne/bot-flandre | 6c14c96e55c99ec7961216c8cafbc46f62700bbe | [
"Apache-2.0"
] | null | null | null | mod_flan_doodle.py | AndrewWayne/bot-flandre | 6c14c96e55c99ec7961216c8cafbc46f62700bbe | [
"Apache-2.0"
] | null | null | null | mod_flan_doodle.py | AndrewWayne/bot-flandre | 6c14c96e55c99ec7961216c8cafbc46f62700bbe | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
XJB Generate Images Module (/doodle)
Created on Sun Sep 1 16:03:16 2019
@author: user
"""
import os
import asyncio
import uuid
import tg_connection
# Path where the WGAN generator drops its finished sample image.
gen_path = "D:/AndroidProjects/ScarletKindom/flandre-generator/wgan/sample.png"
# Base directory used to exchange sketch input/output files with the generator.
inp_base = "D:/AndroidProjects/ScarletKindom/flandre-generator/wgan/"
# User ids that have been prompted (via /sketchr) to send a sketch.
sketchr_query = set()
def get_pf_path():
    """Return a fresh (input, output) path pair for one sketch exchange."""
    token = str(uuid.uuid4())
    input_path = inp_base + "sketch-input-" + token
    output_path = inp_base + "sketch-output-" + token + ".txt"
    return input_path, output_path
async def command_doodle(session, chat_id):
    """Wait for the generator to emit a sample image, then post it to the chat."""
    # Poll until the generator process writes its output file.
    while not os.path.exists(gen_path):
        await asyncio.sleep(0.6)
    with open(gen_path, "rb") as image_file:
        image_bytes = image_file.read(-1)
    # Consume the file so the next /doodle request triggers a fresh sample.
    os.unlink(gen_path)
    await tg_connection.send_photo_by_contents(session, chat_id, image_bytes)
async def command_sketchr(session, chat_id, from_user):
    """Register from_user as awaiting a sketch upload and prompt them for one."""
    # Register first so the user's next photo is routed to the sketch callback.
    sketchr_query.add(from_user)
    await tg_connection.send_msg(session, chat_id, "Send me a sketch!")
async def command_sketchr_clbk(session, chat_id, frid, msg):
    """Process a sketch photo from a user who previously issued /sketchr."""
    # Guard clause: ignore users that never asked for sketch processing.
    if frid not in sketchr_query:
        return
    sketchr_query.remove(frid)
    if "photo" in msg:
        photo_id = msg["photo"][0]["file_id"]
        photo_bytes = await tg_connection.get_file_contents(session, photo_id)
        input_path, output_path = get_pf_path()
        with open(input_path, "wb") as sketch_file:
            sketch_file.write(photo_bytes)
        # The generator consumes the input file; wait until it disappears.
        while os.path.exists(input_path):
            await asyncio.sleep(0.6)
        with open(output_path, "rt") as result_file:
            result_text = result_file.read(-1)
        await tg_connection.send_msg(session, chat_id, result_text)
        os.unlink(output_path)
| 27.625 | 85 | 0.659341 |
255eb03b3149f28db58ee09e23382f4784f486dd | 362 | py | Python | leadmanager/leads/views.py | mydjangoandreactprojects/lead-manager | 844c655dcd1010fb0b1cd889ddc94872aa4f15a0 | [
"MIT"
] | 1 | 2020-03-26T06:25:47.000Z | 2020-03-26T06:25:47.000Z | leadmanager/leads/views.py | mydjangoandreactprojects/lead-manager | 844c655dcd1010fb0b1cd889ddc94872aa4f15a0 | [
"MIT"
] | null | null | null | leadmanager/leads/views.py | mydjangoandreactprojects/lead-manager | 844c655dcd1010fb0b1cd889ddc94872aa4f15a0 | [
"MIT"
] | null | null | null | from rest_framework import viewsets, permissions
from leads.serializers import LeadSerializer
from leads.models import Lead
class LeadViewSet(viewsets.ModelViewSet):
    """Manage CRUD operations for Leads in the database"""
    # Expose every Lead; ModelViewSet supplies list/retrieve/create/update/destroy.
    queryset = Lead.objects.all()
    # NOTE(review): AllowAny leaves the endpoint unauthenticated — confirm intended.
    permission_classes = [
        permissions.AllowAny
    ]
    serializer_class = LeadSerializer
| 25.857143 | 58 | 0.756906 |
255edfec817ac332c0a59a30e33ffe4ca99dbfbc | 207 | py | Python | app/main/errors.py | BABAYAGI/newsapi | 6127d51e702983f2928849bef08c5920f7d06a96 | [
"MIT"
] | 1 | 2019-10-15T08:16:17.000Z | 2019-10-15T08:16:17.000Z | app/main/errors.py | BABAYAGI/newsapi | 6127d51e702983f2928849bef08c5920f7d06a96 | [
"MIT"
] | null | null | null | app/main/errors.py | BABAYAGI/newsapi | 6127d51e702983f2928849bef08c5920f7d06a96 | [
"MIT"
] | null | null | null | from flask import render_template
from . import main
# Registered on the `main` blueprint as the application-wide 404 handler.
@main.app_errorhandler(404)
def fo_O_fo(error):
    """
    Render the custom "page not found" template with a 404 status code.
    """
    return render_template('fo_O_fo.html'), 404 | 23 | 47 | 0.714976 |
255f08813afc83e4a9438097dc0b9eb5bb612867 | 408 | py | Python | 2020/network/network/models.py | 133794m3r/cs50-web | 1f695cd7fb4ec368ec45e0d3154dd7eebc2c81e2 | [
"MIT"
] | null | null | null | 2020/network/network/models.py | 133794m3r/cs50-web | 1f695cd7fb4ec368ec45e0d3154dd7eebc2c81e2 | [
"MIT"
] | null | null | null | 2020/network/network/models.py | 133794m3r/cs50-web | 1f695cd7fb4ec368ec45e0d3154dd7eebc2c81e2 | [
"MIT"
] | null | null | null | from django.contrib.auth.models import AbstractUser
from django.db import models
class User(AbstractUser):
    """Site user extended with an asymmetric follower/following relationship."""
    # symmetrical=False: A following B does not make B follow A.
    followers = models.ManyToManyField('self',symmetrical=False,related_name='following')
class Post(models.Model):
    """A user's post with its content, timestamp and the users who liked it."""
    # NOTE(review): named `username` but is a ForeignKey to User (the author), not a string.
    username = models.ForeignKey(User,on_delete=models.CASCADE,related_name='posts')
    content = models.TextField()
    # NOTE(review): no auto_now_add — the timestamp must be supplied explicitly.
    datetime = models.DateTimeField()
    likes = models.ManyToManyField(User)
| 31.384615 | 86 | 0.801471 |
255f1d23b8f394dc79d9946c976e6a08c2991d2e | 18,476 | py | Python | Collage_generator/_insertion.py | alexliyihao/AAPI_code | 81c6cc40a9efb4d4fedf6678c27aac83f5057a70 | [
"MIT"
] | 2 | 2020-11-29T17:00:52.000Z | 2022-01-06T19:24:23.000Z | Collage_generator/_insertion.py | alexliyihao/AAPI_code | 81c6cc40a9efb4d4fedf6678c27aac83f5057a70 | [
"MIT"
] | null | null | null | Collage_generator/_insertion.py | alexliyihao/AAPI_code | 81c6cc40a9efb4d4fedf6678c27aac83f5057a70 | [
"MIT"
] | null | null | null | import PIL.Image as Img
import numpy as np
from tqdm.notebook import tqdm
from PIL import ImageFilter
import tables
import time
import gc
"""
All the insert/append functions for the collage generator.
_canvas_append performs the actual insertion; the remaining methods implement the add-point selection logic.
"""
class _insertion():
    """Mixin with the insert/append routines used by the collage generator:
    _canvas_append does the actual pasting; the other methods pick add points."""
    def _canvas_append(self,
                       canvas: np.ndarray,
                       add_point: np.ndarray,
                       img: np.ndarray,
                       mask: np.ndarray = None,
                       mask_label: int = None,
                       mode = "label",
                       format = "pixel"):
        """
        The actual working part: paste a vignette image onto a canvas.
        Args:
          canvas: np.ndarray, 3-channel canvas
          add_point: np.ndarray, (row, col) top-left corner where img is pasted
          img: np.ndarray, 3-channel vignette to be added
          mask: np.ndarray (optional), 1-channel or 4-channel mask paired with canvas
          mask_label: int or np.ndarray, the value written onto the mask
          mode: str, "label" or "pattern" — how the mask is overwritten:
            "label": write the integer mask_label
            "pattern": copy the np.ndarray passed as mask_label
          format: str, "pixel" or "COCO" — how the mask stores new vignettes:
            "pixel": every object's label shares one channel
            "COCO": each object also gets a distinct color on 3 extra channels
        Return:
          canvas: np.ndarray of 3 channels, the canvas with img added.
          mask (only when a mask was given):
            if format is "pixel", 1-channel mask with img's label added
            if format is "COCO", 4-channel mask with img's label and color added
        """
        assert format in ["pixel", "COCO"]
        # if there's no mask (preview/background)
        if type(mask) != np.ndarray:
            # add img to canvas; wherever canvas is already non-zero, skip the pixel
            np.add(canvas[add_point[0]:add_point[0]+img.shape[0], add_point[1]:add_point[1]+img.shape[1]],
                   img,
                   where = (canvas[add_point[0]:add_point[0]+img.shape[0], add_point[1]:add_point[1]+img.shape[1]] == 0),
                   out = canvas[add_point[0]:add_point[0]+img.shape[0], add_point[1]:add_point[1]+img.shape[1]],
                   casting = "unsafe")
            # return canvas
            return canvas
        # if there's a mask: by the logic of the callers below we directly add
        # these values onto a 0-filled region of canvas and mask
        else:
            if format == "pixel":
                # add image to canvas
                np.add(canvas[add_point[0]:add_point[0]+img.shape[0], add_point[1]:add_point[1]+img.shape[1]],
                       img,
                       out = canvas[add_point[0]:add_point[0]+img.shape[0], add_point[1]:add_point[1]+img.shape[1]],
                       casting = "unsafe")
                # add label to mask
                if mode == "label":
                    # label mode: add the integer label on every non-zero pixel of img
                    np.add(mask[add_point[0]:add_point[0]+img.shape[0], add_point[1]:add_point[1]+img.shape[1]],
                           mask_label*np.any(img, axis = 2),
                           out = mask[add_point[0]:add_point[0]+img.shape[0], add_point[1]:add_point[1]+img.shape[1]],
                           casting = "unsafe")
                else:
                    # pattern mode: copy the whole label pattern onto the mask
                    np.add(mask[add_point[0]:add_point[0]+img.shape[0], add_point[1]:add_point[1]+img.shape[1]],
                           mask_label,
                           out = mask[add_point[0]:add_point[0]+img.shape[0], add_point[1]:add_point[1]+img.shape[1]],
                           casting = "unsafe")
                return canvas, mask
            # if we are building in COCO mode
            if format == "COCO":
                if mode == "label":
                    # generate a new, unused color for this object
                    _new_color, self.existed_color = self._generate_new_color(self.existed_color)
                    self.color_dict[str(tuple(_new_color.tolist()))] = mask_label
                    # COCO layout: channel 0 works as the pixel mask, channels 1-3
                    # hold the per-object RGB; channel 0 is removed at COCO export
                    if mask.ndim == 2:
                        # a 2-D mask must be the starting mask:
                        # append 3 new zero layers as the RGB recording
                        mask = np.stack((mask, np.zeros_like(mask),np.zeros_like(mask),np.zeros_like(mask)), axis = -1)
                    # add image to canvas, then write label/color to the mask channels
                    np.add(canvas[add_point[0]:add_point[0]+img.shape[0], add_point[1]:add_point[1]+img.shape[1]],
                           img,
                           out = canvas[add_point[0]:add_point[0]+img.shape[0], add_point[1]:add_point[1]+img.shape[1]],
                           casting="unsafe")
                    np.add(mask[add_point[0]:add_point[0]+img.shape[0], add_point[1]:add_point[1]+img.shape[1],0],
                           mask_label*np.any(img, axis = 2),
                           out = mask[add_point[0]:add_point[0]+img.shape[0], add_point[1]:add_point[1]+img.shape[1],0],
                           casting="unsafe")
                    # third positional argument below is np.add's `out`
                    np.add(mask[add_point[0]:add_point[0]+img.shape[0], add_point[1]:add_point[1]+img.shape[1],1:4],
                           np.any(img, axis = 2, keepdims = True)*_new_color,
                           mask[add_point[0]:add_point[0]+img.shape[0], add_point[1]:add_point[1]+img.shape[1],1:4],
                           casting="unsafe")
                    return canvas, mask
                else:
                    # pattern mode in COCO format: copy the whole 4-channel pattern
                    if mask.ndim == 2:
                        mask = np.stack((mask, np.zeros_like(mask),np.zeros_like(mask),np.zeros_like(mask)), axis = -1)
                    np.add(canvas[add_point[0]:add_point[0]+img.shape[0], add_point[1]:add_point[1]+img.shape[1]],
                           img,
                           out = canvas[add_point[0]:add_point[0]+img.shape[0], add_point[1]:add_point[1]+img.shape[1]],
                           casting="unsafe")
                    np.add(mask[add_point[0]:add_point[0]+img.shape[0], add_point[1]:add_point[1]+img.shape[1]],
                           mask_label,
                           out = mask[add_point[0]:add_point[0]+img.shape[0], add_point[1]:add_point[1]+img.shape[1]],
                           casting="unsafe")
                    return canvas, mask
    def _init_insert(self,
                     img: np.ndarray,
                     canvas: np.ndarray,
                     mask: np.ndarray,
                     label: int,
                     mode = "pattern",
                     format = "pixel"):
        """
        Find a random legal position in canvas and append img to canvas and mask
        without checking for overlap (used for the first placement).
        Args:
          img: np.ndarray of 3 channels, the vignette to be added
          canvas: np.ndarray of 3 channels, the canvas
          mask: 2d/4-channel np.ndarray, the mask
          label: the label to be added
          mode: str, "label" or "pattern", see mode in _canvas_append()
          format: str, see format in _canvas_append()
        Return:
          canvas: np.ndarray of 3 channels, the canvas with img added.
          mask: np.ndarray, the mask with img's label added.
        """
        _outer_bound = (canvas.shape[0] - img.shape[0], canvas.shape[1] - img.shape[1])
        # select an initial add_point, keeping a _scanning_constant margin
        _add_point = np.array((np.random.randint(low = self._scanning_constant,
                                                 high = _outer_bound[0] - self._scanning_constant),
                               np.random.randint(low = self._scanning_constant,
                                                 high = _outer_bound[1] - self._scanning_constant)))
        # create a binary mask of the img (computed but unused in this method)
        _img_mask = np.any(img, axis = 2)
        # directly use the _add_point
        canvas, mask = self._canvas_append(canvas = canvas,
                                           add_point = _add_point,
                                           img = img,
                                           mask = mask,
                                           mask_label = label,
                                           mode = mode,
                                           format = format)
        return canvas, mask
    def _secondary_insert(self,
                          img: np.ndarray,
                          canvas: np.ndarray,
                          mask: np.ndarray,
                          label: int,
                          patience: int,
                          mode = "label",
                          format = "pixel"):
        """
        Find a random non-overlapping position in canvas and append img to
        canvas and mask, retrying up to `patience` times.
        Args:
          img: np.ndarray of 3 channels, the vignette to be added
          canvas: np.ndarray of 3 channels, the canvas
          mask: 2d/4-channel np.ndarray, the mask
          label: the label to be added
          patience: int, the retry budget for finding a non-overlapping position
          mode: str, "label" or "pattern", see mode in _canvas_append()
          format: str, see format in _canvas_append()
        Return:
          canvas: np.ndarray of 3 channels — with img added if a try within
            `patience` attempts succeeded, otherwise the original canvas
          mask: np.ndarray of 2d or 4 channels — with img added if a try within
            `patience` attempts succeeded, otherwise the original mask
        """
        _outer_bound = (canvas.shape[0] - img.shape[0], canvas.shape[1] - img.shape[1])
        # select an initial add_point
        _add_point = np.array((np.random.randint(
                                  low = self._scanning_constant,
                                  high = _outer_bound[0] - self._scanning_constant),
                               np.random.randint(
                                  low = self._scanning_constant,
                                  high = _outer_bound[1] - self._scanning_constant)
                               ))
        # create a binary mask of the img
        _img_mask = np.any(img, axis = 2)
        for retry in range(patience):
            # each attempt makes a small random move
            _add_point = _add_point + np.random.randint(
                                          low = -1*self._scanning_constant,
                                          high = self._scanning_constant,
                                          size = 2)
            # make sure the new value is legal
            _add_point = np.clip(a = _add_point,
                                 a_min = (0,0),
                                 a_max = _outer_bound)
            # check if there's any overlap
            # in pixel format check the mask directly
            if format == "pixel":
                _check_zone = mask[_add_point[0]:_add_point[0]+_img_mask.shape[0],
                                   _add_point[1]:_add_point[1]+_img_mask.shape[1]]
            # in COCO format check the first layer of mask
            else:
                _check_zone = mask[_add_point[0]:_add_point[0]+_img_mask.shape[0],
                                   _add_point[1]:_add_point[1]+_img_mask.shape[1],
                                   0]
            # if there is an overlap
            if np.any(np.multiply(_check_zone,_img_mask)) == True:
                # retry with a new point
                continue
            # otherwise add the img to canvas and mask and stop retrying
            else:
                canvas, mask = self._canvas_append(canvas = canvas,
                                                   add_point = _add_point,
                                                   img = img,
                                                   mask = mask,
                                                   mask_label = label,
                                                   mode = mode,
                                                   format = format)
                break
        gc.collect()
        return canvas, mask
    def _try_insert(self,
                    img: np.ndarray,
                    canvas: np.ndarray,
                    mask: np.ndarray,
                    label: int,
                    patience: int,
                    mode = "label",
                    format = "pixel"):
        """
        Try to insert img into canvas and mask using an escape-overlapping walk:
        if the initial point overlaps, random-walk until the overlap is escaped
        and append at the first position that successfully escapes;
        if the initial point does not overlap, random-walk toward an overlapping
        point and append at the last non-overlapping position before it.
        Args:
          img: np.ndarray of 3 channels, the vignette to be added
          canvas: np.ndarray of 3 channels, the canvas
          mask: 2d/4-channel np.ndarray, the mask
          label: the label to be added
          patience: int, the retry budget for the random walk
          mode: str, "label" or "pattern", see mode in _canvas_append()
          format: str, see format in _canvas_append()
        Return:
          canvas: np.ndarray of 3 channels — with img added if a try within
            `patience` attempts succeeded, otherwise the original canvas
          mask: np.ndarray of 2d or 4 channels — with img added if a try within
            `patience` attempts succeeded, otherwise the original mask
        """
        _outer_bound = (canvas.shape[0] - img.shape[0], canvas.shape[1] - img.shape[1])
        # select an initial add_point
        _add_point = np.array((np.random.randint(low = self._scanning_constant,
                                                 high = _outer_bound[0] - self._scanning_constant),
                               np.random.randint(low = self._scanning_constant,
                                                 high = _outer_bound[1] - self._scanning_constant)))
        # create a binary mask of the img
        _img_mask = np.any(img, axis = 2)
        # check if there's any overlap at the starting point
        if format == "pixel":
            _check_zone = mask[_add_point[0]:_add_point[0]+_img_mask.shape[0],
                               _add_point[1]:_add_point[1]+_img_mask.shape[1]]
        # in COCO format check the first layer of mask
        else:
            _check_zone = mask[_add_point[0]:_add_point[0]+_img_mask.shape[0],
                               _add_point[1]:_add_point[1]+_img_mask.shape[1],
                               0]
        # starting overlapped -> "escape" mode; otherwise -> "find an overlap" mode
        _init_overlapped = np.any(np.multiply(_check_zone,_img_mask))
        # in finding mode we need to record the last non-overlapping add point
        _last_add_point = _add_point
        # within the patience budget
        for retry in range(patience):
            # each attempt makes a small random move
            _add_point = _add_point + np.random.randint(low = -1*self._scanning_constant,
                                                        high = self._scanning_constant,
                                                        size = 2)
            # make sure the new value is legal
            _add_point = np.clip(a = _add_point,
                                 a_min = (0,0),
                                 a_max = _outer_bound)
            # check if there's any overlap at the new point
            if format == "pixel":
                _check_zone = mask[_add_point[0]:_add_point[0]+_img_mask.shape[0],
                                   _add_point[1]:_add_point[1]+_img_mask.shape[1]]
            # in COCO format check the first layer of mask
            else:
                _check_zone = mask[_add_point[0]:_add_point[0]+_img_mask.shape[0],
                                   _add_point[1]:_add_point[1]+_img_mask.shape[1],
                                   0]
            # check if there's overlap
            _overlap = np.any(np.multiply(_check_zone,_img_mask))
            # still overlapped while "escaping"
            if (_overlap == True) and (_init_overlapped == True):
                # retry with a new point
                continue
            # met the first non-overlap while escaping
            elif (_overlap == False) and (_init_overlapped == True):
                # stop the walk and append here
                canvas, mask = self._canvas_append(canvas = canvas,
                                                   add_point = _add_point,
                                                   img = img,
                                                   mask = mask,
                                                   mask_label = label,
                                                   mode = mode,
                                                   format = format)
                break
            # finding an overlap but not found yet
            elif (_overlap == False) and (_init_overlapped == False):
                # record the last add_point and retry with a new point
                _last_add_point = _add_point
                continue
            # finding an overlap and found it: use the last non-overlapping point
            else:
                canvas, mask = self._canvas_append(canvas = canvas,
                                                   add_point = _last_add_point,
                                                   img = img,
                                                   mask = mask,
                                                   mask_label = label,
                                                   mode = mode,
                                                   format = format)
                break
        gc.collect()
        return canvas, mask
| 54.662722 | 120 | 0.500758 |
255fc1c3062c1fbdf6dc873744212e8248b03800 | 190,066 | py | Python | openshift/client/apis/build_openshift_io_v1_api.py | asetty/openshift-restclient-python | c6f2168d7a02a24c030fb67959919fd4a9eb260d | [
"Apache-2.0"
] | null | null | null | openshift/client/apis/build_openshift_io_v1_api.py | asetty/openshift-restclient-python | c6f2168d7a02a24c030fb67959919fd4a9eb260d | [
"Apache-2.0"
] | null | null | null | openshift/client/apis/build_openshift_io_v1_api.py | asetty/openshift-restclient-python | c6f2168d7a02a24c030fb67959919fd4a9eb260d | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
OpenShift API (with Kubernetes)
OpenShift provides builds, application lifecycle, image content management, and administrative policy on top of Kubernetes. The API allows consistent management of those objects. All API operations are authenticated via an Authorization bearer token that is provided for service accounts as a generated secret (in JWT form) or via the native OAuth endpoint located at /oauth/authorize. Core infrastructure components may use client certificates that require no authentication. All API operations return a 'resourceVersion' string that represents the version of the object in the underlying storage. The standard LIST operation performs a snapshot read of the underlying objects, returning a resourceVersion representing a consistent version of the listed objects. The WATCH operation allows all updates to a set of objects after the provided resourceVersion to be observed by a client. By listing and beginning a watch from the returned resourceVersion, clients may observe a consistent view of the state of one or more objects. Note that WATCH always returns the update after the provided resourceVersion. Watch may be extended a limited time in the past - using etcd 2 the watch window is 1000 events (which on a large cluster may only be a few tens of seconds) so clients must explicitly handle the \"watch to old error\" by re-listing. Objects are divided into two rough categories - those that have a lifecycle and must reflect the state of the cluster, and those that have no state. Objects with lifecycle typically have three main sections: * 'metadata' common to all objects * a 'spec' that represents the desired state * a 'status' that represents how much of the desired state is reflected on the cluster at the current time Objects that have no state have 'metadata' but may lack a 'spec' or 'status' section. Objects are divided into those that are namespace scoped (only exist inside of a namespace) and those that are cluster scoped (exist outside of a namespace). 
A namespace scoped resource will be deleted when the namespace is deleted and cannot be created if the namespace has not yet been created or is in the process of deletion. Cluster scoped resources are typically only accessible to admins - resources like nodes, persistent volumes, and cluster policy. All objects have a schema that is a combination of the 'kind' and 'apiVersion' fields. This schema is additive only for any given version - no backwards incompatible changes are allowed without incrementing the apiVersion. The server will return and accept a number of standard responses that share a common schema - for instance, the common error type is 'metav1.Status' (described below) and will be returned on any error from the API server. The API is available in multiple serialization formats - the default is JSON (Accept: application/json and Content-Type: application/json) but clients may also use YAML (application/yaml) or the native Protobuf schema (application/vnd.kubernetes.protobuf). Note that the format of the WATCH API call is slightly different - for JSON it returns newline delimited objects while for Protobuf it returns length-delimited frames (4 bytes in network-order) that contain a 'versioned.Watch' Protobuf object. See the OpenShift documentation at https://docs.openshift.org for more information.
OpenAPI spec version: latest
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..api_client import ApiClient
class BuildOpenshiftIoV1Api(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def connect_post_namespaced_build_config_instantiatebinary(self, name, namespace, **kwargs):
"""
connect POST requests to instantiatebinary of BuildConfig
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.connect_post_namespaced_build_config_instantiatebinary(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the BinaryBuildRequestOptions (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str as_file: asFile determines if the binary should be created as a file within the source rather than extracted as an archive
:param str revision_author_email: revision.authorEmail of the source control user
:param str revision_author_name: revision.authorName of the source control user
:param str revision_commit: revision.commit is the value identifying a specific commit
:param str revision_committer_email: revision.committerEmail of the source control user
:param str revision_committer_name: revision.committerName of the source control user
:param str revision_message: revision.message is the description of a specific commit
:return: V1Build
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.connect_post_namespaced_build_config_instantiatebinary_with_http_info(name, namespace, **kwargs)
else:
(data) = self.connect_post_namespaced_build_config_instantiatebinary_with_http_info(name, namespace, **kwargs)
return data
def connect_post_namespaced_build_config_instantiatebinary_with_http_info(self, name, namespace, **kwargs):
    """
    connect POST requests to instantiatebinary of BuildConfig
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.connect_post_namespaced_build_config_instantiatebinary_with_http_info(name, namespace, async_req=True)
    >>> result = thread.get()
    :param async_req bool
    :param str name: name of the BinaryBuildRequestOptions (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str as_file: asFile determines if the binary should be created as a file within the source rather than extracted as an archive
    :param str revision_author_email: revision.authorEmail of the source control user
    :param str revision_author_name: revision.authorName of the source control user
    :param str revision_commit: revision.commit is the value identifying a specific commit
    :param str revision_committer_email: revision.committerEmail of the source control user
    :param str revision_committer_name: revision.committerName of the source control user
    :param str revision_message: revision.message is the description of a specific commit
    :return: V1Build
    If the method is called asynchronously,
    returns the request thread.
    """
    # Keyword arguments recognised by this endpoint, plus the request-control
    # options shared by every generated API method.
    all_params = ['name', 'namespace', 'as_file', 'revision_author_email', 'revision_author_name', 'revision_commit', 'revision_committer_email', 'revision_committer_name', 'revision_message']
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # Snapshot of the local variables; validated kwargs are merged in below so
    # that unknown keyword arguments fail fast with a TypeError.
    params = locals()
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method connect_post_namespaced_build_config_instantiatebinary" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'name' is set
    if ('name' not in params) or (params['name'] is None):
        raise ValueError("Missing the required parameter `name` when calling `connect_post_namespaced_build_config_instantiatebinary`")
    # verify the required parameter 'namespace' is set
    if ('namespace' not in params) or (params['namespace'] is None):
        raise ValueError("Missing the required parameter `namespace` when calling `connect_post_namespaced_build_config_instantiatebinary`")
    collection_formats = {}
    # Values substituted into the {name}/{namespace} placeholders of the URL.
    path_params = {}
    if 'name' in params:
        path_params['name'] = params['name']
    if 'namespace' in params:
        path_params['namespace'] = params['namespace']
    # Optional query-string parameters; only sent when the caller supplied them.
    query_params = []
    if 'as_file' in params:
        query_params.append(('asFile', params['as_file']))
    if 'revision_author_email' in params:
        query_params.append(('revision.authorEmail', params['revision_author_email']))
    if 'revision_author_name' in params:
        query_params.append(('revision.authorName', params['revision_author_name']))
    if 'revision_commit' in params:
        query_params.append(('revision.commit', params['revision_commit']))
    if 'revision_committer_email' in params:
        query_params.append(('revision.committerEmail', params['revision_committer_email']))
    if 'revision_committer_name' in params:
        query_params.append(('revision.committerName', params['revision_committer_name']))
    if 'revision_message' in params:
        query_params.append(('revision.message', params['revision_message']))
    header_params = {}
    form_params = []
    local_var_files = {}
    # The binary payload is streamed by the caller; no JSON body is built here.
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['*/*'])
    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['*/*'])
    # Authentication setting
    auth_settings = ['BearerToken', 'Oauth2AccessToken', 'Oauth2Implicit']
    # Delegate the HTTP round trip (and optional async dispatch) to ApiClient.
    return self.api_client.call_api('/apis/build.openshift.io/v1/namespaces/{namespace}/buildconfigs/{name}/instantiatebinary', 'POST',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='V1Build',
                                    auth_settings=auth_settings,
                                    async_req=params.get('async_req'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
def connect_post_namespaced_build_config_webhooks(self, name, namespace, **kwargs):
"""
connect POST requests to webhooks of BuildConfig
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.connect_post_namespaced_build_config_webhooks(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Build (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str path: Path is the URL path to use for the current proxy request to pod.
:return: str
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.connect_post_namespaced_build_config_webhooks_with_http_info(name, namespace, **kwargs)
else:
(data) = self.connect_post_namespaced_build_config_webhooks_with_http_info(name, namespace, **kwargs)
return data
def connect_post_namespaced_build_config_webhooks_with_http_info(self, name, namespace, **kwargs):
    """
    connect POST requests to webhooks of BuildConfig
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.connect_post_namespaced_build_config_webhooks_with_http_info(name, namespace, async_req=True)
    >>> result = thread.get()
    :param async_req bool
    :param str name: name of the Build (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str path: Path is the URL path to use for the current proxy request to pod.
    :return: str
    If the method is called asynchronously,
    returns the request thread.
    """
    # Keyword arguments recognised by this endpoint, plus the request-control
    # options shared by every generated API method.
    all_params = ['name', 'namespace', 'path']
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # Snapshot of the local variables; validated kwargs are merged in below so
    # that unknown keyword arguments fail fast with a TypeError.
    params = locals()
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method connect_post_namespaced_build_config_webhooks" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'name' is set
    if ('name' not in params) or (params['name'] is None):
        raise ValueError("Missing the required parameter `name` when calling `connect_post_namespaced_build_config_webhooks`")
    # verify the required parameter 'namespace' is set
    if ('namespace' not in params) or (params['namespace'] is None):
        raise ValueError("Missing the required parameter `namespace` when calling `connect_post_namespaced_build_config_webhooks`")
    collection_formats = {}
    # Values substituted into the {name}/{namespace} placeholders of the URL.
    path_params = {}
    if 'name' in params:
        path_params['name'] = params['name']
    if 'namespace' in params:
        path_params['namespace'] = params['namespace']
    # Optional query-string parameter; only sent when the caller supplied it.
    query_params = []
    if 'path' in params:
        query_params.append(('path', params['path']))
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['*/*'])
    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['*/*'])
    # Authentication setting
    auth_settings = ['BearerToken', 'Oauth2AccessToken', 'Oauth2Implicit']
    # Delegate the HTTP round trip (and optional async dispatch) to ApiClient.
    return self.api_client.call_api('/apis/build.openshift.io/v1/namespaces/{namespace}/buildconfigs/{name}/webhooks', 'POST',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='str',
                                    auth_settings=auth_settings,
                                    async_req=params.get('async_req'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
def connect_post_namespaced_build_config_webhooks_with_path(self, name, namespace, path, **kwargs):
"""
connect POST requests to webhooks of BuildConfig
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.connect_post_namespaced_build_config_webhooks_with_path(name, namespace, path, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Build (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str path: path to the resource (required)
:param str path2: Path is the URL path to use for the current proxy request to pod.
:return: str
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.connect_post_namespaced_build_config_webhooks_with_path_with_http_info(name, namespace, path, **kwargs)
else:
(data) = self.connect_post_namespaced_build_config_webhooks_with_path_with_http_info(name, namespace, path, **kwargs)
return data
def connect_post_namespaced_build_config_webhooks_with_path_with_http_info(self, name, namespace, path, **kwargs):
    """
    connect POST requests to webhooks of BuildConfig
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.connect_post_namespaced_build_config_webhooks_with_path_with_http_info(name, namespace, path, async_req=True)
    >>> result = thread.get()
    :param async_req bool
    :param str name: name of the Build (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str path: path to the resource (required)
    :param str path2: Path is the URL path to use for the current proxy request to pod.
    :return: str
    If the method is called asynchronously,
    returns the request thread.
    """
    # Keyword arguments recognised by this endpoint, plus the request-control
    # options shared by every generated API method.  'path2' is the optional
    # proxy path query parameter; 'path' is the URL path segment.
    all_params = ['name', 'namespace', 'path', 'path2']
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # Snapshot of the local variables; validated kwargs are merged in below so
    # that unknown keyword arguments fail fast with a TypeError.
    params = locals()
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method connect_post_namespaced_build_config_webhooks_with_path" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'name' is set
    if ('name' not in params) or (params['name'] is None):
        raise ValueError("Missing the required parameter `name` when calling `connect_post_namespaced_build_config_webhooks_with_path`")
    # verify the required parameter 'namespace' is set
    if ('namespace' not in params) or (params['namespace'] is None):
        raise ValueError("Missing the required parameter `namespace` when calling `connect_post_namespaced_build_config_webhooks_with_path`")
    # verify the required parameter 'path' is set
    if ('path' not in params) or (params['path'] is None):
        raise ValueError("Missing the required parameter `path` when calling `connect_post_namespaced_build_config_webhooks_with_path`")
    collection_formats = {}
    # Values substituted into the {name}/{namespace}/{path} URL placeholders.
    path_params = {}
    if 'name' in params:
        path_params['name'] = params['name']
    if 'namespace' in params:
        path_params['namespace'] = params['namespace']
    if 'path' in params:
        path_params['path'] = params['path']
    # 'path2' maps onto the wire-level 'path' query parameter.
    query_params = []
    if 'path2' in params:
        query_params.append(('path', params['path2']))
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['*/*'])
    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['*/*'])
    # Authentication setting
    auth_settings = ['BearerToken', 'Oauth2AccessToken', 'Oauth2Implicit']
    # Delegate the HTTP round trip (and optional async dispatch) to ApiClient.
    return self.api_client.call_api('/apis/build.openshift.io/v1/namespaces/{namespace}/buildconfigs/{name}/webhooks/{path}', 'POST',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='str',
                                    auth_settings=auth_settings,
                                    async_req=params.get('async_req'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
def create_build_config_for_all_namespaces(self, body, **kwargs):
"""
create a BuildConfig
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_build_config_for_all_namespaces(body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param V1BuildConfig body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:return: V1BuildConfig
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.create_build_config_for_all_namespaces_with_http_info(body, **kwargs)
else:
(data) = self.create_build_config_for_all_namespaces_with_http_info(body, **kwargs)
return data
def create_build_config_for_all_namespaces_with_http_info(self, body, **kwargs):
    """
    create a BuildConfig
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.create_build_config_for_all_namespaces_with_http_info(body, async_req=True)
    >>> result = thread.get()
    :param async_req bool
    :param V1BuildConfig body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :return: V1BuildConfig
    If the method is called asynchronously,
    returns the request thread.
    """
    # Keyword arguments recognised by this endpoint, plus the request-control
    # options shared by every generated API method.
    all_params = ['body', 'pretty']
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # Snapshot of the local variables; validated kwargs are merged in below so
    # that unknown keyword arguments fail fast with a TypeError.
    params = locals()
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method create_build_config_for_all_namespaces" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'body' is set
    if ('body' not in params) or (params['body'] is None):
        raise ValueError("Missing the required parameter `body` when calling `create_build_config_for_all_namespaces`")
    collection_formats = {}
    # Cluster-scoped endpoint: no URL placeholders to fill.
    path_params = {}
    query_params = []
    if 'pretty' in params:
        query_params.append(('pretty', params['pretty']))
    header_params = {}
    form_params = []
    local_var_files = {}
    # The BuildConfig manifest is sent as the request body.
    body_params = None
    if 'body' in params:
        body_params = params['body']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['*/*'])
    # Authentication setting
    auth_settings = ['BearerToken', 'Oauth2AccessToken', 'Oauth2Implicit']
    # Delegate the HTTP round trip (and optional async dispatch) to ApiClient.
    return self.api_client.call_api('/apis/build.openshift.io/v1/buildconfigs', 'POST',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='V1BuildConfig',
                                    auth_settings=auth_settings,
                                    async_req=params.get('async_req'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
def create_build_for_all_namespaces(self, body, **kwargs):
"""
create a Build
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_build_for_all_namespaces(body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param V1Build body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:return: V1Build
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.create_build_for_all_namespaces_with_http_info(body, **kwargs)
else:
(data) = self.create_build_for_all_namespaces_with_http_info(body, **kwargs)
return data
def create_build_for_all_namespaces_with_http_info(self, body, **kwargs):
    """
    create a Build
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.create_build_for_all_namespaces_with_http_info(body, async_req=True)
    >>> result = thread.get()
    :param async_req bool
    :param V1Build body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :return: V1Build
    If the method is called asynchronously,
    returns the request thread.
    """
    # Keyword arguments recognised by this endpoint, plus the request-control
    # options shared by every generated API method.
    all_params = ['body', 'pretty']
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # Snapshot of the local variables; validated kwargs are merged in below so
    # that unknown keyword arguments fail fast with a TypeError.
    params = locals()
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method create_build_for_all_namespaces" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'body' is set
    if ('body' not in params) or (params['body'] is None):
        raise ValueError("Missing the required parameter `body` when calling `create_build_for_all_namespaces`")
    collection_formats = {}
    # Cluster-scoped endpoint: no URL placeholders to fill.
    path_params = {}
    query_params = []
    if 'pretty' in params:
        query_params.append(('pretty', params['pretty']))
    header_params = {}
    form_params = []
    local_var_files = {}
    # The Build manifest is sent as the request body.
    body_params = None
    if 'body' in params:
        body_params = params['body']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['*/*'])
    # Authentication setting
    auth_settings = ['BearerToken', 'Oauth2AccessToken', 'Oauth2Implicit']
    # Delegate the HTTP round trip (and optional async dispatch) to ApiClient.
    return self.api_client.call_api('/apis/build.openshift.io/v1/builds', 'POST',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='V1Build',
                                    auth_settings=auth_settings,
                                    async_req=params.get('async_req'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
def create_namespaced_build(self, namespace, body, **kwargs):
"""
create a Build
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_namespaced_build(namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1Build body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:return: V1Build
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.create_namespaced_build_with_http_info(namespace, body, **kwargs)
else:
(data) = self.create_namespaced_build_with_http_info(namespace, body, **kwargs)
return data
def create_namespaced_build_with_http_info(self, namespace, body, **kwargs):
    """
    create a Build
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.create_namespaced_build_with_http_info(namespace, body, async_req=True)
    >>> result = thread.get()
    :param async_req bool
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param V1Build body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :return: V1Build
    If the method is called asynchronously,
    returns the request thread.
    """
    # Keyword arguments recognised by this endpoint, plus the request-control
    # options shared by every generated API method.
    all_params = ['namespace', 'body', 'pretty']
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # Snapshot of the local variables; validated kwargs are merged in below so
    # that unknown keyword arguments fail fast with a TypeError.
    params = locals()
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method create_namespaced_build" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'namespace' is set
    if ('namespace' not in params) or (params['namespace'] is None):
        raise ValueError("Missing the required parameter `namespace` when calling `create_namespaced_build`")
    # verify the required parameter 'body' is set
    if ('body' not in params) or (params['body'] is None):
        raise ValueError("Missing the required parameter `body` when calling `create_namespaced_build`")
    collection_formats = {}
    # Value substituted into the {namespace} placeholder of the URL.
    path_params = {}
    if 'namespace' in params:
        path_params['namespace'] = params['namespace']
    query_params = []
    if 'pretty' in params:
        query_params.append(('pretty', params['pretty']))
    header_params = {}
    form_params = []
    local_var_files = {}
    # The Build manifest is sent as the request body.
    body_params = None
    if 'body' in params:
        body_params = params['body']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['*/*'])
    # Authentication setting
    auth_settings = ['BearerToken', 'Oauth2AccessToken', 'Oauth2Implicit']
    # Delegate the HTTP round trip (and optional async dispatch) to ApiClient.
    return self.api_client.call_api('/apis/build.openshift.io/v1/namespaces/{namespace}/builds', 'POST',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='V1Build',
                                    auth_settings=auth_settings,
                                    async_req=params.get('async_req'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
def create_namespaced_build_clone(self, name, namespace, body, **kwargs):
"""
create clone of a Build
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_namespaced_build_clone(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the BuildRequest (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1BuildRequest body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:return: V1BuildRequest
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.create_namespaced_build_clone_with_http_info(name, namespace, body, **kwargs)
else:
(data) = self.create_namespaced_build_clone_with_http_info(name, namespace, body, **kwargs)
return data
def create_namespaced_build_clone_with_http_info(self, name, namespace, body, **kwargs):
    """
    create clone of a Build
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.create_namespaced_build_clone_with_http_info(name, namespace, body, async_req=True)
    >>> result = thread.get()
    :param async_req bool
    :param str name: name of the BuildRequest (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param V1BuildRequest body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :return: V1BuildRequest
    If the method is called asynchronously,
    returns the request thread.
    """
    # Keyword arguments recognised by this endpoint, plus the request-control
    # options shared by every generated API method.
    all_params = ['name', 'namespace', 'body', 'pretty']
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # Snapshot of the local variables; validated kwargs are merged in below so
    # that unknown keyword arguments fail fast with a TypeError.
    params = locals()
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method create_namespaced_build_clone" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'name' is set
    if ('name' not in params) or (params['name'] is None):
        raise ValueError("Missing the required parameter `name` when calling `create_namespaced_build_clone`")
    # verify the required parameter 'namespace' is set
    if ('namespace' not in params) or (params['namespace'] is None):
        raise ValueError("Missing the required parameter `namespace` when calling `create_namespaced_build_clone`")
    # verify the required parameter 'body' is set
    if ('body' not in params) or (params['body'] is None):
        raise ValueError("Missing the required parameter `body` when calling `create_namespaced_build_clone`")
    collection_formats = {}
    # Values substituted into the {name}/{namespace} placeholders of the URL.
    path_params = {}
    if 'name' in params:
        path_params['name'] = params['name']
    if 'namespace' in params:
        path_params['namespace'] = params['namespace']
    query_params = []
    if 'pretty' in params:
        query_params.append(('pretty', params['pretty']))
    header_params = {}
    form_params = []
    local_var_files = {}
    # The BuildRequest manifest is sent as the request body.
    body_params = None
    if 'body' in params:
        body_params = params['body']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['*/*'])
    # Authentication setting
    auth_settings = ['BearerToken', 'Oauth2AccessToken', 'Oauth2Implicit']
    # Delegate the HTTP round trip (and optional async dispatch) to ApiClient.
    return self.api_client.call_api('/apis/build.openshift.io/v1/namespaces/{namespace}/builds/{name}/clone', 'POST',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='V1BuildRequest',
                                    auth_settings=auth_settings,
                                    async_req=params.get('async_req'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
def create_namespaced_build_config(self, namespace, body, **kwargs):
"""
create a BuildConfig
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_namespaced_build_config(namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1BuildConfig body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:return: V1BuildConfig
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.create_namespaced_build_config_with_http_info(namespace, body, **kwargs)
else:
(data) = self.create_namespaced_build_config_with_http_info(namespace, body, **kwargs)
return data
def create_namespaced_build_config_with_http_info(self, namespace, body, **kwargs):
    """
    create a BuildConfig
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.create_namespaced_build_config_with_http_info(namespace, body, async_req=True)
    >>> result = thread.get()
    :param async_req bool
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param V1BuildConfig body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :return: V1BuildConfig
    If the method is called asynchronously,
    returns the request thread.
    """
    # Keyword arguments recognised by this endpoint, plus the request-control
    # options shared by every generated API method.
    all_params = ['namespace', 'body', 'pretty']
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # Snapshot of the local variables; validated kwargs are merged in below so
    # that unknown keyword arguments fail fast with a TypeError.
    params = locals()
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method create_namespaced_build_config" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'namespace' is set
    if ('namespace' not in params) or (params['namespace'] is None):
        raise ValueError("Missing the required parameter `namespace` when calling `create_namespaced_build_config`")
    # verify the required parameter 'body' is set
    if ('body' not in params) or (params['body'] is None):
        raise ValueError("Missing the required parameter `body` when calling `create_namespaced_build_config`")
    collection_formats = {}
    # Value substituted into the {namespace} placeholder of the URL.
    path_params = {}
    if 'namespace' in params:
        path_params['namespace'] = params['namespace']
    query_params = []
    if 'pretty' in params:
        query_params.append(('pretty', params['pretty']))
    header_params = {}
    form_params = []
    local_var_files = {}
    # The BuildConfig manifest is sent as the request body.
    body_params = None
    if 'body' in params:
        body_params = params['body']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['*/*'])
    # Authentication setting
    auth_settings = ['BearerToken', 'Oauth2AccessToken', 'Oauth2Implicit']
    # Delegate the HTTP round trip (and optional async dispatch) to ApiClient.
    return self.api_client.call_api('/apis/build.openshift.io/v1/namespaces/{namespace}/buildconfigs', 'POST',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='V1BuildConfig',
                                    auth_settings=auth_settings,
                                    async_req=params.get('async_req'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
def create_namespaced_build_config_instantiate(self, name, namespace, body, **kwargs):
"""
create instantiate of a BuildConfig
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_namespaced_build_config_instantiate(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the BuildRequest (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1BuildRequest body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:return: V1Build
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.create_namespaced_build_config_instantiate_with_http_info(name, namespace, body, **kwargs)
else:
(data) = self.create_namespaced_build_config_instantiate_with_http_info(name, namespace, body, **kwargs)
return data
    def create_namespaced_build_config_instantiate_with_http_info(self, name, namespace, body, **kwargs):
        """
        create instantiate of a BuildConfig

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.create_namespaced_build_config_instantiate_with_http_info(name, namespace, body, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute the request asynchronously and return the thread
        :param str name: name of the BuildRequest (required)
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param V1BuildRequest body: (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :return: V1Build
                 If the method is called asynchronously,
                 returns the request thread.
        :raises TypeError: if an unsupported keyword argument is supplied
        :raises ValueError: if a required parameter is missing or None
        """
        all_params = ['name', 'namespace', 'body', 'pretty']
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        # locals() snapshots self, all_params and the named arguments; only the
        # keys explicitly looked up below are ever read, so the extras are benign.
        params = locals()
        # Reject any keyword argument this endpoint does not understand.
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method create_namespaced_build_config_instantiate" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'name' is set
        if ('name' not in params) or (params['name'] is None):
            raise ValueError("Missing the required parameter `name` when calling `create_namespaced_build_config_instantiate`")
        # verify the required parameter 'namespace' is set
        if ('namespace' not in params) or (params['namespace'] is None):
            raise ValueError("Missing the required parameter `namespace` when calling `create_namespaced_build_config_instantiate`")
        # verify the required parameter 'body' is set
        if ('body' not in params) or (params['body'] is None):
            raise ValueError("Missing the required parameter `body` when calling `create_namespaced_build_config_instantiate`")
        collection_formats = {}
        # 'name' and 'namespace' are substituted into the URL template below.
        path_params = {}
        if 'name' in params:
            path_params['name'] = params['name']
        if 'namespace' in params:
            path_params['namespace'] = params['namespace']
        query_params = []
        if 'pretty' in params:
            query_params.append(('pretty', params['pretty']))
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        if 'body' in params:
            body_params = params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['*/*'])
        # Authentication setting
        auth_settings = ['BearerToken', 'Oauth2AccessToken', 'Oauth2Implicit']
        # api_client.call_api performs (de)serialization, auth and the actual
        # HTTP round trip; response is deserialized into V1Build.
        return self.api_client.call_api('/apis/build.openshift.io/v1/namespaces/{namespace}/buildconfigs/{name}/instantiate', 'POST',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type='V1Build',
                                        auth_settings=auth_settings,
                                        async_req=params.get('async_req'),
                                        _return_http_data_only=params.get('_return_http_data_only'),
                                        _preload_content=params.get('_preload_content', True),
                                        _request_timeout=params.get('_request_timeout'),
                                        collection_formats=collection_formats)
def delete_collection_namespaced_build(self, namespace, **kwargs):
"""
delete collection of Build
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_collection_namespaced_build(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server the server will respond with a 410 ResourceExpired error indicating the client must restart their list without the continue field. This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_collection_namespaced_build_with_http_info(namespace, **kwargs)
else:
(data) = self.delete_collection_namespaced_build_with_http_info(namespace, **kwargs)
return data
    def delete_collection_namespaced_build_with_http_info(self, namespace, **kwargs):
        """
        delete collection of Build

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.delete_collection_namespaced_build_with_http_info(namespace, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute the request asynchronously and return the thread
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :param str _continue: continue token from a previous, otherwise-identical list call; used to page through large result sets (not supported when watch is true).
        :param str field_selector: restrict the returned objects by their fields; defaults to everything.
        :param bool include_uninitialized: if true, partially initialized resources are included in the response.
        :param str label_selector: restrict the returned objects by their labels; defaults to everything.
        :param int limit: maximum number of responses for a list call; the server sets `continue` on the list metadata when more items exist.
        :param str resource_version: show changes after this resource version (watch) or control list freshness (0 = served from cache).
        :param int timeout_seconds: timeout for the list/watch call.
        :param bool watch: stream add/update/remove notifications instead of a one-shot list.
        :return: V1Status
                 If the method is called asynchronously,
                 returns the request thread.
        :raises TypeError: if an unsupported keyword argument is supplied
        :raises ValueError: if `namespace` is missing or None
        """
        all_params = ['namespace', 'pretty', '_continue', 'field_selector', 'include_uninitialized', 'label_selector', 'limit', 'resource_version', 'timeout_seconds', 'watch']
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        # locals() snapshots self, all_params and the named arguments; only the
        # keys explicitly looked up below are ever read, so the extras are benign.
        params = locals()
        # Reject any keyword argument this endpoint does not understand.
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method delete_collection_namespaced_build" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'namespace' is set
        if ('namespace' not in params) or (params['namespace'] is None):
            raise ValueError("Missing the required parameter `namespace` when calling `delete_collection_namespaced_build`")
        collection_formats = {}
        # 'namespace' is substituted into the URL template below.
        path_params = {}
        if 'namespace' in params:
            path_params['namespace'] = params['namespace']
        # Map each snake_case Python argument to its camelCase wire name.
        query_params = []
        if 'pretty' in params:
            query_params.append(('pretty', params['pretty']))
        if '_continue' in params:
            query_params.append(('continue', params['_continue']))
        if 'field_selector' in params:
            query_params.append(('fieldSelector', params['field_selector']))
        if 'include_uninitialized' in params:
            query_params.append(('includeUninitialized', params['include_uninitialized']))
        if 'label_selector' in params:
            query_params.append(('labelSelector', params['label_selector']))
        if 'limit' in params:
            query_params.append(('limit', params['limit']))
        if 'resource_version' in params:
            query_params.append(('resourceVersion', params['resource_version']))
        if 'timeout_seconds' in params:
            query_params.append(('timeoutSeconds', params['timeout_seconds']))
        if 'watch' in params:
            query_params.append(('watch', params['watch']))
        header_params = {}
        form_params = []
        local_var_files = {}
        # Collection deletion sends no request body.
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['*/*'])
        # Authentication setting
        auth_settings = ['BearerToken', 'Oauth2AccessToken', 'Oauth2Implicit']
        # api_client.call_api performs (de)serialization, auth and the actual
        # HTTP round trip; response is deserialized into V1Status.
        return self.api_client.call_api('/apis/build.openshift.io/v1/namespaces/{namespace}/builds', 'DELETE',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type='V1Status',
                                        auth_settings=auth_settings,
                                        async_req=params.get('async_req'),
                                        _return_http_data_only=params.get('_return_http_data_only'),
                                        _preload_content=params.get('_preload_content', True),
                                        _request_timeout=params.get('_request_timeout'),
                                        collection_formats=collection_formats)
def delete_collection_namespaced_build_config(self, namespace, **kwargs):
"""
delete collection of BuildConfig
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_collection_namespaced_build_config(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server the server will respond with a 410 ResourceExpired error indicating the client must restart their list without the continue field. This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_collection_namespaced_build_config_with_http_info(namespace, **kwargs)
else:
(data) = self.delete_collection_namespaced_build_config_with_http_info(namespace, **kwargs)
return data
    def delete_collection_namespaced_build_config_with_http_info(self, namespace, **kwargs):
        """
        delete collection of BuildConfig

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.delete_collection_namespaced_build_config_with_http_info(namespace, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute the request asynchronously and return the thread
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :param str _continue: continue token from a previous, otherwise-identical list call; used to page through large result sets (not supported when watch is true).
        :param str field_selector: restrict the returned objects by their fields; defaults to everything.
        :param bool include_uninitialized: if true, partially initialized resources are included in the response.
        :param str label_selector: restrict the returned objects by their labels; defaults to everything.
        :param int limit: maximum number of responses for a list call; the server sets `continue` on the list metadata when more items exist.
        :param str resource_version: show changes after this resource version (watch) or control list freshness (0 = served from cache).
        :param int timeout_seconds: timeout for the list/watch call.
        :param bool watch: stream add/update/remove notifications instead of a one-shot list.
        :return: V1Status
                 If the method is called asynchronously,
                 returns the request thread.
        :raises TypeError: if an unsupported keyword argument is supplied
        :raises ValueError: if `namespace` is missing or None
        """
        all_params = ['namespace', 'pretty', '_continue', 'field_selector', 'include_uninitialized', 'label_selector', 'limit', 'resource_version', 'timeout_seconds', 'watch']
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        # locals() snapshots self, all_params and the named arguments; only the
        # keys explicitly looked up below are ever read, so the extras are benign.
        params = locals()
        # Reject any keyword argument this endpoint does not understand.
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method delete_collection_namespaced_build_config" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'namespace' is set
        if ('namespace' not in params) or (params['namespace'] is None):
            raise ValueError("Missing the required parameter `namespace` when calling `delete_collection_namespaced_build_config`")
        collection_formats = {}
        # 'namespace' is substituted into the URL template below.
        path_params = {}
        if 'namespace' in params:
            path_params['namespace'] = params['namespace']
        # Map each snake_case Python argument to its camelCase wire name.
        query_params = []
        if 'pretty' in params:
            query_params.append(('pretty', params['pretty']))
        if '_continue' in params:
            query_params.append(('continue', params['_continue']))
        if 'field_selector' in params:
            query_params.append(('fieldSelector', params['field_selector']))
        if 'include_uninitialized' in params:
            query_params.append(('includeUninitialized', params['include_uninitialized']))
        if 'label_selector' in params:
            query_params.append(('labelSelector', params['label_selector']))
        if 'limit' in params:
            query_params.append(('limit', params['limit']))
        if 'resource_version' in params:
            query_params.append(('resourceVersion', params['resource_version']))
        if 'timeout_seconds' in params:
            query_params.append(('timeoutSeconds', params['timeout_seconds']))
        if 'watch' in params:
            query_params.append(('watch', params['watch']))
        header_params = {}
        form_params = []
        local_var_files = {}
        # Collection deletion sends no request body.
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['*/*'])
        # Authentication setting
        auth_settings = ['BearerToken', 'Oauth2AccessToken', 'Oauth2Implicit']
        # api_client.call_api performs (de)serialization, auth and the actual
        # HTTP round trip; response is deserialized into V1Status.
        return self.api_client.call_api('/apis/build.openshift.io/v1/namespaces/{namespace}/buildconfigs', 'DELETE',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type='V1Status',
                                        auth_settings=auth_settings,
                                        async_req=params.get('async_req'),
                                        _return_http_data_only=params.get('_return_http_data_only'),
                                        _preload_content=params.get('_preload_content', True),
                                        _request_timeout=params.get('_request_timeout'),
                                        collection_formats=collection_formats)
def delete_namespaced_build(self, name, namespace, body, **kwargs):
"""
delete a Build
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_namespaced_build(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Build (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1DeleteOptions body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_namespaced_build_with_http_info(name, namespace, body, **kwargs)
else:
(data) = self.delete_namespaced_build_with_http_info(name, namespace, body, **kwargs)
return data
    def delete_namespaced_build_with_http_info(self, name, namespace, body, **kwargs):
        """
        delete a Build

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.delete_namespaced_build_with_http_info(name, namespace, body, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute the request asynchronously and return the thread
        :param str name: name of the Build (required)
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param V1DeleteOptions body: (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :param int grace_period_seconds: seconds before the object should be deleted; must be non-negative, zero means delete immediately, nil uses the type's default grace period.
        :param bool orphan_dependents: deprecated in favour of propagation_policy; whether dependent objects should be orphaned. Either this or propagation_policy may be set, but not both.
        :param str propagation_policy: garbage-collection policy ('Orphan', 'Background' or 'Foreground'). Either this or orphan_dependents may be set, but not both.
        :return: V1Status
                 If the method is called asynchronously,
                 returns the request thread.
        :raises TypeError: if an unsupported keyword argument is supplied
        :raises ValueError: if a required parameter is missing or None
        """
        all_params = ['name', 'namespace', 'body', 'pretty', 'grace_period_seconds', 'orphan_dependents', 'propagation_policy']
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        # locals() snapshots self, all_params and the named arguments; only the
        # keys explicitly looked up below are ever read, so the extras are benign.
        params = locals()
        # Reject any keyword argument this endpoint does not understand.
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method delete_namespaced_build" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'name' is set
        if ('name' not in params) or (params['name'] is None):
            raise ValueError("Missing the required parameter `name` when calling `delete_namespaced_build`")
        # verify the required parameter 'namespace' is set
        if ('namespace' not in params) or (params['namespace'] is None):
            raise ValueError("Missing the required parameter `namespace` when calling `delete_namespaced_build`")
        # verify the required parameter 'body' is set
        if ('body' not in params) or (params['body'] is None):
            raise ValueError("Missing the required parameter `body` when calling `delete_namespaced_build`")
        collection_formats = {}
        # 'name' and 'namespace' are substituted into the URL template below.
        path_params = {}
        if 'name' in params:
            path_params['name'] = params['name']
        if 'namespace' in params:
            path_params['namespace'] = params['namespace']
        # Map each snake_case Python argument to its camelCase wire name.
        query_params = []
        if 'pretty' in params:
            query_params.append(('pretty', params['pretty']))
        if 'grace_period_seconds' in params:
            query_params.append(('gracePeriodSeconds', params['grace_period_seconds']))
        if 'orphan_dependents' in params:
            query_params.append(('orphanDependents', params['orphan_dependents']))
        if 'propagation_policy' in params:
            query_params.append(('propagationPolicy', params['propagation_policy']))
        header_params = {}
        form_params = []
        local_var_files = {}
        # The V1DeleteOptions body is forwarded as the request payload.
        body_params = None
        if 'body' in params:
            body_params = params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['*/*'])
        # Authentication setting
        auth_settings = ['BearerToken', 'Oauth2AccessToken', 'Oauth2Implicit']
        # api_client.call_api performs (de)serialization, auth and the actual
        # HTTP round trip; response is deserialized into V1Status.
        return self.api_client.call_api('/apis/build.openshift.io/v1/namespaces/{namespace}/builds/{name}', 'DELETE',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type='V1Status',
                                        auth_settings=auth_settings,
                                        async_req=params.get('async_req'),
                                        _return_http_data_only=params.get('_return_http_data_only'),
                                        _preload_content=params.get('_preload_content', True),
                                        _request_timeout=params.get('_request_timeout'),
                                        collection_formats=collection_formats)
def delete_namespaced_build_config(self, name, namespace, body, **kwargs):
"""
delete a BuildConfig
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_namespaced_build_config(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the BuildConfig (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1DeleteOptions body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_namespaced_build_config_with_http_info(name, namespace, body, **kwargs)
else:
(data) = self.delete_namespaced_build_config_with_http_info(name, namespace, body, **kwargs)
return data
def delete_namespaced_build_config_with_http_info(self, name, namespace, body, **kwargs):
    """
    delete a BuildConfig

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.delete_namespaced_build_config_with_http_info(name, namespace, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str name: name of the BuildConfig (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param V1DeleteOptions body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param int grace_period_seconds: Non-negative seconds to wait before the object is deleted; zero means delete immediately.
    :param bool orphan_dependents: Deprecated: please use propagation_policy. If set, adds/removes the "orphan" finalizer.
    :param str propagation_policy: Garbage-collection policy: 'Orphan', 'Background' or 'Foreground'.
    :return: V1Status
    If the method is called asynchronously,
    returns the request thread.
    :raises TypeError: if an unsupported keyword argument is passed.
    :raises ValueError: if a required parameter is missing or None.
    """
    # Accepted keyword arguments: API parameters plus the client-internal
    # transport options.
    all_params = {
        'name', 'namespace', 'body', 'pretty', 'grace_period_seconds',
        'orphan_dependents', 'propagation_policy',
        'async_req', '_return_http_data_only', '_preload_content',
        '_request_timeout',
    }
    # Build the effective parameter dict explicitly instead of mutating the
    # mapping returned by locals() — whether such mutations are reflected is
    # a CPython implementation detail, not guaranteed by the language.
    params = {'name': name, 'namespace': namespace, 'body': body}
    for key, val in kwargs.items():
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method delete_namespaced_build_config" % key
            )
        params[key] = val
    # verify the required parameters are set (explicit None is rejected too)
    if params.get('name') is None:
        raise ValueError("Missing the required parameter `name` when calling `delete_namespaced_build_config`")
    if params.get('namespace') is None:
        raise ValueError("Missing the required parameter `namespace` when calling `delete_namespaced_build_config`")
    if params.get('body') is None:
        raise ValueError("Missing the required parameter `body` when calling `delete_namespaced_build_config`")

    collection_formats = {}

    path_params = {'name': params['name'], 'namespace': params['namespace']}

    # Translate the snake_case python names to their camelCase wire names,
    # keeping only what the caller actually supplied (order matters for the
    # generated URL, so keep the original append order).
    query_params = []
    if 'pretty' in params:
        query_params.append(('pretty', params['pretty']))
    if 'grace_period_seconds' in params:
        query_params.append(('gracePeriodSeconds', params['grace_period_seconds']))
    if 'orphan_dependents' in params:
        query_params.append(('orphanDependents', params['orphan_dependents']))
    if 'propagation_policy' in params:
        query_params.append(('propagationPolicy', params['propagation_policy']))

    header_params = {}
    form_params = []
    local_var_files = {}

    body_params = params['body']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['*/*'])

    # Authentication setting
    auth_settings = ['BearerToken', 'Oauth2AccessToken', 'Oauth2Implicit']

    return self.api_client.call_api('/apis/build.openshift.io/v1/namespaces/{namespace}/buildconfigs/{name}', 'DELETE',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='V1Status',
                                    auth_settings=auth_settings,
                                    async_req=params.get('async_req'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
def get_api_resources(self, **kwargs):
    """
    get available resources

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.get_api_resources(async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :return: V1APIResourceList
    If the method is called asynchronously,
    returns the request thread.
    """
    # This convenience wrapper always unwraps the HTTP response to its data.
    kwargs['_return_http_data_only'] = True
    # When async_req is set, the underlying call returns the request thread;
    # otherwise it returns the deserialized data. Either value is passed
    # straight through, so no branching is needed here.
    return self.get_api_resources_with_http_info(**kwargs)
def get_api_resources_with_http_info(self, **kwargs):
    """
    get available resources

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.get_api_resources_with_http_info(async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :return: V1APIResourceList
    If the method is called asynchronously,
    returns the request thread.
    :raises TypeError: if an unsupported keyword argument is passed.
    """
    # Only the client-internal transport options are accepted here;
    # this endpoint takes no API parameters.
    all_params = {'async_req', '_return_http_data_only',
                  '_preload_content', '_request_timeout'}
    # Build the parameter dict explicitly instead of mutating the mapping
    # returned by locals() — whether such mutations are reflected is a
    # CPython implementation detail, not guaranteed by the language.
    params = {}
    for key, val in kwargs.items():
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_api_resources" % key
            )
        params[key] = val

    collection_formats = {}
    path_params = {}
    query_params = []
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None

    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])

    # Authentication setting
    auth_settings = ['BearerToken', 'Oauth2AccessToken', 'Oauth2Implicit']

    return self.api_client.call_api('/apis/build.openshift.io/v1/', 'GET',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='V1APIResourceList',
                                    auth_settings=auth_settings,
                                    async_req=params.get('async_req'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
def list_build_config_for_all_namespaces(self, **kwargs):
    """
    list or watch objects of kind BuildConfig

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.list_build_config_for_all_namespaces(async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str pretty: If 'true', then the output is pretty printed.
    :param str _continue: Continue token from a previous, otherwise-identical list call.
    :param str field_selector: Restrict the returned objects by their fields. Defaults to everything.
    :param bool include_uninitialized: If true, partially initialized resources are included in the response.
    :param str label_selector: Restrict the returned objects by their labels. Defaults to everything.
    :param int limit: Maximum number of responses to return for a list call.
    :param str resource_version: Resource version to watch from, or freshness bound for a list.
    :param int timeout_seconds: Timeout for the list/watch call.
    :param bool watch: Stream add/update/remove notifications instead of a one-shot list.
    :return: V1BuildConfigList
    If the method is called asynchronously,
    returns the request thread.
    """
    # This convenience wrapper always unwraps the HTTP response to its data.
    kwargs['_return_http_data_only'] = True
    # With async_req the delegate returns the request thread, otherwise the
    # deserialized data; both are handed back unchanged, so one call suffices.
    return self.list_build_config_for_all_namespaces_with_http_info(**kwargs)
def list_build_config_for_all_namespaces_with_http_info(self, **kwargs):
    """
    list or watch objects of kind BuildConfig

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.list_build_config_for_all_namespaces_with_http_info(async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str pretty: If 'true', then the output is pretty printed.
    :param str _continue: Continue token from a previous, otherwise-identical list call.
    :param str field_selector: Restrict the returned objects by their fields. Defaults to everything.
    :param bool include_uninitialized: If true, partially initialized resources are included in the response.
    :param str label_selector: Restrict the returned objects by their labels. Defaults to everything.
    :param int limit: Maximum number of responses to return for a list call.
    :param str resource_version: Resource version to watch from, or freshness bound for a list.
    :param int timeout_seconds: Timeout for the list/watch call.
    :param bool watch: Stream add/update/remove notifications instead of a one-shot list.
    :return: V1BuildConfigList
    If the method is called asynchronously,
    returns the request thread.
    :raises TypeError: if an unsupported keyword argument is passed.
    """
    # (python_name, wire_name) pairs for the supported query parameters;
    # list order fixes the order in the generated query string.
    query_map = [
        ('pretty', 'pretty'),
        ('_continue', 'continue'),
        ('field_selector', 'fieldSelector'),
        ('include_uninitialized', 'includeUninitialized'),
        ('label_selector', 'labelSelector'),
        ('limit', 'limit'),
        ('resource_version', 'resourceVersion'),
        ('timeout_seconds', 'timeoutSeconds'),
        ('watch', 'watch'),
    ]
    all_params = {py for py, _ in query_map} | {
        'async_req', '_return_http_data_only', '_preload_content',
        '_request_timeout',
    }
    # Build the parameter dict explicitly instead of mutating the mapping
    # returned by locals() — whether such mutations are reflected is a
    # CPython implementation detail, not guaranteed by the language.
    params = {}
    for key, val in kwargs.items():
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method list_build_config_for_all_namespaces" % key
            )
        params[key] = val

    collection_formats = {}
    path_params = {}
    # Keep only the query parameters the caller actually supplied.
    query_params = [(wire, params[py]) for py, wire in query_map if py in params]
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None

    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch'])
    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['*/*'])

    # Authentication setting
    auth_settings = ['BearerToken', 'Oauth2AccessToken', 'Oauth2Implicit']

    return self.api_client.call_api('/apis/build.openshift.io/v1/buildconfigs', 'GET',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='V1BuildConfigList',
                                    auth_settings=auth_settings,
                                    async_req=params.get('async_req'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
def list_build_for_all_namespaces(self, **kwargs):
    """
    list or watch objects of kind Build

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.list_build_for_all_namespaces(async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str pretty: If 'true', then the output is pretty printed.
    :param str _continue: Continue token from a previous, otherwise-identical list call.
    :param str field_selector: Restrict the returned objects by their fields. Defaults to everything.
    :param bool include_uninitialized: If true, partially initialized resources are included in the response.
    :param str label_selector: Restrict the returned objects by their labels. Defaults to everything.
    :param int limit: Maximum number of responses to return for a list call.
    :param str resource_version: Resource version to watch from, or freshness bound for a list.
    :param int timeout_seconds: Timeout for the list/watch call.
    :param bool watch: Stream add/update/remove notifications instead of a one-shot list.
    :return: V1BuildList
    If the method is called asynchronously,
    returns the request thread.
    """
    # This convenience wrapper always unwraps the HTTP response to its data.
    kwargs['_return_http_data_only'] = True
    # With async_req the delegate returns the request thread, otherwise the
    # deserialized data; both are handed back unchanged, so one call suffices.
    return self.list_build_for_all_namespaces_with_http_info(**kwargs)
def list_build_for_all_namespaces_with_http_info(self, **kwargs):
    """
    list or watch objects of kind Build

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.list_build_for_all_namespaces_with_http_info(async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str pretty: If 'true', then the output is pretty printed.
    :param str _continue: Continue token from a previous, otherwise-identical list call.
    :param str field_selector: Restrict the returned objects by their fields. Defaults to everything.
    :param bool include_uninitialized: If true, partially initialized resources are included in the response.
    :param str label_selector: Restrict the returned objects by their labels. Defaults to everything.
    :param int limit: Maximum number of responses to return for a list call.
    :param str resource_version: Resource version to watch from, or freshness bound for a list.
    :param int timeout_seconds: Timeout for the list/watch call.
    :param bool watch: Stream add/update/remove notifications instead of a one-shot list.
    :return: V1BuildList
    If the method is called asynchronously,
    returns the request thread.
    :raises TypeError: if an unsupported keyword argument is passed.
    """
    # (python_name, wire_name) pairs for the supported query parameters;
    # list order fixes the order in the generated query string.
    query_map = [
        ('pretty', 'pretty'),
        ('_continue', 'continue'),
        ('field_selector', 'fieldSelector'),
        ('include_uninitialized', 'includeUninitialized'),
        ('label_selector', 'labelSelector'),
        ('limit', 'limit'),
        ('resource_version', 'resourceVersion'),
        ('timeout_seconds', 'timeoutSeconds'),
        ('watch', 'watch'),
    ]
    all_params = {py for py, _ in query_map} | {
        'async_req', '_return_http_data_only', '_preload_content',
        '_request_timeout',
    }
    # Build the parameter dict explicitly instead of mutating the mapping
    # returned by locals() — whether such mutations are reflected is a
    # CPython implementation detail, not guaranteed by the language.
    params = {}
    for key, val in kwargs.items():
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method list_build_for_all_namespaces" % key
            )
        params[key] = val

    collection_formats = {}
    path_params = {}
    # Keep only the query parameters the caller actually supplied.
    query_params = [(wire, params[py]) for py, wire in query_map if py in params]
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None

    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch'])
    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['*/*'])

    # Authentication setting
    auth_settings = ['BearerToken', 'Oauth2AccessToken', 'Oauth2Implicit']

    return self.api_client.call_api('/apis/build.openshift.io/v1/builds', 'GET',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='V1BuildList',
                                    auth_settings=auth_settings,
                                    async_req=params.get('async_req'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
def list_namespaced_build(self, namespace, **kwargs):
    """
    list or watch objects of kind Build

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.list_namespaced_build(namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str _continue: Continue token from a previous, otherwise-identical list call.
    :param str field_selector: Restrict the returned objects by their fields. Defaults to everything.
    :param bool include_uninitialized: If true, partially initialized resources are included in the response.
    :param str label_selector: Restrict the returned objects by their labels. Defaults to everything.
    :param int limit: Maximum number of responses to return for a list call.
    :param str resource_version: Resource version to watch from, or freshness bound for a list.
    :param int timeout_seconds: Timeout for the list/watch call.
    :param bool watch: Stream add/update/remove notifications instead of a one-shot list.
    :return: V1BuildList
    If the method is called asynchronously,
    returns the request thread.
    """
    # This convenience wrapper always unwraps the HTTP response to its data.
    kwargs['_return_http_data_only'] = True
    # With async_req the delegate returns the request thread, otherwise the
    # deserialized data; both are handed back unchanged, so one call suffices.
    return self.list_namespaced_build_with_http_info(namespace, **kwargs)
def list_namespaced_build_with_http_info(self, namespace, **kwargs):
"""
list or watch objects of kind Build
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_namespaced_build_with_http_info(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server the server will respond with a 410 ResourceExpired error indicating the client must restart their list without the continue field. This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1BuildList
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['namespace', 'pretty', '_continue', 'field_selector', 'include_uninitialized', 'label_selector', 'limit', 'resource_version', 'timeout_seconds', 'watch']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_namespaced_build" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `list_namespaced_build`")
collection_formats = {}
path_params = {}
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if '_continue' in params:
query_params.append(('continue', params['_continue']))
if 'field_selector' in params:
query_params.append(('fieldSelector', params['field_selector']))
if 'include_uninitialized' in params:
query_params.append(('includeUninitialized', params['include_uninitialized']))
if 'label_selector' in params:
query_params.append(('labelSelector', params['label_selector']))
if 'limit' in params:
query_params.append(('limit', params['limit']))
if 'resource_version' in params:
query_params.append(('resourceVersion', params['resource_version']))
if 'timeout_seconds' in params:
query_params.append(('timeoutSeconds', params['timeout_seconds']))
if 'watch' in params:
query_params.append(('watch', params['watch']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken', 'Oauth2AccessToken', 'Oauth2Implicit']
return self.api_client.call_api('/apis/build.openshift.io/v1/namespaces/{namespace}/builds', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1BuildList',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def list_namespaced_build_config(self, namespace, **kwargs):
"""
list or watch objects of kind BuildConfig
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_namespaced_build_config(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server the server will respond with a 410 ResourceExpired error indicating the client must restart their list without the continue field. This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1BuildConfigList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.list_namespaced_build_config_with_http_info(namespace, **kwargs)
else:
(data) = self.list_namespaced_build_config_with_http_info(namespace, **kwargs)
return data
def list_namespaced_build_config_with_http_info(self, namespace, **kwargs):
"""
list or watch objects of kind BuildConfig
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_namespaced_build_config_with_http_info(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server the server will respond with a 410 ResourceExpired error indicating the client must restart their list without the continue field. This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1BuildConfigList
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['namespace', 'pretty', '_continue', 'field_selector', 'include_uninitialized', 'label_selector', 'limit', 'resource_version', 'timeout_seconds', 'watch']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_namespaced_build_config" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `list_namespaced_build_config`")
collection_formats = {}
path_params = {}
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if '_continue' in params:
query_params.append(('continue', params['_continue']))
if 'field_selector' in params:
query_params.append(('fieldSelector', params['field_selector']))
if 'include_uninitialized' in params:
query_params.append(('includeUninitialized', params['include_uninitialized']))
if 'label_selector' in params:
query_params.append(('labelSelector', params['label_selector']))
if 'limit' in params:
query_params.append(('limit', params['limit']))
if 'resource_version' in params:
query_params.append(('resourceVersion', params['resource_version']))
if 'timeout_seconds' in params:
query_params.append(('timeoutSeconds', params['timeout_seconds']))
if 'watch' in params:
query_params.append(('watch', params['watch']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken', 'Oauth2AccessToken', 'Oauth2Implicit']
return self.api_client.call_api('/apis/build.openshift.io/v1/namespaces/{namespace}/buildconfigs', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1BuildConfigList',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def patch_namespaced_build(self, name, namespace, body, **kwargs):
"""
partially update the specified Build
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_build(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Build (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:return: V1Build
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.patch_namespaced_build_with_http_info(name, namespace, body, **kwargs)
else:
(data) = self.patch_namespaced_build_with_http_info(name, namespace, body, **kwargs)
return data
def patch_namespaced_build_with_http_info(self, name, namespace, body, **kwargs):
"""
partially update the specified Build
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_build_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Build (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:return: V1Build
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'body', 'pretty']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method patch_namespaced_build" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `patch_namespaced_build`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `patch_namespaced_build`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `patch_namespaced_build`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json'])
# Authentication setting
auth_settings = ['BearerToken', 'Oauth2AccessToken', 'Oauth2Implicit']
return self.api_client.call_api('/apis/build.openshift.io/v1/namespaces/{namespace}/builds/{name}', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Build',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def patch_namespaced_build_config(self, name, namespace, body, **kwargs):
"""
partially update the specified BuildConfig
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_build_config(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the BuildConfig (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:return: V1BuildConfig
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.patch_namespaced_build_config_with_http_info(name, namespace, body, **kwargs)
else:
(data) = self.patch_namespaced_build_config_with_http_info(name, namespace, body, **kwargs)
return data
def patch_namespaced_build_config_with_http_info(self, name, namespace, body, **kwargs):
"""
partially update the specified BuildConfig
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_build_config_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the BuildConfig (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:return: V1BuildConfig
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'body', 'pretty']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method patch_namespaced_build_config" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `patch_namespaced_build_config`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `patch_namespaced_build_config`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `patch_namespaced_build_config`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json'])
# Authentication setting
auth_settings = ['BearerToken', 'Oauth2AccessToken', 'Oauth2Implicit']
return self.api_client.call_api('/apis/build.openshift.io/v1/namespaces/{namespace}/buildconfigs/{name}', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1BuildConfig',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def read_namespaced_build(self, name, namespace, **kwargs):
"""
read the specified Build
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_build(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Build (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'.
:param bool export: Should this value be exported. Export strips fields that a user can not specify.
:return: V1Build
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.read_namespaced_build_with_http_info(name, namespace, **kwargs)
else:
(data) = self.read_namespaced_build_with_http_info(name, namespace, **kwargs)
return data
def read_namespaced_build_with_http_info(self, name, namespace, **kwargs):
"""
read the specified Build
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_build_with_http_info(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Build (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'.
:param bool export: Should this value be exported. Export strips fields that a user can not specify.
:return: V1Build
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'pretty', 'exact', 'export']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method read_namespaced_build" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `read_namespaced_build`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `read_namespaced_build`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'exact' in params:
query_params.append(('exact', params['exact']))
if 'export' in params:
query_params.append(('export', params['export']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken', 'Oauth2AccessToken', 'Oauth2Implicit']
return self.api_client.call_api('/apis/build.openshift.io/v1/namespaces/{namespace}/builds/{name}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Build',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def read_namespaced_build_config(self, name, namespace, **kwargs):
"""
read the specified BuildConfig
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_build_config(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the BuildConfig (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'.
:param bool export: Should this value be exported. Export strips fields that a user can not specify.
:return: V1BuildConfig
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.read_namespaced_build_config_with_http_info(name, namespace, **kwargs)
else:
(data) = self.read_namespaced_build_config_with_http_info(name, namespace, **kwargs)
return data
def read_namespaced_build_config_with_http_info(self, name, namespace, **kwargs):
    """
    read the specified BuildConfig

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.read_namespaced_build_config_with_http_info(name, namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str name: name of the BuildConfig (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param bool exact: Should the export be exact.  Exact export maintains cluster-specific fields like 'Namespace'.
    :param bool export: Should this value be exported. Export strips fields that a user can not specify.
    :return: V1BuildConfig
             If the method is called asynchronously,
             returns the request thread.
    """
    # Keyword arguments accepted by this endpoint; anything else is
    # rejected with a TypeError below.
    all_params = ['name', 'namespace', 'pretty', 'exact', 'export']
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # locals() captures self, name, namespace and the kwargs dict; the
    # loop validates kwargs and flattens them into this params mapping.
    params = locals()
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method read_namespaced_build_config" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'name' is set
    if ('name' not in params) or (params['name'] is None):
        raise ValueError("Missing the required parameter `name` when calling `read_namespaced_build_config`")
    # verify the required parameter 'namespace' is set
    if ('namespace' not in params) or (params['namespace'] is None):
        raise ValueError("Missing the required parameter `namespace` when calling `read_namespaced_build_config`")

    collection_formats = {}

    # Values substituted into the {namespace}/{name} URL template.
    path_params = {}
    if 'name' in params:
        path_params['name'] = params['name']
    if 'namespace' in params:
        path_params['namespace'] = params['namespace']

    # Optional query-string parameters.
    query_params = []
    if 'pretty' in params:
        query_params.append(('pretty', params['pretty']))
    if 'exact' in params:
        query_params.append(('exact', params['exact']))
    if 'export' in params:
        query_params.append(('export', params['export']))

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['*/*'])

    # Authentication setting
    auth_settings = ['BearerToken', 'Oauth2AccessToken', 'Oauth2Implicit']

    return self.api_client.call_api('/apis/build.openshift.io/v1/namespaces/{namespace}/buildconfigs/{name}', 'GET',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='V1BuildConfig',
                                    auth_settings=auth_settings,
                                    async_req=params.get('async_req'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
def read_namespaced_build_log(self, name, namespace, **kwargs):
    """
    read log of the specified Build

    Blocks until the server responds by default; pass async_req=True to
    receive a worker thread instead, whose ``get()`` yields the result.
    >>> thread = api.read_namespaced_build_log(name, namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str name: name of the BuildLog (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str container: container for which to stream logs. Defaults to only container if there is one container in the pod.
    :param bool follow: follow if true indicates that the build log should be streamed until the build terminates.
    :param int limit_bytes: limitBytes, If set, is the number of bytes to read from the server before terminating the log output. This may not display a complete final line of logging, and may return slightly more or slightly less than the specified limit.
    :param bool nowait: noWait if true causes the call to return immediately even if the build is not available yet. Otherwise the server will wait until the build has started.
    :param str pretty: If 'true', then the output is pretty printed.
    :param bool previous: previous returns previous build logs. Defaults to false.
    :param int since_seconds: sinceSeconds is a relative time in seconds before the current time from which to show logs. Only one of sinceSeconds or sinceTime may be specified.
    :param int tail_lines: tailLines, If set, is the number of lines from the end of the logs to show.
    :param bool timestamps: timestamps, If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line of log output. Defaults to false.
    :param int version: version of the build for which to view logs.
    :return: V1BuildLog, or the request thread when called asynchronously.
    """
    # Return just the deserialized payload, not (data, status, headers).
    kwargs['_return_http_data_only'] = True
    # The HTTP-info variant already returns the data when synchronous and
    # the request thread when async_req=True, so forward its result as-is.
    return self.read_namespaced_build_log_with_http_info(
        name, namespace, **kwargs)
def read_namespaced_build_log_with_http_info(self, name, namespace, **kwargs):
    """
    read log of the specified Build

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.read_namespaced_build_log_with_http_info(name, namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str name: name of the BuildLog (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str container: container for which to stream logs. Defaults to only container if there is one container in the pod.
    :param bool follow: follow if true indicates that the build log should be streamed until the build terminates.
    :param int limit_bytes: limitBytes, If set, is the number of bytes to read from the server before terminating the log output. This may not display a complete final line of logging, and may return slightly more or slightly less than the specified limit.
    :param bool nowait: noWait if true causes the call to return immediately even if the build is not available yet. Otherwise the server will wait until the build has started.
    :param str pretty: If 'true', then the output is pretty printed.
    :param bool previous: previous returns previous build logs. Defaults to false.
    :param int since_seconds: sinceSeconds is a relative time in seconds before the current time from which to show logs. If this value precedes the time a pod was started, only logs since the pod start will be returned. If this value is in the future, no logs will be returned. Only one of sinceSeconds or sinceTime may be specified.
    :param int tail_lines: tailLines, If set, is the number of lines from the end of the logs to show. If not specified, logs are shown from the creation of the container or sinceSeconds or sinceTime
    :param bool timestamps: timestamps, If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line of log output. Defaults to false.
    :param int version: version of the build for which to view logs.
    :return: V1BuildLog
             If the method is called asynchronously,
             returns the request thread.
    """
    # Keyword arguments accepted by this endpoint; anything else is
    # rejected with a TypeError below.
    all_params = ['name', 'namespace', 'container', 'follow', 'limit_bytes', 'nowait', 'pretty', 'previous', 'since_seconds', 'tail_lines', 'timestamps', 'version']
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # locals() captures self, name, namespace and the kwargs dict; the
    # loop validates kwargs and flattens them into this params mapping.
    params = locals()
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method read_namespaced_build_log" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'name' is set
    if ('name' not in params) or (params['name'] is None):
        raise ValueError("Missing the required parameter `name` when calling `read_namespaced_build_log`")
    # verify the required parameter 'namespace' is set
    if ('namespace' not in params) or (params['namespace'] is None):
        raise ValueError("Missing the required parameter `namespace` when calling `read_namespaced_build_log`")

    collection_formats = {}

    # Values substituted into the {namespace}/{name} URL template.
    path_params = {}
    if 'name' in params:
        path_params['name'] = params['name']
    if 'namespace' in params:
        path_params['namespace'] = params['namespace']

    # Optional query-string parameters; note the snake_case -> camelCase
    # renaming for the wire format (limit_bytes -> limitBytes, etc.).
    query_params = []
    if 'container' in params:
        query_params.append(('container', params['container']))
    if 'follow' in params:
        query_params.append(('follow', params['follow']))
    if 'limit_bytes' in params:
        query_params.append(('limitBytes', params['limit_bytes']))
    if 'nowait' in params:
        query_params.append(('nowait', params['nowait']))
    if 'pretty' in params:
        query_params.append(('pretty', params['pretty']))
    if 'previous' in params:
        query_params.append(('previous', params['previous']))
    if 'since_seconds' in params:
        query_params.append(('sinceSeconds', params['since_seconds']))
    if 'tail_lines' in params:
        query_params.append(('tailLines', params['tail_lines']))
    if 'timestamps' in params:
        query_params.append(('timestamps', params['timestamps']))
    if 'version' in params:
        query_params.append(('version', params['version']))

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['*/*'])

    # Authentication setting
    auth_settings = ['BearerToken', 'Oauth2AccessToken', 'Oauth2Implicit']

    return self.api_client.call_api('/apis/build.openshift.io/v1/namespaces/{namespace}/builds/{name}/log', 'GET',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='V1BuildLog',
                                    auth_settings=auth_settings,
                                    async_req=params.get('async_req'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
def replace_namespaced_build(self, name, namespace, body, **kwargs):
    """
    replace the specified Build

    Blocks until the server responds by default; pass async_req=True to
    receive a worker thread instead, whose ``get()`` yields the result.
    >>> thread = api.replace_namespaced_build(name, namespace, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str name: name of the Build (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param V1Build body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :return: V1Build, or the request thread when called asynchronously.
    """
    # Return just the deserialized payload, not (data, status, headers).
    kwargs['_return_http_data_only'] = True
    # The HTTP-info variant already returns the data when synchronous and
    # the request thread when async_req=True, so forward its result as-is.
    return self.replace_namespaced_build_with_http_info(
        name, namespace, body, **kwargs)
def replace_namespaced_build_with_http_info(self, name, namespace, body, **kwargs):
    """
    replace the specified Build

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.replace_namespaced_build_with_http_info(name, namespace, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str name: name of the Build (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param V1Build body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :return: V1Build
             If the method is called asynchronously,
             returns the request thread.
    """
    # Keyword arguments accepted by this endpoint; anything else is
    # rejected with a TypeError below.
    all_params = ['name', 'namespace', 'body', 'pretty']
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # locals() captures self, name, namespace, body and the kwargs dict;
    # the loop validates kwargs and flattens them into this mapping.
    params = locals()
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method replace_namespaced_build" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'name' is set
    if ('name' not in params) or (params['name'] is None):
        raise ValueError("Missing the required parameter `name` when calling `replace_namespaced_build`")
    # verify the required parameter 'namespace' is set
    if ('namespace' not in params) or (params['namespace'] is None):
        raise ValueError("Missing the required parameter `namespace` when calling `replace_namespaced_build`")
    # verify the required parameter 'body' is set
    if ('body' not in params) or (params['body'] is None):
        raise ValueError("Missing the required parameter `body` when calling `replace_namespaced_build`")

    collection_formats = {}

    # Values substituted into the {namespace}/{name} URL template.
    path_params = {}
    if 'name' in params:
        path_params['name'] = params['name']
    if 'namespace' in params:
        path_params['namespace'] = params['namespace']

    # Optional query-string parameters.
    query_params = []
    if 'pretty' in params:
        query_params.append(('pretty', params['pretty']))

    header_params = {}

    form_params = []
    local_var_files = {}

    # The replacement object is serialized as the request body.
    body_params = None
    if 'body' in params:
        body_params = params['body']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['*/*'])

    # Authentication setting
    auth_settings = ['BearerToken', 'Oauth2AccessToken', 'Oauth2Implicit']

    return self.api_client.call_api('/apis/build.openshift.io/v1/namespaces/{namespace}/builds/{name}', 'PUT',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='V1Build',
                                    auth_settings=auth_settings,
                                    async_req=params.get('async_req'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
def replace_namespaced_build_config(self, name, namespace, body, **kwargs):
    """
    replace the specified BuildConfig

    Blocks until the server responds by default; pass async_req=True to
    receive a worker thread instead, whose ``get()`` yields the result.
    >>> thread = api.replace_namespaced_build_config(name, namespace, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str name: name of the BuildConfig (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param V1BuildConfig body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :return: V1BuildConfig, or the request thread when called asynchronously.
    """
    # Return just the deserialized payload, not (data, status, headers).
    kwargs['_return_http_data_only'] = True
    # The HTTP-info variant already returns the data when synchronous and
    # the request thread when async_req=True, so forward its result as-is.
    return self.replace_namespaced_build_config_with_http_info(
        name, namespace, body, **kwargs)
def replace_namespaced_build_config_with_http_info(self, name, namespace, body, **kwargs):
    """
    replace the specified BuildConfig

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.replace_namespaced_build_config_with_http_info(name, namespace, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str name: name of the BuildConfig (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param V1BuildConfig body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :return: V1BuildConfig
             If the method is called asynchronously,
             returns the request thread.
    """
    # Keyword arguments accepted by this endpoint; anything else is
    # rejected with a TypeError below.
    all_params = ['name', 'namespace', 'body', 'pretty']
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # locals() captures self, name, namespace, body and the kwargs dict;
    # the loop validates kwargs and flattens them into this mapping.
    params = locals()
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method replace_namespaced_build_config" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'name' is set
    if ('name' not in params) or (params['name'] is None):
        raise ValueError("Missing the required parameter `name` when calling `replace_namespaced_build_config`")
    # verify the required parameter 'namespace' is set
    if ('namespace' not in params) or (params['namespace'] is None):
        raise ValueError("Missing the required parameter `namespace` when calling `replace_namespaced_build_config`")
    # verify the required parameter 'body' is set
    if ('body' not in params) or (params['body'] is None):
        raise ValueError("Missing the required parameter `body` when calling `replace_namespaced_build_config`")

    collection_formats = {}

    # Values substituted into the {namespace}/{name} URL template.
    path_params = {}
    if 'name' in params:
        path_params['name'] = params['name']
    if 'namespace' in params:
        path_params['namespace'] = params['namespace']

    # Optional query-string parameters.
    query_params = []
    if 'pretty' in params:
        query_params.append(('pretty', params['pretty']))

    header_params = {}

    form_params = []
    local_var_files = {}

    # The replacement object is serialized as the request body.
    body_params = None
    if 'body' in params:
        body_params = params['body']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['*/*'])

    # Authentication setting
    auth_settings = ['BearerToken', 'Oauth2AccessToken', 'Oauth2Implicit']

    return self.api_client.call_api('/apis/build.openshift.io/v1/namespaces/{namespace}/buildconfigs/{name}', 'PUT',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='V1BuildConfig',
                                    auth_settings=auth_settings,
                                    async_req=params.get('async_req'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
def replace_namespaced_build_details(self, name, namespace, body, **kwargs):
    """
    replace details of the specified Build

    Blocks until the server responds by default; pass async_req=True to
    receive a worker thread instead, whose ``get()`` yields the result.
    >>> thread = api.replace_namespaced_build_details(name, namespace, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str name: name of the Build (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param V1Build body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :return: V1Build, or the request thread when called asynchronously.
    """
    # Return just the deserialized payload, not (data, status, headers).
    kwargs['_return_http_data_only'] = True
    # The HTTP-info variant already returns the data when synchronous and
    # the request thread when async_req=True, so forward its result as-is.
    return self.replace_namespaced_build_details_with_http_info(
        name, namespace, body, **kwargs)
def replace_namespaced_build_details_with_http_info(self, name, namespace, body, **kwargs):
    """
    replace details of the specified Build

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.replace_namespaced_build_details_with_http_info(name, namespace, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str name: name of the Build (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param V1Build body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :return: V1Build
             If the method is called asynchronously,
             returns the request thread.
    """
    # Keyword arguments accepted by this endpoint; anything else is
    # rejected with a TypeError below.
    all_params = ['name', 'namespace', 'body', 'pretty']
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # locals() captures self, name, namespace, body and the kwargs dict;
    # the loop validates kwargs and flattens them into this mapping.
    params = locals()
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method replace_namespaced_build_details" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'name' is set
    if ('name' not in params) or (params['name'] is None):
        raise ValueError("Missing the required parameter `name` when calling `replace_namespaced_build_details`")
    # verify the required parameter 'namespace' is set
    if ('namespace' not in params) or (params['namespace'] is None):
        raise ValueError("Missing the required parameter `namespace` when calling `replace_namespaced_build_details`")
    # verify the required parameter 'body' is set
    if ('body' not in params) or (params['body'] is None):
        raise ValueError("Missing the required parameter `body` when calling `replace_namespaced_build_details`")

    collection_formats = {}

    # Values substituted into the {namespace}/{name} URL template.
    path_params = {}
    if 'name' in params:
        path_params['name'] = params['name']
    if 'namespace' in params:
        path_params['namespace'] = params['namespace']

    # Optional query-string parameters.
    query_params = []
    if 'pretty' in params:
        query_params.append(('pretty', params['pretty']))

    header_params = {}

    form_params = []
    local_var_files = {}

    # The replacement object is serialized as the request body.
    body_params = None
    if 'body' in params:
        body_params = params['body']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['*/*'])

    # Authentication setting
    auth_settings = ['BearerToken', 'Oauth2AccessToken', 'Oauth2Implicit']

    return self.api_client.call_api('/apis/build.openshift.io/v1/namespaces/{namespace}/builds/{name}/details', 'PUT',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='V1Build',
                                    auth_settings=auth_settings,
                                    async_req=params.get('async_req'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
| 61.470246 | 3,325 | 0.643066 |
256135f3261bda49e4b410a35a4a8f8355d98ad8 | 722 | py | Python | rpi/tcp_server.py | nicolasGibaud7/App-domotic | aee4d80aa05a39388efd92ab9ecf9b5dd1460322 | [
"MIT"
] | 4 | 2020-01-01T15:22:55.000Z | 2020-01-10T09:34:26.000Z | rpi/tcp_server.py | nicolasGibaud7/App-domotic | aee4d80aa05a39388efd92ab9ecf9b5dd1460322 | [
"MIT"
] | 2 | 2020-01-01T15:16:02.000Z | 2020-01-02T13:56:29.000Z | rpi/tcp_server.py | nicolasGibaud7/App-domotic | aee4d80aa05a39388efd92ab9ecf9b5dd1460322 | [
"MIT"
] | null | null | null | import socket
import sys
IP_ADDR = "192.168.1.19"
TCP_PORT = 10000

if __name__ == "__main__":
    # Create TCP socket
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # Associate the socket with the server address
    server_address = (IP_ADDR, TCP_PORT)
    print("Start TCP server at address {} on port {} ".format(server_address[0], server_address[1]))
    sock.bind(server_address)
    # Mode TCP server
    sock.listen(1)
    while True:
        # Accept one client at a time and echo what it sends to stdout.
        connection, client_address = sock.accept()
        print("Connection from {} ".format(client_address))
        try:
            while True:
                data = connection.recv(16)
                # recv() returns b'' once the peer closes the connection;
                # the original looped forever here and never reached the
                # close() call placed in an unreachable while/else clause.
                if not data:
                    break
                print("Data : %s" % data)
        finally:
            # Always release the per-client socket, even on error.
            connection.close()
| 26.740741 | 100 | 0.631579 |
25630ec4579c4b69b7aa7ebcd6033338a4cfed43 | 269 | py | Python | pandas/pandasReading02.py | slowy07/pythonApps | 22f9766291dbccd8185035745950c5ee4ebd6a3e | [
"MIT"
] | 10 | 2020-10-09T11:05:18.000Z | 2022-02-13T03:22:10.000Z | pandas/pandasReading02.py | khairanabila/pythonApps | f90b8823f939b98f7bf1dea7ed35fe6e22e2f730 | [
"MIT"
] | null | null | null | pandas/pandasReading02.py | khairanabila/pythonApps | f90b8823f939b98f7bf1dea7ed35fe6e22e2f730 | [
"MIT"
] | 6 | 2020-11-26T12:49:43.000Z | 2022-03-06T06:46:43.000Z | import pandas as pd
# Load the data set; the CSV is expected to have a 'country_name'
# column (used below) — TODO confirm against the actual file.
countryInformation = pd.read_csv('resource/countryInformation.csv')
#looping row
#for index,row in countryInformation.iterrows():
    #print(index, row['country_name'])
print(countryInformation.loc[countryInformation['country_name'] == 'india']) | 26.9 | 76 | 0.773234 |
256327adbdadb9819f932122ab31855bfe822e1d | 2,011 | py | Python | List Comprehensions/examples.py | mervatkheir/kite-python-blog-post-code | 9a331e5d327cd27c6ecd72926f3e74afd252efb5 | [
"MIT"
] | 238 | 2018-10-10T18:50:40.000Z | 2022-02-09T21:26:24.000Z | List Comprehensions/examples.py | mrrizal/kite-python-blog-post-code | 597f2d75b2ad5dda97e9b19f6e9c7195642e1739 | [
"MIT"
] | 38 | 2019-12-04T22:42:45.000Z | 2022-03-12T00:04:57.000Z | List Comprehensions/examples.py | mrrizal/kite-python-blog-post-code | 597f2d75b2ad5dda97e9b19f6e9c7195642e1739 | [
"MIT"
] | 154 | 2018-11-11T22:48:09.000Z | 2022-03-22T07:12:18.000Z | """
List Comprehensions Examples
"""
my_list = []
# my_list.append()
# my_list.extend()

"""
When to use ListComps
"""
# Sample contact records used by the examples below.
phones = [
    {
        'number': '111-111-1111',
        'label': 'phone',
        'extension': '1234',
    },
    {
        'number': '222-222-2222',
        'label': 'mobile',
        'extension': None,
    }
]

# Building a list with an explicit loop ...
my_phone_list = []
for phone in phones:
    my_phone_list.append(phone['number'])

# List Comprehension
# ... versus the equivalent one-line comprehension.
[phone['number'] for phone in phones]

"""
Advanced Usage
"""
# Build an explicit nested list
table = [
    [1, 2, 3],
    [1, 2, 3],
    [1, 2, 3],
]

fields = ['x', 'y', 'z']
rows = [1, 2, 3]

# Nested loops: one row of field names per entry in `rows`.
table = []
for r in rows:
    row = []
    for field in fields:
        row.append(field)
    table.append(row)

[field for field in fields]
[row for row in rows]

# The same table built with a nested comprehension.
table = [[field for field in fields] for row in rows]

"""
Dictionary Comprehensions
"""
[{str(item): item} for item in [1, 2, 3, ]]

dict1 = {'a': 1, 'b': 2, 'c': 3, 'd': 4, 'e': 5}
# Double every value while keeping the keys.
double_dict1 = {k: v * 2 for (k, v) in dict1.items()}

dict_map = {
    'apple' : 1,
    'cherry': 2,
    'earwax': 3,
}
# Invert the mapping: values become keys and vice versa.
{v:k for (k, v) in dict_map.items()}

items = dict_map.items()

"""
Logical Comparisons
"""
values = [1,2,3]
# Filter with an `if` clause inside the comprehension.
[i for i in values if i < 3]
[k for k, v in dict_map.items() if v < 3]

"""
Performance, Spongecase Example
"""
# Upper-case every other letter using an explicit loop.
original_string = 'hello world'
spongecase_letters = []
for index, letter in enumerate(original_string):
    if index % 2 == 1:
        spongecase_letters.append(letter.upper())
    else:
        spongecase_letters.append(letter)

spongecase_string = ''.join(spongecase_letters)
# hElLo wOrLd
def spongecase(index, letter):
    """Return *letter* upper-cased when *index* is odd, unchanged otherwise."""
    return letter.upper() if index % 2 == 1 else letter
# Same transformation as above, with the per-letter rule factored into
# the spongecase() helper.
original_string = 'hello world'
spongecase_letters = []
for index, letter in enumerate(original_string):
    transformed_letter = spongecase(index, letter)
    spongecase_letters.append(transformed_letter)

spongecase_string = ''.join(spongecase_letters)
# hElLo wOrLd
| 15.960317 | 53 | 0.604674 |
2564c2f1d6dd5e44be1def881988d5a419b3038e | 2,549 | py | Python | ImageDenoising/network/denoising.py | jiunbae/ITE4053 | 873d53493b7588f67406e0e6ed0e74e5e3f957bc | [
"MIT"
] | 5 | 2019-06-20T09:54:04.000Z | 2021-06-15T04:22:49.000Z | ImageDenoising/network/denoising.py | jiunbae/ITE4053 | 873d53493b7588f67406e0e6ed0e74e5e3f957bc | [
"MIT"
] | null | null | null | ImageDenoising/network/denoising.py | jiunbae/ITE4053 | 873d53493b7588f67406e0e6ed0e74e5e3f957bc | [
"MIT"
] | 1 | 2019-04-19T04:52:34.000Z | 2019-04-19T04:52:34.000Z | import tensorflow as tf
from tensorflow.keras import backend as K
from tensorflow.keras import models as KM
from tensorflow.keras import layers as KL
class DenoisingNetwork(object):
    """Factory for a five-layer convolutional image-denoising model.

    ``DenoisingNetwork(mode)`` returns a ``keras.Model`` directly: the
    class overrides ``__new__``, so no ``DenoisingNetwork`` instance is
    ever created.  Supported modes:

    * ``'base'`` -- plain stack of five 3x3 convolutions with ReLU.
    * ``'skip'`` -- same stack plus a global residual: the network output
      is averaged with the input image.
    * ``'bn'``   -- like ``'skip'`` but with batch normalization after
      every convolution.
    """

    # Filters per convolution; the final layer maps back to 3 channels.
    _FILTERS = (64, 64, 64, 64, 3)

    def __new__(cls, mode: str) \
            -> KM.Model:
        """Build and return the denoising model for the given *mode*.

        :param mode: one of ``'base'``, ``'skip'`` or ``'bn'``.
        :return: an uncompiled ``keras.Model`` named ``'denoising'``.
        """
        assert mode in ['base', 'skip', 'bn']

        inputs = KL.Input(shape=[None, None, 3],
                          name="input_image")

        # The five conv blocks differed only in filter count and layer
        # name, so build them in a loop instead of five copied stanzas.
        # Layer names ("layer1".."layer5") match the original network so
        # saved weights remain loadable.
        x = inputs
        for index, filters in enumerate(cls._FILTERS, start=1):
            x = KL.Conv2D(filters, (3, 3), padding="SAME",
                          kernel_initializer='random_uniform',
                          bias_initializer='zeros',
                          name="layer%d" % index)(x)
            if mode == 'bn':
                x = KL.BatchNormalization()(x)
            x = KL.ReLU()(x)

        # Global residual connection for the 'skip' and 'bn' variants.
        if mode == 'skip' or mode == 'bn':
            x = KL.average([x, inputs])

        return KM.Model(inputs=inputs, outputs=x,
                        name='denoising')

    @staticmethod
    def loss(y_true: tf.Tensor, y_pred: tf.Tensor) \
            -> tf.Tensor:
        """Mean squared error between prediction and target."""
        return K.mean(K.square(y_pred - y_true))

    @classmethod
    def metric(cls, y_true: tf.Tensor, y_pred: tf.Tensor) \
            -> tf.Tensor:
        """Peak signal-to-noise ratio; images are assumed in [0, 1]."""
        return tf.image.psnr(y_true, y_pred, max_val=1.)

    @classmethod
    def compile(cls, model, optimizer, loss, metric) \
            -> None:
        """Compile *model* in place with the given optimizer/loss/metric."""
        model.compile(optimizer=optimizer,
                      loss=loss,
                      metrics=[metric])
| 33.986667 | 59 | 0.488035 |
256670e4e127db5ef91b0b78cc07a367f32674c1 | 884 | py | Python | utils/timer.py | YorkSu/hat | b646b6689f3d81c985ed13f3d5c23b6c717fd07d | [
"Apache-2.0"
] | 1 | 2019-04-10T04:49:30.000Z | 2019-04-10T04:49:30.000Z | utils/timer.py | Suger131/HAT-tf2.0 | b646b6689f3d81c985ed13f3d5c23b6c717fd07d | [
"Apache-2.0"
] | null | null | null | utils/timer.py | Suger131/HAT-tf2.0 | b646b6689f3d81c985ed13f3d5c23b6c717fd07d | [
"Apache-2.0"
] | 1 | 2019-06-14T05:53:42.000Z | 2019-06-14T05:53:42.000Z | import time
class Timer(object):
    """Wall-clock timer that reports start/stop/cost via an injected logger."""

    # Format shared by the `time` property and `mktime` (was duplicated inline).
    TIME_FORMAT = '%Y-%m-%d-%H-%M-%S'

    def __init__(self, Log, *args, **kwargs):
        """Store the logging callable.

        :param Log: callable invoked as ``Log(value, _T=label)``.

        Extra positional/keyword arguments are accepted for backward
        compatibility but ignored: the old code forwarded them to
        ``object.__init__`` (a TypeError under Python 3) and returned
        its result, which ``__init__`` must not do.
        """
        self.Log = Log
        super().__init__()

    @property
    def time(self):
        """Current local time as a 'YYYY-mm-dd-HH-MM-SS' string."""
        return time.strftime(self.TIME_FORMAT, time.localtime())

    def mktime(self, timex):
        """Convert a string produced by :attr:`time` back to epoch seconds."""
        return time.mktime(time.strptime(timex, self.TIME_FORMAT))

    def timer(self, text, func, *args, **kwargs):
        """Run ``func(*args, **kwargs)``, logging start/stop/cost times.

        :param text: label used in log lines and result-dict keys.
        :param func: callable to execute and time.
        :return: ``(time_dict, result)`` where ``time_dict`` holds the
            '<TEXT>_START_TIME', '<TEXT>_STOP_TIME' (strings) and
            '<TEXT>_COST_TIME' (seconds, float) entries, and ``result``
            is the return value of *func*.
        """
        start_time = self.time
        self.Log(start_time, _T=f'{text} Start:')
        result = func(*args, **kwargs)
        stop_time = self.time
        self.Log(stop_time, _T=f'{text} Stop:')
        # String timestamps have one-second resolution, so the cost is a
        # whole number of seconds.
        cost_time = self.mktime(stop_time) - self.mktime(start_time)
        self.Log(cost_time, _T=f'{text} cost time (second):')
        time_dict = {f'{text}_start_time'.upper(): start_time,
                     f'{text}_stop_time'.upper(): stop_time,
                     f'{text}_cost_time'.upper(): cost_time}
        return time_dict, result
2568aee40cfce9e5a8b21215e284c31ef6b2bd2a | 17,464 | py | Python | pySPACE/missions/nodes/data_selection/instance_selection.py | pyspace/pyspace | 763e62c0e7fa7cfcb19ccee1a0333c4f7e68ae62 | [
"BSD-3-Clause"
] | 32 | 2015-02-20T09:03:09.000Z | 2022-02-25T22:32:52.000Z | pySPACE/missions/nodes/data_selection/instance_selection.py | pyspace/pyspace | 763e62c0e7fa7cfcb19ccee1a0333c4f7e68ae62 | [
"BSD-3-Clause"
] | 5 | 2015-05-18T15:08:40.000Z | 2020-03-05T19:18:01.000Z | pySPACE/missions/nodes/data_selection/instance_selection.py | pyspace/pyspace | 763e62c0e7fa7cfcb19ccee1a0333c4f7e68ae62 | [
"BSD-3-Clause"
] | 18 | 2015-09-28T07:16:38.000Z | 2021-01-20T13:52:19.000Z | """ Select only a part of the instances
.. todo: group instance selectors
"""
import random
import logging
from collections import defaultdict
from pySPACE.missions.nodes.base_node import BaseNode
from pySPACE.tools.memoize_generator import MemoizeGenerator
class InstanceSelectionNode(BaseNode):
    """Retain only a certain percentage of the instances

    The node InstanceSelectionNode forwards only
    *train_percentage_selected* percent of the training instances passed to
    him to the successor node and only
    *test_percentage_selected* percent of the test instances. The forwarded
    instances are selected randomly but so that the class ratio is kept.

    If *reduce_class* is used, only the chosen class is reduced, without
    keeping the class ratio. So the total amount of reduced data does not
    match the percentage values.

    **Parameters**

        :train_percentage_selected:
            The percentage of training instances which
            is forwarded to successor node.

            (*optional, default: 100*)

        :test_percentage_selected:
            The percentage of test instances which
            is forwarded to successor node.

            (*optional, default: 100*)

        :reduce_class:
            If you want only to reduce one class, choose this parameter
            otherwise, both classes are reduced in a balanced fashion.

            (*optional, default: False*)

        :num_train_instances:
            Instead of specifying *train_percentage_selected*, this option
            allows to specify the absolute number of training instances of
            class *class_label* that should be in the training set.
            All instances that occur until *num_train_instances* are found are
            used for training.

            (*optional, default: None*)

        :class_label:
            If *num_train_instances*-option is used, this string determines the
            class of which training examples are count.

            (*optional, default: 'Target'*)

        :random:
            If *False*, the order of the data is retained. I.e. the first X
            percent or number of train instances are used for training. If
            *True*, the training data is sampled randomly without taking into
            consideration the data's order.

            (*optional, default: True*)

    **Exemplary call**

    .. code-block:: yaml

        -
            node : InstanceSelection
            parameters :
                train_percentage_selected : 80
                test_percentage_selected : 100
                reduce_class : Standard

    :Author: Jan Hendrik Metzen (jhm@informatik.uni-bremen.de)
    :Created: 2010/03/31
    """
    def __init__(self, train_percentage_selected=100,
                 test_percentage_selected=100, reduce_class=False,
                 num_train_instances=None, class_label='Target', random=True,
                 **kwargs):
        super(InstanceSelectionNode, self).__init__(**kwargs)
        self.set_permanent_attributes(
            train_percentage_selected=train_percentage_selected,
            test_percentage_selected=test_percentage_selected,
            reduce_class=reduce_class,
            num_train_instances=num_train_instances,
            class_label=class_label, random=random)
    def get_num_data(self, iterator):
        """ Return a list of instances that contain *num_train_instances* many
        instances of class *class_label* and all other instances that occur
        up to this point
        """
        counter = 0
        retained_instances = []
        while counter < self.num_train_instances:
            try:
                instance, label = iterator.next()
            except: #TODO: give some warning to user
                # Iterator exhausted before enough target instances were seen.
                break
            else:
                if label == self.class_label:
                    counter += 1
                retained_instances.append((instance,label))
        return retained_instances
    def request_data_for_training(self, use_test_data):
        """ Returns data for training of subsequent nodes

        .. todo:: to document

        .. note::
              This method works differently in InstanceSelectionNode
              than in other nodes: Only *percentage_selected* of the available
              data are returned.
        """
        assert(self.input_node is not None)
        if self.train_percentage_selected > 100:
            self._log("Train percentage of %f reduced to 100." %
                      self.train_percentage_selected,
                      level=logging.ERROR)
            self.train_percentage_selected = 100
        self._log("Data for training is requested.", level=logging.DEBUG)
        if self.train_percentage_selected == 100 and \
                self.num_train_instances is None:
            # Nothing to subsample -- forward the data untouched.
            return super(InstanceSelectionNode, self).request_data_for_training(
                use_test_data)
        # If we haven't computed the data for training yet
        if self.data_for_training is None:
            self._log("Producing data for training.", level=logging.DEBUG)
            # Train this node
            self.train_sweep(use_test_data)
            if not self.num_train_instances is None and self.random == False:
                # Deterministic prefix: take instances in their given order.
                retained_instances = self.get_num_data(
                    self.input_node.request_data_for_training(use_test_data))
            else:
                # Store all data
                if self.num_train_instances is None:
                    all_instances = defaultdict(list)
                    for instance, label in self.input_node.request_data_for_training(
                            use_test_data):
                        all_instances[label].append(instance)
                else:
                    # FIX: original called the misspelled
                    # `request_data_for_traning`, which raised an
                    # AttributeError on this code path.
                    all_instances = list(
                        self.input_node.request_data_for_training(use_test_data))
                # FIX: `r` was only created when self.random was True, but it
                # was also used on the percentage path below -- define it
                # unconditionally (seeded for reproducibility per run).
                r = random.Random(self.run_number)
                if not self.num_train_instances is None and self.random:
                    r.shuffle(all_instances)
                    retained_instances = self.get_num_data(
                        all_instances.__iter__())
                else:
                    retained_instances = []
                    self._log("Keeping only %s percent of training data" %
                              self.train_percentage_selected,
                              level=logging.DEBUG)
                    # Retain only *percentage_selected* percent of the data
                    for label, instances in all_instances.iteritems():
                        # enable random choice of samples; FIX: only shuffle
                        # when self.random is set -- the docstring promises
                        # that random=False retains the data order.
                        if self.random:
                            r.shuffle(instances)
                        if not self.reduce_class or \
                                self.train_percentage_selected == 100:
                            end_index = int(round(len(instances) *
                                            self.train_percentage_selected / 100))
                        elif not (self.reduce_class == label):
                            end_index = len(instances)
                        else:  # self.reduce_class==label--> reduction needed
                            end_index = int(round(len(instances) *
                                            self.train_percentage_selected / 100))
                        retained_instances.extend(zip(instances[0:end_index],
                                                      [label]*end_index))
                    if self.random:
                        # mix up samples between the different labels
                        r.shuffle(retained_instances)
            # Compute a generator the yields the train data and
            # encapsulate it in an object that memoizes its outputs and
            # provides a "fresh" method that returns a new generator that will
            # yield the same sequence
            train_data_generator = ((self.execute(data), label)
                                    for (data, label) in retained_instances)
            self.data_for_training = MemoizeGenerator(train_data_generator,
                                                      caching=self.caching)
            self._log("Data for training finished", level=logging.DEBUG)
        # Return a fresh copy of the generator
        return self.data_for_training.fresh()
    def request_data_for_testing(self):
        """ Returns data for testing of subsequent nodes

        .. todo:: to document
        """
        assert(self.input_node is not None)
        if self.test_percentage_selected > 100:
            self._log("Test percentage of %f reduced to 100." %
                      self.test_percentage_selected,
                      level=logging.ERROR)
            self.test_percentage_selected = 100
        self._log("Data for testing is requested.", level=logging.DEBUG)
        if self.test_percentage_selected == 100:
            return super(InstanceSelectionNode, self).request_data_for_testing()
        # If we haven't computed the data for testing yet
        if self.data_for_testing is None:
            # Assert that this node has already been trained
            assert(not self.is_trainable() or
                   self.get_remaining_train_phase() == 0)
            # Divide available instances according to label
            all_instances = defaultdict(list)
            for instance, label in self.input_node.request_data_for_testing():
                all_instances[label].append(instance)
            self._log("Keeping only %s percent of test data" %
                      self.test_percentage_selected,
                      level=logging.DEBUG)
            r = random.Random(self.run_number)
            # Retain only *percentage_selected* percent of the data
            retained_instances = []
            for label, instances in all_instances.iteritems():
                # enable random choice of samples
                r.shuffle(instances)
                if not self.reduce_class or \
                        self.test_percentage_selected == 100:
                    end_index = int(round(len(instances) *
                                    self.test_percentage_selected / 100))
                elif not (self.reduce_class == label):
                    end_index = len(instances)
                else:  # self.reduce_class==label--> reduction needed
                    end_index = int(round(len(instances) *
                                    self.test_percentage_selected / 100))
                retained_instances.extend(zip(instances[0:end_index],
                                              [label]*end_index))
            # mix up samples between the different labels
            r.shuffle(retained_instances)
            # Compute a generator the yields the test data and
            # encapsulate it in an object that memoizes its outputs and
            # provides a "fresh" method that returns a new generator that'll
            # yield the same sequence
            self._log("Producing data for testing.", level=logging.DEBUG)
            test_data_generator = ((self.execute(data), label)
                                   for (data, label) in retained_instances)
            self.data_for_testing = MemoizeGenerator(test_data_generator,
                                                     caching=self.caching)
            self._log("Data for testing finished", level=logging.DEBUG)
        # Return a fresh copy of the generator
        return self.data_for_testing.fresh()
    def _execute(self, time_series):
        return time_series # We don't do anything with the kept instances
class ReduceOverrepresentedClassNode(BaseNode):
    """ Reject instances to balance categories for classification

    The node forwards only a reduced number
    of the training and test instances of the bigger class
    to get a balanced ratio of the
    classes. The forwarded instances are selected randomly.
    All data of the underrepresented class is
    forwarded.

    **Parameters**

    **Exemplary call**

    .. code-block:: yaml

        -
            node : Reduce_Overrepresented_Class

    :Author: Hendrik Woehrle (hendrik.woehrle@dfki.de)
    :Created: 2010/09/22
    """
    def __init__(self, **kwargs):
        super(ReduceOverrepresentedClassNode, self).__init__(**kwargs)
    def request_data_for_training(self, use_test_data):
        """ Returns data for training of subsequent nodes

        Balances the training instances across classes (see
        :meth:`balance_instances`) and memoizes the resulting generator.

        .. todo:: to document
        """
        assert(self.input_node is not None)
        # NOTE(review): message says "testing" but this is the training
        # path -- looks like a copy-paste slip; string left unchanged.
        self._log("Data for testing is requested.", level=logging.DEBUG)
        if self.data_for_training is None:
            self._log("Producing data for training.", level=logging.DEBUG)
            # Train this node
            self.train_sweep(use_test_data)
            # Divide available instances according to label
            all_instances = defaultdict(list)
            for instance, label in self.input_node.request_data_for_training(
                    use_test_data):
                all_instances[label].append(instance)
            retained_instances = self.balance_instances(all_instances)
            # Compute a generator the yields the test data and
            # encapsulate it in an object that memoizes its outputs and
            # provides a "fresh" method that returns a new generator that will
            # yield the same sequence
            self._log("Producing data for testing.", level=logging.DEBUG)
            train_data_generator = ((self.execute(data), label)
                                    for (data, label) in retained_instances)
            self.data_for_training = MemoizeGenerator(train_data_generator,
                                                      caching=self.caching)
            self._log("Data for training finished", level=logging.DEBUG)
        # Return a fresh copy of the generator
        return self.data_for_training.fresh()
    def request_data_for_testing(self):
        """ Returns data for testing of subsequent nodes

        Same balancing as the training path, applied to the test instances.

        .. todo:: to document
        """
        assert(self.input_node is not None)
        self._log("Data for testing is requested.", level=logging.DEBUG)
        # If we haven't computed the data for testing yet
        if self.data_for_testing is None:
            # Assert that this node has already been trained
            assert(not self.is_trainable() or
                   self.get_remaining_train_phase() == 0)
            # Divide available instances according to label
            all_instances = defaultdict(list)
            for instance, label in self.input_node.request_data_for_testing():
                all_instances[label].append(instance)
            retained_instances = self.balance_instances(all_instances)
            # Compute a generator the yields the test data and
            # encapsulate it in an object that memoizes its outputs and
            # provides a "fresh" method that returns a new generator that will
            # yield the same sequence
            self._log("Producing data for testing.", level=logging.DEBUG)
            test_data_generator = ((self.execute(data), label)
                                   for (data, label) in retained_instances)
            self.data_for_testing = MemoizeGenerator(test_data_generator,
                                                     caching=self.caching)
            self._log("Data for testing finished", level=logging.DEBUG)
        # Return a fresh copy of the generator
        return self.data_for_testing.fresh()
    def _execute(self, time_series):
        return time_series # We don't do anything with the kept instances
    def balance_instances(self, all_instances):
        """Method that performs the rejections of the data in the oversized class"""
        retained_instances = []
        # it is supposed to have a binary classifier, e.g. to have exactly 2 classes
        #if not len(all_instances.keys())==2:
        #    raise ValueError("Too many classes: only binary classification supported")
        # count the number of instances per class
        # Starts at +inf; once any class is seen, min() with len() makes this
        # an int, so the slice below is valid for non-empty input.
        min_num_instances_per_class = float("+inf")
        for label, instances in all_instances.iteritems():
            min_num_instances_per_class = min(min_num_instances_per_class,
                                              len(instances))
        r = random.Random(self.run_number)
        # retain only the number of instances that corresponds
        # to the size of smaller class
        for label, instances in all_instances.iteritems():
            r.shuffle(instances)
            retained_instances.extend(
                zip(instances[0:min_num_instances_per_class],
                    [label]*min_num_instances_per_class))
        r.shuffle(retained_instances)
        return retained_instances
# Alternative YAML node-spec names resolving to the classes in this module.
_NODE_MAPPING = {"RandomInstanceSelection": InstanceSelectionNode,
                 "Reduce_Overrepresented_Class": ReduceOverrepresentedClassNode}
| 43.334988 | 88 | 0.584918 |
256a8cd6b55c2a6f3936b57c2975d63cfcb67d9a | 4,050 | py | Python | tests/test_functional.py | tirkarthi/humpty | 8652cf7b18a09d1a1d73465afd38581ef4e2369e | [
"BSD-3-Clause"
] | 14 | 2015-09-05T20:20:50.000Z | 2021-04-08T08:53:20.000Z | tests/test_functional.py | tirkarthi/humpty | 8652cf7b18a09d1a1d73465afd38581ef4e2369e | [
"BSD-3-Clause"
] | 6 | 2017-05-12T20:46:40.000Z | 2020-02-08T05:05:03.000Z | tests/test_functional.py | tirkarthi/humpty | 8652cf7b18a09d1a1d73465afd38581ef4e2369e | [
"BSD-3-Clause"
] | 8 | 2017-02-13T15:38:53.000Z | 2020-11-11T20:16:58.000Z | # -*- coding: utf-8 -*-
"""
"""
from __future__ import absolute_import
from contextlib import contextmanager
import imp
import posixpath
from zipfile import ZipFile
from click.testing import CliRunner
import pkginfo
import pytest
from six import PY3
def test_pyfile_compiled(packages, tmpdir):
    """The dist1 egg must contain working byte-compiled modules."""
    packages.require_eggs('dist1')
    # Python >= 3.2 stores .pyc files in PEP 3147 __pycache__ repositories
    # and does not seem to run them from inside a zipped egg, so unzip there.
    venv = packages.get_venv('dist1', unzip=bool(PY3))
    assert venv.run("__import__('dist1').test_is_compiled()") == 0
@pytest.fixture
def dist1_metadata(packages):
    """Package metadata parsed from the freshly built dist1 egg."""
    return pkginfo.BDist(str(packages.get_egg('dist1')))
def test_summary(dist1_metadata):
    """Egg metadata should carry the summary from setup()."""
    expected = "A dummy distribution"
    assert dist1_metadata.summary == expected
def test_description(dist1_metadata):
    """The (unicode) long description must survive the egg conversion."""
    description = dist1_metadata.description.rstrip()
    assert description == u"Long description.\n\nGruß."
def test_script_wrapper(packages):
    """The generated entry-point wrapper script should run and exit 42."""
    packages.require_eggs('dist1')
    assert packages.get_venv('dist1').call(['dist1_wrapper']) == 42
def test_old_style_script(packages):
    """An old-style (scripts=[...]) script should run and exit 42."""
    packages.require_eggs('dist1')
    assert packages.get_venv('dist1').call(['dist1_script']) == 42
def test_namespace_package(packages):
    """Modules from two eggs sharing a namespace package must both import."""
    packages.require_eggs('dist1', 'dist2')
    venv = packages.get_venv('dist2')
    # Exit code doubles as the imported value, so a plain run() checks both.
    prog = 'import sys\n' \
           'from dist2.plugins.builtin import the_answer\n' \
           'sys.exit(the_answer)\n'
    assert venv.run(prog) == 42
def test_namespace_stubs_in_egg(packages):
    """The egg must contain namespace stubs but no installer .pth files."""
    stubs = with_byte_compiled(['dist2/__init__.py',
                                'dist2/plugins/__init__.py'])
    with fileobj(ZipFile(str(packages.get_egg('dist2')))) as zf:
        present = stubs.intersection(zf.namelist())
    # Every expected stub (plain or byte-compiled, depending on the python
    # version) must have been generated into the egg...
    assert present.intersection(stubs) == stubs
    # ...and the .pth file that the wheel installer creates for namespace
    # packages must not have been copied over.
    assert not any(name.lower().endswith('.pth') for name in present)
def test_extension(packages):
    """A compiled C extension inside the egg must import and work."""
    packages.require_eggs('extension_dist')
    assert packages.get_venv('extension_dist').run(
        "__import__('extension_dist').test_extension()") == 0
def test_eager_resources(packages):
    """eager_resources declared in setup() must be extractable at runtime."""
    packages.require_eggs('extension_dist')
    assert packages.get_venv('extension_dist').run(
        "__import__('extension_dist').test_eager_resources()") == 0
def test_extras(packages):
    """Installing dist1 with the [extras] extra pulls in its extra deps."""
    packages.require_eggs('dist1', 'extension_dist')
    assert packages.get_venv('dist1[extras]').run(
        "__import__('dist1').test_extras()") == 0
def test_no_extras(packages):
    """Without the extra, the optional dependency must NOT be installed."""
    packages.require_eggs('dist1', 'extension_dist')
    assert packages.get_venv('dist1').run(
        "__import__('dist1').test_no_extras()") == 0
def test_main(packages, tmpdir):
    """`humpty -d <dir> <wheel>` converts the wheel into exactly one egg."""
    from humpty import main
    wheel = packages.get_wheel('dist1')
    outcome = CliRunner().invoke(main, ['-d', str(tmpdir), str(wheel)])
    assert outcome.exit_code == 0
    produced = list(tmpdir.listdir(fil="*.egg"))
    assert len(produced) == 1
    only_egg = produced[0]
    assert only_egg.isfile()
    assert only_egg.fnmatch("dist1-*")
@contextmanager
def fileobj(fp):
    # Yield *fp* unchanged and guarantee fp.close() runs on exit,
    # whether the managed block finished normally or raised.
    try:
        yield fp
    finally:
        fp.close()
def with_byte_compiled(paths):
    """ Augment PATHS with paths of byte-compiled files.
    """
    # imp.get_tag exists on Python 3.2+, where cached files live in
    # PEP 3147 __pycache__ repositories as <base>.<tag>.pyc.
    tag = getattr(imp, 'get_tag', None)
    result = set(paths)
    for path in paths:
        directory, filename = posixpath.split(path)
        base, extension = posixpath.splitext(filename)
        if extension != '.py':
            continue
        if tag:
            base = '%s.%s' % (base, tag())
            directory = posixpath.join(directory, '__pycache__')
        result.add(posixpath.join(directory, base + '.pyc'))
    return result
| 27.739726 | 79 | 0.66716 |
256b83c7f65a2f6d348541c27824ba4aba67696c | 1,649 | py | Python | policytools/master_list/actions_master_list_base.py | samkeen/policy-tools | 5183a710ac7b3816c6b6f3f8493d410712018873 | [
"Apache-2.0"
] | 1 | 2021-04-03T12:16:53.000Z | 2021-04-03T12:16:53.000Z | policytools/master_list/actions_master_list_base.py | samkeen/policy-tools | 5183a710ac7b3816c6b6f3f8493d410712018873 | [
"Apache-2.0"
] | 6 | 2019-05-07T03:36:58.000Z | 2021-02-02T22:49:53.000Z | policytools/master_list/actions_master_list_base.py | samkeen/policy-tools | 5183a710ac7b3816c6b6f3f8493d410712018873 | [
"Apache-2.0"
] | null | null | null | import logging
from abc import ABC, abstractmethod
logger = logging.getLogger(__name__)
class ActionsMasterListBase(ABC):
    """
    Base class meant to hold the entire Set of IAM resource actions.
    It is up to a concrete class to implement a source document parser (parse_actions_source)
    """

    def __init__(self, source_master):
        """
        :param source_master: raw source document listing the actions
        :type source_master: str
        """
        self._actions_set = self.parse_actions_source(source_master)
        # Map each action's lower-cased form back to its canonical spelling
        # so lookup_action can be case insensitive.
        self._actions_set_case_insensitive_lookup = {
            action.lower(): action for action in self._actions_set}
        super().__init__()

    @abstractmethod
    def parse_actions_source(self, source_master):
        """
        Parse the source document into the full set of action names.

        :param source_master:
        :type source_master: str
        :return:
        :rtype: set
        """
        pass

    @abstractmethod
    def all_actions_for_resource(self, resource_name):
        """
        This must return a sorted list of all actions for the given resource

        :param resource_name:
        :type resource_name: str
        :return:
        :rtype: list
        """

    def all_actions_set(self, lower=False):
        """Full action set; lower-cased copies when ``lower`` is True."""
        if lower:
            return {action.lower() for action in self._actions_set}
        return self._actions_set

    def lookup_action(self, action):
        """
        Case insensitive lookup for all known actions. Returned in PascalCase
        (i.e. the canonical spelling), or None when unknown.

        :param action:
        :type action: str
        :return:
        :rtype: str
        """
        return self._actions_set_case_insensitive_lookup.get(action.lower())
| 28.431034 | 116 | 0.62644 |
256b989b63c37dd38e854142d7a19f85d5f03b4f | 1,401 | py | Python | diy_gym/addons/debug/joint_trace.py | wassname/diy-gym | 83232ae6971341a86683d316feecf4d34d3caf47 | [
"MIT"
] | null | null | null | diy_gym/addons/debug/joint_trace.py | wassname/diy-gym | 83232ae6971341a86683d316feecf4d34d3caf47 | [
"MIT"
] | null | null | null | diy_gym/addons/debug/joint_trace.py | wassname/diy-gym | 83232ae6971341a86683d316feecf4d34d3caf47 | [
"MIT"
] | null | null | null | import pybullet as p
from gym import spaces
import pybullet_planning as pbp
import numpy as np
from diy_gym.addons.addon import Addon
class JointTrace(Addon):
    """
    JointTrace

    Trace that follows the movements of a set of joints by drawing
    colored pybullet debug line segments between successive positions.
    """
    def __init__(self, parent, config):
        super().__init__(parent, config)
        self.uid = parent.uid
        joint_info = [p.getJointInfo(self.uid, i) for i in range(p.getNumJoints(self.uid))]
        # Accept a single joint name, a list of names, or default to every joint.
        if 'joint' in config:
            joints = [config.get('joint')]
        elif 'joints' in config:
            joints = config.get('joints')
        else:
            joints = [info[1].decode('UTF-8') for info in joint_info]
        # info[3] is the joint's qIndex; > -1 excludes fixed joints,
        # which cannot move and therefore leave no trace.
        self.joint_ids = [info[0] for info in joint_info if info[1].decode('UTF-8') in joints and info[3] > -1]
        # Previous link positions; None until the first update after reset.
        self.last = None

    def reset(self):
        p.removeAllUserDebugItems()
        self.last = None

    def update(self, action):
        # A colored trace for each joint: draw a segment from the previous
        # position to the current one, coloring along the kinematic chain.
        joint_pos = np.array([pbp.get_link_pose(self.uid, i)[0] for i in self.joint_ids])
        if self.last is not None:
            # FIX: the original assigned `m = len(joint_pos)` but the loop and
            # the color expression used an undefined name `n` (NameError).
            n = len(joint_pos)
            for i in range(n):
                p.addUserDebugLine(
                    lineFromXYZ=joint_pos[i],
                    lineToXYZ=self.last[i], lineColorRGB=[(n-i)/(n+1), 0.9, i/(n+1)], lineWidth=1, lifeTime=360)
        self.last = joint_pos
| 31.133333 | 120 | 0.581727 |
256c5471eacba768e9791f30d6ef0762118cc682 | 181 | py | Python | codility/1_3.py | love-adela/algorithm | 4ccd02173c96f8369962f1fd4e5166a221690fa2 | [
"MIT"
] | 3 | 2019-03-09T05:19:23.000Z | 2019-04-06T09:26:36.000Z | codility/1_3.py | love-adela/algorithm | 4ccd02173c96f8369962f1fd4e5166a221690fa2 | [
"MIT"
] | 1 | 2020-02-23T10:38:04.000Z | 2020-02-23T10:38:04.000Z | codility/1_3.py | love-adela/algorithm | 4ccd02173c96f8369962f1fd4e5166a221690fa2 | [
"MIT"
] | 1 | 2019-05-22T13:47:53.000Z | 2019-05-22T13:47:53.000Z | def solution(S):
    # URL-encode the spaces in S: build a copy with every ' ' replaced
    # by '%20' (equivalent to S.replace(' ', '%20')).
    rs = ""
    for i in S:
        if i != " ":
            rs += i
        else:
            rs += "%20"
    return rs
# Demo run; expected output: Mr%20John%20Smith
S = "Mr John Smith"
print(solution(S))
| 12.066667 | 23 | 0.381215 |
256c54c224c3656056ad73a0292f2c0577a7fce0 | 1,612 | py | Python | ngraph/flex/flexargparser.py | NervanaSystems/ngraph-python | ac032c83c7152b615a9ad129d54d350f9d6a2986 | [
"Apache-2.0"
] | 18 | 2018-03-19T04:16:49.000Z | 2021-02-08T14:44:58.000Z | ngraph/flex/flexargparser.py | rsumner31/ngraph | 5e5c9bb9f24d95aee190b914dd2d44122fc3be53 | [
"Apache-2.0"
] | 2 | 2019-04-16T06:41:49.000Z | 2019-05-06T14:08:13.000Z | ngraph/flex/flexargparser.py | rsumner31/ngraph | 5e5c9bb9f24d95aee190b914dd2d44122fc3be53 | [
"Apache-2.0"
] | 11 | 2018-06-16T15:59:08.000Z | 2021-03-06T00:45:30.000Z | from __future__ import print_function
import ngraph.transformers as ngt
from ngraph.flex.names import flex_gpu_transformer_name
import argparse
class FlexNgraphArgparser():
    """
    Flex specific command line args
    """
    @staticmethod
    def setup_flex_args(argParser):
        """
        Add flex specific arguments to other default args used by ngraph
        """
        # use fixed point for flex backend
        argParser.add_argument('--fixed_point',
                               action="store_true",
                               help=argparse.SUPPRESS)
        # turn on flex verbosity for debug
        argParser.add_argument('--flex_verbose',
                               action="store_true",
                               help=argparse.SUPPRESS)
        # collect flex data and save it to h5py File
        # NOTE(review): unlike the two flags above, this one uses
        # default=argparse.SUPPRESS (attribute absent unless the flag is
        # given) rather than help=argparse.SUPPRESS; the getattr() fallback
        # in make_and_set_transformer_factory relies on the attribute
        # possibly being absent, so this asymmetry appears intentional --
        # confirm before "fixing".
        argParser.add_argument('--collect_flex_data',
                               action="store_true",
                               default=argparse.SUPPRESS)
    @staticmethod
    def make_and_set_transformer_factory(args):
        # Names of all flex-specific argparse attributes declared above.
        flex_args = ('fixed_point', 'flex_verbose', 'collect_flex_data')
        # default value for all flex args if not given, confusing with store_true in add_argument
        default = False
        if args.backend == flex_gpu_transformer_name:
            # Only the flex GPU transformer accepts the flex keyword args;
            # missing (suppressed) attributes fall back to False.
            flex_args_dict = dict((a, getattr(args, a, default)) for a in flex_args)
            factory = ngt.make_transformer_factory(args.backend, **flex_args_dict)
        else:
            factory = ngt.make_transformer_factory(args.backend)
        ngt.set_transformer_factory(factory)
| 36.636364 | 97 | 0.614144 |
256d49d818eb371b9cdddf6e67c307560654cf96 | 969 | py | Python | src/hydep/simplerom.py | CORE-GATECH-GROUP/hydep | 3cb65325eb03251629b3aaa8c3895a002e05d55d | [
"MIT"
] | 2 | 2020-11-12T03:08:07.000Z | 2021-10-04T22:09:48.000Z | src/hydep/simplerom.py | CORE-GATECH-GROUP/hydep | 3cb65325eb03251629b3aaa8c3895a002e05d55d | [
"MIT"
] | 2 | 2020-11-25T16:24:29.000Z | 2021-08-28T23:19:39.000Z | src/hydep/simplerom.py | CORE-GATECH-GROUP/hydep | 3cb65325eb03251629b3aaa8c3895a002e05d55d | [
"MIT"
] | 1 | 2020-11-12T03:08:10.000Z | 2020-11-12T03:08:10.000Z | """
Simple reduced order solver.
More of a no-op, in that it doesn't actually
perform a flux solution
"""
import numpy
from hydep.internal.features import FeatureCollection
from hydep.internal import TransportResult
from .lib import ReducedOrderSolver
class SimpleROSolver(ReducedOrderSolver):
    """The simplest reduced order flux solution where nothing happens"""
    needs = FeatureCollection()

    def __init__(self):
        # Beginning-of-step flux cached by processBOS; None until then.
        self._flux = None

    def processBOS(self, txResult, _timestep, _power):
        """Store flux from a high fidelity transport solution"""
        self._flux = txResult.flux

    def substepSolve(self, *args, **kwargs):
        """Return the beginning-of-step flux with no modifications

        Returns
        -------
        hydep.internal.TransportResult
            Transport result with the flux provided in :meth:`processBOS`
        """
        # No actual flux solve: echo the cached flux with NaN keff/runtime.
        keff = [numpy.nan, numpy.nan]
        return TransportResult(self._flux, keff, runTime=numpy.nan)
| 26.916667 | 85 | 0.693498 |
25713c734ac79b5bf287eaff619cf02ebcde4535 | 449 | py | Python | TopQuarkAnalysis/TopEventProducers/python/sequences/ttGenEvent_cff.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 852 | 2015-01-11T21:03:51.000Z | 2022-03-25T21:14:00.000Z | TopQuarkAnalysis/TopEventProducers/python/sequences/ttGenEvent_cff.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 30,371 | 2015-01-02T00:14:40.000Z | 2022-03-31T23:26:05.000Z | TopQuarkAnalysis/TopEventProducers/python/sequences/ttGenEvent_cff.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 3,240 | 2015-01-02T05:53:18.000Z | 2022-03-31T17:24:21.000Z | import FWCore.ParameterSet.Config as cms
#
# produce ttGenEvent with all necessary ingredients
#
from TopQuarkAnalysis.TopEventProducers.producers.TopInitSubset_cfi import *
from TopQuarkAnalysis.TopEventProducers.producers.TopDecaySubset_cfi import *
from TopQuarkAnalysis.TopEventProducers.producers.TtGenEvtProducer_cfi import *
# Task bundling the three producers needed to build the ttGenEvent
# (initial-state subset, decay subset and the generator-event producer).
makeGenEvtTask = cms.Task(
    initSubset,
    decaySubset,
    genEvt
)
# Sequence wrapper so the task can be scheduled inside a cms.Path/Sequence.
makeGenEvt = cms.Sequence(makeGenEvtTask)
| 28.0625 | 79 | 0.830735 |
2571f7e0a4f394d6c21f691f7de829e3237dd090 | 8,442 | py | Python | models/linnet.py | mengxiangke/bsn | df6458a44b8d8b442c086e158366dd296fab54cc | [
"Apache-2.0"
] | 5 | 2020-09-19T18:05:08.000Z | 2022-01-23T14:55:07.000Z | models/linnet.py | mengxiangke/bsn | df6458a44b8d8b442c086e158366dd296fab54cc | [
"Apache-2.0"
] | null | null | null | models/linnet.py | mengxiangke/bsn | df6458a44b8d8b442c086e158366dd296fab54cc | [
"Apache-2.0"
] | 7 | 2020-09-19T18:05:11.000Z | 2021-12-28T02:41:12.000Z | import os
from os.path import join as pjoin
import time
import numpy as np
import torch
from torch import nn
import torch.nn.functional as F
from torch.optim.lr_scheduler import CosineAnnealingLR
try:
from .radam import RAdam
except (ImportError, ModuleNotFoundError) as err:
from radam import RAdam
try:
    from torch.nn import Flatten
except ImportError:
    # Backport for torch versions that lack nn.Flatten: flattens the
    # dimensions [start_dim, end_dim] of the input tensor.
    class Flatten(nn.Module):
        __constants__ = ['start_dim', 'end_dim']
        def __init__(self, start_dim=1, end_dim=-1):
            super(Flatten, self).__init__()
            self.start_dim = start_dim
            self.end_dim = end_dim
        def forward(self, input):
            return input.flatten(self.start_dim, self.end_dim)
class HPF(nn.Conv1d):
    """Fixed high-pass filter bank: four 1-D difference kernels (orders 1-4),
    implemented as a bias-free Conv1d whose weights are preset, not learned.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 *args,
                 **kwargs):
        super(HPF, self).__init__(
            in_channels, out_channels, kernel_size, *args, bias=False, **kwargs)
        # Binomial difference kernels, shape (4, 1, 5) = (out, in, taps).
        self.hpf_kernel = np.array([[[ 1, -1,  0,  0,  0]],
                                    [[ 1, -2,  1,  0,  0]],
                                    [[ 1, -3,  3, -1,  0]],
                                    [[ 1, -4,  6, -4,  1]]])
        self.weight.data = torch.tensor(self.hpf_kernel,
                                        dtype=self.weight.dtype)

    def initialize_parameters(self):
        """Re-load the fixed kernels on whatever device the layer lives on."""
        device = next(iter(self.parameters())).device
        self.weight.data = torch.tensor(self.hpf_kernel,
                                        dtype=self.weight.dtype,
                                        device=device)
        # The layer is built with bias=False, so there is no bias to reset;
        # the settings below (from the original) would forbid training it.
        #self.bias.data.fill_(0)
        #self.hpf.bias.requires_grad = False
class TLU(nn.Module):
    """Truncated linear unit (TLU): clips activations to [-thr, thr]."""
    def __init__(self, thr=3.0):
        super(TLU, self).__init__()
        self.thr = thr

    def forward(self, x):
        return torch.clamp(x, min=-self.thr, max=self.thr)
class Group1(nn.Module):
    """First conv group: 4 -> 16 channels with truncated activations,
    length preserving (no pooling)."""
    def __init__(self):
        super(Group1, self).__init__()
        layers = [
            nn.Conv1d(4, 8, 1),
            TLU(3.0),
            nn.Conv1d(8, 8, 5, padding=2),
            nn.Conv1d(8, 16, 1),
        ]
        self.module = nn.Sequential(*layers)

    def forward(self, x):
        return self.module(x)
class Group2(nn.Module):
    """Conv group 16 -> 32 channels, ReLU, then /2 average pooling."""
    def __init__(self):
        super(Group2, self).__init__()
        layers = [
            nn.Conv1d(16, 16, 5, padding=2),
            nn.ReLU(),
            nn.Conv1d(16, 32, 1),
            nn.ReLU(),
            nn.AvgPool1d(3, stride=2, padding=1),
        ]
        self.module = nn.Sequential(*layers)

    def forward(self, x):
        return self.module(x)
class Group3(nn.Module):
    """Conv group 32 -> 64 channels, ReLU, then /2 average pooling."""
    def __init__(self):
        super(Group3, self).__init__()
        layers = [
            nn.Conv1d(32, 32, 5, padding=2),
            nn.ReLU(),
            nn.Conv1d(32, 64, 1),
            nn.ReLU(),
            nn.AvgPool1d(3, stride=2, padding=1),
        ]
        self.module = nn.Sequential(*layers)

    def forward(self, x):
        return self.module(x)
class Group4(nn.Module):
    """Conv group 64 -> 128 channels, ReLU, then /2 average pooling."""
    def __init__(self):
        super(Group4, self).__init__()
        layers = [
            nn.Conv1d(64, 64, 5, padding=2),
            nn.ReLU(),
            nn.Conv1d(64, 128, 1),
            nn.ReLU(),
            nn.AvgPool1d(3, stride=2, padding=1),
        ]
        self.module = nn.Sequential(*layers)

    def forward(self, x):
        return self.module(x)
class Group5(nn.Module):
    """Conv group 128 -> 256 channels, ReLU, then /2 average pooling."""
    def __init__(self):
        super(Group5, self).__init__()
        layers = [
            nn.Conv1d(128, 128, 5, padding=2),
            nn.ReLU(),
            nn.Conv1d(128, 256, 1),
            nn.ReLU(),
            nn.AvgPool1d(3, stride=2, padding=1),
        ]
        self.module = nn.Sequential(*layers)

    def forward(self, x):
        return self.module(x)
class Group6(nn.Module):
    """Final conv group 256 -> 512 channels; global average pooling
    collapses the time axis to length 1."""
    def __init__(self):
        super(Group6, self).__init__()
        layers = [
            nn.Conv1d(256, 256, 5, padding=2),
            nn.ReLU(),
            nn.Conv1d(256, 512, 1),
            nn.ReLU(),
            nn.AdaptiveAvgPool1d(1),
        ]
        self.module = nn.Sequential(*layers)

    def forward(self, x):
        return self.module(x)
class Classifier(nn.Module):
    """Flattens (N, 512, 1) features and maps them to class logits."""
    def __init__(self, n_classes=2):
        super(Classifier, self).__init__()
        self.module = nn.Sequential(
            Flatten(1),
            nn.Linear(512, n_classes),
        )

    def forward(self, x):
        return self.module(x)
class LinNet(nn.Module):
    """LinNet 1-D CNN: a fixed high-pass filter (HPF) front-end followed by
    six convolutional groups and a linear classifier.

    ``initialize_parameters`` keeps the HPF kernels fixed and applies
    Xavier-uniform weights with zero biases to every other Conv1d/Linear
    layer (the BSN settings; Lin et al. originally used Kaiming-normal conv
    weights and Gaussian(0, 0.01) linear weights -- see the curriculum
    method below, which keeps that linear init).
    """

    @staticmethod
    def get_optimizer(model, lr):
        """RAdam with a small weight decay, as used to train this model."""
        return RAdam(model.parameters(), lr=lr, weight_decay=1e-5)

    @staticmethod
    def get_lr_scheduler(optimizer):
        """Cosine annealing over 20 steps down to a floor of 1e-7."""
        return CosineAnnealingLR(optimizer, T_max=20, eta_min=1e-7)

    def __str__(self):
        return self._name

    def __init__(self, n_classes=2):
        super(LinNet, self).__init__()
        self._name = "linnet"
        # HPF: fixed high-pass filter bank (its kernels are not learned).
        self.hpf = HPF(1, 4, 5, padding=2)
        self.group1 = Group1()
        self.group2 = Group2()
        self.group3 = Group3()
        self.group4 = Group4()
        self.group5 = Group5()
        self.group6 = Group6()
        self.classifier = Classifier(n_classes)
        self.initialize_parameters()

    def forward(self, x):
        """Map a (batch, 1, samples) waveform to (batch, n_classes) logits."""
        y = self.hpf(x)
        g1 = self.group1(y)
        g2 = self.group2(g1)
        g3 = self.group3(g2)
        g4 = self.group4(g3)
        g5 = self.group5(g4)
        g6 = self.group6(g5)
        logits = self.classifier(g6)
        return logits

    def initialize_parameters(self):
        """Reset the HPF to its fixed kernels; Xavier-uniform weights and
        zero biases for all other Conv1d/Linear layers.
        """
        for m in self.modules():
            if isinstance(m, HPF):
                self.hpf.initialize_parameters()
            # FIX: this branch must be `elif` -- HPF subclasses nn.Conv1d,
            # so the previous plain `if` immediately overwrote the fixed
            # high-pass kernels with Xavier noise.
            elif isinstance(m, nn.Conv1d):
                nn.init.xavier_uniform_(m.weight)
                if m.bias is not None:
                    nn.init.zeros_(m.bias.data)
            elif isinstance(m, nn.Linear):
                nn.init.xavier_uniform_(m.weight)
                if m.bias is not None:
                    nn.init.zeros_(m.bias.data)

    def initialize_curriculum_learning(self):
        """Partial re-init for curriculum learning: reset the HPF kernels and
        re-draw linear weights from N(0, 0.01) with biases at 1e-3, while
        the conv groups keep their current (trained) weights.
        """
        for m in self.modules():
            if isinstance(m, HPF):
                self.hpf.initialize_parameters()
            elif isinstance(m, nn.Linear):
                # Zero mean Gaussian with std 0.01
                nn.init.normal_(m.weight, 0.0, 0.01)
                if m.bias is not None:
                    nn.init.constant_(m.bias, 1e-3)
if __name__ == "__main__":
    # Smoke test: time a forward pass on i seconds of 16 kHz mono audio.
    model = LinNet()
    n_ch = 1
    for i in range(1, 2):  # single iteration (i = 1)
        x = torch.randn(1, n_ch, i*16000)
        t_beg = time.time()
        out = model(x)
        t_end = time.time()
        print("LinNet model output:", out)
        print("Execution time:", t_end - t_beg)
    # end of for
| 31.977273 | 98 | 0.47394 |
c24fdcfaa37586667c8318eb6776d1204e6b7822 | 6,043 | py | Python | vendor/packages/nose/functional_tests/test_importer.py | jgmize/kitsune | 8f23727a9c7fcdd05afc86886f0134fb08d9a2f0 | [
"BSD-3-Clause"
] | 2 | 2019-08-19T17:08:47.000Z | 2019-10-05T11:37:02.000Z | vendor/packages/nose/functional_tests/test_importer.py | jgmize/kitsune | 8f23727a9c7fcdd05afc86886f0134fb08d9a2f0 | [
"BSD-3-Clause"
] | null | null | null | vendor/packages/nose/functional_tests/test_importer.py | jgmize/kitsune | 8f23727a9c7fcdd05afc86886f0134fb08d9a2f0 | [
"BSD-3-Clause"
] | 1 | 2019-11-02T23:29:13.000Z | 2019-11-02T23:29:13.000Z | import os
import sys
import unittest
from nose.importer import Importer
class TestImporter(unittest.TestCase):
    """Functional tests for ``nose.importer.Importer``.

    The ``support`` fixture directory contains ``dir1/`` and ``dir2/``,
    each holding a module named ``mod`` and a package named ``pak``
    (with ``pak.mod`` and ``pak.sub``).  The tests verify that
    same-named modules imported from different directories are distinct
    objects, that repeat imports from the same directory are cached,
    and that ``sys.modules`` is kept coherent.

    NOTE(review): this suite is Python 2 only — see the ``print``
    statement in ``test_import_sets_intermediate_modules``.
    """
    def setUp(self):
        # Locate the fixture tree that lives next to this test file.
        self.dir = os.path.normpath(os.path.join(os.path.dirname(__file__),
                                                 'support'))
        self.imp = Importer()
        # Snapshot interpreter state so tearDown can restore it.
        self._mods = sys.modules.copy()
        self._path = sys.path[:]
        # Drop any fixture modules left over from a previous test.
        sys.modules.pop('mod', None)
        sys.modules.pop('pak', None)
        sys.modules.pop('pak.mod', None)
        sys.modules.pop('pak.sub', None)
    def tearDown(self):
        # Delete modules added during the test, then restore the snapshot.
        to_del = [ m for m in sys.modules.keys() if
                   m not in self._mods ]
        if to_del:
            for mod in to_del:
                del sys.modules[mod]
        sys.modules.update(self._mods)
        sys.path = self._path[:]
    def test_import_from_dir(self):
        # The same name imported from two directories must yield two
        # distinct module objects backed by different files.
        imp = self.imp
        d1 = os.path.join(self.dir, 'dir1')
        d2 = os.path.join(self.dir, 'dir2')
        # simple name
        m1 = imp.importFromDir(d1, 'mod')
        m2 = imp.importFromDir(d2, 'mod')
        self.assertNotEqual(m1, m2)
        self.assertNotEqual(m1.__file__, m2.__file__)
        # dotted name
        p1 = imp.importFromDir(d1, 'pak.mod')
        p2 = imp.importFromDir(d2, 'pak.mod')
        self.assertNotEqual(p1, p2)
        self.assertNotEqual(p1.__file__, p2.__file__)
    def test_import_from_path(self):
        # Same distinctness checks as test_import_from_dir, but addressing
        # the module file (or package directory) path directly.
        imp = self.imp
        jn = os.path.join
        d1 = jn(self.dir, 'dir1')
        d2 = jn(self.dir, 'dir2')
        # simple name
        m1 = imp.importFromPath(jn(d1, 'mod.py'), 'mod')
        m2 = imp.importFromPath(jn(d2, 'mod.py'), 'mod')
        self.assertNotEqual(m1, m2)
        self.assertNotEqual(m1.__file__, m2.__file__)
        # dotted name
        p1 = imp.importFromPath(jn(d1, 'pak', 'mod.py'), 'pak.mod')
        p2 = imp.importFromPath(jn(d2, 'pak', 'mod.py'), 'pak.mod')
        self.assertNotEqual(p1, p2)
        self.assertNotEqual(p1.__file__, p2.__file__)
        # simple name -- package
        sp1 = imp.importFromPath(jn(d1, 'pak'), 'pak')
        sp2 = imp.importFromPath(jn(d2, 'pak'), 'pak')
        self.assertNotEqual(sp1, sp2)
        assert sp1.__path__
        assert sp2.__path__
        self.assertNotEqual(sp1.__path__, sp2.__path__)
        # dotted name -- package
        dp1 = imp.importFromPath(jn(d1, 'pak', 'sub'), 'pak.sub')
        dp2 = imp.importFromPath(jn(d2, 'pak', 'sub'), 'pak.sub')
        self.assertNotEqual(dp1, dp2)
        assert dp1.__path__
        assert dp2.__path__
        self.assertNotEqual(dp1.__path__, dp2.__path__)
    def test_import_sets_intermediate_modules(self):
        # Importing a deeply dotted module must register the parent
        # packages in sys.modules and set each child as an attribute
        # of its parent.
        imp = self.imp
        path = os.path.join(self.dir,
                            'package2', 'test_pak', 'test_sub', 'test_mod.py')
        mod = imp.importFromPath(path, 'test_pak.test_sub.test_mod')
        # Python 2 print statement (debug output only).
        print mod, dir(mod)
        assert 'test_pak' in sys.modules, 'test_pak was not imported?'
        test_pak = sys.modules['test_pak']
        assert hasattr(test_pak, 'test_sub'), "test_pak.test_sub was not set"
    def test_cached_no_reload(self):
        # Importing the same name from the same directory twice must
        # return the cached object (identity, not just equality).
        imp = self.imp
        d1 = os.path.join(self.dir, 'dir1')
        m1 = imp.importFromDir(d1, 'mod')
        m2 = imp.importFromDir(d1, 'mod')
        assert m1 is m2, "%s is not %s" % (m1, m2)
    def test_cached_no_reload_dotted(self):
        # Same caching guarantee for dotted (package) names.
        imp = self.imp
        d1 = os.path.join(self.dir, 'dir1')
        p1 = imp.importFromDir(d1, 'pak.mod')
        p2 = imp.importFromDir(d1, 'pak.mod')
        assert p1 is p2, "%s is not %s" % (p1, p2)
    def test_import_sets_sys_modules(self):
        # Both the module and its parent package must land in sys.modules,
        # with the child bound as an attribute of the parent.
        imp = self.imp
        d1 = os.path.join(self.dir, 'dir1')
        p1 = imp.importFromDir(d1, 'pak.mod')
        assert sys.modules['pak.mod'] is p1, "pak.mod not in sys.modules"
        assert sys.modules['pak'], "pak not in sys.modules"
        assert sys.modules['pak'].mod is p1, \
               "sys.modules['pak'].mod is not the module we loaded"
    def test_failed_import_raises_import_error(self):
        # A nonexistent dotted name must raise ImportError.
        imp = self.imp
        def bad_import():
            imp.importFromPath(self.dir, 'no.such.module')
        self.assertRaises(ImportError, bad_import)
    def test_sys_modules_same_path_no_reload(self):
        # A module already imported by the normal machinery from the same
        # path must be reused; a different directory must yield a fresh
        # module object.
        imp = self.imp
        d1 = os.path.join(self.dir, 'dir1')
        d2 = os.path.join(self.dir, 'dir2')
        sys.path.insert(0, d1)
        mod_sys_imported = __import__('mod')
        mod_nose_imported = imp.importFromDir(d1, 'mod')
        assert mod_nose_imported is mod_sys_imported, \
               "nose reimported a module in sys.modules from the same path"
        mod_nose_imported2 = imp.importFromDir(d2, 'mod')
        assert mod_nose_imported2 != mod_sys_imported, \
               "nose failed to reimport same name, different dir"
    def test_import_pkg_from_path_fpw(self):
        # With config.firstPackageWins the first package imported under a
        # name wins: later imports of the same name from other directories
        # return the originally imported objects.
        imp = self.imp
        imp.config.firstPackageWins = True
        jn = os.path.join
        d1 = jn(self.dir, 'dir1')
        d2 = jn(self.dir, 'dir2')
        # dotted name
        p1 = imp.importFromPath(jn(d1, 'pak', 'mod.py'), 'pak.mod')
        p2 = imp.importFromPath(jn(d2, 'pak', 'mod.py'), 'pak.mod')
        self.assertEqual(p1, p2)
        self.assertEqual(p1.__file__, p2.__file__)
        # simple name -- package
        sp1 = imp.importFromPath(jn(d1, 'pak'), 'pak')
        sp2 = imp.importFromPath(jn(d2, 'pak'), 'pak')
        self.assertEqual(sp1, sp2)
        assert sp1.__path__
        assert sp2.__path__
        self.assertEqual(sp1.__path__, sp2.__path__)
        # dotted name -- package
        dp1 = imp.importFromPath(jn(d1, 'pak', 'sub'), 'pak.sub')
        dp2 = imp.importFromPath(jn(d2, 'pak', 'sub'), 'pak.sub')
        self.assertEqual(dp1, dp2)
        assert dp1.__path__
        assert dp2.__path__
        self.assertEqual(dp1.__path__, dp2.__path__)
if __name__ == '__main__':
    # Direct invocation: enable debug logging, then hand off to unittest.
    import logging
    logging.basicConfig(level=logging.DEBUG)
    unittest.main()
| 35.757396 | 78 | 0.580837 |
c2516c459b4df1dceb074080d5a8ce6f229681ed | 16,278 | py | Python | mvmm/multi_view/SpectralPenSearchByBlockMVMM.py | idc9/mvmm | 64fce755a7cd53be9b08278484c7a4c77daf38d1 | [
"MIT"
] | 1 | 2021-08-17T13:22:54.000Z | 2021-08-17T13:22:54.000Z | mvmm/multi_view/SpectralPenSearchByBlockMVMM.py | idc9/mvmm | 64fce755a7cd53be9b08278484c7a4c77daf38d1 | [
"MIT"
] | null | null | null | mvmm/multi_view/SpectralPenSearchByBlockMVMM.py | idc9/mvmm | 64fce755a7cd53be9b08278484c7a4c77daf38d1 | [
"MIT"
] | null | null | null | from sklearn.base import clone
import pandas as pd
from abc import ABCMeta
from time import time
from datetime import datetime
import numpy as np
from sklearn.model_selection import ParameterGrid
from sklearn.base import BaseEstimator, MetaEstimatorMixin
from mvmm.utils import get_seeds
from mvmm.multi_view.utils import linspace_zero_to, \
expspace_zero_to, polyspace_zero_to
from mvmm.multi_view.block_diag.graph.linalg import geigh_Lsym_bp_smallest
from mvmm.multi_view.block_diag.utils import asc_sort
from mvmm.clustering_measures import unsupervised_cluster_scores, \
several_unsupervised_cluster_scores, MEASURE_MIN_GOOD
class SpectralPenSearchByBlockMVMM(MetaEstimatorMixin, BaseEstimator,
                                   metaclass=ABCMeta):
    """
    Does a grid search over the continuous hyper-parameter for the spectral
    penalized MVMM. Stores the best MVMM for each number of blocks.

    Parameters
    ----------
    base_mvmm_0:
        Unconstrained MVMM.
    base_wbd_mvmm: mvmm.multi_view.BlockDiagMVMM.BlockDiagMVMM()
        The base class for the spectral penalized MVMM.
    eval_weights:
        The weights to put on the generalized eigenvalues
        ('adapt', 'uniform', 'lin', 'quad' or 'exp').
    adapt_expon:
        Exponent applied to the inverse eigenvalues when
        eval_weights='adapt'.
    max_n_blocks:
        Maximum number of blocks to get i.e. the number of eigenvalues to
        penalize.
    user_eval_weights:
        (Optional) User provided eigenvalue weights.
    pen_max: str, float
        Largest penalty value to try. If 'default' will make an automatic,
        educated guess.
    n_pen_seq: int
        Number of penalty values to try.
    user_pen_vals: None, list
        (Optional) User provided penalty values to try.
    default_c: float
        Multiplicative factor for inferring pen_max with the default method.
    pen_seq_spacing: str
        How to space the penalty values along the penalty sequence
        ('lin', 'quad' or 'exp').
    n_init: int
        Number of random initializations.
    random_state: None, int
        Random seed.
    select_metric: str
        How to pick the best model for each fixed number of blocks.
    metrics2compute: list of str
        Model selection measures to compute for tracking purposes.
    verbosity: int
        Level of printout.
    """
    def __init__(self, base_mvmm_0, base_wbd_mvmm,
                 eval_weights='adapt', adapt_expon=1,
                 max_n_blocks='default', user_eval_weights=None,
                 pen_max='default', n_pen_seq=100, user_pen_vals=None,
                 # adapt_pen=False, pen_incr=.5, max_n_pen_incr=200,
                 default_c=100, pen_seq_spacing='lin',
                 n_init=1, random_state=None,
                 select_metric='bic',
                 metrics2compute=['aic', 'bic'],
                 # NOTE(review): the mutable default list above is shared
                 # across instances — harmless only if never mutated in place.
                 verbosity=0):
        self.base_mvmm_0 = base_mvmm_0
        self.base_wbd_mvmm = base_wbd_mvmm
        self.eval_weights = eval_weights
        self.adapt_expon = adapt_expon
        self.max_n_blocks = max_n_blocks
        self.user_eval_weights = user_eval_weights
        self.pen_max = pen_max
        self.n_pen_seq = n_pen_seq
        self.user_pen_vals = user_pen_vals
        self.default_c = default_c
        self.pen_seq_spacing = pen_seq_spacing
        # Fail fast on an unsupported spacing scheme.
        assert pen_seq_spacing in ['lin', 'quad', 'exp']
        # self.adapt_pen = adapt_pen
        # self.pen_incr = pen_incr
        # self.max_n_pen_incr = max_n_pen_incr
        # if self.adapt_pen:
        #     assert self.user_pen_vals is None
        self.random_state = random_state
        self.n_init = n_init
        self.select_metric = select_metric
        self.metrics2compute = metrics2compute
        self.verbosity = verbosity
    def get_pen_seq_from_max(self, pen_seq_spacing_max):
        # (renamed nothing — see below)
        raise NotImplementedError
    def get_pen_seq_from_max(self, pen_max):
        """Build the penalty sequence from 0 up to ``pen_max`` using the
        configured spacing ('lin', 'quad' or 'exp')."""
        if self.pen_seq_spacing == 'lin':
            return linspace_zero_to(stop=pen_max,
                                    num=self.n_pen_seq)
        elif self.pen_seq_spacing == 'quad':
            return polyspace_zero_to(stop=pen_max,
                                     num=self.n_pen_seq,
                                     deg=2)
        elif self.pen_seq_spacing == 'exp':
            return expspace_zero_to(stop=pen_max,
                                    num=self.n_pen_seq,
                                    base=10)
    @property
    def n_pen_vals_(self):
        # +1 accounts for the unpenalized (pen_val=None) first fit.
        if self.user_pen_vals is not None:
            return len(self.user_pen_vals) + 1
        else:
            return self.n_pen_seq + 1
    @property
    def param_grid_(self):
        """
        List of all parameter settings (one entry per estimated number of
        blocks); None before ``fit`` has run.
        """
        if hasattr(self, 'est_n_blocks_'):
            param_grid = {'n_blocks': self.est_n_blocks_}
            return list(ParameterGrid(param_grid))
        else:
            return None
    def get_default_pen_max(self, model, X):
        """Heuristic guess for the largest penalty value to try.

        Clones ``model``, initializes it on ``X`` and asks it for a
        penalty guess scaled by ``self.default_c``.
        """
        # set up a temporary model so the guess does not disturb `model`
        temp_model = clone(model)
        temp_model.view_models_ = \
            [temp_model.base_view_models[v]
             for v in range(temp_model.n_views)]
        temp_model.initialize_parameters(X)
        eval_pen_default = temp_model.\
            get_eval_pen_guess(X=X, c=self.default_c,
                               use_bipt_sp=True,
                               K='default')
        if self.verbosity >= 1:
            print('default pen val', eval_pen_default)
        return eval_pen_default
    def fit(self, X):
        """Run the penalty-path search.

        For each random initialization the unconstrained MVMM is fit first
        (pen_idx == 0); the block-diagonal MVMM is then warm-started along
        an increasing penalty sequence.  Fit diagnostics are tracked and
        the best model found for each estimated number of blocks is kept.

        Parameters
        ----------
        X :
            Multi-view data; passed through to the underlying MVMM
            ``fit`` methods.

        Returns
        -------
        self
        """
        # assert all(self.pen_vals_[1:] > 0)
        # assert len(np.unique(self.pen_vals)) == len(self.pen_vals)
        init_seeds = get_seeds(n_seeds=self.n_init,
                               random_state=self.random_state)
        fit_data = pd.DataFrame()
        n_blocks_best_models = {}
        n_blocks_best_idx = {}
        init_adapt_weights = []
        for init in range(self.n_init):
            if self.verbosity >= 1:
                current_time = datetime.now().strftime("%H:%M:%S")
                print('Initialization {}/{} at {}'.
                      format(init + 1, self.n_init, current_time))
            # max number of evals to penalize
            if self.max_n_blocks == 'default':
                K = min(self.base_mvmm_0.n_view_components)
            else:
                K = int(self.max_n_blocks)
            for pen_idx in range(self.n_pen_vals_):
                if self.verbosity >= 1:
                    current_time = datetime.now().strftime("%H:%M:%S")
                    print('Penalty {}/{} at {}'.
                          format(pen_idx + 1, self.n_pen_vals_,
                                 current_time))
                data = {'pen_idx': pen_idx, 'init': init}
                start_time = time()
                if pen_idx == 0:
                    # First step of the path: fit the unconstrained MVMM.
                    pen_val = None
                    # fit model
                    fit_model = clone(self.base_mvmm_0)
                    fit_model.set_params(random_state=init_seeds[init],
                                         n_init=1)
                    fit_model.fit(X)
                    # get current parameter values for warm starting
                    current_view_params = fit_model._get_parameters()['views']
                    current_bd_weights = fit_model.weights_mat_
                    # Rescale so the weights sum to epsilon_tilde of the
                    # block-diagonal model.
                    current_bd_weights = current_bd_weights * \
                        self.base_wbd_mvmm.epsilon_tilde / \
                        current_bd_weights.sum()
                    # track data
                    data['n_blocks'] = 1
                    data['n_steps'] = fit_model.opt_data_['n_steps']
                    # compute adaptive weights
                    if self.eval_weights == 'adapt':
                        # NOTE(review): ``self`` is the search object, which
                        # never sets ``bd_weights_`` — this likely should use
                        # the fitted model's weights (e.g. fit_model.weights_mat_
                        # or current_bd_weights); confirm before relying on
                        # eval_weights='adapt'.
                        evals = geigh_Lsym_bp_smallest(X=self.bd_weights_,
                                                       rank=K,
                                                       zero_tol=1e-10,
                                                       method='tsym')
                        # deal with 0 evals by artificially setting
                        # them to the smallest non-zero eval
                        zero_evals = evals < 1e-6
                        if np.mean(zero_evals) == 1:
                            # edge case: if all evals are 0 just use uniform
                            evals = np.ones(len(evals))
                        else:
                            evals[zero_evals] = min(evals[~zero_evals])
                        # clip for numerical stability
                        eval_weights = (1 / evals) ** self.adapt_expon
                        init_adapt_weights.append(eval_weights)
                else:
                    # Remaining path steps: warm-start the block-diagonal
                    # MVMM from the previous fit.
                    # setup and fit model
                    fit_model = clone(self.base_wbd_mvmm)
                    params = {'init_params_method': 'user',
                              'init_params_value': current_view_params,
                              'init_weights_method': 'user',
                              'init_weights_value': current_bd_weights
                              # 'eval_pen_base': pen_val,
                              }
                    params.update({'n_pen_tries': 1,
                                   'n_init': 1,
                                   # 'fine_tune_n_steps': None
                                   })
                    fit_model.set_params(**params)
                    # set eval weights
                    # NOTE(review): truthiness of a numpy array raises
                    # ValueError; an ``is not None`` check is presumably
                    # intended for user_eval_weights here.
                    if self.user_eval_weights:
                        eval_weights = self.user_eval_weights
                    elif self.eval_weights == 'adapt':
                        eval_weights = init_adapt_weights[init]
                    elif self.eval_weights == 'uniform':
                        eval_weights = np.ones(K)
                    elif self.eval_weights == 'lin':
                        eval_weights = 1 / np.arange(1, K + 1)
                    elif self.eval_weights == 'quad':
                        eval_weights = (1 / np.arange(1, K + 1)) ** 2
                    elif self.eval_weights == 'exp':
                        eval_weights = .5 ** np.arange(1, K + 1)
                    else:
                        raise ValueError("invalid input for eval_weights: {}"
                                         .format(self.eval_weights))
                    # Clip, normalize (sum == len) and sort ascending to keep
                    # the penalty magnitude reasonable.
                    def process(x):
                        x = np.clip(x, a_min=0, a_max=1e5)
                        return asc_sort(x * len(x) / np.sum(x))
                    # eval_weights = np.clip(eval_weights, a_min=0, a_max=1e5)
                    # superficial normalization step keeps
                    # penalty value reasonable
                    # eval_weights *= K / np.sum(eval_weights)
                    # eval_weights = desc_sort(eval_weights)
                    eval_weights = process(eval_weights)
                    fit_model.set_params(eval_weights=eval_weights)
                    # set penalty sequence for this initialization
                    if pen_idx == 1:
                        if self.user_pen_vals is not None:
                            pen_seq = np.sort(self.user_pen_vals)
                        elif self.pen_max == 'default':
                            # compute default max penalty
                            default_pen_max = \
                                self.get_default_pen_max(model=fit_model, X=X)
                            pen_seq = self.\
                                get_pen_seq_from_max(pen_max=default_pen_max)
                        elif self.pen_max != 'default':
                            pen_seq = self.\
                                get_pen_seq_from_max(pen_max=self.pen_max)
                        # Prepend None so pen_seq indexes align with pen_idx.
                        pen_seq = np.concatenate([[None], pen_seq])
                    # set penalty value
                    pen_val = pen_seq[pen_idx]
                    fit_model.set_params(eval_pen_base=pen_val)
                    fit_model.fit(X)
                    # get current parameter values for warm starting
                    current_view_params = fit_model._get_parameters()['views']
                    current_bd_weights = fit_model.bd_weights_
                    # track data
                    data['pen_val'] = pen_val
                    data['n_blocks'] = fit_model.opt_data_['n_blocks_est']
                    data['n_steps'] = \
                        fit_model.opt_data_['adpt_opt_data']['n_steps']
                # store tracking data
                data['fit_time'] = time() - start_time
                tracking_data = fit_model.compute_tracking_data(X)
                data['loss_val'] = tracking_data['loss_val']
                data['obs_nll'] = tracking_data['obs_nll']
                # TODO: possibly precompute distances
                model_sel_scores = \
                    unsupervised_cluster_scores(X=X,
                                                estimator=fit_model,
                                                measures=self.metrics2compute)
                for measure in model_sel_scores.keys():
                    data[measure] = model_sel_scores[measure]
                # data['bic'] = fit_model.bic(X)
                # data['aic'] = fit_model.aic(X)
                # NOTE(review): DataFrame.append was removed in pandas 2.0;
                # pd.concat is the forward-compatible replacement.
                fit_data = fit_data.append(data, ignore_index=True)
                # save this model if it is the best
                current_n_blocks = data['n_blocks']  # current n_blocks
                # get the rows for this block count across all inits so far
                block_scores = fit_data.query("n_blocks == @current_n_blocks")
                if MEASURE_MIN_GOOD[self.select_metric]:
                    best_idx = block_scores[self.select_metric].idxmin()
                else:
                    best_idx = block_scores[self.select_metric].idxmax()
                # best_idx = fit_data.\
                #     query("n_blocks == @n_blocks")[self.select_metric].\
                #     idxmin()
                if fit_data.loc[best_idx, 'init'] == init:
                    n_blocks_best_models[current_n_blocks] = fit_model
                    n_blocks_best_idx[current_n_blocks] = best_idx
        # Collect the winners, sorted by their estimated block counts.
        self.est_n_blocks_ = np.sort(list(n_blocks_best_models.keys()))
        self.estimators_ = [n_blocks_best_models[n_blocks]
                            for n_blocks in self.est_n_blocks_]
        int_cols = ['init', 'pen_idx', 'n_blocks', 'n_steps']
        fit_data[int_cols] = fit_data[int_cols].astype(int)
        self.init_fit_data_ = fit_data
        self.fit_init_best_idxs = [n_blocks_best_idx[n_blocks]
                                   for n_blocks in self.est_n_blocks_]
        if self.eval_weights == 'adapt':
            self.init_adapt_weights_ = init_adapt_weights
        # Model selection scores across the retained estimators.
        self.model_sel_scores_ = \
            several_unsupervised_cluster_scores(X=X,
                                                estimators=self.estimators_,
                                                measures=self.metrics2compute)
        return self
    def check_fit(self):
        # True once ``fit`` has populated the retained estimators.
        return hasattr(self, 'estimators_')
    @property
    def best_idx_(self):
        """
        Index of selected model (into ``estimators_``), or None before fit.
        """
        if self.check_fit():
            if MEASURE_MIN_GOOD[self.select_metric]:
                return self.model_sel_scores_[self.select_metric].idxmin()
            else:
                return self.model_sel_scores_[self.select_metric].idxmax()
        else:
            return None
    @property
    def best_estimator_(self):
        """
        Selected estimator, or None before fit.
        """
        if self.check_fit():
            return self.estimators_[self.best_idx_]
        else:
            return None
    def predict(self, X):
        """
        Predict the labels for the data samples in X using trained model.
        """
        return self.best_estimator_.predict(X)
    def predict_proba(self, X):
        """
        Predict posterior probability of each component given the data.
        """
        return self.best_estimator_.predict_proba(X)
    def sample(self, n_samples=1):
        """
        Generate random samples from the fitted Gaussian distribution.
        """
        return self.best_estimator_.sample(n_samples=n_samples)
    def score(self, X, y=None):
        """
        Compute the per-sample average log-likelihood of the given data X.
        """
        return self.best_estimator_.score(X)
    def score_samples(self, X):
        """
        Compute the weighted log probabilities for each sample.
        """
        return self.best_estimator_.score_samples(X)
| 37.42069 | 128 | 0.531699 |
c251ec2f4862db71edcfa85809de82aead64c14b | 812 | py | Python | tests/unit/providers/traversal/test_delegate_py3.py | YelloFam/python-dependency-injector | 541131e33858ee1b8b5a7590d2bb9f929740ea1e | [
"BSD-3-Clause"
] | null | null | null | tests/unit/providers/traversal/test_delegate_py3.py | YelloFam/python-dependency-injector | 541131e33858ee1b8b5a7590d2bb9f929740ea1e | [
"BSD-3-Clause"
] | null | null | null | tests/unit/providers/traversal/test_delegate_py3.py | YelloFam/python-dependency-injector | 541131e33858ee1b8b5a7590d2bb9f929740ea1e | [
"BSD-3-Clause"
] | null | null | null | """Delegate provider traversal tests."""
from dependency_injector import providers
def test_traversal_provider():
    """Delegate traversal yields exactly the delegated provider."""
    wrapped = providers.Provider()
    delegate = providers.Delegate(wrapped)
    traversed = list(delegate.traverse())
    assert len(traversed) == 1
    assert wrapped in traversed
def test_traversal_provider_and_overriding():
    """Traversal follows the delegate's overriding chain transitively."""
    delegated = providers.Provider()
    inner_override = providers.Provider()
    outer_override = providers.Provider()
    outer_override.override(inner_override)
    delegate = providers.Delegate(delegated)
    delegate.override(outer_override)
    traversed = list(delegate.traverse())
    assert len(traversed) == 3
    for expected in (delegated, inner_override, outer_override):
        assert expected in traversed
| 24.606061 | 51 | 0.752463 |
c2531eebc4b5c56768575d213a86688eb0c965b8 | 161 | py | Python | rhg_compute_tools/__init__.py | dpa9694/rhg_compute_tools | f111c380e3672983fa62795346be631e62c12611 | [
"MIT"
] | null | null | null | rhg_compute_tools/__init__.py | dpa9694/rhg_compute_tools | f111c380e3672983fa62795346be631e62c12611 | [
"MIT"
] | 2 | 2020-05-31T20:40:25.000Z | 2020-07-15T16:51:55.000Z | rhg_compute_tools/__init__.py | dpa9694/rhg_compute_tools | f111c380e3672983fa62795346be631e62c12611 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Top-level package for RHG Compute Tools."""
__author__ = """Michael Delgado"""
__email__ = 'mdelgado@rhg.com'
__version__ = '0.2.1'
| 20.125 | 46 | 0.645963 |
c253281fece2f931537ba0aac860be0c88c05f35 | 481 | py | Python | grocers_panel/migrations/0005_alter_shop_food.py | delitamakanda/GroceryApp | 8b0eeb40197b480598928dd7e95e63ca180c9bf1 | [
"MIT"
] | 1 | 2021-05-25T02:46:42.000Z | 2021-05-25T02:46:42.000Z | grocers_panel/migrations/0005_alter_shop_food.py | delitamakanda/GroceryApp | 8b0eeb40197b480598928dd7e95e63ca180c9bf1 | [
"MIT"
] | null | null | null | grocers_panel/migrations/0005_alter_shop_food.py | delitamakanda/GroceryApp | 8b0eeb40197b480598928dd7e95e63ca180c9bf1 | [
"MIT"
] | null | null | null | # Generated by Django 3.2.3 on 2021-12-19 17:24
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('grocers_panel', '0004_shop_grocer'),
]
operations = [
migrations.AlterField(
model_name='shop',
name='food',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='grocers_panel.food'),
),
]
| 24.05 | 117 | 0.640333 |
c2544d4b8163352d260ea54398086333ae611bb9 | 271 | py | Python | apps/core/models/colabore.py | bispojr/observatorio-ufj-covid19 | 8667fae1367b95a7dfa8558fbac3b1b0b708af8d | [
"MIT"
] | 3 | 2020-04-02T21:59:19.000Z | 2020-12-03T12:37:26.000Z | apps/core/models/colabore.py | bispojr/observatorio-ufj-covid19 | 8667fae1367b95a7dfa8558fbac3b1b0b708af8d | [
"MIT"
] | 68 | 2020-03-28T22:40:08.000Z | 2020-07-08T18:04:07.000Z | apps/core/models/colabore.py | bispojr/observatorio-ufj-covid19 | 8667fae1367b95a7dfa8558fbac3b1b0b708af8d | [
"MIT"
] | 5 | 2020-03-28T21:35:30.000Z | 2020-06-10T01:28:14.000Z |
class Colabore:
    """Provides the template context for the "Colabore" (collaborate) page."""

    def getContext(self):
        """Return the context dict used to render the Colabore page.

        Bug fix: the original body was ``self.__contextColabore(self)``,
        which passes the instance twice to a bound method and raised
        ``TypeError`` on every call.  (The trailing dataset residue fused
        onto the final line of this class was also removed.)
        """
        return self.__contextColabore()

    def __contextColabore(self):
        # Static page metadata; "grupo"/"grupo_link" drive nav highlighting.
        context = {
            "grupo": "geral",
            "grupo_link": "saiba_mais",
            "titulo": "Observatório UFJ Covid-19 - Colabore"
        }
        return context
c254aa30204c44e620331c5c8033c1497466fa14 | 6,339 | py | Python | tests/logic/order_history_test.py | rirwin/stock-analysis | d13b9be86265ad87c10847422a04f93409b0bf51 | [
"Apache-2.0"
] | null | null | null | tests/logic/order_history_test.py | rirwin/stock-analysis | d13b9be86265ad87c10847422a04f93409b0bf51 | [
"Apache-2.0"
] | 1 | 2020-06-24T04:41:59.000Z | 2020-06-24T04:41:59.000Z | tests/logic/order_history_test.py | rirwin/stock_analysis | d13b9be86265ad87c10847422a04f93409b0bf51 | [
"Apache-2.0"
] | null | null | null | import datetime
from sqlalchemy.orm import sessionmaker
from database import db
from database.order_history import OrderHistory
from stock_analysis.logic import order_history
from stock_analysis.logic.order_history import Order
from stock_analysis.logic.order_history import OrderHistoryLogic
from stock_analysis.logic.order_history import TickerDate
Session = sessionmaker(bind=db.engine)  # session factory bound to the project's configured engine
class TestOrderHistoryLogic(object):
    """Integration tests for ``OrderHistoryLogic``.

    Each test runs inside ``db.in_sandbox`` — presumably a decorator that
    gives every test an isolated/rolled-back database (TODO confirm) —
    builds ``Order`` fixtures, writes them through the logic layer and
    asserts on the queried results.
    """
    @db.in_sandbox
    def test_add_buy_orders(self):
        # A persisted buy order round-trips field-for-field; note the date
        # is stored as its ISO-format string.
        logic = OrderHistoryLogic()
        user_id = 1
        order = Order(
            user_id=user_id,
            order_type=order_history.BUY_ORDER_TYPE,
            date=datetime.date(2015, 8, 9),
            ticker='AAPL',
            num_shares=20,
            price=150.001,
        )
        logic.add_orders([order])
        session = Session()
        resp = session.query(OrderHistory).all()
        assert len(resp) == 1
        order_in_db = resp[0]
        assert order_in_db.user_id == user_id
        assert order_in_db.date == order.date.isoformat()
        assert order_in_db.ticker == order.ticker
        assert order_in_db.num_shares == order.num_shares
        assert order_in_db.price == order.price
        session.close()
    @db.in_sandbox
    def test_get_tickers_and_min_dates_for_user(self):
        # Two orders on the same ticker collapse to one (ticker, earliest
        # date) entry for the user.
        logic = OrderHistoryLogic()
        user_id = 1
        order1 = Order(
            user_id=user_id,
            order_type=order_history.BUY_ORDER_TYPE,
            date=datetime.date(2015, 8, 9),
            ticker='AAPL',
            num_shares=20,
            price=150.002,
        )
        order2 = Order(
            user_id=user_id,
            order_type=order_history.BUY_ORDER_TYPE,
            date=datetime.date(2017, 1, 1),
            ticker='AAPL',
            num_shares=20,
            price=152.333,
        )
        logic.add_orders([order1, order2])
        ticker_dates = logic.get_tickers_and_min_dates_for_user(user_id)
        assert ticker_dates == [TickerDate(order1.ticker, order1.date)]
    @db.in_sandbox
    def test_get_all_order_tickers_min_date(self):
        # The global (cross-user) variant also returns one entry per
        # ticker with the overall earliest date.
        logic = OrderHistoryLogic()
        user_id_1 = 1
        user_id_2 = 2
        order1 = Order(
            user_id=user_id_1,
            order_type=order_history.BUY_ORDER_TYPE,
            date=datetime.date(2015, 8, 9),
            ticker='AAPL',
            num_shares=20,
            price=150.001,
        )
        order2 = Order(
            user_id=user_id_2,
            order_type=order_history.BUY_ORDER_TYPE,
            date=datetime.date(2017, 1, 1),
            ticker='AAPL',
            num_shares=20,
            price=152.333,
        )
        logic.add_orders([order1, order2])
        ticker_dates = logic.get_all_order_tickers_min_date()
        assert ticker_dates == [TickerDate(order1.ticker, order1.date)]
    @db.in_sandbox
    def test_get_orders_for_user(self):
        # Per-user retrieval filters out other users' orders; the
        # ticker-keyed view groups that user's orders by symbol.
        logic = OrderHistoryLogic()
        order1 = Order(
            user_id=1,
            order_type=order_history.BUY_ORDER_TYPE,
            ticker='AAPL',
            date=datetime.date(2017, 6, 12),
            num_shares=2,
            price=150.0,
        )
        order2 = Order(
            user_id=1,
            order_type=order_history.BUY_ORDER_TYPE,
            ticker='AAPL',
            date=datetime.date(2017, 6, 19),
            num_shares=3,
            price=170.0,
        )
        order3 = Order(
            user_id=2,
            order_type=order_history.BUY_ORDER_TYPE,
            ticker='AAPL',
            date=datetime.date(2017, 6, 19),
            num_shares=3,
            price=170.0,
        )
        logic.add_orders([order1, order2, order3])
        assert set(logic.get_orders_for_user(1)) == set([order1, order2])
        assert logic.get_ticker_to_orders(1) == {'AAPL': [order1, order2]}
    @db.in_sandbox
    def test_get_portfolio_shares_owned_on_date(self):
        # Buys add shares, sells subtract them; the position is evaluated
        # as of the given date.
        logic = OrderHistoryLogic()
        order1 = Order(
            user_id=1,
            order_type=order_history.BUY_ORDER_TYPE,
            ticker='AAPL',
            date=datetime.date(2017, 6, 12),
            num_shares=2,
            price=150.0,
        )
        order2 = Order(
            user_id=1,
            order_type=order_history.BUY_ORDER_TYPE,
            ticker='ATVI',
            date=datetime.date(2017, 6, 19),
            num_shares=3,
            price=170.0,
        )
        order3 = Order(
            user_id=1,
            order_type=order_history.SELL_ORDER_TYPE,
            ticker='AAPL',
            date=datetime.date(2017, 6, 26),
            num_shares=1,
            price=180.0,
        )
        logic.add_orders([order1, order2, order3])
        results = logic.get_portfolio_shares_owned_on_date(order1.user_id, datetime.date(2017, 6, 27))
        assert results == {'AAPL': 1, 'ATVI': 3}
    @db.in_sandbox
    def test_get_ticker_total_purchased_sold(self):
        # Totals are per-ticker sums of shares * price, split into a
        # purchased dict (buys) and a sold dict (sells).
        logic = OrderHistoryLogic()
        order1 = Order(
            user_id=1,
            order_type=order_history.BUY_ORDER_TYPE,
            ticker='AAPL',
            date=datetime.date(2017, 6, 12),
            num_shares=2,
            price=150.0,
        )
        order2 = Order(
            user_id=1,
            order_type=order_history.BUY_ORDER_TYPE,
            ticker='ATVI',
            date=datetime.date(2017, 6, 19),
            num_shares=3,
            price=170.0,
        )
        order3 = Order(
            user_id=1,
            order_type=order_history.SELL_ORDER_TYPE,
            ticker='AAPL',
            date=datetime.date(2017, 6, 26),
            num_shares=1,
            price=180.0,
        )
        order4 = Order(
            user_id=1,
            order_type=order_history.SELL_ORDER_TYPE,
            ticker='ATVI',
            date=datetime.date(2017, 6, 29),
            num_shares=3,
            price=120.0,
        )
        logic.add_orders([order1, order2, order3, order4])
        purchased, sold = logic.get_ticker_total_purchased_sold(order1.user_id)
        assert purchased == {
            'AAPL': order1.num_shares * order1.price,
            'ATVI': order2.num_shares * order2.price
        }
        assert sold == {
            'AAPL': order3.num_shares * order3.price,
            'ATVI': order4.num_shares * order4.price
        }
| 31.073529 | 102 | 0.56602 |
c256ecf86fa244e6c6873a974253c22509fa427e | 3,380 | py | Python | source_dir/densenet_3d_estimator.py | ffeijoo/3d-DenseNet | baec68af07294ac5e432096055909ff08ea2e81c | [
"MIT"
] | null | null | null | source_dir/densenet_3d_estimator.py | ffeijoo/3d-DenseNet | baec68af07294ac5e432096055909ff08ea2e81c | [
"MIT"
] | null | null | null | source_dir/densenet_3d_estimator.py | ffeijoo/3d-DenseNet | baec68af07294ac5e432096055909ff08ea2e81c | [
"MIT"
] | null | null | null | import os
import tensorflow as tf
from densenet_3d_model import DenseNet3D
def model_fn(features, labels, mode, params):
    """Estimator model_fn wrapping DenseNet3D.

    ``features['video_clips']`` feeds the network.  PREDICT mode disables
    training behaviour and returns prediction tensors; TRAIN/EVAL share a
    spec carrying loss, train_op and the eval accuracy metric.
    """
    network = DenseNet3D(
        video_clips=features['video_clips'], labels=labels, **params)
    if mode == tf.estimator.ModeKeys.PREDICT:
        # Inference path: switch off training-only behaviour first.
        network.is_training = False
        return _predict_result(network)
    return tf.estimator.EstimatorSpec(
        mode=mode,
        loss=network.losses,
        train_op=network.train_op,
        eval_metric_ops={'eval_accuracy': network.accuracy})
def _predict_result(model):
    """Build the PREDICT-mode EstimatorSpec (class, probability, logits)."""
    outputs = {
        'prediction': model.prediction,
        'probability': model.probability,
        'logits': model.logits,
    }
    return tf.estimator.EstimatorSpec(
        mode=tf.estimator.ModeKeys.PREDICT, predictions=outputs)
def serving_input_fn(params):
    """Serving input receiver accepting raw float32 clip batches of shape
    [batch, num_frames_per_clip, height, width, channel]."""
    clip_shape = [None, params['num_frames_per_clip'], params['height'],
                  params['width'], params['channel']]
    inputs = {'video_clips': tf.placeholder(tf.float32, shape=clip_shape)}
    return tf.estimator.export.build_raw_serving_input_receiver_fn(inputs)()
def train_input_fn(training_dir, params):
    """Training input fn over ``<training_dir>/train.tfrecord``."""
    tfrecord_path = os.path.join(training_dir, 'train.tfrecord')
    return _build_tfrecord_dataset(
        tfrecord_path, params['train_total_video_clip'], **params)
def eval_input_fn(evaluating_dir, params):
    """Evaluation input fn over ``<evaluating_dir>/eval.tfrecord``."""
    tfrecord_path = os.path.join(evaluating_dir, 'eval.tfrecord')
    return _build_tfrecord_dataset(
        tfrecord_path, params['eval_total_video_clip'], **params)
def _build_tfrecord_dataset(directory, total_clip_num, batch_size, **params):
    '''
    Build a batched one-shot iterator over a TFRecord file of video clips.

    Each record decodes to a clip of shape
    [num_frames_per_clip, height, width, channel], e.g. [16, 100, 120, 3].
    Returns a ({'video_clips': clips}, labels) pair of batch tensors.
    '''
    print('Building dataset, number of clips: ' + str(total_clip_num))
    # Pipeline order matters: shuffle across the whole file, decode each
    # serialized example, then repeat indefinitely before batching.
    dataset = (tf.data.TFRecordDataset(directory)
               .shuffle(buffer_size=total_clip_num)
               .map(map_func=lambda serialized_example: _parser(
                   serialized_example, **params))
               .repeat())
    batch_iterator = dataset.batch(batch_size=batch_size).make_one_shot_iterator()
    clips, labels = batch_iterator.get_next()
    return {'video_clips': clips}, labels
def _parser(serialized_example, num_frames_per_clip, **params):
    """Deserialize one tf.Example into a (clip tensor, label) pair."""
    feature_spec = {
        'clip/width': tf.FixedLenFeature([], tf.int64),
        'clip/height': tf.FixedLenFeature([], tf.int64),
        'clip/channel': tf.FixedLenFeature([], tf.int64),
        'clip/raw': tf.FixedLenFeature([num_frames_per_clip], tf.string),
        'clip/label': tf.FixedLenFeature([], tf.int64),
    }
    features = tf.parse_single_example(serialized_example,
                                       features=feature_spec)
    # Decode every JPEG-encoded frame of the clip to a float32 image.
    clip = tf.map_fn(lambda image: _decode_image(image, **params),
                     features['clip/raw'], dtype=tf.float32)
    return clip, features['clip/label']
def _decode_image(image, channel, width, height, **params):
    """Decode one JPEG frame to a float32 tensor of shape
    [height, width, channel].

    Fixes: removed stray dataset residue ("| 34.845361 | 108 | ...")
    fused onto the final ``return`` line, which made the module
    unparsable; corrected comment typos ("necesary", "trainsition").
    """
    image = tf.image.decode_jpeg(image, channels=channel)
    # This set_shape step is necessary for the last
    # transition_layer_to_classes layer in the model.
    image.set_shape([height, width, channel])
    image = tf.cast(image, tf.float32)
    return image