id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
3393812 | <filename>safecast_deploy/same_env.py
import datetime
import pprint
import sys
from safecast_deploy import git_logger, verbose_sleep
class SameEnv:
    """Deploys an already-built application version to both subenvironments
    (worker and web) of an Elastic Beanstalk application.

    The worker is deployed first so that database migrations are applied
    before the web tier picks up the new version.
    """

    def __init__(self, state):
        # state carries the EB client, environment metadata, subenv names
        # and the old/new version information.
        self.state = state
        self._c = state.eb_client

    def run(self):
        """Run the full deployment, print and log the result."""
        self.start_time = datetime.datetime.now(datetime.timezone.utc)
        # Handle the worker environment first, to ensure that database
        # migrations are applied
        self._handle_worker()
        self._handle_web()
        result = self._generate_result()
        self._print_result(result)
        git_logger.log_result(result)

    def _handle_worker(self):
        print("Deploying to the worker.", file=sys.stderr)
        env_name = self.state.env_metadata[self.state.subenvs['wrk']]['name']
        self._update_environment(env_name)

    def _handle_web(self):
        print("Deploying to the web instances.", file=sys.stderr)
        env_name = self.state.env_metadata[self.state.subenvs['web']]['name']
        self._update_environment(env_name)

    def _generate_result(self):
        """Build the deployment summary dict used for printing and logging."""
        completed_time = datetime.datetime.now(datetime.timezone.utc)
        result = {
            'app': self.state.app,
            'completed_at': completed_time,
            'elapsed_time': (completed_time - self.start_time).total_seconds(),
            'env': self.state.env,
            'event': 'same_env',
            'started_at': self.start_time,
            'web': {
                'env': self.state.env_metadata[self.state.subenvs['web']]['name'],
                'new_version': self.state.new_version,
                'new_version_parsed': self.state.new_versions_parsed['web'],
                'old_version': self.state.env_metadata[self.state.subenvs['web']]['version'],
                'old_version_parsed': self.state.old_versions_parsed['web'],
            },
            'wrk': {
                'env': self.state.env_metadata[self.state.subenvs['wrk']]['name'],
                'new_version': self.state.new_version,
                'new_version_parsed': self.state.new_versions_parsed['wrk'],
                'old_version': self.state.env_metadata[self.state.subenvs['wrk']]['version'],
                'old_version_parsed': self.state.old_versions_parsed['wrk'],
            },
        }
        self._add_git('web', result)
        self._add_git('wrk', result)
        return result

    def _add_git(self, role, result):
        """Attach a GitHub compare URL for `role` when both versions carry a commit."""
        repo_names = {
            'api': 'safecastapi',
            'ingest': 'ingest',
        }
        if 'git_commit' in self.state.old_versions_parsed[role] \
                and 'git_commit' in self.state.new_versions_parsed[role]:
            result[role]['github_diff'] = 'https://github.com/Safecast/{}/compare/{}...{}'.format(
                repo_names[self.state.app],
                self.state.old_versions_parsed[role]['git_commit'],
                self.state.new_versions_parsed[role]['git_commit']
            )

    def _print_result(self, result):
        pprint.PrettyPrinter(stream=sys.stderr).pprint(result)
        print("Deployment completed.", file=sys.stderr)

    def _update_environment(self, env_name):
        self._c.update_environment(
            ApplicationName=self.state.app,
            EnvironmentName=env_name,
            VersionLabel=self.state.new_version,
        )
        print("Waiting for instance health to return to normal.", file=sys.stderr)
        self._wait_for_green(env_name)

    def _wait_for_green(self, env_name):
        """Poll environment health until 'Ok' or the polling budget runs out.

        An initial 70s grace period lets the deployment start before polling;
        after that, health is checked every 40s for up to 480s of polling.
        """
        verbose_sleep(70)
        wait_seconds = 0
        while wait_seconds < 480:
            health = self._c.describe_environment_health(
                EnvironmentName=env_name,
                AttributeNames=['HealthStatus', ]
            )['HealthStatus']
            if health == 'Ok':
                print("Environment health has returned to normal.", file=sys.stderr)
                return
            verbose_sleep(40)
            wait_seconds += 40
        print("Environment health did not return to normal within 480 seconds. Aborting further operations.",
              file=sys.stderr)
        # Use sys.exit rather than the interactive-only exit() builtin, which
        # is not guaranteed to exist when running under -S or in frozen apps.
        sys.exit(1)
| StarcoderdataPython |
4846339 | #!/usr/bin/env python3
from .curve import *
from .geodesics import *
from .manifold import *
| StarcoderdataPython |
9785570 | #!/usr/bin/env python
"""Simple parsers for the output of WMI queries."""
import binascii
import calendar
import struct
import time
from grr.lib import parser
from grr.lib import rdfvalue
from grr.lib.rdfvalues import anomaly as rdf_anomaly
from grr.lib.rdfvalues import client as rdf_client
from grr.lib.rdfvalues import wmi as rdf_wmi
def BinarySIDtoStringSID(sid):
  """Converts a binary SID to its string representation.

  https://msdn.microsoft.com/en-us/library/windows/desktop/aa379597.aspx

  The byte representation of an SID is as follows:
    Offset  Length  Description
    00      01      revision
    01      01      sub-authority count
    02      06      authority (big endian)
    08      04      subauthority #1 (little endian)
    0b      04      subauthority #2 (little endian)
    ...

  Args:
    sid: A byte array.  NOTE(review): the ord()/str-slice handling assumes a
      Python 2 byte string; under Python 3 this would need adaptation.

  Returns:
    SID in string form, e.g. "S-1-5-21-...".

  Raises:
    ValueError: If the binary SID is malformed.
  """
  if not sid:
    return ""
  # First byte is the revision number.
  str_sid_components = [ord(sid[0])]
  # Now decode the 48-bit authority and the sub-authorities, which are only
  # present when the fixed 8-byte header is complete.
  if len(sid) >= 8:
    subauthority_count = ord(sid[1])
    # The 6-byte big-endian authority is assembled from a 16-bit high part
    # shifted above a 32-bit low part.
    identifier_authority = struct.unpack(">H", sid[2:4])[0]
    identifier_authority <<= 32
    identifier_authority |= struct.unpack(">L", sid[4:8])[0]
    str_sid_components.append(identifier_authority)
    start = 8
    for i in range(subauthority_count):
      authority = sid[start:start + 4]
      if not authority:
        # Fewer sub-authorities than the declared count: stop quietly.
        break
      if len(authority) < 4:
        # A 1-3 byte tail means the SID was truncated mid-component.
        raise ValueError(
            "In binary SID '%s', component %d has been truncated. "
            "Expected 4 bytes, found %d: (%s)" % (",".join(
                [str(ord(c)) for c in sid]), i, len(authority), authority))
      str_sid_components.append(struct.unpack("<L", authority)[0])
      start += 4
  return "S-%s" % ("-".join([str(x) for x in str_sid_components]))
class WMIEventConsumerParser(parser.WMIQueryParser):
  """Base class for WMI EventConsumer Parsers."""

  __abstract = True  # pylint: disable=invalid-name

  def Parse(self, query, result, knowledge_base):
    """Parse a WMI Event Consumer.

    Yields an Anomaly listing unmappable fields (if any), followed by one
    populated rdfvalue per entry in self.output_types.
    """
    _ = query, knowledge_base
    wmi_dict = result.ToDict()
    try:
      # CreatorSID arrives as a sequence of byte values; rebuild the binary
      # string and render it in standard S-1-... form.
      wmi_dict["CreatorSID"] = BinarySIDtoStringSID("".join(
          [chr(i) for i in wmi_dict["CreatorSID"]]))
    except (ValueError, TypeError) as e:
      # We recover from corrupt SIDs by outputting it raw as a string
      wmi_dict["CreatorSID"] = str(wmi_dict["CreatorSID"])
    except KeyError as e:
      # No CreatorSID field present; nothing to convert.
      pass
    for output_type in self.output_types:
      anomalies = []
      output = rdfvalue.RDFValue.classes[output_type]()
      for k, v in wmi_dict.iteritems():  # NOTE(review): Python 2 dict API.
        try:
          output.Set(k, v)
        except AttributeError as e:
          # Skip any attribute we don't know about
          anomalies.append("Unknown field %s, with value %s" % (k, v))
        except ValueError as e:
          anomalies.append("Invalid value %s for field %s: %s" % (v, k, e))
      # Yield anomalies first to help with debugging
      if anomalies:
        yield rdf_anomaly.Anomaly(
            type="PARSER_ANOMALY",
            generated_by=self.__class__.__name__,
            finding=anomalies)
      # Raise if the parser generated no output but there were fields.
      if wmi_dict and not output:
        raise ValueError("Non-empty dict %s returned empty output." % wmi_dict)
      yield output
class WMIActiveScriptEventConsumerParser(WMIEventConsumerParser):
  """Parses WMI ActiveScriptEventConsumer query results.

  Reference: https://msdn.microsoft.com/en-us/library/aa384749(v=vs.85).aspx
  """

  supported_artifacts = ["WMIEnumerateASEC"]
  output_types = [rdf_wmi.WMIActiveScriptEventConsumer.__name__]
class WMICommandLineEventConsumerParser(WMIEventConsumerParser):
  """Parses WMI CommandLineEventConsumer query results.

  Reference: https://msdn.microsoft.com/en-us/library/aa389231(v=vs.85).aspx
  """

  supported_artifacts = ["WMIEnumerateCLEC"]
  output_types = [rdf_wmi.WMICommandLineEventConsumer.__name__]
class WMIInstalledSoftwareParser(parser.WMIQueryParser):
  """Parser for WMI installed-software output. Yields SoftwarePackage rdfvalues."""

  output_types = [rdf_client.SoftwarePackage.__name__]
  supported_artifacts = ["WMIInstalledSoftware"]

  def Parse(self, query, result, knowledge_base):
    """Yield one installed SoftwarePackage for the given WMI row."""
    del query, knowledge_base  # Unused.
    yield rdf_client.SoftwarePackage(
        name=result["Name"],
        description=result["Description"],
        version=result["Version"],
        install_state=rdf_client.SoftwarePackage.InstallState.INSTALLED)
class WMIHotfixesSoftwareParser(parser.WMIQueryParser):
  """Parser for WMI hotfix output. Yields SoftwarePackage rdfvalues."""

  output_types = [rdf_client.SoftwarePackage.__name__]
  supported_artifacts = ["WMIHotFixes"]

  def AmericanDateToEpoch(self, date_str):
    """Convert a US-format (m/d/Y) date string to epoch microseconds.

    Returns 0 when the string does not parse.
    """
    try:
      parsed = time.strptime(date_str, "%m/%d/%Y")
    except ValueError:
      return 0
    return int(calendar.timegm(parsed)) * 1000000

  def Parse(self, query, result, knowledge_base):
    """Yield a SoftwarePackage for one hotfix row."""
    del query, knowledge_base  # Unused.
    row = result.ToDict()
    # InstalledOn comes back in a godawful format such as '7/10/2013'.
    yield rdf_client.SoftwarePackage(
        name=row.get("HotFixID"),
        description=row.get("Caption"),
        installed_by=row.get("InstalledBy"),
        install_state=rdf_client.SoftwarePackage.InstallState.INSTALLED,
        installed_on=self.AmericanDateToEpoch(row.get("InstalledOn", "")))
class WMIUserParser(parser.WMIQueryParser):
  """Parser for WMI Win32_UserAccount and Win32_UserProfile output."""

  output_types = [rdf_client.User.__name__]
  supported_artifacts = [
      "WMIProfileUsersHomeDir", "WMIAccountUsersDomain", "WMIUsers"
  ]

  # Maps WMI property names onto rdf_client.User attribute names.
  account_mapping = {
      # Win32_UserAccount
      "Name": "username",
      "Domain": "userdomain",
      "SID": "sid",
      # Win32_UserProfile
      "LocalPath": "homedir"
  }

  def Parse(self, query, result, knowledge_base):
    """Build a User from whichever mapped WMI properties are present."""
    del query, knowledge_base  # Unused.
    user = rdf_client.User()
    for wmi_name, user_attribute in self.account_mapping.items():
      try:
        user.Set(user_attribute, result[wmi_name])
      except KeyError:
        pass
    # We need at least a sid or a username. If these are missing its likely we
    # retrieved just the userdomain for an AD account that has a name collision
    # with a local account that is correctly populated. We drop the bogus
    # domain account.
    if user.sid or user.username:
      yield user
class WMILogicalDisksParser(parser.WMIQueryParser):
  """Parser for LogicalDisk WMI output. Yields Volume rdfvalues."""

  output_types = [rdf_client.Volume.__name__]
  supported_artifacts = ["WMILogicalDisks"]

  def Parse(self, query, result, knowledge_base):
    """Yield a Volume built from one logical-disk WMI row."""
    del query, knowledge_base  # Unused.
    row = result.ToDict()

    def to_int(value):
      # Sizes may be missing or non-numeric; treat those as unknown.
      try:
        return int(value)
      except (ValueError, TypeError):
        return None

    windows_volume = rdf_client.WindowsVolume(
        drive_letter=row.get("DeviceID"),
        drive_type=row.get("DriveType"))
    # Since we don't get the sector sizes from WMI, we just set them at 1 byte
    yield rdf_client.Volume(
        windowsvolume=windows_volume,
        name=row.get("VolumeName"),
        file_system_type=row.get("FileSystem"),
        serial_number=row.get("VolumeSerialNumber"),
        sectors_per_allocation_unit=1,
        bytes_per_sector=1,
        total_allocation_units=to_int(row.get("Size")),
        actual_available_allocation_units=to_int(row.get("FreeSpace")))
class WMIComputerSystemProductParser(parser.WMIQueryParser):
  """Parser for WMI output. Yields HardwareInfo with the identifying number."""

  output_types = [rdf_client.HardwareInfo.__name__]
  supported_artifacts = ["WMIComputerSystemProduct"]

  def Parse(self, query, result, knowledge_base):
    """Yield HardwareInfo from the WMI Identifying Number.

    Currently we are only grabbing the Identifying Number as the serial
    number (catches the unique number for VMs).  This could be changed to
    include more information from Win32_ComputerSystemProduct.
    """
    del query, knowledge_base  # Unused.
    yield rdf_client.HardwareInfo(
        system_manufacturer=result["Vendor"],
        serial_number=result["IdentifyingNumber"])
class WMIInterfacesParser(parser.WMIQueryParser):
  """Parser for WMI output. Yields SoftwarePackage rdfvalues."""

  output_types = [
      rdf_client.Interface.__name__, rdf_client.DNSClientConfiguration.__name__
  ]
  supported_artifacts = []

  def WMITimeStrToRDFDatetime(self, timestr):
    """Return RDFDatetime from string like 20140825162259.000000-420.

    Args:
      timestr: WMI time string
    Returns:
      rdfvalue.RDFDatetime

    We have some timezone manipulation work to do here because the UTC offset is
    in minutes rather than +-HHMM
    """
    # We use manual parsing here because the time functions provided (datetime,
    # dateutil) do not properly deal with timezone information.
    # Layout: YYYYMMDDhhmmss.uuuuuu±OOO where ±OOO is the UTC offset in
    # minutes (index 21 onward); index 14 is the literal '.'.
    offset_minutes = timestr[21:]
    year = timestr[:4]
    month = timestr[4:6]
    day = timestr[6:8]
    hours = timestr[8:10]
    minutes = timestr[10:12]
    seconds = timestr[12:14]
    microseconds = timestr[15:21]
    # NOTE(review): passing map() relies on Python 2 returning a list;
    # Python 3's lazy map object would not be accepted by timegm.
    unix_seconds = calendar.timegm(
        map(int, [year, month, day, hours, minutes, seconds]))
    # timegm treated the local stamp as UTC, so subtract the signed offset
    # to get the real UTC epoch.
    unix_seconds -= int(offset_minutes) * 60
    return rdfvalue.RDFDatetime(unix_seconds * 1e6 + int(microseconds))

  def _ConvertIPs(self, io_tuples, interface, output_dict):
    # For each (wmi_key, output_key) pair, normalize the value (scalar or
    # list) into a list of NetworkAddress objects.
    for inputkey, outputkey in io_tuples:
      addresses = []
      if isinstance(interface[inputkey], list):
        for ip_address in interface[inputkey]:
          addresses.append(
              rdf_client.NetworkAddress(human_readable_address=ip_address))
      else:
        addresses.append(
            rdf_client.NetworkAddress(
                human_readable_address=interface[inputkey]))
      output_dict[outputkey] = addresses
    return output_dict

  def Parse(self, query, result, knowledge_base):
    """Parse the WMI packages output.

    Yields an Interface followed by a DNSClientConfiguration for the row.
    """
    _ = query, knowledge_base
    args = {"ifname": result["Description"]}
    # MAC arrives colon-separated hex; store it as raw bytes.
    args["mac_address"] = binascii.unhexlify(result["MACAddress"].replace(
        ":", ""))
    self._ConvertIPs([("IPAddress", "addresses"),
                      ("DefaultIPGateway", "ip_gateway_list"),
                      ("DHCPServer", "dhcp_server_list")], result, args)
    if "DHCPLeaseExpires" in result:
      args["dhcp_lease_expires"] = self.WMITimeStrToRDFDatetime(
          result["DHCPLeaseExpires"])
    if "DHCPLeaseObtained" in result:
      args["dhcp_lease_obtained"] = self.WMITimeStrToRDFDatetime(
          result["DHCPLeaseObtained"])
    yield rdf_client.Interface(**args)
    yield rdf_client.DNSClientConfiguration(
        dns_server=result["DNSServerSearchOrder"],
        dns_suffix=result["DNSDomainSuffixSearchOrder"])
| StarcoderdataPython |
38065 | <reponame>bhrutledge/jahhills.com<filename>docs/_ext/django_models.py
# Auto-document Django models
# Copied and adapted from https://djangosnippets.org/snippets/2533/
import inspect
from django.utils.html import strip_tags
from django.utils.encoding import force_text
from django.db import models
def process_docstring(app, what, name, obj, options, lines):
# Only look at objects that inherit from Django's base model class
if inspect.isclass(obj) and issubclass(obj, models.Model):
fields = obj._meta.fields
for field in fields:
help_text = strip_tags(force_text(field.help_text))
verbose_name = force_text(field.verbose_name).capitalize()
field_type = type(field).__name__
if help_text:
lines.append(':param %s: %s' % (field.attname, help_text))
else:
lines.append(':param %s: %s' % (field.attname, verbose_name))
lines.append(':type %s: %s' % (field.attname, field_type))
# Return the extended docstring
return lines
def setup(app):
app.connect('autodoc-process-docstring', process_docstring)
| StarcoderdataPython |
3248296 | <reponame>polrev-github/polrev-django
from django import forms
from django.contrib.admin import site, widgets
from areas.models import LocalCouncilDistrict
from .area_forms import AreaForm
class LocalCouncilDistrictForm(AreaForm):
    """Admin form for LocalCouncilDistrict areas.

    Extends the shared AreaForm with the district's state/place references
    and an autocomplete widget for the place lookup.
    """

    class Meta:
        model = LocalCouncilDistrict
        # Add the district-specific reference fields to the base form's list.
        fields = AreaForm.Meta.fields + ['state_ref', 'place_ref']
        widgets = {
            # Autocomplete keeps the place selector usable with many rows.
            'place_ref': widgets.AutocompleteSelect(
                LocalCouncilDistrict.place_ref.field,
                site,
            ),
        }
| StarcoderdataPython |
1934983 | <reponame>mari-hernandez/03-tarea-mari-hernandez
import numpy as np
from matplotlib import pyplot as plt
# Normalized units: gravitational constant and both masses are set to 1.
G = 1
M = 1
m = 1
class Planeta(object):
    """A planet orbiting a central mass under a (possibly perturbed) potential.

    Given initial position and velocity (plus an optional alpha perturbation
    parameter), the class integrates the equations of motion with one of three
    schemes: fourth-order Runge-Kutta, velocity Verlet, or Beeman.  The total
    energy at the current state can also be computed.
    """

    def __init__(self, condicion_inicial, alpha=0):
        """Initialize the planet.

        Args:
            condicion_inicial: [x, y, vx, vy] at time 0.
            alpha: perturbation strength of the 1/r^2 potential term
                (defaults to 0, i.e. the pure Kepler problem).

        Attributes:
            y_actual: current state [x, y, vx, vy].
            fuerza_actual_x / fuerza_actual_y: history of force evaluations
                (last element is the most recent); Beeman reads index [-1].
            t_actual: current time.
        """
        self.y_actual = condicion_inicial
        self.fuerza_actual_x = []
        self.fuerza_actual_y = []
        self.t_actual = 0.
        self.alpha = alpha

    def ecuacion_de_movimiento(self):
        """Evaluate the equations of motion as a first-order system.

        Returns [vx, vy, fx, fy] at the current state and appends the force
        components to the force history.
        """
        x, y, vx, vy = self.y_actual
        r = (x**2 + y**2)**(1/2)
        fx = - x * (G * M * m / r**3 - 2 * self.alpha * G * M * m / r**4)
        fy = - y * (G * M * m / r**3 - 2 * self.alpha * G * M * m / r**4)
        self.fuerza_actual_x.append(fx)
        self.fuerza_actual_y.append(fy)
        return [vx, vy, fx, fy]

    def avanza_rk4(self, dt):
        """Advance the state by one step of size dt using classical RK4.

        Modifies self.y_actual and self.t_actual in place.
        """
        x, y, vx, vy = self.y_actual
        k1 = self.ecuacion_de_movimiento()
        # BUGFIX: the intermediate stages must step by dt*k (the original
        # omitted dt, which is only correct when dt == 1).
        self.y_actual = [x + dt*k1[0]/2, y + dt*k1[1]/2,
                         vx + dt*k1[2]/2, vy + dt*k1[3]/2]
        k2 = self.ecuacion_de_movimiento()
        self.y_actual = [x + dt*k2[0]/2, y + dt*k2[1]/2,
                         vx + dt*k2[2]/2, vy + dt*k2[3]/2]
        k3 = self.ecuacion_de_movimiento()
        self.y_actual = [x + dt*k3[0], y + dt*k3[1],
                         vx + dt*k3[2], vy + dt*k3[3]]
        k4 = self.ecuacion_de_movimiento()
        x_n = x + (1/6)*dt*(k1[0] + 2*k2[0] + 2*k3[0] + k4[0])
        y_n = y + (1/6)*dt*(k1[1] + 2*k2[1] + 2*k3[1] + k4[1])
        vx_n = vx + (1/6)*dt*(k1[2] + 2*k2[2] + 2*k3[2] + k4[2])
        vy_n = vy + (1/6)*dt*(k1[3] + 2*k2[3] + 2*k3[3] + k4[3])
        self.y_actual = [x_n, y_n, vx_n, vy_n]
        self.t_actual += dt

    def avanza_verlet(self, dt):
        """Advance the state by one step of size dt using velocity Verlet."""
        x, y, vx, vy = self.y_actual
        vx, vy, fx, fy = self.ecuacion_de_movimiento()
        # Position update with the current force (m = 1, so force == accel).
        x_n = x + dt*vx + fx*(dt**2)/2
        y_n = y + dt*vy + fy*(dt**2)/2
        self.y_actual = [x_n, y_n, vx, vy]
        # Velocity update uses the average of old and new forces.
        vx1, vy1, fx1, fy1 = self.ecuacion_de_movimiento()
        vx_n = vx + fx1*dt/2 + fx*dt/2
        vy_n = vy + fy1*dt/2 + fy*dt/2
        self.y_actual = [x_n, y_n, vx_n, vy_n]
        self.t_actual += dt

    def avanza_beeman(self, dt):
        """Advance the state by one step of size dt using Beeman's scheme.

        Requires at least one previous force evaluation in the history
        (e.g. from a prior step); otherwise the [-1] lookup raises IndexError.
        """
        x, y, vx, vy = self.y_actual
        # Force from the previous step, a(t - dt).
        fx = self.fuerza_actual_x[-1]
        fy = self.fuerza_actual_y[-1]
        # Current force, a(t).
        vx, vy, fx_mas1, fy_mas1 = self.ecuacion_de_movimiento()
        x_n = x + vx*dt + (1/6)*(4*fx_mas1 - fx)*(dt**2)
        y_n = y + vy*dt + (1/6)*(4*fy_mas1 - fy)*(dt**2)
        # BUGFIX: the corrector force a(t + dt) must be evaluated at the
        # predicted position; the original re-evaluated at the old position.
        self.y_actual = [x_n, y_n, vx, vy]
        vx_mas, vy_mas, fx_mas2, fy_mas2 = self.ecuacion_de_movimiento()
        vx_n = vx + (1/12)*(5*fx_mas2 + 8*fx_mas1 - fx)*dt
        vy_n = vy + (1/12)*(5*fy_mas2 + 8*fy_mas1 - fy)*dt
        self.y_actual = [x_n, y_n, vx_n, vy_n]
        self.t_actual += dt

    def energia_total(self):
        """Return the total (kinetic + potential) energy at the current state."""
        x, y, vx, vy = self.y_actual
        r = (x**2 + y**2)**(1/2)
        energia_Cinetica = (1/2)*m*(vx**2 + vy**2)
        energia_Potencial = -G*M*m/r + self.alpha*G*M*m/r**2
        return energia_Cinetica + energia_Potencial
| StarcoderdataPython |
123780 | <filename>cla_backend/apps/knowledgebase/management/commands/grant_cla_superusers_article_categories_permissions.py
from django.core.management.base import BaseCommand
from django.contrib.auth.models import Permission, Group, ContentType
class Command(BaseCommand):
    """Grant the 'CLA Superusers' group every permission on ArticleCategory."""

    def handle(self, *args, **options):
        article_category = ContentType.objects.get(
            app_label="knowledgebase", model="articlecategory")
        permissions = Permission.objects.filter(content_type=article_category)
        group = Group.objects.get(name="CLA Superusers")
        for permission in permissions:
            group.permissions.add(permission)
| StarcoderdataPython |
1928151 | <filename>firestore/datatypes/integer.py
from firestore.datatypes.number import Number
class Integer(Number):
    """
    64bit signed non decimal integer
    """

    def __init__(self, *args, **kwargs):
        # Set the backing Python type before delegating to the parent.
        # NOTE(review): assumes Number.__init__ reads py_type — confirm
        # before reordering these two statements.
        self.py_type = int
        super(Integer, self).__init__(*args, **kwargs)
| StarcoderdataPython |
1798233 | <reponame>MOvations/speedRuns
#%%
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
from sklearn.neural_network import MLPClassifier
import os
data = pd.read_csv("Data/winequality-red.csv")
# %%
# Notebook-style cells: bare expressions below are interactive displays.
data
# %%
##### PREPROCESSING #####
data.isnull().sum()
# %%
data.dtypes
# %%
data["quality"].unique()  # interesting...
# %%
# Split into features and the quality label.
y = data["quality"]
X = data.drop("quality", axis=1)
# %%
# Standardize features to zero mean / unit variance.
scaler = StandardScaler()
X = pd.DataFrame(scaler.fit_transform(X), columns=X.columns)
X
#%%
##### CLUSTERING #####
# Fit k-means with 6 clusters on the standardized features.
# (A stray no-op `KMeans(n_clusters=6)` expression was removed here: it
# constructed an estimator and immediately discarded it.)
kmeans = KMeans(n_clusters=6)
kmeans.fit(X)
clusters = kmeans.predict(X)
clusters
# %%
# Project the standardized features to 2-D for visualization.
pca = PCA(n_components=2)
reduced_X = pd.DataFrame(pca.fit_transform(X), columns=["PC1", "PC2"])
reduced_X
# %%
reduced_X["cluster"] = clusters
reduced_X
# %%
# Express the cluster centers in the same 2-D PCA space.
reduced_centers = pca.transform(kmeans.cluster_centers_)
reduced_centers
#%%
# Visualize the clusters in PCA space, one color per cluster id.
# (Replaces six copy-pasted scatter blocks with an equivalent loop that
# issues the same scatter calls in the same order.)
plt.figure(figsize=(14, 10))
cluster_colors = ["slateblue", "springgreen", "indigo",
                  "teal", "lightcoral", "gold"]
for cluster_id, color in enumerate(cluster_colors):
    members = reduced_X[reduced_X["cluster"] == cluster_id]
    plt.scatter(members.loc[:, "PC1"], members.loc[:, "PC2"], color=color)
# Mark the PCA-projected cluster centers.
plt.scatter(
    reduced_centers[:, 0], reduced_centers[:, 1], color="black", marker="x", s=300
)
plt.show()
# %%
##### TRAINING #####
# NOTE(review): no random_state is set, so the split and the MLP weight
# initialization vary between runs — seed both for reproducible accuracy.
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.8)
model = MLPClassifier(hidden_layer_sizes=(256, 256), max_iter=500)
model.fit(X_train, y_train)
print(f"Model Accuracy: {model.score(X_test, y_test)}")
# %%
| StarcoderdataPython |
8062754 | <filename>elit/layers/embeddings/word2vec.py
# ========================================================================
# Copyright 2020 Emory University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========================================================================
# -*- coding:utf-8 -*-
# Author: hankcs
from typing import Optional, Callable, Union
import torch
from torch import nn
from elit.common.structure import AutoConfigurable
from elit.common.transform import VocabDict
from elit.common.vocab import Vocab
from elit.layers.dropout import WordDropout
from elit.layers.embeddings.embedding import Embedding, EmbeddingDim
from elit.layers.embeddings.util import build_word2vec_with_vocab
class Word2VecEmbeddingModule(nn.Module, EmbeddingDim):
    """Embeds a batch field's token ids with a (pretrained) embedding table.

    Optionally applies word dropout, keeps the table on CPU (moving outputs
    back to the input device), and can add a trainable, zero-initialized
    second channel on top of the base vectors.
    """

    def __init__(self, field: str, embed: nn.Embedding, word_dropout: WordDropout = None, cpu=False,
                 second_channel=False, num_tokens_in_trn=None, unk_idx=1) -> None:
        """
        Args:
            field: Batch key; the tensor under ``f'{field}_id'`` is embedded.
            embed: The embedding table.
            word_dropout: Optional dropout replacing ids before lookup.
            cpu: Keep the table on CPU (see _apply below) and move outputs
                back to the input's device.
            second_channel: If truthy, add a trainable zero-initialized
                embedding summed with the base lookup.
            num_tokens_in_trn: If set, caps the second channel's vocabulary;
                ids beyond it fall back to ``unk_idx`` in that channel.
            unk_idx: Index used for out-of-channel ids.
        """
        super().__init__()
        self.cpu = cpu
        self.field = field
        self.embed = embed
        self.word_dropout = word_dropout
        self.num_tokens_in_trn = num_tokens_in_trn
        self.unk_idx = unk_idx
        if second_channel:
            n_words, n_embed = embed.weight.size()
            if num_tokens_in_trn:
                n_words = num_tokens_in_trn
            second_channel = nn.Embedding(num_embeddings=n_words,
                                          embedding_dim=n_embed)
            # Zero init: the base vectors are unchanged at the start.
            nn.init.zeros_(second_channel.weight)
        self.second_channel = second_channel

    def forward(self, batch: dict, **kwargs):
        x: torch.Tensor = batch[f'{self.field}_id']
        if self.cpu:
            # Perform the lookup on CPU, then move the result back below.
            device = x.device
            x = x.cpu()
        if self.word_dropout:
            x = self.word_dropout(x)
        if self.second_channel:
            # Ids beyond the second channel's table map to unk there, while
            # the base table still sees the original id.
            ext_mask = x.ge(self.second_channel.num_embeddings)
            ext_words = x.masked_fill(ext_mask, self.unk_idx)
            x = self.embed(x) + self.second_channel(ext_words)
        else:
            x = self.embed(x)
        if self.cpu:
            # noinspection PyUnboundLocalVariable
            x = x.to(device)
        return x

    @property
    def embedding_dim(self) -> int:
        return self.embed.embedding_dim

    # noinspection PyMethodOverriding
    # def to(self, device, **kwargs):
    #     print(self.cpu)
    #     exit(1)
    #     if self.cpu:
    #         return super(Word2VecEmbeddingModule, self).to(-1, **kwargs)
    #     return super(Word2VecEmbeddingModule, self).to(device, **kwargs)

    def _apply(self, fn):
        # When cpu=True, skip _apply entirely so the module stays on CPU.
        if not self.cpu:  # This might block all fn not limiting to moving between devices.
            return super(Word2VecEmbeddingModule, self)._apply(fn)
class Word2VecEmbedding(Embedding, AutoConfigurable):
    """Configurable factory that builds a Word2VecEmbeddingModule.

    Holds the embedding configuration (source, vocab extension, dropout,
    device placement, ...) and materializes the torch module once vocabs
    are known.
    """

    def __init__(self,
                 field,
                 embed: Union[int, str],
                 extend_vocab=True,
                 pad=None,
                 unk=None,
                 lowercase=False,
                 trainable=False,
                 second_channel=False,
                 word_dropout: float = 0,
                 normalize=False,
                 cpu=False,
                 init='zeros') -> None:
        """
        Args:
            field: Name of the batch field to embed.
            embed: Embedding size or a path/identifier of pretrained vectors.
            extend_vocab: Extend the vocab with tokens from the vectors.
            pad: Pad token used when a vocab has to be created.
            unk: Unknown token.
            lowercase: Lowercase tokens when looking up vectors.
            trainable: Whether the embedding table is updated in training.
            second_channel: Add a trainable second channel (see module).
            word_dropout: Probability of replacing a token with unk.
            normalize: Normalize the pretrained vectors.
            cpu: Keep the embedding table on CPU.
            init: Initialization scheme (stored; consumed downstream).
        """
        super().__init__()
        self.pad = pad
        self.second_channel = second_channel
        self.cpu = cpu
        self.normalize = normalize
        self.word_dropout = word_dropout
        self.init = init
        self.lowercase = lowercase
        self.unk = unk
        self.extend_vocab = extend_vocab
        self.trainable = trainable
        self.embed = embed
        self.field = field

    def module(self, vocabs: VocabDict, **kwargs) -> Optional[nn.Module]:
        """Build the embedding module for self.field from the given vocabs."""
        vocab = vocabs[self.field]
        # Remember the vocab size before pretrained vectors may extend it,
        # so the module's second channel covers only training-time tokens.
        num_tokens_in_trn = len(vocab)
        embed = build_word2vec_with_vocab(self.embed,
                                          vocab,
                                          self.extend_vocab,
                                          self.unk,
                                          self.lowercase,
                                          self.trainable,
                                          normalize=self.normalize)
        if self.word_dropout:
            assert vocab.unk_token, f'unk_token of vocab {self.field} has to be set in order to ' \
                                    f'make use of word_dropout'
            padding = []
            if vocab.pad_token:
                # Never drop the pad token into unk.
                padding.append(vocab.pad_idx)
            word_dropout = WordDropout(self.word_dropout, vocab.unk_idx, exclude_tokens=padding)
        else:
            word_dropout = None
        return Word2VecEmbeddingModule(self.field, embed, word_dropout=word_dropout, cpu=self.cpu,
                                       second_channel=self.second_channel, num_tokens_in_trn=num_tokens_in_trn,
                                       unk_idx=vocab.unk_idx)

    def transform(self, vocabs: VocabDict = None, **kwargs) -> Optional[Callable]:
        """Ensure a vocab exists for self.field, then delegate to the base."""
        assert vocabs is not None
        if self.field not in vocabs:
            vocabs[self.field] = Vocab(pad_token=self.pad, unk_token=self.unk)
        return super().transform(**kwargs)
| StarcoderdataPython |
1719897 | <reponame>jbyu/HorizonNet
#!/usr/bin/env python
from .naive import grid_sample as naive
from .faster import grid_sample as faster
# Public API: the two star-imported grid_sample implementations above.
__all__ = [
    "faster",
    "naive",
]
| StarcoderdataPython |
5140040 | <reponame>zehengl/ezapi_tmdb
from .base import process_response, ENDPOINT, any_required_kwargs
class ListMixin:
    """TMDB v4 list endpoints, mixed into the API client."""

    @process_response
    def get_list(self, list_id, **kwargs):
        """
        GET /list/{list_id}

        Extra query parameters are passed through as kwargs.
        """
        url = f"{ENDPOINT}/4/list/{list_id}"
        return self.make_request("GET", url, kwargs)

    @any_required_kwargs(["media_type", "media_id"])
    @process_response
    def get_list_item_status(self, list_id, **kwargs):
        """
        GET /list/{list_id}/item_status

        Requires the ``media_type`` and ``media_id`` keyword arguments
        (enforced by the decorator).
        """
        url = f"{ENDPOINT}/4/list/{list_id}/item_status"
        return self.make_request("GET", url, kwargs)
| StarcoderdataPython |
import csv

# Append one employee record (ID, Name, Location, BU) to the CSV file.
# The original imported csv but hand-assembled the row with string
# concatenation; csv.writer handles quoting/escaping correctly while
# producing the same bytes for these values.
employee = ["114185", "RAMU", "MUMBAI", "FS"]
with open('Emp_details.csv', 'a', newline='') as csvfile:
    writer = csv.writer(csvfile, lineterminator='\n')
    writer.writerow(employee)
| StarcoderdataPython |
9695101 | <reponame>hennr/buildnotify<gh_stars>10-100
import unittest
from buildnotifylib.core.continous_integration_server import ContinuousIntegrationServer
from buildnotifylib.core.projects import OverallIntegrationStatus
from buildnotifylib.project_status_notification import ProjectStatus, ProjectStatusNotification
from test.fake_conf import ConfigBuilder
from test.project_builder import ProjectBuilder
class ProjectStatusTest(unittest.TestCase):
def test_should_identify_failing_builds(self):
old_projects = [
ProjectBuilder({'name': 'proj1', 'lastBuildStatus': 'Success', 'activity': 'Sleeping', 'url': 'someurl',
'lastBuildTime': '2009-05-29T13:54:07'}).build(),
ProjectBuilder({'name': 'proj2', 'lastBuildStatus': 'Success', 'activity': 'Sleeping', 'url': 'someurl',
'lastBuildTime': '2009-05-29T13:54:37'}).build()]
new_projects = [
ProjectBuilder({'name': 'proj1', 'lastBuildStatus': 'Success', 'activity': 'Sleeping', 'url': 'someurl',
'lastBuildTime': '2009-05-29T13:54:07'}).build(),
ProjectBuilder({'name': 'proj2', 'lastBuildStatus': 'Failure', 'activity': 'Sleeping', 'url': 'someurl',
'lastBuildTime': '2009-05-29T13:54:37'}).build()]
failing_builds = ProjectStatusTest.build(old_projects, new_projects).failing_builds()
self.assertEqual(1, len(failing_builds))
self.assertEqual("proj2", failing_builds[0])
def test_should_identify_fixed_builds(self):
old_projects = [
ProjectBuilder({'name': 'proj1', 'lastBuildStatus': 'Failure', 'activity': 'Sleeping', 'url': 'someurl',
'lastBuildTime': '2009-05-29T13:54:07'}).build(),
ProjectBuilder({'name': 'proj2', 'lastBuildStatus': 'Failure', 'activity': 'Sleeping', 'url': 'someurl',
'lastBuildTime': '2009-05-29T13:54:37'}).build()]
new_projects = [
ProjectBuilder({'name': 'proj1', 'lastBuildStatus': 'Success', 'activity': 'Sleeping', 'url': 'someurl',
'lastBuildTime': '2009-05-29T13:54:07'}).build(),
ProjectBuilder({'name': 'proj2', 'lastBuildStatus': 'Failure', 'activity': 'Sleeping', 'url': 'someurl',
'lastBuildTime': '2009-05-29T13:54:37'}).build()]
successful_builds = ProjectStatusTest.build(old_projects, new_projects).successful_builds()
self.assertEqual(1, len(successful_builds))
self.assertEqual("proj1", successful_builds[0])
def test_should_identify_still_failing_builds(self):
old_projects = [
ProjectBuilder(
{'name': 'proj1', 'lastBuildStatus': 'Success', 'activity': 'Sleeping', 'url': 'someurl',
'lastBuildLabel': '1',
'lastBuildTime': '2009-05-29T13:54:07'}).build(),
ProjectBuilder(
{'name': 'stillfailingbuild', 'lastBuildStatus': 'Failure', 'activity': 'Sleeping', 'url': 'someurl',
'lastBuildLabel': '10', 'lastBuildTime': '2009-05-29T13:54:37'}).build()]
new_projects = [
ProjectBuilder(
{'name': 'proj1', 'lastBuildStatus': 'Success', 'activity': 'Sleeping', 'url': 'someurl',
'lastBuildLabel': '1',
'lastBuildTime': '2009-05-29T13:54:07'}).build(),
ProjectBuilder(
{'name': 'stillfailingbuild', 'lastBuildStatus': 'Failure', 'activity': 'Sleeping', 'url': 'someurl',
'lastBuildLabel': '11', 'lastBuildTime': '2009-05-29T13:54:47'}).build()]
still_failing_builds = ProjectStatusTest.build(old_projects, new_projects).still_failing_builds()
self.assertEqual(1, len(still_failing_builds))
self.assertEqual("stillfailingbuild", still_failing_builds[0])
def test_should_identify_still_successful_builds(self):
old_projects = [
ProjectBuilder(
{'name': 'proj1', 'lastBuildStatus': 'Success', 'activity': 'Sleeping',
'url': 'someurl', 'lastBuildLabel': '1',
'lastBuildTime': '2009-05-29T13:54:07'}).build(),
ProjectBuilder(
{'name': 'Successbuild', 'lastBuildStatus': 'Success', 'activity': 'Sleeping',
'url': 'someurl',
'lastBuildLabel': '10', 'lastBuildTime': '2009-05-29T13:54:37'}).build()]
new_projects = [
ProjectBuilder(
{'name': 'proj1', 'lastBuildStatus': 'Success', 'activity': 'Sleeping',
'url': 'someurl', 'lastBuildLabel': '1',
'lastBuildTime': '2009-05-29T13:54:07'}).build(),
ProjectBuilder(
{'name': 'Successbuild', 'lastBuildStatus': 'Success', 'activity': 'Sleeping',
'url': 'someurl',
'lastBuildLabel': '11', 'lastBuildTime': '2009-05-29T13:54:47'}).build()]
still_successful_builds = ProjectStatusTest.build(old_projects, new_projects).still_successful_builds()
self.assertEqual(1, len(still_successful_builds))
self.assertEqual("Successbuild", still_successful_builds[0])
def test_should_build_tuples_by_server_url_and_name(self):
project_s1 = ProjectBuilder({'name': 'proj1', 'lastBuildStatus': 'Success', 'activity': 'Sleeping',
'url': 'someurl',
'lastBuildTime': '2009-05-29T13:54:07'}).server('s1').build()
project_s2 = ProjectBuilder({'name': 'proj1', 'lastBuildStatus': 'Success', 'activity': 'Sleeping',
'url': 'someurl',
'lastBuildTime': '2009-05-29T13:54:07'}).server('s2').build()
old_projects = [project_s1, project_s2]
new_projects = [project_s2, project_s1]
tuple = ProjectStatusTest.build(old_projects, new_projects).tuple_for(project_s2)
self.assertEqual('s2', tuple.current_project.server_url)
self.assertEqual('s2', tuple.old_project.server_url)
def test_should_identify_new_builds(self):
    # NOTE(review): the test name suggests new-build detection, but the body
    # exercises still_successful_builds(); 'Successbuild' exists only in the
    # new snapshot. Confirm whether ProjectStatus is meant to report builds
    # that appear for the first time via still_successful_builds(), or
    # whether a dedicated new-builds API should be used here instead.
    old_projects = [
        ProjectBuilder(
            {'name': 'proj1', 'lastBuildStatus': 'Success', 'activity': 'Sleeping',
             'url': 'someurl',
             'lastBuildTime': '2009-05-29T13:54:07'}).build()]
    new_projects = [
        ProjectBuilder(
            {'name': 'proj1', 'lastBuildStatus': 'Success', 'activity': 'Sleeping',
             'url': 'someurl',
             'lastBuildTime': '2009-05-29T13:54:07'}).build(),
        ProjectBuilder(
            {'name': 'Successbuild', 'lastBuildStatus': 'Success',
             'activity': 'Sleeping', 'url': 'someurl',
             'lastBuildTime': '2009-05-29T13:54:47'}).build()]
    still_successful_builds = ProjectStatusTest.build(old_projects, new_projects).still_successful_builds()
    self.assertEqual(1, len(still_successful_builds))
    self.assertEqual("Successbuild", still_successful_builds[0])
def test_should_include_prefix_in_notification(self):
    """Build names reported by the status carry the configured prefix."""
    def prefixed(attrs):
        # Every project in this test is built with the same 'R1' prefix.
        return ProjectBuilder(attrs).prefix('R1').build()
    old_projects = [
        prefixed({'name': 'proj1', 'lastBuildStatus': 'Success', 'activity': 'Sleeping',
                  'url': 'someurl',
                  'lastBuildTime': '2009-05-29T13:54:07'})]
    new_projects = [
        prefixed({'name': 'proj1', 'lastBuildStatus': 'Success', 'activity': 'Sleeping',
                  'url': 'someurl',
                  'lastBuildTime': '2009-05-29T13:54:07'}),
        prefixed({'name': 'Successbuild', 'lastBuildStatus': 'Success',
                  'activity': 'Sleeping', 'url': 'someurl',
                  'lastBuildTime': '2009-05-29T13:54:47'})]
    reported = ProjectStatusTest.build(old_projects, new_projects).still_successful_builds()
    self.assertEqual(1, len(reported))
    self.assertEqual("[R1] Successbuild", reported[0])
@classmethod
def build(cls, old_projects, new_projects):
    """Factory helper: wrap the two project snapshots in a ProjectStatus."""
    return ProjectStatus(old_projects, new_projects)
def test_should_return_notifications(mocker):
    """Broken and fixed builds are pushed through the notification object.

    NOTE(review): unlike the methods above, this is a pytest-style function
    using the `mocker` fixture (no self); confirm the test runner collects
    both styles from this module.
    """
    # proj1 goes Success -> Failure (broken build);
    # Successbuild goes Failure -> Success (fixed build).
    old_projects = [ProjectBuilder({'name': 'proj1',
                                    'lastBuildStatus': 'Success',
                                    'activity': 'Sleeping',
                                    'url': 'someurl',
                                    'lastBuildLabel': '1',
                                    'lastBuildTime': '2009-05-29T13:54:07'}).build(),
                    ProjectBuilder({'name': 'Successbuild',
                                    'lastBuildStatus': 'Failure',
                                    'activity': 'Sleeping',
                                    'url': 'someurl',
                                    'lastBuildLabel': '10',
                                    'lastBuildTime': '2009-05-29T13:54:37'}).build()]
    new_projects = [ProjectBuilder({'name': 'proj1',
                                    'lastBuildStatus': 'Failure',
                                    'activity': 'Sleeping',
                                    'url': 'someurl',
                                    'lastBuildLabel': '2',
                                    'lastBuildTime': '2009-05-29T13:54:07'}).build(),
                    ProjectBuilder({'name': 'Successbuild',
                                    'lastBuildStatus': 'Success',
                                    'activity': 'Sleeping',
                                    'url': 'someurl',
                                    'lastBuildLabel': '11',
                                    'lastBuildTime': '2009-05-29T13:54:47'}).build()]
    old = OverallIntegrationStatus([ContinuousIntegrationServer('url', old_projects)])
    new = OverallIntegrationStatus([ContinuousIntegrationServer('url', new_projects)])
    # Stand-in notification sink; show_message is patched so calls can be asserted.
    class NotificationFake(object):
        def __init__(self):
            pass
        def show_message(self, **kwargs):
            print(kwargs)
    m = mocker.patch.object(NotificationFake, 'show_message')
    notification = ProjectStatusNotification(ConfigBuilder().build(), old, new, NotificationFake())
    notification.show_notifications()
    m.assert_any_call('Broken builds', 'proj1')
    m.assert_any_call('Fixed builds', 'Successbuild')
if __name__ == '__main__':
unittest.main()
| StarcoderdataPython |
3425288 | #! python3
# renameDates.py - Renames a bunch of files with US-style dates in the name to
# have UK-style dates
import shutil
import os
import re
date_pattern = re.compile(
r"""^(.*?) # all text before the date
((0|1)?\d)- # an optional 0 or 1 followed by a digit (month) then hyphen
([0-3]?\d)- # an optional 0 to 3 followed by a digit (day) then hyphen
((19|20)\d\d) # 19xx or 20xx for the year
(.*?)$ # other characters
""", re.VERBOSE
)
# TODO implementation of program
| StarcoderdataPython |
8046496 | """Collection of helper methods.
All containing methods are legacy helpers that should not be used by new
components. Instead call the service directly.
"""
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_BRIGHTNESS_PCT,
ATTR_COLOR_NAME,
ATTR_COLOR_TEMP,
ATTR_EFFECT,
ATTR_FLASH,
ATTR_HS_COLOR,
ATTR_KELVIN,
ATTR_PROFILE,
ATTR_RGB_COLOR,
ATTR_RGBW_COLOR,
ATTR_RGBWW_COLOR,
ATTR_TRANSITION,
ATTR_WHITE_VALUE,
ATTR_XY_COLOR,
DOMAIN,
)
from homeassistant.const import (
ATTR_ENTITY_ID,
ENTITY_MATCH_ALL,
SERVICE_TOGGLE,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
)
from homeassistant.loader import bind_hass
@bind_hass
def turn_on(
    hass,
    entity_id=ENTITY_MATCH_ALL,
    transition=None,
    brightness=None,
    brightness_pct=None,
    rgb_color=None,
    rgbw_color=None,
    rgbww_color=None,
    xy_color=None,
    hs_color=None,
    color_temp=None,
    kelvin=None,
    white_value=None,
    profile=None,
    flash=None,
    effect=None,
    color_name=None,
):
    """Turn all or specified light on.

    Legacy sync helper: schedules async_turn_on on the event loop via
    hass.add_job instead of calling the light.turn_on service directly.
    Arguments are forwarded positionally, so their order must match
    async_turn_on's signature exactly.
    """
    hass.add_job(
        async_turn_on,
        hass,
        entity_id,
        transition,
        brightness,
        brightness_pct,
        rgb_color,
        rgbw_color,
        rgbww_color,
        xy_color,
        hs_color,
        color_temp,
        kelvin,
        white_value,
        profile,
        flash,
        effect,
        color_name,
    )
async def async_turn_on(
    hass,
    entity_id=ENTITY_MATCH_ALL,
    transition=None,
    brightness=None,
    brightness_pct=None,
    rgb_color=None,
    rgbw_color=None,
    rgbww_color=None,
    xy_color=None,
    hs_color=None,
    color_temp=None,
    kelvin=None,
    white_value=None,
    profile=None,
    flash=None,
    effect=None,
    color_name=None,
):
    """Turn all or specified light on."""
    candidate_attrs = (
        (ATTR_ENTITY_ID, entity_id),
        (ATTR_PROFILE, profile),
        (ATTR_TRANSITION, transition),
        (ATTR_BRIGHTNESS, brightness),
        (ATTR_BRIGHTNESS_PCT, brightness_pct),
        (ATTR_RGB_COLOR, rgb_color),
        (ATTR_RGBW_COLOR, rgbw_color),
        (ATTR_RGBWW_COLOR, rgbww_color),
        (ATTR_XY_COLOR, xy_color),
        (ATTR_HS_COLOR, hs_color),
        (ATTR_COLOR_TEMP, color_temp),
        (ATTR_KELVIN, kelvin),
        (ATTR_WHITE_VALUE, white_value),
        (ATTR_FLASH, flash),
        (ATTR_EFFECT, effect),
        (ATTR_COLOR_NAME, color_name),
    )
    # Forward only the attributes the caller actually supplied.
    data = {}
    for attr, value in candidate_attrs:
        if value is not None:
            data[attr] = value
    await hass.services.async_call(DOMAIN, SERVICE_TURN_ON, data, blocking=True)
@bind_hass
def turn_off(hass, entity_id=ENTITY_MATCH_ALL, transition=None, flash=None):
    """Turn all or specified light off.

    Legacy sync helper: schedules async_turn_off on the event loop.
    """
    hass.add_job(async_turn_off, hass, entity_id, transition, flash)
async def async_turn_off(hass, entity_id=ENTITY_MATCH_ALL, transition=None, flash=None):
    """Turn all or specified light off."""
    data = {}
    # Forward only the attributes the caller actually supplied.
    for attr, value in ((ATTR_ENTITY_ID, entity_id),
                        (ATTR_TRANSITION, transition),
                        (ATTR_FLASH, flash)):
        if value is not None:
            data[attr] = value
    await hass.services.async_call(DOMAIN, SERVICE_TURN_OFF, data, blocking=True)
@bind_hass
def toggle(
    hass,
    entity_id=ENTITY_MATCH_ALL,
    transition=None,
    brightness=None,
    brightness_pct=None,
    rgb_color=None,
    xy_color=None,
    hs_color=None,
    color_temp=None,
    kelvin=None,
    white_value=None,
    profile=None,
    flash=None,
    effect=None,
    color_name=None,
):
    """Toggle all or specified light.

    Legacy sync helper: schedules async_toggle on the event loop. Note it
    accepts fewer color options than turn_on (no rgbw/rgbww parameters).
    Arguments are forwarded positionally, so their order must match
    async_toggle's signature exactly.
    """
    hass.add_job(
        async_toggle,
        hass,
        entity_id,
        transition,
        brightness,
        brightness_pct,
        rgb_color,
        xy_color,
        hs_color,
        color_temp,
        kelvin,
        white_value,
        profile,
        flash,
        effect,
        color_name,
    )
async def async_toggle(
    hass,
    entity_id=ENTITY_MATCH_ALL,
    transition=None,
    brightness=None,
    brightness_pct=None,
    rgb_color=None,
    xy_color=None,
    hs_color=None,
    color_temp=None,
    kelvin=None,
    white_value=None,
    profile=None,
    flash=None,
    effect=None,
    color_name=None,
):
    """Toggle all or specified light.

    Fixed docstring: this previously claimed to turn lights on, but the
    helper invokes the light.toggle service (SERVICE_TOGGLE).
    """
    # Forward only the attributes the caller actually supplied.
    data = {
        key: value
        for key, value in [
            (ATTR_ENTITY_ID, entity_id),
            (ATTR_PROFILE, profile),
            (ATTR_TRANSITION, transition),
            (ATTR_BRIGHTNESS, brightness),
            (ATTR_BRIGHTNESS_PCT, brightness_pct),
            (ATTR_RGB_COLOR, rgb_color),
            (ATTR_XY_COLOR, xy_color),
            (ATTR_HS_COLOR, hs_color),
            (ATTR_COLOR_TEMP, color_temp),
            (ATTR_KELVIN, kelvin),
            (ATTR_WHITE_VALUE, white_value),
            (ATTR_FLASH, flash),
            (ATTR_EFFECT, effect),
            (ATTR_COLOR_NAME, color_name),
        ]
        if value is not None
    }
    await hass.services.async_call(DOMAIN, SERVICE_TOGGLE, data, blocking=True)
| StarcoderdataPython |
3382608 | import InputReader
"""
Decrypt word by counting how many times each character appears in each position in a word for a list of words
Uses a dictionary for each character position with the character as key and count as value.
"""
class wordDecrypter:
    """Tally per-position character frequencies over many equal-length words.

    Feed words via addWord(); getWords() then reconstructs two words: one
    from the most common character at each position and one from the least
    common.
    """

    def __init__(self, wordLength):
        # One {char: count} frequency table per character position.
        self.dictionaries = [{} for _ in range(wordLength)]

    def addWord(self, word):
        """Count each character of `word` at its position.

        A trailing newline (as produced by file iteration) is stripped.
        Fixed: the previous range(0, len(word)-1) loop silently dropped the
        last character of any word that had no trailing newline.
        """
        for index, char in enumerate(word.rstrip('\n')):
            counts = self.dictionaries[index]
            counts[char] = counts.get(char, 0) + 1

    def getWords(self):
        """Return [most-common-letter word, least-common-letter word]."""
        finalWord = ["", ""]
        for dictionary in self.dictionaries:
            # Sort by descending count, then alphabetically for ties.
            orderedList = sorted(dictionary.items(), key=lambda x: (-x[1], x[0]))
            finalWord[0] += orderedList[0][0]  # most common letter
            finalWord[1] += orderedList[-1][0]  # least common letter
        return finalWord
def run():
    """Read the puzzle input and print the most/least-common-letter words."""
    data = InputReader.read("Inputs/6.txt")
    decrypter = wordDecrypter(len(data[0])-1) # -1 for newline char
    for line in data:
        decrypter.addWord(line)
    correctWords = decrypter.getWords()
    print("The word from most common letters is: {0} \nand for the least common it is: {1}".format(correctWords[0], correctWords[1]))
if __name__ == "__main__":
run()
| StarcoderdataPython |
8012990 | <reponame>jacquerie/leetcode
# -*- coding: utf-8 -*-
class Solution:
    def countGoodSubstrings(self, s: str) -> int:
        """Count length-3 substrings of s whose three characters are all distinct."""
        good = 0
        for i in range(len(s) - 2):
            a, b, c = s[i], s[i + 1], s[i + 2]
            if a != b and b != c and a != c:
                good += 1
        return good
if __name__ == '__main__':
solution = Solution()
assert 1 == solution.countGoodSubstrings('xyzzaz')
assert 4 == solution.countGoodSubstrings('aababcabc')
| StarcoderdataPython |
165553 | <reponame>arjunkhunti-crest/security_content
"""
Disable a list of AWS IAM user accounts. After checking the list of accounts against an allowlist and confirming with an analyst, each account is disabled. The change can be reversed with the "enable user" action.
"""
import phantom.rules as phantom
import json
from datetime import datetime, timedelta
def on_start(container):
    """Playbook entry point: start the chain at the allowlist filter."""
    phantom.debug('on_start() called')
    # call 'filter_1' block
    filter_1(container=container)
    return
def filter_1(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs):
    """Split usernames: allowlisted ones get tagged, the rest enter the disable flow."""
    phantom.debug("filter_1() called")
    # collect filtered artifact ids and results for 'if' condition 1
    matched_artifacts_1, matched_results_1 = phantom.condition(
        container=container,
        conditions=[
            ["playbook_input:aws_username", "in", "custom_list:aws_inactive_user_allowlist"]
        ],
        name="filter_1:condition_1")
    # call connected blocks if filtered artifacts or results
    if matched_artifacts_1 or matched_results_1:
        indicator_tag_4(action=action, success=success, container=container, results=results, handle=handle, filtered_artifacts=matched_artifacts_1, filtered_results=matched_results_1)
    # collect filtered artifact ids and results for 'if' condition 2
    matched_artifacts_2, matched_results_2 = phantom.condition(
        container=container,
        conditions=[
            ["playbook_input:aws_username", "not in", "custom_list:aws_inactive_user_allowlist"]
        ],
        name="filter_1:condition_2")
    # call connected blocks if filtered artifacts or results
    if matched_artifacts_2 or matched_results_2:
        aws_disable_user_check(action=action, success=success, container=container, results=results, handle=handle, filtered_artifacts=matched_artifacts_2, filtered_results=matched_results_2)
    return
def aws_disable_user_check(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs):
    """Prompt an analyst (30-minute timeout) to approve disabling the inactive users."""
    phantom.debug("aws_disable_user_check() called")
    # set user and message variables for phantom.prompt call
    # NOTE(review): the prompt recipient is hard-coded - confirm 'proyer' is
    # the intended analyst/role for this playbook.
    user = "proyer"
    message = """The following AWS user(s) were found to be inactive:\n\n```\n{0}\n```"""
    # parameter list for template variable replacement
    parameters = [
        "filtered-data:filter_1:condition_2:playbook_input:aws_username"
    ]
    # responses
    response_types = [
        {
            "prompt": "Should those user account(s) be disabled?",
            "options": {
                "type": "list",
                "choices": [
                    "Yes",
                    "No"
                ],
            },
        }
    ]
    phantom.prompt2(container=container, user=user, message=message, respond_in_mins=30, name="aws_disable_user_check", parameters=parameters, response_types=response_types, callback=decision_1)
    return
def decision_1(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs):
    """Proceed to disable the accounts only if the analyst answered "Yes"."""
    phantom.debug("decision_1() called")
    # check for 'if' condition 1
    found_match_1 = phantom.decision(
        container=container,
        conditions=[
            ["aws_disable_user_check:action_result.summary.responses.0", "==", "Yes"]
        ])
    # call connected blocks if condition 1 matched
    if found_match_1:
        disable_user_1(action=action, success=success, container=container, results=results, handle=handle)
        return
    return
def disable_user_1(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs):
    """Run the "disable user" action (access keys included) on each approved username."""
    phantom.debug("disable_user_1() called")
    # phantom.debug('Action: {0} {1}'.format(action['name'], ('SUCCEEDED' if success else 'FAILED')))
    filtered_input_0_aws_username = phantom.collect2(container=container, datapath=["filtered-data:filter_1:condition_2:playbook_input:aws_username"])
    parameters = []
    # build parameters list for 'disable_user_1' call
    for filtered_input_0_aws_username_item in filtered_input_0_aws_username:
        if filtered_input_0_aws_username_item[0] is not None:
            parameters.append({
                "username": filtered_input_0_aws_username_item[0],
                "disable_access_keys": True,
            })
    ################################################################################
    ## Custom Code Start
    ################################################################################
    # Write your custom code here...
    ################################################################################
    ## Custom Code End
    ################################################################################
    phantom.act("disable user", parameters=parameters, name="disable_user_1", assets=["aws_iam"])
    return
def indicator_tag_4(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs):
    """Tag allowlisted usernames with 'aws_inactive_user_allowlist' instead of disabling them."""
    phantom.debug("indicator_tag_4() called")
    filtered_input_0_aws_username = phantom.collect2(container=container, datapath=["filtered-data:filter_1:condition_1:playbook_input:aws_username"])
    parameters = []
    # build parameters list for 'indicator_tag_4' call
    # NOTE(review): unlike disable_user_1, items are appended without a None
    # check - confirm upstream guarantees non-null usernames here.
    for filtered_input_0_aws_username_item in filtered_input_0_aws_username:
        parameters.append({
            "indicator": filtered_input_0_aws_username_item[0],
            "tags": "aws_inactive_user_allowlist",
            "overwrite": None,
        })
    ################################################################################
    ## Custom Code Start
    ################################################################################
    # Write your custom code here...
    ################################################################################
    ## Custom Code End
    ################################################################################
    phantom.custom_function(custom_function="community/indicator_tag", parameters=parameters, name="indicator_tag_4")
    return
def on_finish(container, summary):
    """Playbook teardown hook; runs after all actions complete."""
    phantom.debug("on_finish() called")
    ################################################################################
    ## Custom Code Start
    ################################################################################
    # This function is called after all actions are completed.
    # summary of all the action and/or all details of actions
    # can be collected here.
    # summary_json = phantom.get_summary()
    # if 'result' in summary_json:
    #     for action_result in summary_json['result']:
    #         if 'action_run_id' in action_result:
    #             action_results = phantom.get_action_results(action_run_id=action_result['action_run_id'], result_data=False, flatten=False)
    #             phantom.debug(action_results)
    ################################################################################
    ## Custom Code End
    ################################################################################
    return
12863664 | #!/usr/bin/env python
__author__ = "bt3"
import random
''' The simplest way...'''
def quickSelect(seq, k):
    """Return the k-th smallest element (0-based) of seq.

    Average-O(n) partition-based selection. Fixed: the old base case
    (`if len(seq) < 2: return seq`) returned the *sequence* instead of the
    element, so recursive calls could bubble a one-element list up to the
    caller; an empty sequence now raises ValueError instead of being
    returned unchanged.
    """
    if not seq:
        raise ValueError("cannot select from an empty sequence")
    if len(seq) == 1:
        return seq[0]
    # Deterministic middle pivot; random.choice(seq) would also work.
    ipivot = len(seq) // 2
    pivot = seq[ipivot]
    # O(n) partition: duplicates of the pivot land in the "smaller" side.
    smallerList = [x for i, x in enumerate(seq) if x <= pivot and i != ipivot]
    largerList = [x for i, x in enumerate(seq) if x > pivot and i != ipivot]
    m = len(smallerList)
    if k == m:
        return pivot
    elif k < m:
        return quickSelect(smallerList, k)
    else:
        # Skip the m smaller elements plus the pivot itself.
        return quickSelect(largerList, k - m - 1)
''' If you don't want to use pythons feature at all and
also select pivot randomly'''
def swap(seq, x, y):
    """Exchange the elements of seq at indices x and y in place."""
    seq[x], seq[y] = seq[y], seq[x]
def quickSelectHard(seq, k, left=None, right=None):
    """Return the k-th largest element (1-based) of seq, partitioning in place.

    Fixes over the original:
    * `left = left or 0` / `right = right or len(seq) - 1` mis-handled a
      legitimate index of 0 (falsy), corrupting recursive calls; the
      defaults now use explicit `is None` checks.
    * the pivot index was computed as `len(seq) // 2` regardless of the
      [left, right] window, which could pick a pivot outside the current
      partition and return a wrong answer; it is now the window midpoint.
    * the sibling swap() helper is inlined as tuple assignments so the
      function is self-contained.
    """
    if left is None:
        left = 0
    if right is None:
        right = len(seq) - 1
    # Deterministic pivot from the middle of the current window.
    ipivot = (left + right) // 2
    pivot = seq[ipivot]
    # Move pivot out of the sorting range.
    seq[ipivot], seq[right] = seq[right], seq[ipivot]
    swapIndex = left
    for i in range(left, right):
        if seq[i] < pivot:
            seq[i], seq[swapIndex] = seq[swapIndex], seq[i]
            swapIndex += 1
    # Move pivot to its final position.
    seq[right], seq[swapIndex] = seq[swapIndex], seq[right]
    # rank = number of elements >= pivot, i.e. the pivot's 1-based rank
    # counted from the largest element.
    rank = len(seq) - swapIndex
    if k == rank:
        return seq[swapIndex]
    elif k < rank:
        return quickSelectHard(seq, k, swapIndex + 1, right)
    else:
        return quickSelectHard(seq, k, left, swapIndex - 1)
if __name__ == '__main__':
    # Sanity-check both selectors against numpy's median.
    seq = [10, 60, 100, 50, 60, 75, 31, 50, 30, 20, 120, 170, 200]
    # We want the middle element. Note that this only works for odd-length
    # sequences, since the median of an even-length sequence is the mean of
    # the two middle elements.
    k = len(seq) // 2
    print(quickSelect(seq, k))
    print(quickSelectHard(seq, k))
    import numpy
    # Fixed: this line was a Python 2 print statement, which is a
    # SyntaxError under Python 3 and made the whole module unimportable.
    print(numpy.median(seq))
8089763 | # ======================================================================
# Knights of the Dinner Table
# Advent of Code 2015 Day 13 -- <NAME> -- https://adventofcode.com
#
# Python implementation by Dr. <NAME> III
# ======================================================================
# ======================================================================
# t e s t _ s e a t i n g . p y
# ======================================================================
"Test solver for Advent of Code 2015 day 13, Knights of the Dinner Table"
# ----------------------------------------------------------------------
# import
# ----------------------------------------------------------------------
import unittest
import aoc_13
import seating
# ----------------------------------------------------------------------
# constants
# ----------------------------------------------------------------------
EXAMPLE_TEXT = """
Alice would gain 54 happiness units by sitting next to Bob.
Alice would lose 79 happiness units by sitting next to Carol.
Alice would lose 2 happiness units by sitting next to David.
Bob would gain 83 happiness units by sitting next to Alice.
Bob would lose 7 happiness units by sitting next to Carol.
Bob would lose 63 happiness units by sitting next to David.
Carol would lose 62 happiness units by sitting next to Alice.
Carol would gain 60 happiness units by sitting next to Bob.
Carol would gain 55 happiness units by sitting next to David.
David would gain 46 happiness units by sitting next to Alice.
David would lose 7 happiness units by sitting next to Bob.
David would gain 41 happiness units by sitting next to Carol.
"""
PART_ONE_TEXT = EXAMPLE_TEXT
PART_TWO_TEXT = EXAMPLE_TEXT
PART_ONE_RESULT = 330
PART_TWO_RESULT = 286
# ======================================================================
# TestSeating
# ======================================================================
class TestSeating(unittest.TestCase): # pylint: disable=R0904
    """Test Seating object: default/text construction plus the part-one and
    part-two happiness computations on the worked example."""

    def test_empty_init(self):
        "Test the default Seating creation"
        # 1. Create default Seating object
        myobj = seating.Seating()
        # 2. Make sure it has the default values
        self.assertEqual(myobj.part2, False)
        self.assertEqual(myobj.text, None)
        self.assertEqual(len(myobj.people), 0)
        self.assertEqual(len(myobj.preferences), 0)

    def test_text_init(self):
        "Test the Seating object creation from text"
        # 1. Create Seating object from text
        myobj = seating.Seating(text=aoc_13.from_text(EXAMPLE_TEXT))
        # 2. Make sure it has the expected values
        # 12 input lines -> 4 people with 12 directed preferences.
        self.assertEqual(myobj.part2, False)
        self.assertEqual(len(myobj.text), 12)
        self.assertEqual(len(myobj.people), 4)
        self.assertEqual(len(myobj.preferences), 12)

    def test_part_one(self):
        "Test part one example of Seating object"
        # 1. Create Seating object from text
        myobj = seating.Seating(text=aoc_13.from_text(PART_ONE_TEXT))
        # 2. Check the part one result
        self.assertEqual(myobj.part_one(verbose=False), PART_ONE_RESULT)

    def test_part_two(self):
        "Test part two example of Seating object"
        # 1. Create Seating object from text
        myobj = seating.Seating(part2=True, text=aoc_13.from_text(PART_TWO_TEXT))
        # 2. Check the part two result
        self.assertEqual(myobj.part_two(verbose=False), PART_TWO_RESULT)
# ----------------------------------------------------------------------
# module initialization
# ----------------------------------------------------------------------
if __name__ == '__main__':
pass
# ======================================================================
# end t e s t _ s e a t i n g . p y end
# ======================================================================
| StarcoderdataPython |
11357639 | # Generated by Django 3.0.4 on 2020-04-22 08:41
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: add a 'reply' CharField (with a default) to Complaint."""

    dependencies = [
        ('hp', '0017_auto_20200422_0815'),
    ]

    operations = [
        migrations.AddField(
            model_name='complaint',
            name='reply',
            field=models.CharField(default='Will be answered shortly', max_length=50),
        ),
    ]
| StarcoderdataPython |
11305277 | from .full import Spotify
| StarcoderdataPython |
6705657 | import discord
def pretty_keys(ctx, keys):
    """
    Returns an embed for keys for a prettier discord format
    """
    embed = discord.Embed(
        title='Office Keys',
        color=0x03f8fc,
        timestamp=ctx.message.created_at)
    # Braille-blank padding forces each ID field to render at full width.
    padding = '\u2800' * 45
    for entry in keys:
        embed.add_field(name='ID', value=f'{entry[0]}{padding}', inline=False)
        embed.add_field(name='Key', value=entry[1], inline=False)
    embed.set_footer(text=f"Requested by {ctx.message.author.display_name}")
    return embed
| StarcoderdataPython |
4931196 | <filename>hapy/hapy/render.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import jinja2
from loguru import logger
import shutil
'''
Jinja2 Render Util
'''
class Render(object):
    """Jinja2 rendering utilities: render a single template file, a whole
    template directory, or an in-memory template string."""

    def __init__(self):
        pass

    def render_j2_template_file(self, templateFile, searchPath, **kwargs):
        """Render a single Jinja2 template file.

        :param templateFile: Name of the template file
        :param searchPath: Path to the templates directory
        :param kwargs: Values substituted for the template variables
        :returns: Rendered string
        """
        template_loader = jinja2.FileSystemLoader(searchpath=searchPath)
        env = jinja2.Environment(
            loader=template_loader, trim_blocks=True, lstrip_blocks=True
        )
        template = env.get_template(templateFile)
        return template.render(kwargs)

    def render_j2_template_dir(self, templateDir, destDir, ext=None, **kwargs):
        """Render every <file>.j2 under templateDir into destDir.

        Non-template files are copied through unchanged. Rendered files keep
        their base name, with `ext` appended when given.

        :param templateDir: Path to the directory holding the templates
        :param destDir: Path where rendered/copied files are written
        :param ext: Optional extension appended to the rendered files
        :param kwargs: Values substituted for the template variables
        :returns: 0 on success, -1 if templateDir does not exist
        """
        if not os.path.exists(templateDir):
            logger.error("templateDir {} does not exist", templateDir)
            return -1
        # Create destDir (including parents) if it does not exist; the old
        # os.mkdir failed when intermediate directories were missing.
        os.makedirs(destDir, exist_ok=True)
        for filename in os.listdir(templateDir):
            src_file_path = os.path.join(templateDir, filename)
            (base_name, ext_type) = os.path.splitext(filename)
            # Simply copy the non-template files.
            if ext_type != ".j2":
                shutil.copyfile(src_file_path, os.path.join(destDir, filename))
                continue
            if ext is not None:
                base_name += ".%s" % ext
            dest_file_path = os.path.join(destDir, base_name)
            rendered_data = self.render_j2_template_file(src_file_path,
                                                         ".",
                                                         **kwargs)
            # Fixed: replaced a leftover typo'd debug print ("Renndered
            # data") with proper logging.
            logger.debug("Rendered {} -> {}", src_file_path, dest_file_path)
            with open(dest_file_path, 'w') as fhandle:
                fhandle.write(rendered_data)
        return 0

    def render_j2_template_string(self, templateString, **kwargs):
        """Render a Jinja2 template supplied as a string.

        :param templateString: A Jinja2 template (type: string)
        :param kwargs: Values substituted for the template variables
        :returns: Rendered string
        """
        env = jinja2.Environment(
            loader=jinja2.BaseLoader, trim_blocks=True, lstrip_blocks=True
        )
        template = env.from_string(templateString)
        return template.render(kwargs)
| StarcoderdataPython |
167325 | # Demonstrate how to use dictionary comprehensions
def main():
    """Demonstrate dictionary comprehensions: mapping, lookup, and merging."""
    # Celsius readings to convert.
    celsius_readings = [0, 12, 34, 100]
    # Build a Celsius -> Fahrenheit map, skipping boiling-point readings.
    fahrenheit_by_celsius = {t: (t * 9/5) + 32 for t in celsius_readings if t < 100}
    print(fahrenheit_by_celsius)
    print(fahrenheit_by_celsius[12])
    # Merge two rosters with a single comprehension over both dicts.
    roster_one = {"Jones": 24, "Jameson": 18, "Smith": 58, "Burns": 7}
    roster_two = {"White": 12, "Macke": 88, "Perce": 4}
    combined_roster = {k: v for roster in (roster_one, roster_two) for k, v in roster.items()}
    print(combined_roster)
if __name__ == "__main__":
main()
| StarcoderdataPython |
6646186 | import numpy as np
import json
import torch
import torch.optim as optim
import torch.nn as nn
import torch.nn.functional as F
import torchvision.transforms as transforms
import torch.utils.data as data
from sklearn.metrics import confusion_matrix
from sklearn.datasets import load_svmlight_file
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn import tree
import argparse
import logging
import os
import copy
import datetime
import math
import xgboost as xgb
import pandas as pd
from model import *
from datasets import MNIST_truncated, SVHN_custom, CustomTensorDataset, CelebA_custom, ImageFolder_custom, PneumoniaDataset, ImageFolder_public
from trees import *
libsvm_datasets = {
"a9a": "binary_cls",
"cod-rna": "binary_cls"
}
n_workers = 0
def get_args():
    """Build and parse the command-line arguments for federated training.

    Groups (informally): model/dataset selection, partitioning, core
    optimization hyper-parameters, algorithm choice (fedavg/fedkt/fedprox/
    scaffold-style), PATE/knowledge-transfer knobs, differential-privacy
    settings, and bookkeeping paths/seeds.
    """
    parser = argparse.ArgumentParser()
    # --- model / data selection and partitioning ---
    parser.add_argument('--model', type=str, default='MLP', help='neural network used in training')
    parser.add_argument('--dataset', type=str, default='mnist', help='dataset used for training')
    parser.add_argument('--net_config', type=lambda x: list(map(int, x.split(', '))))
    parser.add_argument('--partition', type=str, default='hetero-dir', help='how to partition the dataset on local workers')
    # --- core optimization hyper-parameters ---
    parser.add_argument('--batch-size', type=int, default=128, help='input batch size for training (default: 64)')
    parser.add_argument('--lr', type=float, default=0.001, help='learning rate (default: 0.01)')
    parser.add_argument('--epochs', type=int, default=5, help='how many epochs will be trained in a training process')
    parser.add_argument('--n_parties', type=int, default=2, help='number of workers in a distributed cluster')
    parser.add_argument('--n_teacher_each_partition', type=int, default=1,
                        help='number of local nets in a partitioning of a party')
    parser.add_argument('--alg', type=str, default='fedavg',
                        help='which type of communication strategy is going to be used: fedavg/fedkt/fedprox/simenb')
    parser.add_argument('--comm_round', type=int, default=1,
                        help='number of communication rounds')
    parser.add_argument('--trials', type=int, default=1, help="Number of trials for each run")
    parser.add_argument('--init_seed', type=int, default=0, help="Random seed")
    parser.add_argument('--dropout_p', type=float, required=False, default=0.0, help="Dropout probability. Default=0.0")
    parser.add_argument('--datadir', type=str, required=False, default="./data/", help="Data directory")
    parser.add_argument('--reg', type=float, default=1e-5, help="L2 regularization strength")
    parser.add_argument('--logdir', type=str, required=True, default="./logs/", help='Log directory path')
    parser.add_argument('--modeldir', type=str, required=False, default="./models/", help='Model directory path')
    parser.add_argument('--max_tree_depth', type=int, default=6, help='Max tree depth for the tree model')
    parser.add_argument('--n_ensemble_models', type=int, default=10, help="Number of the models in the final ensemble")
    # --- PATE / knowledge-transfer (FedKT) options ---
    parser.add_argument('--train_local_student', type=int, default=1, help="whether use PATE to train local student models before aggregation")
    parser.add_argument('--auxiliary_data_portion', type=float, default=0.5, help="the portion of test data that is used as the auxiliary data for PATE")
    parser.add_argument('--stu_epochs', type=int, default=100, help='Number of epochs for the student model')
    parser.add_argument('--with_unlabeled', type=int, default=1, help='Whether there is public unlabeled data')
    parser.add_argument('--stu_lr', type=float, default=0.001, help='The learning rate for the student model')
    parser.add_argument('--is_local_split', type=int, default=1, help='Whether split the local data for local model training')
    parser.add_argument('--beta', type=float, default=0.5, help='The parameter for the dirichlet distribution for data partitioning')
    parser.add_argument('--device', type=str, default='cuda:0', help='The device to run the program')
    parser.add_argument('--ensemble_method', type=str, default='max_vote', help='Choice: max_vote or averaging')
    parser.add_argument('--log_file_name', type=str, default=None, help='The log file name')
    parser.add_argument('--n_partition', type=int, default=1, help='The partition times of each party')
    # --- differential-privacy options ---
    parser.add_argument('--gamma', type=float, default=None, help='The parameter for differential privacy')
    parser.add_argument('--privacy_analysis_file_name', type=str, default=None, help='The file path to save the information for privacy analysis')
    parser.add_argument('--n_stu_trees', type=int, default=100, help='The number of trees in a student model')
    parser.add_argument('--optimizer', type=str, default='adam', help='sgd or adam optimizer')
    parser.add_argument('--local_training_epochs', type=int, default=None, help='the number of epochs for the local trainig alg')
    parser.add_argument('--dp_level', type=int, default=0, help='1 represents add dp on the server side. 2 represents add dp on the party side')
    parser.add_argument('--query_portion', type=float, default=0.5, help='how many queries are used to train the final model')
    parser.add_argument('--local_query_portion', type=float, default=0.5, help='how many queries are used to train the student models')
    parser.add_argument('--filter_query', type=int, default=0, help='Whether to filter the query or not')
    parser.add_argument('--max_z', type=int, default=1, help='the maximum partition that may be influenced when changing a single record')
    parser.add_argument('--mu', type=float, default=1, help='the mu parameter for fedprox')
    parser.add_argument('--fedkt_seed', type=int, default=0, help='the seed before run fedkt')
    parser.add_argument('--pub_datadir', type=str, default=None, help='the path to the public data')
    parser.add_argument('--prob_threshold', type=float, default=None, help='a threshold to filter the votes')
    parser.add_argument('--min_require', type=int, default=None, help='require that the minimum number of samples of each class is at least min_require')
    parser.add_argument('--prob_threshold_apply', type=int, default=0,
                        help='0 means no apply, 1 means apply only at server part, 2 means apply only at party part, 3 means apply at both parts')
    parser.add_argument('--apply_consistency', type=int, default=1, help='the votes of the party will only be counted if they are the same if set to 1')
    parser.add_argument('--save_global_model', type=int, default=0, help='whether save the global model or not')
    parser.add_argument('--final_stu_epochs', type=int, default=100, help='the number of epochs to train the final student model')
    parser.add_argument('--init_std', type=float, default=-1, help='the stdv for the initialization of the weights, -1 for norm initialization')
    parser.add_argument('--std_place', type=int, default=0, help='1 for std in teacher model, 2 add student model')
    parser.add_argument('--retrain_local_epoch', type=int, default=10, help='the local epoch in fedavg/fedprox after fedkt')
    parser.add_argument('--n_final_stu_trees', type=int, default=100, help='the number of trees of the final model')
    parser.add_argument('--npartyseed', type=str, default=None, help='nparty-seed')
    parser.add_argument('--new_scaffold', type=int, default=0, help='whether use new scaffold')
    args = parser.parse_args()
    return args
def mkdirs(dirpath):
    """Create *dirpath* (including parents) if it does not already exist.

    The original wrapped ``os.makedirs`` in a blanket ``except Exception:
    pass``, which silently hid real failures (permissions, invalid paths).
    ``exist_ok=True`` tolerates only the already-exists case.
    """
    os.makedirs(dirpath, exist_ok=True)
def load_mnist_data(datadir):
    """Load the full MNIST train/test splits and return them as numpy arrays."""
    transform = transforms.Compose([transforms.ToTensor()])
    train_ds = MNIST_truncated(datadir, train=True, download=True, transform=transform)
    test_ds = MNIST_truncated(datadir, train=False, download=True, transform=transform)
    X_train = train_ds.data.data.numpy()
    y_train = train_ds.target.data.numpy()
    X_test = test_ds.data.data.numpy()
    y_test = test_ds.target.data.numpy()
    return (X_train, y_train, X_test, y_test)
def load_svhn_data(datadir):
    """Load the SVHN train/test splits; data/targets are returned as stored
    by SVHN_custom (no numpy conversion is performed here)."""
    transform = transforms.Compose([transforms.ToTensor()])
    train_ds = SVHN_custom(datadir, train=True, download=True, transform=transform)
    test_ds = SVHN_custom(datadir, train=False, download=True, transform=transform)
    return (train_ds.data, train_ds.target, test_ds.data, test_ds.target)
def load_celeba_data(datadir):
    """Load CelebA and derive a binary label from the 'Male' attribute.

    Only labels are returned; the image tensors themselves are not
    materialized here (X slots are None).
    """
    transform = transforms.Compose([transforms.ToTensor()])
    train_ds = CelebA_custom(datadir, split='train', target_type="attr", download=True, transform=transform)
    test_ds = CelebA_custom(datadir, split='test', target_type="attr", download=True, transform=transform)
    male_col = train_ds.attr_names.index('Male')
    train_labels = train_ds.attr[:, male_col:male_col + 1].reshape(-1)
    test_labels = test_ds.attr[:, male_col:male_col + 1].reshape(-1)
    return (None, train_labels, None, test_labels)
def load_xray_data(datadir):
    """Load chest x-ray image folders; returns sample paths and integer targets.

    NOTE(review): paths are built as datadir + './train/' — assumes datadir
    ends with a separator; confirm against callers.
    """
    transform = transforms.Compose([transforms.ToTensor()])
    train_ds = ImageFolder_custom(datadir+'./train/', transform=transform)
    test_ds = ImageFolder_custom(datadir+'./test/', transform=transform)
    return (train_ds.samples, train_ds.target, test_ds.samples, test_ds.target)
def record_net_data_stats(y_train, net_dataidx_map, logdir):
    """Count per-class sample totals for every party's index set and log them.

    Returns {party_id: {class_label: count}}.
    """
    net_cls_counts = {}
    for net_i, dataidx in net_dataidx_map.items():
        classes, counts = np.unique(y_train[dataidx], return_counts=True)
        net_cls_counts[net_i] = dict(zip(classes, counts))
    logger.info('Data statistics: %s' % str(net_cls_counts))
    return net_cls_counts
def partition_data(dataset, datadir, logdir, partition, n_parties, beta=0.4, min_require=None):
    """Load *dataset* and split its training indices across *n_parties*.

    partition: 'homo' for a uniform random split; 'hetero-dir' for a
    non-IID Dirichlet(beta) split. When min_require is given, the split is
    re-drawn until the smallest shard (and smallest per-class piece) has at
    least that many samples.

    Returns (X_train, y_train, X_test, y_test, net_dataidx_map,
    traindata_cls_counts) where net_dataidx_map maps party id -> index list.
    """
    if dataset == 'mnist':
        X_train, y_train, X_test, y_test = load_mnist_data(datadir)
    elif dataset == 'svhn':
        X_train, y_train, X_test, y_test = load_svhn_data(datadir)
    elif dataset == 'celeba':
        X_train, y_train, X_test, y_test = load_celeba_data(datadir)
    elif dataset == 'xray' :
        X_train, y_train, X_test, y_test = load_xray_data(datadir)
    elif dataset in libsvm_datasets:
        # X_train, y_train = load_svmlight_file(datadir + dataset + '.train')
        # X_test, y_test = load_svmlight_file(datadir + dataset + '.test')
        X, y = load_svmlight_file(datadir + dataset)
        # Binarize labels: samples matching the first label become class 1,
        # everything else class 0.
        y_i_transform = np.zeros(y.size)
        for i in range(y.size):
            if y[i] == y[0]:
                y_i_transform[i] = 1
        y=np.copy(y_i_transform)
        X_train, X_test, y_train, y_test = train_test_split(X, y)
    n_train = y_train.shape[0]
    if partition == "homo":
        # IID: random permutation chopped into n_parties equal chunks.
        idxs = np.random.permutation(n_train)
        batch_idxs = np.array_split(idxs, n_parties)
        net_dataidx_map = {i: batch_idxs[i] for i in range(n_parties)}
    elif partition == "hetero-dir":
        min_size = 0
        min_require_size = 10
        if min_require is not None:
            min_require_size = min_require
        # K = number of classes in the dataset.
        if dataset == 'mnist' or dataset == 'svhn':
            K = 10
        elif dataset in libsvm_datasets or dataset == 'celeba' or dataset == 'xray':
            K = 2
            # min_require_size = 100
        N = y_train.shape[0]
        net_dataidx_map = {}
        # Re-draw the whole Dirichlet split until the minimum-size constraint holds.
        while min_size < min_require_size:
            idx_batch = [[] for _ in range(n_parties)]
            for k in range(K):
                idx_k = np.where(y_train == k)[0]
                np.random.shuffle(idx_k)
                proportions = np.random.dirichlet(np.repeat(beta, n_parties))
                # print("proportions1: ", proportions)
                # print("sum pro1:", np.sum(proportions))
                ## Balance
                # Zero out parties already at/above the average share, then renormalize.
                proportions = np.array([p * (len(idx_j) < N / n_parties) for p, idx_j in zip(proportions, idx_batch)])
                # print("proportions2: ", proportions)
                proportions = proportions / proportions.sum()
                # print("proportions3: ", proportions)
                # Cumulative cut points into idx_k for the n_parties pieces.
                proportions = (np.cumsum(proportions) * len(idx_k)).astype(int)[:-1]
                # print("proportions4: ", proportions)
                idx_split = np.split(idx_k, proportions)
                idx_batch = [idx_j + idx.tolist() for idx_j, idx in zip(idx_batch, idx_split)]
            min_size = min([len(idx_j) for idx_j in idx_batch])
            if min_require is not None:
                # Also require each per-class piece of the *last* class split
                # to meet the minimum.
                min_size = min(min_size, min([len(idx) for idx in idx_split]))
            # if K == 2 and n_parties <= 10:
            #     if np.min(proportions) < 200:
            #         min_size = 0
            #         break
        for j in range(n_parties):
            np.random.shuffle(idx_batch[j])
            net_dataidx_map[j] = idx_batch[j]
    traindata_cls_counts = record_net_data_stats(y_train, net_dataidx_map, logdir)
    return (X_train, y_train, X_test, y_test, net_dataidx_map, traindata_cls_counts)
def init_nets(net_configs, dropout_p, n_parties, args, n_teacher_each_partition = 1, stdv=None):
    """Instantiate n_parties * n_teacher_each_partition fresh models.

    The architecture is chosen from args.model / args.dataset. Returns
    (nets, model_meta_data, layer_type): a dict of models keyed by index,
    the list of per-layer weight shapes, and the list of layer names —
    both taken from net 0 (all nets share the same architecture).
    """
    n_total_nets = n_parties * n_teacher_each_partition
    nets = {net_i: None for net_i in range(n_total_nets)}
    for net_i in range(n_total_nets):
        if args.model == "mlp":
            # net_configs is [input, hidden..., output] layer sizes.
            input_size = net_configs[0]
            output_size = net_configs[-1]
            hidden_sizes = net_configs[1:-1]
            net = FcNet(input_size, hidden_sizes, output_size, stdv, dropout_p)
        # elif args.model == "vgg":
        #     net = vgg11()
        elif args.model == "simple-cnn":
            if args.dataset in ("svhn"):
                net = SimpleCNN(input_dim=(16 * 5 * 5), hidden_dims=[120, 84], output_dim=10)
            elif args.dataset == "mnist":
                net = SimpleCNNMNIST(input_dim=(16 * 4 * 4), hidden_dims=[120, 84], output_dim=10)
            elif args.dataset == 'celeba' or args.dataset == 'xray':
                net = SimpleCNN(input_dim=(16 * 5 * 5), hidden_dims=[120, 84], output_dim=2)
        elif args.model == "vgg-9":
            if args.dataset in ("mnist"):
                net = ModerateCNNMNIST()
            elif args.dataset in ("svhn"):
                # print("in moderate cnn")
                net = ModerateCNN()
            elif args.dataset == 'celeba':
                net = ModerateCNN(output_dim=2)
        # elif args.model == "resnet":
        #     net = ResNet50()
        # elif args.model == "vgg16":
        #     net = vgg16()
        elif args.model == 'lr':
            if args.dataset == 'a9a':
                # a9a has 123 binary features, 2 classes.
                net = LogisticRegression(123,2)
        else:
            print("not supported yet")
            exit(1)
        nets[net_i] = net
    model_meta_data = []
    layer_type = []
    for (k, v) in nets[0].state_dict().items():
        model_meta_data.append(v.shape)
        layer_type.append(k)
    return nets, model_meta_data, layer_type
def init_weights(m):
    """Xavier-uniform init for Linear/Conv2d weights; biases set to 0.01.

    Intended for use with ``net.apply(init_weights)``; other module types
    are left untouched.
    """
    if type(m) in (nn.Linear, nn.Conv2d):
        torch.nn.init.xavier_uniform_(m.weight)
        m.bias.data.fill_(0.01)
def get_trainable_parameters(net):
    """Flatten all trainable (requires_grad) parameters of *net* into a
    single 1-D float64 tensor, in parameter-iteration order."""
    params = [p for p in net.parameters() if p.requires_grad]
    total = sum(p.numel() for p in params)
    flat = torch.zeros(total, dtype=torch.float64)
    pos = 0
    for p in params:
        count = p.numel()
        with torch.no_grad():
            flat[pos:pos + count].copy_(p.data.view(-1))
        pos += count
    return flat
def get_all_parameters(net):
    """Flatten *all* parameters of *net* (trainable or not) into a single
    1-D float64 tensor, in parameter-iteration order."""
    params = list(net.parameters())
    total = sum(p.numel() for p in params)
    flat = torch.zeros(total, dtype=torch.float64)
    pos = 0
    for p in params:
        count = p.numel()
        with torch.no_grad():
            flat[pos:pos + count].copy_(p.data.view(-1))
        pos += count
    return flat
def put_trainable_parameters(net, X):
    """Copy values from the flat vector *X* back into net's trainable
    parameters, consuming X in parameter-iteration order (inverse of
    get_trainable_parameters)."""
    pos = 0
    for p in net.parameters():
        if not p.requires_grad:
            continue
        count = p.numel()
        with torch.no_grad():
            p.data.copy_(X[pos:pos + count].data.view_as(p.data))
        pos += count
def put_all_parameters(net, X):
    """Copy values from the flat vector *X* back into *all* of net's
    parameters, consuming X in parameter-iteration order (inverse of
    get_all_parameters)."""
    pos = 0
    for p in net.parameters():
        count = p.numel()
        with torch.no_grad():
            p.data.copy_(X[pos:pos + count].data.view_as(p.data))
        pos += count
def compute_accuracy(model, dataloader, get_confusion_matrix=False, device="cpu"):
    """Evaluate *model* on *dataloader* (batches of (x, target, _)).

    Returns accuracy in [0, 1]; with get_confusion_matrix=True returns
    (accuracy, confusion_matrix). Temporarily switches the model to eval
    mode and restores training mode afterwards.
    """
    was_training = model.training
    if was_training:
        model.eval()
    all_true = np.array([])
    all_pred = np.array([])
    correct = 0
    total = 0
    with torch.no_grad():
        for x, target, _ in dataloader:
            x = x.to(device)
            target = target.to(device)
            _, pred_label = torch.max(model(x).data, 1)
            total += x.data.size()[0]
            correct += (pred_label == target.data).sum().item()
            # Move to host before appending when running on an accelerator.
            if device != "cpu":
                pred_label = pred_label.cpu()
                target = target.cpu()
            all_pred = np.append(all_pred, pred_label.numpy())
            all_true = np.append(all_true, target.data.numpy())
    if was_training:
        model.train()
    if get_confusion_matrix:
        return correct / float(total), confusion_matrix(all_true, all_pred)
    return correct / float(total)
def prepare_weight_matrix(n_classes, weights: dict):
    """Turn per-net {class_label: count} dicts into (1, n_classes) float32
    tensors; classes absent from a net's dict get weight 0."""
    out = {}
    for net_i, cls_cnts in weights.items():
        row = np.zeros(n_classes, dtype=np.float32)
        row[np.array(list(cls_cnts.keys()))] = np.array(list(cls_cnts.values()))
        out[net_i] = torch.from_numpy(row).view(1, -1)
    return out
def prepare_uniform_weights(n_classes, net_cnt, fill_val=1):
    """Give every one of *net_cnt* nets an identical (1, n_classes) float32
    tensor filled with *fill_val*."""
    return {
        i: torch.from_numpy(np.full(n_classes, fill_val, dtype=np.float32)).view(1, -1)
        for i in range(net_cnt)
    }
def prepare_sanity_weights(n_classes, net_cnt):
    """All-zero ensemble weights — a sanity-check configuration in which
    every model's vote counts for nothing."""
    return prepare_uniform_weights(n_classes, net_cnt, fill_val=0)
def normalize_weights(weights):
    """Normalize per-model class-weight rows so each class column sums to
    (approximately) 1 across models; eps guards against zero columns."""
    eps = 1e-6
    column_totals = None
    for w in weights.values():
        arr = w.data.numpy()
        column_totals = arr if column_totals is None else column_totals + arr
    return {mi: w / torch.from_numpy(column_totals + eps) for mi, w in weights.items()}
def get_weighted_average_pred(models: list, weights: dict, x, device="cpu"):
    """Weighted sum of the models' softmax outputs on batch *x*.

    weights[i] is model i's (1, C) weight row; the result has shape (N, C).
    """
    accumulated = None
    for idx, mdl in enumerate(models):
        probs = F.softmax(mdl(x), dim=-1)  # (N, C)
        w = weights[idx].to(device)
        contribution = probs * w
        accumulated = contribution if accumulated is None else accumulated + contribution
    return accumulated
def get_pred_votes(models, x, threshold=None, device="cpu"):
    """Collect each model's hard prediction ("vote") on batch *x*.

    Returns a 1-D LongTensor of length len(models) * N: each model's
    per-sample argmax labels, concatenated in model order. When *threshold*
    is given, predictions whose max softmax probability is strictly below
    it are replaced with -1 (abstain).

    Fix: the original filtered low-confidence predictions with a
    per-element Python loop over the tensor; this is vectorized with
    torch.where (same strict ``<`` comparison, same result).
    """
    votes = torch.LongTensor([]).to(device)
    for model_i, model in enumerate(models):
        out = F.softmax(model(x), dim=-1)  # (N, C)
        pred_probs, pred_label = torch.max(out, 1)
        if threshold is not None:
            # Vectorized abstention: -1 wherever confidence < threshold.
            pred_label = torch.where(pred_probs < threshold,
                                     torch.full_like(pred_label, -1),
                                     pred_label)
        votes = torch.cat((votes, pred_label), dim=0)
    return votes
def compute_ensemble_accuracy(models: list, dataloader, n_classes, ensemble_method="max_vote", train_cls_counts=None,
                              uniform_weights=False, sanity_weights=False, device="cpu"):
    """Evaluate an ensemble of models on *dataloader*.

    ensemble_method: 'averaging' combines weighted softmax outputs
    (weights from train_cls_counts unless uniform/sanity flags are set);
    'max_vote' takes the per-sample mode of the models' hard predictions.
    Returns (accuracy, confusion_matrix). Models are put in eval mode and
    restored to their previous mode afterwards.
    """
    correct, total = 0, 0
    true_labels_list, pred_labels_list = np.array([]), np.array([])
    was_training = [False]*len(models)
    for i, model in enumerate(models):
        if model.training:
            was_training[i] = True
            model.eval()
    if ensemble_method == "averaging":
        if uniform_weights is True:
            weights_list = prepare_uniform_weights(n_classes, len(models))
        elif sanity_weights is True:
            weights_list = prepare_sanity_weights(n_classes, len(models))
        else:
            weights_list = prepare_weight_matrix(n_classes, train_cls_counts)
        weights_norm = normalize_weights(weights_list)
    with torch.no_grad():
        for batch_idx, (x, target, _) in enumerate(dataloader):
            x, target = x.to(device), target.to(device)
            target = target.long()
            if ensemble_method == "averaging":
                out = get_weighted_average_pred(models, weights_norm, x, device=device)
                _, pred_label = torch.max(out, 1)
            elif ensemble_method == "max_vote":
                # votes has len(models)*batch entries; reshape so each column
                # holds one sample's votes, then take the column-wise mode.
                votes = get_pred_votes(models, x, device=device)
                pred_label, _ = torch.mode(votes.view(-1, x.data.size()[0]), dim=0)
            total += x.data.size()[0]
            correct += (pred_label == target.data).sum().item()
            if device == "cpu":
                pred_labels_list = np.append(pred_labels_list, pred_label.numpy())
                true_labels_list = np.append(true_labels_list, target.data.numpy())
            else:
                pred_labels_list = np.append(pred_labels_list, pred_label.cpu().numpy())
                true_labels_list = np.append(true_labels_list, target.data.cpu().numpy())
    #logger.info(correct, total)
    conf_matrix = confusion_matrix(true_labels_list, pred_labels_list)
    for i, model in enumerate(models):
        if was_training[i]:
            model.train()
    return correct / float(total), conf_matrix
def train_net(net_id, net, train_dataloader, test_dataloader, epochs, lr, args_optimizer, device="cpu"):
    """Train *net* for *epochs* with Adam + cross-entropy; log accuracy
    before/after and every 10 epochs. Returns (train_acc, test_acc).

    NOTE(review): args_optimizer is currently ignored — Adam is always
    used. weight_decay reads the module-level `args.reg`, not a parameter;
    confirm this is intended.
    """
    logger.info('Training network %s' % str(net_id))
    logger.info('n_training: %d' % len(train_dataloader))
    logger.info('n_test: %d' % len(test_dataloader))
    train_acc = compute_accuracy(net, train_dataloader, device=device)
    test_acc, conf_matrix = compute_accuracy(net, test_dataloader, get_confusion_matrix=True, device=device)
    logger.info('>> Pre-Training Training accuracy: {}'.format(train_acc))
    logger.info('>> Pre-Training Test accuracy: {}'.format(test_acc))
    optimizer = optim.Adam(filter(lambda p: p.requires_grad, net.parameters()), lr=lr, weight_decay=args.reg)
    criterion = nn.CrossEntropyLoss().to(device)
    cnt = 0
    for epoch in range(epochs):
        epoch_loss_collector = []
        for batch_idx, (x, target, _) in enumerate(train_dataloader):
            x, target = x.to(device), target.to(device)
            #for adam l2 reg
            # l2_reg = torch.zeros(1)
            # l2_reg.requires_grad = True
            optimizer.zero_grad()
            x.requires_grad = True
            target.requires_grad = False
            target = target.long()
            out = net(x)
            loss = criterion(out, target)
            loss.backward()
            optimizer.step()
            cnt += 1
            epoch_loss_collector.append(loss.item())
        # logger.info('Epoch: %d Loss: %f L2 loss: %f' % (epoch, loss.item(), reg*l2_reg))
        epoch_loss = sum(epoch_loss_collector) / len(epoch_loss_collector)
        if epoch % 10 == 0:
            logger.info('Epoch: %d Loss: %f' % (epoch, epoch_loss))
            train_acc = compute_accuracy(net, train_dataloader, device=device)
            test_acc, conf_matrix = compute_accuracy(net, test_dataloader, get_confusion_matrix=True, device=device)
            logger.info('>> Training accuracy: %f' % train_acc)
            logger.info('>> Test accuracy: %f' % test_acc)
    train_acc = compute_accuracy(net, train_dataloader, device=device)
    test_acc, conf_matrix = compute_accuracy(net, test_dataloader, get_confusion_matrix=True, device=device)
    logger.info('>> Training accuracy: %f' % train_acc)
    logger.info('>> Test accuracy: %f' % test_acc)
    logger.info(' ** Training complete **')
    return train_acc, test_acc
def train_net_fedprox(net_id, net, global_net, train_dataloader, test_dataloader, epochs, lr, args_optimizer, mu, model, device="cpu"):
    """Train *net* with the FedProx objective: cross-entropy plus a
    (mu/2) * ||w - w_global||^2 proximal term that keeps the local model
    close to *global_net*. Returns (train_acc, test_acc).

    NOTE(review): args_optimizer and model are currently unused — Adam is
    always used; weight_decay reads the module-level `args.reg`.
    """
    logger.info('Training network %s' % str(net_id))
    logger.info('n_training: %d' % len(train_dataloader))
    logger.info('n_test: %d' % len(test_dataloader))
    train_acc = compute_accuracy(net, train_dataloader, device=device)
    test_acc, conf_matrix = compute_accuracy(net, test_dataloader, get_confusion_matrix=True, device=device)
    logger.info('>> Pre-Training Training accuracy: {}'.format(train_acc))
    logger.info('>> Pre-Training Test accuracy: {}'.format(test_acc))
    optimizer = optim.Adam(filter(lambda p: p.requires_grad, net.parameters()), lr=lr, weight_decay=args.reg)
    criterion = nn.CrossEntropyLoss().to(device)
    cnt = 0
    # mu = 0.001
    # Snapshot of the global model's parameters for the proximal term.
    global_weight_collector = list(global_net.to(device).parameters())
    for epoch in range(epochs):
        epoch_loss_collector = []
        for batch_idx, (x, target, _) in enumerate(train_dataloader):
            x, target = x.to(device), target.to(device)
            #for adam l2 reg
            # l2_reg = torch.zeros(1)
            # l2_reg.requires_grad = True
            optimizer.zero_grad()
            x.requires_grad = True
            target.requires_grad = False
            target = target.long()
            out = net(x)
            loss = criterion(out, target)
            #for fedprox
            fed_prox_reg = 0.0
            # fed_prox_reg += np.linalg.norm([i - j for i, j in zip(global_weight_collector, get_trainable_parameters(net).tolist())], ord=2)
            for param_index, param in enumerate(net.parameters()):
                fed_prox_reg += ((mu / 2) * torch.norm((param - global_weight_collector[param_index]))**2)
            loss += fed_prox_reg
            loss.backward()
            optimizer.step()
            cnt += 1
            epoch_loss_collector.append(loss.item())
        # logger.info('Epoch: %d Loss: %f L2 loss: %f' % (epoch, loss.item(), reg*l2_reg))
        epoch_loss = sum(epoch_loss_collector) / len(epoch_loss_collector)
        logger.info('Epoch: %d Loss: %f' % (epoch, epoch_loss))
        if epoch % 10 == 0:
            train_acc = compute_accuracy(net, train_dataloader, device=device)
            test_acc, conf_matrix = compute_accuracy(net, test_dataloader, get_confusion_matrix=True, device=device)
            logger.info('>> Training accuracy: %f' % train_acc)
            logger.info('>> Test accuracy: %f' % test_acc)
    train_acc = compute_accuracy(net, train_dataloader, device=device)
    test_acc, conf_matrix = compute_accuracy(net, test_dataloader, get_confusion_matrix=True, device=device)
    logger.info('>> Training accuracy: %f' % train_acc)
    logger.info('>> Test accuracy: %f' % test_acc)
    logger.info(' ** Training complete **')
    return train_acc, test_acc
def train_net_scaffold(net_id, net, global_net, train_dataloader, test_dataloader, epochs, lr, args_optimizer, args, server_c, client_c, device="cpu"):
    """Train *net* with SCAFFOLD: SGD whose gradients are corrected by the
    difference of the server and client control variates (server_c,
    client_c); after training, the client control variate is updated in
    place. Returns (train_acc, test_acc, client_c_delta).
    """
    logger.info('Training network %s' % str(net_id))
    logger.info('n_training: %d' % len(train_dataloader))
    logger.info('n_test: %d' % len(test_dataloader))
    train_acc = compute_accuracy(net, train_dataloader, device=device)
    test_acc, conf_matrix = compute_accuracy(net, test_dataloader, get_confusion_matrix=True, device=device)
    logger.info('>> Pre-Training Training accuracy: {}'.format(train_acc))
    logger.info('>> Pre-Training Test accuracy: {}'.format(test_acc))
    optimizer = optim.SGD(filter(lambda p: p.requires_grad, net.parameters()), lr=lr, weight_decay=args.reg)
    criterion = nn.CrossEntropyLoss().to(device)
    cnt = 0
    # mu = 0.001
    global_collector = list(global_net.to(device).parameters())
    server_c_collector = list(server_c.to(device).parameters())
    client_c_collector = list(client_c.to(device).parameters())
    client_c_delta = copy.deepcopy(client_c_collector)
    # NOTE(review): c_global_para / c_local_para are computed but only used
    # by the commented-out alternative update below.
    c_global_para = get_all_parameters(server_c)
    c_local_para = get_all_parameters(client_c)
    for epoch in range(epochs):
        epoch_loss_collector = []
        for batch_idx, (x, target, _) in enumerate(train_dataloader):
            x, target = x.to(device), target.to(device)
            #for adam l2 reg
            # l2_reg = torch.zeros(1)
            # l2_reg.requires_grad = True
            optimizer.zero_grad()
            x.requires_grad = True
            target.requires_grad = False
            target = target.long()
            out = net(x)
            loss = criterion(out, target)
            loss.backward()
            # SCAFFOLD gradient correction: g <- g + c_server - c_client.
            for param_index, param in enumerate(net.parameters()):
                param.grad += server_c_collector[param_index] - client_c_collector[param_index]
            optimizer.step()
            # net_para = get_all_parameters(net)
            # net_para = net_para - args.lr * (c_global_para - c_local_para)
            # put_all_parameters(net, net_para)
            # for param_index, param in enumerate(net.parameters()):
            #     r_grad = param.requires_grad
            #     param.requires_grad = False
            #     param -= args.lr*(server_c_collector[param_index] - client_c_collector[param_index])
            #     param.requires_grad = r_grad
            cnt += 1
            epoch_loss_collector.append(loss.item())
        # logger.info('Epoch: %d Loss: %f L2 loss: %f' % (epoch, loss.item(), reg*l2_reg))
        epoch_loss = sum(epoch_loss_collector) / len(epoch_loss_collector)
        logger.info('Epoch: %d Loss: %f' % (epoch, epoch_loss))
        if epoch % 10 == 0:
            train_acc = compute_accuracy(net, train_dataloader, device=device)
            test_acc, conf_matrix = compute_accuracy(net, test_dataloader, get_confusion_matrix=True, device=device)
            logger.info('>> Training accuracy: %f' % train_acc)
            logger.info('>> Test accuracy: %f' % test_acc)
    # Control-variate update (SCAFFOLD option II): delta derived from the
    # distance to the global model over total local steps, then applied
    # in place to the client control variate.
    for param_index, param in enumerate(net.parameters()):
        client_c_delta[param_index] = (global_collector[param_index] - param) / (
                args.epochs * len(train_dataloader) * lr) - server_c_collector[param_index]
        client_c_collector[param_index] += client_c_delta[param_index]
    train_acc = compute_accuracy(net, train_dataloader, device=device)
    test_acc, conf_matrix = compute_accuracy(net, test_dataloader, get_confusion_matrix=True, device=device)
    logger.info('>> Training accuracy: %f' % train_acc)
    logger.info('>> Test accuracy: %f' % test_acc)
    logger.info(' ** Training complete **')
    return train_acc, test_acc, client_c_delta
def save_model(model, model_index, args):
    """Serialize model's state_dict to <logdir>/<log_file_name>.model<index>."""
    logger.info("saving model-{}".format(model_index))
    path = os.path.join(args.logdir, args.log_file_name) + ".model" + str(model_index)
    with open(path, "wb") as f_:
        torch.save(model.state_dict(), f_)
    return
def load_model(model, model_index, rank=0, device="cpu"):
    """Load weights for *model* from file trained_local_model<index> in the
    working directory, move the model to *device*, and return it.
    NOTE(review): `rank` is unused here.
    """
    with open("trained_local_model" + str(model_index), "rb") as f_:
        state = torch.load(f_)
    model.load_state_dict(state)
    model.to(device)
    return model
def local_train_net(nets, args, net_dataidx_map, X_train = None, y_train = None, X_test = None, y_test = None, remain_test_dl = None, local_split=False, retrain_epoch=None, device="cpu"):
    """Train every net in *nets* on its party's shard of the data (FedAvg
    local step). Each party owns n_teacher_each_partition consecutive nets;
    local_split further splits a party's shard among its teachers.
    Returns the list of trained nets.

    NOTE(review): for libsvm datasets this reads the module-level
    `public_data_size` and overwrites `remain_test_dl` with the held-out
    test remainder; for other datasets the caller-supplied remain_test_dl
    is used as the test loader — confirm it is not None there.
    """
    # save local dataset
    # local_datasets = []
    n_teacher_each_partition = args.n_teacher_each_partition
    avg_acc = 0.0
    if local_split:
        # Split each party's index list into one piece per teacher.
        split_datasets = []
        for party_id in range(args.n_parties):
            np.random.shuffle(net_dataidx_map[party_id])
            split_datasets.append(np.array_split(net_dataidx_map[party_id], args.n_teacher_each_partition))
    for net_id, net in nets.items():
        if not local_split:
            dataidxs = net_dataidx_map[net_id//n_teacher_each_partition]
        else:
            dataidxs = list(split_datasets[net_id//n_teacher_each_partition][net_id%n_teacher_each_partition])
        logger.info("Training network %s. n_training: %d" % (str(net_id), len(dataidxs)))
        # move the model to cuda device:
        net.to(device)
        if args.dataset in libsvm_datasets:
            party_id = net_id // n_teacher_each_partition
            train_ds_local = CustomTensorDataset(torch.tensor(X_train[net_dataidx_map[party_id]].toarray(), dtype=torch.float32),
                                                 torch.tensor(y_train[net_dataidx_map[party_id]], dtype=torch.long))
            public_ds = CustomTensorDataset(torch.tensor(X_test[:public_data_size].toarray(), dtype=torch.float32),
                                            torch.tensor(y_test[:public_data_size], dtype=torch.long))
            remain_test_ds = CustomTensorDataset(torch.tensor(X_test[public_data_size:].toarray(), dtype=torch.float32),
                                                 torch.tensor(y_test[public_data_size:], dtype=torch.long))
            train_dl_local = data.DataLoader(dataset=train_ds_local, batch_size=args.batch_size, shuffle=True)
            remain_test_dl = data.DataLoader(dataset=remain_test_ds, batch_size=32, shuffle=False)
        else:
            train_dl_local, test_dl_local, _, _ = get_dataloader(args.dataset, args.datadir, args.batch_size, 32, dataidxs)
            train_dl_global, test_dl_global, _, _ = get_dataloader(args.dataset, args.datadir, args.batch_size, 32)
        # local_datasets.append((train_dl_local, test_dl_local))
        # switch to global test set here
        # if remain_test_dl is not None:
        #     test_dl_global = remain_test_dl
        if args.alg == 'local_training':
            n_epoch = args.local_training_epochs
        else:
            n_epoch = args.epochs
        if retrain_epoch is not None:
            n_epoch = retrain_epoch
        trainacc, testacc = train_net(net_id, net, train_dl_local, remain_test_dl, n_epoch, args.lr, args.optimizer, device=device)
        logger.info("net %d final test acc %f" % (net_id, testacc))
        avg_acc += testacc
        # saving the trained models here
        # save_model(net, net_id, args)
        # else:
        #     load_model(net, net_id, device=device)
    avg_acc /= args.n_parties
    if args.alg == 'local_training':
        logger.info("avg test acc %f" % avg_acc)
    nets_list = list(nets.values())
    return nets_list
def local_train_net_fedprox(nets, global_model, args, net_dataidx_map, X_train = None, y_train = None, X_test = None, y_test = None, remain_test_dl = None, local_split=False, retrain_epoch=None, device="cpu"):
    """FedProx variant of local_train_net: each net trains against
    *global_model* via train_net_fedprox (proximal coefficient args.mu).
    Returns the list of trained nets.

    NOTE(review): the libsvm branch reads module-level `public_data_size`
    and `n_workers`, and overwrites `remain_test_dl`; for other datasets
    the caller-supplied remain_test_dl is used as the test loader.
    """
    # save local dataset
    # local_datasets = []
    n_teacher_each_partition = args.n_teacher_each_partition
    avg_acc = 0.0
    if local_split:
        # Split each party's index list into one piece per teacher.
        split_datasets = []
        for party_id in range(args.n_parties):
            np.random.shuffle(net_dataidx_map[party_id])
            split_datasets.append(np.array_split(net_dataidx_map[party_id], args.n_teacher_each_partition))
    for net_id, net in nets.items():
        if not local_split:
            dataidxs = net_dataidx_map[net_id//n_teacher_each_partition]
        else:
            dataidxs = list(split_datasets[net_id//n_teacher_each_partition][net_id%n_teacher_each_partition])
        logger.info("Training network %s. n_training: %d" % (str(net_id), len(dataidxs)))
        # move the model to cuda device:
        net.to(device)
        if args.dataset in libsvm_datasets:
            party_id = net_id//n_teacher_each_partition
            train_ds_local = CustomTensorDataset(torch.tensor(X_train[net_dataidx_map[party_id]].toarray(), dtype=torch.float32),
                                                 torch.tensor(y_train[net_dataidx_map[party_id]], dtype=torch.long))
            public_ds = CustomTensorDataset(torch.tensor(X_test[:public_data_size].toarray(), dtype=torch.float32),
                                            torch.tensor(y_test[:public_data_size], dtype=torch.long))
            remain_test_ds = CustomTensorDataset(torch.tensor(X_test[public_data_size:].toarray(), dtype=torch.float32),
                                                 torch.tensor(y_test[public_data_size:], dtype=torch.long))
            train_dl_local = data.DataLoader(dataset=train_ds_local, batch_size=args.batch_size, shuffle=True, num_workers=n_workers)
            remain_test_dl = data.DataLoader(dataset=remain_test_ds, batch_size=32, shuffle=False)
        else:
            train_dl_local, test_dl_local, _, _ = get_dataloader(args.dataset, args.datadir, args.batch_size, 32, dataidxs)
            train_dl_global, test_dl_global, _, _ = get_dataloader(args.dataset, args.datadir, args.batch_size, 32)
        if args.alg == 'local_training':
            n_epoch = args.local_training_epochs
        else:
            n_epoch = args.epochs
        if retrain_epoch is not None:
            n_epoch = retrain_epoch
        trainacc, testacc = train_net_fedprox(net_id, net, global_model, train_dl_local, remain_test_dl, n_epoch, args.lr, args.optimizer, args.mu, args.model, device=device)
        logger.info("net %d final test acc %f" % (net_id, testacc))
        avg_acc += testacc
    avg_acc /= args.n_parties
    if args.alg == 'local_training':
        logger.info("avg test acc %f" % avg_acc)
    nets_list = list(nets.values())
    return nets_list
def local_train_net_scaffold(nets, global_model, args, net_dataidx_map, X_train = None, y_train = None, X_test = None, y_test = None, server_c=None, clients_c=None, remain_test_dl = None, local_split=False, device="cpu"):
    """SCAFFOLD variant of local_train_net: each net trains via
    train_net_scaffold with the server control variate *server_c* and its
    own client control variate from *clients_c*. With args.new_scaffold
    the server control variate is updated in place from the averaged
    client deltas after all parties finish. Returns the trained nets.
    """
    n_teacher_each_partition = args.n_teacher_each_partition
    avg_acc = 0.0
    if local_split:
        # Split each party's index list into one piece per teacher.
        split_datasets = []
        for party_id in range(args.n_parties):
            np.random.shuffle(net_dataidx_map[party_id])
            split_datasets.append(np.array_split(net_dataidx_map[party_id], args.n_teacher_each_partition))
    server_c_collector = list(server_c.to(device).parameters())
    new_server_c_collector = copy.deepcopy(server_c_collector)
    for net_id, net in nets.items():
        if not local_split:
            dataidxs = net_dataidx_map[net_id // n_teacher_each_partition]
        else:
            dataidxs = list(split_datasets[net_id // n_teacher_each_partition][net_id % n_teacher_each_partition])
        logger.info("Training network %s. n_training: %d" % (str(net_id), len(dataidxs)))
        # move the model to cuda device:
        net.to(device)
        if args.dataset in libsvm_datasets:
            party_id = net_id // n_teacher_each_partition
            train_ds_local = CustomTensorDataset(
                torch.tensor(X_train[net_dataidx_map[party_id]].toarray(), dtype=torch.float32),
                torch.tensor(y_train[net_dataidx_map[party_id]], dtype=torch.long))
            public_ds = CustomTensorDataset(torch.tensor(X_test[:public_data_size].toarray(), dtype=torch.float32),
                                            torch.tensor(y_test[:public_data_size], dtype=torch.long))
            remain_test_ds = CustomTensorDataset(torch.tensor(X_test[public_data_size:].toarray(), dtype=torch.float32),
                                                 torch.tensor(y_test[public_data_size:], dtype=torch.long))
            train_dl_local = data.DataLoader(dataset=train_ds_local, batch_size=args.batch_size, shuffle=True,
                                             num_workers=n_workers)
            remain_test_dl = data.DataLoader(dataset=remain_test_ds, batch_size=32, shuffle=False)
        else:
            train_dl_local, test_dl_local, _, _ = get_dataloader(args.dataset, args.datadir, args.batch_size, 32,
                                                                 dataidxs)
            train_dl_global, test_dl_global, _, _ = get_dataloader(args.dataset, args.datadir, args.batch_size, 32)
        if args.alg == 'local_training':
            n_epoch = args.local_training_epochs
        else:
            n_epoch = args.epochs
        trainacc, testacc, c_delta = train_net_scaffold(net_id, net, global_model, train_dl_local, remain_test_dl, n_epoch,
                                                        args.lr, args.optimizer, args, server_c, clients_c[net_id], device=device)
        if args.new_scaffold:
            # Accumulate the averaged client delta into a staging copy.
            for param_index, param in enumerate(server_c.parameters()):
                new_server_c_collector[param_index] += c_delta[param_index] / args.n_parties
        logger.info("net %d final test acc %f" % (net_id, testacc))
        avg_acc += testacc
    if args.new_scaffold:
        # Commit the staged server control-variate update after all parties.
        for param_index, param in enumerate(server_c.parameters()):
            server_c_collector[param_index] = new_server_c_collector[param_index]
    avg_acc /= args.n_parties
    if args.alg == 'local_training':
        logger.info("avg test acc %f" % avg_acc)
    nets_list = list(nets.values())
    return nets_list
def local_train_net_on_a_party(nets, args, net_dataidx_map, party_id, X_train = None, y_train = None, X_test = None, y_test = None, remain_test_dl = None, local_split=0, device="cpu"):
    """Train all nets of a single party (*party_id*) on that party's shard;
    with local_split the shard is divided among the party's teacher nets.
    Returns the list of trained nets.

    NOTE(review): the libsvm branch reads the module-level
    `public_data_size`; confirm it is defined before this is called.
    """
    # save local dataset
    # local_datasets = []
    n_teacher_each_partition = args.n_teacher_each_partition
    if local_split:
        # Divide this party's index list among its teacher nets.
        split_datasets = []
        np.random.shuffle(net_dataidx_map[party_id])
        split_datasets = np.array_split(net_dataidx_map[party_id], args.n_teacher_each_partition)
    for net_id, net in nets.items():
        if not local_split:
            dataidxs = net_dataidx_map[party_id]
        else:
            dataidxs = list(split_datasets[net_id])
        logger.info("Training network %s. n_training: %d" % (str(net_id), len(dataidxs)))
        # move the model to cuda device:
        net.to(device)
        if args.dataset in libsvm_datasets:
            train_ds_local = CustomTensorDataset(torch.tensor(X_train[net_dataidx_map[party_id]].toarray(), dtype=torch.float32),
                                                 torch.tensor(y_train[net_dataidx_map[party_id]], dtype=torch.long))
            public_ds = CustomTensorDataset(torch.tensor(X_test[:public_data_size].toarray(), dtype=torch.float32),
                                            torch.tensor(y_test[:public_data_size], dtype=torch.long))
            remain_test_ds = CustomTensorDataset(torch.tensor(X_test[public_data_size:].toarray(), dtype=torch.float32),
                                                 torch.tensor(y_test[public_data_size:], dtype=torch.long))
            train_dl_local = data.DataLoader(dataset=train_ds_local, batch_size=args.batch_size, shuffle=True)
            test_dl_global = data.DataLoader(dataset=remain_test_ds, batch_size=32, shuffle=False)
        else:
            train_dl_local, test_dl_local, _, _ = get_dataloader(args.dataset, args.datadir, args.batch_size, 32, dataidxs)
            train_dl_global, test_dl_global, _, _ = get_dataloader(args.dataset, args.datadir, args.batch_size, 32)
        # local_datasets.append((train_dl_local, test_dl_local))
        # switch to global test set here
        if remain_test_dl is not None:
            test_dl_global = remain_test_dl
        trainacc, testacc = train_net(net_id, net, train_dl_local, test_dl_global, args.epochs, args.lr, args.optimizer, device=device)
        # saving the trained models here
        # save_model(net, net_id, args)
        # else:
        #     load_model(net, net_id, device=device)
    nets_list = list(nets.values())
    return nets_list
def central_train_net_on_a_party(nets, args, X_train = None, y_train = None, X_test = None, y_test = None, remain_test_dl = None, local_split=0, device="cpu"):
    """Centralized-PATE baseline: split ALL training data evenly across the
    teacher networks and train each teacher on its own disjoint chunk.

    Args:
        nets: dict {net_id: torch model} of teacher networks.
        args: experiment arguments (dataset, batch_size, epochs, lr, optimizer,
            n_teacher_each_partition, ...).
        X_train/y_train/X_test/y_test: raw (sparse) data; only used for
            libsvm-style datasets.
        remain_test_dl: optional held-out test loader; overrides the default.
        local_split: unused; kept for signature compatibility with
            local_train_net_on_a_party.
        device: torch device string.

    Returns:
        list of the trained networks (the values of `nets`).
    """
    n_teacher_each_partition = args.n_teacher_each_partition
    # Shuffle the full training index range, then partition it so each teacher
    # sees a disjoint chunk of the centralized dataset.
    dataidx_arr = np.arange(len(y_train))
    np.random.shuffle(dataidx_arr)
    dataidx = np.array_split(dataidx_arr, n_teacher_each_partition)
    for net_id, net in nets.items():
        net.to(device)
        if args.dataset in libsvm_datasets:
            # Bug fix: the original referenced `net_dataidx_map[party_id]`,
            # which are undefined in this function (NameError); each teacher
            # must train on its own split `dataidx[net_id]`, matching the
            # non-libsvm branch below.
            # NOTE(review): `public_data_size` is a module-level global
            # assigned in __main__ — confirm it is set before this is called.
            idxs = dataidx[net_id]
            train_ds_local = CustomTensorDataset(torch.tensor(X_train[idxs].toarray(), dtype=torch.float32),
                                                 torch.tensor(y_train[idxs], dtype=torch.long))
            remain_test_ds = CustomTensorDataset(torch.tensor(X_test[public_data_size:].toarray(), dtype=torch.float32),
                                                 torch.tensor(y_test[public_data_size:], dtype=torch.long))
            train_dl_local = data.DataLoader(dataset=train_ds_local, batch_size=args.batch_size, shuffle=True)
            test_dl_global = data.DataLoader(dataset=remain_test_ds, batch_size=32, shuffle=False)
        else:
            train_dl_local, _, _, _ = get_dataloader(args.dataset, args.datadir, args.batch_size, 32, dataidx[net_id])
            _, test_dl_global, _, _ = get_dataloader(args.dataset, args.datadir, args.batch_size, 32)
        # Evaluate against the shared held-out set when one is provided.
        if remain_test_dl is not None:
            test_dl_global = remain_test_dl
        train_net(net_id, net, train_dl_local, test_dl_global, args.epochs, args.lr, args.optimizer, device=device)
    return list(nets.values())
def get_dataloader(dataset, datadir, train_bs, test_bs, dataidxs=None, no_trans=None):
    """Build train/test DataLoaders (and datasets) for a supported dataset.

    Args:
        dataset: one of 'mnist', 'svhn', 'celeba', 'xray'.
        datadir: root data directory.
        train_bs / test_bs: train and test batch sizes.
        dataidxs: optional index subset restricting the training split.
        no_trans: if 'test', the test split gets no transform (raw images).

    Returns:
        (train_dl, test_dl, train_ds, test_ds).

    Note: an unsupported `dataset` name raises UnboundLocalError (unchanged
    from the original — no explicit validation is performed here).
    `n_workers` is a module-level global used for the training loader.
    """
    if dataset in ('mnist', 'svhn'):
        if dataset == 'mnist':
            dl_obj = MNIST_truncated
            mean, std = (0.1307,), (0.3081,)
        else:
            dl_obj = SVHN_custom
            mean, std = (0.4376821, 0.4437697, 0.47280442), (0.19803012, 0.20101562, 0.19703614)
        transform_train = transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean, std)])
        transform_test = transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean, std)])
        if no_trans == 'test':
            transform_test = None
        train_ds = dl_obj(datadir, dataidxs=dataidxs, train=True, transform=transform_train, download=True)
        test_ds = dl_obj(datadir, train=False, transform=transform_test, download=True)
    elif dataset in ('celeba', 'xray'):
        # Both image datasets share the same 32x32 resize/crop/normalize pipeline
        # (deduplicated from two identical copies in the original).
        resize_norm = [
            transforms.Resize(32),
            transforms.CenterCrop(32),
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
        ]
        transform_train = transforms.Compose(resize_norm)
        transform_test = transforms.Compose(resize_norm)
        if no_trans == 'test':
            transform_test = None
        if dataset == 'celeba':
            train_ds = CelebA_custom(datadir, dataidxs=dataidxs, split='train', target_type="attr", transform=transform_train, download=True)
            test_ds = CelebA_custom(datadir, split='test', target_type="attr", transform=transform_test, download=True)
        else:
            # xray data lives in pre-split train/test subdirectories.
            train_ds = ImageFolder_custom(datadir+'./train/', dataidxs=dataidxs, transform=transform_train)
            test_ds = ImageFolder_custom(datadir+'./test/', transform=transform_test)
    # Loader construction is identical across datasets, so it is hoisted here.
    train_dl = data.DataLoader(dataset=train_ds, batch_size=train_bs, shuffle=True, num_workers=n_workers)
    test_dl = data.DataLoader(dataset=test_ds, batch_size=test_bs, shuffle=False)
    return train_dl, test_dl, train_ds, test_ds
def get_prediction_labels(models, n_classes, dataloader, args, gamma=None, method="max_vote", train_cls_counts=None,
                          uniform_weights=True, sanity_weights=False, is_subset=0, is_final_student = False, device="cpu"):
    """Label the samples in `dataloader` with the (optionally noisy) ensemble
    prediction of `models`, writing the labels back into the dataset IN PLACE.

    The predicted label of each batch is stored into the underlying dataset's
    `.target` tensor via the per-sample `index` yielded by the loader,
    unwrapping up to `is_subset` levels of torch.utils.data.Subset nesting.
    The loader can then be reused as a pseudo-labelled training set for a
    student network (PATE-style distillation).

    Args:
        models: list of teacher networks; switched to eval mode here.
        n_classes: number of output classes.
        dataloader: yields (x, target, index) triples.
        args: experiment arguments (prob_threshold, apply_consistency,
            n_parties, n_partition, ...).
        gamma: Laplace noise scale for noisy-max voting; None or 0 disables it.
        method: "averaging" (weighted soft average) or "max_vote" (hard votes).
        train_cls_counts: per-class counts for non-uniform averaging weights.
        uniform_weights / sanity_weights: weighting scheme for "averaging".
        is_subset: Subset nesting depth to unwrap when writing labels (0-2).
        is_final_student: enables the per-party consistency voting path.
        device: torch device string.

    Returns:
        (pred_labels_list, top2_counts_differ_one, vote_counts_save):
        per-batch predicted label lists, the count of samples whose top-2 vote
        counts differ by at most 1 (a privacy/confidence diagnostic), and the
        (n_samples, n_classes) matrix of raw vote counts.

    NOTE(review): models put into eval mode are never restored to train mode
    (`was_training` is recorded but unused afterwards); `correct` is declared
    but never updated (accuracy bookkeeping is commented out).
    """
    # correct, total = 0, 0
    # true_labels_list = []
    pred_labels_list = []
    # Record which models were training so the caller could restore them.
    was_training = [False]*len(models)
    for i, model in enumerate(models):
        if model.training:
            was_training[i] = True
            model.eval()
    if method == "averaging":
        # Choose the per-model, per-class weighting scheme for soft averaging.
        if uniform_weights is True:
            weights_list = prepare_uniform_weights(n_classes, len(models))
        elif sanity_weights is True:
            weights_list = prepare_sanity_weights(n_classes, len(models))
        else:
            weights_list = prepare_weight_matrix(n_classes, train_cls_counts)
        weights_norm = normalize_weights(weights_list)
    # Accumulates raw (noise-free) vote counts, one column per sample.
    vote_counts_save = np.empty((n_classes, 0), dtype=int)
    correct, total = 0, 0
    top2_counts_differ_one = 0
    with torch.no_grad():
        for batch_idx, (x, target, index) in enumerate(dataloader):
            x = x.to(device)
            # target = target.to(device)
            # target = target.long()
            if method == "averaging":
                # Soft ensemble: weighted average of model outputs, then argmax.
                out = get_weighted_average_pred(models, weights_norm, x, device=device)
                _, pred_label = torch.max(out, 1)
            elif method == "max_vote":
                # Hard ensemble: each teacher casts one vote per sample
                # (thresholded by args.prob_threshold inside get_pred_votes).
                votes = get_pred_votes(models, x, args.prob_threshold, device=device)
                # Reshape to (n_teachers, batch_size).
                votes_view = votes.view(-1, x.data.size()[0])
                vote_counts_real = torch.LongTensor([]).to(device)
                for class_id in range(n_classes):
                    if (args.apply_consistency == 2) or (is_final_student and args.apply_consistency):
                        # Consistency mode: tally votes per party (each party
                        # owns a contiguous block of args.n_partition teachers).
                        vote_count_real = torch.zeros(x.data.size()[0], dtype=torch.long, device=device)
                        for pid in range(args.n_parties):
                            votes_view_perparty = votes_view[pid*args.n_partition : (pid+1)*args.n_partition]
                            vote_count_real_party = (votes_view_perparty == class_id).sum(dim=0, dtype=torch.long)
                            # vote_count_real_party = vote_count_real_party * ((vote_count_real_party>=math.ceil(args.n_partition*args.nvote_threshold)).long())
                            vote_count_real += vote_count_real_party
                    else:
                        # Plain tally over all teachers for this class.
                        vote_count_real = (votes_view == class_id).sum(dim=0)
                    vote_counts_real = torch.cat((vote_counts_real, vote_count_real), dim=0)
                # print("vote_counts_real:", vote_counts_real)
                # print("vote counts view:", vote_counts_real.view(-1,x.data.size()[0]))
                # Persist this batch's raw counts for the diagnostics below.
                vote_counts_save = np.append(vote_counts_save, vote_counts_real.view(-1,x.data.size()[0]).to("cpu").numpy(), axis=1)
                # print("vote_counts_save:", vote_counts_save)
                if gamma is None or gamma == 0:
                    # Noise-free argmax over the vote counts.
                    # pred_label, _ = torch.mode(votes_view, dim=0)
                    _, pred_label = torch.max(vote_counts_real.view(-1, x.data.size()[0]), 0)
                else:
                    # Noisy-max: re-tally, optionally zero out non-unanimous
                    # counts, add per-count Laplace(1/gamma) noise, then argmax.
                    vote_counts = torch.FloatTensor([]).to(device)
                    for class_id in range(n_classes):
                        vote_count = (votes_view==class_id).sum(dim=0).float()
                        if args.apply_consistency and is_final_student:
                            # Keep only unanimous votes across the partition.
                            for idx, vote in enumerate(vote_count):
                                if vote != args.n_partition:
                                    vote_count[idx] = 0
                        for i in range(len(vote_count)):
                            vote_count[i]+=np.random.laplace(loc=0.0, scale=float(1.0/gamma))
                        vote_counts=torch.cat((vote_counts,vote_count),dim=0)
                    # print("vote_counts:", vote_counts.to("cpu"))
                    # print("vote_counts view:", vote_counts.view(-1,x.data.size()[0]).to("cpu"))
                    _, pred_label=torch.max(vote_counts.view(-1,x.data.size()[0]),0)
            total += x.data.size()[0]
            # correct += (pred_label == target.data).sum().item()
            # Write the predicted labels back into the (possibly nested
            # Subset-wrapped) dataset so the loader yields them next pass.
            if device == "cpu":
                pred_labels_list.append(list(pred_label.numpy()))
                # true_labels_list.append(list(target.data.numpy()))
                if is_subset == 2:
                    dataloader.dataset.dataset.dataset.target[index] = torch.LongTensor(list(pred_label.numpy()))
                elif is_subset == 1:
                    dataloader.dataset.dataset.target[index]=torch.LongTensor(list(pred_label.numpy()))
                else:
                    dataloader.dataset.target[index]=torch.LongTensor(list(pred_label.numpy()))
            else:
                pred_labels_list.append(list(pred_label.cpu().numpy()))
                # true_labels_list.append(list(target.data.cpu().numpy()))
                if is_subset == 2:
                    dataloader.dataset.dataset.dataset.target[index] = torch.LongTensor(list(pred_label.cpu().numpy()))
                elif is_subset == 1:
                    dataloader.dataset.dataset.target[index] = torch.LongTensor(list(pred_label.cpu().numpy()))
                else:
                    dataloader.dataset.target[index] = torch.LongTensor(list(pred_label.cpu().numpy()))
            # print("target:", target)
            # target = torch.LongTensor(list(pred_label.numpy())).cpu()
    # Diagnostics over the raw vote counts: one row per sample after transpose.
    # NOTE(review): top1/top2/top_diff histograms are computed but not returned;
    # the 500-bin size presumably bounds the max vote count — confirm.
    vote_counts_save = np.transpose(vote_counts_save)
    top1_class_counts = np.zeros(500)
    top2_class_counts = np.zeros(500)
    top_diff_counts = np.zeros(500)
    for row in vote_counts_save:
        # Two largest counts, ascending: [runner-up, winner].
        top2_counts = row[np.argsort(row)[-2:]]
        if top2_counts[1] - top2_counts[0] <= 1:
            top2_counts_differ_one+=1
        top_diff_counts[top2_counts[1] - top2_counts[0]] += 1
        top1_class_counts[top2_counts[1]] += 1
        top2_class_counts[top2_counts[0]] += 1
    return pred_labels_list, top2_counts_differ_one, vote_counts_save
def train_a_student(tea_nets, public_dataloader, public_ds, remain_test_dataloader, stu_net, n_classes, args,
                    gamma=None, is_subset=0, is_final_student=False, filter_query=0,device = 'cpu'):
    """Distill an ensemble of teacher networks into a single student network.

    The teachers first (noisily) label the public data in place via
    get_prediction_labels; the student is then trained on those pseudo-labels
    with cross-entropy.

    Args:
        tea_nets: list of teacher networks.
        public_dataloader: loader over the public/query data to pseudo-label.
        public_ds: the underlying public dataset (used for filtered reloading).
        remain_test_dataloader: held-out test loader for periodic evaluation.
        stu_net: the student network to train (modified in place).
        n_classes: number of output classes.
        args: experiment arguments (optimizer, stu_lr, reg, epoch counts, ...).
        gamma: Laplace-noise scale forwarded to the voting step.
        is_subset: Subset nesting depth of the public dataset (forced to 0
            when an external public dataset directory is configured).
        is_final_student: selects args.final_stu_epochs over args.stu_epochs.
        filter_query: when truthy, keep only queries whose top-2 vote margin
            exceeds 2.
        device: torch device string.

    Returns:
        (train_acc, top2_counts_differ_one, vote_counts_save) — the last
        measured training accuracy plus the voting diagnostics.
    """
    # An external public dataset is never a Subset wrapper.
    if args.pub_datadir is not None:
        is_subset = 0
    # Teachers label the public data in place and report vote diagnostics.
    public_labels, top2_counts_differ_one, vote_counts_save = get_prediction_labels(tea_nets, n_classes, public_dataloader, args, gamma=gamma,
                                    method=args.ensemble_method, is_subset=is_subset, is_final_student= is_final_student, device=device)
    if filter_query:
        # Keep only samples where the ensemble is confident (margin > 2).
        confident_query_idx = []
        for sample_idx, counts in enumerate(vote_counts_save):
            runner_up, winner = counts[np.argsort(counts)[-2:]]
            if winner - runner_up > 2:
                confident_query_idx.append(sample_idx)
        print("len confident query idx:", len(confident_query_idx))
        logger.info("len confident query idx: %d" % len(confident_query_idx))
        # local_query_ds = data.Subset(public_ds, confident_query_idx)
        public_dataloader = data.DataLoader(dataset=public_ds, batch_size=32, sampler=data.SubsetRandomSampler(confident_query_idx), num_workers=n_workers)
    logger.info('len public_labels: %d' % len(public_labels))
    logger.info('Training student network')
    logger.info('n_public: %d' % len(public_ds))
    stu_net.to(device)
    train_acc = compute_accuracy(stu_net, public_dataloader, device=device)
    logger.info('>> Pre-Training Training accuracy: {}'.format(train_acc))
    # Build the optimizer over trainable parameters only.
    trainable_params = [p for p in stu_net.parameters() if p.requires_grad]
    if args.optimizer == 'adam':
        optimizer = optim.Adam(trainable_params, lr=args.stu_lr, weight_decay=args.reg)
    elif args.optimizer == 'adam_ams':
        optimizer = optim.Adam(trainable_params, lr=args.stu_lr, weight_decay=args.reg, amsgrad=True)
    elif args.optimizer == 'sgd':
        optimizer = optim.SGD(trainable_params, lr=args.stu_lr, momentum=0.9, weight_decay=args.reg)
    criterion = nn.CrossEntropyLoss().to(device)
    # The final student trains longer than intermediate (local) students.
    n_epoch = args.final_stu_epochs if is_final_student else args.stu_epochs
    for epoch in range(n_epoch):
        batch_losses = []
        for x, target, _ in public_dataloader:
            x, target = x.to(device), target.to(device)
            optimizer.zero_grad()
            x.requires_grad = True
            target.requires_grad = False
            target = target.long()
            loss = criterion(stu_net(x), target)
            loss.backward()
            optimizer.step()
            batch_losses.append(loss.item())
        logger.info('Epoch: %d Loss: %f' % (epoch, sum(batch_losses) / len(batch_losses)))
        # Periodically (and always at epoch 0) measure accuracies.
        if epoch % 10 == 0:
            train_acc = compute_accuracy(stu_net, public_dataloader, device=device)
            test_acc = compute_accuracy(stu_net, remain_test_dataloader, device=device)
            logger.info('>> Training accuracy: %f Test accuracy: %f' % (train_acc, test_acc))
    logger.info(' ** Training complete **')
    return train_acc, top2_counts_differ_one, vote_counts_save
if __name__ == '__main__':
# torch.set_printoptions(profile="full")
args = get_args()
mkdirs(args.logdir)
mkdirs(args.modeldir)
if args.log_file_name is None:
argument_path='experiment_arguments-%s.json' % datetime.datetime.now().strftime("%Y-%m-%d-%H:%M-%S")
else:
argument_path=args.log_file_name+'.json'
with open(os.path.join(args.logdir, argument_path), 'w') as f:
json.dump(str(args), f)
device = torch.device(args.device)
# logging.basicConfig(filename='test.log', level=logger.info, filemode='w')
# logging.info("test")
for handler in logging.root.handlers[:]:
logging.root.removeHandler(handler)
if args.log_file_name is None:
args.log_file_name = 'experiment_log-%s-%d-%d' % (datetime.datetime.now().strftime("%Y-%m-%d-%H:%M-%S"),args.init_seed, args.trials)
log_path=args.log_file_name+'.log'
logging.basicConfig(
filename=os.path.join(args.logdir, log_path),
# filename='/home/qinbin/test.log',
format='%(asctime)s %(levelname)-8s %(message)s',
datefmt='%m-%d %H:%M', level=logging.INFO, filemode='w')
logger = logging.getLogger()
logger.setLevel(logging.INFO)
logger.info(device)
if args.npartyseed is not None:
args.n_parties = int(args.npartyseed[0:2])
args.init_seed = int(args.npartyseed[-1])
if args.n_partition == 1:
args.apply_consistency = 0
test_accs=[]
for n_exp in range(args.trials):
seed = n_exp + args.init_seed
logger.info("#" * 100)
logger.info("Executing Trial %d with seed %d" % (n_exp, seed))
np.random.seed(seed)
torch.manual_seed(seed)
logger.info("Partitioning data")
if args.alg == "pate":
args.partition = 'homo'
X_train, y_train, X_test, y_test, net_dataidx_map, traindata_cls_counts = partition_data(
args.dataset, args.datadir, args.logdir, args.partition, args.n_parties, beta=args.beta, min_require=args.min_require)
n_classes = len(np.unique(y_train))
if args.model == 'lr':
public_data_size = int(len(y_test) * args.auxiliary_data_portion)
query_data_size = int(len(y_test) * args.query_portion)
local_query_data_size = int(len(y_test) * args.local_query_portion)
if args.dataset in libsvm_datasets:
train_ds_global = CustomTensorDataset(torch.tensor(X_train.toarray(), dtype=torch.float32),
torch.tensor(y_train, dtype=torch.long))
public_ds = CustomTensorDataset(torch.tensor(X_test[:public_data_size].toarray(), dtype=torch.float32),
torch.tensor(y_test[:public_data_size], dtype=torch.long))
remain_test_ds = CustomTensorDataset(torch.tensor(X_test[public_data_size:].toarray(), dtype=torch.float32),
torch.tensor(y_test[public_data_size:], dtype=torch.long))
train_dl_global = data.DataLoader(dataset=train_ds_global, batch_size=args.batch_size, shuffle=True)
public_dl = data.DataLoader(dataset=public_ds, batch_size=32, shuffle=True)
remain_test_dl = data.DataLoader(dataset=remain_test_ds, batch_size=32, shuffle=False)
query_dl = data.DataLoader(dataset=public_ds, batch_size=32,
sampler=data.SubsetRandomSampler(list(range(query_data_size))))
local_query_dl = data.DataLoader(dataset=public_ds, batch_size=32,
sampler=data.SubsetRandomSampler(list(range(local_query_data_size))))
elif args.dataset not in libsvm_datasets:
train_dl_global, test_dl_global, train_ds_global, test_ds_global = get_dataloader(args.dataset,
args.datadir,
args.batch_size,
32)
print("len train_ds_global:", len(train_ds_global))
public_data_size = int(len(test_ds_global) * args.auxiliary_data_portion)
query_data_size = int(len(test_ds_global) * args.query_portion)
local_query_data_size = int(len(test_ds_global) * args.local_query_portion)
remain_data_size = len(test_ds_global) - public_data_size
# unquery_size = len(test_ds_global) - query_data_size
# local_unquery_size = len(test_ds_global) - local_query_data_size
if args.dataset !='xray':
public_ds, remain_test_ds = data.random_split(test_ds_global,
[public_data_size, remain_data_size])
public_dl = data.DataLoader(dataset=public_ds, batch_size=32, shuffle=True, num_workers=n_workers)
# query_dl = data.DataLoader(dataset=query_ds, batch_size=32, shuffle=False)
# local_query_dl = data.DataLoader(dataset=local_query_ds, batch_size=32, shuffle=False)
remain_test_dl = data.DataLoader(dataset=remain_test_ds, batch_size=32, shuffle=False)
query_dl = data.DataLoader(dataset=public_ds, batch_size=32,
sampler=data.SubsetRandomSampler(list(range(query_data_size))), num_workers=n_workers)
local_query_dl = data.DataLoader(dataset=public_ds, batch_size=32,
sampler=data.SubsetRandomSampler(list(range(local_query_data_size))), num_workers=n_workers)
else:
remain_test_ds = test_ds_global
remain_test_dl = test_dl_global
transform = transforms.Compose([
transforms.Resize(32),
transforms.CenterCrop(32),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
])
# use rsna pneumonia as public data
df_train = pd.read_csv(args.pub_datadir + './stage_2_train_labels.csv')
pIds_train = df_train['patientId'].unique()
#pIds_train = df_train['Target']
public_ds = PneumoniaDataset(root=args.pub_datadir + './stage_2_train_images/', subset='train', pIds=pIds_train, transform=transform, warping=True)
# use covid chest dataset as public data
# public_ds = ImageFolder_public(root=args.pub_datadir, transform=transform)
public_dl = data.DataLoader(dataset=public_ds, batch_size=32, shuffle=True, num_workers=n_workers)
if query_data_size == public_data_size and local_query_data_size == public_data_size:
query_dl = public_dl
local_query_dl = public_dl
else:
query_dl = data.DataLoader(dataset=public_ds, batch_size=32,
sampler=data.SubsetRandomSampler(list(range(query_data_size))), num_workers=n_workers)
local_query_dl = data.DataLoader(dataset=public_ds, batch_size=32,
sampler=data.SubsetRandomSampler(list(range(local_query_data_size))), num_workers=n_workers)
if args.alg == 'fedavg':
logger.info("Initializing nets")
args.n_teacher_each_partition = 1
args.is_local_split = 0
nets, local_model_meta_data, layer_type = init_nets(args.net_config, args.dropout_p, args.n_parties, args)
# torch.manual_seed(seed)
global_models, global_model_meta_data, global_layer_type = init_nets(args.net_config, 0, 1, args)
global_model = global_models[0]
for round in range(args.comm_round):
logger.info("in comm round:" + str(round))
global_para = get_trainable_parameters(global_model)
# global_para = get_trainable_parameters(nets[0])
for net_id, net in nets.items():
put_trainable_parameters(net, global_para)
local_train_net(nets, args, net_dataidx_map, X_train, y_train, X_test, y_test, remain_test_dl = remain_test_dl, local_split=False, device=device)
# local_train_net(nets, args, net_dataidx_map, local_split=False, device=device)
# update global model
total_data_points = sum([len(net_dataidx_map[r]) for r in range(args.n_parties)])
fed_avg_freqs = [len(net_dataidx_map[r]) / total_data_points for r in range(args.n_parties)]
weights = [get_trainable_parameters(nets[i].cpu()) for i in range(args.n_parties)]
average_weight = sum(weights[i] * fed_avg_freqs[i] for i in range(args.n_parties))
put_trainable_parameters(global_model, average_weight)
logger.info('global n_training: %d' % len(train_dl_global))
logger.info('global n_test: %d' % len(remain_test_dl))
global_model.to(device)
train_acc = compute_accuracy(global_model, train_dl_global, device=device)
test_acc, conf_matrix = compute_accuracy(global_model, remain_test_dl, get_confusion_matrix=True, device=device)
logger.info('>> Global Model Train accuracy: %f' % train_acc)
logger.info('>> Global Model Test accuracy: %f' % test_acc)
elif args.alg == 'fedprox':
logger.info("Initializing nets")
args.n_teacher_each_partition = 1
args.is_local_split = 0
nets, local_model_meta_data, layer_type = init_nets(args.net_config, args.dropout_p, args.n_parties, args)
global_models, global_model_meta_data, global_layer_type = init_nets(args.net_config, 0, 1, args)
global_model = global_models[0]
for round in range(args.comm_round):
logger.info("in comm round:" + str(round))
global_para = get_trainable_parameters(global_model)
for net_id, net in nets.items():
put_trainable_parameters(net, global_para)
local_train_net_fedprox(nets, global_model, args, net_dataidx_map, X_train, y_train, X_test, y_test, remain_test_dl = remain_test_dl, local_split=False, device=device)
global_model.to('cpu')
# update global model
total_data_points = sum([len(net_dataidx_map[r]) for r in range(args.n_parties)])
fed_avg_freqs = [len(net_dataidx_map[r]) / total_data_points for r in range(args.n_parties)]
weights = [get_trainable_parameters(nets[i].cpu()) for i in range(args.n_parties)]
average_weight = sum(weights[i] * fed_avg_freqs[i] for i in range(args.n_parties))
put_trainable_parameters(global_model, average_weight)
logger.info('global n_training: %d' % len(train_dl_global))
logger.info('global n_test: %d' % len(remain_test_dl))
train_acc = compute_accuracy(global_model, train_dl_global)
test_acc, conf_matrix = compute_accuracy(global_model, remain_test_dl, get_confusion_matrix=True)
logger.info('>> Global Model Train accuracy: %f' % train_acc)
logger.info('>> Global Model Test accuracy: %f' % test_acc)
elif args.alg == 'scaffold':
logger.info("Initializing nets")
args.n_teacher_each_partition = 1
args.is_local_split = 0
nets, local_model_meta_data, layer_type = init_nets(args.net_config, args.dropout_p, args.n_parties, args)
global_models, global_model_meta_data, global_layer_type = init_nets(args.net_config, 0, 1, args)
global_model = global_models[0]
clients_c, _, _ = init_nets(args.net_config, args.dropout_p, args.n_parties, args)
servers_c, _, _ = init_nets(args.net_config, args.dropout_p, 1, args)
server_c = servers_c[0]
server_c_w = server_c.state_dict()
for key in server_c_w:
server_c_w[key] *= 0.0
server_c.load_state_dict(server_c_w)
for param in server_c.parameters():
param.requires_grad = False
for net_id, net in clients_c.items():
for param in net.parameters():
param.requires_grad = False
client_c_w = clients_c[net_id].state_dict()
for key in client_c_w:
client_c_w[key] *= 0.0
clients_c[net_id].load_state_dict(client_c_w)
for round in range(args.comm_round):
logger.info("in comm round:" + str(round))
global_para = get_trainable_parameters(global_model)
# global_para = get_trainable_parameters(nets[0])
for net_id, net in nets.items():
put_trainable_parameters(net, global_para)
local_train_net_scaffold(nets, global_model, args, net_dataidx_map, X_train, y_train, X_test, y_test, server_c, clients_c, remain_test_dl = remain_test_dl, local_split=False, device=device)
global_model.to('cpu')
# update global model
total_data_points = sum([len(net_dataidx_map[r]) for r in range(args.n_parties)])
fed_avg_freqs = [len(net_dataidx_map[r]) / total_data_points for r in range(args.n_parties)]
weights = [get_trainable_parameters(nets[i].cpu()) for i in range(args.n_parties)]
average_weight = sum(weights[i] * fed_avg_freqs[i] for i in range(args.n_parties))
put_trainable_parameters(global_model, average_weight)
if args.new_scaffold == 0:
server_c_w = server_c.state_dict()
for net_id, net in clients_c.items():
net_para = net.state_dict()
if net_id == 0:
for key in net_para:
server_c_w[key] = net_para[key] * fed_avg_freqs[net_id]
else:
for key in net_para:
server_c_w[key] += net_para[key] * fed_avg_freqs[net_id]
server_c.load_state_dict(server_c_w)
logger.info('global n_training: %d' % len(train_dl_global))
logger.info('global n_test: %d' % len(remain_test_dl))
train_acc = compute_accuracy(global_model, train_dl_global)
test_acc, conf_matrix = compute_accuracy(global_model, remain_test_dl, get_confusion_matrix=True)
logger.info('>> Global Model Train accuracy: %f' % train_acc)
logger.info('>> Global Model Test accuracy: %f' % test_acc)
elif args.alg == 'local_training':
args.n_teacher_each_partition = 1
args.is_local_split = 0
avg_acc = 0.0
local_acc_list = []
if args.model == 'tree':
# logger.info("Initializing trees")
public_data_size = int(len(y_test) * args.auxiliary_data_portion)
for party_id in range(args.n_parties):
local_forest = RandomForestClassifier(max_depth = args.max_tree_depth, n_estimators = args.n_stu_trees)
local_forest.fit(X_train[net_dataidx_map[party_id]], y_train[net_dataidx_map[party_id]])
local_acc = local_forest.score(X_test[public_data_size:], y_test[public_data_size:])
logger.info("In party %d local test acc: %f" % (party_id, local_acc))
avg_acc += local_acc
local_acc_list.append(local_acc)
avg_acc /= args.n_parties
logger.info("average test acc: %f" % avg_acc)
logger.info("min test acc: %f" % min(local_acc_list))
logger.info("max test acc: %f" % max(local_acc_list))
elif args.model == 'gbdt':
public_data_size = int(len(y_test) * args.auxiliary_data_portion)
param = {'max_depth': args.max_tree_depth, 'objective': 'binary:logistic', 'gamma':1, 'lambda':1, 'eta':0.1}
for party_id in range(args.n_parties):
gbdt = xgb.XGBClassifier(max_depth=args.max_tree_depth, n_estimators = args.n_stu_trees, learning_rate = args.lr, gamma = 1, reg_lambda = 1, tree_method='hist')
gbdt.fit(X_train[net_dataidx_map[party_id]], y_train[net_dataidx_map[party_id]])
local_acc = gbdt.score(X_test[public_data_size:], y_test[public_data_size:])
logger.info("In party %d local test acc: %f" % (party_id, local_acc))
avg_acc += local_acc
local_acc_list.append(local_acc)
# dtrain = xgb.DMatrix(X_train[net_dataidx_map[party_id]], y_train[net_dataidx_map[party_id]])
# gbdt = xgb.train(param, dtrain, args.n_stu_trees)
avg_acc /= args.n_parties
logger.info("average test acc: %f" % avg_acc)
logger.info("min test acc: %f" % min(local_acc_list))
logger.info("max test acc: %f" % max(local_acc_list))
else:
# logger.info("Initializing nets")
if args.local_training_epochs is None:
args.local_training_epochs = args.stu_epochs
nets, local_model_meta_data, layer_type = init_nets(args.net_config, args.dropout_p, args.n_parties, args)
local_train_net(nets, args, net_dataidx_map, X_train, y_train, X_test, y_test, remain_test_dl=remain_test_dl, local_split=False,
device=device)
elif args.alg == 'all_in':
if args.model == 'tree':
public_data_size = int(len(y_test) * args.auxiliary_data_portion)
forest = RandomForestClassifier(max_depth = args.max_tree_depth, n_estimators = args.n_stu_trees)
forest.fit(X_train, y_train)
acc = forest.score(X_test[public_data_size:], y_test[public_data_size:])
logger.info("all in test acc: %f" % acc)
elif args.model == 'gbdt':
public_data_size = int(len(y_test) * args.auxiliary_data_portion)
gbdt = xgb.XGBClassifier(max_depth=args.max_tree_depth, n_estimators = args.n_stu_trees, learning_rate = args.lr, gamma = 1, reg_lambda = 1, tree_method='hist')
gbdt.fit(X_train, y_train,
eval_set=[(X_train, y_train), (X_test[public_data_size:], y_test[public_data_size:])],
eval_metric='error',
verbose=True)
evals_result = gbdt.evals_result()
print("evals result:", evals_result)
logger.info("evals result: " + ' '.join([str(elem) for elem in evals_result['validation_1']['error']]))
logger.info("eval result 50 rounds: %f" % evals_result['validation_1']['error'][49])
logger.info("eval last result: %f" % evals_result['validation_1']['error'][args.n_stu_trees-1])
else:
nets, _, _ = init_nets(args.net_config, args.dropout_p, 1, args)
nets[0].to(device)
trainacc, testacc = train_net(0, nets[0], train_dl_global, remain_test_dl, args.stu_epochs, args.lr,
args.optimizer, device=device)
logger.info("all in test acc: %f" % testacc)
elif args.alg == 'fedenb':
logger.info("Initializing nets")
nets, local_model_meta_data, layer_type = init_nets(args.net_config, args.dropout_p, args.n_parties, args, args.n_teacher_each_partition)
logger.info("Training nets")
nets_list = local_train_net(nets, args, net_dataidx_map, X_train, y_train, X_test, y_test, local_split=False, device=device)
train_dl_global, test_dl_global, _, _ = get_dataloader(args.dataset, args.datadir, args.batch_size, 32)
logger.info("Compute uniform ensemble accuracy")
uens_train_acc, _ = compute_ensemble_accuracy(nets_list, train_dl_global, n_classes, ensemble_method=args.ensemble_method, uniform_weights=True, device=device)
uens_test_acc, _ = compute_ensemble_accuracy(nets_list, test_dl_global, n_classes, ensemble_method=args.ensemble_method, uniform_weights=True, device=device)
logger.info("Uniform ensemble (Train acc): {}".format(uens_train_acc))
logger.info("Uniform ensemble (Test acc): {}".format(uens_test_acc))
elif args.alg == 'fedboost':
if args.model != 'tree':
print("not supported yet")
exit(1)
logger.info("Initializing trees")
trees = init_trees(args.max_tree_depth, args.n_parties, args.n_teacher_each_partition, libsvm_datasets[args.dataset])
logger.info("Training trees")
trees_list = local_train_trees(trees, args, net_dataidx_map, X_train, y_train, X_test, y_test)
ens_train_acc = compute_tree_ensemble_accuracy(trees_list, X_train, y_train)
ens_test_acc = compute_tree_ensemble_accuracy(trees_list, X_test, y_test)
logger.info("All trees ensemble train acc: %f" % ens_train_acc)
logger.info("All trees ensemble test acc: %f" % ens_test_acc)
fedboost(trees, args, net_dataidx_map, X_train, y_train, X_test, y_test, libsvm_datasets[args.dataset])
elif args.alg =='pate':
if args.model == 'tree' or args.model == 'gbdt' or args.model == 'gbdt_tree':
logger.info("Initializing trees")
public_data_size = int(len(y_test) * args.auxiliary_data_portion)
query_data_size = int(len(y_test) * args.query_portion)
local_query_data_size = int(len(y_test) * args.local_query_portion)
tea_trees = []
n_parti_top2_differ_one = np.zeros(args.n_parties)
n_instances_portion = np.zeros(args.n_parties)
vote_counts_parties = []
# filter_query=0
trees = init_trees(args.max_tree_depth, 1, args.n_teacher_each_partition, libsvm_datasets[args.dataset], args)
# logger.info("In party %d Train local trees" % party_id)
central_train_trees_in_a_party(trees, args, X_train, y_train, X_test, y_test)
if args.model == 'tree':
stu_forest = RandomForestClassifier(max_depth = args.max_tree_depth, n_estimators=args.n_stu_trees)
elif args.model == 'gbdt' or args.model == 'gbdt_tree':
stu_forest = xgb.XGBClassifier(max_depth=args.max_tree_depth, n_estimators = args.n_stu_trees, learning_rate=args.lr,
gamma=1, reg_lambda=1, tree_method='hist')
filter_query = args.filter_query
if args.dp_level == 2:
gamma = args.gamma
top2_counts_differ_one, vote_counts_in_a_party = train_a_student_tree(trees, X_test[:local_query_data_size],
y_test[:local_query_data_size], 2,
stu_forest, gamma, filter_query)
else:
gamma = 0
top2_counts_differ_one, vote_counts_in_a_party = train_a_student_tree(trees, X_test[:local_query_data_size],
y_test[:local_query_data_size],
2, stu_forest,
gamma, filter_query)
test_acc = stu_forest.score(X_test[public_data_size:], y_test[public_data_size:])
logger.info("central pate test acc %f" % test_acc)
else:
nets, _, _ = init_nets(args.net_config, args.dropout_p, args.n_parties, args,
args.n_teacher_each_partition)
nets_list = central_train_net_on_a_party(nets, args, X_train, y_train, X_test,
y_test, remain_test_dl, local_split=args.is_local_split,
device=device)
# nets_list_partition.append(nets_list)
if args.train_local_student:
if not args.with_unlabeled:
print("need public unlabeled data!")
exit(1)
# logger.info("in party %d" % party_id)
stu_nets, _, _ = init_nets(args.net_config, args.dropout_p, 1, args, 1)
stu_net = stu_nets[0]
if args.dp_level == 2:
gamma = args.gamma
_, top2_counts_differ_one, vote_counts_in_a_party = train_a_student(nets_list,
local_query_dl,
public_ds,
remain_test_dl,
stu_net, n_classes,
args, gamma=gamma,
is_subset=1,
filter_query=0,
device=device)
else:
gamma = 0
_, top2_counts_differ_one, vote_counts_in_a_party = train_a_student(nets_list, local_query_dl,
public_ds,
remain_test_dl, stu_net,
n_classes, args, gamma=gamma,
is_subset=1,
filter_query=0,
device=device)
local_stu_test_acc = compute_accuracy(stu_net, remain_test_dl, device=device)
# local_stu_test_acc = compute_accuracy(stu_net, remain_test_dl, device=device)
logger.info("stu_test_acc: %f" % local_stu_test_acc)
# tea_nets.append(stu_net)
else:
for net in nets_list:
tea_nets.append(net)
elif args.alg =='pate2':
args.n_teacher_each_partition = 1
if args.model == 'tree' or args.model == 'gbdt':
logger.info("Initializing trees")
public_data_size = int(len(y_test) * args.auxiliary_data_portion)
query_data_size = int(len(y_test) * args.query_portion)
local_query_data_size = int(len(y_test) * args.local_query_portion)
tea_trees = []
n_parti_top2_differ_one = np.zeros(args.n_parties)
n_instances_portion = np.zeros(args.n_parties)
vote_counts_parties = []
# filter_query=0
# logger.info("In party %d Train local trees" % party_id)
tree_list = []
for party_id in range(args.n_parties):
trees = init_trees(args.max_tree_depth, 1, 1,
libsvm_datasets[args.dataset], args)
local_train_trees_in_a_party(trees, args, net_dataidx_map[party_id], X_train, y_train, X_test, y_test)
tree_list.append(trees[0])
if args.model == 'tree':
stu_forest = RandomForestClassifier(max_depth = args.max_tree_depth, n_estimators=args.n_stu_trees)
elif args.model == 'gbdt':
stu_forest = xgb.XGBClassifier(max_depth=args.max_tree_depth, n_estimators = args.n_stu_trees, learning_rate=args.lr,
gamma=1, reg_lambda=1, tree_method='hist')
filter_query = args.filter_query
if args.dp_level == 2:
gamma = args.gamma
top2_counts_differ_one, vote_counts_in_a_party = train_a_student_tree(tree_list, X_test[:local_query_data_size],
y_test[:local_query_data_size], 2,
stu_forest, gamma, filter_query)
else:
gamma = 0
top2_counts_differ_one, vote_counts_in_a_party = train_a_student_tree(tree_list, X_test[:local_query_data_size],
y_test[:local_query_data_size],
2, stu_forest,
gamma, filter_query)
test_acc = stu_forest.score(X_test[public_data_size:], y_test[public_data_size:])
logger.info("central pate test acc %f" % test_acc)
else:
tea_model = []
for party_id in range(args.n_parties):
nets, _, _ = init_nets(args.net_config, args.dropout_p, args.n_parties, args,
args.n_teacher_each_partition)
nets_list = local_train_net_on_a_party(nets, args, net_dataidx_map, party_id, X_train, y_train, X_test,
y_test, remain_test_dl, local_split=0,
device=device)
tea_model.append(nets_list[0])
# nets_list_partition.append(nets_list)
if args.train_local_student:
if not args.with_unlabeled:
print("need public unlabeled data!")
exit(1)
logger.info("in party %d" % party_id)
stu_nets, _, _ = init_nets(args.net_config, args.dropout_p, 1, args, 1)
stu_net = stu_nets[0]
if args.dp_level == 2:
gamma = args.gamma
_, top2_counts_differ_one, vote_counts_in_a_party = train_a_student(tea_model,
local_query_dl,
public_ds,
remain_test_dl,
stu_net, n_classes,
args, gamma=gamma,
is_subset=1,
filter_query=0,
device=device)
else:
gamma = 0
_, top2_counts_differ_one, vote_counts_in_a_party = train_a_student(tea_model, local_query_dl,
public_ds,
remain_test_dl, stu_net,
n_classes, args, gamma=gamma,
is_subset=1,
filter_query=0,
device=device)
local_stu_test_acc = compute_accuracy(stu_net, remain_test_dl, device=device)
# local_stu_test_acc = compute_accuracy(stu_net, remain_test_dl, device=device)
logger.info("stu_test_acc: %f" % local_stu_test_acc)
# tea_nets.append(stu_net)
elif args.alg == 'fedkt' or args.alg == 'fedkt_fedavg' or args.alg=='fedkt_fedprox' or args.alg=='simenb':
if args.fedkt_seed is not None:
np.random.seed(args.fedkt_seed)
torch.manual_seed(args.fedkt_seed)
if args.model == 'tree' or args.model == 'gbdt' or args.model == 'random_forest' or args.model == 'gbdt_ntree' or args.model == 'gbdt_tree':
logger.info("Initializing trees")
if args.n_teacher_each_partition == 1:
args.is_local_split = 0
args.train_local_student = 0
# X_test = np.random.shuffle(X_test)
# y_test = np.random.shuffle(y_test)
public_data_size = int(len(y_test) * args.auxiliary_data_portion)
query_data_size = int(len(y_test) * args.query_portion)
local_query_data_size = int(len(y_test) * args.local_query_portion)
tea_trees = []
n_parti_top2_differ_one = np.zeros(args.n_parties)
n_instances_portion = np.zeros(args.n_parties)
vote_counts_parties = []
# filter_query=0
for party_id in range(args.n_parties):
n_instances_portion[party_id] = len(net_dataidx_map[party_id]) / X_train.shape[0]
for i in range(args.n_partition):
trees = init_trees(args.max_tree_depth, 1, args.n_teacher_each_partition, libsvm_datasets[args.dataset], args)
logger.info("In party %d Train local trees" % party_id)
local_train_trees_in_a_party(trees, args, net_dataidx_map[party_id], X_train, y_train, X_test, y_test)
if args.train_local_student:
if args.model == 'tree' or args.model == 'random_forest':
stu_forest = RandomForestClassifier(max_depth = args.max_tree_depth, n_estimators=args.n_stu_trees)
elif args.model == 'gbdt' or args.model == 'gbdt_ntree' or args.model == 'gbdt_tree':
stu_forest = xgb.XGBClassifier(max_depth=args.max_tree_depth, n_estimators = args.n_stu_trees, learning_rate=args.lr,
gamma=1, reg_lambda=1, tree_method='hist')
filter_query = args.filter_query
if i < args.max_z:
filter_query=0
if args.dp_level == 2:
gamma = args.gamma
top2_counts_differ_one, vote_counts_in_a_party = train_a_student_tree(trees, X_test[:local_query_data_size],
y_test[:local_query_data_size], 2,
stu_forest, gamma, filter_query, args.prob_threshold)
else:
gamma = 0
top2_counts_differ_one, vote_counts_in_a_party = train_a_student_tree(trees, X_test[:local_query_data_size],
y_test[:local_query_data_size],
2, stu_forest,
gamma, filter_query, args.prob_threshold)
# logger.info("vote counts in a local party:")
# logger.info('\n'.join('\t'.join('%d' %x for x in y) for y in vote_counts_in_a_party))
vote_counts_parties = np.append(vote_counts_parties,vote_counts_in_a_party)
print("top2_counts_differ_one: ", top2_counts_differ_one)
logger.info("top2_counts_differ_one: %d" % top2_counts_differ_one)
if top2_counts_differ_one != 0:
n_parti_top2_differ_one[party_id] += 1
tea_trees.append(stu_forest)
else:
for tree in trees:
tea_trees.append(tree)
if args.alg == 'simenb':
simple_ensemble_acc = compute_tree_ensemble_accuracy(tea_trees, X_test[public_data_size:], y_test[public_data_size:])
logger.info("simple ensemble acc: %f" % simple_ensemble_acc)
exit(0)
# vote_counts_parties = np.reshape(vote_counts_parties, (args.n_parties,-1))
if args.model == 'tree' or args.model == 'random_forest':
final_forest = RandomForestClassifier(max_depth=args.max_tree_depth, n_estimators=args.n_final_stu_trees)
elif args.model == 'gbdt' or args.model == 'gbdt_ntree' or args.model == 'gbdt_tree':
final_forest = xgb.XGBClassifier(max_depth=args.max_tree_depth, n_estimators = args.n_final_stu_trees, learning_rate=args.lr,
gamma=1, reg_lambda=1, tree_method='hist')
if args.dp_level == 1:
gamma = args.gamma
_, vote_counts = train_a_student_tree(tea_trees, X_test[:query_data_size],
y_test[:query_data_size], 2, final_forest, gamma, 0, args.prob_threshold,
args.n_partition, args.apply_consistency, is_final_student=True)
else:
gamma = 0
_, vote_counts = train_a_student_tree(tea_trees, X_test[:query_data_size],
y_test[:query_data_size], 2, final_forest, gamma, 0, args.prob_threshold,
args.n_partition, args.apply_consistency, is_final_student=True)
test_acc = final_forest.score(X_test[public_data_size:], y_test[public_data_size:])
logger.info("global test acc %f" % test_acc)
if args.privacy_analysis_file_name is not None:
file_path = os.path.join(args.logdir, args.privacy_analysis_file_name + "-%d" % n_exp)
else:
file_path = os.path.join(args.logdir, log_path + "-%d" % n_exp)
file_path1 = file_path + '-dp0'
file_path2 = file_path + '-dp1'
np.savez(file_path1, n_instances_portion, n_parti_top2_differ_one, vote_counts)
print("vote counts parties:", vote_counts_parties)
if args.train_local_student:
np.savez(file_path2, n_instances_portion, n_parti_top2_differ_one, vote_counts_parties.reshape(-1,2))
avg_acc = 0.0
for party_id in range(args.n_parties):
logger.info("Local training")
local_forest = RandomForestClassifier(max_depth = args.max_tree_depth, n_estimators=args.n_stu_trees)
local_forest.fit(X_train[net_dataidx_map[party_id]], y_train[net_dataidx_map[party_id]])
local_acc = local_forest.score(X_test[public_data_size:], y_test[public_data_size:])
logger.info("In party %d local test acc: %f" % (party_id, local_acc))
avg_acc += local_acc
avg_acc /= args.n_parties
logger.info("avg local acc: %f" % avg_acc)
global_model = final_forest
else:
logger.info("Initializing nets")
if args.n_teacher_each_partition == 1:
args.is_local_split = 0
args.train_local_student = 0
logger.info("Training nets")
tea_nets = []
n_parti_top2_differ_one = np.zeros(args.n_parties)
n_instances_portion = np.zeros(args.n_parties)
vote_counts_parties = []
prob_threshold = args.prob_threshold
if args.prob_threshold_apply != 2 and args.prob_threshold_apply != 3:
args.prob_threshold = None
for party_id in range(args.n_parties):
#start training student models
for i in range(args.n_partition):
is_subset_temp = 1
stu_public_ds = public_ds
stu_query_dl = query_dl
stu_local_query_dl = local_query_dl
filter_query = args.filter_query
if i < args.max_z:
filter_query = 0
if args.std_place > 0:
init_std = args.init_std
else:
init_std = None
nets, _, _ = init_nets(args.net_config, args.dropout_p, 1, args,
args.n_teacher_each_partition, init_std)
nets_list = local_train_net_on_a_party(nets, args, net_dataidx_map, party_id, X_train, y_train, X_test, y_test, remain_test_dl, local_split=args.is_local_split,
device=device)
# nets_list_partition.append(nets_list)
if args.train_local_student:
if not args.with_unlabeled:
print("need public unlabeled data!")
exit(1)
logger.info("in party %d" % party_id)
if args.std_place > 1:
init_std = args.init_std
else:
init_std = None
stu_nets, _, _ = init_nets(args.net_config, args.dropout_p, 1, args, 1, init_std)
stu_net = stu_nets[0]
if args.dp_level == 2:
gamma = args.gamma
_, top2_counts_differ_one, vote_counts_in_a_party = train_a_student(nets_list,
stu_local_query_dl,
stu_public_ds,
remain_test_dl,
stu_net, n_classes,
args, gamma=gamma,
is_subset=is_subset_temp,
filter_query=filter_query,
device=device)
else:
gamma = 0
_, top2_counts_differ_one, vote_counts_in_a_party = train_a_student(nets_list, stu_local_query_dl, stu_public_ds,
remain_test_dl, stu_net, n_classes, args, gamma=gamma, is_subset=is_subset_temp, filter_query=filter_query,device=device)
vote_counts_parties = np.append(vote_counts_parties, vote_counts_in_a_party)
if top2_counts_differ_one != 0:
n_parti_top2_differ_one[party_id] += 1
local_stu_test_acc, conf_mat = compute_accuracy(stu_net, remain_test_dl, get_confusion_matrix=True, device=device)
logger.info("local_stu_test_acc: %f" % local_stu_test_acc)
tea_nets.append(stu_net)
else:
for net in nets_list:
tea_nets.append(net)
n_instances_portion[party_id] = len(net_dataidx_map[party_id]) / len(train_ds_global)
if args.prob_threshold_apply == 1 or args.prob_threshold_apply == 3:
args.prob_threshold = prob_threshold
else:
args.prob_threshold = None
# print("portion sum:", np.sum(n_instances_portion))
if args.alg == 'simenb':
simple_ensemble_acc, _ = compute_ensemble_accuracy(tea_nets, remain_test_dl, n_classes, ensemble_method=args.ensemble_method, uniform_weights=True, device=device)
logger.info("simple ensemble acc:%f" % simple_ensemble_acc)
exit(0)
if args.with_unlabeled:
global_stu_nets, _, _=init_nets(args.net_config, args.dropout_p, 1, args, 1)
global_stu_net = global_stu_nets[0]
if args.dp_level == 1:
gamma = args.gamma
_, _, vote_counts = train_a_student(tea_nets, query_dl, public_ds, remain_test_dl, global_stu_net,
n_classes, args, gamma=args.gamma,
is_subset=1, is_final_student=True, filter_query=0,device=device)
else:
gamma = 0
_, _, vote_counts = train_a_student(tea_nets, query_dl, public_ds, remain_test_dl, global_stu_net,
n_classes, args, gamma=args.gamma,
is_subset=1, is_final_student=True, filter_query=0,device=device)
# can change to local data to train the student
global_stu_test_acc, conf_mat = compute_accuracy(global_stu_net, remain_test_dl, get_confusion_matrix=True, device=device)
test_acc = global_stu_test_acc
logger.info("global_stu_test_acc: %f"% global_stu_test_acc)
else:
print("not supported yet")
if args.privacy_analysis_file_name is not None:
file_path = os.path.join(args.logdir, args.privacy_analysis_file_name + "-%d" % n_exp)
else:
file_path = os.path.join(args.logdir, log_path + "-%d" % n_exp)
file_path1 = file_path + '-dp0'
file_path2 = file_path + '-dp1'
np.savez(file_path1, n_instances_portion, n_parti_top2_differ_one, vote_counts)
if args.train_local_student:
np.savez(file_path2, n_instances_portion, n_parti_top2_differ_one, vote_counts_parties.reshape(-1,10))
global_model = global_stu_net
if args.save_global_model:
save_model(global_model, 0, args)
if args.alg == 'fedkt_fedavg' or args.alg == 'fedkt_fedprox':
logger.info("Initializing nets")
args.n_teacher_each_partition = 1
args.is_local_split = 0
nets, local_model_meta_data, layer_type = init_nets(args.net_config, args.dropout_p, args.n_parties,
args)
global_model.to("cpu")
for round in range(args.comm_round):
logger.info("in comm round:" + str(round))
global_para = get_trainable_parameters(global_model)
for net_id, net in nets.items():
put_trainable_parameters(net, global_para)
if args.alg == 'fedkt_fedavg':
local_train_net(nets, args, net_dataidx_map, X_train, y_train, X_test, y_test,
remain_test_dl=remain_test_dl, local_split=False, retrain_epoch=args.retrain_local_epoch, device=device)
elif args.alg == 'fedkt_fedprox':
local_train_net_fedprox(nets, global_model, args, net_dataidx_map, X_train, y_train, X_test,
y_test, remain_test_dl=remain_test_dl, local_split=False, retrain_epoch=args.retrain_local_epoch, device=device)
global_model.to('cpu')
# local_train_net(nets, args, net_dataidx_map, local_split=False, device=device)
# update global model
total_data_points = sum([len(net_dataidx_map[r]) for r in range(args.n_parties)])
fed_avg_freqs = [len(net_dataidx_map[r]) / total_data_points for r in range(args.n_parties)]
weights = [get_trainable_parameters(nets[i].cpu()) for i in range(args.n_parties)]
# print("avg freqs:", fed_avg_freqs)
# print("weights:", weights)
average_weight = sum(weights[i] * fed_avg_freqs[i] for i in range(args.n_parties))
# print("average_weight:", average_weight)
put_trainable_parameters(global_model, average_weight)
logger.info('global n_training: %d' % len(train_dl_global))
logger.info('global n_test: %d' % len(remain_test_dl))
train_acc = compute_accuracy(global_model, train_dl_global)
test_acc, conf_matrix = compute_accuracy(global_model, remain_test_dl, get_confusion_matrix=True)
logger.info('>> Global Model Train accuracy: %f' % train_acc)
logger.info('>> Global Model Test accuracy: %f' % test_acc)
| StarcoderdataPython |
55368 | # Copyright 2019 The Regents of the University of California.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Original version written by <NAME> <<EMAIL>>
from swift.common.utils import get_logger, split_path, list_from_csv
from swift.common.swob import Request, Response, wsgify
from swift.common.constraints import valid_api_version
from swift.common.request_helpers import get_param
from swift.proxy.controllers.base import get_container_info, get_object_info
from swift.common.swob import wsgify
from avro_streamer.avro_streamer import GenericStrippingAvroParser
class AvroFilterMiddleware(object):
    """
    Swift middleware for removing certain fields from downloaded Avro
    objects, depending on a user's role.
    Essentially, this allows the Avro objects to be selectively censored
    for different classes of user -- for instance, there may be sensitive
    data that is being collected that should only be made available to
    privileged users.
    See attached README.md file for instructions on how to configure this
    middleware appropriately.
    Stripping is only applied to objects that have a Content-Type of
    'application/vnd.caida.<datatype>.avro'.
    Requires: python-avro-streamer (https://github.com/CAIDA/python-avro-streamer)
    """
    def __init__(self, app, conf, logger=None):
        """
        :param app: the next WSGI application/middleware in the pipeline
        :param conf: configuration dict merged from the paste.deploy sections
        :param logger: optional logger; one is created from conf if omitted
        """
        self.app = app
        if logger:
            self.logger = logger
        else:
            self.logger = get_logger(conf, log_route='avrofilter')
        # Any roles specified as "nostrip_roles" will always receive the
        # full uncensored Avro data
        if 'nostrip_roles' in conf:
            self.nostrip_roles = set([x.strip()
                    for x in conf['nostrip_roles'].split(',')])
        else:
            self.nostrip_roles = set()
        # admin should always be a nostrip role
        self.nostrip_roles.add('admin')
        # defaultstrip: datatype -> set of every field any role may retain;
        # these fields are stripped unless a matching role retains them
        self.defaultstrip = {}
        # dontstrip: role -> datatype -> set of fields retained for that role
        self.dontstrip = {}
        # Any field mentioned in a "retain_keys" option will be stripped
        # by default, unless the user matches a role where that field is
        # explicitly listed as being retained
        # In other words: defaultstrip is the union of all of the fields that
        # are explicitly configured as retainable. Any "public" fields should
        # NOT be listed as a retained field for any role.
        # NOTE: this used conf.iteritems(), which is Python-2-only and raises
        # AttributeError on Python 3; items() behaves identically on both.
        for k, v in conf.items():
            # The role that this option applies to is specified in the
            # prefix of the configuration option name
            # e.g. "swiftro_retain_keys" -> role = "swiftro"
            if not k.endswith("_retain_keys"):
                continue
            role = k[:-12]
            if role in self.dontstrip:
                self.logger.info("Warning: role '%s' appears multiple times in AvroFilterMiddleware configuration" % (role))
                # TODO only warn once per duplicate role
                continue
            self.dontstrip[role] = {}
            for ts in list_from_csv(v):
                ts = ts.strip()
                if len(ts) == 0:
                    continue
                # fields are listed using <datatype>:<fieldname> format, e.g.
                # "flowtuple:netacq_country"
                ts = ts.split(':')
                if len(ts) != 2:
                    self.logger.info("Invalid 'retain_keys' parameter format, should be <data type>:<field name> (not %s)" % (ts))
                    continue
                if ts[0] not in self.dontstrip[role]:
                    self.dontstrip[role][ts[0]] = set()
                if ts[0] not in self.defaultstrip:
                    self.defaultstrip[ts[0]] = set()
                self.dontstrip[role][ts[0]].add(ts[1])
                self.defaultstrip[ts[0]].add(ts[1])
    @wsgify
    def __call__(self, req):
        """
        Intercept object GET/HEAD responses and strip configured Avro fields
        unless the requester holds one of the "nostrip" roles.
        """
        try:
            (version, account, container, obj) = \
                split_path(req.path_info, 4, 4, True)
        except ValueError:
            # not an object request, so nothing for us to do
            return req.get_response(self.app)
        # Only worry about data fetches, not uploads.
        if not valid_api_version(version) or req.method not in ('GET', 'HEAD'):
            return req.get_response(self.app)
        # Get all roles that apply to the user making the request
        roles = set()
        if (req.environ.get('HTTP_X_IDENTITY_STATUS') == 'Confirmed' or \
                req.environ.get('HTTP_X_SERVICE_IDENTITY_STATUS') in \
                (None, "Confirmed")):
            roles = set(list_from_csv(req.environ.get('HTTP_X_ROLES', '')))
        # If we have one of the "nostrip" roles, then don't do any stripping
        if roles.intersection(self.nostrip_roles):
            return req.get_response(self.app)
        # Perform the request and grab a response object that we can work
        # with
        resp = req.get_response(self.app)
        # Check that the requested object is actually a CAIDA avro file
        conttype = resp.headers.get("Content-Type", None)
        if conttype is None:
            return resp
        if not conttype.startswith("application/vnd.caida."):
            return resp
        if not conttype.endswith(".avro"):
            return resp
        # strip the prefix and the trailing ".avro" to get the datatype name
        dtype = conttype.replace("application/vnd.caida.", "", 1)[:-5]
        if dtype not in self.defaultstrip:
            # no retainable fields configured for this datatype: nothing to do
            return resp
        # Start by planning to strip all fields for this datatype that have
        # been explicitly appeared in the config file. Then for each role that
        # the user has, remove any fields from the strip set that should be
        # retained for that role.
        tostrip = self.defaultstrip[dtype]
        for r in roles:
            if r not in self.dontstrip:
                # No specified config for this role, so leave strip set as is
                continue
            if dtype not in self.dontstrip[r]:
                continue
            # set subtraction returns a new set, so defaultstrip is unharmed
            tostrip = tostrip - self.dontstrip[r][dtype]
        # Remove the Etag because otherwise swift clients get very upset
        # about the md5sum of the response body not matching the md5sum
        # in the Etag header :/
        if 'Etag' in resp.headers:
            del resp.headers['Etag']
        # If we are going to be stripping fields, replace our response
        # iterable with one that will parse the received Avro and remove
        # the desired fields. The swift proxy should handle the rest.
        x = GenericStrippingAvroParser(resp.app_iter, resp.body, tostrip)
        resp.app_iter = x
        return resp
def filter_factory(global_conf, **local_conf):
    """Return a WSGI filter app for use with paste.deploy.

    Local (per-filter) settings override the global paste.deploy settings.
    """
    merged = dict(global_conf)
    merged.update(local_conf)

    def avro_strip(app):
        return AvroFilterMiddleware(app, merged)

    return avro_strip
# vim: set sw=4 tabstop=4 softtabstop=4 expandtab :
| StarcoderdataPython |
276002 | ## TODO: define the convolutional neural network architecture
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
# can use the below import should you choose to initialize the weights of your Net
import torch.nn.init as I
class Net(nn.Module):
    """Facial-keypoint regression CNN.

    Consumes a single-channel (grayscale) 224x224 image and produces 136
    values: one (x, y) pair for each of the 68 facial keypoints.
    """

    def __init__(self):
        super(Net, self).__init__()

        # Feature extractor: four conv blocks, each followed by ReLU and a
        # shared 2x2 max-pool.  Spatial sizes for a 224x224 input:
        #   conv1 (1 -> 32, 5x5):   224 -> 220, pool -> 110
        #   conv2 (32 -> 64, 5x5):  110 -> 106, pool -> 53
        #   conv3 (64 -> 96, 5x5):  53 -> 49,   pool -> 24 (24.5 rounds down)
        #   conv4 (96 -> 128, 3x3): 24 -> 22,   pool -> 11
        self.conv1 = nn.Conv2d(1, 32, 5)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(32, 64, 5)
        # dropout after the second conv block (p=0.5) to limit overfitting
        self.conv2_drop = nn.Dropout(p=0.5)
        self.conv3 = nn.Conv2d(64, 96, 5)
        # dropout after the third conv block (p=0.4)
        self.conv3_drop = nn.Dropout(p=0.4)
        self.conv4 = nn.Conv2d(96, 128, 3)

        # Regression head: 128 feature maps of 11x11 flatten to 128*11*11
        # inputs, funnelled down to the 136 keypoint coordinates, with
        # dropout between the fully connected layers.
        self.fc1 = nn.Linear(128 * 11 * 11, 816)
        self.fc1_drop = nn.Dropout(p=0.4)
        self.fc2 = nn.Linear(816, 272)
        self.fc2_drop = nn.Dropout(p=0.3)
        self.fc3 = nn.Linear(272, 136)

    def forward(self, x):
        """Map a batch of images (N, 1, 224, 224) to keypoints (N, 136)."""
        out = self.pool(F.relu(self.conv1(x)))
        out = self.conv2_drop(self.pool(F.relu(self.conv2(out))))
        out = self.conv3_drop(self.pool(F.relu(self.conv3(out))))
        out = self.pool(F.relu(self.conv4(out)))
        # flatten every feature map into one vector per sample
        out = out.view(out.size(0), -1)
        out = self.fc1_drop(F.relu(self.fc1(out)))
        out = self.fc2_drop(F.relu(self.fc2(out)))
        return self.fc3(out)
| StarcoderdataPython |
137172 | # Copyright 2018-2019 SourceOptics Project Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import fnmatch
import os
import re
from django.utils.dateparse import parse_datetime
from ..models import Author, Commit, File, FileChange
from . import commands
# We run git log with a special one-line format string to capture certain
# fields, then regex across those fields using a custom delimiter so they
# are easy to pull apart.  DEL just needs to be a token that is unlikely
# to appear inside real commit metadata.
DEL = '&DEL&>'
# Fields recorded (in order)
# commit hash %H
# author_name %an
# author_date %ad
# commit_date %cd
# author_email %ae
# subject %f
PRETTY_STRING = f"'{DEL}%H{DEL}%an{DEL}%ad{DEL}%cd{DEL}%ae{DEL}%f{DEL}'"
# the regex used to parse each log line; it must mirror the log format
# produced by PRETTY_STRING above (same fields, same order)
PARSER_RE_STRING = f"{DEL}(?P<commit>.*){DEL}(?P<author_name>.*){DEL}(?P<author_date>.*){DEL}(?P<commit_date>.*){DEL}(?P<author_email>.*){DEL}(?P<subject>.*){DEL}"
PARSER_RE = re.compile(PARSER_RE_STRING, re.VERBOSE)
# Module-level cache of File rows for the repository currently being
# scanned (see Commits.get_file); FILES_HACK maps "path/name" -> File and
# FILES_HACK_REPO records which repository the cache was built for.
FILES_HACK_REPO = None
FILES_HACK = dict()
# Regex for handling arbitrary file path renames;
# `(/)?`: First captured group that matches `/` optionally
# `(?:\{[^}=]+=>)`: non-captured group to match until `=>`
# `([^}]+)`: matches the portion up to the next `}`
# In the replacement, only the captured groups are used.
FILE_PATH_RENAME_RE = re.compile(r'(/)?(?:\{[^}=]+=>)([^}]+)\}')
class Commits:
"""
This class clones a repository (git) using a provided URL and credential
and proceeds to execute git log on it to scan its data
"""
@classmethod
def get_file(cls, repo, path, filename):
"""
provide a lookup of File objects for repo/path/filename tuples to
prevent excessive database access. This cache is only kept around
for the current repository
"""
global FILES_HACK_REPO
global FILES_HACK
if FILES_HACK_REPO != repo.name:
FILES_HACK = dict()
FILES_HACK_REPO = repo.name
files = File.objects.filter(repo=repo).all()
for fobj in files:
assert fobj is not None
key = os.path.join(fobj.path, fobj.name)
FILES_HACK[key] = fobj
original = os.path.join(path, filename)
result = FILES_HACK[original]
assert result is not None
return result
@classmethod
def bulk_create(cls, total_commits, total_files, total_file_changes):
"""
we keep a list of the three types of objects and only create them periodically,
to prevent from doing too many database transactions. The batch size here is fairly
arbitrary.
"""
# by not ignoring conflicts, we can test whether our scanner "overwork" code is correct
# use -F to try a full test from scratch
if len(total_commits):
Commit.objects.bulk_create(total_commits, 100, ignore_conflicts=True)
del total_commits[:]
if len(total_files):
File.objects.bulk_create(total_files, 100, ignore_conflicts=True)
del total_files[:]
if len(total_file_changes):
FileChange.objects.bulk_create(total_file_changes, 100, ignore_conflicts=True)
del total_file_changes[:]
@classmethod
def process_commits(cls, repo, repo_dir, mode='Commit'):
"""
Uses git log to gather the commit data for a repository. This is run three times in three different
modes over the same git log output. See usage in processor.py.
"""
cmd_string = 'git rev-list --all --count'
commit_total = commands.execute_command(repo, cmd_string, log=False, timeout=600, chdir=repo_dir, capture=True)
try:
commit_total = int(commit_total)
except TypeError:
print("no commits yet")
return
cmd_string = ('git log --all --numstat --date=iso-strict-local --pretty=format:'
+ PRETTY_STRING)
last_commit = None
count = 0
total_commits = []
total_file_changes = []
total_files = []
global GLITCH_COUNT
def handler(line):
"""
this code processes every line from the output
"""
nonlocal last_commit
nonlocal count
if count % 200 == 0:
print("scanning (repo:%s) (mode:%s): %s/%s" % (repo, mode, count, commit_total))
if count % 2000 == 0:
cls.bulk_create(total_commits, total_files, total_file_changes)
if not line or line == "\n":
#print("F1")
return True # continue
elif line.startswith(DEL):
commit = cls.handle_diff_information(repo, line, mode)
if last_commit != commit:
count = count + 1
last_commit = commit
total_commits.append(commit)
return True
elif "does not have any commits yet" in line:
#print("skipping, no commits yet")
return False
else:
if mode != 'Commit':
assert last_commit is not None
cls.handle_file_information(repo, line, last_commit, mode, total_files, total_file_changes)
return True
commands.execute_command(repo, cmd_string, log=False, timeout=1200, chdir=repo_dir, handler=handler)
cls.bulk_create(total_commits, total_files, total_file_changes)
return True
@classmethod
def create_file(cls, full_path, commit, la, lr, binary, mode, total_files, total_file_changes):
"""
After we have recorded commits, this function creates either Files or FileChange objects
depending on what scanner pass we are running through.
"""
assert commit is not None
assert mode in [ 'File', 'FileChange' ]
fname = os.path.basename(full_path)
# find the extension
(_, ext) = os.path.splitext(full_path)
path = os.path.dirname(full_path)
if mode == 'File':
# update the global file object with the line counts
total_files.append(File(
repo=commit.repo,
path=path,
name=fname,
ext=ext,
binary=binary
))
# BOOKMARK
elif mode == 'FileChange':
file = cls.get_file(commit.repo, path, fname)
if file is None:
# this shouldn't happen, but if we get here the parser has a bug.
raise Exception("FATAL, MISSING FILE RECORD, SHOULDN'T BE HERE!")
total_file_changes.append(FileChange(
commit=commit,
lines_added=la,
lines_removed=lr,
file=file
))
@classmethod
def matches(self, needle, haystack, exact=False, trim_dot=False):
"""
This function is used by the source code filtering feature to see if a file path
matches an expression or not.
"""
# user input may be inconsistent about trailing slashes so be flexible
if haystack.endswith("/"):
haystack = haystack[:-1]
if needle.endswith("/"):
needle = needle[:-1]
if trim_dot:
# for extension checking, do not require the user input to be ".mp4" to mean "mp4"
haystack = haystack.replace(".","")
needle = needle.replace(".", "")
if "?" in needle or "*" in needle or "[" in needle:
# this looks like a fnmatch pattern
return fnmatch.fnmatch(haystack, needle)
elif exact:
# we are processing an extension, require an exact match
return haystack == needle
else:
# we are processing paths, not extensions, so just require it to start with the substring
return haystack.startswith(needle)
@classmethod
def has_matches(cls, needles, haystack, exact=False, trim_dot=False):
"""
tests whether a file pattern has any one of multiple matches
"""
for needle in needles:
if cls.matches(needle, haystack, exact=exact, trim_dot=trim_dot):
return True
return False
@classmethod
def has_no_matches(cls, needles, haystack, exact=False, trim_dot=False):
return not cls.has_matches(needles, haystack, exact=exact, trim_dot=trim_dot)
@classmethod
def repair_move_path(cls, path):
"""
handles details about moves in git log by fixing path elements like /{org=>com}/
to just log the file in the final path. This will possibly give users credit for
aspects of a move but this something we can explore later. Not sure if it does - MPD.
"""
return FILE_PATH_RENAME_RE.sub(r'\1\2', path)
@classmethod
def should_process_path(cls, repo, path):
    """
    Decide whether a file path inside a repository should be indexed.

    Repository configuration supports filtering based on path, to decide to
    index or not-index certain files (e.g. only index 'src/' or skip
    'docs/'); filtering is off by default. Repo-level lists take precedence
    over organization-level lists.

    :param repo: repository model instance (with organization and scanner lists)
    :param path: repo-relative file path
    :return: True if the path passes all configured allow/deny filters
    """
    org = repo.organization
    # repo settings override the organization defaults
    directory_allow = repo.scanner_directory_allow_list or org.scanner_directory_allow_list
    directory_deny = repo.scanner_directory_deny_list or org.scanner_directory_deny_list
    extension_allow = repo.scanner_extension_allow_list or org.scanner_extension_allow_list
    extension_deny = repo.scanner_extension_deny_list or org.scanner_extension_deny_list
    dirname = os.path.dirname(path)
    # os.path.splitext always returns a 2-tuple, so the previous
    # `len(split_ext) > 1` guard was always true; the extension is simply
    # "" (falsy) when the file has none, which skips the extension filters.
    extension = os.path.splitext(path)[1]
    # the lists are stored as newline-delimited text; split into patterns
    if directory_allow:
        directory_allow = directory_allow.split("\n")
    if directory_deny:
        directory_deny = directory_deny.split("\n")
    if extension_allow:
        extension_allow = extension_allow.split("\n")
    if extension_deny:
        extension_deny = extension_deny.split("\n")
    if directory_allow and cls.has_no_matches(directory_allow, dirname):
        return False
    if directory_deny and cls.has_matches(directory_deny, dirname):
        return False
    if extension:
        if extension_allow and cls.has_no_matches(extension_allow, extension, exact=True, trim_dot=True):
            return False
        if extension_deny and cls.has_matches(extension_deny, extension, exact=True, trim_dot=True):
            return False
    return True
@classmethod
def handle_file_information(cls, repo, line, last_commit, mode, total_files, total_file_changes):
    """
    Process one per-file change line (git numstat output) for a commit.

    Parses "<added> <removed> <path>", normalizes move notation, applies
    the repository's path filters and records the change via cls.create_file.
    Returns None when the path is filtered out.
    """
    tokens = line.split()
    # NOTE(review): joining the path tokens with '' drops any whitespace that
    # was inside the original path — confirm paths with spaces are handled.
    (added, removed, path) = (tokens[0], tokens[1], ''.join(tokens[2:]))
    # binary files will have '-' in their field changes. Set these to 0
    binary = False
    if added == '-':
        binary = True
        added = 0
    if removed == '-':
        binary = True
        removed = 0
    try:
        added = int(added)
        removed = int(removed)
    except ValueError:
        # Some repos emit non-numeric text here (e.g. "warning: inexact"
        # observed in kubernetes/kubernetes); treat the counts as 0 and keep
        # going. We probably need to parse the line differently in this case.
        # (Was a bare `except:`, which also swallowed unrelated errors.)
        added = 0
        removed = 0
    path = cls.repair_move_path(path)
    if not cls.should_process_path(repo, path):
        return None
    cls.create_file(path, last_commit, added, removed, binary, mode, total_files, total_file_changes)
@classmethod
def handle_diff_information(cls, repo, line, mode):
    """
    Parse one commit-header line from the git log output.

    Returns an existing Commit when re-scanning for file changes, otherwise
    an unsaved Commit instance destined for bulk_create.
    """
    # FIXME: give all these fields names
    match = PARSER_RE.match(line)
    if match is None:
        raise Exception("DOESN'T MATCH? %s" % line)
    fields = match.groupdict()
    if mode != 'Commit':
        # running back through the logs to set up the file changes
        return Commit.objects.get(sha=fields['commit'])
    author, _created = Author.objects.get_or_create(email=fields['author_email'])
    # will pass on to bulk_create
    return Commit(
        sha=fields['commit'],
        subject=fields['subject'],
        repo=repo,
        author=author,
        author_date=parse_datetime(fields['author_date']),
        commit_date=parse_datetime(fields['commit_date']),
    )
| StarcoderdataPython |
from typing import Any, Tuple, TextIO
import swan.io.ioutil
import swan.util
import os
import platform
import datetime
class File:
    """Describes a file and allows for the application of useful functions.

    Wraps a filesystem path with useful information like size, modification
    and creation dates, and features like optionally holding an open stream
    and caching a content hash, OS-independently (for the most common OSes).
    """

    lock: bool                 # advisory flag stored on the instance; not enforced here
    size: float                # file size in bytes (os.path.getsize)
    __stream__: TextIO         # NOTE(review): annotated TextIO, but default mode 'rb' yields a binary stream — confirm
    __file_path__: str
    __mode__: str
    has_open_stream: bool
    hash: Tuple[bytes, str]    # (digest bytes, hex string), or None when not cached

    def __init__(self, path: str, open_stream: bool=False, mode: str='rb', cache_hash: bool = True, lock: bool = True):
        """Constructor of the File class.

        :param path: the path to create a File instance with
        :param open_stream: shall a stream be opened?
        :param mode: if open_stream is true, the mode to open the stream with
        :param cache_hash: compute and store the content hash (requires open_stream)
        :param lock: advisory lock flag stored on the instance
        :raises FileNotFoundError: if the path does not exist
        :raises EnvironmentError: if the stream cannot be opened
        """
        if not os.path.exists(path):
            # FileNotFoundError is a subclass of IOError/OSError, so existing
            # callers that catch IOError keep working.
            raise FileNotFoundError(f'File {path} not found')
        self.__file_path__ = path
        self.__stream__ = None
        self.__mode__ = mode
        self.has_open_stream = False
        self.lock = lock
        self.size = os.path.getsize(self.__file_path__)
        self.modate = os.path.getmtime(self.__file_path__)
        self.createdate = self.creation_date()
        self.human_modate = datetime.datetime.fromtimestamp(self.modate).isoformat()
        # hash is a tuple with the bytes digest at [0] and the hex string at [1]
        # (previously documented by a stray string literal, which is a no-op
        # expression statement, not a comment).
        self.hash = None
        if open_stream:
            try:
                self.__stream__ = open(self.__file_path__, self.__mode__)
            except IOError as e:
                raise EnvironmentError(f'An IO exception has occurred [{e}]')
            else:
                self.has_open_stream = True
        if cache_hash and open_stream:
            self.hash = self.get_hash()

    def __enter__(self):
        return self

    def __exit__(self, type, value, traceback):
        # Close the stream (if any) when leaving a `with` block.
        if self.has_open_stream:
            self.__stream__.close()
            self.has_open_stream = False

    def change_mode(self, mode):
        """Reopen the current stream with a new mode, keeping __mode__ in sync.

        :raises ValueError: if no stream has been opened yet
        """
        if self.has_open_stream:
            self.__stream__.close()
            self.__stream__ = open(self.__file_path__, mode)
            # bug fix: the recorded mode previously went stale after a reopen
            self.__mode__ = mode
        else:
            raise ValueError('There is not an open stream yet for this file')

    def creation_date(self):
        """
        Try to get the date that a file was created, falling back to when it was
        last modified if that isn't possible.
        """
        if platform.system() == 'Windows':
            return os.path.getctime(self.__file_path__)
        else:
            stat = os.stat(self.__file_path__)
            try:
                return stat.st_birthtime
            except AttributeError:
                # Probably on Linux. No easy way to get creation dates,
                # so we'll settle for when its content was last modified.
                return stat.st_mtime

    def get_hash(self):
        """Return the SHA-256 hash of the open stream's contents.

        :raises EnvironmentError: if no stream is open
        """
        if self.has_open_stream:
            # bug fix: the module is imported as swan.io.ioutil at the top of
            # this file; the original called swan.ioutil, which raises
            # AttributeError at runtime.
            return swan.io.ioutil.stream_sha256_hash(self.__stream__)
        else:
            raise EnvironmentError('No open stream to retrieve data from')
# This file is a library module; refuse direct execution.
if __name__ == "__main__":
    raise RuntimeError('this file is a module and should not be ran directly')
| StarcoderdataPython |
from setuptools import find_packages, setup

# Minimal packaging metadata for the src package.
setup(
    name='src',
    packages=find_packages(),
    version='0.1.0',
    # typo fix: was "MaForecasting a time series ..."
    description='Forecasting a time series using regression, ARIMA and RNN methods among others',
    author='Fernando_Montes',
    license='MIT',
)
| StarcoderdataPython |
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from twisted.internet import defer
from ._base import BaseHandler
from synapse.api.constants import LoginType
from synapse.types import UserID
from synapse.api.errors import LoginError, Codes
from synapse.http.client import SimpleHttpClient
from synapse.util.async import run_on_reactor
from twisted.web.client import PartialDownloadError
import logging
import bcrypt
import simplejson
import synapse.util.stringutils as stringutils
logger = logging.getLogger(__name__)
class AuthHandler(BaseHandler):
    """
    Implements user-interactive authentication: multi-stage auth flows
    (password, recaptcha, email identity, dummy) for login/registration,
    plus v1 password login and account maintenance helpers.
    """

    def __init__(self, hs):
        super(AuthHandler, self).__init__(hs)
        # Map of LoginType -> coroutine that validates that auth stage.
        self.checkers = {
            LoginType.PASSWORD: self._check_password_auth,
            LoginType.RECAPTCHA: self._check_recaptcha,
            LoginType.EMAIL_IDENTITY: self._check_email_identity,
            LoginType.DUMMY: self._check_dummy_auth,
        }
        # In-memory store of in-progress auth sessions, keyed by session id.
        self.sessions = {}

    @defer.inlineCallbacks
    def check_auth(self, flows, clientdict, clientip):
        """
        Takes a dictionary sent by the client in the login / registration
        protocol and handles the login flow.

        As a side effect, this function fills in the 'creds' key on the user's
        session with a map, which maps each auth-type (str) to the relevant
        identity authenticated by that auth-type (mostly str, but for captcha, bool).

        Args:
            flows (list): A list of login flows. Each flow is an ordered list of
                          strings representing auth-types. At least one full
                          flow must be completed in order for auth to be successful.
            clientdict: The dictionary from the client root level, not the
                        'auth' key: this method prompts for auth if none is sent.
            clientip (str): The IP address of the client.
        Returns:
            A tuple of (authed, dict, dict) where authed is true if the client
            has successfully completed an auth flow. If it is true, the first
            dict contains the authenticated credentials of each stage.

            If authed is false, the first dictionary is the server response to
            the login request and should be passed back to the client.

            In either case, the second dict contains the parameters for this
            request (which may have been given only in a previous call).
        """
        authdict = None
        sid = None
        if clientdict and 'auth' in clientdict:
            authdict = clientdict['auth']
            del clientdict['auth']
            if 'session' in authdict:
                sid = authdict['session']
        session = self._get_session_info(sid)

        if len(clientdict) > 0:
            # This was designed to allow the client to omit the parameters
            # and just supply the session in subsequent calls so it split
            # auth between devices by just sharing the session, (eg. so you
            # could continue registration from your phone having clicked the
            # email auth link on there). It's probably too open to abuse
            # because it lets unauthenticated clients store arbitrary objects
            # on a home server.
            # Revisit: Assuming the REST APIs do sensible validation, the data
            # isn't arbitrary.
            session['clientdict'] = clientdict
            self._save_session(session)
        elif 'clientdict' in session:
            clientdict = session['clientdict']

        if not authdict:
            defer.returnValue(
                (False, self._auth_dict_for_flows(flows, session), clientdict)
            )

        if 'creds' not in session:
            session['creds'] = {}
        creds = session['creds']

        # check auth type currently being presented
        if 'type' in authdict:
            if authdict['type'] not in self.checkers:
                raise LoginError(400, "", Codes.UNRECOGNIZED)
            result = yield self.checkers[authdict['type']](authdict, clientip)
            if result:
                creds[authdict['type']] = result
                self._save_session(session)

        # A flow is complete when every one of its stages has credentials.
        for f in flows:
            if len(set(f) - set(creds.keys())) == 0:
                logger.info("Auth completed with creds: %r", creds)
                self._remove_session(session)
                defer.returnValue((True, creds, clientdict))

        ret = self._auth_dict_for_flows(flows, session)
        ret['completed'] = creds.keys()
        defer.returnValue((False, ret, clientdict))

    @defer.inlineCallbacks
    def add_oob_auth(self, stagetype, authdict, clientip):
        """
        Adds the result of out-of-band authentication into an existing auth
        session. Currently used for adding the result of fallback auth.
        """
        if stagetype not in self.checkers:
            raise LoginError(400, "", Codes.MISSING_PARAM)
        if 'session' not in authdict:
            raise LoginError(400, "", Codes.MISSING_PARAM)

        sess = self._get_session_info(
            authdict['session']
        )
        if 'creds' not in sess:
            sess['creds'] = {}
        creds = sess['creds']

        result = yield self.checkers[stagetype](authdict, clientip)
        if result:
            creds[stagetype] = result
            self._save_session(sess)
            defer.returnValue(True)
        defer.returnValue(False)

    @defer.inlineCallbacks
    def _check_password_auth(self, authdict, _):
        """Validate a password auth stage; returns the canonical user id."""
        if "user" not in authdict or "password" not in authdict:
            raise LoginError(400, "", Codes.MISSING_PARAM)

        user_id = authdict["user"]
        password = authdict["password"]
        if not user_id.startswith('@'):
            # Qualify a bare localpart with this homeserver's domain.
            user_id = UserID.create(user_id, self.hs.hostname).to_string()

        user_id, password_hash = yield self._find_user_id_and_pwd_hash(user_id)
        self._check_password(user_id, password, password_hash)
        defer.returnValue(user_id)

    @defer.inlineCallbacks
    def _check_recaptcha(self, authdict, clientip):
        """Verify a captcha response against the configured siteverify API."""
        try:
            user_response = authdict["response"]
        except KeyError:
            # Client tried to provide captcha but didn't give the parameter:
            # bad request.
            raise LoginError(
                400, "Captcha response is required",
                errcode=Codes.CAPTCHA_NEEDED
            )

        logger.info(
            "Submitting recaptcha response %s with remoteip %s",
            user_response, clientip
        )

        # TODO: get this from the homeserver rather than creating a new one for
        # each request
        try:
            client = SimpleHttpClient(self.hs)
            resp_body = yield client.post_urlencoded_get_json(
                self.hs.config.recaptcha_siteverify_api,
                args={
                    'secret': self.hs.config.recaptcha_private_key,
                    'response': user_response,
                    'remoteip': clientip,
                }
            )
        except PartialDownloadError as pde:
            # Twisted raises this even though the body is usable
            data = pde.response
            resp_body = simplejson.loads(data)
        if 'success' in resp_body and resp_body['success']:
            defer.returnValue(True)
        raise LoginError(401, "", errcode=Codes.UNAUTHORIZED)

    @defer.inlineCallbacks
    def _check_email_identity(self, authdict, _):
        """Validate an email stage via the identity server's threepid creds."""
        yield run_on_reactor()

        if 'threepid_creds' not in authdict:
            raise LoginError(400, "Missing threepid_creds", Codes.MISSING_PARAM)

        threepid_creds = authdict['threepid_creds']
        identity_handler = self.hs.get_handlers().identity_handler

        # lazily formatted logger args instead of eager %-interpolation
        logger.info("Getting validated threepid. threepidcreds: %r", threepid_creds)
        threepid = yield identity_handler.threepid_from_creds(threepid_creds)

        if not threepid:
            raise LoginError(401, "", errcode=Codes.UNAUTHORIZED)

        threepid['threepid_creds'] = authdict['threepid_creds']
        defer.returnValue(threepid)

    @defer.inlineCallbacks
    def _check_dummy_auth(self, authdict, _):
        """Always-succeeding stage used to pad flows that need no extra auth."""
        yield run_on_reactor()
        defer.returnValue(True)

    def _get_params_recaptcha(self):
        """Client-visible parameters for the recaptcha stage."""
        return {"public_key": self.hs.config.recaptcha_public_key}

    def _auth_dict_for_flows(self, flows, session):
        """Build the server response describing the available auth flows."""
        public_flows = []
        for f in flows:
            public_flows.append(f)

        get_params = {
            LoginType.RECAPTCHA: self._get_params_recaptcha,
        }

        params = {}
        for f in public_flows:
            for stage in f:
                if stage in get_params and stage not in params:
                    params[stage] = get_params[stage]()

        return {
            "session": session['id'],
            "flows": [{"stages": f} for f in public_flows],
            "params": params
        }

    def _get_session_info(self, session_id):
        """Fetch an existing session by id, or create a fresh one."""
        if session_id not in self.sessions:
            session_id = None

        if not session_id:
            # create a new session
            while session_id is None or session_id in self.sessions:
                session_id = stringutils.random_string(24)
            self.sessions[session_id] = {
                "id": session_id,
            }

        return self.sessions[session_id]

    @defer.inlineCallbacks
    def login_with_password(self, user_id, password):
        """
        Authenticates the user with their username and password.

        Used only by the v1 login API.

        Args:
            user_id (str): User ID
            password (str): Password
        Returns:
            The access token for the user's session.
        Raises:
            StoreError if there was a problem storing the token.
            LoginError if there was an authentication problem.
        """
        user_id, password_hash = yield self._find_user_id_and_pwd_hash(user_id)
        self._check_password(user_id, password, password_hash)

        reg_handler = self.hs.get_handlers().registration_handler
        access_token = reg_handler.generate_token(user_id)
        logger.info("Logging in user %s", user_id)
        yield self.store.add_access_token_to_user(user_id, access_token)
        defer.returnValue((user_id, access_token))

    @defer.inlineCallbacks
    def _find_user_id_and_pwd_hash(self, user_id):
        """Checks to see if a user with the given id exists. Will check case
        insensitively, but will throw if there are multiple inexact matches.

        Returns:
            tuple: A 2-tuple of `(canonical_user_id, password_hash)`
        """
        user_infos = yield self.store.get_users_by_id_case_insensitive(user_id)
        if not user_infos:
            logger.warn("Attempted to login as %s but they do not exist", user_id)
            raise LoginError(403, "", errcode=Codes.FORBIDDEN)

        if len(user_infos) > 1:
            if user_id not in user_infos:
                logger.warn(
                    "Attempted to login as %s but it matches more than one user "
                    "inexactly: %r",
                    user_id, user_infos.keys()
                )
                raise LoginError(403, "", errcode=Codes.FORBIDDEN)
            defer.returnValue((user_id, user_infos[user_id]))
        else:
            defer.returnValue(user_infos.popitem())

    def _check_password(self, user_id, password, stored_hash):
        """Checks that user_id has passed password, raises LoginError if not."""
        if not bcrypt.checkpw(password, stored_hash):
            logger.warn("Failed password login for user %s", user_id)
            raise LoginError(403, "", errcode=Codes.FORBIDDEN)

    @defer.inlineCallbacks
    def set_password(self, user_id, newpassword):
        """Replace the user's password hash and invalidate existing sessions."""
        # bug fix: restore the bcrypt call — the stored source contained a
        # corrupted "<PASSWORD>" placeholder here, which is a syntax error.
        password_hash = bcrypt.hashpw(newpassword, bcrypt.gensalt())

        yield self.store.user_set_password_hash(user_id, password_hash)
        yield self.store.user_delete_access_tokens(user_id)
        yield self.hs.get_pusherpool().remove_pushers_by_user(user_id)
        yield self.store.flush_user(user_id)

    @defer.inlineCallbacks
    def add_threepid(self, user_id, medium, address, validated_at):
        """Record a validated third-party identifier (e.g. email) for a user."""
        yield self.store.user_add_threepid(
            user_id, medium, address, validated_at,
            self.hs.get_clock().time_msec()
        )

    def _save_session(self, session):
        # TODO: Persistent storage
        logger.debug("Saving session %s", session)
        self.sessions[session["id"]] = session

    def _remove_session(self, session):
        logger.debug("Removing session %s", session)
        del self.sessions[session["id"]]
| StarcoderdataPython |
class Param:
    """A single tunable parameter: its source location, type, default and range."""

    def __init__(self, name, param_line, param_col, param_type,
                 default_val, value_range, steps=None):
        self.name = name
        self.param_line = param_line
        self.param_col = param_col
        self.param_type = param_type
        self.param_default = default_val
        self.value_range = value_range
        self.step = steps

    def __str__(self):
        return "{0} : {1} : {2} : {3} : {4}".format(
            self.name, self.param_line, self.param_col,
            self.param_default, self.param_type)
| StarcoderdataPython |
from decimal import Decimal
from httmock import HTTMock, all_requests
import pytest
from htmltab.cli import main
from htmltab.utils import numberise
@all_requests
def basic_response(url, request):
    """httmock handler: serve the basic.html fixture for every request."""
    with open("tests/fixtures/basic.html") as fh:
        body = fh.read()
    return {"status_code": 200, "text": body, "content": body}
@all_requests
def response_404(url, request):
    """httmock handler: respond 404 NOT FOUND to every request."""
    return {"status_code": 404, "reason": "NOT FOUND"}
def test_local_file(runner, basic_csv):
    """
    Test that the correct CSV data is output when using a local HTML5
    document as input.
    """
    outcome = runner.invoke(main, ["tests/fixtures/basic.html"])
    assert outcome.exit_code == 0
    assert outcome.output == basic_csv
def test_url(runner, basic_csv):
    """Fetching the document over HTTP produces the same CSV as a local file."""
    with HTTMock(basic_response):
        outcome = runner.invoke(main, ["http://example.org/basic.html"])
        assert outcome.exit_code == 0
        assert outcome.output == basic_csv
def test_zero_is_invalid_select_value(runner, three_csv_table_three):
    """Table indices are 1-based, so selecting table 0 is an error."""
    outcome = runner.invoke(main, ["-s", "0", "tests/fixtures/three.html"])
    assert outcome.exit_code != 0
    assert "Error: value matched no elements" in outcome.output
def test_integer_select_value(runner, three_csv_table_three):
    """Both --select and -s accept a 1-based table index."""
    for flag in ("--select", "-s"):
        outcome = runner.invoke(main, [flag, "3", "tests/fixtures/three.html"])
        assert outcome.output == three_csv_table_three
def test_css_select_value(runner, three_csv_table_two):
    """A CSS selector picks out the table inside #data."""
    for flag in ("--select", "-s"):
        outcome = runner.invoke(main, [flag, "#data table", "tests/fixtures/three.html"])
        assert outcome.output == three_csv_table_two
def test_xpath_select_value(runner, three_csv_table_three):
    """An XPath expression selects the third table."""
    for flag in ("--select", "-s"):
        outcome = runner.invoke(main, [flag, "(//table)[3]", "tests/fixtures/three.html"])
        assert outcome.output == three_csv_table_three
def test_invalid_select_value(runner):
    """A value that is neither an index, CSS, nor XPath is rejected."""
    outcome = runner.invoke(main, ["-s", "!", "tests/fixtures/three.html"])
    assert outcome.exit_code != 0
    assert "Error: Invalid value: '!'" in outcome.output
def test_table_rows_required(runner):
    """Selectors that match elements without table rows are errors."""
    for selector, message in (("#data", "Error: select value matched div element"),
                              ("thead", "Error:")):
        outcome = runner.invoke(main, ["-s", selector, "tests/fixtures/three.html"])
        assert outcome.exit_code != 0
        assert message in outcome.output
def test_table_rows_allowed(runner, three_csv_table_two):
    """Selecting tr elements directly is permitted."""
    outcome = runner.invoke(main, ["--select", "#data table tr", "tests/fixtures/three.html"])
    assert outcome.exit_code == 0
    assert outcome.output == three_csv_table_two
def test_all_rows_are_same_length(runner, ragged_csv):
    """Ragged tables are padded so every CSV row has the same width."""
    outcome = runner.invoke(main, ["tests/fixtures/ragged.html"])
    assert outcome.exit_code == 0
    assert outcome.output == ragged_csv
def test_default_null_values(runner):
    """With no options, the built-in list of null values is applied."""
    outcome = runner.invoke(main, ["tests/fixtures/countries.html"])
    with open("tests/fixtures/countries_default_nulls.csv") as fh:
        expected = fh.read()
    assert outcome.exit_code == 0
    assert outcome.output == expected
def test_no_null_values(runner):
    """A custom null value replaces the defaults entirely."""
    # Setting "!" as the only null value means the default null values aren't
    # used and that "!" is the sole acceptable null value in the source table.
    outcome = runner.invoke(main, ["--null-value", "!", "tests/fixtures/countries.html"])
    with open("tests/fixtures/countries_no_nulls.csv") as fh:
        expected = fh.read()
    assert outcome.exit_code == 0
    assert outcome.output == expected
def test_custom_null_values(runner):
    """Multiple custom null values work via both long and short options."""
    with open("tests/fixtures/countries_custom_nulls.csv") as fh:
        expected = fh.read()
    long_form = runner.invoke(
        main,
        ["--null-value", "0", "--null-value", "na", "tests/fixtures/countries.html"],
    )
    assert long_form.exit_code == 0
    assert long_form.output == expected
    short_form = runner.invoke(
        main, ["-n", "0", "-n", "na", "tests/fixtures/countries.html"]
    )
    assert long_form.output == short_form.output
def test_convert_numbers(runner, three_csv_table_two):
    """--convert-numbers, -c, and the default all convert numeric cells."""
    for args in (["-s", "2", "--convert-numbers", "tests/fixtures/three.html"],
                 ["-s", "2", "-c", "tests/fixtures/three.html"],
                 ["-s", "2", "tests/fixtures/three.html"]):
        outcome = runner.invoke(main, args)
        assert outcome.exit_code == 0
        assert outcome.output == three_csv_table_two
def test_keep_numbers(runner, three_csv_table_two_keep):
    """--keep-numbers / -k leave numeric cells untouched."""
    for args in (["-s", "2", "--keep-numbers", "tests/fixtures/three.html"],
                 ["-s", "2", "-k", "tests/fixtures/three.html"]):
        outcome = runner.invoke(main, args)
        assert outcome.exit_code == 0
        assert outcome.output == three_csv_table_two_keep
def test_bad_input(runner):
    """Unparseable or table-less HTML on stdin is an error."""
    for text in ("<", "<html></html>"):
        outcome = runner.invoke(main, input=text)
        assert outcome.exit_code != 0
def test_404_response(runner):
    """An HTTP error status aborts with a descriptive message."""
    with HTTMock(response_404):
        outcome = runner.invoke(main, ["http://example.org/404.html"])
        assert outcome.exit_code != 0
        assert "HTTP 404 NOT FOUND (http://example.org/404.html)" in outcome.output
def test_group_symbol_and_decimal_mark(runner, basic_european_csv):
    """
    Test that the group symbol and decimal mark can be set using both
    long and short command-line options.
    """
    long_form = runner.invoke(
        main,
        ["--group-symbol", ".", "--decimal-symbol", ",", "tests/fixtures/basic.html"],
    )
    assert long_form.exit_code == 0
    assert long_form.output == basic_european_csv
    short_form = runner.invoke(main, ["-g", ".", "-d", ",", "tests/fixtures/basic.html"])
    assert short_form.exit_code == 0
    assert short_form.output == long_form.output
def test_currency_symbol(runner, basic_eur_gbp):
    """
    Test that custom currency symbols can be set using a long and short
    command-line option.
    """
    long_form = runner.invoke(
        main,
        [
            "--currency-symbol",
            "€",
            "--currency-symbol",
            "£",
            "tests/fixtures/basic.html",
        ],
    )
    assert long_form.exit_code == 0
    assert long_form.output == basic_eur_gbp
    short_form = runner.invoke(main, ["-u", "€", "-u", "£", "tests/fixtures/basic.html"])
    assert short_form.exit_code == 0
    assert short_form.output == long_form.output
def test_numberise():
    """numberise parses signed, grouped, currency-prefixed numbers."""
    symbols = ("€", "$")
    with pytest.raises(ValueError):
        numberise("A", ",", ".", symbols)
    cases = [
        ("1", ",", ".", "1"),
        ("1.23", ",", ".", "1.23"),
        ("-50", ",", ".", "-50"),
        ("-5.432", ",", ".", "-5.432"),
        ("€1", ",", ".", "1"),
        ("€1.23", ",", ".", "1.23"),
        ("€-1", ",", ".", "-1"),
        ("€-1.23", ",", ".", "-1.23"),
        ("-1,357.91", ",", ".", "-1357.91"),
        ("1,357.91", ",", ".", "1357.91"),
        ("-1.357,91", ".", ",", "-1357.91"),
        ("1.357,91", ".", ",", "1357.91"),
    ]
    for text, group, decimal, expected in cases:
        assert numberise(text, group, decimal, symbols) == Decimal(expected)
| StarcoderdataPython |
# -*- coding: utf-8 -*-
"""
识别图像的类,为了快速进行多次识别可以调用此类下面的方法:
R = Recognizer(image_height, image_width, max_captcha)
for i in range(10):
r_img = Image.open(str(i) + ".jpg")
t = R.rec_image(r_img)
简单的图片每张基本上可以达到毫秒级的识别速度
"""
import tensorflow as tf
import numpy as np
from PIL import Image
from sample import sample_conf
class Recognizer(object):
    """Loads a trained CNN captcha model once so many images can be
    recognized quickly on the same TF1 session (see module docstring)."""

    def __init__(self, image_height, image_width, max_captcha, char_set, model_save_dir):
        # weight/bias initialization scale factors
        self.w_alpha = 0.01
        self.b_alpha = 0.1
        self.image_height = image_height
        self.image_width = image_width
        self.max_captcha = max_captcha          # number of characters per captcha
        self.char_set = char_set                # alphabet the model predicts over
        self.char_set_len = len(self.char_set)
        self.model_save_dir = model_save_dir
        # TF placeholder initialization
        self.X = tf.placeholder(tf.float32, [None, self.image_height * self.image_width])  # feature vector
        self.Y = tf.placeholder(tf.float32, [None, self.max_captcha * self.char_set_len])  # labels
        self.keep_prob = tf.placeholder(tf.float32)  # dropout keep probability
        self.sess = tf.Session()
        self.y_predict = self.model()
        # restore trained weights into the session
        saver = tf.train.Saver()
        saver.restore(self.sess, self.model_save_dir)

    def __del__(self):
        self.sess.close()
        print("session close")

    @staticmethod
    def convert2gray(img):
        """
        Convert an image to grayscale: 3-channel images are reduced with the
        standard luminance weights, single-channel images are returned as-is.
        :param img: numpy array
        :return: numpy array
        """
        if len(img.shape) > 2:
            r, g, b = img[:, :, 0], img[:, :, 1], img[:, :, 2]
            gray = 0.2989 * r + 0.5870 * g + 0.1140 * b
            return gray
        else:
            return img

    def text2vec(self, text):
        """
        Convert a label string to a one-hot encoded vector.
        :param text: str
        :return: numpy.array
        :raises ValueError: if text is longer than max_captcha
        """
        text_len = len(text)
        if text_len > self.max_captcha:
            raise ValueError('验证码最长{}个字符'.format(self.max_captcha))

        vector = np.zeros(self.max_captcha * self.char_set_len)
        for i, ch in enumerate(text):
            idx = i * self.char_set_len + self.char_set.index(ch)
            vector[idx] = 1
        return vector

    def model(self):
        """Build the 3-conv + 2-FC prediction graph and return the logits tensor."""
        x = tf.reshape(self.X, shape=[-1, self.image_height, self.image_width, 1])
        print(">>> input x: {}".format(x))

        # convolution layer 1
        wc1 = tf.get_variable(name='wc1', shape=[3, 3, 1, 32], dtype=tf.float32,
                              initializer=tf.contrib.layers.xavier_initializer())
        bc1 = tf.Variable(self.b_alpha * tf.random_normal([32]))
        conv1 = tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(x, wc1, strides=[1, 1, 1, 1], padding='SAME'), bc1))
        conv1 = tf.nn.max_pool(conv1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
        conv1 = tf.nn.dropout(conv1, self.keep_prob)
        # convolution layer 2
        wc2 = tf.get_variable(name='wc2', shape=[3, 3, 32, 64], dtype=tf.float32,
                              initializer=tf.contrib.layers.xavier_initializer())
        bc2 = tf.Variable(self.b_alpha * tf.random_normal([64]))
        conv2 = tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(conv1, wc2, strides=[1, 1, 1, 1], padding='SAME'), bc2))
        conv2 = tf.nn.max_pool(conv2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
        conv2 = tf.nn.dropout(conv2, self.keep_prob)
        # convolution layer 3
        wc3 = tf.get_variable(name='wc3', shape=[3, 3, 64, 128], dtype=tf.float32,
                              initializer=tf.contrib.layers.xavier_initializer())
        bc3 = tf.Variable(self.b_alpha * tf.random_normal([128]))
        conv3 = tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(conv2, wc3, strides=[1, 1, 1, 1], padding='SAME'), bc3))
        conv3 = tf.nn.max_pool(conv3, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
        conv3 = tf.nn.dropout(conv3, self.keep_prob)
        print(">>> convolution 3: ", conv3.shape)
        next_shape = conv3.shape[1] * conv3.shape[2] * conv3.shape[3]
        # fully connected layer 1
        wd1 = tf.get_variable(name='wd1', shape=[next_shape, 1024], dtype=tf.float32,
                              initializer=tf.contrib.layers.xavier_initializer())
        bd1 = tf.Variable(self.b_alpha * tf.random_normal([1024]))
        dense = tf.reshape(conv3, [-1, wd1.get_shape().as_list()[0]])
        dense = tf.nn.relu(tf.add(tf.matmul(dense, wd1), bd1))
        dense = tf.nn.dropout(dense, self.keep_prob)
        # fully connected layer 2 (output)
        # NOTE(review): this variable is literally named 'name', presumably a
        # typo for 'wout' — but renaming it would break restoring existing
        # checkpoints from model_save_dir, so it must stay; confirm before changing.
        wout = tf.get_variable('name', shape=[1024, self.max_captcha * self.char_set_len], dtype=tf.float32,
                               initializer=tf.contrib.layers.xavier_initializer())
        bout = tf.Variable(self.b_alpha * tf.random_normal([self.max_captcha * self.char_set_len]))
        y_predict = tf.add(tf.matmul(dense, wout), bout)
        return y_predict

    def rec_image(self, img):
        """Recognize one PIL image and return the predicted text."""
        img_array = np.array(img)
        test_image = self.convert2gray(img_array)
        test_image = test_image.flatten() / 255
        # NOTE(review): tf.argmax/tf.reshape add new nodes to the graph on
        # every call; for long-running use this op could be built once in
        # __init__ — confirm before restructuring.
        predict = tf.argmax(tf.reshape(self.y_predict, [-1, self.max_captcha, self.char_set_len]), 2)
        text_list = self.sess.run(predict, feed_dict={self.X: [test_image], self.keep_prob: 1.})
        predict_text = text_list[0].tolist()
        p_text = ""
        for p in predict_text:
            p_text += str(self.char_set[p])
        # return the recognition result
        return p_text
def main():
    """Build a Recognizer from sample_conf and run it on one sample image."""
    cfg = sample_conf
    recognizer = Recognizer(cfg["image_height"], cfg["image_width"],
                            cfg["max_captcha"], cfg["char_set"],
                            cfg["model_save_dir"])
    img = Image.open("./sample/test/2b3n_6915e26c67a52bc0e4e13d216eb62b37.jpg")
    result = recognizer.rec_image(img)
    print(result)
# Run a demo recognition when executed as a script.
if __name__ == '__main__':
    main()
| StarcoderdataPython |
#!/usr/bin/python
import serial
import sys
import time
def main():
    """Read tag lines from the serial port forever, printing each tag and
    the elapsed time since the previous read."""
    # NOTE(review): pyserial timeouts are in seconds, so timeout=5000 is
    # ~83 minutes; confirm whether milliseconds were intended.
    ser = serial.Serial('/dev/ttyACM0', 9600, timeout=5000)
    # The original wrapped this in an outer `while True` that could never
    # advance (the inner loop never exits), and never closed the port.
    try:
        last_time = time.time()
        while True:
            tag = ser.readline().strip()
            new_time = time.time()
            print('%s %s' % (tag, new_time - last_time))
            last_time = new_time
    finally:
        ser.close()
# Start the serial tag reader when executed as a script.
if __name__ == '__main__':
    main()
| StarcoderdataPython |
3207847 | <gh_stars>1-10
"""
Assembles Model
"""
import iomb
import logging as log
from useeiopy.common import modulepath
# Turn on logging to see the build process in the terminal.
iomb.log_all(level=log.INFO)
"""
Class to extend iomb.model to give it a name and modelpath
"""
class Model(object):
    """Wraps an iomb model together with a name and a model directory path."""

    def __init__(self, iomb_model=iomb.model,
                 modelname=None, modelpath=None):
        self.model = iomb_model
        self.name = modelname
        self.path = modelpath

    def add_matrices(self):
        """Attach matrix exports (including DQI matrices) and return self."""
        import iomb.matio as matio
        self.matrices = matio.Matrices(self.model, DQImatrices=True)
        return self
def make(modelname, modelpath):
    """
    Builds a Model
    :param modelname: str with model name
    :param modelpath: str with path
    :return: a useeiopy Model
    """
    prefix = modelpath + modelname
    drc_csv = prefix + "_DRC.csv"
    sat_csv = prefix + "_sat.csv"
    sector_meta_csv = prefix + '_sector_meta_data.csv'
    compartment_meta_csv = modulepath + "data/USEEIO_compartment_meta_data.csv"
    lcia_csv = prefix + "_LCIA.csv"
    # Build model
    iomb_model = iomb.make_model(drc_csv,
                                 [sat_csv],
                                 sector_meta_csv,
                                 compartments_csv=compartment_meta_csv,
                                 ia_tables=[lcia_csv])
    useeiopy_model = Model(iomb_model, modelname, modelpath)
    print("Model assembled.")
    return useeiopy_model
| StarcoderdataPython |
#!/usr/bin/env python
# encoding: utf-8
'''
@author: <NAME>
@license: (C) Copyright @ <NAME>
@contact: <EMAIL>
@file: jianzhi_offer_59.py
@time: 2019/5/9 13:23
@desc:
'''
class TreeNode:
    """A binary tree node holding a value and optional left/right children."""

    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None
class Solution:
    """Decide whether a binary tree is symmetric (a mirror of itself)."""

    def isSymmetrical(self, pRoot):
        """
        Return True if the tree rooted at pRoot is symmetric.

        Fix: an empty tree is trivially symmetric, so it now returns True
        (the original returned False, disagreeing with the conventional
        definition of this problem).
        """
        if not pRoot:
            return True
        return self.compare(pRoot.left, pRoot.right)

    def compare(self, left, right):
        """
        Return True if subtrees `left` and `right` are mirror images.

        Generalized to accept None for either side (the original required
        both nodes to be non-None); behavior for two non-None nodes with
        non-None/None child combinations is unchanged.
        """
        if left is None and right is None:
            return True
        if left is None or right is None:
            return False
        if left.val != right.val:
            return False
        return (self.compare(left.left, right.right) and
                self.compare(left.right, right.left))
| StarcoderdataPython |
4927174 | <reponame>shopkeep/deis
"""
RESTful URL patterns and routing for the Deis API app.
Clusters
========
.. http:get:: /api/clusters/(string:id)/
Retrieve a :class:`~api.models.Cluster` by its `id`.
.. http:delete:: /api/clusters/(string:id)/
Destroy a :class:`~api.models.Cluster` by its `id`.
.. http:get:: /api/clusters/
List all :class:`~api.models.Cluster`\s.
.. http:post:: /api/clusters/
Create a new :class:`~api.models.Cluster`.
Applications
============
.. http:get:: /api/apps/(string:id)/
Retrieve a :class:`~api.models.App` by its `id`.
.. http:delete:: /api/apps/(string:id)/
Destroy a :class:`~api.models.App` by its `id`.
.. http:get:: /api/apps/
List all :class:`~api.models.App`\s.
.. http:post:: /api/apps/
Create a new :class:`~api.models.App`.
Application Release Components
------------------------------
.. http:get:: /api/apps/(string:id)/config/
List all :class:`~api.models.Config`\s.
.. http:post:: /api/apps/(string:id)/config/
Create a new :class:`~api.models.Config`.
.. http:get:: /api/apps/(string:id)/builds/(string:uuid)/
Retrieve a :class:`~api.models.Build` by its `uuid`.
.. http:get:: /api/apps/(string:id)/builds/
List all :class:`~api.models.Build`\s.
.. http:post:: /api/apps/(string:id)/builds/
Create a new :class:`~api.models.Build`.
.. http:get:: /api/apps/(string:id)/releases/(int:version)/
Retrieve a :class:`~api.models.Release` by its `version`.
.. http:get:: /api/apps/(string:id)/releases/
List all :class:`~api.models.Release`\s.
.. http:post:: /api/apps/(string:id)/releases/rollback/
Rollback to a previous :class:`~api.models.Release`.
Application Infrastructure
--------------------------
.. http:get:: /api/apps/(string:id)/containers/(string:type)/(int:num)/
List all :class:`~api.models.Container`\s.
.. http:get:: /api/apps/(string:id)/containers/(string:type)/
List all :class:`~api.models.Container`\s.
.. http:get:: /api/apps/(string:id)/containers/
List all :class:`~api.models.Container`\s.
Application Domains
-------------------
.. http:delete:: /api/apps/(string:id)/domains/(string:hostname)
Destroy a :class:`~api.models.Domain` by its `hostname`
.. http:get:: /api/apps/(string:id)/domains/
List all :class:`~api.models.Domain`\s.
.. http:post:: /api/apps/(string:id)/domains/
Create a new :class:`~api.models.Domain`\s.
Application Actions
-------------------
.. http:post:: /api/apps/(string:id)/scale/
See also
:meth:`AppViewSet.scale() <api.views.AppViewSet.scale>`
.. http:post:: /api/apps/(string:id)/logs/
See also
:meth:`AppViewSet.logs() <api.views.AppViewSet.logs>`
.. http:post:: /api/apps/(string:id)/run/
See also
:meth:`AppViewSet.run() <api.views.AppViewSet.run>`
Application Sharing
===================
.. http:delete:: /api/apps/(string:id)/perms/(string:username)/
Destroy an app permission by its `username`.
.. http:get:: /api/apps/(string:id)/perms/
List all permissions granted to this app.
.. http:post:: /api/apps/(string:id)/perms/
Create a new app permission.
Keys
====
.. http:get:: /api/keys/(string:id)/
Retrieve a :class:`~api.models.Key` by its `id`.
.. http:delete:: /api/keys/(string:id)/
Destroy a :class:`~api.models.Key` by its `id`.
.. http:get:: /api/keys/
List all :class:`~api.models.Key`\s.
.. http:post:: /api/keys/
Create a new :class:`~api.models.Key`.
API Hooks
=========
.. http:post:: /api/hooks/push/
Create a new :class:`~api.models.Push`.
.. http:post:: /api/hooks/build/
Create a new :class:`~api.models.Build`.
.. http:post:: /api/hooks/config/
Retrieve latest application :class:`~api.models.Config`.
Auth
====
.. http:post:: /api/auth/register/
Create a new User.
.. http:delete:: /api/auth/register/
Destroy the logged-in User.
.. http:post:: /api/auth/login
Authenticate for the REST framework.
.. http:post:: /api/auth/logout
Clear authentication for the REST framework.
.. http:get:: /api/generate-api-key/
Generate an API key.
Admin Sharing
=============
.. http:delete:: /api/admin/perms/(string:username)/
Destroy an admin permission by its `username`.
.. http:get:: /api/admin/perms/
List all admin permissions granted.
.. http:post:: /api/admin/perms/
Create a new admin permission.
"""
from __future__ import unicode_literals
from django.conf import settings
from django.conf.urls import include
from django.conf.urls import patterns
from django.conf.urls import url
from api import routers
from api import views
router = routers.ApiRouter()

# Add the generated REST URLs and login/logout endpoint
# Ordering matters: Django matches patterns top-down, so the more specific
# /apps/<id>/... routes must precede the /apps/ base endpoint.
urlpatterns = patterns(
    '',
    url(r'^', include(router.urls)),
    # clusters
    url(r'^clusters/(?P<id>[-_\w]+)/?',
        views.ClusterViewSet.as_view({
            'get': 'retrieve', 'patch': 'partial_update', 'delete': 'destroy'})),
    url(r'^clusters/?',
        views.ClusterViewSet.as_view({'get': 'list', 'post': 'create'})),
    # application release components
    url(r'^apps/(?P<id>{})/config/?'.format(settings.APP_URL_REGEX),
        views.AppConfigViewSet.as_view({'get': 'retrieve', 'post': 'create'})),
    url(r'^apps/(?P<id>{})/builds/(?P<uuid>[-_\w]+)/?'.format(settings.APP_URL_REGEX),
        views.AppBuildViewSet.as_view({'get': 'retrieve'})),
    url(r'^apps/(?P<id>{})/builds/?'.format(settings.APP_URL_REGEX),
        views.AppBuildViewSet.as_view({'get': 'list', 'post': 'create'})),
    url(r'^apps/(?P<id>{})/releases/v(?P<version>[0-9]+)/?'.format(settings.APP_URL_REGEX),
        views.AppReleaseViewSet.as_view({'get': 'retrieve'})),
    url(r'^apps/(?P<id>{})/releases/rollback/?'.format(settings.APP_URL_REGEX),
        views.AppReleaseViewSet.as_view({'post': 'rollback'})),
    url(r'^apps/(?P<id>{})/releases/?'.format(settings.APP_URL_REGEX),
        views.AppReleaseViewSet.as_view({'get': 'list'})),
    # application infrastructure
    url(r'^apps/(?P<id>{})/containers/(?P<type>[-_\w]+)/(?P<num>[-_\w]+)/?'.format(
        settings.APP_URL_REGEX),
        views.AppContainerViewSet.as_view({'get': 'retrieve'})),
    url(r'^apps/(?P<id>{})/containers/(?P<type>[-_\w.]+)/?'.format(settings.APP_URL_REGEX),
        views.AppContainerViewSet.as_view({'get': 'list'})),
    url(r'^apps/(?P<id>{})/containers/?'.format(settings.APP_URL_REGEX),
        views.AppContainerViewSet.as_view({'get': 'list'})),
    # application domains
    url(r'^apps/(?P<id>{})/domains/(?P<domain>[-\._\w]+)/?'.format(settings.APP_URL_REGEX),
        views.DomainViewSet.as_view({'delete': 'destroy'})),
    url(r'^apps/(?P<id>{})/domains/?'.format(settings.APP_URL_REGEX),
        views.DomainViewSet.as_view({'post': 'create', 'get': 'list'})),
    # application actions
    url(r'^apps/(?P<id>{})/scale/?'.format(settings.APP_URL_REGEX),
        views.AppViewSet.as_view({'post': 'scale'})),
    url(r'^apps/(?P<id>{})/logs/?'.format(settings.APP_URL_REGEX),
        views.AppViewSet.as_view({'post': 'logs'})),
    url(r'^apps/(?P<id>{})/run/?'.format(settings.APP_URL_REGEX),
        views.AppViewSet.as_view({'post': 'run'})),
    # apps sharing
    url(r'^apps/(?P<id>{})/perms/(?P<username>[-_\w]+)/?'.format(settings.APP_URL_REGEX),
        views.AppPermsViewSet.as_view({'delete': 'destroy'})),
    url(r'^apps/(?P<id>{})/perms/?'.format(settings.APP_URL_REGEX),
        views.AppPermsViewSet.as_view({'get': 'list', 'post': 'create'})),
    # apps base endpoint (must come after the /apps/<id>/... routes above)
    url(r'^apps/(?P<id>{})/?'.format(settings.APP_URL_REGEX),
        views.AppViewSet.as_view({'get': 'retrieve', 'delete': 'destroy'})),
    url(r'^apps/?',
        views.AppViewSet.as_view({'get': 'list', 'post': 'create'})),
    # key
    url(r'^keys/(?P<id>.+)/?',
        views.KeyViewSet.as_view({
            'get': 'retrieve', 'delete': 'destroy'})),
    url(r'^keys/?',
        views.KeyViewSet.as_view({'get': 'list', 'post': 'create'})),
    # hooks
    url(r'^hooks/push/?',
        views.PushHookViewSet.as_view({'post': 'create'})),
    url(r'^hooks/build/?',
        views.BuildHookViewSet.as_view({'post': 'create'})),
    url(r'^hooks/config/?',
        views.ConfigHookViewSet.as_view({'post': 'create'})),
    # authn / authz
    url(r'^auth/register/?',
        views.UserRegistrationView.as_view({'post': 'create'})),
    url(r'^auth/cancel/?',
        views.UserCancellationView.as_view({'delete': 'destroy'})),
    url(r'^auth/',
        include('rest_framework.urls', namespace='rest_framework')),
    url(r'^generate-api-key/',
        'rest_framework.authtoken.views.obtain_auth_token'),
    # admin sharing
    url(r'^admin/perms/(?P<username>[-_\w]+)/?',
        views.AdminPermsViewSet.as_view({'delete': 'destroy'})),
    url(r'^admin/perms/?',
        views.AdminPermsViewSet.as_view({'get': 'list', 'post': 'create'})),
)
| StarcoderdataPython |
3581474 | import math
import os
from utils.utils import get_logger, is_logging_process
def train_model(cfg, model, train_loader, writer):
    """Run one pass over *train_loader*, optimizing *model*.

    Args:
        cfg: config object providing ``cfg.log.summary_interval``.
        model: training wrapper exposing ``net``, ``feed_data``,
            ``optimize_parameters``, ``log.loss_v``, ``step`` and
            ``optimizer``.
        train_loader: iterable of (input, target) batches.
        writer: optional summary writer with ``logging_with_step``.

    Raises:
        Exception: if the loss exceeds 1e8 or becomes NaN (logging
            process only — other workers keep running, as before).
    """
    logger = get_logger(cfg, os.path.basename(__file__))
    model.net.train()
    for input_, target in train_loader:
        model.feed_data(input=input_, GT=target)
        model.optimize_parameters()
        loss = model.log.loss_v
        model.step += 1
        if is_logging_process() and (loss > 1e8 or math.isnan(loss)):
            # Lazy %-args: the logging module formats only if the record is emitted.
            logger.error("Loss exploded to %.02f at step %d!", loss, model.step)
            raise Exception("Loss exploded")
        if model.step % cfg.log.summary_interval == 0:
            if writer is not None:
                writer.logging_with_step(loss, model.step, "train_loss")
            if is_logging_process():
                logger.info("Train Loss %.04f at step %d", loss, model.step)
                logger.info("Learning rate: %s",
                            model.optimizer.state_dict()['param_groups'][0]['lr'])
| StarcoderdataPython |
from os import chdir as os_chdir
from pathlib import Path
# Run from the script's own directory so relative paths (config files, 'lib')
# resolve regardless of the caller's CWD. Must happen before the imports below.
os_chdir(Path(__file__).resolve().parent)
from sys import path as sys_path
# Make the bundled 'lib' directory importable.
sys_path.append('lib')
import logging
import pyd3ckbase as __
import pyd3ckservice.redis as srv_redis
try:
    _cfg = __.init(__.get_arg_parser())
    log = logging.getLogger(__name__)
except __.Err as _e:
    __.die(_e)
# Static redis connection settings merged into the parsed config.
_cfg['redis'] = {
    # https://redis-py.readthedocs.io/en/latest/index.html#redis.StrictRedis.from_url
    'uri': 'redis://127.0.0.1:6379',
    'cfg': {
        # 'encoding': 'utf-8' # default is utf-8
    }
}
try:
    log.info('Initializing redis')
    _mgc = srv_redis.init(_cfg['redis']['uri'], _cfg['redis']['cfg'])
    log.info('Connection to %s successfully tested', _cfg['redis']['uri'])
except AttributeError:
    # srv_redis.init touches redis-py attributes; missing package surfaces here.
    __.die('Please install redis-py with "pip install redis"')
except __.Err as _e:
    __.die(_e)
| StarcoderdataPython |
1739511 | import sys
__all__ = ['register_after_fork']

if sys.platform == 'win32' or sys.version_info < (3, 7):
    # os.register_at_fork is unavailable here; fall back to multiprocessing's
    # hook, which only fires for children spawned via the multiprocessing module.
    import multiprocessing.util as _util

    def _register(func):
        def wrapper(arg):
            # multiprocessing passes the registrant object to the callback;
            # our callers expect a no-argument callable, so drop it.
            func()
        _util.register_after_fork(_register, wrapper)
else:
    import os

    def _register(func):
        # Native hook: runs in the child after any fork, including os.fork().
        os.register_at_fork(after_in_child=func)

def register_after_fork(func):
    """Register a callable to be executed in the child process after a fork.
    Note:
        In python < 3.7 this will only work with processes created using the
        ``multiprocessing`` module. In python >= 3.7 it also works with
        ``os.fork()``.
    Args:
        func (function): Function taking no arguments to be called in the child after fork
    """
    _register(func)
| StarcoderdataPython |
11391528 | from PySide2 import QtCore, QtWidgets, QtGui
from element.LineEdit import LineEdit
from element.PushButton import DefaultPushButton
class FileEdit(QtWidgets.QWidget):
    """A line edit paired with a Browse button for picking a directory."""

    def __init__(self, parent=None):
        super().__init__(parent)
        self.label = LineEdit("")
        self.button = DefaultPushButton("Browse")
        self.button.clicked.connect(self.browse)
        row = QtWidgets.QHBoxLayout()
        row.addWidget(self.label)
        row.addWidget(self.button)
        row.setMargin(0)
        self.layout = row
        self.setLayout(row)

    def browse(self):
        """Open a directory picker and, if confirmed, show the path in the edit."""
        chosen = QtWidgets.QFileDialog.getExistingDirectory(
            self, self.tr("Find Files"), QtCore.QDir.currentPath())
        if chosen != '':
            self.label.setText(chosen)
            self.label.setSelection(0, 0)
| StarcoderdataPython |
3395159 | <reponame>SpotlightKid/jackclient-python
#!/usr/bin/env python3
"""Display information about time, transport state et cetera.
This is somewhat modeled after the "showtime.c" example of JACK.
https://github.com/jackaudio/example-clients/blob/master/showtime.c
https://github.com/jackaudio/jack2/blob/master/example-clients/showtime.c
"""
from contextlib import suppress
import time
import sys
import jack
# Connect to the JACK server up front; without a running server there is
# nothing to show, so exit with a hint instead of a traceback.
try:
    client = jack.Client('showtime')
except jack.JackError:
    sys.exit('JACK server not running?')
def showtime():
    """Print one tab-separated line describing the current JACK transport state.

    Optional fields (BBT, timecode, BBT offset, video info) are present in the
    position dict only when the transport master provides them, hence the
    suppress(KeyError) guards around each.
    """
    state, pos = client.transport_query()
    items = []
    items.append('frame = {} frame_time = {} usecs = {} '.format(
        pos['frame'], client.frame_time, pos['usecs']))
    items.append('state: {}'.format(state))
    with suppress(KeyError):
        items.append('BBT: {bar:3}|{beat}|{tick:04}'.format(**pos))
    with suppress(KeyError):
        items.append('TC: ({frame_time:.6f}, {next_time:.6f})'.format(**pos))
    with suppress(KeyError):
        items.append('BBT offset: ({bbt_offset})'.format(**pos))
    with suppress(KeyError):
        items.append(
            'audio/video: ({audio_frames_per_video_frame})'.format(**pos))
    with suppress(KeyError):
        video_offset = pos['video_offset']
        if video_offset:
            items.append(' video@: ({})'.format(video_offset))
        else:
            items.append(' no video')
    print(*items, sep='\t')
@client.set_shutdown_callback
def shutdown(status, reason):
    # Invoked by JACK when the server shuts us down; abort the polling loop.
    sys.exit('JACK shut down, exiting ...')

with client:
    try:
        while True:
            # NOTE(review): 0.00002 s (20 µs) is an extremely tight poll
            # interval and effectively busy-loops — confirm this is intended.
            time.sleep(0.00002)
            showtime()
    except KeyboardInterrupt:
        print('signal received, exiting ...', file=sys.stderr)
        sys.exit(0)
| StarcoderdataPython |
1753729 | import json
import os
from pathlib import Path
import shutil
from typing import Any
from typing import Dict
from typing import List
from typing import Optional
from attr import dataclass
@dataclass
class PropertyInfo:
    """Metadata describing one property of a FHIR schema definition."""

    # Property name exactly as it appears in the schema.
    Name: str
    # JSON-schema primitive type; None when only a $ref is present.
    Type: Optional[str]
    # Validation regex, when the schema declares one.
    Pattern: Optional[str]
    # Name of the referenced definition backing this property, if any.
    UnderlyingDataType: Optional[str]
    # True when this underlying type was not already seen on the same resource.
    IsUniqueUnderlyingDataType: bool
    Description: Optional[str]
    # Classification of the underlying type (resource / simple / complex).
    IsResourceType: bool
    IsSimpleType: bool
    IsComplexType: bool
    # True when extensions are suppressed for the owning resource.
    HideExtension: bool

    def __str__(self) -> str:
        return f"property_name:{self.Name}, type={self.Type}, underlying_type={self.UnderlyingDataType}"
@dataclass
class ResourceInfo:
    """Metadata describing a FHIR schema definition (passed to the template)."""

    # Definition name exactly as it appears in the schema.
    Name: str
    # JSON-schema type of the definition, if declared.
    Type: Optional[str]
    Description: Optional[str]
def get_parent_properties(
    definitions: Dict[str, Any], parent_resource_reference: Dict[str, Any]
) -> Dict[str, Any]:
    """Recursively collect the ``properties`` of a referenced definition.

    Walks the ``$ref`` in *parent_resource_reference*, merges the properties
    inherited from that definition's own ancestors (its ``allOf`` refs) and
    then overlays the properties declared directly on it.

    Args:
        definitions: the ``definitions`` mapping of the FHIR schema.
        parent_resource_reference: a dict containing a ``$ref`` like
            ``"#/definitions/DomainResource"``.

    Returns:
        Merged property-name -> property-schema mapping (possibly empty).
    """
    parent_ref: str = parent_resource_reference["$ref"]
    parent_resource_name: str = parent_ref.split("/")[-1]
    parent_resource = definitions[parent_resource_name]
    # Robustness fix: a definition without "allOf" previously raised KeyError;
    # it simply has nothing to inherit.
    all_of_clauses = parent_resource.get("allOf", [])
    parent_properties1: Dict[str, Any] = {}
    # First pull in ancestors (depth-first), so direct properties win on clash.
    for ref_clause in (c for c in all_of_clauses if "$ref" in c):
        parent_properties1.update(
            get_parent_properties(
                definitions=definitions,
                parent_resource_reference=ref_clause,
            )
        )
    # Now add in any properties declared directly on this definition.
    for property_container in (c for c in all_of_clauses if "properties" in c):
        parent_properties1.update(property_container["properties"])
    return parent_properties1
def main() -> int:
    """Generate Python classes from the FHIR JSON schema.

    Reads ``fhir.schema.json`` next to this file, classifies every definition
    as resource / simple type / complex type, and renders one Python module
    per definition (via ``template.jinja2``) into the ``resources``,
    ``simple_types`` and ``complex_types`` folders, which are wiped first.
    Returns 0 on success.
    """
    data_dir: Path = Path(__file__).parent.joinpath("./")
    with open(data_dir.joinpath("fhir.schema.json"), "r+") as file:
        contents = file.read()
    # clean out old stuff
    resources_folder = data_dir.joinpath("resources")
    if os.path.exists(resources_folder):
        shutil.rmtree(resources_folder)
    os.mkdir(resources_folder)
    resources_folder.joinpath("__init__.py").touch()
    complex_types_folder = data_dir.joinpath("complex_types")
    if os.path.exists(complex_types_folder):
        shutil.rmtree(complex_types_folder)
    os.mkdir(complex_types_folder)
    complex_types_folder.joinpath("__init__.py").touch()
    simple_types_folder = data_dir.joinpath("simple_types")
    if os.path.exists(simple_types_folder):
        shutil.rmtree(simple_types_folder)
    os.mkdir(simple_types_folder)
    simple_types_folder.joinpath("__init__.py").touch()
    fhir_schema = json.loads(contents)
    # NOTE(review): resources_dict is never populated anywhere in this
    # function, so the "resources" branches below never fire — confirm.
    resources_dict: Dict[str, str] = {}
    definitions = fhir_schema["definitions"]
    # print(definitions)
    # print(type(definitions))
    # for key, value in definitions.items():
    #     print(f"{key}:{value}")
    # print(definitions["Patient"])
    simple_types: List[str] = [
        "number",
        "array",
    ]  # number is not defined in fhir schema
    # extensions_allowed_for_resources: List[str] = ["Patient", "Identifier"]
    extensions_blocked_for_resources: List[str] = []
    properties_blocked: List[str] = ["modifierExtension"]
    complex_types: List[str] = []
    resource_types: List[str] = []
    # first pass, decide which items are resources or simple_types or complex_types
    # have to do two passes since an item at the beginning of the file may refer to an item at the end
    for resource_name, resource in definitions.items():
        # resource_name: str = "Patient"
        # resource = definitions[resource_name]
        if resource_name in []:
            continue
        if resource_name in resources_dict:
            print(f"Added Resource: {resource_name}")
            resource_types.append(resource_name.lower())
        elif "properties" not in resource and "allOf" not in resource:
            print(f"Added Simple Type: {resource_name}")
            simple_types.append(resource_name.lower())
        else:
            print(f"Added Complex Type: {resource_name}")
            complex_types.append(resource_name.lower())
    # 2nd Pass
    # Create the entities
    for resource_name, resource in definitions.items():
        # resource_name: str = "Patient"
        # resource = definitions[resource_name]
        if resource_name in []:
            continue
        print(f"Processing {resource_name}")
        # concat properties from allOf
        parent_resource_references = (
            [r for r in resource["allOf"] if "$ref" in r] if "allOf" in resource else []
        )
        parent_properties: Dict[str, Any] = {}
        # find the properties from parent resources and include those
        for parent_resource_reference in parent_resource_references:
            parent_properties.update(
                get_parent_properties(
                    definitions=definitions,
                    parent_resource_reference=parent_resource_reference,
                )
            )
        # Replace `resource` with its own "properties" clause (or {} when it
        # has no allOf); the original definition dict is no longer needed.
        resource = (
            [r for r in resource["allOf"] if "properties" in r][0]
            if "allOf" in resource
            else {}
        )
        resource_type: Optional[str] = resource["type"] if "type" in resource else None
        resource_description: Optional[str] = (
            resource["description"] if "description" in resource else None
        )
        # Parent properties first so the resource's own entries override them.
        properties: Dict[str, Any] = parent_properties
        properties.update(resource["properties"] if "properties" in resource else {})
        properties_info: List[PropertyInfo] = []
        # print("---- Properties ----")
        # Underscore-prefixed keys are schema metadata, not real properties.
        for key, value in {
            k: v for k, v in properties.items() if not k.startswith("_")
        }.items():
            property_name = key
            description: str = value["description"]
            # items: Optional[Dict[str, str]
            # ] = value["items"] if "items" in value else None
            type_: Optional[str] = value["type"] if "type" in value else None
            # Resolve the reference: direct $ref, or the $ref/type of the
            # array's item schema for "array" properties.
            ref_: Optional[str] = (
                value["$ref"]
                if "$ref" in value and type_ != "array"
                else value["items"]["$ref"]
                if "items" in value and "$ref" in value["items"]
                else value["items"]["type"]
                if "items" in value and "type" in value["items"]
                else None
            )
            pattern = value["pattern"] if "pattern" in value else None
            # print(f"{key}:{value}")
            # type_ == None means string
            reference_type: Optional[str] = (
                ref_[ref_.rfind("/") + 1 :] if ref_ else None
            )
            if not type_ and not reference_type:
                type_ = "string"  # typically an enum
            # print(f"property_name:{property_name}, type={type_}, ref={ref_}, reference_type={reference_type}")
            property_info = PropertyInfo(
                Name=property_name,
                Type=type_,
                Pattern=pattern,
                UnderlyingDataType=reference_type,
                IsUniqueUnderlyingDataType=not any(
                    [pi.UnderlyingDataType == reference_type for pi in properties_info]
                ),
                Description=description,
                IsResourceType=reference_type.lower() in resources_dict
                if reference_type
                else False,
                IsSimpleType=reference_type.lower() in simple_types
                if reference_type
                else (type_.lower() in simple_types if type_ else False),
                IsComplexType=reference_type.lower() in complex_types
                if reference_type
                else False,
                HideExtension=reference_type.lower() == "extension"
                and resource_name in extensions_blocked_for_resources
                if reference_type
                else False,
            )
            if resource_name.lower() == "extension":
                # have to skip a few properties or Spark runs out of memory
                allowed_properties = [
                    "id",
                    "url",
                    "extension",
                    "valueBoolean",
                    "valueCode",
                    "valueDate",
                    "valueDateTime",
                    "valueDecimal",
                    "valueId",
                    "valueInteger",
                    "valuePositiveInt",
                    "valueString",
                    "valueTime",
                    "valueUnsignedInt",
                    "valueUri",
                    "valueUrl",
                    "valueCodeableConcept",
                    "valueCoding",
                    "valueCount",
                    "valueIdentifier",
                    "valueMoney",
                    "valuePeriod",
                    "valueQuantity",
                    "valueRange",
                    "valueReference",
                ]
                if property_name in allowed_properties:
                    properties_info.append(property_info)
            elif property_name not in properties_blocked:
                properties_info.append(property_info)
            # assert property_info.IsResourceType or property_info.IsSimpleType or property_info.IsComplexType, \
            #     f"{resource_name}.{property_name}[{type_}] reference_type:{reference_type}"
            # print(properties_info[-1])
        # print("")
        # use template to generate new code files
        with open(data_dir.joinpath("template.jinja2"), "r") as file:
            template_contents: str = file.read()
        from jinja2 import Template
        template = Template(template_contents, trim_blocks=True, lstrip_blocks=True)
        result: str = template.render(
            resource=ResourceInfo(
                Name=resource_name,
                Type=resource_type,
                Description=resource_description,
            ),
            properties=properties_info,
        )
        if resource_name in resources_dict:
            file_path = resources_folder.joinpath(f"{resource_name.lower()}.py")
            print(f"Writing resource: {resource_name.lower()} to {file_path}...")
            # print(result)
            with open(file_path, "w") as file2:
                file2.write(result)
        elif "properties" not in resource and "oneOf" not in resource:
            file_path = simple_types_folder.joinpath(f"{resource_name.lower()}.py")
            print(
                f"Writing simple_types_folder: {resource_name.lower()} to {file_path}..."
            )
            with open(file_path, "w") as file2:
                file2.write(result)
        else:
            file_path = complex_types_folder.joinpath(f"{resource_name.lower()}.py")
            print(
                f"Writing complex_type: {resource_name.lower()} to {file_path}..."
            )
            with open(file_path, "w") as file2:
                file2.write(result)
        # print(result)
    return 0
if __name__ == "__main__":
exit(main())
| StarcoderdataPython |
12842835 | <filename>repos/build_pipeline/lambdas/extract_metrics/extract_metrics.py
"""
This Lambda parses the output of ModelQualityStep to extract the value of a specific metric
"""
import json
import boto3
sm_client = boto3.client("sagemaker")
s3 = boto3.resource('s3')
def lambda_handler(event, context):
    """Extract one metric value from a model-quality report stored on S3.

    Args:
        event: dict with 'model_quality_report_uri' (an s3:// URI) and
            'metric_name' (a key under 'binary_classification_metrics').
        context: Lambda context (unused).

    Returns:
        dict with statusCode, a body message, and the JSON-encoded
        metric value under 'metric_value'.
    """
    # model quality report URI
    model_quality_report_uri = event['model_quality_report_uri']
    metric_name = event['metric_name']
    o = s3.Object(*split_s3_path(model_quality_report_uri))
    # Fetch and parse the report exactly once; the original issued two
    # identical S3 GETs and discarded the first parse.
    metrics = json.load(o.get()['Body'])
    return {
        "statusCode": 200,
        "body": json.dumps(f"{metric_name} extracted"),
        "metric_value": json.dumps(metrics['binary_classification_metrics'][metric_name]['value'])
    }
def split_s3_path(s3_path):
    """Split an 's3://bucket/key...' URI into (bucket, key).

    The key is '' when the URI names only a bucket.
    """
    bucket, _, key = s3_path.replace("s3://", "").partition("/")
    return bucket, key
| StarcoderdataPython |
5099361 | #!/usr/bin/env python3
# encoding: utf-8
# @Time : 2019/5/9 14:18
# @Author : <NAME>
import glob
import os
import numpy as np
import nibabel as nib
import torch
from torch.utils.data import Dataset, DataLoader
import random
class Brats2018(Dataset):
    """BraTS 2018 patient dataset yielding (modalities, WT/TC/ET mask) tensors.

    Each item loads the configured modality volumes plus the segmentation for
    one patient directory, applies random crop/flip augmentation (train) or a
    center crop (eval), and returns float tensors of shape [C, h, w, d].
    """

    def __init__(self, patients_dir, crop_size, modes, train=True):
        # patients_dir: list of per-patient directories; crop_size: (h, w, d);
        # modes: modality suffixes (e.g. "t1", "flair") to load.
        self.patients_dir = patients_dir
        self.modes = modes
        self.train = train
        self.crop_size = crop_size

    def __len__(self):
        return len(self.patients_dir)

    def __getitem__(self, index):
        patient_dir = self.patients_dir[index]
        volumes = []
        # The segmentation volume is loaded last so it can be split off below.
        modes = list(self.modes) + ['seg']
        for mode in modes:
            patient_id = os.path.split(patient_dir)[-1]
            volume_path = os.path.join(patient_dir, patient_id + "_" + mode + '.nii')
            volume = nib.load(volume_path).get_data()
            if not mode == "seg":
                volume = self.normlize(volume)  # [0, 1.0]
            volumes.append(volume)  # [h, w, d]
        seg_volume = volumes[-1]
        volumes = volumes[:-1]
        volume, seg_volume = self.aug_sample(volumes, seg_volume)
        # Label semantics: 1 = necrotic/non-enhancing tumor core,
        # 4 = enhancing (active) tumor, 2 = peritumoral edema.
        # WT (whole tumor) = any label, TC (tumor core) = 1 or 4, ET = 4.
        wt_volume = seg_volume > 0
        tc_volume = np.logical_or(seg_volume == 4, seg_volume == 1)
        et_volume = (seg_volume == 4)
        seg_volume = [wt_volume, tc_volume, et_volume]
        seg_volume = np.concatenate(seg_volume, axis=0).astype("float32")
        return (torch.tensor(volume.copy(), dtype=torch.float),
                torch.tensor(seg_volume.copy(), dtype=torch.float))

    def aug_sample(self, volumes, mask):
        """
        Args:
            volumes: list of array, [h, w, d]
            mask: array [h, w, d], segmentation volume
        Ret: x, y: [channel, h, w, d]
        """
        x = np.stack(volumes, axis=0)  # [N, H, W, D]
        y = np.expand_dims(mask, axis=0)  # [channel, h, w, d]
        if self.train:
            # crop volume, then flip each spatial axis independently with p=0.5
            x, y = self.random_crop(x, y)
            if random.random() < 0.5:
                x = np.flip(x, axis=1)
                y = np.flip(y, axis=1)
            if random.random() < 0.5:
                x = np.flip(x, axis=2)
                y = np.flip(y, axis=2)
            if random.random() < 0.5:
                x = np.flip(x, axis=3)
                y = np.flip(y, axis=3)
        else:
            x, y = self.center_crop(x, y)
        return x, y

    def random_crop(self, x, y):
        """
        Args:
            x: 4d array, [channel, h, w, d]
        """
        crop_size = self.crop_size
        height, width, depth = x.shape[-3:]
        # NOTE(review): the `- 1` upper bound excludes the last valid crop
        # offset and makes randint raise when a dimension equals the crop
        # size — confirm volumes are always strictly larger than crop_size.
        sx = random.randint(0, height - crop_size[0] - 1)
        sy = random.randint(0, width - crop_size[1] - 1)
        sz = random.randint(0, depth - crop_size[2] - 1)
        crop_volume = x[:, sx:sx + crop_size[0], sy:sy + crop_size[1], sz:sz + crop_size[2]]
        crop_seg = y[:, sx:sx + crop_size[0], sy:sy + crop_size[1], sz:sz + crop_size[2]]
        return crop_volume, crop_seg

    def center_crop(self, x, y):
        # Deterministic crop around the volume center (used for evaluation).
        crop_size = self.crop_size
        height, width, depth = x.shape[-3:]
        sx = (height - crop_size[0] - 1) // 2
        sy = (width - crop_size[1] - 1) // 2
        sz = (depth - crop_size[2] - 1) // 2
        crop_volume = x[:, sx:sx + crop_size[0], sy:sy + crop_size[1], sz:sz + crop_size[2]]
        crop_seg = y[:, sx:sx + crop_size[0], sy:sy + crop_size[1], sz:sz + crop_size[2]]
        return crop_volume, crop_seg

    def normlize(self, x):
        # Min-max scale to [0, 1]. NOTE(review): divides by zero on a
        # constant volume — confirm inputs always have dynamic range.
        return (x - x.min()) / (x.max() - x.min())
def split_dataset(data_root, nfold=4, seed=42, select=0):
    """Shuffle BraTS patient directories and split them into CV folds.

    Args:
        data_root: dataset root containing ``*GG/Brats18*`` patient dirs
            (matches both HGG and LGG grade folders).
        nfold: number of cross-validation folds.
        seed: RNG seed so the split is reproducible.
        select: index of the fold used for validation; the rest train.

    Returns:
        (train_patients_list, val_patients_list) of directory paths.
    """
    patients_dir = glob.glob(os.path.join(data_root, "*GG", "Brats18*"))
    n_patients = len(patients_dir)
    print(f"total patients: {n_patients}")
    pid_idx = np.arange(n_patients)
    np.random.seed(seed)
    np.random.shuffle(pid_idx)
    # array_split tolerates n_patients not divisible by nfold
    # (np.split raised ValueError in that case).
    n_fold_list = np.array_split(pid_idx, nfold)
    print(f"split {len(n_fold_list)} folds and every fold have {len(n_fold_list[0])} patients")
    val_patients_list = []
    train_patients_list = []
    for i, fold in enumerate(n_fold_list):
        target = val_patients_list if i == select else train_patients_list
        for idx in fold:
            target.append(patients_dir[idx])
    print(f"train patients: {len(train_patients_list)}, test patients: {len(val_patients_list)}")
    return train_patients_list, val_patients_list
def make_data_loaders(cfg):
    """Build the train/eval DataLoaders described by *cfg*.

    Returns:
        dict with 'train' (shuffled) and 'eval' (ordered) DataLoaders.
    """
    # Bug fix: the original passed SELECT_FOLD as the third positional
    # argument, which is `seed` in split_dataset's signature
    # (data_root, nfold, seed, select) — so fold 0 was always validated.
    train_list, val_list = split_dataset(
        cfg.DATASET.DATA_ROOT,
        nfold=cfg.DATASET.NUM_FOLDS,
        select=cfg.DATASET.SELECT_FOLD,
    )
    train_ds = Brats2018(train_list, crop_size=cfg.DATASET.INPUT_SHAPE, modes=cfg.DATASET.USE_MODES, train=True)
    val_ds = Brats2018(val_list, crop_size=cfg.DATASET.INPUT_SHAPE, modes=cfg.DATASET.USE_MODES, train=False)
    loaders = {}
    loaders['train'] = DataLoader(train_ds, batch_size=cfg.DATALOADER.BATCH_SIZE,
                                  num_workers=cfg.DATALOADER.NUM_WORKERS,
                                  pin_memory=True,
                                  shuffle=True)
    loaders['eval'] = DataLoader(val_ds, batch_size=cfg.DATALOADER.BATCH_SIZE,
                                 num_workers=cfg.DATALOADER.NUM_WORKERS,
                                 pin_memory=True,
                                 shuffle=False)
    return loaders
if __name__ == "__main__":
from config import _C as cfg
train_list, val_list = split_dataset(cfg.DATASET.DATA_ROOT, cfg.DATASET.NUM_FOLDS, cfg.DATASET.SELECT_FOLD)
train_ds = Brats2018(train_list, crop_size=cfg.DATASET.INPUT_SHAPE, modes=cfg.DATASET.USE_MODES, train=True)
val_ds = Brats2018(val_list, crop_size=cfg.DATASET.INPUT_SHAPE, modes=cfg.DATASET.USE_MODES, train=False)
for i in range(len(train_ds)):
x, y = train_ds[i]
volume = (x.numpy()[0] * 255).astype('uint8')
seg = (np.sum(y.numpy(), axis=0)).astype('uint8')
volume = nib.Nifti1Image(volume, np.eye(4))
seg = nib.Nifti1Image(seg, np.eye(4))
nib.save(volume, 'test'+str(i)+'_volume.nii')
nib.save(seg, 'test' + str(i) + '_seg.nii') | StarcoderdataPython |
3510225 | <gh_stars>1-10
import configparser
import datetime
import logging
import os
import pyspark
from pyspark import SparkConf
from pyspark.sql import SparkSession
from pyspark.sql.functions import col
from pyspark.sql.functions import explode
from pyspark.sql.functions import lower
from pyspark.sql.functions import regexp_replace
from pyspark.sql.functions import split
from pyspark.sql.functions import trim
from pyspark.sql.types import StringType
# config
path_name_file = os.path.basename(__file__)
path_directory = os.path.dirname(os.path.abspath(__file__))
path_config = ''.join(path_directory + '/../configs/etl_config.ini')
config = configparser.ConfigParser()
config.read(path_config)
PATH_DOCS = config['DOC']['PATH_DOCS']
PATH_DICT = config['DOC']['PATH_DICT']
PATH_INDEX = config['DOC']['PATH_INDEX']
LOG_FILE = config['LOG']['LOG_FILE']
LOG_FILEMODE = config['LOG']['FILEMODE']
# log
log_format = '%(asctime)s - %(levelname)s: %(message)s'
date_format = '%d-%b-%y %H:%M:%S'
logging.basicConfig(filename=LOG_FILE,
filemode=LOG_FILEMODE,
format=log_format,
datefmt=date_format)
start_time = datetime.datetime.now()
logging.warning(f'Start: {start_time}')
class JobMapWordIdDocId(object):
def __init__(self,
path_files: str,
path_index: str,
path_dict: str,
file_name: str,
num_partition: int):
path = ''.join(path_files + file_name)
self.__file_name = file_name
conf = SparkConf()
conf.setAll(
[
('spark.app.name', 'Challenge Data Engineer'),
('spark.driver.cores', '4'),
('spark.executor.cores', '4'),
('spark.driver.maxResultSize', '10g'),
('spark.executor.memory', '4g'),
('spark.executor.memoryOverhead ', '4g'),
('spark.driver.memory', '10g'),
('spark.local.dir', PATH_INDEX),
('spark.driver.extraJavaOptions', '-Xmx1024m'),
('spark.memory.offHeap.enabled', 'true'),
('spark.memory.offHeap.size', '20g')
]
)
self.__spark = SparkSession \
.builder \
.config(conf=conf) \
.getOrCreate()
self.__df_dict = self.__spark \
.read \
.parquet(path_dict) \
.repartition(numPartitions=num_partition)
self.__df_doc = self.__spark \
.read \
.text(path)
self.__df_wordid_docid = self.__spark \
.read \
.parquet(path_index) \
.rdd \
.unpersist() \
.repartition(numPartitions=1000)
self.__df_wordid_docid = self.__df_wordid_docid.toDF()
logging.warning(f"Processing doc: {path}")
def __del__(self):
self.__spark.catalog.clearCache()
self.__spark.stop()
end_time = datetime.datetime.now()
logging.warning(f'Total time: {end_time - start_time}')
def clean_data(self, list_words_col: str) -> pyspark.sql.DataFrame:
"""Pre-processing data
Processing executed in function:
- Lower case
- Words start with letters or whitespace
- Remove whitespaces into start and final words
- Remove rows empty
- Transform each row in list
:Args:
:param list_words_col: column's name of Dataframe
:Returns:
:return: Dataframe with a word list in each row
:Samples:
+--------------------+
| value|
+--------------------+
|[over, in, one, n...|
"""
self.__df_doc = self.__df_doc \
.withColumn(list_words_col, lower(col(list_words_col))) \
.withColumn(list_words_col, regexp_replace(str=col(list_words_col),
pattern='[^a-z ]',
replacement='')) \
.withColumn(list_words_col, trim(col(list_words_col))) \
.filter(self.__df_doc[list_words_col] != "") \
.withColumn(list_words_col, split(list_words_col, ' '))
return self
def generate_word_by_row(self, col_words: str) -> pyspark.sql.DataFrame:
"""
Params:
:param col_words: a column in df contains word list in each row
Returns:
:return: df with all words split by row
Samples:
+-----------+
| col|
+-----------+
| project|
"""
self.__df_doc = self.__df_doc \
.select(explode(self.__df_doc[col_words]))
return self
def get_word_id(self, col_words: str, join_operation: str,
col_words_dict: str) -> pyspark.sql.DataFrame:
"""
Params:
:param col_words: a column in df contains word list in each row
:param join_operation: type join
:param col_words_dict: name column of words
Returns:
:return: 'pyspark.sql.dataframe.DataFrame' with all word id
Samples:
+------+
| key|
+------+
|101780|
"""
self.__df_doc = self.__df_doc \
.join(self.__df_dict,
on=self.__df_doc[col_words_dict]
== self.__df_dict[col_words],
how=join_operation) \
.drop(col_words) \
.drop(col_words_dict)
return self
    def generate_wordid_docid(self, col_word_key: str):
        """Build (file_name, word-id) pairs and store them as a DataFrame.

        Follows:
        https://spark.apache.org/docs/latest/api/python/pyspark.html?highlight=flatmap

        :param col_word_key: column holding the word-id keys
        :return: self (for chaining); ``self.__df_doc`` now has columns
            ``_1`` (document) and ``_2`` (word-id row), e.g.::

                +---+--------+
                | _1|      _2|
                +---+--------+
                | 27|[101780]|

        NOTE(review): ``collect()`` pulls every key to the driver --
        acceptable only while documents are small.
        """
        list_key_by_doc = self.__df_doc.select(col_word_key).collect()
        doc_rdd = self.__spark \
            .sparkContext \
            .parallelize([(self.__file_name, list_key_by_doc)])

        # Identity mapper: flatMapValues unpacks the key list into one
        # (file_name, key) pair per element.
        def f(doc_rdd): return doc_rdd
        self.__df_doc = doc_rdd.flatMapValues(f).toDF()
        return self
    def prepare_df(self,
                   name_original_col: str,
                   new_name_key: str,
                   new_name_doc: str):
        """Normalise the (doc, word-id) frame into doc_id/word_id columns.

        :param name_original_col: column to clean and rename (e.g. '_2')
        :param new_name_key: final name for the word-id column
        :param new_name_doc: final name for the document column
        :return: self (for chaining); resulting shape, e.g.::

                +------+-------+
                |doc_id|word_id|
                +------+-------+
                |    27|      0|
                |    27|      1|
        """
        # Cast Row values to strings, strip everything but digits, order,
        # de-duplicate, then rename to the requested column names.
        self.__df_doc = self.__df_doc \
            .withColumn(name_original_col,
                        self.__df_doc[name_original_col].cast(StringType())) \
            .withColumn(name_original_col,
                        regexp_replace(str=col(name_original_col),
                                       pattern='[^0-9]',
                                       replacement='')) \
            .orderBy(name_original_col, ascending=True) \
            .dropDuplicates([name_original_col]) \
            .withColumnRenamed('_1', new_name_doc) \
            .withColumnRenamed(name_original_col, new_name_key)
        print('\nReading data')
        self.__df_doc.show(n=2)
        return self
def append_df(self):
self.__df_wordid_docid = self.__df_doc \
.union(self.__df_wordid_docid)
return self
    def storage_data(self, path_to_storage: str, mode: str):
        """Persist the word-id/doc-id index as Parquet.

        :param path_to_storage: destination path
        :param mode: write mode when data already exists (e.g. 'append')
        :return: result of ``DataFrame.write.parquet`` (None)
        """
        self.__df_wordid_docid.show()
        logging.warning(f'total words = {self.__df_wordid_docid.count()}')
        return self.__df_wordid_docid.write.parquet(path=path_to_storage,
                                                    mode=mode)
def main():
    """Index every document under PATH_DOCS into a Parquet doc/word index."""
    # prepare dataframe docid_wordid
    spark = pyspark.sql.SparkSession(pyspark.SparkContext())
    # Seed the index with a dummy ('0', '0') row so later appends have a
    # schema-compatible Parquet target.
    df_index = spark.createDataFrame(data=[('0', '0')],
                                     schema=('doc_id', 'word_id'))
    df_index.write.parquet(path=PATH_INDEX, mode='append')
    list_docs = os.listdir(PATH_DOCS)
    for doc in list_docs:
        # One fluent pipeline per document: clean -> explode words ->
        # join with the dictionary -> build (doc, word-id) pairs ->
        # normalise columns -> append to the index -> persist.
        JobMapWordIdDocId(path_files=PATH_DOCS,
                          file_name=doc,
                          path_dict=PATH_DICT,
                          path_index=PATH_INDEX,
                          num_partition=1) \
            .clean_data(list_words_col='value') \
            .generate_word_by_row(col_words='value') \
            .get_word_id(col_words='value',
                         join_operation='right',
                         col_words_dict='col') \
            .generate_wordid_docid(col_word_key='key') \
            .prepare_df(name_original_col='_2',
                        new_name_doc='doc_id',
                        new_name_key='word_id') \
            .append_df()\
            .storage_data(path_to_storage=PATH_INDEX,
                          mode='append')


if __name__ == '__main__':
    main()
| StarcoderdataPython |
3344131 | from .mini_zinc import *
| StarcoderdataPython |
3595290 | <reponame>Jac-Lazza/termiko
#author: n01
"""
Useful keyboard keys.

WASD letters are used instead of the ANSI arrow escape sequences for
cross-terminal portability (the escape variants are kept commented out).
"""
# ANSI arrow escape sequences, kept for reference:
# UP = "\x1b[A"
# DOWN = "\x1b[B"
# RIGHT = "\x1b[C"
# LEFT = "\x1b[D"
UP = "W"
LEFT = "A"
DOWN = "S"
RIGHT = "D"
ENTER = "\r"
# ARROW_PREP = ('\x1b', '[')  # Not needed anymore
# ESC = "\x1b"  # Has cross-platform problems, so not used
QUIT = "Q"
# DELETE = '\x7f'  # This seems terminal-specific
DELETE = "\x08"  # ASCII backspace
# Some terminals emit delete codes other than \x08 (the standard one);
# treat the entries below as equivalent to DELETE when read.
BAD_DELETES = ["\x7f"]
# Every key the UI reacts to.
KEYS = [UP, DOWN, RIGHT, LEFT, ENTER, DELETE, QUIT]
| StarcoderdataPython |
328278 | <filename>qulab/sugar.py
import asyncio
from urllib.parse import urlparse
from qulab._config import config, config_dir
from qulab.dht.network import Server as DHT
from qulab.dht.network import cfg as DHT_config
from qulab.dht.utils import digest
from qulab.exceptions import QuLabRPCError, QuLabRPCTimeout
from qulab.rpc import ZMQClient, ZMQServer
from qulab.utils import getHostIP, getHostIPv6
class Node:
    """A node in the service mount tree.

    ``fullpath`` is the dot-joined path from the root; a root node (no
    parent) uses its own name verbatim.
    """

    def __init__(self, name, parent=None):
        self.name = name
        self.parent = parent
        self.children = []
        self.module = None
        if parent is None:
            self.fullpath = name
        else:
            self.fullpath = self.parent.fullpath + '.' + self.name

    def mount(self):
        # Placeholder: attaching a module to this node is not implemented.
        pass

    def unmout(self):
        # Placeholder (name kept as-is for compatibility, despite the typo).
        pass
# Root of the mount tree and the lazily-created module-wide DHT server.
root = Node('')
__dht = None
def getBootstrapNodes():
    """Collect known DHT bootstrap addresses as (host, port) pairs.

    Sources, in order: the persisted ``<config_dir>/kad.dat`` file (one
    ``kad://host:port`` URL per line) plus the ``bootstrap_nodes`` entry of
    the DHT configuration. Duplicates are removed.
    """
    urls = []
    kad = config_dir() / 'kad.dat'
    if kad.exists():
        with kad.open() as f:
            urls = f.readlines()
    urls.extend(DHT_config.get('bootstrap_nodes', []))

    def to_host_port(url):
        parsed = urlparse(url)
        return parsed.hostname, parsed.port

    return list({to_host_port(url) for url in urls})
def saveDHTNodes():
    """Persist known DHT neighbors to <config_dir>/kad.dat and reschedule
    itself to run again in 10 minutes."""
    if __dht is None:
        return
    if not config_dir().exists():
        config_dir().mkdir(parents=True)
    kad = config_dir() / 'kad.dat'
    nodes = __dht.bootstrappable_neighbors()
    # Include our own address so other processes can bootstrap from us.
    nodes.append((getHostIP(), __dht.port))
    kad.write_text('\n'.join(
        ["kad://%s:%d" % (node[0], node[1]) for node in set(nodes)]))
    # NOTE(review): get_event_loop() outside a running loop is deprecated in
    # newer Python -- confirm this is only invoked while a loop is active.
    loop = asyncio.get_event_loop()
    loop.call_later(600, saveDHTNodes)
async def getDHT(reboot=False):
    """Return the module-wide DHT server, starting it on first use.

    :param reboot: when True, stop any running DHT instance and start a
        fresh one.
    :return: the running DHT server instance

    Fix: the original called ``__dht.stop()`` unconditionally when
    ``reboot`` was True, raising AttributeError if no DHT had been started
    yet (``__dht is None``); the stop is now guarded.
    """
    global __dht
    if reboot and __dht is not None:
        __dht.stop()
        __dht = None
    if __dht is None:
        __dht = DHT()
        await __dht.start(getBootstrapNodes())
        saveDHTNodes()
    return __dht
async def mount(module, path, *, loop=None):
    """Expose *module* over RPC and register its address in the DHT.

    :param module: object whose methods become remotely callable
    :param path: DHT key the service is published under
    :param loop: optional asyncio event loop for the server
    :return: the running ZMQServer instance
    """
    s = ZMQServer(loop=loop)
    s.set_module(module)
    s.start()
    dht = await getDHT()
    # Give the server a moment to bind before advertising its address.
    await asyncio.sleep(0.1)
    addr = 'tcp://%s:%d' % (getHostIP(), s.port)
    await dht.set(path, addr)
    return s
def unmount(path):
    # TODO: placeholder -- should withdraw *path* from the DHT and stop the
    # corresponding server (inverse of mount()); currently a no-op.
    pass
class RemoteMethod:
    """Proxy for a (possibly dotted) method name on a remote connection.

    Attribute access extends the dotted name; calling the proxy forwards
    name, positional args and keyword args to the connection's
    ``_remoteCall``.
    """

    def __init__(self, name, connection):
        self.name = name
        self.connection = connection

    def __call__(self, *args, **kw):
        return self.connection._remoteCall(self.name, args, kw)

    def __getattr__(self, name):
        dotted = '.'.join((self.name, name))
        return RemoteMethod(dotted, self.connection)
class Connection:
    """Lazy client-side handle for an RPC service registered in the DHT.

    ZMQ clients are shared process-wide through ``_zmq_client_table`` so
    several Connection objects for the same path reuse one socket.
    """
    _zmq_client_table = {}

    def __init__(self, path, loop):
        self.path = path
        self.loop = loop
        self.zmq_client = None  # resolved lazily on first remote call

    def __getattr__(self, name):
        # Any unknown attribute becomes a remote-method proxy.
        return RemoteMethod(name, self)

    async def _remoteCall(self, method, args, kw):
        """Perform one RPC; on timeout drop the cached client so the next
        call re-resolves the service address."""
        if self.zmq_client is None:
            await self.connect()
        try:
            return await self.zmq_client.remoteCall(self.zmq_client.addr,
                                                    method, args, kw)
        except QuLabRPCTimeout:
            self.zmq_client = None
            raise

    async def _connect(self):
        """Resolve ``self.path`` through the DHT and build a fresh client."""
        dht = await getDHT()
        addr = await dht.get(self.path)
        if addr is None:
            raise QuLabRPCError(f"Unknow RPC path {self.path}.")
        return ZMQClient(addr, loop=self.loop)

    async def connect(self):
        """Ensure a live shared client exists for this path (ping-checked,
        up to 3 attempts), then adopt it as ``self.zmq_client``."""
        if self.path not in Connection._zmq_client_table:
            Connection._zmq_client_table[self.path] = await self._connect()
        retry = 0
        while retry < 3:
            if not await Connection._zmq_client_table[self.path].ping():
                # Stale cached entry: rebuild the client and ping again.
                Connection._zmq_client_table[self.path] = await self._connect()
            else:
                break
            retry += 1
        else:
            # while/else: all retries exhausted without a successful ping.
            raise QuLabRPCError(f'Can not connect to {self.path}')
        self.zmq_client = Connection._zmq_client_table[self.path]

    def close(self):
        # NOTE(review): calls __del__ directly instead of a public shutdown
        # method -- confirm ZMQClient exposes nothing better.
        self.zmq_client.__del__()
        if self.path in Connection._zmq_client_table:
            del Connection._zmq_client_table[self.path]

    @classmethod
    def close_all(cls):
        """Best-effort close of every cached client, then clear the table."""
        for c in cls._zmq_client_table.values():
            try:
                c.close()
            except:
                pass
        cls._zmq_client_table = {}
async def connect(path, *, loop=None):
    """Resolve *path* in the DHT and return a Connection for it.

    :param path: RPC path string, or an existing Connection whose path is
        reused
    :param loop: optional asyncio event loop for the client
    :raises QuLabRPCError: if the path is not registered in the DHT
    """
    dht = await getDHT()
    if isinstance(path, Connection):
        path = path.path
    registered_addr = await dht.get(path)
    if registered_addr is None:
        raise QuLabRPCError(f'Unknow RPC path {path}.')
    return Connection(path, loop=loop)
| StarcoderdataPython |
283258 | #
# This file is part of LiteDRAM.
#
# Copyright (c) 2019 <NAME> <<EMAIL>>
# SPDX-License-Identifier: BSD-2-Clause
import os
import filecmp
import unittest
from litex.build.tools import write_to_file
from litedram.init import get_sdram_phy_c_header, get_sdram_phy_py_header
def compare_with_reference(content, filename):
    """Write *content* to *filename* and compare it against the reference
    copy in test/reference/, returning True on a match.

    Fix: the temporary file is now removed in a ``finally`` block so it no
    longer leaks when ``filecmp.cmp`` raises (e.g. missing reference file).
    """
    write_to_file(filename, content)
    try:
        return filecmp.cmp(filename, os.path.join("test", "reference", filename))
    finally:
        os.remove(filename)
class TestInit(unittest.TestCase):
    """Compare generated SDRAM PHY init headers against reference files."""

    def _check_soc(self, soc, prefix):
        """Generate the C and Python init headers for *soc* and compare
        both against the reference files named ``<prefix>_init.h/.py``.

        Factors out the body that was copy-pasted across the three tests.
        """
        settings = soc.sdram.controller.settings
        c_header = get_sdram_phy_c_header(settings.phy, settings.timing)
        py_header = get_sdram_phy_py_header(settings.phy, settings.timing)
        self.assertEqual(compare_with_reference(c_header, prefix + "_init.h"), True)
        self.assertEqual(compare_with_reference(py_header, prefix + "_init.py"), True)

    def test_sdr(self):
        from litex.boards.targets.minispartan6 import BaseSoC
        self._check_soc(BaseSoC(), "sdr")

    def test_ddr3(self):
        from litex.boards.targets.kc705 import BaseSoC
        self._check_soc(BaseSoC(), "ddr3")

    def test_ddr4(self):
        from litex.boards.targets.kcu105 import BaseSoC
        self._check_soc(BaseSoC(max_sdram_size=0x4000000), "ddr4")
| StarcoderdataPython |
11305417 | <reponame>ARMmbed/mbed-client-pal-public
# -----------------------------------------------------------------------
# Copyright (c) 2016 ARM Limited. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
# Licensed under the Apache License, Version 2.0 (the License); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -----------------------------------------------------------------------
#####################################################################
# Install and run PAL tests for mbed targets.
#
# Arguments: One or more binary files containing tests to execute.
#
# Output: <binary file>_result.txt - Textual result summary
# <binary file>.xml - Junit result summary
# ARM
# <NAME>
#####################################################################
from mbed import MBED
import os
import sys
import unity_to_junit
from time import sleep
# Check that at least one test is specified on the command line
if len(sys.argv) < 2:
sys.exit()
else:
tests = sys.argv[1:]
# List of textual result files.
resultFiles = []
# Supported mbed devices. Can add to this list
deviceList = ["K64F"]
# Loop over attached mbed devices.
for device in deviceList:
mbed = MBED(device)
deviceDetected = mbed.detect()
if deviceDetected:
# Loop over tests
for test in tests:
mbed.install_bin(test)
noSuffix = os.path.splitext(test)[0]
intermediateResultsFile = noSuffix+".int"
# Remove intermediate file just in case it was left over from a failed run.
if os.path.isfile(intermediateResultsFile):
os.remove(intermediateResultsFile)
resultFile = noSuffix+"_result.txt"
# This delay is to allow output that was generated before the reset to be discarded.
sleep(30)
# Capture test results from the serial port
if mbed.run_and_capture_till_timeout(intermediateResultsFile,baud=9600,read_timeout=10,endOfData="***END OF TESTS**"):
# Success. Convert results to Junit format and write to xml file.
unity_to_junit.unity_to_junit("mbedos_" + os.path.basename(noSuffix), intermediateResultsFile, noSuffix+".xml", resultFile)
# Output intermediate results to the console.
with open(intermediateResultsFile, 'r') as fin:
print fin.read()
os.remove(intermediateResultsFile)
# Add result file name to list
resultFiles.append(resultFile)
else:
response = raw_input("Connect Serial port. Enter when ready")
# Clean up. True parameter closes the device opened by mbed.detect()
mbed.end_run(False,True)
# Copy result files to standard output
for file in resultFiles:
with open(file, 'r') as fin:
print fin.read()
| StarcoderdataPython |
4824600 | <filename>userbot/plugins/dumpster_IQ.py
#@TeleOniOn
from telethon import events
import asyncio
from userbot.utils import admin_cmd
from telethon.errors.rpcerrorlist import MessageIdInvalidError
@borg.on(admin_cmd(pattern="dump ?(.*)"))
async def _(message):
    """Animated "throw items into a dumpster" message-edit sequence.

    ``.dump abc`` (argument must be exactly 3 characters) animates each
    character being tossed into a trash can by repeatedly editing the
    message; any other input falls back to a default emoji trio.
    """
    try:
        obj = message.pattern_match.group(1)
        # Exactly three characters are required; anything else uses the
        # default trio via the IndexError fallback below.
        if len(obj) != 3:
            raise IndexError
        inp = ' '.join(obj)
    except IndexError:
        inp = "🥞 🎂 🍫"
    # u: items to throw, t: trash can, g/o: figure facing left/right,
    # s: padding cell, n: newline (unused below but kept for parity).
    u, t, g, o, s, n = inp.split(), '🗑', '<(^_^ <)', '(> ^_^)>', '⠀ ', '\n'
    # h: the three "hand" states -- holding 3, then 2, then 1 item(s);
    # empty strings pad the tuple so f.count('') widens later frames.
    h = [(u[0], u[1], u[2]), (u[0], u[1], ''), (u[0], '', '')]
    # Build 7 poses per item (pick up, walk right, throw) and play the
    # frame groups back in reverse by editing the message in place.
    for something in reversed([y for y in ([''.join(x) for x in (
        f + (s, g, s + s * f.count(''), t), f + (g, s * 2 + s * f.count(''), t),
        f[:i] + (o, f[i], s * 2 + s * f.count(''), t), f[:i] + (s + s * f.count(''), o, f[i], s, t),
        f[:i] + (s * 2 + s * f.count(''), o, f[i], t), f[:i] + (s * 3 + s * f.count(''), o, t),
        f[:i] + (s * 3 + s * f.count(''), g, t))] for i, f in enumerate(reversed(h)))]):
        for something_else in something:
            await asyncio.sleep(0.3)
            try:
                await message.edit(something_else)
            except MessageIdInvalidError:
                # Message was deleted mid-animation; stop quietly.
                return
| StarcoderdataPython |
9689259 | # ---------------------------------------------------------------------------------------------------------------------
# <NAME> - V0.1 - 15/11/2021 Creation of the python DEMIX library
# <NAME> - V0.2 - 17/11/2021 Added several useful lists to be used
# ---------------------------------------------------------------------------------------------------------------------
# Known DEM (digital elevation model) product names.
dem_list = ["SRTMGL1", "CopDEM_GLO-30", "ASTER_GDEM", "ALOS_World_3D", "Copernicus_DEM", "NASADEM"]
# Subset of dem_list currently supported by this library.
supported_dem_list = ["CopDEM_GLO-30", "SRTMGL1"]
# DEMIX quality criteria as (code, description) pairs.
criterion_list = [("A01", "Product fractional cover"),
                  ("A02", "Valid data fraction"),
                  ("A03", "Primary data"),
                  ("A04", "Valid land fraction"),
                  ("A05", "Primary land fraction")]
# Raster layer names available per DEMIX tile.
layer_list = ["validMask", "SourceMask", "landWaterMask", "Heights"]
# Example DEMIX tile identifiers (for documentation/tests).
demix_tile_example = ["N35YE014F", "N64ZW019C"]
3539515 | import pandas as pd
import numpy as np
import pickle
import os
import tensorflow as tf
import yamnet.features as features_lib
import yamnet.params as params
from librosa.core import load
from librosa.feature import melspectrogram
from librosa import power_to_db
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from sklearn.utils import shuffle
RAW_DATAPATH="genres_raw"
class Label():
    """One-hot encoder for genre names.

    Fits a LabelEncoder (genre -> integer) and a OneHotEncoder
    (integer -> one-hot row) on the given genre list.
    """

    def __init__(self, genres_list):
        self.le = LabelEncoder()
        encoded = self.le.fit_transform(genres_list)
        self.one = OneHotEncoder()
        self.one.fit(encoded.reshape(-1, 1))

    def transform(self, Y):
        """Return the one-hot encoding of the genre labels in *Y*."""
        integer_labels = self.le.transform(Y)
        return self.one.transform(np.expand_dims(integer_labels, axis=1))
class Data():
    """Container for genre-labelled spectrogram data with train/test split."""

    def __init__(self, genres):
        """Initialise empty datasets and a one-hot label encoder for *genres*."""
        self.raw_data = None        # DataFrame of (spectrogram, genre) rows
        self.genres_list = genres
        self.train_set = None       # filled by build_dataset()
        self.test_set = None        # filled by build_dataset()
        self.label = Label(self.genres_list)  # one-hot encoder for genres
    def load_data(self, datapath):
        """Read up to 11 tracks per genre from *datapath* and build
        ``self.raw_data`` with one (patch, genre) row per spectrogram patch.
        """
        self.datapath = datapath
        records = list()
        for i, genre in enumerate(self.genres_list):
            GENREPATH = self.datapath + genre + "/"
            for j, track in enumerate(os.listdir(GENREPATH)):
                # Cap at 11 tracks per genre (j = 0..10) to bound runtime.
                if j>10:
                    break
                TRACKPATH = GENREPATH + track
                print("%d.%s\t\t%s (%d)" % (i + 1, genre, TRACKPATH, j + 1))
                y, sr = load(TRACKPATH, mono=True)
                # Log-mel spectrogram split into fixed-size patches
                # using the YAMNet feature pipeline.
                spectrogram = features_lib.waveform_to_log_mel_spectrogram(tf.squeeze([y], axis=0), params)
                patches = features_lib.spectrogram_to_patches(spectrogram, params)
                data_chunks = [(data, genre) for data in patches]
                records.append(data_chunks)
        # Flatten the per-track lists into one flat list of tuples.
        records = [data for record in records for data in record]
        self.raw_data = pd.DataFrame.from_records(records, columns=['spectrogram', 'genre'])
    def build_dataset(self):
        """Shuffle raw_data and split it 90/10 per genre into
        ``self.train_set`` / ``self.test_set``."""
        df = self.raw_data.copy()
        df = shuffle(df)
        train_records, test_records = list(), list()
        for i, genre in enumerate(self.genres_list):
            genre_df = df[df['genre'] == genre]
            n = round(len(genre_df) * 0.9)  # 90% of this genre goes to train
            train_records.append(genre_df.iloc[:n].values)
            test_records.append(genre_df.iloc[n:].values)
        # Flatten the per-genre lists and reshuffle so genres interleave.
        train_records = shuffle([record for genre_records in train_records for record in genre_records])
        test_records = shuffle([record for genre_records in test_records for record in genre_records])
        self.train_set = pd.DataFrame.from_records(train_records, columns=['spectrogram', 'genre'])
        self.test_set = pd.DataFrame.from_records(test_records, columns=['spectrogram', 'genre'])
    def get_train_set(self):
        """Return (x_train, y_train): stacked spectrograms and one-hot labels."""
        x_train = np.stack(self.train_set['spectrogram'].values)
        # NOTE(review): this reshape re-applies the stack's existing 3-D
        # shape and looks like a no-op -- confirm before removing.
        x_train = np.reshape(x_train, (x_train.shape[0], x_train.shape[1], x_train.shape[2]))
        y_train = np.stack(self.train_set['genre'].values)
        y_train = self.label.transform(y_train)
        print("x_train shape: ", x_train.shape)
        print("y_train shape: ", y_train.shape)
        return x_train, y_train
    def get_test_set(self):
        """Return (x_test, y_test): stacked spectrograms and one-hot labels."""
        x_test = np.stack(self.test_set['spectrogram'].values)
        # NOTE(review): same apparent no-op reshape as in get_train_set.
        x_test = np.reshape(x_test, (x_test.shape[0], x_test.shape[1], x_test.shape[2]))
        y_test = np.stack(self.test_set['genre'].values)
        y_test = self.label.transform(y_test)
        print("x_test shape : ", x_test.shape)
        print("y_test shape : ", y_test.shape)
        return x_test, y_test
    def save(self):
        """Pickle ``self.raw_data`` to RAW_DATAPATH."""
        with open(RAW_DATAPATH, 'wb') as outfile:
            pickle.dump(self.raw_data, outfile, pickle.HIGHEST_PROTOCOL)
        print('-> Data() object is saved.\n')
        return
    def load(self):
        """Unpickle ``self.raw_data`` from RAW_DATAPATH."""
        with open(RAW_DATAPATH, 'rb') as infile:
            self.raw_data = pickle.load(infile)
        print("-> Data() object is loaded.")
        return
12865037 | <gh_stars>0
# Copyright 2015-2018,2020 <NAME>
# License MIT (https://opensource.org/licenses/MIT).
# Odoo module manifest for the POS debranding addon (Odoo 13).
{
    "name": "POS debranding",
    "version": "13.0.1.0.0",
    "author": "IT-Projects LLC, <NAME>",
    "license": "Other OSI approved licence",  # MIT
    "category": "Debranding",
    "support": "<EMAIL>",
    "website": "https://odoo-debranding.com",
    "depends": ["point_of_sale"],  # extends the Point of Sale module
    "data": ["template.xml"],
    "qweb": ["static/src/xml/pos_debranding.xml"],
    "installable": True,
}
| StarcoderdataPython |
1678757 | ''' Some public credential information and checks used in Python scripts.
By: WhiteBombo
'''
CLIENT_ID = 'qm7yhtp9i5h2785tjrkyh7a1lvsls3'


def check_id():
    """Exit the program (with a tantrum) if no Client-ID is configured.

    Simplified from a try/except NameError that duplicated the failure
    path: CLIENT_ID is always bound at module level, so a NameError is
    unreachable and an emptiness check is sufficient.
    """
    if not CLIENT_ID:
        print('Missing Client-ID. A tantrum has been thrown.')
        exit()
| StarcoderdataPython |
11359492 | <reponame>craigh92/ros2cli
# Copyright 2019 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List
from typing import Set
from typing import Tuple
import importlib_metadata
from ros2doctor.api.format import doctor_warn
class DoctorCheck:
    """Abstract base class for a ros2doctor check plugin."""

    def category(self) -> str:
        """Return the category string linking this check to its report."""
        raise NotImplementedError

    def check(self) -> bool:
        """Run the check and return the result object describing it."""
        raise NotImplementedError
class DoctorReport:
    """Abstract base class for a ros2doctor report plugin."""

    def category(self) -> str:
        """Return the category string linking this report to its check."""
        raise NotImplementedError

    def report(self) -> 'Report':
        """Build and return the Report object holding the report content."""
        raise NotImplementedError
class Report:
    """A named report: an ordered list of (item_name, item_info) pairs."""

    __slots__ = ['name', 'items']

    def __init__(self, name: str):
        """Create an empty report titled *name*."""
        self.name = name
        self.items = []

    def add_to_report(self, item_name: str, item_info: str) -> None:
        """Append one (name, info) pair to the report content."""
        entry = (item_name, item_info)
        self.items.append(entry)
class Result:
    """Mutable error/warning counters produced by one check."""

    __slots__ = ['error', 'warning']

    def __init__(self):
        """Start with zero errors and zero warnings."""
        self.error = 0
        self.warning = 0

    def add_error(self):
        """Record one error."""
        self.error += 1

    def add_warning(self):
        """Record one warning."""
        self.warning += 1
def run_checks(*, include_warnings=False) -> Tuple[Set[str], int, int]:
    """
    Run all checks and return check results.

    :param include_warnings: when True, a check counts as failed if it
        produced warnings, not only errors.
    :return: 3-tuple (categories of failed checks, number of failed checks,
             total number of checks)

    Fix: after a failed ``load()`` or instantiation the loop now ``continue``s;
    previously it fell through with ``check_class``/``check_instance`` unbound
    (or stale from the prior iteration), triggering a second, misleading
    warning from the following try block.
    """
    fail_categories = set()  # set: removes repeated category names
    fail = 0
    total = 0
    for check_entry_pt in importlib_metadata.entry_points().get('ros2doctor.checks', []):
        try:
            check_class = check_entry_pt.load()
        except ImportError:
            doctor_warn(f'Check entry point {check_entry_pt.name} fails to load.')
            continue
        try:
            check_instance = check_class()
        except Exception:
            doctor_warn(f'Unable to instantiate check object from {check_entry_pt.name}.')
            continue
        try:
            check_category = check_instance.category()
            result = check_instance.check()
            if result.error or (include_warnings and result.warning):
                fail += 1
                fail_categories.add(check_category)
            total += 1
        except Exception:
            doctor_warn(f'Fail to call {check_entry_pt.name} class functions.')
    return fail_categories, fail, total
def generate_reports(*, categories=None) -> List[Report]:
    """
    Collect Report objects from all registered report plugins.

    :param categories: when given (truthy), only reports whose category is
        contained in it are returned; otherwise all reports are returned.
    :return: list of Report objects

    Fix: after a failed ``load()`` or instantiation the loop now ``continue``s;
    previously it fell through with ``report_class``/``report_instance``
    unbound (or stale from the prior iteration), triggering a second,
    misleading warning from the following try block.
    """
    reports = []
    for report_entry_pt in importlib_metadata.entry_points().get('ros2doctor.report', []):
        try:
            report_class = report_entry_pt.load()
        except ImportError:
            doctor_warn(f'Report entry point {report_entry_pt.name} fails to load.')
            continue
        try:
            report_instance = report_class()
        except Exception:
            doctor_warn(f'Unable to instantiate report object from {report_entry_pt.name}.')
            continue
        try:
            report_category = report_instance.category()
            report = report_instance.report()
            if categories:
                if report_category in categories:
                    reports.append(report)
            else:
                reports.append(report)
        except Exception:
            doctor_warn(f'Fail to call {report_entry_pt.name} class functions.')
    return reports
| StarcoderdataPython |
1977743 | # Getting started with APIC-EM APIs
# Follows APIC-EM Basics Learning Lab
# Create a Policy Use Case
# Basic Steps
# 1. Get Hosts
# 2. Get the count of the policies
# 3. Create a new policy
# 4. Check on the progress of the create task
# 5. Get the count of the policies after the task was added
# 6. Get Policies again to show new one that was added
# * THIS SAMPLE APPLICATION AND INFORMATION IS PROVIDED "AS IS" WITHOUT WARRANTY
# * OF ANY KIND BY CISCO, EITHER EXPRESSED OR IMPLIED, INCLUDING BUT NOT LIMITED
# * TO THE IMPLIED WARRANTIES OF MERCHANTABILITY FITNESS FOR A PARTICULAR
# * PURPOSE, NONINFRINGEMENT, SATISFACTORY QUALITY OR ARISING FROM A COURSE OF
# * DEALING, LAW, USAGE, OR TRADE PRACTICE. CISCO TAKES NO RESPONSIBILITY
# * REGARDING ITS USAGE IN AN APPLICATION, AND IT IS PRESENTED ONLY AS AN
# * EXAMPLE. THE SAMPLE CODE HAS NOT BEEN THOROUGHLY TESTED AND IS PROVIDED AS AN
# * EXAMPLE ONLY, THEREFORE CISCO DOES NOT GUARANTEE OR MAKE ANY REPRESENTATIONS
# * REGARDING ITS RELIABILITY, SERVICEABILITY, OR FUNCTION. IN NO EVENT DOES
# * CISCO WARRANT THAT THE SOFTWARE IS ERROR FREE OR THAT CUSTOMER WILL BE ABLE
# * TO OPERATE THE SOFTWARE WITHOUT PROBLEMS OR INTERRUPTIONS. NOR DOES CISCO
# * WARRANT THAT THE SOFTWARE OR ANY EQUIPMENT ON WHICH THE SOFTWARE IS USED WILL
# * BE FREE OF VULNERABILITY TO INTRUSION OR ATTACK. THIS SAMPLE APPLICATION IS
# * NOT SUPPORTED BY CISCO IN ANY MANNER. CISCO DOES NOT ASSUME ANY LIABILITY
# * ARISING FROM THE USE OF THE APPLICATION. FURTHERMORE, IN NO EVENT SHALL CISCO
# * OR ITS SUPPLIERS BE LIABLE FOR ANY INCIDENTAL OR CONSEQUENTIAL DAMAGES, LOST
# * PROFITS, OR LOST DATA, OR ANY OTHER INDIRECT DAMAGES EVEN IF CISCO OR ITS
# * SUPPLIERS HAVE BEEN INFORMED OF THE POSSIBILITY THEREOF.-->
# import the requests library so we can use it to make REST calls (http://docs.python-requests.org/en/latest/index.html)
import requests
# import the json library. This library provides many handy features for formatting, displaying
# and manipulating json.
import json
# All of our REST calls will use the url for the APIC EM Controller as the base URL
# So lets define a variable for the controller IP or DNS so we don't have to keep typing it
controller_url = "https://sandboxapic.cisco.com"

################## Get Hosts ##########################################################
# This function allows you to view a list of all the hosts in the network.
get_hosts_url = controller_url + '/api/v0/host/1/3'

# Perform GET on get_hosts_url (verify=False: sandbox uses a self-signed cert)
r = (requests.get(get_hosts_url, verify=False))
hosts_obj = r.json()

# For this example, we will use the IP Address of the third host in the list
# First we will get a reference to the top level response object in the json which contains the list of hosts
hosts_parent = hosts_obj["response"]

# Select the 3rd host in the list (first host is element 0)
selected_host = hosts_parent[2]["hostIp"]

# Print the IP address of this host so we can see it.
print ("\nThis is the selected host = " + selected_host)

############### Get the count of the policies ###########################################
# Specify URL for policies count
policies_count_url = controller_url + '/api/v0/policy/count'

# Perform GET on policies_count_url
policies_count_response = requests.get(policies_count_url, verify=False)
count = policies_count_response.json()["response"]

# print total number of policies before we create the new one
print ("Total number of policies before = " + str(count))

############### Get the list of policies ##################################################
# Specify URL for the list of policies
policies_url = controller_url + '/api/v0/policy/1/' + str(count)
policies_response = requests.get(policies_url, verify=False)

# set our parent as the top level response object
policies_parent = policies_response.json()
print ("\nPolicies= ")

# Print list of policies before we add the new one
# for each policy returned, print the policyID
for item in policies_parent["response"]:
    print ("ID = " + item["id"] + " Name = " + item["policyName"])

################ Create a new Policy #####################################################
# We need to send JSON in the request to specify the attributes of our new policy.
# The combination of policyName, hostIp, and the ports must be unique for a new policy.
# This is the JSON we will use to create the policy:
#{
#    # "policyOwner": "Admin",
#    "networkUser": {
#        "userIdentifiers": [
#            "172.16.31.10"
#        ],
#        "applications": [
#            {
#                "raw": "12342;UDP"
#            }
#        ]
#    },
#    "actions": [
#        "DENY"
#    ],
#    "policyName": "learningLab"
#}

# Create an object that holds our JSON to create a policy.
# Use our selected_host variable that is defined above to specify the hostIP
payload = {
    "policyOwner": "Admin",
    "networkUser": {
        "userIdentifiers": [
            selected_host
        ],
        "applications": [
            {
                "raw": "12503;UDP"
            }
        ]
    },
    "actions": [
        "DENY"
    ],
    "policyName": "learningLab-firodj-4-29"
}

# To create a policy, we need to use the POST method.
# When using POST, you need to specify the Content-Type header as application/json
headers = {'content-type': 'application/json'}

# specify url to create a policy
create_policy_url = controller_url + '/api/v0/policy'

# Use requests.post to do a POST to the policy API
# Specify request body json data and headers
# NOTE(review): unlike every GET above, this POST omits verify=False --
# confirm it does not fail on the sandbox's self-signed certificate.
policy_create_response = requests.post(create_policy_url, data=json.dumps(payload), headers=headers)
print ("\nResult of Create" + policy_create_response.text)
# print (policy_create_response.json()['response']['url'])

###################### Let's check on the status of of the Create Policy operation. ####################################
# We use the task url that is returned in the response to the POST to check the status.
task_url = controller_url + policy_create_response.json()['response']['url']
task_response = requests.get(task_url, verify=False)
print ("Task Progress = " + task_response.json()["response"]["progress"])

####################### Get the NEW count of the policies ##############################################################
# Specify URL for policies count
policies_count_url = controller_url + '/api/v0/policy/count'

# Perform GET on policies_count_url
policies_count_response = requests.get(policies_count_url, verify=False)
count = policies_count_response.json()["response"]

# print total number of policies AFTER we create the new one
print ("Total number of policies after adding a new one = " + str(count))

####################### Get the list of policies AFTER #################################################################
# Specify URL for the list of policies
policies_url = controller_url + '/api/v0/policy/1/' + str(count)
policies_response = requests.get(policies_url, verify=False)

# set our parent as the top level response object
policies_parent = policies_response.json()
print ("\nPolicies after adding a new one = ")

# Print list of policies, including the newly created one
# for each policy returned, print the policyID
for item in policies_parent["response"]:
    print ("ID = " + item["id"] + " Name = " + item["policyName"])
| StarcoderdataPython |
8127247 | from .models import *
from rest_framework import serializers
# CRUD API
class ReviewSerializer(serializers.ModelSerializer):
    """Serializes the core Review fields for the CRUD API list/create views."""
    class Meta:
        model = Review
        fields = ('visited_day', 'region', 'cafe_name', 'theme_name', 'participant_num', 'escape_flag', 'r_time', 'star_num')
class ReviewDetailSerializer(serializers.ModelSerializer):
    """Serializes only the review body text for the detail view."""
    class Meta:
        model = Review
        fields = ('content',)
| StarcoderdataPython |
4869745 | # Copyright (c) 2013, <NAME> and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
def execute(filters=None):
    """Report entry point: return (columns, data) for the daily summary.

    Data is the concatenation of the six section summaries, each of which
    may return a falsy value (treated as an empty section).
    """
    filters = filters or {}
    columns = get_columns()

    sections = (
        get_start_up_summary(filters),
        get_first_off_summary(filters),
        get_hun_insp_summary(filters),
        get_shift_handovers_summary(filters),
        get_quality_defect_summary(filters),
        get_production_output_summary(filters),
    )
    data = []
    for section_rows in sections:
        data.extend(section_rows or [])
    return columns, data
def get_columns():
    """Build the report column definitions.

    Column specs are kept as compact tuples; ``options`` is ``None`` when
    the definition has no options key at all (Data/Date/Time columns) and
    ``""`` when the key is present but empty.
    """
    specs = [
        # (fieldname, label, fieldtype, options, width)
        ("department", "Department", "Link", "Department", 150),
        ("workstation", "Workstation", "Link", "Workstation", 150),
        ("workstation_entity", "Workstation Entity", "Link", "Workstation Entity", 150),
        ("document", "Document Type", "Link", "DocType", 100),
        ("su_name", "Document Name", "Dynamic Link", "document", 100),
        ("status", "Status", "Data", None, 150),
        ("creation", "Date", "Date", None, 100),
        ("time", "Time", "Time", None, 50),
        ("operator", "Operator", "Link", "Employee", 100),
        ("job_number", "Job", "Link", "Syteline Job Order", 100),
        ("item", "Item", "Link", "Syteline Item", 100),
        ("comments", "Comments", "Small Text", "", 200),
        ("qty_day", "Quantity on Day", "Integer", "", 10),
        ("qty_mtd", "Quantity Month-to-Date", "Integer", "", 10),
    ]
    columns = []
    for fieldname, label, fieldtype, options, width in specs:
        column = {
            "fieldname": fieldname,
            "label": _(label),
            "fieldtype": fieldtype,
        }
        if options is not None:
            column["options"] = options
        column["width"] = width
        columns.append(column)
    return columns
def get_start_up_summary(filters):
    """One row per Workstation Entity with today's latest submitted
    Start Up, or a 'No Start Up Completed' placeholder row.

    The company filter is passed as a bound query parameter
    (``%(company)s``) instead of being interpolated with ``str.format``,
    which was an SQL-injection vector.
    """
    company = filters.get("company")
    # date = filters.get("date")
    return frappe.db.sql("""
        SELECT
            t_data.department,
            t_data.workstation,
            t_data.name,
            'Start Up' AS document_type,
            t_data.su_name,
            IFNULL(t_su2.status,'No Start Up Completed') AS status,
            IFNULL(DATE(t_data.creation),CURDATE()) AS creation,
            IFNULL(LTRIM(LEFT(RIGHT(t_data.creation,15),5)),'00:00') AS time,
            t_su2.operator
        FROM (
            SELECT
                t_su.name AS su_name,
                t_we.name,
                MAX(t_su.creation) AS creation,
                LTRIM(LEFT(RIGHT(t_su.creation,15),5)) AS time,
                t_w.name as workstation,
                t_w.department AS department
            FROM `tabWorkstation Entity` t_we
            INNER JOIN `tabWorkstation` t_w ON t_w.name = t_we.workstation
            INNER JOIN `tabDepartment` t_d ON t_d.name = t_w.department AND t_d.company = %(company)s
            LEFT JOIN `tabStart Up` t_su ON t_we.name = t_su.workstation_entity AND DATE(t_su.creation) = CURDATE() AND t_su.docstatus = 1
            GROUP BY t_we.name, t_su.workstation, t_w.department
        ) AS t_data
        LEFT JOIN `tabStart Up` t_su2 ON t_data.su_name = t_su2.name
        ORDER BY t_data.department, t_data.workstation, t_data.name""",
        {"company": company},
    )
def get_first_off_summary(filters):
    """One row per workstation with production scheduled today, joined to
    today's submitted First Off (or a 'No First Off Completed' row).

    The company filter is now a bound parameter rather than being
    string-formatted into the SQL (SQL-injection fix).
    """
    company = filters.get("company")
    # date = filters.get("date")
    return frappe.db.sql("""
        SELECT DISTINCT
            t_w.department,
            IFNULL(t_fo.workstation,t_po.workstation) AS workstation,
            IFNULL(t_fo.workstation_entity,t_po.workstation_entity) AS workstation_entity,
            "First Off" AS document_type,
            t_fo.name,
            IFNULL(t_fo.status, 'No First Off Completed') AS status,
            DATE(t_fo.creation) as date,
            IFNULL(LTRIM(LEFT(RIGHT(t_fo.creation,15),5)),'00:00') AS time,
            t_fo.operator,
            t_po.job_number,
            t_sjo.item
        FROM `tabProduction Output` t_po
        INNER JOIN `tabSyteline Job Order` t_sjo ON t_sjo.name = t_po.job_number
        INNER JOIN `tabWorkstation` t_w ON t_w.name = t_po.workstation
        INNER JOIN `tabDepartment` t_d ON t_d.name = t_w.department AND t_d.company = %(company)s
        LEFT JOIN `tabFirst Off` t_fo ON t_fo.job_number = t_po.job_number AND t_fo.workstation = t_po.workstation AND DATE(t_fo.creation) = CURDATE() AND t_fo.docstatus = 1
        WHERE t_po.date_of_production = CURDATE() AND t_po.job_number <> 'No Job Scheduled'
        ORDER BY t_w.department, workstation, workstation_entity""",
        {"company": company},
    )
def get_hun_insp_summary(filters):
    """Today's submitted Hundred Percent Inspections in 'Failed - Action
    Required' status for the selected company.

    Company is bound as a query parameter (SQL-injection fix).
    """
    company = filters.get("company")
    # date = filters.get("date")
    return frappe.db.sql("""
        SELECT t_his.department, t_his.workstation, t_his.inspection_item, "Hundred Percent Inspection" AS document_type, t_his.name, t_his.status, DATE(t_his.creation) AS creation, IFNULL(LTRIM(LEFT(RIGHT(t_his.creation,15),5)),'00:00') AS time
        FROM `tabHundred Percent Inspection` t_his
        INNER JOIN `tabDepartment` t_d ON t_d.name = t_his.department
        WHERE t_his.docstatus = 1 AND t_his.status = 'Failed - Action Required' AND DATE(t_his.creation) = CURDATE() AND t_d.company = %(company)s
        ORDER BY t_his.inspection_item, t_his.creation""",
        {"company": company},
    )
def get_shift_handovers_summary(filters):
    """One row per (department, shift type) for departments that require a
    shift handover, joined to today's matching submitted handover (or a
    'No Shift Handover Reported' placeholder).

    Company is bound as a query parameter (SQL-injection fix).
    """
    company = filters.get("company")
    # date = filters.get("date")
    return frappe.db.sql("""
        SELECT
            t_data.name,
            "" AS w,
            t_data.type,
            "Shift Handover" AS document_type,
            t_sh3.name,
            IF(ISNULL(t_sh3.name),'No Shift Handover Reported','Completed') AS status,
            IFNULL(DATE(t_sh3.creation),CURDATE()) AS creation,
            IFNULL(LTRIM(LEFT(RIGHT(t_sh3.creation,15),5)),'00:00') AS time,
            "" AS o,
            "" AS j,
            "" AS i,
            t_sh3.comments
        FROM
            (SELECT t_dep.company, t_dep.name, t_dep.require_shift_handover, t_sh.type, MAX(t_sh2.creation) AS creation FROM `tabDepartment` AS t_dep
            CROSS JOIN
                (SELECT DISTINCT type
                FROM `tabShift Handover`) AS t_sh
            LEFT JOIN `tabShift Handover` AS t_sh2 ON t_sh2.department = t_dep.name AND t_sh2.type = t_sh.type
            GROUP BY t_dep.name, t_sh.type
            HAVING t_dep.require_shift_handover = 1 AND t_dep.company = %(company)s) as t_data
        LEFT JOIN `tabShift Handover` AS t_sh3
            ON t_sh3.creation = t_data.creation AND DATE(t_sh3.creation) = CURDATE() AND t_sh3.docstatus = 1
        ORDER BY t_data.name, t_data.type DESC""",
        {"company": company},
    )
def get_quality_defect_summary(filters):
    """One row per production department with the time since its last
    submitted Quality Defect, plus defect quantities for today and
    month-to-date.

    Company is bound as a query parameter (SQL-injection fix).
    """
    company = filters.get("company")
    # date = filters.get("date")
    return frappe.db.sql("""
        SELECT
            t_data.name,
            "" AS w,
            "" AS we,
            "Quality Defect" AS document_type,
            IFNULL(t_data.last_submission,'Never') AS last_submission,
            IFNULL(t_data.status,'No Data Submitted on this day') AS 'status',
            DATE(CURDATE()) AS date,
            "00:00" AS t,
            "" AS o,
            "" AS j,
            "" AS i,
            "" AS com,
            IFNULL(t_qday.qty,0) AS qty_day,
            IFNULL(t_qmtd.qty,0) AS qty_mtd
        FROM
            (SELECT
                t_dept.name,
                IF(CURDATE() - DATE(t_qd.creation) = 0,LTRIM(LEFT(RIGHT(t_qd.creation,15),5)), CONCAT(DATEDIFF(CURDATE(),DATE(t_qd.creation)),' days ago')) AS last_submission,
                IF(CURDATE() - DATE(t_qd.creation) = 0,'Defects Submitted', 'No Data Submitted on this day') AS 'status'
            FROM `tabDepartment` AS t_dept
            LEFT JOIN
                (SELECT department, MAX(creation) as creation
                FROM `tabQuality Defect`
                WHERE docstatus = 1
                GROUP BY department) AS t_qd
            ON t_qd.department = t_dept.name
            WHERE t_dept.is_production = 1 AND t_dept.is_group = 0 AND t_dept.company = %(company)s) AS t_data
        LEFT JOIN
            (SELECT department, SUM(qty) AS qty FROM `tabQuality Defect`
            WHERE docstatus = 1 AND no_defects = 0 AND DATE(creation)= CURDATE()
            GROUP BY department) AS t_qday
        ON t_qday.department = t_data.name
        LEFT JOIN
            (SELECT department, SUM(qty) AS qty FROM `tabQuality Defect`
            WHERE docstatus = 1 AND no_defects = 0 AND (MONTH(creation) = MONTH(NOW()) AND YEAR(creation) = YEAR(NOW()))
            GROUP BY department) AS t_qmtd
        ON t_qmtd.department = t_data.name
        ORDER BY t_data.name""",
        {"company": company},
    )
def get_production_output_summary(filters):
    """Workstation entities with a submitted Start Up today but no
    Production Output logged today.

    Company is bound as a query parameter (SQL-injection fix).
    """
    company = filters.get("company")
    # date = filters.get("date")
    return frappe.db.sql("""
        SELECT DISTINCT
            t_data.department,
            t_data.workstation,
            t_data.workstation_entity,
            "Production Output" AS document_type,
            "" AS name,
            'Start Up submitted but no Production Output logged' AS status,
            DATE(CURDATE()) AS date
        FROM
            (SELECT t_su.department, t_su.workstation, t_su.workstation_entity, t_po.name FROM `tabStart Up` AS t_su
            LEFT JOIN `tabDepartment` t_d ON t_d.name = t_su.department
            LEFT JOIN `tabProduction Output` AS t_po ON t_po.workstation_entity = t_su.workstation_entity AND DATE(t_po.creation) = CURDATE()
            WHERE DATE(t_su.creation) = CURDATE() AND t_su.status <> 'No Production Scheduled' AND t_su.docstatus = 1 AND t_d.company = %(company)s) AS t_data
        WHERE ISNULL(t_data.name)
        ORDER BY t_data.department, t_data.workstation, t_data.workstation_entity""",
        {"company": company},
    )
| StarcoderdataPython |
# Naming constants for the cartoview ArcGIS-portal app: template paths,
# JSON config templates and URL-pattern names, all derived from APP_NAME.
APP_NAME = "cartoview_arcgis_portal"
# Template paths.
BASE_TPL = "{}/base.html".format(APP_NAME)
MAP_LIST_TPL = "{}/map_list.html".format(APP_NAME)
MAP_LIST_ACTIONS_TPL = "{}/map_list_actions.html".format(APP_NAME)
MAP_EDIT_TPL = "{}/map_edit.html".format(APP_NAME)
ITEM_DATA_JSON_TPL = "{}/portal_json_config/item_data.json".format(APP_NAME)
OPERATIONAL_LAYERS_JSON_TPL = "{}/portal_json_config/operationalLayers.json".format(APP_NAME)
# URL-pattern names.
MAP_LIST_URL_NAME = "{}_map_list".format(APP_NAME)
MAP_EDIT_URL_NAME = "{}_map_edit".format(APP_NAME)
MAP_PUBLISH_URL_NAME = "{}_map_publish".format(APP_NAME)
MAP_CONFIG_SAVE_URL_NAME = "{}_map_config_save".format(APP_NAME)
# NOTE: "_rest" (not "_reset") is kept as-is — URL names are referenced by
# string elsewhere, so the apparent typo is part of the public interface.
MAP_CONFIG_RESET_URL_NAME = "{}_map_config_rest".format(APP_NAME)
369053 | # !/usr/bin/python
"""
Created on Thu Apr 4 08:37 2013
@author: marcel
"""
import pygame as pg
from pygame.locals import *
from data.tilemap import Tilemap
def main():
    """Open an 800x600 game window and run the 60 FPS event/render loop
    until the window is closed or Escape is pressed."""
    pg.init()
    screen = pg.display.set_mode((800, 600))
    pg.display.set_caption('Bow & Arrows')
    pg.mouse.set_visible(1)
    clock = pg.time.Clock()
    tile = Tilemap()
    loop = True
    while loop:
        # Cap the loop at 60 frames per second.
        clock.tick(60)
        screen.fill((0, 128, 1))
        for event in pg.event.get():
            if event.type == QUIT:
                loop = False
                break
            if event.type == KEYDOWN:
                if event.key == K_ESCAPE:
                    loop = False
                    break
            # NOTE(review): handle_input/render run once per pending event,
            # so on a frame with no events the tilemap is never redrawn —
            # confirm whether render() should instead run once per frame,
            # outside this for-loop.
            tile.handle_input(event)
            tile.render(event, screen)
        pg.display.flip()
8003009 | import asyncio
async def echo(reader: asyncio.StreamReader, writer: asyncio.StreamWriter):
    """Per-connection handler: echo each received line back upper-cased.

    Runs until EOF (client closes its write side); a cancelled task is
    reported as a dropped connection.
    """
    print("New connection")
    try:
        while True:
            line = await reader.readline()
            if not line:
                break
            writer.write(line.upper())
            await writer.drain()
        print("Leaving connection!")
    except asyncio.CancelledError:
        print("Connection Dropped!")
async def main_server_function(host="127.0.0.1", port=8888):
    """Bind the upper-casing echo handler on *host*:*port* and serve forever."""
    async with await asyncio.start_server(echo, host, port) as server:
        await server.serve_forever()
# Entry point: run the echo server until interrupted from the keyboard.
try:
    asyncio.run(main_server_function())
except KeyboardInterrupt:
    print("Bye!")
| StarcoderdataPython |
11366423 | from django.db import models
from django.db.models import F
class Agency(models.Model):
    """A (toptier, subtier) agency pairing with helpers to look records up
    by CGAC/FREC and subtier codes. Lookups that can match several rows
    return the most recently updated one."""
    id = models.AutoField(primary_key=True)
    create_date = models.DateTimeField(auto_now_add=True)
    update_date = models.DateTimeField(auto_now=True)
    toptier_agency = models.ForeignKey("references.ToptierAgency", models.DO_NOTHING, db_index=True)
    # Nullable: a toptier-only agency record has no subtier.
    subtier_agency = models.OneToOneField("references.SubtierAgency", models.DO_NOTHING, null=True, db_index=True)
    toptier_flag = models.BooleanField(default=False)
    user_selectable = models.BooleanField(default=False)
    # Not shown here is an index idx_agency_toptier_agency_id_null_subtier_agency_id_uniq that
    # is used to enforce uniquity on toptier_agency_id when subtier_agency_id is null.
    class Meta:
        db_table = "agency"
    @staticmethod
    def get_by_toptier(toptier_code):
        """
        Get an agency record by toptier information only

        Matches only rows where the subtier name equals the toptier name
        (i.e. the agency's "top" record), newest update first.

        Args:
            toptier_code: a CGAC or FREC code

        Returns:
            an Agency instance, or None if no match
        """
        return (
            Agency.objects.filter(
                toptier_agency__toptier_code=toptier_code, subtier_agency__name=F("toptier_agency__name")
            )
            .order_by("-update_date")
            .first()
        )
    @staticmethod
    def get_by_subtier(subtier_code):
        """
        Get an agency record by subtier information only

        Args:
            subtier_code: subtier code

        Returns:
            an Agency instance

        If called with None / empty subtier code, returns None
        """
        if subtier_code:
            return Agency.objects.filter(subtier_agency__subtier_code=subtier_code).order_by("-update_date").first()
    @staticmethod
    def get_by_toptier_subtier(toptier_code, subtier_code):
        """
        Lookup an Agency record by toptier cgac code and subtier code

        Args:
            toptier_code: a CGAC or FREC code
            subtier_code: an agency subtier code

        Returns:
            an Agency instance, or None if no match
        """
        return (
            Agency.objects.filter(toptier_agency__toptier_code=toptier_code, subtier_agency__subtier_code=subtier_code)
            .order_by("-update_date")
            .first()
        )
    @staticmethod
    def get_by_subtier_only(subtier_code):
        """
        Lookup an Agency record by subtier code only

        Useful when data source has an inaccurate top tier code,
        but an accurate subtier code. Will return an Agency
        if and only if a single match for the subtier code exists.

        Args:
            subtier_code: an agency subtier code

        Returns:
            an Agency instance, or None when zero or multiple rows match
        """
        agencies = Agency.objects.filter(subtier_agency__subtier_code=subtier_code)
        if agencies.count() == 1:
            return agencies.first()
        else:
            return None
    def __str__(self):
        # "Toptier :: Subtier :: " — trailing separator kept for
        # backward compatibility with existing display/log output.
        stringrep = ""
        for agency in [self.toptier_agency, self.subtier_agency]:
            if agency:
                stringrep = stringrep + agency.name + " :: "
        return stringrep
| StarcoderdataPython |
293136 | #!/usr/bin/env python3
#Advent of Code Day 14 "Extended Polymerization"
import sys
sys.path.append("..")
import submarine
# Advent of Code 2021 day 14: polymer insertion. The submarine's "manual"
# component holds the polymer template, pair-insertion rules and counters.
sub = submarine.Submarine()
filename = "input.txt"
#Part 1: brute-force 10 steps, then diff most/least common element counts.
man = sub.manual
file = open(filename,"r")
man.parse_rules(file.readlines())
print(man.polymer_template)
man.do_steps(number_of_steps=10)
most_common,least_common = man.find_most_and_least_common_elements()
difference = most_common - least_common
print(f"The resultant difference after {man.step} steps is {difference}")
# Reset the step counter so part 2 starts from zero.
man.step = 0
file.close()
#Part 2 Electric Bugalooo: 40 steps with the pair-count optimisation
# (do_better=True); letter counts are read from man.letters_count.
# NOTE(review): this second file handle is never closed.
file = open(filename,"r")
man.parse_rules(file.readlines())
man.do_steps(number_of_steps=40,do_better = True)
letter_count_list = list(man.letters_count.values())
letter_count_list.sort()
# print(f"Letter Count List: {letter_count_list}")
most_common = letter_count_list[-1]
least_common = letter_count_list[0]
difference = most_common - least_common
print(f"The resultant difference after {man.step} steps is {difference}")
43434 | <gh_stars>0
from invoke import task
from .common import docker, constants, config, ROOT_DIR
valid_backend_names = constants.D_BACKENDS
@task(help={'name': '|'.join(constants.D_ALL)})
def build(c, name, local=False, build_frontend=True):
    """Build the docker image for *name*, optionally building the
    frontend image first.

    Fixes: the ``build_frontend`` flag was accepted but ignored (the
    frontend image was always built); it is now honoured. The literal
    ``'frontend'`` is replaced with ``constants.D_FRONTEND`` for
    consistency with ``run()`` below.
    """
    assert name in constants.D_ALL
    if build_frontend:
        docker.auto_build(c, constants.D_FRONTEND, local=local)
    docker.auto_build(c, name, local=local)
@task(help={'name': '|'.join(valid_backend_names)})
def run(c, name, local=False, port=None, rm=True, build_frontend=True):
    """Build and run the backend container for *name*, exposing it on
    *port* (defaults to the configured develop.host_ports.backend) and
    bind-mounting the per-target data directory into /repo/data."""
    assert name in valid_backend_names
    username = config.get_config(c, 'develop.username')
    if build_frontend:
        docker.auto_build(c, constants.D_FRONTEND, host=docker.get_host(c, name, local))
    # Pick the host-side data directory: local checkout, or the
    # device-specific home layout on the embedded targets.
    if local:
        data_mount = f'{ROOT_DIR}/data'
    elif name == constants.D_CORAL_DEV_BORAD:
        data_mount = f'/home/mendel/{username}/data'
    elif name == constants.D_JETSON_NANO:
        data_mount = f'/home/teamtpu/{username}/data'
    else:
        data_mount = f'/home/{username}/data'
    if port is None:
        port = config.get_config(c, 'develop.host_ports.backend')
    docker.auto_build(c, name, local=local)
    # Container serves on 8000; map it to the chosen host port and mount
    # both the data dir and its root/ subdir (as the container's /root).
    docker.auto_run(c, name, p=[f'{port}:8000'], v=[f'{data_mount}:/repo/data', f'{data_mount}/root:/root'], rm=rm,
                    local=local)
| StarcoderdataPython |
3594944 | <filename>flarestack/analyses/angular_error_floor/test_dynamic_pull_correction.py
from __future__ import division
from builtins import str
from builtins import range
import os
import numpy as np
import matplotlib.pyplot as plt
from flarestack.data.icecube.ps_tracks.ps_v002_p01 import IC86_1_dict
from flarestack.data.icecube.ps_tracks.ps_v003_p01 import IC86_234567_dict
from flarestack.shared import plot_output_dir
from flarestack.icecube_utils.dataset_loader import data_loader
from flarestack.core.astro import angular_distance
from numpy.lib.recfunctions import append_fields
# Output directory for all plots produced by this script.
basedir = plot_output_dir("analyses/angular_error_floor/dynamic_pull_corrections/")
# Create it if missing; ignore "already exists" (py2-compatible
# alternative to os.makedirs(..., exist_ok=True)).
try:
    os.makedirs(basedir)
except OSError:
    pass
energy_bins = np.linspace(1.0, 10.0, 20 + 1)
# def get_data(season):
#     mc = data_loader(season["mc_path"], floor=False)
#     x = np.degrees(angular_distance(
#         mc["ra"], mc["dec"], mc["trueRa"], mc["trueDec"]))
#     y = np.degrees(mc["sigma"]) * 1.177
#     return mc, x, y
def weighted_quantile(values, quantiles, weight):
    """Compute weighted quantiles of *values*.

    :param values: array-like of data points
    :param quantiles: quantile (or array-like of quantiles) in [0, 1]
    :param weight: array-like of weights, same length as *values*
    :return: numpy.array (or scalar) of interpolated quantile values
    """
    vals = np.asarray(values)
    qs = np.asarray(quantiles)
    wts = np.asarray(weight)
    # Sort data (and weights) by value, then build the weighted empirical
    # CDF with the half-weight midpoint convention.
    order = np.argsort(vals)
    vals = vals[order]
    wts = wts[order]
    cdf = (np.cumsum(wts) - 0.5 * wts) / np.sum(wts)
    return np.interp(qs, cdf, vals)
# Binning for the 2-D maps: declination bins from the IC86-2011 dataset
# definition; 20 energy-proxy bins in log10(E/GeV) and matching quantiles.
sin_dec_bins = IC86_1_dict["sinDec bins"]
n_log_e_bins = 20 + 1
log_e_bins = np.linspace(2.0, 6.0, n_log_e_bins)
quantiles = np.linspace(0.0, 1.0, n_log_e_bins)
# mc_path = "/lustre/fs22/group/icecube/data_mirror/misc
# /CombinedTracks_AllSky_Uncorrected_MC.npy"
mc_path = IC86_234567_dict["mc_path"]
mc = data_loader(mc_path, floor=False)
exp = data_loader(IC86_234567_dict["exp_path"], floor=False)
# sin_dec_bins = weighted_quantile(
#     mc["sinDec"], quantiles, np.ones_like(mc["sinDec"]))
# for gamma in [1.0, 1.5, 2.0, 3.0, 3.7]:
# For each assumed spectral index gamma: build 2-D (sin(dec) x energy-proxy)
# maps of the median "pull" (true angular error / 1.177*sigma) and of
# pull-corrected 10%/25% angular-error quantiles, save per-declination
# diagnostic plots, then save the summary colour maps.
for gamma in [2.0, 3.5]:
    Z_quantile = np.ones((len(sin_dec_bins), n_log_e_bins))
    Z_uniform = np.ones((len(sin_dec_bins), n_log_e_bins))
    Z_floor = np.ones((len(sin_dec_bins), n_log_e_bins)) * np.nan
    Z_floor2 = np.ones((len(sin_dec_bins), n_log_e_bins)) * np.nan
    # NOTE(review): x/y are reassigned inside the loops below and z is
    # never used — these three initialisers look vestigial.
    x = []
    y = []
    z = []
    subdir = basedir + str(gamma) + "/"
    try:
        os.makedirs(subdir)
    except OSError:
        pass
    # Loop over declination bands.
    for i, lower in enumerate(sin_dec_bins[:-1]):
        upper = sin_dec_bins[i + 1]
        mask = np.logical_and(mc["sinDec"] > lower, mc["sinDec"] < upper)
        cut_mc = mc[mask]
        # Placeholder column used only by the disabled percentile-based
        # corrections. NOTE(review): np.float was removed in NumPy 1.24;
        # this needs np.float64 (or float) on modern NumPy.
        percentile = np.ones_like(cut_mc["ra"]) * np.nan
        cut_mc = append_fields(
            cut_mc, "percentile", percentile, usemask=False, dtypes=[np.float]
        )
        # Power-law event weights for the assumed spectrum E^-gamma.
        weights = cut_mc["ow"] * cut_mc["trueE"] ** -gamma
        # weights = np.ones_like(cut_mc["ow"])
        # Energy-proxy bin edges with equal unweighted MC statistics.
        log_e_quantile_bins = weighted_quantile(
            cut_mc["logE"], quantiles, np.ones_like(cut_mc["ow"])
        )
        # NOTE(review): data_mask is only used by the commented-out
        # data-driven binning alternative below.
        data_mask = np.logical_and(exp["sinDec"] > lower, exp["sinDec"] < upper)
        # (A commented-out alternative deriving the bins from experimental
        # data, plus assorted debug prints/histogram plots, was removed
        # here as dead code; recover from version control if needed.)
        meds = []
        floors = []
        floor2s = []
        floor_corrected = []
        floor2_corrected = []
        ceilings = []
        x_vals = []
        # First pass: fixed log-energy bins -> Z_uniform / Z_floor maps.
        for j, lower_e in enumerate(log_e_bins[:-1]):
            upper_e = log_e_bins[j + 1]
            e_mask = np.logical_and(cut_mc["logE"] >= lower_e, cut_mc["logE"] < upper_e)
            bin_mc = cut_mc[e_mask]
            # True angular error (deg) and estimated error (deg); 1.177
            # converts a Gaussian sigma to the 50% containment radius.
            x = np.degrees(
                angular_distance(
                    bin_mc["ra"], bin_mc["dec"], bin_mc["trueRa"], bin_mc["trueDec"]
                )
            )
            y = np.degrees(bin_mc["sigma"]) * 1.177
            if np.sum(e_mask) > 0:
                [floor, floor2, ceiling] = weighted_quantile(
                    x, [0.1, 0.25, 0.9], weights[e_mask]
                )
                pull = x / y
                median_pull = weighted_quantile(pull, 0.5, weights[e_mask])
                # Duplicate each value so the step plot spans both edges.
                meds += [median_pull for _ in range(2)]
                floors += [floor for _ in range(2)]
                floor2s += [floor2 for _ in range(2)]
                floor_corrected += [floor * median_pull for _ in range(2)]
                floor2_corrected += [floor2 * median_pull for _ in range(2)]
                # ceilings += [ceiling for _ in range(2)]
                x_vals += [lower_e, upper_e]
                # (Commented-out code mapping each event's sigma onto its
                # in-bin percentile via interp1d was removed as dead code.)
                Z_uniform[i][j] = median_pull
                Z_floor[i][j] = floor * median_pull
                Z_floor2[i][j] = floor2 * median_pull
            else:
                Z_uniform[i][j] = np.nan
        # Second pass: equal-statistics quantile bins -> Z_quantile map.
        for j, lower_e in enumerate(log_e_quantile_bins[:-1]):
            upper_e = log_e_quantile_bins[j + 1]
            e_mask = np.logical_and(cut_mc["logE"] >= lower_e, cut_mc["logE"] < upper_e)
            bin_mc = cut_mc[e_mask]
            x = np.degrees(
                angular_distance(
                    bin_mc["ra"], bin_mc["dec"], bin_mc["trueRa"], bin_mc["trueDec"]
                )
            )
            y = np.degrees(bin_mc["sigma"]) * 1.177
            [floor, ceiling] = weighted_quantile(x, [0.1, 0.9], weights[e_mask])
            pull = x / y
            median_pull = weighted_quantile(pull, 0.5, weights[e_mask])
            Z_quantile[i][j] = median_pull
        # Per-declination diagnostic plot of pull and quantiles vs energy.
        plt.figure()
        plt.plot(x_vals, meds, label="Median Pull")
        plt.plot(
            x_vals,
            floors,
            label="10% Quantile Uncorrected (deg)",
            linestyle=":",
            color="orange",
        )
        plt.plot(
            x_vals,
            floor2s,
            label="25% Quantile Uncorrected (deg)",
            linestyle=":",
            color="green",
        )
        plt.plot(x_vals, floor_corrected, label="10% Quantile (deg)", color="orange")
        plt.plot(x_vals, floor2_corrected, label="25% Quantile (deg)", color="green")
        plt.axhline(1.0, linestyle="--")
        # plt.plot(x_vals, ceilings, label="90% Quantile")
        plt.xlabel("log(Energy Proxy/GeV)")
        plt.legend()
        plt.savefig(subdir + "median_pulls_" + str(lower) + ".pdf")
        plt.close()
        # (Two large commented-out blocks lived here: per-declination and
        # summed "pull vs angular-error-percentile" diagnostics. Removed
        # as dead code; recover from version control if needed.)
    # Summary maps of the pull-corrected 10% / 25% error quantiles.
    for l, Z in enumerate([Z_floor, Z_floor2]):
        # Z = np.log(Z)
        plt.figure()
        ax = plt.subplot(111)
        X, Y = np.meshgrid(sin_dec_bins, log_e_bins)
        cbar = ax.pcolor(
            X,
            Y,
            Z.T,
            vmin=0.0,
            vmax=1.0,
            cmap="viridis",
        )
        plt.colorbar(cbar, label="Error (deg)")
        plt.xlabel(r"$\sin(\delta)$")
        plt.ylabel("Log(Energy proxy)")
        plt.savefig(
            basedir
            + "2D_meds_"
            + str(gamma)
            + "_"
            + ["floor_10", "floor_25"][l]
            + ".pdf"
        )
        plt.close()
    # Summary maps of log(median pull) for fixed vs quantile binning.
    for l, Z in enumerate(
        [
            Z_uniform,
            Z_quantile,
        ]
    ):
        Z = np.log(Z)
        max_col = 1.0
        plt.figure()
        ax = plt.subplot(111)
        X, Y = np.meshgrid(
            sin_dec_bins, [log_e_bins, quantiles, log_e_bins, log_e_bins][l]
        )
        cbar = ax.pcolor(
            X,
            Y,
            Z.T,
            vmin=-max_col,
            vmax=max_col,
            cmap="seismic",
        )
        plt.colorbar(cbar, label="Log(Pull)")
        plt.xlabel(r"$\sin(\delta)$")
        plt.ylabel(
            [
                "Log(Energy proxy)",
                "Unweighted energy proxy percentile",
                "Log(Energy proxy)",
                "Log(Energy proxy)",
            ][l]
        )
        plt.savefig(
            basedir
            + "2D_meds_"
            + str(gamma)
            + "_"
            + ["standard", "percentile"][l]
            + ".pdf"
        )
        plt.close()
| StarcoderdataPython |
9764709 | <gh_stars>0
# Import-time side effect: announce that the module was loaded.
print("bob.py was just imported")
def ben():
    """Print "ben" to stdout."""
    print("ben")
class Jim:
    """Demo class illustrating method access after import."""
    def jane(self, hey):
        """Print "jane"; *hey* is accepted but unused."""
        print("jane")
| StarcoderdataPython |
9702130 | import cv2
import numpy as np
# Compute and display the normalized integral image of a grayscale photo.
# NOTE(review): hard-coded Windows path with unescaped backslashes — '\m',
# '\o', '\i', '\s' happen to be no-op escapes here, but a raw string
# (r'...') would be safer.
filename = 'D:\master\opencv-python\image\sad.jpg'
image = cv2.imread(filename, cv2.IMREAD_GRAYSCALE)
# print(image)
cv2.imshow('origin', image)
h, w = image.shape[:2]  # image height and width (grayscale: no channel axis)
# Output buffer of size (h+1, w+1), zero-filled, as required by cv2.integral.
# NOTE(review): the name shadows the builtin sum().
sum = np.zeros((h + 1, w + 1), dtype=np.float32)
imageIntegral = cv2.integral(image, sum, cv2.CV_32FC1)  # integral image written into sum
result = np.zeros((h + 1, w + 1), dtype=np.uint8)
# Rescale the integral image into the displayable 0-255 range.
cv2.normalize(imageIntegral, result, 0, 255, cv2.NORM_MINMAX,
              cv2.CV_8UC1)
cv2.imshow("Image", result)
# cv2.imwrite("D:/Car_Identify/papers_for_edge/integral_result.jpg", result)
cv2.waitKey()
| StarcoderdataPython |
1747646 | <reponame>Picarro-kskog/mcculw
from __future__ import absolute_import, division, print_function
from builtins import * # @UnusedWildImport
from mcculw import ul
from mcculw.ul import ULError
from examples.console import util
from examples.props.ai import AnalogInputProps
# When True, the DAQ device is discovered at runtime instead of relying on
# a prior InstaCal configuration.
use_device_detection = True
def run_example():
    """Read one voltage from analog-input channel 0 of board 0 and print it.

    Chooses ul.v_in or ul.v_in_32 based on the board's ADC resolution and
    always releases the device when detection was used.
    """
    board_num = 0
    if use_device_detection:
        ul.ignore_instacal()
        if not util.config_first_detected_device(board_num):
            print("Could not find device.")
            return
    channel = 0
    ai_props = AnalogInputProps(board_num)
    if ai_props.num_ai_chans < 1:
        # Board has no analog-input channels; this example cannot run.
        util.print_unsupported_example(board_num)
        return
    ai_range = ai_props.available_ranges[0]
    try:
        # Get a value from the device
        if ai_props.resolution <= 16:
            # Use the v_in method for devices with a resolution <= 16
            # (optional parameter omitted)
            value = ul.v_in(board_num, channel, ai_range)
        else:
            # Use the v_in_32 method for devices with a resolution > 16
            # (optional parameter omitted)
            value = ul.v_in_32(board_num, channel, ai_range)
        # Display the value
        print("Value: " + str(value))
    except ULError as e:
        util.print_ul_error(e)
    finally:
        if use_device_detection:
            ul.release_daq_device(board_num)
if __name__ == '__main__':
    run_example()
| StarcoderdataPython |
3524522 | # Copyright European Organization for Nuclear Research (CERN)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Authors:
# - <NAME>, <<EMAIL>>, 2013
# - <NAME>, <<EMAIL>>, 2017
from rucio.core import monitor
class TestMonitor(object):
    """Exercise the graphite-facing helpers in rucio.core.monitor.

    These are smoke tests: each one simply sends a metric and succeeds
    when no exception is raised.
    """

    @classmethod
    def setupClass(cls):
        pass

    @classmethod
    def tearDownClass(cls):
        pass

    @staticmethod
    def test_record_counter_message():
        """MONITOR (CORE): Send a counter message to graphite """
        monitor.record_counter('test.counter', 10)

    @staticmethod
    def test_record_gauge_message():
        """MONITOR (CORE): Send a gauge message to graphite """
        monitor.record_gauge('test.gauge', 10)

    @staticmethod
    def test_record_timer_message():
        """MONITOR (CORE): Send a timer message to graphite """
        monitor.record_timer('test.runtime', 500)

    @staticmethod
    def test_context_record_timer():
        """MONITOR (CORE): Send a timer message to graphite using context """
        # Each block performs a little throwaway arithmetic so the timer
        # measures a non-empty region; the value itself is irrelevant.
        with monitor.record_timer_block('test.context_timer'):
            scratch = 2 * 100
            scratch = scratch * 1
        with monitor.record_timer_block(['test.context_timer']):
            scratch = 2 * 100
            scratch = scratch * 1
        with monitor.record_timer_block(['test.context_timer', ('test.context_timer_normal10', 10)]):
            scratch = 2 * 100
            scratch = scratch * 1
| StarcoderdataPython |
137591 | #Author(s): <NAME> (15051) and <NAME> (15118)
#Code under the project of the course PHY312 Numerical Methods and Programming
import numpy as np
from math import *
import matplotlib.pylab as plt
import pyfits as pf
from mpl_toolkits import mplot3d
from gaussfit import *
from Analysis import *
def plot_all_star(A):
    """For every detected peak in image array A, fit a 2-D Gaussian to a
    51x51 pixel cutout around the peak and show a 3-D surface plot plus
    printed fit parameters. (Python 2 code: print statements below.)

    A: 2-D image array indexed as A[row][col], i.e. A[y][x].
    """
    # all_peaks and the Gaussian helpers come from Analysis / gaussfit.
    L = all_peaks(A)
    for h in range(len(L)):
        ax = plt.axes(projection='3d')
        (a, b) = L[h]
        # 51x51 cutout window centred on the peak (a, b).
        # NOTE(review): no bounds check — peaks within 25 px of the image
        # edge will raise an IndexError / wrap via negative indices.
        x_st = a-25 ; x_en = a+25
        y_st = b-25 ; y_en = b+25
        z = []
        for i in range(y_st, y_en+1, 1):
            temp = []
            for j in range(x_st, x_en+1, 1):
                temp.append(A[i][j])
            z.append(temp)
        x = np.linspace(x_st, x_en, (x_en - x_st) + 1)
        y = np.linspace(y_st, y_en, (y_en - y_st) + 1)
        Xin, Yin = np.meshgrid(x, y)
        z = np.array(z)
        data = z
        plt.matshow(data, cmap=plt.cm.gist_earth_r)
        # Fit and evaluate the 2-D Gaussian over the cutout grid.
        params = fitgaussian(data)
        fit = gaussian(*params)
        p =ax.plot_surface(Xin, Yin, fit(*np.indices(data.shape)), cmap=plt.cm.copper)
        ax.scatter(Xin, Yin, z, marker = '.')
        plt.xlabel('X axis')
        plt.ylabel('Y axis')
        plt.colorbar()
        ax.set_xlabel('X axis')
        ax.set_ylabel('Y axis')
        ax.set_zlabel('pixel count')
        ax = plt.gca()
        # Gaussian fit parameters; FWHM = 2*sqrt(ln 2) * width (per axis).
        (height, x, y, width_x, width_y, base) = params
        fwhm_x = 2*sqrt(log(2.0))*width_x
        fwhm_y = 2*sqrt(log(2.0))*width_y
        print 'Coordinates of the target Object',(a,b)
        #print 'FWHM-X =', fwhm_x, '\nFWHM-Y =', fwhm_y
        print 'Sky base =' , base
        print 'Peak value =', height
        print 'Mean (x,y) =', (x,y)
        plt.show()
    return None
if __name__ =='__main__':
    A = pf.getdata("J0901p3846_R.4578.0_clean_crop.fits")
    plot_all_star(A)
| StarcoderdataPython |
9753514 | import unittest
from typing import List
from octopus import DumboOctopi
class TestDumboOctopi(unittest.TestCase):
    """Regression tests for the day-11 DumboOctopi simulation.

    Part 1 counts flashes over 100 ticks; part 2 finds the first tick on
    which the whole grid flashes simultaneously. The four near-identical
    simulation loops from the original are factored into two private
    helpers; the public test method names are unchanged.
    """

    # Safety bound mirroring the original `i < 100000` loop guard.
    MAX_SYNC_STEPS = 100000

    def setUp(self) -> None:
        pass

    def _total_flashes(self, filename: str, steps: int = 100) -> int:
        """Simulate *steps* ticks of the grid in *filename*; return the
        total number of flashes observed."""
        octopi = DumboOctopi(file_read_helper(filename))
        return sum(octopi.tick() for _ in range(steps))

    def _sync_step(self, filename: str) -> int:
        """Return the first tick on which every octopus flashes at once."""
        octopi = DumboOctopi(file_read_helper(filename))
        grid = octopi.octopi_energy
        full_flash = len(grid) * len(grid[0])
        for step in range(1, self.MAX_SYNC_STEPS + 1):
            if octopi.tick() == full_flash:
                return step
        # Mirrors the original behaviour of stopping at the bound even if
        # the grid never synchronises.
        return self.MAX_SYNC_STEPS

    def test_dumbo_octopus_sample(self) -> None:
        self.assertEqual(self._total_flashes('day-11/sample_input.txt'), 1656)

    def test_dumbo_octopus_puzzle(self) -> None:
        self.assertEqual(self._total_flashes('day-11/puzzle_input.txt'), 1647)

    def test_dumbo_octopus_sync_sample(self) -> None:
        self.assertEqual(self._sync_step('day-11/sample_input.txt'), 195)

    def test_dumbo_octopus_sync_puzzle(self) -> None:
        self.assertEqual(self._sync_step('day-11/puzzle_input.txt'), 348)
def file_read_helper(filename: str) -> List[str]:
    """Read *filename* (UTF-8) and return its lines with surrounding
    whitespace (including the trailing newline) stripped."""
    with open(filename, 'r', encoding='UTF-8') as handle:
        return [raw.strip() for raw in handle]
if __name__ == '__main__':
unittest.main()
| StarcoderdataPython |
12853550 | from functools import wraps
from flask import request, make_response
from .exceptions import ApiError
from .schemas import create_schema, ma_version_lt_300b7
def request_schema(schema_or_dict, extends=None, many=None, cache_schema=True, pass_data=False):
schema_ = create_schema(schema_or_dict, extends)
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
schema = cache_schema and schema_ or create_schema(schema_or_dict, extends)
if request.json is None:
# NOTE: this should be fixed with marshmallow 3 (and 2.16?)
raise ApiError('JSON data required')
data = schema.load(request.json, many=many)
if ma_version_lt_300b7:
data = data.data
if pass_data:
kwargs.update({'data' if pass_data is True else pass_data: data})
else:
kwargs.update(data)
return func(*args, **kwargs)
return wrapper
return decorator
def request_args_schema(schema_or_dict, extends=None, cache_schema=True, pass_data=False):
schema_ = create_schema(schema_or_dict, extends)
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
schema = cache_schema and schema_ or create_schema(schema_or_dict, extends)
data = schema.load(request.args)
if ma_version_lt_300b7:
data = data.data
if pass_data:
kwargs.update({'data' if pass_data is True else pass_data: data})
else:
kwargs.update(data)
return func(*args, **kwargs)
return wrapper
return decorator
def response_schema(schema_or_dict, extends=None, many=None, cache_schema=True):
schema_ = create_schema(schema_or_dict, extends)
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
schema = cache_schema and schema_ or create_schema(schema_or_dict, extends)
result = func(*args, **kwargs)
if isinstance(result, (list, tuple)) and (schema.many or many):
data = schema.dump(result, many=many)
else:
data = schema.dump(result, many=many)
if ma_version_lt_300b7:
data = data.data
return data
return wrapper
return decorator
def response_headers(headers={}):
"""
This decorator adds the headers passed in to the response
"""
# http://flask.pocoo.org/snippets/100/
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
resp = make_response(func(*args, **kwargs))
h = resp.headers
for header, value in headers.items():
h[header] = value
return resp
return wrapper
return decorator
def response_headers_no_cache(func):
@wraps(func)
@response_headers({
'Cache-Control': 'no-store',
'Pragma': 'no-cache',
})
def wrapper(*args, **kwargs):
return func(*args, **kwargs)
return wrapper
| StarcoderdataPython |
8180928 | import re
result = 0
double = re.compile( r"([a-z]{2}).*\1" )
repeat = re.compile( r"([a-z]).\1")
with open("input.txt", "r") as input:
for line in input:
line = line.strip()
prop1 = double.search(line) is not None
prop2 = repeat.search(line) is not None
if prop1 and prop2:
result += 1
with open("output2.txt", "w") as output:
output.write( str(result) )
print(str(result)) | StarcoderdataPython |
3551155 | <gh_stars>1-10
import numpy as np
from PIL import Image, ImageDraw
from srunner.scenariomanager.carla_data_provider import CarlaDataProvider
from team_code.base_agent import BaseAgent
from team_code.planner import RoutePlanner
class MapAgent(BaseAgent):
    """BaseAgent extended with a bird's-eye semantic-segmentation camera and a
    lane-invasion sensor; each tick it annotates the top-down view with the
    nearby traffic lights and stop signs."""

    def sensors(self):
        """Add the top-down semantic camera and the lane detector to the
        sensors declared by BaseAgent."""
        sensor_specs = super().sensors()
        sensor_specs += [
            {
                'type': 'sensor.camera.semantic_segmentation',
                'x': 0.0, 'y': 0.0, 'z': 100.0,
                'roll': 0.0, 'pitch': -90.0, 'yaw': 0.0,
                'width': 512, 'height': 512, 'fov': 5 * 10.0,
                'id': 'map'
            },
            {
                'type': 'sensor.other.lane_invasion',
                'id': 'lane_detector'
            },
        ]
        return sensor_specs

    def set_global_plan(self, global_plan_gps, global_plan_world_coord):
        """Forward the plan to BaseAgent and keep private copies for the
        waypoint planner set up in _init()."""
        super().set_global_plan(global_plan_gps, global_plan_world_coord)
        self._plan_HACK = global_plan_world_coord
        self._plan_gps_HACK = global_plan_gps

    def _init(self):
        super()._init()
        self._vehicle = CarlaDataProvider.get_hero_actor()
        self._world = self._vehicle.get_world()
        self._waypoint_planner = RoutePlanner(4.0, 50)
        self._waypoint_planner.set_route(self._plan_gps_HACK, True)
        self._traffic_lights = list()

    def tick(self, input_data):
        """Return BaseAgent's tick result with an extra 'topdown' image that
        has nearby traffic lights and stop signs drawn onto it."""
        self._actors = self._world.get_actors()
        self._traffic_lights = get_nearby_lights(
            self._vehicle, self._actors.filter('*traffic_light*'))
        self._stop_signs = get_nearby_lights(
            self._vehicle, self._actors.filter('*stop*'))
        # Channel 2 of the semantic camera image carries the class labels.
        topdown = input_data['map'][1][:, :, 2]
        topdown = draw_traffic_lights(topdown, self._vehicle, self._traffic_lights)
        topdown = draw_stop_signs(topdown, self._vehicle, self._stop_signs)
        result = super().tick(input_data)
        result['topdown'] = topdown
        return result
def get_nearby_lights(vehicle, lights, pixels_per_meter=5.5, size=512, radius=5):
    """Return the subset of *lights* that project inside the top-down image
    and whose trigger volume can overlap the vehicle's bounding sphere.

    (`radius` is unused here; it is kept so the signature mirrors the
    draw_* helpers.)
    """
    nearby = list()
    ego_transform = vehicle.get_transform()
    ego_pos = ego_transform.location
    # Rotation into the ego-aligned, image-oriented frame.
    angle = np.radians(90 + ego_transform.rotation.yaw)
    rot = np.array([
        [np.cos(angle), -np.sin(angle)],
        [np.sin(angle), np.cos(angle)],
    ])
    for light in lights:
        offset = light.get_transform().location - ego_pos
        pixel = rot.T.dot([offset.x, offset.y]) * pixels_per_meter + size // 2
        if min(pixel) < 0 or max(pixel) >= size:
            continue  # projects outside the top-down image
        trigger = light.trigger_volume
        # NOTE: transform() converts trigger.location to world coordinates
        # *in place* (mutates the actor's trigger volume).
        light.get_transform().transform(trigger.location)
        dist = trigger.location.distance(vehicle.get_location())
        trigger_r = np.sqrt(
            trigger.extent.x ** 2 +
            trigger.extent.y ** 2 +
            trigger.extent.z ** 2)
        ego_r = np.sqrt(
            vehicle.bounding_box.extent.x ** 2 +
            vehicle.bounding_box.extent.y ** 2 +
            vehicle.bounding_box.extent.z ** 2)
        if dist > trigger_r + ego_r:
            continue  # bounding spheres cannot overlap
        nearby.append(light)
    return nearby
def draw_traffic_lights(image, vehicle, lights, pixels_per_meter=5.5, size=512, radius=5):
    """Render each in-range traffic light as a filled circle on the top-down
    label image; the fill value encodes the light's state.  Returns a new
    numpy array."""
    canvas = Image.fromarray(image)
    painter = ImageDraw.Draw(canvas)
    ego_transform = vehicle.get_transform()
    ego_pos = ego_transform.location
    angle = np.radians(90 + ego_transform.rotation.yaw)
    rot = np.array([
        [np.cos(angle), -np.sin(angle)],
        [np.sin(angle), np.cos(angle)],
    ])
    for light in lights:
        offset = light.get_transform().location - ego_pos
        pixel = rot.T.dot([offset.x, offset.y]) * pixels_per_meter + size // 2
        if min(pixel) < 0 or max(pixel) >= size:
            continue  # projects outside the image
        trigger = light.trigger_volume
        # transform() converts trigger.location to world coordinates in place.
        light.get_transform().transform(trigger.location)
        dist = trigger.location.distance(vehicle.get_location())
        trigger_r = np.sqrt(
            trigger.extent.x ** 2 +
            trigger.extent.y ** 2 +
            trigger.extent.z ** 2)
        ego_r = np.sqrt(
            vehicle.bounding_box.extent.x ** 2 +
            vehicle.bounding_box.extent.y ** 2 +
            vehicle.bounding_box.extent.z ** 2)
        if dist > trigger_r + ego_r:
            continue  # too far for the trigger volume to matter
        x, y = pixel
        # Fill = 23 + state (13 changed to 23 for carla 0.9.10).
        painter.ellipse((x - radius, y - radius, x + radius, y + radius),
                        23 + light.state.real)
    return np.array(canvas)
def draw_stop_signs(image, vehicle, lights, pixels_per_meter=5.5, size=512, radius=5):
    """Render each in-range stop sign as a filled circle (label value 26) on
    the top-down label image.  Returns a new numpy array."""
    canvas = Image.fromarray(image)
    painter = ImageDraw.Draw(canvas)
    ego_transform = vehicle.get_transform()
    ego_pos = ego_transform.location
    angle = np.radians(90 + ego_transform.rotation.yaw)
    rot = np.array([
        [np.cos(angle), -np.sin(angle)],
        [np.sin(angle), np.cos(angle)],
    ])
    for light in lights:
        offset = light.get_transform().location - ego_pos
        pixel = rot.T.dot([offset.x, offset.y]) * pixels_per_meter + size // 2
        if min(pixel) < 0 or max(pixel) >= size:
            continue  # projects outside the image
        trigger = light.trigger_volume
        # transform() converts trigger.location to world coordinates in place.
        light.get_transform().transform(trigger.location)
        dist = trigger.location.distance(vehicle.get_location())
        trigger_r = np.sqrt(
            trigger.extent.x ** 2 +
            trigger.extent.y ** 2 +
            trigger.extent.z ** 2)
        ego_r = np.sqrt(
            vehicle.bounding_box.extent.x ** 2 +
            vehicle.bounding_box.extent.y ** 2 +
            vehicle.bounding_box.extent.z ** 2)
        if dist > trigger_r + ego_r:
            continue  # too far for the trigger volume to matter
        x, y = pixel
        painter.ellipse((x - radius, y - radius, x + radius, y + radius), 26)
    return np.array(canvas)
| StarcoderdataPython |
1763645 | import backup
# Run the project's database backup update routine (side effect only;
# `backup` is a project-local module).
backup.Bdd_Updates()
8023427 | # Copyright Notice:
# Copyright 2016-2019 DMTF. All rights reserved.
# License: BSD 3-Clause License. For full text see link: https://github.com/DMTF/Redfish-Interface-Emulator/blob/master/LICENSE.md
# get_chassis_template()
#from api_emulator.utils import timestamp
import copy

# Master template for a Redfish Chassis resource.  The '{rb}' (REST base URL)
# and '{id}' (chassis identifier) placeholders are filled in by
# get_chassis_template(); the template itself must never be mutated.
_CHASSIS_TEMPLATE = \
    {
        "@odata.context": "{rb}$metadata#Chassis/Links/Members/$entity",
        "@odata.id": "{rb}Chassis/{id}",
        "@odata.type": "#Chassis.1.0.0.Chassis",
        #"Id": None,
        "Name": "Computer System Chassis",
        "ChassisType": "RackMount",
        "Manufacturer": "Redfish Computers",
        "Model": "3500RX",
        "SKU": "8675309",
        "SerialNumber": "437XR1138R2",
        "Version": "1.02",
        "PartNumber": "224071-J23",
        "AssetTag": "Chicago-45Z-2381",
        "Status": {
            "State": "Enabled",
            "Health": "OK"
        },
        "Links": {
            "ComputerSystems": [
                {
                    "@odata.id": "{rb}Systems/"
                }
            ],
            "ManagedBy": [
                {
                    "@odata.id": "{rb}Managers/1"
                }
            ],
            "ThermalMetrics": {
                "@odata.id": "{rb}Chassis/{id}/ThermalMetrics"
            },
            "PowerMetrics": {
                "@odata.id": "{rb}Chassis/{id}/PowerMetrics"
            },
            "MiscMetrics": {
                "@odata.id": "{rb}Chassis/{id}/MiscMetrics"
            },
            "Oem": {}
        },
        "Oem": {}
    }


def get_chassis_template(rest_base, ident):
    """
    Formats the template

    Arguments:
        rest_base - Base URL for the RESTful interface
        ident     - ID of the chassis

    Returns a fresh, fully formatted chassis resource dict.
    """
    # BUG FIX: the original used the shallow _CHASSIS_TEMPLATE.copy(), so the
    # nested 'Links' dicts were shared with the template; formatting them in
    # place corrupted the template, and every later call silently reused the
    # first caller's rest_base/ident.  A deep copy keeps calls independent.
    c = copy.deepcopy(_CHASSIS_TEMPLATE)
    # Formatting
    #c['Id'] = ident
    c['@odata.context'] = c['@odata.context'].format(rb=rest_base)
    c['@odata.id'] = c['@odata.id'].format(rb=rest_base, id=ident)
    c['Links']['ManagedBy'][0]['@odata.id'] = c['Links']['ManagedBy'][0]['@odata.id'].format(rb=rest_base)
    c['Links']['ThermalMetrics']['@odata.id'] = c['Links']['ThermalMetrics']['@odata.id'].format(rb=rest_base, id=ident)
    c['Links']['PowerMetrics']['@odata.id'] = c['Links']['PowerMetrics']['@odata.id'].format(rb=rest_base, id=ident)
    c['Links']['MiscMetrics']['@odata.id'] = c['Links']['MiscMetrics']['@odata.id'].format(rb=rest_base, id=ident)
    c['Links']['ComputerSystems'][0]['@odata.id'] = c['Links']['ComputerSystems'][0]['@odata.id'].format(rb=rest_base)
    return c
| StarcoderdataPython |
4890892 | # Copyright 2012, <NAME>, NTT MCL, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import shlex
import socket
import netaddr
from neutron_lib.api.definitions import portbindings
from neutron_lib import constants
from oslo_log import log as logging
from neutron.agent.linux import dhcp
from neutron.agent.linux import ip_lib
LOG = logging.getLogger(__name__)
DEVICE_OWNER_NETWORK_PROBE = constants.DEVICE_OWNER_NETWORK_PREFIX + 'probe'
DEVICE_OWNER_COMPUTE_PROBE = constants.DEVICE_OWNER_COMPUTE_PREFIX + 'probe'
class NeutronDebugAgent(object):
    """Debugging helper that creates short-lived "probe" ports on Neutron
    networks so an operator can run commands (e.g. ping) from inside a
    dedicated network namespace.

    Collaborators:
        conf   - agent configuration (host name, external network bridge, ...)
        client - a Neutron API client
        driver - an interface driver used to plug/unplug probe interfaces
    """

    def __init__(self, conf, client, driver):
        self.conf = conf
        self.client = client
        self.driver = driver

    def _get_namespace(self, port):
        # One namespace per probe port, keyed by the port UUID.
        return "qprobe-%s" % port.id

    def create_probe(self, network_id, device_owner='network'):
        """Create a probe port on *network_id*, plug it into its own
        namespace, configure its fixed IPs, and return the port model."""
        network = self._get_network(network_id)
        bridge = None
        if network.external:
            bridge = self.conf.external_network_bridge
        port = self._create_port(network, device_owner)
        interface_name = self.driver.get_device_name(port)
        namespace = self._get_namespace(port)
        if ip_lib.device_exists(interface_name, namespace=namespace):
            LOG.debug('Reusing existing device: %s.', interface_name)
        else:
            self.driver.plug(network.id,
                             port.id,
                             interface_name,
                             port.mac_address,
                             bridge=bridge,
                             namespace=namespace)
        # Collect "address/prefixlen" strings for every fixed IP of the port.
        ip_cidrs = []
        for fixed_ip in port.fixed_ips:
            subnet = fixed_ip.subnet
            net = netaddr.IPNetwork(subnet.cidr)
            ip_cidr = '%s/%s' % (fixed_ip.ip_address, net.prefixlen)
            ip_cidrs.append(ip_cidr)
        self.driver.init_l3(interface_name, ip_cidrs, namespace=namespace)
        return port

    def _get_subnet(self, subnet_id):
        # Wrap the raw API dict so fields are attribute-accessible.
        subnet_dict = self.client.show_subnet(subnet_id)['subnet']
        return dhcp.DictModel(subnet_dict)

    def _get_network(self, network_id):
        # Fetch the network and eagerly resolve its subnets into models.
        network_dict = self.client.show_network(network_id)['network']
        network = dhcp.DictModel(network_dict)
        network.external = network_dict.get('router:external')
        obj_subnet = [self._get_subnet(s_id) for s_id in network.subnets]
        network.subnets = obj_subnet
        return network

    def clear_probes(self):
        """Returns number of deleted probes"""
        ports = self.client.list_ports(
            device_id=socket.gethostname(),
            device_owner=[DEVICE_OWNER_NETWORK_PROBE,
                          DEVICE_OWNER_COMPUTE_PROBE])
        info = ports['ports']
        for port in info:
            self.delete_probe(port['id'])
        return len(info)

    def delete_probe(self, port_id):
        """Unplug the probe's interface, remove its namespace (best effort),
        then delete the Neutron port."""
        port = dhcp.DictModel(self.client.show_port(port_id)['port'])
        network = self._get_network(port.network_id)
        bridge = None
        if network.external:
            bridge = self.conf.external_network_bridge
        ip = ip_lib.IPWrapper()
        namespace = self._get_namespace(port)
        if ip.netns.exists(namespace):
            self.driver.unplug(self.driver.get_device_name(port),
                               bridge=bridge,
                               namespace=namespace)
            try:
                ip.netns.delete(namespace)
            except Exception:
                # Namespace removal is best effort; log and continue so the
                # port itself is still deleted below.
                LOG.warning('Failed to delete namespace %s', namespace)
        else:
            self.driver.unplug(self.driver.get_device_name(port),
                               bridge=bridge)
        self.client.delete_port(port.id)

    def list_probes(self):
        """Return the probe port dicts, each annotated with the local
        device name used for its interface."""
        ports = self.client.list_ports(
            device_owner=[DEVICE_OWNER_NETWORK_PROBE,
                          DEVICE_OWNER_COMPUTE_PROBE])
        info = ports['ports']
        for port in info:
            port['device_name'] = self.driver.get_device_name(
                dhcp.DictModel(port))
        return info

    def exec_command(self, port_id, command=None):
        """Execute *command* inside the probe's namespace.  With no command,
        return the shell prefix an operator can use to do so manually."""
        port = dhcp.DictModel(self.client.show_port(port_id)['port'])
        ip = ip_lib.IPWrapper()
        namespace = self._get_namespace(port)
        if not command:
            return "sudo ip netns exec %s" % self._get_namespace(port)
        namespace = ip.ensure_namespace(namespace)
        return namespace.netns.execute(shlex.split(command))

    def ensure_probe(self, network_id):
        """Return this host's existing probe port on the network, creating
        one if none exists yet."""
        ports = self.client.list_ports(network_id=network_id,
                                       device_id=socket.gethostname(),
                                       device_owner=DEVICE_OWNER_NETWORK_PROBE)
        info = ports.get('ports', [])
        if info:
            return dhcp.DictModel(info[0])
        else:
            return self.create_probe(network_id)

    def ping_all(self, network_id=None, timeout=1):
        """Ping every fixed IP of every non-probe port from a probe on the
        corresponding network; return the concatenated ping output."""
        if network_id:
            ports = self.client.list_ports(network_id=network_id)['ports']
        else:
            ports = self.client.list_ports()['ports']
        result = ""
        for port in ports:
            probe = self.ensure_probe(port['network_id'])
            if port['device_owner'] == DEVICE_OWNER_NETWORK_PROBE:
                continue
            for fixed_ip in port['fixed_ips']:
                address = fixed_ip['ip_address']
                subnet = self._get_subnet(fixed_ip['subnet_id'])
                # Pick the ping binary matching the subnet's IP version.
                if subnet.ip_version == 4:
                    ping_command = 'ping'
                else:
                    ping_command = 'ping6'
                result += self.exec_command(probe.id,
                                            '%s -c 1 -w %s %s' % (ping_command,
                                                                  timeout,
                                                                  address))
        return result

    def _create_port(self, network, device_owner):
        """Create the Neutron port used as a probe: owned by this host, with
        one fixed IP per subnet of *network*."""
        host = self.conf.host
        body = {'port': {'admin_state_up': True,
                         'network_id': network.id,
                         'device_id': '%s' % socket.gethostname(),
                         'device_owner': '%s:probe' % device_owner,
                         'tenant_id': network.tenant_id,
                         portbindings.HOST_ID: host,
                         'fixed_ips': [dict(subnet_id=s.id)
                                       for s in network.subnets]}}
        port_dict = self.client.create_port(body)['port']
        port = dhcp.DictModel(port_dict)
        port.network = network
        for fixed_ip in port.fixed_ips:
            fixed_ip.subnet = self._get_subnet(fixed_ip.subnet_id)
        return port
| StarcoderdataPython |
11359858 | # Copyright (c) 2014 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import saharaclient.api.base as sab
from sahara.openstack.common import timeutils
from sahara.tests.integration.tests import base
from sahara.tests.integration.tests import edp
from sahara.utils import edp as utils_edp
class TransientClusterTest(edp.EDPTest):
    """Integration test for transient clusters: create one, run an EDP Pig
    job on it, then verify Sahara tears the cluster down by itself."""

    @base.skip_test(
        'SKIP_TRANSIENT_CLUSTER_TEST',
        message='Test for transient cluster was skipped.')
    def transient_cluster_testing(self, plugin_config, floating_ip_pool,
                                  internal_neutron_net):
        """Create a transient Vanilla cluster, check EDP on it, and assert
        the cluster deletes itself within the configured timeout.

        plugin_config        -- plugin settings (name, etc.)
        floating_ip_pool     -- floating IP pool for the node groups
        internal_neutron_net -- management network id
        """
        cluster_template_id = self.create_cluster_template(
            name='test-transient-cluster-template-vanilla',
            plugin_config=self.vanilla_config,
            description=('test cluster template for transient cluster '
                         'of Vanilla plugin'),
            cluster_configs={},
            node_groups=[
                dict(
                    name='master-node',
                    flavor_id=self.flavor_id,
                    node_processes=['namenode', 'oozie', 'jobtracker'],
                    floating_ip_pool=floating_ip_pool,
                    count=1),
                dict(
                    name='worker-node',
                    flavor_id=self.flavor_id,
                    node_processes=['datanode', 'tasktracker'],
                    floating_ip_pool=floating_ip_pool,
                    count=1)
            ],
            net_id=internal_neutron_net
        )
        try:
            # create a transient cluster
            try:
                cluster_name = (self.common_config.CLUSTER_NAME + '-transient-'
                                + plugin_config.PLUGIN_NAME)
                self.create_cluster(
                    name=cluster_name,
                    plugin_config=plugin_config,
                    cluster_template_id=cluster_template_id,
                    description='test transient cluster',
                    cluster_configs={},
                    is_transient=True
                )
            except Exception:
                self.delete_objects(cluster_id=self.cluster_id)
                raise
            # check EDP
            path = 'sahara/tests/integration/tests/resources/'
            # Read the job fixtures with context managers; the original
            # open(...).read() calls leaked both file handles.
            with open(path + 'edp-job.pig') as pig_job_file:
                pig_job_data = pig_job_file.read()
            # NOTE(review): opened in text mode like the original; a .jar is
            # binary data, so 'rb' is probably intended -- confirm what
            # edp_testing() expects before changing.
            with open(path + 'edp-lib.jar') as pig_lib_file:
                pig_lib_data = pig_lib_file.read()
            self.edp_testing(job_type=utils_edp.JOB_TYPE_PIG,
                             job_data_list=[{'pig': pig_job_data}],
                             lib_data_list=[{'jar': pig_lib_data}])
            # set timeout in seconds
            timeout = self.common_config.TRANSIENT_CLUSTER_TIMEOUT * 60
            s_time = timeutils.utcnow()
            raise_failure = True
            # wait for cluster deleting
            while timeutils.delta_seconds(
                    s_time, timeutils.utcnow()) < timeout:
                try:
                    self.sahara.clusters.get(self.cluster_id)
                except sab.APIException as api_ex:
                    if 'not found' in api_ex.message:
                        # Cluster is gone -- transient teardown worked.
                        raise_failure = False
                        break
                time.sleep(2)
            if raise_failure:
                self.delete_objects(cluster_id=self.cluster_id)
                self.fail('Transient cluster has not been deleted within %s '
                          'minutes.'
                          % self.common_config.TRANSIENT_CLUSTER_TIMEOUT)
        finally:
            self.delete_objects(cluster_template_id=cluster_template_id)
| StarcoderdataPython |
1707926 | <reponame>huong-rose/student-practices
def char_frequencies(s):
    """Return "ch(count)" strings for each distinct character of *s*, in
    order of first appearance.

    >>> char_frequencies("aab")
    ['a(2)', 'b(1)']
    """
    # Single O(n) pass; the original called s.count() for every index (O(n^2))
    # and then de-duplicated the resulting pairs.
    counts = {}
    for ch in s:
        counts[ch] = counts.get(ch, 0) + 1
    # dict preserves insertion order, i.e. first-appearance order.
    return [f"{ch}({cnt})" for ch, cnt in counts.items()]


if __name__ == "__main__":
    # Same observable behavior as the original script: read one line and
    # print one "char(count)" entry per distinct character.
    for entry in char_frequencies(input()):
        print(entry)
158450 | <gh_stars>1-10
#!/usr/bin/env python3
import argparse
from random import choice
from multiprocessing import freeze_support
from textwrap import dedent
from common import VALID_FILENAME, ANSWER_FILENAME, DEFAULT_ROUNDS, Mode, get_words
from play import play
from benchmark import benchmark
DEFAULT_MODE = Mode.SOLVE


def run(args):
    """Execute the game or benchmark according to the parsed CLI arguments."""
    # The mode flag the user supplied wins; otherwise fall back to the
    # default.  (argparse store_const leaves the attribute as None when the
    # flag is absent, or the Mode constant when present -- still a hack.)
    requested = (getattr(args, m.value) for m in Mode)
    mode = next((m for m in requested if m is not None), DEFAULT_MODE)

    valid_words = get_words(args.valid_words_file)
    answers = get_words(args.answer_words_file)
    # An explicit --answer overrides the random pick from the answer list.
    answer = args.answer or choice(list(answers))

    if answer not in valid_words:
        print(f'"{answer}" is not in the valid word list')
        exit(1)

    if mode == Mode.BENCHMARK:
        benchmark(mode, answers, valid_words, args.rounds, args.debug)
    else:
        play(mode, answer, valid_words, args.hard, args.rounds, args.debug)
if __name__ == '__main__':
    # Needed by frozen Windows executables that use multiprocessing
    # (the benchmark path runs worker processes).
    freeze_support()
    arg_parser = argparse.ArgumentParser()
    # The run modes are mutually exclusive flags (e.g. -s/--solve); the short
    # option is the first letter of the mode name.
    mode_group = arg_parser.add_mutually_exclusive_group()
    for mode in Mode:
        mode_group.add_argument(f'-{mode.value[0]}', f'--{mode.value}', action='store_const', const=mode,
                                help=f'use "{mode.value}" mode{" (default)" if mode == DEFAULT_MODE else ""}')
    arg_parser.add_argument('-r', '--rounds', type=int, default=DEFAULT_ROUNDS, metavar='num_rounds',
                            help=f'number of rounds (default="{DEFAULT_ROUNDS}")')
    arg_parser.add_argument('-a', '--answer', type=str, metavar='word',
                            help='sets the answer word - useful for debugging a specific case. Ignored in benchmark mode')
    arg_parser.add_argument('-H','--hard', action='store_true',
                            help='enable hard mode (any revealed hints must be used in subsequent guesses). Ignored in benchmark mode.')
    arg_parser.add_argument('-d', '--debug', action='store_true',
                            help='print extra output for debugging',)
    arg_parser.add_argument('--answer-words-file', type=str, metavar='path', default=ANSWER_FILENAME,
                            help=f'file with all possible answers (default="{ANSWER_FILENAME}")')
    arg_parser.add_argument('--valid-words-file', type=str, metavar='path', default=VALID_FILENAME,
                            help=f'file with all accepted words (default="{VALID_FILENAME}")')
    args = arg_parser.parse_args()
    run(args)
| StarcoderdataPython |
4890068 | """
Find the k-cores of a graph.
The k-core is found by recursively pruning nodes with degrees less than k.
See the following references for details:
An O(m) Algorithm for Cores Decomposition of Networks
<NAME> and <NAME>, 2003.
https://arxiv.org/abs/cs.DS/0310049
Generalized Cores
<NAME> and <NAME>, 2002.
https://arxiv.org/pdf/cs/0202039
For directed graphs a more general notion is that of D-cores which
looks at (k, l) restrictions on (in, out) degree. The (k, k) D-core
is the k-core.
D-cores: Measuring Collaboration of Directed Graphs Based on Degeneracy
<NAME>, <NAME>, <NAME>, ICDM 2011.
http://www.graphdegeneracy.org/dcores_ICDM_2011.pdf
Multi-scale structure and topological anomaly detection via a new network \
statistic: The onion decomposition
<NAME>, <NAME>, and <NAME>
Scientific Reports 6, 31708 (2016)
http://doi.org/10.1038/srep31708
"""
import networkx as nx
from networkx.exception import NetworkXError
from networkx.utils import not_implemented_for
# Public API of this module; keep in sync with the functions defined below.
__all__ = [
    "core_number",
    "find_cores",
    "k_core",
    "k_shell",
    "k_crust",
    "k_corona",
    "k_truss",
    "onion_layers",
    "d_core_number",
    "find_d_cores",
    "d_core"
]
@not_implemented_for("multigraph")
def core_number(G):
"""Returns the core number for each vertex.
A k-core is a maximal subgraph that contains nodes of degree k or more.
The core number of a node is the largest value k of a k-core containing
that node.
Parameters
----------
G : NetworkX graph
A graph or directed graph
Returns
-------
core_number : dictionary
A dictionary keyed by node to the core number.
Raises
------
NetworkXError
The k-core is not implemented for graphs with self loops
or parallel edges.
Notes
-----
Not implemented for graphs with parallel edges or self loops.
For directed graphs the node degree is defined to be the
in-degree + out-degree.
References
----------
.. [1] An O(m) Algorithm for Cores Decomposition of Networks
<NAME> and <NAME>, 2003.
https://arxiv.org/abs/cs.DS/0310049
"""
if nx.number_of_selfloops(G) > 0:
msg = (
"Input graph has self loops which is not permitted; "
"Consider using G.remove_edges_from(nx.selfloop_edges(G))."
)
raise NetworkXError(msg)
degrees = dict(G.degree())
# Sort nodes by degree.
nodes = sorted(degrees, key=degrees.get)
bin_boundaries = [0]
curr_degree = 0
for i, v in enumerate(nodes):
if degrees[v] > curr_degree:
bin_boundaries.extend([i] * (degrees[v] - curr_degree))
curr_degree = degrees[v]
node_pos = {v: pos for pos, v in enumerate(nodes)}
# The initial guess for the core number of a node is its degree.
core = degrees
nbrs = {v: list(nx.all_neighbors(G, v)) for v in G}
for v in nodes:
for u in nbrs[v]:
if core[u] > core[v]:
nbrs[u].remove(v)
pos = node_pos[u]
bin_start = bin_boundaries[core[u]]
node_pos[u] = bin_start
node_pos[nodes[bin_start]] = pos
nodes[bin_start], nodes[pos] = nodes[pos], nodes[bin_start]
bin_boundaries[core[u]] += 1
core[u] -= 1
return core
def _core_subgraph(G, k_filter, k=None, core=None):
    """Return the subgraph of `G` induced by the nodes accepted by `k_filter`.

    Parameters
    ----------
    G : NetworkX graph
        The graph or directed graph to process.
    k_filter : callable
        Called as ``k_filter(node, cutoff, core)``; returns True to keep
        the node.
    k : int, optional
        Cutoff handed to `k_filter`; defaults to the maximum core number.
    core : dict, optional
        Precomputed core numbers keyed by node; computed from `G` when
        omitted.
    """
    if core is None:
        core = core_number(G)
    if k is None:
        k = max(core.values())
    kept = [v for v in core if k_filter(v, k, core)]
    return G.subgraph(kept).copy()
def k_core(G, k=None, core_number=None):
    """Return the k-core of G: the maximal subgraph whose nodes all have
    degree k or more (in-degree + out-degree for directed graphs).

    Parameters
    ----------
    G : NetworkX graph
        A graph or directed graph.
    k : int, optional
        The order of the core; defaults to the main (largest) core.
    core_number : dictionary, optional
        Precomputed core numbers for G.

    Returns
    -------
    NetworkX graph
        The k-core subgraph; graph, node and edge attributes are copied.

    Raises
    ------
    NetworkXError
        If G has self loops or parallel edges.

    See Also
    --------
    core_number

    References
    ----------
    .. [1] An O(m) Algorithm for Cores Decomposition of Networks,
       Batagelj and Zaversnik, 2003. https://arxiv.org/abs/cs.DS/0310049
    """
    return _core_subgraph(G, lambda v, cutoff, c: c[v] >= cutoff, k, core_number)
#######################################
# DIRECTED CORE DECOMPOSITION BABYYYY #
#######################################
def d_core_number(G):
    """Returns the directed (out-degree) core number for each vertex.

    Here a d-core is a maximal subgraph whose nodes all have out-degree d or
    more; the core number of a node is the largest such d of a d-core
    containing it.

    Parameters
    ----------
    G : NetworkX DiGraph
        A directed graph

    Returns
    -------
    d_core_number : dictionary
        A dictionary keyed by node to the core number.

    Raises
    ------
    NetworkXError
        If the graph has self loops, parallel edges, or is undirected.

    Notes
    -----
    Not implemented for graphs with parallel edges or self loops.
    Only node out-degree is considered; for the classic variant using
    in-degree + out-degree use core_number(G).

    References
    ----------
    .. [1] An O(m) Algorithm for Cores Decomposition of Networks
       Batagelj and Zaversnik, 2003.
       https://arxiv.org/abs/cs.DS/0310049
    """
    if nx.number_of_selfloops(G) > 0:
        msg = (
            "Input graph has self loops which is not permitted; "
            "Consider using G.remove_edges_from(nx.selfloop_edges(G))."
        )
        raise NetworkXError(msg)
    if not nx.is_directed(G):
        msg = (
            "Input graph is not directed; "
            "Consider converting to DiGraph."
        )
        raise NetworkXError(msg)
    out_degrees = dict(G.out_degree())
    # Batagelj-Zaversnik bucket sort over out-degrees (see core_number).
    nodes = sorted(out_degrees, key=out_degrees.get)
    bin_boundaries = [0]
    curr_degree = 0
    for i, v in enumerate(nodes):
        if out_degrees[v] > curr_degree:
            bin_boundaries.extend([i] * (out_degrees[v] - curr_degree))
            curr_degree = out_degrees[v]
    node_pos = {v: pos for pos, v in enumerate(nodes)}
    # The initial guess for the core number of a node is its out-degree.
    core = out_degrees
    nbrs = {v: list(nx.DiGraph.neighbors(G, v)) for v in G}
    for v in nodes:
        for u in nbrs[v]:
            if core[u] > core[v]:
                # In a digraph u's successor list need not contain v (the
                # reverse edge may be absent), so guard the removal.  The
                # original wrapped this in try/except ValueError and printed
                # the error to stdout -- library code must not print.
                if v in nbrs[u]:
                    nbrs[u].remove(v)
                pos = node_pos[u]
                bin_start = bin_boundaries[core[u]]
                node_pos[u] = bin_start
                node_pos[nodes[bin_start]] = pos
                nodes[bin_start], nodes[pos] = nodes[pos], nodes[bin_start]
                bin_boundaries[core[u]] += 1
                core[u] -= 1
    return core


find_d_cores = d_core_number
def _d_core_subgraph(G, d_filter, d=None, core=None):
    """Return the subgraph of directed graph `G` induced by the nodes that
    `d_filter` accepts.

    Parameters
    ----------
    G : NetworkX graph
        The directed graph to process.
    d_filter : callable
        Called as ``d_filter(node, cutoff, core)``; returns True to keep
        the node.
    d : int, optional
        Cutoff handed to `d_filter`; defaults to the maximum directed core
        number.
    core : dict, optional
        Precomputed directed core numbers keyed by node; computed from `G`
        when omitted.
    """
    if core is None:
        core = d_core_number(G)
    if d is None:
        d = max(core.values())
    kept = [v for v in core if d_filter(v, d, core)]
    return G.subgraph(kept).copy()
def d_core(G, d=None, d_core_number=None):
    """Return the d-core of a directed graph G: the maximal subgraph whose
    nodes all have directed (out-degree based) core number at least d.

    Parameters
    ----------
    G : NetworkX DiGraph
        A directed graph.
    d : int, optional
        The order of the core; defaults to the main (largest) core.
    d_core_number : dictionary, optional
        Precomputed directed core numbers for G.

    Returns
    -------
    NetworkX graph
        The d-core subgraph; graph, node and edge attributes are copied.

    Raises
    ------
    NetworkXError
        If G has self loops, parallel edges, or is undirected.

    See Also
    --------
    d_core_number

    References
    ----------
    .. [1] An O(m) Algorithm for Cores Decomposition of Networks,
       Batagelj and Zaversnik, 2003. https://arxiv.org/abs/cs.DS/0310049
    """
    return _d_core_subgraph(G, lambda v, cutoff, c: c[v] >= cutoff, d, d_core_number)
####################################
# DIRECTED CORE DECOMPOSITION OVER #
####################################
def k_shell(G, k=None, core_number=None):
    """Return the k-shell of G: the subgraph induced by nodes with core
    number exactly k, i.e. nodes in the k-core but not in the (k+1)-core.

    Parameters
    ----------
    G : NetworkX graph
        A graph or directed graph.
    k : int, optional
        The order of the shell; defaults to the outermost shell.
    core_number : dictionary, optional
        Precomputed core numbers for G.

    Returns
    -------
    NetworkX graph
        The k-shell subgraph; graph, node and edge attributes are copied.

    Raises
    ------
    NetworkXError
        If G has self loops or parallel edges.

    Notes
    -----
    Similar to k_corona, except k_corona only counts neighbours that are
    themselves inside the k-core.  For directed graphs the node degree is
    in-degree + out-degree.

    See Also
    --------
    core_number, k_corona

    References
    ----------
    .. [1] A model of Internet topology using k-shell decomposition,
       PNAS July 3, 2007 vol. 104 no. 27 11150-11154.
       http://www.pnas.org/content/104/27/11150.full
    """
    return _core_subgraph(G, lambda v, cutoff, c: c[v] == cutoff, k, core_number)
def k_crust(G, k=None, core_number=None):
    """Return the k-crust of G: the graph with the edges of the (k+1)-core
    removed, and nodes isolated by that removal dropped.

    Parameters
    ----------
    G : NetworkX graph
        A graph or directed graph.
    k : int, optional
        The order of the crust; defaults to one less than the maximum core
        number (the "main crust").
    core_number : dictionary, optional
        Precomputed core numbers for G.

    Returns
    -------
    NetworkX graph
        The k-crust subgraph; graph, node and edge attributes are copied.

    Raises
    ------
    NetworkXError
        If G has self loops or parallel edges.

    Notes
    -----
    This differs from the definition in [1]_: the k-crust there equals the
    (k+1)-crust of this implementation.  For directed graphs the node degree
    is in-degree + out-degree.

    See Also
    --------
    core_number

    References
    ----------
    .. [1] A model of Internet topology using k-shell decomposition,
       PNAS July 3, 2007 vol. 104 no. 27 11150-11154.
       http://www.pnas.org/content/104/27/11150.full
    """
    # k defaults to (max core) - 1, which _core_subgraph cannot express,
    # hence the inlined logic with filter core_number[v] <= k.
    if core_number is None:
        core_number = find_cores(G)
    if k is None:
        k = max(core_number.values()) - 1
    kept = [v for v in core_number if core_number[v] <= k]
    return G.subgraph(kept).copy()


# Historical alias kept for backward compatibility.
find_cores = core_number
def k_corona(G, k, core_number=None):
    """Return the k-corona of G: the nodes of the k-core that have exactly
    k neighbours inside the k-core.

    Parameters
    ----------
    G : NetworkX graph
        A graph or directed graph.
    k : int
        The order of the corona.
    core_number : dictionary, optional
        Precomputed core numbers for G.

    Returns
    -------
    NetworkX graph
        The k-corona subgraph; graph, node and edge attributes are copied.

    Raises
    ------
    NetworkXError
        If G has self loops or parallel edges.

    Notes
    -----
    For directed graphs the node degree is in-degree + out-degree.

    See Also
    --------
    core_number

    References
    ----------
    .. [1] k-core (bootstrap) percolation on complex networks:
       Critical phenomena and nonlocal effects,
       Phys. Rev. E 73, 056101 (2006).
       http://link.aps.org/doi/10.1103/PhysRevE.73.056101
    """
    def in_corona(v, cutoff, c):
        # Node must sit exactly on the k-shell AND have exactly k neighbours
        # whose core number keeps them inside the k-core.
        return c[v] == cutoff and cutoff == sum(1 for w in G[v] if c[w] >= cutoff)

    return _core_subgraph(G, in_corona, k, core_number)
@not_implemented_for("directed")
@not_implemented_for("multigraph")
def k_truss(G, k):
"""Returns the k-truss of `G`.
The k-truss is the maximal induced subgraph of `G` which contains at least
three vertices where every edge is incident to at least `k-2` triangles.
Parameters
----------
G : NetworkX graph
An undirected graph
k : int
The order of the truss
Returns
-------
H : NetworkX graph
The k-truss subgraph
Raises
------
NetworkXError
The k-truss is not defined for graphs with self loops or parallel edges
or directed graphs.
Notes
-----
A k-clique is a (k-2)-truss and a k-truss is a (k+1)-core.
Not implemented for digraphs or graphs with parallel edges or self loops.
Graph, node, and edge attributes are copied to the subgraph.
K-trusses were originally defined in [2] which states that the k-truss
is the maximal induced subgraph where each edge belongs to at least
`k-2` triangles. A more recent paper, [1], uses a slightly different
definition requiring that each edge belong to at least `k` triangles.
This implementation uses the original definition of `k-2` triangles.
References
----------
.. [1] Bounds and Algorithms for k-truss. <NAME>, <NAME>,
<NAME>, 2018. https://arxiv.org/abs/1806.05523v2
.. [2] Trusses: Cohesive Subgraphs for Social Network Analysis. Jonathan
Cohen, 2005.
"""
H = G.copy()
n_dropped = 1
while n_dropped > 0:
n_dropped = 0
to_drop = []
seen = set()
for u in H:
nbrs_u = set(H[u])
seen.add(u)
new_nbrs = [v for v in nbrs_u if v not in seen]
for v in new_nbrs:
if len(nbrs_u & set(H[v])) < (k - 2):
to_drop.append((u, v))
H.remove_edges_from(to_drop)
n_dropped = len(to_drop)
H.remove_nodes_from(list(nx.isolates(H)))
return H
@not_implemented_for("multigraph")
@not_implemented_for("directed")
def onion_layers(G):
    """Returns the layer of each vertex in an onion decomposition of the graph.
    The onion decomposition refines the k-core decomposition by providing
    information on the internal organization of each k-shell. It is usually
    used alongside the `core numbers`.
    Parameters
    ----------
    G : NetworkX graph
        A simple graph without self loops or parallel edges
    Returns
    -------
    od_layers : dictionary
        A dictionary keyed by vertex to the onion layer. The layers are
        contiguous integers starting at 1.
    Raises
    ------
    NetworkXError
        The onion decomposition is not implemented for graphs with self loops
        or parallel edges or for directed graphs.
    Notes
    -----
    Not implemented for graphs with parallel edges or self loops.
    Not implemented for directed graphs.
    See Also
    --------
    core_number
    References
    ----------
    .. [1] Multi-scale structure and topological anomaly detection via a new
       network statistic: The onion decomposition
       <NAME>, <NAME>. Grochow, and <NAME>
       Scientific Reports 6, 31708 (2016)
       http://doi.org/10.1038/srep31708
    .. [2] Percolation and the effective structure of complex networks
       <NAME> and <NAME>
       Physical Review X 9, 011023 (2019)
       http://doi.org/10.1103/PhysRevX.9.011023
    """
    # Self loops would corrupt the degree bookkeeping below, so reject them
    # up front with an actionable error message.
    if nx.number_of_selfloops(G) > 0:
        msg = (
            "Input graph contains self loops which is not permitted; "
            "Consider using G.remove_edges_from(nx.selfloop_edges(G))."
        )
        raise NetworkXError(msg)
    # Dictionaries to register the k-core/onion decompositions.
    od_layers = {}
    # Adjacency list (mutable copy: entries are pruned as nodes are peeled).
    neighbors = {v: list(nx.all_neighbors(G, v)) for v in G}
    # Effective degree of nodes.  Shrinks as nodes are peeled off.
    degrees = dict(G.degree())
    # Performs the onion decomposition.
    current_core = 1
    current_layer = 1
    # Sets vertices of degree 0 to layer 1, if any.
    isolated_nodes = [v for v in nx.isolates(G)]
    if len(isolated_nodes) > 0:
        for v in isolated_nodes:
            od_layers[v] = current_layer
            degrees.pop(v)
        current_layer = 2
    # Finds the layer for the remaining nodes.
    while len(degrees) > 0:
        # Sets the order for looking at nodes: ascending effective degree.
        nodes = sorted(degrees, key=degrees.get)
        # Sets properly the current core.  The core level only ever grows.
        min_degree = degrees[nodes[0]]
        if min_degree > current_core:
            current_core = min_degree
        # Identifies vertices in the current layer: the prefix of `nodes`
        # whose effective degree does not exceed the current core.
        this_layer = []
        for n in nodes:
            if degrees[n] > current_core:
                break
            this_layer.append(n)
        # Identifies the core/layer of the vertices in the current layer.
        # Invariant: removing v from each neighbour's adjacency list keeps
        # the later `degrees[n]` updates valid -- a node already assigned a
        # layer has been stripped from every remaining adjacency list, so it
        # is never decremented after its `degrees.pop`.
        for v in this_layer:
            od_layers[v] = current_layer
            for n in neighbors[v]:
                neighbors[n].remove(v)
                degrees[n] = degrees[n] - 1
            degrees.pop(v)
        # Updates the layer count.
        current_layer = current_layer + 1
    # Returns the dictionaries containing the onion layer of each vertices.
    return od_layers
| StarcoderdataPython |
3434381 | <filename>tanuky/__init__.py
# Re-export the public API of the implementation module at package level.
from .tanuky import *
# Package version string (keep in sync with the distribution metadata).
__version__ = '1.3.1'
| StarcoderdataPython |
1904400 | <filename>Bot/bot.py
'''
This project was made by https://github.com/himanshu2406. In case of cloning, modifying, or releasing any bot based on this source code,
You are obligated to give credits to the original author.
Original repo: https://github.com/himanshu2406/Corona-Tracker-Bot
Original Bot Support Server: https://discord.gg/kdj6DMr
'''
import uuid
from dotenv import load_dotenv
import discord
from discord.ext import commands
import os
import diseaseapi
from helper_functions import *
class CovidCommands(commands.Cog):
    """Command cog providing COVID-19 statistics, graphs and admin utilities.

    Statistics come from the disease.sh API (via ``diseaseapi``); graphs are
    pre-rendered images hosted in a companion GitHub repository.
    """

    # Footer stamped on (almost) every embed the bot sends.
    _FOOTER_TEXT = "Firelogger#7717"
    _FOOTER_ICON = 'https://avatars2.githubusercontent.com/u/37951606?s=460&u=f45b1c7a7f0eddbe0036a7cf79b47d7dfa889321&v=4'
    # Raw-content base URL of the repository hosting the pre-rendered graphs.
    _RES_BASE = 'https://raw.githubusercontent.com/resoucesforcorona/Resources/master/'

    def __init__(self, client):
        self.client = client
        self.api_client = diseaseapi.Client().covid19
        # Discord user id allowed to run the owner-only commands below.
        # BUG FIX: the published source had the owner id replaced by a
        # `<PASSWORD>` placeholder, which is not valid Python and broke
        # parsing; read the id from the environment instead.
        self.owner_id = int(os.getenv('BOT_OWNER_ID', '0'))

    # ------------------------------------------------------------------ #
    # internal helpers
    # ------------------------------------------------------------------ #

    def _is_owner(self, ctx):
        """Return True when the invoking user is the configured bot owner."""
        return ctx.message.author.id == self.owner_id

    def _graph_embed(self, description, image_url):
        """Build the standard green graph embed with footer and image set."""
        embed = discord.Embed(title="Corona Tracker", url='https://anondoser.xyz',
                              description=description, color=0x00ff00)
        embed.set_footer(text=self._FOOTER_TEXT, icon_url=self._FOOTER_ICON)
        embed.set_image(url=image_url)
        return embed

    @staticmethod
    def _cache_bust():
        """Random query suffix so Discord does not serve a stale cached image."""
        return '?' + str(uuid.uuid4().hex[:15])

    @staticmethod
    def _add_core_stats(embed, cur, yday):
        """Add the nine headline statistic fields (with day-over-day diffs)."""
        embed.add_field(name='Cases:', value=check_na(cur.cases) + cleaned_diff(cur.cases, yday.cases), inline=True)
        embed.add_field(name='Cases Today:', value=check_na(cur.today.cases), inline=True)
        embed.add_field(name='Deaths:', value=check_na(cur.deaths) + cleaned_diff(cur.deaths, yday.deaths), inline=True)
        embed.add_field(name='Deaths Today:', value=check_na(cur.today.deaths), inline=True)
        embed.add_field(name='Recovered:', value=check_na(cur.recoveries) + cleaned_diff(cur.recoveries, yday.recoveries), inline=True)
        embed.add_field(name='Recovered Today:', value=check_na(cur.today.recoveries), inline=True)
        embed.add_field(name='Active:', value=check_na(cur.active) + cleaned_diff(cur.active, yday.active), inline=True)
        embed.add_field(name='Critical:', value=check_na(cur.critical) + cleaned_diff(cur.critical, yday.critical), inline=True)
        embed.add_field(name='Tests:', value=check_na(cur.tests) + cleaned_diff(cur.tests, yday.tests), inline=True)

    @staticmethod
    def _add_per_capita_stats(embed, cur):
        """Add the per-million / per-person fields plus the update timestamp."""
        embed.add_field(name='Cases per Million', value=cur.per_million.cases, inline=True)
        embed.add_field(name='Active per Million', value=cur.per_million.active, inline=True)
        embed.add_field(name='Recoveries per Million', value=cur.per_million.recoveries, inline=True)
        embed.add_field(name='Tests per Million', value=cur.per_million.tests, inline=True)
        embed.add_field(name='Deaths per Million', value=cur.per_million.deaths, inline=True)
        embed.add_field(name='Critical per Million', value=cur.per_million.critical, inline=True)
        embed.add_field(name='One Case per Person', value=cur.per_people.case, inline=True)
        embed.add_field(name='One Death per Person', value=cur.per_people.death, inline=True)
        embed.add_field(name='One Test per Person', value=cur.per_people.test, inline=True)
        embed.add_field(name='Updated', value=cur.updated, inline=True)

    # ------------------------------------------------------------------ #
    # owner-only maintenance commands
    # ------------------------------------------------------------------ #

    @commands.command(help="Restarts the bot, requires admin permission")
    @commands.has_role('admin')
    async def restart(self, ctx):
        """Log out of the gateway and re-launch the bot process (owner only)."""
        if not self._is_owner(ctx):
            return
        await ctx.send('Restarting...')
        print('Restarting...')
        try:
            await self.client.logout()
        finally:
            # Spawn a fresh interpreter once the gateway connection closes.
            os.system("py -3 bot.py")

    @commands.command(help="Shuts Down The bot, Requires admin permission")
    @commands.has_role('admin')
    async def shutdown(self, ctx):
        """Cleanly log the bot out and stop (owner only)."""
        if not self._is_owner(ctx):
            return
        await ctx.send('Bot is Shut Down')
        print('Shut Down Command Received')
        await self.client.logout()

    @commands.command()
    @commands.has_role('admin')
    async def botservers(self, ctx):
        """Report how many guilds/members the bot can see (owner only)."""
        if not self._is_owner(ctx):
            return
        guild_lines = []
        total_members = 0
        await self.client.change_presence(activity=discord.Activity(
            type=discord.ActivityType.listening,
            name=" a !cov help in " + str(len(self.client.guilds)) + " server(s)"))
        await ctx.send("I'm in " + str(len(self.client.guilds)) + " servers")
        for guild in self.client.guilds:
            total_members += len(guild.members)
            guild_lines.append(guild.name + ' : ' + str(guild.id))
        guild_lines.append('Total members: ' + str(total_members))
        # Guild names/ids are only logged to the console, never posted to chat.
        print(guild_lines)
        await ctx.send('Total members: ' + str(total_members))

    @commands.command()
    @commands.has_role('admin')
    async def leave(self, ctx, id):
        """Make the bot leave the guild with the given id (owner only)."""
        if not self._is_owner(ctx):
            return
        to_leave = self.client.get_guild(int(id))
        await ctx.send('Leaving ' + str(to_leave))
        try:
            await to_leave.leave()
            await ctx.send('Left ' + str(to_leave))
        except Exception:
            # get_guild() returns None for unknown ids and leave() can be
            # rejected by Discord; report failure instead of crashing.
            # (Replaces the original bare `except:`.)
            await ctx.send('Failed leaving ' + str(to_leave))

    # ------------------------------------------------------------------ #
    # graph commands
    # ------------------------------------------------------------------ #

    @commands.group(name='graph', aliases=['g'])
    async def graph(self, ctx):
        """Parent command; with no subcommand, show the graph help page."""
        if ctx.invoked_subcommand is None:
            await ctx.invoke(self.ghelp)

    @graph.command(name='top10')
    async def graphtop10(self, ctx, arg2):
        """Top-10 countries chart for a given metric."""
        if arg2 not in ['confirmed', 'active', 'deaths', 'recovered']:
            return
        metric = arg2.lower()
        url = self._RES_BASE + 'top_10_countries_' + metric + '.png' + self._cache_bust()
        # BUG FIX: the original description read e.g. "Top 10 confirmedCases"
        # (missing space before "Cases").
        await ctx.send(embed=self._graph_embed("Graph for Top 10 " + metric + ' Cases', url))

    @graph.command(name='country')
    async def graphcountry(self, ctx, arg2='all'):
        """Per-country chart, or the all-countries overview by default."""
        if arg2 == 'all':
            url = 'https://github.com/resoucesforcorona/Resources/blob/master/All_countries.png?raw=true'
            description = "Graph for all countries"
        else:
            country_name = (arg2.lower()).capitalize()
            url = self._RES_BASE + country_name + '.png' + self._cache_bust()
            description = "Graph for: " + country_name
        await ctx.send(embed=self._graph_embed(description, url))

    @graph.command(name='continent')
    async def graphcontinent(self, ctx, arg2='all'):
        """Per-continent chart, or the all-continents overview by default."""
        if arg2 == 'all':
            url = self._RES_BASE + 'All_continents.png'
            description = "Graph for all continents"
        else:
            continent_name = (arg2.lower()).capitalize()
            url = self._RES_BASE + continent_name + '.png' + self._cache_bust()
            description = "Graph for: " + continent_name
        await ctx.send(embed=self._graph_embed(description, url))

    @graph.command(name='pie')
    async def graphpie(self, ctx):
        """Country-wise pie charts."""
        url = self._RES_BASE + 'all_countrywise_pie.png'
        await ctx.send(embed=self._graph_embed("Graph for Pie", url))

    @graph.command(name='predict')
    async def predict(self, ctx, arg2):
        """Global prediction curve for the given metric."""
        metric = arg2.lower()
        url = self._RES_BASE + 'world_prediction_curve_' + metric + '.png' + self._cache_bust()
        await ctx.send(embed=self._graph_embed(
            "Graph for global prediction curve for " + metric, url))

    @graph.command(name='global')
    async def graphglobal(self, ctx, arg2):
        """Global charts: daily counts, trend, or total deaths."""
        charts = {
            'daily_confirmed': ('daily_confirmed_cases_global.png',
                                "Graph for global daily confirmed"),
            'daily_deaths': ('daily_deaths_cases_global.png',
                             "Graph for global daily deaths"),
            'trend': ('world_trend_confirmed_cases.png',
                      "Graph for global trend"),
            'deaths': ('worldwide_cases_deaths.png',
                       "Graph for global total deaths"),
        }
        if arg2 not in charts:
            return
        filename, description = charts[arg2]
        await ctx.send(embed=self._graph_embed(description, self._RES_BASE + filename))

    @graph.command(name='trend')
    async def graphtrend(self, ctx, arg2):
        """Trend comparison between continents (confirmed) or countries (deaths)."""
        if arg2 not in ['confirmed', 'deaths']:
            return
        if arg2 == 'confirmed':
            filename = 'trend_comparison_continents_confirmed.png'
            description = "Graph for trend comparison b/w continents [confirmed]"
        else:
            filename = 'trend_comparison_countries_deaths.png'
            # BUG FIX: the original appended a stray capitalised copy of the
            # argument ("...[deaths]Deaths") to this description.
            description = "Graph for trend comparison b/w countries [deaths]"
        await ctx.send(embed=self._graph_embed(description, self._RES_BASE + filename))

    @graph.command(name='spread')
    async def graphspread(self, ctx):
        """Number of infected countries over time."""
        url = self._RES_BASE + 'countries_vs_date_spread.png'
        await ctx.send(embed=self._graph_embed(
            "Graph for number of countries infected vs date", url))

    # ------------------------------------------------------------------ #
    # informational commands
    # ------------------------------------------------------------------ #

    @commands.command()
    async def help(self, ctx):
        """Main help page listing every command (no footer, as originally)."""
        embed = discord.Embed(title="Corona Tracker Help Panel",
                              description="Here's some help for you :heart:", color=0x00ff00)
        embed.add_field(
            name="!cov graph [commands]", value="Consists of graphical predictions, analysis , statistics and more see below for command", inline=False)
        embed.add_field(
            name="!cov g [commands]", value="Same as above, use !cov ghelp for list of commands", inline=False)
        embed.add_field(
            name="!cov ghelp", value="Help and commands list for `!cov graph`", inline=False)
        embed.add_field(
            name="!cov all", value="Shows global Covid-19 statistics", inline=False)
        embed.add_field(name="!cov interactive", value="Sends the best live interactive maps \n See how the Covid spread on the world map from the very start \n See live status of Covid on the world map \n See Mortality Progression from the very beginning", inline=False)
        embed.add_field(name="!cov country {your country} {full/micro}",
                        value="Shows you a particular country's stats (Optional- use 'micro' for a simplified report)", inline=False)
        embed.add_field(
            name="!cov help", value="Shows you this message", inline=False)
        embed.add_field(
            name="!cov invite", value="Sends you the links to invite the bot to your own server & the official bot server", inline=False)
        embed.add_field(
            name="github", value="https://github.com/himanshu2406/Corona-Tracker-Bot", inline=False)
        embed.add_field(
            name="tip :heart: ", value="Buy me a Coffee [sends addresses for tipping]", inline=False)
        embed.add_field(name="Dev Contact",
                        value="Firelogger#7717", inline=False)
        await ctx.send(embed=embed)

    @commands.command()
    async def invite(self, ctx):
        """Send the bot's OAuth invite link."""
        embed = discord.Embed(title="Invite me", url='https://discord.com/api/oauth2/authorize?client_id=731855425145798668&permissions=121856&scope=bot',
                              description="Click the title to invite to your own server", color=0x00ff00)
        embed.set_footer(text=self._FOOTER_TEXT, icon_url=self._FOOTER_ICON)
        await ctx.send(embed=embed)

    @commands.command()
    async def tip(self, ctx):
        """Send donation addresses."""
        embed = discord.Embed(title="Tip :heart:", url='https://www.buymeacoffee.com/anondoser/shop',
                              description="Donate for improving the services and help running the bot", color=0x00ff00)
        embed.add_field(
            name="Btc address", value="```37btgSzgWdywmSPeBN5rUH8W5G9EYJoRoA```", inline=False)
        embed.add_field(
            name="Paypal", value="```https://www.paypal.me/firelogger```", inline=False)
        embed.add_field(
            name="For more", value="For more methods please dm me", inline=False)
        embed.set_footer(text=self._FOOTER_TEXT, icon_url=self._FOOTER_ICON)
        await ctx.send(embed=embed)

    @commands.command()
    async def interactive(self, ctx):
        """Links to the live interactive maps and visualisations."""
        embed = discord.Embed(title="Interactive and playable Maps and statistics",
                              description="Play with these live maps , statistics and visuals", color=0x00ff00)
        embed.add_field(name="World Map Live Progression",
                        value="See how COVID spread from the very beginning on the world map, seekable and playable", inline=False)
        embed.add_field(
            name="Link", value="https://corona.anondoser.xyz/worldmap_progression.html", inline=False)
        embed.add_field(name="World Map static interactive ",
                        value="see the current spread of Covid-19 on the world map", inline=False)
        embed.add_field(
            name="Link", value="https://corona.anondoser.xyz/worldmap_cases_interactive.html", inline=False)
        embed.add_field(name="Mortality rate Live progression",
                        value="A beautiful playable and seekable representation of the mortality rate progression from the beginning , see those balls bounce", inline=False)
        embed.add_field(
            name="Link", value="https://corona.anondoser.xyz/mortalityrate_progression.html", inline=False)
        embed.set_footer(text=self._FOOTER_TEXT, icon_url=self._FOOTER_ICON)
        await ctx.send(embed=embed)

    @commands.command()
    async def ghelp(self, ctx):
        """Help page for the `graph` command group."""
        embed = discord.Embed(title="Corona Tracker graph commands ",
                              description="Use - `!cov graph [command] [arguments]` you can use `!cov g` instead too", color=0x00ff00)
        embed.add_field(name="How to use - ",
                        value="```!cov graph [commands given below] [argument]``` arguments in round brackets - () mean type exact commands as given , \n Square brackets -[] mean a variable like your country name", inline=False)
        embed.add_field(
            name="spread", value="No arguments ; shows the spread among countries vs date", inline=False)
        embed.add_field(
            name="pie", value="No arguments ; shows a list of pie charts for cases among countries", inline=False)
        embed.add_field(name="top10 (confirmed/active/deaths/recovered)",
                        value="Shows top 10 countries based on the argument given", inline=False)
        embed.add_field(name="global (daily_confirmed/daily_deaths/trend/deaths)",
                        value="Shows graphs for global arguments", inline=False)
        embed.add_field(
            name="country [country name]", value="Graph for the country given", inline=False)
        embed.add_field(name="continent [continent name]",
                        value="Graph for the continent given", inline=False)
        embed.add_field(name="predict (confirmed/deaths)",
                        value="Shows graphical projections for the future along with next 10 day predicted figures for the argument", inline=False)
        embed.add_field(name="trend (confirmed/deaths)",
                        value="Shows trend between different countries / continents based on the argument", inline=False)
        embed.add_field(
            name="Example", value="```!cov graph top10 confirmed```", inline=False)
        embed.set_footer(text=self._FOOTER_TEXT, icon_url=self._FOOTER_ICON)
        await ctx.send(embed=embed)

    # ------------------------------------------------------------------ #
    # statistics commands
    # ------------------------------------------------------------------ #

    @commands.command(aliases=['c'])
    async def country(self, ctx, args, complete='full'):
        """Stats for one country; `complete='micro'` skips per-capita fields."""
        if complete not in ['full', 'micro']:
            return
        try:
            stats = await self.api_client.country(args)
            # BUG FIX: the original fetched yesterday's numbers twice (once
            # inside this try block and once again after it), making a
            # redundant API round-trip.  Fetch once.
            yday = await self.api_client.country(stats.name, yesterday=True)
        except diseaseapi.NotFound:
            embed = discord.Embed(title="Invalid Country: " + args.capitalize(),
                                  description="Error, the country doesn't exist in the database",
                                  color=0xe33b3b, url='https://anondoser.xyz')
            return await ctx.send(embed=embed)
        embed = discord.Embed(description="Statistics from disease.py",
                              color=0xe33b3b, url='https://anondoser.xyz')
        self._add_core_stats(embed, stats, yday)
        if complete == 'full':
            self._add_per_capita_stats(embed, stats)
        embed.title = "Covid Stats for: " + stats.name
        embed.set_thumbnail(url=str(stats.info.flag))
        embed.set_footer(text=self._FOOTER_TEXT, icon_url=self._FOOTER_ICON)
        await ctx.send(embed=embed)

    @commands.command()
    async def all(self, ctx):
        """Worldwide aggregate statistics."""
        stats = await self.api_client.all()
        yday = await self.api_client.all(yesterday=True)
        embed = discord.Embed(title="Covid Worldwide Stats",
                              description="Statistics from disease.py",
                              color=0xe33b3b, url='https://anondoser.xyz')
        self._add_core_stats(embed, stats, yday)
        self._add_per_capita_stats(embed, stats)
        embed.set_footer(text=self._FOOTER_TEXT, icon_url=self._FOOTER_ICON)
        await ctx.send(embed=embed)
def setup(client):
    """Entry point used by discord.py's extension loader: register the cog."""
    cog = CovidCommands(client)
    client.add_cog(cog)
9638334 | <filename>pybacktest/verification.py
import pandas
import sys
from pybacktest.backtest import Backtest
def iter_verify(strategy_fn, data, window_size):
    """
    Verify vectorized pandas backtest iteratively by running it
    in sliding window, bar-by-bar.

    Parameters
    ----------
    strategy_fn : callable
        Takes a window of `data` and returns a strategy object accepted
        by `Backtest`.
    data : pandas.DataFrame
        Full price history; consecutive `window_size`-row windows are
        backtested one bar at a time.
    window_size : int
        Number of bars in each sliding window.

    Returns
    -------
    pandas.DataFrame of mismatching signals, or None (printing 'valid')
    when every pair of consecutive windows agrees.

    NOTE: deprecated, use `verify` now.
    """
    sp = None      # last-bar signals from the previous window
    mis_cur = {}   # mismatching signals as seen by the current window
    mis_prev = {}  # ...and what the previous window reported for the same bar
    print('iterative verification')
    for i in range(window_size, len(data)):
        s = Backtest(strategy_fn(data.iloc[i-window_size:i])).signals
        # The previous window's last bar must match the current window's
        # second-to-last bar; record both sides of any disagreement.
        # (Was the non-idiomatic `not sp is None`.)
        if (sp is not None) and (sp != s.iloc[-2]).any():
            ix = data.index[i]
            mis_prev[ix] = sp
            mis_cur[ix] = s.iloc[-2]
        sp = s.iloc[-1]
        prg = round(((float(i) - window_size) / (len(data) - window_size)) * 100, 1)
        sys.stdout.write(' \r%s%% done' % prg)
        sys.stdout.flush()
    # NOTE(review): pandas.Panel and DataFrame.sort() were removed in modern
    # pandas; this deprecated helper still targets the legacy API -- confirm
    # the pinned pandas version before relying on it.
    df = pandas.Panel(
        {'cur': pandas.DataFrame(mis_cur),
         'prev': pandas.DataFrame(mis_prev)}
    ).to_frame().swaplevel(0, 1).sort()
    df = df.ix[df['cur'] != df['prev']]
    if len(df):
        return df
    else:
        print('valid')
def frontal_iterative_signals(strategy_fn, data, window_size, verbose=True):
    """Collect the final signal of each sliding-window backtest.

    For every window of `window_size` rows ending just before bar `end`,
    run the strategy and keep only the signals of the window's last bar.
    Returns a DataFrame with one row per window.  When `verbose`, a
    progress percentage is written to stdout (only when it changes).
    """
    total = len(data)
    frontal_signals = []
    last_reported = None
    for end in range(window_size, total):
        window = data.iloc[end - window_size:end]
        frontal_signals.append(Backtest(strategy_fn(window)).signals.iloc[-1])
        if not verbose:
            continue
        progress = round(((float(end) - window_size) / (total - window_size)) * 100, 1)
        if progress != last_reported:
            sys.stdout.write(' \r%s%% done' % progress)
            sys.stdout.flush()
            last_reported = progress
    return pandas.DataFrame(frontal_signals)
def verify(strategy_fn, data, window_size, verbose=True):
    """
    Verify vectorized pandas backtest iteratively by running it
    in sliding window, bar-by-bar.

    Compares the bar-by-bar ("frontal") signals against the single
    vectorized backtest over the full history.  Returns a DataFrame of
    mismatching rows, or None when both agree.
    """
    fsig = frontal_iterative_signals(strategy_fn, data, window_size, verbose)
    bsig = Backtest(strategy_fn(data)).signals.reindex(fsig.index)
    # Rows where iterative and vectorized signals disagree in any column.
    # (Was `(fsig == bsig).T.all() == False` selected through the removed
    # `.ix` indexer; `.loc` with an inverted row-wise all() is equivalent.)
    mismatch_mask = ~(fsig == bsig).all(axis=1)
    comp = fsig.loc[mismatch_mask]
    if len(comp) != 0:
        if verbose:
            sys.stdout.write('\rverification did not pass\nreturning dataframe with mismatches')
            sys.stdout.flush()
        return comp
    elif verbose:
        # BUG FIX: the original flushed *before* writing, so the final
        # message could stay buffered.  Write first, then flush.
        sys.stdout.write('\rverification passed')
        sys.stdout.flush()
5139205 | """simd float32x4"""
def main():
    # NOTE: this is PythonJS dialect, not standard CPython -- the
    # `float32x4 name = ...` statements are typed declarations that the
    # PythonJS translator lowers to SIMD float32x4 values.
    float32x4 a = numpy.array( [1.1, 1.2, 1.3, 0.4], dtype=numpy.float32 )
    float32x4 b = numpy.array( [1.9, 1.8, 1.7, 0.6], dtype=numpy.float32 )
    # Lane-wise addition: (1.1+1.9, 1.2+1.8, 1.3+1.7, 0.4+0.6).
    c = a + b
    print(c)
    if PYTHON == 'PYTHONJS':
        # Under the PythonJS build the lanes are exposed as .x/.y/.z/.w.
        TestError( c.x==3.0 )
        TestError( c.y==3.0 )
        TestError( c.z==3.0 )
        TestError( c.w==1.0 )
    else:
        # Otherwise `c` stays a numpy array and is indexed by lane.
        TestError( c[0]==3.0 )
        TestError( c[1]==3.0 )
        TestError( c[2]==3.0 )
        TestError( c[3]==1.0 )
6549351 | <filename>aws_boto3_demo.py
"""
This is a python demo of boto3 library.
Written By: <NAME>
"""
import boto3
# Entry point to the S3 service, using boto3's default credential chain.
s3_resource = boto3.resource('s3')
#Create a Bucket
# NOTE(review): no CreateBucketConfiguration is given, so this relies on the
# default region behaviour -- confirm the target region before running.
s3_resource.create_bucket(Bucket="first-aws-bucket-1")
#List all buckets in S3
for bucket in s3_resource.buckets.all():
    print(bucket.name)
#Uploading an object into the Bucket
s3_resource.Object('first-aws-bucket-1', 'Screen_Shot.png').\
    upload_file(Filename='/Users/ankhipaul/Documents/Screenshots/Screen_Shot.png')
#Downloading an object from Bucket to local
s3_resource.Object('pythonusecase', 'doc.pdf').download_file(
    f'/Users/ankhipaul/Documents/doc.pdf')
#List all objects of one bucket
pythonusecase = s3_resource.Bucket(name = 'pythonusecase')
for object in pythonusecase.objects.all():
    print(object.key)
#Copy object old_convertcsv.csv as object new_convertcsv.csv
s3_resource.Object("pythonusecase", "new_convertcsv.csv").copy_from(CopySource="pythonusecase/old_convertcsv.csv")
#Delete object old_convertcsv.csv
s3_resource.Object("pythonusecase", "old_convertcsv.csv").delete()
#Delete bucket first-aws-bucket-1
# The bucket is emptied first: S3 refuses to delete a non-empty bucket.
bucket = s3_resource.Bucket('first-aws-bucket-1')
bucket.objects.all().delete()
s3_resource.Bucket("first-aws-bucket-1").delete()
#Encrypting an object with ServerSideEncryption
# 'AES256' requests S3-managed server-side encryption for this object.
s3_resource.Object('pythonusecase', 'random_pic.jpg').\
    upload_file(Filename='/Users/ankhipaul/Documents/random_pic.jpg',ExtraArgs={
        'ServerSideEncryption': 'AES256'})
#Enable versioning of a Bucket
s3_resource.BucketVersioning("pythonusecase").enable()
9624410 | """
Given an array of positive integers nums and a positive integer target, return the minimal length of a contiguous
subarray [nums[l], nums[l+1], ..., nums[r-1], nums[r]] whose sum is greater than or equal to target. If there is no
such subarray, return 0 instead.
Example 1:
Input: target = 7, nums = [2, 3, 1, 2, 4, 3], Output: 2
Explanation: The subarray [4, 3] has the minimal length under the problem constraint.
Example 2:
Input: target = 4, nums = [1, 4, 4], Output: 1
Example 3:
Input: target = 11, nums = [1, 1, 1, 1, 1, 1, 1, 1], Output: 0
"""
"""
We utilise a two-pointer approach. We initialise the left and right pointers, as well as the array sum, to 0. We also
initialise the min_length to infinity. We increment the array_sum by the value of the right pointer and move the right
pointer up at each step. If the array_sum is bigger than the target, we strip elements from the left side by
subtracting the value of the left pointer and moving the left pointer up, and update the min_length. We keep doing this
until the array_sum is no longer bigger than the target, then we move the right pointer again. We check to see if there
was a minimum length at all (otherwise, return 0) before returning min_length.
"""
def min_subarray_len(target, nums):
    """Length of the shortest contiguous subarray summing to >= target.

    Classic sliding-window technique: grow the window on the right, and
    while its sum reaches the target, record the window length and shrink
    from the left.  Returns 0 when no subarray qualifies.
    """
    best = float('inf')
    window_sum = 0
    start = 0
    for end, value in enumerate(nums):
        window_sum += value
        # Shrink from the left as long as the window still meets the target.
        while window_sum >= target:
            best = min(best, end - start + 1)
            window_sum -= nums[start]
            start += 1
    return 0 if best == float('inf') else best
# Sanity checks: the three examples from the problem statement above.
assert min_subarray_len(7, [2, 3, 1, 2, 4, 3]) == 2
assert min_subarray_len(4, [1, 4, 4]) == 1
assert min_subarray_len(11, [1, 1, 1, 1, 1, 1, 1, 1]) == 0
| StarcoderdataPython |
11255863 | """
Copyright (C) 2011 <NAME>
This file is part of QuantLib, a free-software/open-source library
for financial quantitative analysts and developers - http://quantlib.org/
QuantLib is free software: you can redistribute it and/or modify it
under the terms of the QuantLib license. You should have received a
copy of the license along with this program; if not, please email
<<EMAIL>>. The license is also available online at
<http://quantlib.org/license.shtml>.
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the license for more details.
"""
from QuantLib import *
import unittest
class AssetSwapTest(unittest.TestCase):
    def setUp(self):
        # initial setup
        # Market conventions and instruments shared by the asset-swap tests.
        self.termStructure = RelinkableYieldTermStructureHandle()
        self.swapSettlementDays = 2
        self.faceAmount = 100.0
        self.fixedConvention = Unadjusted
        self.compounding = Continuous
        self.fixedFrequency = Annual
        self.floatingFrequency = Semiannual
        # Floating-leg index forecasts off the relinkable curve handle above.
        self.iborIndex = Euribor(Period(self.floatingFrequency), self.termStructure)
        self.calendar = self.iborIndex.fixingCalendar()
        self.swapIndex= SwapIndex("EuriborSwapIsdaFixA", Period(10,Years), self.swapSettlementDays,
                                  self.iborIndex.currency(), self.calendar,
                                  Period(self.fixedFrequency), self.fixedConvention,
                                  self.iborIndex.dayCounter(), self.iborIndex)
        self.spread = 0.0
        self.nonnullspread = 0.003
        # Pin the evaluation date so every test run is reproducible.
        self.today = Date(24,April,2007)
        Settings.instance().evaluationDate = self.today
        # Flat 5% curve (Act/365 Fixed) linked into the handle used above.
        self.termStructure.linkTo(FlatForward(self.today, 0.05, Actual365Fixed()))
        self.yieldCurve = FlatForward(self.today, 0.05, Actual365Fixed())
        self.pricer = BlackIborCouponPricer()
        # Constant 20% swaption volatility and a 1% mean-reversion quote
        # parameterise the Hagan CMS coupon pricer.
        self.swaptionVolatilityStructure = SwaptionVolatilityStructureHandle(ConstantSwaptionVolatility(self.today, NullCalendar(),Following,
                                                                            0.2, Actual365Fixed()))
        self.meanReversionQuote = QuoteHandle(SimpleQuote(0.01))
        self.cmspricer = AnalyticHaganPricer(self.swaptionVolatilityStructure,
                                             GFunctionFactory.Standard,
                                             self.meanReversionQuote)
    def testConsistency(self) :
        """Testing consistency between fair price and fair spread...

        Round-trip check: a par asset swap entered at its own fair clean
        price (or its own fair spread) must have zero NPV, and the fair
        figures must be invariant when the pricing engine's NPV date is
        moved from the evaluation date to the bond settlement date.  The
        whole sequence is then repeated for a market (non-par) asset swap.
        """
        bondCalendar = TARGET()
        settlementDays = 3
        ## Fixed Underlying bond (Isin: DE0001135275 DBR 4 01/04/37)
        ## maturity doesn't occur on a business day
        bondSchedule = Schedule(Date(4,January,2005),
                                Date(4,January,2037),
                                Period(Annual), bondCalendar,
                                Unadjusted, Unadjusted,
                                DateGeneration.Backward, False)
        bond = FixedRateBond(settlementDays, self.faceAmount,
                             bondSchedule,[0.04],
                             ActualActual(ActualActual.ISDA),
                             Following,
                             100.0, Date(4,January,2005))
        payFixedRate = True
        bondPrice = 95.0
        isPar = True
        parAssetSwap = AssetSwap(payFixedRate,
                                 bond, bondPrice,
                                 self.iborIndex, self.spread,
                                 Schedule(),
                                 self.iborIndex.dayCounter(),
                                 isPar)
        # Engine settled on the bond settlement date, NPV on the evaluation date.
        swapEngine = DiscountingSwapEngine(self.termStructure,
                                           True,
                                           bond.settlementDate(),
                                           Settings.instance().evaluationDate)
        parAssetSwap.setPricingEngine(swapEngine)
        fairCleanPrice = parAssetSwap.fairCleanPrice()
        fairSpread = parAssetSwap.fairSpread()
        tolerance = 1.0e-13
        # A par asset swap entered at the fair clean price must have zero NPV,
        # and must reproduce the input price and spread.
        assetSwap2 = AssetSwap(payFixedRate, bond, fairCleanPrice,
                               self.iborIndex, self.spread,
                               Schedule(),
                               self.iborIndex.dayCounter(),
                               isPar)
        assetSwap2.setPricingEngine(swapEngine)
        self.assertFalse(abs(assetSwap2.NPV())>tolerance,
                         "\npar asset swap fair clean price doesn't zero the NPV: "
                         + "\n clean price: " + str(bondPrice)
                         + "\n fair clean price: " + str(fairCleanPrice)
                         + "\n NPV: " + str(assetSwap2.NPV())
                         + "\n tolerance: " + str(tolerance))
        self.assertFalse(abs(assetSwap2.fairCleanPrice() - fairCleanPrice)>tolerance,
                         "\npar asset swap fair clean price doesn't equal input "
                         + "clean price at zero NPV: "
                         + "\n input clean price: " + str(fairCleanPrice)
                         + "\n fair clean price: " + str(assetSwap2.fairCleanPrice())
                         + "\n NPV: " + str(assetSwap2.NPV())
                         + "\n tolerance: " + str(tolerance))
        self.assertFalse(abs(assetSwap2.fairSpread() - self.spread)>tolerance,
                         "\npar asset swap fair spread doesn't equal input spread "
                         + "at zero NPV: "
                         + "\n input spread: " + str(self.spread )
                         + "\n fair spread: " + str(assetSwap2.fairSpread() )
                         + "\n NPV: " + str(assetSwap2.NPV() )
                         + "\n tolerance: " + str(tolerance))
        # Likewise, a par asset swap entered at the fair spread must have zero NPV.
        assetSwap3 = AssetSwap(payFixedRate,
                               bond, bondPrice,
                               self.iborIndex, fairSpread,
                               Schedule(),
                               self.iborIndex.dayCounter(),
                               isPar)
        assetSwap3.setPricingEngine(swapEngine)
        self.assertFalse(abs(assetSwap3.NPV())>tolerance,
                         "\npar asset swap fair spread doesn't zero the NPV: "
                         + "\n spread: " + str(self.spread)
                         + "\n fair spread: " + str(fairSpread)
                         + "\n NPV: " + str(assetSwap3.NPV())
                         + "\n tolerance: " + str(tolerance))
        self.assertFalse(abs(assetSwap3.fairCleanPrice() - bondPrice)>tolerance,
                         "\npar asset swap fair clean price doesn't equal input "
                         + "clean price at zero NPV: "
                         + "\n input clean price: " + str(bondPrice)
                         + "\n fair clean price: " + str(assetSwap3.fairCleanPrice())
                         + "\n NPV: " + str(assetSwap3.NPV())
                         + "\n tolerance: " + str(tolerance))
        self.assertFalse(abs(assetSwap3.fairSpread() - fairSpread)>tolerance,
                         "\npar asset swap fair spread doesn't equal input spread at"
                         + " zero NPV: "
                         + "\n input spread: " + str(fairSpread)
                         + "\n fair spread: " + str(assetSwap3.fairSpread())
                         + "\n NPV: " + str(assetSwap3.NPV())
                         + "\n tolerance: " + str(tolerance))
        ## let's change the npv date
        swapEngine = DiscountingSwapEngine(self.termStructure,
                                           True,
                                           bond.settlementDate(),
                                           bond.settlementDate())
        parAssetSwap.setPricingEngine(swapEngine)
        ## fair clean price and fair spread should not change
        self.assertFalse(abs(parAssetSwap.fairCleanPrice() - fairCleanPrice)>tolerance,
                         "\npar asset swap fair clean price changed with NpvDate:"
                         + "\n expected clean price: " + str(fairCleanPrice)
                         + "\n fair clean price: " + str(parAssetSwap.fairCleanPrice())
                         + "\n tolerance: " + str(tolerance))
        self.assertFalse(abs(parAssetSwap.fairSpread() - fairSpread)>tolerance,
                         "\npar asset swap fair spread changed with NpvDate:"
                         + "\n expected spread: " + str(fairSpread)
                         + "\n fair spread: " + str(parAssetSwap.fairSpread())
                         + "\n tolerance: " + str(tolerance))
        # Re-run the zero-NPV round trips under the new NPV date.
        assetSwap2 = AssetSwap(payFixedRate,
                               bond, fairCleanPrice,
                               self.iborIndex, self.spread,
                               Schedule(),
                               self.iborIndex.dayCounter(),
                               isPar)
        assetSwap2.setPricingEngine(swapEngine)
        self.assertFalse(abs(assetSwap2.NPV())>tolerance,
                         "\npar asset swap fair clean price doesn't zero the NPV: "
                         + "\n clean price: " + str(bondPrice)
                         + "\n fair clean price: " + str(fairCleanPrice)
                         + "\n NPV: " + str(assetSwap2.NPV())
                         + "\n tolerance: " + str(tolerance))
        self.assertFalse(abs(assetSwap2.fairCleanPrice() - fairCleanPrice)>tolerance,
                         "\npar asset swap fair clean price doesn't equal input "
                         + "clean price at zero NPV: "
                         + "\n input clean price: " + str(fairCleanPrice)
                         + "\n fair clean price: " + str(assetSwap2.fairCleanPrice())
                         + "\n NPV: " + str(assetSwap2.NPV())
                         + "\n tolerance: " + str(tolerance))
        self.assertFalse(abs(assetSwap2.fairSpread() - self.spread)>tolerance,
                         "\npar asset swap fair spread doesn't equal input spread at zero NPV: "
                         + "\n input spread: " + str(self.spread)
                         + "\n fair spread: " + str(assetSwap2.fairSpread())
                         + "\n NPV: " + str(assetSwap2.NPV())
                         + "\n tolerance: " + str(tolerance))
        assetSwap3 = AssetSwap(payFixedRate,
                               bond, bondPrice,
                               self.iborIndex, fairSpread,
                               Schedule(),
                               self.iborIndex.dayCounter(),
                               isPar)
        assetSwap3.setPricingEngine(swapEngine)
        self.assertFalse(abs(assetSwap3.NPV())>tolerance,
                         "\npar asset swap fair spread doesn't zero the NPV: "
                         + "\n spread: " + str(self.spread)
                         + "\n fair spread: " + str(fairSpread)
                         + "\n NPV: " + str(assetSwap3.NPV())
                         + "\n tolerance: " + str(tolerance))
        self.assertFalse(abs(assetSwap3.fairCleanPrice() - bondPrice)>tolerance,
                         "\npar asset swap fair clean price doesn't equal input "
                         + "clean price at zero NPV: "
                         + "\n input clean price: " + str(bondPrice)
                         + "\n fair clean price: " + str(assetSwap3.fairCleanPrice())
                         + "\n NPV: " + str(assetSwap3.NPV())
                         + "\n tolerance: " + str(tolerance))
        self.assertFalse(abs(assetSwap3.fairSpread() - fairSpread)>tolerance,
                         "\npar asset swap fair spread doesn't equal input spread at zero NPV: "
                         + "\n input spread: " + str(fairSpread)
                         + "\n fair spread: " + str(assetSwap3.fairSpread())
                         + "\n NPV: " + str(assetSwap3.NPV())
                         + "\n tolerance: " + str(tolerance))
        ## now market asset swap
        # Same round trips, this time with a market (non-par) asset swap.
        isPar = False
        mktAssetSwap = AssetSwap (payFixedRate,
                                  bond, bondPrice,
                                  self.iborIndex, self.spread,
                                  Schedule(),
                                  self.iborIndex.dayCounter(),
                                  isPar)
        swapEngine = DiscountingSwapEngine(self.termStructure,
                                           True,
                                           bond.settlementDate(),
                                           Settings.instance().evaluationDate)
        mktAssetSwap.setPricingEngine(swapEngine)
        fairCleanPrice = mktAssetSwap.fairCleanPrice()
        fairSpread = mktAssetSwap.fairSpread()
        assetSwap4 = AssetSwap (payFixedRate,
                                bond, fairCleanPrice,
                                self.iborIndex, self.spread,
                                Schedule(),
                                self.iborIndex.dayCounter(),
                                isPar)
        assetSwap4.setPricingEngine(swapEngine)
        self.assertFalse(abs(assetSwap4.NPV())>tolerance,
                         "\nmarket asset swap fair clean price doesn't zero the NPV: "
                         + "\n clean price: " + str(bondPrice)
                         + "\n fair clean price: " + str(fairCleanPrice)
                         + "\n NPV: " + str(assetSwap4.NPV())
                         + "\n tolerance: " + str(tolerance))
        self.assertFalse(abs(assetSwap4.fairCleanPrice() - fairCleanPrice)>tolerance,
                         "\nmarket asset swap fair clean price doesn't equal input "
                         + "clean price at zero NPV: "
                         + "\n input clean price: " + str(fairCleanPrice)
                         + "\n fair clean price: " + str(assetSwap4.fairCleanPrice())
                         + "\n NPV: " + str(assetSwap4.NPV())
                         + "\n tolerance: " + str(tolerance))
        self.assertFalse(abs(assetSwap4.fairSpread() - self.spread)>tolerance,
                         "\nmarket asset swap fair spread doesn't equal input spread"
                         + " at zero NPV: "
                         + "\n input spread: " + str(self.spread)
                         + "\n fair spread: " + str(assetSwap4.fairSpread())
                         + "\n NPV: " + str(assetSwap4.NPV())
                         + "\n tolerance: " + str(tolerance))
        assetSwap5 = AssetSwap(payFixedRate,
                               bond, bondPrice,
                               self.iborIndex, fairSpread,
                               Schedule(),
                               self.iborIndex.dayCounter(),
                               isPar)
        assetSwap5.setPricingEngine(swapEngine)
        self.assertFalse(abs(assetSwap5.NPV())>tolerance,
                         "\nmarket asset swap fair spread doesn't zero the NPV: "
                         + "\n spread: " + str(self.spread)
                         + "\n fair spread: " + str(fairSpread)
                         + "\n NPV: " + str(assetSwap5.NPV())
                         + "\n tolerance: " + str(tolerance))
        self.assertFalse(abs(assetSwap5.fairCleanPrice() - bondPrice)>tolerance,
                         "\nmarket asset swap fair clean price doesn't equal input "
                         + "clean price at zero NPV: "
                         + "\n input clean price: " + str(bondPrice)
                         + "\n fair clean price: " + str(assetSwap5.fairCleanPrice())
                         + "\n NPV: " + str(assetSwap5.NPV())
                         + "\n tolerance: " + str(tolerance))
        self.assertFalse(abs(assetSwap5.fairSpread() - fairSpread)>tolerance,
                         "\nmarket asset swap fair spread doesn't equal input spread at zero NPV: "
                         + "\n input spread: " + str(fairSpread)
                         + "\n fair spread: " + str(assetSwap5.fairSpread())
                         + "\n NPV: " + str(assetSwap5.NPV())
                         + "\n tolerance: " + str(tolerance))
        ## let's change the npv date
        swapEngine = DiscountingSwapEngine(self.termStructure,
                                           True,
                                           bond.settlementDate(),
                                           bond.settlementDate())
        mktAssetSwap.setPricingEngine(swapEngine)
        ## fair clean price and fair spread should not change
        self.assertFalse(abs(mktAssetSwap.fairCleanPrice() - fairCleanPrice)>tolerance,
                         "\nmarket asset swap fair clean price changed with NpvDate:"
                         + "\n expected clean price: " + str(fairCleanPrice)
                         + "\n fair clean price: " + str(mktAssetSwap.fairCleanPrice())
                         + "\n tolerance: " + str(tolerance))
        self.assertFalse(abs(mktAssetSwap.fairSpread() - fairSpread)>tolerance,
                         "\nmarket asset swap fair spread changed with NpvDate:"
                         + "\n expected spread: " + str(fairSpread)
                         + "\n fair spread: " + str(mktAssetSwap.fairSpread())
                         + "\n tolerance: " + str(tolerance))
        # Zero-NPV round trips for the market asset swap under the new NPV date.
        assetSwap4 = AssetSwap(payFixedRate,
                               bond, fairCleanPrice,
                               self.iborIndex, self.spread,
                               Schedule(),
                               self.iborIndex.dayCounter(),
                               isPar)
        assetSwap4.setPricingEngine(swapEngine)
        self.assertFalse(abs(assetSwap4.NPV())>tolerance,
                         "\nmarket asset swap fair clean price doesn't zero the NPV: "
                         + "\n clean price: " + str(bondPrice)
                         + "\n fair clean price: " + str(fairCleanPrice)
                         + "\n NPV: " + str(assetSwap4.NPV())
                         + "\n tolerance: " + str(tolerance))
        self.assertFalse(abs(assetSwap4.fairCleanPrice() - fairCleanPrice)>tolerance,
                         "\nmarket asset swap fair clean price doesn't equal input "
                         + "clean price at zero NPV: "
                         + "\n input clean price: " + str(fairCleanPrice)
                         + "\n fair clean price: " + str(assetSwap4.fairCleanPrice())
                         + "\n NPV: " + str(assetSwap4.NPV())
                         + "\n tolerance: " + str(tolerance))
        self.assertFalse(abs(assetSwap4.fairSpread() - self.spread)>tolerance,
                         "\nmarket asset swap fair spread doesn't equal input spread at zero NPV: "
                         + "\n input spread: " + str(self.spread)
                         + "\n fair spread: " + str(assetSwap4.fairSpread())
                         + "\n NPV: " + str(assetSwap4.NPV())
                         + "\n tolerance: " + str(tolerance))
        assetSwap5 = AssetSwap(payFixedRate,
                               bond, bondPrice,
                               self.iborIndex, fairSpread,
                               Schedule(),
                               self.iborIndex.dayCounter(),
                               isPar)
        assetSwap5.setPricingEngine(swapEngine)
        self.assertFalse(abs(assetSwap5.NPV())>tolerance,
                         "\nmarket asset swap fair spread doesn't zero the NPV: "
                         + "\n spread: " + str(self.spread)
                         + "\n fair spread: " + str(fairSpread)
                         + "\n NPV: " + str(assetSwap5.NPV())
                         + "\n tolerance: " + str(tolerance))
        self.assertFalse(abs(assetSwap5.fairCleanPrice() - bondPrice)>tolerance,
                         "\nmarket asset swap fair clean price doesn't equal input "
                         + "clean price at zero NPV: "
                         + "\n input clean price: " + str(bondPrice)
                         + "\n fair clean price: " + str(assetSwap5.fairCleanPrice())
                         + "\n NPV: " + str(assetSwap5.NPV())
                         + "\n tolerance: " + str(tolerance))
        self.assertFalse(abs(assetSwap5.fairSpread() - fairSpread)>tolerance,
                         "\nmarket asset swap fair spread doesn't equal input spread at zero NPV: "
                         + "\n input spread: " + str(fairSpread)
                         + "\n fair spread: " + str(assetSwap5.fairSpread())
                         + "\n NPV: " + str(assetSwap5.NPV())
                         + "\n tolerance: " + str(tolerance))
def testImpliedValue(self):
"""Testing implied bond value against asset-swap fair price with null spread..."""
bondCalendar = TARGET()
settlementDays = 3
fixingDays = 2
payFixedRate = True
parAssetSwap = True
inArrears = False
## Fixed Underlying bond (Isin: DE0001135275 DBR 4 01/04/37)
## maturity doesn't occur on a business day
fixedBondSchedule1 = Schedule(Date(4,January,2005),
Date(4,January,2037),
Period(Annual), bondCalendar,
Unadjusted, Unadjusted,
DateGeneration.Backward, False)
fixedBond1 = FixedRateBond(settlementDays, self.faceAmount,
fixedBondSchedule1,
[0.04],
ActualActual(ActualActual.ISDA),
Following,
100.0, Date(4,January,2005))
bondEngine = DiscountingBondEngine(self.termStructure)
swapEngine = DiscountingSwapEngine(self.termStructure, False)
fixedBond1.setPricingEngine(bondEngine)
fixedBondPrice1 = fixedBond1.cleanPrice()
fixedBondAssetSwap1 = AssetSwap(payFixedRate,
fixedBond1, fixedBondPrice1,
self.iborIndex, self.spread,
Schedule(),
self.iborIndex.dayCounter(),
parAssetSwap)
fixedBondAssetSwap1.setPricingEngine(swapEngine)
fixedBondAssetSwapPrice1 = fixedBondAssetSwap1.fairCleanPrice()
tolerance = 1.0e-13
error1 = abs(fixedBondAssetSwapPrice1-fixedBondPrice1)
self.assertFalse(error1>tolerance,
"wrong zero spread asset swap price for fixed bond:"
+ "\n bond's clean price: " + str(fixedBondPrice1)
+ "\n asset swap fair price: " + str(fixedBondAssetSwapPrice1)
+ "\n error: " + str(error1)
+ "\n tolerance: " + str(tolerance))
## Fixed Underlying bond (Isin: IT0006527060 IBRD 5 02/05/19)
## maturity occurs on a business day
fixedBondSchedule2 = Schedule(Date(5,February,2005),
Date(5,February,2019),
Period(Annual), bondCalendar,
Unadjusted, Unadjusted,
DateGeneration.Backward, False)
fixedBond2 = FixedRateBond(settlementDays, self.faceAmount,
fixedBondSchedule2,
[0.05],
Thirty360(Thirty360.BondBasis),
Following,
100.0, Date(5,February,2005))
fixedBond2.setPricingEngine(bondEngine)
fixedBondPrice2 = fixedBond2.cleanPrice()
fixedBondAssetSwap2 = AssetSwap(payFixedRate,
fixedBond2, fixedBondPrice2,
self.iborIndex, self.spread,
Schedule(),
self.iborIndex.dayCounter(),
parAssetSwap)
fixedBondAssetSwap2.setPricingEngine(swapEngine)
fixedBondAssetSwapPrice2 = fixedBondAssetSwap2.fairCleanPrice()
error2 = abs(fixedBondAssetSwapPrice2-fixedBondPrice2)
self.assertFalse(error2>tolerance,
"wrong zero spread asset swap price for fixed bond:"
+ "\n bond's clean price: " + str(fixedBondPrice2)
+ "\n asset swap fair price: " + str(fixedBondAssetSwapPrice2)
+ "\n error: " + str(error2)
+ "\n tolerance: " + str(tolerance))
## FRN Underlying bond (Isin: IT0003543847 ISPIM 0 09/29/13)
## maturity doesn't occur on a business day
floatingBondSchedule1 = Schedule(Date(29,September,2003),
Date(29,September,2013),
Period(Semiannual), bondCalendar,
Unadjusted, Unadjusted,
DateGeneration.Backward, False)
floatingBond1 =FloatingRateBond(settlementDays, self.faceAmount,
floatingBondSchedule1,
self.iborIndex, Actual360(),
Following, fixingDays,
[1],
[0.0056],
[],
[],
inArrears,
100.0, Date(29,September,2003))
floatingBond1.setPricingEngine(bondEngine)
setCouponPricer(floatingBond1.cashflows(), self.pricer)
self.iborIndex.addFixing(Date(27,March,2007), 0.0402)
floatingBondPrice1 = floatingBond1.cleanPrice()
floatingBondAssetSwap1 = AssetSwap(payFixedRate,
floatingBond1, floatingBondPrice1,
self.iborIndex, self.spread,
Schedule(),
self.iborIndex.dayCounter(),
parAssetSwap)
floatingBondAssetSwap1.setPricingEngine(swapEngine)
floatingBondAssetSwapPrice1 = floatingBondAssetSwap1.fairCleanPrice()
error3 = abs(floatingBondAssetSwapPrice1-floatingBondPrice1)
self.assertFalse(error3>tolerance,
"wrong zero spread asset swap price for floater:"
+ "\n bond's clean price: " + str(floatingBondPrice1)
+ "\n asset swap fair price: " + str(floatingBondAssetSwapPrice1)
+ "\n error: " + str(error3)
+ "\n tolerance: " + str(tolerance))
## FRN Underlying bond (Isin: XS0090566539 COE 0 09/24/18)
## maturity occurs on a business day
floatingBondSchedule2 = Schedule(Date(24,September,2004),
Date(24,September,2018),
Period(Semiannual), bondCalendar,
ModifiedFollowing, ModifiedFollowing,
DateGeneration.Backward, False)
floatingBond2 = FloatingRateBond(settlementDays, self.faceAmount,
floatingBondSchedule2,
self.iborIndex, Actual360(),
ModifiedFollowing, fixingDays,
[1],
[0.0025],
[],
[],
inArrears,
100.0, Date(24,September,2004))
floatingBond2.setPricingEngine(bondEngine)
setCouponPricer(floatingBond2.cashflows(), self.pricer)
self.iborIndex.addFixing(Date(22,March,2007), 0.04013)
currentCoupon=0.04013+0.0025
floatingCurrentCoupon= floatingBond2.nextCouponRate()
error4= abs(floatingCurrentCoupon-currentCoupon)
self.assertFalse(error4>tolerance,
"wrong current coupon is returned for floater bond:"
+ "\n bond's calculated current coupon: " + str(currentCoupon)
+ "\n current coupon asked to the bond: " + str(floatingCurrentCoupon)
+ "\n error: " + str(error4)
+ "\n tolerance: " + str(tolerance))
floatingBondPrice2 = floatingBond2.cleanPrice()
floatingBondAssetSwap2 = AssetSwap(payFixedRate,
floatingBond2, floatingBondPrice2,
self.iborIndex, self.spread,
Schedule(),
self.iborIndex.dayCounter(),
parAssetSwap)
floatingBondAssetSwap2.setPricingEngine(swapEngine)
floatingBondAssetSwapPrice2 = floatingBondAssetSwap2.fairCleanPrice()
error5 = abs(floatingBondAssetSwapPrice2-floatingBondPrice2)
self.assertFalse(error5>tolerance,
"wrong zero spread asset swap price for floater:"
+ "\n bond's clean price: " + str(floatingBondPrice2)
+ "\n asset swap fair price: " + str(floatingBondAssetSwapPrice2)
+ "\n error: " + str(error5)
+ "\n tolerance: " + str(tolerance))
## CMS Underlying bond (Isin: XS0228052402 CRDIT 0 8/22/20)
## maturity doesn't occur on a business day
cmsBondSchedule1 = Schedule(Date(22,August,2005),
Date(22,August,2020),
Period(Annual), bondCalendar,
Unadjusted, Unadjusted,
DateGeneration.Backward, False)
cmsBond1 = CmsRateBond(settlementDays, self.faceAmount,
cmsBondSchedule1,
self.swapIndex, Thirty360(),
Following, fixingDays,
[1.0],
[0.0],
[0.055],
[0.025],
inArrears,
100.0, Date(22,August,2005))
cmsBond1.setPricingEngine(bondEngine)
setCouponPricer(cmsBond1.cashflows(), self.cmspricer)
self.swapIndex.addFixing(Date(18,August,2006), 0.04158)
cmsBondPrice1 = cmsBond1.cleanPrice()
cmsBondAssetSwap1 = AssetSwap(payFixedRate,
cmsBond1, cmsBondPrice1,
self.iborIndex, self.spread,
Schedule(),
self.iborIndex.dayCounter(),
parAssetSwap)
cmsBondAssetSwap1.setPricingEngine(swapEngine)
cmsBondAssetSwapPrice1 = cmsBondAssetSwap1.fairCleanPrice()
error6 = abs(cmsBondAssetSwapPrice1-cmsBondPrice1)
self.assertFalse(error6>tolerance,
"wrong zero spread asset swap price for cms bond:"
+ "\n bond's clean price: " + str(cmsBondPrice1)
+ "\n asset swap fair price: " + str(cmsBondAssetSwapPrice1)
+ "\n error: " + str(error6)
+ "\n tolerance: " + str(tolerance))
## CMS Underlying bond (Isin: XS0218766664 ISPIM 0 5/6/15)
## maturity occurs on a business day
cmsBondSchedule2 = Schedule(Date(6,May,2005),
Date(6,May,2015),
Period(Annual), bondCalendar,
Unadjusted, Unadjusted,
DateGeneration.Backward, False)
cmsBond2 = CmsRateBond(settlementDays, self.faceAmount, cmsBondSchedule2,
self.swapIndex, Thirty360(),
Following, fixingDays,
[0.84], [0.0],
[], [],
inArrears,
100.0, Date(6,May,2005))
cmsBond2.setPricingEngine(bondEngine)
setCouponPricer(cmsBond2.cashflows(), self.cmspricer)
self.swapIndex.addFixing(Date(4,May,2006), 0.04217)
cmsBondPrice2 = cmsBond2.cleanPrice()
cmsBondAssetSwap2 = AssetSwap(payFixedRate,
cmsBond2, cmsBondPrice2,
self.iborIndex, self.spread,
Schedule(),
self.iborIndex.dayCounter(),
parAssetSwap)
cmsBondAssetSwap2.setPricingEngine(swapEngine)
cmsBondAssetSwapPrice2 = cmsBondAssetSwap2.fairCleanPrice()
error7 = abs(cmsBondAssetSwapPrice2-cmsBondPrice2)
self.assertFalse(error7>tolerance,
"wrong zero spread asset swap price for cms bond:"
+ "\n bond's clean price: " + str(cmsBondPrice2)
+ "\n asset swap fair price: " + str(cmsBondAssetSwapPrice2)
+ "\n error: " + str(error7)
+ "\n tolerance: " + str(tolerance))
## Zero Coupon bond (Isin: DE0004771662 IBRD 0 12/20/15)
## maturity doesn't occur on a business day
zeroCpnBond1 = ZeroCouponBond(settlementDays, bondCalendar, self.faceAmount,
Date(20,December,2015),
Following,
100.0, Date(19,December,1985))
zeroCpnBond1.setPricingEngine(bondEngine)
zeroCpnBondPrice1 = zeroCpnBond1.cleanPrice()
zeroCpnAssetSwap1 = AssetSwap(payFixedRate,
zeroCpnBond1, zeroCpnBondPrice1,
self.iborIndex, self.spread,
Schedule(),
self.iborIndex.dayCounter(),
parAssetSwap)
zeroCpnAssetSwap1.setPricingEngine(swapEngine)
zeroCpnBondAssetSwapPrice1 = zeroCpnAssetSwap1.fairCleanPrice()
error8 = abs(cmsBondAssetSwapPrice1-cmsBondPrice1)
self.assertFalse(error8>tolerance,
"wrong zero spread asset swap price for zero cpn bond:"
+ "\n bond's clean price: " + str(zeroCpnBondPrice1)
+ "\n asset swap fair price: " + str(zeroCpnBondAssetSwapPrice1)
+ "\n error: " + str(error8)
+ "\n tolerance: " + str(tolerance))
## Zero Coupon bond (Isin: IT0001200390 ISPIM 0 02/17/28)
## maturity occurs on a business day
zeroCpnBond2 = ZeroCouponBond(settlementDays, bondCalendar, self.faceAmount,
Date(17,February,2028),
Following,
100.0, Date(17,February,1998))
zeroCpnBond2.setPricingEngine(bondEngine)
zeroCpnBondPrice2 = zeroCpnBond2.cleanPrice()
zeroCpnAssetSwap2 = AssetSwap(payFixedRate,
zeroCpnBond2, zeroCpnBondPrice2,
self.iborIndex, self.spread,
Schedule(),
self.iborIndex.dayCounter(),
parAssetSwap)
zeroCpnAssetSwap2.setPricingEngine(swapEngine)
zeroCpnBondAssetSwapPrice2 = zeroCpnAssetSwap2.fairCleanPrice()
error9 = abs(cmsBondAssetSwapPrice2-cmsBondPrice2)
self.assertFalse(error9>tolerance,
"wrong zero spread asset swap price for zero cpn bond:"
+ "\n bond's clean price: " + str(zeroCpnBondPrice2)
+ "\n asset swap fair price: " + str(zeroCpnBondAssetSwapPrice2)
+ "\n error: " + str(error9)
+ "\n tolerance: " + str(tolerance))
    def testMarketASWSpread(self) :
        """Testing relationship between market asset swap and par asset swap...

        For each bond type, the market asset-swap spread must equal
        100 * parSpread / dirtyPrice, where dirtyPrice is the observed
        clean price plus accrued amount.
        """
        bondCalendar = TARGET()
        settlementDays = 3
        fixingDays = 2
        payFixedRate = True
        parAssetSwap = True
        mktAssetSwap = False
        inArrears = False
        ## Fixed Underlying bond (Isin: DE0001135275 DBR 4 01/04/37)
        ## maturity doesn't occur on a business day
        fixedBondSchedule1 = Schedule (Date(4,January,2005),
                                       Date(4,January,2037),
                                       Period(Annual), bondCalendar,
                                       Unadjusted, Unadjusted,
                                       DateGeneration.Backward, False)
        fixedBond1 = FixedRateBond(settlementDays, self.faceAmount, fixedBondSchedule1,
                                   [0.04],
                                   ActualActual(ActualActual.ISDA), Following,
                                   100.0, Date(4,January,2005))
        bondEngine = DiscountingBondEngine(self.termStructure)
        swapEngine = DiscountingSwapEngine(self.termStructure,False)
        fixedBond1.setPricingEngine(bondEngine)
        fixedBondMktPrice1 = 89.22 ## market price observed on 7th June 2007
        # Dirty (full) price = clean market price + accrued.
        fixedBondMktFullPrice1=fixedBondMktPrice1+fixedBond1.accruedAmount()
        fixedBondParAssetSwap1 = AssetSwap(payFixedRate,
                                           fixedBond1, fixedBondMktPrice1,
                                           self.iborIndex, self.spread,
                                           Schedule(),
                                           self.iborIndex.dayCounter(),
                                           parAssetSwap)
        fixedBondParAssetSwap1.setPricingEngine(swapEngine)
        fixedBondParAssetSwapSpread1 = fixedBondParAssetSwap1.fairSpread()
        fixedBondMktAssetSwap1 = AssetSwap(payFixedRate,
                                           fixedBond1, fixedBondMktPrice1,
                                           self.iborIndex, self.spread,
                                           Schedule(),
                                           self.iborIndex.dayCounter(),
                                           mktAssetSwap)
        fixedBondMktAssetSwap1.setPricingEngine(swapEngine)
        fixedBondMktAssetSwapSpread1 = fixedBondMktAssetSwap1.fairSpread()
        tolerance = 1.0e-13
        # Market ASW spread vs 100 * par spread / dirty price.
        error1 = abs(fixedBondMktAssetSwapSpread1-
                     100*fixedBondParAssetSwapSpread1/fixedBondMktFullPrice1)
        self.assertFalse (error1>tolerance,
                          "wrong asset swap spreads for fixed bond:"
                          + "\n market ASW spread: " + str(fixedBondMktAssetSwapSpread1)
                          + "\n par ASW spread: " + str(fixedBondParAssetSwapSpread1)
                          + "\n error: " + str(error1)
                          + "\n tolerance: " + str(tolerance))
        ## Fixed Underlying bond (Isin: IT0006527060 IBRD 5 02/05/19)
        ## maturity occurs on a business day
        fixedBondSchedule2 = Schedule(Date(5,February,2005),
                                      Date(5,February,2019),
                                      Period(Annual), bondCalendar,
                                      Unadjusted, Unadjusted,
                                      DateGeneration.Backward, False)
        fixedBond2 =FixedRateBond(settlementDays, self.faceAmount, fixedBondSchedule2,
                                  [0.05],
                                  Thirty360(Thirty360.BondBasis), Following,
                                  100.0, Date(5,February,2005))
        fixedBond2.setPricingEngine(bondEngine)
        fixedBondMktPrice2 = 99.98 ## market price observed on 7th June 2007
        fixedBondMktFullPrice2 = fixedBondMktPrice2+fixedBond2.accruedAmount()
        fixedBondParAssetSwap2 = AssetSwap (payFixedRate,
                                            fixedBond2, fixedBondMktPrice2,
                                            self.iborIndex, self.spread,
                                            Schedule(),
                                            self.iborIndex.dayCounter(),
                                            parAssetSwap)
        fixedBondParAssetSwap2.setPricingEngine(swapEngine)
        fixedBondParAssetSwapSpread2 = fixedBondParAssetSwap2.fairSpread()
        fixedBondMktAssetSwap2 = AssetSwap(payFixedRate,
                                           fixedBond2, fixedBondMktPrice2,
                                           self.iborIndex, self.spread,
                                           Schedule(),
                                           self.iborIndex.dayCounter(),
                                           mktAssetSwap)
        fixedBondMktAssetSwap2.setPricingEngine(swapEngine)
        fixedBondMktAssetSwapSpread2 = fixedBondMktAssetSwap2.fairSpread()
        error2 = abs(fixedBondMktAssetSwapSpread2-
                     100*fixedBondParAssetSwapSpread2/fixedBondMktFullPrice2)
        self.assertFalse(error2>tolerance,
                         "wrong asset swap spreads for fixed bond:"
                         + "\n market ASW spread: " + str(fixedBondMktAssetSwapSpread2)
                         + "\n par ASW spread: " + str(fixedBondParAssetSwapSpread2)
                         + "\n error: " + str(error2)
                         + "\n tolerance: " + str(tolerance))
        ## FRN Underlying bond (Isin: IT0003543847 ISPIM 0 09/29/13)
        ## maturity doesn't occur on a business day
        floatingBondSchedule1 = Schedule(Date(29,September,2003),
                                         Date(29,September,2013),
                                         Period(Semiannual), bondCalendar,
                                         Unadjusted, Unadjusted,
                                         DateGeneration.Backward, False)
        floatingBond1 = FloatingRateBond(settlementDays, self.faceAmount,
                                         floatingBondSchedule1,
                                         self.iborIndex, Actual360(),
                                         Following, fixingDays,
                                         [1], [0.0056],
                                         [],[],
                                         inArrears,
                                         100.0, Date(29,September,2003))
        floatingBond1.setPricingEngine(bondEngine)
        setCouponPricer(floatingBond1.cashflows(), self.pricer)
        # Past fixing needed to value the current coupon.
        self.iborIndex.addFixing(Date(27,March,2007), 0.0402)
        ## market price observed on 7th June 2007
        floatingBondMktPrice1 = 101.64
        floatingBondMktFullPrice1 = floatingBondMktPrice1+floatingBond1.accruedAmount()
        floatingBondParAssetSwap1 = AssetSwap(payFixedRate,
                                              floatingBond1, floatingBondMktPrice1,
                                              self.iborIndex, self.spread,
                                              Schedule(),
                                              self.iborIndex.dayCounter(),
                                              parAssetSwap)
        floatingBondParAssetSwap1.setPricingEngine(swapEngine)
        floatingBondParAssetSwapSpread1 = floatingBondParAssetSwap1.fairSpread()
        floatingBondMktAssetSwap1 = AssetSwap(payFixedRate,
                                              floatingBond1, floatingBondMktPrice1,
                                              self.iborIndex, self.spread,
                                              Schedule(),
                                              self.iborIndex.dayCounter(),
                                              mktAssetSwap)
        floatingBondMktAssetSwap1.setPricingEngine(swapEngine)
        floatingBondMktAssetSwapSpread1 = floatingBondMktAssetSwap1.fairSpread()
        error3 = abs(floatingBondMktAssetSwapSpread1-
                     100*floatingBondParAssetSwapSpread1/floatingBondMktFullPrice1)
        self.assertFalse(error3>tolerance,
                         "wrong asset swap spreads for floating bond:"
                         + "\n market ASW spread: " + str(floatingBondMktAssetSwapSpread1)
                         + "\n par ASW spread: " + str(floatingBondParAssetSwapSpread1)
                         + "\n error: " + str(error3)
                         + "\n tolerance: " + str(tolerance))
        ## FRN Underlying bond (Isin: XS0090566539 COE 0 09/24/18)
        ## maturity occurs on a business day
        floatingBondSchedule2 = Schedule (Date(24,September,2004),
                                          Date(24,September,2018),
                                          Period(Semiannual), bondCalendar,
                                          ModifiedFollowing, ModifiedFollowing,
                                          DateGeneration.Backward, False)
        floatingBond2 = FloatingRateBond(settlementDays, self.faceAmount,
                                         floatingBondSchedule2,
                                         self.iborIndex, Actual360(),
                                         ModifiedFollowing, fixingDays,
                                         [1], [0.0025],
                                         [], [],
                                         inArrears,
                                         100.0, Date(24,September,2004))
        floatingBond2.setPricingEngine(bondEngine)
        setCouponPricer(floatingBond2.cashflows(), self.pricer)
        self.iborIndex.addFixing(Date(22,March,2007), 0.04013)
        ## market price observed on 7th June 2007
        floatingBondMktPrice2 = 101.248
        floatingBondMktFullPrice2 = floatingBondMktPrice2+floatingBond2.accruedAmount()
        floatingBondParAssetSwap2 = AssetSwap (payFixedRate,
                                               floatingBond2, floatingBondMktPrice2,
                                               self.iborIndex, self.spread,
                                               Schedule(),
                                               self.iborIndex.dayCounter(),
                                               parAssetSwap)
        floatingBondParAssetSwap2.setPricingEngine(swapEngine)
        floatingBondParAssetSwapSpread2 = floatingBondParAssetSwap2.fairSpread()
        floatingBondMktAssetSwap2 = AssetSwap(payFixedRate,
                                              floatingBond2, floatingBondMktPrice2,
                                              self.iborIndex, self.spread,
                                              Schedule(),
                                              self.iborIndex.dayCounter(),
                                              mktAssetSwap)
        floatingBondMktAssetSwap2.setPricingEngine(swapEngine)
        floatingBondMktAssetSwapSpread2 = floatingBondMktAssetSwap2.fairSpread()
        error4 = abs(floatingBondMktAssetSwapSpread2-
                     100*floatingBondParAssetSwapSpread2/floatingBondMktFullPrice2)
        self.assertFalse(error4>tolerance ,
                         "wrong asset swap spreads for floating bond:"
                         + "\n market ASW spread: " + str(floatingBondMktAssetSwapSpread2)
                         + "\n par ASW spread: " + str(floatingBondParAssetSwapSpread2)
                         + "\n error: " + str(error4)
                         + "\n tolerance: " + str(tolerance))
        ## CMS Underlying bond (Isin: XS0228052402 CRDIT 0 8/22/20)
        ## maturity doesn't occur on a business day
        cmsBondSchedule1 = Schedule(Date(22,August,2005),
                                    Date(22,August,2020),
                                    Period(Annual), bondCalendar,
                                    Unadjusted, Unadjusted,
                                    DateGeneration.Backward, False)
        # NOTE(review): gearings [1,1.0] is presumably a typo for [1.0]; QuantLib
        # repeats the last gearing, so the two are behaviorally equivalent here.
        cmsBond1 = CmsRateBond(settlementDays, self.faceAmount, cmsBondSchedule1,
                               self.swapIndex, Thirty360(),
                               Following, fixingDays,
                               [1,1.0], [0.0],
                               [0.055], [0.025],
                               inArrears,
                               100.0, Date(22,August,2005))
        cmsBond1.setPricingEngine(bondEngine)
        setCouponPricer(cmsBond1.cashflows(), self.cmspricer)
        self.swapIndex.addFixing(Date(18,August,2006), 0.04158)
        cmsBondMktPrice1 = 88.45 ## market price observed on 7th June 2007
        cmsBondMktFullPrice1 = cmsBondMktPrice1+cmsBond1.accruedAmount()
        cmsBondParAssetSwap1 = AssetSwap(payFixedRate,
                                         cmsBond1, cmsBondMktPrice1,
                                         self.iborIndex, self.spread,
                                         Schedule(),
                                         self.iborIndex.dayCounter(),
                                         parAssetSwap)
        cmsBondParAssetSwap1.setPricingEngine(swapEngine)
        cmsBondParAssetSwapSpread1 = cmsBondParAssetSwap1.fairSpread()
        cmsBondMktAssetSwap1 = AssetSwap(payFixedRate,
                                         cmsBond1, cmsBondMktPrice1,
                                         self.iborIndex, self.spread,
                                         Schedule(),
                                         self.iborIndex.dayCounter(),
                                         mktAssetSwap)
        cmsBondMktAssetSwap1.setPricingEngine(swapEngine)
        cmsBondMktAssetSwapSpread1 = cmsBondMktAssetSwap1.fairSpread()
        error5 = abs(cmsBondMktAssetSwapSpread1-
                     100*cmsBondParAssetSwapSpread1/cmsBondMktFullPrice1)
        self.assertFalse(error5>tolerance,
                         "wrong asset swap spreads for cms bond:"
                         + "\n market ASW spread: " + str(cmsBondMktAssetSwapSpread1)
                         + "\n par ASW spread: " + str(cmsBondParAssetSwapSpread1)
                         + "\n error: " + str(error5)
                         + "\n tolerance: " + str(tolerance))
        ## CMS Underlying bond (Isin: XS0218766664 ISPIM 0 5/6/15)
        ## maturity occurs on a business day
        cmsBondSchedule2 = Schedule(Date(6,May,2005),
                                    Date(6,May,2015),
                                    Period(Annual), bondCalendar,
                                    Unadjusted, Unadjusted,
                                    DateGeneration.Backward, False)
        cmsBond2 = CmsRateBond(settlementDays, self.faceAmount, cmsBondSchedule2,
                               self.swapIndex, Thirty360(),
                               Following, fixingDays,
                               [0.84], [0.0],
                               [], [],
                               inArrears,
                               100.0, Date(6,May,2005))
        cmsBond2.setPricingEngine(bondEngine)
        setCouponPricer(cmsBond2.cashflows(), self.cmspricer)
        self.swapIndex.addFixing(Date(4,May,2006), 0.04217)
        cmsBondMktPrice2 = 94.08 ## market price observed on 7th June 2007
        cmsBondMktFullPrice2 = cmsBondMktPrice2+cmsBond2.accruedAmount()
        cmsBondParAssetSwap2 = AssetSwap(payFixedRate,
                                         cmsBond2, cmsBondMktPrice2,
                                         self.iborIndex, self.spread,
                                         Schedule(),
                                         self.iborIndex.dayCounter(),
                                         parAssetSwap)
        cmsBondParAssetSwap2.setPricingEngine(swapEngine)
        cmsBondParAssetSwapSpread2 = cmsBondParAssetSwap2.fairSpread()
        cmsBondMktAssetSwap2 = AssetSwap(payFixedRate,
                                         cmsBond2, cmsBondMktPrice2,
                                         self.iborIndex, self.spread,
                                         Schedule(),
                                         self.iborIndex.dayCounter(),
                                         mktAssetSwap)
        cmsBondMktAssetSwap2.setPricingEngine(swapEngine)
        cmsBondMktAssetSwapSpread2 = cmsBondMktAssetSwap2.fairSpread()
        error6 = abs(cmsBondMktAssetSwapSpread2-
                     100*cmsBondParAssetSwapSpread2/cmsBondMktFullPrice2)
        self.assertFalse(error6>tolerance,
                         "wrong asset swap spreads for cms bond:"
                         + "\n market ASW spread: " + str(cmsBondMktAssetSwapSpread2)
                         + "\n par ASW spread: " + str(cmsBondParAssetSwapSpread2)
                         + "\n error: " + str(error6)
                         + "\n tolerance: " + str(tolerance))
        ## Zero Coupon bond (Isin: DE0004771662 IBRD 0 12/20/15)
        ## maturity doesn't occur on a business day
        zeroCpnBond1 = ZeroCouponBond(settlementDays, bondCalendar, self.faceAmount,
                                      Date(20,December,2015),
                                      Following,
                                      100.0, Date(19,December,1985))
        zeroCpnBond1.setPricingEngine(bondEngine)
        ## market price observed on 12th June 2007
        zeroCpnBondMktPrice1 = 70.436
        zeroCpnBondMktFullPrice1 = zeroCpnBondMktPrice1+zeroCpnBond1.accruedAmount()
        zeroCpnBondParAssetSwap1 = AssetSwap(payFixedRate,zeroCpnBond1,
                                             zeroCpnBondMktPrice1,
                                             self.iborIndex, self.spread,
                                             Schedule(),
                                             self.iborIndex.dayCounter(),
                                             parAssetSwap)
        zeroCpnBondParAssetSwap1.setPricingEngine(swapEngine)
        zeroCpnBondParAssetSwapSpread1 = zeroCpnBondParAssetSwap1.fairSpread()
        zeroCpnBondMktAssetSwap1 = AssetSwap(payFixedRate,zeroCpnBond1,
                                             zeroCpnBondMktPrice1,
                                             self.iborIndex, self.spread,
                                             Schedule(),
                                             self.iborIndex.dayCounter(),
                                             mktAssetSwap)
        zeroCpnBondMktAssetSwap1.setPricingEngine(swapEngine)
        zeroCpnBondMktAssetSwapSpread1 = zeroCpnBondMktAssetSwap1.fairSpread()
        error7 = abs(zeroCpnBondMktAssetSwapSpread1-
                     100*zeroCpnBondParAssetSwapSpread1/zeroCpnBondMktFullPrice1)
        self.assertFalse(error7>tolerance,
                         "wrong asset swap spreads for zero cpn bond:"
                         + "\n market ASW spread: " + str(zeroCpnBondMktAssetSwapSpread1)
                         + "\n par ASW spread: " + str(zeroCpnBondParAssetSwapSpread1)
                         + "\n error: " + str(error7)
                         + "\n tolerance: " + str(tolerance))
        ## Zero Coupon bond (Isin: IT0001200390 ISPIM 0 02/17/28)
        ## maturity occurs on a business day
        zeroCpnBond2 =ZeroCouponBond(settlementDays, bondCalendar, self.faceAmount,
                                     Date(17,February,2028),
                                     Following,
                                     100.0, Date(17,February,1998))
        zeroCpnBond2.setPricingEngine(bondEngine)
        ## zeroCpnBondPrice2 = zeroCpnBond2.cleanPrice()
        ## market price observed on 12th June 2007
        zeroCpnBondMktPrice2 = 35.160
        zeroCpnBondMktFullPrice2 = zeroCpnBondMktPrice2+zeroCpnBond2.accruedAmount()
        zeroCpnBondParAssetSwap2 = AssetSwap(payFixedRate,zeroCpnBond2,
                                             zeroCpnBondMktPrice2,
                                             self.iborIndex, self.spread,
                                             Schedule(),
                                             self.iborIndex.dayCounter(),
                                             parAssetSwap)
        zeroCpnBondParAssetSwap2.setPricingEngine(swapEngine)
        zeroCpnBondParAssetSwapSpread2 = zeroCpnBondParAssetSwap2.fairSpread()
        zeroCpnBondMktAssetSwap2 = AssetSwap(payFixedRate,zeroCpnBond2,
                                             zeroCpnBondMktPrice2,
                                             self.iborIndex, self.spread,
                                             Schedule(),
                                             self.iborIndex.dayCounter(),
                                             mktAssetSwap)
        zeroCpnBondMktAssetSwap2.setPricingEngine(swapEngine)
        zeroCpnBondMktAssetSwapSpread2 = zeroCpnBondMktAssetSwap2.fairSpread()
        error8 = abs(zeroCpnBondMktAssetSwapSpread2-
                     100*zeroCpnBondParAssetSwapSpread2/zeroCpnBondMktFullPrice2)
        self.assertFalse(error8>tolerance,
                         "wrong asset swap spreads for zero cpn bond:"
                         + "\n market ASW spread: " + str(zeroCpnBondMktAssetSwapSpread2)
                         + "\n par ASW spread: " + str(zeroCpnBondParAssetSwapSpread2)
                         + "\n error: " + str(error8)
                         + "\n tolerance: " + str(tolerance))
def testZSpread(self) :
"""Testing clean and dirty price with null Z-spread against theoretical prices..."""
bondCalendar = TARGET()
settlementDays = 3
fixingDays = 2
inArrears = False
## Fixed bond (Isin: DE0001135275 DBR 4 01/04/37)
## maturity doesn't occur on a business day
fixedBondSchedule1 = Schedule(Date(4,January,2005),
Date(4,January,2037),
Period(Annual), bondCalendar,
Unadjusted, Unadjusted,
DateGeneration.Backward, False)
fixedBond1 = FixedRateBond(settlementDays, self.faceAmount, fixedBondSchedule1,
[0.04],
ActualActual(ActualActual.ISDA), Following,
100.0, Date(4,January,2005))
bondEngine = DiscountingBondEngine(self.termStructure)
fixedBond1.setPricingEngine(bondEngine)
fixedBondImpliedValue1 = fixedBond1.cleanPrice()
fixedBondSettlementDate1= fixedBond1.settlementDate()
## standard market conventions:
## bond's frequency + coumpounding and daycounter of the YC...
fixedBondCleanPrice1 = cleanPriceFromZSpread(fixedBond1,self.yieldCurve, self.spread,
Actual365Fixed(), self.compounding, Annual,
fixedBondSettlementDate1)
tolerance = 1.0e-13
error1 = abs(fixedBondImpliedValue1-fixedBondCleanPrice1)
self.assertFalse(error1>tolerance,
"wrong clean price for fixed bond:"
+ "\n market asset swap spread: " + str(fixedBondImpliedValue1)
+ "\n par asset swap spread: " + str(fixedBondCleanPrice1)
+ "\n error: " + str(error1)
+ "\n tolerance: " + str(tolerance))
## Fixed bond (Isin: IT0006527060 IBRD 5 02/05/19)
## maturity occurs on a business day
fixedBondSchedule2 = Schedule (Date(5,February,2005),
Date(5,February,2019),
Period(Annual), bondCalendar,
Unadjusted, Unadjusted,
DateGeneration.Backward, False)
fixedBond2 = FixedRateBond(settlementDays, self.faceAmount, fixedBondSchedule2,
[0.05],
Thirty360(Thirty360.BondBasis), Following,
100.0, Date(5,February,2005))
fixedBond2.setPricingEngine(bondEngine)
fixedBondImpliedValue2 = fixedBond2.cleanPrice()
fixedBondSettlementDate2= fixedBond2.settlementDate()
## standard market conventions:
## bond's frequency + coumpounding and daycounter of the YieldCurve
fixedBondCleanPrice2 = cleanPriceFromZSpread(
fixedBond2, self.yieldCurve, self.spread,
Actual365Fixed(), self.compounding, Annual,
fixedBondSettlementDate2)
error3 = abs(fixedBondImpliedValue2-fixedBondCleanPrice2)
self.assertFalse(error3>tolerance,
"wrong clean price for fixed bond:"
+ "\n market asset swap spread: " + str(fixedBondImpliedValue2)
+ "\n par asset swap spread: " + str(fixedBondCleanPrice2)
+ "\n error: " + str(error3)
+ "\n tolerance: " + str(tolerance))
## FRN bond (Isin: IT0003543847 ISPIM 0 09/29/13)
## maturity doesn't occur on a business day
floatingBondSchedule1 = Schedule(Date(29,September,2003),
Date(29,September,2013),
Period(Semiannual), bondCalendar,
Unadjusted, Unadjusted,
DateGeneration.Backward, False)
floatingBond1 = FloatingRateBond(settlementDays, self.faceAmount,
floatingBondSchedule1,
self.iborIndex, Actual360(),
Following, fixingDays,
[1], [0.0056],
[], [],
inArrears,
100.0, Date(29,September,2003))
floatingBond1.setPricingEngine(bondEngine)
setCouponPricer(floatingBond1.cashflows(), self.pricer)
self.iborIndex.addFixing(Date(27,March,2007), 0.0402)
floatingBondImpliedValue1 = floatingBond1.cleanPrice()
floatingBondSettlementDate1= floatingBond1.settlementDate()
## standard market conventions:
## bond's frequency + coumpounding and daycounter of the YieldCurve
floatingBondCleanPrice1 = cleanPriceFromZSpread(
floatingBond1, self.yieldCurve, self.spread,
Actual365Fixed(), self.compounding, Semiannual,
fixedBondSettlementDate1)
error5 = abs(floatingBondImpliedValue1-floatingBondCleanPrice1)
self.assertFalse(error5>tolerance,
"wrong clean price for fixed bond:"
+ "\n market asset swap spread: " + str(floatingBondImpliedValue1)
+ "\n par asset swap spread: " + str(floatingBondCleanPrice1)
+ "\n error: " + str(error5)
+ "\n tolerance: " + str(tolerance))
## FRN bond (Isin: XS0090566539 COE 0 09/24/18)
## maturity occurs on a business day
floatingBondSchedule2 = Schedule(Date(24,September,2004),
Date(24,September,2018),
Period(Semiannual), bondCalendar,
ModifiedFollowing, ModifiedFollowing,
DateGeneration.Backward, False)
floatingBond2 = FloatingRateBond(settlementDays, self.faceAmount,
floatingBondSchedule2,
self.iborIndex, Actual360(),
ModifiedFollowing, fixingDays,
[1], [0.0025],
[], [],
inArrears,
100.0, Date(24,September,2004))
floatingBond2.setPricingEngine(bondEngine)
setCouponPricer(floatingBond2.cashflows(), self.pricer)
self.iborIndex.addFixing(Date(22,March,2007), 0.04013)
floatingBondImpliedValue2 = floatingBond2.cleanPrice()
floatingBondSettlementDate2= floatingBond2.settlementDate()
## standard market conventions:
## bond's frequency + coumpounding and daycounter of the YieldCurve
floatingBondCleanPrice2 = cleanPriceFromZSpread(
floatingBond2, self.yieldCurve,
self.spread, Actual365Fixed(), self.compounding, Semiannual,
fixedBondSettlementDate1)
error7 = abs(floatingBondImpliedValue2-floatingBondCleanPrice2)
self.assertFalse(error7>tolerance,
"wrong clean price for fixed bond:"
+ "\n market asset swap spread: " + str(floatingBondImpliedValue2)
+ "\n par asset swap spread: " + str(floatingBondCleanPrice2)
+ "\n error: " + str(error7)
+ "\n tolerance: " + str(tolerance))
#### CMS bond (Isin: XS0228052402 CRDIT 0 8/22/20)
#### maturity doesn't occur on a business day
cmsBondSchedule1 = Schedule(Date(22,August,2005),
Date(22,August,2020),
Period(Annual), bondCalendar,
Unadjusted, Unadjusted,
DateGeneration.Backward, False)
cmsBond1 = CmsRateBond(settlementDays, self.faceAmount, cmsBondSchedule1,
self.swapIndex, Thirty360(),
Following, fixingDays,
[1.0], [0.0],
[0.055], [0.025],
inArrears,
100.0, Date(22,August,2005))
cmsBond1.setPricingEngine(bondEngine)
setCouponPricer(cmsBond1.cashflows(), self.cmspricer)
self.swapIndex.addFixing(Date(18,August,2006), 0.04158)
cmsBondImpliedValue1 = cmsBond1.cleanPrice()
cmsBondSettlementDate1= cmsBond1.settlementDate()
## standard market conventions:
## bond's frequency + coumpounding and daycounter of the YieldCurve
cmsBondCleanPrice1 = cleanPriceFromZSpread(
cmsBond1, self.yieldCurve, self.spread,
Actual365Fixed(), self.compounding, Annual,
cmsBondSettlementDate1)
error9 = abs(cmsBondImpliedValue1-cmsBondCleanPrice1)
self.assertFalse(error9>tolerance,
"wrong clean price for fixed bond:"
+ "\n market asset swap spread: " + str(cmsBondImpliedValue1)
+ "\n par asset swap spread: " + str(cmsBondCleanPrice1)
+ "\n error: " + str(error9)
+ "\n tolerance: " + str(tolerance))
## CMS bond (Isin: XS0218766664 ISPIM 0 5/6/15)
## maturity occurs on a business day
cmsBondSchedule2 = Schedule(Date(6,May,2005),
Date(6,May,2015),
Period(Annual), bondCalendar,
Unadjusted, Unadjusted,
DateGeneration.Backward, False)
cmsBond2 = CmsRateBond(settlementDays, self.faceAmount, cmsBondSchedule2,
self.swapIndex, Thirty360(),
Following, fixingDays,
[0.84], [0.0],
[], [],
inArrears,
100.0, Date(6,May,2005))
cmsBond2.setPricingEngine(bondEngine)
setCouponPricer(cmsBond2.cashflows(), self.cmspricer)
self.swapIndex.addFixing(Date(4,May,2006), 0.04217)
cmsBondImpliedValue2 = cmsBond2.cleanPrice()
cmsBondSettlementDate2= cmsBond2.settlementDate()
## standard market conventions:
## bond's frequency + coumpounding and daycounter of the YieldCurve
cmsBondCleanPrice2 = cleanPriceFromZSpread(
cmsBond2, self.yieldCurve, self.spread,
Actual365Fixed(), self.compounding, Annual,
cmsBondSettlementDate2)
error11 = abs(cmsBondImpliedValue2-cmsBondCleanPrice2)
self.assertFalse(error11>tolerance,
"wrong clean price for fixed bond:"
+ "\n market asset swap spread: " + str(cmsBondImpliedValue2)
+ "\n par asset swap spread: " + str(cmsBondCleanPrice2)
+ "\n error: " + str(error11)
+ "\n tolerance: " + str(tolerance))
## Zero-Coupon bond (Isin: DE0004771662 IBRD 0 12/20/15)
## maturity doesn't occur on a business day
zeroCpnBond1 = ZeroCouponBond(settlementDays, bondCalendar, self.faceAmount,
Date(20,December,2015),
Following,
100.0, Date(19,December,1985))
zeroCpnBond1.setPricingEngine(bondEngine)
zeroCpnBondImpliedValue1 = zeroCpnBond1.cleanPrice()
zeroCpnBondSettlementDate1= zeroCpnBond1.settlementDate()
## standard market conventions:
## bond's frequency + coumpounding and daycounter of the YieldCurve
zeroCpnBondCleanPrice1 = cleanPriceFromZSpread(zeroCpnBond1,
self.yieldCurve,
self.spread,
Actual365Fixed(),
self.compounding, Annual,
zeroCpnBondSettlementDate1)
error13 = abs(zeroCpnBondImpliedValue1-zeroCpnBondCleanPrice1)
self.assertFalse(error13>tolerance,
"wrong clean price for zero coupon bond:"
+ "\n zero cpn implied value: " + str(zeroCpnBondImpliedValue1)
+ "\n zero cpn price: " + str(zeroCpnBondCleanPrice1)
+ "\n error: " + str(error13)
+ "\n tolerance: " + str(tolerance))
## Zero Coupon bond (Isin: IT0001200390 ISPIM 0 02/17/28)
## maturity doesn't occur on a business day
zeroCpnBond2 = ZeroCouponBond(settlementDays, bondCalendar, self.faceAmount,
Date(17,February,2028),
Following,
100.0, Date(17,February,1998))
zeroCpnBond2.setPricingEngine(bondEngine)
zeroCpnBondImpliedValue2 = zeroCpnBond2.cleanPrice()
zeroCpnBondSettlementDate2= zeroCpnBond2.settlementDate()
## standard market conventions:
## bond's frequency + coumpounding and daycounter of the YieldCurve
zeroCpnBondCleanPrice2 = cleanPriceFromZSpread(zeroCpnBond2,
self.yieldCurve,
self.spread,
Actual365Fixed(),
self.compounding, Annual,
zeroCpnBondSettlementDate2)
error15 = abs(zeroCpnBondImpliedValue2-zeroCpnBondCleanPrice2)
self.assertFalse(error15>tolerance,
"wrong clean price for zero coupon bond:"
+ "\n zero cpn implied value: " + str(zeroCpnBondImpliedValue2)
+ "\n zero cpn price: " + str(zeroCpnBondCleanPrice2)
+ "\n error: " + str(error15)
+ "\n tolerance: " + str(tolerance))
def testGenericBondImplied(self):
"""Testing implied generic-bond value against asset-swap fair price with null spread..."""
bondCalendar = TARGET()
settlementDays = 3
fixingDays = 2
payFixedRate = True
parAssetSwap = True
inArrears = False
## Fixed Underlying bond (Isin: DE0001135275 DBR 4 01/04/37)
## maturity doesn't occur on a business day
fixedBondStartDate1 = Date(4,January,2005)
fixedBondMaturityDate1 = Date(4,January,2037)
fixedBondSchedule1 = Schedule(fixedBondStartDate1,
fixedBondMaturityDate1,
Period(Annual), bondCalendar,
Unadjusted, Unadjusted,
DateGeneration.Backward, False)
fixedBondLeg1 = list(FixedRateLeg(fixedBondSchedule1,
ActualActual(ActualActual.ISDA),
[self.faceAmount],
[0.04]))
fixedbondRedemption1 = bondCalendar.adjust(fixedBondMaturityDate1,
Following)
fixedBondLeg1.append(SimpleCashFlow(100.0, fixedbondRedemption1))
fixedBond1 = Bond(settlementDays, bondCalendar, self.faceAmount,
fixedBondMaturityDate1, fixedBondStartDate1,
tuple(fixedBondLeg1))
bondEngine = DiscountingBondEngine(self.termStructure)
swapEngine = DiscountingSwapEngine(self.termStructure,True)
fixedBond1.setPricingEngine(bondEngine)
fixedBondPrice1 = fixedBond1.cleanPrice()
fixedBondAssetSwap1 = AssetSwap(payFixedRate,
fixedBond1, fixedBondPrice1,
self.iborIndex, self.spread,
Schedule(),
self.iborIndex.dayCounter(),
parAssetSwap)
fixedBondAssetSwap1.setPricingEngine(swapEngine)
fixedBondAssetSwapPrice1 = fixedBondAssetSwap1.fairCleanPrice()
tolerance = 1.0e-13
error1 = abs(fixedBondAssetSwapPrice1-fixedBondPrice1)
self.assertFalse(error1>tolerance,
"wrong zero spread asset swap price for fixed bond:"
+ "\n bond's clean price: " + str(fixedBondPrice1)
+ "\n asset swap fair price: " + str(fixedBondAssetSwapPrice1)
+ "\n error: " + str(error1)
+ "\n tolerance: " + str(tolerance))
## Fixed Underlying bond (Isin: IT0006527060 IBRD 5 02/05/19)
## maturity occurs on a business day
fixedBondStartDate2 = Date(5,February,2005)
fixedBondMaturityDate2 = Date(5,February,2019)
fixedBondSchedule2 = Schedule(fixedBondStartDate2,
fixedBondMaturityDate2,
Period(Annual), bondCalendar,
Unadjusted, Unadjusted,
DateGeneration.Backward, False)
fixedBondLeg2 = list(FixedRateLeg(fixedBondSchedule2,Thirty360(Thirty360.BondBasis),
[self.faceAmount],[0.05]))
fixedbondRedemption2 = bondCalendar.adjust(fixedBondMaturityDate2,Following)
fixedBondLeg2.append(SimpleCashFlow(100.0, fixedbondRedemption2))
fixedBond2 = Bond(settlementDays, bondCalendar, self.faceAmount,
fixedBondMaturityDate2, fixedBondStartDate2, tuple(fixedBondLeg2))
fixedBond2.setPricingEngine(bondEngine)
fixedBondPrice2 = fixedBond2.cleanPrice()
fixedBondAssetSwap2 = AssetSwap(payFixedRate,
fixedBond2, fixedBondPrice2,
self.iborIndex, self.spread,
Schedule(),
self.iborIndex.dayCounter(),
parAssetSwap)
fixedBondAssetSwap2.setPricingEngine(swapEngine)
fixedBondAssetSwapPrice2 = fixedBondAssetSwap2.fairCleanPrice()
error2 = abs(fixedBondAssetSwapPrice2-fixedBondPrice2)
self.assertFalse(error2>tolerance,
"wrong zero spread asset swap price for fixed bond:"
+ "\n bond's clean price: " + str(fixedBondPrice2)
+ "\n asset swap fair price: " + str(fixedBondAssetSwapPrice2)
+ "\n error: " + str(error2)
+ "\n tolerance: " + str(tolerance))
## FRN Underlying bond (Isin: IT0003543847 ISPIM 0 09/29/13)
## maturity doesn't occur on a business day
floatingBondStartDate1 = Date(29,September,2003)
floatingBondMaturityDate1 = Date(29,September,2013)
floatingBondSchedule1 = Schedule(floatingBondStartDate1,
floatingBondMaturityDate1,
Period(Semiannual), bondCalendar,
Unadjusted, Unadjusted,
DateGeneration.Backward, False)
floatingBondLeg1 = list(IborLeg([self.faceAmount],floatingBondSchedule1, self.iborIndex,
Actual360(),ModifiedFollowing, [fixingDays],[],[0.0056],[],[],inArrears))
floatingbondRedemption1 = bondCalendar.adjust(floatingBondMaturityDate1, Following)
floatingBondLeg1.append(SimpleCashFlow(100.0, floatingbondRedemption1))
floatingBond1 = Bond(settlementDays, bondCalendar, self.faceAmount,
floatingBondMaturityDate1, floatingBondStartDate1,
tuple(floatingBondLeg1))
floatingBond1.setPricingEngine(bondEngine)
setCouponPricer(floatingBond1.cashflows(), self.pricer)
self.iborIndex.addFixing(Date(27,March,2007), 0.0402)
floatingBondPrice1 = floatingBond1.cleanPrice()
floatingBondAssetSwap1 = AssetSwap (payFixedRate,
floatingBond1, floatingBondPrice1,
self.iborIndex, self.spread,
Schedule(),
self.iborIndex.dayCounter(),
parAssetSwap)
floatingBondAssetSwap1.setPricingEngine(swapEngine)
floatingBondAssetSwapPrice1 = floatingBondAssetSwap1.fairCleanPrice()
error3 = abs(floatingBondAssetSwapPrice1-floatingBondPrice1)
self.assertFalse(error3>tolerance,
"wrong zero spread asset swap price for floater:"
+ "\n bond's clean price: " + str(floatingBondPrice1)
+ "\n asset swap fair price: " + str(floatingBondAssetSwapPrice1)
+ "\n error: " + str(error3)
+ "\n tolerance: " + str(tolerance))
## FRN Underlying bond (Isin: XS0090566539 COE 0 09/24/18)
## maturity occurs on a business day
floatingBondStartDate2 = Date(24,September,2004)
floatingBondMaturityDate2 = Date(24,September,2018)
floatingBondSchedule2 =Schedule(floatingBondStartDate2,
floatingBondMaturityDate2,
Period(Semiannual), bondCalendar,
ModifiedFollowing, ModifiedFollowing,
DateGeneration.Backward, False)
floatingBondLeg2 = list(IborLeg([self.faceAmount],floatingBondSchedule2, self.iborIndex,
Actual360(),ModifiedFollowing,[fixingDays],[],[0.0025],[],[],inArrears))
floatingbondRedemption2 = bondCalendar.adjust(floatingBondMaturityDate2, ModifiedFollowing)
floatingBondLeg2.append(SimpleCashFlow(100.0, floatingbondRedemption2))
floatingBond2 = Bond(settlementDays, bondCalendar, self.faceAmount,
floatingBondMaturityDate2, floatingBondStartDate2,
tuple(floatingBondLeg2))
floatingBond2.setPricingEngine(bondEngine)
setCouponPricer(floatingBond2.cashflows(), self.pricer)
self.iborIndex.addFixing(Date(22,March,2007), 0.04013)
currentCoupon=0.04013+0.0025
floatingCurrentCoupon= floatingBond2.nextCouponRate()
error4= abs(floatingCurrentCoupon-currentCoupon)
self.assertFalse(error4>tolerance,
"wrong current coupon is returned for floater bond:"
+ "\n bond's calculated current coupon: " + str(currentCoupon)
+ "\n current coupon asked to the bond: " + str(floatingCurrentCoupon)
+ "\n error: " + str(error4)
+ "\n tolerance: " + str(tolerance))
floatingBondPrice2 = floatingBond2.cleanPrice()
floatingBondAssetSwap2 = AssetSwap(payFixedRate,
floatingBond2, floatingBondPrice2,
self.iborIndex, self.spread,
Schedule(),
self.iborIndex.dayCounter(),
parAssetSwap)
floatingBondAssetSwap2.setPricingEngine(swapEngine)
floatingBondAssetSwapPrice2 = floatingBondAssetSwap2.fairCleanPrice()
error5 = abs(floatingBondAssetSwapPrice2-floatingBondPrice2)
self.assertFalse(error5>tolerance,
"wrong zero spread asset swap price for floater:"
+ "\n bond's clean price: " + str(floatingBondPrice2)
+ "\n asset swap fair price: " + str(floatingBondAssetSwapPrice2)
+ "\n error: " + str(error5)
+ "\n tolerance: " + str(tolerance))
## CMS Underlying bond (Isin: XS0228052402 CRDIT 0 8/22/20)
## maturity doesn't occur on a business day
cmsBondStartDate1 = Date(22,August,2005)
cmsBondMaturityDate1 = Date(22,August,2020)
cmsBondSchedule1 = Schedule(cmsBondStartDate1,
cmsBondMaturityDate1,
Period(Annual), bondCalendar,
Unadjusted, Unadjusted,
DateGeneration.Backward, False)
cmsBondLeg1 = list(CmsLeg([self.faceAmount],cmsBondSchedule1, self.swapIndex,
Thirty360(),Following,[fixingDays],[],[0.055],[0.025],[],inArrears))
cmsbondRedemption1 = bondCalendar.adjust(cmsBondMaturityDate1, Following)
cmsBondLeg1.append(SimpleCashFlow(100.0, cmsbondRedemption1))
cmsBond1 = Bond(settlementDays, bondCalendar, self.faceAmount,
cmsBondMaturityDate1, cmsBondStartDate1, tuple(cmsBondLeg1))
cmsBond1.setPricingEngine(bondEngine)
setCouponPricer(cmsBond1.cashflows(), self.cmspricer)
self.swapIndex.addFixing(Date(18,August,2006), 0.04158)
cmsBondPrice1 = cmsBond1.cleanPrice()
cmsBondAssetSwap1 = AssetSwap(payFixedRate,
cmsBond1, cmsBondPrice1,
self.iborIndex, self.spread,
Schedule(),
self.iborIndex.dayCounter(),
parAssetSwap)
cmsBondAssetSwap1.setPricingEngine(swapEngine)
cmsBondAssetSwapPrice1 = cmsBondAssetSwap1.fairCleanPrice()
error6 = abs(cmsBondAssetSwapPrice1-cmsBondPrice1)
self.assertFalse(error6>tolerance,
"wrong zero spread asset swap price for cms bond:"
+ "\n bond's clean price: " + str(cmsBondPrice1)
+ "\n asset swap fair price: " + str(cmsBondAssetSwapPrice1)
+ "\n error: " + str(error6)
+ "\n tolerance: " + str(tolerance))
## CMS Underlying bond (Isin: XS0218766664 ISPIM 0 5/6/15)
## maturity occurs on a business day
cmsBondStartDate2 = Date(6,May,2005)
cmsBondMaturityDate2 = Date(6,May,2015)
cmsBondSchedule2 = Schedule(cmsBondStartDate2,
cmsBondMaturityDate2,
Period(Annual), bondCalendar,
Unadjusted, Unadjusted,
DateGeneration.Backward, False)
cmsBondLeg2 = list(CmsLeg([self.faceAmount],cmsBondSchedule2, self.swapIndex,
Thirty360(),Following,[fixingDays],[0.84],[],[],[],inArrears))
cmsbondRedemption2 = bondCalendar.adjust(cmsBondMaturityDate2, Following)
cmsBondLeg2.append(SimpleCashFlow(100.0, cmsbondRedemption2))
cmsBond2 = Bond(settlementDays, bondCalendar, self.faceAmount,
cmsBondMaturityDate2, cmsBondStartDate2, tuple(cmsBondLeg2))
cmsBond2.setPricingEngine(bondEngine)
setCouponPricer(cmsBond2.cashflows(), self.cmspricer)
self.swapIndex.addFixing(Date(4,May,2006), 0.04217)
cmsBondPrice2 = cmsBond2.cleanPrice()
cmsBondAssetSwap2 = AssetSwap(payFixedRate,
cmsBond2, cmsBondPrice2,
self.iborIndex, self.spread,
Schedule(),
self.iborIndex.dayCounter(),
parAssetSwap)
cmsBondAssetSwap2.setPricingEngine(swapEngine)
cmsBondAssetSwapPrice2 = cmsBondAssetSwap2.fairCleanPrice()
error7 = abs(cmsBondAssetSwapPrice2-cmsBondPrice2)
self.assertFalse(error7>tolerance,
"wrong zero spread asset swap price for cms bond:"
+ "\n bond's clean price: " + str(cmsBondPrice2)
+ "\n asset swap fair price: " + str(cmsBondAssetSwapPrice2)
+ "\n error: " + str(error7)
+ "\n tolerance: " + str(tolerance))
## Zero Coupon bond (Isin: DE0004771662 IBRD 0 12/20/15)
## maturity doesn't occur on a business day
zeroCpnBondStartDate1 = Date(19,December,1985)
zeroCpnBondMaturityDate1 = Date(20,December,2015)
zeroCpnBondRedemption1 = bondCalendar.adjust(zeroCpnBondMaturityDate1,
Following)
zeroCpnBondLeg1 = Leg([SimpleCashFlow(100.0, zeroCpnBondRedemption1)])
zeroCpnBond1 = Bond(settlementDays, bondCalendar, self.faceAmount,
zeroCpnBondMaturityDate1, zeroCpnBondStartDate1, zeroCpnBondLeg1)
zeroCpnBond1.setPricingEngine(bondEngine)
zeroCpnBondPrice1 = zeroCpnBond1.cleanPrice()
zeroCpnAssetSwap1 = AssetSwap (payFixedRate,
zeroCpnBond1, zeroCpnBondPrice1,
self.iborIndex, self.spread,
Schedule(),
self.iborIndex.dayCounter(),
parAssetSwap)
zeroCpnAssetSwap1.setPricingEngine(swapEngine)
zeroCpnBondAssetSwapPrice1 = zeroCpnAssetSwap1.fairCleanPrice()
error8 = abs(zeroCpnBondAssetSwapPrice1-zeroCpnBondPrice1)
self.assertFalse(error8>tolerance,
"wrong zero spread asset swap price for zero cpn bond:"
+ "\n bond's clean price: " + str(zeroCpnBondPrice1)
+ "\n asset swap fair price: " + str(zeroCpnBondAssetSwapPrice1)
+ "\n error: " + str(error8)
+ "\n tolerance: " + str(tolerance))
## Zero Coupon bond (Isin: IT0001200390 ISPIM 0 02/17/28)
## maturity occurs on a business day
zeroCpnBondStartDate2 = Date(17,February,1998)
zeroCpnBondMaturityDate2 = Date(17,February,2028)
zerocpbondRedemption2 = bondCalendar.adjust(zeroCpnBondMaturityDate2,
Following)
zeroCpnBondLeg2 = Leg([SimpleCashFlow(100.0, zerocpbondRedemption2)])
zeroCpnBond2 = Bond(settlementDays, bondCalendar, self.faceAmount,
zeroCpnBondMaturityDate2, zeroCpnBondStartDate2, zeroCpnBondLeg2)
zeroCpnBond2.setPricingEngine(bondEngine)
zeroCpnBondPrice2 = zeroCpnBond2.cleanPrice()
zeroCpnAssetSwap2 = AssetSwap(payFixedRate,
zeroCpnBond2, zeroCpnBondPrice2,
self.iborIndex, self.spread,
Schedule(),
self.iborIndex.dayCounter(),
parAssetSwap)
zeroCpnAssetSwap2.setPricingEngine(swapEngine)
zeroCpnBondAssetSwapPrice2 = zeroCpnAssetSwap2.fairCleanPrice()
error9 = abs(cmsBondAssetSwapPrice2-cmsBondPrice2)
self.assertFalse(error9>tolerance,
"wrong zero spread asset swap price for zero cpn bond:"
+ "\n bond's clean price: " + str(zeroCpnBondPrice2)
+ "\n asset swap fair price: " + str(zeroCpnBondAssetSwapPrice2)
+ "\n error: " + str(error9)
+ "\n tolerance: " + str(tolerance))
def testMASWWithGenericBond(self):
"""Testing market asset swap against par asset swap with generic bond..."""
bondCalendar = TARGET()
settlementDays = 3
fixingDays = 2
payFixedRate = True
parAssetSwap = True
mktAssetSwap = False
inArrears = False
## Fixed Underlying bond (Isin: DE0001135275 DBR 4 01/04/37)
## maturity doesn't occur on a business day
fixedBondStartDate1 = Date(4,January,2005)
fixedBondMaturityDate1 = Date(4,January,2037)
fixedBondSchedule1 = Schedule(fixedBondStartDate1,
fixedBondMaturityDate1,
Period(Annual), bondCalendar,
Unadjusted, Unadjusted,
DateGeneration.Backward, False)
fixedBondLeg1 = list(FixedRateLeg(fixedBondSchedule1, ActualActual(ActualActual.ISDA), [self.faceAmount], [0.04]))
fixedbondRedemption1 = bondCalendar.adjust(fixedBondMaturityDate1, Following)
fixedBondLeg1.append(SimpleCashFlow(100.0, fixedbondRedemption1))
fixedBond1 = Bond(settlementDays, bondCalendar, self.faceAmount,
fixedBondMaturityDate1, fixedBondStartDate1,
fixedBondLeg1)
bondEngine = DiscountingBondEngine(self.termStructure)
swapEngine = DiscountingSwapEngine(self.termStructure, False)
fixedBond1.setPricingEngine(bondEngine)
fixedBondMktPrice1 = 89.22 ## market price observed on 7th June 2007
fixedBondMktFullPrice1=fixedBondMktPrice1+fixedBond1.accruedAmount()
fixedBondParAssetSwap1 = AssetSwap (payFixedRate,
fixedBond1, fixedBondMktPrice1,
self.iborIndex, self.spread,
Schedule(),
self.iborIndex.dayCounter(),
parAssetSwap)
fixedBondParAssetSwap1.setPricingEngine(swapEngine)
fixedBondParAssetSwapSpread1 = fixedBondParAssetSwap1.fairSpread()
fixedBondMktAssetSwap1 = AssetSwap(payFixedRate,
fixedBond1, fixedBondMktPrice1,
self.iborIndex, self.spread,
Schedule(),
self.iborIndex.dayCounter(),
mktAssetSwap)
fixedBondMktAssetSwap1.setPricingEngine(swapEngine)
fixedBondMktAssetSwapSpread1 = fixedBondMktAssetSwap1.fairSpread()
tolerance = 1.0e-13
error1 = abs(fixedBondMktAssetSwapSpread1-
100*fixedBondParAssetSwapSpread1/fixedBondMktFullPrice1)
self.assertFalse(error1>tolerance,
"wrong asset swap spreads for fixed bond:"
+ "\n market asset swap spread: " + str(fixedBondMktAssetSwapSpread1)
+ "\n par asset swap spread: " + str(fixedBondParAssetSwapSpread1)
+ "\n error: " + str(error1)
+ "\n tolerance: " + str(tolerance))
## Fixed Underlying bond (Isin: IT0006527060 IBRD 5 02/05/19)
## maturity occurs on a business day
fixedBondStartDate2 = Date(5,February,2005)
fixedBondMaturityDate2 = Date(5,February,2019)
fixedBondSchedule2 = Schedule(fixedBondStartDate2,
fixedBondMaturityDate2,
Period(Annual), bondCalendar,
Unadjusted, Unadjusted,
DateGeneration.Backward, False)
fixedBondLeg2 = list(FixedRateLeg(fixedBondSchedule2, Thirty360(Thirty360.BondBasis),[self.faceAmount],[0.05]))
fixedbondRedemption2 = bondCalendar.adjust(fixedBondMaturityDate2, Following)
fixedBondLeg2.append(SimpleCashFlow(100.0, fixedbondRedemption2))
fixedBond2 = Bond(settlementDays, bondCalendar, self.faceAmount,
fixedBondMaturityDate2, fixedBondStartDate2, fixedBondLeg2)
fixedBond2.setPricingEngine(bondEngine)
fixedBondMktPrice2 = 99.98 ## market price observed on 7th June 2007
fixedBondMktFullPrice2 = fixedBondMktPrice2+fixedBond2.accruedAmount()
fixedBondParAssetSwap2 = AssetSwap(payFixedRate,
fixedBond2, fixedBondMktPrice2,
self.iborIndex, self.spread,
Schedule(),
self.iborIndex.dayCounter(),
parAssetSwap)
fixedBondParAssetSwap2.setPricingEngine(swapEngine)
fixedBondParAssetSwapSpread2 = fixedBondParAssetSwap2.fairSpread()
fixedBondMktAssetSwap2 = AssetSwap(payFixedRate,
fixedBond2, fixedBondMktPrice2,
self.iborIndex, self.spread,
Schedule(),
self.iborIndex.dayCounter(),
mktAssetSwap)
fixedBondMktAssetSwap2.setPricingEngine(swapEngine)
fixedBondMktAssetSwapSpread2 = fixedBondMktAssetSwap2.fairSpread()
error2 = abs(fixedBondMktAssetSwapSpread2-
100*fixedBondParAssetSwapSpread2/fixedBondMktFullPrice2)
self.assertFalse(error2>tolerance,
"wrong asset swap spreads for fixed bond:"
+ "\n market asset swap spread: " + str(fixedBondMktAssetSwapSpread2)
+ "\n par asset swap spread: " + str(fixedBondParAssetSwapSpread2)
+ "\n error: " + str(error2)
+ "\n tolerance: " + str(tolerance))
## FRN Underlying bond (Isin: IT0003543847 ISPIM 0 09/29/13)
## maturity doesn't occur on a business day
floatingBondStartDate1 = Date(29,September,2003)
floatingBondMaturityDate1 = Date(29,September,2013)
floatingBondSchedule1 = Schedule(floatingBondStartDate1,
floatingBondMaturityDate1,
Period(Semiannual), bondCalendar,
Unadjusted, Unadjusted,
DateGeneration.Backward, False)
floatingBondLeg1 = list(IborLeg([self.faceAmount],floatingBondSchedule1, self.iborIndex,Actual360(),Following,
[fixingDays], [],[0.0056],[],[],inArrears))
floatingbondRedemption1 = bondCalendar.adjust(floatingBondMaturityDate1, Following)
floatingBondLeg1.append(SimpleCashFlow(100.0, floatingbondRedemption1))
floatingBond1 = Bond(settlementDays, bondCalendar, self.faceAmount,
floatingBondMaturityDate1, floatingBondStartDate1,
floatingBondLeg1)
floatingBond1.setPricingEngine(bondEngine)
setCouponPricer(floatingBond1.cashflows(), self.pricer)
self.iborIndex.addFixing(Date(27,March,2007), 0.0402)
## market price observed on 7th June 2007
floatingBondMktPrice1 = 101.64
floatingBondMktFullPrice1 = floatingBondMktPrice1+floatingBond1.accruedAmount()
floatingBondParAssetSwap1 = AssetSwap(payFixedRate,
floatingBond1, floatingBondMktPrice1,
self.iborIndex, self.spread,
Schedule(),
self.iborIndex.dayCounter(),
parAssetSwap)
floatingBondParAssetSwap1.setPricingEngine(swapEngine)
floatingBondParAssetSwapSpread1 = floatingBondParAssetSwap1.fairSpread()
floatingBondMktAssetSwap1 = AssetSwap(payFixedRate,
floatingBond1, floatingBondMktPrice1,
self.iborIndex, self.spread,
Schedule(),
self.iborIndex.dayCounter(),
mktAssetSwap)
floatingBondMktAssetSwap1.setPricingEngine(swapEngine)
floatingBondMktAssetSwapSpread1 = floatingBondMktAssetSwap1.fairSpread()
error3 = abs(floatingBondMktAssetSwapSpread1-
100*floatingBondParAssetSwapSpread1/floatingBondMktFullPrice1)
self.assertFalse(error3>tolerance,
"wrong asset swap spreads for floating bond:"
+ "\n market asset swap spread: " + str(floatingBondMktAssetSwapSpread1)
+ "\n par asset swap spread: " + str(floatingBondParAssetSwapSpread1)
+ "\n error: " + str(error3)
+ "\n tolerance: " + str(tolerance))
## FRN Underlying bond (Isin: XS0090566539 COE 0 09/24/18)
## maturity occurs on a business day
floatingBondStartDate2 = Date(24,September,2004)
floatingBondMaturityDate2 = Date(24,September,2018)
floatingBondSchedule2 = Schedule(floatingBondStartDate2,
floatingBondMaturityDate2,
Period(Semiannual), bondCalendar,
ModifiedFollowing, ModifiedFollowing,
DateGeneration.Backward, False)
floatingBondLeg2 = list(IborLeg([self.faceAmount],floatingBondSchedule2, self.iborIndex, Actual360(),
ModifiedFollowing, [fixingDays], [], [0.0025] , [],[], inArrears))
floatingbondRedemption2 = bondCalendar.adjust(floatingBondMaturityDate2, ModifiedFollowing)
floatingBondLeg2.append(SimpleCashFlow(100.0, floatingbondRedemption2))
floatingBond2 = Bond(settlementDays, bondCalendar, self.faceAmount,
floatingBondMaturityDate2, floatingBondStartDate2,
floatingBondLeg2)
floatingBond2.setPricingEngine(bondEngine)
setCouponPricer(floatingBond2.cashflows(), self.pricer)
self.iborIndex.addFixing(Date(22,March,2007), 0.04013)
## market price observed on 7th June 2007
floatingBondMktPrice2 = 101.248
floatingBondMktFullPrice2 = floatingBondMktPrice2+floatingBond2.accruedAmount()
floatingBondParAssetSwap2 = AssetSwap(payFixedRate,
floatingBond2, floatingBondMktPrice2,
self.iborIndex, self.spread,
Schedule(),
self.iborIndex.dayCounter(),
parAssetSwap)
floatingBondParAssetSwap2.setPricingEngine(swapEngine)
floatingBondParAssetSwapSpread2 = floatingBondParAssetSwap2.fairSpread()
floatingBondMktAssetSwap2 = AssetSwap(payFixedRate,
floatingBond2, floatingBondMktPrice2,
self.iborIndex, self.spread,
Schedule(),
self.iborIndex.dayCounter(),
mktAssetSwap)
floatingBondMktAssetSwap2.setPricingEngine(swapEngine)
floatingBondMktAssetSwapSpread2 = floatingBondMktAssetSwap2.fairSpread()
error4 = abs(floatingBondMktAssetSwapSpread2-
100*floatingBondParAssetSwapSpread2/floatingBondMktFullPrice2)
self.assertFalse(error4>tolerance,
"wrong asset swap spreads for floating bond:"
+ "\n market asset swap spread: " + str(floatingBondMktAssetSwapSpread2)
+ "\n par asset swap spread: " + str(floatingBondParAssetSwapSpread2)
+ "\n error: " + str(error4)
+ "\n tolerance: " + str(tolerance))
## CMS Underlying bond (Isin: XS0228052402 CRDIT 0 8/22/20)
## maturity doesn't occur on a business day
cmsBondStartDate1 = Date(22,August,2005)
cmsBondMaturityDate1 = Date(22,August,2020)
cmsBondSchedule1 = Schedule(cmsBondStartDate1,
cmsBondMaturityDate1,
Period(Annual), bondCalendar,
Unadjusted, Unadjusted,
DateGeneration.Backward, False)
cmsBondLeg1 = list(CmsLeg([self.faceAmount],cmsBondSchedule1, self.swapIndex,
Thirty360(),Following,[fixingDays],[],[],[0.055],[0.025],inArrears))
cmsbondRedemption1 = bondCalendar.adjust(cmsBondMaturityDate1, Following)
cmsBondLeg1.append(SimpleCashFlow(100.0, cmsbondRedemption1))
cmsBond1 = Bond(settlementDays, bondCalendar, self.faceAmount,
cmsBondMaturityDate1, cmsBondStartDate1, cmsBondLeg1)
cmsBond1.setPricingEngine(bondEngine)
setCouponPricer(cmsBond1.cashflows(), self.cmspricer)
self.swapIndex.addFixing(Date(18,August,2006), 0.04158)
cmsBondMktPrice1 = 88.45 ## market price observed on 7th June 2007
cmsBondMktFullPrice1 = cmsBondMktPrice1+cmsBond1.accruedAmount()
cmsBondParAssetSwap1 = AssetSwap(payFixedRate,
cmsBond1, cmsBondMktPrice1,
self.iborIndex, self.spread,
Schedule(),
self.iborIndex.dayCounter(),
parAssetSwap)
cmsBondParAssetSwap1.setPricingEngine(swapEngine)
cmsBondParAssetSwapSpread1 = cmsBondParAssetSwap1.fairSpread()
cmsBondMktAssetSwap1 = AssetSwap(payFixedRate,
cmsBond1, cmsBondMktPrice1,
self.iborIndex, self.spread,
Schedule(),
self.iborIndex.dayCounter(),
mktAssetSwap)
cmsBondMktAssetSwap1.setPricingEngine(swapEngine)
cmsBondMktAssetSwapSpread1 = cmsBondMktAssetSwap1.fairSpread()
error5 = abs(cmsBondMktAssetSwapSpread1-
100*cmsBondParAssetSwapSpread1/cmsBondMktFullPrice1)
self.assertFalse(error5>tolerance,
"wrong asset swap spreads for cms bond:"
+ "\n market asset swap spread: " + str(cmsBondMktAssetSwapSpread1)
+ "\n par asset swap spread: " + str(100*cmsBondParAssetSwapSpread1/cmsBondMktFullPrice1)
+ "\n error: " + str(error5)
+ "\n tolerance: " + str(tolerance))
## CMS Underlying bond (Isin: XS0218766664 ISPIM 0 5/6/15)
## maturity occurs on a business day
cmsBondStartDate2 = Date(6,May,2005)
cmsBondMaturityDate2 = Date(6,May,2015)
cmsBondSchedule2 = Schedule(cmsBondStartDate2,
cmsBondMaturityDate2,
Period(Annual), bondCalendar,
Unadjusted, Unadjusted,
DateGeneration.Backward, False)
cmsBondLeg2 = list(CmsLeg([self.faceAmount],cmsBondSchedule2, self.swapIndex,
Thirty360(),Following,[fixingDays],[0.84],[],[],[],inArrears))
cmsbondRedemption2 = bondCalendar.adjust(cmsBondMaturityDate2, Following)
cmsBondLeg2.append(SimpleCashFlow(100.0, cmsbondRedemption2))
cmsBond2 = Bond(settlementDays, bondCalendar, self.faceAmount,
cmsBondMaturityDate2, cmsBondStartDate2, cmsBondLeg2)
cmsBond2.setPricingEngine(bondEngine)
setCouponPricer(cmsBond2.cashflows(), self.cmspricer)
self.swapIndex.addFixing(Date(4,May,2006), 0.04217)
cmsBondMktPrice2 = 94.08 ## market price observed on 7th June 2007
cmsBondMktFullPrice2 = cmsBondMktPrice2+cmsBond2.accruedAmount()
cmsBondParAssetSwap2 = AssetSwap(payFixedRate,
cmsBond2, cmsBondMktPrice2,
self.iborIndex, self.spread,
Schedule(),
self.iborIndex.dayCounter(),
parAssetSwap)
cmsBondParAssetSwap2.setPricingEngine(swapEngine)
cmsBondParAssetSwapSpread2 = cmsBondParAssetSwap2.fairSpread()
cmsBondMktAssetSwap2 = AssetSwap(payFixedRate,
cmsBond2, cmsBondMktPrice2,
self.iborIndex, self.spread,
Schedule(),
self.iborIndex.dayCounter(),
mktAssetSwap)
cmsBondMktAssetSwap2.setPricingEngine(swapEngine)
cmsBondMktAssetSwapSpread2 = cmsBondMktAssetSwap2.fairSpread()
error6 = abs(cmsBondMktAssetSwapSpread2-
100*cmsBondParAssetSwapSpread2/cmsBondMktFullPrice2)
self.assertFalse(error6>tolerance,
"wrong asset swap spreads for cms bond:"
+ "\n market asset swap spread: " + str(cmsBondMktAssetSwapSpread2)
+ "\n par asset swap spread: " + str(cmsBondParAssetSwapSpread2)
+ "\n error: " + str(error6)
+ "\n tolerance: " + str(tolerance))
## Zero Coupon bond (Isin: DE0004771662 IBRD 0 12/20/15)
## maturity doesn't occur on a business day
zeroCpnBondStartDate1 = Date(19,December,1985)
zeroCpnBondMaturityDate1 = Date(20,December,2015)
zeroCpnBondRedemption1 = bondCalendar.adjust(zeroCpnBondMaturityDate1,
Following)
zeroCpnBondLeg1 = Leg([SimpleCashFlow(100.0, zeroCpnBondRedemption1)])
zeroCpnBond1 = Bond(settlementDays, bondCalendar, self.faceAmount,
zeroCpnBondMaturityDate1, zeroCpnBondStartDate1, zeroCpnBondLeg1)
zeroCpnBond1.setPricingEngine(bondEngine)
## market price observed on 12th June 2007
zeroCpnBondMktPrice1 = 70.436
zeroCpnBondMktFullPrice1 = zeroCpnBondMktPrice1+zeroCpnBond1.accruedAmount()
zeroCpnBondParAssetSwap1 = AssetSwap(payFixedRate,zeroCpnBond1,
zeroCpnBondMktPrice1,
self.iborIndex, self.spread,
Schedule(),
self.iborIndex.dayCounter(),
parAssetSwap)
zeroCpnBondParAssetSwap1.setPricingEngine(swapEngine)
zeroCpnBondParAssetSwapSpread1 = zeroCpnBondParAssetSwap1.fairSpread()
zeroCpnBondMktAssetSwap1 = AssetSwap(payFixedRate,zeroCpnBond1,
zeroCpnBondMktPrice1,
self.iborIndex, self.spread,
Schedule(),
self.iborIndex.dayCounter(),
mktAssetSwap)
zeroCpnBondMktAssetSwap1.setPricingEngine(swapEngine)
zeroCpnBondMktAssetSwapSpread1 = zeroCpnBondMktAssetSwap1.fairSpread()
error7 = abs(zeroCpnBondMktAssetSwapSpread1-
100*zeroCpnBondParAssetSwapSpread1/zeroCpnBondMktFullPrice1)
self.assertFalse(error7>tolerance,
"wrong asset swap spreads for zero cpn bond:"
+ "\n market asset swap spread: " + str(zeroCpnBondMktAssetSwapSpread1)
+ "\n par asset swap spread: " + str(zeroCpnBondParAssetSwapSpread1)
+ "\n error: " + str(error7)
+ "\n tolerance: " + str(tolerance))
## Zero Coupon bond (Isin: IT0001200390 ISPIM 0 02/17/28)
## maturity occurs on a business day
zeroCpnBondStartDate2 = Date(17,February,1998)
zeroCpnBondMaturityDate2 = Date(17,February,2028)
zerocpbondRedemption2 = bondCalendar.adjust(zeroCpnBondMaturityDate2,
Following)
zeroCpnBondLeg2 = Leg([SimpleCashFlow(100.0, zerocpbondRedemption2)])
zeroCpnBond2 = Bond(settlementDays, bondCalendar, self.faceAmount,
zeroCpnBondMaturityDate2, zeroCpnBondStartDate2, zeroCpnBondLeg2)
zeroCpnBond2.setPricingEngine(bondEngine)
## zeroCpnBondPrice2 = zeroCpnBond2.cleanPrice()
## market price observed on 12th June 2007
zeroCpnBondMktPrice2 = 35.160
zeroCpnBondMktFullPrice2 = zeroCpnBondMktPrice2+zeroCpnBond2.accruedAmount()
zeroCpnBondParAssetSwap2 = AssetSwap(payFixedRate,zeroCpnBond2,
zeroCpnBondMktPrice2,
self.iborIndex, self.spread,
Schedule(),
self.iborIndex.dayCounter(),
parAssetSwap)
zeroCpnBondParAssetSwap2.setPricingEngine(swapEngine)
zeroCpnBondParAssetSwapSpread2 = zeroCpnBondParAssetSwap2.fairSpread()
zeroCpnBondMktAssetSwap2 = AssetSwap(payFixedRate,zeroCpnBond2,
zeroCpnBondMktPrice2,
self.iborIndex, self.spread,
Schedule(),
self.iborIndex.dayCounter(),
mktAssetSwap)
zeroCpnBondMktAssetSwap2.setPricingEngine(swapEngine)
zeroCpnBondMktAssetSwapSpread2 = zeroCpnBondMktAssetSwap2.fairSpread()
error8 = abs(zeroCpnBondMktAssetSwapSpread2-
100*zeroCpnBondParAssetSwapSpread2/zeroCpnBondMktFullPrice2)
self.assertFalse(error8>tolerance,
"wrong asset swap spreads for zero cpn bond:"
+ "\n market asset swap spread: " + str(zeroCpnBondMktAssetSwapSpread2)
+ "\n par asset swap spread: " + str(zeroCpnBondParAssetSwapSpread2)
+ "\n error: " + str(error8)
+ "\n tolerance: " + str(tolerance))
def testZSpreadWithGenericBond(self) :
"""Testing clean and dirty price with null Z-spread against theoretical prices..."""
bondCalendar = TARGET()
settlementDays = 3
fixingDays = 2
inArrears = False
## Fixed Underlying bond (Isin: DE0001135275 DBR 4 01/04/37)
## maturity doesn't occur on a business day
fixedBondStartDate1 = Date(4,January,2005)
fixedBondMaturityDate1 = Date(4,January,2037)
fixedBondSchedule1 = Schedule(fixedBondStartDate1,
fixedBondMaturityDate1,
Period(Annual), bondCalendar,
Unadjusted, Unadjusted,
DateGeneration.Backward, False)
fixedBondLeg1 = list(FixedRateLeg(fixedBondSchedule1, ActualActual(ActualActual.ISDA), [self.faceAmount], [0.04]))
fixedbondRedemption1 = bondCalendar.adjust(fixedBondMaturityDate1, Following)
fixedBondLeg1.append(SimpleCashFlow(100.0, fixedbondRedemption1))
fixedBond1 = Bond(settlementDays, bondCalendar, self.faceAmount,
fixedBondMaturityDate1, fixedBondStartDate1,
fixedBondLeg1)
bondEngine = DiscountingBondEngine(self.termStructure)
fixedBond1.setPricingEngine(bondEngine)
fixedBondImpliedValue1 = fixedBond1.cleanPrice()
fixedBondSettlementDate1 = fixedBond1.settlementDate()
## standard market conventions:
## bond's frequency + coumpounding and daycounter of the YieldCurve
fixedBondCleanPrice1 = cleanPriceFromZSpread(fixedBond1, self.yieldCurve, self.spread,
Actual365Fixed(), self.compounding, Annual, fixedBondSettlementDate1)
tolerance = 1.0e-13
error1 = abs(fixedBondImpliedValue1-fixedBondCleanPrice1)
self.assertFalse(error1>tolerance,
"wrong clean price for fixed bond:"
+ "\n market asset swap spread: " + str(fixedBondImpliedValue1)
+ "\n par asset swap spread: " + str(fixedBondCleanPrice1)
+ "\n error: " + str(error1)
+ "\n tolerance: " + str(tolerance))
## Fixed Underlying bond (Isin: IT0006527060 IBRD 5 02/05/19)
## maturity occurs on a business day
fixedBondStartDate2 = Date(5,February,2005)
fixedBondMaturityDate2 = Date(5,February,2019)
fixedBondSchedule2 = Schedule(fixedBondStartDate2,
fixedBondMaturityDate2,
Period(Annual), bondCalendar,
Unadjusted, Unadjusted,
DateGeneration.Backward, False)
fixedBondLeg2 = list(FixedRateLeg(fixedBondSchedule2, Thirty360(Thirty360.BondBasis),
[self.faceAmount],[0.05]))
fixedbondRedemption2 = bondCalendar.adjust(fixedBondMaturityDate2, Following)
fixedBondLeg2.append(SimpleCashFlow(100.0, fixedbondRedemption2))
fixedBond2 = Bond(settlementDays, bondCalendar, self.faceAmount,
fixedBondMaturityDate2, fixedBondStartDate2, fixedBondLeg2)
fixedBond2.setPricingEngine(bondEngine)
fixedBondImpliedValue2 = fixedBond2.cleanPrice()
fixedBondSettlementDate2= fixedBond2.settlementDate()
## standard market conventions:
## bond's frequency + coumpounding and daycounter of the YieldCurve
fixedBondCleanPrice2 = cleanPriceFromZSpread(fixedBond2, self.yieldCurve, self.spread,
Actual365Fixed(), self.compounding, Annual, fixedBondSettlementDate2)
error3 = abs(fixedBondImpliedValue2-fixedBondCleanPrice2)
self.assertFalse(error3>tolerance,
"wrong clean price for fixed bond:"
+ "\n market asset swap spread: " + str(fixedBondImpliedValue2)
+ "\n par asset swap spread: " + str(fixedBondCleanPrice2)
+ "\n error: " + str(error3)
+ "\n tolerance: " + str(tolerance))
## FRN Underlying bond (Isin: IT0003543847 ISPIM 0 09/29/13)
## maturity doesn't occur on a business day
floatingBondStartDate1 = Date(29,September,2003)
floatingBondMaturityDate1 = Date(29,September,2013)
floatingBondSchedule1 = Schedule(floatingBondStartDate1,
floatingBondMaturityDate1,
Period(Semiannual), bondCalendar,
Unadjusted, Unadjusted,
DateGeneration.Backward, False)
floatingBondLeg1 = list(IborLeg([self.faceAmount],floatingBondSchedule1, self.iborIndex,
Actual360(),Following,[fixingDays], [],[0.0056],[],[], inArrears))
floatingbondRedemption1 = bondCalendar.adjust(floatingBondMaturityDate1, Following)
floatingBondLeg1.append(SimpleCashFlow(100.0, floatingbondRedemption1))
floatingBond1 = Bond(settlementDays, bondCalendar, self.faceAmount,
floatingBondMaturityDate1, floatingBondStartDate1,
floatingBondLeg1)
floatingBond1.setPricingEngine(bondEngine)
setCouponPricer(floatingBond1.cashflows(), self.pricer)
self.iborIndex.addFixing(Date(27,March,2007), 0.0402)
floatingBondImpliedValue1 = floatingBond1.cleanPrice()
floatingBondSettlementDate1= floatingBond1.settlementDate()
## standard market conventions:
## bond's frequency + coumpounding and daycounter of the YieldCurve
floatingBondCleanPrice1 = cleanPriceFromZSpread(floatingBond1, self.yieldCurve,
self.spread, Actual365Fixed(), self.compounding, Semiannual,
fixedBondSettlementDate1)
error5 = abs(floatingBondImpliedValue1-floatingBondCleanPrice1)
self.assertFalse(error5>tolerance,
"wrong clean price for fixed bond:"
+ "\n market asset swap spread: " + str(floatingBondImpliedValue1)
+ "\n par asset swap spread: " + str(floatingBondCleanPrice1)
+ "\n error: " + str(error5)
+ "\n tolerance: " + str(tolerance))
## FRN Underlying bond (Isin: XS0090566539 COE 0 09/24/18)
## maturity occurs on a business day
floatingBondStartDate2 = Date(24,September,2004)
floatingBondMaturityDate2 = Date(24,September,2018)
floatingBondSchedule2 = Schedule(floatingBondStartDate2,
floatingBondMaturityDate2,
Period(Semiannual), bondCalendar,
ModifiedFollowing, ModifiedFollowing,
DateGeneration.Backward, False)
floatingBondLeg2 = list(IborLeg([self.faceAmount],floatingBondSchedule2, self.iborIndex,
Actual360(),ModifiedFollowing, [fixingDays],[],[0.0025],[],[], inArrears))
floatingbondRedemption2 = bondCalendar.adjust(floatingBondMaturityDate2, ModifiedFollowing)
floatingBondLeg2.append(SimpleCashFlow(100.0, floatingbondRedemption2))
floatingBond2 = Bond(settlementDays, bondCalendar, self.faceAmount,
floatingBondMaturityDate2, floatingBondStartDate2, floatingBondLeg2)
floatingBond2.setPricingEngine(bondEngine)
setCouponPricer(floatingBond2.cashflows(), self.pricer)
self.iborIndex.addFixing(Date(22,March,2007), 0.04013)
floatingBondImpliedValue2 = floatingBond2.cleanPrice()
floatingBondSettlementDate2= floatingBond2.settlementDate()
## standard market conventions:
## bond's frequency + coumpounding and daycounter of the YieldCurve
floatingBondCleanPrice2 = cleanPriceFromZSpread(floatingBond2, self.yieldCurve,
self.spread, Actual365Fixed(), self.compounding, Semiannual, fixedBondSettlementDate1)
error7 = abs(floatingBondImpliedValue2-floatingBondCleanPrice2)
self.assertFalse(error7>tolerance,
"wrong clean price for fixed bond:"
+ "\n market asset swap spread: " + str(floatingBondImpliedValue2)
+ "\n par asset swap spread: " + str(floatingBondCleanPrice2)
+ "\n error: " + str(error7)
+ "\n tolerance: " + str(tolerance))
## CMS Underlying bond (Isin: XS0228052402 CRDIT 0 8/22/20)
## maturity doesn't occur on a business day
cmsBondStartDate1 = Date(22,August,2005)
cmsBondMaturityDate1 = Date(22,August,2020)
cmsBondSchedule1 = Schedule(cmsBondStartDate1,
cmsBondMaturityDate1,
Period(Annual), bondCalendar,
Unadjusted, Unadjusted,
DateGeneration.Backward, False)
cmsBondLeg1 = list(CmsLeg([self.faceAmount],cmsBondSchedule1, self.swapIndex,
Thirty360(),Following,[fixingDays],[],[],[0.055],[0.025],inArrears))
cmsbondRedemption1 = bondCalendar.adjust(cmsBondMaturityDate1, Following)
cmsBondLeg1.append(SimpleCashFlow(100.0, cmsbondRedemption1))
cmsBond1 = Bond(settlementDays, bondCalendar, self.faceAmount,
cmsBondMaturityDate1, cmsBondStartDate1, cmsBondLeg1)
cmsBond1.setPricingEngine(bondEngine)
setCouponPricer(cmsBond1.cashflows(), self.cmspricer)
self.swapIndex.addFixing(Date(18,August,2006), 0.04158)
cmsBondImpliedValue1 = cmsBond1.cleanPrice()
cmsBondSettlementDate1= cmsBond1.settlementDate()
## standard market conventions:
## bond's frequency + coumpounding and daycounter of the YieldCurve
cmsBondCleanPrice1 = cleanPriceFromZSpread(cmsBond1, self.yieldCurve, self.spread,
Actual365Fixed(), self.compounding, Annual, cmsBondSettlementDate1)
error9 = abs(cmsBondImpliedValue1-cmsBondCleanPrice1)
self.assertFalse(error9>tolerance,
"wrong clean price for fixed bond:"
+ "\n market asset swap spread: " + str(cmsBondImpliedValue1)
+ "\n par asset swap spread: " + str(cmsBondCleanPrice1)
+ "\n error: " + str(error9)
+ "\n tolerance: " + str(tolerance))
## CMS Underlying bond (Isin: XS0218766664 ISPIM 0 5/6/15)
## maturity occurs on a business day
cmsBondStartDate2 = Date(6,May,2005)
cmsBondMaturityDate2 = Date(6,May,2015)
cmsBondSchedule2 = Schedule(cmsBondStartDate2,
cmsBondMaturityDate2,
Period(Annual), bondCalendar,
Unadjusted, Unadjusted,
DateGeneration.Backward, False)
cmsBondLeg2 = list(CmsLeg([self.faceAmount],cmsBondSchedule2, self.swapIndex,
Thirty360(),Following,[fixingDays],[0.84],[],[],[],inArrears))
cmsbondRedemption2 = bondCalendar.adjust(cmsBondMaturityDate2, Following)
cmsBondLeg2.append(SimpleCashFlow(100.0, cmsbondRedemption2))
cmsBond2 = Bond(settlementDays, bondCalendar, self.faceAmount,
cmsBondMaturityDate2, cmsBondStartDate2, cmsBondLeg2)
cmsBond2.setPricingEngine(bondEngine)
setCouponPricer(cmsBond2.cashflows(), self.cmspricer)
self.swapIndex.addFixing(Date(4,May,2006), 0.04217)
cmsBondImpliedValue2 = cmsBond2.cleanPrice()
cmsBondSettlementDate2= cmsBond2.settlementDate()
## standard market conventions:
## bond's frequency + coumpounding and daycounter of the YieldCurve
cmsBondCleanPrice2 = cleanPriceFromZSpread(cmsBond2, self.yieldCurve, self.spread,
Actual365Fixed(), self.compounding, Annual, cmsBondSettlementDate2)
error11 = abs(cmsBondImpliedValue2-cmsBondCleanPrice2)
self.assertFalse(error11>tolerance,
"wrong clean price for fixed bond:"
+ "\n market asset swap spread: " + str(cmsBondImpliedValue2)
+ "\n par asset swap spread: " + str(cmsBondCleanPrice2)
+ "\n error: " + str(error11)
+ "\n tolerance: " + str(tolerance))
## Zero Coupon bond (Isin: DE0004771662 IBRD 0 12/20/15)
## maturity doesn't occur on a business day
zeroCpnBondStartDate1 = Date(19,December,1985)
zeroCpnBondMaturityDate1 = Date(20,December,2015)
zeroCpnBondRedemption1 = bondCalendar.adjust(zeroCpnBondMaturityDate1,
Following)
zeroCpnBondLeg1 = Leg([SimpleCashFlow(100.0, zeroCpnBondRedemption1)])
zeroCpnBond1 = Bond(settlementDays, bondCalendar, self.faceAmount,
zeroCpnBondMaturityDate1, zeroCpnBondStartDate1, zeroCpnBondLeg1)
zeroCpnBond1.setPricingEngine(bondEngine)
zeroCpnBondImpliedValue1 = zeroCpnBond1.cleanPrice()
zeroCpnBondSettlementDate1= zeroCpnBond1.settlementDate()
## standard market conventions:
## bond's frequency + coumpounding and daycounter of the YieldCurve
zeroCpnBondCleanPrice1 = cleanPriceFromZSpread(zeroCpnBond1, self.yieldCurve,
self.spread,
Actual365Fixed(),
self.compounding, Annual,
zeroCpnBondSettlementDate1)
error13 = abs(zeroCpnBondImpliedValue1-zeroCpnBondCleanPrice1)
self.assertFalse(error13>tolerance,
"wrong clean price for zero coupon bond:"
+ "\n zero cpn implied value: " + str(zeroCpnBondImpliedValue1)
+ "\n zero cpn price: " + str(zeroCpnBondCleanPrice1)
+ "\n error: " + str(error13)
+ "\n tolerance: " + str(tolerance))
## Zero Coupon bond (Isin: IT0001200390 ISPIM 0 02/17/28)
## maturity occurs on a business day
zeroCpnBondStartDate2 = Date(17,February,1998)
zeroCpnBondMaturityDate2 = Date(17,February,2028)
zerocpbondRedemption2 = bondCalendar.adjust(zeroCpnBondMaturityDate2, Following)
zeroCpnBondLeg2 = Leg([SimpleCashFlow(100.0, zerocpbondRedemption2)])
zeroCpnBond2 = Bond(settlementDays, bondCalendar, self.faceAmount,
zeroCpnBondMaturityDate2, zeroCpnBondStartDate2, zeroCpnBondLeg2)
zeroCpnBond2.setPricingEngine(bondEngine)
zeroCpnBondImpliedValue2 = zeroCpnBond2.cleanPrice()
zeroCpnBondSettlementDate2= zeroCpnBond2.settlementDate()
## standard market conventions:
## bond's frequency + coumpounding and daycounter of the YieldCurve
zeroCpnBondCleanPrice2 = cleanPriceFromZSpread(zeroCpnBond2,
self.yieldCurve,
self.spread,
Actual365Fixed(),
self.compounding, Annual,
zeroCpnBondSettlementDate2)
error15 = abs(zeroCpnBondImpliedValue2-zeroCpnBondCleanPrice2)
self.assertFalse(error15>tolerance,
"wrong clean price for zero coupon bond:"
+ "\n zero cpn implied value: " + str(zeroCpnBondImpliedValue2)
+ "\n zero cpn price: " + str(zeroCpnBondCleanPrice2)
+ "\n error: " + str(error15)
+ "\n tolerance: " + str(tolerance))
def testSpecializedBondVsGenericBond(self) :
"""Testing clean and dirty prices for specialized bond against equivalent generic bond..."""
bondCalendar = TARGET()
settlementDays = 3
fixingDays = 2
inArrears = False
## Fixed Underlying bond (Isin: DE0001135275 DBR 4 01/04/37)
## maturity doesn't occur on a business day
fixedBondStartDate1 = Date(4,January,2005)
fixedBondMaturityDate1 = Date(4,January,2037)
fixedBondSchedule1 = Schedule(fixedBondStartDate1,
fixedBondMaturityDate1,
Period(Annual), bondCalendar,
Unadjusted, Unadjusted,
DateGeneration.Backward, False)
fixedBondLeg1 = list(FixedRateLeg(fixedBondSchedule1, ActualActual(ActualActual.ISDA), [self.faceAmount],
[0.04]))
fixedbondRedemption1 = bondCalendar.adjust(fixedBondMaturityDate1, Following)
fixedBondLeg1.append(SimpleCashFlow(100.0, fixedbondRedemption1))
## generic bond
fixedBond1 = Bond(settlementDays, bondCalendar, self.faceAmount,
fixedBondMaturityDate1, fixedBondStartDate1,
fixedBondLeg1)
bondEngine = DiscountingBondEngine(self.termStructure)
fixedBond1.setPricingEngine(bondEngine)
## equivalent specialized fixed rate bond
fixedSpecializedBond1 = FixedRateBond(settlementDays, self.faceAmount, fixedBondSchedule1,
[0.04], ActualActual(ActualActual.ISDA), Following,
100.0, Date(4,January,2005))
fixedSpecializedBond1.setPricingEngine(bondEngine)
fixedBondTheoValue1 = fixedBond1.cleanPrice()
fixedSpecializedBondTheoValue1 = fixedSpecializedBond1.cleanPrice()
tolerance = 1.0e-13
error1 = abs(fixedBondTheoValue1-fixedSpecializedBondTheoValue1)
self.assertFalse(error1>tolerance,
"wrong clean price for fixed bond:"
+ "\n specialized fixed rate bond's theo clean price: " + str(fixedBondTheoValue1)
+ "\n generic equivalent bond's theo clean price: " + str(fixedSpecializedBondTheoValue1)
+ "\n error: " + str(error1)
+ "\n tolerance: " + str(tolerance))
fixedBondTheoDirty1 = fixedBondTheoValue1+fixedBond1.accruedAmount()
fixedSpecializedTheoDirty1 = fixedSpecializedBondTheoValue1+ fixedSpecializedBond1.accruedAmount()
error2 = abs(fixedBondTheoDirty1-fixedSpecializedTheoDirty1)
self.assertFalse(error2>tolerance,
"wrong dirty price for fixed bond:"
+ "\n specialized fixed rate bond's theo dirty price: " + str(fixedBondTheoDirty1)
+ "\n generic equivalent bond's theo dirty price: " + str(fixedSpecializedTheoDirty1)
+ "\n error: " + str(error2)
+ "\n tolerance: " + str(tolerance))
## Fixed Underlying bond (Isin: IT0006527060 IBRD 5 02/05/19)
## maturity occurs on a business day
fixedBondStartDate2 = Date(5,February,2005)
fixedBondMaturityDate2 = Date(5,February,2019)
fixedBondSchedule2 = Schedule(fixedBondStartDate2,
fixedBondMaturityDate2,
Period(Annual), bondCalendar,
Unadjusted, Unadjusted,
DateGeneration.Backward, False)
fixedBondLeg2 = list(FixedRateLeg(fixedBondSchedule2, Thirty360(Thirty360.BondBasis),[self.faceAmount],[0.05]))
fixedbondRedemption2 = bondCalendar.adjust(fixedBondMaturityDate2, Following)
fixedBondLeg2.append(SimpleCashFlow(100.0, fixedbondRedemption2))
## generic bond
fixedBond2 = Bond(settlementDays, bondCalendar, self.faceAmount,
fixedBondMaturityDate2, fixedBondStartDate2, fixedBondLeg2)
fixedBond2.setPricingEngine(bondEngine)
## equivalent specialized fixed rate bond
fixedSpecializedBond2 = FixedRateBond(settlementDays, self.faceAmount, fixedBondSchedule2,
[0.05],Thirty360(Thirty360.BondBasis), Following,
100.0, Date(5,February,2005))
fixedSpecializedBond2.setPricingEngine(bondEngine)
fixedBondTheoValue2 = fixedBond2.cleanPrice()
fixedSpecializedBondTheoValue2 = fixedSpecializedBond2.cleanPrice()
error3 = abs(fixedBondTheoValue2-fixedSpecializedBondTheoValue2)
self.assertFalse(error3>tolerance,
"wrong clean price for fixed bond:"
+ "\n specialized fixed rate bond's theo clean price: " + str(fixedBondTheoValue2)
+ "\n generic equivalent bond's theo clean price: " + str(fixedSpecializedBondTheoValue2)
+ "\n error: " + str(error3)
+ "\n tolerance: " + str(tolerance))
fixedBondTheoDirty2 = fixedBondTheoValue2+ fixedBond2.accruedAmount()
fixedSpecializedBondTheoDirty2 = fixedSpecializedBondTheoValue2+ fixedSpecializedBond2.accruedAmount()
error4 = abs(fixedBondTheoDirty2-fixedSpecializedBondTheoDirty2)
self.assertFalse(error4>tolerance,
"wrong dirty price for fixed bond:"
+ "\n specialized fixed rate bond's dirty clean price: " + str(fixedBondTheoDirty2)
+ "\n generic equivalent bond's theo dirty price: " + str(fixedSpecializedBondTheoDirty2)
+ "\n error: " + str(error4)
+ "\n tolerance: " + str(tolerance))
## FRN Underlying bond (Isin: IT0003543847 ISPIM 0 09/29/13)
## maturity doesn't occur on a business day
floatingBondStartDate1 = Date(29,September,2003)
floatingBondMaturityDate1 = Date(29,September,2013)
floatingBondSchedule1 = Schedule(floatingBondStartDate1,
floatingBondMaturityDate1,
Period(Semiannual), bondCalendar,
Unadjusted, Unadjusted,
DateGeneration.Backward, False)
floatingBondLeg1 = list(IborLeg([self.faceAmount],floatingBondSchedule1, self.iborIndex,
Actual360(),Following,[fixingDays],[],[0.0056],[],[],inArrears))
floatingbondRedemption1 = bondCalendar.adjust(floatingBondMaturityDate1, Following)
floatingBondLeg1.append(SimpleCashFlow(100.0, floatingbondRedemption1))
## generic bond
floatingBond1 = Bond(settlementDays, bondCalendar, self.faceAmount,
floatingBondMaturityDate1, floatingBondStartDate1,
floatingBondLeg1)
floatingBond1.setPricingEngine(bondEngine)
## equivalent specialized floater
floatingSpecializedBond1 = FloatingRateBond(settlementDays, self.faceAmount,
floatingBondSchedule1,
self.iborIndex, Actual360(),
Following, fixingDays,
[1],
[0.0056],
[], [],
inArrears,
100.0, Date(29,September,2003))
floatingSpecializedBond1.setPricingEngine(bondEngine)
setCouponPricer(floatingBond1.cashflows(), self.pricer)
setCouponPricer(floatingSpecializedBond1.cashflows(), self.pricer)
self.iborIndex.addFixing(Date(27,March,2007), 0.0402)
floatingBondTheoValue1 = floatingBond1.cleanPrice()
floatingSpecializedBondTheoValue1 = floatingSpecializedBond1.cleanPrice()
error5 = abs(floatingBondTheoValue1-
floatingSpecializedBondTheoValue1)
self.assertFalse(error5>tolerance,
"wrong clean price for fixed bond:"
+ "\n generic fixed rate bond's theo clean price: " + str(floatingBondTheoValue1)
+ "\n equivalent specialized bond's theo clean price: " + str(floatingSpecializedBondTheoValue1)
+ "\n error: " + str(error5)
+ "\n tolerance: " + str(tolerance))
floatingBondTheoDirty1 = floatingBondTheoValue1+ floatingBond1.accruedAmount()
floatingSpecializedBondTheoDirty1 = floatingSpecializedBondTheoValue1 + floatingSpecializedBond1.accruedAmount()
error6 = abs(floatingBondTheoDirty1-
floatingSpecializedBondTheoDirty1)
self.assertFalse(error6>tolerance,
"wrong dirty price for frn bond:"
+ "\n generic frn bond's dirty clean price: " + str(floatingBondTheoDirty1)
+ "\n equivalent specialized bond's theo dirty price: " + str(floatingSpecializedBondTheoDirty1)
+ "\n error: " + str(error6)
+ "\n tolerance: " + str(tolerance))
## FRN Underlying bond (Isin: XS0090566539 COE 0 09/24/18)
## maturity occurs on a business day
floatingBondStartDate2 = Date(24,September,2004)
floatingBondMaturityDate2 = Date(24,September,2018)
floatingBondSchedule2 = Schedule(floatingBondStartDate2,
floatingBondMaturityDate2,
Period(Semiannual), bondCalendar,
ModifiedFollowing, ModifiedFollowing,
DateGeneration.Backward, False)
floatingBondLeg2 = list(IborLeg([self.faceAmount], floatingBondSchedule2, self.iborIndex,
Actual360(),ModifiedFollowing,[fixingDays], [],[0.0025],[],[],inArrears))
floatingbondRedemption2 = bondCalendar.adjust(floatingBondMaturityDate2, ModifiedFollowing)
floatingBondLeg2.append(SimpleCashFlow(100.0, floatingbondRedemption2))
## generic bond
floatingBond2 = Bond(settlementDays, bondCalendar, self.faceAmount,
floatingBondMaturityDate2, floatingBondStartDate2,
floatingBondLeg2)
floatingBond2.setPricingEngine(bondEngine)
## equivalent specialized floater
floatingSpecializedBond2 = FloatingRateBond(settlementDays, self.faceAmount,
floatingBondSchedule2,
self.iborIndex, Actual360(),
ModifiedFollowing, fixingDays,
[1], [0.0025],
[], [],
inArrears,
100.0, Date(24,September,2004))
floatingSpecializedBond2.setPricingEngine(bondEngine)
setCouponPricer(floatingBond2.cashflows(), self.pricer)
setCouponPricer(floatingSpecializedBond2.cashflows(), self.pricer)
self.iborIndex.addFixing(Date(22,March,2007), 0.04013)
floatingBondTheoValue2 = floatingBond2.cleanPrice()
floatingSpecializedBondTheoValue2 = floatingSpecializedBond2.cleanPrice()
error7 = abs(floatingBondTheoValue2-floatingSpecializedBondTheoValue2)
self.assertFalse(error7>tolerance,
"wrong clean price for floater bond:"
+ "\n generic floater bond's theo clean price: " + str(floatingBondTheoValue2)
+ "\n equivalent specialized bond's theo clean price: " + str(floatingSpecializedBondTheoValue2)
+ "\n error: " + str(error7)
+ "\n tolerance: " + str(tolerance))
floatingBondTheoDirty2 = floatingBondTheoValue2+ floatingBond2.accruedAmount()
floatingSpecializedTheoDirty2 = floatingSpecializedBondTheoValue2+ floatingSpecializedBond2.accruedAmount()
error8 = abs(floatingBondTheoDirty2-floatingSpecializedTheoDirty2)
self.assertFalse(error8>tolerance,
"wrong dirty price for floater bond:"
+ "\n generic floater bond's theo dirty price: " + str(floatingBondTheoDirty2)
+ "\n equivalent specialized bond's theo dirty price: " + str(floatingSpecializedTheoDirty2)
+ "\n error: " + str(error8)
+ "\n tolerance: " + str(tolerance))
## CMS Underlying bond (Isin: XS0228052402 CRDIT 0 8/22/20)
## maturity doesn't occur on a business day
cmsBondStartDate1 = Date(22,August,2005)
cmsBondMaturityDate1 = Date(22,August,2020)
cmsBondSchedule1 = Schedule(cmsBondStartDate1,
cmsBondMaturityDate1,
Period(Annual), bondCalendar,
Unadjusted, Unadjusted,
DateGeneration.Backward, False)
cmsBondLeg1 = list(CmsLeg([self.faceAmount],cmsBondSchedule1, self.swapIndex,
Thirty360(), Following, [fixingDays], [],[],[0.055],[0.025],inArrears))
cmsbondRedemption1 = bondCalendar.adjust(cmsBondMaturityDate1, Following)
cmsBondLeg1.append(SimpleCashFlow(100.0, cmsbondRedemption1))
## generic cms bond
cmsBond1 = Bond(settlementDays, bondCalendar, self.faceAmount,
cmsBondMaturityDate1, cmsBondStartDate1, cmsBondLeg1)
cmsBond1.setPricingEngine(bondEngine)
## equivalent specialized cms bond
cmsSpecializedBond1 = CmsRateBond(settlementDays, self.faceAmount, cmsBondSchedule1,
self.swapIndex, Thirty360(),
Following, fixingDays,
[1.0], [0.0],
[0.055],[0.025],
inArrears,
100.0, Date(22,August,2005))
cmsSpecializedBond1.setPricingEngine(bondEngine)
setCouponPricer(cmsBond1.cashflows(), self.cmspricer)
setCouponPricer(cmsSpecializedBond1.cashflows(), self.cmspricer)
self.swapIndex.addFixing(Date(18,August,2006), 0.04158)
cmsBondTheoValue1 = cmsBond1.cleanPrice()
cmsSpecializedBondTheoValue1 = cmsSpecializedBond1.cleanPrice()
error9 = abs(cmsBondTheoValue1-cmsSpecializedBondTheoValue1)
self.assertFalse(error9>tolerance,
"wrong clean price for cms bond:"
+ "\n generic cms bond's theo clean price: " + str(cmsBondTheoValue1)
+ "\n equivalent specialized bond's theo clean price: " + str(cmsSpecializedBondTheoValue1)
+ "\n error: " + str(error9)
+ "\n tolerance: " + str(tolerance))
cmsBondTheoDirty1 = cmsBondTheoValue1+cmsBond1.accruedAmount()
cmsSpecializedBondTheoDirty1 = cmsSpecializedBondTheoValue1+ cmsSpecializedBond1.accruedAmount()
error10 = abs(cmsBondTheoDirty1-cmsSpecializedBondTheoDirty1)
self.assertFalse(error10>tolerance,
"wrong dirty price for cms bond:"
+ "\n generic cms bond's theo dirty price: " + str(cmsBondTheoDirty1)
+ "\n specialized cms bond's theo dirty price: " + str(cmsSpecializedBondTheoDirty1)
+ "\n error: " + str(error10)
+ "\n tolerance: " + str(tolerance))
## CMS Underlying bond (Isin: XS0218766664 ISPIM 0 5/6/15)
## maturity occurs on a business day
cmsBondStartDate2 = Date(6,May,2005)
cmsBondMaturityDate2 = Date(6,May,2015)
cmsBondSchedule2 = Schedule(cmsBondStartDate2,
cmsBondMaturityDate2,
Period(Annual), bondCalendar,
Unadjusted, Unadjusted,
DateGeneration.Backward, False)
cmsBondLeg2 = list(CmsLeg([self.faceAmount],cmsBondSchedule2, self.swapIndex,
Thirty360(),Following,[fixingDays],[0.84],[],[],[],inArrears))
cmsbondRedemption2 = bondCalendar.adjust(cmsBondMaturityDate2, Following)
cmsBondLeg2.append(SimpleCashFlow(100.0, cmsbondRedemption2))
## generic bond
cmsBond2 = Bond(settlementDays, bondCalendar, self.faceAmount,
cmsBondMaturityDate2, cmsBondStartDate2, cmsBondLeg2)
cmsBond2.setPricingEngine(bondEngine)
## equivalent specialized cms bond
cmsSpecializedBond2 = CmsRateBond(settlementDays, self.faceAmount, cmsBondSchedule2,
self.swapIndex, Thirty360(),
Following, fixingDays,
[0.84], [0.0],
[], [],
inArrears,
100.0, Date(6,May,2005))
cmsSpecializedBond2.setPricingEngine(bondEngine)
setCouponPricer(cmsBond2.cashflows(), self.cmspricer)
setCouponPricer(cmsSpecializedBond2.cashflows(), self.cmspricer)
self.swapIndex.addFixing(Date(4,May,2006), 0.04217)
cmsBondTheoValue2 = cmsBond2.cleanPrice()
cmsSpecializedBondTheoValue2 = cmsSpecializedBond2.cleanPrice()
error11 = abs(cmsBondTheoValue2-cmsSpecializedBondTheoValue2)
self.assertFalse(error11>tolerance,
"wrong clean price for cms bond:"
+ "\n generic cms bond's theo clean price: " + str(cmsBondTheoValue2)
+ "\n cms bond's theo clean price: " + str(cmsSpecializedBondTheoValue2)
+ "\n error: " + str(error11)
+ "\n tolerance: " + str(tolerance))
cmsBondTheoDirty2 = cmsBondTheoValue2+cmsBond2.accruedAmount()
cmsSpecializedBondTheoDirty2 = cmsSpecializedBondTheoValue2+cmsSpecializedBond2.accruedAmount()
error12 = abs(cmsBondTheoDirty2-cmsSpecializedBondTheoDirty2)
self.assertFalse(error12>tolerance,
"wrong dirty price for cms bond:"
+ "\n generic cms bond's dirty price: " + str(cmsBondTheoDirty2)
+ "\n specialized cms bond's theo dirty price: " + str(cmsSpecializedBondTheoDirty2)
+ "\n error: " + str(error12)
+ "\n tolerance: " + str(tolerance))
## Zero Coupon bond (Isin: DE0004771662 IBRD 0 12/20/15)
## maturity doesn't occur on a business day
zeroCpnBondStartDate1 = Date(19,December,1985)
zeroCpnBondMaturityDate1 = Date(20,December,2015)
zeroCpnBondRedemption1 = bondCalendar.adjust(zeroCpnBondMaturityDate1,
Following)
zeroCpnBondLeg1 = Leg([SimpleCashFlow(100.0, zeroCpnBondRedemption1)])
## generic bond
zeroCpnBond1 = Bond(settlementDays, bondCalendar, self.faceAmount,
zeroCpnBondMaturityDate1, zeroCpnBondStartDate1, zeroCpnBondLeg1)
zeroCpnBond1.setPricingEngine(bondEngine)
## specialized zerocpn bond
zeroCpnSpecializedBond1 = ZeroCouponBond(settlementDays, bondCalendar, self.faceAmount,
Date(20,December,2015),
Following,
100.0, Date(19,December,1985))
zeroCpnSpecializedBond1.setPricingEngine(bondEngine)
zeroCpnBondTheoValue1 = zeroCpnBond1.cleanPrice()
zeroCpnSpecializedBondTheoValue1 = zeroCpnSpecializedBond1.cleanPrice()
error13 = abs(zeroCpnBondTheoValue1-zeroCpnSpecializedBondTheoValue1)
self.assertFalse(error13>tolerance,
"wrong clean price for zero coupon bond:"
+ "\n generic zero bond's clean price: " + str(zeroCpnBondTheoValue1)
+ "\n specialized zero bond's clean price: " + str(zeroCpnSpecializedBondTheoValue1)
+ "\n error: " + str(error13)
+ "\n tolerance: " + str(tolerance))
zeroCpnBondTheoDirty1 = zeroCpnBondTheoValue1+ zeroCpnBond1.accruedAmount()
zeroCpnSpecializedBondTheoDirty1 = zeroCpnSpecializedBondTheoValue1+ \
zeroCpnSpecializedBond1.accruedAmount()
error14 = abs(zeroCpnBondTheoDirty1-zeroCpnSpecializedBondTheoDirty1)
self.assertFalse(error14>tolerance,
"wrong dirty price for zero bond:"
+ "\n generic zerocpn bond's dirty price: " + str(zeroCpnBondTheoDirty1)
+ "\n specialized zerocpn bond's clean price: " + str(zeroCpnSpecializedBondTheoDirty1)
+ "\n error: " + str(error14)
+ "\n tolerance: " + str(tolerance))
## Zero Coupon bond (Isin: IT0001200390 ISPIM 0 02/17/28)
## maturity occurs on a business day
zeroCpnBondStartDate2 = Date(17,February,1998)
zeroCpnBondMaturityDate2 = Date(17,February,2028)
zerocpbondRedemption2 = bondCalendar.adjust(zeroCpnBondMaturityDate2,
Following)
zeroCpnBondLeg2 = Leg([SimpleCashFlow(100.0, zerocpbondRedemption2)])
## generic bond
zeroCpnBond2 = Bond(settlementDays, bondCalendar, self.faceAmount,
zeroCpnBondMaturityDate2, zeroCpnBondStartDate2, zeroCpnBondLeg2)
zeroCpnBond2.setPricingEngine(bondEngine)
## specialized zerocpn bond
zeroCpnSpecializedBond2 = ZeroCouponBond(settlementDays, bondCalendar, self.faceAmount,
Date(17,February,2028),
Following,
100.0, Date(17,February,1998))
zeroCpnSpecializedBond2.setPricingEngine(bondEngine)
zeroCpnBondTheoValue2 = zeroCpnBond2.cleanPrice()
zeroCpnSpecializedBondTheoValue2 = zeroCpnSpecializedBond2.cleanPrice()
error15 = abs(zeroCpnBondTheoValue2 -zeroCpnSpecializedBondTheoValue2)
self.assertFalse(error15>tolerance,
"wrong clean price for zero coupon bond:"
+ "\n generic zerocpn bond's clean price: " + str(zeroCpnBondTheoValue2)
+ "\n specialized zerocpn bond's clean price: " + str(zeroCpnSpecializedBondTheoValue2)
+ "\n error: " + str(error15)
+ "\n tolerance: " + str(tolerance))
zeroCpnBondTheoDirty2 = zeroCpnBondTheoValue2+ zeroCpnBond2.accruedAmount()
zeroCpnSpecializedBondTheoDirty2 = \
zeroCpnSpecializedBondTheoValue2+ \
zeroCpnSpecializedBond2.accruedAmount()
error16 = abs(zeroCpnBondTheoDirty2-zeroCpnSpecializedBondTheoDirty2)
self.assertFalse(error16>tolerance,
"wrong dirty price for zero coupon bond:"
+ "\n generic zerocpn bond's dirty price: " + str(zeroCpnBondTheoDirty2)
+ "\n specialized zerocpn bond's dirty price: " + str(zeroCpnSpecializedBondTheoDirty2)
+ "\n error: " + str(error16)
+ "\n tolerance: " + str(tolerance))
def testSpecializedBondVsGenericBondUsingAsw(self) :
"""Testing asset-swap prices and spreads for specialized bond against equivalent generic bond..."""
bondCalendar = TARGET()
settlementDays = 3
fixingDays = 2
payFixedRate = True
parAssetSwap = True
inArrears = False
## Fixed bond (Isin: DE0001135275 DBR 4 01/04/37)
## maturity doesn't occur on a business day
fixedBondStartDate1 = Date(4,January,2005)
fixedBondMaturityDate1 = Date(4,January,2037)
fixedBondSchedule1 = Schedule(fixedBondStartDate1,
fixedBondMaturityDate1,
Period(Annual), bondCalendar,
Unadjusted, Unadjusted,
DateGeneration.Backward, False)
fixedBondLeg1 = list(FixedRateLeg(fixedBondSchedule1,
ActualActual(ActualActual.ISDA), [self.faceAmount],[0.04]))
fixedbondRedemption1 = bondCalendar.adjust(fixedBondMaturityDate1, Following)
fixedBondLeg1.append(SimpleCashFlow(100.0, fixedbondRedemption1))
## generic bond
fixedBond1 = Bond(settlementDays, bondCalendar, self.faceAmount,
fixedBondMaturityDate1, fixedBondStartDate1, fixedBondLeg1)
bondEngine = DiscountingBondEngine(self.termStructure)
swapEngine = DiscountingSwapEngine(self.termStructure, False)
fixedBond1.setPricingEngine(bondEngine)
## equivalent specialized fixed rate bond
fixedSpecializedBond1 = FixedRateBond(settlementDays, self.faceAmount, fixedBondSchedule1,
[0.04], ActualActual(ActualActual.ISDA), Following,
100.0, Date(4,January,2005))
fixedSpecializedBond1.setPricingEngine(bondEngine)
fixedBondPrice1 = fixedBond1.cleanPrice()
fixedSpecializedBondPrice1 = fixedSpecializedBond1.cleanPrice()
fixedBondAssetSwap1 = AssetSwap(payFixedRate,
fixedBond1, fixedBondPrice1,
self.iborIndex, self.nonnullspread,
Schedule(),
self.iborIndex.dayCounter(),
parAssetSwap)
fixedBondAssetSwap1.setPricingEngine(swapEngine)
fixedSpecializedBondAssetSwap1 = AssetSwap(payFixedRate,
fixedSpecializedBond1,
fixedSpecializedBondPrice1,
self.iborIndex,
self.nonnullspread,
Schedule(),
self.iborIndex.dayCounter(),
parAssetSwap)
fixedSpecializedBondAssetSwap1.setPricingEngine(swapEngine)
fixedBondAssetSwapPrice1 = fixedBondAssetSwap1.fairCleanPrice()
fixedSpecializedBondAssetSwapPrice1 = fixedSpecializedBondAssetSwap1.fairCleanPrice()
tolerance = 1.0e-13
error1 = abs(fixedBondAssetSwapPrice1-fixedSpecializedBondAssetSwapPrice1)
self.assertFalse(error1>tolerance,
"wrong clean price for fixed bond:"
+ "\n generic fixed rate bond's clean price: " + str(fixedBondAssetSwapPrice1)
+ "\n equivalent specialized bond's clean price: " + str(fixedSpecializedBondAssetSwapPrice1)
+ "\n error: " + str(error1)
+ "\n tolerance: " + str(tolerance))
## market executable price as of 4th sept 2007
fixedBondMktPrice1= 91.832
fixedBondASW1 = AssetSwap(payFixedRate,
fixedBond1, fixedBondMktPrice1,
self.iborIndex, self.spread,
Schedule(),
self.iborIndex.dayCounter(),
parAssetSwap)
fixedBondASW1.setPricingEngine(swapEngine)
fixedSpecializedBondASW1 = AssetSwap(payFixedRate,
fixedSpecializedBond1,
fixedBondMktPrice1,
self.iborIndex, self.spread,
Schedule(),
self.iborIndex.dayCounter(),
parAssetSwap)
fixedSpecializedBondASW1.setPricingEngine(swapEngine)
fixedBondASWSpread1 = fixedBondASW1.fairSpread()
fixedSpecializedBondASWSpread1 = fixedSpecializedBondASW1.fairSpread()
error2 = abs(fixedBondASWSpread1-fixedSpecializedBondASWSpread1)
self.assertFalse(error2>tolerance,
"wrong asw spread for fixed bond:"
+ "\n generic fixed rate bond's asw spread: " + str(fixedBondASWSpread1)
+ "\n equivalent specialized bond's asw spread: " + str(fixedSpecializedBondASWSpread1)
+ "\n error: " + str(error2)
+ "\n tolerance: " + str(tolerance))
##Fixed bond (Isin: IT0006527060 IBRD 5 02/05/19)
##maturity occurs on a business day
fixedBondStartDate2 = Date(5,February,2005)
fixedBondMaturityDate2 = Date(5,February,2019)
fixedBondSchedule2 = Schedule(fixedBondStartDate2,
fixedBondMaturityDate2,
Period(Annual), bondCalendar,
Unadjusted, Unadjusted,
DateGeneration.Backward, False)
fixedBondLeg2 = list(FixedRateLeg(fixedBondSchedule2, Thirty360(Thirty360.BondBasis),[self.faceAmount],
[0.05]))
fixedbondRedemption2 = bondCalendar.adjust(fixedBondMaturityDate2, Following)
fixedBondLeg2.append(SimpleCashFlow(100.0, fixedbondRedemption2))
## generic bond
fixedBond2 = Bond(settlementDays, bondCalendar, self.faceAmount,
fixedBondMaturityDate2, fixedBondStartDate2, fixedBondLeg2)
fixedBond2.setPricingEngine(bondEngine)
## equivalent specialized fixed rate bond
fixedSpecializedBond2 = FixedRateBond(settlementDays, self.faceAmount, fixedBondSchedule2,
[0.05], Thirty360(Thirty360.BondBasis), Following,
100.0, Date(5,February,2005))
fixedSpecializedBond2.setPricingEngine(bondEngine)
fixedBondPrice2 = fixedBond2.cleanPrice()
fixedSpecializedBondPrice2 = fixedSpecializedBond2.cleanPrice()
fixedBondAssetSwap2 = AssetSwap(payFixedRate,
fixedBond2, fixedBondPrice2,
self.iborIndex, self.nonnullspread,
Schedule(),
self.iborIndex.dayCounter(),
parAssetSwap)
fixedBondAssetSwap2.setPricingEngine(swapEngine)
fixedSpecializedBondAssetSwap2 = AssetSwap(payFixedRate,
fixedSpecializedBond2,
fixedSpecializedBondPrice2,
self.iborIndex,
self.nonnullspread,
Schedule(),
self.iborIndex.dayCounter(),
parAssetSwap)
fixedSpecializedBondAssetSwap2.setPricingEngine(swapEngine)
fixedBondAssetSwapPrice2 = fixedBondAssetSwap2.fairCleanPrice()
fixedSpecializedBondAssetSwapPrice2 = fixedSpecializedBondAssetSwap2.fairCleanPrice()
error3 = abs(fixedBondAssetSwapPrice2-fixedSpecializedBondAssetSwapPrice2)
self.assertFalse(error3>tolerance,
"wrong clean price for fixed bond:"
+ "\n generic fixed rate bond's clean price: " + str(fixedBondAssetSwapPrice2)
+ "\n equivalent specialized bond's clean price: " + str(fixedSpecializedBondAssetSwapPrice2)
+ "\n error: " + str(error3)
+ "\n tolerance: " + str(tolerance))
## market executable price as of 4th sept 2007
fixedBondMktPrice2= 102.178
fixedBondASW2 = AssetSwap(payFixedRate,
fixedBond2, fixedBondMktPrice2,
self.iborIndex, self.spread,
Schedule(),
self.iborIndex.dayCounter(),
parAssetSwap)
fixedBondASW2.setPricingEngine(swapEngine)
fixedSpecializedBondASW2 = AssetSwap(payFixedRate,
fixedSpecializedBond2,
fixedBondMktPrice2,
self.iborIndex, self.spread,
Schedule(),
self.iborIndex.dayCounter(),
parAssetSwap)
fixedSpecializedBondASW2.setPricingEngine(swapEngine)
fixedBondASWSpread2 = fixedBondASW2.fairSpread()
fixedSpecializedBondASWSpread2 = fixedSpecializedBondASW2.fairSpread()
error4 = abs(fixedBondASWSpread2-fixedSpecializedBondASWSpread2)
self.assertFalse(error4>tolerance,
"wrong asw spread for fixed bond:"
+ "\n generic fixed rate bond's asw spread: " + str(fixedBondASWSpread2)
+ "\n equivalent specialized bond's asw spread: " + str(fixedSpecializedBondASWSpread2)
+ "\n error: " + str(error4)
+ "\n tolerance: " + str(tolerance))
##FRN bond (Isin: IT0003543847 ISPIM 0 09/29/13)
##maturity doesn't occur on a business day
floatingBondStartDate1 = Date(29,September,2003)
floatingBondMaturityDate1 = Date(29,September,2013)
floatingBondSchedule1 = Schedule(floatingBondStartDate1,
floatingBondMaturityDate1,
Period(Semiannual), bondCalendar,
Unadjusted, Unadjusted,
DateGeneration.Backward, False)
floatingBondLeg1 = list(IborLeg([self.faceAmount],floatingBondSchedule1, self.iborIndex,
Actual360(), Following,[fixingDays],[],[0.0056],[],[],inArrears))
floatingbondRedemption1 = bondCalendar.adjust(floatingBondMaturityDate1, Following)
floatingBondLeg1.append(SimpleCashFlow(100.0, floatingbondRedemption1))
## generic bond
floatingBond1 = Bond(settlementDays, bondCalendar, self.faceAmount,
floatingBondMaturityDate1, floatingBondStartDate1,
floatingBondLeg1)
floatingBond1.setPricingEngine(bondEngine)
## equivalent specialized floater
floatingSpecializedBond1 = FloatingRateBond(settlementDays, self.faceAmount,
floatingBondSchedule1,
self.iborIndex, Actual360(),
Following, fixingDays,
[1],
[0.0056],
[], [],
inArrears,
100.0, Date(29,September,2003))
floatingSpecializedBond1.setPricingEngine(bondEngine)
setCouponPricer(floatingBond1.cashflows(), self.pricer)
setCouponPricer(floatingSpecializedBond1.cashflows(), self.pricer)
self.iborIndex.addFixing(Date(27,March,2007), 0.0402)
floatingBondPrice1 = floatingBond1.cleanPrice()
floatingSpecializedBondPrice1= floatingSpecializedBond1.cleanPrice()
floatingBondAssetSwap1 = AssetSwap(payFixedRate,
floatingBond1, floatingBondPrice1,
self.iborIndex, self.nonnullspread,
Schedule(),
self.iborIndex.dayCounter(),
parAssetSwap)
floatingBondAssetSwap1.setPricingEngine(swapEngine)
floatingSpecializedBondAssetSwap1 = AssetSwap(payFixedRate,
floatingSpecializedBond1,
floatingSpecializedBondPrice1,
self.iborIndex,
self.nonnullspread,
Schedule(),
self.iborIndex.dayCounter(),
parAssetSwap)
floatingSpecializedBondAssetSwap1.setPricingEngine(swapEngine)
floatingBondAssetSwapPrice1 = floatingBondAssetSwap1.fairCleanPrice()
floatingSpecializedBondAssetSwapPrice1 = floatingSpecializedBondAssetSwap1.fairCleanPrice()
error5 = abs(floatingBondAssetSwapPrice1-floatingSpecializedBondAssetSwapPrice1)
self.assertFalse(error5>tolerance,
"wrong clean price for frnbond:"
+ "\n generic frn rate bond's clean price: " + str(floatingBondAssetSwapPrice1)
+ "\n equivalent specialized bond's price: " + str(floatingSpecializedBondAssetSwapPrice1)
+ "\n error: " + str(error5)
+ "\n tolerance: " + str(tolerance))
## market executable price as of 4th sept 2007
floatingBondMktPrice1= 101.33
floatingBondASW1 = AssetSwap(payFixedRate,
floatingBond1, floatingBondMktPrice1,
self.iborIndex, self.spread,
Schedule(),
self.iborIndex.dayCounter(),
parAssetSwap)
floatingBondASW1.setPricingEngine(swapEngine)
floatingSpecializedBondASW1 = AssetSwap(payFixedRate,
floatingSpecializedBond1,
floatingBondMktPrice1,
self.iborIndex, self.spread,
Schedule(),
self.iborIndex.dayCounter(),
parAssetSwap)
floatingSpecializedBondASW1.setPricingEngine(swapEngine)
floatingBondASWSpread1 = floatingBondASW1.fairSpread()
floatingSpecializedBondASWSpread1 = floatingSpecializedBondASW1.fairSpread()
error6 = abs(floatingBondASWSpread1-floatingSpecializedBondASWSpread1)
self.assertFalse(error6>tolerance,
"wrong asw spread for fixed bond:"
+ "\n generic frn rate bond's asw spread: " + str(floatingBondASWSpread1)
+ "\n equivalent specialized bond's asw spread: " + str(floatingSpecializedBondASWSpread1)
+ "\n error: " + str(error6)
+ "\n tolerance: " + str(tolerance))
##FRN bond (Isin: XS0090566539 COE 0 09/24/18)
##maturity occurs on a business day
floatingBondStartDate2 = Date(24,September,2004)
floatingBondMaturityDate2 = Date(24,September,2018)
floatingBondSchedule2 = Schedule(floatingBondStartDate2,
floatingBondMaturityDate2,
Period(Semiannual), bondCalendar,
ModifiedFollowing, ModifiedFollowing,
DateGeneration.Backward, False)
floatingBondLeg2 = list(IborLeg([self.faceAmount],floatingBondSchedule2, self.iborIndex,
Actual360(),ModifiedFollowing,[fixingDays],[],[0.0025],[],[],inArrears))
floatingbondRedemption2 = bondCalendar.adjust(floatingBondMaturityDate2, ModifiedFollowing)
floatingBondLeg2.append(SimpleCashFlow(100.0, floatingbondRedemption2))
## generic bond
floatingBond2 = Bond(settlementDays, bondCalendar, self.faceAmount,
floatingBondMaturityDate2, floatingBondStartDate2,
floatingBondLeg2)
floatingBond2.setPricingEngine(bondEngine)
## equivalent specialized floater
floatingSpecializedBond2 = FloatingRateBond(settlementDays, self.faceAmount,
floatingBondSchedule2,
self.iborIndex, Actual360(),
ModifiedFollowing, fixingDays,
[1],
[0.0025],
[], [],
inArrears,
100.0, Date(24,September,2004))
floatingSpecializedBond2.setPricingEngine(bondEngine)
setCouponPricer(floatingBond2.cashflows(), self.pricer)
setCouponPricer(floatingSpecializedBond2.cashflows(), self.pricer)
self.iborIndex.addFixing(Date(22,March,2007), 0.04013)
floatingBondPrice2 = floatingBond2.cleanPrice()
floatingSpecializedBondPrice2= floatingSpecializedBond2.cleanPrice()
floatingBondAssetSwap2 = AssetSwap(payFixedRate,
floatingBond2, floatingBondPrice2,
self.iborIndex, self.nonnullspread,
Schedule(),
self.iborIndex.dayCounter(),
parAssetSwap)
floatingBondAssetSwap2.setPricingEngine(swapEngine)
floatingSpecializedBondAssetSwap2 = AssetSwap(payFixedRate,
floatingSpecializedBond2,
floatingSpecializedBondPrice2,
self.iborIndex,
self.nonnullspread,
Schedule(),
self.iborIndex.dayCounter(),
parAssetSwap)
floatingSpecializedBondAssetSwap2.setPricingEngine(swapEngine)
floatingBondAssetSwapPrice2 = floatingBondAssetSwap2.fairCleanPrice()
floatingSpecializedBondAssetSwapPrice2 = floatingSpecializedBondAssetSwap2.fairCleanPrice()
error7 = abs(floatingBondAssetSwapPrice2-floatingSpecializedBondAssetSwapPrice2)
self.assertFalse(error7>tolerance,
"wrong clean price for frnbond:"
+ "\n generic frn rate bond's clean price: " + str(floatingBondAssetSwapPrice2)
+ "\n equivalent specialized frn bond's price: " + str(floatingSpecializedBondAssetSwapPrice2)
+ "\n error: " + str(error7)
+ "\n tolerance: " + str(tolerance))
## market executable price as of 4th sept 2007
floatingBondMktPrice2 = 101.26
floatingBondASW2 = AssetSwap(payFixedRate,
floatingBond2, floatingBondMktPrice2,
self.iborIndex, self.spread,
Schedule(),
self.iborIndex.dayCounter(),
parAssetSwap)
floatingBondASW2.setPricingEngine(swapEngine)
floatingSpecializedBondASW2 = AssetSwap(payFixedRate,
floatingSpecializedBond2,
floatingBondMktPrice2,
self.iborIndex, self.spread,
Schedule(),
self.iborIndex.dayCounter(),
parAssetSwap)
floatingSpecializedBondASW2.setPricingEngine(swapEngine)
floatingBondASWSpread2 = floatingBondASW2.fairSpread()
floatingSpecializedBondASWSpread2 = floatingSpecializedBondASW2.fairSpread()
error8 = abs(floatingBondASWSpread2-floatingSpecializedBondASWSpread2)
self.assertFalse(error8>tolerance,
"wrong asw spread for frn bond:"
+ "\n generic frn rate bond's asw spread: " + str(floatingBondASWSpread2)
+ "\n equivalent specialized bond's asw spread: " + str(floatingSpecializedBondASWSpread2)
+ "\n error: " + str(error8)
+ "\n tolerance: " + str(tolerance))
## CMS bond (Isin: XS0228052402 CRDIT 0 8/22/20)
## maturity doesn't occur on a business day
cmsBondStartDate1 = Date(22,August,2005)
cmsBondMaturityDate1 = Date(22,August,2020)
cmsBondSchedule1 = Schedule(cmsBondStartDate1,
cmsBondMaturityDate1,
Period(Annual), bondCalendar,
Unadjusted, Unadjusted,
DateGeneration.Backward, False)
cmsBondLeg1 = list(CmsLeg([self.faceAmount],cmsBondSchedule1, self.swapIndex,
Thirty360(),Following,[fixingDays], [],[],[0.055],[0.025],inArrears))
cmsbondRedemption1 = bondCalendar.adjust(cmsBondMaturityDate1,Following)
cmsBondLeg1.append(SimpleCashFlow(100.0, cmsbondRedemption1))
## generic cms bond
cmsBond1 = Bond(settlementDays, bondCalendar, self.faceAmount,
cmsBondMaturityDate1, cmsBondStartDate1, cmsBondLeg1)
cmsBond1.setPricingEngine(bondEngine)
## equivalent specialized cms bond
cmsSpecializedBond1 = CmsRateBond(settlementDays, self.faceAmount, cmsBondSchedule1,
self.swapIndex, Thirty360(),
Following, fixingDays,
[1.0], [0.0],
[0.055], [0.025],
inArrears,
100.0, Date(22,August,2005))
cmsSpecializedBond1.setPricingEngine(bondEngine)
setCouponPricer(cmsBond1.cashflows(), self.cmspricer)
setCouponPricer(cmsSpecializedBond1.cashflows(), self.cmspricer)
self.swapIndex.addFixing(Date(18,August,2006), 0.04158)
cmsBondPrice1 = cmsBond1.cleanPrice()
cmsSpecializedBondPrice1 = cmsSpecializedBond1.cleanPrice()
cmsBondAssetSwap1 = AssetSwap(payFixedRate,cmsBond1, cmsBondPrice1,
self.iborIndex, self.nonnullspread,
Schedule(),self.iborIndex.dayCounter(),
parAssetSwap)
cmsBondAssetSwap1.setPricingEngine(swapEngine)
cmsSpecializedBondAssetSwap1 = AssetSwap(payFixedRate,cmsSpecializedBond1,
cmsSpecializedBondPrice1,
self.iborIndex,
self.nonnullspread,
Schedule(),
self.iborIndex.dayCounter(),
parAssetSwap)
cmsSpecializedBondAssetSwap1.setPricingEngine(swapEngine)
cmsBondAssetSwapPrice1 = cmsBondAssetSwap1.fairCleanPrice()
cmsSpecializedBondAssetSwapPrice1 = cmsSpecializedBondAssetSwap1.fairCleanPrice()
error9 = abs(cmsBondAssetSwapPrice1-cmsSpecializedBondAssetSwapPrice1)
self.assertFalse(error9>tolerance,
"wrong clean price for cmsbond:"
+ "\n generic bond's clean price: " + str(cmsBondAssetSwapPrice1)
+ "\n equivalent specialized cms rate bond's price: " + str(cmsSpecializedBondAssetSwapPrice1)
+ "\n error: " + str(error9)
+ "\n tolerance: " + str(tolerance))
cmsBondMktPrice1 = 87.02## market executable price as of 4th sept 2007
cmsBondASW1 = AssetSwap(payFixedRate,
cmsBond1, cmsBondMktPrice1,
self.iborIndex, self.spread,
Schedule(),
self.iborIndex.dayCounter(),
parAssetSwap)
cmsBondASW1.setPricingEngine(swapEngine)
cmsSpecializedBondASW1 = AssetSwap(payFixedRate,
cmsSpecializedBond1,
cmsBondMktPrice1,
self.iborIndex, self.spread,
Schedule(),
self.iborIndex.dayCounter(),
parAssetSwap)
cmsSpecializedBondASW1.setPricingEngine(swapEngine)
cmsBondASWSpread1 = cmsBondASW1.fairSpread()
cmsSpecializedBondASWSpread1 = cmsSpecializedBondASW1.fairSpread()
error10 = abs(cmsBondASWSpread1-cmsSpecializedBondASWSpread1)
self.assertFalse(error10>tolerance,
"wrong asw spread for cm bond:"
+ "\n generic cms rate bond's asw spread: " + str(cmsBondASWSpread1)
+ "\n equivalent specialized bond's asw spread: " + str(cmsSpecializedBondASWSpread1)
+ "\n error: " + str(error10)
+ "\n tolerance: " + str(tolerance))
##CMS bond (Isin: XS0218766664 ISPIM 0 5/6/15)
##maturity occurs on a business day
cmsBondStartDate2 = Date(6,May,2005)
cmsBondMaturityDate2 = Date(6,May,2015)
cmsBondSchedule2 = Schedule(cmsBondStartDate2,
cmsBondMaturityDate2,
Period(Annual), bondCalendar,
Unadjusted, Unadjusted,
DateGeneration.Backward, False)
cmsBondLeg2 = list(CmsLeg([self.faceAmount],cmsBondSchedule2, self.swapIndex,
Thirty360(), Following, [fixingDays] , [0.84],[],[],[],inArrears))
cmsbondRedemption2 = bondCalendar.adjust(cmsBondMaturityDate2,
Following)
cmsBondLeg2.append(SimpleCashFlow(100.0, cmsbondRedemption2))
## generic bond
cmsBond2 = Bond(settlementDays, bondCalendar, self.faceAmount,
cmsBondMaturityDate2, cmsBondStartDate2, cmsBondLeg2)
cmsBond2.setPricingEngine(bondEngine)
## equivalent specialized cms bond
cmsSpecializedBond2 = CmsRateBond(settlementDays, self.faceAmount, cmsBondSchedule2,
self.swapIndex, Thirty360(),
Following, fixingDays,
[0.84], [0.0],
[], [],
inArrears,
100.0, Date(6,May,2005))
cmsSpecializedBond2.setPricingEngine(bondEngine)
setCouponPricer(cmsBond2.cashflows(), self.cmspricer)
setCouponPricer(cmsSpecializedBond2.cashflows(), self.cmspricer)
self.swapIndex.addFixing(Date(4,May,2006), 0.04217)
cmsBondPrice2 = cmsBond2.cleanPrice()
cmsSpecializedBondPrice2 = cmsSpecializedBond2.cleanPrice()
cmsBondAssetSwap2 = AssetSwap(payFixedRate,cmsBond2, cmsBondPrice2,
self.iborIndex, self.nonnullspread,
Schedule(),
self.iborIndex.dayCounter(),
parAssetSwap)
cmsBondAssetSwap2.setPricingEngine(swapEngine)
cmsSpecializedBondAssetSwap2 = AssetSwap(payFixedRate,cmsSpecializedBond2,
cmsSpecializedBondPrice2,
self.iborIndex,
self.nonnullspread,
Schedule(),
self.iborIndex.dayCounter(),
parAssetSwap)
cmsSpecializedBondAssetSwap2.setPricingEngine(swapEngine)
cmsBondAssetSwapPrice2 = cmsBondAssetSwap2.fairCleanPrice()
cmsSpecializedBondAssetSwapPrice2 = cmsSpecializedBondAssetSwap2.fairCleanPrice()
error11 = abs(cmsBondAssetSwapPrice2-cmsSpecializedBondAssetSwapPrice2)
self.assertFalse(error11>tolerance,
"wrong clean price for cmsbond:"
+ "\n generic bond's clean price: " + str(cmsBondAssetSwapPrice2)
+ "\n equivalent specialized cms rate bond's price: " + str(cmsSpecializedBondAssetSwapPrice2)
+ "\n error: " + str(error11)
+ "\n tolerance: " + str(tolerance))
cmsBondMktPrice2 = 94.35## market executable price as of 4th sept 2007
cmsBondASW2 = AssetSwap(payFixedRate,
cmsBond2, cmsBondMktPrice2,
self.iborIndex, self.spread,
Schedule(),
self.iborIndex.dayCounter(),
parAssetSwap)
cmsBondASW2.setPricingEngine(swapEngine)
cmsSpecializedBondASW2 = AssetSwap(payFixedRate,
cmsSpecializedBond2,
cmsBondMktPrice2,
self.iborIndex, self.spread,
Schedule(),
self.iborIndex.dayCounter(),
parAssetSwap)
cmsSpecializedBondASW2.setPricingEngine(swapEngine)
cmsBondASWSpread2 = cmsBondASW2.fairSpread()
cmsSpecializedBondASWSpread2 = cmsSpecializedBondASW2.fairSpread()
error12 = abs(cmsBondASWSpread2-cmsSpecializedBondASWSpread2)
self.assertFalse(error12>tolerance,
"wrong asw spread for cm bond:"
+ "\n generic cms rate bond's asw spread: " + str(cmsBondASWSpread2)
+ "\n equivalent specialized bond's asw spread: " + str(cmsSpecializedBondASWSpread2)
+ "\n error: " + str(error12)
+ "\n tolerance: " + str(tolerance))
## Zero-Coupon bond (Isin: DE0004771662 IBRD 0 12/20/15)
## maturity doesn't occur on a business day
zeroCpnBondStartDate1 = Date(19,December,1985)
zeroCpnBondMaturityDate1 = Date(20,December,2015)
zeroCpnBondRedemption1 = bondCalendar.adjust(zeroCpnBondMaturityDate1, Following)
zeroCpnBondLeg1 = Leg([SimpleCashFlow(100.0, zeroCpnBondRedemption1)])
## generic bond
zeroCpnBond1 = Bond(settlementDays, bondCalendar, self.faceAmount,
zeroCpnBondMaturityDate1, zeroCpnBondStartDate1, zeroCpnBondLeg1)
zeroCpnBond1.setPricingEngine(bondEngine)
## specialized zerocpn bond
zeroCpnSpecializedBond1 = ZeroCouponBond(settlementDays, bondCalendar, self.faceAmount,
Date(20,December,2015),
Following,
100.0, Date(19,December,1985))
zeroCpnSpecializedBond1.setPricingEngine(bondEngine)
zeroCpnBondPrice1 = zeroCpnBond1.cleanPrice()
zeroCpnSpecializedBondPrice1 = zeroCpnSpecializedBond1.cleanPrice()
zeroCpnBondAssetSwap1 = AssetSwap(payFixedRate,zeroCpnBond1,
zeroCpnBondPrice1,
self.iborIndex, self.nonnullspread,
Schedule(),
self.iborIndex.dayCounter(),
parAssetSwap)
zeroCpnBondAssetSwap1.setPricingEngine(swapEngine)
zeroCpnSpecializedBondAssetSwap1 = AssetSwap(payFixedRate,
zeroCpnSpecializedBond1,
zeroCpnSpecializedBondPrice1,
self.iborIndex,
self.nonnullspread,
Schedule(),
self.iborIndex.dayCounter(),
parAssetSwap)
zeroCpnSpecializedBondAssetSwap1.setPricingEngine(swapEngine)
zeroCpnBondAssetSwapPrice1 = zeroCpnBondAssetSwap1.fairCleanPrice()
zeroCpnSpecializedBondAssetSwapPrice1 = zeroCpnSpecializedBondAssetSwap1.fairCleanPrice()
error13 = abs(zeroCpnBondAssetSwapPrice1-zeroCpnSpecializedBondAssetSwapPrice1)
self.assertFalse(error13>tolerance,
"wrong clean price for zerocpn bond:"
+ "\n generic zero cpn bond's clean price: " + str(zeroCpnBondAssetSwapPrice1)
+ "\n specialized equivalent bond's price: " + str(zeroCpnSpecializedBondAssetSwapPrice1)
+ "\n error: " + str(error13)
+ "\n tolerance: " + str(tolerance))
## market executable price as of 4th sept 2007
zeroCpnBondMktPrice1 = 72.277
zeroCpnBondASW1 = AssetSwap(payFixedRate,
zeroCpnBond1,zeroCpnBondMktPrice1,
self.iborIndex, self.spread,
Schedule(),
self.iborIndex.dayCounter(),
parAssetSwap)
zeroCpnBondASW1.setPricingEngine(swapEngine)
zeroCpnSpecializedBondASW1 = AssetSwap(payFixedRate,
zeroCpnSpecializedBond1,
zeroCpnBondMktPrice1,
self.iborIndex, self.spread,
Schedule(),
self.iborIndex.dayCounter(),
parAssetSwap)
zeroCpnSpecializedBondASW1.setPricingEngine(swapEngine)
zeroCpnBondASWSpread1 = zeroCpnBondASW1.fairSpread()
zeroCpnSpecializedBondASWSpread1 = zeroCpnSpecializedBondASW1.fairSpread()
error14 = abs(zeroCpnBondASWSpread1-zeroCpnSpecializedBondASWSpread1)
self.assertFalse(error14>tolerance,
"wrong asw spread for zeroCpn bond:"
+ "\n generic zeroCpn bond's asw spread: " + str(zeroCpnBondASWSpread1)
+ "\n equivalent specialized bond's asw spread: " + str(zeroCpnSpecializedBondASWSpread1)
+ "\n error: " + str(error14)
+ "\n tolerance: " + str(tolerance))
## Zero Coupon bond (Isin: IT0001200390 ISPIM 0 02/17/28)
## maturity doesn't occur on a business day
zeroCpnBondStartDate2 = Date(17,February,1998)
zeroCpnBondMaturityDate2 = Date(17,February,2028)
zerocpbondRedemption2 = bondCalendar.adjust(zeroCpnBondMaturityDate2, Following)
zeroCpnBondLeg2 = Leg([SimpleCashFlow(100.0, zerocpbondRedemption2)])
## generic bond
zeroCpnBond2 = Bond(settlementDays, bondCalendar, self.faceAmount,
zeroCpnBondMaturityDate2, zeroCpnBondStartDate2, zeroCpnBondLeg2)
zeroCpnBond2.setPricingEngine(bondEngine)
## specialized zerocpn bond
zeroCpnSpecializedBond2 = ZeroCouponBond(settlementDays, bondCalendar, self.faceAmount,
Date(17,February,2028),
Following,
100.0, Date(17,February,1998))
zeroCpnSpecializedBond2.setPricingEngine(bondEngine)
zeroCpnBondPrice2 = zeroCpnBond2.cleanPrice()
zeroCpnSpecializedBondPrice2 = zeroCpnSpecializedBond2.cleanPrice()
zeroCpnBondAssetSwap2 = AssetSwap(payFixedRate,zeroCpnBond2,
zeroCpnBondPrice2,
self.iborIndex, self.nonnullspread,
Schedule(),
self.iborIndex.dayCounter(),
parAssetSwap)
zeroCpnBondAssetSwap2.setPricingEngine(swapEngine)
zeroCpnSpecializedBondAssetSwap2 = AssetSwap(payFixedRate,
zeroCpnSpecializedBond2,
zeroCpnSpecializedBondPrice2,
self.iborIndex,
self.nonnullspread,
Schedule(),
self.iborIndex.dayCounter(),
parAssetSwap)
zeroCpnSpecializedBondAssetSwap2.setPricingEngine(swapEngine)
zeroCpnBondAssetSwapPrice2 = zeroCpnBondAssetSwap2.fairCleanPrice()
zeroCpnSpecializedBondAssetSwapPrice2 = zeroCpnSpecializedBondAssetSwap2.fairCleanPrice()
error15 = abs(zeroCpnBondAssetSwapPrice2 -zeroCpnSpecializedBondAssetSwapPrice2)
self.assertFalse(error8>tolerance,
"wrong clean price for zerocpn bond:"
+ "\n generic zero cpn bond's clean price: " + str(zeroCpnBondAssetSwapPrice2)
+ "\n equivalent specialized bond's price: " + str(zeroCpnSpecializedBondAssetSwapPrice2)
+ "\n error: " + str(error15)
+ "\n tolerance: " + str(tolerance))
## market executable price as of 4th sept 2007
zeroCpnBondMktPrice2 = 72.277
zeroCpnBondASW2 = AssetSwap(payFixedRate,
zeroCpnBond2,zeroCpnBondMktPrice2,
self.iborIndex, self.spread,
Schedule(),
self.iborIndex.dayCounter(),
parAssetSwap)
zeroCpnBondASW2.setPricingEngine(swapEngine)
zeroCpnSpecializedBondASW2 = AssetSwap(payFixedRate,
zeroCpnSpecializedBond2,
zeroCpnBondMktPrice2,
self.iborIndex, self.spread,
Schedule(),
self.iborIndex.dayCounter(),
parAssetSwap)
zeroCpnSpecializedBondASW2.setPricingEngine(swapEngine)
zeroCpnBondASWSpread2 = zeroCpnBondASW2.fairSpread()
zeroCpnSpecializedBondASWSpread2 = zeroCpnSpecializedBondASW2.fairSpread()
error16 = abs(zeroCpnBondASWSpread2-zeroCpnSpecializedBondASWSpread2)
self.assertFalse(error16>tolerance,
"wrong asw spread for zeroCpn bond:"
+ "\n generic zeroCpn bond's asw spread: " + str(zeroCpnBondASWSpread2)
+ "\n equivalent specialized bond's asw spread: " + str(zeroCpnSpecializedBondASWSpread2)
+ "\n error: " + str(error16)
+ "\n tolerance: " + str(tolerance))
if __name__ == '__main__':
    # Run the AssetSwap test suite with verbose per-test reporting.
    print('testing QuantLib ' + QuantLib.__version__)
    suite = unittest.TestSuite()
    # unittest.makeSuite() is deprecated since Python 3.11 (removed in 3.13);
    # TestLoader.loadTestsFromTestCase collects the same 'test*' methods.
    suite.addTest(unittest.TestLoader().loadTestsFromTestCase(AssetSwapTest))
    unittest.TextTestRunner(verbosity=2).run(suite)
| StarcoderdataPython |
3531360 | # hitme_game/routing.py
# WebSocket URL routing for the hitme_game app (consumed by the Channels
# ProtocolTypeRouter in the project's ASGI configuration).
#
# django.conf.urls.url() was deprecated in Django 3.1 and removed in 4.0;
# re_path() is its direct, behavior-identical replacement.
from django.urls import path, re_path

from . import consumers

websocket_urlpatterns = [
    # NOTE(review): Channels 3+ expects consumer classes to be passed as
    # consumers.LobbyConsumer.as_asgi() — confirm the installed Channels
    # version before changing; Channels 2 takes the class directly as here.
    re_path(r'^ws/lobby/$', consumers.LobbyConsumer),
    path('ws/game/<game_url>/', consumers.GameRoomConsumer),
]
| StarcoderdataPython |
1948215 | <gh_stars>0
from discord.ext import commands
import discord
from discord.utils import get
class FeedbackCog(commands.Cog):
    """Cog that relays user feedback to a fixed staff channel as an embed."""

    # Channel that receives the forwarded feedback embeds.
    FEEDBACK_CHANNEL_ID = 826078825649668136
    EMBED_COLOR = 0x22a7f0

    def __init__(self, bot):
        self.bot = bot

    @commands.command()
    async def botfeedback(self, ctx):
        """Forward the invoking message (and a .png screenshot, if attached)
        to the feedback channel, then confirm to the user.

        Fixes over the previous version:
        - the two near-identical branches are merged (the embeds differed
          only by an optional image);
        - the attachment URL is read from Attachment.url instead of being
          scraped out of the attachment list's repr with string splitting;
        - feedback with non-png attachments is no longer silently dropped:
          the text is still forwarded, just without an image.
        """
        channel = self.bot.get_channel(self.FEEDBACK_CHANNEL_ID)
        # Strip the command prefix; 12 == len('!botfeedback') per the
        # original slice — assumes a single-character command prefix.
        feedback_text = ctx.message.content[12:]

        feedback_embed = discord.Embed(color=self.EMBED_COLOR)
        feedback_embed.add_field(name='Feedback', value=feedback_text)
        feedback_embed.set_footer(text=f'Send from {ctx.message.author}',
                                  icon_url=ctx.message.author.avatar_url)

        # Attach the first .png screenshot, if any.
        png_attachment = next(
            (a for a in ctx.message.attachments
             if a.filename.lower().endswith('.png')),
            None)
        if png_attachment is not None:
            feedback_embed.set_image(url=png_attachment.url)

        await channel.send(embed=feedback_embed)

        answer_embed = discord.Embed(color=self.EMBED_COLOR)
        answer_embed.add_field(name='Thank you for your Feedback!',
                               value='It was send successfully!')
        answer_embed.set_footer(text=ctx.message.author.name,
                                icon_url=ctx.message.author.avatar_url)
        await ctx.channel.send(embed=answer_embed)
def setup(bot):
    # Extension entry point called by bot.load_extension(); registers the cog.
    bot.add_cog(FeedbackCog(bot))
| StarcoderdataPython |
5010246 | <filename>stephen/week3.py
from collections import Counter
from distutils.archive_util import make_zipfile
from typing import List, Tuple
def get_rainfall(data: List[Tuple[str, int]]) -> dict:
    """Accumulate total rainfall per city.

    data: sequence of (city, rainfall) pairs; a city may appear repeatedly.
    Returns a dict mapping each city to the sum of its rainfall readings.
    """
    rainfall_by_city: dict = {}
    for city, rain in data:
        if city not in rainfall_by_city:
            rainfall_by_city[city] = 0
        rainfall_by_city[city] += rain
    return rainfall_by_city
# Same result as get_rainfall, but relies on dict.get(key, default): when the
# city has not been seen yet, 0 is used as the starting total, so no explicit
# membership test is needed.
def get_rainfall2(data: List[tuple]) -> dict:
    """Accumulate total rainfall per city using dict.get for the default."""
    rainfall_totals = {}
    for location, amount in data:
        rainfall_totals[location] = rainfall_totals.get(location, 0) + amount
    return rainfall_totals
# Quick manual check; expected output: {'boston': 15, 'sf': 8, 'seattle': 20}
print(get_rainfall2([("boston", 10), ("sf", 5),
      ("seattle", 20), ("sf", 3), ("boston", 5)]))
def take_order():
    """Interactively take food orders against a fixed inventory.

    Prompts repeatedly for input of the form "item" or "item, amount",
    decrementing the remaining stock on each successful order. Enter
    'quit' to leave the loop (previously there was no way to exit).

    Fixes over the previous version:
    - on a non-numeric amount the code printed an error but then fell
      through and processed the order anyway with amount=1; it now
      re-prompts instead;
    - non-positive amounts are rejected (a negative amount would have
      *increased* the stock via MENU[item] -= amount).
    """
    MENU = {'sandwich': 10, 'tea': 7, 'salad': 9}
    while True:
        print(MENU)
        order = input(
            "please order an item and amount (optional), i.e 'salad, 3' \n").split(", ")
        item = order[0]
        if item == 'quit':
            break
        amount = 1
        if len(order) > 1:
            try:
                amount = int(order[1])
            except ValueError:
                print("amount not a number, please try again")
                continue  # re-prompt; do NOT process the order with amount=1
            if amount < 1:
                print("amount must be a positive number, please try again")
                continue
        if item not in MENU:
            print("item not sold here")
        elif MENU[item] >= amount:
            MENU[item] -= amount
        else:
            print("not enough {} left".format(item))
def most_repeating_chars(words):
    """Return the first word whose most frequent character count is maximal.

    Example: in ['test', 'elementary'] the word 'elementary' wins because
    'e' occurs 3 times, beating 't' (2 times) in 'test'. Ties go to the
    earliest word, matching max()'s first-maximum behavior.
    """
    def peak_char_count(word):
        # Highest occurrence count of any single character in the word.
        return max(Counter(word).values())

    return max(words, key=peak_char_count)
# Returns the most frequently appearing word in words.
def most_frequent(words):
    """Return the word with the highest occurrence count in *words*.

    Ties are broken in favor of the word that *reaches* the winning count
    first while scanning left to right (so for ['a','b','b','a'] the answer
    is 'b', since 'b' hits count 2 before 'a' does). Counter.most_common
    would break ties by first appearance instead, so a running tally is
    kept by hand to preserve that behavior.
    """
    running_counts = {}
    winner, best_count = words[0], 1
    for word in words:
        count = running_counts.get(word, 0) + 1
        running_counts[word] = count
        if count > best_count:
            best_count, winner = count, word
    return winner
# Quick manual check; expected output: 'elementary' ('e' occurs 3 times)
print(most_repeating_chars(
    ['this', 'is', 'an', 'elementary', 'test', 'example']))
| StarcoderdataPython |
3234110 | <reponame>drlim2u/Shuttle-Bus-Educational-Tool
from common.Scope import Scope
from common.TokenType import TokenType
class Function(Scope):
    """A named function scope used during compilation of the whole program.

    On construction the first instruction (the function header) is consumed
    to extract the function's name, and the final instruction (the closing
    line) is dropped, leaving only the body instructions in the scope.
    """

    def __init__(self, line_number, instructions):
        """Build a Function scope.

        :param line_number: line at which the function is defined
        :param instructions: the program to be compiled
        """
        super().__init__(line_number, instructions)
        # The header token pair carries the name in position 1.
        header = self._instructions.pop(0)
        self.__name = header[1]
        # Discard the trailing instruction that closes the function.
        self._instructions.pop()
        self._type = TokenType.SCOPE_FUNCTION

    def get_name(self):
        """Return the function's name as a string."""
        return self.__name
if __name__ == '__main__':
    # This module is a library, not an entry point: tell the user to run
    # the real launcher instead, and keep the console window open.
    print('Please save your instructions and run run.py')
    input('Press enter to close window . . . ')
| StarcoderdataPython |
8091887 | <filename>heavytailed/lognormal.py
from .base_distribution import distribution
import numpy as np
from scipy.stats import norm
from scipy.optimize import minimize
class lognormal(distribution):
    '''
    Discrete log-normal distributions, given by
    ln(x) ~ Normal(mu, sigma^2)
    More specificly:
    P(k)=(Phi((log(k+1)-mu)/sigma)-Phi((log(k)-mu)/sigma))
    / (1-Phi((log(k_min)-mu)/sigma))
    '''
    # change these values when necessary
    # especially when the returned likelihood is np.nan
    para_limits = {'mu': (-100, 100), 'sigma': (1 + 1e-2, 100.)}
    init_values = (0., 2.)
    def __init__(self):
        """Set the display name and parameter count for this distribution."""
        super(lognormal, self).__init__()
        self.name = 'log-normal'
        self.n_para = 2
    def _norm_factor_i(self, i, mu, sigma):
        """Return the (unnormalized) log probability mass of the bin k = i,
        i.e. log(Phi((log(i+1)-mu)/sigma) - Phi((log(i)-mu)/sigma))."""
        normcdf_numerator = (norm.cdf((np.log(i + 1) - mu) / sigma
                                      ) - norm.cdf((np.log(i) - mu) / sigma))
        if normcdf_numerator > 0:
            normcdf = np.log(normcdf_numerator)
        else:
            # The CDF difference underflowed to 0: approximate the bin mass
            # by (normal log-pdf at log(i)) + log(bin width in z-units),
            # i.e. log(phi(z) * dz), which stays finite in the far tail.
            normcdf = -np.log(2 * np.pi) / 2 - \
                ((np.log(i) - mu) / sigma)**2 / 2
            normcdf += np.log((np.log(i + 1) - np.log(i)) / sigma)
        return normcdf
    def _check_zero_log(self, normfactor, temp_z):
        """Return log(normfactor), falling back to a tail approximation of
        log(Phi(temp_z)) when normfactor underflowed to exactly 0."""
        if normfactor == 0:
            # Asymptotic approximation of log(Phi(z)) for large negative z.
            # NOTE(review): the constants 1.40007 and 1.04557 appear to be
            # fitted/derived coefficients of this tail expansion — TODO
            # confirm their derivation before changing them.
            return np.log(1 - np.e**(1.40007 * temp_z)
                          ) - np.log(-temp_z) - temp_z**2 / 2 - 1.04557
        else:
            return np.log(normfactor)
    def _loglikelihood(self, mu_sigma, freq, xmin, N):
        """Return the NEGATIVE log-likelihood of (mu, sigma) given the
        frequency table `freq` (columns: value, ..., count), truncated at
        xmin with N total observations; negated for use with minimize()."""
        mu, sigma = mu_sigma
        # z-score of the truncation point; log normalization constant
        # log(1 - Phi((log(xmin)-mu)/sigma)) with underflow protection.
        temp_z = -(np.log(xmin) - mu) / sigma
        normfactor = self._check_zero_log(norm.cdf(temp_z), temp_z)
        lognormfactor = np.array(list(self._norm_factor_i(i, mu, sigma)
                                      for i in freq[:, 0]))
        # Weighted sum of per-bin log masses, minus N * log(normalization).
        lognormsum = np.sum(lognormfactor * freq[:, -1])
        logll = lognormsum - N * normfactor
        return -logll
    def _fitting(self, xmin=1):
        """Fit (mu, sigma) by bounded SLSQP maximum likelihood on the data
        truncated at `xmin`; return ((params, logL, AIC), fitted dict)."""
        freq = self.freq[self.freq[:, 0] >= xmin]
        N = np.sum(freq[:, -1])
        # Cache the truncated sample size per xmin.
        if xmin not in self.N_xmin:
            self.N_xmin[xmin] = N
        res2 = minimize(self._loglikelihood, x0=self.init_values,
                        args=(freq, xmin, N),
                        method='SLSQP', tol=1e-8,
                        bounds=(self.para_limits['mu'],
                                self.para_limits['sigma']))
        # AIC = 2k - 2*logL, with k = number of fitted parameters.
        aic = 2 * res2.fun + 2 * self.n_para
        fits = {}
        fits['mu'] = res2.x[0]
        fits['sigma'] = res2.x[1]
        return (res2.x, -res2.fun, aic), fits
    def _get_ccdf(self, xmin):
        """Return the fitted complementary CDF over [xmin, self.xmax) as an
        array of (x, P(X > x)) rows, using the parameters fitted at xmin."""
        mu = self.fitting_res[xmin][1]['mu']
        sigma = self.fitting_res[xmin][1]['sigma']
        total, ccdf = 1., []
        temp_z = -(np.log(xmin) - mu) / sigma
        normfactor = self._check_zero_log(norm.cdf(temp_z), temp_z)
        for x in range(xmin, self.xmax):
            # Subtract the normalized mass of each bin from the tail total.
            total -= np.exp(self._norm_factor_i(x, mu, sigma) - normfactor)
            ccdf.append([x, total])
        return np.asarray(ccdf)
| StarcoderdataPython |
4332 | # Create your views here.
from .models import Mfund
import plotly.graph_objects as go
from plotly.offline import plot
from plotly.tools import make_subplots
from django.db.models import Q
from django.conf import settings
from django.shortcuts import redirect
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
from django.views.generic.list import ListView
from django.views import View
from django.db.models import OuterRef, Subquery, Count, Sum, Max, Min
from django.db.models.functions import Trim, Lower, Round
import pandas as pd
import csv, io
import openpyxl
from django.contrib import messages
from django.urls import reverse
from django.http import HttpResponseRedirect
from django_gotolong.lastrefd.models import Lastrefd, lastrefd_update
from django_gotolong.broker.icidir.imf.models import BrokerIcidirMf
def Mfund_url():
    """Return the placeholder refresh URL exposed to the list templates."""
    return "unused-mfund-refresh-url"
class MfundListView(ListView):
    """Plain listing of the logged-in user's mutual-fund holdings."""

    model = Mfund
    # Pagination / ordering hooks, intentionally left disabled:
    # paginate_by = 300
    # filter_backends = [filters.OrderingFilter,]
    # ordering_fields = ['sno', 'nse_symbol']

    @method_decorator(login_required)
    def dispatch(self, *args, **kwargs):
        # Require an authenticated session before any HTTP verb is handled.
        return super().dispatch(*args, **kwargs)

    def get_queryset(self):
        # Only rows belonging to the current user.
        return Mfund.objects.all().filter(mf_user_id=self.request.user.id)

    def get_context_data(self, **kwargs):
        ctx = super().get_context_data(**kwargs)
        ctx["refresh_url"] = Mfund_url()
        return ctx
class MfundListView_Amount(ListView):
    """Holdings for the logged-in user, largest current value first."""

    model = Mfund

    def get_queryset(self):
        return (Mfund.objects.all()
                .filter(mf_user_id=self.request.user.id)
                .order_by('-mf_nav_value'))

    def get_context_data(self, **kwargs):
        ctx = super().get_context_data(**kwargs)
        ctx["refresh_url"] = Mfund_url()
        return ctx
class MfundListView_AMC(ListView):
    """Holdings grouped by AMC, then category and sub-category."""

    model = Mfund

    def get_queryset(self):
        return (Mfund.objects.all()
                .filter(mf_user_id=self.request.user.id)
                .order_by('mf_amc', 'mf_category', 'mf_subcat',
                          '-mf_nav_value'))

    def get_context_data(self, **kwargs):
        ctx = super().get_context_data(**kwargs)
        ctx["refresh_url"] = Mfund_url()
        return ctx
class MfundListView_AMC_Amount(ListView):
    """Aggregate holdings per AMC and render a pie chart of the split.

    The template receives ``sum_total`` (portfolio total as an int) and
    ``plot_div_1`` (a Plotly <div> with the per-AMC pie chart).
    """

    model = Mfund

    def get_queryset(self):
        # One row per AMC with the summed current value, non-empty AMCs
        # only, largest first.  Kept on self for get_context_data.
        self.queryset = Mfund.objects.all().filter(mf_user_id=self.request.user.id). \
            values('mf_amc').annotate(scheme_sum=Sum('mf_nav_value')). \
            exclude(scheme_sum=0.0).order_by('-scheme_sum')
        return self.queryset

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        # AMC groups are unique, so a dict comprehension is lossless.
        totals = {row['mf_amc']: row['scheme_sum'] for row in self.queryset}
        context['sum_total'] = int(sum(totals.values()))
        # Plotly expects parallel label/value sequences; sort ascending
        # by value as before.
        labels = sorted(totals, key=totals.get)
        values = [totals[label] for label in labels]
        fig = go.Figure(data=[go.Pie(labels=labels, values=values)])
        fig.update_traces(textposition='inside', textinfo='percent+label')
        context['plot_div_1'] = plot(fig, output_type='div',
                                     include_plotlyjs=False)
        return context
class MfundListView_Category(ListView):
    """Holdings ordered by category, sub-category, then value."""

    model = Mfund

    def get_queryset(self):
        return (Mfund.objects.all()
                .filter(mf_user_id=self.request.user.id)
                .order_by('mf_category', 'mf_subcat', '-mf_nav_value'))

    def get_context_data(self, **kwargs):
        ctx = super().get_context_data(**kwargs)
        ctx["refresh_url"] = Mfund_url()
        return ctx
class MfundListView_Subcat(ListView):
    """Holdings ordered by sub-category, then current value."""

    model = Mfund

    def get_queryset(self):
        return (Mfund.objects.all()
                .filter(mf_user_id=self.request.user.id)
                .order_by('mf_subcat', '-mf_nav_value'))

    def get_context_data(self, **kwargs):
        ctx = super().get_context_data(**kwargs)
        ctx["refresh_url"] = Mfund_url()
        return ctx
class MfundListView_Reco(ListView):
    """Holdings ordered by research recommendation, then rating (desc)."""

    model = Mfund

    def get_queryset(self):
        return (Mfund.objects.all()
                .filter(mf_user_id=self.request.user.id)
                .order_by('mf_research_reco', '-mf_rating'))

    def get_context_data(self, **kwargs):
        ctx = super().get_context_data(**kwargs)
        ctx["refresh_url"] = Mfund_url()
        return ctx
class MfundListView_SubcatAmount(ListView):
    """Aggregate holdings per sub-category and render a pie chart.

    The template receives ``sum_total`` (portfolio total as an int) and
    ``plot_div_1`` (a Plotly <div> with the per-sub-category pie chart).
    """

    model = Mfund

    def get_queryset(self):
        # One row per sub-category with the summed current value,
        # non-empty groups only, largest first.  Kept on self for
        # get_context_data.
        self.queryset = Mfund.objects.all().filter(mf_user_id=self.request.user.id). \
            values('mf_subcat').annotate(scheme_sum=Sum('mf_nav_value')). \
            exclude(scheme_sum=0.0).order_by('-scheme_sum')
        return self.queryset

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        # Sub-category groups are unique, so this mapping is lossless.
        totals = {row['mf_subcat']: row['scheme_sum'] for row in self.queryset}
        context['sum_total'] = int(sum(totals.values()))
        # Parallel label/value sequences, sorted ascending by value.
        labels = sorted(totals, key=totals.get)
        values = [totals[label] for label in labels]
        fig = go.Figure(data=[go.Pie(labels=labels, values=values)])
        fig.update_traces(textposition='inside', textinfo='percent+label')
        context['plot_div_1'] = plot(fig, output_type='div',
                                     include_plotlyjs=False)
        return context
class MfundRefreshView(View):
    """Rebuild the logged-in user's Mfund holdings from the ICICI Direct
    broker import (BrokerIcidirMf), then redirect to the holdings list."""

    def get(self, request):
        """Trigger a refresh and bounce back to the holdings list page."""
        self.mfund_refresh(request)
        return HttpResponseRedirect(reverse("mfund-list"))

    def mfund_refresh(self, request):
        """Replace this user's Mfund rows with fresh broker data.

        Deletes only rows belonging to ``request.user``, then creates one
        Mfund row per broker scheme that still has a non-zero unit count,
        and finally stamps the "mfund" dataset as refreshed.
        """
        # Remove the user's existing holdings before re-importing.
        Mfund.objects.all().filter(mf_user_id=request.user.id).delete()
        # mf_id is assigned manually; continue from the global maximum so
        # ids never collide with rows belonging to other users.
        max_mf_id = Mfund.objects.aggregate(max_id=Max('mf_id'))['max_id']
        if max_mf_id is None:
            max_mf_id = 0
        unique_id = max_mf_id
        for brec in BrokerIcidirMf.objects.all().filter(bim_user_id=request.user.id):
            unique_id += 1
            # Skip schemes that have been fully redeemed (zero units).
            if int(float(brec.bim_units)) == 0:
                continue
            Mfund.objects.update_or_create(
                mf_id=unique_id,
                mf_user_id=request.user.id,
                mf_broker='icidir',
                mf_amc=brec.bim_amc,
                mf_name=brec.bim_name,
                mf_category=brec.bim_category,
                mf_subcat=brec.bim_subcat,
                mf_rating=brec.bim_rating,
                mf_cost_value=brec.bim_cost_value,
                mf_nav_value=brec.bim_nav_value,
                mf_research_reco=brec.bim_research_reco
            )
        # Record the refresh timestamp for the "mfund" dataset.
        lastrefd_update("mfund")
| StarcoderdataPython |
9612299 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
simple boto instance example
'''
# import argparse
from collections import defaultdict
import boto3
# Simple boto3 walkthrough: enumerate EC2 instances and print a summary
# of every running instance.
EC2 = boto3.resource('ec2')
SESSION = boto3.Session()
REGIONS = SESSION.get_available_regions('ec2')
# print(REGIONS)
EC2CLIENT = SESSION.client('ec2', 'us-west-1')
EC2_INSTANCES = EC2CLIENT.describe_instances()
EC2TAGS = EC2CLIENT.describe_tags()
# print(EC2_INSTANCES)
EC2R = SESSION.resource('ec2')
# Parenthesized single-argument prints behave identically on Python 2
# and are valid Python 3.
print(EC2R.Instance)
print(EC2R.Tag)
print(EC2R.instances)
print(EC2R.internet_gateways)
for i in EC2R.instances.all():
    print(i)
EC2W3 = boto3.resource('ec2')
# Get information for all running instances
RUNNING_INSTANCES = EC2W3.instances.filter(Filters=[{
    'Name': 'instance-state-name',
    'Values': ['running']}])
EC2INFO = defaultdict()
for instance in RUNNING_INSTANCES:
    # Fall back to the instance id so 'name' is always bound, even for
    # instances with no tags or no Name tag (previously a NameError /
    # stale-value bug).
    name = instance.id
    for tag in instance.tags or []:
        if 'Name' in tag['Key']:
            name = tag['Value']
    # Add instance info to a dictionary.  'Private IP' and 'Public IP'
    # were listed in ATTRIBUTES but never stored, which raised KeyError
    # in the report loop below.
    EC2INFO[instance.id] = {
        'Name': name,
        'Type': instance.instance_type,
        'State': instance.state['Name'],
        'Private IP': instance.private_ip_address,
        'Public IP': instance.public_ip_address,
        'Launch Time': instance.launch_time
    }
ATTRIBUTES = ['Name', 'Type', 'State',
              'Private IP', 'Public IP', 'Launch Time']
for instance_id, instance in EC2INFO.items():
    for key in ATTRIBUTES:
        print("{0}: {1}".format(key, instance[key]))
    print("------")
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.