blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2
values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313
values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107
values | src_encoding stringclasses 20
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 4 6.02M | extension stringclasses 78
values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
e8b31b9716c7fcce8782f533d9571a3ce2f38497 | 7b0c697ac16f1e9ec4f8482d6d11feb5bfd00cc5 | /restProject/urls.py | f4de89b93279d53e5e795280fecab01356c45d78 | [] | no_license | dhanushkomari/restProject | 8004a1d13ec913cbb3b5e96cabb0481c33ad127f | 48a3588251d084c33f99ec9f27afd215db7948a4 | refs/heads/master | 2023-01-03T12:31:16.941008 | 2020-11-04T10:00:11 | 2020-11-04T10:00:11 | 309,660,997 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 840 | py | """restProject URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path,include
# Root URLconf: Django admin plus the two app-level URLconfs ('api' and 'uiApp').
urlpatterns = [
    path('admin/', admin.site.urls),
    path('api/',include('api.urls')),
    path('uiApp/',include('uiApp.urls'))
]
| [
"dhanushkomari@gmail.com"
] | dhanushkomari@gmail.com |
fe8c1da06cb5220b0e5ee515224cc1101de51d57 | 6be8aa517e679b33b47d35f100e6590902a8a1db | /DP/Problem54.py | 72cbb8c1c999c705d1e1d21afdf23d8dfda03060 | [] | no_license | LeeJuhae/Algorithm-Python | 7ca4762712e5e84d1e277abecb3bf39c9cbd4e56 | 729947b4428205adfbac194a5527b0eeafe1c525 | refs/heads/master | 2023-04-24T01:02:36.430970 | 2021-05-23T07:17:25 | 2021-05-23T07:17:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 540 | py | # https://www.acmicpc.net/problem/17182
import sys
from itertools import permutations
# Fast line-based stdin reader.
read = sys.stdin.readline
# n = number of vertices, st = index of the starting vertex.
n, st = map(int, read().strip().split())
# dp starts as the n x n direct travel-time matrix read from input.
dp = [list(map(int, read().strip().split())) for _ in range(n)]
# Floyd-Warshall: relax dp in place into all-pairs shortest travel times, O(n^3).
for k in range(n):
    for i in range(n):
        for j in range(n):
            dp[i][j] = min(dp[i][j], dp[i][k] + dp[k][j])
ans = float('inf')
# Brute-force every visiting order of all n vertices (O(n! * n)) and keep
# the cheapest total tour cost starting from st.
for cites in permutations(range(n), n):
    prev = st
    tmp = 0
    for city in cites:
        tmp += dp[prev][city]
        prev = city
    ans = min(ans, tmp)
print(ans)
| [
"gusdn0657@gmail.com"
] | gusdn0657@gmail.com |
20d92aad7b9b2ce1096f58b5648e6e206b6bd5df | b1bf1bbb2b9fed57d6d4e45a3230b60fc219d15c | /test/1.5.2017/snake.py | 5d18cb65b9390317786a1e1ead6ee55c5552b4d3 | [] | no_license | Hi-Im-darkness/snake | 717df249bb5fd35ae1d6e5b1afaac22a7f5ffb85 | 5a6a8eb1d43811fdb2451ec6ade0cf9cc77b112d | refs/heads/master | 2021-01-20T10:05:08.319697 | 2017-05-04T23:37:14 | 2017-05-04T23:37:14 | 90,318,741 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,350 | py | import tkinter as tk
import time
class Pos:
    """An (x, y) grid position compared by value rather than identity."""

    def __init__(self, x, y):
        self.x = x
        self.y = y

    def __eq__(self, oPos):
        # Two positions are equal exactly when both coordinates match.
        return self.x == oPos.x and self.y == oPos.y
class Obj:
    """Common base for canvas-drawn game objects.

    Subclasses are expected to create a canvas image and store its handle
    in ``self.id`` so that ``delete()`` can remove it later.
    """

    def __init__(self, canvas, pos, color):
        self.canvas, self.pos, self.color = canvas, pos, color

    def delete(self):
        """Remove this object's image from the canvas."""
        self.canvas.delete(self.id)
class Head(Obj):
    """The snake's head: a direction-specific image that advances one pixel
    per move() and wraps around the 400x400 playfield."""
    def __init__(self, canvas, direct, pos, color='Green'):
        Obj.__init__(self, canvas, pos, color)
        self.changeDirect(direct)
    def changeDirect(self, direct):
        """Set the travel direction ('Left'/'Right'/'Up'/'Down'), the per-move
        step vector, and draw the matching head image.

        NOTE(review): the previous canvas image id is not deleted here;
        Snake.grow()/move() appear to manage deletion -- confirm.
        """
        self.direct = direct
        if direct == 'Left':
            self.stepx = -1
            self.stepy = 0
        elif direct == 'Right':
            self.stepx = 1
            self.stepy = 0
        elif direct == 'Up':
            self.stepx = 0
            self.stepy = -1
        else:
            # Any other value is treated as 'Down'.
            self.stepx = 0
            self.stepy = 1
        # Images are organised as Asset/<color>/Head/<direction>.gif.
        self.image = tk.PhotoImage(file='Asset/%s/Head/%s.gif' % (self.color, self.direct))
        self.id = self.canvas.create_image(self.pos.x, self.pos.y, anchor=tk.CENTER, image=self.image)
    def move(self):
        """Advance one pixel in the current direction, wrapping across the
        edges of the 400x400 canvas."""
        self.canvas.move(self.id, self.stepx, self.stepy)
        self.pos = Pos(self.pos.x + self.stepx, self.pos.y + self.stepy)
        if self.pos.x > 400:
            self.canvas.move(self.id, -400, 0)
            self.pos.x -= 400
        elif self.pos.x < 0:
            self.canvas.move(self.id, 400, 0)
            self.pos.x += 400
        elif self.pos.y > 400:
            self.canvas.move(self.id, 0, -400)
            self.pos.y -= 400
        elif self.pos.y < 0:
            self.canvas.move(self.id, 0, 400)
            self.pos.y += 400
class Bodies(Obj):
    """A single snake body segment drawn with the color's Body.gif image."""
    def __init__(self, canvas, pos, color='Green'):
        Obj.__init__(self, canvas, pos, color)
        self.image = tk.PhotoImage(file='Asset/%s/Body.gif' % color)
        self.id = self.canvas.create_image(self.pos.x, self.pos.y, anchor=tk.CENTER, image=self.image)
class Tail(Obj):
    """The snake's last segment, drawn with the color's Tail.gif image."""
    def __init__(self, canvas, pos, color='Green'):
        Obj.__init__(self, canvas, pos, color)
        self.image = tk.PhotoImage(file='Asset/%s/Tail.gif' % color)
        self.id = self.canvas.create_image(self.pos.x, self.pos.y, anchor=tk.CENTER, image=self.image)
class Snake:
    """A player snake: a Head, a list of Bodies ending in a Tail, plus
    ``bodyPos``, a pixel-by-pixel trail used for collision tests.

    Segments are spaced 5 pixels apart; ``lenght`` (misspelling kept --
    external code may reference it) counts segments.
    """
    def __init__(self, canvas, pos, color='Green'):
        self.canvas = canvas
        self.lenght = 10
        self.bodies = []
        self.bodies.append(Tail(canvas, pos, color))
        self.bodyPos = []
        # Lay the snake out horizontally from the tail at ``pos`` rightwards,
        # recording every pixel and drawing a body segment every 5 pixels.
        for x in range(pos.x, pos.x - 5 + 5 * self.lenght):
            self.bodyPos = [Pos(x, pos.y)] + self.bodyPos
            if (x - pos.x) in range(1, pos.x - 5 + 5 * self.lenght, 5):
                self.bodies = [Bodies(canvas, Pos(x, pos.y), color)] + self.bodies
        x = pos.x - 5 + 5 * self.lenght
        self.head = Head(canvas, 'Right', Pos(x, pos.y), color)
    def move(self):
        """Advance one step: grow a segment at the head, then retract the
        tail by one segment (5 trail pixels), keeping length constant."""
        self.grow()
        self.lenght -= 1
        self.bodies[-1].delete()
        self.bodies[-2].delete()
        # Redraw the new last segment as a Tail image.
        self.bodies[-2] = Tail(self.canvas, self.bodies[-2].pos, self.bodies[-2].color)
        del self.bodies[-1]
        for i in range(5):
            del self.bodyPos[-1]
        # Wrap the head across the 400x400 canvas edges.
        if self.head.pos.x > 400:
            self.canvas.move(self.head.id, -400, 0)
            self.head.pos.x -= 400
        elif self.head.pos.x < 0:
            self.canvas.move(self.head.id, 400, 0)
            self.head.pos.x += 400
        elif self.head.pos.y > 400:
            self.canvas.move(self.head.id, 0, -400)
            self.head.pos.y -= 400
        elif self.head.pos.y < 0:
            self.canvas.move(self.head.id, 0, 400)
            self.head.pos.y += 400
    def isDie(self):
        """Return True when the head has run into the snake's own trail."""
        if self.head.pos in self.bodyPos:
            return True
        return False
    def grow(self):
        """Extend the snake by one segment in the current travel direction:
        the old head becomes a body segment and a new head is drawn 5 pixels
        ahead, with the intervening pixels appended to ``bodyPos``."""
        self.lenght += 1
        self.head.delete()
        self.bodies = [Bodies(self.canvas, self.head.pos, self.head.color)] + self.bodies
        x = self.head.pos.x
        y = self.head.pos.y
        if self.head.direct == 'Left':
            x = self.head.pos.x - 5
            for i in range(x + 5, x, -1):
                self.bodyPos = [Pos(i, y)] + self.bodyPos
        elif self.head.direct == 'Right':
            x = self.head.pos.x + 5
            for i in range(x - 5, x):
                self.bodyPos = [Pos(i, y)] + self.bodyPos
        elif self.head.direct == 'Up':
            y = self.head.pos.y - 5
            for i in range(y + 5, y, -1):
                self.bodyPos = [Pos(x, i)] + self.bodyPos
        else:
            y = self.head.pos.y + 5
            for i in range(y - 5, y):
                self.bodyPos = [Pos(x, i)] + self.bodyPos
        self.head = Head(self.canvas, self.head.direct, Pos(x, y), self.head.color)
    def getFood(self, food):
        """Return True (and delete the food item) when the food's bounding
        box lies within 13 pixels of the head centre."""
        headPos = self.head.pos
        tp = headPos.y - 13
        bm = headPos.y + 13
        lt = headPos.x - 13
        rt = headPos.x + 13
        foodPos = self.canvas.coords(food.normal.id)
        if foodPos[1] >= tp and foodPos[3] <= bm:
            if foodPos[0] >= lt and foodPos[2] <= rt:
                food.normal.delete()
                return True
        return False
class BotSnake(Snake):
    """A computer-controlled snake that steers itself toward the food."""
    def __init__(self, canvas, pos, color='Yellow'):
        Snake.__init__(self, canvas, pos, color)
        # Pending turn: [Pos where to turn, new direction], or None.
        self.step = None
    def huntFood(self, food):
        """Plan an L-shaped path to the food: first schedule the turn that
        aligns the off-axis coordinate, then the head runs straight in."""
        self.food = food
        taget = food.normal
        head = self.head
        if head.direct == 'Left' or head.direct == 'Right':
            if head.pos.y < taget.pos.y:
                self.step = [Pos(taget.pos.x, head.pos.y), 'Down']
            elif head.pos.y > taget.pos.y:
                self.step = [Pos(taget.pos.x, head.pos.y), 'Up']
        else:
            if head.pos.x < taget.pos.x:
                self.step = [Pos(head.pos.x, taget.pos.y), 'Right']
            elif head.pos.x > taget.pos.x:
                self.step = [Pos(head.pos.x, taget.pos.y), 'Left']
    def isBlock(self, hurdle):
        """Return True when the cell 5 pixels ahead lies in ``hurdle``
        (a list of Pos, e.g. this snake's own trail)."""
        x = self.head.pos.x + self.head.stepx * 5
        y = self.head.pos.y + self.head.stepy * 5
        nextPos = Pos(x, y)
        if nextPos in hurdle:
            return True
        return False
    def move(self):
        """Execute any scheduled turn, dodge the snake's own body by turning
        perpendicular when blocked, then advance like a normal Snake."""
        if self.step is not None:
            turnPoint = self.step[0]
            direct = self.step[1]
            if self.head.pos == turnPoint:
                self.head.changeDirect(direct)
                self.step = None
        if self.isBlock(self.bodyPos):
            # Blocked ahead: try one perpendicular direction, then the other.
            if self.head.direct == 'Left' or self.head.direct == 'Right':
                self.head.changeDirect('Up')
                if self.isBlock(self.bodyPos):
                    self.head.changeDirect('Down')
            else:
                self.head.changeDirect('Left')
                if self.isBlock(self.bodyPos):
                    self.head.changeDirect('Right')
            # Re-plan the route to the food after the forced detour.
            self.huntFood(self.food)
        Snake.move(self)
if __name__ == '__main__':
    window = tk.Tk()
    canvas = tk.Canvas(window, width=400, height=400)
    canvas.pack()
    # h = Head(canvas, grid, 'Up', Pos(100, 100), 'Green')
    s = Snake(canvas, Pos(100, 100))
    # Busy render loop: ~100 frames/second, snake advances 1px per frame.
    while True:
        time.sleep(0.01)
        # canvas.bind_all('<KeyPress>', .keyPress)
        s.move()
        canvas.update()
    # NOTE(review): unreachable -- the while-loop above never exits.
    window.mainloop()
| [
"nghthach98@gmail.com"
] | nghthach98@gmail.com |
8251ffe046d39813fb96ab3eda7aaf564efa9dde | 21155deb4419380b995c09946a680a261c524b5b | /meraki/models/subnet_model.py | f08d566a104c927c12dbea3f8f178de10ea8c155 | [
"MIT"
] | permissive | dexterlabora/meraki-python-sdk | 620efab5e6b6eb32ca52308be1cb740748fc0f30 | f6e6d61bd8694548169cd872b0642def69115bcb | refs/heads/master | 2023-05-25T06:50:21.845198 | 2019-06-13T12:22:34 | 2019-06-13T12:22:34 | 182,791,973 | 0 | 1 | NOASSERTION | 2023-05-22T21:37:22 | 2019-04-22T13:22:08 | Python | UTF-8 | Python | false | false | 1,690 | py | # -*- coding: utf-8 -*-
"""
meraki
This file was automatically generated for meraki by APIMATIC v2.0 ( https://apimatic.io ).
"""
class SubnetModel(object):
    """Implementation of the 'Subnet' model.

    Represents one subnet entry of a VPN configuration.

    Attributes:
        local_subnet (string): The CIDR notation subnet used within the VPN
        use_vpn (bool): Indicates the presence of the subnet in the VPN
    """

    # Maps Python attribute names to the wire (API) property names.
    _names = {
        "local_subnet":'localSubnet',
        "use_vpn":'useVpn'
    }

    def __init__(self, local_subnet=None, use_vpn=None):
        """Constructor for the SubnetModel class"""
        self.local_subnet = local_subnet
        self.use_vpn = use_vpn

    @classmethod
    def from_dictionary(cls, dictionary):
        """Creates an instance of this model from a dictionary.

        Args:
            dictionary (dictionary): A dictionary representation of the object
            as obtained from the deserialization of the server's response.
            The keys MUST match property names in the API description.

        Returns:
            object: An instance of this class, or None when the input is None.
        """
        if dictionary is None:
            return None
        # Pull the wire-named properties straight out of the payload.
        return cls(dictionary.get('localSubnet'),
                   dictionary.get('useVpn'))
"git@apimatic.io"
] | git@apimatic.io |
2254441003b6a6ba9046998db029f6d874870846 | fd2e812eaa6c1cadfdf1c57755a9fe1cbdc6b71f | /encoder/_preprocessing.py | 09e9425fe530cca9610c786734eb6aa1ab0241fa | [] | no_license | Sarut-Theppitak/PSPnet_MB2_RepVGG | 3411e91f6f6d4eaaaf0990cd00c71149dae0632e | 51c799d6362025a49fb7b6b1a39593e7f2e3a6b2 | refs/heads/main | 2023-03-14T02:21:59.086229 | 2021-03-12T03:59:41 | 2021-03-12T03:59:41 | 346,930,148 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,074 | py | import numpy as np
import albumentations as albu
def get_preprocessing(preprocessing_fn):
    """Construct preprocessing transform

    Args:
        preprocessing_fn (callable): data normalization function
            (can be specific for each pretrained neural network)
    Return:
        transform: albumentations.Compose
    """
    # Normalize first, then reorder HWC -> CHW for both image and mask.
    return albu.Compose([
        albu.Lambda(image=preprocessing_fn),
        albu.Lambda(image=to_tensor, mask=to_tensor),
    ])
def preprocess_input(x, mean=None, std=None, input_space="RGB", input_range=None, **kwargs):
    """Normalize an image array.

    Steps (each optional): flip the channel axis for BGR input, rescale raw
    0-255 data into [0, 1] when a unit input_range is requested, then apply
    per-channel mean subtraction and std division.
    """
    if input_space == "BGR":
        # Reverse the last (channel) axis; copy to keep memory contiguous.
        x = x[..., ::-1].copy()
    if input_range is not None and x.max() > 1 and input_range[1] == 1:
        # Values look like raw 0-255 pixels but a [0, 1] range was requested.
        x = x / 255.0
    if mean is not None:
        x = x - np.array(mean)
    if std is not None:
        x = x / np.array(std)
    return x
def to_tensor(x, **kwargs):
    """Reorder an HWC image array into CHW layout and cast to float32."""
    return np.transpose(x, (2, 0, 1)).astype('float32')
def normalize_input(x, **kwargs):
    """Rescale raw [0, 255] pixel data into the [0, 1] range."""
    scaled = x / 255.0
    return scaled
"saruttheppitak@gmail.com"
] | saruttheppitak@gmail.com |
97854bce5074f25e872e5049683998e3f85bf0e5 | 902eb829f1e2f47b5057ea23f807bb5bd0519df6 | /curate_experiment_path.py | 7f572e7e9f82177c61c4d234d031992a5b1a5951 | [] | no_license | sriramrn/clam | 636680bea23e2c1bb4992c02c0ebb55620179d79 | 10703a11fab333e964f9a7e8061f69fe5086b51f | refs/heads/master | 2022-03-26T01:09:42.653556 | 2020-01-11T11:43:44 | 2020-01-11T11:43:44 | 125,971,518 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,394 | py | # -*- coding: utf-8 -*-
"""
Created on Mon Jan 8 14:28:34 2018
@author: Sriram Narayanan
"""
import glob
def paramdict(pathtoparamfile):
    """Parse a parameter file of ``key: value`` lines into a dict.

    Each non-blank line is split on its first ':'; the key keeps only the
    part before any tab, and the value drops the single character (space)
    following the ':' plus the trailing newline.  Later duplicate keys win.

    Improvements over the original: the file handle is managed with a
    ``with`` block (no leak if parsing raises), and the dict is built
    directly instead of via two parallel lists zipped together.
    """
    with open(pathtoparamfile, 'r') as f:
        lines = f.readlines()
    param_dict = {}
    for line in lines:
        # Skip blank lines (a lone '\n' has length 1).
        if len(line) > 1:
            key = line.split(':', 1)[0].split('\t', 1)[0]
            value = line.split(':', 1)[1][1:].split('\n', 1)[0]
            param_dict[key] = value
    return param_dict
expName = 'BoutClamp'
expParamFile = 'D:/ClosedLoopRaw/BCParams.txt'
savePath = 'D:/ClosedLoopRaw/'
# Reference parameter set that selected experiments must match exactly.
expParam = paramdict(expParamFile)
dataPath = 'D:/ClosedLoopRaw/'
# Every params.txt anywhere under dataPath identifies one experiment folder.
paramPath = glob.glob(dataPath+'/**/params.txt',recursive=True)
pathList = []
for paramFile in paramPath:
    tempParam = paramdict(paramFile)
    accept = True
    # Accept the folder only if every reference key is present and equal.
    for key in list(expParam.keys()):
        if key in list(tempParam.keys()):
            if tempParam[key] != expParam[key]:
                accept = False
        else:
            accept = False
    if accept:
        pathList.append(paramFile.split('params.txt')[0])
# Write the accepted experiment folder paths, one per line.
file = open(savePath+expName+'_DataPath.txt','w+')
for p in pathList:
    file.writelines(p+'\n')
file.close()
"sriram.r.narayanan@gmail.com"
] | sriram.r.narayanan@gmail.com |
9d7fa1949f2329fb360cf30a14031fc756ee8814 | 83f0cdbc9e1f7261dcd1ff5fc0c8ef4280e84fbb | /ADaM/python/cdisc_library.py | 8437f8e380df5cc47b45fd6272dc69f18a942760 | [
"MIT"
] | permissive | mihir-shinde/CSS2020-Hackathon | 0c39d59ddb1503f0c4170b230f789b8f29fee9ae | f9538ee425fe7eb0573757cdd2346d1f8c7998c1 | refs/heads/master | 2023-03-16T05:06:26.518324 | 2020-09-25T16:20:12 | 2020-09-25T16:20:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,958 | py | import requests
class CDISCConnector:
    """Small caching client for the CDISC Library REST API.

    Requests use HTTP basic auth; parsed JSON payloads are memoised per
    URL in ``self._cache`` until ``flush()`` is called.

    Bug fix: the original public methods (``adam``, ``sdtm``, ...) treated
    the return value of ``_get`` -- already parsed JSON (or ``{}``) -- as a
    ``requests.Response`` and crashed on ``.status_code``/``.json()``.
    Status handling now lives inside ``_get`` and the public methods simply
    return its result.
    """

    BASE_URL = "https://library.cdisc.org/api/"

    def __init__(self, username, password):
        """Store credentials; the HTTP session is created lazily."""
        self._client = None
        self._username = username
        self._password = password
        self._cache = {}

    @property
    def client(self):
        """A ``requests.Session`` with basic auth, created on first use."""
        if self._client is None:
            session = requests.Session()
            session.auth = (self._username, self._password)
            self._client = session
        return self._client

    def flush(self):
        """Drop every cached response."""
        self._cache = {}

    def _get(self, path):
        """GET ``BASE_URL + path`` and return the parsed JSON payload.

        Successful (200) payloads are cached per URL.  On 401/404 an
        explanatory message is printed; on any failure ``{}`` is returned.
        """
        url = self.BASE_URL + path
        if url not in self._cache:
            response = self.client.get(url)
            if response.status_code == 200:
                self._cache[url] = response.json()
            elif response.status_code == 401:
                print("Authentication not recognised")
            elif response.status_code == 404:
                print("Standard or Dataset not found")
        return self._cache.get(url, {})

    @property
    def products(self):
        """Shorthand for :meth:`get_products`."""
        return self.get_products()

    def get_products(self):
        """Return a mapping of product name -> href for every listed product."""
        contents = self._get("mdr/products")
        specs = {}
        if contents:
            for aspect, asp_def in contents.get("_links").items():
                if aspect == "self":
                    continue
                for spec, spec_def in asp_def.get("_links").items():
                    if spec == "self":
                        continue
                    # Assumption: the first entry carries the canonical href.
                    specs[spec] = spec_def[0].get('href')
        return specs

    def adam(self, version="1-1"):
        """Get the ADaM implementation-guide specification."""
        return self._get(f"mdr/adam/adamig-{version}")

    def adam_dataset(self, dataset, version="1-1"):
        """Get the specification of one ADaM dataset."""
        return self._get(f"mdr/adam/adamig-{version}/{dataset}")

    def adam_var(self, dataset, variable, version="1-1"):
        """Get the specification of one ADaM data-structure variable."""
        return self._get(
            f"mdr/adam/adamig-{version}/datastructures/{dataset}/variables/{variable}")

    def sdtm(self, version="3-3"):
        """Get the SDTM implementation-guide specification."""
        return self._get(f"mdr/sdtmig/{version}")

    def sdtm_dataset(self, dataset, version="3-3"):
        """Get the specification of one SDTM dataset."""
        return self._get(f"mdr/sdtmig/{version}/datasets/{dataset}")

    def sdtm_variable(self, dataset, variable, version="3-3"):
        """Get the specification of one SDTM dataset variable."""
        return self._get(f"mdr/sdtmig/{version}/datasets/{dataset}/variables/{variable}")

    def get_terminology_by_name(self, name, parent):
        """Placeholder: look up a codelist by name (not implemented)."""
        pass

    def terminology_set(self, name, parent="sdtm"):
        """Placeholder: fetch a codelist (not implemented)."""
        pass
| [
"glow@mdsol.com"
] | glow@mdsol.com |
b1b73aec37759c72a704ea13002ec87a409dff1c | 6fcfb638fa725b6d21083ec54e3609fc1b287d9e | /python/matrix-org_synapse/synapse-master/synapse/__init__.py | ff251ce5973b833547a661e133844a21b6b52695 | [] | no_license | LiuFang816/SALSTM_py_data | 6db258e51858aeff14af38898fef715b46980ac1 | d494b3041069d377d6a7a9c296a14334f2fa5acc | refs/heads/master | 2022-12-25T06:39:52.222097 | 2019-12-12T08:49:07 | 2019-12-12T08:49:07 | 227,546,525 | 10 | 7 | null | 2022-12-19T02:53:01 | 2019-12-12T07:29:39 | Python | UTF-8 | Python | false | false | 700 | py | # -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" This is a reference implementation of a Matrix home server.
"""
__version__ = "0.19.2"
| [
"659338505@qq.com"
] | 659338505@qq.com |
9dafd19f9c9a55401753b199ad8db113b6b87eea | 07cdccd585e19acda514be54c6840bfedc951ae8 | /main.py | 9ffb3d5935b831bed7ce04591ddf8757c602523d | [] | no_license | HemantJaiman/Spam_classifier | 942512b62cf67a050044cd7af4de1c2fd4a2991d | 3606bb77ada123ac66eaf67b553272a64aa7e9f0 | refs/heads/main | 2023-06-24T20:10:26.756826 | 2021-07-15T07:25:18 | 2021-07-15T07:25:18 | 386,202,174 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,693 | py |
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
import matplotlib.pyplot as plt
from wordcloud import *
from math import log, sqrt
import pandas as pd
import numpy as np
# Load the SMS spam dataset: column v1 = label ('ham'/'spam'), v2 = text.
mails = pd.read_csv('spam.csv', encoding = 'latin-1')
#train test split
# NOTE(review): totalmails is computed but never used afterwards.
totalmails = mails['v2'].shape[0]
trainIndex, testIndex = list(), list()
# Random ~75/25 split of row indices (non-deterministic: no seed is set).
for i in range(mails.shape[0]):
    if np.random.uniform(0,1) < 0.75:
        trainIndex +=[i]
    else:
        testIndex +=[i]
trainData = mails.loc[trainIndex]
testData = mails.loc[testIndex]
trainData.reset_index(inplace = True)
# Visualization of data
# Word cloud built from the concatenated text of all spam-labelled messages.
spam_words = ' '.join(list(mails[mails['v1']=='spam']['v2']))
spam_wc = WordCloud(width =512,height=512).generate(spam_words)
plt.figure(figsize=(10,8),facecolor='k')
plt.imshow(spam_wc)
plt.axis('off')
plt.tight_layout(pad = 0)
plt.show()
def process_message(message, lower_case=True, stem=True, stop_words=True, gram=2):
    """Tokenize a message for the classifier.

    Args:
        message: raw message text.
        lower_case: lowercase the text before tokenizing.
        stem: apply Porter stemming (only effective when gram == 1).
        stop_words: drop English stopwords (only effective when gram == 1).
        gram: n-gram size; when > 1 the function returns the list of
            n-gram strings and skips stopword removal / stemming.

    Returns:
        list of token strings (or n-gram strings when gram > 1).
    """
    if lower_case:
        message = message.lower()
    words = word_tokenize(message)
    # Drop very short tokens (punctuation, "a", "to", ...).
    words = [w for w in words if len(w) > 2]
    if gram > 1:
        # Build overlapping n-grams of consecutive tokens.
        # Bug fix: the original appended one-element lists ([' '.join(...)]),
        # yielding a list of lists instead of a list of n-gram strings.
        return [' '.join(words[i:i + gram]) for i in range(len(words) - gram + 1)]
    if stop_words:
        # Bug fix: stopwords.word() does not exist; the NLTK API is
        # stopwords.words('english').
        sw = stopwords.words('english')
        words = [word for word in words if word not in sw]
    if stem:
        stemmer = PorterStemmer()
        words = [stemmer.stem(word) for word in words]
    return words
"""
s_c = SpamClassifier(trainData, 'tf-idf')
s_c.train()
pred_s_c = s_c.predict(testData['v2'])
metrics(testData['label'],pred_s_c)
1.
""" | [
"noreply@github.com"
] | noreply@github.com |
ebec3629eb42d836bab2a456034eb71b975018dd | bad62c2b0dfad33197db55b44efeec0bab405634 | /sdk/workloads/azure-mgmt-workloads/setup.py | 98da49aa95a0f0e5fcc66f523e70cc2345923cf2 | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | test-repo-billy/azure-sdk-for-python | 20c5a2486456e02456de17515704cb064ff19833 | cece86a8548cb5f575e5419864d631673be0a244 | refs/heads/master | 2022-10-25T02:28:39.022559 | 2022-10-18T06:05:46 | 2022-10-18T06:05:46 | 182,325,031 | 0 | 0 | MIT | 2019-07-25T22:28:52 | 2019-04-19T20:59:15 | Python | UTF-8 | Python | false | false | 2,764 | py | #!/usr/bin/env python
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
import re
import os.path
from io import open
from setuptools import find_packages, setup
# Change the PACKAGE_NAME only to change folder and different name
PACKAGE_NAME = "azure-mgmt-workloads"
PACKAGE_PPRINT_NAME = "Workloads Management"
# a-b-c => a/b/c
package_folder_path = PACKAGE_NAME.replace('-', '/')
# a-b-c => a.b.c
namespace_name = PACKAGE_NAME.replace('-', '.')
# Version extraction inspired from 'requests'
with open(os.path.join(package_folder_path, 'version.py')
if os.path.exists(os.path.join(package_folder_path, 'version.py'))
else os.path.join(package_folder_path, '_version.py'), 'r') as fd:
version = re.search(r'^VERSION\s*=\s*[\'"]([^\'"]*)[\'"]',
fd.read(), re.MULTILINE).group(1)
if not version:
raise RuntimeError('Cannot find version information')
with open('README.md', encoding='utf-8') as f:
readme = f.read()
with open('CHANGELOG.md', encoding='utf-8') as f:
changelog = f.read()
setup(
name=PACKAGE_NAME,
version=version,
description='Microsoft Azure {} Client Library for Python'.format(PACKAGE_PPRINT_NAME),
long_description=readme + '\n\n' + changelog,
long_description_content_type='text/markdown',
license='MIT License',
author='Microsoft Corporation',
author_email='azpysdkhelp@microsoft.com',
url='https://github.com/Azure/azure-sdk-for-python',
keywords="azure, azure sdk", # update with search keywords relevant to the azure service / product
classifiers=[
'Development Status :: 4 - Beta',
'Programming Language :: Python',
'Programming Language :: Python :: 3 :: Only',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'License :: OSI Approved :: MIT License',
],
zip_safe=False,
packages=find_packages(exclude=[
'tests',
# Exclude packages that will be covered by PEP420 or nspkg
'azure',
'azure.mgmt',
]),
include_package_data=True,
package_data={
'pytyped': ['py.typed'],
},
install_requires=[
'msrest>=0.6.21',
'azure-common~=1.1',
'azure-mgmt-core>=1.3.0,<2.0.0',
],
python_requires=">=3.6"
)
| [
"noreply@github.com"
] | noreply@github.com |
f165e00d444f850aee54fecab36cf98b9209d337 | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /benchmark/startQiskit_noisy2453.py | 5211053a79a7324e34dd64e88d63b85985dd3c0e | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,238 | py | # qubit number=4
# total number=42
import cirq
import qiskit
from qiskit.providers.aer import QasmSimulator
from qiskit.test.mock import FakeVigo
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2
import numpy as np
import networkx as nx
def bitwise_xor(s: str, t: str) -> str:
    """XOR two equal-length bit strings position by position.

    The bit list is reversed before joining, matching the little-endian
    convention used by the callers in this file.
    """
    bits = [str(int(s[i]) ^ int(t[i])) for i in range(len(s))]
    return ''.join(bits[::-1])
def bitwise_dot(s: str, t: str) -> str:
    """Inner product of two bit strings modulo 2, returned as '0' or '1'."""
    total = sum(int(s[i]) * int(t[i]) for i in range(len(s)))
    return str(total % 2)
def build_oracle(n: int, f) -> QuantumCircuit:
    """Build the phase-free oracle O_f over n control qubits and 1 target.

    For every n-bit string rep with f(rep) == "1", X-gates map that input
    onto the all-ones state, a multi-controlled Toffoli flips the target,
    and the X-gates are undone.
    """
    # implement the oracle O_f
    # NOTE: use multi_control_toffoli_gate ('noancilla' mode)
    # https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
    # https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
    # https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
    controls = QuantumRegister(n, "ofc")
    target = QuantumRegister(1, "oft")
    oracle = QuantumCircuit(controls, target, name="Of")
    for i in range(2 ** n):
        rep = np.binary_repr(i, n)
        if f(rep) == "1":
            for j in range(n):
                if rep[j] == "0":
                    oracle.x(controls[j])
            oracle.mct(controls, target[0], None, mode='noancilla')
            for j in range(n):
                if rep[j] == "0":
                    oracle.x(controls[j])
    # oracle.barrier()
    return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
    """Assemble the n-qubit benchmark circuit: extra mutated gates, a
    Hadamard layer, the oracle for f, a second Hadamard layer, and final
    measurement of every qubit.  The '# number=' tags index the mutation
    steps of the generator that produced this file.
    """
    # circuit begin
    input_qubit = QuantumRegister(n,"qc")
    classical = ClassicalRegister(n, "qm")
    prog = QuantumCircuit(input_qubit, classical)
    prog.h(input_qubit[3]) # number=39
    prog.cz(input_qubit[0],input_qubit[3]) # number=40
    prog.h(input_qubit[3]) # number=41
    prog.cx(input_qubit[0],input_qubit[3]) # number=23
    prog.cx(input_qubit[0],input_qubit[3]) # number=33
    prog.x(input_qubit[3]) # number=34
    prog.cx(input_qubit[0],input_qubit[3]) # number=35
    prog.cx(input_qubit[0],input_qubit[3]) # number=25
    prog.cx(input_qubit[0],input_qubit[3]) # number=12
    prog.h(input_qubit[2]) # number=30
    prog.cz(input_qubit[0],input_qubit[2]) # number=31
    prog.h(input_qubit[2]) # number=32
    prog.x(input_qubit[2]) # number=21
    prog.h(input_qubit[2]) # number=36
    prog.cz(input_qubit[0],input_qubit[2]) # number=37
    prog.h(input_qubit[2]) # number=38
    prog.h(input_qubit[1])  # number=2
    prog.h(input_qubit[2]) # number=3
    prog.h(input_qubit[3])  # number=4
    prog.h(input_qubit[0])  # number=5
    prog.h(input_qubit[3]) # number=16
    prog.cz(input_qubit[1],input_qubit[3]) # number=17
    prog.h(input_qubit[3]) # number=18
    # Oracle acts on the first n-1 qubits as controls, last qubit as target.
    oracle = build_oracle(n-1, f)
    prog.append(oracle.to_gate(),[input_qubit[i] for i in range(n-1)]+[input_qubit[n-1]])
    prog.h(input_qubit[1]) # number=6
    prog.h(input_qubit[2]) # number=7
    prog.h(input_qubit[3]) # number=8
    prog.h(input_qubit[0]) # number=9
    prog.h(input_qubit[0]) # number=26
    prog.cz(input_qubit[3],input_qubit[0]) # number=27
    prog.h(input_qubit[0]) # number=28
    prog.cx(input_qubit[3],input_qubit[0]) # number=14
    prog.y(input_qubit[2]) # number=29
    # circuit end
    for i in range(n):
        prog.measure(input_qubit[i], classical[i])
    return prog
if __name__ == '__main__':
    # Hidden bit string a and offset b defining f(rep) = (a . rep) XOR b.
    a = "111"
    b = "0"
    f = lambda rep: bitwise_xor(bitwise_dot(a, rep), b)
    prog = make_circuit(4,f)
    backend = FakeVigo()
    sample_shot =8000
    # Execute on the noisy FakeVigo simulator and collect measurement counts.
    info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
    backend = FakeVigo()
    circuit1 = transpile(prog,backend,optimization_level=2)
    # Persist the counts plus the transpiled circuit's size and diagram.
    writefile = open("../data/startQiskit_noisy2453.csv","w")
    print(info,file=writefile)
    print("results end", file=writefile)
    print(circuit1.__len__(),file=writefile)
    print(circuit1,file=writefile)
    writefile.close()
| [
"wangjiyuan123@yeah.net"
] | wangjiyuan123@yeah.net |
60c38a9c6cfadd1e3504578eba36268da313371f | cb0737858ac70539cf2b23a0a239d60abd4553e0 | /Week-1/11sandSquares.py | 00cc7a1681d50d36413c25e3a4a31e3f4a694ac9 | [] | no_license | riatalwar/AoPS-Intermediate-Programming-With-Python | 1a87e0ee91278999cdcba3ad2ac35eec91a2bdc6 | 94710dd915ec7bd179d3bd45383fc32280c922d5 | refs/heads/master | 2023-02-09T13:17:12.041209 | 2021-01-06T16:51:44 | 2021-01-06T16:51:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 544 | py | # Python Class 2532
# Lesson 1 Problem 2
# Author: riatalwar (486154)
# There are two 3-digit numbers n having the property
# that n is divisible by 11 and
# n/11 is equal to the sum of the squares of the digits of n.
# Find both values of n. You may submit them in either order.
def find_numbers():
    """Return the 3-digit multiples of 11 equal to 11 times the sum of
    the squares of their digits.

    Fixes over the original: the scan starts at 110 (99 is a 2-digit
    number, so the original range silently included one invalid candidate),
    and the comparison uses exact integer division (n // 11) instead of
    float division.
    """
    numbers = []
    for n in range(110, 1000, 11):
        digit_square_sum = (n // 100) ** 2 + ((n // 10) % 10) ** 2 + (n % 10) ** 2
        # n is a multiple of 11, so n // 11 is exact.
        if n // 11 == digit_square_sum:
            numbers.append(n)
    return numbers

print(find_numbers())
| [
"riatalwar@hotmail.com"
] | riatalwar@hotmail.com |
8aa95b8aee556ee8fa7fb2ff5c965d5021d95fbd | 60561fd3efd5ecd8f984c4767c8e1017f66dbfd0 | /apps/unsubscribes/migrations/0002_unsubscribeemail_user.py | a5468036448b33392ed58db1295c00f26159ef47 | [] | no_license | kaushalaman97/react | fd3b691340ba877ace3b9feec0a93103b30f466f | 4b34ace3357fbba0aa6616d761da2f501993bcc4 | refs/heads/main | 2023-03-08T16:33:48.675925 | 2021-02-26T14:23:38 | 2021-02-26T14:23:38 | 342,596,858 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 622 | py | # Generated by Django 3.1.4 on 2021-02-24 08:54
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Adds a ``user`` foreign key (to AUTH_USER_MODEL) on UnsubscribeEmail."""

    # NOTE(review): ``initial = True`` is unusual for a migration that also
    # depends on ``0001_initial`` -- auto-generated file; confirm intent.
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('unsubscribes', '0001_initial'),
    ]
    operations = [
        # Cascade-delete unsubscribe records when the owning user is removed.
        migrations.AddField(
            model_name='unsubscribeemail',
            name='user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
    ]
| [
"mohit.kaushal@techstriker.com"
] | mohit.kaushal@techstriker.com |
633661f1ed2f3acf817bd8e8a0ec408fcac7dc57 | 8c875f0b580f9040dd21cd6662f2276c2eba49ee | /hurdle_regression.py | f3b3c66cdb5a4ae21b61480afb3e31e3b0c22222 | [] | no_license | NathanielBlairStahn/hurdle-regression | 88abf83698c216ac62c30782b7b8bc625fcc5bd6 | a161d289799a45f724315181ddb6c7ae12f65ef2 | refs/heads/master | 2020-03-07T07:45:36.256345 | 2018-03-30T04:41:57 | 2018-03-30T04:41:57 | 127,357,734 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 607 | py | """Module to implement hurdle regression models.
"""
import numpy as np
from sklearn.linear_model import LinearRegression, LogisticRegression
class HurdleLinearRegression():
    """Implements linear regression with a hurdle at 0.

    A logistic model estimates P(y > 0); a linear model, fit only on rows
    with positive targets, estimates the magnitude.  The prediction is
    their product: E[y] = P(y > 0) * E[y | y > 0].
    """
    def __init__(self):
        self.logistic = LogisticRegression()
        self.linear = LinearRegression()

    def fit(self, X, y):
        """Fit the hurdle (y > 0) and magnitude (y | y > 0) models; returns self."""
        self.logistic.fit(X, y>0)
        self.linear.fit(X[y>0], y[y>0])
        return self

    def predict(self, X):
        """Return the expected value P(y > 0) * linear prediction, shape (n,)."""
        # Bug fix: predict_proba returns an (n, 2) array of class probabilities,
        # so multiplying it directly by the (n,) linear predictions broadcast
        # incorrectly.  Column 1 is P(y > 0) because classes_ == [False, True].
        p = self.logistic.predict_proba(X)[:, 1]
        y_hat = self.linear.predict(X)
        return p * y_hat
| [
"nathaniel.blair.stahn@gmail.com"
] | nathaniel.blair.stahn@gmail.com |
0315172cd8f2f418b8753f197edeb6c03507474d | ac0b9c85542e6d1ef59c5e9df4618ddf22223ae0 | /kratos/applications/FluidDynamicsApplication/python_scripts/apply_custom_velocity_constraints.py | 22b0262260595debdf02adca990f94e5f573eb8c | [] | no_license | UPC-EnricBonet/trunk | 30cb6fbd717c1e78d95ec66bc0f6df1a041b2b72 | 1cecfe201c8c9a1b87b2d87faf8e505b7b1f772d | refs/heads/master | 2021-06-04T05:10:06.060945 | 2016-07-15T15:29:00 | 2016-07-15T15:29:00 | 33,677,051 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,124 | py | from KratosMultiphysics import *
from FluidDynamicsApplication import *
def Factory(settings, Model):
    """Kratos entry point: validate *settings* and build the process object."""
    if type(settings) != Parameters:
        raise Exception("expected input shall be a Parameters object, encapsulating a json string")
    return ApplyCustomVelocityConstraintProcess(Model, settings["Parameters"])
##all the processes python processes should be derived from "python_process"
class ApplyCustomVelocityConstraintProcess(Process):
    """Fixes selected velocity components of a model part to constant values.

    For each axis whose ``is_fixed_*`` flag is true, an auxiliary
    ApplyConstantScalarValueProcess is built for the matching ``VELOCITY_*``
    component; ExecuteInitialize then forwards to each of them.
    """
    def __init__(self, Model, settings ):
        Process.__init__(self)
        model_part = Model[settings["model_part_name"].GetString()]

        # One auxiliary process per fixed component (the per-axis parameter
        # construction used to be triplicated; it now lives in the helper).
        if settings["is_fixed_x"].GetBool() == True:
            self.x_component_process = self._make_component_process(
                model_part, settings, "is_fixed_x", 0, "VELOCITY_X")
        if settings["is_fixed_y"].GetBool() == True:
            self.y_component_process = self._make_component_process(
                model_part, settings, "is_fixed_y", 1, "VELOCITY_Y")
        if settings["is_fixed_z"].GetBool() == True:
            self.z_component_process = self._make_component_process(
                model_part, settings, "is_fixed_z", 2, "VELOCITY_Z")

        # Auxiliar vector with the fixicity settings
        self.fixicity_vec = [settings["is_fixed_x"].GetBool(),
                             settings["is_fixed_y"].GetBool(),
                             settings["is_fixed_z"].GetBool()]

    @staticmethod
    def _make_component_process(model_part, settings, fixed_flag_name, component_index, variable_name):
        # Build the per-component Parameters and wrap them in an
        # ApplyConstantScalarValueProcess.
        component_params = Parameters("{}")
        component_params.AddValue("model_part_name",settings["model_part_name"])
        component_params.AddValue("mesh_id",settings["mesh_id"])
        component_params.AddValue("is_fixed",settings[fixed_flag_name])
        component_params.AddValue("value",settings["value"][component_index])
        component_params.AddEmptyValue("variable_name").SetString(variable_name)
        return ApplyConstantScalarValueProcess(model_part, component_params)

    def ExecuteInitialize(self):
        # Initialize only the component processes that were created above.
        if self.fixicity_vec[0] == True:
            self.x_component_process.ExecuteInitialize()
        if self.fixicity_vec[1] == True:
            self.y_component_process.ExecuteInitialize()
        if self.fixicity_vec[2] == True:
            self.z_component_process.ExecuteInitialize()
| [
"enriquebonetgil@hotmail.com"
] | enriquebonetgil@hotmail.com |
7eede2990f6e638af015bc568bd54608b7a9581e | 91d1a6968b90d9d461e9a2ece12b465486e3ccc2 | /events_write_1/event-bu_delete.py | 1d62d387f9b2e00234829669c850e7bdd2a0f3aa | [] | no_license | lxtxl/aws_cli | c31fc994c9a4296d6bac851e680d5adbf7e93481 | aaf35df1b7509abf5601d3f09ff1fece482facda | refs/heads/master | 2023-02-06T09:00:33.088379 | 2020-12-27T13:38:45 | 2020-12-27T13:38:45 | 318,686,394 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,042 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import sys

# Make the repository root importable so common.execute_command resolves.
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from common.execute_command import write_one_parameter

# url : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/events/delete-event-bus.html
if __name__ == '__main__':
    """
    create-event-bus : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/events/create-event-bus.html
    describe-event-bus : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/events/describe-event-bus.html
    """
    parameter_display_string = """
    # name : The name of the event bus to delete.
    """
    # ex: add_option_dict["no_value_parameter_list"] = "--single-parameter"
    add_option_dict = {
        # parameter display string
        "parameter_display_string": parameter_display_string,
    }
    write_one_parameter("events", "delete-event-bus", "name", add_option_dict)
| [
"hcseo77@gmail.com"
] | hcseo77@gmail.com |
bdeab10718c8421fb9001a909da9e2f385a9a434 | 815d3e741dea557f4cf492547d50c2f05ebb230b | /tests/test_spatio_temporal_interpolation_quality.py | f650b983b4bfb0f4707ce555c7f5187fe779f899 | [] | no_license | Evnica/Interpolation2017 | 36582e52de9eb5bfdb5ac78bf1d397a1422c3dee | ae18c77e90a9daa34321d8a2dae25eef3a5239c9 | refs/heads/master | 2021-01-11T09:17:17.175650 | 2017-01-29T23:32:34 | 2017-01-29T23:32:34 | 77,075,591 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 988 | py | from interpolation.analysis import Analysis
from interpolation.iohelper import Reader
import interpolation.utils as utils
import interpolation.qualityassessment as qa
# Quality-assessment smoke script: loads weather samples, lifts them to 4-D
# (space + time) points, and scores two interpolation methods on a random split.
reader = Reader('input/wetter_nov.csv')
analysis = Analysis(60, 10)
# NOTE(review): presumably (2, 3, 4, 13) are the CSV column indices for the
# coordinates/value and 1 is a time/skip flag -- confirm against Reader.__call__.
reader(2, 3, 4, 13, 1, analysis)
time_handler = utils.TimeHandler(reader.times)
points4d = time_handler.raise_to_fourth_dimension(reader.points, 1)
# Split into 10 random sample groups plus one held-out group for validation.
random_samples, last_sample = utils.divide_in_random(10, points=points4d, timestamps=reader.times, values=reader.values,
                                                     point_dimension=4)
# access_quality_of_spatial_interpolation(grouped_samples, one_sample, function='rbf', function_type='linear',
#                                         number_of_neighbors=6, power=2, write=False, r2formula='keller'):
qa.access_quality_of_interpolation(random_samples, last_sample, function='idw', write=True)
# grids = analysis.generate_time_series_grids(reader.times)
qa.access_quality_of_interpolation(random_samples, last_sample, write=True)
| [
"evnica@gmail.com"
] | evnica@gmail.com |
63eedeed5df1c4e71ce5ec0366ac35b6be2deb26 | dc2d4d0b5982c47b453ab0b9beecc4da12a07338 | /MOOC/turtlegoto.py | 10540717529bd68c9a8b984208d1a63e1f9dd128 | [] | no_license | zou23cn/Python | 6a2c89935b6d7c0e508364934ef40db18ed784f4 | 5104823a9b98067876583f37bdf6219dd8451a28 | refs/heads/master | 2021-06-25T13:35:42.722701 | 2021-01-14T10:02:48 | 2021-01-14T10:02:48 | 182,368,953 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 162 | py | import turtle
turtle.setup(900, 500, 0, 0)
# Trace a square centred on the origin, then return to the centre.
for waypoint in ((100, 100), (100, -100), (-100, -100), (-100, 100), (0, 0)):
    turtle.goto(waypoint)
turtle.done()
| [
"zou23cn@gmail.com"
] | zou23cn@gmail.com |
9c884ce6346e2b5e3bb6f88c449801d9cfd42f60 | 78461d51d7cbabb5a08a91f7bccaa85c36aae322 | /source/digits.py | a69713acd8764ea165ff1c8ae22fbc0d035efb5b | [] | no_license | TimurFatykhov/Campus-2.0 | 707069eb30e21a4acb2aba5eaf2f3ef1b557e57d | 20f943d99bf2102eecbe3997e0536ee2eb4b23cd | refs/heads/master | 2022-04-23T11:25:10.330134 | 2020-03-06T11:11:30 | 2020-03-06T11:11:30 | 219,302,794 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,419 | py | import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_digits
def get_X_y():
    """
    Load the scikit-learn digits dataset.

    Returns two values:
    -------------------
    - X: a 1797x64 matrix; each row is an 8x8 digit image flattened
      into a vector of length 64.
    - y: a 1797x10 one-hot matrix: row m contains a single 1 in the
      column matching the digit shown in X[m] (column 0 for a zero,
      column 9 for a nine, and so on) and zeros elsewhere.

    Example:
    --------
    >>> X, y = get_X_y()
    >>>
    """
    X, y_raw = load_digits(return_X_y=True)
    # One-hot encode by selecting rows of the 10x10 identity matrix.
    y = np.eye(10)[y_raw]
    return X, y
def show_image(img, figsize=(5,5)):
    """
    Display an image.

    Parameters:
    - img: numpy.array
      either a 2-D (grayscale) / 3-channel image, or a flattened square
      image given as a 1-D vector (reshaped to its square side length).
    - figsize: matplotlib figure size in inches.
    """
    if len(img.shape) < 2:
        side = np.sqrt(len(img)).astype(int)
        img = img.reshape((side, side))
    plt.figure(figsize=figsize)
    plt.imshow(img)
    plt.axis('off')
    plt.show()
def ReLU(vector):
    """Element-wise rectifier: max(x, 0).  Returns a new array, leaving
    the input untouched."""
    return np.maximum(vector, 0)
def softmax(s):
    """Row-wise softmax of a 2-D score matrix, with max-subtraction for
    numerical stability.  Each output row sums to 1.

    Bug fix: the normaliser previously used np.sum(e) over the WHOLE
    matrix, so probabilities were divided by the batch-wide total instead
    of each row's own total.
    """
    e = np.exp(s - s.max(1).reshape((-1, 1)))
    return e / e.sum(axis=1, keepdims=True)
def mean_square_error(predict, true):
    """Sum of squared errors averaged over the number of samples (rows),
    NOT over the total element count."""
    return ((predict - true) ** 2).sum() / len(predict)
class TwoLayerClassifier():
    """Two-layer neural net: input -> ReLU hidden layer -> softmax output,
    trained with full-batch gradient descent on a mean-squared-error loss
    against one-hot targets (see fit())."""
    def __init__(self, input_size, hidden_size, output_size):
        # Weights initialised uniformly in [-1, 1); biases in [0, 1).
        self.params = {}
        self.params['W1'] = np.random.rand(input_size, hidden_size) * 2 - 1
        self.params['b1'] = np.random.rand(hidden_size)
        self.params['W2'] = np.random.rand(hidden_size, output_size) * 2 - 1
        self.params['b2'] = np.random.rand(output_size)
        # Caches of the last forward pass, populated by loss().
        self.H = None
        self.S = None
        self.P = None
    def loss(self, X, y=None, reg=0):
        """Forward pass; returns class probabilities if y is None,
        otherwise (loss, grads).  NOTE(review): reg is accepted but unused."""
        N = len(X)
        W1 = self.params['W1']
        b1 = self.params['b1']
        W2 = self.params['W2']
        b2 = self.params['b2']
        # Compute the forward pass
        self.o_11 = X.dot(W1)
        self.o_12 = self.o_11 + b1
        self.H = ReLU(self.o_12)
        self.o_21 = self.H.dot(W2)  # NOTE(review): unused; recomputed on the next line
        self.S = self.H.dot(W2) + b2
        # NOTE(review): the module-level softmax normalises with np.sum over the
        # whole matrix, while the backward pass below assumes row-wise sums --
        # verify the two are consistent.
        self.P = softmax(self.S)
        if y is None:
            return self.P
        loss = mean_square_error(self.P, y)
        # Backward pass: compute gradients
        grads = {}
        # dl/dP
        # dP
        dP = 2 * (self.P - y)
        # dl/dP * dP/dS
        # dS
        # NOTE(review): this is only the diagonal of the softmax Jacobian;
        # the cross terms dP_i/dS_j (i != j) are ignored.
        exp = np.exp(self.S - self.S.max(1).reshape((-1, 1)))
        softsum = np.sum(exp, 1).reshape((-1, 1))
        dS = exp * (softsum - exp) / softsum**2
        dS = dP * dS
        # dl/dP * dP/dS * dS/dW2
        # dW2
        grads['W2'] = self.H.T.dot(dS) / N
        # dl/dP * dP/dS * dS/db2
        # db2
        grads['b2'] = dS.sum(0) / N
        # dH
        dH = dS.dot(W2.T)
        # do_12
        do_12 = np.ones_like(self.o_12)
        do_12[self.o_12 < 0] = 0
        # dW1
        # NOTE(review): dH is computed above but never folded into do_12, so
        # grads['W1'] / grads['b1'] omit the upstream gradient -- looks like a
        # bug; expected X.T.dot(dH * do_12) by the chain rule.  Confirm.
        grads['W1'] = X.T.dot(do_12) / N
        # db1
        grads['b1'] = do_12.sum(0) / N
        return loss, grads
    def fit(self, X, y, epochs, lr):
        """Full-batch gradient descent for `epochs` steps at learning rate `lr`;
        per-epoch losses are recorded in self.history_loss."""
        self.history_loss = []
        for epoch in range(epochs):
            loss, grads = self.loss(X, y)
            # print(loss)
            self.history_loss.append(loss)
            self.params['W1'] -= lr * grads['W1']
            self.params['b1'] -= lr * grads['b1']
            self.params['W2'] -= lr * grads['W2']
            self.params['b2'] -= lr * grads['b2']
| [
"fatykhov.timur.m@gmail.com"
] | fatykhov.timur.m@gmail.com |
cc2d67c10951e85ac38fb33a2a8857e71a6610fd | 1c67732a24042a991cc9f7e764d4640522391972 | /back/gamedata/admin.py | d0e2b17e7528b7c6c839144c30b720f95932f249 | [] | no_license | sungguenja/bsgg | 1061ccc6f5f08ed9ad14d3a332af020ec7a5df22 | 447283378ac3bb8f489e2a4662bfb6513bc37be2 | refs/heads/master | 2023-04-01T14:15:05.491775 | 2021-04-06T09:46:25 | 2021-04-06T09:46:25 | 318,800,558 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 308 | py | from django.contrib import admin
from .models import Area, Animal, Item, AreaItem, AreaAnimal, AnimalItem
# Register your models here.
# Expose each game-data model in the Django admin with default ModelAdmin options.
admin.site.register(Area)
admin.site.register(Animal)
admin.site.register(Item)
admin.site.register(AreaItem)
admin.site.register(AreaAnimal)
admin.site.register(AnimalItem) | [
"59605197+sungguenja@users.noreply.github.com"
] | 59605197+sungguenja@users.noreply.github.com |
22c48bb5b52443319e5ff3b551288a22cd8140df | f7f28cfcfbea56c7000681534bfe64f23212e3e5 | /vote-api/lib/python3.6/shutil.py | 7fde999ad626d22578109dfb3981ec330bfcf5d9 | [] | no_license | mew177/vote-api | c26ad2b508f39418eb91cc366f1e7355176d0945 | b95e29c9223636d12d34de817035f164dd71d1d4 | refs/heads/master | 2023-08-03T14:17:29.651960 | 2020-06-19T03:20:11 | 2020-06-19T03:20:11 | 273,392,080 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 49 | py | /Users/Rycemond/anaconda3/lib/python3.6/shutil.py | [
"mew177@pitt.edu"
] | mew177@pitt.edu |
993c1305af4b69cbcf5f2700baeb90c2acbf25d4 | cffbeaa47d477ee7c4439cbac67b11f0e240f12a | /api/views.py | 0ef1f0d5f837032a6e61841e074b777591ec097a | [] | no_license | mnpappo/handwriting_api | 7ad6c52ce22e7b217b5ebad339f2be75ca2aed76 | 93d48043b7b329de78212fa957d376f97e581014 | refs/heads/master | 2020-09-16T08:24:26.940930 | 2017-06-15T23:35:59 | 2017-06-15T23:35:59 | 94,486,203 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,280 | py | import numpy as np
np.random.seed(1337)
from django.shortcuts import render
from django.contrib.auth.models import User, Group
from rest_framework import viewsets
from api.serializers import UserSerializer, GroupSerializer
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from django.core.files.base import ContentFile
import os
from PIL import Image
# process image after upload
def process_image(image_name, file_content):
    """Persist an uploaded file under ./temp/ next to the project root and
    load it back as an RGB numpy array.

    :param image_name: file name of the upload (used as-is in the path;
        NOTE(review): caller-controlled -- verify upstream sanitisation
        before trusting it against path traversal).
    :param file_content: Django File/ContentFile exposing .chunks().
    :return: (img, img_fullpath) -- HxWx3 uint8 array and the saved path.
    """
    folder = './temp/'
    BASE_PATH = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    # create the folder if it doesn't exist (exist_ok replaces the old
    # bare `except: pass`, which also swallowed real errors like EACCES).
    os.makedirs(os.path.join(BASE_PATH, folder), exist_ok=True)
    # save the uploaded file inside that folder; the context manager
    # guarantees the handle is closed even if a write fails (the previous
    # open/close pair leaked it on error, then re-raised with `raise exp`).
    img_fullpath = os.path.join(BASE_PATH, folder, image_name)
    with open(img_fullpath, 'wb+') as fout:
        for chunk in file_content.chunks():
            fout.write(chunk)
    img = np.array(Image.open(img_fullpath).convert('RGB'))
    return img, img_fullpath
class UserViewSet(viewsets.ModelViewSet):
    """
    API endpoint that allows users to be viewed or edited.
    """
    # Newest accounts first, so list pagination stays stable.
    queryset = User.objects.all().order_by('-date_joined')
    serializer_class = UserSerializer
class GroupViewSet(viewsets.ModelViewSet):
    """
    API endpoint that allows groups to be viewed or edited.
    """
    # All groups, in the model's default ordering.
    queryset = Group.objects.all()
    serializer_class = GroupSerializer
class PhotoList(APIView):
    """
    This end point receives form-data with two images and an imgid
    field: imgid, type: string
    field: file1, type: file
    field: file2, type: file
    """
    def get(self, request, format=None):
        # Listing is deliberately unsupported on this endpoint.
        return Response({'mgs': 'not applied api end point'}, status=status.HTTP_405_METHOD_NOT_ALLOWED)

    def post(self, request, format=None):
        imgid = request.POST['imgid']
        print('recieved image id:', imgid)
        try:
            # Save and decode both uploads with the shared helper.
            processed = []
            for field in ('file1', 'file2'):
                upload = request.FILES[field]
                content = ContentFile(upload.read())
                processed.append(process_image(upload.name, content))
            (image_one, img_one_fullpath), (image_two, img_two_fullpath) = processed
        except Exception:
            # raise exp
            return Response({'mgs': 'image processing failed'}, status=status.HTTP_204_NO_CONTENT)
        # do ypur prediction here
        # after prediction remove the file
        # return success message with predicted data : os.remove(img_path)
        print(type(image_one))
        print(img_one_fullpath)
        return Response({
            'mgs': 'success!!! checkout the full response :)',
            'key1': 'result1... so on'
        }, status=status.HTTP_200_OK)
class PhotoDetail(APIView):
    """Detail endpoint placeholder: every verb answers 501 Not Implemented."""

    @staticmethod
    def _not_implemented():
        # Shared placeholder response for all unimplemented verbs.
        return Response({'mgs': 'not applied api end point'}, status=status.HTTP_501_NOT_IMPLEMENTED)

    def get(self, request, pk, format=None):
        return self._not_implemented()

    def post(self, request, pk, format=None):
        return self._not_implemented()
| [
"mnpappo@gmail.com"
] | mnpappo@gmail.com |
67a4431f2cf41a56085422a65fa040772f0312e1 | 5edbc16216806de0c32634fae1ae67c4773fbf65 | /wiki/migrations/0002_auto_20160820_2351.py | 8878c2f9679bb51843d6d084ebf7537e0c527bb0 | [] | no_license | MilesWilliams/klaritywiki | 431d9139309c2997aeaeeb02afce9b4da43cff8d | 197c0f9c4094a64e437eb2a51b531747c262290b | refs/heads/master | 2020-12-02T20:44:30.703329 | 2016-08-22T12:10:48 | 2016-08-22T12:10:48 | 66,269,030 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 385 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-08-20 21:51
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated: renames the ``Category`` model to ``Categories``."""
    dependencies = [
        ('wiki', '0001_initial'),
    ]
    operations = [
        migrations.RenameModel(
            old_name='Category',
            new_name='Categories',
        ),
    ]
| [
"miles@klarity.co.za"
] | miles@klarity.co.za |
29cc73d94435bfd91f4071297e290173c3e70a6f | 86cc876d2b7cbc29d5c13a73d4d985079c73ed68 | /thingflow/adapters/mqtt.py | fe0b20c00a3689ab9dac8f62fb3d9c69fce6d0b5 | [
"Apache-2.0"
] | permissive | masayoshi-louis/thingflow-python | 74fe6f90a37803a27bd69eff9163f7fb668836b4 | c191a8fedac6a962994945830c872e957f929e29 | refs/heads/master | 2020-03-26T08:13:58.334964 | 2017-08-08T03:59:09 | 2017-08-08T03:59:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,643 | py | # Copyright 2016 by MPI-SWS and Data-Ken Research.
# Licensed under the Apache 2.0 License.
import time
from collections import namedtuple
try:
import paho.mqtt.client as paho
except ImportError:
print("could not import paho.mqtt.client")
import ssl
from thingflow.base import InputThing, OutputThing, EventLoopOutputThingMixin
# Normalised record for one MQTT message, as emitted downstream by MQTTReader
# and fabricated by MockMQTTClient.loop().
MQTTEvent = namedtuple('MQTTEvent', ['timestamp', 'state', 'mid', 'topic', 'payload', 'qos', 'dup', 'retain' ])
import random
random.seed()
import datetime
class MockMQTTClient(object):
    """In-memory stand-in for paho.mqtt.Client, used when no broker is
    available.  The on_connect / on_publish / on_message callbacks mirror
    the paho API; loop() fabricates random dummy messages instead of
    reading from a socket."""
    def __init__(self, client_id=""):
        self.userdata = None
        self.client_id = client_id
        self.on_message = None
        self.on_connect = None
        self.on_publish = None

    def connect(self, host, port=1883):
        # "Connect" always succeeds immediately with result code 0.
        if self.on_connect:
            self.on_connect(self, self.userdata, None, 0)
        return 0

    def subscribe(self, topics):
        pass

    def publish(self, topic, payload, qos, retain=False):
        # Publishing is a no-op apart from firing the callback with mid=0.
        if self.on_publish:
            self.on_publish(self, self.userdata, 0)

    def username_pw_set(self, username, password=""):
        pass

    def loop(self, timeout=1.0, max_packets=1):
        # Fabricate between 1 and max_packets dummy messages, then sleep
        # for the requested timeout, mirroring a blocking network loop.
        for sequence_number in range(random.randint(1, max_packets)):
            fake_message = MQTTEvent(datetime.datetime.now(), 0, sequence_number, 'bogus/bogus', 'xxx', 0, False, False)
            if self.on_message:
                self.on_message(self, self.userdata, fake_message)
        time.sleep(timeout)
        return 0

    def disconnect(self):
        pass
class MQTTWriter(InputThing):
    """Subscribes to internal events and pushes them out to MQTT.

    The topics parameter is a list of (topic, qos) pairs.
    Events should be serialized before passing them to the writer.
    """
    def __init__(self, host, port=1883, client_id="", client_username="", client_password=None, server_tls=False, server_cert=None, topics=[], mock_class=None):
        self.host = host
        self.port = port
        self.client_id = client_id
        # Bug fix: this previously stored client_id, silently discarding the
        # client_username argument, so username/password auth used the wrong name.
        self.client_username = client_username
        self.client_password = client_password
        self.topics = topics
        self.server_tls = server_tls
        self.server_cert = server_cert
        # mock_class switches in the in-process fake client (useful for tests).
        if mock_class:
            self.client = MockMQTTClient(self.client_id)
        else:
            self.client = paho.Client(self.client_id)
        if self.client_username:
            self.client.username_pw_set(self.client_username, password=self.client_password)
        self._connect()

    def _connect(self):
        """Connect to the broker, subscribe, and install logging callbacks."""
        if self.server_tls:
            # TLS support was never finished; the code below the raise is
            # intentionally unreachable and kept only as a sketch.
            raise Exception("TBD")
            print(self.client.tls_set(self.server_tls.server_cert, cert_reqs=ssl.CERT_OPTIONAL))
            print(self.client.connect(self.host, self.port))
        else:
            self.client.connect(self.host, self.port)
        self.client.subscribe(self.topics)
        def on_connect(client, userdata, flags, rc):
            print("Connected with result code "+str(rc))
        self.client.on_connect = on_connect
        def on_publish(client, userdata, mid):
            print("Successfully published mid %d" % mid)
        self.client.on_publish = on_publish

    def on_next(self, msg):
        """Note that the message is passed directly to paho.mqtt.client. As such,
        it must be a string, a bytearray, an int, a float or None. Usually, you would
        use something like to_json (in thingflow.filters.json) to do the
        serialization of events.
        """
        # publish the message to the topics
        retain = msg.retain if hasattr(msg, 'retain') else False
        for (topic, qos) in self.topics:
            self.client.publish(topic, msg, qos, retain)

    def on_error(self, e):
        self.client.disconnect()

    def on_completed(self):
        self.client.disconnect()

    def __str__(self):
        return 'MQTTWriter(%s)' % ', '.join([topic for (topic,qos) in self.topics])
class MQTTReader(OutputThing, EventLoopOutputThingMixin):
    """An reader that creates a stream from an MQTT broker. Initialize the
    reader with a list of topics to subscribe to. The topics parameter
    is a list of (topic, qos) pairs.
    Pre-requisites: An MQTT broker (on host:port) --- tested with mosquitto
                    The paho.mqtt python client for mqtt (pip install paho-mqtt)
    """
    def __init__(self, host, port=1883, client_id="", client_username="", client_password=None, server_tls=False, server_cert=None, topics=[], mock_class=None):
        super().__init__()
        self.stop_requested = False
        self.host = host
        self.port = port
        self.client_id = client_id
        # Bug fix: this previously stored client_id (copy-paste from MQTTWriter),
        # silently discarding the client_username argument used for broker auth.
        self.client_username = client_username
        self.client_password = client_password
        self.topics = topics
        self.server_tls = server_tls
        self.server_cert = server_cert
        # mock_class switches in the in-process fake client (useful for tests).
        if mock_class:
            self.client = MockMQTTClient(self.client_id)
        else:
            self.client = paho.Client(self.client_id)
        if self.client_username:
            self.client.username_pw_set(self.client_username, password=self.client_password)
        self._connect()
        def on_message(client, userdata, msg):
            # Normalise the paho message and push it to downstream subscribers.
            m = MQTTEvent(msg.timestamp, msg.state, msg.mid, msg.topic, msg.payload, msg.qos, msg.dup, msg.retain)
            self._dispatch_next(m)
        self.client.on_message = on_message

    def _connect(self):
        """Connect to the broker and (re)install the on_connect subscriber."""
        if self.server_tls:
            # TLS support was never finished; the code below the raise is
            # intentionally unreachable and kept only as a sketch.
            raise Exception("TBD")
            print(self.client.tls_set(self.server_tls.server_cert, cert_reqs=ssl.CERT_OPTIONAL))
            print(self.client.connect(self.host, self.port))
        else:
            self.client.connect(self.host, self.port)
        def on_connect(client, userdata, flags, rc):
            print("Connected with result code "+str(rc))
            # Subscribing in on_connect() means that if we lose the connection and
            # reconnect then subscriptions will be renewed.
            client.subscribe(self.topics)
        self.client.on_connect = on_connect

    def _observe_event_loop(self):
        """Private blocking loop: pump the client until _stop_loop() is called,
        reconnecting whenever loop() reports a non-zero result."""
        print("starting event loop")
        while True:
            if self.stop_requested:
                break
            result = self.client.loop(1)
            if result != 0:
                self._connect()
        self.stop_requested = False
        self.client.disconnect()
        print("Stopped private event loop")

    def _stop_loop(self):
        self.stop_requested = True
        print("requesting stop")

    def __str__(self):
        return 'MQTTReader(%s)' % ', '.join([topic for (topic,qos) in self.topics])
| [
"jeff@data-ken.org"
] | jeff@data-ken.org |
56369aec96a4cef7cf632a602fd07ffec540ec5f | ee3e8773f86da51e39fe1b1a57237ad558c0f991 | /plotting/easy_plotting.py | ef5f64774999291358476bfc58818463ad0dfdd9 | [] | no_license | qyx268/plato | 72cd9ca2a6d5e28cd1618433ebc6af21fd2161e7 | b7c84c021bc26d63c768e9d08e28bbaf77d79a87 | refs/heads/master | 2021-01-15T21:07:56.182831 | 2016-04-15T12:33:21 | 2016-04-15T12:33:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,316 | py | from collections import OrderedDict
from general.nested_structures import flatten_struct
from plotting.data_conversion import vector_length_to_tile_dims
import plotting.matplotlib_backend as eplt
import numpy as np
__author__ = 'peter'
def ezplot(anything, plots = None, hang = True, **plot_preference_kwargs):
    """
    Plot an arbitrary object by flattening it into named data series.

    :param anything: Any nested structure of plottable data.  Anything at all.
    :param plots: Optional dict of existing IPlot objects keyed like the data.
    :param hang: If True, block until the figure is dismissed.
    :return: (figure, plots), as produced by plot_data_dict.
    """
    flattened = flatten_struct(anything)
    return plot_data_dict(flattened, plots, mode = 'static', hang = hang, **plot_preference_kwargs)
def plot_data_dict(data_dict, plots = None, mode = 'static', hang = True, figure = None, size = None, **plot_preference_kwargs):
    """
    Make a plot of data in the format defined in data_dict
    :param data_dict: dict<str: plottable_data>, or a list of (plot_name, plot_data) pairs.
    :param plots: Optionally, a dict of <key: IPlot> identifying the plot objects to use (keys should
        be the same as those in data_dict).
    :return: (figure, plots) -- the matplotlib figure and the plot objects used
        (the same ones you provided if you provided them).
    """
    assert mode in ('live', 'static')
    if isinstance(data_dict, list):
        # NOTE(review): assert used for input validation -- stripped under -O.
        assert all(len(d) == 2 for d in data_dict), "You can provide data as a list of 2 tuples of (plot_name, plot_data)"
        data_dict = OrderedDict(data_dict)
    if plots is None:
        # Build a default plot object for each data series (Python 2 iteritems).
        plots = {k: eplt.get_plot_from_data(v, mode = mode, **plot_preference_kwargs) for k, v in data_dict.iteritems()}
    if figure is None:
        if size is not None:
            # NOTE(review): mutates the global rcParams figure size for the session.
            from pylab import rcParams
            rcParams['figure.figsize'] = size
        figure = eplt.figure()
    # Tile one subplot per series in an (n_rows x n_cols) grid.
    n_rows, n_cols = vector_length_to_tile_dims(len(data_dict))
    for i, (k, v) in enumerate(data_dict.iteritems()):
        eplt.subplot(n_rows, n_cols, i+1)
        plots[k].update(v)
        eplt.title(k, fontdict = {'fontsize': 8})
    # Toggle interactive mode so show() blocks only when hang=True, then restore.
    oldhang = eplt.isinteractive()
    eplt.interactive(not hang)
    eplt.show()
    eplt.interactive(oldhang)
    return figure, plots
def funplot(func, xlims = None, n_points = 100):
    """
    Plot *func* sampled at evenly spaced x values.

    :param func: Vectorized callable mapping an array of x values to y values.
    :param xlims: (xmin, xmax) range; defaults to the current axes' x-bounds.
    :param n_points: Number of sample points.
    """
    if xlims is None:
        xlims = eplt.gca().get_xbound()
    lower, upper = xlims
    sample_x = np.linspace(lower, upper, n_points)
    eplt.plot(sample_x, func(sample_x))
    eplt.gca().set_xbound(*xlims)
| [
"peter.ed.oconnor@gmail.com"
] | peter.ed.oconnor@gmail.com |
0c30fc865d3d875c8782ce0bfcccbc2af6fbe095 | b32560d3f585c2f1c3e7db848c216e0c32ba08e2 | /Programmers/Python/정렬_K번째수.py | 8118ba1ef103ed9c88d703bb2b79271c1f751e8a | [] | no_license | gimmizz/Data-Structure-Algorithm | 48fb7f5a1c9308200a3e020f04b13798b474f6c7 | b8ecf12c71370af89a05022b67e6ae18ddfb3f63 | refs/heads/main | 2023-05-09T04:35:01.088474 | 2021-05-23T14:27:00 | 2021-05-23T14:27:00 | 321,087,868 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 312 | py | def solution(array, commands):
answer = []
for command in commands:
sorted_array = sorted(array[command[0]-1:command[1]])
# array[:command[0]-1] + sorted(array[command[0]-1:command[1]]) + array[command[1]:]
answer.append(sorted_array[command[2]-1])
return answer | [
"69383392+gimmizz@users.noreply.github.com"
] | 69383392+gimmizz@users.noreply.github.com |
37d2d633bb293144044fac2ac98d89645e361fd5 | 27d028f5def725279ade9ebcd22e41464d980871 | /locallibrary/catalog/models.py | f46380126b69e374cfcf5c051c9896f43f383f9d | [] | no_license | MarcusBlaisdell/django-library | 8fc952290d463c87594099ca10333867f40ebb9f | 376780d17204529c8c088f428821426bfefa4e37 | refs/heads/master | 2023-02-15T03:49:45.794019 | 2021-01-08T23:00:46 | 2021-01-08T23:00:46 | 328,028,968 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,883 | py | from django.db import models
from django.urls import reverse # Used to generate URLs by reversing the URL patterns
import uuid # Required for unique book instances
from django.contrib.auth.models import User
from datetime import date
# Create your models here.
class Genre(models.Model):
"""Model representing a book genre."""
name = models.CharField(max_length=200, help_text='Enter a book genre (e.g. Science Fiction)')
def __str__(self):
"""String for representing the Model object."""
return self.name
class Book(models.Model):
"""Model representing a book (but not a specific copy of a book)."""
title = models.CharField(max_length=200)
# Foreign Key used because book can only have one author, but authors can have multiple books
# Author as a string rather than object because it hasn't been declared yet in the file
author = models.ForeignKey('Author', on_delete=models.SET_NULL, null=True)
summary = models.TextField(max_length=1000, help_text='Enter a brief description of the book')
isbn = models.CharField('ISBN', max_length=13, unique=True,
help_text='13 Character <a href="https://www.isbn-international.org/content/what-isbn">ISBN number</a>')
# ManyToManyField used because genre can contain many books. Books can cover many genres.
# Genre class has already been defined so we can specify the object above.
genre = models.ManyToManyField(Genre, help_text='Select a genre for this book')
def __str__(self):
"""String for representing the Model object."""
return self.title
def get_absolute_url(self):
"""Returns the url to access a detail record for this book."""
return reverse('book-detail', args=[str(self.id)])
def display_genre(self):
"""Create a string for the Genre. This is required to display genre in Admin."""
return ', '.join(genre.name for genre in self.genre.all()[:3])
display_genre.short_description = 'Genre'
class BookInstance(models.Model):
"""Model representing a specific copy of a book (i.e. that can be borrowed from the library)."""
id = models.UUIDField(primary_key=True, default=uuid.uuid4, help_text='Unique ID for this particular book across whole library')
book = models.ForeignKey('Book', on_delete=models.CASCADE)
imprint = models.CharField(max_length=200)
due_back = models.DateField(null=True, blank=True)
borrower = models.ForeignKey(User, on_delete=models.SET_NULL, null=True, blank=True)
LOAN_STATUS = (
('m', 'Maintenance'),
('o', 'On loan'),
('a', 'Available'),
('r', 'Reserved'),
)
status = models.CharField(
max_length=1,
choices=LOAN_STATUS,
blank=True,
default='m',
help_text='Book availability',
)
def is_overdue(self):
if self.due_back and date.today() > self.due_back:
return True
return False
class Meta:
ordering = ['due_back']
permissions = (("can_mark_returned", "Set book as returned"),)
def __str__(self):
"""String for representing the Model object."""
return f'{self.id} ({self.book.title})'
class Author(models.Model):
"""Model representing an author."""
first_name = models.CharField(max_length=100)
last_name = models.CharField(max_length=100)
date_of_birth = models.DateField(null=True, blank=True)
date_of_death = models.DateField('Died', null=True, blank=True)
class Meta:
ordering = ['last_name', 'first_name']
def get_absolute_url(self):
"""Returns the url to access a particular author instance."""
return reverse('author-detail', args=[str(self.id)])
def __str__(self):
"""String for representing the Model object."""
return f'{self.last_name}, {self.first_name}'
| [
"marcus.blaisdell@wsu.edu"
] | marcus.blaisdell@wsu.edu |
6c6fb1d95838c3f889b46a7dc6de313d15b867a4 | 01f8b69a1d13578bd013f5e60199ad151863799c | /examples/_relative/xxx.py | ef8d091dbbd19d048dc7c9164f14db13c6a7efe3 | [
"MIT"
] | permissive | podhmo/pyinspect | e3dd9331627091de90a172d9d7eff34307bf2496 | 32ff53c7ceb6a382b635f6c8b98b15f2213d18ff | refs/heads/master | 2020-03-22T20:19:06.961689 | 2019-12-19T15:14:26 | 2019-12-19T15:14:26 | 140,589,327 | 4 | 0 | null | 2019-12-19T15:14:28 | 2018-07-11T14:45:17 | Python | UTF-8 | Python | false | false | 123 | py | def f(x):
return g(x, 1)
def g(x, acc):
    """Tail-recursive accumulator: returns acc * x! (i.e. acc * x * (x-1) * ... * 1)."""
    return acc if x == 0 else g(x - 1, acc * x)
| [
"ababjam61+github@gmail.com"
] | ababjam61+github@gmail.com |
a60bfa7980001c986bed8b71d56e75e0c5b2a66e | 1730f8cea72838a677b52fe82e72d91aa8f68f75 | /003_queues/003_solutionCourseProvided.py | 37326ef9b6a6f674a399d5971a030bad629104f7 | [
"MIT"
] | permissive | remichartier/026_UdacityTechnicalInterviewPrep | 354097e25972a7214b8d1f84fcd3e80b69e79333 | fa52b5f57bdd4e79751059971bb9f73fa0ca8004 | refs/heads/main | 2023-04-07T15:25:16.499791 | 2021-04-18T05:15:23 | 2021-04-18T05:15:23 | 354,467,066 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 327 | py | # I managed to write all of the methods in one line!
class Queue(object):
def __init__(self, head=None):
self.storage = [head]
def enqueue(self, new_element):
self.storage.append(new_element)
def peek(self):
return self.storage[0]
def dequeue(self):
return self.storage.pop(0) | [
"remipr.chartier@gmail.com"
] | remipr.chartier@gmail.com |
d26c3b842a62198bbbe215f07abe2319e9795251 | 445d666367580241f40a997cdf4b57ef53559720 | /tests/psd_tools/psd/test_image_data.py | d68cfccbddcb7ce16791f2e7b76e254c2ce9d8a3 | [
"MIT"
] | permissive | tannerhelland/psd-tools | 32b7747c6286024ebea9cf0c9f71483f857a4fc7 | f4d3fbdcb2164ab745046f9663ae2275a5b2b1ba | refs/heads/master | 2022-10-27T22:02:25.498880 | 2020-05-25T01:50:50 | 2020-05-25T01:50:50 | 275,910,147 | 1 | 2 | MIT | 2020-06-29T19:41:42 | 2020-06-29T19:41:42 | null | UTF-8 | Python | false | false | 2,137 | py | from __future__ import absolute_import, unicode_literals
import pytest
from psd_tools.constants import Compression
from psd_tools.psd.header import FileHeader
from psd_tools.psd.image_data import ImageData
from ..utils import check_write_read
RAW_IMAGE_3x3_8bit = b'\x00\x01\x02\x01\x01\x01\x01\x00\x00'
RAW_IMAGE_2x2_16bit = b'\x00\x01\x00\x02\x00\x03\x00\x04'
def test_image_data():
    # Round-trip serialization for a default and a data-carrying ImageData.
    for image_data in (ImageData(), ImageData(data=b'\x00')):
        check_write_read(image_data)
# Every (image, header) configuration is exercised with all three
# compression schemes; the comprehension expands to the same nine
# parameter sets, in the same order, as the original hand-written table.
@pytest.mark.parametrize(
    'compression, data, header', [
        (
            compression,
            [raw] * 3,
            FileHeader(width=width, height=height, depth=depth, channels=3,
                       version=version),
        )
        for raw, width, height, depth, version in (
            (RAW_IMAGE_3x3_8bit, 3, 3, 8, 1),    # 8-bit 3x3, version 1 (PSD)
            (RAW_IMAGE_3x3_8bit, 3, 3, 8, 2),    # 8-bit 3x3, version 2 (PSB)
            (RAW_IMAGE_2x2_16bit, 2, 2, 16, 1),  # 16-bit 2x2, version 1 (PSD)
        )
        for compression in (Compression.RAW, Compression.RLE, Compression.ZIP)
    ]
)
def test_image_data_data(compression, data, header):
    """Round-trip channel data through ImageData and compare to the input."""
    image_data = ImageData(compression)
    image_data.set_data(data, header)
    output = image_data.get_data(header)
    assert output == data, 'output=%r, expected=%r' % (output, data)
| [
"KotaYamaguchi1984@gmail.com"
] | KotaYamaguchi1984@gmail.com |
9f87f840edf1086df8955c74ea004ebccd29f992 | 9a85b0c1c4af8614ca66bd5815c40bc7de57a9fa | /Ohja/ohja_1.py | 948888244fe4774c5c76169a3e810058883b2ad7 | [] | no_license | tviivi/Python-kertausta | e2ec8d3dd96ee36bfe0cb6f47364bac5b2d327d0 | d86addc6940f3a22e7b2172b8216387615bc23a8 | refs/heads/main | 2023-03-18T13:24:43.580623 | 2021-03-19T14:15:48 | 2021-03-19T14:15:48 | 348,300,528 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,066 | py | from datetime import *
# Oliot ja metodit
def rivien_summat(matriisi: list) -> None:
    """Append the sum of each row to the end of that row, in place.

    The original inner loop iterated ``range(len(matriisi))`` (the number of
    rows), silently assuming a square matrix; this version sums each row's
    actual elements, so it also works for non-square or ragged matrices.
    Behaviour for square input is identical.

    :param matriisi: list of numeric rows; mutated in place
    """
    for rivi in matriisi:
        # sum() is evaluated before append(), so the new element is excluded.
        rivi.append(sum(rivi))
# Luokat ja oliot
def vuodet_listaan(paivamaarat: list) -> list:
    """Return the years of the given dates as a new, sorted list.

    The original built a temporary list, sorted it, then copied it into a
    dead local rebinding; ``sorted`` over a generator does the same work in
    one expression. The input list is not modified.

    :param paivamaarat: list of ``datetime.date``-like objects
    :return: sorted list of the ``year`` attributes
    """
    return sorted(paiva.year for paiva in paivamaarat)
if __name__ == "__main__":
    # Demo 1: append row sums in place, then print the mutated matrix.
    matriisi = [[1, 2], [3, 4]]
    rivien_summat(matriisi)
    print(matriisi)
    # Demo 2: collect the years of three dates into a sorted list.
    paiva1 = date(2019, 2, 3)
    paiva2 = date(2006, 10, 10)
    paiva3 = date(1993, 5, 9)
    vuodet = vuodet_listaan([paiva1, paiva2, paiva3])
    print(vuodet)
class Kirja:
    """A book: name, author, genre and year of writing."""

    def __init__(self, nimi: str, kirjoittaja: str, genre: str, kirjoitusvuosi: int):
        self.nimi = nimi                    # title
        self.kirjoittaja = kirjoittaja      # author
        self.genre = genre                  # genre label, e.g. "rikos"
        self.kirjoitusvuosi = kirjoitusvuosi  # year of writing

    def __repr__(self):
        # Added for debuggability; purely additive, existing callers unaffected.
        return (f"Kirja({self.nimi!r}, {self.kirjoittaja!r}, "
                f"{self.genre!r}, {self.kirjoitusvuosi!r})")
def vanhempi_kirja(kirja1: Kirja, kirja2: Kirja):
    """Return a sentence naming the older of the two books.

    On a tie, kirja2 is reported (the original's else branch).
    """
    if kirja1.kirjoitusvuosi < kirja2.kirjoitusvuosi:
        vanhempi = kirja1
    else:
        vanhempi = kirja2
    return f"{vanhempi.nimi} on vanhempi, se kirjoitettiin {vanhempi.kirjoitusvuosi}"
def genren_kirjat(kirjat: list, genre: str):
    """Return a new list of every book in ``kirjat`` whose genre equals ``genre``."""
    return [kirja for kirja in kirjat if kirja.genre == genre]
# Demo data: three named books plus one created inline.
python = Kirja("Fluent Python", "Luciano Ramalho", "ohjelmointi", 2015)
everest = Kirja("Huipulta huipulle", "Carina Räihä", "elämänkerta", 2010)
norma = Kirja("Norma", "Sofi Oksanen", "rikos", 2015)
kirjat = [python, everest, norma, Kirja("Lumiukko", "Jo Nesbø", "rikos", 2007)]
# List all crime ("rikos") books by author and title.
print("rikoskirjoja ovat")
for kirja in genren_kirjat(kirjat, "rikos"):
    print(f"{kirja.kirjoittaja}: {kirja.nimi}")
print(f"{python.kirjoittaja}: {python.nimi} ({python.kirjoitusvuosi})")
print(f"Kirjan {everest.nimi} genre on {everest.genre}")
# Report which of the two books is older.
print(vanhempi_kirja(python, everest))
| [
"viivi.tiihonen@helsinki.fi"
] | viivi.tiihonen@helsinki.fi |
61d14f737007b347215fcd14e2e8ecc34714c78e | 276632ade57564054c26d701cac0d5353b93c935 | /obj.py | 831d59a0a2846a98b9df8faf5c1973f7645e1241 | [] | no_license | kamel156/Systemy-ekspresowe | a8f1985f6212333ca4eea2cc71d0f60c9fd138d0 | 3821189fb5e301a899e5429d4db9a293d246ae3e | refs/heads/main | 2023-05-23T16:32:53.475324 | 2021-06-17T21:28:04 | 2021-06-17T21:28:04 | 376,932,469 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,003 | py | import cv2
import numpy as np

# Open the input video (cv2 is imported at the top of the file).
cap = cv2.VideoCapture('1.mp4')
counter = 0

# Load the YOLOv3-tiny network (config + weights) and its class names.
net = cv2.dnn.readNet('yolov3tiny.cfg', 'yolov3-tiny.weights')
classes = []
with open('cocco.names', 'r') as f:
    classes = [line.strip() for line in f.readlines()]
layer_names = net.getLayerNames()
output_layers = [layer_names[i[0] - 1] for i in net.getUnconnectedOutLayers()]
colors = np.random.uniform(0, 255, size=(len(classes), 3))
klasa = 'car'  # only this class is drawn and counted

while True:
    success, frame = cap.read()
    # BUGFIX: the original ignored `success`, so it crashed on frame.shape
    # with a None frame when the video ended; stop cleanly instead.
    if not success:
        break
    height, width, channels = frame.shape

    # Forward pass of the network on the current frame.
    blob = cv2.dnn.blobFromImage(frame, 0.00392, (320, 320), (0, 0, 0), True, crop=True)
    net.setInput(blob)
    outputs = net.forward(output_layers)

    # Collect detections above the confidence threshold.
    class_ids = []
    confidences = []
    boxes = []
    for out in outputs:
        for detection in out:
            scores = detection[5:]
            class_id = np.argmax(scores)
            confidence = scores[class_id]
            if confidence > .5:
                # YOLO emits (center_x, center_y, w, h) as fractions of the frame.
                center_x = int(detection[0] * width)
                center_y = int(detection[1] * height)
                w = int(detection[2] * width)
                # BUGFIX: box height lives in detection[3]; the original
                # reused detection[2] (the width slot) here.
                h = int(detection[3] * height)
                x = int(center_x - w / 2)
                y = int(center_y - h / 2)
                boxes.append([x, y, w, h])
                confidences.append(float(confidence))
                class_ids.append(class_id)

    # Non-maximum suppression drops overlapping boxes.
    indexes = cv2.dnn.NMSBoxes(boxes, confidences, score_threshold=0.4, nms_threshold=0.2, top_k=1)
    for i in range(len(boxes)):
        if i in indexes:
            x, y, w, h = boxes[i]
            label = str(classes[class_ids[i]])
            if label == klasa:
                cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
                cv2.putText(frame, label, (x, y + 30), cv2.FONT_HERSHEY_SIMPLEX, .6, (0, 255, 0), 3)
                counter += 1
                print("conter: ", counter)
    cv2.imshow('image', frame)
    cv2.waitKey(20)

cv2.destroyAllWindows()
"kamelus@interia.eu"
] | kamelus@interia.eu |
d913b9c0afd66a6ee6f04517d7ca25d9e5a27bf4 | 8dd06c1cb548f1a2457607e352646f3e20efc2c3 | /front/__init__.py | 5d1715e08b4f1cd4da658cd7d2bdd4437a3a51c7 | [] | no_license | lordhamster66/Automation-Engineering | 98f3df9ca14957acd481ddb7e2acd6ae94a681d4 | 10cf77f04a204784da1a03e17c46ce5147d2f56e | refs/heads/master | 2022-11-08T12:08:39.331341 | 2017-06-16T05:59:27 | 2017-06-16T05:59:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 94 | py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# __author__ = "Breakering"
# Date: 2017/5/24
| [
"1079614505@qq.com"
] | 1079614505@qq.com |
9a0ceb1f8a9e8cca78d4939bcf31c244e4acd324 | e1abd868bfad11bf93c50eee1dc9976674de2358 | /scaffold/suite/mass_flux_spatial_scales_plot.py | e0c9cd702c8515ce963bc91851a1de04cd43b566 | [] | no_license | markmuetz/scaffold_analysis | 5c7e9d04b24abe3462c8946381f4cab264bf09e0 | c02d32536c801b23ac8a71e36d25fa922e7cfd94 | refs/heads/master | 2022-06-03T16:13:54.775718 | 2022-05-31T13:22:24 | 2022-05-31T13:22:24 | 92,677,664 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,255 | py | from itertools import groupby
import matplotlib
import numpy as np
matplotlib.use('Agg')
import pylab as plt
from omnium import Analyser
from scaffold.utils import cm_to_inch
class MassFluxSpatialScalesPlotter(Analyser):
    """Plots histograms of mass flux for each power of 2 (n), and expt.

    Reads the combined mass-flux spatial-scales cubes for every experiment
    and writes per-experiment, per-height, per-scale and cross-experiment
    histogram PNGs. All drawing happens in display_results().
    """
    analysis_name = 'mass_flux_spatial_scales_plot'
    # A single task spans all experiments so combined figures can be drawn.
    multi_expt = True
    input_dir = 'omnium_output/{version_dir}/{expt}'
    input_filename = '{input_dir}/atmos.mass_flux_spatial_scales_combined.nc'
    output_dir = 'omnium_output/{version_dir}/suite_{expts}'
    output_filenames = ['{output_dir}/atmos.mass_flux_spatial_scales_plot.dummy']
    def load(self):
        """Load the input cubes for every experiment (Analyser helper)."""
        self.load_cubes()
    def run(self):
        """No computation stage: all work is done at plot time."""
        pass
    def save(self, state, suite):
        """Write a dummy marker file so the task framework sees completion."""
        with open(self.task.output_filenames[0], 'w') as f:
            f.write('done')
    def display_results(self):
        """Set plotting options, draw all figures, then close them."""
        # None/0 means "use numpy/matplotlib defaults" for bins and limits.
        self.nbins = None
        self.x_cutoff = 0
        self.xlim = None
        self.ylim = None
        self._plot_mass_flux_spatial()
        plt.close('all')
    def _plot_mass_flux_spatial(self):
        """Draw mass-flux histograms for each experiment/height/scale.

        Cubes are keyed by (height_level_index, thresh_index, n), where n is
        the coarse-graining scale (a power of 2). Only the middle threshold
        (thresh_index == 1) is plotted. Frequencies are rescaled by 1/n**2
        so scales are comparable.
        """
        self.append_log('plotting mass_flux_spatial')
        heights = []  # height indices seen, in first-encountered order
        ns = []       # scales (n) seen, in first-encountered order
        for expt in self.task.expts:
            cubes = self.expt_cubes[expt]
            sorted_cubes = []
            for cube in cubes:
                (height_level_index, thresh_index, n) = cube.attributes['mass_flux_spatial_key']
                mf_key = (height_level_index, thresh_index, n)
                sorted_cubes.append((mf_key, cube))
            # Each element is a tuple like: ((1, 2, 32), cube)
            # Sorting will put in correct order, sorting on initial tuple.
            sorted_cubes.sort()
            # Group on first element of tuple, i.e. on 1 for ((1, 2, 32), cube)
            for height_index, key_cubes in groupby(sorted_cubes, lambda x: x[0][0]):
                if height_index not in heights:
                    heights.append(height_index)
                hist_data = []
                # NOTE(review): dmax is accumulated but never used below.
                dmax = 0
                for i, key_cube in enumerate(key_cubes):
                    # middle cube is the one with the middle thresh_index.
                    mf_key = key_cube[0]
                    cube = key_cube[1]
                    # Pick out middle element, i.e. thresh_index == 1.
                    if mf_key[1] == 1:
                        hist_data.append((mf_key, cube))
                        dmax = max(cube.data.max(), dmax)
                # assert len(hist_data) == 3
                for mf_key, hist_datum in hist_data:
                    (height_index, thresh_index, n) = mf_key
                    if n not in ns:
                        ns.append(n)
                    # Per-experiment, per-height, per-scale histogram figure.
                    name = '{}.z{}.n{}.hist'.format(expt, height_index, n)
                    plt.figure(name)
                    plt.clf()
                    plt.title('{} z{} n{} mass_flux_spatial_hist'.format(expt, height_index, n))
                    hist_kwargs = {}
                    if self.xlim:
                        hist_kwargs['range'] = self.xlim
                    else:
                        #hist_kwargs['range'] = (0, 0.1)
                        pass
                    if self.nbins:
                        hist_kwargs['bins'] = self.nbins
                    # Drop values below the cutoff before binning.
                    filtered_data = hist_datum.data[hist_datum.data >= self.x_cutoff]
                    y, bin_edges = np.histogram(filtered_data, **hist_kwargs)
                    bin_centers = 0.5 * (bin_edges[1:] + bin_edges[:-1])
                    # N.B. full width bins.
                    width = bin_edges[1:] - bin_edges[:-1]
                    # Rescale counts by 1/n**2 so different scales are comparable.
                    plt.bar(bin_centers, y / n**2, width=width)
                    if self.xlim:
                        plt.xlim(self.xlim)
                    if self.ylim:
                        plt.ylim(self.ylim)
                    plt.savefig(self.file_path(name + '.png'))
                    # Accumulate a line on the per-experiment all-scales figure...
                    name = '{}.z{}.all_n.hist'.format(expt, height_index)
                    plt.figure(name)
                    plt.plot(bin_centers, y / n**2, label=n)
                    # ...and on the cross-experiment figure for this scale.
                    plt.figure('combined_expt_z{}_n{}'.format(height_index, n))
                    plt.plot(bin_centers, y / n**2, label=expt)
                    # Two-panel "both" figure (and a side-by-side poster variant);
                    # created on first use, re-fetched afterwards.
                    both_name = 'both_z{}'.format(height_index)
                    if plt.fignum_exists(both_name):
                        f = plt.figure(both_name)
                        ax1, ax2 = f.axes
                        # f_poster
                        f_p = plt.figure('poster_' + both_name)
                        ax1_p, ax2_p = f_p.axes
                    else:
                        f, (ax1, ax2) = plt.subplots(2, 1, sharex=True, num=both_name)
                        ax1.set_ylabel('Frequency (rescaled)')
                        ax2.set_ylabel('Frequency (rescaled)')
                        ax2.set_xlabel('Mass flux (kg s$^{-1}$ m$^{-2}$)')
                        if self.xlim:
                            ax1.set_xlim(self.xlim)
                        f_p, (ax1_p, ax2_p) = plt.subplots(1, 2, sharex=True, num='poster_' + both_name)
                        ax1_p.set_ylabel('Frequency (rescaled)')
                        ax1_p.set_xlabel('Mass flux (kg s$^{-1}$ m$^{-2}$)')
                        ax2_p.set_xlabel('Mass flux (kg s$^{-1}$ m$^{-2}$)')
                        if self.xlim:
                            ax1_p.set_xlim(self.xlim)
                            ax2_p.set_xlim(self.xlim)
                    # Line styles for the S0 small-scale curves on the top panel.
                    styles = {1: 'b-',
                              2: 'b--',
                              4: 'b-.'}
                    # Top panel: scales 1/2/4 for the S0 experiment only.
                    if expt == 'S0' and n <= 4:
                        style = styles[n]
                        ax1.plot(bin_centers, y / n**2, style, label=n)
                        ax1_p.plot(bin_centers, y / n**2, style, label=n)
                    # Bottom panel: native scale (n == 1) for every experiment.
                    if n == 1:
                        ax2.plot(bin_centers, y / n**2, label=expt)
                        ax2_p.plot(bin_centers, y / n**2, label=expt)
        # Finalize (legends, titles, limits) and save every accumulated figure.
        for height_index in heights:
            f = plt.figure('both_z{}'.format(height_index))
            ax1, ax2 = f.axes
            ax1.legend(loc='upper right')
            ax2.legend(loc='upper right')
            plt.savefig(self.file_path('both_z{}.png'.format(height_index)))
            f_p = plt.figure('poster_both_z{}'.format(height_index))
            f_p.set_size_inches(*cm_to_inch(25, 9))
            ax1_p, ax2_p = f_p.axes
            ax1_p.legend(loc='upper right')
            ax2_p.legend(loc='upper right')
            plt.tight_layout()
            plt.savefig(self.file_path('poster_both_z{}.png'.format(height_index)))
            for expt in self.task.expts:
                name = '{}.z{}.all_n.hist'.format(expt, height_index)
                plt.figure(name)
                plt.title(name)
                plt.legend()
                plt.savefig(self.file_path(name + '.png'))
            for n in ns:
                plt.figure('combined_expt_z{}_n{}'.format(height_index, n))
                plt.title('combined_expt_z{}_n{}'.format(height_index, n))
                plt.legend()
                if self.xlim:
                    plt.xlim(self.xlim)
                plt.savefig(self.file_path('z{}_n{}_combined.png'.format(height_index, n)))
| [
"markmuetz@gmail.com"
] | markmuetz@gmail.com |
85a8fa3fb7694fce31e9cb1f369c3a65c6b78a3c | c50155808f38f48e00dc3321e137de24926250a7 | /lib/dock.py | 9aed7236489639fc7eba012ffe0979c30e036385 | [] | no_license | irobert0126/ParellelExecution | 788a440feada94fb6d149e8deeeddf570f23e835 | b9a17df4129cce8f233fd21ac4309180f43a6f15 | refs/heads/master | 2020-03-19T06:57:50.186472 | 2018-06-04T19:58:00 | 2018-06-04T19:58:00 | 136,071,159 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,762 | py | import docker, time
from io import BytesIO
from docker import APIClient
client = docker.from_env()
print client.version()
cli = APIClient(base_url='unix://var/run/docker.sock')
def get_current_time():
return time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime())
def cleanup():
global client
for cli in client.containers.list(all=True):
try:
cli.stop()
cli.remove()
except:
pass
def findImageByTag(name, v="latest"):
global client
for image in client.images.list(all=True):
if "%s:%s" % (name, v) in image.tags:
return image
return None
def buildImage(dockerfile, name, force=False):
global cli
global client
if not force:
image_id = findImageByTag(name)
if image_id:
print "[+] Find Existing Image (%s): %s" % (name, image_id)
return image_id
try:
#logs = cli.build(path=dockerfile, rm=True, tag=name)
logs = cli.build(path=dockerfile, tag=name)
time.sleep(5)
image_id = findImageByTag(name)
print get_current_time(), "[+] Build Image:", image_id
return image_id
except docker.errors.APIError as e:
print e
return None
def runC(image, cmd="", ports={}):
global client
container = client.containers.run(image, ports=ports, detach=True)
print get_current_time(), "[+] Create Container:", container
return container
def test_tomcat():
cleanup()
profile = {
"dockerfile":"/home/t/AutoRox/testbed/cve-2017-12617/",
"tag":"tomcat_vul",
"image":"",
"ports":[
{'8080/tcp': 8080},
{'8080/tcp': 8081}
],
}
image_id = buildImage(profile["dockerfile"], profile["tag"])
c1 = runC(image_id, ports=profile["ports"][0])
c2 = runC(image_id, ports=profile["ports"][1])
print client.containers.list()
return [c1, c2]
| [
"t@tongbotest.2fjn4vs4cr4uhlfyeomffjimcg.dx.internal.cloudapp.net"
] | t@tongbotest.2fjn4vs4cr4uhlfyeomffjimcg.dx.internal.cloudapp.net |
74aeb00ae6f9fbcc90d8241447d5c58ee25aebea | 31d3dc809c82290056a27cc8ac7bd3d09c30550a | /nimbusenv/bin/django-admin | b152cc53bb27494b16c570a059b93282a4fd6032 | [
"MIT"
] | permissive | konoufo/nimbus | 4785be902eb18a0ef6c0d637ecedca0e54b715c6 | 9d07534e2658d590eaaf51784dcd17580c2a3725 | refs/heads/master | 2021-01-09T06:22:30.222432 | 2017-02-05T16:07:38 | 2017-02-05T16:07:38 | 80,975,163 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 291 | #!/home/kevyn/nimbus/nimbusenv/bin/python3
# -*- coding: utf-8 -*-
# Console entry point generated for this virtualenv (see shebang above):
# dispatches straight to Django's command-line management utility.
import re
import sys
from django.core.management import execute_from_command_line
if __name__ == '__main__':
    # Strip the "-script.py(w)"/".exe" suffix some installers add to argv[0]
    # so Django reports a clean program name in its help/usage output.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(execute_from_command_line())
| [
"root@ip-172-31-20-119.us-west-2.compute.internal"
] | root@ip-172-31-20-119.us-west-2.compute.internal | |
e06d2b176396a29ae9f62cab21aeb06a0c165897 | e0980f704a573894350e285f66f4cf390837238e | /.history/news/models_20201124125405.py | ad4367cc96dbe5bc9bd177dc7020584d0a479ff6 | [] | no_license | rucpata/WagtailWebsite | 28008474ec779d12ef43bceb61827168274a8b61 | 5aa44f51592f49c9a708fc5515ad877c6a29dfd9 | refs/heads/main | 2023-02-09T15:30:02.133415 | 2021-01-05T14:55:45 | 2021-01-05T14:55:45 | 303,961,094 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 221 | py | from django.db import models
from wagtail.contrib.forms.models import AbstractEmailForm
# Create your models here.
class NewsPage(AbstractEmailForm):
    """Wagtail page model for news, based on the built-in e-mail form page.

    NOTE(review): this is a work-in-progress ``.history`` snapshot. Both
    attribute names below look misspelled — Wagtail reads ``template`` and
    ``landing_page_template``, so as written these assignments are inert
    and the page falls back to Wagtail's default template resolution.
    Confirm the intended values before renaming (an empty
    ``landing_page_template`` would break post-submit rendering).
    """
    tempalte ='news/news_page.html'
    leanding_page_template = ''
"rucinska.patrycja@gmail.com"
] | rucinska.patrycja@gmail.com |
c67ec6cd4426884633178328c210265009ba7e05 | dd67bc8452795a3632c360ce67324004d620ec09 | /pytools/runPTCLM.py | a09fc9b2ea5727592aa000f6adb66161297079c5 | [] | no_license | fmyuan/elm-pf-tools | ebcc4b7bcb3afc78f154b220510e477a1d228a26 | deceaf4eb38c267654c1434138891710c742ed0c | refs/heads/master | 2023-08-18T01:54:57.218105 | 2023-08-01T23:03:56 | 2023-08-01T23:03:56 | 196,645,490 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 41,757 | py | #!/usr/bin/env python
import os, sys, stat, csv
from optparse import OptionParser
import glob
#DMR 4/16/13
#python ./runPTCLM.py does the following:
# 1. Call routines to create point data (makepointdata.py, makemetdata.py) - removed
# 2. Set point and case-specific namelist options
# 2. configure case
# 3. build (compile) CESM with clean_build first if requested
# 4. apply patch for transient CO2 if transient run
# 6. apply user-specified PBS and submit information - removed
# 7. submit job to PBS queue if requested.
#
# FMY 5/23/2016
# revised to work for ALM used by NGEE-Arctic Phase II
# FMY 3/2/2017: updated with ACME v.1
#-------------------Parse options-----------------------------------------------
parser = OptionParser()
parser.add_option("--caseidprefix", dest="mycaseid", default="", \
help="Unique identifier to include as a prefix to the case name")
parser.add_option("--caseroot", dest="caseroot", default='./', \
help = "case root directory (default = ./, i.e., under scripts/)")
parser.add_option("--runroot", dest="runroot", default="../runs", \
help="Directory where the run would be created")
parser.add_option("--ccsm_input", dest="ccsm_input", \
help = "input data directory for CESM (required)")
parser.add_option("--cesmdir", dest="cesmdir", default='..', \
help = "cesm directory (default = .., i.e., upper directory of this script)")
parser.add_option("--compset", dest="compset", default='I1850CLM45CN', \
help = "component set to use (required)")
#
# will use SITE info below to generate 'resolution', i.e. '-res CLM_USRDAT'
parser.add_option("--site", dest="site", default='', \
help = '6-character FLUXNET code to run (required)')
parser.add_option("--sitegroup", dest="sitegroup", default="AmeriFlux", \
help = "site group to use (default AmeriFlux)")
parser.add_option("--align_year", dest="align_year", default="1851", \
help = 'Alignment year for datm data')
#
parser.add_option("--machine", dest="machine", default = '', \
help = "machine to use ---- \n"
"default = '' \n"
"options = checking by ./create_newcase -list machines \n "
"NOTE: make sure the option you chose well-defined in config_machines.xml")
parser.add_option("--compiler", dest="compiler", default = '', \
help = "compiler to use on machine ---- \n"
" default = '', the default compiler for the chosen machine) \n "
" options = intel,ibm, pgi,pathscale,gnu,cray,lahey, ... \n "
"NOTE: make sure the option you chose well-defined in config_compilers.xml")
parser.add_option("--mpilib", dest="mpilib", default = 'mpi-serial', \
help = "mpi library to use (default = mpi-serial)"
"options=openmpi,mpich,mpt,ibm,mpi-serial, BUT upon your system")
#compsets and module options
parser.add_option("--no_fire", dest="nofire", action="store_true", \
default=False, help="Turn off fire algorightms")
parser.add_option("--centbgc", dest="centbgc", default=False, \
help = 'To turn on CN with multiple soil layers, CENTURY C module', action="store_true")
parser.add_option("--nitrif_denitrif", dest="kovenN", default=False, \
help = 'To turn on CN with Koven nitrif-denitrif (not necessary to bundle with centbgc)', action="store_true")
parser.add_option("--CH4", dest="CH4", default=False, \
help = 'To turn on CN with CLM4me (not necessary to bundle with centbgc)', action="store_true")
#parser.add_option("--extended_pft", dest="extended_pft", default=False, \
# help = 'To turn on Expanded (Arctic) PFTs flag (-DEXTENDED_PFT) in CLM. Must provide --parm_file', action="store_true")
parser.add_option("--parm_file", dest="parm_file", default="clm_params_c160822.nc", \
help = 'CLM user-defined physiological parameter file, with default: clm_params.c160822.nc')
parser.add_option("--co2_file", dest="co2_file", default="fco2_datm_1765-2007_c100614.nc", \
help = 'CLM transient CO2 file for diagnostic option')
parser.add_option("--ad_spinup", action="store_true", \
dest="ad_spinup", default=False, \
help = 'Run accelerated decomposition spinup (note: exit-ad will run in the end as well)')
parser.add_option("--coldstart", dest="coldstart", default=False, \
help = "set cold start (mutually exclusive w/finidat)", \
action="store_true")
#user-defined datasets
parser.add_option("--tstep", dest="tstep", default=0.5, \
help = 'CLM timestep (hours)')
parser.add_option("--surfdatafile", dest="surfdatafile", default="", \
help = 'CLM user-defined surface data file in inputdata/lnd/clm2/surfdata_map/ \n'
"the default file name IS: surfdata_?x?pt_sitename_simyr????.nc, with ?x?pt_sitename as CLM_USRDAT_NAME")
parser.add_option("--pftdynfile", dest="pftdynfile", default="", \
help = 'CLM user-defined pftdyn data file in inputdata/lnd/clm2/surfdata_map/ \n'
"the default file name IS: surfdata.pftdyn_?x?pt_sitename.nc, with ?x?pt_sitename as CLM_USRDAT_NAME")
parser.add_option("--regional", dest="regional", default=False, \
help="flag for regional", action="store_true")
parser.add_option("--xpts", dest="xpts", default=1, \
help = 'for regional: xpts')
parser.add_option("--ypts", dest="ypts", default=1, \
help = 'for regional: ypts')
parser.add_option("--update-datm-domain", dest="update_datm_domain",
action="store_true", default=False, \
help = 'point the datm path for the domain to the datm directory instead of the share directory.')
#running years/units
parser.add_option("--finidat_case", dest="finidat_case", default='', \
help = "case containing initial data file to use" \
+" (should be in your runroot directory)")
parser.add_option("--finidat", dest="finidat", default='', \
help = "initial data file to use" \
+" (should be in the runroot directory)")
parser.add_option("--finidat_year", dest="finidat_year", default=1, \
help = "model year of initial data file (default is" \
+" last available)")
parser.add_option("--run_units", dest="run_units", default='nyears', \
help = "run length units (ndays, nmonths, nyears, date)")
parser.add_option("--run_n", dest="run_n", default=600, \
help = "run length (in run units)")
parser.add_option("--restart_units", dest="restart_units", default='', \
help = "restart length units for output restart file" \
+ "(default: same as run_units)")
parser.add_option("--restart_n", dest="restart_n", default='', \
help = "restart length for outputfor output restart file" \
+ "(default: same as run_n)")
#user-defined codes/output lists
parser.add_option("--srcmods_loc", dest="srcmods_loc", default='', \
help = 'Copy sourcemods from this location')
parser.add_option("--hist_userdefined", dest="hist_file", default='', \
help = 'user=defined hist file')
#build options
parser.add_option("--rmold", dest="rmold", default=False, action="store_true", \
help = 'Remove old case directory with same name' \
+" before create/update new one")
parser.add_option("--clean_build", dest="clean_build", default=False, \
help = 'Perform clean build before building', \
action="store_true")
parser.add_option("--debug_build", dest="debug_build", default=False, \
help = 'Perform debug build', \
action="store_true")
parser.add_option("--clean_config", dest="clean_config", default=False, \
help = 'Perform clean setup before setting-up', \
action="store_true")
parser.add_option("--cleanlogs",dest="cleanlogs", help=\
"Removes temporary and log files that are created",\
default=False,action="store_true")
#submit/run options
parser.add_option("--no_submit", dest="no_submit", default=False, \
help = 'do NOT submit CESM to queue', action="store_true")
parser.add_option("--jobname", dest="jobname", default="", \
help="userdefined job name, default is the casename")
parser.add_option("--np", dest="np", default=1, \
help = 'number of processors requested')
parser.add_option("--ppn", dest="ppn", default=1, \
help = 'processors per node, usually input with --np option above, with PBS -l nodes=np/ppn+1:ppn')
parser.add_option("--mppwidth", dest="mppwidth", default=1, \
help = 'processors range, which is --np option but using default ppn, e.g. on Hopper')
parser.add_option("--mppnodes", dest="mppnodes", default=1, \
help = 'number of nodes requested, with PBS -l nodes=mppnodes, e.g. on Titan')
parser.add_option("--walltime", dest="walltime", default="", \
help = 'user-requested walltime in format hh:mm:ss')
#CLM-PFLOTRAN coupling options
parser.add_option("--clm_pflotran", action="store_true", dest="pflotran", default = False, \
help = "clm coupled with PFLOTRAAN (default = False, i.e. not coupled). Must provide --pflotran_dir")
parser.add_option("--clm_pf_colmode", action="store_true", dest="clmpfcolmode", default = False, \
help = "clm coupled with PFLOTRAAN by column-mode (default = False, i.e. fully in 3-D; otherwise, vertical-only )." )
parser.add_option("--pflotran_srcdir", dest="pflotran_srcdir", default = "", \
help = "PFLOTRAN source root directory, under which /src/clm-pflotran/libpflotran.a exists! ")
parser.add_option("--pflotran_indir", dest="pflotran_indir", default = "", \
help = "PFLOTRAN input root directory, under which input files for pflotran will be copied from (default: $cesminput/pflotran/ ")
parser.add_option("--petsc_dir", dest="petsc_dir", default = "", \
help = "PETSC_DIR petsc library root directory, if not set in env_specific_mach")
parser.add_option("--petsc_arch", dest="petsc_arch", default = "", \
help = "PETSC_ARCH petsc ARCH name, if not set in env_specific_mach")
#
(options, args) = parser.parse_args()
#------------------- arguments ------------------------------------------------------------
print('INFO: configure ACME with DATM, NCEP datm setting, user-defined yrs-range ')
print('INFO: configure ACME with RESOLUTION of CLM_USRDAT, naming as "?x?pt_sitename" ')
print('INFO: configure ACME with CLM-CN+ with multiple vertical soil layers ')
# current directory
runCLMdir = os.getcwd()
# cesm model directory
if (options.cesmdir==''):
print('UNKNOWN cesm root directory: ')
sys.exit()
else:
csmdir = os.path.abspath(options.cesmdir)
scriptsdir = csmdir+'/cime/scripts'
# machine/compiler options
machineoptions = ''
if (options.machine==''):
print('machine option is required !')
sys.exit()
else:
machineoptions += ' -mach '+options.machine
if (options.machine == 'userdefined'):
print('WARNING: must manually edit env_case.xml, env_build.xml, env_run.xml, env_mach_pes.xml ...! \n')
if (options.compiler != ''):
machineoptions += ' -compiler '+options.compiler
else:
print('compiler is required !')
sys.exit()
if (options.mpilib != ''):
machineoptions += ' -mpilib '+options.mpilib
else:
machineoptions += ' -mpilib mpi-serial'
#check for valid input data directory
if (options.ccsm_input == '' or \
(os.path.exists(options.ccsm_input) == False)):
print('Error: invalid input data directory')
sys.exit()
else:
ccsm_input = os.path.abspath(options.ccsm_input)
#check for valid compset, i.e. only with 'CLM45CN' NCEP datm settings
compset = options.compset
if (compset.startswith('I1850CLM45') == False \
and compset.startswith('I1850CRUCLM45') == False \
and compset.startswith('I20TRCLM45') == False \
and compset.startswith('I20TRCRUCLM45') == False):
print('Error: please enter one of following valid options for compset:')
print(' I1850(CRU)CLM45CN, I1850(CRU)CLM45BGC, I20TR(CRU)CLM45CN, I20TR(CRU)CLM45BGC')
sys.exit()
if (compset.startswith('I20TR') == True):
#ignore spinup option if transient compset
if (options.ad_spinup):
print('Spinup options not available for transient compset.')
sys.exit()
elif(options.ad_spinup):
options.coldstart = True
# cases root directory
if (options.caseroot == ''):
caseroot = csmdir+'/cases'
else:
caseroot = os.path.abspath(options.caseroot)
print('CASE root directory: '+options.caseroot)
if(os.path.exists(options.caseroot) == False):
os.system('mkdir -p '+caseroot)
#----- Construct default casename
casename = options.site+"_"+compset
if (options.mycaseid != ""):
casename = options.mycaseid+'_'+casename
if (options.ad_spinup):
casename = casename+'_ad_spinup'
#case directory
if (caseroot != "./"):
casedir = caseroot+"/"+casename
else:
casedir = casename
print ("CASE directory is: "+casedir+"\n")
#Check for existing case directory
if (os.path.exists(casedir)):
print('Warning: Case directory exists and --rmold not specified')
var = raw_input('proceed (p), remove old (r), or exit (x)? ')
if var[0] == 'r':
os.system('rm -rf '+casedir)
if var[0] == 'x':
sys.exit()
# cases run root directory
if (options.runroot == ''):
runroot = csmdir+"/runs"
else:
runroot = os.path.abspath(options.runroot)
print('CASE RUN root directory: '+runroot)
if(os.path.exists(options.runroot) == False):
os.system('mkdir -p '+runroot)
blddir=runroot+"/"+casename+'/bld'
print ("CASE bulid and exeroot is: "+blddir+"\n")
rundir=runroot+"/"+casename+"/run"
print ("CASE rundir is: "+rundir+"\n")
#finidat file and finidat year
if (options.coldstart and (options.finidat != '' or options.finidat_case != '')):
print('Error: Cannot have an finidat/finidat_case AND coldstart simultaneously! Exit \n')
sys.exit()
if (options.finidat == '' and options.finidat_case == ''): # not user-defined
if (options.coldstart==False and compset.startswith('I1850')==True):
options.finidat_case = casename+'_ad_spinup'
if (compset.startswith('I20TR') == True):
options.finidat_case = casename.replace('I20TR','I1850')
#finidat and finidat_year is required for transient compset
if (os.path.exists(runroot+'/'+options.finidat_case) == False \
or options.finidat_year == -1):
print('Error: must provide initial data file for I20TR(CRU)CLM45??? compset, OR, '+ \
runroot+'/'+options.finidat_case+' existed as refcase')
sys.exit()
elif (options.finidat != ''): # user-defined finidat file
if (options.finidat.startswith('/')): # full path and file names
finidat = options.finidat
else: # otherwise, finidat is assummed under the $ccsm_input/lnd/clm2/inidata/
finidat = ccsm_input+'/lnd/clm2/inidata/'+options.finidat
if (options.finidat_year == -1):
print('Error: must define the finidat_year if finidat defined! Exit \n')
sys.exit()
finidat_year = int(options.finidat_year)
finidat_yst = str(finidat_year)
if (finidat_year >= 100 and finidat_year < 1000):
finidat_yst = '0'+str(finidat_year)
if (finidat_year >= 10 and finidat_year < 100):
finidat_yst = '00'+str(finidat_year)
if (finidat_year < 10):
finidat_yst = '000'+str(finidat_year)
if (options.finidat_case != '' and options.finidat == ''):
finidat = runroot+'/'+options.finidat_case+'/run/'+ \
options.finidat_case+'.clm2.r.'+finidat_yst+'-01-01-00000.nc'
#default simyr
mysimyr=1850
if (options.compset.startswith('I20TR') == True):
mysimyr=2000
#pft parameter file
# new or user-defined pft-phys file if desired
if (options.extended_pft and options.parm_file == ''):
print('MUST provide user-defined parameter file! Exit \n')
sys.exit()
if (options.parm_file != ''):
pftfile = ccsm_input+'/lnd/clm2/paramdata/' + \
options.parm_file.replace('.nc','') + '.' + options.site + '.nc'
os.system('cp -f '+ccsm_input+'/lnd/clm2/paramdata/'+ options.parm_file + \
' '+pftfile)
#----- Construct CLM_USRDAT_NAME for PTCLM -res CLM_USRDAT
#PTCLMfiledir = runCLMdir
PTCLMfiledir = options.ccsm_input+'/lnd/clm2/PTCLM'
sitedatadir = os.path.abspath(PTCLMfiledir+'/PTCLM_sitedata')
os.chdir(sitedatadir)
AFdatareader = csv.reader(open(options.sitegroup+'_sitedata.txt',"rb"))
for row in AFdatareader:
if row[0] == options.site:
startyear=int(row[6])
endyear=int(row[7])
alignyear = int(row[8])
if (options.regional == True):
if (options.xpts<2 and options.ypts<2):
print('Error: xpts OR ypts MUST be greater than 1 for Option: regional! \n')
exit(-1)
numxpts=int(options.xpts)
numypts=int(options.ypts)
else:
numxpts=1
numypts=1
clm_usrdat_name = str(numxpts)+"x"+str(numypts)+"pt_"+options.site
if (options.compset.startswith('I20TR') == False):
alignyear = 1 # otherwise, for 'I1850*' compset, there will be issue of reading in datm data. (ONLY read ONCE)
#set number of run years, if user-defined
run_n = options.run_n
run_units = options.run_units
if (compset.startswith('I20TR') == True and options.run_n == 600): # 600 is the default (i.e., NOT user-defined)
if (run_units == 'nyears'):
run_n = endyear - 1850 +1
elif (run_units == 'date'):
run_n = endyear + 1 # date in yyyymmdd, but here only needs year value
#-------------------------------------------------------------
os.chdir(scriptsdir)
# ------------------ create, setup and build -----------------------------------------
#--- (1) create a new case
#create new case
comps = options.compset
print ('./create_newcase -case '+casedir +' '+machineoptions + \
' -compset '+ comps +' -res CLM_USRDAT ')
os.system('./create_newcase -case '+casedir+' '+machineoptions + \
' -compset '+ comps +' -res CLM_USRDAT ' + \
' > create_newcase.log')
if (os.path.isdir(casedir)):
print(casename+' created. See create_newcase.log for details')
os.system('mv create_newcase.log ' + casedir +"/"+casename+"_case.log")
else:
print('failed to create case. See create_newcase.log for details')
# go to newly created case directory
os.chdir(casedir)
# (2) env_build.xml modification ---------------------------
if (options.runroot != ''):
# the following is new
os.system('./xmlchange -file env_build.xml -id ' \
+'CESMSCRATCHROOT -val '+runroot)
#build directory
os.system('./xmlchange -file env_build.xml -id ' \
+'EXEROOT -val '+blddir)
# turn off rtm module
os.system('./xmlchange -file env_build.xml -id ' \
+'RTM_MODE -val NULL')
# turn off rtm flood module
os.system('./xmlchange -file env_build.xml -id ' \
+'RTM_FLOOD_MODE -val NULL')
# clm4_5 config options (note: this configuration has re-designed, with most options moved to CLMNMLBLDOPT in env_run.xml)
# base physic options
clmconfig_opts = "-phys clm4_5"
os.system('./xmlchange -file env_build.xml -id ' \
+'CLM_CONFIG_OPTS -val "'+clmconfig_opts+'"')
print ("CLM configuration options: " + clmconfig_opts +"\n")
# (3) env_run.xml modification ------------------------------------
# input/run/output directory
if (options.runroot != ''):
os.system('./xmlchange -file env_run.xml -id ' \
+'RUNDIR -val '+rundir)
os.system('./xmlchange -file env_run.xml -id ' \
+'DOUT_S -val TRUE')
os.system('./xmlchange -file env_run.xml -id ' \
+'DOUT_S_ROOT -val '+runroot+'/archives/'+casename)
if (options.ccsm_input != ''):
os.system('./xmlchange -file env_run.xml -id ' \
+'DIN_LOC_ROOT -val '+ccsm_input)
os.system('./xmlchange -file env_run.xml -id ' \
+'DIN_LOC_ROOT_CLMFORC -val '+ccsm_input+'/atm/datm7')
# datm options
os.system('./xmlchange -file env_run.xml -id ' \
+'DATM_MODE -val CLM1PT')
os.system('./xmlchange -file env_run.xml -id ' \
+'DATM_CLMNCEP_YR_START -val '+str(startyear))
os.system('./xmlchange -file env_run.xml -id ' \
+'DATM_CLMNCEP_YR_END -val '+str(endyear))
os.system('./xmlchange -file env_run.xml -id ' \
+'DATM_CLMNCEP_YR_ALIGN -val '+str(alignyear))
# run timestep
if (options.tstep != 0.5):
os.system('./xmlchange -file env_run.xml -id ' \
+'ATM_NCPL -val '+str(int(24/float(options.tstep))))
# run-type adjusting -- needs checking ('rof' not working??)
if (options.ad_spinup==False and options.coldstart==False):
os.system('./xmlchange -file env_run.xml -id ' \
+'RUN_REFDATE -val '+finidat_yst+'-01-01')
# run starting date/time
if (compset.startswith('I20TRCLM45') == True):
#by default, 'I20TR' starting from 1850, but if not, then
if(int(options.finidat_year) > 1850 and \
(not ('I1850' in options.finidat or 'I1850' in options.finidat_case))):
os.system('./xmlchange -file env_run.xml -id ' \
+'RUN_STARTDATE -val '+finidat_yst+'-01-01')
else:
os.system('./xmlchange -file env_run.xml -id ' \
+'RUN_STARTDATE -val 1850-01-01')
else:
os.system('./xmlchange -file env_run.xml -id ' \
+'RUN_STARTDATE -val '+finidat_yst+'-01-01')
#adds capability to run with transient CO2
if (compset.startswith('I20TR') == True):
os.system('./xmlchange -file env_run.xml -id ' \
+'CCSM_BGC -val CO2A')
os.system('./xmlchange -file env_run.xml -id ' \
+'CLM_CO2_TYPE -val diagnostic')
# user-defined running stop options
os.system('./xmlchange -file env_run.xml -id ' \
+'STOP_OPTION -val '+run_units)
if (options.run_units == 'date'):
os.system('./xmlchange -file env_run.xml -id ' \
+'STOP_DATE -val '+str(run_n)+'0101')
os.system('./xmlchange -file env_run.xml -id ' \
+'STOP_N -val -9999')
else:
os.system('./xmlchange -file env_run.xml -id ' \
+'STOP_DATE -val -9999')
os.system('./xmlchange -file env_run.xml -id ' \
+'STOP_N -val '+str(run_n))
# user-defined restart options
if (options.restart_units != ''):
os.system('./xmlchange -file env_run.xml -id ' \
+'REST_OPTION -val '+options.restart_units)
if (options.restart_n != ''):
if (options.restart_units == 'date'):
os.system('./xmlchange -file env_run.xml -id ' \
+'REST_DATE -val '+str(options.restart_n)+'0101')
os.system('./xmlchange -file env_run.xml -id ' \
+'REST_N -val -9999')
else:
os.system('./xmlchange -file env_run.xml -id ' \
+'REST_N -val '+str(options.restart_n))
os.system('./xmlchange -file env_run.xml -id ' \
+'REST_DATE -val -9999')
#User-defined resolution data name
os.system('./xmlchange -file env_run.xml -id CLM_USRDAT_NAME ' \
+'-val '+clm_usrdat_name)
# CLM build namelist options
# (NOTE: clm-cn is by default
stdout = os.popen("./xmlquery -valonly CLM_BLDNML_OPTS")
clmbldnmls = stdout.read().rstrip( )
# i) CLM-CN or CLM-CENTURY
# NOTE: vertical-soil now is for both option
if (options.centbgc):
clmbldnmls = " -bgc bgc"
# this will turn-on the whole package of clm4me, nitrif-denitrif, century-decomp
print(" \n ======= CLM-BGC ==========================================" )
print(" CLM bgc will be turned on with CENTURY BGC with clm4me, nitrif-denitrif ! " )
else:
clmbldnmls = " -bgc cn"
print(" \n ======= CLM-CN ==========================================" )
print(" CLM bgc will be turned on with classic CLM-CN ! " )
# option to turn on '-LCH4' for CLM45CN without CENTURY BGC, if requested ------
# this is because configuration of 'CLM4ME' is bundled with centurary BGC
if (options.kovenN):
print(" \n ======= NITRIF-DENITRIF ====================================" )
print(" CLM-CN+ classic bgc will be turned on with Koven' nitrif_denitrif option! " )
clmbldnmls += " -nitrif_denitrif"
#elif (options.centbgc==False):
#print(" CLM-CN+ classic bgc will be with simple inorganic N cycle modules ! " )
if (options.CH4 and options.centbgc==False):
print(" \n ======= CH4 =========================================" )
print(" CLM-CN+ classic bgc will be turned on with clm4me option! " )
clmbldnmls += " -methane"
# ii) ad-spinup with exit-spinup included
if (options.ad_spinup):
clmbldnmls += " -bgc_spinup on"
# iii) write into the xml valule
os.system('./xmlchange -file env_run.xml -id ' \
+'CLM_BLDNML_OPTS -val "'+clmbldnmls+'"')
print ("CLM namelist build options: " + clmbldnmls +"\n")
#save intermit restart files (NOW, at most 3 sets are saved)
os.system('./xmlchange -file env_run.xml -id DOUT_S_SAVE_INTERIM_RESTART_FILES ' \
+'-val TRUE' )
# (4) env_mach_pes.xml modification ------------------------------------
# normal 1 pt mode, not threaded
os.system('./xmlchange -file env_mach_pes.xml -id NTASKS_ATM -val 1')
os.system('./xmlchange -file env_mach_pes.xml -id NTHRDS_ATM -val 1')
os.system('./xmlchange -file env_mach_pes.xml -id ROOTPE_ATM -val 0')
os.system('./xmlchange -file env_mach_pes.xml -id NTASKS_LND -val 1')
os.system('./xmlchange -file env_mach_pes.xml -id NTHRDS_LND -val 1')
os.system('./xmlchange -file env_mach_pes.xml -id ROOTPE_LND -val 0')
os.system('./xmlchange -file env_mach_pes.xml -id NTASKS_ICE -val 1')
os.system('./xmlchange -file env_mach_pes.xml -id NTHRDS_ICE -val 1')
os.system('./xmlchange -file env_mach_pes.xml -id ROOTPE_ICE -val 0')
os.system('./xmlchange -file env_mach_pes.xml -id NTASKS_OCN -val 1')
os.system('./xmlchange -file env_mach_pes.xml -id NTHRDS_OCN -val 1')
os.system('./xmlchange -file env_mach_pes.xml -id ROOTPE_OCN -val 0')
os.system('./xmlchange -file env_mach_pes.xml -id NTASKS_CPL -val 1')
os.system('./xmlchange -file env_mach_pes.xml -id NTHRDS_CPL -val 1')
os.system('./xmlchange -file env_mach_pes.xml -id ROOTPE_CPL -val 0')
os.system('./xmlchange -file env_mach_pes.xml -id NTASKS_GLC -val 1')
os.system('./xmlchange -file env_mach_pes.xml -id NTHRDS_GLC -val 1')
os.system('./xmlchange -file env_mach_pes.xml -id ROOTPE_GLC -val 0')
os.system('./xmlchange -file env_mach_pes.xml -id NTASKS_ROF -val 1')
os.system('./xmlchange -file env_mach_pes.xml -id NTHRDS_ROF -val 1')
os.system('./xmlchange -file env_mach_pes.xml -id ROOTPE_ROF -val 0')
os.system('./xmlchange -file env_mach_pes.xml -id NTASKS_WAV -val 1')
os.system('./xmlchange -file env_mach_pes.xml -id NTHRDS_WAV -val 1')
os.system('./xmlchange -file env_mach_pes.xml -id ROOTPE_WAV -val 0')
os.system('./xmlchange -file env_mach_pes.xml -id MAX_TASKS_PER_NODE -val 1')
os.system('./xmlchange -file env_mach_pes.xml -id TOTALPES -val 1')
if(int(options.np)<=1):
if(int(options.mppwidth)>1): # this input is for HOPPER PBS
options.np = options.mppwidth
if( int(options.mppnodes)>1 or
(int(options.mppnodes)==1 and int(options.ppn)>1) ): # this input is for Titan PBS
options.np = str(int(options.mppnodes)*int(options.ppn))
#if running with > 1 processor
else:
options.mppwidth = options.np
options.mppnodes = str((int(options.np)-1)/int(options.ppn)+1)
os.system('./xmlchange -file env_mach_pes.xml -id ' \
+'NTASKS_ATM -val '+options.np)
os.system('./xmlchange -file env_mach_pes.xml -id ' \
+'NTASKS_LND -val '+options.np)
os.system('./xmlchange -file env_mach_pes.xml -id ' \
+'NTASKS_CPL -val '+options.np)
if (options.machine == 'mira'):
os.system('./xmlchange -file env_mach_pes.xml -id ' \
+'MAX_TASKS_PER_NODE -val '+str(int(options.ppn)))
else:
os.system('./xmlchange -file env_mach_pes.xml -id ' \
+'MAX_TASKS_PER_NODE -val '+str(min(int(options.ppn),int(options.np))))
os.system('./xmlchange -file env_mach_pes.xml -id ' \
+'TOTALPES -val '+options.np)
# (5) cesm setup -------------------------------------------
#clean configure if requested prior to configure
if (options.clean_config):
os.system('./case.setup -clean')
os.system('rm -f user-nl-*')
# (5a)check if pflotran directory as an input
if (options.pflotran):
if(options.pflotran_srcdir==""):
print(" PFLOTRAN directories NOT defined, please provide one using '--pflotran_srcdir=' ! \n")
sys.exit()
elif(os.path.exists(options.pflotran_srcdir) == False):
print(" PFLOTRAN directories NOT exist, please the directory provided using '--pflotran_srcdir=' ! \n")
sys.exit()
# (5b) settings for clm coupled with pflotran, if requested ------
else:
print(" \n ============== CLM-PFLOTRAN ===================================" )
print(" NOTE: PFLOTRAN coupled CLM will be configured ! \n" )
print(" make sure of libpflotran.a compiled, and exists in:\n")
if (options.pflotran_srcdir !=""):
print(" PFLOTRAN directory '" \
+options.pflotran_srcdir+"/src/clm-pflotran/'\n" )
with open("env_mach_specific", "a") as myfile:
myfile.write("\n")
myfile.write("#pflotran settings\n")
myfile.write("setenv PFLOTRAN TRUE\n")
myfile.write("setenv PFLOTRAN_COUPLED_MODEL "+options.pflotran_srcdir+"\n")
if(options.petsc_dir!=""):
myfile.write("setenv PETSC_DIR "+options.petsc_dir+"\n")
if(options.petsc_arch!=""):
myfile.write("setenv PETSC_ARCH "+options.petsc_arch+"\n")
if(os.path.exists(options.petsc_dir+"/lib")):
myfile.write("setenv PETSC_LIB ${PETSC_DIR}/lib \n")
else:
myfile.write("setenv PETSC_LIB ${PETSC_DIR}/${PETSC_ARCH}/lib \n")
myfile.write("\n")
myfile.close()
else:
print("PFLOTRAN directory NOT defined! Exit!")
sys.exit()
if(options.clmpfcolmode):
print(" CLM-PFLOTRAN coupled in COLUMN_MODE ! \n" )
with open("env_mach_specific", "a") as myfile:
myfile.write("\n")
myfile.write("#CLM-PFLOTRAN coupled in COLUMN_MODE\n")
myfile.write("setenv COLUMN_MODE TRUE\n")
myfile.write("\n")
myfile.close()
# (5c) flags for option to turn on expanded arctic PFTs, if requested ------
if (options.extended_pft):
print(" \n ======= EXTENDED-PFT ==========================================" )
print(" \n Expanded PFTs for Arctic Tundra in CLM4.5 will be turned on ! " )
print(" NOTE: make sure of using right CLM4.5 code ! \n" )
with open("env_mach_specific", "a") as myfile:
myfile.write("\n")
myfile.write("#expanded arctic PFT switch for CLM4.5\n")
myfile.write("setenv EXTENDED_PFT TRUE\n")
myfile.write("\n")
myfile.close()
# (5e) setup ---------
os.system('./case.setup > configure.log')
# (5f) datm domain path -----------
# if using a single point datm on a multi-point clm, we need to
# update the datm domain path. This must be done after cesm_setup
# is called. Any changes to the user_nl_datm after this point will
# not affect the contents of datm.streams.txt.
# need to copy CaseDocs/datm.streams.txt.CLM1PT.CLM_USRDAT
# to user_datm.streams.txt.CLM1PT.CLM_USRDAT
# then replace share/domains/domain.clm with atm/datm7/domain.clm in the domainInfo filePath
if (options.update_datm_domain is True):
print("--- Updating datm domain ---\n")
os.system('cp -f CaseDocs/datm.streams.txt.CLM1PT.CLM_USRDAT user_datm.streams.txt.CLM1PT.CLM_USRDAT')
# add user write permissions to the datm.streams file
mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH
os.chmod('user_datm.streams.txt.CLM1PT.CLM_USRDAT', mode)
os.system('perl -w -i -p -e "s@share/domains/domain.clm@atm/datm7/domain.clm@" user_datm.streams.txt.CLM1PT.CLM_USRDAT')
# (6) clm user-defined namelist modification ('user_nl_clm') -----
output = open("user_nl_clm",'w')
output.write('&clm_inparm\n')
#(6a) user-defined initial data file ---
if (options.coldstart == False):
if (finidat != ''):
output.write(" finidat = '"+finidat+"'\n")
#(6b) surfdata - user-defined ----
if(options.surfdatafile == ""):
output.write(" fsurdat = '"+ccsm_input+"/lnd/clm2/surfdata_map/" + \
"surfdata_"+str(numxpts)+"x"+str(numypts)+"pt_"+options.site+ \
"_simyr"+str(mysimyr)+".nc'\n")
else:
output.write(" fsurdat = '"+ccsm_input+"/lnd/clm2/surfdata_map/" + \
options.surfdatafile+"'\n")
#(6c) pft dynamics file for transient run ----
if (compset.startswith('I20TRCLM45') == True):
if(options.pftdynfile == ""):
output.write(" flanduse_timeseries = '"+ccsm_input+"/lnd/clm2/surfdata_map/" + \
"surfdata.pftdyn"+str(numxpts)+"x"+str(numypts)+"pt_"+ \
options.site+"_hist_simyr1850-2005_clm4_5_pftgrd_c140204.nc'\n")
else:
output.write(" flanduse_timeseries = '"+ccsm_input+"/lnd/clm2/surfdata_map/" + \
options.pftdynfile+"'\n")
#(6d) user-defined pft physiological file ----
if (pftfile != ''):
output.write(" paramfile= '" + pftfile + "'\n")
#(6e) clm output hist user-defined file ----
if (options.hist_file != ''):
histfile = runCLMdir+"/outputs-userdefined/"+options.hist_file
hvars_file = open(histfile)
output.write("\n")
for s2 in hvars_file:
myline = s2
output.write(myline)
output.write("\n")
hvars_file.close()
# (6f) hacking 'nofire' namelist
if (options.nofire):
print(" \n ======= FIRE ==========================================" )
print(" Turn OFF fire option! " )
output.write(" use_nofire = .true. \n")
#(6g) force namelist options for 'maxpatch_pft' changed if extended arctic pft ----
if (options.extended_pft):
output.write(" maxpatch_pft = 23\n")
#(6h) namelist options for PFLOTRAN coupling ----
if (options.pflotran):
output.write(" use_clm_interface = .true.\n")
output.write(" use_pflotran = .true.\n")
output.write("/\n")
output.write("&clm_pflotran_inparm\n")
output.write(" pflotran_prefix = '"+ casename + "'\n")
output.write("/\n")
output.close()
# (7) copy user-defined sourcemods codes ----
if (options.srcmods_loc != ''):
if (options.srcmods_loc.startswith('/')):
options.srcmods_loc = os.path.abspath(options.srcmods_loc)
else:
options.srcmods_loc = os.path.abspath(runCLMdir+'/'+options.srcmods_loc)
if (os.path.exists(options.srcmods_loc) == False):
print('Invalid srcmods directory. Exiting')
sys.exit()
else:
print('User-defined source codes will be copied from: '+options.srcmods_loc)
os.system('cp -rf '+options.srcmods_loc+'/* ./SourceMods')
# (8) transient CO2 patch for transient run ----
if (compset.startswith('I20TR') == True):
# (8a) historical co2 stream data: globally 1 value ----
os.system('cp '+csmdir+'/models/lnd/clm/doc/UsersGuide/co2_streams.txt ./')
myinput = open('co2_streams.txt')
myoutput = open('co2_streams.txt.tmp','w')
for s in myinput:
s2 = s.strip()
if (s2 =='<filePath>'):
myoutput.write(s)
myoutput.write(' '+ccsm_input+'/atm/datm7/CO2\n')
next(myinput);
elif (s2 =='<fileNames>'):
myoutput.write(s)
myoutput.write(' '+options.co2_file+'\n')
next(myinput);
else:
myoutput.write(s)
myinput.close()
myoutput.close()
os.system('mv co2_streams.txt.tmp co2_streams.txt')
# (8b) modifying default 'datm_atm.in' to include historical co2 stream data ---
myinput = open('./Buildconf/datmconf/datm_atm_in')
myoutput = open('user_nl_datm','w')
for s in myinput:
s2 = s.strip()
if (s2.startswith('dtlimit')):
myoutput.write(' '+s2+',1.5\n')
elif (s2.startswith('fillalgo')):
myoutput.write(" "+s2+",'nn'\n")
elif (s2.startswith('fillmask')):
myoutput.write(" "+s2+",'nomask'\n")
elif (s2.startswith('mapalgo')):
myoutput.write(" "+s2+",'nn'\n")
elif (s2.startswith('mapmask')):
myoutput.write(" "+s2+",'nomask'\n")
elif (s2.startswith('streams')):
myoutput.write(" "+s2+",'datm.global1val.streams.co2.txt 1766 1766 2010'\n")
elif (s2.startswith('taxmode')):
myoutput.write(" taxmode = 'cycle', 'extend', 'extend'\n")
elif (s2.startswith('tintalgo')):
myoutput.write(" "+s2+",'linear'\n")
else:
myoutput.write(s)
myinput.close()
myoutput.close()
# datm namelist modifications (cycle met data streams - a bug since clm4.5.10)
# the issue is that climate data will not repeating (taxmode is 'extend').
myoutput = open('user_nl_datm','w')
myoutput.write("&shr_strdata_nml\n")
if (compset.startswith('I20TR') == True):
myoutput.write(" taxmode = 'cycle', 'extend','extend'\n")
else:
myoutput.write(" taxmode = 'cycle', 'extend'\n")
myoutput.write("/\n")
myoutput.close()
# (9) ------- build clm45 within cesm ----------------------------------------------
#clean build if requested prior to build
if (options.clean_build):
os.system('./case.build --clean-all')
os.system('rm -rf '+rundir+'/*')
os.system('rm -rf '+blddir+'/*')
#compile cesm
os.system('./case.build')
# note: *.build will sweep everything under ./Buildconf, but we need 'co2_streams.txt' in transient run ---
if (compset.startswith('I20TR') == True):
os.system('cp -f co2_streams.txt ./Buildconf/datmconf/datm.global1val.streams.co2.txt')
os.system('cp -f co2_streams.txt '+rundir+'/datm.global1val.streams.co2.txt')
# if pflotran coupled, need to copy input files
if (options.pflotran):
pfindir = ""
if (options.pflotran_indir.startswith('/')): # full path
pfindir = options.pflotran_indir
else:
if(options.pflotran_indir=="" or options.pflotran_indir=="./"):
pfindir = options.ccsm_input+'/pflotran/default'
else:
pfindir = options.ccsm_input+'/pflotran/'+options.pflotran_indir
if(os.path.isfile(pfindir+'/pflotran_clm.in')):
os.system('cp '+pfindir+'/pflotran_clm.in '+rundir+'/'+casename+'.in')
else:
print('Error: must provide a "pflotran_clm.in" for CLM-PFLOTRAN in '+pfindir+'! Exit \n')
sys.exit()
if(glob.glob(pfindir+'/*.meshmap')):
os.system('cp '+pfindir+'/*.meshmap '+rundir+'/')
else:
print('Error: must provide a set of "*.meshmap" for CLM-PFLOTRAN in '+pfindir+'! Exit \n')
sys.exit()
if(os.path.isfile(pfindir+'/CLM-CN_database.dat')):
os.system('cp '+pfindir+'/CLM-CN_database.dat '+rundir+'/')
elif(os.path.isfile(pfindir+'/hanford-clm.dat')):
os.system('cp '+pfindir+'/hanford-clm.dat '+rundir+'/')
else:
if(os.path.isfile(pfindir+'/hanford-clm.dat') == False):
print('Waring: NO PFLOTRAN-bgc database file "handford-clm.dat" or "CLM-CN_database.dat" exists! \n')
if(glob.glob(pfindir+'/*.h5')):
os.system('cp '+pfindir+'/*.h5 '+rundir+'/')
else:
print('Warning: NO PFLOTRAN *.h5 input file exists! -- be sure it is the case! \n')
# ----- copy rpointers and restart files to current run directory prior to run model ---
if (options.finidat_case != ''):
os.system('cp -f '+runroot+'/'+options.finidat_case+'/run/' + \
options.finidat_case+'.*'+finidat_yst+'* ' + rundir)
os.system('cp -f '+runroot+'/'+options.finidat_case+'/run/'+ \
'rpointer.* '+rundir)
# ----- submit job if requested ---
if (options.no_submit == False):
os.chdir(casedir)
os.system('./case.submit')
| [
"yuanf@ornl.gov"
] | yuanf@ornl.gov |
e7f9eb6f18a705e2446062b9a7609948f8193c95 | 46349356d4812a6bf04a1dff4ee3311864f8b7ff | /ma_py/mic_utils/estimate_gg_pdf_nm_fast/estimate_gg_pdf.py | 494d1dd50b6c30a772ba4dcee8a2594e1c295ed2 | [] | no_license | alexdoberman/ma | 1ca9d20f64d0e8c87feff9f7bb04d09d3088aeb3 | 219e5e87b80c6a795c0d4161b3ad22b9973ed745 | refs/heads/master | 2022-07-17T13:15:21.672335 | 2020-05-12T15:10:40 | 2020-05-12T15:10:40 | 263,365,873 | 12 | 2 | null | null | null | null | UTF-8 | Python | false | false | 3,807 | py | # -*- coding: utf-8 -*-
import numpy as np
import soundfile as sf
import matplotlib.pyplot as plt
import glob
import math
from scipy import optimize
import scipy.stats as stats
def fun_ML_c(f, *args):
"""
Calc log likelihood for complex data
:param f: - shape
:param args:
:return:
"""
(scale, y) = args
K = y.shape[0]
B = math.gamma(1.0/f) / math.gamma(2.0/f)
p1 = K*(np.log(f) - np.log(np.pi * math.gamma(1.0/f) *B *scale))
p2 = np.sum(np.power( (np.abs(y)**2)/(B*scale), f))
R = p1 - p2
return - R
def estimate_shape_factor_c_(y, scale):
"""
Estimate shape factor for complex data
:param y: - complex array
:param scale:
:return:
"""
args = (scale, y)
minimum = optimize.brent(fun_ML_c, args=args, brack=(0.02, .3))
return minimum
def estimate_scale_c(y, shape_factor):
"""
Estimate scale for complex data
:param y:
:param shape_factor:
:return:
"""
K = y.shape[0]
B = math.gamma(1.0/shape_factor) / math.gamma(2.0/shape_factor)
scale = np.power( np.sum(np.power(np.abs(y), 2*shape_factor))*shape_factor/K, 1.0/shape_factor) / B
return scale
def estimate_gg_pdf_param_c(y, tol = 0.0000001):
"""
Estim GG pdf params for complex data
:param y:
:param tol:
:return:
"""
shape_factor_prev = 1
scale_prev = np.mean(np.power(np.abs(y), 2))
max_iter = 200
print ('scale_prev = {}'.format(scale_prev))
for _iter in range(max_iter):
shape_factor = estimate_shape_factor_c(y, scale_prev)
scale = estimate_scale_c(y, shape_factor)
print (" iter = {} shape = {} scale = {}".format(_iter, shape_factor, scale))
if (np.abs(scale - scale_prev) < tol and np.abs(shape_factor - shape_factor_prev) < tol):
return shape_factor, scale
scale_prev = scale
shape_factor_prev = shape_factor
print("Warning: estimate_gg_pdf_param_c - not convergent!")
return None, None
def main():
n_fft = 512
gg_params = []
for freq_bin in range(1, int(n_fft / 2)):
print('Process freq_ind = {}'.format(freq_bin))
path = "./out_bin/bin_{}.npy".format(freq_bin)
y = np.load(path)
f, scale = estimate_gg_pdf_param_c(y)
gg_params.append([freq_bin, f, scale])
np.save("gg_params_freq_f_scale", np.array(gg_params))
np.save("gg_params_freq_f_scale", np.array(gg_params))
def estimate_shape_factor_c(y, scale):
"""
Estimate shape factor for complex data
:param y: - complex array
:param scale:
:return:
"""
args = (scale, y)
ff = np.linspace(0.02, 0.9, 200)
L = []
for i in ff:
args = (scale, y)
L.append(fun_ML_c(i, *args))
L = np.array(L)
min_index = np.argmin(L)
l_min = np.min(min_index - 5, 0)
r_min = min_index + 5
a = ff[l_min]
b = ff[r_min]
c = ff[min_index]
minimum = optimize.brent(fun_ML_c, args=args, brack=(a, b))
return minimum
#return L[min_index]
def debug_run():
freq_bin = 1
print('Process freq_ind = {}'.format(freq_bin))
path = "./out_bin/bin_{}.npy".format(freq_bin)
y = np.load(path)
# f, scale = estimate_gg_pdf_param_c(y)
# print (f, scale)
ff = np.linspace(0.02, 0.9, 200)
L = []
for i in ff:
args = (0.04692564477433535, y)
L.append(fun_ML_c(i, *args))
L = np.array(L)
min_index = np.argmin(L)
l_min = np.min(min_index - 5, 0)
r_min = min_index + 5
a = ff[l_min]
b = ff[r_min]
c = ff[min_index ]
print (l_min,min_index,r_min)
print (a,c,b)
plt.plot(ff, L, label="L")
plt.legend(loc='best')
plt.show()
if __name__ == '__main__':
#debug_run()
main()
| [
"lavrentyev@speechpro.com"
] | lavrentyev@speechpro.com |
b5e2bc3969fa3ac42f856de318b96ec26f61200b | e76b659cd551ce65d63c868f8989ce05fe5e96a6 | /driving_image.py | 67d10ac7b0decd1c232e47649d5c53423f04d68e | [] | no_license | jyuatsfl/unsupervised_co_part_segmentation | 1ec9ffa99305e419e25cdb74dfe27497bd6a9a8b | 76b2c70e5636afcc4c2596134200e7397a7239db | refs/heads/main | 2023-08-01T01:57:53.376504 | 2021-09-15T20:56:05 | 2021-09-15T20:56:05 | 406,919,894 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,509 | py | import model as net
from argparse import ArgumentParser
import torch
import os
import cv2
import numpy as np
import matplotlib.pyplot as plt
import imageio
from imageio import mimread
from skimage.color import gray2rgb
def read_video(name, frame_shape=(128,128)):
"""
Read video which can be:
- an image of concatenated frames
- '.mp4' and'.gif'
- folder with videos
"""
if os.path.isdir(name):
frames = sorted(os.listdir(name))
num_frames = len(frames)
video_array = []
for idx in range(num_frames):
this_image= cv2.resize(
cv2.cvtColor(
cv2.imread(os.path.join(name, frames[idx]))
,cv2.COLOR_BGR2RGB
)
,(frame_shape[0:2])
)
video_array.append(
this_image
)
video_array = np.array(video_array)/255
elif name.lower().endswith('.gif') or name.lower().endswith('.mp4') or name.lower().endswith('.mov'):
video = np.array(mimread(name))
if len(video.shape) == 3:
video = np.array([gray2rgb(frame) for frame in video])
if video.shape[-1] == 4:
video = video[..., :3]
video_array=[]
for v in video:
v = cv2.resize(v
,(frame_shape[0:2])
)
video_array.append(v)
video_array = np.array(video_array)/255
return video_array
def out_tensor_to_image(out_tensor):
out_image = out_tensor.detach().cpu().numpy()
out_image[out_image>1] = 1
out_image[out_image<0] = 0
out_image = out_image*255
out_image = out_image.transpose(1,2,0).astype(np.uint8)
return out_image
def init_model(model_path,part_numb):
model = net.Part_3D_Disnet(part_numb).cuda()
model.train()
model.cuda()
model = torch.nn.DataParallel(model)
model.load_state_dict(torch.load(model_path))
return model
def start_run(opt):
colormap = plt.get_cmap('gist_rainbow')
model = init_model(opt.checkpoint_path,opt.part_numb)
all_data = read_video(opt.driving_path)
source_image_base = cv2.imread(opt.source_image)
source_image_base = cv2.cvtColor(source_image_base,cv2.COLOR_BGR2RGB)
source_image_base = cv2.resize(source_image_base,(128,128))
source_image = source_image_base[np.newaxis,:,:,:]
source_image = source_image.transpose(0,3,1,2)/255
source_image = torch.from_numpy(source_image).float()
source_image = torch.nn.functional.interpolate(source_image,(128,128)).cuda()
if not os.path.exists(opt.out_dir+'/images'):
os.makedirs(opt.out_dir+'/images')
video_lenth = len(all_data)
# video_writer = cv2.VideoWriter(opt.out_dir+'/video.mp4', cv2.VideoWriter_fourcc(*'mp4v'), 15, (128*3, 128), True)
final_image_list = []
for index_frame in range(video_lenth):
driving_image_base = all_data[index_frame:index_frame+1]
driving_image = driving_image_base.transpose(0,3,1,2)
driving_image = torch.from_numpy(driving_image)
driving_image = driving_image.float()
driving_image = torch.nn.functional.interpolate(driving_image,(128,128)).cuda()
out = model(source_image,driving_image)
pred_image = out['pred_image_t'][0]
pred_image = out_tensor_to_image(pred_image)
driving_image = cv2.resize(driving_image_base[0],(128,128))
driving_image =(driving_image*255).astype(np.uint8)
final_image = np.hstack([source_image_base,pred_image,driving_image])
print(final_image.shape)
final_image_list.append(final_image.copy())
final_image = cv2.cvtColor(final_image,cv2.COLOR_BGR2RGB)
cv2.imwrite(opt.out_dir+'/images/%04d.png'%index_frame,final_image)
imageio.mimsave(opt.out_dir+'/video.gif',final_image_list,fps=10)
if __name__ == "__main__":
parser = ArgumentParser(description="Evaluation script")
parser.add_argument("--driving_path", required=True, help="path to driving image")
parser.add_argument("--source_image", required=True, help="path to source image")
parser.add_argument("--checkpoint_path", required=True,help="path to checkpoint to restore")
parser.add_argument("--out_dir", required=True,help="path to save_result")
parser.add_argument("--part_numb", default=11,type=int,help="model_part_numb")
opt = parser.parse_args()
start_run(opt)
| [
"790717887@qq.com"
] | 790717887@qq.com |
c9654449609411fecd1626377a4475510a30daff | b210d58b724c7199f43ddde031eba40996c82257 | /submissions/sm_028_sagar/week_17/day_6/session_1/blueprint_auth.py | b9f76b6d74f3356b39b00be35fdc07f0147a9295 | [] | no_license | Tanmay53/cohort_3 | 0447efd2dc8b3c1284b03da7326b35393fdf5f93 | 351fb6e1d0c29995fb5cb3b6af411dbcf0ced64c | refs/heads/master | 2021-05-23T17:55:36.508185 | 2020-04-05T12:58:07 | 2020-04-05T12:58:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,760 | py | from flask import Blueprint
from flask import request
import hashlib
import os
import json
import csv
auth = Blueprint("auth", __name__)
def generate_salt():
salt = os.urandom(16)
print(salt.encode('base-64'))
return salt.encode('base-64')
def md5_hash(string):
hash = hashlib.md5()
hash.update(string.encode('utf-8'))
# print(hash.hexdigest())
return hash.hexdigest()
def multiple_hashing(string,salt):
string = string + salt
hashed_string = md5_hash(string)
for i in range(50):
hashed_string = md5_hash(hashed_string)
return hashed_string
@auth.route('/signup',methods=['POST'])
def signup():
name = request.json['name']
email = request.json['email']
password = request.json['password']
#collecting in sample arr
isEmailPresent = False
id = 0
with open('data/users.csv') as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
id = row['id']
print(row['email'])
if row['email'] == email:
isEmailPresent = True
newId = int(id)+1
#generating new salt and password_hash
salt = generate_salt()
password_hash = multiple_hashing(password,salt)
if isEmailPresent: return json.dumps('Email is already registered')
else:
with open('data/users.csv','a') as csvfile:
fieldnames = ['id','name','email','salt','password_hash']
writer = csv.DictWriter(csvfile, fieldnames = fieldnames)
writer.writerow({'id':newId,'name':name,'email':email,'salt':salt,'password_hash':password_hash})
return json.dumps('User Successfully Registered')
#function to check password:
def isPasswordAvailable(password,email):
salt = ''
#read file and check for salt
isPresent = False
saved_hashed_password = ''
with open ('data/users.csv') as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
if row['email'] == email:
isPresent = True
salt = row['salt']
saved_hashed_password = row['password_hash']
break
if not isPresent:
return 'Email is Not Present'
else:
hashed_password = multiple_hashing(password,salt)
if hashed_password == saved_hashed_password:
return {'message':'Login Successfull','status':True}
else: return {'message':'Invalid Password','status':False}
@auth.route('/login', methods=['POST'])
def login():
    """Authenticate a user and return the outcome as JSON.

    Expects a JSON body with 'email' and 'password' keys; the reply is
    the JSON-serialized result of isPasswordAvailable() (a status string
    for unknown emails, otherwise a message/status dict).
    """
    payload = request.json
    result = isPasswordAvailable(payload['password'], payload['email'])
    return json.dumps(result)
| [
"sagarkadu17@gmail.com"
] | sagarkadu17@gmail.com |
c3a8e4dad55db36cd8fb6dd9517c8b824e4bedea | 87ff52138a82580ca6a35fc6e70a21a92dd1c637 | /inputs/input_05.py | f2e741b1b21b6917ca1ba93dcc17650e53ec2a2c | [] | no_license | raphey/advent-of-code-2020 | b888ff045544f471d7769c29c395ad1fe22ee57f | f44e31b743e19a54eadd4032d0b3179dc7a90423 | refs/heads/main | 2023-02-03T15:45:55.160475 | 2020-12-25T16:06:50 | 2020-12-25T16:06:50 | 318,403,235 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,412 | py | sample_input = """FBFBBFFRLR"""
main_input = """FFFBBFBLLR
BFBBBFFRLR
BFBBBBFLRR
BBFBFFFLLR
BBFBFBFLLL
BFBFBFBLLR
FBBFFBFRLR
BFFBBBFLLR
FBFBFFBRLR
FBFFBFFRRR
BBFFFFFLRL
FFBFBBFLLL
BFBBBFBRLL
BFFBBFFRRR
BBFBBFBLLR
BBFBBBBRLL
FFBFFBBRRL
FBFBBFFLLR
BFFFBFBRLL
BFBFBBBRRR
FBBFBFBRLR
BFFBBBFLLL
BFBFBFFRLR
BFFFFBFRLR
BBFFFBFLLL
BBFBFBBLRR
FFBBFBFRRL
FBBFBFFRRR
BFBBBBFRRL
FBBBFFFRRL
FBFFFBBRRL
BBFBBBBLLL
BBFBFFFLLL
BFBFBFBLLL
BBFFBFBLLR
FBFFFBBLRL
BFBBFFBRRL
FFBFFFBRRL
BFFFFFFLRR
BBFBFBBRLL
BBFFBBFRRL
FFBFFFBRRR
BFFFBBFLRR
FBBBBBBRRL
BFBFFBFLRL
FFFFBBBRRR
BFFBFBBRLR
FFFFBBBLLL
FFBFBBFRLL
FBBFFFFRRR
FFFBFBFLLR
FFFBBFFLLR
FBFFFFBRRR
FBBFFBFRRL
FFBBFBBLLR
BFFBBBBRLR
FBFBBFBRLL
FFFBBFBLRL
FBFBBFFLLL
FBFFFFFRRR
FBFBBFBLLL
FBBFFBFRLL
FBFFBBFLLL
BFFFBFBLRL
BFFBFFBRLR
FBBFFFFRLL
BBFBBFFLLL
BFBFFFFLRR
FBBBBBBRRR
FBBFFFBRRR
FFFFBBBRRL
BBBFFFFRLL
FBFBBBFLRL
FBBBBFBRLL
FFBBBFFLRR
FBFFBBBLLL
BFBBFFFRRL
BFFFBBFRRL
BFFBFBBRLL
BFBFBBFLLL
FFFBBBBLRL
FBFBFBFRLL
BBFFBBFRLR
FBBBBBBLLR
BFFFBBBRRR
FFFBBBBLLR
FBBFBFBLRR
FFBBFFBLRR
BFFBFFFRLR
FFBBFBBLRL
FBBFBFFLLL
BFFFFBBLRL
FFBBFBBLLL
FBBBBBBLLL
FBFFFFBLLL
BFBBFBBRRR
FBFBFFFLRR
FFFBFFFRLR
FBFFFBFLRL
BFFBBFFLLR
FBBBBFFLLL
BBFBFBFRRR
BFFFBBBRLL
BFBFBFBLRR
FBFFFFFLLR
BFFFFBFLLR
BBFBFFBRRR
FBFBFFBLLL
FFFFBBBLLR
BFBFBBBLLR
FBBFBBFRRR
FBBFBBBRLR
BFFBFBFLLL
BFFBFFFLRR
FBBBFFFLLL
BFBBBFBRRR
FBBFBFFLLR
BFBBFBFRRR
FBFBBFBLRL
FBFBFFBRRR
BFBBFBBRLR
BFFFFFBLLL
FFBBFFBRLL
FFBBFFFLLR
BBFBFBFLRR
BBFFBBBRLR
BBFBBBFRRR
BBFBBFBLRR
FBBBFBFRRL
BFBBFBFLLR
BFBBFBBLLR
FFFBFFFLLL
FFBFFBFRRR
BFFBBFBRRR
FFFBFBBLLR
BBFFFBFRRL
FBFBBBFRLR
BFBFBBFRLL
BFFFBBFRLL
BBFBBFFLRR
BFFFBBBLLR
FBBFBBFRLR
FBFFBFBLLR
BBFFFBFLRR
BFBFBFBLRL
BFBFBFBRRR
FBBFFBBLRL
FFBBFBFLLL
BFBFFBFRLR
BBFBBBFRRL
BFBFFFFRRL
FFFBFFFRLL
BBFBFFBRRL
BBBFFFBLLR
BBFBFFFLRL
FFBFBBBLRR
FBFFFFFLLL
FFBBFFBLLL
FBBBBBFRLR
FBBFBFFRLR
BBBFFFFRLR
BFBBBBFRLR
FFBFFFFLRL
BFFFFFBRLL
FFBFFFBLLR
FFFBFBFLRL
FBBBBFBLRR
FBFFFBFLRR
BBFBBFFRLR
BFFFFFBRRR
BFFBBFBLLL
FBBBBFBLLL
BBFBBBBLRL
FBFBFBFRRL
BBFBBBBRRL
FFBFFFFRLL
FFBBBBBRLR
FFFBBBFRLL
FBFBBBBRLR
FFBFFBFRLR
FBFFBFFLLR
FFFBBFFRLL
BBFBFBBLLR
FBBFFFFLRR
FBFFBBBRLL
BFBFFBFLLL
BFBFFBFRRR
FFBBBBBRLL
FFBFBBBRRR
BFFBFFFRRR
FBBBBFFRLR
BBFFFFBRRR
BBFBBBBLRR
BFFFBBBLLL
BFFFFFFLRL
BFFBBBFRLR
FFBBFBBRLL
FBFFFBFLLL
FFFBFFBRLL
BFFFBFFLRL
FFBBFBFRLL
FBBBBBFRRR
BFFFFBFRLL
FBBBBFFRLL
BFBBBBBRLL
FBBBFBFLLL
FFFBBBBLRR
FFBFFBFLRL
FBFFBFBRLR
BFBFBFFLLR
FFBFFBBLRR
FFFBFBFLLL
BFBBBFFRRL
FFFBBFFRRR
BBFFBBFLLL
BBFFBBBLLR
FBFBFBBLLR
BFBFBBFLLR
FBBFBFBLLL
FFFBBBBRRR
FBFBFBFLRL
FFFBFFBRRR
FFFFBBBRLL
BBFBBBFLRL
FBBFFFFLLR
FBFFFFFLRR
FBBBFBBRLR
BFFFBFFRLL
FBBBFFBRRR
FBBBFFBLRR
BFFBBBBLLL
FBBFBBBLLR
FBBBBBFRRL
FBFBFBBRRL
FBFBFBFLLL
BBFBBFFRRR
FFFBFBBRRR
FFBBBBBLLL
BFBBBFFLRR
FFFBBBFRLR
FFFBFFBLRL
FBBFFBBLLR
FBFBBBBLLR
BFFFFFFRLL
FFBFFFFRLR
BBFFBBFLRL
BFBBBFBRLR
BFFBFBFRLR
FBBFFFBLLL
FBBBFFBRLR
FFBBFFFRRL
FBFBBFBRRR
BFFBBFBRRL
FBBBFBFRRR
FBBFBFFRRL
FBFFBBBLRL
BFBFFFBRRL
FBBBBBBLRL
BBFBFFBLRL
BBFFFFFRLL
FBFBBBBRLL
BBFBBFBRRL
BBFFFFFLLL
BFBBFFBRLR
BBFBFFBLLL
FBBFBBFLLL
BFFFBFBLLL
BBFBFFFRRR
FBBBBBFLRR
BFFFFFBRLR
FFBBFFFLLL
FFBBFBBRRL
FFFBFFFLRL
FBFFBFBLRL
BBFFBFFLRL
BFFBBFFRLL
BBFBBBFLLR
FBBBFFBRLL
BFBBFBBLRR
BBFFFBFRRR
FFBFFBBLRL
FFFBBBFLLL
FFBBBBFRLR
BFBBFBFLRL
BFFBFBFRRR
FBBFFFFRRL
BBFBBFBLLL
FFBBBFBRRL
BFFFBFFLLR
FBBFFFBRLL
BFBBFBFLLL
FBBFBFFLRR
BFBFFFFLLL
FFBBBFBRRR
FFBBBFBLRR
FFBBBFFRLR
FFBBBFBLLL
FFFBFBBRLR
BFBFFFFLLR
BBFFBBBRLL
BBFBFBFLLR
FFBBBFFRLL
FBFFBFBRLL
FFFBFFFLLR
BBFBBBBLLR
FBFBBBFRRR
BBFBBFBRLL
FFBBBBFRLL
BFBFBFBRLL
FBFFFFBLLR
BFFFFBFLLL
BFBBBBBRLR
BFFFBFBRLR
BFFBFBBLLL
FBFFFBFRLR
FFFBFFFRRL
BBBFFFFRRL
FFFBBBBRLL
FBBFFFFLLL
FFFBFBBLLL
BFFFBBFLRL
FBBBBBFRLL
FBFFBBFRLL
BFFBFFBRRL
BBFFFBBLRL
BBFBBFBRLR
BFBFBFFRLL
BBFFBFFLRR
FFBBBBBRRR
FFBFBBFLLR
FFFBBBFLRR
FBBFFFBRRL
BBFBBBFRLR
FFFBBBBRLR
BFBBFBFLRR
BBFFBBBRRR
FBFFFFBLRL
FBBFFBFLLR
FBBFBBFLRR
BBFFBFFLLR
FFBFFFFLRR
BFBFFFBRLL
FBFBFFFRRL
FBBFFFBRLR
BFFBFFFRRL
FBFFFBBRLR
FBFFBBFRRL
FFBFFBBLLL
BFBFFBBRRL
BFBFBBBRLL
FBFFFBBLRR
FBBFBBBRLL
FBBBBFBRRR
FBBBFFBLLR
BBFBFBBRRR
FFFBBBFLLR
BFFFBBBLRR
FFFBFFBLLR
FFBFFFBRLL
BFBBBBBLLL
FBFFFBBRRR
FFBBFFBLLR
BFFBBFFLRL
FFBBBBBLRL
FFBFBBBRLL
FBBFBBBLRR
FFFBFFBLLL
FFBFBBBLLR
BBFFBBFLLR
FFFBFBBRLL
FFBFBBFRLR
BFFBBBFRRR
BFBFFBFRLL
FFBBFBBRRR
FFBFBFBLRR
BFBBBFBLLL
FFBFBFBRLL
BFFBFFFLRL
BBFBFBFRLL
BFFFBFFLLL
FBFBFBFRLR
FFBFBFFLLR
FBBFBBFLLR
FBBBBFBLLR
FBFFBFFLLL
FFBBBFFRRR
FBBFFFFLRL
FBFFBBFLLR
FFBBBBFLRL
FBBFBFFLRL
BFBFFFBLRL
BFFFFFBLRR
FFBBBBFLLL
BFBBBFFLLR
FBBFBBBLLL
BBFFBBBLRL
FBFBFFFRLR
BFFBFBFRLL
BFBBFBBLRL
BBFFBFFRRR
FBBBFBBLLR
BFBBFFBLRL
FFBFBFFLLL
BFBFBBBRLR
FBBFBFBLLR
FFBBBFFLLR
FFBBBFBRLR
BFFFBBBLRL
FBBFBFFRLL
FBFBFBBLLL
BFFBFBFLRR
FFBFBFFRLL
BBFFBFFRLR
BFBFBBBLLL
BBFFBBBLRR
FBFBBBBLRR
FBBFFBFRRR
BBFBFFFLRR
BFFFFBBRRL
FFBBBFFLLL
BFBBBBFLLL
BBFFFBBRLL
BFFBFFFLLL
FFBBBBFLLR
BBFFFFFRRR
BBFFFFFLRR
BBFBFBBLLL
BBFBFBFRRL
FBBBFFFRLR
FFBBBFBRLL
FBFFBFFLRL
BFBFFFFRLL
FBFFBBBRRR
FBBBFBFLRL
BFBBFFFLRL
BBFBFBFLRL
FFBFFBFRLL
FFBFBBBLRL
FFFBBFFLRR
FBBFBBBLRL
BFBBFFFLLL
FBBFFBFLRL
BFFBFBFLLR
BBFFFFBRRL
BFFFFBFLRR
FFFBBFBRLR
FFBBFBBRLR
FFBFBFBLRL
BFFFFFBRRL
FBBFFBFLLL
FBFFBFFLRR
FFBBBBFRRL
FBFFBBFLRR
FBBBFBFRLR
BFBBFFFLRR
FBFFFBBLLL
FBFBBBFRLL
FBFFBFFRLR
FFBBBFFLRL
FFBFFFBLLL
FBBBFBBRRL
BFFBFBFLRL
FFBFFFFLLR
FBBBFFFLRL
BFFFFBFLRL
BBFBBFBRRR
FFFBBBBRRL
BBFBFFFRLR
BFBFBBBLRR
FFFBBFBLLL
BFBBBBFLRL
FBBFFBBLRR
FFBBFFBRLR
BFBBBBFRRR
BFFBBFBLRR
FBFBFFBRLL
BBFBBBBRLR
BBFFFBFLLR
BFFFFBBLLL
FBBFFFBLRR
FFFBFBFLRR
FBFFBBFRRR
FBBBFFFLLR
BFFFFBFRRR
FBBBFBBRRR
FBFFFFFRRL
BFBFFBBLLR
BFBFFFFRRR
BFBFBBFRRR
BFBBBFBLRR
BFFBFBBRRR
BBFFFBBLRR
BFFBBBBLLR
FBFFFFFLRL
BBFBBBFLRR
BFBBBBBRRR
BFBBFBFRLR
FFBFFFBRLR
BBFBBFFLRL
FBFFFFBLRR
FFFBFBFRRR
BFFBFBFRRL
FFBBBBBLRR
FBFFFFFRLL
BFFBFBBLLR
BBFBFFBRLL
FBBBFBBLRR
FBFBBFFLRL
BFFFFFFRLR
BFFBBBFLRL
BFFBFFBLRR
BFFFFBFRRL
BFFBBBFRRL
BFFFFFBLRL
FFBBBBBRRL
FFFFBBBRLR
BFFBBFFRRL
FBBBFFFRLL
FBBFFBBRLR
FBBBBFFRRL
FBBBFBBRLL
FFBBFFBRRL
BBFFFFBRLL
FBBFFFFRLR
BFBFFFBRLR
FBFFBBFRLR
FBBFFFBLLR
BFBBFFBRLL
FBFFFFFRLR
FFFBBFBRRR
BBFBFBBLRL
BBFFFBFRLR
BFBFBBFRLR
BBFFBFBRLR
FBFBBBBLLL
FBBBFFBLLL
FBFBFFFRLL
BFFBBFFLLL
FBBBBBFLLR
BFFFBBBRRL
FBFFFBFRLL
FBFBFBFLLR
FBFFFFBRLR
BFBBBBBLRR
FFBFFFFLLL
BFBFBFFRRL
FFBFFBBRRR
BBFFFBFRLL
FFBFBFBRRR
FFBFFBFLRR
FBBFFBBRLL
BBFFBBFRLL
FFBFBBFLRL
FBFBFFFLLL
FFFBBBFLRL
BFFBBBFRLL
BFFFFBBLRR
FFBBFFFRLL
BBFFBBBLLL
FFFBBFBRRL
BFFBFFBLRL
FFBFFFFRRL
FFBBFFFLRR
BFBBFFFRLR
BFBFFBBRLR
BBFBBFFLLR
BFFBFBBLRR
BBBFFFFLLR
FBBBFFFLRR
BBFBFFFRLL
FFBFBFBLLR
FFFBBFBRLL
BBFBBBFRLL
FBBBBFBLRL
FBFFBFFRRL
BFBFBFFRRR
BFBBBFBLLR
BFFBBFFRLR
FBFFFBFRRL
FBFBFFBLRL
FFBBFBBLRR
BFFBBFBLLR
FFFBBFBLRR
BFBFFBBLRL
FFBFBBFLRR
BFFFBFBRRR
BFBBFFFLLR
BBFFBFBLLL
BFFFBFFRLR
FBFBBFFRLR
FFBFBFBRRL
BBFBFBBRRL
FBBFBFBLRL
BFBFFBFRRL
BFBFFBFLLR
BFFFBBFRRR
BBFBBBBRRR
FBBFBBBRRR
FBBFBFBRLL
FFBFFBBLLR
BBFFBFBLRR
FBFFBBBLLR
BBFBFFBLRR
BBFBFFFRRL
BFFBBFBRLR
BBFFBBBRRL
FBBFFBFLRR
BFBBBFFRLL
BFFFBBFLLR
BFFBBBBLRR
BBFFBBFRRR
BFBFBBFLRR
FBFBFBBRLL
BBFFFFBRLR
FFBFBBBRRL
BBFBFBBRLR
FBFBFFFLLR
FBFBBBFLRR
FFBFBFFRRR
BFFBFFBRRR
BFFFFFFRRL
BFBFFBBRLL
FBFBFFFLRL
FFBBBFFRRL
BFFFFBBRLR
FFBFFFBLRR
FFBBFBFLRR
BBFFFBBRLR
FBFBBFFRLL
FFFBBFFLLL
FBBBFBBLRL
BBFFFFFLLR
BBFFFBFLRL
FBBBBBFLLL
FFFBFFFRRR
FBFBFBBRRR
BFBBFFBLRR
FBFBBBFRRL
FBBBFBFRLL
BBFFFBBRRL
FBFFBFBLLL
BFBBFBBLLL
FFBFFBFLLR
BBFBBFFRRL
FBFFFFBRLL
BFFFBBFLLL
FFFBFFBLRR
BFBBFFBLLL
FBBBBBBRLL
FFBBFFBLRL
BFBBBBBLRL
FBBBBFFLLR
BFFBBFBRLL
BFBFBFFLRL
BBFFFFFRRL
BFBFBFFLLL
FBFFBBFLRL
BBFBBFFRLL
BFBFFBBLLL
BFFFBFBLLR
BBBFFFBLLL
BBFBBBFLLL
FFBFBFFLRR
BFFFFFFLLL
BBFFBFBRRR
FFFBFBFRLR
FBBBFBFLLR
FFBFBFFLRL
BFBFBFBRRL
FBBFFBBRRR
BFFBBFFLRR
BFFBFBBLRL
FFBFFBBRLL
FBFBFFBLLR
BFFFFFFLLR
FFFFBBBLRR
FBFFFBFRRR
BFBBBFFRRR
BFBBFBBRLL
FFBFBFFRRL
BFFBFFBLLL
FFBBFFFLRL
FBBBBBBRLR
FBBBFBFLRR
FBFBFBBLRR
BFFFBBFRLR
FFBFBBFRRL
FBFFFBFLLR
BBFFBBFLRR
BBBFFFFLRL
BBBFFFFRRR
FBFFFFBRRL
BFBFFFBRRR
BFFFBFBRRL
FBFBFBBLRL
BFBFFFFLRL
FBFBBBBRRR
BFFFBBBRLR
FBBBFFBLRL
FFFBFBBRRL
FBBFBBFRRL
BFBBBFBRRL
FFBBFFFRLR
BBFBBFBLRL
FBFBBFBRLR
BBFFBFFRLL
FBBFBBBRRL
FBBFBBFRLL
BFBFBBFRRL
FFBFFFBLRL
BFFBFFFRLL
BFBFBBBRRL
FFBBFBFLRL
FBFBBBBLRL
BFFBBBFLRR
FFFFBBFRRR
FBFBBFFRRR
BFFFBFFRRL
BFBBFFFRLL
FFBBBFBLLR
BFBFFBFLRR
FBFBBFFLRR
FFBFBFFRLR
FBBBBFFLRL
FFFBBFFLRL
FBFBFFBRRL
BFBBFBFRRL
FBBFBFBRRL
FFFBBFFRLR
FFBFBFBRLR
FFBFFBBRLR
BBFFBFFLLL
BFBFFBBRRR
BFBBBBBRRL
BFBFFFBLRR
FBFBBFFRRL
BBFBFBFRLR
BFFFFFBLLR
FBBBFFBRRL
FBFFBBBLRR
BBFFFFBLLL
BFBFFFBLLR
FFBFBBBLLL
BFFFFBBRLL
BFFBBBBRRL
FBFBFBBRLR
FBFBBBFLLR
FFBFFBFLLL
BFBBFBFRLL
BFBBFFFRRR
BFBBFFBRRR
BFFBBFBLRL
BFFBBBBLRL
FFFBFFFLRR
BFBBFFBLLR
FFFBFFBRRL
BFBBBBFRLL
FFBBFBFRLR
FBBBFBBLLL
BBBFFFFLLL
BBFFFFBLRL
BBFBFFBRLR
BFBFBBFLRL
FFFBFFBRLR
FFFBFBFRLL
BBFFFFBLLR
BFBFBFBRLR
FBFBBFBLRR
BBFFFBBLLR
FBFBBBFLLL
BFFFBFFRRR
BBFBFFBLLR
BFFBFBBRRL
FBBBBFFRRR
BFFFFBBRRR
FFBBBFBLRL
BFBBBBBLLR
FFBBFFFRRR
FFFBBBBLLL
FBBBBFBRLR
BBBFFFBLRL
FBFBFBFLRR
BFFBFFBRLL
BFFBBBBRLL
BBFFBFBRRL
FFBFBBFRRR
FBFFBFBRRL
BFBBBFFLLL
FBBBFFFRRR
FBBBBBFLRL
FBBFBBFLRL
FFBFFFFRRR
FFFFBBBLRL
FBFBBFBRRL
BBBFFFFLRR
FFFBFBBLRR
BBFFBFBRLL
FFBBFFBRRR
FBFFBFFRLL
FFBFFBFRRL
FBFBFFFRRR
BFFBFFFLLR
BFBBFBBRRL
FFFBFBFRRL
BFBBBBFLLR
BFFFBFFLRR
FBBFFFBLRL
BFFFFBBLLR
BFBFBFFLRR
FFBBBBFLRR
FBBBBFFLRR
FBBBBFBRRL
BBFFFFFRLR
FBFFFBBLLR
FFFBBBFRRR
BBFFFBBRRR
FBBFBFBRRR
FBBBBBBLRR
FFFBFBBLRL
BFBFFFBLLL
FFBBFBFLLR
FFBBBBFRRR
FBFFBBBRLR
FBFBFFBLRR
BBFFBFBLRL
FBFFBFBRRR
BFBFBBBLRL
FFFBBBFRRL
BFBBBFFLRL
BBFFBFFRRL
FFFBBFFRRL
BFBFFBBLRR
BBFFFBBLLL
BFBBBFBLRL
BFFBFFBLLR
BFBFFFFRLR
FFBBBBBLLR
BFFBBBBRRR
FBBFFBBLLL
FBFBFBFRRR
FBFBBFBLLR
FBBFFBBRRL
FFBFBFBLLL
BFFFBFBLRR
FBFBBBBRRL
FBFFBFBLRR
FBFFBBBRRL
FFBFBBBRLR
BBFFFFBLRR
FBFFFBBRLL
FFBBFBFRRR""" | [
"16931750+raphey@users.noreply.github.com"
] | 16931750+raphey@users.noreply.github.com |
67222d813c0f7c2646f99a022615da1354352b30 | c28b97616b658797ae46f75bdb173526841cb5f6 | /photo/urls.py | 0df836ae0acd5d889ba77f8efba589303a8af2d3 | [] | no_license | victoria-kovaleva/first | 5da344ff01ac4194b5835c864efee80c63f97cd5 | b823a455e8aad58c1a10c2ac76bf7a839a1a8f6e | refs/heads/master | 2021-01-01T18:55:07.379654 | 2014-02-23T16:01:07 | 2014-02-23T16:01:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 444 | py | #from django.conf.urls.defaults import *
from django.conf.urls import patterns, url, include
from photo.models import *
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('photo.views',
(r"^(\d+)/$", "album"),
(r"^(\d+)/(full|thumbnails|edit)/$", "album"),
(r"^update/$", "update"),
(r"^search/$", "search"),
(r"^image/(\d+)/$", "image"),
(r"", "main"),
(r'^admin/', include(admin.site.urls)),
)
| [
"vkovaleva@mirantis.com"
] | vkovaleva@mirantis.com |
bb254c654547f81e9990ee4cf77ce5783ed9cdd3 | 96b53c177d3060a9149fead7c2481f631d954d2e | /virtual/bin/alembic | 86b62ed49fb847db593874c05a7dda327114015a | [
"MIT"
] | permissive | JOFLIX/pitch | 0a094c93025fa36e28938412e31fa210cd76613c | c821acf138b07f148be0cc5fbe6c82bd6396a428 | refs/heads/master | 2022-09-29T06:40:22.370088 | 2019-08-07T07:46:43 | 2019-08-07T07:46:43 | 200,628,592 | 0 | 0 | null | 2022-09-16T18:06:49 | 2019-08-05T09:51:31 | Python | UTF-8 | Python | false | false | 257 | #!/home/moringa/Desktop/pitch_your_work/virtual/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from alembic.config import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"joflixooko@outlook.com"
] | joflixooko@outlook.com | |
a1fd233d387cce9d078d171d6854b62735d62a2a | d869931c9164e8ebe6204fde2fbb783851d7a61f | /entry.py | cb118a5892344aa4af7217c9d3e2dcce4ce8bd0e | [
"MIT"
] | permissive | it-zoo-histories/marduk | c54509ecb59737c92b187226875de1e94f005875 | 1053961eabcebe901d2926d1f7e07ce43ffc6ac5 | refs/heads/master | 2022-01-25T12:07:58.463196 | 2019-05-26T21:41:53 | 2019-05-26T21:42:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 967 | py | import IPython
from IPython.display import clear_output, HTML, display
from rasa_core.agent import Agent
from rasa_core.interpreter import RasaNLUInterpreter
from rasa_nlu.model import Interpreter
import time
class LogicCore(object):
    """Wraps a Rasa NLU interpreter and dialogue agent behind one object."""
    def __init__(self):
        # NOTE(review): "intrepreter" is misspelled but kept — renaming it
        # would change the object's public attribute set.
        self.intrepreter = RasaNLUInterpreter('models/current/nlu')
        self.nluModel = Interpreter.load('./models/current/nlu')
        self.dialogue = Agent.load('./models/current/dialogue', interpreter=self.intrepreter)
        # Fallback reply ("I didn't understand you, try again" in Russian).
        # Not referenced anywhere in this class — presumably used by
        # callers; verify before removing.
        self.unknown_command = "я вас не понял, попробуйте ещё раз"
    def newMessage(self, message):
        """Feed *message* to the dialogue agent and return its response.

        Returns "" when the agent produced no response at all.
        """
        response_from_dialogue = self.dialogue.handle_message(sender_id=1, message=message)
        print(response_from_dialogue)  # debug trace of the raw response
        if len(response_from_dialogue) == 0:
            return ""
        else:
            return response_from_dialogue
    def interpreter(self, message):
        """Print the NLU parse of *message* (debug helper; returns None)."""
        print("recognized: ", self.nluModel.parse(message))
"kubitre@gmail.com"
] | kubitre@gmail.com |
c70d686b8a66449aa75277ec024a414043f77dab | 8b00e2b136636841b38eb182196e56f4721a1e4c | /trio/_util.py | 121513b20e80d517c58bc5e6fb5c7f2255ca441a | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | xyicheng/trio | 77c8c1e08e3aa4effe8cf04e879720ccfcdb7d33 | fa091e2e91d196c2a57b122589a166949ea03103 | refs/heads/master | 2021-01-23T00:05:59.618483 | 2017-03-16T04:25:05 | 2017-03-16T04:25:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,494 | py | import sys
from functools import wraps
import async_generator
# NOTE: the list previously read "aitercompat", which does not match the
# actual function name ``aiter_compat`` defined below, so
# ``from trio._util import *`` raised AttributeError on that name.
__all__ = ["aiter_compat", "acontextmanager"]
def aiter_compat(aiter_impl):
    """Decorator handling the Python 3.5.2 change to ``__aiter__``.

    From 3.5.2 onward ``__aiter__`` must return the async iterator
    directly, so the implementation is handed back untouched.  On older
    interpreters ``__aiter__`` was awaited, so the implementation is
    wrapped in a coroutine that returns it.
    """
    if sys.version_info >= (3, 5, 2):
        return aiter_impl

    @wraps(aiter_impl)
    async def __aiter__(*args, **kwargs):
        return aiter_impl(*args, **kwargs)

    return __aiter__
# Very much derived from the one in contextlib, by copy/pasting and then
# asyncifying everything.
# So this is a derivative work licensed under the PSF License, which requires
# the following notice:
#
# Copyright © 2001-2017 Python Software Foundation; All Rights Reserved
class _AsyncGeneratorContextManager:
    """Async context manager driving a single-yield async generator.

    Mirrors contextlib._GeneratorContextManager: __aenter__ runs the
    generator up to its first yield and returns the yielded value;
    __aexit__ either resumes it to completion or throws the in-flight
    exception into it, carefully distinguishing exceptions the generator
    raised itself from the one that was thrown in.
    """
    def __init__(self, func, args, kwds):
        # Instantiate the async generator immediately; __aiter__ is a
        # no-op on native async generators but needed for the
        # async_generator backport.
        self._agen = func(*args, **kwds).__aiter__()
    async def __aenter__(self):
        """Advance to the first yield and return the yielded value."""
        if sys.version_info < (3, 5, 2):
            # Pre-3.5.2, __aiter__ returned an awaitable (see aiter_compat).
            self._agen = await self._agen
        try:
            return await self._agen.asend(None)
        except StopAsyncIteration:
            raise RuntimeError("async generator didn't yield") from None
    async def __aexit__(self, type, value, traceback):
        """Finish the generator; return True to suppress the exception."""
        if type is None:
            # Clean exit: the generator must stop after its single yield.
            try:
                await self._agen.asend(None)
            except StopAsyncIteration:
                return
            else:
                raise RuntimeError("async generator didn't stop")
        else:
            if value is None:
                # Need to force instantiation so we can reliably
                # tell if we get the same exception back
                value = type()
            try:
                await self._agen.athrow(type, value, traceback)
                raise RuntimeError("async generator didn't stop after athrow()")
            except StopAsyncIteration as exc:
                # Suppress StopIteration *unless* it's the same exception that
                # was passed to throw(). This prevents a StopIteration
                # raised inside the "with" statement from being suppressed.
                return (exc is not value)
            except RuntimeError as exc:
                # Don't re-raise the passed in exception. (issue27112)
                if exc is value:
                    return False
                # Likewise, avoid suppressing if a StopIteration exception
                # was passed to throw() and later wrapped into a RuntimeError
                # (see PEP 479).
                if exc.__cause__ is value:
                    return False
                raise
            except:
                # only re-raise if it's *not* the exception that was
                # passed to throw(), because __exit__() must not raise
                # an exception unless __exit__() itself failed. But throw()
                # has to raise the exception to signal propagation, so this
                # fixes the impedance mismatch between the throw() protocol
                # and the __exit__() protocol.
                #
                if sys.exc_info()[1] is not value:
                    raise
def acontextmanager(func):
    """Like @contextmanager, but async.

    *func* must be an async generator function (native, or produced by
    the async_generator backport); each call of the returned helper
    creates a fresh _AsyncGeneratorContextManager around it.
    """
    if not async_generator.isasyncgenfunction(func):
        raise TypeError(
            "must be an async generator (native or from async_generator; "
            "if using @async_generator then @acontextmanager must be on top.")
    @wraps(func)
    def helper(*args, **kwds):
        # Defer running func until the `async with` actually starts.
        return _AsyncGeneratorContextManager(func, args, kwds)
    return helper
| [
"njs@pobox.com"
] | njs@pobox.com |
5a813bd10a9a6555bcb7a31df0d331852598cdba | 5088fffefcbb3458ee2c8fca6d822487e13c4169 | /04-zanke/monte_carlo.py | 92515bfcf694126c02f491519d94ea6ab3eda678 | [] | no_license | matijapretnar/uvod-v-programiranje | 95de86fb63d6d06558984c05a40690f78d15aa5f | 464a9c566ed3564a6baba60e7c79f9e25399d45e | refs/heads/master | 2023-04-06T00:28:57.011142 | 2023-04-04T10:49:56 | 2023-04-04T10:49:56 | 52,275,510 | 5 | 34 | null | 2022-03-16T10:12:55 | 2016-02-22T13:32:48 | Python | UTF-8 | Python | false | false | 853 | py | import random
def oceni_pi(n):
v_krogu = 0
for i in range(1, n + 1):
x = random.uniform(-1, 1)
y = random.uniform(-1, 1)
if x ** 2 + y ** 2 <= 1:
v_krogu += 1
print(4 * v_krogu / i)
delez_v_krogu = v_krogu / n
return 4 * delez_v_krogu
def nakljucna_tocka_v_krogu(x0=0, y0=0, r=1):
while True:
x = random.uniform(x0 - r, x0 + r)
y = random.uniform(y0 - r, y0 + r)
if (x - x0) ** 2 + (y - y0) ** 2 <= r ** 2:
return x, y
def nesmiselna_naloga(st_poskusov):
razdalja_manj_kot_pol = 0
for _ in range(st_poskusov):
x1, y1 = nakljucna_tocka_v_krogu()
x2, y2 = nakljucna_tocka_v_krogu()
if (x2 - x1) ** 2 + (y2 - y1) ** 2 <= (1 / 2) ** 2:
razdalja_manj_kot_pol += 1
return razdalja_manj_kot_pol / st_poskusov
| [
"matija@pretnar.info"
] | matija@pretnar.info |
68c7b15940be9fd26d32216f542c91159895ef82 | 57d7c40b161cec3f5a3cb5d0b5e2344502cc4505 | /question/migrations/0004_auto_20190118_2010.py | 73ee7036874a32bf38cd6c97ba58ad1617c01896 | [] | no_license | rowbotman/web-service-tp | 1b97e7aaeda92c4bd0172f542b86a89e2d24d2ca | 8dc7391c636910a1ee66ca358a56624350445bc5 | refs/heads/master | 2020-04-21T21:27:54.030953 | 2019-02-09T15:48:44 | 2019-02-09T15:48:44 | 169,879,645 | 0 | 1 | null | 2019-02-09T15:48:45 | 2019-02-09T15:16:18 | CSS | UTF-8 | Python | false | false | 426 | py | # Generated by Django 2.1.5 on 2019-01-18 20:10
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('question', '0003_auto_20190117_2034'),
]
operations = [
migrations.AlterField(
model_name='user',
name='username',
field=models.CharField(db_index=True, max_length=40, null=True, unique=True),
),
]
| [
"prokopenko.rower@mail.ru"
] | prokopenko.rower@mail.ru |
1bdb0707e2fd31eb7c20c9da690c177891ec0b67 | d65cf1532a0cdbb7a2755318774cc39762360d49 | /database/models.py | f666d868d23c2bc83eec6ec9b82fab9ef9b706ad | [] | no_license | Courtney2511/item_catalogue | ce9bb01adc19489e8b0a9dbd4691029d25f01c68 | c68da935ee5fe40acd8f7691edb01b2d4d58a1e1 | refs/heads/master | 2021-01-20T17:29:33.221202 | 2018-01-09T17:16:44 | 2018-01-09T17:16:44 | 83,486,416 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,599 | py | from datetime import datetime
from sqlalchemy import Column, ForeignKey, Integer, String, DateTime
from sqlalchemy.orm import relationship
from database import Base
class Category(Base):
    """A photo category; one-to-many with Photo via Photo.category."""
    __tablename__ = 'category'
    id = Column(Integer, primary_key=True)
    name = Column(String(80), nullable=False)
    # Photos assigned to this category (mirrors Photo.category).
    photos = relationship("Photo", back_populates="category")
    def __init__(self, name=None):
        self.name = name
    def __repr__(self):
        return '<Category %r>' % (self.name)
    @property
    def serialize(self):
        """Return a JSON-serializable dict view of this category."""
        return {
            'id': self.id,
            'name': self.name,
            'photo_count': len(self.photos),
        }
class User(Base):
    """An application user account."""
    __tablename__ = 'user'
    id = Column(Integer, primary_key=True)
    username = Column(String(30), nullable=False)
    email = Column(String(100), nullable=False)
    # password is optional (nullable=True) — the reason is not visible
    # here; presumably accounts created via third-party login. Verify.
    password = Column(String(100), nullable=True)
    def __init__(self, username=None, email=None, password=None):
        self.username = username
        self.email = email
        self.password = password
    def __repr__(self):
        return '<User %r>' % (self.username)
    @property
    def serialize(self):
        """Return a JSON-serializable dict view; the password is omitted."""
        return {
            'id': self.id,
            'username': self.username,
            'email': self.email,
        }
class Photo(Base):
    """A photo record owned by a User and filed under a Category."""
    __tablename__ = 'photo'
    id = Column(Integer, primary_key=True)
    name = Column(String(80), nullable=False)
    description = Column(String(250))
    category_id = Column(Integer, ForeignKey('category.id'), nullable=False)
    category = relationship("Category", back_populates="photos")
    # presumably a URL or path to the stored image (up to 500 chars) —
    # verify against the upload code.
    picture = Column(String(500), nullable=False)
    # Creation timestamp; defaults to naive UTC at insert time.
    date_created = Column(DateTime, default=datetime.utcnow)
    user_id = Column(Integer, ForeignKey('user.id'), nullable=False)
    user = relationship(User)
    def __init__(self, name=None, description=None, category_id=None,
                 picture=None, user_id=None):
        self.name = name
        self.description = description
        self.category_id = category_id
        self.picture = picture
        self.user_id = user_id
    def __repr__(self):
        return '<Photo %r>' % self.name
    @property
    def serialize(self):
        "Return object data in serializable format"
        return {
            'id': self.id,
            'name': self.name,
            'description': self.description,
            'category': self.category.serialize,
            'picture': self.picture,
            # NOTE(review): strftime("%s") is a platform-specific (glibc)
            # extension and fails on Windows; datetime.timestamp() would
            # be the portable alternative — confirm before changing output.
            'date_created': float(self.date_created.strftime("%s")),
            'user': self.user.serialize,
        }
| [
"courtneynoonan@me.com"
] | courtneynoonan@me.com |
f6e48374edd3d005af2a9275a9ab22256ea25ab1 | 4c31159b3075c2aad7b6d5f753a62bead30d8e4a | /main2.py | dc86d980d0866db77f3a6b2d07047bf22d52af5e | [] | no_license | ppHaoqq/moobius_scrap | ac355308cab236e04c2704e7271f253fb3206210 | 7d02f101608c9ef0b95a3ea950471d089697197c | refs/heads/master | 2022-11-21T12:11:56.601053 | 2020-07-14T05:12:05 | 2020-07-14T05:12:05 | 260,847,384 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,692 | py | from selenium import webdriver
from bs4 import BeautifulSoup as bs
import pandas as pd
import time
import re
from selenium.webdriver.chrome.options import Options
import sys
def main():
    """Drive the Moobius site end-to-end.

    Logs in, opens the estimate (sekisan) screen, runs a project search
    (default keyword, or argv[1] when given on the command line) and
    saves the result table to an Excel file.
    """
    options = Options()
    #options.add_argument('--headless')
    # NOTE(review): the driver path contains unescaped backslashes; '\c'
    # and '\m' happen not to be escape sequences so it works, but a raw
    # string would be safer.
    browser = webdriver.Chrome('C:\chromedriver_win32\chromedriver', options=options)
    browser.implicitly_wait(10)
    browser.get('https://kibi-cloud.jp/moobius/User/login.aspx')
    time.sleep(2)
    # Log in with the stored credentials.
    login(browser)
    # Navigate to the estimate screen (6th item of the system menu).
    sekisanform = browser.find_element_by_xpath('//*[@id="system_select"]/li[6]')
    sekisanform.click()
    time.sleep(5)
    # Switch into the main iframe.
    change_frame(browser, 'mainFram')
    # Run the search: default keyword without CLI args, else argv[1].
    kw = sys.argv
    if len(kw) == 1:
        search_def(browser)
    else:
        search_sys(browser, kw[1])
    # Switch into the results iframe.
    change_frame(browser, 'mainTFram')
    # Save the visible result table to Excel.
    html = browser.page_source
    save_excel(html)
    # Shut the browser down.
    browser.close()
    browser.quit()
def login(browser):
    """Fill in and submit the Moobius login form.

    NOTE(review): the company id, login id and password are hard-coded
    below; consider moving them to environment variables or a config
    file so they are not committed to source control.
    """
    id1 = browser.find_element_by_css_selector('#txtComID')
    id2 = browser.find_element_by_css_selector('#txtLogID')
    pw = browser.find_element_by_css_selector('#txtLogPW')
    login_button = browser.find_element_by_css_selector('#btnLogin')
    id1.send_keys('gaeart-shikoku')
    id2.send_keys('g948')
    pw.send_keys('g948')
    time.sleep(1)
    login_button.click()
    time.sleep(5)
def change_frame(browser, id_):
    """Switch the WebDriver context into the iframe whose DOM id is *id_*."""
    browser.switch_to.frame(browser.find_element_by_id(id_))
def search_def(browser):
    """Run the estimate search with the built-in default project keyword.

    The body previously duplicated search_sys() line-for-line with the
    keyword inlined; it now delegates to search_sys() so the two code
    paths cannot drift apart.  Behavior and signature are unchanged.
    """
    default_keyword = '令和2年度 高知自動車道 高知高速道路事務所管内舗装補修工事'
    search_sys(browser, default_keyword)
def search_sys(browser, kw):
    """Search the estimate list for keyword *kw* and open its detail tab.

    Opens the search popup, submits *kw*, double-clicks the result row
    to open the project, then activates the fifth tab of the detail
    menu.  Fixed sleeps are used instead of explicit waits.
    """
    _search = browser.find_element_by_id('mainT')
    search_btn = _search.find_element_by_id('btnSearchPop')
    search_btn.click()
    time.sleep(2)
    # 'detSerachBox_Name' is the site's own (misspelled) element id.
    search_box = browser.find_element_by_id('detSerachBox_Name')
    search_box.send_keys(kw)
    time.sleep(2)
    browser.find_element_by_id('btnDetSearch').click()
    time.sleep(2)
    _elem = browser.find_element_by_class_name('doclist')
    elem = _elem.find_element_by_id('tbd-recKouji')
    # Double-click the result row to open the project.
    webdriver.ActionChains(browser).double_click(elem).perform()
    time.sleep(2)
    elem2 = browser.find_element_by_id('tab-menu')
    elem3 = elem2.find_elements_by_tag_name('a')
    elem3[4].click()  # fifth tab of the detail menu
    time.sleep(2)
# Reference: https://tanuhack.com/selenium/#tablepandasDataFrame
def save_excel(html, output_path='C:/Users/g2945/PycharmProjects/moobius_scrap/datas/sekisan.xlsx'):
    """Extract the '#tl_List' table from *html* and save it as an Excel file.

    The first <tr> is treated as the header row; the remaining rows
    become the data.  *output_path* defaults to the previously
    hard-coded location so existing callers are unaffected, but it can
    now be overridden.

    Regex note: the cell pattern keeps the original character class
    ``[h|d]`` (which also matches a literal '|') to preserve behavior.
    """
    soup = bs(html, 'html.parser')
    rows = soup.select('#tl_List tr')
    cell_pattern = r'<t[h|d].*?>.*?</t[h|d]>'
    tag_pattern = r'''<(".*?"|'.*?'|[^'"])*?>'''
    # Header: strip the surrounding markup from each cell of row 0.
    columns = [re.sub(tag_pattern, '', s) for s in re.findall(cell_pattern, str(rows[0]))]
    # Data: same stripping for every subsequent row.
    data = [[re.sub(tag_pattern, '', s) for s in re.findall(cell_pattern, str(rows[i]))]
            for i in range(1, len(rows))]
    df = pd.DataFrame(data=data, columns=columns)
    df.to_excel(output_path)
df.to_excel('C:/Users/g2945/PycharmProjects/moobius_scrap/datas/sekisan.xlsx')
# Run the scraper end-to-end when executed as a script.
if __name__ == '__main__':
    main()
"daiki.nitta888@gmail.com"
] | daiki.nitta888@gmail.com |
236a43ce48ae7a3dc607333f6288c4fc335cd1aa | 99feebd7e64a1961bd3f3c3b152c013b35bc9bad | /testCase/accounts_login_password_test.py | 9c57ce1499bc222b00590c7440c7932a340c9b86 | [] | no_license | andy-29/AutomatedTest | a551fb8d2d608c5191a9f1d71a30188f9a19bba5 | 1c3d2b5295f4b6df4e9321f6a75740a3970df3e4 | refs/heads/master | 2020-06-16T15:24:24.418593 | 2019-06-05T06:26:19 | 2019-06-05T06:26:19 | 195,621,212 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,518 | py | import os, sys
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(BASE_DIR)
func = os.path.basename(__file__).split('_test.py')[0]
from common.gmpackage import *
@ddt
class Accounts_Login_Password(unittest.TestCase):
'''
登入接口
'''
def setUp(self):
self.host = g.host
self.api_name = g.api_name(func)
self.url = self.host + self.api_name
@data(*(get_values(func, "test_accounts_login_password")))
def test_accounts_login_password(self, value):
self._testMethodDoc = '登入接口'
r = gmhttp.login()
self.assertEqual(0,r.get('error'))
@data(*(get_values(func, "test_accounts_login_password_errorPwd")))
def test_accounts_login_password_errorPwd(self, value):
self._testMethodDoc = '账号正确,密码错误'
user = value.get('requestdata').get('phone')
pwd = value.get('requestdata').get('password')
r = gmhttp.login(user,pwd)
self.assertEqual(r, value.get('assertdata'))
@data(*(get_values(func, "test_accounts_login_password_errorTel")))
def test_accounts_login_password_errorTel(self, value):
self._testMethodDoc = '账号错误'
user = value.get('requestdata').get('phone')
pwd = value.get('requestdata').get('password')
r = gmhttp.login(user,pwd)
self.assertEqual(r, value.get('assertdata'))
def tearDown(self):
pass
if __name__ == "__main__":
Accounts_Login_Password.run()
| [
"dayuezaichunji@163.com"
] | dayuezaichunji@163.com |
f92c23b5aeb19501b060ad0c39fd22e808b07ebd | 219e0cf3ff218a142872b939871c3b52c5c09f8f | /database_fill.py | 9d03309f41373f46406dfe395d97a3f5b07119b1 | [] | no_license | fouad3/Item-Catalogue_Udacity_Project | 6a1de3cc3270856f4e853f9a5580d1da7063e062 | 931f7a617321c88dbfb895a921f832e22732a207 | refs/heads/master | 2021-08-31T13:09:19.620877 | 2017-12-21T11:32:59 | 2017-12-21T11:32:59 | 103,182,831 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,519 | py | from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from database_config import Category, Base, Item, User
engine = create_engine('sqlite:///itemcataloguewithusers.db')
# Bind the engine to the metadata of the Base class so that the
# declaratives can be accessed through a DBSession instance
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
# A DBSession() instance establishes all conversations with the database
# and represents a "staging zone" for all the objects loaded into the
# database session object. Any change made against the objects in the
# session won't be persisted into the database until you call
# session.commit(). If you're not happy about the changes, you can
# revert all of them back to the last commit by calling
# session.rollback()
session = DBSession()
# Create dummy user
User1 = User(
name="Dummy User",
email="dummy_user@example.com",
picture='https://s3-ap-southeast-1.amazonaws.com/static.gig88.com/img/empty_user_male.png')
session.add(User1)
session.commit()
# items for Category Baseball
category1 = Category(user_id=1, name="Baseball")
session.add(category1)
session.commit()
item1 = Item(
user_id=1, title="Rawlings Players Right Hand Glove", description="The Rawlings Players Right Hand Glove is"
"a great accessory for those who are looking to start out in T-Ball or baseball. The glove is worn on the non-dominant"
"hand - leaving your dominant left hand to throw and perfect your passes. Featuring a hook and loop strap for an adjustable"
"and secure fit, and a basket web pattern that makes it a very flexible web.", category=category1)
session.add(item1)
session.commit()
item2 = Item(
user_id=1, title="Wilson Ash 225 Baseball Bat", description="Knock it out of the park with "
"the Wilson Ash 225 Baseball Bat. Constructed with Professional grade Alleghany Ash and featuring a synthetic"
"grip for a firm grasp on the field. The balanced design offers efficient swings, while the laser engraved"
"barrel delivers an official look.", category=category1)
session.add(item2)
session.commit()
item3 = Item(
user_id=1,
title="Worth 12\" Official NCAA Softball Ball",
description="The Worth 12 Official\" NCAA Softball Ball"
"is designed for the practice pitch with a soft polycore centre to help prevent injury. The raised red seams"
"enhance your grip on the ball; while the ProTac synthetic leather makes the ball more durable.",
category=category1)
session.add(item3)
session.commit()
# items for Category Basketball
category2 = Category(user_id=1, name="Basketball")
session.add(category2)
session.commit()
item1 = Item(
user_id=1, title="adidas Dual Threat Junior Basketball Shoes", description="Step up your game and sink some hoops"
"with the adidas Dual Threat Junior Basketball Shoes. The shoes feature an adidas BOUNCE cushioning system to"
"deliver protection for your feet and cushioned comfort every time you hit the court. The synthetic leather mesh upper"
"allows optimum breathability and airflow to help keep your feet cool as the game heats up while the molded foam collar"
"provides premium comfort for your ankles. On-court traction and grip won't be a problem thanks to the full-length herringbone"
"pattern on the outsole while also adding durability for long-lasting wear. Go hard at the ring and hit the big shots for your team"
"in comfort thanks to the adidas Dual Threat Junior Basketball Shoes.", category=category2)
session.add(item1)
session.commit()
item2 = Item(
user_id=1,
title="Nike Dominate Basketball",
description="The Nike Dominate Basketball is perfect for outdoor use and ideal for"
"perfecting your passes. Constructed with a soft touch rubber and featuring a deep pebble texture for superior grip and handling"
"when you need it most.",
category=category2)
session.add(item2)
session.commit()
# items for Category Boxing
category3 = Category(user_id=1, name="Boxing")
session.add(category3)
session.commit()
item1 = Item(
user_id=1,
title="Adidas Women's Speed 100 Boxing Glove",
description="Optimize your speed and performance with the Adidas"
"Women's Speed 100 Glove. Specifically designed for Women, the narrower tapered fit ensures a comfortable and sleek design."
"Constructed with a PU overlay material and rigid hook and loop elastic closing system, allows for a quick and firm adjustment."
"Ideal for Boxfit and fitness classes, break through to the next level with the Adidas Women's Speed 100 Glove.",
category=category3)
session.add(item1)
session.commit()
item2 = Item(
user_id=1,
title="Everlast 108\" Hand Wraps",
description="Get your hands on the Everlast 108\""
"Hand Wraps. Crafted from a polyester and nylon blend, these wraps have Everlast's EverFresh anti-microbial"
"treatment that prevents the growth of odour causing bacteria, keeping them fresher for longer. Engineered"
"for hand protection and support, the thumb strap that helps hold them in place, and a hook and loop closure"
"that delivers a secure and stay put fit. The length of these wraps is 108 inches, ideal for smaller hands and"
"MMA workouts. Stop pulling your punches and uppercut, hook and jab without holding back with the Everlast 108\" Hand Wraps.",
category=category3)
session.add(item2)
session.commit()
item3 = Item(
user_id=1, title="Everlast Chain & Swivel Set", description="Get your punching bag ready for war with"
"the Everlast Chain & Swivel Set. Perfect for heavy bags with web strapping, this will allow you to work on your power,"
"coordination and technique, from the comfort of your home. Featuring carabiners & a spot-welded chain construction provides"
"a more secure hold, while the built in eye swivel allows for freedom of bag movement during after each punch; let the Everlast"
"Chain & Swivel Set help you get fight day ready", category=category3)
session.add(item3)
session.commit()
# items for Category Golf
category4 = Category(user_id=1, name="Golf")
session.add(category4)
session.commit()
item1 = Item(
user_id=1, title="Under Armour Men's Core Golf Visor", description="Perfect for sunny days on the golf course,"
"the Under Armour Men's Core Gold Visor features a pre-curved bill designed to shield your eyes from harsh sun glare."
"The built in HeatGear sweatband absorbs and wicks moisture away from the skin keeping sweat out of the eyes for"
"focused performance; while a flexible stretch fit construction and an adjustable strap provides a customand secure fit. Eliminate"
"all minor distraction and keep your eye on the ball in the Under Armour Men's Core Golf Visor.", category=category4)
session.add(item1)
session.commit()
item2 = Item(
user_id=1,
title="Srixon Soft Feel 12 PK Golf Balls",
description="Built for the golfer looking for distance"
"off the tee, the SrixonSoft Feel 12 PK Golf Balls feature a high velocity core to deliver extra range, while offering"
"a softer feel around the greens..",
category=category4)
session.add(item2)
session.commit()
# items for Category Hockey
category5 = Category(user_id=1, name="Hockey")
session.add(category5)
session.commit()
item1 = Item(
user_id=1,
title="Kookaburra Revoke Hockey Hand Guard",
description="the Kookaburra Revoke Hockey Hand Guard delivers superior quality,"
"comfort and protection. This left hand guard has an open palm design to aid stick control and feel, with an ergonomic plastic shell"
"that provides lightweight backhand protection. With a lightweight and unrestrictive design, this hand guard also has a 2 inch elasticated"
"embossed wristband for a secure fit every time.",
category=category5)
session.add(item1)
session.commit()
item2 = Item(
user_id=1,
title="Kookaburra Feud M-Bow Senior Hockey Stick",
description="The Kookaburra Feud M-Bow Senior Hockey Stick"
"is a great choice for hockey players wanting to develop and improve core skills. Made with a Dual Core construction"
"that delivers high power, and a maxi head shape for excellent control and easier pushing. The M-Bow places the optimum point of the bow"
"in the mid-section of the stick which enhances ball control and sweep hitting techniques, whilst specialising in the strike.",
category=category5)
session.add(item2)
session.commit()
print "added menu items!"
| [
"eng.fouad.ashraf@gmail.com"
] | eng.fouad.ashraf@gmail.com |
882b8a411a7523e551973605d5de272055d5db4c | 60b095695d06c5d59ae0750d605bf2e658e91f05 | /superheroes/models.py | 0f5d6fd01986b8cbfd86f87c438312b2ad36285b | [] | no_license | Rojasknight/Django-Rest-Framework-upplugget | b32f09df267d84d58763bdc14c0adb960d55413e | 7472d5512c7a8e71e5e9674fed8ae6b40a5603ee | refs/heads/master | 2020-04-18T09:21:06.188694 | 2019-01-24T20:28:31 | 2019-01-24T20:28:31 | 167,431,134 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 705 | py | from django.db import models
class Publisher(models.Model):
name = models.CharField(max_length=100, blank=True, default='')
founder = models.CharField(max_length=100, blank=True, default='')
class Meta:
ordering = ('name',)
def __str__(self):
return self.name
class SuperHeroe(models.Model):
name = models.CharField(max_length=100, blank=True, default='')
gender = models.CharField(max_length=100, blank=True, default='')
real_name = models.CharField(max_length=100, blank=True, default='')
publisher = models.ForeignKey(Publisher, on_delete=models.CASCADE)
class Meta:
ordering = ('name',)
def __str__(self):
return self.name
| [
"danny.rojas.reyes@accenture.com"
] | danny.rojas.reyes@accenture.com |
68b48de1b46c9d5f18f3e33d5a10939765933eee | cd5baac779b99134bbe4c08ff884466e4a813524 | /Cadastro/migrations/0004_auto_20161128_1008.py | e34d5b3089fc7df68b26bfba32a0d0be3124234e | [] | no_license | judeo66/TCC | 7eec0592a375615191bbb282db2c901c82c0f219 | fe766269d4a44ab8241ecc760994f40189f09ae3 | refs/heads/master | 2021-01-12T08:13:45.937391 | 2016-12-15T02:15:54 | 2016-12-15T02:15:54 | 76,514,707 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 747 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('Cadastro', '0003_tratamentos_sensibilidade'),
]
operations = [
migrations.AlterField(
model_name='tratamentos',
name='Id_medicamentoCurto',
field=models.ForeignKey(related_name='medCurto', to='Cadastro.Medicamentos'),
preserve_default=True,
),
migrations.AlterField(
model_name='tratamentos',
name='Id_medicamentoLongo',
field=models.ForeignKey(related_name='medLongo', to='Cadastro.Medicamentos'),
preserve_default=True,
),
]
| [
"israel.0123@hotmail.com"
] | israel.0123@hotmail.com |
5da2022113b8b642a4dafaaab63a1f47788af57b | e4b2d6863fadf63bdc6b42fafb4ad298d7ad0d08 | /elyra/pipeline/kfp/kfp_properties.py | be5888b2f67e0c2516af36c45c61bee4b1faf1b0 | [
"Apache-2.0",
"CC-BY-4.0",
"LicenseRef-scancode-unknown-license-reference",
"CC-BY-SA-4.0",
"BSD-3-Clause"
] | permissive | elyra-ai/elyra | eef32416b7c6039090a2d2a9da08238bb55569b2 | 3c27ada25a27b719529e88268bed38d135e40805 | refs/heads/main | 2023-08-19T02:58:52.615956 | 2023-06-19T16:00:23 | 2023-06-19T16:00:23 | 216,914,329 | 1,707 | 345 | Apache-2.0 | 2023-08-14T23:34:05 | 2019-10-22T21:37:58 | Python | UTF-8 | Python | false | false | 7,078 | py | #
# Copyright 2018-2023 Elyra Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import annotations
from keyword import iskeyword
from typing import Any
from typing import List
from typing import Optional
from elyra.pipeline.properties import ListItemPropertyAttribute
from elyra.pipeline.properties import PipelineParameter
from elyra.pipeline.properties import PropertyInputType
class KfpPropertyInputType(PropertyInputType):
"""
An object representing a single allowed input type for a PropertyAttribute
object for KUBEFLOW_PIPELINES runtime processors.
"""
_kfp_defined_types = {
"String": {"type_hint": "str", "json_type": "string", "default_value": ""},
"Bool": {"type_hint": "bool", "json_type": "boolean", "default_value": False, "placeholder": " "},
"Integer": {"type_hint": "int", "json_type": "integer"},
"Float": {"type_hint": "float", "json_type": "number", "render_input_value": True},
# "GCSPath": {"type_hint": "GCSPath", "json_type": "string", "default_value": "", "placeholder": "gs://"},
# "GCRPath": {"type_hint": "GCRPath", "json_type": "string", "default_value": "", "placeholder": "gcr.io/"},
# "GCPRegion": {"type_hint": "GCPRegion", "json_type": "string", "default_value": ""},
# "GCPProjectID": {"type_hint": "GCPProjectID", "json_type": "string", "default_value": ""},
"CustomString": {"json_type": "string", "type_title": "String-valued parameter of arbitrary type"}
# "List": {"type_hint": "list", "json_type": "array", "default_value": []}, # not yet supported by frontend
# "Dict": {"type_hint": "dict", "json_type": "object", "default_value": {}}, # not yet supported by frontend
}
def __init__(
self,
base_type: str,
default_value: Optional[Any] = None,
placeholder: Optional[Any] = None,
enum: Optional[List[Any]] = None,
**kwargs,
):
super().__init__(
base_type=base_type,
default_value=default_value,
placeholder=placeholder,
enum=enum,
runtime_defined_types=self._kfp_defined_types,
**kwargs,
)
self.type_hint = kwargs.get("type_hint") or self._kfp_defined_types[base_type].get("type_hint")
self.component_input_type = kwargs.get("type_hint") or self.base_type
if self.component_input_type == "Bool":
# Due to a known issue with KFP (https://github.com/kubeflow/pipelines/issues/5111),
# inputs of type "Bool" must be rendered as "Boolean" in the component inputs field
# of the component spec
self.component_input_type = "Boolean"
class KfpPipelineParameter(PipelineParameter):
"""An ElyraProperty representing a single pipeline parameter for the Kubeflow Pipelines runtime"""
property_id = "KFP_PIPELINE_PARAMETERS"
property_attributes = [
ListItemPropertyAttribute(
attribute_id="name",
description="The name of the parameter. This must be a valid Python identifier and not a keyword.",
display_name="Parameter Name",
allowed_input_types=[PropertyInputType(base_type="str", placeholder="param_1")],
hidden=False,
required=True,
use_in_key=True,
pattern="^[a-zA-Z][a-zA-Z0-9_]*$",
),
ListItemPropertyAttribute(
attribute_id="description",
description="A description for this parameter.",
display_name="Description",
allowed_input_types=[PropertyInputType(base_type="str")],
hidden=False,
required=False,
use_in_key=False,
),
ListItemPropertyAttribute(
attribute_id="default_value",
description="A default value for the parameter.",
display_name="Default Value",
allowed_input_types=[
KfpPropertyInputType(base_type="String", placeholder="default_val"),
KfpPropertyInputType(base_type="Integer"),
KfpPropertyInputType(base_type="Float"),
KfpPropertyInputType(base_type="Bool"),
],
hidden=False,
required=False,
use_in_key=False,
),
ListItemPropertyAttribute(
attribute_id="value",
display_name="Value",
allowed_input_types=[
KfpPropertyInputType(base_type="String"),
KfpPropertyInputType(base_type="Integer"),
KfpPropertyInputType(base_type="Float"),
KfpPropertyInputType(base_type="Bool"),
],
hidden=True,
required=False,
use_in_key=False,
),
ListItemPropertyAttribute(
attribute_id="required",
description="Whether a value is required for this parameter during pipeline submit or export.",
display_name="Required",
allowed_input_types=[PropertyInputType(base_type="bool", placeholder=" ")],
hidden=False,
required=False,
use_in_key=False,
),
]
default_type = "String"
def __init__(self, name, description, value, default_value, required, **kwargs):
super().__init__(
name=name, description=description, value=value, default_value=default_value, required=required
)
self.input_type = KfpPropertyInputType(base_type=self.selected_type or self.default_type)
def get_all_validation_errors(self) -> List[str]:
"""Perform custom validation on an instance."""
validation_errors = []
if not self.name:
validation_errors.append("Required parameter name was not specified.")
elif not self.name.isidentifier():
# param name is not a valid python variable name
validation_errors.append(
f"'{self.name}' is not a valid parameter name: name must be a Python variable name."
)
elif iskeyword(self.name):
# param name collides with a python keyword (e.g. class, def, etc.)
validation_errors.append(f"'{self.name}' is not a valid parameter name: name cannot be a Python keyword.")
# If 'required' is True, a value must be provided
if self.required and (self.value is None or self.value == ""):
validation_errors.append("Parameter is marked as required but no value has been assigned.")
return validation_errors
| [
"noreply@github.com"
] | noreply@github.com |
473513794296ed1544e1314aa0cdd3e0cc793e96 | ee8f18cb893be3c3e9e584731d6bf7814d6cbf0a | /portfolio/urls.py | aa086de8714761d8ae68010f4763a017fe298b08 | [] | no_license | christensensl/django_portfolio | 8fa5231a67713794997a06180e9215182eccc184 | 759032c8eb96c59249fb1cac323ef101564a73b9 | refs/heads/master | 2022-09-01T16:42:11.109066 | 2020-05-15T22:54:20 | 2020-05-15T22:54:20 | 264,316,912 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 172 | py | from django.urls import path
from .import views
urlpatterns = [
path('', views.home, name='portfolio-home'),
path('about/', views.about, name='portfolio-about'),
] | [
"60920424+christensensl@users.noreply.github.com"
] | 60920424+christensensl@users.noreply.github.com |
6f922fcbf3a181a5c5f7408b4680e13c3cf945c6 | 04b27f7ac8cf5369f6cf006484a99e308672e5bd | /data/migrations/0018_auto_20170103_0805.py | 21e31407cb57d1d62d267f918030be4fcc3048ad | [] | no_license | VijeshVenugopal/app | 1b752801f317648567859e738348ff5ca3e52ca0 | ae0a1abe2bee5c75693450921401606e610b4be0 | refs/heads/master | 2020-03-23T05:16:42.450301 | 2017-02-02T15:44:08 | 2017-02-02T15:44:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,174 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2017-01-03 08:05
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('data', '0017_auto_20170103_0716'),
]
operations = [
migrations.AlterField(
model_name='resetpassword',
name='created_date_time',
field=models.DateTimeField(default=datetime.datetime(2017, 1, 3, 8, 5, 40, 825653)),
),
migrations.AlterField(
model_name='ticket',
name='created_date_time',
field=models.DateTimeField(default=datetime.datetime(2017, 1, 3, 8, 5, 40, 823793)),
),
migrations.AlterField(
model_name='ticketattachment',
name='created_date_time',
field=models.DateTimeField(default=datetime.datetime(2017, 1, 3, 8, 5, 40, 825076)),
),
migrations.AlterField(
model_name='ticketdetail',
name='created_date_time',
field=models.DateTimeField(default=datetime.datetime(2017, 1, 3, 8, 5, 40, 824534)),
),
]
| [
"jeffz.in7@gmail.com"
] | jeffz.in7@gmail.com |
f9b831410e67ccfb2d836ccb97342ca251b29cc8 | 063934d4e0bf344a26d5679a22c1c9e5daa5b237 | /margrave-examples-internal/capirca-margrave/capirca-r242-MODIFIED/lib/aclgenerator.py | 8ee174b93117e296c638b564e0eca83ea55b83e6 | [
"Apache-2.0"
] | permissive | tnelson/Margrave | 329b480da58f903722c8f7c439f5f8c60b853f5d | d25e8ac432243d9ecacdbd55f996d283da3655c9 | refs/heads/master | 2020-05-17T18:43:56.187171 | 2014-07-10T03:24:06 | 2014-07-10T03:24:06 | 749,146 | 5 | 2 | null | null | null | null | UTF-8 | Python | false | false | 11,378 | py | #!/usr/bin/python2.4
#
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""ACL Generator base class."""
import copy
import re
import policy
# generic error class
class Error(Exception):
"""Base error class."""
pass
class NoPlatformPolicyError(Error):
"""Raised when a policy is received that doesn't support this platform."""
pass
class UnsupportedFilter(Error):
"""Raised when we see an inappropriate filter."""
pass
class UnknownIcmpTypeError(Error):
"""Raised when we see an unknown icmp-type."""
pass
class MismatchIcmpInetError(Error):
"""Raised when mistmatch between icmp/icmpv6 and inet/inet6."""
pass
class EstablishedError(Error):
"""Raised when a term has established option with inappropriate protocol."""
pass
class UnsupportedAF(Error):
"""Raised when provided an unsupported address family."""
pass
class DuplicateTermError(Error):
"""Raised when duplication of term names are detected."""
pass
class UnsupportedFilterError(Error):
"""Raised when we see an inappropriate filter."""
class Term(object):
"""Generic framework for a generator Term."""
ICMP_TYPE = policy.Term.ICMP_TYPE
PROTO_MAP = {'ip': 0,
'icmp': 1,
'igmp': 2,
'ggp': 3,
'ipencap': 4,
'tcp': 6,
'egp': 8,
'igp': 9,
'udp': 17,
'rdp': 27,
'ipv6': 41,
'ipv6-route': 43,
'ipv6-frag': 44,
'rsvp': 46,
'gre': 47,
'esp': 50,
'ah': 51,
'icmpv6': 58,
'ipv6-nonxt': 59,
'ipv6-opts': 60,
'ospf': 89,
'ipip': 94,
'pim': 103,
'vrrp': 112,
'l2tp': 115,
'sctp': 132,
}
AF_MAP = {'inet': 4,
'inet6': 6,
'bridge': 4 # if this doesn't exist, output includes v4 & v6
}
# provide flipped key/value dicts
PROTO_MAP_BY_NUMBER = dict([(v, k) for (k, v) in PROTO_MAP.iteritems()])
AF_MAP_BY_NUMBER = dict([(v, k) for (k, v) in AF_MAP.iteritems()])
def NormalizeAddressFamily(self, af):
"""Convert (if necessary) address family name to numeric value.
Args:
af: Address family, can be either numeric or string (e.g. 4 or 'inet')
Returns:
af: Numeric address family value
Raises:
UnsupportedAF: Address family not in keys or values of our AF_MAP.
"""
# ensure address family (af) is valid
if af in self.AF_MAP_BY_NUMBER:
return af
elif af in self.AF_MAP:
# convert AF name to number (e.g. 'inet' becomes 4, 'inet6' becomes 6)
af = self.AF_MAP[af]
else:
raise UnsupportedAF('Address family %s is not supported, term %s.' % (
af, self.term.name))
return af
def NormalizeIcmpTypes(self, icmp_types, protocols, af):
"""Return verified list of appropriate icmp-types.
Args:
icmp_types: list of icmp_types
protocols: list of protocols
af: address family of this term, either numeric or text (see self.AF_MAP)
Returns:
sorted list of numeric icmp-type codes.
Raises:
UnsupportedFilterError: icmp-types specified with non-icmp protocol.
MismatchIcmpInetError: mismatch between icmp protocol and address family.
UnknownIcmpTypeError: unknown icmp-type specified
"""
if not icmp_types:
return ['']
# only protocols icmp or icmpv6 can be used with icmp-types
if protocols != ['icmp'] and protocols != ['icmpv6']:
raise UnsupportedFilterError('%s %s' % (
'icmp-types specified for non-icmp protocols in term: ',
self.term.name))
# make sure we have a numeric address family (4 or 6)
af = self.NormalizeAddressFamily(af)
# check that addr family and protocl are appropriate
if ((af != 4 and protocols == ['icmp']) or
(af != 6 and protocols == ['icmpv6'])):
raise MismatchIcmpInetError('%s %s' % (
'ICMP/ICMPv6 mismatch with address family IPv4/IPv6 in term',
self.term.name))
# ensure all icmp types are valid
for icmptype in icmp_types:
if icmptype not in self.ICMP_TYPE[af]:
raise UnknownIcmpTypeError('%s %s %s %s' % (
'\nUnrecognized ICMP-type (', icmptype,
') specified in term ', self.term.name))
rval = []
rval.extend([self.ICMP_TYPE[af][x] for x in icmp_types])
rval.sort()
return rval
class ACLGenerator(object):
"""Generates platform specific filters and terms from a policy object.
This class takes a policy object and renders the output into a syntax which
is understood by a specific platform (eg. iptables, cisco, etc).
"""
_PLATFORM = None
# Default protocol to apply when no protocol is specified.
_DEFAULT_PROTOCOL = 'ip'
# Unsupported protocols by address family.
_SUPPORTED_AF = set(('inet', 'inet6'))
# Commonly misspelled protocols that the generator should reject.
_FILTER_BLACKLIST = {}
# set of required keywords that every generator must support
_REQUIRED_KEYWORDS = set(['action',
'comment',
'destination_address',
'destination_address_exclude',
'destination_port',
'icmp_type',
'name', # obj attribute, not keyword
'option',
'protocol',
'platform',
'platform_exclude',
'source_address',
'source_address_exclude',
'source_port',
'translated', # obj attribute, not keyword
'verbatim',
])
# Generators should redefine this in subclass as optional support is added
_OPTIONAL_SUPPORTED_KEYWORDS = set([])
def __init__(self, pol, exp_info):
"""Initialise an ACLGenerator. Store policy structure for processing."""
object.__init__(self)
# The default list of valid keyword tokens for generators
self._VALID_KEYWORDS = self._REQUIRED_KEYWORDS.union(
self._OPTIONAL_SUPPORTED_KEYWORDS)
self.policy = pol
for header, terms in pol.filters:
if self._PLATFORM in header.platforms:
# Verify valid keywords
# error on unsupported optional keywords that could result
# in dangerous or unexpected results
for term in terms:
# Only verify optional keywords if the term is active on the platform.
err = []
if term.platform:
if self._PLATFORM not in term.platform:
continue
if term.platform_exclude:
if self._PLATFORM in term.platform_exclude:
continue
for el, val in term.__dict__.items():
# Private attributes do not need to be valid keywords.
if (val and el not in self._VALID_KEYWORDS
and not el.startswith('flatten')):
err.append(el)
if err:
raise UnsupportedFilterError('%s %s %s %s %s %s' % ('\n', term.name,
'unsupported optional keywords for target', self._PLATFORM,
'in policy:', ' '.join(err)))
continue
self._TranslatePolicy(pol, exp_info)
def _TranslatePolicy(self, pol, exp_info):
"""Translate policy contents to platform specific data structures."""
raise Error('%s does not implement _TranslatePolicies()' % self._PLATFORM)
def FixHighPorts(self, term, af='inet', all_protocols_stateful=False):
"""Evaluate protocol and ports of term, return sane version of term."""
mod = term
# Determine which protocols this term applies to.
if term.protocol:
protocols = set(term.protocol)
else:
protocols = set((self._DEFAULT_PROTOCOL,))
# Check that the address family matches the protocols.
if not af in self._SUPPORTED_AF:
raise UnsupportedAF('\nAddress family %s, found in %s, '
'unsupported by %s' % (af, term.name, self._PLATFORM))
if af in self._FILTER_BLACKLIST:
unsupported_protocols = self._FILTER_BLACKLIST[af].intersection(protocols)
if unsupported_protocols:
raise UnsupportedFilter('\n%s targets do not support protocol(s) %s '
'with address family %s (in %s)' %
(self._PLATFORM, unsupported_protocols,
af, term.name))
# Many renders expect high ports for terms with the established option.
for opt in [str(x) for x in term.option]:
if opt.find('established') == 0:
unstateful_protocols = protocols.difference(set(('tcp', 'udp')))
if not unstateful_protocols:
# TCP/UDP: add in high ports then collapse to eliminate overlaps.
mod = copy.deepcopy(term)
mod.destination_port.append((1024, 65535))
mod.destination_port = mod.CollapsePortList(mod.destination_port)
elif not all_protocols_stateful:
errmsg = 'Established option supplied with inappropriate protocol(s)'
raise EstablishedError('%s %s %s %s' %
(errmsg, unstateful_protocols,
'in term', term.name))
break
return mod
def AddRepositoryTags(prefix=''):
"""Add repository tagging into the output.
Args:
prefix: comment delimiter, if needed, to appear before tags
Returns:
list of text lines containing revision data
"""
tags = []
p4_id = '%sId:%s' % ('$', '$')
p4_date = '%sDate:%s' % ('$', '$')
tags.append('%s%s' % (prefix, p4_id))
tags.append('%s%s' % (prefix, p4_date))
return tags
def WrapWords(textlist, size, joiner='\n'):
"""Insert breaks into the listed strings at specified width.
Args:
textlist: a list of text strings
size: width of reformated strings
joiner: text to insert at break. eg. '\n ' to add an indent.
Returns:
list of strings
"""
# \S*? is a non greedy match to collect words of len > size
# .{1,%d} collects words and spaces up to size in length.
# (?:\s|\Z) ensures that we break on spaces or at end of string.
rval = []
linelength_re = re.compile(r'(\S*?.{1,%d}(?:\s|\Z))' % size)
for index in range(len(textlist)):
if len(textlist[index]) > size:
# insert joiner into the string at appropriate places.
textlist[index] = joiner.join(linelength_re.findall(textlist[index]))
# avoid empty comment lines
rval.extend(x.strip() for x in textlist[index].strip().split(joiner) if x)
return rval
| [
"tn@cs.wpi.edu"
] | tn@cs.wpi.edu |
945b2e66bd120592ad55fe796ccf88aeb4bb2efe | e1e5ffef1eeadd886651c7eaa814f7da1d2ade0a | /Systest/lib/py/issu.py | 3fd94a49e3fafb90378548fd7e78467b943c7ff7 | [] | no_license | muttu2244/MyPython | 1ddf1958e5a3514f9605d1f83c0930b24b856391 | 984ca763feae49a44c271342dbc15fde935174cf | refs/heads/master | 2021-06-09T02:21:09.801103 | 2017-10-10T07:30:04 | 2017-10-10T07:30:04 | 13,803,605 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 568,600 | py | #!/usr/bin/env python2.5
#######################################################################
#
# Copyright (c) Stoke, Inc.
# All Rights Reserved.
#
# This code is confidential and proprietary to Stoke, Inc. and may only
# be used under a license from Stoke.
#
#######################################################################
# AUTHOR: Jeremiah Alfrey jalfrey@stoke.com
#######################################################################
import sys, os
mydir = os.path.dirname(__file__)
qa_lib_dir = mydir
if qa_lib_dir not in sys.path:
sys.path.insert(1,qa_lib_dir)
### python stuff
import time
import shutil
import string
### local stuff
from logging import getLogger
from pexpect import TIMEOUT
import pexpect
import time
import datetime
import re
# what is this for?
from pprint import pprint
# Used for nslookup
import socket
### import SSX
# this import may not be required.
#from device import SSX
# used for unix_to_dos_path conversion
import ntpath
# used to get_mac_address
import CISCO
enable_prompt_regex = "[\r\n]*\S+\[\S+\]#"
yesno_prompt_regex =".*[\r\n.]*\(\[*yes\]*/\[*no\]*\)\s*$"
debug = False
month_list = ['Jan','Feb','Mar','Apr','May','Jun','Jul','Aug','Sep','Oct','Nov','Dec']
# This is a complete list of all the valid ports on the SSX. When the 14 slot chassis is tested this will need to be changed
# to include the new ports
valid_port_list = ['0/0','1/0','2/0','2/1','2/2','2/3','3/0','3/1','3/2','3/3','4/0','4/1','4/2','4/3']
def cli_cmd(self, command, raw=False):
"""
This is a greatly imroved version of cmd from /SSX/device.py
It will read and parse the command prompt to correctly and quickly detect it.
This method can parse very long outputs and is much faster then the existing method
To use it simply send the command and the output will be returned
The output is returned as a list []. If you use the raw=True you will get it as a
single long string not split yet.
"""
debug = False
if debug:
print 'now in cli_cmd'
timeout = 5
self.ses.sendline('\r')
# The first line we read is empty
raw_prompt = self.ses.readline()
# This is the actual prompt
raw_prompt = self.ses.readline()
if debug:
print 'raw_prompt:', raw_prompt
prompt_pieces = raw_prompt.strip()
if len(prompt_pieces) < 2:
self.ses.sendline('\r')
# The first line we read is empty
raw_prompt = self.ses.readline()
# This is the actual prompt
raw_prompt = self.ses.readline()
if debug:
print 'raw_prompt:', raw_prompt
else:
if prompt_pieces == '#':
if debug:
print 'detected QNX Shell prompt: #'
prompt = '#'
else:
prompt_pieces = prompt_pieces.split('[')
if debug:
print 'hostname:', prompt_pieces[0]
print 'remainder:', prompt_pieces
prompt_hostname = prompt_pieces[0]
prompt_pieces = prompt_pieces[1].split(']')
if debug:
print 'Context:', prompt_pieces[0]
print 'remainder:', prompt_pieces
prompt_context = prompt_pieces[0]
prompt_admin_level = prompt_pieces[1]
#prompt = 'australia'
prompt = prompt_hostname + '.' + prompt_context + '.' + prompt_admin_level
if debug:
print 'prompt:', prompt
retr = self.ses.expect(prompt, timeout = timeout)
if retr == 0:
if debug:
print 'command successfull'
elif retr == 1:
print 'Something broke while executing command!'
sys.exit(1)
else:
print retr
if debug:
print 'setting term length infinite'
self.ses.sendline('term length infinite')
retr = self.ses.expect(prompt, timeout = timeout)
if retr == 0:
if debug:
print 'command successfull'
elif retr == 1:
print 'Something broke while executing command!'
sys.exit(1)
else:
print retr
if debug:
print 'About to execute the command you requested'
print 'command:', command
self.ses.sendline(command)
retr = self.ses.expect(prompt, timeout = timeout)
if retr == 0:
if debug:
print 'command successfull'
elif retr == 1:
print 'Something broke while executing command!'
sys.exit(1)
else:
print retr
raw_rtrn = self.ses.before
raw_after = self.ses.after
if debug:
print 'This is what the command returned:'
print '----------------------------------'
print raw_rtrn
print '-------'
print raw_after
print '----------------------------------'
if raw:
# We need to remove the first line of text but it's all one long line
# so we count the length of the original command and add some for the CR\LF characters
command_length = len(command) + 2
return raw_rtrn[command_length:]
# The 1: tells the system to return everything except the first line
# The first line contains the command that was executed.
else:
rtrn = raw_rtrn.splitlines()
return rtrn[1:]
def issu_enable(self, timeout=200):
"""enables ISSU with a set timeout"""
debug = False
if debug:
print 'now in issu.py method issu_enable'
self.ses.sendline("system issu enable")
index = self.ses.expect(yesno_prompt_regex,timeout=timeout)
if index == 0 :
self.ses.sendline("yes")
if "-con" in self.host:
self._handle_login(timeout = timeout)
else:
time.sleep(timeout)
self.telnet()
else :
print "in enable mode"
def install(self, tree, build, package_name, target_path='/hd/issu', username = 'builder', password = 'fuxor8', linux_ip='10.1.1.101'):
    """Retrieve a StokeOS package via SFTP and install it on the SSX.

    tree         = build tree, e.g. '4.6-prod'
    build        = build id, e.g. '2010011818'
    package_name = version label, e.g. '4.6A1'
    target_path  = directory on the SSX hard drive to copy the image into
    username     = SFTP account on the build host, e.g. 'builder'
    password     = SFTP password
    linux_ip     = IP address (or resolvable hostname) of the host that
                   exports the build directories, e.g. 10.1.1.101
                   (qa-radxpm-1)

    The full source path is assembled as
    /auto/build/builder/<tree>/<build>/qnx/cb/mc/StokeOS-<package_name>

    Returns 0 on success (including "already installed"), 1 or an error
    string on failure.
    """
    # It's assumed that the host running this script is auto mounting the build directories
    # and that the packages for installation are out there.
    # It's also assumed that the SSX (DUT) has network connectivity and can reach the testing host.
    debug = False
    ## Debug
    if debug:
        print 'now in issu.py install'
    ## Validate arguments
    # The only argument we can actually validate is linux_ip.
    # The SSX will only accept an IP address in the sftp command, not a
    # hostname, so if a hostname was passed try to resolve it; if that
    # fails we must bail out.
    if not validIP(linux_ip):
        if debug:
            print 'detected the value linux_ip is not a valid IP address.'
            print 'attempting to do an NS lookup on the hostname'
        linux_ip_tmp = nslookup_by_ip(linux_ip)
        if validIP(linux_ip_tmp):
            linux_ip = linux_ip_tmp
        else:
            print 'invalid IP address or Host Name provided for liux_ip:', linux_ip
            return ("invalid IP address or Host Name provided for liux_ip: %s" % linux_ip)
    build_dir = '/auto/build/builder/'
    back_half = '/qnx/cb/mc/StokeOS-'
    installed_packages = []
    ## Need to see if the path /hd/issu exists
    # !!!!!
    command = 'dir ' + target_path
    #result_raw = self.cmd('dir /hd/issu')
    try:
        result_raw = self.cmd(command)
    except:
        print 'Unable to list the ISSU directory'
        print 'System responded'
        # NOTE(review): pexpect exposes before/after as attributes, not
        # callables -- confirm the session API; also, if self.cmd() raised,
        # result_raw is unbound and the splitlines() below will NameError.
        self.ses.before()
        self.ses.after()
    result = result_raw.splitlines()
    try:
        installed_packages = show_versions(self)
        print 'Completed reading installed packages'
        print 'Found the following versions installed:'
        for item in installed_packages:
            print item
    except:
        print 'Unable to read versions installed'
        return 'Unable to read versions installed'
    #####
    # Look to see if the package is already installed
    if package_name in installed_packages:
        # If so then return with a success
        print 'The package:', package_name, 'is already installed on the SSX'
        print 'Installation will be skipped.'
        return(0)
    else:
        print 'The package', package_name, 'will be installed'
    # the image name looks like 'StokeOS-4.5B2-2009092913'
    image_name = 'StokeOS-' + package_name + '-' + build
    ## We need to see if the file is already on the system
    # to avoid overwriting the file
    images_on_the_system = []
    marker_found = False
    if debug:
        print 'About to parse the dir /hd/issu command'
        print 'Searching the hard drive for the requested version'
    # The result is from the earlier Dir information.
    # Parse the 'dir' listing: filenames live between the dashed header
    # line and the trailing 'File system:' line; marker_found gates which
    # lines are harvested.
    for line in result:
        if len(line) > 0:
            """
            if debug:
                print 'Line to be processed:', line
            """
            # This test will be run for every line
            # but there are only like 8 lines so no big deal
            if 'Unable to access directory' in result:
                # If this fails then something is really messed up!
                command = 'mkdir ' + target_path
                #self.cmd('mkdir /hd/issu')
                self.cmd(command)
            else:
                ## Warning: if other files are present then their filenames
                ## will be stored but it should have net zero effect.
                # This turns off the storage
                if 'File system:' in line:
                    marker_found = False
                    """
                    if debug:
                        print 'Found end of versions'
                    """
                # This stores the values (4th column is the filename)
                if marker_found:
                    """
                    if debug:
                        print 'Found a version:', word[3]
                    """
                    word = line.split()
                    images_on_the_system.append(word[3])
                # This turns on the storage
                if '--------- -------- ---------- ----' in line:
                    marker_found = True
                    if debug:
                        print 'Found beginning of versions'
    if debug:
        print 'Images installed on the system are:'
        for line in images_on_the_system:
            print line
        print 'We were looking for the following image'
        print image_name
    if image_name in images_on_the_system:
        print 'Image was found on the HD. Will not be coppied over'
    else:
        ## Now we need to actually do the work of copying the package over.
        #####
        print 'Image not found on hard drive. It will be retrieved.'
        if debug:
            print 'Piecing the parts together'
        # We're already connected to the SSX.
        # We need to SFTP the file from the linux_ip to the SSX.
        # To do that we need to know the full path to the file;
        # we have the pieces so we need to assemble them.
        """
        if debug:
            print 'The full_path variable will contain these parts:'
            print 'build_dir:', build_dir
            print 'tree:', tree
            print 'build:', build
            print 'back_half:', back_half
            print 'package_name:', package_name
        """
        # we're re-defining this variable because it was not passed in
        full_path = build_dir + tree + '/' + build + back_half + package_name
        if debug:
            print 'Full path:', full_path
            print 'Image will be written to the following filename:', image_name
            print 'It will be written to /hd/issu/' + image_name
        # At this point we have all the pieces to assemble the SFTP command
        """
        cmd = 'copy sftp://' + username + '@' + linux_ip + ':' + full_path + \
        ' /hd/issu/' + image_name
        """
        # added target path for specifying location to install TO
        cmd = 'copy sftp://' + username + '@' + linux_ip + ':' + full_path + \
        ' ' + target_path + '/' + image_name
        print 'about to run the command:'
        print cmd
        ##########
        # Copy the file over.
        # Here we run the command using the ftppasswd method (answers the
        # interactive password prompt; 210s transfer timeout).
        retr = self.ftppasswd(cmd, password, 210)
        if retr:
            print 'Failed to SFTP the file over. Aborting install!'
            return(1)
        if debug:
            print 'Completed sending the new build to the SSX'
    ###########
    # At this point the file is actually on the SSX and we can attempt to "install" it.
    #command = 'system install package /hd/issu/' + image_name
    # Added target path
    command = 'system install package ' + target_path + '/' + image_name
    if debug:
        print "install command will be: %s" % command
    #self.cmd(command)
    #result = self.cmd('yes')
    self.ses.sendline("%s" % command)
    index = self.ses.expect(['will be done', 'Install is not permitted'], timeout=30)
    print self.ses.before
    if index == 0:
        print 'recieved YES/NO prompt'
        self.ses.sendline('yes')
        print 'installing package .....'
    elif index == 1:
        print 'ISSU is already in progress. Can not install package!'
        return 'ERROR - Install is not permitted as ISSU Revert is in progress'
    else:
        return 'Failed to install package'
    # Wait up to 5 minutes for the install itself to finish.
    index = self.ses.expect(['invalid package path or file', 'Installation complete', 'Installed packages maximum limit'], timeout=300)
    if index == 0:
        print 'System unable to install file. Bad filename or path'
        return(1)
    elif index == 1:
        print 'Installation complete!'
        return(0)
    elif index == 2:
        print 'There are too many packages installed. Please manually remove at lest 1.'
        return(1)
    else:
        print 'Timeout while installing package.'
        return(1)
def change_version(self, version, method, config_filename='default', ignore_port_down = False):
    """Drive one of the three ISSU version-change operations on the SSX.

    method          -- one of 'upgrade', 'revert' or 'select'
    version         -- package label to move to (ignored by 'revert')
    config_filename -- filename offered at the 'Save configuration to
                       file' prompt; 'default' accepts the device default
    ignore_port_down-- for 'upgrade', append the ignore-port-down option

    Returns 0 when the operation was accepted by the device, otherwise a
    descriptive error string.  'select' reboots the system; the caller is
    expected to reconnect afterwards.
    """
    debug = False
    # Wait time could be externally exposed if needed
    wait_time = 10
    if method not in ('upgrade','revert','select'):
        return "Unsuported method %s" % method
    ###########
    # UPGRADE #
    ###########
    elif method == 'upgrade':
        print 'now in issu.py change_version upgrade'
        versions_installed = show_versions(self)
        if version in versions_installed:
            # Send the upgrade command
            if ignore_port_down:
                if debug:
                    print 'about to run the command:'
                    print "system upgrade package %s ignore-port-down" % version
                self.ses.sendline("system upgrade package %s ignore-port-down" % version)
            else:
                if debug:
                    print 'about to run the command:'
                    print "system upgrade package %s" % version
                self.ses.sendline("system upgrade package %s" % version)
            index = self.ses.expect(['Save configuration to file', 'Package not installed', \
                'not supported', 'ISSU mode is disabled', 'in inconsistent state', \
                'Upgrade is in progress', 'No Previous Version'], timeout=wait_time)
            if index == 0:
                if config_filename == 'default':
                    print 'Saving system configuration to default filename'
                    # Press enter to accept the default system prompt
                    self.ses.sendline()
                else:
                    print 'Saving system configuration to:', config_filename
                    # Otherwise put in the filename
                    # Expected format is '/hd/issu-upgd-2010-04-20.cfg'
                    self.ses.sendline(config_filename)
                # A 'StokeBloader' error here means the flash PRIMARY and
                # BACKUP banks differ on that slot; the caller (upgrade())
                # can correct it with 'flash commit <slot>'.
                index_2 = self.ses.expect(['Proceed?', 'ERROR: Slot 0 StokeBloader images',\
                    'ERROR: Slot 1 StokeBloader images', 'ERROR: Slot 2 StokeBloader images', \
                    'ERROR: Slot 3 StokeBloader images', 'ERROR: Slot 4 StokeBloader images'], timeout=wait_time)
                if index_2 == 0:
                    # Use this method because we are expecting the prompt
                    self.cmd('yes')
                    print '^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^'
                    print 'system now upgrading to version:', version
                    print '^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^'
                    return 0
                elif index_2 == 1:
                    print 'Flash banks do not match.'
                    return 'Flash mismatch at slot 0'
                elif index_2 == 2:
                    print 'Flash banks do not match.'
                    return 'Flash mismatch at slot 1'
                elif index_2 == 3:
                    print 'Flash banks do not match.'
                    return 'Flash mismatch at slot 2'
                elif index_2 == 4:
                    print 'Flash banks do not match.'
                    return 'Flash mismatch at slot 3'
                elif index_2 == 5:
                    print 'Flash banks do not match.'
                    return 'Flash mismatch at slot 4'
                else:
                    # NOTE(review): pexpect-style expect() normally raises on
                    # timeout rather than falling through -- confirm this
                    # branch is reachable with the session implementation.
                    return "Timeout while waiting for proceed prompt"
            elif index == 1:
                # Should be converted to log Error message
                print 'Package not Installed'
                return 'Package not installed'
            elif index == 2:
                # Should be converted to log Error message
                print "Package selected: %s is not supported" %version
                return "Package selected: %s is not supported" %version
            elif index == 3:
                print 'ISSU dissabled on system. Aborting!'
                return 'ISSU dissabled on system. Aborting!'
            elif index == 4:
                print 'ISSU Already in process. Unable to Upgrade!'
                return 'ISSU Already in process. Unable to Upgrade!'
            elif index == 5:
                print 'Upgrade is already in progress'
                return 'upgrade already in progress'
            elif index == 6:
                print 'No Previous version present in ISSU history to revert to'
                return 'previous version not present'
            else:
                print "Timeout when attempting to %s package %s" % (method, version)
        else:
            print 'Unable to upgrade to version', version
            print 'Package not installed on sysetm'
            return 'Unable to upgrade to package, because it is not installed'
    ##########
    # REVERT #
    ##########
    elif method == 'revert':
        if debug:
            print 'in issus.py change_version reverting'
        self.ses.sendline('system revert package ignore-port-down')
        index = self.ses.expect(['Save configuration to file', 'not supported', 'Pre-Revert Checks Failed', \
            'ISSU Upgrade is in progress', 'not permitted during ISSU soak phase', \
            'in inconsistent state'], timeout=wait_time)
        if index == 0:
            if config_filename == 'default':
                # Press enter to accept the default system prompt
                self.ses.sendline()
            else:
                # Otherwise put in the filename
                # Expected format is '/hd/issu-upgd-2010-04-20.cfg'
                self.ses.sendline(config_filename)
            index_2 = self.ses.expect('Proceed?', timeout=wait_time)
            if index_2 == 0:
                # Use this method because we are expecting the prompt
                self.cmd('yes')
                return 0
            # NOTE(review): with a single expect pattern a timeout raises, so
            # index_2 == 1 looks unreachable -- confirm intent.
            if index_2 == 1:
                return "Timeout while waiting for proceed prompt"
        elif index == 1:
            print 'Revert not supported!'
            return 'Revert not supported!'
        elif index == 2:
            print 'Pre-Revert Checks Failed'
            return 0
        elif index == 3:
            print 'ISSU Upgrade is in progress. Revert aborted!'
            return 'ISSU Upgrade is in progress. Revert aborted!'
        elif index == 4:
            print 'not permitted during ISSU soak phase'
            return 'not permitted during ISSU soak phase'
        elif index == 5:
            print 'Action will leave card(s) in inconsistent state'
            print 'This error comes up when the system is still booting right after the cards come to'
            print 'Running State. Please try putting a time.sleep(60) in the code to fix this!'
            self.cmd('no')
            return 'ISSU Action still in process. Action will leave card(s) in inconsistent state;'
        else:
            print "Timeout when attempting to %s" % method
    ##########
    # SELECT #
    ##########
    elif method == 'select':
        # Due to the fact that they changed the prompts from ISSUv1 to ISSUv2
        # we get the prompts in a different combination and order.
        # There are several paths through this code.
        if debug:
            print 'in issu.py change_version selecting'
            print 'about to run the following command:'
            print "system select package %s" % version
        self.ses.sendline("system select package %s" % version)
        if debug:
            print 'Command sent. Waiting for response'
        index = self.ses.expect(['Select will clear revert history', 'will erase all revert history', 'Proceed?', \
            'Package not installed', 'Select is not permitted during ISSU soak phase', 'same as Current Version', \
            'Save configuration to file'], timeout=wait_time)
        if debug:
            print 'Parsing system response'
        if (index in [0,1,2]):
            # 'Select will clear revert history'
            # Proceed? (yes/[no])
            self.ses.sendline('yes')
            if index == 2:
                # We got the early proceed prompt from ISSUv1
                return 0
        elif index == 3:
            print 'Package not installed!'
            return 'Package not installed!'
        elif index == 4:
            print 'Select is not permitted during ISSU soak phase'
            return 'Select is not permitted during ISSU soak phase'
        elif index == 5:
            print 'Requested version is already current version'
            return 0
        elif index == 6:
            if config_filename == 'default':
                # Press enter to accept the default system prompt
                self.ses.sendline()
            else:
                # Otherwise put in the filename
                # Expected format is '/hd/issu-upgd-2010-04-20.cfg'
                self.ses.sendline(config_filename)
        else:
            print self.ses.before()
            print "Timeout when attempting to %s package %s" % (method, version)
            return 'Timeout during Select'
        # ISSUv2 may ask to save the configuration after the history prompt;
        # fall through here from indexes 0, 1 and 6 above.
        index = self.ses.expect(['Save configuration to file', 'System will be automatically reloaded'], timeout=wait_time)
        if index == 0:
            if config_filename == 'default':
                # Press enter to accept the default system prompt
                self.ses.sendline()
            else:
                # Otherwise put in the filename
                # Expected format is '/hd/issu-upgd-2010-04-20.cfg'
                self.ses.sendline(config_filename)
        elif index == 1:
            print 'Save Filename prompt not detected.'
        else:
            print "Timeout when attempting to %s package %s" % (method, version)
            return 'Timeout during Select'
        index = self.ses.expect('Proceed?', timeout=wait_time)
        if index == 0:
            # Use this method because we are expecting the prompt
            self.ses.sendline('yes')
            #self.ssx.cmd('yes')
            if (self.host.find("-con") != -1):
                print('Using console. Need to wait for Shutdown')
                index = self.ses.expect('Shutdown', timeout=60)
                if index != 0:
                    print 'System did not shutdown to reboot'
                    return 1
            # At this point the system is doing a reboot if everything
            # worked as planned.
            #time.sleep(1)
            #self.ssx.wait4cards()
            print 'Select command accepted by system'
            print 'System will now reboot the GLC then the IMC'
            print 'After command completes telnet sessions to the system will be lost'
            return 0
        else:
            return "Timeout while waiting for proceed prompt"
    # Catch all for change version
    else:
        return "Version requested was not %s" % method
def upgrade(self, version, auto_corect = True):
"""Wrapper function for change_version
"""
print 'now in issu.py upgrade'
retr = change_version(self, version, 'upgrade')
# Sometimes the flash does not match on the cards
# It's easy to correct and not a situation for alarm
bad_flash = False
if auto_corect:
print 'Checking to see if there was any flash corruption.'
try:
return_code = str(retr)
except:
print 'unable to cast the return value as a string!'
return 1
if 'slot 0' in return_code:
# If it's bad correct the flash corruption
bad_flash = True
print 'Correcting flash mismatch'
command = 'flash commit 0'
self.ses.sendline(command)
retr = self.ses.expect(['PRIMARY bank copied to BACKUP bank.'], timeout = 30)
if retr == 0:
print 'Commit passed'
else:
print 'unable to correct flash problem on slot 0'
return 'Corrupt flash image on slot 0'
elif 'slot 1' in return_code:
bad_flash = True
print 'Correcting flash mismatch'
command = 'flash commit 1'
self.ses.sendline(command)
retr = self.ses.expect(['PRIMARY bank copied to BACKUP bank.'], timeout = 30)
if retr == 0:
print 'Commit passed'
else:
print 'unable to correct flash problem on slot 1'
return 'Corrupt flash image on slot 1'
elif 'slot 2' in return_code:
bad_flash = True
print 'Correcting flash mismatch'
command = 'flash commit 2'
self.ses.sendline(command)
retr = self.ses.expect(['PRIMARY bank copied to BACKUP bank.'], timeout = 30)
if retr == 0:
print 'Commit passed'
else:
print 'unable to correct flash problem on slot 2'
return 'Corrupt flash image on slot 2'
elif 'slot 3' in return_code:
bad_flash = True
print 'Correcting flash mismatch'
command = 'flash commit 3'
self.ses.sendline(command)
retr = self.ses.expect(['PRIMARY bank copied to BACKUP bank.'], timeout = 30)
if retr == 0:
print 'Commit passed'
else:
print 'unable to correct flash problem on slot 3'
return 'Corrupt flash image on slot 3'
elif 'slot 4' in return_code:
bad_flash = True
print 'Correcting flash mismatch'
command = 'flash commit 4'
self.ses.sendline(command)
retr = self.ses.expect(['PRIMARY bank copied to BACKUP bank.'], timeout = 30)
if retr == 0:
print 'Commit passed'
else:
print 'unable to correct flash problem on slot 4'
return 'Corrupt flash image on slot 4'
else:
print 'No flash corruption detected.'
# Then try to upgrade the system
if bad_flash:
print 'Attempting to upgrade the package again.'
retr = change_version(self, version, 'upgrade')
print 'now returning from issu.py upgrade'
return retr
def revert(self):
"""Wrapper function for change_version
"""
if debug:
print 'Now in issu.py revert'
version = 'NA'
retr = change_version(self, version, 'revert')
return retr
def select(self, version):
    """Wrapper around change_version() for the 'select' method.

    Selecting a package reboots the system; the result of
    change_version() is passed straight back to the caller.
    """
    return change_version(self, version, 'select')
def status(self, slot_filter='all'):
    """Run 'show upgrade status' and parse the output into a dictionary.

    slot_filter -- 'all' (default) returns the whole dictionary; a key
                   such as 'slot 0' returns just that card's sub-dict.

    Returns a dict shaped like
        {'ISSU Status': <operation or 'soak phase'>,
         'slot 0': {'version': ..., 'status': ...}, ...}
    An (almost) empty dict means no ISSU is in progress.  Returns
    'Unknown Status' when the output cannot be parsed (including the
    old ISSUv1 format, which has no version column).
    """
    debug = False
    if debug:
        print 'now in issu.py status'
    # instantiate a dictionary to store the return data
    status_dict = {}
    # get the status
    raw_output = self.cmd('show upgrade status')
    # Check for ISSUv1
    issu_v1 = False
    ## Sample output
    """
    australia[local]#show upgrade status
    01 ISSU Operation:Upgrade
    02
    03 Slot   StokeOS Ver    Upgrade Status
    04 ----   -----------    ---------------------------------------------
    05 0      4.6B1          In-Progress(Flashing Started)
    06 1      4.6B1S1        Complete
    07 2      4.6B1          Not Started
    08 3      4.6B1          Not Started
    09 4      4.6B1          Not Started
    """
    # Sometimes it looks like this
    """
    australia[local]#show upgrade status
    01 ISSU Operation:Upgrade
    02 System is currently in ISSU soak phase
    03
    04 Slot   StokeOS Ver    Upgrade Status
    05 ----   -----------    ---------------------------------------------
    06 0      4.6B1          In-Progress(Flashing Started)
    07 1      4.6B1S1        Complete
    08 2      4.6B1          Not Started
    09 3      4.6B1          Not Started
    10 4      4.6B1          Not Started
    """
    # If your running an ISSUv1 build it looks like this
    """
    01 Slot   Upgrade Status
    02 ----   --------------
    03 0      Not Started
    04 1      Not Started
    05 2      Not Started
    06 3      Not Started
    07 4      Not Started
    """
    # chop the output into lines
    output = raw_output.splitlines()
    """
    if debug:
        print 'Number or lines in output:', len(output)
        for line in output:
            print 'line:', line
    """
    if (len(output) > 2):
        # the data we care about is on lines 1, 5-9
        ## Line 1: identifies the operation (or the ISSUv1 format)
        line_1 = output[1].rstrip()
        if (len(line_1) > 2):
            #words = output[1].split()
            words = line_1.split()
            """
            if debug:
                print 'The first line contains:', words
            """
            if words[0] == 'ISSU':
                ## ('ISSU', 'Operation:Upgrade')
                issu_status = words[1].split(':')
                ## ('Operation','Upgrade')
                status_dict['ISSU Status'] = issu_status[1]
                """
                if debug:
                    print 'The status detected was', status_dict['ISSU Status']
                """
            elif 'Upgrade' in line_1:
                #status_dict['ISSU Status'] = 'upgrade'
                status_dict['status'] = 'upgrade'
            elif 'Revert' in line_1:
                #status_dict['ISSU Status'] = 'revert'
                status_dict['status'] = 'revert'
            elif 'Slot' in line_1:
                print '@@@@@@@@ Detected system running ISSUv1 @@@@@@@@@'
                print 'ISSU automation not capable of parsing the output at this time'
                # NOTE(review): issu_v1 is set here but we return immediately,
                # so the issu_v1 parsing branch below looks unreachable from
                # this path -- confirm.
                issu_v1 = True
                return 'Unknown Status'
            else:
                print 'Failure in issu.py status. Unknown status:', line_1
                print line_1
                return 'Unknown Status'
        ## Line 2: may carry the 'soak phase' notice
        line_2 = output[2].rstrip()
        if (len(line_2) > 2):
            words = line_2.split()
            if 'soak' in words:
                status_dict['ISSU Status'] = 'soak phase'
        # The length of the output changes because they remove a line of text;
        # this leads to missing card 0 sometimes.
        # We must go look for that separator line then.
        start_line = 0
        for raw_line in output:
            start_line = start_line + 1
            if '-----------' in raw_line:
                break
        """
        if debug:
            print 'The first line we care about should be:'
            print output[start_line]
        """
        if issu_v1:
            # ISSUv1 rows have no version column: slot, then status.
            for raw_line in output[start_line:]:
                if debug:
                    print 'Line to be processed is:'
                    print raw_line
                local_dict = {}
                line = raw_line.lstrip()
                words = line.split(' ',2)
                slot = "slot %s" % words[0]
                if debug:
                    print 'slot #', words[0]
                local_dict['status'] = words[2].lstrip()
                if debug:
                    print 'status:', words[2].lstrip()
                status_dict[slot] = local_dict
            if debug:
                print 'The status_dict contains:', status_dict
        else:
            ## Remaining lines
            # This odd notation means take all the lines from start_line onward
            for raw_line in output[start_line:]:
                if debug:
                    print 'Line to be processed is:'
                    print raw_line
                local_dict = {}
                #status = []
                line = raw_line.lstrip()
                # split(' ',2) keeps the status text (3rd field) intact even
                # when it contains spaces, e.g. 'Not Started'
                words = line.split(' ',2)
                local_dict['version'] = words[1]
                if debug:
                    print 'version:', words[1]
                local_dict['status'] = words[2].lstrip()
                if debug:
                    print 'status:', words[2].lstrip()
                slot = "slot %s" % words[0]
                if debug:
                    print 'slot #', words[0]
                status_dict[slot] = local_dict
                if debug:
                    print status_dict
        if debug:
            print 'The status_dict contains:', status_dict
        # we have now parsed all the data. Now to return what the user wants
        if slot_filter == 'all':
            """
            if debug:
                print 'returning the whole dictionary'
            """
            return status_dict
        elif slot_filter in status_dict.keys():
            if debug:
                print '=================================='
                print 'Detected filter on:', slot_filter
                print 'The filtered dictionary contains:'
                print status_dict[slot_filter]
                print '=================================='
            return status_dict[slot_filter]
        else:
            return "Invalid slot. Expected: %s" % status_dict.keys()
    else:
        # The ISSU is not in process. Return a Pass value of 0
        return status_dict
def install_status(self, slot_filter='all'):
    """Pull the ISSU status of an install via 'show upgrade status'.

    NOTE(review): this is a near-duplicate of status() without the ISSUv1
    handling; the two should probably be merged.

    slot_filter -- 'all' (default) returns the whole dictionary; a key
                   such as 'slot 0' returns just that card's sub-dict.

    Returns a dict shaped like
        {'ISSU Status': <operation or 'soak phase'>,
         'slot 0': {'version': ..., 'status': ...}, ...}
    An (almost) empty dict means no ISSU is in progress; 'Unknown Status'
    means the output could not be parsed.
    """
    debug = False
    if debug:
        print 'now in issu.py status'
    # instantiate a dictionary to store the return data
    status_dict = {}
    # get the status
    raw_output = self.cmd('show upgrade status')
    ## Sample output
    """
    australia[local]#show upgrade status
    01 ISSU Operation:Upgrade
    02
    03 Slot   StokeOS Ver    Upgrade Status
    04 ----   -----------    ---------------------------------------------
    05 0      4.6B1          In-Progress(Flashing Started)
    06 1      4.6B1S1        Complete
    07 2      4.6B1          Not Started
    08 3      4.6B1          Not Started
    09 4      4.6B1          Not Started
    """
    # Sometimes it looks like this
    """
    australia[local]#show upgrade status
    01 ISSU Operation:Upgrade
    02 System is currently in ISSU soak phase
    03
    04 Slot   StokeOS Ver    Upgrade Status
    05 ----   -----------    ---------------------------------------------
    06 0      4.6B1          In-Progress(Flashing Started)
    07 1      4.6B1S1        Complete
    08 2      4.6B1          Not Started
    09 3      4.6B1          Not Started
    10 4      4.6B1          Not Started
    """
    # chop the output into lines
    output = raw_output.splitlines()
    """
    if debug:
        print 'Number or lines in output:', len(output)
        for line in output:
            print 'line:', line
    """
    if (len(output) > 2):
        # the data we care about is on lines 1, 5-9
        ## Line 1: identifies the operation
        line_1 = output[1].rstrip()
        if (len(line_1) > 2):
            #words = output[1].split()
            words = line_1.split()
            """
            if debug:
                print 'The first line contains:', words
            """
            if words[0] == 'ISSU':
                ## ('ISSU', 'Operation:Upgrade')
                issu_status = words[1].split(':')
                ## ('Operation','Upgrade')
                status_dict['ISSU Status'] = issu_status[1]
                """
                if debug:
                    print 'The status detected was', status_dict['ISSU Status']
                """
            # NOTE(review): this is a plain 'if' (not elif), so a line such
            # as 'ISSU Operation:Upgrade' also overwrites the value parsed
            # above with the lowercase 'upgrade' -- confirm this is intended.
            if 'Upgrade' in line_1:
                status_dict['ISSU Status'] = 'upgrade'
            elif 'Revert' in line_1:
                status_dict['ISSU Status'] = 'revert'
            else:
                print 'Failure in issu.py status. Unknown status:', line_1
                print line_1
                return 'Unknown Status'
        ## Line 2: may carry the 'soak phase' notice
        line_2 = output[2].rstrip()
        if (len(line_2) > 2):
            words = line_2.split()
            if 'soak' in words:
                status_dict['ISSU Status'] = 'soak phase'
        # The length of the output changes because they remove a line of text;
        # this leads to missing card 0 sometimes.
        # We must go look for that separator line then.
        start_line = 0
        for raw_line in output:
            start_line = start_line + 1
            if '-----------' in raw_line:
                break
        """
        if debug:
            print 'The first line we care about should be:'
            print output[start_line]
        """
        ## Remaining lines
        # This odd notation means take all the lines from start_line onward
        for raw_line in output[start_line:]:
            """
            if debug:
                print 'Line to be processed is:'
                print raw_line
            """
            local_dict = {}
            #status = []
            line = raw_line.lstrip()
            # split(' ',2) keeps the status text (3rd field) intact even
            # when it contains spaces, e.g. 'Not Started'
            words = line.split(' ',2)
            local_dict['version'] = words[1]
            """
            if debug:
                print 'version:', words[1]
            """
            local_dict['status'] = words[2].lstrip()
            """
            if debug:
                print 'status:', words[2].lstrip()
            """
            slot = "slot %s" % words[0]
            """
            if debug:
                print 'slot #', words[0]
            """
            status_dict[slot] = local_dict
            """
            if debug:
                print status_dict
            """
        if debug:
            print 'The status_dict contains:', status_dict
        # we have now parsed all the data. Now to return what the user wants
        if slot_filter == 'all':
            """
            if debug:
                print 'returning the whole dictionary'
            """
            return status_dict
        elif slot_filter in status_dict.keys():
            if debug:
                print '=================================='
                print 'Detected filter on:', slot_filter
                print 'The filtered dictionary contains:'
                print status_dict[slot_filter]
                print '=================================='
            return status_dict[slot_filter]
        else:
            return "Invalid slot. Expected: %s" % status_dict.keys()
    else:
        # The ISSU is not in process. Return a Pass value of 0
        return status_dict
def wait_issu(self, max_time = 2400, poll_interval=5):
    """Poll the system during an upgrade/revert until ISSU completes.

    Repeatedly calls status() every poll_interval seconds and watches
    each card's 'status' field.  Completion is reached when every card
    reports done, or when the 'ISSU Status' key disappears from the
    status dictionary.  A detected auto-revert widens the set of
    card states counted as done.

    max_time      -- overall polling budget in seconds (default 2400)
    poll_interval -- seconds between polls (default 5)

    Returns 0 on completion, or an error string on status-parse failure
    or timeout.
    """
    complete = False
    issu_status = status(self)
    debug = False
    """
    if debug:
        print 'This is what we got back from the status function'
        print issu_status
    """
    try:
        card_list = issu_status.keys()
    except:
        # status() returned a string (e.g. 'Unknown Status'), not a dict
        print 'unable to parse the status of the system.'
        return 'Failed to get status'
    """
    if debug:
        print 'this is our list of keys from that dictionary'
        print card_list
    """
    number_of_cards = len(card_list)
    # 'ISSU Status' is a meta key, not a card -- don't count it.
    if issu_status.has_key('ISSU Status'):
        print 'Detected system in ISSU.'
        number_of_cards = number_of_cards - 1
    ## Debug
    #print 'Now in wait_issu function!'
    #print 'The value of debug is:', debug
    """
    if debug:
        print 'Card list contains:', card_list
        print 'Detected', number_of_cards, 'cards'
    """
    print '^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^'
    print 'Waiting for the ISSU process to complete.'
    print '^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^'
    # This line is used for the auto-revert functions. It was interfering with
    # the normal upgrade and revert functions and needs to be re-written.
    done = ['Complete', 'Not Started', 'Auto-Revert Complete']
    auto_reverting = False
    while not complete:
        time.sleep(poll_interval)
        issu_status = status(self)
        card_pass_count = 0
        # card_list is the key set from the FIRST poll; later polls are
        # indexed with those same keys.
        for card in card_list:
            if card == 'ISSU Status':
                if issu_status['ISSU Status'] == 'Complete':
                    print 'Detected ISSU status complete'
                # Need to figure out if the system is in auto revert.
                # This might be the right string.
                elif issu_status['ISSU Status'] == 'Auto Revert':
                    print 'Detected systm is Auto Reverting'
                    auto_reverting = True
                    debug = True
                else:
                    print 'ISSU Status is:', issu_status['ISSU Status']
                    if debug:
                        print 'Please look for auto revert status and update,'
                        print 'issu.py function wait_issu to include exact auto revert string'
            elif (not issu_status.has_key('ISSU Status')):
                # when that field disappears then it's done
                print '^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^'
                print '^^^^^^^ ISSU Process Complete ^^^^^^^^^^^'
                print '^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^'
                return 0
                #complete = True
                #break
            else:
                if debug:
                    print 'checking' , card, 'for status'
                #if issu_status[card]['status'] == 'Complete':
                if debug:
                    print 'About to see if the card:', card, 'is in one of these states:', done
                    print 'ISSU status for this card is reported as:', issu_status[card]['status']
                    print card, ':', issu_status[card]['status']
                # This branch is for normal upgrade and revert.
                # NOTE(review): 'x in "Complete"' is a substring test, not an
                # equality test -- e.g. 'Comp' would also match; confirm the
                # device only ever reports the exact word here.
                if issu_status[card]['status'] in 'Complete':
                    if debug:
                        print '!!!! Detected card complete !!!!'
                    #print 'card', card, 'Complete'
                    card_pass_count = card_pass_count + 1
                # If the system is in auto-revert then a "not-started" is also done.
                # Code needs to be updated to fix this.
                elif 'Auto-Revert' in issu_status[card]['status']:
                    print 'Detected system in Auto-Revert via card status'
                    auto_reverting = True
                # This branch is for auto-reverting
                elif auto_reverting:
                    if debug:
                        print 'Now in auto-revert detection loop'
                    if issu_status[card]['status'] in done:
                        if debug:
                            print '!!!! Detected card complete !!!!'
                        card_pass_count = card_pass_count + 1
                    else:
                        print 'Card was not Done. It was:', issu_status[card]['status']
                else:
                    if debug:
                        print 'Card was not done ---'
                        print card, 'Status:', issu_status[card]['status']
                    #break
        print 'Card pass rate:', card_pass_count
        if card_pass_count == number_of_cards:
            print 'Detected all cards complete'
            complete = True
        # All cards done but still soaking -> keep polling.
        if issu_status.has_key('ISSU Status'):
            if issu_status['ISSU Status'] == 'soak phase':
                print 'Detected Soak Phase. Waiting for soak to complete.'
                complete = False
        # timer to make sure the polling will eventually finish
        max_time = max_time - poll_interval
        print 'Time left:', max_time
        if max_time < 1:
            print 'Maximum polling time was exceeded!'
            print 'System never completed ISSU'
            return 'Timeout whill polling. Excessive time'
    return 0
def install_base(self, base_version, username = 'regress', password = 'gleep7', linux_ip = '10.1.1.101'):
    """Return the running system to a known base version prior to
    beginning normal ISSU testing.  Uses install() and select() to do
    this.

    self         = the SSX session object
    base_version = dict like {'package_name':'4.7', 'build':'2010022188',
                   'tree':'4.7'}
    username/password/linux_ip = SFTP source for install()

    Returns 0 on success (or if already at the base version), otherwise
    the error value from install()/select().
    """
    # Two branches:
    # 1. If the version it's running now is not the same version then we can
    #    simply install the base version and select over to it.
    # 2. The other possibility is the version it's running is not the same build ID,
    #    meaning you are testing a newer or older build of the same version.
    #    To install and select to this we must:
    #    A. Install a known good older version
    #    B. Select down to that version
    #    C. Uninstall the old package
    #    D. Install the new package
    #    E. Select to the new package
    #    (branch 2 is currently NOT implemented -- see the 'pass' below)
    # NOTE(review): 'debug' is not defined locally in this function (unlike
    # the others in this module); this relies on a module-level 'debug'
    # existing, otherwise these checks raise NameError -- confirm.
    if debug:
        print '----------------------------'
        print 'now in issu.py install_base'
    if not(username == 'regress'):
        print 'Non default username detected:', username
    if not(password == 'gleep7'):
        print 'Non default password detected:', password
    if not (linux_ip == '10.1.1.101'):
        print 'Non default linux_ip detected:', linux_ip
    running_ver = self.get_version()
    print 'System will be selected back to the base version'
    print 'Base version is:', base_version
    print 'running version is:', running_ver
    if debug:
        print 'testing for:', running_ver['branch'], '=', base_version['package_name']
    if (running_ver['branch'] == base_version['package_name']):
        if debug:
            print 'Detected that the running version name is the same as the base'
        # If the version we want to install is the same as the running version but the
        # build ID is different then we need to do case 2 above.
        if not (running_ver['build'] == base_version['build']):
            if debug:
                print 'Build ID is different then running version.'
                print 'System will:'
                print '1. Select to older version'
                print '2. Remove old build'
                print '3. Install base version'
                print '4. Select to base version'
            ## Write code here!
            pass
        else:
            # If the package name and the build ID are the same then we're already
            # running the correct version. Just return.
            print 'The build ID is also the same. System now at base version.'
            return(0)
    else:
        # This is the simpler path, case 1 above
        if debug:
            print 'The system is not running the base version.'
            print 'Sysetm will be installed with base version'
        ##########
        # Install
        print("About to install version %s" % base_version)
        retr = install(self, tree = base_version['tree'], \
                       build = base_version['build'], \
                       package_name = base_version['package_name'], \
                       username = username, \
                       password = password, \
                       linux_ip = linux_ip)
        print("install returned %s" % retr)
        if retr:
            print 'Something went wrong. Returning'
            return retr
        ########
        # Select (this reboots the system)
        print 'Base version now installed.'
        print 'Selecting to base version (reboot)'
        retr = select(self, base_version['package_name'])
        if retr:
            print 'Something went wrong. Returning'
            return retr
        else:
            return 0
        """
        print 'System performing select now.'
        print 'Please reconnect after reload'
        return 0
        """
    """
    reboot_time = 120
    print("waiting for the system to finish rebooting: %s seconds" % reboot_time)
    time.sleep(reboot_time)
    rebooting = True
    retries = 20
    while rebooting:
        print('Sleeping for 30 seconds')
        time.sleep(30)
        try:
            print 'Connecting to SSX'
            self.ssx.telnet()
            print 'Made it past the telnet command'
            # if that command does not fail then the rebooting state should change
            rebooting = False
        except:
            print('System not up yet')
            retries = retries - 1
            print("%s retries left" % retries)
            if retries == 0:
                print("System never came back up after select!")
                sys.exit(1)
    print 'Completed Select to base version'
    """
    return 0
def check_session_traffic(self, username_list = 'all', poll_time = 10):
    """Poll per-session traffic counters twice and report which sessions
    are actively passing traffic.

    This function will use the session_counters method to pull all the active sessions.
    Then it will check session by session to see if the counters are increasing.
    We expect some sessions will not be sending traffic. To handle that the function accepts
    a list of sessions to check. The calling program is responsible to remove items from that
    list once they have been detected to no longet be sending traffic.

    username_list -- 'all' (default) to examine every session, or a list of
                     usernames to restrict the check to.
    poll_time     -- seconds to wait between the two counter polls.

    Returns a dict keyed by username with the per-session values 'Active',
    'Xmit only', 'Rcv only' or 'Inactive', plus the summary keys
    'Active Sessions', 'Inactive Sessions', 'Percent Active' and
    'Percent Inactive'.  When no (matching) sessions exist the string
    'No Sessions are active!' is returned instead.
    """
    # Accumulate the result here.
    # At the end filter based on requested usernames.
    result = {}
    if username_list == 'all':
        print 'All Sessions will be examined'
        print 'Polling the session counters'
        baseline = session_counters(self)
        print 'Waiting:', poll_time, 'seconds'
        time.sleep(poll_time)
        delta = session_counters(self)
        print 'Computing the delta'
    else:
        print 'Select sessions will be examined'
        print 'Polling the session counters'
        # This is all the data
        raw_baseline = session_counters(self)
        print 'Waiting:', poll_time, 'seconds'
        time.sleep(poll_time)
        # this is all the data
        raw_delta = session_counters(self)
        print 'Computing the delta'
        baseline = {}
        delta = {}
        # Now we filter it before doing accounting
        for username in raw_baseline:
            if username in username_list:
                baseline[username] = raw_baseline[username]
        # And we filter the detla as well.
        for username in raw_delta:
            if username in username_list:
                delta[username] = raw_delta[username]
    if len(baseline):
        print 'Found sessions'
    else:
        print 'No Sessions are active!'
        return 'No Sessions are active!'
    # At this point we have all the data required we just need to parse it.
    session_list = baseline.keys()
    print 'The following sessions will be parsed:', session_list
    active_sessions = 0
    inactive_sessions = 0
    total_sessions = len(session_list)
    print 'Detected', total_sessions, 'Sessions'
    for username in session_list:
        if delta.has_key(username):
            # Reset the local variables
            xmit_active = False
            rcv_active = False
            # Check the TX
            if baseline[username]['Xmit Bytes'] < delta[username]['Xmit Bytes']:
                xmit_active = True
            # Check the RX
            if baseline[username]['Rcv Bytes'] < delta[username]['Rcv Bytes']:
                rcv_active = True
            # Store the results.
            if xmit_active and rcv_active:
                result[username] = 'Active'
                active_sessions = active_sessions + 1
            elif xmit_active:
                result[username] = 'Xmit only'
                active_sessions = active_sessions + 1
            elif rcv_active:
                result[username] = 'Rcv only'
                active_sessions = active_sessions + 1
            else:
                result[username] = 'Inactive'
                inactive_sessions = inactive_sessions + 1
        else:
            # Session present in the first poll but gone in the second.
            print 'The following session dropped out while polling'
            print baseline[username]
            # We'll count that dropped one as an inactive session
            inactive_sessions = inactive_sessions + 1
    result['Active Sessions'] = active_sessions
    result['Inactive Sessions'] = inactive_sessions
    # note the variable must be cast as a float to actually get a decimal result.
    result['Percent Active'] = 100 * (float(active_sessions) / total_sessions)
    result['Percent Inactive'] = 100 * (float(inactive_sessions) / total_sessions)
    return result
def show_card_state(self):
    """Run "show card" and parse the output into a dictionary.

    The returned dictionary has one 'slot N' key per card found; each
    value is a dict with the keys: type, state, serial_number,
    model_name and hw_rev.  A top level 'Status' key is always present:
    'Complete' on success, 'Error' when the output could not be parsed.
    Example entry:
        'slot 0': {'serial_number': '0020905420820003',
                   'state': 'Running(Active)',
                   'hw_rev': '09.05',
                   'type': 'IMC1',
                   'model_name': 'Stoke IMC1'}
    NOTE: Dictionary is not Sorted
    """
    debug = False
    status_dict = {}
    command = "show card"
    raw_card_response = self.cmd(command)
    # Robustness fix: the original indexed line 1 without checking it
    # existed and fell through reporting 'Complete' on an empty response.
    if len(raw_card_response) == 0:
        print('Got no output back from: show card')
        status_dict['Status'] = 'Error'
        return status_dict
    card_response = raw_card_response.splitlines()
    if len(card_response) < 2:
        print('Output from "show card" was too short to parse:')
        print(raw_card_response)
        status_dict['Status'] = 'Error'
        return status_dict
    # Line 0 is the echoed command; line 1 holds any error message.
    if 'ERROR:' in card_response[1]:
        print('Detected an error when running: show card')
        print('Returned text was:')
        print(raw_card_response)
        status_dict['Status'] = 'Error'
        return status_dict
    if debug:
        print('The following lines will be processed:')
        print(card_response[3:])
        print('======================================')
    # We don't really want the echoed command and two header lines so we
    # omit them.
    for line in card_response[3:]:
        if debug:
            print('This is the line to process: %s' % line)
        words = line.split()
        local_dict = {}
        if len(words) == 7:
            # Seven fields: the model name is two words (e.g. "Stoke IMC1").
            slot = words[0]
            local_dict['type'] = words[1]
            local_dict['state'] = words[2]
            local_dict['serial_number'] = words[3]
            local_dict['model_name'] = words[4] + ' ' + words[5]
            local_dict['hw_rev'] = words[6]
        elif len(words) == 6:
            # Six fields: single word model name (e.g. "4x1000Base-X").
            slot = words[0]
            local_dict['type'] = words[1]
            local_dict['state'] = words[2]
            local_dict['serial_number'] = words[3]
            local_dict['model_name'] = words[4]
            local_dict['hw_rev'] = words[5]
        else:
            print('This line has too many/few elements %s' % len(words))
            print(words)
            status_dict['Status'] = 'Error'
            return status_dict
        current_slot = 'slot ' + slot
        status_dict[current_slot] = local_dict
    status_dict['Status'] = 'Complete'
    return status_dict
def wait_for_cards(self, timeout = 360, poll_time = 10):
    """Waits for ALL cards to come to a running state by polling the system.
    This is a rewrite of device.py wait4cards and should be used as a replacement.

    timeout   -- maximum total seconds to poll before giving up.
    poll_time -- seconds to sleep between polls.

    Returns 0 once every card (all keys of show_card_state except the
    bookkeeping 'Status' entry) is in a Running state, or an error string
    on retrieval failure / timeout.
    """
    debug = False
    if debug:
        print 'now in issu.py wait_for_cards'
    print 'System is now waiting for all cards to come to a running state'
    print 'Status will be updated every', poll_time, 'seconds'
    # Any of these states counts as "running".
    running = ['Running(Active)','Running(Standby)', 'Running']
    total_wait = 0
    # This will run until either the timout is reached or an error occurs or all cards
    # come to a running state
    while True:
        print '------------------------'
        running_card_count = 0
        running_card_list = []
        current_card_state = show_card_state(self)
        if current_card_state['Status'] == 'Complete':
            if debug:
                print 'was able to retrieve current card state'
                print 'now processing'
            card_list = current_card_state.keys()
            if debug:
                print 'Detected the following cards:', card_list
            for card in card_list:
                # Skip the bookkeeping 'Status' entry -- it is not a card.
                if not (card == 'Status'):
                    card_state = current_card_state[card]['state']
                    if card_state in running:
                        #print card, 'Has come to running state'
                        running_card_count = running_card_count + 1
                        running_card_list.append(card)
            # len(card_list) - 1 accounts for the 'Status' key.
            if running_card_count == (len(card_list) - 1):
                print 'All cards have come to running state.'
                print 'Total wait time was', total_wait
                return 0
        else:
            return 'Failed to retrieve card state'
        try:
            print 'ISSU Status:', current_card_state['Status']
        except:
            print 'No ISSU Status to report'
        print 'The following cards are running', running_card_list
        print 'Elapsed time:', total_wait, 'seconds'
        time.sleep(poll_time)
        total_wait = total_wait + poll_time
        timeout = timeout - poll_time
        if timeout < 1:
            return 'Timeout while polling system'
def all_cards_running(self, debug=False):
    """Uses the method show_card_state to verify all cards are running.

    Designed as a simple check to be run at the beginning/end of tests.
    Does not wait for cards.  Returns True/False.
    If you want to see some output use the debug option!
    """
    if debug:
        print('now in issu.py method all_cards_running')
    card_state = show_card_state(self)
    # Bug fix: when show_card_state reports 'Error' the dictionary holds
    # no cards, so after deleting 'Status' the loop body never ran and
    # the original returned True.  Report failure instead.
    if card_state.get('Status') == 'Error':
        if debug:
            print('Unable to retrieve card state. FAIL')
        return False
    # We don't need this bookkeeping record -- it is not a card.
    del card_state['Status']
    if debug:
        print('here is the raw dictionary')
        print(card_state)
        print('here is the card information')
    for card in card_state:
        if debug:
            print(card_state[card])
        # Any of Running / Running(Active) / Running(Standby) passes.
        if 'Running' in card_state[card]['state']:
            if debug:
                print('Card: %s is in running state' % card)
        else:
            if debug:
                print('Card %s is NOT in running state. FAIL' % card)
            #self.fail("Card %s is NOT in running state" % card)
            return False
    return True
def kill_pid(self, raw_pid='none', raw_slot=0):
    """Force a coredump of a process, identified by PID, on a given slot.

    raw_pid  -- process id (int or numeric string); required.
    raw_slot -- slot number 0-4 (int or numeric string), default 0.

    Returns 0 on success, or an error string on bad input / failure.
    """
    # Bug fix: 'debug' was referenced below but never defined, so every
    # call raised NameError.
    debug = False
    slot_range = [0, 1, 2, 3, 4]
    # Validate the PID.
    if raw_pid == 'none':
        return 'No PID Provided!'
    try:
        pid = int(raw_pid)
    except:
        print('PID value not an Integer: %s' % raw_pid)
        return 'Non integer value for PID'
    # Validate the slot.  The original fell through on both of these
    # failures and tried to build the command anyway.
    try:
        slot = int(raw_slot)
    except:
        print('Invalid value for slot: %s' % raw_slot)
        print('Was expecting an integer.')
        return 'Non integer value for slot'
    if slot not in slot_range:
        print('Invalid value for slot: %s' % slot)
        print('Must be in range: %s' % slot_range)
        return 'Invalid slot'
    # Build the command and answer the confirmation prompt.
    command = 'process coredump ' + str(slot) + ' ' + str(pid)
    if debug:
        print('The command will be: %s' % command)
    self.ses.sendline("%s" % command)
    index = self.ses.expect(['Continue'], timeout=30)
    print(self.ses.before)
    if index == 0:
        self.cmd('yes')
    else:
        print('Failed to send core dump command!')
        return 'Failed'
    return 0
def list_ike_sessions(self, slot = 'all'):
    """Uses "show ike-session list" or "show ike-session SLOT_NUMBER list"
    to get ike-session details. Then returns the output

    slot -- 0-4 to query a single slot, or 'all' (default) for every slot.

    Returns a list of dictionaries, one per session, keyed by the field
    names in expected_values below, or an explanatory string when no
    sessions exist.
    """
    debug = False
    slot_range = [0,1,2,3,4,'all']
    # We will accumulate all the sesion information into this list
    return_session_list = []
    expected_values = ['SLOT','Session Handle','IKE Version','Remote IP',\
        'IKE-SA ID','Session Addr','Session State']
    # Example input
    """
    australia[local]#show ike-session list
    01 Mon Jun 21 16:11:20 PDT 2010.
    02
    03 -------------------------------------------------------------------------------
    04 SLOT : 2
    05 Session Handle : fc440200
    06 IKE Version : 2
    07 Remote IP : 10.11.2.1
    08 IKE-SA ID : 16502102800650210@r2
    09 Session Addr : 172.1.0.1
    10 Session State : IPSEC-ESTABLISHED, IKE-SA DONE, CHILD-SA MATURE
    11 -------------------------------------------------------------------------------
    12
    13 -------------------------------------------------------------------------------
    14 SLOT : 3
    15 Session Handle : f4480200
    16 IKE Version : 2 <LAN<->LAN>
    17 Remote IP : 10.11.3.1
    18 IKE-SA ID : sswan
    19 Session State : IPSEC-ESTABLISHED, IKE-SA DONE, CHILD-SA MATURE
    20 -------------------------------------------------------------------------------
    21
    """
    # Example return value:
    """
    [{'SLOT': ' 2', 'Session Addr': ' 172.1.0.1', 'IKE-SA ID': ' 16502102800650210@r2',
    'IKE Version': ' 2', 'Session Handle': ' fc440201', 'Remote IP': ' 10.11.2.1',
    'Session State': ' IPSEC-ESTABLISHED, IKE-SA DONE, CHILD-SA MATURE'}]
    """
    if not (slot in slot_range):
        print 'Invalid Slot ID provided for filtering:', slot
        # NOTE(review): this returns a (message, slot) TUPLE, unlike the
        # other error paths which return plain strings -- confirm callers
        # expect that.
        return 'Invalid Slot ID provided for filtering:', slot
    if slot == 'all':
        command = 'show ike-session list'
    else:
        command = 'show ike-session ' + str(slot) + ' list'
    if debug:
        print 'The command will be:', command
    raw_session_list = self.cmd(command)
    session_list = raw_session_list.splitlines()
    if debug:
        print 'The raw data returned from the command was:'
        print raw_session_list
    if session_list[1] == 'ERROR: No sessions found on any Card':
        print 'No Sessions present'
        return 'No Sessions present'
    # So we know that the first line which is line 0 is thrown away by our cmd API
    # The first available line is line 1 which contains the date. We don't want that.
    # Line 2 contains a space which is also useless to us.
    # So we'll start parsing at line 3
    in_block = False
    local_session_dict = {}
    for line in session_list[2:]:
        # Look for the start.
        if '---' in line:
            if in_block == True:
                # If we find a second one it's the end
                in_block = False
                # Now we need to stuff this info into the list we return
                if debug:
                    print 'Appending the local_sesions_dict containing:'
                    print local_session_dict
                    print 'To the return_session_list which contains:'
                    print return_session_list
                return_session_list.append(local_session_dict)
                if debug:
                    print 'Found the end of the block'
                # Flush the local_session_dict for the next block
                local_session_dict = {}
            else:
                if debug:
                    print 'Found the beging of the block'
                in_block = True
        elif in_block:
            # Inside a session block: each line is "Name : value".
            # NOTE(review): split(':') also splits colons inside the value;
            # only words[1] is kept, so any value containing ':' would be
            # truncated -- none of the expected fields appear to.
            words = line.split(':')
            if debug:
                print 'Split words are:', words
            paramater = words[0].rstrip()
            if debug:
                print 'Stripped paramater is:', paramater
            if paramater in expected_values:
                # We simply store it in a local dictionary indexed on it's name
                if debug:
                    print 'Found a paramater we expected:', paramater
                    print 'Storing it in the local_session_dict'
                local_session_dict[paramater] = words[1].lstrip()
                if debug:
                    print 'The local_session_dict contains:', local_session_dict
            else:
                print 'Got back a value we did not expect:', words[0]
                print 'Please modify issu.py list_ike_sessions expected_values list to include this!'
        """
        else:
            print 'line contains:', line
        """
    print 'Succesfully parsed session list'
    return return_session_list
def list_tunnels(self):
    """Parse the output of "show tunnel" into a list of dictionaries.

    Each configured tunnel yields one dict with the keys:
    name, CctHdl, type, admin, state.  Example:
        [{'CctHdl': 'ce000002', 'admin': 'enable', 'state': 'up',
          'type': 'lan2lan:ip44', 'name': 'tun1'}]

    Returns the (possibly empty) list on success, or the string
    'No tunnels configured' when the output is too short to parse.
    """
    output = self.cmd('show tunnel')
    lines = output.splitlines()
    # Expected layout: [0] echoed command, [1] column headers,
    # [2] separator row, [3:-1] one row per tunnel,
    # [-1] "N objects displayed."
    if len(lines) < 4:
        print('Detected no tunnels configured!')
        print('Please review this raw ouptut.')
        print(output)
        return 'No tunnels configured'
    tunnels = []
    for row in lines[3:len(lines) - 1]:
        fields = row.split()
        tunnels.append({'name': fields[0],
                        'CctHdl': fields[1],
                        'type': fields[2],
                        'admin': fields[3],
                        'state': fields[4]})
    return tunnels
def valid_month(month):
    """Return True when *month* is one of the three letter month
    abbreviations held in the module level month_list ("Jan", "Feb", ...),
    otherwise False.
    """
    # Simple membership test against the module level month list; the
    # original diagnostic prints were all debug-gated and never ran.
    return month in month_list
def valid_day_of_month(day_of_month):
    """Validate a day-of-month string such as "23".

    day_of_month -- day of the month as a string, "1" through "31".

    Returns True when valid, False otherwise.
    """
    debug = False
    # Bug fix: the empty-string branch was unreachable in the original --
    # int('') raises first, so check for a missing value up front.
    if len(day_of_month) == 0:
        print('No day of month value provided')
        return False
    try:
        num_day = int(day_of_month)
    except:
        print('Day of month is not an integer. OOPS!')
        return False
    if num_day not in range(1, 32):
        print('invalid number for day_of_month: %s' % day_of_month)
        return False
    if debug:
        print('Valid day of month detected: %s' % day_of_month)
    return True
def valid_hour(hour):
    """Validate an hour-of-day string such as "12".

    hour -- hour as a string, "0" through "23".

    Returns True when valid, False otherwise.
    """
    debug = False
    # Bug fix: the empty-string branch was unreachable in the original --
    # int('') raises first, so check for a missing value up front.
    if len(hour) == 0:
        print('No hour value provided!')
        return False
    try:
        num_hour = int(hour)
    except:
        print('Hour is not an integer: %s' % hour)
        return False
    if num_hour not in range(0, 24):
        print('There are only 24 hours in the day. Value too large!')
        return False
    if debug:
        print('Valid hour detected: %s' % hour)
    return True
def valid_minute(minute):
    """Return True when *minute* is a valid two digit minute string such
    as "01" or "24", otherwise False.
    """
    # Must parse as an integer...
    try:
        value = int(minute)
    except:
        print('Non numeric value for minute caught: %s' % minute)
        return False
    # ...fall in 0-59...
    if value not in range(0, 60):
        print('Only 60 mintues in an hour. Invalid minute value caught: %s' % minute)
        return False
    # ...and be exactly two digits ("5" is rejected, "05" accepted).
    if len(minute) != 2:
        print('minute must contain two digits: %s' % minute)
        return False
    return True
def valid_second(seconds):
    """Validate a two digit seconds string such as "01" or "24".

    seconds -- seconds as a string, "00" through "59" (exactly two digits).

    Returns True when valid, False otherwise.
    """
    debug = False
    try:
        num_seconds = int(seconds)
    except:
        print('Non numeric value for seconds caught: %s' % seconds)
        return False
    if num_seconds not in range(0, 60):
        # Bug fix: the original message was copy-pasted from the minute
        # validator ("Only 60 mintues in an hour").
        print('Only 60 seconds in a minute. Invalid seconds value caught: %s' % seconds)
        return False
    if len(seconds) != 2:
        print('seconds must contain two digits: %s' % seconds)
        return False
    if debug:
        print('Valid second detected: %s' % seconds)
    return True
def validIP(address):
    """Return True when *address* is a usable dotted-quad IPv4 address:
    four integer octets, the first in 1-254 and the remaining three in
    0-254.  Anything else (wrong part count, non-numeric octets,
    non-string input) returns False.
    """
    # A non-string input has no split() -- treat that as invalid.
    try:
        octets = address.split(".")
    except:
        return False
    if len(octets) != 4:
        return False
    # The leading octet may not be 0 (or 255).
    try:
        first = int(octets[0])
    except:
        return False
    if not 1 <= first <= 254:
        return False
    # The remaining octets may be 0-254.
    for octet in octets[1:]:
        try:
            value = int(octet)
        except:
            return False
        if not 0 <= value <= 254:
            return False
    return True
def pull_syslog(self, clock):
    """
    Pulls the information available from "show log" and filters based on date/time

    clock -- dict of strings with the keys 'month' (three letter name),
             'day_of_month', 'hour' and 'minute', matching the fields of
             a split "show clock" value.

    Returns a list of log lines at or after the given time, or an error
    string when the input is invalid or the log could not be retrieved.
    """
    debug = False
    ###################
    ## Input Validation
    # We need to first make sure that the incoming filter list contains all the fields we need!
    ########
    ## Month
    if clock.has_key('month'):
        if valid_month(clock['month']):
            if debug:
                print 'Filtering on Month', clock['month']
        else:
            print 'Invalid month detected:', clock['month']
            return 'Invalid Month: ' + clock['month']
    else:
        print 'Month option not detected. Must be present'
        return 'value "month" not set'
    ###############
    ## Day of month
    if clock.has_key('day_of_month'):
        if valid_day_of_month(clock['day_of_month']):
            if debug:
                print 'Filtering on day of month', clock['day_of_month']
        else:
            print 'Invalid day of month provided:', clock['day_of_month']
            return 'Invalid day of month provided: ' + clock['day_of_month']
    else:
        print 'no day_of_month value provided!'
        return 'no day_of_month value provided!'
    #######
    ## Hour
    # NOTE(review): unlike month/day/minute there is no else branch here,
    # so a missing 'hour' key is silently accepted -- confirm intended.
    if clock.has_key('hour'):
        if valid_hour(clock['hour']):
            if debug:
                print 'Filtering on hour', clock['hour']
        else:
            print 'Invalid hour detected', clock['hour']
            return 'Invalid hour detected ' + clock['hour']
    #########
    ## Minute
    if clock.has_key('minute'):
        if valid_minute(clock['minute']):
            if debug:
                print 'Filtering on minute', clock['minute']
        else:
            print 'Invalid minute value provided:', clock['minute']
            return 'Invalid minute value provided:' + clock['minute']
    else:
        print 'No minute value found!'
        return 'no minute value found'
    #################################
    ## Retrieving the Log information
    # The raw log lines look like this:
    """
    Jul 19 10:43:40 [0] DEBUG Aaad-HA_SESSION_BUFF_LOAD_SUCCESS-1-0x4400d: Successfully loaded session buff type 1.
    """
    # To be able to parse the log based on date/time we need:
    # month, day_of_month, raw_long_time
    # There is a problem with the time!
    # We need thing that happened after the start time
    #command = "show log | begin " + '"' + clock['month'] + ' ' + clock['day_of_month'] + '"'
    command = "show log | begin " + '"' + clock['month'] + ' ' \
        + clock['day_of_month'] + ' ' + clock['hour'] + ':' + clock['minute'] + '"'
    if debug:
        print ("The command will be: %s" % command)
    self.ses.sendline(command)
    raw_log = ''
    raw_log_lines = []
    collecting_input = True
    # Read the (paged) output chunk by chunk: index 0 matches the pager
    # prompt (more to come), index 1 the enable prompt (done).
    while collecting_input:
        retr = self.ses.expect([':$', enable_prompt_regex], timeout = 10)
        if retr == 0:
            raw_log = self.ses.before
            raw_lines = raw_log.splitlines()
            raw_log_lines += raw_lines
            if debug:
                print '-------------------------------'
                print 'We got some input. Here it is!'
                print 'it\'s', len(raw_log), 'raw characters'
                print 'it\'s', len(raw_lines), 'lines of text'
                print 'total is now', len(raw_log_lines)
                #print raw_log_lines
                print 'more input to capture'
        elif retr == 1:
            if debug:
                print 'back the prompt'
            raw_log = self.ses.before
            raw_lines = raw_log.splitlines()
            raw_log_lines += raw_lines
            collecting_input = False
            if debug:
                print '-------------------------------'
                print 'This is the last bit of input'
                print 'We got some input. Here it is!'
                print 'it\'s', len(raw_log), 'raw characters'
                print 'it\'s', len(raw_lines), 'lines of text'
                print 'total is now', len(raw_log_lines)
        else:
            print 'Timeout while retrieving logs. OOPS!'
            return 'timeout while retrieving logs'
    if len(raw_log_lines) < 2:
        print 'Not enough lines caught! Here is what we did get back'
        print raw_log_lines
        return 'No log retrieved'
    if debug:
        print 'Got the log back!'
        print 'there are', len(raw_log_lines), 'lines to parse'
        print 'Here are the first three of them'
        print raw_log_lines[1]
        print raw_log_lines[2]
        print raw_log_lines[3]
    print("Searching for log events after the start time")
    print("---------------------------------------------")
    ###############################
    ## Parse the lines from the log
    # 1. Try to parse the line and detect the date/time header
    # a. If that succeeds we hold the line in escrow in case there is more on the next line
    # aa. If there is already a line in escrow we save it to the return list
    # b. If that fails we join the current line to the line in escrow and store it
    # bb. If the old line was only 3 words long we add the ":" back in
    # This is the container we return the data in
    ret_list = []
    discarded_lines = 0
    broken_line = False
    escrow_line = ''
    for line in raw_log_lines[1:]:
        # Check for empty line
        if len(line) > 0:
            if debug:
                print("------------------------------------")
                print("The raw line is:")
                print(line)
            # Cut the line into words
            words = line.split()
            ############################################
            ## Peace back together word missing ":" case
            if broken_line:
                if debug:
                    print 'This should be the other half of the line'
                    print escrow_line
                    print 'The complete line should be:'
                    print escrow_line, line
                # Here we have the first fragmenet and we join it to the other half
                escrow_words = escrow_line.split()
                if len(escrow_words) == 3:
                    if debug:
                        print 'We caught the special case where the ":" is missing.'
                    word_three = escrow_words[2] + ':' + words[0]
                    if debug:
                        print 'our assembled third word is now', word_three
                    head = escrow_words[0], escrow_words[1], word_three
                    if debug:
                        print 'the first three words should now be:', head
                    tail = words[1:]
                    # NOTE(review): this builds a nested 2-tuple
                    # (head_tuple, tail_list), not a flat word list --
                    # confirm the downstream indexing is intended.
                    words = head, tail
                    if debug:
                        print 'The full line should now be:'
                        print words
                    # We fixed the broken line
                    broken_line = False
                    # and we took the three words out of escrow
                    escrow_line = ''
            ##############################
            ## Parse the month date header
            try:
                month_log = words[0]
                if not (valid_month(month_log)):
                    if debug:
                        print 'Invalid month detected'
                    raise
                day_of_month_log = words[1]
                if not (valid_day_of_month(day_of_month_log)):
                    if debug:
                        print 'Invalid day of month detected'
                    raise
                raw_time_log = words[2]
                if debug:
                    print 'parsing raw_time:', raw_time_log
                long_time_log = raw_time_log.split(":")
                if debug:
                    print 'the long_time_log contains:', long_time_log
                if not (len(long_time_log) == 3):
                    if debug:
                        print 'detected invalid time format:'
                        print long_time_log
                    raise
                hour_log = long_time_log[0]
                if not (valid_hour(hour_log)):
                    if debug:
                        print 'Invalid hour detected'
                    raise
                minute_log = long_time_log[1]
                if not (valid_minute(minute_log)):
                    if debug:
                        print 'Invalid minute detected'
                    raise
                second_log = long_time_log[2]
                if not (valid_second(second_log)):
                    if debug:
                        print 'invalid second detected'
                    raise
                # We don't care about this stuff at this time but it could be
                # parsed in the future.
                logs_per_second_log = words[3]
                log_type = words[4]
                log_deamon = words[5]
                log_msg_type = words[6]
                log_message = words[7:]
            except:
                if debug:
                    print 'Unable to parse this line:'
                    print line
                    print 'It is probably part of the previous line'
                # Yep it's broken somehow! Either
                # 1. It's missing it's ":" "special case"
                # 2. It is so long it linewrapped.
                broken_line = True
                # We store the fragment in escrow
                escrow_line = line
                #ret_list.append(line)
            if debug:
                print 'Succesfully parsed the date/time header'
            #####################################
            ## Filter the line based on date time
            # NOTE: all comparisons below are STRING comparisons on the
            # zero-padded fields, not numeric ones.
            if not broken_line:
                # Ok now the log is parsed we need to compare the dat time
                if debug:
                    print("The month is: %s" % month_log)
                    print("looking for a month greater then: %s" % clock['month'])
                if clock['month'] == month_log:
                    # Bug here it won't pass the end of the month to the next month
                    if debug:
                        print("The day is: %s" % day_of_month_log)
                        print("Looking for a day greater then: %s" % clock['day_of_month'])
                    if clock['day_of_month'] <= day_of_month_log:
                        if debug:
                            print("The hour is: %s" % hour_log)
                            print("Looking for an hour greater then: %s" % clock['hour'])
                        if clock['hour'] <= hour_log:
                            if debug:
                                print("The minute is: %s" % minute_log)
                                print("Looking for a minute greater then: %s" % clock['minute'])
                            if clock['minute'] <= minute_log:
                                # At this point we got a good line.
                                # If we had something in escrow we need to flush it to return_list
                                if len(escrow_line) > 0:
                                    ret_list.append(escrow_line)
                                    if debug:
                                        print 'We now have a complete line that we are flusing to the return list:'
                                        print escrow_line
                                        print 'clearing the escrow'
                                    # NOTE(review): after flushing the
                                    # escrow the CURRENT line is not put
                                    # into escrow -- verify it is not lost.
                                    escrow_line = ''
                                # It's possible for the line to have been split onto two lines
                                # We will hold the line in escrow in case we catch the other half.
                                else:
                                    if debug:
                                        print 'We have a good line. Saving it in escrow in case we find more parts of it'
                                    escrow_line = line
                            elif clock['hour'] < hour_log:
                                # At this point we got a good line.
                                # If we had something in escrow we need to flush it to return_list
                                if len(escrow_line) > 0:
                                    ret_list.append(escrow_line)
                                    if debug:
                                        print 'We now have a complete line that we are flusing to the return list:'
                                        print escrow_line
                                        print 'clearing the escrow'
                                    escrow_line = ''
                                # It's possible for the line to have been split onto two lines
                                # We will hold the line in escrow in case we catch the other half.
                                else:
                                    if debug:
                                        print 'We have a good line. Saving it in escrow in case we find more parts of it'
                                    escrow_line = line
                            else:
                                if debug:
                                    print 'The following line was not saved becuase it is before the minute we want'
                                    print line
                                discarded_lines += 1
                        elif clock['day_of_month'] < day_of_month_log:
                            # At this point we got a good line.
                            # If we had something in escrow we need to flush it to return_list
                            if len(escrow_line) > 0:
                                ret_list.append(escrow_line)
                                if debug:
                                    print 'We now have a complete line that we are flusing to the return list:'
                                    print escrow_line
                                    print 'clearing the escrow'
                                escrow_line = ''
                            # It's possible for the line to have been split onto two lines
                            # We will hold the line in escrow in case we catch the other half.
                            else:
                                if debug:
                                    print 'We have a good line. Saving it in escrow in case we find more parts of it'
                                escrow_line = line
                        else:
                            if debug:
                                print 'The following line was not saved becuase it is before the hour we want'
                                print line
                            discarded_lines += 1
                    else:
                        if debug:
                            print 'The following line was not saved becuase it is before the day of month we want'
                            print line
                        discarded_lines += 1
                else:
                    if debug:
                        print 'The following line was not saved becuase it is before the month we want'
                        print line
                    discarded_lines += 1
            #####################################################################################
            ## concatenate the linewrapped line to the escrow line and add it to the return value
            if broken_line:
                # The words in the input line were broken up earlier
                # Make sure it's not the "special case"
                if len(words) > 3:
                    # Make sure it's not the first input
                    # we want to append this output
                    if len(escrow_line) > 0:
                        if debug:
                            print 'Found the tail of a linewrapped line'
                            print 'the head looks like:'
                            print escrow_line
                            print 'The tail looks like:'
                            print line
                        # We store it back into the escrow line because there could
                        # be more linewrapped text. (Multi line)
                        escrow_line = escrow_line + line
                        if debug:
                            print 'Put together it looks like'
                            print escrow_line
                        if debug:
                            print 'clearing broken line status'
                        broken_line = False
                else:
                    # ok something is really messed up here.
                    # 1. It's not words long
                    # 2. We don't have any lines in escrow yet
                    # It must just be crap
                    print 'Detected something very wrong with this line:'
                    print line
                    return 'unknown exception with line' + line
    if debug:
        print 'Flusing the last line from escrow'
        print escrow_line
    # NOTE(review): the final escrow line is appended even when it is the
    # empty string -- verify callers tolerate a trailing '' entry.
    ret_list.append(escrow_line)
    if debug:
        print '----------------------------------------'
        print 'Completed parsing the log file'
        print 'counted', len(ret_list), 'lines of log'
        print 'discarded', discarded_lines, 'lines'
    return ret_list
def num_month_to_string(month):
    """Convert a numeric month (1-12, int or numeric string) to its three
    letter name from the module level month_list.

    Returns the month name, or an error string on bad input.
    """
    debug = False
    try:
        num_month = int(month)
    except:
        if debug:
            print('non numeric month set')
        return 'non numeric month set'
    # Bug fix: without this guard month 0 silently wrapped to 'Dec' via
    # the negative index and months > 12 raised IndexError.
    if num_month not in range(1, 13):
        return 'invalid month number'
    return month_list[num_month - 1]
def name_month_to_num(month):
    """Convert a three letter month name like "Jul" to its number (1-12).

    Returns the month number, or the string 'Invalid month name' when the
    input is not a recognised month abbreviation.
    """
    month_list = ['Jan','Feb','Mar','Apr','May','Jun','Jul','Aug','Sep','Oct','Nov','Dec']
    if month not in month_list:
        print('Invalid month supplied: %s' % month)
        print('Must be one of these: %s' % month_list)
        return 'Invalid month name'
    # The list is ordered, so the position gives the month number directly.
    return month_list.index(month) + 1
def get_hidden_password(level = '2'):
    """Retrieve today's hidden enable password via the external "cli-pwd"
    tool.

    level -- privilege level 1-6 (string or int), default '2'.

    Returns the password string, '' when the level was not found in the
    tool output, or an error string on invalid input.
    """
    debug = False
    if debug:
        print('Now in issu.py method get_hidden_password')
    # Bug fix: the original check was inverted ("in" instead of "not in")
    # so valid numeric levels were rejected, and string levels such as the
    # default '2' were never validated at all.
    try:
        num_level = int(level)
    except:
        print('Invalid level selected: %s' % level)
        return 'Invalid level: %s' % level
    if num_level not in range(1, 7):
        print('Invalid level selected: %s' % level)
        print('Level must be: %s' % range(1, 7))
        return 'Invalid level: %s' % level
    search_string = 'level ' + str(num_level)
    if debug:
        print('search string will be: %s' % search_string)
    password = ''
    shell = os.popen("cli-pwd")
    for line in shell.readlines():
        if search_string in line:
            if debug:
                print("Found the line we were looking for:")
                print(line)
            words = line.split()
            # Field 3 holds the password, with a trailing comma to strip.
            password = words[3].strip(',')
            if debug:
                print("This should be the password: %s" % password)
            break
    if debug:
        print('about to return: %s' % password)
    return password
def pull_internal_logs(self, clock):
    """
    Dump the raw SSX log files under /hd/logs and /hdp/logs through the
    hidden enable shell, filtered to entries at or after a given time.

    clock -- dict with the keys 'month' (three letter name, e.g. 'Jul'),
             'day_of_month', 'hour' and 'minute'; the same split format
             as the show clock value returned by issu.py show_time.

    Returns the matching log lines as a list of strings (with marker
    entries separating the sections), or an error string when validation
    or the hidden shell fails.
    """
    debug = False
    # Program Flow
    # 1. Validate Input
    # 2. Log in and pull the file list
    # 3. Parse the Special file
    # 4. Dump the other files
    # 5. Pull /hdp/logs file list
    # 6. Parse the special file
    # 7. Dump the other files
    ######################
    ## 1. Input Validation
    ######################
    # We need to first make sure that the incoming filter list contains all the fields we need!
    ########
    ## Month
    if clock.has_key('month'):
        if valid_month(clock['month']):
            if debug:
                print 'Filtering on Month', clock['month']
        else:
            print 'Invalid month detected:', clock['month']
            return 'Invalid Month: ' + clock['month']
    else:
        print 'Month option not detected. Must be present'
        return 'value "month" not set'
    ###############
    ## Day of month
    if clock.has_key('day_of_month'):
        if valid_day_of_month(clock['day_of_month']):
            if debug:
                print 'Filtering on day of month', clock['day_of_month']
        else:
            print 'Invalid day of month provided:', clock['day_of_month']
            return 'Invalid day of month provided: ' + clock['day_of_month']
    else:
        print 'no day_of_month value provided!'
        return 'no day_of_month value provided!'
    #######
    ## Hour
    # NOTE(review): unlike month/day/minute there is no else branch here,
    # so a missing 'hour' key is silently accepted until first use below.
    if clock.has_key('hour'):
        if valid_hour(clock['hour']):
            if debug:
                print 'Filtering on hour', clock['hour']
        else:
            print 'Invalid hour detected', clock['hour']
            return 'Invalid hour detected ' + clock['hour']
    #########
    ## Minute
    if clock.has_key('minute'):
        if valid_minute(clock['minute']):
            if debug:
                print 'Filtering on minute', clock['minute']
        else:
            print 'Invalid minute value provided:', clock['minute']
            return 'Invalid minute value provided:' + clock['minute']
    else:
        print 'No minute value found!'
        return 'no minute value found'
    ###################################
    ## 2. Log in and pull the file list
    ###################################
    ######################
    ## Get enable password
    if debug:
        print 'retrieving the hidden enable password'
    password = get_hidden_password()
    if debug:
        print 'retrieved the password:', password
    #################
    ## open the shell
    if debug:
        print 'opening the hidden enable shell'
    try:
        self.open_hidden_shell(password)
    except:
        print 'Unable to open the hidden enable shell!'
        return 'failed to open the hidden shell'
    if debug:
        print 'about to run a simple command in the hidden shell'
    ####################
    ## Get the file list
    if debug:
        print 'going to /hd/logs to read the log files'
    raw_output = self.hidden_cmd("cd \/hd\/logs")
    if debug:
        print 'the return value was'
        print raw_output
    if debug:
        print 'checking the current working directory'
    raw_output = self.hidden_cmd("pwd")
    if debug:
        print 'the return value was'
        print raw_output
    if debug:
        print 'counting the files in the directory'
    raw_output = self.hidden_cmd('ls | wc')
    if debug:
        print 'the raw output was:', raw_output
    try:
        raw_file_count = raw_output.split()
        file_count = int(raw_file_count[0])
        if debug:
            print 'Found', file_count, 'files'
        if file_count > 1000:
            print 'There are more then 1000 log files.'
            print 'The API can not process the files.'
            print 'Please erase some files and re-try'
            return 1
    except:
        print 'The value returned from the file count was not a number'
        print 'Please take a look:', raw_output
        return 1
    #command = 'ls -1 event-log* | tail -n 300'
    command = 'ls -1 event-log*'
    if debug:
        print 'getting the list of log files in /hd/logs'
        print 'the command will be:', command
    #raw_output = self.hidden_cmd("ls | grep event-log", 10)
    raw_output = self.hidden_cmd(command, 10)
    #raw_output = self.cli_cmd(command)
    if debug:
        print 'the return value was'
        print raw_output
    ######################################
    ## Look for files with the right date
    file_list = []
    if debug:
        print 'Now parsing file list'
        print '---------------------'
    for line in raw_output.splitlines():
        if debug:
            print '-------------------------'
            print 'raw line:'
            print line
        # The raw line looks like this:
        """
        event-log-20100722-114132
        """
        # We split it on the "-"
        #if len(line) > 0:
        # We need to reject most of the filenames
        # our filename is always 25 characters
        if len(line) == 25:
            if debug:
                print 'found a line we care about'
            words = line.split('-')
            # Here is the decoded data we need to extract
            """
            year: 2010
            month: 07
            day: 22
            hour: 11
            minute: 41
            second: 32
            """
            date = words[2]
            year_file = date[:4]
            raw_month_file = date[4:6]
            month_file = num_month_to_string(raw_month_file)
            day_file = date[6:]
            time = words[3]
            hour_file = time[:2]
            minute_file = time[2:4]
            second_file = time[4:]
            if debug:
                print 'detected the following date time:'
                print 'year:', year_file, 'month:', month_file
                print 'day:', day_file
                print 'hour:', hour_file, 'minute:', minute_file, 'second:', second_file
            # now we must compare the parsed date/time and
            # compare it with our filter value
            # NOTE(review): the <= / < tests below are lexicographic string
            # comparisons; they rely on zero-padded fixed-width fields in
            # both the clock dict and the filename -- confirm
            if clock['month'] == month_file:
                if debug:
                    print 'Found a file with the right month:', month_file
                if clock['day_of_month'] <= day_file:
                    if debug:
                        print 'Found a day that is equal to or greater our filter day:', day_file
                    if clock['hour'] <= hour_file:
                        if debug:
                            print 'Found an hour that is equal or greater then filter hour:', hour_file
                        if clock['minute'] <= minute_file:
                            if debug:
                                print 'found our file!'
                                print line
                                print 'Our input value for minute was:', minute_file
                                print 'The minute value we are filtering on is:', clock['minute']
                            file_list.append(line)
                        # If it's outright larger. Example I'm filtering on things that happened
                        # After 1:10 and I find something that happened at 4:04
                        # Technically the minute is smaller 10 > 04 but the hour is larger
                        # Therefore I need to keep it.
                        elif clock['hour'] < hour_file:
                            if debug:
                                print 'Found a keeper:', line
                            file_list.append(line)
                        else:
                            file_to_search_inside = line
                    elif clock['day_of_month'] < day_file:
                        if debug:
                            print 'Found a keeper', line
                        file_list.append(line)
                    else:
                        file_to_search_inside = line
                else:
                    file_to_search_inside = line
            else:
                file_to_search_inside = line
        else:
            file_to_search_inside = ''
            if debug:
                print 'line is:', len(line), 'characters long'
                if len(line) > 25:
                    print 'Rejecting this file name because it is too long'
                if len(line) < 25:
                    print 'Rejecting this file name because it is too too short'
    # NOTE(review): file_to_search_inside is only bound inside the loop
    # above; with an empty listing the references below raise NameError
    if debug:
        print 'Done filtering' , len(raw_output.splitlines()), 'files'
        print 'Found', len(file_list), 'files to keep'
        for file in file_list:
            print file
        print 'We filtered on: 2010' + str(name_month_to_num(clock['month'])) + clock['day_of_month'] + \
            '-' + clock['hour'] + clock['minute'] + '00'
        print 'The file that may contain some more logs is:', file_to_search_inside
    if debug:
        print 'Now we will dump the special file and search for the first entry after our date'
    # This is the list we return
    ret_list = []
    found_line = False
    discarded_lines = 0
    #############################
    ## 3. Search our special file
    #############################
    if len(file_to_search_inside) > 0:
        # Need to add the line reading to this function as well to speed it up.
        try:
            command = 'wc ' + file_to_search_inside
        except:
            print 'no files found to read. There is a bug in pull_internal_logs in issu.py!'
            sys.exit(1)
        try:
            raw_output = self.hidden_cmd(command, 20)
        except:
            print 'Failure while getting the line count of file', file
            return 'Failing to get the line count of file: ' + file
        # Example raw_output
        """
        387 4756 31900 event-log-20100723-140131
        """
        words = raw_output.split()
        if debug:
            print 'The raw output was:'
            print raw_output
            print 'The file:', file_to_search_inside, 'Has', words[0], 'lines of text'
        str_line_count = words[0]
        try:
            line_count = int(str_line_count)
        except:
            print 'We got a non integer for the line count!', str_line_count
            return 'invalid line count ' + str_line_count
        command = 'cat ' + file_to_search_inside
        if debug:
            print 'Command will be:', command
            print 'Sending command.'
        self.ses.sendline(command)
        # Begin reading the line of the file
        reading_input = True
        local_lines = []
        # The first line returned is the command executed so we need to increment by 1
        while reading_input:
            if debug:
                print 'Lines left:', line_count
            try:
                line = self.ses.readline()
            except:
                print 'unable to read the line!'
            if '/bin/sh: cannot fork - try again' in line:
                print 'Shell died. SSX probably restarting'
                return 'Lost Shell. SSX probably rebooting'
            if command in line:
                if debug:
                    print 'we got the command line back!'
            else:
                if found_line:
                    ret_list.append(line)
                else:
                    if len(line) > 0:
                        if debug:
                            print("------------------------------------")
                            print("The raw line is:")
                            print(line)
                        # Cut the line into words
                        words = line.split()
                        # raw line looks like
                        """
                        Jul 23 01:50:20 [1] INFO Clock-TZSET: System timezone set to: PDT (Day Light Saving Not set)
                        """
                        ##############################
                        ## Parse the month date header
                        try:
                            month_log = words[0]
                            if not (valid_month(month_log)):
                                if debug:
                                    print 'Invalid month detected'
                                raise
                            day_of_month_log = words[1]
                            if not (valid_day_of_month(day_of_month_log)):
                                if debug:
                                    print 'Invalid day of month detected'
                                raise
                            raw_time_log = words[2]
                            if debug:
                                print 'parsing raw_time:', raw_time_log
                            long_time_log = raw_time_log.split(":")
                            if debug:
                                print 'the long_time_log contains:', long_time_log
                            if not (len(long_time_log) == 3):
                                if debug:
                                    print 'detected invalid time format:'
                                    print long_time_log
                                raise
                            hour_log = long_time_log[0]
                            if not (valid_hour(hour_log)):
                                if debug:
                                    print 'Invalid hour detected'
                                raise
                            minute_log = long_time_log[1]
                            if not (valid_minute(minute_log)):
                                if debug:
                                    print 'Invalid minute detected'
                                raise
                            second_log = long_time_log[2]
                            if not (valid_second(second_log)):
                                if debug:
                                    print 'invalid second detected'
                                raise
                            # We don't care about this stuff at this time but it could be
                            # parsed in the future.
                            logs_per_second_log = words[3]
                            log_type = words[4]
                            log_deamon = words[5]
                            log_msg_type = words[6]
                            log_message = words[7:]
                        except:
                            if debug:
                                print 'Unable to parse this line:'
                                print line
                                print 'It is probably part of the previous line'
                            # Yep it's broken somehow! Either
                            # 1. It's missing it's ":" "special case"
                            # 2. It is so long it linewrapped.
                            # NOTE(review): broken_line / escrow_line are set
                            # here but never consumed later in this method
                            broken_line = True
                            # We store the fragment in escrow
                            escrow_line = line
                            #ret_list.append(line)
                        if debug:
                            print 'Succesfully parsed the date/time header'
                        #####################################
                        ## Filter the line based on date time
                        if debug:
                            print("The month is: %s" % month_log)
                            print("looking for a month greater then: %s" % clock['month'])
                        if clock['month'] == month_log:
                            # Bug here it won't pass the end of the month to the next month
                            if debug:
                                print("The day is: %s" % day_of_month_log)
                                print("Looking for a day greater then: %s" % clock['day_of_month'])
                            if clock['day_of_month'] <= day_of_month_log:
                                if debug:
                                    print("The hour is: %s" % hour_log)
                                    print("Looking for an hour greater then: %s" % clock['hour'])
                                if clock['hour'] <= hour_log:
                                    if debug:
                                        print("The minute is: %s" % minute_log)
                                        print("Looking for a minute greater then: %s" % clock['minute'])
                                    if clock['minute'] <= minute_log:
                                        # We save the line
                                        ret_list.append(line)
                                        found_line = True
                                        if debug:
                                            print 'Found the beginning line. Skipping filtering other lines'
                                    elif clock['hour'] < hour_log:
                                        found_line = True
                                        if debug:
                                            print 'Found the beginning line. Skipping filtering other lines'
                                        ret_list.append(line)
                                    else:
                                        if debug:
                                            print 'The following line was not saved becuase it is before the minute we want'
                                            print line
                                        discarded_lines += 1
                                elif clock['day_of_month'] < day_of_month_log:
                                    found_line = True
                                    if debug:
                                        print 'Found the beginning line. Skipping filtering other lines'
                                    ret_list.append(line)
                                else:
                                    if debug:
                                        print 'The following line was not saved becuase it is before the hour we want'
                                        print line
                                    discarded_lines += 1
                            elif clock['month'] < month_log:
                                found_line = True
                                if debug:
                                    print 'Found the beginning line. Skipping filtering other lines'
                                ret_list.append(line)
                            else:
                                if debug:
                                    print 'The following line was not saved becuase it is before the day of month we want'
                                    print line
                                discarded_lines += 1
                        else:
                            if debug:
                                print 'The following line was not saved becuase it is before the month we want'
                                print line
                            discarded_lines += 1
            # Decement the line count
            line_count = line_count - 1
            # Break when run out of lines to read
            if line_count == 0:
                if debug:
                    # NOTE(review): bare string below is a no-op;
                    # a print statement is probably missing
                    'At the end of the counted lines'
                reading_input = False
    ###################
    ## 4. Dump the rest
    ###################
    for file in file_list:
        if debug:
            print '----------------------------'
            print 'Now reading file:', file
            print '----------------------------'
        # At this point simply cat-ing the file and reading the output we try to filter every
        # character for the '#' prompt. This causes a huge delay and won't work for us.
        # Instead we will use 'wc' to count the number of lines we need to read until the next prompt
        command = 'wc ' + file
        try:
            raw_output = self.hidden_cmd(command, 20)
        except:
            print 'Failure while getting the line count of file', file
            break
        # Example raw_output
        """
        387 4756 31900 event-log-20100723-140131
        """
        words = raw_output.split()
        if debug:
            print 'The raw output was:'
            print raw_output
            print 'The file:', file, 'Has', words[0], 'lines of text'
        str_line_count = words[0]
        try:
            line_count = int(str_line_count)
        except:
            print 'We got a non integer for the line count!', str_line_count
            return 'invalid line count ' + str_line_count
        command = 'cat ' + file
        if debug:
            print 'Command will be:', command
            print 'Sending command.'
        self.ses.sendline(command)
        reading_input = True
        local_lines = []
        while reading_input:
            if debug:
                print 'Lines left:', line_count
            try:
                line = self.ses.readline()
            except:
                print 'unable to read the line!'
            if debug:
                print 'line:'
                print line
            if command in line:
                if debug:
                    print 'we got the command line back!'
                reading_input = False
            else:
                if debug:
                    print 'Saving this line'
                local_lines.append(line)
            # Decrement the line counter
            line_count = line_count - 1
            # Break when run out of lines to read
            if line_count == 0:
                if debug:
                    'At the end of the counted lines'
                reading_input = False
        if line_count == 0:
            if debug:
                print 'done dumping lines'
            # NOTE(review): '==' compares, it does not assign;
            # this statement has no effect
            reading_input == False
        if debug:
            print 'We caught:', len(local_lines), 'lines of output from file:', file
        for line in local_lines:
            ret_list.append(line)
        if debug:
            print 'The complete log is now:', len(ret_list)
    print '000000000000000000000000000000'
    print 'Completed parsing the /hd/logs'
    print 'now parsing /hdp/logs'
    print '000000000000000000000000000000'
    ret_list.append("end of /hd/logs")
    ret_list.append("INTERNAL LOGS BEGIN")
    #######################
    ## 5. Get the file list
    #######################
    raw_output = self.hidden_cmd("cd \/hdp\/logs")
    if debug:
        print 'the return value was'
        print raw_output
    raw_output = self.hidden_cmd("pwd")
    if debug:
        print 'the return value was'
        print raw_output
    raw_output = self.hidden_cmd("ls | grep event-log")
    if debug:
        print 'the return value was'
        print raw_output
    ######################################
    ## Look for files with the right date
    file_list = []
    if debug:
        print 'Now parsing file list'
        print '---------------------'
    for line in raw_output.splitlines():
        if debug:
            print '-------------------------'
            print 'raw line:'
            print line
        # The raw line looks like this:
        """
        event-log-20100722-114132
        """
        # We split it on the "-"
        if len(line) > 0:
            if debug:
                print 'found a line we care about'
            words = line.split('-')
            # Here is the decoded data we need to extract
            """
            year: 2010
            month: 07
            day: 22
            hour: 11
            minute: 41
            second: 32
            """
            date = words[2]
            year_file = date[:4]
            raw_month_file = date[4:6]
            month_file = num_month_to_string(raw_month_file)
            day_file = date[6:]
            time = words[3]
            hour_file = time[:2]
            minute_file = time[2:4]
            second_file = time[4:]
            if debug:
                print 'detected the following date time:'
                print 'year:', year_file, 'month:', month_file
                print 'day:', day_file
                print 'hour:', hour_file, 'minute:', minute_file, 'second:', second_file
            # now we must compare the parsed date/time and
            # compare it with our filter value
            if clock['month'] == month_file:
                if debug:
                    print 'Found a file with the right month:', month_file
                if clock['day_of_month'] <= day_file:
                    if debug:
                        print 'Found a day that is equal to or greater our filter day:', day_file
                    if clock['hour'] <= hour_file:
                        if debug:
                            print 'Found an hour that is equal or greater then filter hour:', hour_file
                        if clock['minute'] <= minute_file:
                            if debug:
                                print 'found our file!'
                                print line
                                print 'Our input value for minute was:', minute_file
                                print 'The minute value we are filtering on is:', clock['minute']
                            file_list.append(line)
                        # If it's outright larger. Example I'm filtering on things that happened
                        # After 1:10 and I find something that happened at 4:04
                        # Technically the minute is smaller 10 > 04 but the hour is larger
                        # Therefore I need to keep it.
                        elif clock['hour'] < hour_file:
                            if debug:
                                print 'Found a keeper:', line
                            file_list.append(line)
                        else:
                            file_to_search_inside = line
                    elif clock['day_of_month'] < day_file:
                        if debug:
                            print 'Found a keeper', line
                        file_list.append(line)
                    else:
                        file_to_search_inside = line
                else:
                    file_to_search_inside = line
            else:
                file_to_search_inside = line
    print 'Done filtering' , len(raw_output.splitlines()), 'files'
    print 'Found', len(file_list), 'files to keep'
    for file in file_list:
        print file
    print 'We filtered on: 2010' + clock['month'] + clock['day_of_month'] + \
        '-' + clock['hour'] + clock['minute'] + '00'
    print 'The file that may contain some more logs is:', file_to_search_inside
    if debug:
        print 'Now we will dump the special file and search for the first entry after our date'
    # This is the list we return
    # NOTE(review): re-binding ret_list here discards everything collected
    # from /hd/logs above, including the section markers -- confirm intended
    ret_list = []
    found_line = False
    discarded_lines = 0
    #############################
    ## 6. Search our special file
    #############################
    # Need to add the line reading to this function as well to speed it up.
    command = 'wc ' + file_to_search_inside
    try:
        raw_output = self.hidden_cmd(command, 20)
    except:
        print 'Failure while getting the line count of file', file
        return 'Failing to get the line count of file: ' + file
    # Example raw_output
    """
    387 4756 31900 event-log-20100723-140131
    """
    words = raw_output.split()
    if debug:
        print 'The raw output was:'
        print raw_output
        print 'The file:', file_to_search_inside, 'Has', words[0], 'lines of text'
    str_line_count = words[0]
    try:
        line_count = int(str_line_count)
    except:
        print 'We got a non integer for the line count!', str_line_count
        return 'invalid line count ' + str_line_count
    command = 'cat ' + file_to_search_inside
    if debug:
        print 'Command will be:', command
        print 'Sending command.'
    self.ses.sendline(command)
    # Begin reading the line of the file
    reading_input = True
    local_lines = []
    # The first line returned is the command executed so we need to increment by 1
    while reading_input:
        if debug:
            print 'Lines left:', line_count
        try:
            line = self.ses.readline()
        except:
            print 'unable to read the line!'
        if command in line:
            if debug:
                print 'we got the command line back!'
        else:
            if found_line:
                ret_list.append(line)
            else:
                if len(line) > 0:
                    if debug:
                        print("------------------------------------")
                        print("The raw line is:")
                        print(line)
                    # Cut the line into words
                    words = line.split()
                    # raw line looks like
                    """
                    Jul 27 20:17:08 [2] INT HaMgr-ACT_CONNECTION_AVAILABLE: active ha-mgr connection available
                    """
                    ##############################
                    ## Parse the month date header
                    try:
                        month_log = words[0]
                        if not (valid_month(month_log)):
                            if debug:
                                print 'Invalid month detected'
                            raise
                        day_of_month_log = words[1]
                        if not (valid_day_of_month(day_of_month_log)):
                            if debug:
                                print 'Invalid day of month detected'
                            raise
                        raw_time_log = words[2]
                        if debug:
                            print 'parsing raw_time:', raw_time_log
                        long_time_log = raw_time_log.split(":")
                        if debug:
                            print 'the long_time_log contains:', long_time_log
                        if not (len(long_time_log) == 3):
                            if debug:
                                print 'detected invalid time format:'
                                print long_time_log
                            raise
                        hour_log = long_time_log[0]
                        if not (valid_hour(hour_log)):
                            if debug:
                                print 'Invalid hour detected'
                            raise
                        minute_log = long_time_log[1]
                        if not (valid_minute(minute_log)):
                            if debug:
                                print 'Invalid minute detected'
                            raise
                        second_log = long_time_log[2]
                        if not (valid_second(second_log)):
                            if debug:
                                print 'invalid second detected'
                            raise
                        # We don't care about this stuff at this time but it could be
                        # parsed in the future.
                        logs_per_second_log = words[3]
                        log_type = words[4]
                        log_deamon = words[5]
                        log_msg_type = words[6]
                        log_message = words[7:]
                    except:
                        if debug:
                            print 'Unable to parse this line:'
                            print line
                            print 'It is probably part of the previous line'
                        # Yep it's broken somehow! Either
                        # 1. It's missing it's ":" "special case"
                        # 2. It is so long it linewrapped.
                        broken_line = True
                        # We store the fragment in escrow
                        escrow_line = line
                        #ret_list.append(line)
                    if debug:
                        print 'Succesfully parsed the date/time header'
                    #####################################
                    ## Filter the line based on date time
                    if debug:
                        print("The month is: %s" % month_log)
                        print("looking for a month greater then: %s" % clock['month'])
                    if clock['month'] == month_log:
                        # Bug here it won't pass the end of the month to the next month
                        if debug:
                            print("The day is: %s" % day_of_month_log)
                            print("Looking for a day greater then: %s" % clock['day_of_month'])
                        if clock['day_of_month'] <= day_of_month_log:
                            if debug:
                                print("The hour is: %s" % hour_log)
                                print("Looking for an hour greater then: %s" % clock['hour'])
                            if clock['hour'] <= hour_log:
                                if debug:
                                    print("The minute is: %s" % minute_log)
                                    print("Looking for a minute greater then: %s" % clock['minute'])
                                if clock['minute'] <= minute_log:
                                    # We save the line
                                    ret_list.append(line)
                                    found_line = True
                                    if debug:
                                        print 'Found the beginning line. Skipping filtering other lines'
                                elif clock['hour'] < hour_log:
                                    found_line = True
                                    if debug:
                                        print 'Found the beginning line. Skipping filtering other lines'
                                    ret_list.append(line)
                                else:
                                    if debug:
                                        print 'The following line was not saved becuase it is before the minute we want'
                                        print line
                                    discarded_lines += 1
                            elif clock['day_of_month'] < day_of_month_log:
                                found_line = True
                                if debug:
                                    print 'Found the beginning line. Skipping filtering other lines'
                                ret_list.append(line)
                            else:
                                if debug:
                                    print 'The following line was not saved becuase it is before the hour we want'
                                    print line
                                discarded_lines += 1
                        elif clock['month'] < month_log:
                            found_line = True
                            if debug:
                                print 'Found the beginning line. Skipping filtering other lines'
                            ret_list.append(line)
                        else:
                            if debug:
                                print 'The following line was not saved becuase it is before the day of month we want'
                                print line
                            discarded_lines += 1
                    else:
                        if debug:
                            print 'The following line was not saved becuase it is before the month we want'
                            print line
                        discarded_lines += 1
        # Decement the line count
        line_count = line_count - 1
        # Break when run out of lines to read
        if line_count == 0:
            if debug:
                'At the end of the counted lines'
            reading_input = False
    ###################
    ## 7. Dump the rest
    ###################
    for file in file_list:
        if debug:
            print '----------------------------'
            print 'Now reading file:', file
            print '----------------------------'
        # At this point simply cat-ing the file and reading the output we try to filter every
        # character for the '#' prompt. This causes a huge delay and won't work for us.
        # Instead we will use 'wc' to count the number of lines we need to read until the next prompt
        command = 'wc ' + file
        try:
            raw_output = self.hidden_cmd(command, 20)
        except:
            print 'Failure while getting the line count of file', file
            break
        # Example raw_output
        """
        387 4756 31900 event-log-20100723-140131
        """
        words = raw_output.split()
        if debug:
            print 'The raw output was:'
            print raw_output
            print 'The file:', file, 'Has', words[0], 'lines of text'
        str_line_count = words[0]
        try:
            line_count = int(str_line_count)
        except:
            print 'We got a non integer for the line count!', str_line_count
            return 'invalid line count ' + str_line_count
        command = 'cat ' + file
        if debug:
            print 'Command will be:', command
            print 'Sending command.'
        self.ses.sendline(command)
        reading_input = True
        local_lines = []
        while reading_input:
            if debug:
                print 'Lines left:', line_count
            try:
                line = self.ses.readline()
            except:
                print 'unable to read the line!'
            if debug:
                print 'line:'
                print line
            if command in line:
                if debug:
                    print 'we got the command line back!'
            else:
                if debug:
                    print 'Saving this line'
                local_lines.append(line)
            # Decrement the line counter
            line_count = line_count - 1
            # Break when run out of lines to read
            if line_count == 0:
                if debug:
                    'At the end of the counted lines'
                reading_input = False
        if debug:
            print 'We caught:', len(local_lines), 'lines of output from file:', file
        for line in local_lines:
            ret_list.append(line)
        if debug:
            print 'The complete log is now:', len(ret_list)
    ret_list.append("INTERNAL LOGS END")
    ###########
    ## Complete
    ###########
    if debug:
        print 'closing the shell'
    self.close_hidden_shell()
    if debug:
        print 'done with issu.py pull_internal_logs'
    return ret_list
def pull_corefiles(self, clock, username='regress', user_password='gleep7', host='10.1.1.101'):
"""
retrieves the core files from the SSX and drops them in your CWD
Files are renamed with the YYYY-MM-DD
clock = list of split time
username = username to log into the linux system with. Defaults to "regress"
user_password = password for above username. Defaults to "gleep7"
host = linux host to sftp the files to. Defaults to "10.1.1.101" which is
qa-radxpm-1
"""
# Program Flow
# 1. Validate Input
# 2. Get file list based on Date
# 3. SFTP the files off
# 4. Copy the files from /home/regress to /home/USERNAME
######################
## 1. Input Validation
######################
# We need to first make sure that the incoming filter list contains all the fields we need!
########
## Month
if clock.has_key('month'):
if valid_month(clock['month']):
if debug:
print 'Filtering on Month', clock['month']
else:
print 'Invalid month detected:', clock['month']
return 'Invalid Month: ' + clock['month']
else:
print 'Month option not detected. Must be present'
return 'value "month" not set'
###############
## Day of month
if clock.has_key('day_of_month'):
if valid_day_of_month(clock['day_of_month']):
if debug:
print 'Filtering on day of month', clock['day_of_month']
else:
print 'Invalid day of month provided:', clock['day_of_month']
return 'Invalid day of month provided: ' + clock['day_of_month']
else:
print 'no day_of_month value provided!'
return 'no day_of_month value provided!'
#######
## Hour
if clock.has_key('hour'):
if valid_hour(clock['hour']):
if debug:
print 'Filtering on hour', clock['hour']
else:
print 'Invalid hour detected', clock['hour']
return 'Invalid hour detected ' + clock['hour']
#########
## Minute
if clock.has_key('minute'):
if valid_minute(clock['minute']):
if debug:
print 'Filtering on minute', clock['minute']
else:
print 'Invalid minute value provided:', clock['minute']
return 'Invalid minute value provided:' + clock['minute']
else:
print 'No minute value found!'
return 'no minute value found'
###################################
## 2. Log in and pull the file list
###################################
######################
## Get enable password
if debug:
print 'retrieving the hidden enable password'
password = get_hidden_password()
if debug:
print 'retrieved the password:', password
#################
## open the shell
if debug:
print 'opening the hidden enable shell'
try:
self.open_hidden_shell(password)
except:
print 'Unable to open the hidden enable shell!'
return 'failed to open the hidden shell'
if debug:
print 'about to run a simple command in the hidden shell'
####################
## Get the file list
dump_dirs = ['slot0','slot1','slot2','slot3','slot4']
file_list = []
for dir in dump_dirs:
command = 'cd \/hd\/dump\/' + dir
if debug:
print 'the command will be:', command
raw_output = self.hidden_cmd(command)
if debug:
print 'the return value was'
print raw_output
command = 'ls -l | grep core.gz'
if debug:
print 'the command will be:', command
raw_output = self.hidden_cmd(command)
if debug:
print 'the return value was'
print raw_output
# the raw line looks like this:
"""
-rw-r--r-- 1 root root 2807430 Jul 26 16:53 dfn.1.core.gz
"""
discarded_lines = 0
raw_lines = raw_output.splitlines()
for line in raw_lines[2:]:
if debug:
print 'parsing:'
print line
words = line.split()
month_log = words[5]
day_of_month_log = words[6]
raw_time_log = words[7]
split_time_log = raw_time_log.split(":")
hour_log = split_time_log[0]
minute_log = split_time_log[1]
filename_log = words[8]
if debug:
print filename_log, 'Month:', month_log, 'day', day_of_month_log, 'hour:', hour_log, 'Minute:', minute_log
full_path = dir + '/' + filename_log
if debug:
print 'That file lives:', full_path
#####################################
## Filter the line based on date time
if debug:
print("The month is: %s" % month_log)
print("looking for a month greater then: %s" % clock['month'])
if clock['month'] == month_log:
# Bug here it won't pass the end of the month to the next month
if debug:
print("The day is: %s" % day_of_month_log)
print("Looking for a day greater then: %s" % clock['day_of_month'])
if clock['day_of_month'] <= day_of_month_log:
if debug:
print("The hour is: %s" % hour_log)
print("Looking for an hour greater then: %s" % clock['hour'])
if clock['hour'] <= hour_log:
if debug:
print("The minute is: %s" % minute_log)
print("Looking for a minute greater then: %s" % clock['minute'])
if clock['minute'] <= minute_log:
# We save the line
file_list.append(full_path)
if debug:
print 'Found a file:', filename_log
elif clock['hour'] < hour_log:
if debug:
print 'Found a file:', filename_log
file_list.append(full_path)
else:
if debug:
print 'The following file was not saved becuase it is before the minute we want'
print line
discarded_lines += 1
elif clock['day_of_month'] < day_of_month_log:
if debug:
print 'Found a file:', filename_log
file_list.append(full_path)
else:
if debug:
print 'The following file was not saved becuase it is before the hour we want'
print full_path
elif clock['month'] < month_log:
if debug:
print 'Found a file:', filename_log
file_list.append(full_path)
else:
if debug:
print 'The following file was not saved becuase it is before the day of month we want'
print filename_log
else:
if debug:
print 'The following file was not saved becuase it is before the month we want'
print filename_log
print 'The following', len(file_list), 'core files will be coppied to the testing directory:'
for file in file_list:
print file
self.close_hidden_shell()
#################
## SFTP files off
unsaved_files = []
linux_file_list = []
for file in file_list:
file_parts = file.split('/')
slot = file_parts[-2]
filename = file_parts[-1]
filename_parts = filename.split(".")
if len(filename_parts) == 3:
filename_head = filename_parts[0]
elif len(filename_parts) == 4:
filename_head = filename_parts[0] + '-' + filename_parts[1]
else:
print 'This filename has too many "." in it!'
print filename_parts
filename_head = filename_parts[0] + '-' + filename_parts[1]
extension = filename_parts[-2] + '.' + filename_parts[-1]
month = name_month_to_num(clock['month'])
if debug:
print 'file:', file, 'filename:', filename
print 'in slot:', slot
if not clock.has_key('year'):
print 'No year detected. Defaulting to 2010'
clock['year'] = '2010'
file_name_with_timestamp = filename_head + '-' + str(clock['year']) + str(month) + \
str(clock['day_of_month']) + str(clock['hour']) + str(clock['minute']) + '.' + str(extension)
if debug:
print 'the full filename will be:', file_name_with_timestamp
command = 'copy /hd/dump/' + file + ' sftp://' + username + '@' + host + ':/home/' + username \
+ '/' + file_name_with_timestamp
if debug:
print 'The command will be:'
print command
print 'Copying the core file:', filename, 'off the system.'
#self.ftppasswd(command, user_password)
self.ftppasswd(command, user_password, 60)
print 'File copied succesfully'
linux_file_list.append(file_name_with_timestamp)
"""
if len(unsaved_files) > 0:
print 'There were:', len(file_list), 'files to copy.', len(unsaved_files), 'files were not coppied'
#print 'These files were not coppied off the system:'
#for file in unsaved_files:
# print file
"""
print 'Completed copying Core Files off'
current_dir = os.getcwd()
if debug:
print 'script is being run from:', current_dir
for file in linux_file_list:
source_path = '/home/' + username + '/'
full_filename = source_path + file
dest_filename = current_dir + '/' + file
print '------------------------'
print 'about to move:', full_filename, 'to:', dest_filename
shutil.copyfile(full_filename, dest_filename)
print 'file moved succesfully.'
print 'All done moving the files.'
return 0
def filter_logs(self, clock):
"""
Pulls information available from "show log".
Then logs in and pulls the internal log files in /hd/logs and /hdp/logs
It also pulls any core files to the scripts CWD
"""
debug = False
###################
## Input Validation
# We need to first make sure that the incoming filter list contains all the fields we need!
if debug:
print 'Validating the Date/Time'
########
## Month
if clock.has_key('month'):
if valid_month(clock['month']):
if debug:
print 'Filtering on Month', clock['month']
else:
print 'Invalid month detected:', clock['month']
return 'Invalid Month: ' + clock['month']
else:
print 'Month option not detected. Must be present'
return 'value "month" not set'
###############
## Day of month
if clock.has_key('day_of_month'):
if valid_day_of_month(clock['day_of_month']):
if debug:
print 'Filtering on day of month', clock['day_of_month']
if clock['day_of_month'][0] == '0':
# This line may require a space!
clock['day_of_month'] = clock['day_of_month'].lstrip('0')
if debug:
print 'the stripped hour now looks like', clock['day_of_month']
else:
print 'Invalid day of month provided:', clock['day_of_month']
return 'Invalid day of month provided: ' + clock['day_of_month']
else:
print 'no day_of_month value provided!'
return 'no day_of_month value provided!'
#######
## Hour
if clock.has_key('hour'):
if valid_hour(clock['hour']):
if debug:
print 'Filtering on hour', clock['hour']
print 'stripping any trailing zeros in the time'
else:
print 'Invalid hour detected', clock['hour']
return 'Invalid hour detected ' + clock['hour']
#########
## Minute
if clock.has_key('minute'):
if valid_minute(clock['minute']):
if debug:
print 'Filtering on minute', clock['minute']
else:
print 'Invalid minute value provided:', clock['minute']
return 'Invalid minute value provided:' + clock['minute']
else:
print 'No minute value found!'
return 'no minute value found'
ret_logs = []
print 'Pulling information using "show log"'
syslog = pull_syslog(self, clock)
print 'Completed pulling information.'
ret_logs.append(syslog)
print 'Pulling the internal logging information.'
internal_logs = pull_internal_logs(self, clock)
print 'Complete pulling the internal log informaiton'
ret_logs.append(internal_logs)
print 'Retrieving any core files.'
retr = pull_corefiles(self, clock)
print 'Completed pulling core files'
print 'Completed pulling log information'
return ret_logs
def generate_ixia_dict(source_file, number_of_streams, stream_dict):
    """
    This method takes the variables from the topo.py configuration file and generates
    nested dictionaries that are required for the rewrite_ixia_config method.
    This method is written to shorten the manual labor of making configurations with large
    number of streams (10 or more) and is not required if you want to write the
    ixia_dictionary by hand.

    NOTE(review): this function is currently a documented stub — its body
    contains only documentation strings, performs no work, and implicitly
    returns None. Callers must not expect a dictionary back until it is
    implemented.

    Variables:
    'Chassis IP Address' - Topo
    'Username' - Topo
    'Source File'
    'Card Number' - Topo
    'Port Number' - Topo
    'Number of Streams'
    # Per stream
    'Stream Name'
    'Source IP Address'
    'Destination IP Address'
    'Destination MAC Address'
    """
    # Example values (working good)
    # with only 1 stream
    """
    ixia_dict = { \
    'Chassis IP Address':'10.4.2.30', \
    'Username':'jalfrey', \
    'Source File':'JF-FUN-009-1.tcl', \
    'Card Number 3':{ \
    'Card Number':3, \
    'Port Number 3':{ \
    'Port Number':3, \
    'Source MAC Address':'00 de bb 00 00 01', \
    'Destination MAC Address':'00 DE BB 00 00 02', \
    'Stream ID 1':{ \
    'Stream ID':1, \
    'Stream Name':'Session_payload', \
    'Source IP Address':'10.11.12.1', \
    'Destination IP Address':'10.11.20.1', \
    'Destination MAC Address':'00 DE BB 00 00 02'
    }
    }
    }
    }
    """
    # Topo to ixia_dict variable mapping
    """
    'Chassis IP Address' = topo.ixia['ip_addr']
    'Username' = topo.ixia['username']
    'Source File' = script_var['test_name'] - appears in jf_config.py
    'Card Number' = topo.ixia['CardID']
    'Port Number' = topo.ixia['TxportID']
    'Number of Streams' = script_var['test_name'] -
    # Per stream
    'Stream Name' = script_var['test_name'] -
    'Stream ID' = fed into script
    'Source IP Address' = fed into script
    'Destination IP Address' = fed into script
    'Destination MAC Address' = fed into script
    """
def rewrite_ixia_config(ixia_dict):
"""
This function opens an IXIA.tcl script and rewrites the IP Address and other values to make
the script send traffic to any DUT
All values MUST be set in this dictionary or the file can not be rewritten correctly!
After this method completes it will write an ouptut file or if set to "none" it will
return the whole configuration as a very long string which can then be split and fed
into the IXIA via CLI
ixia_dict{
Chassis IP Address:10.4.2.30
# The IP of the IXIA itself
Username:jalfrey
# Username that "owns" the ports that will send traffic
Source File
# This is the source file it is read from
# This needs to either be a full path or relative to current directory path
Output File
# This can be set to "none" and the method will return the whole configuration
# Or if it is set it will write the file out to disk
Card Number_X:
# If there are multiple cards then there will be multiple dictionaries.
# For my configuration I use card 3 to the dictionary will be called
# "Card Number 3"
Dictionary {
Card Number
# Card which port lives on. Same information contained in the dictionary
# name but just as the number "3"
Port Number X:
# Port to be configured. There will be one key per port
Dictionary {
Port Number
# This is the port number on the IXIA itself (physical port)
Source MAC Address
# Can be set or left "default" which will leave the config unchanged or null ''
Destination MAC Address
# This is the MAC of what the IXIA is directly connected to
# In my case it's a Cisco Router
Stream ID X:
# This is the Stream ID. There is one ID per stream configured
Dictionary: {
Stream ID:1
# Stream numeric ID. Matches "Stream ID X" value X
Stream Name
# Optional. If value is Null nothing will be set
# whatever was there will be left there
Source IP Address
# Source on IXIA side
Destination IP Address
# Where the traffic should go. In my case that's the SSX (DUT)
Destination MAC Address
# This should be the same as the "Destination MAC Address" found above
# But clearly it can be set differently but I'm not sure why
# Maybe for testing through a Hub?
}
}
}
"""
debug = False
# Configuration will overwrite this value
generate_output_file = False
###############################
# Variable Validation Section #
###############################
if len(ixia_dict) > 0:
top_keys = ixia_dict.keys()
if debug:
print '------------------------------------'
print 'The top keys extracted were:'
for key in top_keys:
print key, ':', ixia_dict[key]
print '------------------------------------'
# IP Address
if ixia_dict.has_key('Chassis IP Address'):
if validIP(ixia_dict['Chassis IP Address']):
top_keys.remove('Chassis IP Address')
if debug:
print 'Chassis IP is valid'
else:
error_message = 'Invalid IP address for the chassis: ' + ixia_dict.has_key('Chassis IP Address')
return error_message
# Username
if ixia_dict.has_key('Username'):
if (len(ixia_dict['Username']) > 0):
top_keys.remove('Username')
if debug:
print 'Username is valid'
else:
error_message = 'No Username value provided'
return error_message
# Source File
if ixia_dict.has_key('Source File'):
if (ixia_dict['Source File'] == ''):
return 'No source file value set'
if os.path.exists(ixia_dict['Source File']):
top_keys.remove('Source File')
if debug:
print 'Source filename is valid'
else:
return 'unable to locate the source file!'
# Output File
# IF the length is zero then no file is generated
# if it is set to "none" then no file is generated
# Otherwise whatever the filename is it's generated with that
# Since the filename could be mostly anything we don't validate it
if ixia_dict.has_key('Output File'):
# Here we change the case to lowercase so that we can compare the string once
# Instead of testing to see if it's formatted like 'None', 'NONE', etc.
output_filename = ixia_dict['Output File'].lower()
if output_filename == 'none':
generate_output_file = False
if debug:
print 'No output file will be generate'
else:
generate_output_file = True
if debug:
print 'Output file will be generated'
top_keys.remove('Output File')
if debug:
print 'Output filename is valid'
if debug:
print 'At this point the top_keys should only contain card numbers'
print top_keys
# At this point the top_keys dictionary should only contain entries
# of card numbers. like "Card Number 3"
for card_number in top_keys:
# Now we use this "key" to retrieve all the ports listed for that card
# Then we verify the port list is valid
port_list = ixia_dict[card_number].keys()
if debug:
print 'Now parsing the following items in the port_list'
print port_list
for port_number in port_list:
if 'Card Number' in port_number:
if not (int(ixia_dict[card_number][port_number]) in range(1,15)):
error_message = 'Card Number: ' + ixia_dict[card_number][port_number] + ' Outside expected range of: 1-14'
return error_message
if 'Port Number' in port_number:
if debug:
print '000000000'
print 'port_number = ', port_number
print 'The port number being tested is:', ixia_dict[card_number][port_number]['Port Number']
# The range function is odd. If you say 1,13 you get 13 numbers
# starting at 1 not zero and it ends at 12 instead of 13.
if not (int(ixia_dict[card_number][port_number]['Port Number']) in range(1,14)):
error_message = 'Port number: ' + port_number + ' on Card: ' \
+ card_number + ' is invalide. Expected to be in the range 1 - 13'
return error_message
else:
if debug:
print 'the following item wil not be parsed:'
print port_number
else:
return 'No variables set. Can not proceed!'
##############
# Open Files #
##############
try:
input_file = open(ixia_dict['Source File'], 'r')
except:
return 'Unable to open the Soucre File'
if generate_output_file:
try:
output_file = open(ixia_dict['Output File'], 'w')
except:
return 'Unable to open the ouptut file!'
########################
# Parse the input_file #
########################
# Method:
#
# 1. Read the file line by line
# 2. Look for section headers
# a. If the line matches one of the section headers we note that down
# b. The section header itself may need re-writing
# c. Increment the section header counter
# 3. Inside the sections search for specific lines
# 4. Read each line and write it to the output file
# 5. When special lines are found re-write them and write to output file
next_header = '# This Script has been generated by Ixia ScriptGen'
next_line = 'default_nothing'
modified_line = 'default_nothing'
section_index = 0
line_index = 0
line_count = 0
break_after = 345
run_to_completion = True
raw_keys = ixia_dict.keys()
card_number_list = []
port_number_list = []
for key in raw_keys:
if 'Card Number' in key:
card_number_list.append(ixia_dict[key]['Card Number'])
if debug:
print 'We are expecting to configure the following cards:', card_number_list
if debug:
print 'Now reading the input file line by line looking for the section headers'
print '-----------------------------------------------------------------------'
print '-----------------------------------------------------------------------'
print '-----------------------------------------------------------------------'
print '-----------------------------------------------------------------------'
for input_line in input_file:
line_count = line_count + 1
if debug and (line_count > break_after) and not run_to_completion:
print 'Breaking here for debuging'
return 0
if debug:
print '******* Line Number:', line_count, ' ********************'
print 'have: "', input_line.strip(), '"'
print 'want Header:', next_header
print ' want Line: "', next_line, '"'
print '******* Line Number:', line_count, ' ********************'
if next_header in input_line:
if debug:
print 'valid header:"', next_header, '"'
# This will give us a numeric index telling us what section we're in
section_index = section_index + 1
if section_index == 1:
next_line = 'if {[ixConnectToTclServer'
if debug:
print 'Found first section header'
print 'next_line updated to:', next_line
if generate_output_file:
output_file.write(input_line)
elif section_index == 2:
modified_line = '######### Chassis list - {' + ixia_dict['Chassis IP Address'] + '} #########\n'
next_line = 'ixConnectToChassis {' + local_chassis_ip_address + '}'
if debug:
print 'Found second section header'
print 'next_line updated to:', next_line
if generate_output_file:
output_file.write(modified_line)
elif section_index == 3:
modified_line = '######### Chassis-' + ixia_dict['Chassis IP Address'] + ' #########\n'
next_line = 'chassis get "' + local_chassis_ip_address + '"'
if debug:
print 'Found second section header'
print 'next_line updated to:', next_line
if generate_output_file:
output_file.write(modified_line)
elif section_index == 4:
next_line = 'set card '
if debug:
print 'Found second section header'
print 'next_line updated to:', next_line
if generate_output_file:
output_file.write(input_line)
elif section_index == 5:
long_card_number = 'Card Number ' + str(card_number_list[0])
raw_port_list = ixia_dict[long_card_number].keys()
port_name_list = []
for key in raw_port_list:
if 'Port' in key:
port_name_list.append(key)
if debug:
print 'building the port_number_list from the port_name_list:'
print port_name_list
print 'ixia_dict[long_card_number]:', ixia_dict[long_card_number]
for port in port_name_list:
if debug:
print 'port:', port
print 'long_card_number:', long_card_number
port_number_list.append(ixia_dict[long_card_number][port]['Port Number'])
if debug:
print 'port_number_list:', port_number_list
if debug:
print 'The ports that will be configured for card:', long_card_number, 'are:', port_number_list
# Example line
"""
######### Chassis-10.4.2.30 Card-3 Port-3 #########
"""
words = input_line.split()
raw_port_number = words[3].split('-')
local_port_number = raw_port_number[1]
modified_line = '######### Chassis-' + ixia_dict['Chassis IP Address'] \
+ ' Card-' + str(card_number_list[0]) + ' Port-' + str(port_number_list[0]) + ' #########\n'
if generate_output_file:
output_file.write(input_line)
next_line = 'set port ' + str(local_port_number)
elif section_index == 6:
if generate_output_file:
output_file.write(input_line)
# This is a strange one. This header is identical to a header we have already seen in section 5
# but if we executed the same code it would mess stuff up so we just look for it to step
# over it.
next_header = '######### Chassis-' + local_chassis_ip_address + ' Card-' + str(local_card_number)
elif section_index == 7:
modified_line = '######### Chassis-' + ixia_dict['Chassis IP Address'] + \
' Card-' + str(card_number_list[0]) + ' Port-' + str(port_number_list[0]) + ' #########\n'
next_line = 'chassis get "' + local_chassis_ip_address + '"'
if generate_output_file:
output_file.write(input_line)
else:
return 'Failure while parsing the section index'
# line we're looking for
elif (next_line in input_line) and (len(input_line) > 2):
if debug:
print 'valid line: "', input_line.strip(), '"'
words = input_line.split()
if debug:
print 'The line was broken into these words:'
print words
if section_index == 1:
if line_index == 0:
raw_target_word = words[2].split(']')
local_chassis_ip_address = raw_target_word[0]
if debug:
print 'The Chassis IP Address found in the original configuraiton file was:', local_chassis_ip_address
next_line = 'ixPuts "Error connecting to Tcl Server ' + local_chassis_ip_address + ' "'
# now we need to rewrite the line and write it to the log file
modified_line = ' if {[ixConnectToTclServer ' + ixia_dict['Chassis IP Address'] + ']} {\n'
line_index = line_index + 1
elif line_index == 1:
modified_line = ' ixPuts "Error connecting to Tcl Server ' + ixia_dict['Chassis IP Address'] + ' "\n'
# we may need to empy the next line variable because we are looking for a section header
#next_line = ''
next_header = '######### Chassis list - {' + local_chassis_ip_address + '} #########'
line_index = line_index + 1
# reset the line index because we are going to the next section
line_index = 0
else:
print 'line_index out of range at value:', line_index
return 'Error in automation! bad line index in section 1'
elif section_index == 2:
if line_index == 0:
modified_line = 'ixConnectToChassis {' + ixia_dict['Chassis IP Address'] + '}\n'
next_line = 'set owner "'
line_index = line_index + 1
elif line_index == 1:
modified_line = 'set owner "' + ixia_dict['Username'] + '"\n'
# going to the next section
next_header = '######### Chassis-' + local_chassis_ip_address + ' #########'
line_index = 0
elif section_index == 3:
if line_index == 0:
modified_line = 'chassis get "' + ixia_dict['Chassis IP Address'] + '"\n'
# going to next section
next_header = '######### Card Type : 10/100/1000 LSM XMVR16 ############'
line_index = 0
elif section_index == 4:
if line_index == 0:
# There could be multiple cards. It's hard to say if there should be more then one
# variable for the card number. I don't think it's neccarry because the system configures
# the cards sequentially so it should not be overwritten.
local_card_number = words[2]
# We take the first element from the card number list.
# After we're done using that information we will delete it from the list
# and then we can use element zero again. (like a stack)
modified_line = 'set card ' + str(card_number_list[0]) + '\n'
#next_header = '######### Chassis-' + local_chassis_ip_address + ' ' + local_card_number
next_header = '######### Chassis-' + local_chassis_ip_address + ' Card-' + local_card_number
line_index = 0
elif section_index == 5:
if line_index == 0:
modified_line = 'set port ' + str(port_number_list[0]) + '\n'
line_index = line_index + 1
next_line = 'port config -MacAddress "'
elif line_index == 1:
long_port_number = 'Port Number ' + str(port_number_list[0])
# The source MAC address "can" be configured if you like
# But this does lead to more complexity about "what" to configure it to
try:
modified_line = next_line + ixia_dict[long_card_number][long_port_number]['Source MAC Address'] + '"\n'
except:
modified_line = input_line
line_index = 0
next_header = '######### Generating streams for all the ports from above #########'
else:
error_message = 'line_index out of range 0-1 for section_index 5!'
return error_message
elif section_index == 6:
error_message = 'Failure. Found a line in section six not expected.'
return error_message
elif section_index == 7:
if line_index == 0:
modified_line = 'chassis get "' + ixia_dict['Chassis IP Address'] + '"\n'
line_index = line_index + 1
#next_line = 'set card ' + local_card_number[0]
next_line = 'set card'
elif line_index == 1:
modified_line = 'set card ' + str(card_number_list[0]) + '\n'
line_index = line_index + 1
next_line = 'set port ' + local_port_number
elif line_index == 2:
modified_line = 'set port ' + str(port_number_list[0]) + '\n'
line_index = line_index + 1
"""
if debug:
print 'Looking for the stream ID itself in this dictionary:'
print ixia_dict
print 'Using these two keys to find it:'
print long_card_number, long_port_number
"""
raw_stream_id = ixia_dict[long_card_number][long_port_number].keys()
"""
if debug:
print 'Sorting through this list of keys:'
print raw_stream_id
"""
stream_id_list = []
for key in raw_stream_id:
if 'Stream ID' in key:
"""
if debug:
print 'Found a Stream ID:', key
"""
stream_id_list.append(key)
"""
elif debug:
print 'This value was not the Stream ID:', key
"""
stream_number_list = []
for stream_id in stream_id_list:
stream_number_list.append(ixia_dict[long_card_number][long_port_number][stream_id])
long_stream_id = stream_id_list[0]
next_line = 'set streamId ' + str(stream_number_list[0]['Stream ID'])
# At this point we're configuring the individual streams
# This will need to recurse itself until done with all the streams
#
# At the end of this mess we will check to see if there are more then one streams listed
# in the stream_numbe_list. If so that means that there are more then on stream that
# needs to be rewritten. To achieve this feat we will do a little cute trick.
# 1. We will remove the first element in the stream_number_list[0]
# 2. Then we will change the line_index = 2 so that this whole routine
# is repeated until there are no more streams to rewrite.
#
# The hopes are that all the streams are actully in this section.
elif line_index == 3:
modified_line = 'set streamId ' + str(stream_number_list[0]['Stream ID']) + '\n'
next_line = '# Stream ' + str(stream_number_list[0]['Stream ID'])
line_index = line_index + 1
elif line_index == 4:
modified_line = '# Stream ' + str(stream_number_list[0]['Stream ID']) + '\n'
next_line = 'stream config -name "'
line_index = line_index + 1
elif line_index == 5:
modified_line = 'stream config -name "' + \
ixia_dict[long_card_number][long_port_number][long_stream_id]['Stream Name'] + '"\n'
next_line = 'stream config -framesize '
line_index = line_index + 1
elif line_index == 6:
if ixia_dict[long_card_number][long_port_number][long_stream_id].has_key('Frame Size'):
modified_line = 'stream config -framesize ' + \
str(ixia_dict[long_card_number][long_port_number][long_stream_id]['Frame Size']) + '\n'
else:
modified_line = input_line
next_line = 'ip config -sourceIpAddr "'
line_index = line_index + 1
elif line_index == 7:
modified_line = 'ip config -sourceIpAddr "' + \
ixia_dict[long_card_number][long_port_number][long_stream_id]['Source IP Address'] + '"\n'
next_line = 'ip config -destIpAddr "'
line_index = line_index + 1
elif line_index == 8:
modified_line = 'ip config -destIpAddr "' + \
ixia_dict[long_card_number][long_port_number][long_stream_id]['Destination IP Address'] + '"\n'
next_line = 'ip config -destMacAddr "'
line_index = line_index + 1
elif line_index == 9:
modified_line = 'ip config -destMacAddr "' + \
ixia_dict[long_card_number][long_port_number][long_stream_id]['Destination MAC Address'] + '"\n'
if len(stream_number_list) > 1:
stream_number = stream_number_list[0]
stream_number_list.remove(stream_number)
line_index = 2
else:
error_message = 'Something went wrong while processing the line_index value. Out of range 1-8'
return error_message
else:
print 'Something should happen here!'
if len(modified_line) > 1:
# Write out the modified line
if generate_output_file:
if debug:
print 'The modified line to be written will be:'
print modified_line
output_file.write(modified_line)
else:
print 'modified line:', modified_line
else:
# Write out the original line
if generate_output_file:
output_file.write(input_line)
else:
"""
if debug:
print 'This is the line that would have been written out'
print input_line
print '-----------------------------------------------------------------------'
"""
if debug:
print 'The ending section index is:', section_index
print 'The ending line index is:', line_index
# Clean up
input_file.close()
if generate_output_file:
print 'Closing the output file:', output_file
output_file.close()
else:
if debug:
print 'This is where we would have closed the output file'
return 0
def rewrite_ixia_config_2(ixia_dict):
# updated for setting auto increment values. used for generating 8k traffic
# Due to a change in the whitepsace of the config the method for finding the next line
# must be changed. Instead of looking for the complete string the line must be sliced
# so the whitespace is removed. Then the list must have the varibles removed from it
# after that the list object can be compared with another list object.
# This requires rewrite of all the expected lines to be lists.
# most lines can be split on whitespace. The lines containing MAC addresses have
# whitepace where the collons shoudl be. So the comparing logig should only
# compare elements expected to elements read. That way it will stop reading before
# it gets to the MAC.
"""
This function opens an IXIA.tcl script and rewrites the IP Address and other values to make
the script send traffic to any DUT
All values MUST be set in this dictionary or the file can not be rewritten correctly!
After this method completes it will write an ouptut file or if set to "none" it will
return the whole configuration as a very long string which can then be split and fed
into the IXIA via CLI
ixia_dict{
Chassis IP Address:10.4.2.30
# The IP of the IXIA itself
Username:jalfrey
# Username that "owns" the ports that will send traffic
Source File
# This is the source file it is read from
# This needs to either be a full path or relative to current directory path
Output File
# This can be set to "none" and the method will return the whole configuration
# Or if it is set it will write the file out to disk
Card Number_X:
# If there are multiple cards then there will be multiple dictionaries.
# For my configuration I use card 3 to the dictionary will be called
# "Card Number 3"
Dictionary {
Card Number
# Card which port lives on. Same information contained in the dictionary
# name but just as the number "3"
Port Number X:
# Port to be configured. There will be one key per port
Dictionary {
Port Number
# This is the port number on the IXIA itself (physical port)
Source MAC Address
# Can be set or left "default" which will leave the config unchanged or null ''
Destination MAC Address
# This is the MAC of what the IXIA is directly connected to
# In my case it's a Cisco Router
Stream ID X:
# This is the Stream ID. There is one ID per stream configured
Dictionary: {
Stream ID:1
# Stream numeric ID. Matches "Stream ID X" value X
0 - Stream Name
# Optional. If value is Null nothing will be set
# whatever was there will be left there
1 - Source IP Address
# used for incrementing the source IP
2 - Source IP Mask
3 - Source IP Address Mode
# Source on IXIA side
4 - Source IP Address Repeat Count
# could be ipIncrHost or ...
5 - Source Class
# when ipIncrHost enabled this option is ignored
6 - Destination IP Address
7 - Destination IP Mask
8 - Destination IP Address Mode
9 - Destination IP Address Repeat Count
10 - Destination Class
# Where the traffic should go. In my case that's the SSX (DUT)
11 - Destination MAC Address
# This should be the same as the "Destination MAC Address" found above
# But clearly it can be set differently but I'm not sure why
# Maybe for testing through a Hub?
}
}
}
"""
debug = True
# Configuration will overwrite this value
generate_output_file = False
###############################
# Variable Validation Section #
###############################
if len(ixia_dict) > 0:
top_keys = ixia_dict.keys()
if debug:
print '------------------------------------'
print 'The top keys extracted were:'
for key in top_keys:
print key, ':', ixia_dict[key]
print '------------------------------------'
# IP Address
if ixia_dict.has_key('Chassis IP Address'):
if validIP(ixia_dict['Chassis IP Address']):
top_keys.remove('Chassis IP Address')
if debug:
print 'Chassis IP is valid'
else:
error_message = 'Invalid IP address for the chassis: ' + ixia_dict.has_key('Chassis IP Address')
return error_message
# Username
if ixia_dict.has_key('Username'):
if (len(ixia_dict['Username']) > 0):
top_keys.remove('Username')
if debug:
print 'Username is valid'
else:
error_message = 'No Username value provided'
return error_message
# Source File
if ixia_dict.has_key('Source File'):
if (ixia_dict['Source File'] == ''):
return 'No source file value set'
if os.path.exists(ixia_dict['Source File']):
top_keys.remove('Source File')
if debug:
print 'Source filename is valid'
else:
return 'unable to locate the source file!'
# Output File
# IF the length is zero then no file is generated
# if it is set to "none" then no file is generated
# Otherwise whatever the filename is it's generated with that
# Since the filename could be mostly anything we don't validate it
if ixia_dict.has_key('Output File'):
# Here we change the case to lowercase so that we can compare the string once
# Instead of testing to see if it's formatted like 'None', 'NONE', etc.
output_filename = ixia_dict['Output File'].lower()
if output_filename == 'none':
generate_output_file = False
if debug:
print 'No output file will be generate'
else:
generate_output_file = True
if debug:
print 'Output file will be generated'
top_keys.remove('Output File')
if debug:
print 'Output filename is valid'
if debug:
print 'At this point the top_keys should only contain card numbers'
print top_keys
# At this point the top_keys dictionary should only contain entries
# of card numbers. like "Card Number 3"
for card_number in top_keys:
# Now we use this "key" to retrieve all the ports listed for that card
# Then we verify the port list is valid
port_list = ixia_dict[card_number].keys()
if debug:
print 'Now parsing the following items in the port_list'
print port_list
for port_number in port_list:
if 'Card Number' in port_number:
if not (int(ixia_dict[card_number][port_number]) in range(1,15)):
error_message = 'Card Number: ' + ixia_dict[card_number][port_number] + ' Outside expected range of: 1-14'
return error_message
if 'Port Number' in port_number:
if debug:
print '000000000'
print 'port_number = ', port_number
print 'The port number being tested is:', ixia_dict[card_number][port_number]['Port Number']
# The range function is odd. If you say 1,13 you get 13 numbers
# starting at 1 not zero and it ends at 12 instead of 13.
if not (int(ixia_dict[card_number][port_number]['Port Number']) in range(1,14)):
error_message = 'Port number: ' + port_number + ' on Card: ' \
+ card_number + ' is invalide. Expected to be in the range 1 - 13'
return error_message
else:
if debug:
print 'the following item wil not be parsed:'
print port_number
else:
return 'No variables set. Can not proceed!'
##############
# Open Files #
##############
try:
input_file = open(ixia_dict['Source File'], 'r')
except:
return 'Unable to open the Soucre File'
if generate_output_file:
try:
output_file = open(ixia_dict['Output File'], 'w')
except:
return 'Unable to open the ouptut file!'
########################
# Parse the input_file #
########################
# Method:
#
# 1. Read the file line by line
# 2. Look for section headers
# a. If the line matches one of the section headers we note that down
# b. The section header itself may need re-writing
# c. Increment the section header counter
# 3. Inside the sections search for specific lines
# 4. Read each line and write it to the output file
# 5. When special lines are found re-write them and write to output file
next_header = '# This Script has been generated by Ixia ScriptGen'
next_line = 'default_nothing'
modified_line = 'default_nothing'
section_index = 0
line_index = 0
line_count = 1
########################
## Used for Debugging ##
run_to_completion = True
break_after = 57
########################
raw_keys = ixia_dict.keys()
card_number_list = []
port_number_list = []
next_line_cache = ''
for key in raw_keys:
if 'Card Number' in key:
card_number_list.append(ixia_dict[key]['Card Number'])
if debug:
print 'We are expecting to configure the following cards:', card_number_list
if debug:
print 'Now reading the input file line by line looking for the section headers'
print '-----------------------------------------------------------------------'
print '-----------------------------------------------------------------------'
print '-----------------------------------------------------------------------'
print '-----------------------------------------------------------------------'
for input_line in input_file:
"""
if debug and (line_count >= break_after) and not run_to_completion:
print 'Breaking on line:', line_count ,'for debuging'
return 0
line_count = line_count + 1
"""
###################
## regex rewrite ##
###################
local_debug = False
if not (next_line_cache == next_line):
if debug:
print 'the next line we are looking for has changed. Regex will be regenerated'
next_line_cache = next_line
#Due to the searching bug we now need to change the next_line variable into a regex here
# Logic
# chop it into words
            # append the \s* between the words. That means any number of spaces in regex
# then compile it into a regex pattern so we can search using it.
if local_debug:
print 'reworking the next line to become regex'
print next_line
next_line_words = next_line.split()
if local_debug:
print 'the split words are:', next_line_words
raw_regex_next_line = ''
#word = ''
if local_debug:
print '*' * 40
for raw_word in next_line_words:
word = ''
# regex does not like some characters and will fail!
# we need to "escape" them with an extra slash
if local_debug:
print 'looking for invalid characters in word'
for char in raw_word:
if local_debug:
print 'working on char:', char
if char in ['[',']','\\','/','{','}']:
if local_debug:
print 'found a bad char:', char
word = word + '[\\' + char + ']'
else:
if local_debug:
print 'found a regular char:', char
word = word + char
if local_debug:
print 'word is now:', word
print '*' * 40
if local_debug:
print 'working on word:', raw_word
raw_regex_next_line = raw_regex_next_line + word + '\s*'
if local_debug:
print 'the raw regex is now:', raw_regex_next_line
# now finally at the end of the statement we need a don't care
# we will only look for the first part of the statement
# the end can be anything
raw_regex_next_line = raw_regex_next_line + '.*'
if local_debug:
print 'the completed raw regex is:', raw_regex_next_line
next_line_regex = re.compile(raw_regex_next_line)
#######################
## end regex rewrite ##
#######################
if debug:
print '******* Line Number:', line_count, ' ********************'
print 'have: "', input_line.strip(), '"'
print 'want Header:', next_header
print ' want Line: "', next_line, '"'
try:
print ' regex: "', raw_regex_next_line, '"'
except:
pass
print '******* Line Number:', line_count, ' ********************'
###############################
        ## Do the regex matching here ##
###############################
#There seems to be no if regex.match logic available
#so we need to do the logic here so we can use it for a branch later
match = re.search(next_line_regex, input_line)
if next_header in input_line:
if debug:
print 'valid section header:', input_line.strip()
# This will give us a numeric index telling us what section we're in
section_index = section_index + 1
if section_index == 1:
next_line = 'if {[ixConnectToTclServer'
if debug:
print 'Found first section header'
print 'next_line updated to:', next_line
if generate_output_file:
output_file.write(input_line)
elif section_index == 2:
modified_line = '######### Chassis list - {' + ixia_dict['Chassis IP Address'] + '} #########\n'
next_line = 'ixConnectToChassis {' + local_chassis_ip_address + '}'
if debug:
print 'Found second section header'
print 'next_line updated to:', next_line
if generate_output_file:
output_file.write(modified_line)
elif section_index == 3:
modified_line = '######### Chassis-' + ixia_dict['Chassis IP Address'] + ' #########\n'
next_line = 'chassis get "' + local_chassis_ip_address + '"'
if debug:
print 'Found third section header'
print 'next_line updated to:', next_line
if generate_output_file:
output_file.write(modified_line)
elif section_index == 4:
next_line = 'set card '
if debug:
print 'Found fourth section header'
print 'next_line updated to:', next_line
if generate_output_file:
output_file.write(input_line)
elif section_index == 5:
if debug:
print 'found fith section header'
long_card_number = 'Card Number ' + str(card_number_list[0])
raw_port_list = ixia_dict[long_card_number].keys()
port_name_list = []
for key in raw_port_list:
if 'Port' in key:
port_name_list.append(key)
if debug:
print 'building the port_number_list from the port_name_list:'
print port_name_list
print 'ixia_dict[long_card_number]:', ixia_dict[long_card_number]
for port in port_name_list:
if debug:
print 'port:', port
print 'long_card_number:', long_card_number
port_number_list.append(ixia_dict[long_card_number][port]['Port Number'])
if debug:
print 'port_number_list:', port_number_list
if debug:
print 'The ports that will be configured for card:', long_card_number, 'are:', port_number_list
# Example line
"""
######### Chassis-10.4.2.30 Card-3 Port-3 #########
"""
words = input_line.split()
raw_port_number = words[3].split('-')
local_port_number = raw_port_number[1]
modified_line = '######### Chassis-' + ixia_dict['Chassis IP Address'] \
+ ' Card-' + str(card_number_list[0]) + ' Port-' + str(port_number_list[0]) + ' #########\n'
if generate_output_file:
output_file.write(modified_line)
next_line = 'set port ' + str(local_port_number)
elif section_index == 6:
if debug:
print 'found sixth section header'
if generate_output_file:
output_file.write(input_line)
# This is a strange one. This header is identical to a header we have already seen in section 5
# but if we executed the same code it would mess stuff up so we just look for it to step
# over it.
next_header = '######### Chassis-' + local_chassis_ip_address + ' Card-' + str(local_card_number)
elif section_index == 7:
if debug:
print 'found seventh section header. (final)'
modified_line = '######### Chassis-' + ixia_dict['Chassis IP Address'] + \
' Card-' + str(card_number_list[0]) + ' Port-' + str(port_number_list[0]) + ' #########\n'
next_line = 'chassis get "' + local_chassis_ip_address + '"'
if generate_output_file:
output_file.write(input_line)
else:
return 'Failure while parsing the section index'
elif match:
"""
The IXIA does not care about the size of the whitespace. Some .tcl files will have different amount
of space between the variable names. The old method for searching for the lines to replace was:
if the line we were looking for was:
"filter config -captureTriggerPattern anyPattern"
the value in that line we would want to change would be:
"anyPattern"
So the line minus the variable we want to change would be:
"filter config -captureTriggerPattern "
We were checking to see if that partial line was part of the line we wanted to change.
The problem with this is the spacing of the original line in the tcl script could change. Say like
"filter config -captureTriggerPattern "
That would cause the line to not be found.
To work around this problem the following will be done:
            1. The string we are looking for which is called next_line will be changed into a regular expression
2. the file will be searched using regular expressions
"""
# line we're looking for
#elif (next_line in input_line) and (len(input_line) > 2):
            # Changed order in statement so length is evaluated first. Faster
if debug:
print 'Found a next_line: "', input_line.strip(), '"'
words = input_line.split()
if debug:
print 'The line was broken into these words:'
print words
if section_index == 1:
if debug:
print 'now in section 1'
if line_index == 0:
raw_target_word = words[2].split(']')
local_chassis_ip_address = raw_target_word[0]
if debug:
print 'The Chassis IP Address found in the original configuraiton file was:', local_chassis_ip_address
#next_line = 'errorMsg "Error connecting to Tcl Server ' + local_chassis_ip_address + ' "'
next_line = 'errorMsg "Error connecting to Tcl Server 127.0.0.1 "'
# now we need to rewrite the line and write it to the log file
modified_line = ' if {[ixConnectToTclServer ' + ixia_dict['Chassis IP Address'] + ']} {\n'
line_index = line_index + 1
elif line_index == 1:
modified_line = ' errorMsg "Error connecting to Tcl Server ' + ixia_dict['Chassis IP Address'] + ' "\n'
                    # we may need to empty the next line variable because we are looking for a section header
#next_line = ''
next_header = '######### Chassis list - {' + local_chassis_ip_address + '} #########'
line_index = line_index + 1
# reset the line index because we are going to the next section
line_index = 0
else:
print 'line_index out of range at value:', line_index
return 'Error in automation! bad line index in section 1'
elif section_index == 2:
if line_index == 0:
modified_line = 'ixConnectToChassis {' + ixia_dict['Chassis IP Address'] + '}\n'
next_line = 'set owner "'
line_index = line_index + 1
elif line_index == 1:
modified_line = 'set owner "' + ixia_dict['Username'] + '"\n'
# going to the next section
next_header = '######### Chassis-' + local_chassis_ip_address + ' #########'
line_index = 0
elif section_index == 3:
if line_index == 0:
modified_line = 'chassis get "' + ixia_dict['Chassis IP Address'] + '"\n'
# going to next section
#next_header = '######### Card Type : 10/100/1000 LSM XMVR16 ############'
next_header = '######### Card Type : 10/100/1000 LSM XMVDC16 ############'
line_index = 0
elif section_index == 4:
if line_index == 0:
# There could be multiple cards. It's hard to say if there should be more then one
                    # variable for the card number. I don't think it's necessary because the system configures
# the cards sequentially so it should not be overwritten.
local_card_number = words[2]
# We take the first element from the card number list.
# After we're done using that information we will delete it from the list
# and then we can use element zero again. (like a stack)
modified_line = 'set card ' + str(card_number_list[0]) + '\n'
#next_header = '######### Chassis-' + local_chassis_ip_address + ' ' + local_card_number
#next_header = '######### Chassis-' + local_chassis_ip_address + ' Card-' + local_card_number
next_header = '######### Chassis-127.0.0.1' + ' Card-' + local_card_number
line_index = 0
elif section_index == 5:
if line_index == 0:
modified_line = 'set port ' + str(port_number_list[0]) + '\n'
line_index = line_index + 1
next_line = 'port config -MacAddress "'
elif line_index == 1:
long_port_number = 'Port Number ' + str(port_number_list[0])
# The source MAC address "can" be configured if you like
# But this does lead to more complexity about "what" to configure it to
try:
modified_line = next_line + ixia_dict[long_card_number][long_port_number]['Source MAC Address'] + '"\n'
except:
modified_line = input_line
line_index = 0
next_header = '######### Generating streams for all the ports from above #########'
else:
error_message = 'line_index out of range 0-1 for section_index 5!'
return error_message
elif section_index == 6:
error_message = 'Failure. Found a line in section six not expected.'
return error_message
elif section_index == 7:
if line_index == 0:
modified_line = 'chassis get "' + ixia_dict['Chassis IP Address'] + '"\n'
line_index = line_index + 1
#next_line = 'set card ' + local_card_number[0]
next_line = 'set card'
elif line_index == 1:
modified_line = 'set card ' + str(card_number_list[0]) + '\n'
line_index = line_index + 1
next_line = 'set port ' + local_port_number
elif line_index == 2:
modified_line = 'set port ' + str(port_number_list[0]) + '\n'
line_index = line_index + 1
"""
if debug:
print 'Looking for the stream ID itself in this dictionary:'
print ixia_dict
print 'Using these two keys to find it:'
print long_card_number, long_port_number
"""
raw_stream_id = ixia_dict[long_card_number][long_port_number].keys()
"""
if debug:
print 'Sorting through this list of keys:'
print raw_stream_id
"""
stream_id_list = []
for key in raw_stream_id:
if 'Stream ID' in key:
"""
if debug:
print 'Found a Stream ID:', key
"""
stream_id_list.append(key)
"""
elif debug:
print 'This value was not the Stream ID:', key
"""
stream_number_list = []
for stream_id in stream_id_list:
stream_number_list.append(ixia_dict[long_card_number][long_port_number][stream_id])
long_stream_id = stream_id_list[0]
next_line = 'set streamId ' + str(stream_number_list[0]['Stream ID'])
# At this point we're configuring the individual streams
# This will need to recurse itself until done with all the streams
#
# At the end of this mess we will check to see if there are more then one streams listed
# in the stream_numbe_list. If so that means that there are more then on stream that
# needs to be rewritten. To achieve this feat we will do a little cute trick.
# 1. We will remove the first element in the stream_number_list[0]
# 2. Then we will change the line_index = 2 so that this whole routine
# is repeated until there are no more streams to rewrite.
#
# The hopes are that all the streams are actully in this section.
elif line_index == 3:
modified_line = 'set streamId ' + str(stream_number_list[0]['Stream ID']) + '\n'
next_line = '# Stream ' + str(stream_number_list[0]['Stream ID'])
line_index = line_index + 1
elif line_index == 4:
modified_line = '# Stream ' + str(stream_number_list[0]['Stream ID']) + '\n'
next_line = 'stream config -name "'
line_index = line_index + 1
elif line_index == 5:
modified_line = 'stream config -name "' + \
ixia_dict[long_card_number][long_port_number][long_stream_id]['Stream Name'] + '"\n'
next_line = 'stream config -framesize '
line_index = line_index + 1
elif line_index == 6:
if ixia_dict[long_card_number][long_port_number][long_stream_id].has_key('Frame Size'):
modified_line = 'stream config -framesize ' + \
str(ixia_dict[long_card_number][long_port_number][long_stream_id]['Frame Size']) + '\n'
else:
modified_line = input_line
next_line = 'ip config -sourceIpAddr "'
line_index = line_index + 1
elif line_index == 7:
modified_line = 'ip config -sourceIpAddr "' + \
ixia_dict[long_card_number][long_port_number][long_stream_id]['Source IP Address'] + '"\n'
next_line = 'ip config -destIpAddr "'
line_index = line_index + 1
elif line_index == 8:
modified_line = 'ip config -destIpAddr "' + \
ixia_dict[long_card_number][long_port_number][long_stream_id]['Destination IP Address'] + '"\n'
next_line = 'ip config -destMacAddr "'
line_index = line_index + 1
elif line_index == 9:
modified_line = 'ip config -destMacAddr "' + \
ixia_dict[long_card_number][long_port_number][long_stream_id]['Destination MAC Address'] + '"\n'
if len(stream_number_list) > 1:
stream_number = stream_number_list[0]
stream_number_list.remove(stream_number)
line_index = 2
else:
error_message = 'Something went wrong while processing the line_index value. Out of range 1-8'
return error_message
else:
print 'Something should happen here!'
if len(modified_line) > 1:
# Write out the modified line
if generate_output_file:
if debug:
print 'The modified line to be written will be:'
print modified_line
output_file.write(modified_line)
else:
print 'modified line:', modified_line
else:
# Write out the original line
if generate_output_file:
output_file.write(input_line)
else:
if debug:
print 'This is the line that would have been written out'
print input_line.strip()
print '-----------------------------------------------------------------------'
#############################
## Global debug breakpoint ##
#############################
if debug and (line_count >= break_after) and not run_to_completion:
print 'Breaking on line:', line_count ,'for debuging'
return 0
line_count = line_count + 1
####################
## end breakpoint ##
####################
if debug:
print 'The ending section index is:', section_index
print 'The ending line index is:', line_index
# Clean up
input_file.close()
if generate_output_file:
print 'Closing the output file:', output_file
output_file.close()
else:
if debug:
print 'This is where we would have closed the output file'
return 0
def rewrite_ixia_config_3(ixia_dict):
# updated for setting auto increment values. used for generating 8k traffic
    # Due to a change in the whitespace of the config the method for finding the next line
    # must be changed. Instead of looking for the complete string the line must be sliced
    # so the whitespace is removed. Then the list must have the variables removed from it
    # after that the list object can be compared with another list object.
    # This requires rewrite of all the expected lines to be lists.
    # most lines can be split on whitespace. The lines containing MAC addresses have
    # whitespace where the colons should be. So the comparing logic should only
    # compare elements expected to elements read. That way it will stop reading before
    # it gets to the MAC.
"""
This function opens an IXIA.tcl script and rewrites the IP Address and other values to make
the script send traffic to any DUT
All values MUST be set in this dictionary or the file can not be rewritten correctly!
    After this method completes it will write an output file or if set to "none" it will
return the whole configuration as a very long string which can then be split and fed
into the IXIA via CLI
ixia_dict{
Chassis IP Address:10.4.2.30
# The IP of the IXIA itself
Username:jalfrey
# Username that "owns" the ports that will send traffic
Source File
# This is the source file it is read from
# This needs to either be a full path or relative to current directory path
Output File
# This can be set to "none" and the method will return the whole configuration
# Or if it is set it will write the file out to disk
Card Number_X:
# If there are multiple cards then there will be multiple dictionaries.
# For my configuration I use card 3 to the dictionary will be called
# "Card Number 3"
Dictionary {
Card Number
# Card which port lives on. Same information contained in the dictionary
# name but just as the number "3"
Port Number X:
# Port to be configured. There will be one key per port
Dictionary {
Port Number
# This is the port number on the IXIA itself (physical port)
Source MAC Address
# Can be set or left "default" which will leave the config unchanged or null ''
Destination MAC Address
# This is the MAC of what the IXIA is directly connected to
# In my case it's a Cisco Router
Stream ID X:
# This is the Stream ID. There is one ID per stream configured
Dictionary: {
Stream ID:1
# Stream numeric ID. Matches "Stream ID X" value X
0 - Stream Name
# Optional. If value is Null nothing will be set
# whatever was there will be left there
Frame Size
1 - Source IP Address
# used for incrementing the source IP
2 - Source IP Mask
3 - Source IP Address Mode
# Source on IXIA side
4 - Source IP Address Repeat Count
# could be ipIncrHost or ...
5 - Source Class
# when ipIncrHost enabled this option is ignored
6 - Destination IP Address
7 - Destination IP Mask
8 - Destination IP Address Mode
9 - Destination IP Address Repeat Count
10 - Destination Class
# Where the traffic should go. In my case that's the SSX (DUT)
11 - Destination MAC Address
# This should be the same as the "Destination MAC Address" found above
# But clearly it can be set differently but I'm not sure why
# Maybe for testing through a Hub?
}
}
}
"""
debug = True
# Configuration will overwrite this value
generate_output_file = False
###############################
# Variable Validation Section #
###############################
if len(ixia_dict) > 0:
top_keys = ixia_dict.keys()
if debug:
print '------------------------------------'
print 'The top keys extracted were:'
for key in top_keys:
print key, ':', ixia_dict[key]
print '------------------------------------'
# IP Address
if ixia_dict.has_key('Chassis IP Address'):
if validIP(ixia_dict['Chassis IP Address']):
top_keys.remove('Chassis IP Address')
if debug:
print 'Chassis IP is valid'
else:
error_message = 'Invalid IP address for the chassis: ' + ixia_dict.has_key('Chassis IP Address')
return error_message
# Username
if ixia_dict.has_key('Username'):
if (len(ixia_dict['Username']) > 0):
top_keys.remove('Username')
if debug:
print 'Username is valid'
else:
error_message = 'No Username value provided'
return error_message
# Source File
if ixia_dict.has_key('Source File'):
if (ixia_dict['Source File'] == ''):
return 'No source file value set'
if os.path.exists(ixia_dict['Source File']):
top_keys.remove('Source File')
if debug:
print 'Source filename is valid'
else:
return 'unable to locate the source file!'
# Output File
# IF the length is zero then no file is generated
# if it is set to "none" then no file is generated
# Otherwise whatever the filename is it's generated with that
# Since the filename could be mostly anything we don't validate it
if ixia_dict.has_key('Output File'):
# Here we change the case to lowercase so that we can compare the string once
# Instead of testing to see if it's formatted like 'None', 'NONE', etc.
output_filename = ixia_dict['Output File'].lower()
if output_filename == 'none':
generate_output_file = False
if debug:
print 'No output file will be generate'
else:
generate_output_file = True
if debug:
print 'Output file will be generated'
top_keys.remove('Output File')
if debug:
print 'Output filename is valid'
if debug:
print 'At this point the top_keys should only contain card numbers'
print top_keys
# At this point the top_keys dictionary should only contain entries
# of card numbers. like "Card Number 3"
for card_number in top_keys:
# Now we use this "key" to retrieve all the ports listed for that card
# Then we verify the port list is valid
port_list = ixia_dict[card_number].keys()
if debug:
print 'Now parsing the following items in the port_list'
print port_list
for port_number in port_list:
if 'Card Number' in port_number:
if not (int(ixia_dict[card_number][port_number]) in range(1,15)):
error_message = 'Card Number: ' + ixia_dict[card_number][port_number] + ' Outside expected range of: 1-14'
return error_message
if 'Port Number' in port_number:
if debug:
print '000000000'
print 'port_number = ', port_number
print 'The port number being tested is:', ixia_dict[card_number][port_number]['Port Number']
# The range function is odd. If you say 1,13 you get 13 numbers
# starting at 1 not zero and it ends at 12 instead of 13.
if not (int(ixia_dict[card_number][port_number]['Port Number']) in range(1,14)):
error_message = 'Port number: ' + port_number + ' on Card: ' \
+ card_number + ' is invalide. Expected to be in the range 1 - 13'
return error_message
else:
if debug:
print 'the following item wil not be parsed:'
print port_number
else:
return 'No variables set. Can not proceed!'
##############
# Open Files #
##############
try:
input_file = open(ixia_dict['Source File'], 'r')
except:
return 'Unable to open the Soucre File'
if generate_output_file:
try:
output_file = open(ixia_dict['Output File'], 'w')
except:
return 'Unable to open the ouptut file!'
########################
# Parse the input_file #
########################
# Method:
#
# 1. Read the file line by line
# 2. Look for section headers
# a. If the line matches one of the section headers we note that down
# b. The section header itself may need re-writing
# c. Increment the section header counter
# 3. Inside the sections search for specific lines
# 4. Read each line and write it to the output file
# 5. When special lines are found re-write them and write to output file
next_header = '# This Script has been generated by Ixia ScriptGen'
next_line = 'default_nothing'
modified_line = 'default_nothing'
section_index = 0
line_index = 0
line_count = 1
########################
## Used for Debugging ##
run_to_completion = True
break_after = 57
########################
raw_keys = ixia_dict.keys()
card_number_list = []
port_number_list = []
next_line_cache = ''
for key in raw_keys:
if 'Card Number' in key:
card_number_list.append(ixia_dict[key]['Card Number'])
if debug:
print 'We are expecting to configure the following cards:', card_number_list
if debug:
print 'Now reading the input file line by line looking for the section headers'
print '-----------------------------------------------------------------------'
print '-----------------------------------------------------------------------'
print '-----------------------------------------------------------------------'
print '-----------------------------------------------------------------------'
for input_line in input_file:
"""
if debug and (line_count >= break_after) and not run_to_completion:
print 'Breaking on line:', line_count ,'for debuging'
return 0
line_count = line_count + 1
"""
###################
## regex rewrite ##
###################
local_debug = False
if not (next_line_cache == next_line):
if debug:
print 'the next line we are looking for has changed. Regex will be regenerated'
next_line_cache = next_line
#Due to the searching bug we now need to change the next_line variable into a regex here
# Logic
# chop it into words
            # append the \s* between the words. That means any number of spaces in regex
# then compile it into a regex pattern so we can search using it.
if local_debug:
print 'reworking the next line to become regex'
print next_line
next_line_words = next_line.split()
if local_debug:
print 'the split words are:', next_line_words
raw_regex_next_line = ''
#word = ''
if local_debug:
print '*' * 40
for raw_word in next_line_words:
word = ''
# regex does not like some characters and will fail!
# we need to "escape" them with an extra slash
if local_debug:
print 'looking for invalid characters in word'
for char in raw_word:
if local_debug:
print 'working on char:', char
if char in ['[',']','\\','/','{','}']:
if local_debug:
print 'found a bad char:', char
word = word + '[\\' + char + ']'
else:
if local_debug:
print 'found a regular char:', char
word = word + char
if local_debug:
print 'word is now:', word
print '*' * 40
if local_debug:
print 'working on word:', raw_word
raw_regex_next_line = raw_regex_next_line + word + '\s*'
if local_debug:
print 'the raw regex is now:', raw_regex_next_line
# now finally at the end of the statement we need a don't care
# we will only look for the first part of the statement
# the end can be anything
raw_regex_next_line = raw_regex_next_line + '.*'
if local_debug:
print 'the completed raw regex is:', raw_regex_next_line
next_line_regex = re.compile(raw_regex_next_line)
#######################
## end regex rewrite ##
#######################
if debug:
print '******* Line Number:', line_count, ' ********************'
print 'have: "', input_line.strip(), '"'
print 'want Header:', next_header
print ' want Line: "', next_line, '"'
try:
print ' regex: "', raw_regex_next_line, '"'
except:
pass
print '******* Line Number:', line_count, ' ********************'
###############################
        ## Do the regex matching here ##
###############################
#There seems to be no if regex.match logic available
#so we need to do the logic here so we can use it for a branch later
match = re.search(next_line_regex, input_line)
if next_header in input_line:
if debug:
print 'valid section header:', input_line.strip()
# This will give us a numeric index telling us what section we're in
section_index = section_index + 1
if section_index == 1:
next_line = 'if {[ixConnectToTclServer'
if debug:
print 'Found first section header'
print 'next_line updated to:', next_line
if generate_output_file:
output_file.write(input_line)
elif section_index == 2:
modified_line = '######### Chassis list - {' + ixia_dict['Chassis IP Address'] + '} #########\n'
next_line = 'ixConnectToChassis {' + local_chassis_ip_address + '}'
if debug:
print 'Found second section header'
print 'next_line updated to:', next_line
if generate_output_file:
output_file.write(modified_line)
elif section_index == 3:
modified_line = '######### Chassis-' + ixia_dict['Chassis IP Address'] + ' #########\n'
next_line = 'chassis get "' + local_chassis_ip_address + '"'
if debug:
print 'Found third section header'
print 'next_line updated to:', next_line
if generate_output_file:
output_file.write(modified_line)
elif section_index == 4:
next_line = 'set card '
if debug:
print 'Found fourth section header'
print 'next_line updated to:', next_line
if generate_output_file:
output_file.write(input_line)
elif section_index == 5:
if debug:
print 'found fith section header'
long_card_number = 'Card Number ' + str(card_number_list[0])
raw_port_list = ixia_dict[long_card_number].keys()
port_name_list = []
for key in raw_port_list:
if 'Port' in key:
port_name_list.append(key)
if debug:
print 'building the port_number_list from the port_name_list:'
print port_name_list
print 'ixia_dict[long_card_number]:', ixia_dict[long_card_number]
for port in port_name_list:
if debug:
print 'port:', port
print 'long_card_number:', long_card_number
port_number_list.append(ixia_dict[long_card_number][port]['Port Number'])
if debug:
print 'port_number_list:', port_number_list
if debug:
print 'The ports that will be configured for card:', long_card_number, 'are:', port_number_list
# Example line
"""
######### Chassis-10.4.2.30 Card-3 Port-3 #########
"""
words = input_line.split()
raw_port_number = words[3].split('-')
local_port_number = raw_port_number[1]
modified_line = '######### Chassis-' + ixia_dict['Chassis IP Address'] \
+ ' Card-' + str(card_number_list[0]) + ' Port-' + str(port_number_list[0]) + ' #########\n'
if generate_output_file:
output_file.write(modified_line)
next_line = 'set port ' + str(local_port_number)
elif section_index == 6:
if debug:
print 'found sixth section header'
if generate_output_file:
output_file.write(input_line)
# This is a strange one. This header is identical to a header we have already seen in section 5
# but if we executed the same code it would mess stuff up so we just look for it to step
# over it.
next_header = '######### Chassis-' + local_chassis_ip_address + ' Card-' + str(local_card_number)
elif section_index == 7:
if debug:
print 'found seventh section header. (final)'
modified_line = '######### Chassis-' + ixia_dict['Chassis IP Address'] + \
' Card-' + str(card_number_list[0]) + ' Port-' + str(port_number_list[0]) + ' #########\n'
next_line = 'chassis get "' + local_chassis_ip_address + '"'
if generate_output_file:
output_file.write(input_line)
else:
return 'Failure while parsing the section index'
elif match:
"""
The IXIA does not care about the size of the whitespace. Some .tcl files will have different amount
of space between the variable names. The old method for searching for the lines to replace was:
if the line we were looking for was:
"filter config -captureTriggerPattern anyPattern"
the value in that line we would want to change would be:
"anyPattern"
So the line minus the variable we want to change would be:
"filter config -captureTriggerPattern "
We were checking to see if that partial line was part of the line we wanted to change.
The problem with this is the spacing of the original line in the tcl script could change. Say like
"filter config -captureTriggerPattern "
That would cause the line to not be found.
To work around this problem the following will be done:
            1. The string we are looking for which is called next_line will be changed into a regular expression
2. the file will be searched using regular expressions
"""
# line we're looking for
#elif (next_line in input_line) and (len(input_line) > 2):
            # Changed order in statement so length is evaluated first. Faster
if debug:
print 'Found a next_line: "', input_line.strip(), '"'
words = input_line.split()
if debug:
print 'The line was broken into these words:'
print words
if section_index == 1:
if debug:
print 'now in section 1'
if line_index == 0:
raw_target_word = words[2].split(']')
local_chassis_ip_address = raw_target_word[0]
if debug:
print 'The Chassis IP Address found in the original configuraiton file was:', local_chassis_ip_address
#next_line = 'errorMsg "Error connecting to Tcl Server ' + local_chassis_ip_address + ' "'
next_line = 'errorMsg "Error connecting to Tcl Server 127.0.0.1 "'
# now we need to rewrite the line and write it to the log file
modified_line = ' if {[ixConnectToTclServer ' + ixia_dict['Chassis IP Address'] + ']} {\n'
line_index = line_index + 1
elif line_index == 1:
modified_line = ' errorMsg "Error connecting to Tcl Server ' + ixia_dict['Chassis IP Address'] + ' "\n'
                    # we may need to empty the next line variable because we are looking for a section header
#next_line = ''
next_header = '######### Chassis list - {' + local_chassis_ip_address + '} #########'
line_index = line_index + 1
# reset the line index because we are going to the next section
line_index = 0
else:
print 'line_index out of range at value:', line_index
return 'Error in automation! bad line index in section 1'
elif section_index == 2:
if line_index == 0:
modified_line = 'ixConnectToChassis {' + ixia_dict['Chassis IP Address'] + '}\n'
next_line = 'set owner "'
line_index = line_index + 1
elif line_index == 1:
modified_line = 'set owner "' + ixia_dict['Username'] + '"\n'
# going to the next section
next_header = '######### Chassis-' + local_chassis_ip_address + ' #########'
line_index = 0
elif section_index == 3:
if line_index == 0:
modified_line = 'chassis get "' + ixia_dict['Chassis IP Address'] + '"\n'
# going to next section
#next_header = '######### Card Type : 10/100/1000 LSM XMVR16 ############'
next_header = '######### Card Type : 10/100/1000 LSM XMVDC16 ############'
line_index = 0
elif section_index == 4:
if line_index == 0:
# There could be multiple cards. It's hard to say if there should be more then one
# variable for the card number. I don't think it's neccarry because the system configures
# the cards sequentially so it should not be overwritten.
local_card_number = words[2]
# We take the first element from the card number list.
# After we're done using that information we will delete it from the list
# and then we can use element zero again. (like a stack)
modified_line = 'set card ' + str(card_number_list[0]) + '\n'
#next_header = '######### Chassis-' + local_chassis_ip_address + ' ' + local_card_number
#next_header = '######### Chassis-' + local_chassis_ip_address + ' Card-' + local_card_number
next_header = '######### Chassis-127.0.0.1' + ' Card-' + local_card_number
line_index = 0
elif section_index == 5:
if line_index == 0:
modified_line = 'set port ' + str(port_number_list[0]) + '\n'
line_index = line_index + 1
next_line = 'port config -MacAddress "'
elif line_index == 1:
long_port_number = 'Port Number ' + str(port_number_list[0])
# The source MAC address "can" be configured if you like
# But this does lead to more complexity about "what" to configure it to
try:
modified_line = next_line + ixia_dict[long_card_number][long_port_number]['Source MAC Address'] + '"\n'
except:
modified_line = input_line
line_index = 0
next_header = '######### Generating streams for all the ports from above #########'
else:
error_message = 'line_index out of range 0-1 for section_index 5!'
return error_message
elif section_index == 6:
error_message = 'Failure. Found a line in section six not expected.'
return error_message
elif section_index == 7:
if line_index == 0:
modified_line = 'chassis get "' + ixia_dict['Chassis IP Address'] + '"\n'
line_index = line_index + 1
#next_line = 'set card ' + local_card_number[0]
next_line = 'set card'
elif line_index == 1:
modified_line = 'set card ' + str(card_number_list[0]) + '\n'
line_index = line_index + 1
next_line = 'set port ' + local_port_number
elif line_index == 2:
modified_line = 'set port ' + str(port_number_list[0]) + '\n'
line_index = line_index + 1
"""
if debug:
print 'Looking for the stream ID itself in this dictionary:'
print ixia_dict
print 'Using these two keys to find it:'
print long_card_number, long_port_number
"""
raw_stream_id = ixia_dict[long_card_number][long_port_number].keys()
"""
if debug:
print 'Sorting through this list of keys:'
print raw_stream_id
"""
stream_id_list = []
for key in raw_stream_id:
if 'Stream ID' in key:
"""
if debug:
print 'Found a Stream ID:', key
"""
stream_id_list.append(key)
"""
elif debug:
print 'This value was not the Stream ID:', key
"""
stream_number_list = []
for stream_id in stream_id_list:
stream_number_list.append(ixia_dict[long_card_number][long_port_number][stream_id])
long_stream_id = stream_id_list[0]
next_line = 'set streamId ' + str(stream_number_list[0]['Stream ID'])
# At this point we're configuring the individual streams
# This will need to recurse itself until done with all the streams
#
# At the end of this mess we will check to see if there are more then one streams listed
# in the stream_numbe_list. If so that means that there are more then on stream that
# needs to be rewritten. To achieve this feat we will do a little cute trick.
# 1. We will remove the first element in the stream_number_list[0]
# 2. Then we will change the line_index = 2 so that this whole routine
# is repeated until there are no more streams to rewrite.
#
# The hopes are that all the streams are actully in this section.
elif line_index == 3:
## Stream ID ##
modified_line = 'set streamId ' + str(stream_number_list[0]['Stream ID']) + '\n'
next_line = '# Stream ' + str(stream_number_list[0]['Stream ID'])
line_index = line_index + 1
elif line_index == 4:
## Stream number ##
modified_line = '# Stream ' + str(stream_number_list[0]['Stream ID']) + '\n'
next_line = 'stream config -name "'
line_index = line_index + 1
elif line_index == 5:
## Stream name ##
modified_line = 'stream config -name "' + \
ixia_dict[long_card_number][long_port_number][long_stream_id]['Stream Name'] + '"\n'
next_line = 'stream config -framesize '
line_index = line_index + 1
elif line_index == 6:
## Framesize ##
# There may be a bug here. It looks like the new framesize is not single quoted
if ixia_dict[long_card_number][long_port_number][long_stream_id].has_key('Frame Size'):
modified_line = 'stream config -framesize ' + \
str(ixia_dict[long_card_number][long_port_number][long_stream_id]['Frame Size']) + '\n'
else:
modified_line = input_line
next_line = 'ip config -sourceIpAddr "'
line_index = line_index + 1
elif line_index == 7:
## source IP Address ##
modified_line = 'ip config -sourceIpAddr "' + \
ixia_dict[long_card_number][long_port_number][long_stream_id]['Source IP Address'] + '"\n'
#next_line = 'ip config -destIpAddr "'
next_line = 'ip config -sourceIpMask "'
line_index = line_index + 1
## New variables ##
# This is where the new variables come in
elif line_index == 8:
## source IP Address Mask ##
# generally set to "255.255.0.0"
if ixia_dict[long_card_number][long_port_number][long_stream_id].has_key('Source IP Mask'):
modified_line = 'ip config -sourceIpMask "' + \
ixia_dict[long_card_number][long_port_number][long_stream_id]['Source IP Mask'] + '"\n'
else:
modified_line = input_line
next_line = 'ip config -sourceIpAddrMode'
line_index = line_index + 1
elif line_index == 9:
## source IP Address Mode ##
# normally set to "ipIncrHost"
if ixia_dict[long_card_number][long_port_number][long_stream_id].has_key('Source IP Address Mode'):
modified_line = 'ip config -sourceIpAddrMode ' + \
ixia_dict[long_card_number][long_port_number][long_stream_id]['Source IP Address Mode'] + '\n'
else:
modified_line = input_line
next_line = 'ip config -sourceIpAddrRepeatCount'
line_index = line_index + 1
elif line_index == 10:
## source IP Address Repeat Count##
if ixia_dict[long_card_number][long_port_number][long_stream_id].has_key('Source IP Address Repeat Count'):
modified_line = 'ip config -sourceIpAddrRepeatCount ' + \
str(ixia_dict[long_card_number][long_port_number][long_stream_id]['Source IP Address Repeat Count']) + '\n'
else:
modified_line = input_line
next_line = 'ip config -sourceClass'
line_index = line_index + 1
elif line_index == 11:
## source IP Class ##
# used for incerementing the source IP.
# typicall set to "classC"
if ixia_dict[long_card_number][long_port_number][long_stream_id].has_key('Source IP Class'):
modified_line = 'ip config -sourceClass ' + \
ixia_dict[long_card_number][long_port_number][long_stream_id]['Source IP Class'] + '\n'
else:
modified_line = input_line
next_line = 'ip config -destIpAddr'
line_index = line_index + 1
## End new variables ##
#elif line_index == 8:
# generally set to ""30.222.0.1""
# string is quoted
elif line_index == 12:
## destination IP address
modified_line = 'ip config -destIpAddr "' + \
ixia_dict[long_card_number][long_port_number][long_stream_id]['Destination IP Address'] + '"\n'
#next_line = 'ip config -destMacAddr "'
next_line = 'ip config -destIpAddrMode'
line_index = line_index + 1
## New variables ##
elif line_index == 13:
## destination IP Address Mode ##
# genearlly set to "ipIncrHost"
if ixia_dict[long_card_number][long_port_number][long_stream_id].has_key('Destination IP Address Mode'):
modified_line = 'ip config -destIpAddrMode ' + \
ixia_dict[long_card_number][long_port_number][long_stream_id]['Destination IP Address Mode'] + '\n'
else:
modified_line = input_line
next_line = 'ip config -destIpAddrRepeatCount'
line_index = line_index + 1
elif line_index == 14:
## destination IP Address Repeat Count ##
# genearlly set to "4096"
if ixia_dict[long_card_number][long_port_number][long_stream_id].has_key('Destination IP Address Repeat Count'):
modified_line = 'ip config -destIpAddrRepeatCount ' + \
str(ixia_dict[long_card_number][long_port_number][long_stream_id]['Destination IP Address Repeat Count']) + '\n'
else:
modified_line = input_line
next_line = 'ip config -destClass'
line_index = line_index + 1
elif line_index == 15:
## destination Class ##
# genearlly set to "classC"
if ixia_dict[long_card_number][long_port_number][long_stream_id].has_key('Destination IP Class'):
modified_line = 'ip config -destClass ' + \
ixia_dict[long_card_number][long_port_number][long_stream_id]['Destination IP Class'] + '\n'
else:
modified_line = input_line
next_line = 'ip config -destMacAddr'
line_index = line_index + 1
## end new variables ##
#elif line_index == 9:
elif line_index == 16:
## Destionation MAC address
## Debug
print ixia_dict[long_card_number][long_port_number][long_stream_id].keys()
modified_line = 'ip config -destMacAddr "' + \
ixia_dict[long_card_number][long_port_number][long_stream_id]['Destination MAC Address'] + '"\n'
if len(stream_number_list) > 1:
stream_number = stream_number_list[0]
stream_number_list.remove(stream_number)
line_index = 2
else:
error_message = 'Something went wrong while processing the line_index value. Out of range 1-8'
return error_message
else:
print 'Something should happen here!'
if len(modified_line) > 1:
# Write out the modified line
if generate_output_file:
if debug:
print 'The modified line to be written will be:'
print modified_line
output_file.write(modified_line)
else:
print 'modified line:', modified_line
else:
# Write out the original line
if generate_output_file:
output_file.write(input_line)
else:
if debug:
print 'This is the line that would have been written out'
print input_line.strip()
print '-----------------------------------------------------------------------'
#############################
## Global debug breakpoint ##
#############################
if debug and (line_count >= break_after) and not run_to_completion:
print 'Breaking on line:', line_count ,'for debuging'
return 0
line_count = line_count + 1
####################
## end breakpoint ##
####################
if debug:
print 'The ending section index is:', section_index
print 'The ending line index is:', line_index
# Clean up
input_file.close()
if generate_output_file:
print 'Closing the output file:', output_file
output_file.close()
else:
if debug:
print 'This is where we would have closed the output file'
return 0
def scp_to_ixia(source_file, destination_file, ixia = '10.1.10.12', username = 'ixia', password = '1102607'):
    """
    Copy a file from the local filesystem to an IXIA chassis with scp.

    source_file      = local path of the file to send
    destination_file = path to write on the IXIA
    ixia             = IXIA hostname or IP address
    username         = scp login user on the IXIA
    password         = password for that user

    Returns 0 on success.  Raises RuntimeError on any failure (unknown
    host, bad credentials, missing local file, permission denied, or
    timeout).  The original raised bare strings, which is itself a
    TypeError on Python 2.6+.
    """
    debug = True
    # maximum number of seconds allowed for the file transfer itself
    max_copy_time = 120
    if debug:
        print('now in issu.py scp_to_ixia')
        print('incoming variables:')
        print('    source_file: %s' % source_file)
        print('    destination_file: %s' % destination_file)
        print('    ixia: %s' % ixia)
        print('    username: %s' % username)
        print('    password: %s' % password)
    # The Logging functions change the current working directory (CWD) to
    # /Logs, which makes every relative path resolve there.  If that has
    # happened, step back out before spawning scp.
    current_dir = os.getcwd()
    if debug:
        print('current working dir is: %s' % current_dir)
    dir_name = os.path.split(current_dir)
    if 'Logs' in dir_name:
        if debug:
            print('we are in the log dir!')
        in_log_dir = True
    else:
        if debug:
            print('we are not in the log dir!')
        in_log_dir = False
    if in_log_dir:
        os.chdir('../')
    # We drive the command-line scp with pexpect.  The cleaner way would be
    # paramiko, but that would have to be installed everywhere.
    if debug:
        print('trying to SCP the file')
    # command format: scp <local file> <username>@<hostname/IP>:<destination file>
    no_hostname = 'nodename nor servname provided, or not known'
    accept_key = 'Are you sure you want to continue connecting (yes/no)?'
    password_prompt = 'password:'
    bad_username_password = 'Permission denied, please try again.'
    no_such_local_file = 'No such file or directory'
    permission_denied = 'Permission denied'
    if debug:
        print('the command will be:')
        print("scp -q %s %s@%s:%s" % (source_file, username, ixia, destination_file))
    ses = pexpect.spawn("scp -q %s %s@%s:%s" % (source_file, username, ixia, destination_file))
    # timeout here covers how long the SCP session takes to start
    retr = ses.expect([accept_key, password_prompt, no_hostname, permission_denied], timeout = 10)
    if retr == 0:
        # new host key: accept it, then wait for the password prompt
        ses.sendline('yes')
        # BUG FIX: the original read "retr == ses.expect(...)" -- a
        # comparison whose result was discarded -- so retr silently kept
        # its previous value and the timeout branch could never fire.
        retr = ses.expect([password_prompt], timeout = 10)
        if retr == 0:
            ses.sendline(password)
        else:
            print('timeout waiting for password prompt')
            raise RuntimeError("timeout during SCP")
    elif retr == 1:
        # straight to the password prompt (host key already known)
        if debug:
            print(ses.before)
        ses.sendline(password)
    elif retr == 2:
        # unknown hostname.  NOTE: pexpect's "before" is an attribute, not
        # a method -- the original called it, which would raise TypeError.
        print(ses.before)
        raise RuntimeError("unable to SCP file. Error in command")
    elif retr == 3:
        raise RuntimeError("Permission denied")
    else:
        print('timeoute while trying to SCP file')
        raise RuntimeError("timout during SCP")
    # max_copy_time bounds the transfer itself.  120 seconds should be
    # plenty for any IXIA config.  EOF means scp exited (success path).
    retr = ses.expect([bad_username_password, pexpect.EOF, no_such_local_file], timeout = max_copy_time)
    if retr == 0:
        print('bad username or password provided')
        raise RuntimeError('bad username or password provided')
    elif retr == 1:
        if debug:
            print('SCP successful')
    elif retr == 2:
        error_root = ses.before
        error_message = 'No such file or directory: %s' % error_root
        print(error_message)
        raise RuntimeError('No such file or directory')
    else:
        error_message = 'maximum time: %s for SCP to complete exceeded' % max_copy_time
        print(error_message)
        raise RuntimeError('Max copy time exceeded')
    return 0
def get_mac_cisco(cisco_ip, interface):
    """
    Connect to a cisco and retrieve the MAC address of one interface.

    Input:
        cisco_ip  = hostname or IP of the console
        interface = short interface name on the cisco, e.g. "1/0", "3/1"
    Returns:
        the MAC address string (7th whitespace-separated field of the
        "show interfaces ... | include address" output)

    All APIs dealing with the cisco are passed only the cisco IP; the idea
    is to not remain connected, since cisco configuration is infrequent.
    """
    debug = True
    if debug:
        print('connecting to cisco: %s' % cisco_ip)
    cisco = CISCO.CISCO(cisco_ip)
    cisco.console(cisco_ip)
    if debug:
        print('figuring out the full interface name')
    # Expand "1/0" into e.g. "GigabitEthernet 1/0" by matching the
    # interface type from "show ip interface brief"
    # (technique taken from CISCO.py clear_interface_config)
    out = cisco.cmd("sh ip interface brief | inc %s" % interface)
    intType = re.search(r"(\w+Ethernet)", out).group(0)
    interface_name = intType + ' ' + interface
    if debug:
        print('the interface name is: %s' % interface_name)
    raw_line = cisco.cmd("show interfaces %s | include address" % interface_name)
    raw_words = raw_line.split()
    if debug:
        print('raw words: %s' % raw_words)
        print('this should be the MAC: %s' % raw_words[6])
    mac_address = raw_words[6]
    # BUG FIX: close the console session BEFORE returning -- the original
    # placed cisco.close() after the return statement, so it never ran and
    # the session was leaked.
    cisco.close()
    return mac_address
def get_mac_ssx(self, interface):
    """
    Look up the MAC address of an SSX port.

    Input:
        self      = the SSX object
        interface = port name such as "2/1" or "3/2"
    Returns:
        the port's MAC address string

    An SSX line card can have two or four ports, and there is no way to
    know in advance whether the requested port exists; asking for a port
    that show_port_detail did not report will raise a KeyError here.
    """
    debug = True
    if debug:
        print('retrieving the port information for port: %s' % interface)
    # show_port_detail returns a dict keyed by interface name
    details = show_port_detail(self, interface)
    if debug:
        print('here is the dictionary we got back:')
        print(details[interface])
    return details[interface]['MAC Address']
def cisco_mac_to_ixia_mac(mac_address):
    """
    Convert a cisco-format MAC ("0013.196e.0a81") to IXIA format
    ("00 13 19 6E 0A 81"): six upper-case hex byte pairs joined by spaces.

    BUG FIX: the original emitted the two bytes of the second and third
    dotted groups in swapped order (producing "00 13 6E 19 81 0A") while
    the first group was emitted in natural order.  All three groups are
    now split high byte first, consistent with ssx_mac_to_ixia_mac, which
    preserves byte order.
    """
    debug = True
    if debug:
        print('incoming MAC: %s' % mac_address)
    groups = str.upper(mac_address).split('.')
    # each dotted group holds two bytes: chars [0:2] and [2:4]
    byte_pairs = []
    for group in groups:
        byte_pairs.append(group[:2])
        byte_pairs.append(group[2:])
    ixia_mac = ' '.join(byte_pairs)
    return ixia_mac
def ssx_mac_to_ixia_mac(mac_address):
    """
    Convert an ssx-format MAC ("00:12:73:00:64:a0") to IXIA format
    ("00 12 73 00 64 A0"): upper-case byte pairs joined by single spaces.
    """
    debug = True
    if debug:
        print('incoming MAC: %s' % mac_address)
    octets = str.upper(mac_address).split(':')
    # index the six octets explicitly so a malformed (short) MAC still
    # raises IndexError, exactly as the original concatenation did
    return ' '.join([octets[i] for i in range(6)])
def ftp(source_file, destination_file, hostname='localhost', destination_directory='current', username='regress', password='gleep7',getOrPut='put'):
    """
    Drive a command-line "ftp" client (via pexpect) to put or get a file.

    source_file           = full/relative local path ('put') or remote
                            name ('get')
    destination_file      = the filename itself on the receiving side
    hostname              = FTP server hostname or IP (default localhost)
    destination_directory = remote directory to cd into, or the literal
                            string 'current' to stay where we land
    username / password   = FTP credentials
    getOrPut              = 'put' (default) or 'get'

    Returns 0 on success.  On failure returns a descriptive error-message
    string instead of raising -- callers must check the return value.
    """
    debug = False
    # seconds allowed for each individual expect() step
    timeout = 10
    if (not os.path.exists(source_file)) and (getOrPut == 'put'):
        # check source file only if it is a put
        return 'Invalid source file:' + source_file
    if debug:
        if hostname == 'localhost':
            print 'Openning connection to localhost'
        else:
            print 'Openning connection to', hostname
    cmd = 'ftp ' + hostname
    if debug:
        print 'command will be:', cmd
    try:
        ses = pexpect.spawn(cmd)
    except:
        error_message = 'unable to connect to host ' + hostname
        return error_message
    # the login banner's "Name (host:user):" prompt
    prompt = 'Name'
    retr = ses.expect([prompt, 'Connection refused'], timeout=timeout)
    if debug:
        print 'retr ', retr
    # This is what we want
    if retr == 0:
        if debug:
            print 'username ', username
        ses.sendline(username)
        retr = ses.expect(['Password required'], timeout=timeout)
        if retr == 0:
            if debug:
                print 'password ', password
            ses.sendline(password)
            # from here on the interactive prompt is "ftp>"
            prompt = 'ftp>'
            retr = ses.expect([prompt, 'cannot log in'], timeout=timeout)
            if retr == 0:
                print 'succusffully logged into host:', hostname
            elif retr == 1:
                return 'invalid username or password'
            else:
                # NOTE(review): pexpect raises TIMEOUT rather than
                # returning an extra index, so this branch looks
                # unreachable -- confirm before relying on it
                return 'timeout while waiting for login (network error)'
    if retr == 1:
        error_message = 'Connection refused'
        return error_message
    if retr == 2:
        # NOTE(review): expect() above only has two patterns; index 2 is
        # never returned, so this also appears unreachable
        error_message = 'Timeout while connecting'
        return error_message
    if not (destination_directory == 'current'):
        cmd = 'cd ' + destination_directory
        if debug:
            print 'comand will be:', cmd
        ses.sendline(cmd)
        retr = ses.expect(['command successful', 'find the file specified','CWD successful'], timeout=timeout)
        if debug:
            print 'retr', retr
        # NOTE(review): retr can only be 0-2 here (TIMEOUT raises), so the
        # failure branch below never runs; worse, index 1 matches the
        # "can't find the file specified" error banner yet falls into the
        # success path -- confirm intended behavior
        if retr > 2:
            return 'Unable to change directory to: ' + destination_directory
        else:
            print 'changed directory to:', destination_directory
        # expect the prompt
        retr = ses.expect([prompt], timeout=timeout)
        if retr > 0:
            return 'Unable to get the prompt'
        else:
            if debug:
                print 'Get the prompt'
    if getOrPut == 'put':
        cmd = 'put ' + destination_file
    else:
        cmd = 'get ' + source_file + " " + destination_file
    if debug:
        print 'cmd ', cmd
    # give the session a moment to settle before issuing the transfer
    time.sleep(5)
    ses.sendline(cmd)
    # second positional argument of expect() is the timeout
    retr = ses.expect(["Transfer OK",prompt], timeout)
    if debug:
        print 'retr ', retr
    # NOTE(review): same dead-branch pattern -- retr is 0 or 1 here, so
    # 'unable to put the file' is never returned; a transfer failure that
    # still reaches the ftp> prompt (retr == 1) is reported as success
    if retr > 1:
        return 'unable to put the file'
    else:
        print 'file:', destination_file, 'was %s to the server successfully' %getOrPut
    cmd = 'bye'
    ses.sendline(cmd)
    # Done!
    return 0
def clear_context(self, context=""):
    """Clear the configuration of a specific context on a SSX.

    Leaves no context behind.  Rewrite of clear_context from device.py.
    Issues "config / no context <name> / end", then verifies the removal
    with "show configuration context <name>".

    self    = SSX device object providing .cmd()
    context = name of the context to delete

    Returns None on success; calls sys.exit(1) if the context does not
    exist or could not be removed (this helper aborts the whole run).
    """
    debug = False
    # Checking to make sure the context exists!
    context_list = list_contexts(self)
    context_names = context_list.keys()
    if context in context_names:
        print "Clearing the configuration of context:", context
        command = 'config'
        if debug:
            print 'command will be:', command
        retrn_val = self.cmd(command)
        command = 'no context ' + context
        if debug:
            print 'command will be:', command
        retrn_val = self.cmd(command)
        # Code needs to be written to handle any failed return value
        if debug:
            print 'Command returned:', retrn_val
        command = 'end'
        if debug:
            print 'command will be:', command
        retrn_val = self.cmd(command)
        ################################
        ## Verify Context was Removed ##
        ################################
        print 'Verifying Context was removed'
        command = 'show configuration context ' + context
        if debug:
            print 'The command will be:', command
        raw_retrn_val = self.cmd(command)
        # strip leading whitespace so the comparison below is exact
        retrn_val = string.lstrip(raw_retrn_val)
        # a deleted context makes the SSX answer with this exact error
        expected_string = 'ERROR: Context ' + context + ' not found'
        if debug:
            print 'Checking to see if:'
            print '"', expected_string, '" = "', retrn_val, '"'
        if retrn_val == expected_string:
            print 'Context was succesfully removed'
            return
        else:
            print 'Context was NOT removed!'
            print 'System returned:', retrn_val
            sys.exit(1)
    else:
        print 'Context name provided:', context, 'Is NOT a context on the SSX. FAIL'
        sys.exit(1)
def minimal_configuration(self):
    """
    Strip the SSX down to a minimal configuration.

    Deletes every context except "local" (so a fresh configuration can be
    loaded without conflicts from the previous one), then unbinds all
    Ethernet ports other than the admin ports 0/0 and 1/0.  Also works
    over a telnet session.
    """
    debug = False
    context_dict = list_contexts(self)
    if debug:
        print("the SSX has the following contexts:")
    context_list = context_dict.keys()
    if debug:
        print("==========================================")
        print("About to remove all contexts except local")
    for context in context_list:
        if context == 'local':
            # the local context is never deleted
            continue
        if debug:
            print("About to clear the context: %s" % context)
        retrn_val = clear_context(self, context)
        if debug:
            print("returned: %s" % retrn_val)
            print("Context was cleared sucessfully")
    print('All configuration was removed successfully except "local" context')
    # Now we need to unbind all the physical interfaces.
    print('unbinding all the Ethernet ports except admin (0/0, 1/0)')
    unbind_interfaces(self)
    return
def unbind_interfaces(self, protect_admin=True):
    """
    Remove all physical port configuration from the SSX.

    Reads "show conf port", negates every sub-command of each configured
    port with a leading "no", then replays the collected commands inside
    config mode.

    protect_admin = when True (default) the admin ports 0/0 and 1/0 are
                    left configured; when False they are cleared too.

    Example of the "show conf port" output being parsed:
        port ethernet 2/0
         bind interface 2-0 r2
         ipsec policy ikev2 phase1 name p11
         ipsec policy ikev2 phase2 name p12
        exit
         service ipsec
         enable
        exit
        port ethernet 2/2
         bind interface 2-2 r2
        exit
         enable
        exit
    """
    debug = False
    admin_ports = ['0/0','1/0']
    command = 'show conf port '
    raw_ports = self.cmd(command)
    port_conf = raw_ports.splitlines()
    # the commands we accumulate and then execute to clear the ports
    clear_commands = []
    # True while we are inside a protected admin port's block, so its
    # sub-commands are skipped instead of negated
    admin_port = False
    if debug:
        print('------------------------------------')
    for raw_line in port_conf:
        if len(raw_line) <= 1:
            # blank/terminator line from the device output
            if debug:
                print('discarding this line')
                print(raw_line)
            continue
        if debug:
            print('The raw line is:')
            print(raw_line)
        words = raw_line.split()
        if words[0] == 'port':
            # a "port ethernet X/Y" header line
            if debug:
                print('testing: %s' % words[2])
            if words[2] in valid_port_list:
                if words[2] not in admin_ports:
                    admin_port = False
                    if debug:
                        print('Found a port: %s' % words[2])
                        print('Saving it to the clear_commands')
                    # close out the previous port block before this one
                    if len(clear_commands) > 0:
                        clear_commands.append('exit')
                    clear_commands.append(raw_line)
                else:
                    if protect_admin:
                        if debug:
                            print('found an admin port')
                            print('port will be left configured')
                        admin_port = True
                    else:
                        # NOTE(review): preserved from the original -- the
                        # admin port header is NOT appended to
                        # clear_commands here, so its "no ..." lines run
                        # outside that port's context; confirm before
                        # relying on protect_admin=False
                        if debug:
                            print('found an admin interface')
                            print('Will be unconfiguring it as well')
                        admin_port = False
            else:
                if debug:
                    print('This line not an interface')
                    print(raw_line)
        elif admin_port == False:
            if debug:
                # BUG FIX: in the original this was a bare string statement
                # (the "print" keyword was missing), so the debug message
                # was silently discarded
                print('line not protected as admin')
            if not 'exit' == words[0]:
                # raw_line keeps its leading whitespace, which provides the
                # separator between "no" and the command text
                no_line = 'no' + raw_line
                if debug:
                    print('no line:')
                    print(no_line)
                clear_commands.append(no_line)
            else:
                if debug:
                    print('discarding: %s' % raw_line)
    if debug:
        print('------------------------------------')
        print('Completed processing the port config')
        print('now removing the interfaces')
        print('The commands that will be run are:')
        print('----------------------------------')
        for line in clear_commands:
            print(line)
    command = 'conf'
    self.cmd(command)
    for line in clear_commands:
        self.cmd(line)
    if debug:
        print('completed sending commands')
    command = 'end'
    self.cmd(command)
    return
def odd_or_even(value):
    """
    Classify an integer as 'Odd' or 'Even'.

    Accepts anything int() can convert, e.g. 4 or "4".
    Returns 'Odd' or 'Even', or the error string
    'Value provided was not an integer' when conversion fails.
    """
    try:
        # BUG FIX: keep the converted value.  The original discarded the
        # int() result, so a numeric *string* passed validation and then
        # crashed on the modulo operation below.
        value = int(value)
    except (TypeError, ValueError):
        return 'Value provided was not an integer'
    if value % 2:
        return 'Odd'
    return 'Even'
def ssx_date_to_log(date, offset=0):
    """
    Convert an SSX clock date string into the event-log timestamp format.

    The event logs are named "event-log-yyyymmdd-hhmmss"; this returns the
    "yyyymmdd-hhmmss" portion for the given date, shifted back by *offset*
    whole days.  Used to fabricate the fake log files needed to verify
    log erasure.

    date   = whitespace-separated date string parsed positionally as:
             field [1] = month name, [2] = day, [3] = year,
             [4] = "HH:MM:SS"  (assumed "show clock" layout -- TODO
             confirm the exact field order against the device output)
    offset = whole days to subtract from the date (default 0)

    Raises ValueError when offset cannot be converted to an integer
    (the original raised a bare string, which is itself a TypeError on
    modern Python 2).
    """
    debug = False
    try:
        # BUG FIX: keep the converted value.  The original discarded the
        # int() result, so a string offset like "3" passed validation but
        # broke the timedelta arithmetic below.
        offset = int(offset)
    except (TypeError, ValueError):
        print('The offset value passed: %s Was not an integer!' % offset)
        raise ValueError("invalid offset value %s" % offset)
    begin_time_stamp_parts = date.split()
    test_year = int(begin_time_stamp_parts[3])
    if debug:
        print('test_year: %s' % test_year)
    test_letter_month = begin_time_stamp_parts[1]
    if debug:
        print('test_letter_month: %s' % test_letter_month)
    # month name -> month number (helper defined elsewhere in this module)
    test_month = name_month_to_num(test_letter_month)
    if debug:
        print('test_month: %s' % test_month)
    test_day = int(begin_time_stamp_parts[2])
    if debug:
        print('test_day: %s' % test_day)
    time_parts = begin_time_stamp_parts[4].split(':')
    if debug:
        print('time_parts: %s' % time_parts)
    test_hour = int(time_parts[0])
    if debug:
        print('test_hour %s' % time_parts)
    test_minute = int(time_parts[1])
    if debug:
        print('test_minute: %s' % test_minute)
    test_second = int(time_parts[2])
    if debug:
        print('test_second: %s' % test_second)
    # Convert the date/time into python native format
    if debug:
        print('%s %s %s %s %s %s' % (test_year, test_month, test_day, test_hour, test_minute, test_second))
    now = datetime.datetime(test_year, test_month, test_day, test_hour, test_minute, test_second)
    if offset != 0:
        calculated_date = now - datetime.timedelta(days=offset)
    else:
        calculated_date = now
    log_filename = calculated_date.strftime("%Y%m%d-%H%M%S")
    return log_filename
def nslookup_by_host(hostname):
    """
    Resolve a hostname to an IP address, like command-line nslookup.

    Returns the IPv4 address string from socket.gethostbyname, or the
    string 'not found' when resolution fails.
    """
    debug = False
    if debug:
        print('about to retrieve the ip for: %s' % hostname)
    try:
        output = socket.gethostbyname(hostname)
        if debug:
            print('the raw output was: %s' % output)
    except socket.error:
        # narrowed from a bare except: socket.gaierror derives from
        # socket.error, so only resolution/network failures map to
        # 'not found' -- anything else (e.g. KeyboardInterrupt) propagates
        output = 'not found'
    return output
def nslookup_by_ip(ip_address):
    """
    Reverse-resolve an IP address to its hostname (like nslookup).

    Returns the primary hostname on success, 'not available' when the
    reverse lookup fails, or 'invalid IP adderss provided' (sic -- kept
    byte-for-byte for caller compatibility) for a malformed address.

    Note from the original author: this was observed broken on some lab
    systems while working elsewhere -- the failure mode is the
    'not available' return.
    """
    debug = False
    if debug:
        print('received the following IP Adress: %s' % ip_address)
    if validIP(ip_address):
        try:
            # gethostbyaddr returns (hostname, aliaslist, ipaddrlist)
            output = socket.gethostbyaddr(ip_address)
            if debug:
                print('the raw output was: %s' % (output,))
            return output[0]
        except socket.error:
            # narrowed from a bare except: socket.herror/gaierror both
            # derive from socket.error; unrelated errors now propagate
            return 'not available'
    else:
        return 'invalid IP adderss provided'
def unix_to_dos_path(path):
"""
On UNIX the path is formated using the forward slash /
On Windows the path uses the backslash \
So when you are using UNIX with windows you will need to convert
any path statements. This takes the UNIX style path and generates a
python friendly DOS style of path. (adding extra slashes to escape the slashes)
the companion function is called dos_to_unix
"""
debug = False
if debug:
print 'now in issu.py unix_to_dos'
if debug:
print 'the path passed was:'
print path
return_path = ntpath.abspath(path)
return return_path
def dos_to_unix_path(path):
    """
    Translate a Windows-style (backslash) path into a UNIX-style
    (forward-slash) path.

    Replaces every backslash with a forward slash; all other characters
    are passed through unchanged.  Companion function: unix_to_dos_path.
    """
    debug = False
    if debug:
        print('now in issu.py dos_to_unix')
        print('the path passed was:')
        print(path)
    # idiom fix: one C-level str.replace pass instead of the original
    # per-character accumulation loop (which was quadratic)
    return_path = path.replace('\\', '/')
    if debug:
        print('the return_path looks like:')
        print(return_path)
        print(path)
    return return_path
def select_to_base(self, base_version):
    """
    Select (boot) the system back to a known base software version.

    Used at the start of ISSU-related tests to put the system into a
    known state.  If the running build and branch already match the base
    version, nothing is done.

    base_version = dict with two keys, e.g.
        {'package_name': '4.6B2', 'build': '2010051019'}
        'package_name' is the version/branch, 'build' is the build ID.

    Returns the string 'complete' on success, or
    'System never came back up after select!' if the system does not
    answer telnet again after the reboot.
    """
    debug = False
    # Get the current version from the SSX
    running_ver = self.ssx.get_version()
    if debug:
        self.myLog.info("The system is currently running %s " % running_ver)
    # compare the running build ID against the requested base build
    #if (running_ver['build'] == topo.base_version['build']):
    if (running_ver['build'] == base_version['build']):
        if debug:
            self.myLog.debug("Build version on system same as base version: %s" % running_ver['build'])
        build_the_same = True
    else:
        build_the_same = False
    # compare the running branch name against the requested package name
    #if (running_ver['branch'] == topo.base_version['package_name']):
    if (running_ver['branch'] == base_version['package_name']):
        self.myLog.debug("Branch name on the system same as base version: %s" % running_ver['branch'])
        branch_the_same = True
    else:
        branch_the_same = False
    if build_the_same and branch_the_same:
        if debug:
            self.myLog.info('System is running base version already. No install/select required')
    else:
        if debug:
            self.myLog.info('About to boot the system with the base version')
        #self.myLog.info("Base version is: %s " % topo.base_version)
        self.myLog.info("Base version is: %s " % base_version)
        self.myLog.debug("8888888888888888888")
        # install_base is defined elsewhere in this module; a truthy
        # return value means the select/install failed
        #retr = install_base(self.ssx, topo.base_version)
        retr = install_base(self.ssx, base_version)
        if debug:
            self.myLog.debug("8888888888888888888")
        if retr:
            self.myLog.error('Somethine went wrong when selecting to base version!')
            self.myLog.error(" it was: %s", retr)
            sys.exit(1)
        # we need to close that file handle to create a fresh one.
        time.sleep(2)
        self.ssx.close()
        # initial blind wait before we start polling for the system
        reboot_time = 200
        if debug:
            self.myLog.info("waiting for the system to finish rebooting: %s seconds" % reboot_time)
        time.sleep(reboot_time)
        # poll: try telnet every 30 seconds, up to 20 times (~10 minutes)
        rebooting = True
        retries = 20
        while rebooting:
            if debug:
                self.myLog.info('Sleeping for 30 seconds')
            time.sleep(30)
            try:
                if debug:
                    self.myLog.info('Connecting to SSX')
                self.ssx.telnet()
                if debug:
                    self.myLog.info('Made it past the telnet command')
                # if that command does not fail then the rebooting state should change
                rebooting = False
            except:
                if debug:
                    self.myLog.info('System not up yet')
                retries = retries - 1
                if debug:
                    self.myLog.info("%s retries left" % retries)
                if retries == 0:
                    if debug:
                        self.myLog.info("System never came back up after select!")
                    # Need to return the failure here
                    #sys.exit(1)
                    return 'System never came back up after select!'
    if debug:
        self.myLog.info('Completed Select to base version')
    # wait for all line cards to come up before declaring success
    self.ssx.wait4cards()
    return 'complete'
def session_counters_handle(self, session_handle):
"""This method pulls the session counters via the session handle.
it executes "show session counters handle <handle>"
It returns a dictionary containing all the fields present in the output
"""
if debug:
print 'Now in issu.py session_counters_handle method'
# Example Data
"""
01 Tue May 11 10:32:22 PDT 2010.
02
03 Username Session Rcv Pkts Xmit Pkts Rcv Bytes Xmit Bytes
04 Handle
05 -------------------- ---------- ----------- ----------- ----------- -----------
06 16502102800650210@r2 fc44020b 58869 58897 2708020 2709492
"""
# If you provide an invalid session handle there is no response from the SSX
# This will result in an empty dictionary being returned.
results = {}
command = "show session counters handle %s" % session_handle
session_counters_raw = self.cmd(command)
if debug:
print 'This is the raw result:', session_counters_raw
if len(session_counters_raw) > 0:
# Chop the ouput into lines
session_counters = session_counters_raw.splitlines()
# We have a good idea of what the ouptut is going to look like.
# The very last line "should" always contain our data.
# The column headers will not change so we don't need to look at them.
words = session_counters[-1:].split()
results['Username'] = words[0]
results['Session Handle'] = words[1]
results['Rcv Pkts'] = words[2]
results['Xmit Pkts'] = words[3]
results['Rcv Bytes'] = words[4]
results['Xmit Bytes'] = words[5]
if debug:
print 'Completed parsing the output. This is what we got:'
print results
return results
else:
print 'Invalid Session Handle provided:', session_handle
return 0
def session_counters_username(self, username):
"""This method pulls the session counters via the session username.
It returns a dictionary containing all the fields present in the output
"""
if debug:
print 'Now in issu.py session_counters_username method'
# Example Data
"""
01 Tue May 11 10:32:22 PDT 2010.
02
03 Username Session Rcv Pkts Xmit Pkts Rcv Bytes Xmit Bytes
04 Handle
05 -------------------- ---------- ----------- ----------- ----------- -----------
06 16502102800650210@r2 fc44020b 58869 58897 2708020 2709492
"""
# If you provide an invalid session handle there is no response from the SSX
# This will result in an empty dictionary being returned.
results = {}
command = "show session counters username %s" % username
session_counters_raw = self.cmd(command)
if debug:
print 'This is the raw result:', session_counters_raw
if len(session_counters_raw) > 0:
# Chop the ouput into lines
session_counters = session_counters_raw.splitlines()
# We have a good idea of what the ouptut is going to look like.
# The very last line "should" always contain our data.
# The column headers will not change so we don't need to look at them.
words = session_counters[-1:].split()
results['Username'] = words[0]
results['Session Handle'] = words[1]
results['Rcv Pkts'] = words[2]
results['Xmit Pkts'] = words[3]
results['Rcv Bytes'] = words[4]
results['Xmit Bytes'] = words[5]
if debug:
print 'Completed parsing the output. This is what we got:'
print results
return results
else:
print 'Invalid Session Handle provided:', session_handle
return 0
def session_counters(self):
"""This returns a list indexed on username of every session listed along with a
dictionary of the values
"""
debug = False
if debug:
print 'Now in issu.py session_counters_method'
# Example Data
"""
01 Tue May 11 10:32:22 PDT 2010.
02
03 Username Session Rcv Pkts Xmit Pkts Rcv Bytes Xmit Bytes
04 Handle
05 -------------------- ---------- ----------- ----------- ----------- -----------
06 16502102800650210@r2 fc44020b 58869 58897 2708020 2709492
07 16502102800650211@r2 fc44021b 0 0 0 0
"""
# If you provide an invalid session handle there is no response from the SSX
# This will result in an empty dictionary being returned.
results = {}
command = "show session counters"
session_counters_raw = self.cmd(command)
if debug:
print 'This is the raw result:', session_counters_raw
if len(session_counters_raw) > 0:
# Chop the ouput into lines
session_counters = session_counters_raw.splitlines()
# we need to figure out which lines to read. The output is variable.
# The header information is always 5 lines long.
# We can take the lenght of the output and subtract the header to get
# the length of the output we want.
# The calculation should net a negative number. We hope.
line_count = 6 - len(session_counters)
if debug:
print 'We calculated there should be', line_count, 'lines to parse'
print 'If the above output is positive then something went wrong.'
print 'Found', abs(line_count), 'sessions active'
"""
if debug:
print 'The lines we will process are:'
print session_counters[line_count:]
"""
# This odd syntax should get us only the last N lines.
for line in session_counters[line_count:]:
if '-------' in line:
print 'We went too far and got the seperator!'
print 'Please increase the number of lines to count in.'
else:
# Create a fresh local dictionary to accumulate the results into
line_dict = {}
# cut the line into words
words = line.split()
# The list is indexed on the username
# so we will store it here for clean code
username = words[0]
# Everything else is dumpted into the local dictionary
line_dict['Username'] = words[0]
line_dict['Session Handle'] = words[1]
line_dict['Rcv Pkts'] = words[2]
line_dict['Xmit Pkts'] = words[3]
line_dict['Rcv Bytes'] = words[4]
line_dict['Xmit Bytes'] = words[5]
# This packs the line dictionary into the results dictionary
results[username] = line_dict
return results
def show_process(self, slot='all'):
"""Runs the command 'show process' and parses the output.
"""
slot_list = ['slot 0','slot 1','slot 2','slot 3','slot 4']
process_dict = {}
debug = False
# Sample raw input
"""
australia[local]#show process
01 Name PID StartTime CPU NumThreads Priority
02 -------------- ------- ------------------------ --- ---------- --------
03 NSM:0 651272 Tue Jun 01 15:31:53 0 21 7
04 Smid:0 696345 Tue Jun 01 15:32:03 0 10 7
05 Ip:0 696349 Tue Jun 01 15:32:07 0 32 7
06 CtxMgr:0 696348 Tue Jun 01 15:32:07 0 9 7
07 Fpd:0 696347 Tue Jun 01 15:32:06 0 16 7
08 Aaad:0 696353 Tue Jun 01 15:32:09 1 31 7
09 Cli:0 696368 Tue Jun 01 15:36:39 0 8 7
10 Snmpd:0 696355 Tue Jun 01 15:32:09 0 9 7
11 Inets:0 696354 Tue Jun 01 15:32:09 0 13 7
12 Logind:0 696346 Tue Jun 01 15:32:06 0 9 7
13 Ospf:0 696350 Tue Jun 01 15:32:07 0 10 7
14 Bgp4:0 696351 Tue Jun 01 15:32:07 0 11 7
15 Evl:0 696342 Tue Jun 01 15:32:03 0 13 7
16 EvlColl:0 696343 Tue Jun 01 15:32:03 0 8 7
17 Qosd:0 696352 Tue Jun 01 15:32:07 0 10 7
18 IkedMc:0 696356 Tue Jun 01 15:32:09 0 11 7
19 Ntp:0 696357 Tue Jun 01 15:32:09 0 10 7
20 Rip:0 696358 Tue Jun 01 15:32:09 0 12 7
21 Evt:0 696341 Tue Jun 01 15:32:03 0 9 7
22 Fabric:0 696364 Tue Jun 01 15:32:09 0 8 7
"""
if slot == 'all':
command = 'show process'
elif slot in slot_list:
command = 'show process ' + slot
else:
print 'Invalide specification for slot:', slot
print 'Expected slot to be one of the following:', slot_list
return 'Invalid option'
raw_process_list = self.cmd(command)
process_list = raw_process_list.splitlines()
if debug:
print 'The raw value returned was:'
print process_list
for raw_line in process_list[3:]:
line = raw_line.split()
local_dict = {}
if debug:
print '----------------------------------------------'
print 'The line to be processes is:'
print line
raw_name = line[0].split(':')
name = raw_name[0]
if debug:
print 'The name is:', name
local_dict['pid'] = line[1]
if debug:
print 'The PID is:', local_dict['pid']
day = line[2]
month = line[3]
year = line[4]
time = line[5]
start_time = day, month, year, time
local_dict['start_time'] = start_time
if debug:
print 'The start time is:', local_dict['start_time']
local_dict['cpu'] = line[6]
if debug:
print 'The CPU it\'s on is:', local_dict['cpu']
local_dict['number_of_threads'] = line[7]
if debug:
print 'The number of threads is:', local_dict['number_of_threads']
local_dict['priority'] = line[8]
if debug:
print 'The priority is:', local_dict['priority']
# We store each entry in the main dictionary we return
process_dict[name] = local_dict
return process_dict
def show_process_cpu(self, slot='all'):
"""Runs the command 'show process' and parses the output.
"""
slot_list = ['slot 0','slot 1','slot 2','slot 3','slot 4']
process_dict = {}
debug = False
# Sample raw input
# If you have normal page breaks turned on (normal CLI) you will see the
# banner information containing the column headers like "name" "PID" etc.
# at every page break. You will also see the CPU Utilization again
# This information is redundant and will be identical
"""
australia[local]#show process cpu
01 CPU0 Utilization for 5 seconds: 3.45% 1 Minute: 5.20% 5 Minutes: 11.56%
02 CPU1 Utilization for 5 seconds: 0.21% 1 Minute: 0.22% 5 Minutes: 2.68%
03
04 Name PID StartTime CPU uTime sTime % Now
05 -------------- ------- ------------------------ --- ------ ------ ------
06 System:0 0 Mon Jun 20 13:22:23 0/1 16.337 5.995 0.00%
07 NSM:0 602115 Mon Jun 20 13:22:22 0 6.909 0.904 1.09%
08 Smid:0 671769 Mon Jun 20 13:22:31 0 1.004 0.065 0.00%
09 Ip:0 671773 Mon Jun 20 13:22:33 0 0.524 0.095 0.09%
10 CtxMgr:0 671772 Mon Jun 20 13:22:33 0 0.100 0.009 0.00%
11 Fpd:0 671771 Mon Jun 20 13:22:33 0 0.253 0.037 0.19%
12 Aaad:0 671777 Mon Jun 20 13:22:34 1 0.217 0.140 0.00%
13 Cli:0 831542 Mon Jun 20 13:23:21 0 0.976 0.043 0.79%
14 Cli:1 999472 Mon Jun 20 13:27:01 0 0.839 0.009 0.00%
15 Snmpd:0 671779 Mon Jun 20 13:22:34 0 0.128 0.020 0.00%
16 Inets:0 671778 Mon Jun 20 13:22:34 0 0.128 0.034 0.00%
17 Logind:0 671770 Mon Jun 20 13:22:33 0 0.088 0.006 0.00%
18 Logind:1 831541 Mon Jun 20 13:23:20 0 0.079 0.007 0.00%
19 Ospf:0 671774 Mon Jun 20 13:22:33 0 0.126 0.013 0.00%
20 Bgp4:0 671775 Mon Jun 20 13:22:33 0 0.132 0.016 0.00%
21 Evl:0 671766 Mon Jun 20 13:22:31 0 0.113 0.012 0.00%
22 EvlColl:0 671767 Mon Jun 20 13:22:31 0 0.101 0.027 0.00%
23 Qosd:0 671776 Mon Jun 20 13:22:33 0 0.118 0.010 0.00%
24 IkedMc:0 671780 Mon Jun 20 13:22:34 0 0.145 0.023 0.00%
25 Ntp:0 671781 Mon Jun 20 13:22:34 0 0.106 0.021 0.00%
26 Rip:0 671782 Mon Jun 20 13:22:34 0 0.127 0.013 0.00%
27 Evt:0 671765 Mon Jun 20 13:22:31 0 0.129 0.029 0.00%
28 Fabric:0 671788 Mon Jun 20 13:22:35 0 0.091 0.012 0.00%
29 Fsync:0 671768 Mon Jun 20 13:22:31 0 0.171 0.170 0.00%
30 TunMgr:0 671783 Mon Jun 20 13:22:34 0 0.095 0.022 0.00%
31 PPPoEMC:0 671784 Mon Jun 20 13:22:34 0 0.091 0.016 0.00%
32 PPPdMc:0 671785 Mon Jun 20 13:22:34 0 0.102 0.021 0.00%
33 CDR:0 671786 Mon Jun 20 13:22:34 0 0.182 0.012 0.00%
34 DHCPdMC:0 671787 Mon Jun 20 13:22:35 0 0.123 0.018 0.00%
35 MIPd:0 671789 Mon Jun 20 13:22:35 0 0.133 0.021 0.00%
36 SLA:0 671790 Mon Jun 20 13:22:35 0 0.101 0.014 0.00%
37 Dfn:0 671791 Mon Jun 20 13:22:35 1 0.194 0.108 0.00%
"""
if debug:
print 'now in show_process_cpu in issu.py'
if slot == 'all':
command = 'show process cpu'
elif slot in slot_list:
command = 'show process cpu slot' + slot
else:
print 'Invalid specification for slot:', slot
print 'Expected slot to be one of the following:', slot_list
return 'Invalid option'
raw_process_list = self.cmd(command)
#raw_process_list = cmd(command)
process_list = raw_process_list.splitlines()
if debug:
print 'The raw value returned was:'
print process_list
# processing this output will be split into two sections
# Section 1:
# This includes the cpu utilization stats. (2 lines)
# Section 2:
# This includes the states for each process (all other lines)
#############
# Section 1 #
#############
"""
{'CPU0':
{'1 minute': '7.28',
'5 minute': '4.44',
'5 second': '20.63'},
'CPU1':
{'1 minute': '0.48',
'5 minute': '0.25',
'5 second': '0.17'}}
"""
if debug:
print 'now processing the CPU usage header'
print '-----------------------------------'
cpu_usage = {}
local_dict = {}
for line_number in range(1,3):
if debug:
print 'now processing line:', line_number
print 'Raw line:', process_list[line_number]
raw_input = process_list[line_number].split()
if debug:
print 'the splite elements are:'
print raw_input
cpu_number = raw_input[0]
if debug:
print 'now processing:', cpu_number
local_dict[cpu_number] = {}
## 5 Second
raw_five_second = raw_input[5]
if debug:
print 'processing the 5 second value:', raw_five_second
# This is index notation for everything except the last char
# on the line
five_second = raw_five_second[:-1]
if debug:
print '5 second average:', five_second
local_dict[cpu_number]['5 second'] = five_second
## 1 minute
raw_one_minute = raw_input[8]
if debug:
print 'processing the 1 minute value:', raw_one_minute
# This is index notation for everything except the last char
# on the line
one_minute = raw_one_minute[:-1]
if debug:
print '1 minute average:', one_minute
local_dict[cpu_number]['1 minute'] = one_minute
## 5 minute
raw_five_minute = raw_input[11]
if debug:
print 'processing the 5 minute value:', raw_five_minute
# This is index notation for everything except the last char
# on the line
five_minute = raw_five_minute[:-1]
if debug:
print '5 minute average:', five_minute
local_dict[cpu_number]['5 minute'] = five_minute
if debug:
print 'The CPU utilizaiton dictionary contains:'
print local_dict
process_dict['CPU Utilization'] = local_dict
if debug:
print 'The return dictionary (process_dict) now contains:'
print process_dict
#############
# Section 2 #
#############
if debug:
print 'now processing per process stats'
print '--------------------------------'
for raw_line in process_list[6:]:
if debug:
print 'now processing raw line:'
print raw_line
line = raw_line.split()
local_dict = {}
raw_name = line[0].split(':')
## Process Name
name = raw_name[0]
if debug:
print 'The name is:', name
## PID (program ID)
local_dict['pid'] = line[1]
if debug:
print 'The PID is:', local_dict['pid']
## Start Time
day = line[2]
month = line[3]
year = line[4]
time = line[5]
start_time = day, month, year, time
local_dict['start_time'] = start_time
if debug:
print 'The start time is:', local_dict['start_time']
## CPU
local_dict['CPU'] = line[6]
if debug:
print 'running on CPU:', local_dict['CPU']
## uTime
local_dict['uTime'] = line[7]
if debug:
print 'running uTime:', local_dict['uTime']
## sTime
local_dict['sTime'] = line[8]
if debug:
print 'runnit sTime:', local_dict['sTime']
## % Now
raw_percent_now = line[9]
# This strips the '%' off the value to make it easier
# to process with automation
local_dict['percent now'] = raw_percent_now[:-1]
if debug:
print '% now:', local_dict['percent now']
# We store each entry in the main dictionary we return
process_dict[name] = local_dict
# uncomment of to process only 1 line
"""
if debug:
print '--------------------------'
print 'stopping here for debuging'
print '--------------------------'
sys.exit(1)
"""
if debug:
print 'returning from show_process_cpu'
print '-------------------------------'
return process_dict
    def show_tunnel_details(self, slot = 'all', handle = 'none'):
        """Return only the LAN-to-LAN tunnel entries from 'show ike-session list'.

        Arguments:
            slot   -- 0-4 to query a single slot, or 'all' (default)
            handle -- NOTE(review): accepted but never used; the intended
                      filter-by-handle feature is not implemented yet.

        Returns a list of session dictionaries (as produced by
        list_ike_sessions) whose 'IKE Version' field is '2 <LAN<->LAN>'.
        Returns the string 'Invalid Slot number supplied' on a bad slot,
        or 'No Tunnels present' when no IKE sessions exist at all.
        """
        debug = False
        slot_range = [0,1,2,3,4,'all']
        tunnel_list = []
        if not (slot in slot_range):
            print 'Invalid slot passed:', slot
            print 'Expected to be one of the following:', slot_range
            return 'Invalid Slot number supplied'
        if slot == 'all':
            raw_session_list = list_ike_sessions(self)
        else:
            raw_session_list = list_ike_sessions(self, slot)
        if raw_session_list == 'No Sessions present':
            return 'No Tunnels present'
        if debug:
            print 'The raw_session list contains:'
            print raw_session_list
        # The Tunnel entry format is similar to that of a Session.
        # The differences are:
        #   1. It contains 6 lines of output
        #   2. Its IKE Version field reads '2 <LAN<->LAN>'
        # We filter on the second difference.
        for item in raw_session_list:
            if debug:
                print 'the complete item is:'
                print item
                print 'Searching for ike version info'
                print item['IKE Version']
            if item['IKE Version'] == '2 <LAN<->LAN>':
                if debug:
                    print '!!!!Found a tunnel!!!!'
                tunnel_list.append(item)
        if debug:
            print 'Here are the Tunnels'
            print tunnel_list
        return tunnel_list
    def show_session_details(self, slot = 'all', handle = 'none'):
        """Placeholder for per-session detail retrieval.

        Intended to mirror show_tunnel_details() but for subscriber
        sessions.  The body has not been written yet, so calling this
        method does nothing and returns None.
        """
        # Method not yet written.
def show_time(self):
"""
runs the "show clock" command returns the output
"""
debug = False
ret_dict = {}
if debug:
print 'now in issu.py show_time'
time_stamp = self.cmd('show clock')
if debug:
print("The timestamp that was retrieved is: %s" % time_stamp)
# The raw input looks like this
"""
Mon Jul 19 2010 10:43:43 PDT
"""
if debug:
print("Parsing the current time")
# We parse it into it's elements
raw_time = time_stamp.split()
ret_dict['day_of_week'] = raw_time[0]
ret_dict['month'] = raw_time[1]
ret_dict['day_of_month'] = raw_time[2]
ret_dict['year'] = raw_time[3]
raw_long_time = raw_time[4]
ret_dict['timezone'] = raw_time[5]
long_time = raw_long_time.split(":")
ret_dict['hour'] = long_time[0]
ret_dict['minute'] = long_time[1]
ret_dict['second'] = long_time[2]
if debug:
print 'The fully parsed values are:'
print ret_dict
return ret_dict
def show_port_counters_detail(self, filter_port = 'None'):
"""
This function runs the command "show port counters detail" on the SSX.
It then takes the output from that command and parses all the values.
For your convenience you can filter out the data from a single port by passing in
a "filter_port" value
"""
debug = False
# Example raw input
"""
Tue Sep 28 09:45:04 PDT 2010.
Port Input Output
----- ----------------------------------- -----------------------------------
0/0 Good Packets: 267565 Packets: 149557
Octets: 194788889 Octets: 24106413
UcastPkts: 240846 UcastPkts: 149543
McastPkts: 26635 McastPkts: 0
BcastPkts: 84 BcastPkts: 14
ErrorPkts: 0 ErrorPkts: 0
OctetsGood: 194788889 OctetsGood: 24106413
OctetsBad: 0 OctetsBad: 0
PktRate(pps, 0-sec avg): 0 PktRate(pps, 0-sec avg): 0
DataRate(bps, 0-sec avg): 0 DataRate(bps, 0-sec avg): 0
BandWidthUtil(%, 0-sec avg): 0 BandWidthUtil(%, 0-sec avg): 0
CRCErrors: 0 PktsCRCErrs: 0
DataErrors: 0 TotalColls: 0
AlignErrs: 0 SingleColls: 0
LongPktErrs: 0 MultipleColls: 0
JabberErrs: 0 LateCollisions: 0
SymbolErrs: 0 ExcessiveColls: 0
PauseFrames: 0 PauseFrames: 0
UnknownMACCtrl: 0 FlowCtrlColls: 0
VeryLongPkts: 0 ExcessLenPkts: 0
RuntErrPkts: 0 UnderrunPkts: 0
ShortPkts: 0 ExcessDefers: 0
CarrierExtend: 0
SequenceErrs: 0
SymbolErrPkts: 0
NoResourceDrop: 0
1/0 Good Packets: 53279 Packets: 6028
Octets: 37718652 Octets: 955547
UcastPkts: 26555 UcastPkts: 6020
McastPkts: 26634 McastPkts: 0
BcastPkts: 90 BcastPkts: 8
ErrorPkts: 0 ErrorPkts: 0
OctetsGood: 37718652 OctetsGood: 955547
OctetsBad: 0 OctetsBad: 0
PktRate(pps, 0-sec avg): 0 PktRate(pps, 0-sec avg): 0
DataRate(bps, 0-sec avg): 0 DataRate(bps, 0-sec avg): 0
BandWidthUtil(%, 0-sec avg): 0 BandWidthUtil(%, 0-sec avg): 0
CRCErrors: 0 PktsCRCErrs: 0
DataErrors: 0 TotalColls: 0
AlignErrs: 0 SingleColls: 0
LongPktErrs: 0 MultipleColls: 0
JabberErrs: 0 LateCollisions: 0
SymbolErrs: 0 ExcessiveColls: 0
PauseFrames: 0 PauseFrames: 0
UnknownMACCtrl: 0 FlowCtrlColls: 0
VeryLongPkts: 0 ExcessLenPkts: 0
RuntErrPkts: 0 UnderrunPkts: 0
ShortPkts: 0 ExcessDefers: 0
CarrierExtend: 0
SequenceErrs: 0
SymbolErrPkts: 0
NoResourceDrop: 0
"""
command = "show port counters detail"
if debug:
print 'The command to the SSX will be:', command
print 'Calling function cli_cmd() to execute command'
raw_card_response = cli_cmd(self, command)
"""
if debug:
print 'returned from cli_cmd()'
print 'here is the raw returned value'
print raw_card_response
print '******************* *************** ************** ****************'
"""
input_dict = {}
output_dict = {}
return_dict = {}
port_name = ''
# We start by reading only line 4 and beyond. We don't want the following lines:
"""
Tue Sep 28 09:45:04 PDT 2010.
Port Input Output
----- ----------------------------------- -----------------------------------
"""
if debug:
print 'the raw_card_response contains:', len(raw_card_response), 'lines'
for line in raw_card_response[3:]:
if debug:
print 'processing line:'
print '"', line, '"'
print 'contains:', len(line), 'characters'
if len(line) > 0:
words = line.split()
if debug:
print 'words:'
print words
# At this point it splits the names and leaves the ':' on the end
# This makes for messy processing!
# We need to
# 1. identify all the words till the ':'
# 2. Join them back into a single "word"
new_line = []
if words[0] in valid_port_list:
port_name = words[0]
# We then remove it from the list
words.remove(port_name)
if debug:
print 'Found the port name:', port_name
input_dict_key = ''
input_value = ''
found_input_key = False
found_input_value = False
output_dict_key = ''
output_value = ''
found_output_key = False
found_output_value = False
if debug:
print 'the line now countains:', len(words), 'words to parse'
print words
for element in words:
if debug:
print 'working on word:', element
if found_input_key == False:
if debug:
print 'looking for the input_key value'
if element[-1] == ':':
input_dict_key = input_dict_key + ' ' + element.strip(':')
found_input_key = True
if debug:
print 'found input key:', input_dict_key
else:
input_dict_key = input_dict_key + ' ' + element
if debug:
print 'this was just part of a longer key:', input_dict_key
elif (found_input_key == True) and (found_input_value == False):
if debug:
print 'looking for the input value'
input_value = element
found_input_value = True
if debug:
print 'found the input value:', input_value
elif (found_input_value == True) and (found_output_key == False):
if debug:
print 'looking fo the output_key'
if element[-1] == ':':
output_dict_key = output_dict_key + ' ' + element.strip(':')
found_output_key = True
if debug:
print 'found the output key:', output_dict_key
else:
output_dict_key = output_dict_key + ' ' + element
if debug:
print 'this was just part of a longer key:', output_dict_key
else:
# The last thing left must be the output value
output_value = element
found_output_value = True
if debug:
print 'found the output value:', output_value
if (found_output_value == False) and (len(words) > 2):
print 'Unable to determine the output value for', output_dict_key
print 'please examine the following line:'
print line
print 'It was broken into the following words:'
print words
print 'Those were recognized as:'
print 'Input', input_dict_key, ':', input_value
print 'Output', output_dict_key, ': Unable to recoginze value!'
sys.exit(1)
input_dict[input_dict_key.lstrip()] = input_value
if len(output_value) > 0:
output_dict[output_dict_key.lstrip()] = output_value
if debug:
print '========= Parsed Data ==========='
print 'input_dict:'
print input_dict
print 'output_dict'
print output_dict
print '========= Parsed Data ==========='
else:
if debug:
print 'this should be a section end'
# When we reach the end of a section we stick our local dictionary with all the values
# for a port into the return dictionary indexed on port name/number.
dict_key = port_name + ' Input'
return_dict[dict_key] = input_dict
# We now need to clear the dictionary so we can get the next values
input_dict = {}
dict_key = port_name + ' Output'
return_dict[dict_key] = output_dict
output_dict = {}
if debug:
print 'section end found'
if debug:
print 'Done processing the command!'
print 'This is what we got back'
print return_dict
return return_dict
def show_system_mtu(self):
"""
This function runs the command "show system" on the SSX and searches for the MTU values
It will then return a dictionar that looks like this:
{'Next Boot': '1500', 'Current Boot': '1500'}
"""
debug = False
ret_dict = {}
if debug:
print 'about to run the command "show system | grep MTU"'
show_system_raw = self.cmd('show system | grep MTU')
if debug:
print 'the output of the command was:'
print show_system_raw
show_system_lines = show_system_raw.splitlines()
if debug:
print 'counted', len(show_system_lines), 'lines to parse.'
current_boot = show_system_lines[1].split()
ret_dict['Current Boot'] = current_boot[1]
next_boot = show_system_lines[2].split()
ret_dict['Next Boot'] = next_boot[1]
if debug:
print 'about to return:'
print ret_dict
return ret_dict
def show_port_detail(self, port_filter='none'):
"""
This function runs the command "show port detail" on the SSX and returns a netsted
dictionary containing all the information available.
"""
# Currenlty the port_filter is not implemented
debug = False
# Example raw data
"""
australia[r2]#show port detail
Tue Oct 26 13:35:08 PDT 2010.
0/0 Admin State: Up Media Type: Eth
Link State: Up MAC Address: 00:12:73:00:0a:d0
Connector Autonegotiation: Enabled
Type: RJ45 Speed: 100
Vendor: Marvell Duplex: Full
Model No: 88E1111 MTU 1500
Serial No: N/A
Transcvr: Unknown
1/0 Admin State: Configured Media Type: Eth
Link State: Down MAC Address: 00:12:73:00:0a:d1
Connector Autonegotiation: Enabled
Type: RJ45 Speed: 100
Vendor: Marvell Duplex: Full
Model No: 88E1111 MTU 1500
Serial No: N/A
Transcvr: Unknown
2/0 Admin State: Up Media Type: Eth
Link State: Up MAC Address: 00:12:73:00:15:80
Connector Autonegotiation: Enabled
Type: SFP Speed: 1000
Vendor: AVAGO Duplex: Full
Model No: ABCU-5710RZ MTU 1500
Serial No: AN08474W5T
Transcvr: 1000BASE-T
2/1 Admin State: Up Media Type: Eth
Link State: Up MAC Address: 00:12:73:00:15:81
Connector Autonegotiation: Enabled
Type: SFP Speed: 1000
Vendor: AVAGO Duplex: Full
Model No: ABCU-5710RZ MTU 1500
Serial No: AN07381VZ2
Transcvr: 1000BASE-T
2/2 Admin State: Up Media Type: Eth
Link State: Up MAC Address: 00:12:73:00:15:82
Connector Autonegotiation: Enabled
Type: SFP Speed: 1000
Vendor: FIBERXON INC. Duplex: Full
Model No: FTM-C012R-LM MTU 1500
Serial No: au220052201136
Transcvr: 1000BASE-T
2/3 Admin State: Up Media Type: Eth
Link State: Up MAC Address: 00:12:73:00:15:83
Connector Autonegotiation: Enabled
Type: SFP Speed: 1000
Vendor: AVAGO Duplex: Full
Model No: ABCU-5710RZ MTU 1500
Serial No: AN07250ZPR
Transcvr: 1000BASE-T
3/0 Admin State: Up Media Type: Eth
Link State: Up MAC Address: 00:12:73:00:07:40
Connector Autonegotiation: Enabled
Type: SFP Speed: 1000
Vendor: FIBERXON INC. Duplex: Full
Model No: FTM-C012R-LM MTU 1500
Serial No: AU220062414400
Transcvr: 1000BASE-T
3/1 Admin State: Up Media Type: Eth
Link State: Up MAC Address: 00:12:73:00:07:41
Connector Autonegotiation: Enabled
Type: SFP Speed: 1000
Vendor: AVAGO Duplex: Full
Model No: ABCU-5710RZ MTU 1500
Serial No: AN07331GAR
Transcvr: 1000BASE-T
3/2 Admin State: Unconfigured Media Type: Eth
Link State: Down MAC Address: 00:12:73:00:07:42
Connector Autonegotiation: Disabled
Type: SFP Speed: 1000
Vendor: AVAGO Duplex: Full
Model No: ABCU-5710RZ MTU 1500
Serial No: AN0852519F
Transcvr: 1000BASE-T
3/3 Admin State: Unconfigured Media Type: Eth
Link State: Down MAC Address: 00:12:73:00:07:43
Connector Autonegotiation: Disabled
Type: SFP Speed: 1000
Vendor: AVAGO Duplex: Full
Model No: ABCU-5710RZ MTU 1500
Serial No: AN07250ZKZ
Transcvr: 1000BASE-T
4/0 Admin State: Up Media Type: Eth
Link State: Up MAC Address: 00:12:73:00:09:48
Connector Autonegotiation: Enabled
Type: SFP Speed: 1000
Vendor: FIBERXON INC. Duplex: Full
Model No: FTM-C012R-LM MTU 1500
Serial No: AU210052303996
Transcvr: 1000BASE-T
4/1 Admin State: Up Media Type: Eth
Link State: Up MAC Address: 00:12:73:00:09:49
Connector Autonegotiation: Enabled
Type: SFP Speed: 1000
Vendor: FIBERXON INC. Duplex: Full
Model No: FTM-C012R-LM MTU 1500
Serial No: AU220053201722
Transcvr: 1000BASE-T
4/2 Admin State: Up Media Type: Eth
Link State: Up MAC Address: 00:12:73:00:09:4a
Connector Autonegotiation: Enabled
Type: SFP Speed: 1000
Vendor: FIBERXON INC. Duplex: Full
Model No: FTM-C012R-LM MTU 1500
Serial No: AU210052304186
Transcvr: 1000BASE-T
4/3 Admin State: Up Media Type: Eth
Link State: Up MAC Address: 00:12:73:00:09:4b
Connector Autonegotiation: Enabled
Type: SFP Speed: 1000
Vendor: FIBERXON INC. Duplex: Full
Model No: FTM-C012R-LM MTU 1500
Serial No: AU220053201743
Transcvr: 1000BASE-T
"""
## Note:
#
# This data is very similar to other data but the "Connector" data is wrapped so
# that messes things up a bit. We need to be sure to do the following:
# 1. Look for the keyword "Connector" and then skip it
# 2. For all the connector data we should append the word "Connector" on
# to the values. Such as "Type" becomes "Connector Type"
#
# The MAC has a bunch of Collons in it ":" and that could get "split" out
# We should fix that somehow.
if debug:
print '&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&'
print 'now in issu.py show_port_details'
print 'about to run the command "show port detail"'
show_port_detail_raw = self.cmd('show port detail')
if debug:
print 'the output of the command was:'
print show_port_detail_raw
ret_dict = {}
# These dictionaries should not be needed.
#left_dict = {}
#right_dict = {}
local_dict = {}
port_name = ''
valid_connector_values = ['Type','Vendor','Model No','Serial No','Transcvr']
show_port_detail_lines = show_port_detail_raw.splitlines()
for line in show_port_detail_lines[2:]:
if debug:
print 'processing line:'
print '"', line, '"'
print 'contains:', len(line), 'characters'
if len(line) > 0:
#=================================================
# The values are key:value key:value on the line
# This means there are two columns of data
# We look for the left one first
# then we look for the right one.
left_dict_key = ''
left_value = ''
found_left_key = False
found_left_value = False
right_dict_key = ''
right_value = ''
found_right_key = False
found_right_value = False
found_connector = False
words = line.split()
if debug:
print 'words:'
print words
# At this point it splits the names and leaves the ':' on the end
# This makes for messy processing!
# We need to
# 1. identify all the words till the ':'
# 2. Join them back into a single "word"
new_line = []
if words[0] in valid_port_list:
port_name = words[0]
# We then remove it from the list
words.remove(port_name)
if debug:
print 'Found the port name:', port_name
elif words[0] == 'Connector':
if debug:
print 'Found the Connector'
found_connector = True
if debug:
print 'Removing it from the line'
words.remove('Connector')
if debug:
print 'the line now countains:', len(words), 'words to parse'
print words
for element in words:
if debug:
print 'working on word:', element
if found_left_key == False:
if debug:
print 'looking for the left_key value'
if element[-1] == ':':
left_dict_key = left_dict_key + ' ' + element.strip(':')
found_left_key = True
if debug:
print 'found left key:', left_dict_key
else:
left_dict_key = left_dict_key + ' ' + element
if debug:
print 'this was just part of a longer key:', left_dict_key
elif (found_left_key == True) and (found_left_value == False):
if debug:
print 'looking for the left value'
left_value = element
found_left_value = True
if debug:
print 'found the left value:', left_value
elif (found_left_value == True) and (found_right_key == False):
if debug:
print 'looking fo the right_key'
if element[-1] == ':':
if element == 'Duplex:':
left_value = left_value + right_dict_key
if debug:
print 'Found a piece of the last value:', right_dict_key
right_dict_key = element.strip(':')
found_right_key = True
if debug:
print 'found the right key:', right_dict_key
else:
right_dict_key = right_dict_key + ' ' + element.strip(':')
found_right_key = True
if debug:
print 'found the right key:', right_dict_key
elif element == 'MTU':
right_dict_key = element
found_right_key = True
if debug:
print 'found the right key:', right_dict_key
else:
right_dict_key = right_dict_key + ' ' + element
if debug:
print 'this was just part of a longer key:', right_dict_key
else:
# The last thing left must be the right value
right_value = element
found_right_value = True
if debug:
print 'found the right value:', right_value
"""
if (found_right_value == False) and (len(words) > 2):
print 'Unable to determine the right value for', right_dict_key
print 'please examine the following line:'
print line
print 'It was broken into the following words:'
print words
print 'Those were recognized as:'
print 'left', left_dict_key, ':', left_value
print 'right', right_dict_key, ': Unable to recoginze value!'
sys.exit(1)
"""
#left_dict[left_dict_key.lstrip()] = left_value
local_dict[left_dict_key.lstrip()] = left_value
if len(right_value) > 0:
#right_dict[right_dict_key.lstrip()] = right_value
local_dict[right_dict_key.lstrip()] = right_value
if debug:
print '========= Parsed Data ==========='
print 'output for port:', port_name
print 'local_dict:'
print local_dict
print '========= Parsed Data ==========='
else:
if debug:
print 'this should be a section end for port:', port_name
# When we reach the end of a section we stick our local dictionary with all the values
# for a port into the return dictionary indexed on port name/number.
dict_key = port_name
ret_dict[port_name] = local_dict
# We now need to clear the dictionary so we can get the next values
local_dict = {}
if debug:
print 'section end found'
# There is still the last port information in the buffer. Need to save it too
dict_key = port_name
ret_dict[port_name] = local_dict
if debug:
print 'Last port found:', port_name
print 'end of processing data'
if debug:
print 'completed show_port_details method'
print '&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&'
return ret_dict
    def show_ip_interface_detail(self, context='local'):
        """
        Run the command "show ip interface detail" on the SSX and return a
        nested dictionary containing all the information available.

        context -- the context to switch into before running the command
                   (default 'local'); the box is switched back to the
                   'local' context before returning.

        Returns {interface_name: {label: value, ...}, ...} where the labels
        are the field names printed by the CLI ('State', 'mtu', 'Arp',
        'Type', 'Bound to', ...).
        """
        # Sample raw_data
        """
        Name: tunnel_loopbk IP address: 10.19.0.1/32
        State: Up mtu:
        Arp: Off Arp timeout: 3600
        Arp refresh: Off Ignore DF: Off
        Icmp unreachables: Off Mask reply: Off
        Default source: No Description: None
        Type: Loopback Index: 0x25
        Bind/session count: 0 Session default: No
        Bound to: None
        Name: tun_ssx1 IP address: 172.1.1.2/32
        State: Up mtu: 1500
        Arp: Off Arp timeout: 3600
        Arp refresh: Off Ignore DF: Off
        Icmp unreachables: Off Mask reply: Off
        Default source: No Description: None
        Type: Tunnel Index: 0x26
        Bind/session count: 1 Session default: No
        Bound to: lan2lan/ip4/2
        Name: 4-0 IP address: 10.11.40.1/24
        State: Up mtu: 1500
        Arp: On Arp timeout: 3600
        Arp refresh: Off Ignore DF: Off
        Icmp unreachables: Off Mask reply: Off
        Default source: No Description: None
        Type: Classic Index: 0x27
        Bind/session count: 1 Session default: No
        Bound to: cct 4/0/1
        Name: 4-1 IP address: 10.11.41.1/24
        State: Up mtu: 1500
        Arp: On Arp timeout: 3600
        Arp refresh: Off Ignore DF: Off
        Icmp unreachables: Off Mask reply: Off
        Default source: No Description: None
        Type: Classic Index: 0x28
        Bind/session count: 1 Session default: No
        Bound to: cct 4/1/1
        Name: 4-2 IP address: 10.11.42.1/24
        State: Up mtu: 1500
        Arp: On Arp timeout: 3600
        Arp refresh: Off Ignore DF: Off
        Icmp unreachables: Off Mask reply: Off
        Default source: No Description: None
        Type: Classic Index: 0x29
        Bind/session count: 1 Session default: No
        Bound to: cct 4/2/1
        Name: 4-3 IP address: 10.11.43.1/24
        State: Up mtu: 1500
        Arp: On Arp timeout: 3600
        Arp refresh: Off Ignore DF: Off
        Icmp unreachables: Off Mask reply: Off
        Default source: No Description: None
        Type: Classic Index: 0x2a
        Bind/session count: 1 Session default: No
        Bound to: cct 4/3/1
        Name: 2-0 IP address: 10.11.20.1/24
        State: Up mtu: 1500
        Arp: On Arp timeout: 3600
        Arp refresh: Off Ignore DF: Off
        Icmp unreachables: Off Mask reply: Off
        Default source: No Description: None
        Type: Classic Index: 0x2c
        Bind/session count: 1 Session default: No
        Bound to: cct 2/0/1
        Name: 2-1 IP address: 10.11.21.1/24
        State: Up mtu: 1500
        Arp: On Arp timeout: 3600
        Arp refresh: Off Ignore DF: Off
        Icmp unreachables: Off Mask reply: Off
        Default source: No Description: None
        Type: Classic Index: 0x2d
        Bind/session count: 1 Session default: No
        Bound to: cct 2/1/1
        Name: 2-2 IP address: 10.11.22.1/24
        State: Up mtu: 1500
        Arp: On Arp timeout: 3600
        Arp refresh: Off Ignore DF: Off
        Icmp unreachables: Off Mask reply: Off
        Default source: No Description: None
        Type: Classic Index: 0x2e
        Bind/session count: 1 Session default: No
        Bound to: cct 2/2/1
        Name: 2-3 IP address: 10.11.23.1/24
        State: Up mtu: 1500
        Arp: On Arp timeout: 3600
        Arp refresh: Off Ignore DF: Off
        Icmp unreachables: Off Mask reply: Off
        Default source: No Description: None
        Type: Classic Index: 0x2f
        Bind/session count: 1 Session default: No
        Bound to: cct 2/3/1
        Name: 3-0 IP address: 10.11.30.1/24
        State: Up mtu: 1500
        Arp: On Arp timeout: 3600
        Arp refresh: Off Ignore DF: Off
        Icmp unreachables: Off Mask reply: Off
        Default source: No Description: None
        Type: Classic Index: 0x30
        Bind/session count: 1 Session default: No
        Bound to: cct 3/0/1
        Name: 3-1 IP address: 10.11.31.1/24
        State: Up mtu: 1400
        Arp: On Arp timeout: 3600
        Arp refresh: Off Ignore DF: Off
        Icmp unreachables: Off Mask reply: Off
        Default source: No Description: None
        Type: Classic Index: 0x31
        Bind/session count: 1 Session default: No
        Bound to: cct 3/1/1
        Name: 3-2 IP address: 10.11.32.1/24
        State: Down mtu: 1500
        Arp: On Arp timeout: 3600
        Arp refresh: Off Ignore DF: Off
        Icmp unreachables: Off Mask reply: Off
        Default source: No Description: None
        Type: Classic Index: 0x32
        Bind/session count: 0 Session default: No
        Bound to: None
        Name: 3-3 IP address: 10.11.33.1/24
        State: Down mtu: 1500
        Arp: On Arp timeout: 3600
        Arp refresh: Off Ignore DF: Off
        Icmp unreachables: Off Mask reply: Off
        Default source: No Description: None
        Type: Classic Index: 0x33
        Bind/session count: 0 Session default: No
        Bound to: None
        """
        debug = False
        # The ip interfaces are based on the context you are in
        self.cmd("context %s" % context)
        if debug:
            print '&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&'
            print 'now in issu.py show_ip_interface_detail'
            print 'about to run the command "show ip interface detail"'
        show_ip_interface_detail_raw = self.cmd('show ip interface detail')
        ret_dict = {}
        local_dict = {}
        # NOTE(review): end_of_section is assigned here but never read again.
        end_of_section = False
        port_name = ''
        show_ip_interface_detail_lines = show_ip_interface_detail_raw.splitlines()
        # Skip the first line (command echo).  Non-blank lines hold up to two
        # "key: value" columns; a blank line ends one interface's section.
        for line in show_ip_interface_detail_lines[1:]:
            if debug:
                print 'processing line:'
                print '"', line, '"'
                print 'contains:', len(line), 'characters'
            if len(line) > 0:
                #=================================================
                # The values are key:value key:value on the line
                # This means there are two columns of data
                # We look for the left one first
                # then we look for the right one.
                # Per-line parser state: keys may span several words, so words
                # are accumulated until one ends in ':' (the key/value split).
                left_dict_key = ''
                left_value = ''
                found_left_key = False
                found_left_value = False
                right_dict_key = ''
                right_value = ''
                found_right_key = False
                found_right_value = False
                # NOTE(review): found_connector is never read below.
                found_connector = False
                words = line.split()
                if debug:
                    print 'words:'
                    print words
                # At this point it splits the names and leaves the ':' on the end
                # This makes for messy processing!
                # We need to
                # 1. identify all the words till the ':'
                # 2. Join them back into a single "word"
                # NOTE(review): new_line is never used after this point.
                new_line = []
                if debug:
                    print 'the line now countains:', len(words), 'words to parse'
                    print words
                for element in words:
                    if debug:
                        print 'working on word:', element
                    if found_left_key == False:
                        if debug:
                            print 'looking for the left_key value'
                        # A word ending in ':' terminates the (possibly
                        # multi-word) left-hand key.
                        if element[-1] == ':':
                            left_dict_key = left_dict_key + ' ' + element.strip(':')
                            found_left_key = True
                            if debug:
                                print 'found left key:', left_dict_key
                        else:
                            left_dict_key = left_dict_key + ' ' + element
                            if debug:
                                print 'this was just part of a longer key:', left_dict_key
                    elif (found_left_key == True) and (found_left_value == False):
                        if debug:
                            print 'looking for the left value'
                        # The left-hand value is taken as a single word.
                        left_value = element
                        found_left_value = True
                        if debug:
                            print 'found the left value:', left_value
                    elif found_left_key and found_left_value and (len(port_name) < 1):
                        # First line of a new section: the 'Name' value names
                        # the interface this section describes.
                        # NOTE(review): this branch consumes the current word
                        # without appending it to the right-hand key, so on the
                        # "Name: ... IP address: ..." line the right key parses
                        # as 'address' rather than 'IP address' — confirm this
                        # is the intended key before relying on it.
                        if (left_dict_key == ' Name'):
                            port_name = left_value
                            if debug:
                                print '!!!! found ip interface name:', port_name
                    elif (found_left_value == True) and (found_right_key == False):
                        if debug:
                            print 'looking fo the right_key'
                        if element[-1] == ':':
                            # NOTE(review): 'Duplex:' does not appear in the
                            # sample "show ip interface detail" output above;
                            # this special case looks carried over from the
                            # port-details parser earlier in this file.
                            if element == 'Duplex:':
                                left_value = left_value + right_dict_key
                                if debug:
                                    print 'Found a piece of the last value:', right_dict_key
                                right_dict_key = element.strip(':')
                                found_right_key = True
                                if debug:
                                    print 'found the right key:', right_dict_key
                            else:
                                right_dict_key = right_dict_key + ' ' + element.strip(':')
                                found_right_key = True
                                if debug:
                                    print 'found the right key:', right_dict_key
                        else:
                            right_dict_key = right_dict_key + ' ' + element
                            if debug:
                                print 'this was just part of a longer key:', right_dict_key
                    else:
                        # The last thing left must be the right value
                        # NOTE(review): for multi-word right-hand values (e.g.
                        # "Bound to: cct 4/0/1") only the last word survives,
                        # since each word overwrites right_value.
                        right_value = element
                        found_right_value = True
                        if debug:
                            print 'found the right value:', right_value
                """
                if (found_right_value == False) and (len(words) > 2):
                    print 'Unable to determine the right value for', right_dict_key
                    print 'please examine the following line:'
                    print line
                    print 'It was broken into the following words:'
                    print words
                    print 'Those were recognized as:'
                    print 'left', left_dict_key, ':', left_value
                    print 'right', right_dict_key, ': Unable to recoginze value!'
                    sys.exit(1)
                """
                # Store the parsed pairs; keys are lstripped because the
                # accumulation above always prefixes a space.
                #left_dict[left_dict_key.lstrip()] = left_value
                local_dict[left_dict_key.lstrip()] = left_value
                if len(right_value) > 0:
                    #right_dict[right_dict_key.lstrip()] = right_value
                    local_dict[right_dict_key.lstrip()] = right_value
                if debug:
                    print '========= Parsed Data ==========='
                    print 'output for port:', port_name
                    print 'local_dict:'
                    print local_dict
                    print '========= Parsed Data ==========='
            else:
                if debug:
                    print 'this should be a section end for port:', port_name
                # When we reach the end of a section we stick our local dictionary with all the values
                # for a port into the return dictionary indexed on port name/number.
                # NOTE(review): dict_key is assigned but never used.
                dict_key = port_name
                ret_dict[port_name] = local_dict
                # We now need to clear the dictionary so we can get the next values
                local_dict = {}
                port_name = ''
                if debug:
                    print 'section end found'
        # There is still the last port information in the buffer. Need to save it too
        # NOTE(review): if the output happens to end with a blank line this
        # stores an empty entry under the key '' — confirm against real output.
        dict_key = port_name
        ret_dict[port_name] = local_dict
        if debug:
            print 'Last port found:', port_name
            print 'end of processing data'
        # Return the system back to the default context
        self.cmd("context local")
        if debug:
            print 'completed show_port_details method'
            print '&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&'
        return ret_dict
def list_contexts(self):
"""This method runs the command "show context all" and parses the output.
It returns a nested dictionary indexed on the context names
dict = {'context name':{'index':'1','domains':'null'}}
to get only the context names use
dict.keys()
"""
debug = False
retrn_dict = {}
if debug:
print 'Now in issu.py method list_contexts'
command = "show context all"
raw_output = self.cmd(command)
raw_lines = raw_output.splitlines()
if debug:
print 'The raw_lines is:', raw_lines
print '==============================='
for line in raw_lines[3:]:
local_dict = {}
if debug:
print 'processing line:', line
words = line.split()
context_name = words[0]
local_dict['index'] = words[1]
local_dict['domains'] = words[2:]
retrn_dict[context_name] = local_dict
if debug:
print 'Done with list_contexts returning'
print 'The retrn_dict contains:'
print retrn_dict
return retrn_dict
    def show_logging(self):
        """
        Execute the command "show logging" (hidden from tab completion) and
        parse its output.

        The command reports the internal buffers that control when the log
        files are flushed to disk.  Each log occupies two physical output
        lines — the second line carries the overflow of the index columns —
        so the parser pairs lines using the odd_or_even() helper.

        Returns a dictionary keyed on log name ('Local', 'Glob-R', ...),
        each value being a dictionary of the index columns, plus top-level
        'File Readers' and 'Syslog Readers' counts.  Exits the program if
        the command produced no output at all.
        """
        debug = False
        # Example Data
        """
        00 Save Next
        01 Log Size First-Ix Last-Ix Next-Ix Start-Ix Save-Ix W Not Read
        02 ------ ----- ---------- ---------- ---------- ---------- ---------- - ----------
        03 Local 8192 0 5653 5654 N 0
        04 0 5653 5654
        05 Glob-R 8192 0 1858 1859 1806 7950 N 0
        06 0 1858 1859 1806 7950
        07 Glob-D 32768 304130 336897 336898 336790 361366 Y 24611
        08 9218 9217 9218 9110 918
        09 Glob-I 2048 1859 3906 3907 3606 5142 Y 0
        10 1859 1858 1859 1558 1046
        11 File Readers 1
        12 Syslog Readers 0
        """
        if debug:
            print 'Now in issu.py method show_logging'
        ret_dict = {}
        command = 'show logging'
        raw_output = self.cmd(command)
        if debug:
            print 'The raw values are:'
            print raw_output
            print '-------------------------------------'
        lines = raw_output.splitlines()
        if len(lines) > 0:
            local_dict = {}
            # line_number tracks the absolute output line; it starts at 3
            # because lines[4:12] below begins at the 'Local' row, and
            # odd_or_even(3) == 'Odd' marks that row as first of its pair.
            line_number = 3
            if debug:
                # NOTE(review): the loop below actually covers lines[4:12],
                # not the [4:11] shown by this debug print.
                print 'The following lines will be processed:'
                print lines[4:11]
            # Log rows come in pairs: the odd line holds the log name and the
            # first set of indexes, the even line the second set.
            for line in lines[4:12]:
                if debug:
                    print 'processing line:'
                    print line
                words = line.split()
                if debug:
                    print 'Broke the line into these words:'
                    print words
                    print 'testing to see if we are on an odd or even line'
                    print odd_or_even(line_number)
                # odd_or_even() is a helper defined elsewhere in this module.
                if (odd_or_even(line_number) == 'Odd'):
                    local_dict = {}
                    log_name = words[0]
                    if debug:
                        print 'Found the log name:', log_name
                    local_dict['Size'] = words[1]
                    if debug:
                        print 'Found the Size:', words[1]
                    local_dict['First-Ix 1'] = words[2]
                    if debug:
                        print 'Found the First-Ix 1:', words[2]
                    local_dict['Last-Ix 1'] = words[3]
                    if debug:
                        print 'Found Last-IX 1:', words[3]
                    local_dict['Next-Ix 1'] = words[4]
                    if debug:
                        print 'Found Next-Ix 1:', words[4]
                    # The 'Local' log has no Save-Ix columns, so its W flag
                    # and Not Read count come right after the index columns.
                    if log_name == 'Local':
                        if debug:
                            print 'Processing local info'
                        local_dict['W'] = words[5]
                        if debug:
                            print 'found W:', words[5]
                        local_dict['Not Read'] = words[6]
                        if debug:
                            print 'found Not Read:', words[6]
                    else:
                        if debug:
                            print 'Not processing local info'
                        local_dict['Save Start-Ix 1'] = words[5]
                        if debug:
                            print 'found Save Start-Ix 1:', words[5]
                        local_dict['Next Save-Ix 1'] = words[6]
                        if debug:
                            print 'found Next Save-Ix 1:', words[6]
                        local_dict['W'] = words[7]
                        if debug:
                            print 'found W:', words[7]
                        local_dict['Not Read'] = words[8]
                        if debug:
                            print 'found Not Read:', words[8]
                # Second line of the pair: the '2' column set.  log_name still
                # holds the name parsed from the preceding odd line.
                if (odd_or_even(line_number) == 'Even'):
                    local_dict['First-Ix 2'] = words[0]
                    if debug:
                        print 'found First-Ix 2:', words[0]
                    local_dict['Last-Ix 2'] = words[1]
                    if debug:
                        print 'found Last-Ix 2:', words[1]
                    local_dict['Next-Ix 2'] = words[2]
                    if debug:
                        print 'found Next-Ix 2:', words[2]
                    if not(log_name == 'Local'):
                        local_dict['Save Start-Ix 2'] = words[3]
                        if debug:
                            print 'Found Save Start-Ix 2:', words[3]
                        local_dict['Next Save-Ix 2'] = words[4]
                        if debug:
                            print 'found Next Save-Ix 2:', words[4]
                if debug:
                    print 'storing loca_dict in ret_dict for log name:', log_name
                # The same dictionary object is stored on both lines of the
                # pair; the even line simply mutates it with the second set.
                ret_dict[log_name] = local_dict
                if debug:
                    print 'The processed line looks like:'
                    print local_dict
                if debug:
                    print 'Done with line number:', line_number
                line_number = line_number + 1
            if debug:
                print '-------------------------------------'
            # Fixed positions: lines[12] and [13] carry the reader counts,
            # e.g. "File Readers 1" / "Syslog Readers 0".
            file_readers_raw = lines[12].split()
            ret_dict['File Readers'] = file_readers_raw[2]
            syslog_readers_raw = lines[13].split()
            ret_dict['Syslog Readers'] = syslog_readers_raw[2]
        else:
            print 'We got no lines back from the command "show logging"!'
            print 'Something is broken!'
            sys.exit(1)
        return ret_dict
def show_mem(self):
"""
runs the command "show memory" and parses the output
"""
# Example input
"""
australia[local]#show memory
00
01 Mon Jun 20 16:15:28 PDT 2011.
02 Slot Type Bytes Total Bytes Used % Available
03 ----- ----- --------------- --------------- ------------
04 0 IMC1 2,147,483,648 689,876,992 67
05 1 IMC1 2,147,483,648 652,500,992 69
"""
debug = False
ret_dict = {}
if debug:
print 'now in show_mem part of issu.py'
command = 'show mem'
raw_mem_list = self.cmd(command)
mem_list = raw_mem_list.splitlines()
if debug:
print 'The raw value returned was:'
for line in mem_list:
print line
## Date/Time
local_dict = {}
raw_line = mem_list[1]
if debug:
print 'the raw line is:'
print raw_line
words = raw_line.split()
local_dict['day of week'] = words[0]
local_dict['month'] = words[1]
local_dict['day'] = words[2]
raw_time = words[3]
if debug:
print 'the raw time is:'
print raw_time
time = raw_time.split(':')
local_dict['hour'] = time[0]
local_dict['minute'] = time[1]
local_dict['second'] = time[2]
local_dict['time zone'] = words[4]
local_dict['year'] = words[5]
ret_dict['time stamp'] = local_dict
for raw_line in mem_list[4:]:
local_dict = {}
if debug:
print 'the raw line is:'
print raw_line
words = raw_line.split()
if debug:
print 'the split values are:'
print words
slot = 'slot ' + words[0]
local_dict['type'] = words[1]
local_dict['bytes total'] = words[2]
local_dict['bytes used'] = words[3]
local_dict['percent available'] = words[4]
if debug:
print 'the local dictionary contains:'
for key in local_dict.keys():
print key, ':' , local_dict[key]
# pack the values into the return dictionary
ret_dict[slot] = local_dict
return ret_dict
def show_syscount(self):
"""
Executes the command "show syscount" and then returns
a parsed dictionary
"""
# Example Input
"""
0
1 Wed Jun 22 07:54:55 PDT 2011.
2 System Counters:
3 IMC Switchover: 0
4 Card Reset: 2
5 Card Restart: 0
6 Process Core: 1
7 Process Exit: 1
8 Process Restart: 0
9 CRIT Event: 4
10 ERR Event: 1
11 WARN Event: 25
"""
debug = False
if debug:
print 'now in show_syscount in issu.py'
ret_dict = {}
command = 'show syscount'
raw_syscount = self.cmd(command)
syscount_lines = raw_syscount.splitlines()
if debug:
print 'the raw values are:'
line_index = 0
for line in syscount_lines:
print repr(line_index).ljust(2), line
line_index = line_index + 1
# we throw away lines 0-2
for line in syscount_lines[3:]:
if debug:
print 'processing the following line:'
print line
# Break the line into words
words = line.split(':')
counter_name = words[0].lstrip()
if counter_name == 'IMC Switchover':
ret_dict['IMC Switchover'] = int(words[1])
elif counter_name == 'Card Reset':
ret_dict['Card Reset'] = int(words[1])
elif counter_name == 'Card Restart':
ret_dict['Card Restart'] = int(words[1])
elif counter_name == 'Process Core':
ret_dict['Process Core'] = int(words[1])
elif counter_name == 'Process Exit':
ret_dict['Process Exit'] = int(words[1])
elif counter_name == 'Process Restart':
ret_dict['Process Restart'] = int(words[1])
elif counter_name == 'CRIT Event':
ret_dict['CRIT Event'] = int(words[1])
elif counter_name == 'ERR Event':
ret_dict['ERR Event'] = int(words[1])
elif counter_name == 'WARN Event':
ret_dict['WARN Event'] = int(words[1])
else:
print 'While processing the "show syscount" command encountered'
print 'the following value: "' + words[0] + '"'
print 'the method show_syscount can not process it!'
sys.exit(1)
return ret_dict
def show_version(self, slot='active'):
"""
runs the command "show version"
optionally will run the command "show version slot 1"
it then parses the output and returns a dictionary of values
"""
debug = True
# the default behavior is to show the "active" cards version
# optionally you can specify a slot
# Sample input
"""
0
1 Slot 1 Information (IMC1):
2 ----------------------------------------------------------------------------
3 StokeOS Release 4.146X1B1S4 (2011061319).
4 Built Mon Jun 13 20:41:21 PDT 2011 by builder.
5
6 Stoke uptime is 2 minutes
7 Card uptime is 2 minutes
8
9 System restart at Wed Jun 22 09:27:18 PDT 2011
10 Card restart at Wed Jun 22 09:27:18 PDT 2011
11 Restart by remote reset
12
13 Firmware Version: v91
14
15 Stoke-Boot Version
16 *Booted Primary: StokeBoot Release 4.2 (2009120817).
17 Booted Backup: StokeBoot Release 4.2 (2009120817).
18 Stoke-Bloader Version
19 *Booted Primary: Stoke Bootloader Release 4.146X1B1S4 (2011061319).
20 Booted Backup: Stoke Bootloader Release 4.6B1S4 (2011061319).
"""
if debug:
print 'now in show_version is issu.py'
ret_dict = {}
valid_slot_list = range(0,5)
if slot == 'active':
command = 'show version'
elif int(slot) in valid_slot_list:
command = 'show version slot ' + str(slot)
else:
print 'invalid option for slot:', slot
print 'must be one of the following:', valid_slot_list
sys.exit(1)
raw_version_list = self.cmd(command)
version_list = raw_version_list.splitlines()
if debug:
print 'the raw input was:'
line_index = 0
for line in version_list:
print repr(line_index).ljust(2), line
line_index = line_index + 1
# Parsing:
# Slot 1 Information (IMC1):
line = version_list[1]
if debug:
print 'parsing:', line
words = line.split()
ret_dict['slot'] = words[1]
raw_card_type = words[3]
card_type = raw_card_type.strip('():')
ret_dict['card type'] = card_type
# Parsing:
# StokeOS Release 4.146X1B1S4 (2011061319).
line = version_list[3]
if debug:
print 'parsing:', line
words = line.split()
ret_dict['version'] = words[2]
raw_build_id = words[3]
build_id = raw_build_id.strip('().')
ret_dict['build id'] = build_id
# Parsing
# Built Mon Jun 13 20:41:21 PDT 2011 by builder.
line = version_list[4]
if debug:
print 'parsing:', line
words = line.split()
if debug:
print 'the split line:'
print words
local_dict = {}
local_dict['day of week'] = words[1]
local_dict['month'] = words[2]
local_dict['day of month'] = words[3]
raw_time = words[4]
time = raw_time.split(':')
local_dict['hour'] = time[0]
local_dict['minute'] = time[1]
local_dict['second'] = time[2]
local_dict['time zone'] = words[5]
local_dict['year'] = words[6]
ret_dict['build date time'] = local_dict
ret_dict['build by'] = words[8]
if (slot == 'active') or (version_list[6][0:5] == 'Stoke'):
if debug:
print 'parsing output for Active card'
# Parsing
# Stoke uptime is 2 minutes
line = version_list[6]
words = line.split()
local_dict = {}
if len(words) == 5:
local_dict['hour'] = 0
local_dict['minute'] = int(words[3])
else:
local_dict['hour'] = int(words[3])
local_dict['minute'] = int(words[5])
ret_dict['system uptime'] = local_dict
# Parsing
# Card uptime is 2 minutes
line = version_list[7]
if debug:
print 'parsing:', line
words = line.split()
if debug:
print 'the split line contains:'
print words
local_dict = {}
if len(words) == 5:
local_dict['hour'] = 0
local_dict['minute'] = int(words[3])
else:
local_dict['hour'] = int(words[3])
local_dict['minute'] = int(words[5])
ret_dict['card uptime'] = local_dict
# Parsing
# System restart at Wed Jun 22 09:27:18 PDT 2011
line = version_list[9]
if debug:
print 'parsing:', line
words = line.split()
local_dict = {}
local_dict['day of week'] = words[3]
local_dict['month'] = words[4]
local_dict['day of month'] = words[5]
raw_time = words[6]
time = raw_time.split(':')
local_dict['hour'] = time[0]
local_dict['minute'] = time[1]
local_dict['second'] = time[2]
local_dict['time zone'] = words[7]
local_dict['year'] = words[8]
ret_dict['system restart date time'] = local_dict
# Parsing
# Card restart at Wed Jun 22 09:27:18 PDT 2011
line = version_list[10]
if debug:
print 'parsing:', line
words = line.split()
local_dict = {}
local_dict['day of week'] = words[3]
local_dict['month'] = words[4]
local_dict['day of month'] = words[5]
raw_time = words[6]
time = raw_time.split(':')
local_dict['hour'] = time[0]
local_dict['minute'] = time[1]
local_dict['second'] = time[2]
local_dict['time zone'] = words[7]
local_dict['year'] = words[8]
ret_dict['card restart date time'] = local_dict
# Parsing
# Restart by remote reset
ret_dict['restart by'] = version_list[11:]
# Parsing
# Firmware Version: v91
line = version_list[13]
if debug:
print 'parsing:', line
words = line.split()
ret_dict['firmware version'] = words[2]
# Parsing
# *Booted Primary: StokeBoot Release 4.2 (2009120817).
line = version_list[16]
if debug:
print 'parsing:', line
words = line.split()
local_dict = {}
version = words[4]
build_id = words[5].strip('().')
local_dict['primary'] = {'version': version, 'build id': build_id}
# Parsing
# Booted Backup: StokeBoot Release 4.2 (2009120817).
line = version_list[17]
if debug:
print 'parsing:', line
words = line.split()
local_dict = {}
version = words[4]
build_id = words[5].strip('().')
local_dict['backup'] = {'version': version, 'build id': build_id}
ret_dict['stoke boot version'] = local_dict
# Parsing
# *Booted Primary: Stoke Bootloader Release 4.146X1B1S4 (2011061319).
line = version_list[21]
if debug:
print 'parsing:', line
words = line.split()
local_dict = {}
version = words[4]
build_id = words[5].strip('().')
local_dict['primary'] = {'version': version, 'build id': build_id}
# Parsing
# Booted Backup: Stoke Bootloader Release 4.6B1S4 (2011061319).
line = version_list[22]
if debug:
print 'parsing:', line
words = line.split()
local_dict = {}
version = words[4]
build_id = words[5].strip('().')
local_dict['backup'] = {'version': version, 'build id': build_id}
ret_dict['stoke os version'] = local_dict
elif slot in ['0','1']:
if debug:
print 'parsing output for selected card'
print 'card is either in slot-0 or slot-1'
# sample input
"""
0
1 Slot 1 Information (IMC1):
2 ----------------------------------------------------------------------------
3 StokeOS Release 4.6B1S2 (2010062215).
4 Built Tue Jun 22 16:44:08 PDT 2010 by builder.
5
6 Card uptime is 1 week, 5 days, 8 hours, 38 minutes
7
8 Card restart at Thu Jun 23 02:18:41 PDT 2011
9 Restart by remote reset
10
11 Firmware Version: v91
12
13 Stoke-Boot Version
14 *Booted Primary: StokeBoot Release 4.2 (2009120817).
15 Booted Backup: StokeBoot Release 4.2 (2009120817).
16 Stoke-Bloader Version
17 *Booted Primary: Stoke Bootloader Release 4.6B1S2 (2010062215).
18 Booted Backup: Stoke Bootloader Release 4.146X1B1S4 (2011061319).
19 Update Backup: Stoke Bootloader Release 4.6B1S2 (2010062215).
"""
# Parsing
# Card uptime is 2 minutes
line = version_list[6]
if debug:
print 'parsing:', line
words = line.split()
if debug:
print 'the split line contains:'
print words
local_dict = {}
if len(words) == 5:
local_dict['hour'] = 0
local_dict['minute'] = int(words[3])
else:
local_dict['hour'] = int(words[3])
local_dict['minute'] = int(words[5])
ret_dict['card uptime'] = local_dict
# Parsing
# Card restart at Thu Jun 23 02:33:34 PDT 2011
line = version_list[8]
if debug:
print 'parsing:', line
words = line.split()
local_dict = {}
local_dict['day of week'] = words[3]
local_dict['month'] = words[4]
local_dict['day of month'] = words[5]
raw_time = words[6]
time = raw_time.split(':')
local_dict['hour'] = time[0]
local_dict['minute'] = time[1]
local_dict['second'] = time[2]
local_dict['time zone'] = words[7]
local_dict['year'] = words[8]
ret_dict['card restart date time'] = local_dict
# Parsing
# Restart by remote reset
ret_dict['restart by'] = version_list[8:]
# Parsing
# Firmware Version: v91
line = version_list[11]
if debug:
print 'parsing:', line
words = line.split()
ret_dict['firmware version'] = words[2]
# Parsing
# *Booted Primary: StokeBoot Release 4.2 (2009120817).
line = version_list[14]
if debug:
print '16 *Booted Primary: StokeBoot Release 4.2 (2009120817).'
print 'parsing:', line
words = line.split()
local_dict = {}
version = words[4]
build_id = words[5].strip('().')
local_dict['primary'] = {'version': version, 'build id': build_id}
# Parsing
# Booted Backup: StokeBoot Release 4.2 (2009120817).
line = version_list[15]
if debug:
print 'parsing:', line
words = line.split()
local_dict = {}
version = words[4]
build_id = words[5].strip('().')
local_dict['backup'] = {'version': version, 'build id': build_id}
ret_dict['stoke boot version'] = local_dict
# Parsing
# *Booted Primary: Stoke Bootloader Release 4.146X1B1S4 (2011061319).
line = version_list[17]
if debug:
print 'parsing:', line
words = line.split()
local_dict = {}
version = words[4]
build_id = words[5].strip('().')
local_dict['primary'] = {'version': version, 'build id': build_id}
# Parsing
# Booted Backup: Stoke Bootloader Release 4.6B1S4 (2011061319).
line = version_list[18]
if debug:
print 'parsing:', line
words = line.split()
local_dict = {}
version = words[4]
build_id = words[5].strip('().')
local_dict['backup'] = {'version': version, 'build id': build_id}
ret_dict['stoke os version'] = local_dict
else:
if debug:
print 'parsing output for selected card'
print 'card is either in slot-2, slot-3 or slot-4'
# sample input
"""
0
1 Slot 2 Information (GLC2):
2 ----------------------------------------------------------------------------
3 StokeOS Release 4.6B1S2 (2010062215).
4 Built Tue Jun 22 16:44:08 PDT 2010 by builder.
5
6 Card uptime is 12 hours, 25 minutes
7
8 Card restart at Thu Jun 23 02:33:34 PDT 2011
9 Restart by remote reset
10
11 Firmware Version: v91
12
13 Stoke MicroEngine Image Release 4.0 (2010062216 builder).
14
15 Stoke-Boot Version
16 *Booted Primary: StokeBoot Release 4.2 (2009120817).
17 Booted Backup: StokeBoot Release 4.2 (2009120817).
18 Stoke-Bloader Version
19 *Booted Primary: Stoke Bootloader Release 4.6B1S2 (2010062215).
20 Booted Backup: Stoke Bootloader Release 4.6B1S2 (2010062215).
"""
# Parsing
# Card uptime is 2 minutes
line = version_list[6]
if debug:
print 'parsing:', line
words = line.split()
if debug:
print 'the split line contains:'
print words
local_dict = {}
if len(words) == 5:
local_dict['hour'] = 0
local_dict['minute'] = int(words[3])
else:
local_dict['hour'] = int(words[3])
local_dict['minute'] = int(words[5])
ret_dict['card uptime'] = local_dict
# Parsing
# Card restart at Thu Jun 23 02:33:34 PDT 2011
line = version_list[8]
if debug:
print 'parsing:', line
words = line.split()
local_dict = {}
local_dict['day of week'] = words[3]
local_dict['month'] = words[4]
local_dict['day of month'] = words[5]
raw_time = words[6]
time = raw_time.split(':')
local_dict['hour'] = time[0]
local_dict['minute'] = time[1]
local_dict['second'] = time[2]
local_dict['time zone'] = words[7]
local_dict['year'] = words[8]
ret_dict['card restart date time'] = local_dict
# Parsing
# Restart by remote reset
ret_dict['restart by'] = version_list[8:]
# Parsing
# Firmware Version: v91
line = version_list[11]
if debug:
print 'parsing:', line
words = line.split()
ret_dict['firmware version'] = words[2]
# Parsing
# *Booted Primary: StokeBoot Release 4.2 (2009120817).
line = version_list[16]
if debug:
print '16 *Booted Primary: StokeBoot Release 4.2 (2009120817).'
print 'parsing:', line
words = line.split()
local_dict = {}
version = words[4]
build_id = words[5].strip('().')
local_dict['primary'] = {'version': version, 'build id': build_id}
# Parsing
# Booted Backup: StokeBoot Release 4.2 (2009120817).
line = version_list[17]
if debug:
print 'parsing:', line
words = line.split()
local_dict = {}
version = words[4]
build_id = words[5].strip('().')
local_dict['backup'] = {'version': version, 'build id': build_id}
ret_dict['stoke boot version'] = local_dict
# Parsing
# *Booted Primary: Stoke Bootloader Release 4.146X1B1S4 (2011061319).
line = version_list[19]
if debug:
print 'parsing:', line
words = line.split()
local_dict = {}
version = words[4]
build_id = words[5].strip('().')
local_dict['primary'] = {'version': version, 'build id': build_id}
# Parsing
# Booted Backup: Stoke Bootloader Release 4.6B1S4 (2011061319).
line = version_list[20]
if debug:
print 'parsing:', line
words = line.split()
local_dict = {}
version = words[4]
build_id = words[5].strip('().')
local_dict['backup'] = {'version': version, 'build id': build_id}
ret_dict['stoke os version'] = local_dict
return ret_dict
    def show_environmental(self):
        """
        Run the command "show environmental" and parse the output into a
        nested dictionary keyed on lower-cased section name ('voltage
        readings', 'temperature readings', 'power status', 'fan status',
        'alarm status').  Each section maps 'slot <id>' entries to their
        'source' (and, when present, 'level') columns; 'alarm status'
        instead holds the 'general status' line and the 'alarmm1' value.
        """
        debug = False
        # sample input
        """
        0
        1 Environmental status as of Wed Jun 22 13:41:53 2011
        2 Data polling interval is 60 second(s)
        3
        4 Voltage readings:
        5 =================
        6 Slot Source Level
        7 ---- ------ -------
        8 0 No errors detected
        9 1 No errors detected
        10 2 No errors detected
        11 3 No errors detected
        12 4 No errors detected
        13
        14 Temperature readings:
        15 =====================
        16 Slot Source Level
        17 ---- ------ -------
        18 0 No errors detected
        19 1 No errors detected
        20 2 No errors detected
        21 3 No errors detected
        22 4 No errors detected
        23
        24
        25 Power status:
        26 =============
        27 Slot Source Level
        28 ---- ------ -------
        29 PEMA No errors detected
        30 PEMB No errors detected
        31
        32 Fan status:
        33 ===========
        34 Slot Source Level
        35 ---- ------ -------
        36 FANTRAY1 No errors detected
        37 FANTRAY2 No errors detected
        38
        39 Alarm status:
        40 =============
        41 No System-Wide Alarm triggered
        42 ALARMM1 No errors detected
        """
        if debug:
            print 'now in show_environmental in issu.py'
        ret_dict = {}
        command = 'show environmental'
        raw_environmental = self.cmd(command)
        environmental_lines = raw_environmental.splitlines()
        if debug:
            print 'the raw values are:'
            line_index = 0
            for line in environmental_lines:
                print repr(line_index).ljust(2), line
                line_index = line_index + 1
        # Now we parse the sections
        section_header = ['Voltage readings:','Temperature readings:','Power status:','Fan status:','Alarm status:']
        crap_lines = ['=================','Slot Source Level','---- ------ -------', \
                      '=====================','=============', '===========']
        local_dict = {}
        # NOTE(review): line_counter is never incremented or read below.
        line_counter = 0
        section_name = ''
        # Walk everything between the banner (first 3 lines) and the alarm
        # block (last 4 lines); the alarm block is handled separately below.
        for line in environmental_lines[3:-4]:
            if debug:
                print 'now processing:'
                print line
            if len(line.strip()) > 1:
                if line in section_header:
                    # New section: remember its lower-cased name and start
                    # collecting rows into a fresh dictionary.
                    raw_section_name = line.strip(':')
                    section_name = raw_section_name.lower()
                    if debug:
                        print 'clearing the local dictionary'
                    local_dict = {}
                    if debug:
                        print 'local dictionary:', local_dict
                        print 'found section header:', section_name
                elif line in crap_lines:
                    # Separator / column-header lines carry no data.
                    if debug:
                        print 'discarding this stuff:'
                        print line
                    pass
                else:
                    # Data row: tab-separated "slot<TAB>source[<TAB>level]".
                    words = line.split('\t')
                    if debug:
                        print 'the split line looks like:'
                        print words
                    # Numeric slot ids become ints; named sources (PEMA,
                    # FANTRAY1, ...) stay strings.
                    try:
                        slot = int(words[0])
                    except:
                        slot = words[0]
                    if len(words[1]) == 0:
                        words.remove('')
                    slot_name = 'slot ' + str(slot)
                    local_dict[slot_name] = {}
                    source = words[1].lstrip()
                    local_dict[slot_name]['source'] = source
                    if len(words) > 2:
                        level = words[2]
                        local_dict[slot_name]['level'] = level
                    if debug:
                        print 'the local dictionary for section:', section_name
                        print local_dict
            else:
                # Blank line: flush the finished section into ret_dict.
                # NOTE(review): sections that collected 0 or 1 rows are
                # silently dropped by this len() > 1 guard — confirm that
                # is intended before relying on every section being present.
                if len(local_dict) > 1:
                    ret_dict[section_name] = local_dict
                    if debug:
                        print '-------------------------------------------------------------'
                        print 'storing the following local_dict values into the main ret_dict'
                        print 'under section:', section_name
                        local_dict_keys = local_dict.keys()
                        for key in local_dict_keys:
                            print key
                            print '\t', local_dict[key]
                        print 'here is the ret_dict'
                        ret_dict_keys = ret_dict.keys()
                        for key in ret_dict_keys:
                            print key
                            sub_keys = ret_dict[key].keys()
                            for sub_key in sub_keys:
                                print '\t', sub_key
                                print '\t\t', ret_dict[key][sub_key]
                        print '-------------------------------------------------------------'
        # The alarm block occupies the last two output lines: the
        # system-wide status line and the "ALARMM1 ..." row.
        local_dict = {}
        general_alarm = environmental_lines[-2].strip('\t')
        local_dict['general status'] = general_alarm
        raw_alarmm1 = environmental_lines[-1].split('\t')
        if debug:
            print 'the last line contains:'
            print raw_alarmm1
        alarmm1 = raw_alarmm1[2].lstrip(' ')
        local_dict['alarmm1'] = alarmm1
        ret_dict['alarm status'] = local_dict
        return ret_dict
def show_file_system(self):
"""
Runs the command "show file-system" which is a hidden command to display
the disk utilization. It then parses the output and returns a nested
dictionary of values.
"""
debug = False
if debug:
print 'now in show_file_system in issu.py'
# Sample Input
"""
0
1 Thu Jun 23 11:53:16 PDT 2011.
2 Name Size % Used Used Free
3 ---------------- -------------- ------ -------------- --------------
4 /hd 40,012,611,584 16 6,551,703,552 33,460,908,032
5 /hdp 40,013,643,776 2 935,257,088 39,078,386,688
6 /cfint 128,974,848 11 14,220,800 114,754,048
7 /cfintp 130,007,040 0 53,248 129,953,792
"""
ret_dict = {}
command = 'show file-system'
raw_file_system = self.cmd(command)
file_system_lines = raw_file_system.splitlines()
if debug:
print 'the raw values are:'
line_index = 0
for line in file_system_lines:
print repr(line_index).ljust(2), line
line_index = line_index + 1
for line in file_system_lines[4:]:
local_dict = {}
if debug:
print 'now processing the following line:'
print line
words = line.split()
if debug:
print 'the split line contains:'
print words
mount_point = words[0].strip('\t/')
local_dict['size'] = words[1]
local_dict['percent used'] = words[2]
local_dict['used'] = words[3]
local_dict['free'] = words[4]
if debug:
print 'for the mount point:', mount_point
print local_dict
ret_dict[mount_point] = local_dict
return ret_dict
def show_versions(self):
"""Retrieves the versions installed on the system and returns a dictionary of them
"""
debug = False
installed_packages = []
## We need to see if the package is already installed on the system!!
show_system_raw = self.cmd('show system')
show_system_lines = show_system_raw.splitlines()
# We will parse the linse last to first searching for two things
# 1. Other Packages:
# 2. In-Use Packages:
# When we find the second item we will stop searching
searching = True
ndex = len(show_system_lines) - 1
if debug:
print 'Found', ndex, 'lines'
while searching:
current_line = show_system_lines[ndex]
if debug:
print 'Parsing this line:', current_line
word = current_line.split()
# If the word is in the search list we don't want that line
if not (word[0] in ('Other','In-Use','reverts')):
print 'Found the following version installed:', word[0]
installed_packages.append(word[0])
if word[0] == 'In-Use':
print 'Found the last line. All versions read.'
searching = False
ndex = ndex - 1
if debug:
print 'Found the following versions installed:'
for item in installed_packages:
print item
print 'returning from issu.py show_versions'
return installed_packages
def show_versions_and_build(self):
"""Retrieves the versions installed on the system and returns a dictionary of them
"""
debug = False
installed_packages = {}
## We need to see if the package is already installed on the system!!
show_system_raw = self.cmd('show system')
show_system_lines = show_system_raw.splitlines()
# We will parse the linse last to first searching for two things
# 1. Other Packages:
# 2. In-Use Packages:
# When we find the second item we will stop searching
searching = True
ndex = len(show_system_lines) - 1
if debug:
print 'Found', ndex, 'lines'
print '------------------------'
while searching:
current_line = show_system_lines[ndex]
if debug:
print 'Parsing this line:', current_line
word = current_line.split()
# If the word is in the search list we don't want that line
if not (word[0] in ('Other','In-Use','reverts', 'ISSU')):
print 'Found the following version installed:', word[0]
if debug:
print 'The version should be:', word[-1]
version = word[0]
raw_build_id = word[-1]
build_id = raw_build_id[1:-3]
if debug:
print 'Build ID determined to be:', build_id
installed_packages[version] = build_id
if debug:
print '^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^'
if word[0] == 'In-Use':
print 'Found the last line. All versions read.'
searching = False
ndex = ndex - 1
if debug:
print 'Found the following versions installed:'
for item in installed_packages:
print item
print 'returning from issu.py show_versions'
return installed_packages
def show_port(self):
"""
Runs the command "show port" and parses the ouptut
"""
debug = False
# Sample raw imput
"""
Wed Dec 14 11:15:04 PDT 2011.
Port Type Admin Link Speed Duplex Connector Medium MAC Address
----- ---- ------ ------ ----- ------ --------- ------ -----------------
0/0 Eth Config Down 100M Full RJ45 Copper 00:12:73:00:0a:d0
1/0 Eth Up Up 100M Full RJ45 Copper 00:12:73:00:0a:d1
"""
if debug:
print 'now in issu.py show_port'
port_dict = {}
raw_port = self.cmd("show port")
if debug:
print 'the raw returned value was:'
print raw_port
port_list = raw_port.splitlines()
if debug:
line_index = 0
print 'the lines are:'
for line in port_list:
print line_index, line
line_index = line_index + 1
labels_line = port_list[2].split()
divider_line = port_list[3]
columnDict = parse_divider_line(self,divider_line)
if debug:
print 'The columnDict is:'
print columnDict
for raw_line in port_list[4:]:
line = raw_line
local_dict = {}
if debug:
print '----------------------------------------------'
print 'The line to be processes is:'
print line
start = columnDict[0][0]
end = columnDict[0][1]+1
name = line[start:end].strip()
if debug:
print 'The name is:', name
local_dict["Type"] = line[0]
for labels_idx in range(1,(len(labels_line) - 1)):
start = columnDict[labels_idx][0]
end = columnDict[labels_idx][1]+1
local_dict[labels_line[labels_idx]] = line[start:end].strip()
if debug:
print("The %s is: %s " %(labels_line[labels_idx],local_dict[labels_line[labels_idx]]))
# We store each entry in the main dictionary we return
port_dict[name] = local_dict
return port_dict
###### Anthony Ton code start here #########
def show_dos_counters(self, slot):
"""Runs the command 'show dos slot <0..4> counters' and parses the output.
"""
dos_dict = {}
debug = False
# Sample raw input
"""
kenya[local]#show dos slot 2 counters
Total Drops
--------------------------------------------------------------------------------
ARP : 0 0
Local TCP : 0 0
Local UDP : 0 0
Local ICMP : 0 0
IP4 MIP Exception : 0 0
IKE : 0 0
Local Adjacency : 0 0
ARP Transit : 0 0
IP4 Unreachable : 0 0
TTL Expired : 0 0
TTL Expired Encap : 0 0
IP4 Options : 0 0
Over MTU : 0 0
kenya[local]#
# Sample dictionary output:
{
'ARP': { 'Drops': '0',
'Total': '0'},
'ARP Transit': { 'Drops': '0',
'Total': '0'},
'IKE': { 'Drops': '0',
'Total': '0'},
'IP4 MIP Exception': { 'Drops': '0',
'Total': '0'},
'IP4 Options': { 'Drops': '0',
'Total': '0'},
'IP4 Unreachable': { 'Drops': '0',
'Total': '0'},
'Local Adjacency': { 'Drops': '0',
'Total': '0'},
'Local ICMP': { 'Drops': '0',
'Total': '0'},
'Local TCP': { 'Drops': '0',
'Total': '0'},
'Local UDP': { 'Drops': '0',
'Total': '0'},
'Over MTU': { 'Drops': '0',
'Total': '0'},
'TTL Expired': { 'Drops': '0',
'Total': '0'},
'TTL Expired Encap': { 'Drops': '0',
'Total': '0'}}
"""
command = "show dos slot " + slot + " counter"
raw_dos_list = self.cmd(command)
dos_list = raw_dos_list.splitlines()
if debug:
print 'The raw value returned was:'
print dos_list
if 'ERROR:' in raw_dos_list:
print 'Detected an error when running: ' + command
print 'Returned text was:'
print raw_dos_list
dos_dict['Status'] = 'Error'
return dos_dict
for raw_line in dos_list[3:]:
line = raw_line.split(':')
local_dict = {}
if debug:
print '----------------------------------------------'
print 'The line to be processes is:'
print line
name = line[0].strip()
if debug:
print 'The name is:', name
raw_data = line[1].split();
local_dict['Total'] = raw_data[0]
if debug:
print 'The Total is:', local_dict['Total']
local_dict['Drops'] = raw_data[1]
if debug:
print 'The Drops is:', local_dict['Drops']
# We store each entry in the main dictionary we return
dos_dict[name] = local_dict
return dos_dict
def show_fast_path_counters(self):
"""Runs the command 'show fast-path counters' and parses the output.
"""
fastpath_dict = {}
debug = False
# Sample raw input
"""
kenya[local]#show fast-path counters
Slot Port Type Count
---- ---- ----------------------------- -----------------
2 1 InvalidFib 748
3 1 InvalidFib 2,067
kenya[local]#
# Sample dictionary output:
{
'2/0': { 'Count': '363',
'Type': 'Reserved4'},
'2/1': { 'Count': '82',
'Type': 'Reserved4'}}
"""
command = "show fast-path counters"
raw_fastpath_counters_list = self.cmd(command)
fastpath_counters_list = raw_fastpath_counters_list.splitlines()
if debug:
print 'The raw value returned was:'
print fastpath_counters_list
labels_line = fastpath_counters_list[1].split()
for raw_line in fastpath_counters_list[3:]:
line = raw_line.split()
local_dict = {}
if debug:
print '----------------------------------------------'
print 'The line to be processes is:'
print line
name = line[0] + "/" + line[1]
if debug:
print 'The name is:', name
for labels_idx in range(2,len(labels_line)):
local_dict[labels_line[labels_idx]] = line[labels_idx]
if debug:
print("The %s is: %s " %(labels_line[labels_idx],local_dict[labels_line[labels_idx]]))
# We store each entry in the main dictionary we return
fastpath_dict[name] = local_dict
return fastpath_dict
def parse_divider_line(self, str, divChar='-'):
    """ Parse the divider line and return a dictionary of the extent of each
    column in the format {column#: [start, end], ...} (indices inclusive).
    Example: "----- ---- ---------- ---" returns
    {0:[0,4], 1:[6,9], 2:[11,20], 3:[22,24]}
    """
    columns = {}
    col_number = 0
    col_start = None
    for position, char in enumerate(str):
        if char == divChar and col_start is None:
            # First divider character of a new column.
            col_start = position
        elif char == ' ' and col_start is not None:
            # A space closes the current column run.
            columns[col_number] = [col_start, position - 1]
            col_number += 1
            col_start = None
    if col_start is not None:
        # The final column ran to the end of the string without a
        # trailing space, so close it at the last index.
        columns[col_number] = [col_start, len(str) - 1]
    return columns
def show_ip_ospf_route(self):
"""Runs the command 'show ip ospf route [detail]' and parses the output.
"""
ipOspfRoute_dict = {}
debug = False
# Sample raw input
"""
kenya[stoke]#show ip ospf route
Network/Mask Cost Cost2 Nexthop Interface Area-ID
------------------ ----- ----- --------------- -------------- ---------------
O 10.254.1.0/24 1 direct isp 0.0.0.0
O 11.11.11.11/32 1 direct lo0 0.0.0.0
kenya[stoke]#
"""
command = "show ip ospf route "
raw_ip_ospf_route_list = self.cmd(command)
ip_ospf_route_list = raw_ip_ospf_route_list.splitlines()
if debug:
print 'The raw value returned was:'
print ip_ospf_route_list
if 'ERROR:' in ip_ospf_route_list[1]:
print 'Detected an error when running: ' + command
print 'Returned text was:'
print raw_ip_ospf_route_list
ipOspfRoute_dict['Status'] = 'Error'
return ipOspfRoute_dict
labels_line = ip_ospf_route_list[1].split()
divider_line = ip_ospf_route_list[2]
columnDict = parse_divider_line(self,divider_line)
if debug:
print 'The columnDict is:'
print columnDict
for raw_line in ip_ospf_route_list[3:]:
line = raw_line
local_dict = {}
if debug:
print '----------------------------------------------'
print 'The line to be processes is:'
print line
start = columnDict[0][0]
end = columnDict[0][1]+1
name = line[start:end].strip()
if debug:
print 'The name is:', name
local_dict["Type"] = line[0]
for labels_idx in range(1,len(labels_line)):
start = columnDict[labels_idx][0]
end = columnDict[labels_idx][1]+1
local_dict[labels_line[labels_idx]] = line[start:end].strip()
if debug:
print("The %s is: %s " %(labels_line[labels_idx],local_dict[labels_line[labels_idx]]))
# We store each entry in the main dictionary we return
ipOspfRoute_dict[name] = local_dict
return ipOspfRoute_dict
def show_module_iked_slot_ma_pp_detail(self,slot):
"""Runs the command 'show module iked slot <slot> ma pp detail' and parses the output.
"""
modIkedMaPpDetail_dict = {}
debug = False
# Sample raw input
"""
kenya[local]#show module iked slot 2 ma pp detail
_global_:
User Element Size................0 User Init Elements..............0
User Grow Elements...............0 Max Elements....................0
Element Size.....................0 Grow Size.......................0
Initial Elements.................0 Grow Elements...................0
Elements In Use..................0 Allocations....................41
Frees............................0 Max Elements In Use.............0
HALibHAPP::0:
User Element Size..............384 User Init Elements.............64
User Grow Elements..............64 Max Elements...................64
Element Size...................396 Grow Size..................28,672
Initial Elements................64 Grow Elements..................64
Elements In Use.................13 Allocations....................13
Frees............................0 Max Elements In Use............13
HALibHAGlobCB::0:
User Element Size..............192 User Init Elements.............16
User Grow Elements..............16 Max Elements...................16
Element Size...................204 Grow Size...................4,096
Initial Elements................16 Grow Elements..................16
Elements In Use..................0 Allocations.....................0
Frees............................0 Max Elements In Use.............0
HALibAsyncCB::0:
User Element Size...............48 User Init Elements..........1,024
User Grow Elements...........1,024 Max Elements........4,294,967,295
Element Size....................60 Grow Size..................65,536
Initial Elements.............1,024 Grow Elements...............1,024
Elements In Use..................0 Allocations................31,674
Frees.......................31,674 Max Elements In Use.............2
IKE Session Pool:17::0:
User Element Size............2,120 User Init Elements..........8,000
User Grow Elements...........8,000 Max Elements...............91,216
Element Size.................2,132 Grow Size.................520,192
Initial Elements.............8,000 Grow Elements...............8,000
Elements In Use..................0 Allocations.....................0
Frees............................0 Max Elements In Use.............0
IKEV2 SA Pool:17::0:
User Element Size............1,120 User Init Elements..........8,000
User Grow Elements...........8,000 Max Elements..............273,648
Element Size.................1,132 Grow Size.................520,192
Initial Elements.............8,000 Grow Elements...............8,000
Elements In Use..................0 Allocations.....................0
Frees............................0 Max Elements In Use.............0
ph1 pool:17::0:
User Element Size............1,816 User Init Elements..........8,000
User Grow Elements...........8,000 Max Elements...............45,608
Element Size.................1,828 Grow Size.................520,192
Initial Elements.............8,000 Grow Elements...............8,000
Elements In Use..................0 Allocations.....................0
Frees............................0 Max Elements In Use.............0
natt opt pool:17::0:
User Element Size..............132 User Init Elements..........8,000
User Grow Elements...........8,000 Max Elements...............45,608
Element Size...................144 Grow Size.................520,192
Initial Elements.............8,000 Grow Elements...............8,000
Elements In Use..................0 Allocations.....................0
Frees............................0 Max Elements In Use.............0
ph2 pool:17::0:
User Element Size..............656 User Init Elements..........8,000
User Grow Elements...........8,000 Max Elements...............45,608
Element Size...................668 Grow Size.................520,192
Initial Elements.............8,000 Grow Elements...............8,000
Elements In Use..................0 Allocations.....................0
Frees............................0 Max Elements In Use.............0
ph2 app pool:17::0:
User Element Size..............240 User Init Elements..........4,096
User Grow Elements...........4,096 Max Elements...............22,804
Element Size...................252 Grow Size.................520,192
Initial Elements.............4,096 Grow Elements...............4,096
Elements In Use..................0 Allocations.....................0
Frees............................0 Max Elements In Use.............0
ph2 app pool:17::1:
User Element Size..............368 User Init Elements..........2,048
User Grow Elements...........2,048 Max Elements...............22,804
Element Size...................380 Grow Size.................520,192
Initial Elements.............2,048 Grow Elements...............2,048
Elements In Use..................0 Allocations.....................0
Frees............................0 Max Elements In Use.............0
IKE SA Info Pool:17::0:
User Element Size..............824 User Init Elements.........16,000
User Grow Elements..........16,000 Max Elements..............547,296
Element Size...................836 Grow Size.................520,192
Initial Elements............16,000 Grow Elements..............16,000
Elements In Use..................0 Allocations.....................0
Frees............................0 Max Elements In Use.............0
OUT SA Block HA Poo:17::0:
User Element Size..............640 User Init Elements............356
User Grow Elements.............356 Max Elements................5,696
Element Size...................652 Grow Size.................233,472
Initial Elements...............356 Grow Elements.................356
Elements In Use..................0 Allocations.....................0
Frees............................0 Max Elements In Use.............0
IKE Counter HA Pool:17::0:
User Element Size..............560 User Init Elements..............4
User Grow Elements...............4 Max Elements....................4
Element Size...................572 Grow Size...................3,072
Initial Elements.................4 Grow Elements...................4
Elements In Use..................1 Allocations.....................1
Frees............................0 Max Elements In Use.............1
ISAKMP Statistics H:17::0:
User Element Size..............428 User Init Elements..............4
User Grow Elements...............4 Max Elements....................4
Element Size...................440 Grow Size...................2,048
Initial Elements.................4 Grow Elements...................4
Elements In Use..................1 Allocations.....................1
Frees............................0 Max Elements In Use.............1
Tunmgr ha pool:17::0:
User Element Size..............192 User Init Elements..........8,000
User Grow Elements...........8,000 Max Elements..............364,864
Element Size...................204 Grow Size.................520,192
Initial Elements.............8,000 Grow Elements...............8,000
Elements In Use..................0 Allocations.....................0
Frees............................0 Max Elements In Use.............0
IKEV2 last response:17::0:
User Element Size..............368 User Init Elements..............4
User Grow Elements...............4 Max Elements..............273,648
Element Size...................380 Grow Size...................2,048
Initial Elements.................4 Grow Elements...................4
Elements In Use..................0 Allocations.....................0
Frees............................0 Max Elements In Use.............0
IKEV2 last response:17::1:
User Element Size..............624 User Init Elements..............4
User Grow Elements...............4 Max Elements..............273,648
Element Size...................636 Grow Size...................3,072
Initial Elements.................4 Grow Elements...................4
Elements In Use..................0 Allocations.....................0
Frees............................0 Max Elements In Use.............0
IKEV2 last response:17::2:
User Element Size............1,136 User Init Elements..............4
User Grow Elements...............4 Max Elements..............273,648
Element Size.................1,148 Grow Size...................5,120
Initial Elements.................4 Grow Elements...................4
Elements In Use..................0 Allocations.....................0
Frees............................0 Max Elements In Use.............0
IKEV2 last response:17::3:
User Element Size............2,160 User Init Elements..............4
User Grow Elements...............4 Max Elements..............273,648
Element Size.................2,172 Grow Size..................12,288
Initial Elements.................4 Grow Elements...................4
Elements In Use..................0 Allocations.....................0
Frees............................0 Max Elements In Use.............0
IKEV1 Last Resp HA :17::0:
User Element Size..............368 User Init Elements..............4
User Grow Elements...............4 Max Elements..............273,648
Element Size...................380 Grow Size...................2,048
Initial Elements.................4 Grow Elements...................4
Elements In Use..................0 Allocations.....................0
Frees............................0 Max Elements In Use.............0
IKEV1 Last Resp HA :17::1:
User Element Size..............624 User Init Elements..............4
User Grow Elements...............4 Max Elements..............273,648
Element Size...................636 Grow Size...................3,072
Initial Elements.................4 Grow Elements...................4
Elements In Use..................0 Allocations.....................0
Frees............................0 Max Elements In Use.............0
IKEV1 Last Resp HA :17::2:
User Element Size............1,136 User Init Elements..............4
User Grow Elements...............4 Max Elements..............273,648
Element Size.................1,148 Grow Size...................5,120
Initial Elements.................4 Grow Elements...................4
Elements In Use..................0 Allocations.....................0
Frees............................0 Max Elements In Use.............0
IKEV1 Last Resp HA :17::3:
User Element Size............2,160 User Init Elements..............4
User Grow Elements...............4 Max Elements..............273,648
Element Size.................2,172 Grow Size..................12,288
Initial Elements.................4 Grow Elements...................4
Elements In Use..................0 Allocations.....................0
Frees............................0 Max Elements In Use.............0
kenya[local]#
# Sample dictionary output
{
'HALibAsyncCB::0:': { 'Allocations': '1,446',
'Element Size': '60',
'Elements In Use': '0',
'Frees': '1,446',
'Grow Elements': '1,024',
'Grow Size': '65,536',
'Initial Elements': '1,024',
'Max Elements': '4,294,967,295',
'Max Elements In Use': '1',
'User Element Size': '48',
'User Grow Elements': '1,024',
'User Init Elements': '1,024'},
'HALibHAGlobCB::0:': { 'Allocations': '0',
'Element Size': '204',
'Elements In Use': '0',
'Frees': '0',
'Grow Elements': '16',
'Grow Size': '4,096',
'Initial Elements': '16',
'Max Elements': '16',
'Max Elements In Use': '0',
'User Element Size': '192',
'User Grow Elements': '16',
'User Init Elements': '16'},
'HALibHAPP::0:': { 'Allocations': '13',
'Element Size': '396',
'Elements In Use': '13',
'Frees': '0',
'Grow Elements': '64',
'Grow Size': '28,672',
'Initial Elements': '64',
'Max Elements': '64',
'Max Elements In Use': '13',
'User Element Size': '384',
'User Grow Elements': '64',
'User Init Elements': '64'},
'IKE Counter HA Pool:17::0:': { 'Allocations': '1',
'Element Size': '572',
'Elements In Use': '1',
'Frees': '0',
'Grow Elements': '4',
'Grow Size': '3,072',
'Initial Elements': '4',
'Max Elements': '4',
'Max Elements In Use': '1',
'User Element Size': '560',
'User Grow Elements': '4',
'User Init Elements': '4'},
'IKE SA Info Pool:17::0:': { 'Allocations': '0',
'Element Size': '836',
'Elements In Use': '0',
'Frees': '0',
'Grow Elements': '16,000',
'Grow Size': '520,192',
'Initial Elements': '16,000',
'Max Elements': '1,447,296',
'Max Elements In Use': '0',
'User Element Size': '824',
'User Grow Elements': '16,000',
'User Init Elements': '16,000'},
'IKE Session Pool:17::0:': { 'Allocations': '0',
'Element Size': '2,132',
'Elements In Use': '0',
'Frees': '0',
'Grow Elements': '8,000',
'Grow Size': '520,192',
'Initial Elements': '8,000',
'Max Elements': '241,216',
'Max Elements In Use': '0',
'User Element Size': '2,120',
'User Grow Elements': '8,000',
'User Init Elements': '8,000'},
'IKEV1 Last Resp HA :17::0:': { 'Allocations': '0',
'Element Size': '380',
'Elements In Use': '0',
'Frees': '0',
'Grow Elements': '4',
'Grow Size': '2,048',
'Initial Elements': '4',
'Max Elements': '723,648',
'Max Elements In Use': '0',
'User Element Size': '368',
'User Grow Elements': '4',
'User Init Elements': '4'},
'IKEV1 Last Resp HA :17::1:': { 'Allocations': '0',
'Element Size': '636',
'Elements In Use': '0',
'Frees': '0',
'Grow Elements': '4',
'Grow Size': '3,072',
'Initial Elements': '4',
'Max Elements': '723,648',
'Max Elements In Use': '0',
'User Element Size': '624',
'User Grow Elements': '4',
'User Init Elements': '4'},
'IKEV1 Last Resp HA :17::2:': { 'Allocations': '0',
'Element Size': '1,148',
'Elements In Use': '0',
'Frees': '0',
'Grow Elements': '4',
'Grow Size': '5,120',
'Initial Elements': '4',
'Max Elements': '723,648',
'Max Elements In Use': '0',
'User Element Size': '1,136',
'User Grow Elements': '4',
'User Init Elements': '4'},
'IKEV1 Last Resp HA :17::3:': { 'Allocations': '0',
'Element Size': '2,172',
'Elements In Use': '0',
'Frees': '0',
'Grow Elements': '4',
'Grow Size': '12,288',
'Initial Elements': '4',
'Max Elements': '723,648',
'Max Elements In Use': '0',
'User Element Size': '2,160',
'User Grow Elements': '4',
'User Init Elements': '4'},
'IKEV2 SA Pool:17::0:': { 'Allocations': '0',
'Element Size': '1,132',
'Elements In Use': '0',
'Frees': '0',
'Grow Elements': '8,000',
'Grow Size': '520,192',
'Initial Elements': '8,000',
'Max Elements': '723,648',
'Max Elements In Use': '0',
'User Element Size': '1,120',
'User Grow Elements': '8,000',
'User Init Elements': '8,000'},
'IKEV2 last response:17::0:': { 'Allocations': '0',
'Element Size': '380',
'Elements In Use': '0',
'Frees': '0',
'Grow Elements': '4',
'Grow Size': '2,048',
'Initial Elements': '4',
'Max Elements': '723,648',
'Max Elements In Use': '0',
'User Element Size': '368',
'User Grow Elements': '4',
'User Init Elements': '4'},
'IKEV2 last response:17::1:': { 'Allocations': '0',
'Element Size': '636',
'Elements In Use': '0',
'Frees': '0',
'Grow Elements': '4',
'Grow Size': '3,072',
'Initial Elements': '4',
'Max Elements': '723,648',
'Max Elements In Use': '0',
'User Element Size': '624',
'User Grow Elements': '4',
'User Init Elements': '4'},
'IKEV2 last response:17::2:': { 'Allocations': '0',
'Element Size': '1,148',
'Elements In Use': '0',
'Frees': '0',
'Grow Elements': '4',
'Grow Size': '5,120',
'Initial Elements': '4',
'Max Elements': '723,648',
'Max Elements In Use': '0',
'User Element Size': '1,136',
'User Grow Elements': '4',
'User Init Elements': '4'},
'IKEV2 last response:17::3:': { 'Allocations': '0',
'Element Size': '2,172',
'Elements In Use': '0',
'Frees': '0',
'Grow Elements': '4',
'Grow Size': '12,288',
'Initial Elements': '4',
'Max Elements': '723,648',
'Max Elements In Use': '0',
'User Element Size': '2,160',
'User Grow Elements': '4',
'User Init Elements': '4'},
'ISAKMP Statistics H:17::0:': { 'Allocations': '1',
'Element Size': '440',
'Elements In Use': '1',
'Frees': '0',
'Grow Elements': '4',
'Grow Size': '2,048',
'Initial Elements': '4',
'Max Elements': '4',
'Max Elements In Use': '1',
'User Element Size': '428',
'User Grow Elements': '4',
'User Init Elements': '4'},
'OUT SA Block HA Poo:17::0:': { 'Allocations': '0',
'Element Size': '652',
'Elements In Use': '0',
'Frees': '0',
'Grow Elements': '942',
'Grow Size': '520,192',
'Initial Elements': '942',
'Max Elements': '15,072',
'Max Elements In Use': '0',
'User Element Size': '640',
'User Grow Elements': '942',
'User Init Elements': '942'},
'Tunmgr ha pool:17::0:': { 'Allocations': '0',
'Element Size': '204',
'Elements In Use': '0',
'Frees': '0',
'Grow Elements': '8,000',
'Grow Size': '520,192',
'Initial Elements': '8,000',
'Max Elements': '964,864',
'Max Elements In Use': '0',
'User Element Size': '192',
'User Grow Elements': '8,000',
'User Init Elements': '8,000'},
'_global_:': { 'Allocations': '41',
'Element Size': '0',
'Elements In Use': '0',
'Frees': '0',
'Grow Elements': '0',
'Grow Size': '0',
'Initial Elements': '0',
'Max Elements': '0',
'Max Elements In Use': '0',
'User Element Size': '0',
'User Grow Elements': '0',
'User Init Elements': '0'},
'natt opt pool:17::0:': { 'Allocations': '0',
'Element Size': '144',
'Elements In Use': '0',
'Frees': '0',
'Grow Elements': '8,000',
'Grow Size': '520,192',
'Initial Elements': '8,000',
'Max Elements': '120,608',
'Max Elements In Use': '0',
'User Element Size': '132',
'User Grow Elements': '8,000',
'User Init Elements': '8,000'},
'ph1 pool:17::0:': { 'Allocations': '0',
'Element Size': '1,828',
'Elements In Use': '0',
'Frees': '0',
'Grow Elements': '8,000',
'Grow Size': '520,192',
'Initial Elements': '8,000',
'Max Elements': '120,608',
'Max Elements In Use': '0',
'User Element Size': '1,816',
'User Grow Elements': '8,000',
'User Init Elements': '8,000'},
'ph2 app pool:17::0:': { 'Allocations': '0',
'Element Size': '252',
'Elements In Use': '0',
'Frees': '0',
'Grow Elements': '4,096',
'Grow Size': '520,192',
'Initial Elements': '4,096',
'Max Elements': '60,304',
'Max Elements In Use': '0',
'User Element Size': '240',
'User Grow Elements': '4,096',
'User Init Elements': '4,096'},
'ph2 app pool:17::1:': { 'Allocations': '0',
'Element Size': '380',
'Elements In Use': '0',
'Frees': '0',
'Grow Elements': '2,048',
'Grow Size': '520,192',
'Initial Elements': '2,048',
'Max Elements': '60,304',
'Max Elements In Use': '0',
'User Element Size': '368',
'User Grow Elements': '2,048',
'User Init Elements': '2,048'},
'ph2 pool:17::0:': { 'Allocations': '0',
'Element Size': '668',
'Elements In Use': '0',
'Frees': '0',
'Grow Elements': '8,000',
'Grow Size': '520,192',
'Initial Elements': '8,000',
'Max Elements': '120,608',
'Max Elements In Use': '0',
'User Element Size': '656',
'User Grow Elements': '8,000',
'User Init Elements': '8,000'}}
"""
command = "show module iked slot " + slot + " ma pp detail"
raw_modIkedMaPpDetail_list = self.cmd(command)
modIkedMaPpDetail_list = raw_modIkedMaPpDetail_list.splitlines()
if debug:
print 'The raw value returned was:'
print modIkedMaPpDetail_list
if 'ERROR:' in raw_modIkedMaPpDetail_list:
print 'Detected an error when running: ' + command
print 'Returned text was:'
print raw_modIkedMaPpDetail_list
modIkedMaPpDetail_dict['Status'] = 'Error'
return modIkedMaPpDetail_dict
name = None
local_dict = {}
for raw_line in modIkedMaPpDetail_list[1:]:
line = raw_line.strip()
if debug:
print '----------------------------------------------'
print 'The line to be processed is:'
print line
# if the last character is :, then it is the name
if debug:
print("Last char is %s" %line[len(line)-1])
if line[len(line)-1] == ":":
if name != None:
# Done with previous name, save it to main dictionary
modIkedMaPpDetail_dict[name] = local_dict
local_dict = {}
name = line
if debug:
print 'The name is:', name
else:
p = re.compile('(?P<name1>[A-Za-z ]*)\.+(?P<value1>[\d,]+)\s+(?P<name2>[A-Za-z ]*)\.+(?P<value2>[\d,]+)')
m = p.search(line)
if m:
dict = m.groupdict()
if debug:
print("The dict is: %s " %dict)
local_dict[dict['name1']] = dict['value1']
local_dict[dict['name2']] = dict['value2']
if debug:
print("The %s is: %s " %(dict['name1'],local_dict[dict['name1']]))
print("The %s is: %s " %(dict['name2'],local_dict[dict['name2']]))
# We store last entry in the main dictionary we return
modIkedMaPpDetail_dict[name] = local_dict
return modIkedMaPpDetail_dict
def show_module_iked_slot_ma_pool(self,slot):
"""Runs the command 'show module iked slot <slot> ma pool' and parses the output.
"""
modIkedMaPool_dict = {}
debug = False
# Sample raw input
"""
Stoke[local]#show module iked slot 2 ma pools
Name Size InUse Free Allocs Frees
---------------- ------------- --------- --------- ------------- -------------
DaSet 128 64 6 64 0
DaJudy 40 49 2,075 22,442 22,393
DaJudy 72 2 2,093 27 25
DaJudy 136 10 514 19 9
CrhHandleData 60 5 35 5 0
CrhRegData 32 1 42 1 0
CrhCmdBlk 8,224 5 3 5 0
NvTimer 56 5 7,643 8,233 8,228
IpcConnIds 28 12 36 12 0
IpcArepIds 28 6 42 7 1
IpcReg 156 9 26 9 0
IpcConn 400 10 29 12 2
IpcRegmsg 8 9 19 9 0
IpcAsyncReply 344 6 10 7 1
IpcSndrArep 36 3 15 3 0
IpcThrEnt 36 0 18 10 10
IpcThrData 28 0 22 86 86
IpcRmReg 24 9 44 9 0
IpcRmInfo 36 1 145 25 24
IpcAmInfo 72 2 142 6,814 6,812
MsgVerPool 176 5 16 5 0
IpcTrWantReg 28 8 40 8 0
IpcTrRegac 76 14 19 15 1
IpcTrRegpc 72 14 21 15 1
IpcTrReg 84 9 32 9 0
IpcTrConn 388 10 30 12 2
IpcTrConnG 188 10 25 12 2
IpcTrSlot 64 10 28 12 2
IpcTrNode 112 10 22 12 2
IpcTrRegacI 28 14 34 15 1
IpcTrRegpcI 28 14 34 15 1
IpcTrCgIds 28 12 36 12 0
IpcPeer 48 14 18 14 0
IpcPeerMsgData 80 0 20 81 81
IpcPeerMsg 56 0 28 72 72
IpcQnxReg 80 9 23 9 0
IpcQnxConn 12 4 56 6 2
IpcTcpReg 52 9 37 9 0
IpcTcpConn 16 6 54 7 1
IpcTcpRegpc 104 14 20 15 1
IpcMsgReg 52 9 37 9 0
IpcMsgConn 124 16 20 19 3
NvMsg 8,300 6 26 13,710 13,704
EvtStateNotify 32 1 19 1 0
EvtCrhCallBack 8 0 28 15 15
EvtRegWait 40 0 17 1 1
H:CMOHandler 20 3 153 3 0
H:CMOHandler 20 2 154 2 0
H:CMOHandler 20 28 128 28 0
H:CMOHandler 20 1 155 1 0
CMOHandlerPool 12 34 2,010 34 0
CMOObjectPool 8,080 0 64 2 2
IKEd-var-pool 24 1,555 12,891 3,740 2,185
IKEd-var-pool 40 1 10,000 10,056 10,055
IKEd-var-pool 72 1 6,190 6 5
IKEd-var-pool 136 4 3,509 6 2
IKEd-var-pool 264 0 1,884 8 8
IKEd-var-pool 520 1 976 2 1
IKE global struc 848 1 1 1 0
DH pool 44 10,050 8,522 10,050 0
RNG pool 36 2,000 2,008 2,000 0
cdpipc 1,460 1 352 1 0
JobResult 44 0 4,166 12,050 12,050
JobDesc 272 0 1,831 12,050 12,050
JobHandle 72 0 6,191 12,050 12,050
Func pool 20 55 325 55 0
sess mgmt pool 32 0 1,114 4,117 4,117
iked_sess_h 32 1 3,999 1 0
p1 policy pool 2,636 0 50 1 1
p2 policy pool 96 0 55 1 1
DArbn:IKED_P1_MA 20 1 27 2 1
p1 map pool 696 0 51 1 1
DArbn:IKED_P2_MA 20 1 27 2 1
p2 map pool 52 0 62 1 1
DArbn:IKED_IP_P1 20 1 27 1 0
DArbn:IKED_IP_P2 20 1 27 1 0
DArbn:IKED_XAUTH 20 1 27 1 0
DArbn:IKED_IP_XA 20 1 27 1 0
DAt:IKEDV2_RCF_R 20 0 8,060 5 5
DAt:IKEDV2_RCF_S 20 0 8,060 3 3
80 objects displayed.
Stoke[local]#
# Sample dictionary output
{
'CMOHandlerPool': { 'Allocs': '34',
'Free': '2,010',
'Frees': '0',
'InUse': '34',
'Size': '12'},
'CMOObjectPool': { 'Allocs': '1',
'Free': '64',
'Frees': '1',
'InUse': '0',
'Size': '8,080'},
'CrhCmdBlk': { 'Allocs': '4',
'Free': '4',
'Frees': '0',
'InUse': '4',
'Size': '8,224'},
'CrhHandleData': { 'Allocs': '5',
'Free': '35',
'Frees': '0',
'InUse': '5',
'Size': '60'},
'CrhRegData': { 'Allocs': '1',
'Free': '42',
'Frees': '0',
'InUse': '1',
'Size': '32'},
'DArbn:IKED_IP_P1': { 'Allocs': '1',
'Free': '27',
'Frees': '0',
'InUse': '1',
'Size': '20'},
'DArbn:IKED_IP_P2': { 'Allocs': '1',
'Free': '27',
'Frees': '0',
'InUse': '1',
'Size': '20'},
'DArbn:IKED_IP_XA': { 'Allocs': '1',
'Free': '27',
'Frees': '0',
'InUse': '1',
'Size': '20'},
'DArbn:IKED_P1_MA': { 'Allocs': '1',
'Free': '27',
'Frees': '0',
'InUse': '1',
'Size': '20'},
'DArbn:IKED_P2_MA': { 'Allocs': '1',
'Free': '27',
'Frees': '0',
'InUse': '1',
'Size': '20'},
'DArbn:IKED_XAUTH': { 'Allocs': '1',
'Free': '27',
'Frees': '0',
'InUse': '1',
'Size': '20'},
'DH pool': { 'Allocs': '10,050',
'Free': '8,522',
'Frees': '0',
'InUse': '10,050',
'Size': '44'},
'DaJudy': { 'Allocs': '12,146',
'Free': '2,078',
'Frees': '12,100',
'InUse': '46',
'Size': '40'},
'DaJudy_1': { 'Allocs': '24',
'Free': '2,093',
'Frees': '22',
'InUse': '2',
'Size': '72'},
'DaJudy_2': { 'Allocs': '17',
'Free': '514',
'Frees': '7',
'InUse': '10',
'Size': '136'},
'DaSet': { 'Allocs': '64',
'Free': '6',
'Frees': '0',
'InUse': '64',
'Size': '128'},
'EvtCrhCallBack': { 'Allocs': '3',
'Free': '28',
'Frees': '3',
'InUse': '0',
'Size': '8'},
'EvtRegWait': { 'Allocs': '1',
'Free': '17',
'Frees': '1',
'InUse': '0',
'Size': '40'},
'EvtStateNotify': { 'Allocs': '1',
'Free': '19',
'Frees': '0',
'InUse': '1',
'Size': '32'},
'Func pool': { 'Allocs': '55',
'Free': '325',
'Frees': '0',
'InUse': '55',
'Size': '20'},
'H:CMOHandler': { 'Allocs': '3',
'Free': '153',
'Frees': '0',
'InUse': '3',
'Size': '20'},
'H:CMOHandler_1': { 'Allocs': '2',
'Free': '154',
'Frees': '0',
'InUse': '2',
'Size': '20'},
'H:CMOHandler_2': { 'Allocs': '28',
'Free': '128',
'Frees': '0',
'InUse': '28',
'Size': '20'},
'H:CMOHandler_3': { 'Allocs': '1',
'Free': '155',
'Frees': '0',
'InUse': '1',
'Size': '20'},
'IKE global struc': { 'Allocs': '1',
'Free': '1',
'Frees': '0',
'InUse': '1',
'Size': '848'},
'IKEd-var-pool': { 'Allocs': '3,543',
'Free': '12,904',
'Frees': '2,001',
'InUse': '1,542',
'Size': '24'},
'IKEd-var-pool_1': { 'Allocs': '10,052',
'Free': '10,000',
'Frees': '10,051',
'InUse': '1',
'Size': '40'},
'IKEd-var-pool_2': { 'Allocs': '4',
'Free': '6,189',
'Frees': '2',
'InUse': '2',
'Size': '72'},
'IKEd-var-pool_3': { 'Allocs': '5',
'Free': '3,510',
'Frees': '2',
'InUse': '3',
'Size': '136'},
'IKEd-var-pool_4': { 'Allocs': '2',
'Free': '1,884',
'Frees': '2',
'InUse': '0',
'Size': '264'},
'IKEd-var-pool_5': { 'Allocs': '2',
'Free': '976',
'Frees': '1',
'InUse': '1',
'Size': '520'},
'IpcAmInfo': { 'Allocs': '2',
'Free': '144',
'Frees': '2',
'InUse': '0',
'Size': '72'},
'IpcArepIds': { 'Allocs': '5',
'Free': '43',
'Frees': '0',
'InUse': '5',
'Size': '28'},
'IpcAsyncReply': { 'Allocs': '5',
'Free': '11',
'Frees': '0',
'InUse': '5',
'Size': '344'},
'IpcConn': { 'Allocs': '12',
'Free': '29',
'Frees': '2',
'InUse': '10',
'Size': '400'},
'IpcConnIds': { 'Allocs': '12',
'Free': '36',
'Frees': '0',
'InUse': '12',
'Size': '28'},
'IpcMsgConn': { 'Allocs': '17',
'Free': '21',
'Frees': '2',
'InUse': '15',
'Size': '124'},
'IpcMsgReg': { 'Allocs': '9',
'Free': '37',
'Frees': '0',
'InUse': '9',
'Size': '52'},
'IpcPeer': { 'Allocs': '12',
'Free': '20',
'Frees': '0',
'InUse': '12',
'Size': '48'},
'IpcPeerMsg': { 'Allocs': '62',
'Free': '28',
'Frees': '62',
'InUse': '0',
'Size': '56'},
'IpcPeerMsgData': { 'Allocs': '71',
'Free': '20',
'Frees': '71',
'InUse': '0',
'Size': '80'},
'IpcQnxConn': { 'Allocs': '6',
'Free': '56',
'Frees': '2',
'InUse': '4',
'Size': '12'},
'IpcQnxReg': { 'Allocs': '9',
'Free': '23',
'Frees': '0',
'InUse': '9',
'Size': '80'},
'IpcReg': { 'Allocs': '9',
'Free': '26',
'Frees': '0',
'InUse': '9',
'Size': '156'},
'IpcRegmsg': { 'Allocs': '9',
'Free': '19',
'Frees': '0',
'InUse': '9',
'Size': '8'},
'IpcRmInfo': { 'Allocs': '4',
'Free': '145',
'Frees': '3',
'InUse': '1',
'Size': '36'},
'IpcRmReg': { 'Allocs': '9',
'Free': '44',
'Frees': '0',
'InUse': '9',
'Size': '24'},
'IpcSndrArep': { 'Allocs': '3',
'Free': '15',
'Frees': '0',
'InUse': '3',
'Size': '36'},
'IpcTcpConn': { 'Allocs': '5',
'Free': '55',
'Frees': '0',
'InUse': '5',
'Size': '16'},
'IpcTcpReg': { 'Allocs': '9',
'Free': '37',
'Frees': '0',
'InUse': '9',
'Size': '52'},
'IpcTcpRegpc': { 'Allocs': '13',
'Free': '21',
'Frees': '0',
'InUse': '13',
'Size': '104'},
'IpcThrData': { 'Allocs': '73',
'Free': '22',
'Frees': '73',
'InUse': '0',
'Size': '28'},
'IpcThrEnt': { 'Allocs': '6',
'Free': '18',
'Frees': '6',
'InUse': '0',
'Size': '36'},
'IpcTrCgIds': { 'Allocs': '12',
'Free': '36',
'Frees': '0',
'InUse': '12',
'Size': '28'},
'IpcTrConn': { 'Allocs': '12',
'Free': '30',
'Frees': '2',
'InUse': '10',
'Size': '388'},
'IpcTrConnG': { 'Allocs': '12',
'Free': '25',
'Frees': '2',
'InUse': '10',
'Size': '188'},
'IpcTrNode': { 'Allocs': '12',
'Free': '22',
'Frees': '2',
'InUse': '10',
'Size': '112'},
'IpcTrReg': { 'Allocs': '9',
'Free': '32',
'Frees': '0',
'InUse': '9',
'Size': '84'},
'IpcTrRegac': { 'Allocs': '13',
'Free': '20',
'Frees': '0',
'InUse': '13',
'Size': '76'},
'IpcTrRegacI': { 'Allocs': '13',
'Free': '35',
'Frees': '0',
'InUse': '13',
'Size': '28'},
'IpcTrRegpc': { 'Allocs': '13',
'Free': '22',
'Frees': '0',
'InUse': '13',
'Size': '72'},
'IpcTrRegpcI': { 'Allocs': '13',
'Free': '35',
'Frees': '0',
'InUse': '13',
'Size': '28'},
'IpcTrSlot': { 'Allocs': '12',
'Free': '28',
'Frees': '2',
'InUse': '10',
'Size': '64'},
'IpcTrWantReg': { 'Allocs': '8',
'Free': '40',
'Frees': '0',
'InUse': '8',
'Size': '28'},
'JobDesc': { 'Allocs': '12,050',
'Free': '1,831',
'Frees': '12,050',
'InUse': '0',
'Size': '272'},
'JobHandle': { 'Allocs': '12,050',
'Free': '6,191',
'Frees': '12,050',
'InUse': '0',
'Size': '72'},
'JobResult': { 'Allocs': '12,050',
'Free': '4,166',
'Frees': '12,050',
'InUse': '0',
'Size': '44'},
'MsgVerPool': { 'Allocs': '5',
'Free': '16',
'Frees': '0',
'InUse': '5',
'Size': '176'},
'NvMsg': { 'Allocs': '26',
'Free': '27',
'Frees': '21',
'InUse': '5',
'Size': '8,300'},
'NvTimer': { 'Allocs': '1,659',
'Free': '7,643',
'Frees': '1,654',
'InUse': '5',
'Size': '56'},
'Object Count': { 'Count': '74 objects displayed.'},
'RNG pool': { 'Allocs': '2,000',
'Free': '2,008',
'Frees': '0',
'InUse': '2,000',
'Size': '36'},
'cdpipc': { 'Allocs': '1',
'Free': '352',
'Frees': '0',
'InUse': '1',
'Size': '1,460'},
'iked_sess_h': { 'Allocs': '1',
'Free': '3,999',
'Frees': '0',
'InUse': '1',
'Size': '32'},
'sess mgmt pool': { 'Allocs': '828',
'Free': '1,114',
'Frees': '828',
'InUse': '0',
'Size': '32'}}
"""
command = "show module iked slot " + slot + " ma pool"
raw_modIkedMaPool_list = self.cmd(command)
modIkedMaPool_list = raw_modIkedMaPool_list.splitlines()
if debug:
print 'The raw value returned was:'
print modIkedMaPool_list
if 'ERROR:' in raw_modIkedMaPool_list:
print 'Detected an error when running: ' + command
print 'Returned text was:'
print raw_modIkedMaPool_list
modIkedMaPool_dict['Status'] = 'Error'
return modIkedMaPool_dict
labels_line = modIkedMaPool_list[1].split()
dupKey_dict = {}
divider_line = modIkedMaPool_list[2]
columnDict = parse_divider_line(self,divider_line)
for raw_line in modIkedMaPool_list[3:]:
line = raw_line
if debug:
print '----------------------------------------------'
print 'The line to be processed is:'
print line
if "objects displayed" in line:
# Save the objec count
modIkedMaPool_dict["Object Count"] = {"Count":line}
else:
local_dict = {}
start = columnDict[0][0]
end = columnDict[0][1]+1
name = line[start:end].strip()
if debug:
print 'The name is:', name
for labels_idx in range(1,len(labels_line)):
start = columnDict[labels_idx][0]
end = columnDict[labels_idx][1]+1
local_dict[labels_line[labels_idx]] = line[start:end].strip()
if debug:
print("The %s is: %s " %(labels_line[labels_idx],local_dict[labels_line[labels_idx]]))
# We store last entry in the main dictionary we return
if name in dupKey_dict:
# for duplicate keys, append the index to the key ti differentiate between them
dupKey_dict[name] += 1
name = name + "_" + `dupKey_dict[name]`
modIkedMaPool_dict[name] = local_dict
else:
dupKey_dict[name] = 0
modIkedMaPool_dict[name] = local_dict
return modIkedMaPool_dict
def show_module_iked_slot_ma_shared(self,slot):
"""Runs the command 'show module iked slot <slot> ma share' and parses the output.
"""
modIkedMaShared_dict = {}
debug = False
# Sample raw input
"""
Stoke[local]#show module iked slot 2 ma shared
Name/ Elements HiWat/ In Use/ Allocs/ Alloc Fail/
Pool Size Elem Size User Size Free Frees Double Free
---------------- --------- --------- --------- ------------- -----------
MBuf 97,340 4,109 4,099 18,935 0
211,812,352 2,176 2,144 93,241 14,836 0
FpdPage 4,964 1 1 1 0
20,971,520 4,224 4,192 4,963 0 0
Stoke[local]#
# Sample dictionary output:
{
'FpdPage': { 'Alloc Fail/': '0',
'Allocs/': '1',
'Double Free': '0',
'Elem Size': '4,224',
'Elements': '4,964',
'Free': '4,963',
'Frees': '0',
'HiWat/': '1',
'In Use/': '1',
'Pool Size': '20,971,520',
'User Size': '4,192'},
'MBuf': { 'Alloc Fail/': '0',
'Allocs/': '4,099',
'Double Free': '0',
'Elem Size': '2,176',
'Elements': '97,340',
'Free': '93,241',
'Frees': '0',
'HiWat/': '4,099',
'In Use/': '4,099',
'Pool Size': '211,812,352',
'User Size': '2,144'}}
"""
command = "show module iked slot " + slot + " ma shared"
raw_modIkedMaShared_list = self.cmd(command)
modIkedMaShared_list = raw_modIkedMaShared_list.splitlines()
if debug:
print 'The raw value returned was:'
print modIkedMaShared_list
if 'ERROR:' in raw_modIkedMaShared_list:
print 'Detected an error when running: ' + command
print 'Returned text was:'
print raw_modIkedMaShared_list
modIkedMaShared_dict['Status'] = 'Error'
return modIkedMaShared_dict
labels_line1 = modIkedMaShared_list[1]
labels_line2 = modIkedMaShared_list[2]
divider_line = modIkedMaShared_list[3]
columnDict = parse_divider_line(self,divider_line)
oddLine = False
local_dict = {}
for raw_line in modIkedMaShared_list[4:]:
line = raw_line
if debug:
print '----------------------------------------------'
print 'The line to be processed is:'
print line
start = columnDict[0][0]
end = columnDict[0][1]+1
#name = line[start:end].strip()
if oddLine:
labels_line = labels_line2
else:
local_dict = {}
labels_line = labels_line1
for idx in columnDict.keys():
start = columnDict[idx][0]
end = columnDict[idx][1]+1
label = labels_line[start:end].strip()
if (idx == 0) and (not oddLine):
name = line[start:end].strip()
if debug:
print 'The name is:', name
else:
local_dict[label] = line[start:end].strip()
if debug:
print("The %s is: %s " %(label,local_dict[label]))
# We store last entry in the main dictionary we return
modIkedMaShared_dict[name] = local_dict
if oddLine:
oddLine = False
else:
oddLine = True
return modIkedMaShared_dict
def show_port_counters_drop(self,slotport):
"""Runs the command 'show port <slot/port> counters drop' and parses the output.
"""
portCountersDrop_dict = {}
debug = False
# Sample raw input
"""
Stoke[local]#show port 2/1 counters drop
Port Drop Counters
----- --------------------------------------------
2/1 Disabled Port: 0
CCT expects IPv4: 17626
Stoke[local]#
# Sample dictionary output
{
'2/1': { 'Disabled Port': '0',
'Invalid FIB': '64'}}
"""
command = "show port " + slotport + " counters drop"
raw_portCountersDrop_list = self.cmd(command)
portCountersDrop_list = raw_portCountersDrop_list.splitlines()
if debug:
print 'The raw value returned was:'
print portCountersDrop_list
if ('ERROR:' in raw_portCountersDrop_list):
print 'Detected an error when running: ' + command
print 'Returned text was:'
print raw_portCountersDrop_list
portCountersDrop_dict['Status'] = 'Error'
return portCountersDrop_dict
divider_line = portCountersDrop_list[2]
columnDict = parse_divider_line(self,divider_line)
local_dict = {}
for raw_line in portCountersDrop_list[3:]:
line = raw_line
if debug:
print '----------------------------------------------'
print 'The line to be processed is:'
print line
start = columnDict[0][0]
end = columnDict[0][1]+1
tmp_name = line[start:end].strip()
if tmp_name != "":
name = tmp_name
local_dict = {}
if debug:
print 'The name is:', name
for idx in range(1,len(columnDict.keys())):
start = columnDict[idx][0]
end = columnDict[idx][1]+1
labelValue = line[start:end].strip().split(":")
local_dict[labelValue[0].strip()] = labelValue[1].strip()
if debug:
print("The %s is: %s " %(labelValue[0],local_dict[labelValue[0]]))
# We store last entry in the main dictionary we return
portCountersDrop_dict[name] = local_dict
return portCountersDrop_dict
def show_process_cpu_non_zero(self):
"""Runs the command 'show process cpu non-zero' and parses the output.
"""
processCpuNonZero_dict = {}
debug = False
# Sample raw input
"""
Stoke[local]#show process cpu non-zero
CPU0 Utilization for 5 seconds: 1.94% 1 Minute: 4.29% 5 Minutes: 4.14%
CPU1 Utilization for 5 seconds: 0.01% 1 Minute: 0.15% 5 Minutes: 0.09%
Name PID StartTime CPU uTime sTime % Now
-------------- ------- ------------------------ --- ------ ------ ------
System:0 0 Sat Oct 01 11:01:44 all 38m21s 27.748 0.99%
NSM:0 704514 Sat Oct 01 11:01:44 0 37m15s 2.553 1.09%
Stoke[local]#
# Sample dictionary output
{
'CPU0 Utilization ': { 'fivemins': '2.54%',
'fivesecs': '21.03%',
'onemin': '3.02%'},
'CPU1 Utilization ': { 'fivemins': '0.03%',
'fivesecs': '0.89%',
'onemin': '0.05%'},
'Cli:0 ': { '% Now': '0.69%',
'CPU': '0',
'PID': '974895',
'StartTime': 'Fri Oct 07 20:35:03',
'sTime': '0.021',
'uTime': '0.423'},
'Ip:0 ': { '% Now': '0.29%',
'CPU': '0',
'PID': '745500',
'StartTime': 'Fri Oct 07 19:39:21',
'sTime': '0.060',
'uTime': '0.451'},
'NSM:0 ': { '% Now': '1.09%',
'CPU': '0',
'PID': '704514',
'StartTime': 'Fri Oct 07 19:39:10',
'sTime': '0.322',
'uTime': '38.418'},
'System:0 ': { '% Now': '0.99%',
'CPU': 'all',
'PID': '0',
'StartTime': 'Fri Oct 07 19:39:11',
'sTime': '3.415',
'uTime': '50.834'}}
"""
command = "show process cpu non-zero"
raw_processCpuNonZero_list = self.cmd(command)
processCpuNonZero_list = raw_processCpuNonZero_list.splitlines()
if debug:
print 'The raw value returned was:'
print processCpuNonZero_list
# process the first two lines of output
for idx in range(1,3):
local_dict = {}
line = processCpuNonZero_list[idx]
p = re.compile('(?P<cpu>CPU. Utilization )for 5 seconds:\s+(?P<fivesecs>[\d.%]+)\s+1 Minute:\s+(?P<onemin>[\d.%]+)\s+5 Minutes:\s+(?P<fivemins>[\d.%]+)')
m = p.search(line)
if m:
dict = m.groupdict()
if debug:
print("The dict is: %s " %dict)
local_dict['fivesecs'] = dict['fivesecs']
if debug:
print("The five seconds is: %s " %(local_dict['fivesecs']))
local_dict['onemin'] = dict['onemin']
if debug:
print("The one minute is: %s " %(local_dict['onemin']))
local_dict['fivemins'] = dict['fivemins']
if debug:
print("The five minutes is: %s " %(local_dict['fivemins']))
processCpuNonZero_dict[dict['cpu']] = local_dict
labels_line = processCpuNonZero_list[4]
divider_line = processCpuNonZero_list[5]
columnDict = parse_divider_line(self,divider_line)
for raw_line in processCpuNonZero_list[6:]:
line = raw_line
if debug:
print '----------------------------------------------'
print 'The line to be processed is:'
print line
start = columnDict[0][0]
end = columnDict[0][1]+1
name = line[start:end]
if debug:
print 'The name is:', name
local_dict = {}
for idx in range(1,len(columnDict.keys())):
start = columnDict[idx][0]
end = columnDict[idx][1]+1
label = labels_line[start:end].strip()
local_dict[label] = line[start:end].strip()
if debug:
print("The %s is: %s " %(label,local_dict[label]))
# We store last entry in the main dictionary we return
processCpuNonZero_dict[name] = local_dict
return processCpuNonZero_dict
def show_qos_red_slot(self,slot):
"""Runs the command 'show qos red slot <slot>' and parses the output.
"""
qosRedSlot_dict = {}
debug = False
# Sample raw input
"""
Stoke[local]#show qos red slot 2
average current
port queue weight queue depth queue depth red drops red tail drops
---- ----- -------- ----------- ----------- -------------- --------------
0
nct 1/1 0 0 0 0
ct 1/1 0 0 0 0
ef 1/1 0 0 0 0
af4 1/1 0 0 0 0
af3 1/1 0 0 0 0
af2 1/1 0 0 0 0
af1 1/1 0 0 0 0
be 1/1 0 0 0 0
1
nct 1/1 0 0 0 0
ct 1/1 0 0 0 0
ef 1/1 0 0 0 0
af4 1/1 0 0 0 0
af3 1/1 0 0 0 0
af2 1/1 0 0 0 0
af1 1/1 0 0 0 0
be 1/1 0 0 0 0
2
nct 1/1 0 0 0 0
ct 1/1 0 0 0 0
ef 1/1 0 0 0 0
af4 1/1 0 0 0 0
af3 1/1 0 0 0 0
af2 1/1 0 0 0 0
af1 1/1 0 0 0 0
be 1/1 0 0 0 0
3
nct 1/1 0 0 0 0
ct 1/1 0 0 0 0
ef 1/1 0 0 0 0
af4 1/1 0 0 0 0
af3 1/1 0 0 0 0
af2 1/1 0 0 0 0
af1 1/1 0 0 0 0
be 1/1 0 0 0 0
Stoke[local]#
# Sample dictionary output
{
'0 - af1': { 'average queue depth': '0',
'current queue depth': '0',
'red drops': '0',
'red tail drops': '0',
'weight': '1/1'},
'0 - af2': { 'average queue depth': '0',
'current queue depth': '0',
'red drops': '0',
'red tail drops': '0',
'weight': '1/1'},
'0 - af3': { 'average queue depth': '0',
'current queue depth': '0',
'red drops': '0',
'red tail drops': '0',
'weight': '1/1'},
'0 - af4': { 'average queue depth': '0',
'current queue depth': '0',
'red drops': '0',
'red tail drops': '0',
'weight': '1/1'},
'0 - be': { 'average queue depth': '0',
'current queue depth': '0',
'red drops': '0',
'red tail drops': '0',
'weight': '1/1'},
'0 - ct': { 'average queue depth': '0',
'current queue depth': '0',
'red drops': '0',
'red tail drops': '0',
'weight': '1/1'},
'0 - ef': { 'average queue depth': '0',
'current queue depth': '0',
'red drops': '0',
'red tail drops': '0',
'weight': '1/1'},
'0 - nct': { 'average queue depth': '0',
'current queue depth': '0',
'red drops': '0',
'red tail drops': '0',
'weight': '1/1'},
'1 - af1': { 'average queue depth': '0',
'current queue depth': '0',
'red drops': '0',
'red tail drops': '0',
'weight': '1/1'},
'1 - af2': { 'average queue depth': '0',
'current queue depth': '0',
'red drops': '0',
'red tail drops': '0',
'weight': '1/1'},
'1 - af3': { 'average queue depth': '0',
'current queue depth': '0',
'red drops': '0',
'red tail drops': '0',
'weight': '1/1'},
'1 - af4': { 'average queue depth': '0',
'current queue depth': '0',
'red drops': '0',
'red tail drops': '0',
'weight': '1/1'},
'1 - be': { 'average queue depth': '0',
'current queue depth': '0',
'red drops': '0',
'red tail drops': '0',
'weight': '1/1'},
'1 - ct': { 'average queue depth': '0',
'current queue depth': '0',
'red drops': '0',
'red tail drops': '0',
'weight': '1/1'},
'1 - ef': { 'average queue depth': '0',
'current queue depth': '0',
'red drops': '0',
'red tail drops': '0',
'weight': '1/1'},
'1 - nct': { 'average queue depth': '0',
'current queue depth': '0',
'red drops': '0',
'red tail drops': '0',
'weight': '1/1'},
'2 - af1': { 'average queue depth': '0',
'current queue depth': '0',
'red drops': '0',
'red tail drops': '0',
'weight': '1/1'},
'2 - af2': { 'average queue depth': '0',
'current queue depth': '0',
'red drops': '0',
'red tail drops': '0',
'weight': '1/1'},
'2 - af3': { 'average queue depth': '0',
'current queue depth': '0',
'red drops': '0',
'red tail drops': '0',
'weight': '1/1'},
'2 - af4': { 'average queue depth': '0',
'current queue depth': '0',
'red drops': '0',
'red tail drops': '0',
'weight': '1/1'},
'2 - be': { 'average queue depth': '0',
'current queue depth': '0',
'red drops': '0',
'red tail drops': '0',
'weight': '1/1'},
'2 - ct': { 'average queue depth': '0',
'current queue depth': '0',
'red drops': '0',
'red tail drops': '0',
'weight': '1/1'},
'2 - ef': { 'average queue depth': '0',
'current queue depth': '0',
'red drops': '0',
'red tail drops': '0',
'weight': '1/1'},
'2 - nct': { 'average queue depth': '0',
'current queue depth': '0',
'red drops': '0',
'red tail drops': '0',
'weight': '1/1'},
'3 - af1': { 'average queue depth': '0',
'current queue depth': '0',
'red drops': '0',
'red tail drops': '0',
'weight': '1/1'},
'3 - af2': { 'average queue depth': '0',
'current queue depth': '0',
'red drops': '0',
'red tail drops': '0',
'weight': '1/1'},
'3 - af3': { 'average queue depth': '0',
'current queue depth': '0',
'red drops': '0',
'red tail drops': '0',
'weight': '1/1'},
'3 - af4': { 'average queue depth': '0',
'current queue depth': '0',
'red drops': '0',
'red tail drops': '0',
'weight': '1/1'},
'3 - be': { 'average queue depth': '0',
'current queue depth': '0',
'red drops': '0',
'red tail drops': '0',
'weight': '1/1'},
'3 - ct': { 'average queue depth': '0',
'current queue depth': '0',
'red drops': '0',
'red tail drops': '0',
'weight': '1/1'},
'3 - ef': { 'average queue depth': '0',
'current queue depth': '0',
'red drops': '0',
'red tail drops': '0',
'weight': '1/1'},
'3 - nct': { 'average queue depth': '0',
'current queue depth': '0',
'red drops': '0',
'red tail drops': '0',
'weight': '1/1'}}
"""
command = "show qos red slot " + slot
raw_qosRedSlot_list = self.cmd(command)
qosRedSlot_list = raw_qosRedSlot_list.splitlines()
if debug:
print 'The raw value returned was:'
print qosRedSlot_list
if ('ERROR:' in raw_qosRedSlot_list):
print 'Detected an error when running: ' + command
print 'Returned text was:'
print raw_qosRedSlot_list
qosRedSlot_dict['Status'] = 'Error'
return qosRedSlot_dict
labels_line1 = qosRedSlot_list[1]
labels_line2 = qosRedSlot_list[2]
divider_line = qosRedSlot_list[3]
columnDict = parse_divider_line(self,divider_line)
for raw_line in qosRedSlot_list[4:]:
line = raw_line.expandtabs(columnDict[1][0])
if debug:
print '----------------------------------------------'
print 'The line to be processed is:'
print line
start = columnDict[0][0]
end = columnDict[0][1]+1
tmp_name = line[start:end].strip()
if tmp_name != "":
name = "%s/%s" %(slot,tmp_name)
local_dict = {}
if debug:
print 'The name is:', name
else:
tmp_dict = {}
start = columnDict[1][0]
end = columnDict[1][1]+1
qname = line[start:end].strip()
for idx in range(2,len(columnDict.keys())):
start = columnDict[idx][0]
end = columnDict[idx][1]+1
label = labels_line1[start:end].strip() + " " + labels_line2[start:end].strip()
label = label.strip()
tmp_dict[label] = line[start:end].strip()
if debug:
print("The %s is: %s " %(label,tmp_dict[label]))
local_dict[qname] = tmp_dict
qosRedSlot_dict[name] = local_dict
return qosRedSlot_dict
def show_port_counters(self):
"""Runs the command 'show port counters' and parses the output.
"""
portCounters_dict = {}
debug = False
# Sample raw input
"""
Stoke[local]#show port counter
Wed Oct 5 04:15:01 UTC 2011.
Port Input Packets Input Octets Output Packets Output Octets
----- ---------------- ------------------ ---------------- ------------------
0/0 22907 1709566 3926 308871
1/0 0 0 0 0
2/0 89288 7579994 76301 6534824
2/1 86243 7314258 76124 6506526
3/0 1660 157990 1926 127614
3/1 1678 159519 114 11646
4/0 17355 1377341 16934 1391282
4/1 14305 1117637 17561 1407530
Stoke[local]#
# Sample dictionary output
{
'0/0': { 'Input Octets': '328099',
'Input Packets': '3020',
'Output Octets': '91680',
'Output Packets': '684'},
'1/0': { 'Input Octets': '0',
'Input Packets': '0',
'Output Octets': '0',
'Output Packets': '0'},
'2/0': { 'Input Octets': '32402',
'Input Packets': '221',
'Output Octets': '0',
'Output Packets': '0'},
'2/1': { 'Input Octets': '21164',
'Input Packets': '51',
'Output Octets': '0',
'Output Packets': '0'}}
"""
command = "show port counters"
raw_portCounters_list = self.cmd(command)
portCounters_list = raw_portCounters_list.splitlines()
if debug:
print 'The raw value returned was:'
print portCounters_list
labels_line = portCounters_list[2]
divider_line = portCounters_list[3]
columnDict = parse_divider_line(self,divider_line)
for raw_line in portCounters_list[4:]:
line = raw_line
if debug:
print '----------------------------------------------'
print 'The line to be processed is:'
print line
start = columnDict[0][0]
end = columnDict[0][1]+1
name = line[start:end].strip()
if debug:
print 'The name is:', name
local_dict = {}
for idx in range(1,len(columnDict.keys())):
start = columnDict[idx][0]
end = columnDict[idx][1]+1
label = labels_line[start:end].strip()
local_dict[label] = line[start:end].strip()
if debug:
print("The %s is: %s " %(label,local_dict[label]))
# We store last entry in the main dictionary we return
portCounters_dict[name] = local_dict
return portCounters_dict
def show_ike_session_counters(self):
"""Runs the command 'show ike-session counters' and parses the output.
"""
ikeSessionCounters_dict = {}
debug = False
"""
# Sample raw input
iceland[ctx1]#show ike-session counters
Wed Oct 5 16:43:42 UTC 2011.
-----------------------------------------------------------------------
Phase1 Phase1 Phase1 Phase1 Phase2 Phase2
Slot Successful Dropped Failed Active Successful Failed
---- ---------- ---------- ---------- ---------- ---------- ----------
2 0 0 150 0 0 150
---- ---------- ---------- ---------- ---------- ---------- ----------
Sum 0 0 150 0 0 150
Active Sessions: 0 Total Sessions: 0
iceland[ctx1]#
# Sample dictionary output
{
'2': { 'Phase1 Active': '0',
'Phase1 Dropped': '0',
'Phase1 Failed': '0',
'Phase1 Successful': '0',
'Phase2 Failed': '0',
'Phase2 Successful': '0'},
'Sessions': { 'Active Sessions': '0',
'Total Sessions': '0'},
'Sum': { 'Phase1 Active': '0',
'Phase1 Dropped': '0',
'Phase1 Failed': '0',
'Phase1 Successful': '0',
'Phase2 Failed': '0',
'Phase2 Successful': '0'}
}
"""
command = "show ike-session counters"
raw_ikeSessionCounters_list = self.cmd(command)
ikeSessionCounters_list = raw_ikeSessionCounters_list.splitlines()
if debug:
print 'The raw value returned was:'
print ikeSessionCounters_list
labels_line1 = ikeSessionCounters_list[3]
labels_line2 = ikeSessionCounters_list[4]
divider_line = ikeSessionCounters_list[5]
columnDict = parse_divider_line(self,divider_line)
processLine = 6
for raw_line in ikeSessionCounters_list[6:]:
line = raw_line
if debug:
print '----------------------------------------------'
print 'The line to be processed is:'
print line
start = columnDict[0][0]
end = columnDict[0][1]+1
name = line[start:end].strip()
if (name == "----") or (name == ""):
# the divider/empty line between slot and sum. Ignore these lines
continue
if debug:
print 'The name is:', name
local_dict = {}
for idx in range(1,len(columnDict.keys())):
start = columnDict[idx][0]
end = columnDict[idx][1]+1
label = labels_line1[start:end].strip() + " " + labels_line2[start:end].strip()
label = label.strip()
local_dict[label] = line[start:end].strip()
if debug:
print("The %s is: %s " %(label,local_dict[label]))
# We store last entry in the main dictionary we return
ikeSessionCounters_dict[name] = local_dict
processLine += 1
if name == "Sum":
# End of normal output display. Stop
break
for raw_line in ikeSessionCounters_list[processLine:]:
line = raw_line
if debug:
print '----------------------------------------------'
print 'The line to be processed is:'
print line
p = re.compile('(?P<active>Active Sessions):\s+(?P<actses>[\d]+)\s+(?P<total>Total Sessions):\s+(?P<totses>[\d]+)')
m = p.search(line)
if m:
local_dict = {}
dict = m.groupdict()
if debug:
print("The dict is: %s " %dict)
local_dict[dict['active']] = dict['actses']
if debug:
print("The %s is: %s " %(dict['active'],local_dict[dict['active']]))
local_dict[dict['total']] = dict['totses']
if debug:
print("The %s is: %s " %(dict['total'],local_dict[dict['total']]))
# We store last entry in the main dictionary we return
ikeSessionCounters_dict['Sessions'] = local_dict
return ikeSessionCounters_dict
def show_environmental_detail(self):
"""Runs the command 'show environmental detail' and parses the output.
"""
environmentalDetail_dict = {}
debug = False
# Sample raw input
"""
iceland[local]#show environmental detail
Environmental status as of Fri Oct 7 14:52:21 2011
Data polling interval is 60 second(s)
Voltage readings:
=================
Slot Source Reading Level
---- ------ ----------- -------
0 GPP 1111 None
0 VCC 1.8V 1784 None
0 TCAM 1194 None
0 VCC 2.5V 2520 None
0 DDR Term 1239 None
0 VCC 3.3V 3294 None
0 VCC 5.0V 4985 None
0 FIC 4902 None
0 SysContr 1478 None
0 VCC 12.0V 11989 None
1 GPP 1122 None
1 VCC 1.8V 1784 None
1 TCAM 1214 None
1 VCC 2.5V 2492 None
1 DDR Term 1252 None
1 VCC 3.3V 3312 None
1 VCC 5.0V 4985 None
1 FIC 4957 None
1 SysContr 1494 None
1 VCC 12.0V 11923 None
1 GPP 1122 None
1 VCC 1.8V 1784 None
1 TCAM 1214 None
1 VCC 2.5V 2492 None
1 DDR Term 1252 None
1 VCC 3.3V 3312 None
1 VCC 5.0V 4985 None
1 FIC 4957 None
1 SysContr 1494 None
1 VCC 12.0V 11923 None
2 CPU 1.0V CA 1012 None
2 CPU 1.0V CB 1004 None
2 CPU 1.0V PL 996 None
2 CPU DDR3 1492 None
2 CPU SDRAM VTT 748 None
2 KBP0 Analog 892 None
2 KBP1 Analog 892 None
2 KBP0 Core 900 None
2 KBP1 Core 900 None
2 NPU 1.0V 996 None
2 NPU VDD SRAM 1004 None
2 NPU0 Analog 988 None
2 NPU1 Analog 988 None
2 NPU0 AC SD VTT 740 None
2 NPU0 BD SD VTT 740 None
2 NPU1 AC SD VTT 740 None
2 NPU1 BD SD VTT 732 None
2 NPU0 DDR3 1492 None
2 NPU1 DDR3 1484 None
2 Switch Analog 988 None
2 Switch Core 996 None
2 VCC 1.2V 1204 None
2 VCC 1.8V 1800 None
2 VCC 2.5V 2473 None
2 VCC 3.3V 3323 None
2 VCC 12.0V 11868 None
Temperature readings:
=====================
Slot Source Reading Level
---- ------ ----------- -------
0 Inlet 33 None
0 Outlet 44 None
0 GPP0 60 None
0 GPP1 38 None
1 Inlet 31 None
1 Outlet 45 None
1 GPP0 64 None
1 GPP1 41 None
2 GPP0 71 None
2 NPU0 67 None
2 NPU1 77 None
Power status:
=============
Slot Source Reading Level
---- ------ ----------- -------
PEMA Power Trip OK None
PEMA Temperature OK None
PEMA -48V Powergood OK None
PEMA -48V Miswire OK None
PEMA Backplane 3.3V OK None
PEMB Power Trip Tripped Minor
PEMB Temperature OK None
PEMB -48V Powergood OK None
PEMB -48V Miswire OK None
PEMB Backplane 3.3V OK None
Fan status:
===========
Slot Source Reading Level
---- ------ ----------- -------
FANTRAY1 48V Fuse-A OK None
FANTRAY1 48V Fuse-B OK None
FANTRAY1 Fans-Stat OK None
FANTRAY1 Fan1 status 0
FANTRAY1 Fan2 status 0
FANTRAY1 Fan1 speed 4028
FANTRAY1 Fan2 speed 4700
FANTRAY2 48V Fuse-A OK None
FANTRAY2 48V Fuse-B OK None
FANTRAY2 Fans-Stat OK None
FANTRAY2 Fan1 status 0
FANTRAY2 Fan2 status 0
FANTRAY2 Fan1 speed 4512
FANTRAY2 Fan2 speed 3889
Alarm status:
=============
Slot Source Reading Level
---- ------ ----------- -------
ALARM1 Backplane 3.3V OK None
ALARM1 Alarm Cutfoff Off None
iceland[local]#
Sample dictionary output:
=========================
{
'Alarm status - ALARM1 - Alarm Cutfoff': { 'level': 'None',
'reading': 'Off'},
'Alarm status - ALARM1 - Backplane 3.3V': { 'level': 'None',
'reading': 'OK'},
'Fan status - FANTRAY1 - 48V Fuse-A': { 'level': 'None',
'reading': 'OK'},
'Fan status - FANTRAY1 - 48V Fuse-B': { 'level': 'None',
'reading': 'OK'},
'Fan status - FANTRAY1 - Fan1 speed': { 'reading': '4028'},
'Fan status - FANTRAY1 - Fan1 status': { 'reading': '0'},
'Fan status - FANTRAY1 - Fan2 speed': { 'reading': '4700'},
'Fan status - FANTRAY1 - Fan2 status': { 'reading': '0'},
'Fan status - FANTRAY1 - Fans-Stat': { 'level': 'None',
'reading': 'OK'},
'Fan status - FANTRAY2 - 48V Fuse-A': { 'level': 'None',
'reading': 'OK'},
'Fan status - FANTRAY2 - 48V Fuse-B': { 'level': 'None',
'reading': 'OK'},
'Fan status - FANTRAY2 - Fan1 speed': { 'reading': '4338'},
'Fan status - FANTRAY2 - Fan1 status': { 'reading': '0'},
'Fan status - FANTRAY2 - Fan2 speed': { 'reading': '3889'},
'Fan status - FANTRAY2 - Fan2 status': { 'reading': '0'},
'Fan status - FANTRAY2 - Fans-Stat': { 'level': 'None',
'reading': 'OK'},
'Power status - PEMA - -48V Miswire': { 'level': 'None',
'reading': 'OK'},
'Power status - PEMA - -48V Powergood': { 'level': 'None',
'reading': 'OK'},
'Power status - PEMA - Backplane 3.3V': { 'level': 'None',
'reading': 'OK'},
'Power status - PEMA - Power Trip': { 'level': 'None',
'reading': 'OK'},
'Power status - PEMA - Temperature': { 'level': 'None',
'reading': 'OK'},
'Power status - PEMB - -48V Miswire': { 'level': 'None',
'reading': 'OK'},
'Power status - PEMB - -48V Powergood': { 'level': 'None',
'reading': 'OK'},
'Power status - PEMB - Backplane 3.3V': { 'level': 'None',
'reading': 'OK'},
'Power status - PEMB - Power Trip': { 'level': 'Minor',
'reading': 'Tripped'},
'Power status - PEMB - Temperature': { 'level': 'None',
'reading': 'OK'},
'Temperature readings - 0 - GPP0': { 'level': 'None',
'reading': '60'},
'Temperature readings - 0 - GPP1': { 'level': 'None',
'reading': '39'},
'Temperature readings - 0 - Inlet': { 'level': 'None',
'reading': '33'},
'Temperature readings - 0 - Outlet': { 'level': 'None',
'reading': '45'},
'Temperature readings - 1 - GPP0': { 'level': 'None',
'reading': '64'},
'Temperature readings - 1 - GPP1': { 'level': 'None',
'reading': '41'},
'Temperature readings - 1 - Inlet': { 'level': 'None',
'reading': '31'},
'Temperature readings - 1 - Outlet': { 'level': 'None',
'reading': '45'},
'Temperature readings - 2 - GPP0': { 'level': 'None',
'reading': '71'},
'Temperature readings - 2 - NPU0': { 'level': 'None',
'reading': '67'},
'Temperature readings - 2 - NPU1': { 'level': 'None',
'reading': '76'},
'Voltage readings - 0 - DDR Term': { 'level': 'None',
'reading': '1239'},
'Voltage readings - 0 - FIC': { 'level': 'None',
'reading': '4902'},
'Voltage readings - 0 - GPP': { 'level': 'None',
'reading': '1111'},
'Voltage readings - 0 - SysContr': { 'level': 'None',
'reading': '1478'},
'Voltage readings - 0 - TCAM': { 'level': 'None',
'reading': '1194'},
'Voltage readings - 0 - VCC 1.8V': { 'level': 'None',
'reading': '1784'},
'Voltage readings - 0 - VCC 12.0V': { 'level': 'None',
'reading': '11989'},
'Voltage readings - 0 - VCC 2.5V': { 'level': 'None',
'reading': '2492'},
'Voltage readings - 0 - VCC 3.3V': { 'level': 'None',
'reading': '3294'},
'Voltage readings - 0 - VCC 5.0V': { 'level': 'None',
'reading': '4985'},
'Voltage readings - 1 - DDR Term': { 'level': 'None',
'reading': '1252'},
'Voltage readings - 1 - FIC': { 'level': 'None',
'reading': '5013'},
'Voltage readings - 1 - GPP': { 'level': 'None',
'reading': '1122'},
'Voltage readings - 1 - SysContr': { 'level': 'None',
'reading': '1486'},
'Voltage readings - 1 - TCAM': { 'level': 'None',
'reading': '1214'},
'Voltage readings - 1 - VCC 1.8V': { 'level': 'None',
'reading': '1784'},
'Voltage readings - 1 - VCC 12.0V': { 'level': 'None',
'reading': '11923'},
'Voltage readings - 1 - VCC 2.5V': { 'level': 'None',
'reading': '2492'},
'Voltage readings - 1 - VCC 3.3V': { 'level': 'None',
'reading': '3312'},
'Voltage readings - 1 - VCC 5.0V': { 'level': 'None',
'reading': '4985'},
'Voltage readings - 2 - CPU 1.0V CA': { 'level': 'None',
'reading': '1012'},
'Voltage readings - 2 - CPU 1.0V CB': { 'level': 'None',
'reading': '1004'},
'Voltage readings - 2 - CPU 1.0V PL': { 'level': 'None',
'reading': '996'},
'Voltage readings - 2 - CPU DDR3': { 'level': 'None',
'reading': '1484'},
'Voltage readings - 2 - CPU SDRAM VTT': { 'level': 'None',
'reading': '740'},
'Voltage readings - 2 - KBP0 Analog': { 'level': 'None',
'reading': '892'},
'Voltage readings - 2 - KBP0 Core': { 'level': 'None',
'reading': '892'},
'Voltage readings - 2 - KBP1 Analog': { 'level': 'None',
'reading': '892'},
'Voltage readings - 2 - KBP1 Core': { 'level': 'None',
'reading': '900'},
'Voltage readings - 2 - NPU 1.0V': { 'level': 'None',
'reading': '996'},
'Voltage readings - 2 - NPU VDD SRAM': { 'level': 'None',
'reading': '1004'},
'Voltage readings - 2 - NPU0 AC SD VTT': { 'level': 'None',
'reading': '740'},
'Voltage readings - 2 - NPU0 Analog': { 'level': 'None',
'reading': '988'},
'Voltage readings - 2 - NPU0 BD SD VTT': { 'level': 'None',
'reading': '740'},
'Voltage readings - 2 - NPU0 DDR3': { 'level': 'None',
'reading': '1492'},
'Voltage readings - 2 - NPU1 AC SD VTT': { 'level': 'None',
'reading': '740'},
'Voltage readings - 2 - NPU1 Analog': { 'level': 'None',
'reading': '980'},
'Voltage readings - 2 - NPU1 BD SD VTT': { 'level': 'None',
'reading': '740'},
'Voltage readings - 2 - NPU1 DDR3': { 'level': 'None',
'reading': '1484'},
'Voltage readings - 2 - Switch Analog': { 'level': 'None',
'reading': '988'},
'Voltage readings - 2 - Switch Core': { 'level': 'None',
'reading': '996'},
'Voltage readings - 2 - VCC 1.2V': { 'level': 'None',
'reading': '1204'},
'Voltage readings - 2 - VCC 1.8V': { 'level': 'None',
'reading': '1800'},
'Voltage readings - 2 - VCC 12.0V': { 'level': 'None',
'reading': '11868'},
'Voltage readings - 2 - VCC 2.5V': { 'level': 'None',
'reading': '2473'},
'Voltage readings - 2 - VCC 3.3V': { 'level': 'None',
'reading': '3323'}
}
"""
command = "show environmental detail"
raw_environmentalDetail_list = self.cmd(command)
environmentalDetail_list = raw_environmentalDetail_list.splitlines()
if debug:
print 'The raw value returned was:'
print environmentalDetail_list
curname = ""
isName = False
for raw_line in environmentalDetail_list[4:]:
line = raw_line.strip()
if line in ["===========","---- ------ ----------- -------", \
"Slot Source Reading Level",""]:
continue
if debug:
print '----------------------------------------------'
print 'The line to be processed is:'
print line
regList = ['(?P<label>.*):','^(?P<slot>[a-zA-Z-0-9]{1,8})\s+(?P<source>[a-zA-Z-0-9\. \-]{1,15})\s+(?P<reading>[a-zA-Z-0-9\. ]{1,11})$','(?P<slot>[a-zA-Z-0-9]{1,8})\s+(?P<source>[a-zA-Z-0-9\. \-]{1,15})\s+(?P<reading>[a-zA-Z-0-9\. ]{1,11})\s+(?P<level>[\w]{1,7})']
pList = [re.compile(regexp) for regexp in regList]
mList = [p.search(line) for p in pList]
if debug:
print 'The mList is:', mList
local_dict = {}
if curname != "":
name = curname
for m in mList:
if m == None:
continue
dict = m.groupdict()
if debug:
print 'The dict is:', dict
for key in dict.keys():
if debug:
print 'The key is:', key
if key == "label":
curname = dict['label'].strip()
name = curname
isName = True
if debug:
print 'The name is:', name
elif (key == "slot") or (key == "source"):
print 'The name is:', name
print 'dict[%s] is %s' %(key,dict[key])
name = '%s - %s' %(name,dict[key].strip())
isName = True
if debug:
print 'The name is:', name
else:
local_dict[key] = dict[key]
isName = False
if debug:
print("The %s is: %s " %(key,local_dict[key]))
break
# We store last entry in the main dictionary we return
if not isName:
environmentalDetail_dict[name] = local_dict
return environmentalDetail_dict
def show_process_memory(self,slot='0'):
"""Runs the command 'show process mem slot <slot>' and parses the output.
Default slot is 0
"""
processMem_dict = {}
debug = False
# Sample raw input
"""
iceland[local]#show process mem
Process Name PID Text Data soText soData Stack Heap Shared
------------- ------- ------- ------- ------- ------- ------- ------- -------
NSM 704514 16KB 4096 8MB 1192KB 156KB 17MB 249MB
Smid 745496 224KB 16KB 12MB 3256KB 128KB 2504KB 21MB
Ip 745500 4096 4096 8MB 2028KB 188KB 4928KB 260MB
CtxMgr 745499 36KB 4096 7492KB 868KB 76KB 1268KB 21MB
Fpd 745498 32KB 8192 7616KB 900KB 92KB 1300KB 243MB
Aaad 745504 424KB 84KB 13MB 1312KB 204KB 5432KB 132MB
Cli 925743 44KB 16KB 12MB 3272KB 120KB 2600KB 21MB
Cli 1011760 44KB 16KB 12MB 3272KB 120KB 2600KB 21MB
Snmpd 745506 604KB 52KB 7680KB 912KB 80KB 2324KB 22MB
Inets 745505 32KB 8192 8MB 1056KB 112KB 1304KB 21MB
Logind 745497 16KB 4096 7628KB 924KB 80KB 1268KB 21MB
Logind 1011758 16KB 4096 7628KB 924KB 80KB 1268KB 21MB
Ospf 745501 332KB 8192 8000KB 952KB 88KB 1304KB 38MB
Bgp4 745502 320KB 8192 8020KB 960KB 96KB 1468KB 38MB
Evl 745493 108KB 4096 7828KB 920KB 92KB 1272KB 25MB
EvlColl 745494 36KB 4096 7508KB 876KB 76KB 1300KB 25MB
Qosd 745503 180KB 4096 9MB 1108KB 92KB 1304KB 127MB
IkedMc 745507 152KB 68KB 8MB 1004KB 88KB 1300KB 21MB
Ntp 745508 4096 4096 8076KB 1188KB 92KB 1300KB 21MB
Rip 745509 96KB 8192 7736KB 928KB 88KB 1268KB 38MB
Evt 745492 32KB 4096 7492KB 868KB 76KB 1268KB 21MB
Fsync 745495 20KB 4096 7408KB 868KB 72KB 1332KB 20MB
TunMgr 745510 112KB 4096 7540KB 876KB 84KB 1304KB 23MB
CDR 745511 112KB 8192 9MB 1076KB 100KB 1304KB 122MB
DHCPdMC 745512 48KB 1028KB 7600KB 900KB 80KB 1268KB 21MB
MIPd 745513 160KB 4096 7768KB 1952KB 96KB 2360KB 21MB
SLA 745514 32KB 4096 7664KB 900KB 76KB 1272KB 21MB
Dfn 745515 1172KB 4096 10MB 1072KB 92KB 13MB 21MB
Gtppd 745516 52KB 4096 9MB 1100KB 84KB 1380KB 122MB
iceland[local]#
Sample dictionary output:
=========================
{
'Aaad': { 'Data': '84KB',
'Heap': '5432KB',
'PID': '745504',
'Shared': '132MB',
'Stack': '204KB',
'Text': '424KB',
'soData': '1312KB',
'soText': '13MB'},
'Bgp4': { 'Data': '8192',
'Heap': '1468KB',
'PID': '745502',
'Shared': '38MB',
'Stack': '96KB',
'Text': '320KB',
'soData': '960KB',
'soText': '8020KB'},
'CDR': { 'Data': '8192',
'Heap': '1304KB',
'PID': '745511',
'Shared': '122MB',
'Stack': '100KB',
'Text': '112KB',
'soData': '1076KB',
'soText': '9MB'},
'Cli': { 'Data': '16KB',
'Heap': '2600KB',
'PID': '925743',
'Shared': '21MB',
'Stack': '120KB',
'Text': '44KB',
'soData': '3272KB',
'soText': '12MB'},
'Cli_1': { 'Data': '16KB',
'Heap': '2600KB',
'PID': '1011760',
'Shared': '21MB',
'Stack': '120KB',
'Text': '44KB',
'soData': '3272KB',
'soText': '12MB'},
'Cli_2': { 'Data': '16KB',
'Heap': '2600KB',
'PID': '1011763',
'Shared': '21MB',
'Stack': '140KB',
'Text': '44KB',
'soData': '3272KB',
'soText': '12MB'},
'CtxMgr': { 'Data': '4096',
'Heap': '1268KB',
'PID': '745499',
'Shared': '21MB',
'Stack': '80KB',
'Text': '36KB',
'soData': '868KB',
'soText': '7492KB'},
'DHCPdMC': { 'Data': '1028KB',
'Heap': '1268KB',
'PID': '745512',
'Shared': '21MB',
'Stack': '80KB',
'Text': '48KB',
'soData': '900KB',
'soText': '7600KB'},
'Dfn': { 'Data': '4096',
'Heap': '13MB',
'PID': '745515',
'Shared': '21MB',
'Stack': '92KB',
'Text': '1172KB',
'soData': '1072KB',
'soText': '10MB'},
'Evl': { 'Data': '4096',
'Heap': '1272KB',
'PID': '745493',
'Shared': '25MB',
'Stack': '92KB',
'Text': '108KB',
'soData': '920KB',
'soText': '7828KB'},
'EvlColl': { 'Data': '4096',
'Heap': '1300KB',
'PID': '745494',
'Shared': '25MB',
'Stack': '76KB',
'Text': '36KB',
'soData': '876KB',
'soText': '7508KB'},
'Evt': { 'Data': '4096',
'Heap': '1268KB',
'PID': '745492',
'Shared': '21MB',
'Stack': '80KB',
'Text': '32KB',
'soData': '868KB',
'soText': '7492KB'},
'Fpd': { 'Data': '8192',
'Heap': '1300KB',
'PID': '745498',
'Shared': '243MB',
'Stack': '92KB',
'Text': '32KB',
'soData': '900KB',
'soText': '7616KB'},
'Fsync': { 'Data': '4096',
'Heap': '1332KB',
'PID': '745495',
'Shared': '20MB',
'Stack': '72KB',
'Text': '20KB',
'soData': '868KB',
'soText': '7408KB'},
'Gtppd': { 'Data': '4096',
'Heap': '1380KB',
'PID': '745516',
'Shared': '122MB',
'Stack': '84KB',
'Text': '52KB',
'soData': '1100KB',
'soText': '9MB'},
'IkedMc': { 'Data': '68KB',
'Heap': '1300KB',
'PID': '745507',
'Shared': '21MB',
'Stack': '88KB',
'Text': '152KB',
'soData': '1004KB',
'soText': '8MB'},
'Inets': { 'Data': '8192',
'Heap': '1304KB',
'PID': '745505',
'Shared': '21MB',
'Stack': '116KB',
'Text': '32KB',
'soData': '1056KB',
'soText': '8MB'},
'Ip': { 'Data': '4096',
'Heap': '4932KB',
'PID': '745500',
'Shared': '260MB',
'Stack': '188KB',
'Text': '4096',
'soData': '2028KB',
'soText': '8MB'},
'Logind': { 'Data': '4096',
'Heap': '1268KB',
'PID': '745497',
'Shared': '21MB',
'Stack': '80KB',
'Text': '16KB',
'soData': '924KB',
'soText': '7628KB'},
'Logind_1': { 'Data': '4096',
'Heap': '1268KB',
'PID': '1011758',
'Shared': '21MB',
'Stack': '80KB',
'Text': '16KB',
'soData': '924KB',
'soText': '7628KB'},
'Logind_2': { 'Data': '4096',
'Heap': '1268KB',
'PID': '1011762',
'Shared': '21MB',
'Stack': '100KB',
'Text': '16KB',
'soData': '924KB',
'soText': '7628KB'},
'MIPd': { 'Data': '4096',
'Heap': '2360KB',
'PID': '745513',
'Shared': '21MB',
'Stack': '96KB',
'Text': '160KB',
'soData': '1952KB',
'soText': '7768KB'},
'NSM': { 'Data': '4096',
'Heap': '17MB',
'PID': '704514',
'Shared': '249MB',
'Stack': '160KB',
'Text': '16KB',
'soData': '1192KB',
'soText': '8MB'},
'Ntp': { 'Data': '4096',
'Heap': '1300KB',
'PID': '745508',
'Shared': '21MB',
'Stack': '92KB',
'Text': '4096',
'soData': '1188KB',
'soText': '8076KB'},
'Ospf': { 'Data': '8192',
'Heap': '1304KB',
'PID': '745501',
'Shared': '38MB',
'Stack': '88KB',
'Text': '332KB',
'soData': '952KB',
'soText': '8000KB'},
'Qosd': { 'Data': '4096',
'Heap': '1304KB',
'PID': '745503',
'Shared': '127MB',
'Stack': '92KB',
'Text': '180KB',
'soData': '1108KB',
'soText': '9MB'},
'Rip': { 'Data': '8192',
'Heap': '1268KB',
'PID': '745509',
'Shared': '38MB',
'Stack': '88KB',
'Text': '96KB',
'soData': '928KB',
'soText': '7736KB'},
'SLA': { 'Data': '4096',
'Heap': '1272KB',
'PID': '745514',
'Shared': '21MB',
'Stack': '76KB',
'Text': '32KB',
'soData': '900KB',
'soText': '7664KB'},
'Smid': { 'Data': '16KB',
'Heap': '2504KB',
'PID': '745496',
'Shared': '21MB',
'Stack': '132KB',
'Text': '224KB',
'soData': '3256KB',
'soText': '12MB'},
'Snmpd': { 'Data': '52KB',
'Heap': '2324KB',
'PID': '745506',
'Shared': '22MB',
'Stack': '80KB',
'Text': '604KB',
'soData': '912KB',
'soText': '7680KB'},
'TunMgr': { 'Data': '4096',
'Heap': '1304KB',
'PID': '745510',
'Shared': '23MB',
'Stack': '84KB',
'Text': '112KB',
'soData': '876KB',
'soText': '7540KB'}}
"""
command = "show process mem slot %s" %slot
raw_processMem_list = self.cmd(command)
processMem_list = raw_processMem_list.splitlines()
if debug:
print 'The raw value returned was:'
print processMem_list
if ('ERROR:' in raw_processMem_list):
print 'Detected an error when running: ' + command
print 'Returned text was:'
print raw_processMem_list
processMem_dict['Status'] = 'Error'
return processMem_dict
labels_line = processMem_list[1]
divider_line = processMem_list[2]
columnDict = parse_divider_line(self,divider_line)
dupKey_dict = {}
for raw_line in processMem_list[3:]:
line = raw_line.strip()
if debug:
print '----------------------------------------------'
print 'The line to be processed is:'
print line
start = columnDict[0][0]
end = columnDict[0][1]+1
name = line[start:end].strip()
if name in dupKey_dict:
# for duplicate keys, append the index to the key to differentiate between them
dupKey_dict[name] += 1
name = name + "_" + `dupKey_dict[name]`
else:
dupKey_dict[name] = 0
if debug:
print 'The name is:', name
local_dict = {}
for idx in range(1,len(columnDict.keys())):
start = columnDict[idx][0]
end = columnDict[idx][1]+1
labels_name = labels_line[start:end].strip()
local_dict[labels_name] = line[start:end].strip()
if debug:
print("The %s is: %s " %(labels_name,local_dict[labels_name]))
# We store last entry in the main dictionary we return
processMem_dict[name] = local_dict
return processMem_dict
def showmoduleprocessmashared(self,slot):
showmodprocmashared_dict = {}
debug = False
# Sample raw input
"""
Stoke[local]#show module Rip slot 0 ma shared
Name/ Elements HiWat/ In Use/ Allocs/ Alloc Fail/
Pool Size Elem Size User Size Free Frees Double Free
---------------- --------- --------- --------- ------------- -----------
MBuf 97,340 48,463 47,543 60,251 0
211,812,352 2,176 2,144 49,797 12,708 0
FpdPage 4,964 13 13 13 0
20,971,520 4,224 4,192 4,951 0 0
RouteMap 1,351 0 0 0 0
3,145,728 2,328 2,320 1,351 0 0
PfxList 6,553 0 0 0 0
524,288 80 72 6,553 0 0
CommList 9,361 0 0 0 0
524,288 56 48 9,361 0 0
UI32Array 5,957 0 0 0 0
524,288 88 80 5,957 0 0
AsPathAcl 13,106 0 0 0 0
524,288 40 32 13,106 0 0
RtPolRegex200 20,164 0 0 0 0
4,194,304 208 200 20,164 0 0
RtPolRegex400 10,280 0 0 0 0
4,194,304 408 400 10,280 0 0
RtPolRegex512 6,898 0 0 0 0
4,194,304 608 600 6,898 0 0
Stoke[local]#
# Sample output
'Rip': { 'AsPathAcl': { 'Alloc Fail': '0',
'Allocs': '0',
'Double Free': '0',
'Elem Size': '40',
'Elements': '13,106',
'Free': '13,106',
'Frees': '0',
'HiWat': '0',
'In Use': '0',
'Pool Size': '524,288',
'User Size': '32'},
'CommList': { 'Alloc Fail': '0',
'Allocs': '0',
'Double Free': '0',
'Elem Size': '56',
'Elements': '9,361',
'Free': '9,361',
'Frees': '0',
'HiWat': '0',
'In Use': '0',
'Pool Size': '524,288',
'User Size': '48'},
'FpdPage': { 'Alloc Fail': '0',
'Allocs': '13',
'Double Free': '0',
'Elem Size': '4,224',
'Elements': '4,964',
'Free': '4,951',
'Frees': '0',
'HiWat': '13',
'In Use': '13',
'Pool Size': '20,971,520',
'User Size': '4,192'},
'MBuf': { 'Alloc Fail': '0',
'Allocs': '60,251',
'Double Free': '0',
'Elem Size': '2,176',
'Elements': '97,340',
'Free': '49,797',
'Frees': '12,708',
'HiWat': '48,463',
'In Use': '47,543',
'Pool Size': '211,812,352',
'User Size': '2,144'},
'PfxList': { 'Alloc Fail': '0',
'Allocs': '0',
'Double Free': '0',
'Elem Size': '80',
'Elements': '6,553',
'Free': '6,553',
'Frees': '0',
'HiWat': '0',
'In Use': '0',
'Pool Size': '524,288',
'User Size': '72'},
'RouteMap': { 'Alloc Fail': '0',
'Allocs': '0',
'Double Free': '0',
'Elem Size': '2,328',
'Elements': '1,351',
'Free': '1,351',
'Frees': '0',
'HiWat': '0',
'In Use': '0',
'Pool Size': '3,145,728',
'User Size': '2,320'},
'RtPolRegex200': { 'Alloc Fail': '0',
'Allocs': '0',
'Double Free': '0',
'Elem Size': '208',
'Elements': '20,164',
'Free': '20,164',
'Frees': '0',
'HiWat': '0',
'In Use': '0',
'Pool Size': '4,194,304',
'User Size': '200'},
'RtPolRegex400': { 'Alloc Fail': '0',
'Allocs': '0',
'Double Free': '0',
'Elem Size': '408',
'Elements': '10,280',
'Free': '10,280',
'Frees': '0',
'HiWat': '0',
'In Use': '0',
'Pool Size': '4,194,304',
'User Size': '400'},
'RtPolRegex512': { 'Alloc Fail': '0',
'Allocs': '0',
'Double Free': '0',
'Elem Size': '608',
'Elements': '6,898',
'Free': '6,898',
'Frees': '0',
'HiWat': '0',
'In Use': '0',
'Pool Size': '4,194,304',
'User Size': '600'},
'UI32Array': { 'Alloc Fail': '0',
'Allocs': '0',
'Double Free': '0',
'Elem Size': '88',
'Elements': '5,957',
'Free': '5,957',
'Frees': '0',
'HiWat': '0',
'In Use': '0',
'Pool Size': '524,288',
'User Size': '80'}}
"""
# call to get a list of processes on this slot
processMemory_dict = show_process_memory(self,slot)
#pprint(processMemory_dict,indent=4,width=20,depth=20)
process_dict = {}
for process in processMemory_dict.keys():
if process == "Status":
# show_process_memory returns error then skip
if processMemory_dict['Status'] == "Error":
continue
elif re.search('.*_\d+',process) != None:
# probably _<digit> added to differentiate same process name in show_process_memory, then skip it
continue
command = "show module %s slot %s ma shared" %(process,slot)
raw_modslotmashared_list = self.cmd(command)
if ('ERROR:' in raw_modslotmashared_list):
print 'Detected an error when running: ' + command
print 'Returned text was:'
print raw_modslotmashared_list
showmodprocmashared_dict[process] = {'Error':raw_modslotmashared_list.strip()}
continue
elif raw_modslotmashared_list == "":
# no output. Give out warning and continue on
print "Command %s shows no output" %command
continue
modslotmashared_list = raw_modslotmashared_list.splitlines()
if debug:
print 'The raw value returned was:'
print modslotmashared_list
labels_line1 = modslotmashared_list[1]
labels_line2 = modslotmashared_list[2]
divider_line = modslotmashared_list[3]
numcol = len(divider_line.split())
columnDict = parse_divider_line(self,divider_line)
if debug:
print 'The columnDict is:'
print columnDict
temp_dict = {}
linenum = 4
for raw_line in modslotmashared_list[4:]:
line = raw_line
if debug:
print '----------------------------------------------'
print 'The line to be processes is:'
print line
if linenum % 2 == 0:
# even line number
local_dict = {}
start = columnDict[0][0]
end = columnDict[0][1]+1
name = line[start:end].strip()
startrange = 1
labels_line = labels_line1
else:
startrange = 0
labels_line = labels_line2
for labels_idx in range(startrange,numcol):
start = columnDict[labels_idx][0]
end = columnDict[labels_idx][1]+1
label_name = labels_line[start:end].strip(' /')
local_dict[label_name] = line[start:end].strip()
if debug:
print("The %s is: %s " %(labels_line[label_name],local_dict[labels_line[label_name]]))
# We store each entry in the temp dictionary
# odd line we save
if linenum % 2 == 1:
temp_dict[name] = local_dict
linenum += 1
# We store each temp dictionary to process
showmodprocmashared_dict[process] = temp_dict
return showmodprocmashared_dict
def showmoduleprocessmapool(self,slot):
showmodprocmapool_dict = {}
debug = False
# Sample raw input
"""
Stoke[local]#show module NSM slot 2 ma pool
Name Size InUse Free Allocs Frees
---------------- ------------- --------- --------- ------------- -------------
DaSet 128 27 8 27 0
DaJudy 40 57 15 261 204
DaJudy 72 2 33 21 19
DaJudy 136 2 31 16 14
DaJudy 264 6 38 8 2
CrhHandleData 60 5 35 5 0
CrhRegData 32 21 22 21 0
CrhCmdBlk 8,224 4 4 21 17
NvTimer 56 19 24 20 1
IpcConnIds 28 20 28 20 0
IpcArepIds 28 4 44 4 0
IpcReg 156 4 31 4 0
IpcConn 400 20 19 20 0
IpcRegmsg 8 11 17 11 0
IpcAsyncReply 344 4 12 4 0
IpcSndrArep 36 3 15 3 0
IpcThrEnt 36 0 18 12 12
IpcThrData 28 0 22 118 118
IpcRmReg 24 4 49 4 0
IpcRmInfo 36 1 145 110 109
IpcAmInfo 72 0 144 44 44
MsgVerPool 176 2 19 2 0
IpcTrWantReg 28 4 44 4 0
IpcTrRegac 76 30 3 30 0
IpcTrRegpc 72 23 12 23 0
IpcTrReg 84 4 37 4 0
IpcTrConn 388 20 20 20 0
IpcTrConnG 188 15 20 15 0
IpcTrSlot 64 19 19 19 0
IpcTrNode 112 39 25 39 0
IpcTrRegacI 28 30 18 30 0
IpcTrRegpcI 28 23 25 23 0
IpcTrCgIds 28 15 33 15 0
IpcPeer 48 17 15 17 0
IpcPeerMsgData 80 0 20 118 118
IpcPeerMsg 56 0 28 118 118
IpcQnxReg 80 4 28 4 0
IpcQnxConn 12 12 48 12 0
IpcTcpReg 52 4 42 4 0
IpcTcpConn 16 6 54 6 0
IpcTcpRegpc 104 23 11 23 0
IpcMsgReg 52 4 42 4 0
IpcMsgConn 124 24 12 24 0
NvMsg 8,300 2 30 743 741
EvtStateNotify 32 4 16 4 0
EvtCrhCallBack 8 0 28 9 9
EvtRegWait 40 0 17 4 4
H:CMOHandler 20 3 153 3 0
H:CMOHandler 20 7 149 7 0
H:CMOHandler 20 28 128 28 0
H:CMOHandler 20 1 155 1 0
CMOHandlerPool 12 39 2,005 39 0
CMOObjectPool 8,080 0 64 2 2
IpcMbType 36 0 18 1 1
IpcMbMsg 36 0 40 2 2
NvfuCdpipcInfo 48 1 133 16 15
cdpipc 72 0 534 48,471 48,471
cdpipc 264 0 518 32 32
cdpipc 1,460 1 352 48,502 48,501
CardAgt I2C Job 28 0 73 404 404
ProcMgrNPE 680 15 20 15 0
NPE/NSE 188 7 28 7 0
PWQ 112 0 32 7 7
ProcMgr Mon Eve 28 0 22 218,278 218,278
64 objects displayed.
Stoke[local]#
# Sample output
'NSM': { '68 objects displ': { 'Allocs': '',
'Free': '',
'Frees': '',
'InUse': '',
'Size': 'yed.'},
'CMOHandlerPool': { 'Allocs': '104',
'Free': '1,940',
'Frees': '0',
'InUse': '104',
'Size': '12'},
'CMOObjectPool': { 'Allocs': '204',
'Free': '64',
'Frees': '204',
'InUse': '0',
'Size': '8,080'},
'CardMgr I2C Job': { 'Allocs': '427,449',
'Free': '72',
'Frees': '427,448',
'InUse': '1',
'Size': '28'},
'CrhCmdBlk': { 'Allocs': '9',
'Free': '8',
'Frees': '1',
'InUse': '8',
'Size': '8,224'},
'CrhHandleData': { 'Allocs': '8',
'Free': '32',
'Frees': '0',
'InUse': '8',
'Size': '60'},
'CrhRegData': { 'Allocs': '20',
'Free': '23',
'Frees': '0',
'InUse': '20',
'Size': '32'},
'DaJudy': { 'Allocs': '1,310',
'Free': '18',
'Frees': '1,112',
'InUse': '198',
'Size': '40'},
'DaJudy_2': { 'Allocs': '105',
'Free': '3',
'Frees': '73',
'InUse': '32',
'Size': '72'},
'DaJudy_4': { 'Allocs': '73',
'Free': '20',
'Frees': '60',
'InUse': '13',
'Size': '136'},
'DaJudy_8': { 'Allocs': '31',
'Free': '42',
'Frees': '29',
'InUse': '2',
'Size': '264'},
'DaSet': { 'Allocs': '49',
'Free': '21',
'Frees': '0',
'InUse': '49',
'Size': '128'},
'EvtCrhCallBack': { 'Allocs': '171',
'Free': '28',
'Frees': '171',
'InUse': '0',
'Size': '8'},
'EvtRegWait': { 'Allocs': '7',
'Free': '17',
'Frees': '7',
'InUse': '0',
'Size': '40'},
'EvtStateNotify': { 'Allocs': '7',
'Free': '13',
'Frees': '0',
'InUse': '7',
'Size': '32'},
'H:CMOHandler': { 'Allocs': '12',
'Free': '144',
'Frees': '0',
'InUse': '12',
'Size': '20'},
'H:CMOHandler_128': { 'Allocs': '1',
'Free': '155',
'Frees': '0',
'InUse': '1',
'Size': '20'},
'H:CMOHandler_16': { 'Allocs': '31',
'Free': '125',
'Frees': '0',
'InUse': '31',
'Size': '20'},
'H:CMOHandler_2': { 'Allocs': '14',
'Free': '142',
'Frees': '0',
'InUse': '14',
'Size': '20'},
'H:CMOHandler_32': { 'Allocs': '12',
'Free': '144',
'Frees': '0',
'InUse': '12',
'Size': '20'},
'H:CMOHandler_4': { 'Allocs': '4',
'Free': '152',
'Frees': '0',
'InUse': '4',
'Size': '20'},
'H:CMOHandler_64': { 'Allocs': '8',
'Free': '148',
'Frees': '0',
'InUse': '8',
'Size': '20'},
'H:CMOHandler_8': { 'Allocs': '22',
'Free': '134',
'Frees': '0',
'InUse': '22',
'Size': '20'},
'HAMgrVRISet': { 'Allocs': '2',
'Free': '20',
'Frees': '0',
'InUse': '2',
'Size': '28'},
'IpcAmInfo': { 'Allocs': '139',
'Free': '144',
'Frees': '139',
'InUse': '0',
'Size': '72'},
'IpcArepIds': { 'Allocs': '22',
'Free': '29',
'Frees': '3',
'InUse': '19',
'Size': '28'},
'IpcAsyncReply': { 'Allocs': '22',
'Free': '13',
'Frees': '3',
'InUse': '19',
'Size': '344'},
'IpcConn': { 'Allocs': '103',
'Free': '37',
'Frees': '23',
'InUse': '80',
'Size': '400'},
'IpcConnIds': { 'Allocs': '103',
'Free': '14',
'Frees': '21',
'InUse': '82',
'Size': '28'},
'IpcMbMsg': { 'Allocs': '3',
'Free': '40',
'Frees': '3',
'InUse': '0',
'Size': '36'},
'IpcMbType': { 'Allocs': '2',
'Free': '18',
'Frees': '2',
'InUse': '0',
'Size': '36'},
'IpcMsgConn': { 'Allocs': '125',
'Free': '9',
'Frees': '26',
'InUse': '99',
'Size': '124'},
'IpcMsgReg': { 'Allocs': '7',
'Free': '39',
'Frees': '0',
'InUse': '7',
'Size': '52'},
'IpcPeer': { 'Allocs': '56',
'Free': '8',
'Frees': '0',
'InUse': '56',
'Size': '48'},
'IpcPeerMsg': { 'Allocs': '458',
'Free': '28',
'Frees': '458',
'InUse': '0',
'Size': '56'},
'IpcPeerMsgData': { 'Allocs': '452',
'Free': '20',
'Frees': '452',
'InUse': '0',
'Size': '80'},
'IpcQnxConn': { 'Allocs': '59',
'Free': '24',
'Frees': '23',
'InUse': '36',
'Size': '12'},
'IpcQnxReg': { 'Allocs': '7',
'Free': '25',
'Frees': '0',
'InUse': '7',
'Size': '80'},
'IpcReg': { 'Allocs': '7',
'Free': '28',
'Frees': '0',
'InUse': '7',
'Size': '156'},
'IpcRegmsg': { 'Allocs': '14',
'Free': '14',
'Frees': '0',
'InUse': '14',
'Size': '8'},
'IpcRmInfo': { 'Allocs': '687',
'Free': '145',
'Frees': '686',
'InUse': '1',
'Size': '36'},
'IpcRmReg': { 'Allocs': '7',
'Free': '46',
'Frees': '0',
'InUse': '7',
'Size': '24'},
'IpcSndrArep': { 'Allocs': '5',
'Free': '13',
'Frees': '0',
'InUse': '5',
'Size': '36'},
'IpcTcpConn': { 'Allocs': '19',
'Free': '44',
'Frees': '3',
'InUse': '16',
'Size': '16'},
'IpcTcpReg': { 'Allocs': '7',
'Free': '39',
'Frees': '0',
'InUse': '7',
'Size': '52'},
'IpcTcpRegpc': { 'Allocs': '85',
'Free': '9',
'Frees': '26',
'InUse': '59',
'Size': '104'},
'IpcThrData': { 'Allocs': '477',
'Free': '22',
'Frees': '477',
'InUse': '0',
'Size': '28'},
'IpcThrEnt': { 'Allocs': '38',
'Free': '18',
'Frees': '38',
'InUse': '0',
'Size': '36'},
'IpcTrCgIds': { 'Allocs': '75',
'Free': '42',
'Frees': '21',
'InUse': '54',
'Size': '28'},
'IpcTrConn': { 'Allocs': '103',
'Free': '40',
'Frees': '23',
'InUse': '80',
'Size': '388'},
'IpcTrConnG': { 'Allocs': '75',
'Free': '18',
'Frees': '23',
'InUse': '52',
'Size': '188'},
'IpcTrNode': { 'Allocs': '99',
'Free': '20',
'Frees': '23',
'InUse': '76',
'Size': '112'},
'IpcTrReg': { 'Allocs': '7',
'Free': '34',
'Frees': '0',
'InUse': '7',
'Size': '84'},
'IpcTrRegac': { 'Allocs': '115',
'Free': '13',
'Frees': '29',
'InUse': '86',
'Size': '76'},
'IpcTrRegacI': { 'Allocs': '115',
'Free': '10',
'Frees': '29',
'InUse': '86',
'Size': '28'},
'IpcTrRegpc': { 'Allocs': '85',
'Free': '11',
'Frees': '26',
'InUse': '59',
'Size': '72'},
'IpcTrRegpcI': { 'Allocs': '85',
'Free': '37',
'Frees': '26',
'InUse': '59',
'Size': '28'},
'IpcTrSlot': { 'Allocs': '79',
'Free': '20',
'Frees': '23',
'InUse': '56',
'Size': '64'},
'IpcTrWantReg': { 'Allocs': '3',
'Free': '45',
'Frees': '0',
'InUse': '3',
'Size': '28'},
'MsgVerPool': { 'Allocs': '3',
'Free': '18',
'Frees': '0',
'InUse': '3',
'Size': '176'},
'NPE/NSE': { 'Allocs': '31',
'Free': '25',
'Frees': '21',
'InUse': '10',
'Size': '188'},
'NSMClientSrvr': { 'Allocs': '3',
'Free': '137',
'Frees': '0',
'InUse': '3',
'Size': '104'},
'NvMsg': { 'Allocs': '6,927',
'Free': '30',
'Frees': '6,925',
'InUse': '2',
'Size': '8,300'},
'NvTimer': { 'Allocs': '35',
'Free': '24',
'Frees': '16',
'InUse': '19',
'Size': '56'},
'PWQ': { 'Allocs': '31',
'Free': '31',
'Frees': '30',
'InUse': '1',
'Size': '112'},
'ProcMgr Mon Eve': { 'Allocs': '116,642',
'Free': '22',
'Frees': '116,642',
'InUse': '0',
'Size': '28'},
'ProcMgrNPE': { 'Allocs': '56',
'Free': '0',
'Frees': '21',
'InUse': '35',
'Size': '680'},
'evt notify_wait': { 'Allocs': '105',
'Free': '1,071',
'Frees': '105',
'InUse': '0',
'Size': '72'},
'evt notify_wait_2': { 'Allocs': '4',
'Free': '133',
'Frees': '4',
'InUse': '0',
'Size': '264'}}
"""
# call to get a list of processes on this slot
processMemory_dict = show_process_memory(self,slot)
#pprint(processMemory_dict,indent=4,width=20,depth=20)
process_dict = {}
for process in processMemory_dict.keys():
if process == "Status":
# show_process_memory returns error then skip
if processMemory_dict['Status'] == "Error":
continue
elif re.search('.*_\d+',process) != None:
# probably _<digit> added to differentiate same process name in show_process_memory, then skip it
continue
command = "show module %s slot %s ma pool" %(process,slot)
raw_modslotmapool_list = self.cmd(command)
modslotmapool_list = raw_modslotmapool_list.splitlines()
if debug:
print 'The raw value returned was:'
print modslotmapool_list
if ('ERROR:' in raw_modslotmapool_list):
print 'Detected an error when running: ' + command
print 'Returned text was:'
print raw_modslotmapool_list
showmodprocmapool_dict[process] = {'Error':raw_modslotmapool_list.strip()}
continue
elif raw_modslotmapool_list == "":
# no output. Give out warning and continue on
print "Command %s shows no output" %command
continue
labels_line = modslotmapool_list[1].split()
divider_line = modslotmapool_list[2]
columnDict = parse_divider_line(self,divider_line)
if debug:
print 'The columnDict is:'
print columnDict
name_dict = {}
temp_dict = {}
for raw_line in modslotmapool_list[3:-1]:
line = raw_line
local_dict = {}
if debug:
print '----------------------------------------------'
print 'The line to be processes is:'
print line
start = columnDict[0][0]
end = columnDict[0][1]+1
name = line[start:end].strip()
if name in name_dict.keys():
name_dict[name] += 1
name = name + "_" + str(name_dict[name])
else:
name_dict[name] = 0
for labels_idx in range(1,len(labels_line)):
start = columnDict[labels_idx][0]
end = columnDict[labels_idx][1]+1
local_dict[labels_line[labels_idx]] = line[start:end].strip()
if debug:
print("The %s is: %s " %(labels_line[labels_idx],local_dict[labels_line[labels_idx]]))
# We store each entry in the temp dictionary
temp_dict[name] = local_dict
# We store each temp dictionary to process
showmodprocmapool_dict[process] = temp_dict
return showmodprocmapool_dict
def showmoduleprocessmapp(self, slot):
    """Collect per-process memory-allocator per-pool ("ma pp") statistics.

    For every process reported by show_process_memory() on the given slot,
    run "show module <process> slot <slot> ma pp", parse the fixed-width
    table and return a dict of the form
        {process: {elem_name: {'Size': ..., 'InUse': ..., 'Allocs': ...,
                               'Frees': ..., 'Blocks': ...}}}
    On a CLI error the per-process entry is {'Error': <raw output>}.

    Sample raw input:
        Stoke[local]#show module NSM slot 2 ma pp
        Elem__________________________________
        Name                 Size  InUse  Allocs  Frees  Blocks
        ------------------------------ -------- --------- ...
        _global_                0      0       8      0       0
        HALibHAPP::0          396      0       0      0       1
    """
    showmodprocmapp_dict = {}
    debug = False
    # Get the list of processes running on this slot.
    processMemory_dict = show_process_memory(self, slot)
    for process in processMemory_dict.keys():
        if process == "Status":
            # show_process_memory() reported an error -- nothing to query.
            if processMemory_dict['Status'] == "Error":
                continue
        elif re.search(r'.*_\d+', process) is not None:
            # "_<digit>" suffixes are added by show_process_memory() to
            # disambiguate duplicate process names; skip those aliases.
            continue
        command = "show module %s slot %s ma pp" % (process, slot)
        raw_modslotmapp_list = self.cmd(command)
        modslotmapp_list = raw_modslotmapp_list.splitlines()
        if debug:
            print('The raw value returned was:')
            print(modslotmapp_list)
        if 'ERROR:' in raw_modslotmapp_list:
            # Record the error verbatim and move on to the next process.
            print('Detected an error when running: ' + command)
            print('Returned text was:')
            print(raw_modslotmapp_list)
            showmodprocmapp_dict[process] = {'Error': raw_modslotmapp_list.strip()}
            continue
        elif raw_modslotmapp_list == "":
            # No output at all: warn and continue.
            print("Command %s shows no output" % command)
            continue
        # Line 2 holds the column labels, line 3 the dashed divider that
        # defines the fixed column boundaries.
        labels_line = modslotmapp_list[2].split()
        divider_line = modslotmapp_list[3]
        columnDict = parse_divider_line(self, divider_line)
        if debug:
            print('The columnDict is:')
            print(columnDict)
        temp_dict = {}
        for line in modslotmapp_list[4:]:
            local_dict = {}
            if debug:
                print('----------------------------------------------')
                print('The line to be processes is:')
                print(line)
            # Column 0 is the element name; the remaining columns map
            # one-to-one onto the split labels.
            start = columnDict[0][0]
            end = columnDict[0][1] + 1
            name = line[start:end].strip()
            for labels_idx in range(1, len(labels_line)):
                start = columnDict[labels_idx][0]
                end = columnDict[labels_idx][1] + 1
                local_dict[labels_line[labels_idx]] = line[start:end].strip()
                if debug:
                    print("The %s is: %s " % (labels_line[labels_idx], local_dict[labels_line[labels_idx]]))
            # Store each row, keyed by its element name.
            temp_dict[name] = local_dict
        # Store each per-process table under the process name.
        showmodprocmapp_dict[process] = temp_dict
    return showmodprocmapp_dict
def showmoduleprocessma(self, slot):
    """Collect per-process memory-allocator summary ("ma") statistics.

    For every process reported by show_process_memory() on the given
    slot, run "show module <process> slot <slot> ma", parse the
    fixed-width table and return
        {process: {type_name: {'Usage': ..., 'Allocs': ..., 'Frees': ...}}}
    On a CLI error the per-process entry is {'Error': <raw output>}.

    Sample raw input:
        brazil[local]#show module NSM slot 2 ma
        Type                     Usage          Allocs         Frees
        ------------------------ ------------- ------------- -------------
        Slabs                    2,097,152     2             0
        Pools                    628,020       137,486       136,914
        malloc                   7,757,824
    (Note: some rows, e.g. "malloc", leave trailing columns empty.)
    """
    showmodprocma_dict = {}
    debug = False
    # Get the list of processes running on this slot.
    processMemory_dict = show_process_memory(self, slot)
    for process in processMemory_dict.keys():
        if process == "Status":
            # show_process_memory() reported an error -- nothing to query.
            if processMemory_dict['Status'] == "Error":
                continue
        elif re.search(r'.*_\d+', process) is not None:
            # "_<digit>" suffixes disambiguate duplicate process names; skip.
            continue
        command = "show module %s slot %s ma" % (process, slot)
        raw_modslotma_list = self.cmd(command)
        modslotma_list = raw_modslotma_list.splitlines()
        if debug:
            print('The raw value returned was:')
            print(modslotma_list)
        if 'ERROR:' in raw_modslotma_list:
            # Record the error verbatim and move on to the next process.
            print('Detected an error when running: ' + command)
            print('Returned text was:')
            print(raw_modslotma_list)
            showmodprocma_dict[process] = {'Error': raw_modslotma_list.strip()}
            continue
        elif raw_modslotma_list == "":
            # No output at all: warn and continue.
            print("Command %s shows no output" % command)
            continue
        # Line 1 holds the column labels, line 2 the dashed divider that
        # defines the fixed column boundaries.
        labels_line = modslotma_list[1].split()
        divider_line = modslotma_list[2]
        columnDict = parse_divider_line(self, divider_line)
        if debug:
            print('The columnDict is:')
            print(columnDict)
        temp_dict = {}
        for line in modslotma_list[3:]:
            local_dict = {}
            if debug:
                print('----------------------------------------------')
                print('The line to be processes is:')
                print(line)
            # Column 0 is the type name; the remaining columns map onto
            # the split labels.
            start = columnDict[0][0]
            end = columnDict[0][1] + 1
            name = line[start:end].strip()
            for labels_idx in range(1, len(labels_line)):
                start = columnDict[labels_idx][0]
                end = columnDict[labels_idx][1] + 1
                local_dict[labels_line[labels_idx]] = line[start:end].strip()
                if debug:
                    print("The %s is: %s " % (labels_line[labels_idx], local_dict[labels_line[labels_idx]]))
            # Store each row, keyed by its type name.
            temp_dict[name] = local_dict
        # Store each per-process table under the process name.
        showmodprocma_dict[process] = temp_dict
    return showmodprocma_dict
def getshowmemcounters(self):
    """Fetch the per-slot memory counters from show_mem(), drop the
    "time stamp" entry, and re-key every remaining slot entry by the
    trailing character of its name (the slot digit)."""
    shMemory_dict = show_mem(self.ssx)
    return dict((slot_name[-1:], counters)
                for slot_name, counters in shMemory_dict.items()
                if slot_name != "time stamp")
def showmoduleprocessmappslab(self, slot):
    """Aggregate per-process "ma pp-slab" output into one data point each.

    Per reviewer feedback the pp-slab table is treated as a single slab:
    for every process only the number of table rows and the total in-use
    space (size minus free space, summed over all rows) are reported:
        {process: {'Total Entry': <row count>, 'Space In Use': <sum>}}
    On a CLI error the per-process entry is {'Error': <raw output>}.
    """
    showmodprocmappslab_dict = {}
    debug = False
    # Get the list of processes running on this slot.
    processMemory_dict = show_process_memory(self, slot)
    for process in processMemory_dict.keys():
        if process == "Status":
            # show_process_memory() reported an error -- nothing to query.
            if processMemory_dict['Status'] == "Error":
                continue
        elif re.search(r'.*_\d+', process) is not None:
            # "_<digit>" suffixes disambiguate duplicate process names; skip.
            continue
        command = "show module %s slot %s ma pp-slab" % (process, slot)
        raw_modslotmappslab_list = self.cmd(command)
        modslotmappslab_list = raw_modslotmappslab_list.splitlines()
        if debug:
            print('The raw value returned was:')
            print(modslotmappslab_list)
        if 'ERROR:' in raw_modslotmappslab_list:
            # Record the error verbatim and move on to the next process.
            print('Detected an error when running: ' + command)
            print('Returned text was:')
            print(raw_modslotmappslab_list)
            showmodprocmappslab_dict[process] = {'Error': raw_modslotmappslab_list.strip()}
            continue
        elif raw_modslotmappslab_list == "":
            # No output at all: warn and continue.
            print("Command %s shows no output" % command)
            continue
        # The column labels span two header lines (1 and 2); line 3 is the
        # dashed divider that fixes the column boundaries.
        labels_line1 = modslotmappslab_list[1]
        labels_line2 = modslotmappslab_list[2]
        divider_line = modslotmappslab_list[3]
        columnDict = parse_divider_line(self, divider_line)
        if debug:
            print('The columnDict is:')
            print(columnDict)
        temp_dict = {}
        in_use_total = 0  # renamed from `sum` so the builtin is not shadowed
        for line in modslotmappslab_list[4:-1]:
            local_dict = {}
            if debug:
                print('----------------------------------------------')
                print('The line to be processes is:')
                print(line)
            start = columnDict[0][0]
            end = columnDict[0][1] + 1
            name = line[start:end].strip()
            for labels_idx in range(1, len(columnDict)):
                start = columnDict[labels_idx][0]
                end = columnDict[labels_idx][1] + 1
                # A column label is split over the two header lines.
                label = labels_line1[start:end].strip() + " " + labels_line2[start:end].strip()
                label = label.strip()
                local_dict[label] = line[start:end].strip()
                if debug:
                    print("The %s is: %s " % (label, local_dict[label]))
                # in-use space = column 1 (size) minus column 2 (free)
                if labels_idx == 1:
                    in_use_total += int(local_dict[label].replace(',', ''))
                elif labels_idx == 2:
                    in_use_total -= int(local_dict[label].replace(',', ''))
            # Kept for debugging parity; only the aggregate below is returned.
            temp_dict[name] = local_dict
        showmodprocmappslab_dict[process] = {'Total Entry': str(len(modslotmappslab_list[4:-1])),
                                             'Space In Use': str(in_use_total)}
    return showmodprocmappslab_dict
def showmoduleprocessmaslab(self, slot):
    """Aggregate per-process "ma slab" output into one data point each.

    Per reviewer feedback the slab table is treated as a single slab:
    for every process only the number of table rows and the sum of the
    "Space In Use" column are reported:
        {process: {'Total Entry': <row count>, 'Space In Use': <sum>}}
    On a CLI error the per-process entry is {'Error': <raw output>}.
    """
    showmodprocmaslab_dict = {}
    debug = False
    # Get the list of processes running on this slot.
    processMemory_dict = show_process_memory(self, slot)
    for process in processMemory_dict.keys():
        if process == "Status":
            # show_process_memory() reported an error -- nothing to query.
            if processMemory_dict['Status'] == "Error":
                continue
        elif re.search(r'.*_\d+', process) is not None:
            # "_<digit>" suffixes disambiguate duplicate process names; skip.
            continue
        command = "show module %s slot %s ma slab" % (process, slot)
        raw_modslotmaslab_list = self.cmd(command)
        modslotmaslab_list = raw_modslotmaslab_list.splitlines()
        if debug:
            print('The raw value returned was:')
            print(modslotmaslab_list)
        if 'ERROR:' in raw_modslotmaslab_list:
            # Record the error verbatim and move on to the next process.
            print('Detected an error when running: ' + command)
            print('Returned text was:')
            print(raw_modslotmaslab_list)
            showmodprocmaslab_dict[process] = {'Error': raw_modslotmaslab_list.strip()}
            continue
        elif raw_modslotmaslab_list == "":
            # No output at all: warn and continue.
            print("Command %s shows no output" % command)
            continue
        # The column labels span two header lines (1 and 2); line 3 is the
        # dashed divider that fixes the column boundaries.
        labels_line1 = modslotmaslab_list[1]
        labels_line2 = modslotmaslab_list[2]
        divider_line = modslotmaslab_list[3]
        columnDict = parse_divider_line(self, divider_line)
        if debug:
            print('The columnDict is:')
            print(columnDict)
        temp_dict = {}
        in_use_total = 0  # renamed from `sum` so the builtin is not shadowed
        for line in modslotmaslab_list[4:-1]:
            local_dict = {}
            if debug:
                print('----------------------------------------------')
                print('The line to be processes is:')
                print(line)
            start = columnDict[0][0]
            end = columnDict[0][1] + 1
            name = line[start:end].strip()
            for labels_idx in range(1, len(columnDict)):
                start = columnDict[labels_idx][0]
                end = columnDict[labels_idx][1] + 1
                # A column label is split over the two header lines.
                label = labels_line1[start:end].strip() + " " + labels_line2[start:end].strip()
                label = label.strip()
                local_dict[label] = line[start:end].strip()
                if debug:
                    print("The %s is: %s " % (label, local_dict[label]))
                # Accumulate the "Space In Use" values from column 1.
                if labels_idx == 1:
                    in_use_total += int(local_dict[label].replace(',', ''))
            # Kept for debugging parity; only the aggregate below is returned.
            temp_dict[name] = local_dict
        showmodprocmaslab_dict[process] = {'Total Entry': str(len(modslotmaslab_list[4:-1])),
                                           'Space In Use': str(in_use_total)}
    return showmodprocmaslab_dict
#================================
"""
def uninstall(self,version = ""):
# Uninstall a package. If verion is specified, uninstall that version
# Otherwise, choose an avaialble version to install
debug = True
if version == "":
# call to get a list of installed version
installed_version = show_versions_and_build(self)
if debug:
pprint(installed_version,indent=4,width=20,depth=20)
# try to uninstall one version
enable_prompt_regex = "[\r\n]*\S+\[\S+\]#"
yesno_prompt_regex =".*[\r\n.]*\(\[*yes\]*/\[*no\]*\)\s*$"
for ver in installed_version.keys():
self.sendline('system uninstall package %s' %ver)
done = False
while not done:
retr == self.expect(yesno_prompt_regex,enable_prompt_regex, timeout = 10)
if retr == 0:
self.sendline('system uninstall package %s' %ver)
# This is the password option:
ses.sendline(password)
output = self.cmd("system uninstall package %s" %ver)
"""
#================================
# End section added by Anthony Ton
| [
"muttu2244@yahoo.com"
] | muttu2244@yahoo.com |
9ff0e602e68acc1da8af0f4d10a11ed64cc2a862 | 901e1686d5b5c243d6f876b2d850bad02c8f4d77 | /network.py | eebd64703dd636bd3432c7d55c811f8628a0079a | [
"MIT"
] | permissive | pkbatth92/Data-Science_Image-Classification | c414a3b38a2cc60f896d78909577b3ef2f323720 | 769ad73ecb51d79a1cc070b9eeb23ac7cd796728 | refs/heads/master | 2020-04-09T06:05:47.286455 | 2018-12-02T22:03:59 | 2018-12-02T22:03:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,524 | py | import torch
from torchvision import models
from torch import nn
from torch import optim
from collections import OrderedDict
class Network:
    """Transfer-learning wrapper around a pretrained torchvision VGG model.

    The convolutional backbone is frozen and only a freshly attached
    fully-connected classifier head (102 output classes -- presumably the
    flowers dataset, TODO confirm) is trained.
    """

    def __init__(self, architecture):
        """Load a pretrained VGG backbone; unknown names fall back to vgg16.

        :param architecture: one of 'vgg13', 'vgg19', or anything else
            (treated as 'vgg16').
        """
        if architecture == "vgg13":
            self.model = models.vgg13(pretrained=True)
        elif architecture == "vgg19":
            self.model = models.vgg19(pretrained=True)
        else:
            self.model = models.vgg16(pretrained=True)

    def spec_classifier(self, hidden_units):
        """Freeze the backbone and attach a new classifier head.

        :param hidden_units: width of the single hidden layer.
        """
        # Freeze every pretrained parameter; only the new head will train.
        for param in self.model.parameters():
            param.requires_grad = False
        # 25088 is the flattened VGG feature size; 102 output classes.
        classifier = nn.Sequential(OrderedDict([
            ('fc1', nn.Linear(25088, hidden_units)),
            ('relu1', nn.ReLU()),
            ('fc3', nn.Linear(hidden_units, 102)),
            ('output', nn.LogSoftmax(dim=1))
        ]))
        self.model.classifier = classifier

    def train_classifier(self, train_loader, epochs, valid_loader, learning_rate, processor):
        """Train the classifier head with Adam + NLL loss.

        Prints progress every 15 optimizer steps and runs a validation
        pass after every epoch.  Sets self.criterion / self.optimizer,
        which check_accuracy_on_valid() relies on.

        :param processor: torch device string/object ('cpu' or 'cuda').
        """
        self.criterion = nn.NLLLoss()
        # Only the (unfrozen) classifier parameters are optimized.
        self.optimizer = optim.Adam(self.model.classifier.parameters(), lr=learning_rate)
        print_every = 15
        steps = 0
        self.model.to(processor)
        for e in range(epochs):
            running_loss = 0
            for ii, (inputs, labels) in enumerate(train_loader):
                steps += 1
                inputs, labels = inputs.to(processor), labels.to(processor)
                self.optimizer.zero_grad()
                outputs = self.model.forward(inputs)
                training_loss = self.criterion(outputs, labels)
                training_loss.backward()
                self.optimizer.step()
                running_loss += training_loss.item()
                if steps % print_every == 0:
                    print('Epoch: {}/{}...'.format(e + 1, epochs))
                    running_loss = 0
            self.check_accuracy_on_valid(valid_loader, processor)

    def check_accuracy_on_valid(self, valid_loader, processor):
        """Report accuracy and mean per-sample loss on the validation set.

        Requires train_classifier() to have been called first (uses
        self.criterion).
        """
        correct = 0
        total = 0
        running_loss = 0
        self.model.to(processor)
        with torch.no_grad():
            for inputs, labels in valid_loader:
                inputs, labels = inputs.to(processor), labels.to(processor)
                outputs = self.model.forward(inputs)
                _, predicted = torch.max(outputs.data, 1)
                validation_loss = self.criterion(outputs, labels)
                total += labels.size(0)
                correct += (predicted == labels).sum().item()
                # .item() keeps the accumulator a plain float, not a tensor.
                running_loss += validation_loss.item()
        print('Accuracy on validation set: %d %%' % (correct * 100 / total),
              'Loss on validation set: {:.4f}'.format(running_loss / total))

    def check_accuracy_on_test(self, test_loader, processor):
        """Report classification accuracy on the test set."""
        correct = 0
        total = 0
        self.model.to(processor)
        with torch.no_grad():
            for inputs, labels in test_loader:
                inputs, labels = inputs.to(processor), labels.to(processor)
                outputs = self.model.forward(inputs)
                _, predicted = torch.max(outputs.data, 1)
                total += labels.size(0)
                correct += (predicted == labels).sum().item()
        print('Accuracy on test set: %d %%' % (correct * 100 / total))
| [
"noreply@github.com"
] | noreply@github.com |
71d01f67c7f027070de066bc474222ea92c178e5 | 62af75f2b6dfe99c9bc4ee8c5441314f9ef6e5dc | /factual/utils/__init__.py | b3c0c60e9e958e910dee0bbaf8d639cea3b4546b | [] | no_license | almanackist/campaigndineations | 3f5f7e5e62eed209c064b9db651a7da4ef1b97b6 | 8048debd146ed4e5d44b72e06400abe02eb137c5 | refs/heads/master | 2021-01-01T15:44:23.570449 | 2012-06-24T19:01:22 | 2012-06-24T19:01:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 25 | py | from utils import circle
| [
"almanackist@almanackery.com"
] | almanackist@almanackery.com |
7f9238fb493f94f9b1d420970d33fae23ba32ac8 | ab821e017c32746929581c56a69464e7d9243cba | /robot_calib/scripts/transform_utils.py | a2d7354f68c607ed46e1ad74ce15ae73945ddf1c | [] | no_license | JeffYoung17/MOM_Robot | d5cd960b7fceb94a3e42f2a570b232a058f1ffb8 | 3a9c57436e5534cac51411220602ad2500ac75ed | refs/heads/master | 2020-04-01T17:40:26.246145 | 2018-11-22T10:26:08 | 2018-11-22T10:26:08 | 153,443,380 | 6 | 2 | null | null | null | null | UTF-8 | Python | false | false | 334 | py | #!/usr/bin/env python
import PyKDL
from math import pi
def quat_to_angle(quat):
    """Return the yaw (rotation about Z, radians) of a quaternion with
    x/y/z/w attributes, via PyKDL's roll-pitch-yaw decomposition."""
    return PyKDL.Rotation.Quaternion(quat.x, quat.y, quat.z, quat.w).GetRPY()[2]
def normalize_angle(angle):
    """Wrap an angle in radians into the interval [-pi, pi]."""
    two_pi = 2.0 * pi
    wrapped = angle
    while not (-pi <= wrapped <= pi):
        if wrapped > pi:
            wrapped -= two_pi
        else:
            wrapped += two_pi
    return wrapped
| [
"jeffyoung17@163.com"
] | jeffyoung17@163.com |
3713eb98c2b59067157bd8ca668bfb02eed482bc | 73c98ab29fad625a7db402024be4d372019d8aeb | /list_workflows.py | b97392b4f4d3d7709594bd7a28e589b0859604d8 | [] | no_license | InformaticsMatters/galaxy_bioblend | fce39a65a86d249fca867e3551f86074a08c51d8 | 5552d6d4e5850591bd80e39c7d39fbfb8d259746 | refs/heads/master | 2020-08-08T02:41:40.700272 | 2019-12-05T18:38:43 | 2019-12-05T18:38:43 | 213,681,388 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 182 | py | from galaxyclient import Utils
# Connect to the Galaxy server configured by galaxyclient.Utils and list
# every workflow visible to the authenticated user.
u = Utils()
# NOTE(review): presumably a bioblend GalaxyInstance -- confirm in galaxyclient.
gi = u.get_galaxy_instance()
workflows = gi.workflows.get_workflows()
for workflow in workflows:
    # Each entry is a workflow description dict (id, name, ...).
    print("Workflow: " + str(workflow))
"tdudgeon@informaticsmatters.com"
] | tdudgeon@informaticsmatters.com |
44cf55d3b3270ca188b21be1f5bea62210cf6c4c | 73c2d3f96f176f534b7e81a43bb731ebff8c5261 | /wblog/settings.py | 3b063bd1678163cf69e5cb55c23ae73450406418 | [] | no_license | tianjiandikun/wblog | df9b77ccaaecea4236157dbc9b7dc48ffa1b9bee | 61a1e24b030e1da4cf934ea70b2f79724b48859a | refs/heads/master | 2020-05-27T16:57:38.971072 | 2015-03-16T16:25:44 | 2015-03-16T16:25:44 | 32,328,095 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,164 | py | """
Django settings for wblog project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to version control -- rotate it and
# load it from an environment variable before any production deployment.
SECRET_KEY = 'dli5+@w8$yrnis$ti^p7%@vw$+4=($$+rsfl1qriqy$8p3$=61'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True

# Hosts/domains this site may serve; must be populated when DEBUG is False.
ALLOWED_HOSTS = []

# Application definition

INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'blog',             # project blog application
    'django_markdown',  # markdown editing/rendering support
)

MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
)

ROOT_URLCONF = 'wblog.urls'

WSGI_APPLICATION = 'wblog.wsgi.application'

# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
# NOTE(review): database credentials are committed here -- move them to
# environment variables or a local settings override.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql_psycopg2',
        'NAME': 'wblog',
        'USER': 'wblog',
        'PASSWORD': 'wblog',
        'HOST': '127.0.0.1',
        'PORT': '5432',
    }
}

# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/

LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/

STATIC_URL = '/static/'
| [
"xinhuyue@163.com"
] | xinhuyue@163.com |
0e4a20028c2fca889c595e291c6de3f6ec85ce20 | 1e47e604fcbeffedfe35fb933a3171f2a876c071 | /opengl/p5.py | 1c2f78cdcc38de1c3a31f983d224106f458e419f | [] | no_license | caihedongorder/PythonEx | 5ec9f938869167fa3a56b4705ce9f8ce29b0b65c | b40fcd2e73309da9a47be2ec1676c9281866246f | refs/heads/master | 2021-09-15T07:18:31.208471 | 2018-05-28T13:25:03 | 2018-05-28T13:25:03 | 104,706,038 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,270 | py | #!/usr/bin/env python
# coding=utf-8
import sys
import time
from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *
from gamemodule import render
from gamemodule import application
class GLRender(render.GLRenderBase):
    """Minimal GL render callback: draws one RGB triangle rotating about Y."""

    def __init__(self):
        # Wall-clock reference for total elapsed time.  Currently unused by
        # the draw callback, which rotates per-frame via DeltaTime.
        self.startTime = time.clock()
        self.rotateSpeed = 90  # rotation speed, degrees per second

    def OnDrawFunc(self, DeltaTime):
        """Per-frame draw callback; DeltaTime is the frame time in seconds."""
        glClear(GL_COLOR_BUFFER_BIT)
        # Rotation increment for this frame only; the previous dead
        # computation from elapsed wall-clock time has been removed.
        rotateAngle = DeltaTime * self.rotateSpeed
        print(rotateAngle)
        # No glLoadIdentity() here, so the rotation accumulates on the
        # current modelview matrix every frame.
        glRotatef(rotateAngle, 0, 1, 0)
        glBegin(GL_TRIANGLES)
        # Counter-clockwise winding (front-facing); one color per vertex.
        glColor3f(1.0, 0, 0)
        glVertex3f(0.0, 0.5, 0)
        glColor3f(0.0, 1.0, 0)
        glVertex3f(0.5, -0.5, 0)
        glColor3f(0.0, 0, 1.0)
        glVertex3f(-0.5, -0.5, 0)
        glEnd()
        glFlush()
# Bootstrap: record the start time, hand the application module a factory
# that builds our render object, and enter the main loop.
# NOTE(review): time.clock() was removed in Python 3.8 -- switch to
# time.perf_counter() if this ever runs on a modern interpreter.
start = time.clock()
application.Init(lambda : GLRender())
application.Loop()
| [
"caihedongorder@126.com”"
] | caihedongorder@126.com” |
2f3af0863184acecd60d4a107446bbf8ed4e7e37 | 66b6146930bee8b02167f6a276aa38ee615e126d | /Utils.py | 415e416068202c91b2c2259633aad75919496c09 | [
"MIT"
] | permissive | muradtuk/UnifiedFramework | 10be233983a3255bac18b91b21f2077f45c07114 | 07dd7cf50552fa87fd875818eead03a2fe9e5073 | refs/heads/master | 2022-08-24T05:42:40.317221 | 2020-05-25T20:41:52 | 2020-05-25T20:41:52 | 258,382,497 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,820 | py | """*****************************************************************************************
MIT License
Copyright (c) 2020 Murad Tukan, Alaa Maalouf, Dan Feldman
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*****************************************************************************************"""
import os
import scipy as sp
import numpy as np
import time
import pathlib
import pandas as pd
import sklearn
import matplotlib.pyplot as plt
from scipy.optimize import approx_fprime
from scipy.stats import ortho_group
import cvxpy as cp
import copy
from sklearn.preprocessing import Normalizer
import PointSet
################################################## General constants ###################################################
EPSILON = 1e-9  # used for approximating the gradient of a loss function
TOL = 0.01  # defines the approximation with respect to the minimum volume enclosing ellipsoid
ELLIPSOID_MAX_ITER = 10
OPTIMIZATION_TOL = 1e-6
OPTIMIZATION_NUM_INIT = 10
Z = 2  # the norm
LAMBDA = 1  # the regularization parameter
PARALLELIZE = True  # whether to apply the experimental results in a parallel fashion
THREAD_NUM = 4
DATA_FOLDER = 'datasets'

# M estimator loss functions supported by our framework.
# Each entry maps a problem type to a per-point sensitivity upper bound
# f(x, w, args) -> array of bounds, where x are the points, w their
# weights, and args holds problem-specific constants.
# NOTE(review): the key below is spelled 'logisitic' while OBJECTIVE_COSTS
# uses 'logistic' -- initializaVariables('logistic') would KeyError here;
# confirm which spelling callers use.
SENSE_BOUNDS = {
    'logisitic': (lambda x, w, args=None: 32 / LAMBDA * (2 / args[0] + w * np.linalg.norm(x, ord=2, axis=1) ** 2)
                  * args[0]),
    'nclz': (lambda x, w, args=None: w * np.linalg.norm(x, ord=Z, axis=1) ** Z),
    'svm': (lambda x, w, args=None: np.maximum(9 * w / args[0], 2 * w / args[1]) + 13 * w / (4 * args[0]) +
            125 * (args[0] + args[1]) / (4 * LAMBDA) * (w * np.linalg.norm(x, ord=2, axis=1)**2 +
                                                        w/(args[0] + args[1]))),
    'restricted_lz': (lambda x, w, args=None: w * np.minimum(np.linalg.norm(x, ord=2, axis=1),
                                                             args[1] ** np.abs(0.5 - 1/Z) * np.linalg.norm(args[0]))),
    'lz': (lambda x, w, args=None: w * np.linalg.norm(x, ord=Z, axis=1) ** Z if 1 <= Z <= 2
           else args[0]**(Z/2) * w * np.linalg.norm(x, ord=Z, axis=1)**Z),
    'lse': (lambda x, w, args=None: w * np.linalg.norm(x, ord=1, axis=1))
}

# Objective (cost) functions keyed by problem type: f(P, x, args) where P
# is a weighted PointSet (P.P points, P.W weights) and x the query vector.
OBJECTIVE_COSTS = {
    'logistic':
        (lambda P, x, args=None: (np.sum(P.W) / (2 * args[0]) if args is not None else (1 / 2)) *
         np.linalg.norm(x[:-1], 2) ** 2 +
         LAMBDA * np.sum(np.multiply(P.W, np.log1p(np.exp(-np.multiply(P.P[:, -1],
                                                                       np.dot(P.P[:, :-1], x[:-1])
                                                                       + x[-1])))))),
    'svm':
        (lambda P, x, args=None: (np.sum(P.W) / (2 * args[0]) if args is not None else (1 / 2)) *
         np.linalg.norm(x[:-1], 2) ** 2 +
         LAMBDA * np.sum(np.multiply(P.W, np.maximum(0,
                                                     1 - (np.multiply(P.P[:, -1],
                                                                      np.dot(P.P[:, :-1], x[:-1])
                                                                      + x[-1])))))),
    'lz':
        (lambda P, x, args=None: np.sum(np.multiply(P.W, np.abs(np.dot(P.P[:, :-1], x) - P.P[:, -1]) ** Z))),
    'restricted_lz':
        # NOTE(review): np.abs(P.P, x) passes x as numpy's `out` argument --
        # this looks like a typo for np.dot(P.P, x); confirm before use.
        (lambda P, x, args=None: np.sum(np.multiply(P.W, np.minimum(np.sum(np.abs(P.P, x), 1), np.linalg.norm(x, Z))))),
    'lse':
        (lambda P, x, args=None: np.linalg.norm(P.P - x, 'fro') ** 2)
}

############################################# Data option constants ####################################################
SYNTHETIC_DATA = 0  # use synthetic data
REAL_DATA = 1  # use real data
DATA_TYPE = REAL_DATA  # distinguishes between the use of real vs synthetic data

########################################### Experimental results constants #############################################
# colors for our graphs
COLOR_MATCHING = {'Our coreset': 'red',
                  'Uniform sampling': 'blue',
                  'All data': 'black'}
REPS = 32  # number of repetitions for sampling a coreset
SEED = np.random.randint(1, int(1e7), REPS)  # Seed for each repetition
NUM_SAMPLES = 10  # number of coreset sizes
x0 = None  # initial solution for the heuristic solver
# The following are placeholders populated by initializaVariables() at runtime.
OBJECTIVE_COST = None
PROBLEM_TYPE = None
SENSE_BOUND = None
USE_SVD = False
PREPROCESS_DATA = False
def initializaVariables(problem_type, z=2, Lambda=1):
    """Configure the module-level globals for the chosen problem type.

    :param problem_type: key into OBJECTIVE_COSTS / SENSE_BOUNDS, e.g.
        'logistic', 'svm', 'lz', 'restricted_lz', 'lse'.
    :param z: the norm power (stored in the global Z).
    :param Lambda: the regularization parameter (stored in LAMBDA).
    :return: dict mapping every UPPERCASE module-level name to its value.

    NOTE(review): the function name contains a typo ('initializa') but is
    part of the public interface, so it is kept.
    """
    global Z, LAMBDA, OBJECTIVE_COST, PROBLEM_TYPE, SENSE_BOUND, USE_SVD, PREPROCESS_DATA
    Z = z
    LAMBDA = Lambda
    OBJECTIVE_COST = OBJECTIVE_COSTS[problem_type]  # the objective function which we want to generate a coreset for
    PROBLEM_TYPE = problem_type
    SENSE_BOUND = SENSE_BOUNDS[problem_type]
    # SVD-based preprocessing only applies to the l2-flavored problems;
    # general lz (z != 2) and lse skip it.
    if ('lz' in problem_type and z != 2) or problem_type == 'lse':
        USE_SVD = False
        PREPROCESS_DATA = False
    else:
        USE_SVD = True
        PREPROCESS_DATA = True
    # Snapshot every UPPERCASE module-level variable into a dict.
    var_dict = {}
    variables = copy.copy(list(globals().keys()))
    for var_name in variables:
        if var_name.isupper():
            var_dict[var_name] = eval(var_name)  # eval is applied to global names only
    return var_dict
def preprocessData(P):
    """Optionally normalize the data for the classification problems.

    When the module flag PREPROCESS_DATA is set: the last column of P is
    taken as the label vector, each row of the feature block P[:, :-1] is
    scaled to unit l2 norm (sklearn Normalizer), and the labels are
    remapped so the minimum label becomes -1 and the maximum becomes +1.

    :param P: numpy array whose last column holds the labels.
    :return: the (possibly) preprocessed array.

    NOTE(review): labels other than the min/max values are left untouched,
    so this presumably assumes binary labels -- confirm with callers.
    """
    global PREPROCESS_DATA
    if PREPROCESS_DATA:
        y = P[:, -1]
        min_value = np.min(y)
        max_value = np.max(y)
        # Normalizer scales each ROW of the feature block to unit norm.
        P = Normalizer().fit_transform(P[:, :-1], P[:, -1])
        y[np.where(y == min_value)[0]] = -1
        y[np.where(y == max_value)[0]] = 1
        P = np.hstack((P, y[:, np.newaxis]))
    return P
# def getObjectiveFunction():
# global PROBLEM_DEF, OBJECTIVE_FUNC, GRAD_FUNC
#
# if PROBLEM_DEF == 1:
# OBJECTIVE_FUNC = (lambda P, w: np.sum(np.multiply(P.W,np.log(1.0 + np.square(np.dot(P.P[:, :-1], w) + P.P[:, -1])))))
# GRAD_FUNC = (lambda P, w: np.sum(
# np.multiply(P.W, np.multiply(np.expand_dims(2/(1.0 + np.square(np.dot(P.P[:, :-1], w) - P.P[:, -1])), 1),
# np.multiply(P.P[:, :-1], np.expand_dims(np.dot(P.P[:, :-1], w) + P.P[:, -1], 1)), 0))))
def generateSampleSizes(n):
    """
    The function at hand, create a list of samples which denote the desired coreset sizes.
    :param n: An integer which denotes the number of points in the dataset.
    :return: A list of coreset sample sizes.

    NOTE(review): np.geomspace is called with 1-element list endpoints, so
    the result is a float array of shape (NUM_SAMPLES, 1); callers that
    need integer sample sizes must cast.
    """
    global NUM_SAMPLES
    min_val = int(2 * np.log(n) ** 2)  # minimum sample size
    max_val = int(6 * n ** 0.6)  # maximal sample size
    samples = np.geomspace([min_val], [max_val], NUM_SAMPLES)  # a list of samples
    return samples
# def readSyntheticRegressionData():
# data = np.load('SyntheticRegDataDan.npz')
# X = data['X']
# y = data['y']
# P = PointSet(np.hstack((X[:, np.newaxis], -y[:, np.newaxis])))
# return P
def plotPointsBasedOnSens():
    """
    Scatter-plot the synthetic regression dataset colored by the precomputed
    per-point sensitivities and save the figure(s) to 'Sens<i>.pdf'.

    Reads 'sens.npy' (sensitivities) and 'SyntheticRegDataDan.npz' (X, y)
    from the working directory; produces one figure per colormap listed.
    """
    sens = np.load('sens.npy')
    data = np.load('SyntheticRegDataDan.npz')
    X = data['X']
    y = data['y']
    # getObjectiveFunction()
    # Labels are negated to match the convention used elsewhere in this module.
    P = np.hstack((X[:, np.newaxis], -y[:, np.newaxis]))
    colorbars = ['bwr']#, 'seismic', 'coolwarm', 'jet', 'rainbow', 'gist_rainbow', 'hot', 'autumn']
    for i in range(len(colorbars)):
        plt.style.use('classic')
        min_, max_ = np.min(sens), np.max(sens)
        # Order of the pyplot calls matters: clim/colorbar act on the scatter above.
        plt.scatter(P[:, 0], P[:, 1], c=sens, marker='o', s=50, cmap=colorbars[i])
        plt.clim(min_, max_)
        ax = plt.gca()
        cbar = plt.colorbar(pad=-0.1, fraction=0.046)
        cbar.ax.get_yaxis().labelpad = 24
        cbar.set_label('Sensitivity', rotation=270, size='xx-large', weight='bold')
        cbar.ax.tick_params(labelsize=24)
        plt.ylabel('y')
        plt.xlabel('x')
        plt.axis('off')
        figure = plt.gcf()
        figure.set_size_inches(20, 13)
        plt.savefig('Sens{}.pdf'.format(i), bbox_inches='tight', pad_inches=0)
def createRandomRegressionData(n=2e4, d=2):
    """
    Generate a synthetic regression dataset with 20 injected large-magnitude
    outliers and save it to 'SyntheticRegData.npz' (arrays 'X' and 'y').

    :param n: number of inlier samples to generate.
    :param d: number of features per sample.
    """
    features, targets = sklearn.datasets.make_regression(n_samples=int(n), n_features=d, random_state=0,
                                                         noise=4.0, bias=100.0)
    # Append a handful of extreme points so that sensitivity sampling has
    # genuine outliers to pick up.
    features = np.vstack((features, 1000 * np.random.rand(20, d)))
    targets = np.hstack((targets, 10000 * np.random.rand(20, )))
    np.savez('SyntheticRegData', X=features, y=targets)
def plotEllipsoid(center, radii, rotation, ax=None, plotAxes=True, cageColor='r', cageAlpha=1):
    """
    Plot a 3D wireframe ellipsoid.

    :param center: length-3 array-like, center of the ellipsoid.
    :param radii: length-3 array-like, semi-axis lengths.
    :param rotation: 3x3 rotation matrix applied to the axis-aligned ellipsoid.
    :param ax: existing 3D axes to draw into; a new figure/axes is created when None.
    :param plotAxes: when True, also draw (and print) the principal axes.
    :param cageColor: matplotlib color of the wireframe.
    :param cageAlpha: transparency of the wireframe.
    """
    # Fixed: compare against None with ``is`` (identity), not ``==``, which
    # would invoke elementwise comparison for array-like ax values.
    make_ax = ax is None
    if make_ax:
        fig = plt.figure()
        ax = fig.add_subplot(111, projection='3d')

    u = np.linspace(0.0, 2.0 * np.pi, 100)
    v = np.linspace(0.0, np.pi, 100)

    # cartesian coordinates that correspond to the spherical angles:
    x = radii[0] * np.outer(np.cos(u), np.sin(v))
    y = radii[1] * np.outer(np.sin(u), np.sin(v))
    z = radii[2] * np.outer(np.ones_like(u), np.cos(v))
    # rotate every surface point and translate it to the center
    for i in range(len(x)):
        for j in range(len(x)):
            [x[i, j], y[i, j], z[i, j]] = np.dot(np.array([x[i, j], y[i, j], z[i, j]]), rotation) + center.flatten()

    if plotAxes:
        # principal (semi-)axes of the axis-aligned ellipsoid
        axes = np.array([[radii[0], 0.0, 0.0],
                         [0.0, radii[1], 0.0],
                         [0.0, 0.0, radii[2]]])

        # rotate accordingly
        for i in range(len(axes)):
            axes[i] = np.dot(axes[i], rotation)
        print('Axis are: ', axes)
        # print(axes + center.flatten())
        # draw each principal axis as a line segment through the center
        print('Whole points are: ')
        for p in axes:
            X3 = np.linspace(-p[0], p[0], 2) + center[0]
            Y3 = np.linspace(-p[1], p[1], 2) + center[1]
            Z3 = np.linspace(-p[2], p[2], 2) + center[2]
            ax.plot3D(X3, Y3, Z3, color='m')
            PP = np.vstack((X3, Y3, Z3)).T
            print(PP)

    # plot ellipsoid
    ax.plot_wireframe(x, y, z, rstride=4, cstride=4, color=cageColor, alpha=cageAlpha)
##################################################### READ DATASETS ####################################################
def readRealData(datafile='hour.csv', problemType=0):
    """
    This function, given a physical path towards an csv file, reads the data into a weighted set.

    :param datafile: A string containing the physical path on the machine towards the dataset which the user desires
                     to use.
    :param problemType: A integer defining whether the dataset is used for regression or clustering.
    :return: A weighted set, namely, a PointSet object containing the dataset.
    """
    global PROBLEM_TYPE
    data_path = r'datasets/' + datafile
    dataset = pd.read_csv(data_path)  # read csv file
    Q = dataset.values  # get the numeric payload of the csv file
    # Fixed: ``np.float`` was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin ``float`` is the exact equivalent (float64 here).
    P = Q.astype(float)
    # P = np.around(P, 6) # round the dataset to avoid numerical instabilities
    if 'lz' in PROBLEM_TYPE:  # if the problem is an instance of regression problem
        # negate the label column (presumably so the residual can be written
        # as a single dot product -- TODO confirm against the solvers)
        P[:, -1] = -P[:, -1]
    else:
        P = preprocessData(P)
    return PointSet.PointSet(P=P, W=None, ellipsoid_max_iters=ELLIPSOID_MAX_ITER, problem_type=PROBLEM_TYPE,
                             use_svd=USE_SVD)
def checkIfFileExists(file_path):
    """
    Determine whether a filesystem entry exists at the given path.

    :param file_path: A string which contains a path of file.
    :return: True when something exists at ``file_path``, False otherwise.
    """
    return pathlib.Path(file_path).exists()
def createDirectory(directory_name):
    """
    Create an (empty) output directory under ``results/``.

    Idempotent: an already-existing directory is not an error.

    :param directory_name: name of the directory to create below ``results/``.
    :return: None
    :raises OSError: when the path exists but is not a directory, or when
                     creation fails for any other reason.
    """
    full_path = r'results/' + directory_name
    # exist_ok=True replaces the old try/except-and-isdir dance: an existing
    # directory is accepted silently, while a file at that path (or any other
    # failure) still raises OSError, exactly as before.
    os.makedirs(full_path, exist_ok=True)
def createRandomInitialVector(d):
    """
    Create a random orthogonal d x d matrix whose columns can each serve as an
    initial vector for the regression solvers.

    The matrix is still stored in the module-level global ``x0`` (as before),
    and is now also returned so callers that do
    ``x0 = createRandomInitialVector(P.d)`` receive the matrix instead of None.

    :param d: A scalar denoting a desired dimension.
    :return: the random d x d orthogonal matrix.
    """
    global x0
    x0 = np.random.randn(d, d)   # random dxd Gaussian matrix
    x0, _ = np.linalg.qr(x0)     # orthonormalize via QR decomposition
    return x0
############################################## Optimization methods ####################################################
def solveConvexRegressionProblem(P):
    """
    Solve the weighted lz-regression problem exactly with cvxpy.

    :param P: a PointSet-like object exposing ``P`` (points, last column is
              the label), ``W`` (weights) and ``d`` (dimension).
    :return: a pair (optimal weight vector, wall-clock seconds spent solving).
    """
    t0 = time.time()
    weights = cp.Variable(P.d, )
    residuals = cp.abs(cp.matmul(P.P[:, :-1], weights) - P.P[:, -1])
    # weighted sum of |residual|^Z, where Z is the module-level exponent
    objective = cp.sum(cp.multiply(P.W, cp.power(residuals, Z)))
    problem = cp.Problem(cp.Minimize(objective), [])
    problem.solve()
    elapsed = time.time() - t0
    print('Solving optimization problem in {:.4f} secs'.format(elapsed))
    return weights.value, elapsed
def solveNonConvexRegressionProblem(P):
    """
    Approximately solve the (possibly non-convex) weighted lz-regression
    problem via multi-start L-BFGS-B, returning the best restart.

    Fixes relative to the previous version:
      * the objective now sums the per-point losses (scipy requires a scalar)
        and indexes ``P.P`` instead of the PointSet object itself,
      * the starting point is read from the module-level orthogonal matrix
        ``x0`` refreshed by ``createRandomInitialVector`` (the function's
        return value was previously assigned, yielding None),
      * the best (x, value) over all restarts is tracked and returned, making
        the interface consistent with ``solveConvexRegressionProblem``.

    :param P: a PointSet-like object exposing ``P`` (last column is the
              label), ``W`` (weights) and ``d`` (dimension).
    :return: a pair (best weight vector found, wall-clock seconds spent).
    """
    start_time = time.time()
    func = lambda x: np.sum(np.multiply(P.W, np.abs(np.dot(P.P[:, :-1], x) - P.P[:, -1]) ** Z))
    grad = lambda x: sp.optimize.approx_fprime(x, func, EPSILON)
    optimal_x = None
    optimal_val = np.inf
    for i in range(OPTIMIZATION_NUM_INIT):
        createRandomInitialVector(P.d)  # refreshes the module-level global ``x0``
        res = sp.optimize.minimize(fun=func, x0=x0[:, 0], jac=grad, method='L-BFGS-B')
        if res.fun < optimal_val:
            optimal_val = res.fun
            optimal_x = res.x
    time_taken = time.time() - start_time
    return optimal_x, time_taken
def solveRegressionProblem(P):
    """
    Dispatch to the non-convex or convex regression solver depending on the
    configured problem type ('nc' in the name selects the non-convex path).

    :param P: a PointSet-like object describing the weighted input points.
    :return: whatever the chosen solver returns.
    """
    # NOTE(review): ``PROBLEM`` is never assigned anywhere in this chunk --
    # initializaVariables() sets ``PROBLEM_TYPE`` and the other helpers read
    # PROBLEM_TYPE, so this likely raises NameError unless PROBLEM is defined
    # in the (unseen) top of the file. Confirm and consider renaming.
    global PROBLEM
    if 'nc' in PROBLEM:
        return solveNonConvexRegressionProblem(P)
    else:
        return solveConvexRegressionProblem(P)
if __name__ == '__main__':
    # Ad-hoc driver: each commented call below is a one-off experiment that
    # can be re-enabled by hand; by default direct execution does nothing.
    # createSyntheticDan()
    # createRandomRegressionData()
    # testCauchy()
    # plotPointsBasedOnSens()
    # readRealData()
    pass
| [
"muradtuk@gmail.com"
] | muradtuk@gmail.com |
67adbe2bfc32e2a54b910fbe07c551fb51ef8b5d | aa7a250726e3bb64e3037b4b099573560cd072b5 | /python/Wakeup_on_LAN.py | fedd72bf7078513d1b4cc5613db41cf15134afa9 | [] | no_license | GeneralZero/ATF | 700ab28e67d5b9a02af4165996b6231d60432f95 | 78dbdf3345c1b04de1e4b8637b2a9be0e5c843d6 | refs/heads/master | 2020-05-06T13:23:35.391904 | 2020-03-24T03:46:54 | 2020-03-24T03:46:54 | 4,170,887 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 353 | py | #!/usr/bin/python
from socket import socket, AF_INET, SOCK_DGRAM, SOL_SOCKET, SO_BROADCAST

# Wake-on-LAN "magic packet": 6 bytes of 0xFF followed by the target MAC
# address repeated 16 times.
# NOTE(review): this is Python-2 style -- under Python 3 sendto() requires a
# bytes object, so the literal would have to become b'...'.
data = '\xFF\xFF\xFF\xFF\xFF\xFF' + '\xAA\xAA\xAA\xAA\xAA\xAA' * 16 # Where \xAA\xAA\xAA\xAA\xAA\xAA is the MAC Address
sock = socket(AF_INET, SOCK_DGRAM)
sock.setsockopt(SOL_SOCKET, SO_BROADCAST, 1)  # allow broadcasting on this UDP socket
sock.sendto(data, ('<broadcast>', 9))  # port 9 (discard) is the WoL convention
sock.close()
"Bdrid001@gmail.com"
] | Bdrid001@gmail.com |
3118055357e21e818369addcd8052d38382bdada | 060ce17de7b5cdbd5f7064d1fceb4ded17a23649 | /fn_soar_utils/fn_soar_utils/components/funct_soar_utils_artifact_hash.py | fa0c9212fa4a7c4ee6fd5991f38a41c0ca9545f1 | [
"MIT"
] | permissive | ibmresilient/resilient-community-apps | 74bbd770062a22801cef585d4415c29cbb4d34e2 | 6878c78b94eeca407998a41ce8db2cc00f2b6758 | refs/heads/main | 2023-06-26T20:47:15.059297 | 2023-06-23T16:33:58 | 2023-06-23T16:33:58 | 101,410,006 | 81 | 107 | MIT | 2023-03-29T20:40:31 | 2017-08-25T14:07:33 | Python | UTF-8 | Python | false | false | 2,521 | py | # -*- coding: utf-8 -*-
# (c) Copyright IBM Corp. 2018, 2022. All Rights Reserved.
# pragma pylint: disable=unused-argument, no-self-use
"""Function implementation"""
from json import dumps
from logging import getLogger
from hashlib import algorithms_guaranteed, new
from resilient_lib import get_file_attachment, get_file_attachment_metadata, validate_fields
from resilient_circuits import ResilientComponent, function, StatusMessage, FunctionResult, FunctionError
LOG = getLogger(__name__)
class FunctionComponent(ResilientComponent):
    """Component that implements the SOAR function 'soar_utils_artifact_hash'."""

    @function("soar_utils_artifact_hash")
    def _artifact_hash_function(self, event, *args, **kwargs):
        """Function: Calculate hashes for a file artifact.

        Generator consumed by the resilient-circuits framework: yields
        StatusMessage for progress, then FunctionResult (or FunctionError)
        with the artifact metadata plus one hex digest per guaranteed
        hashlib algorithm.
        """
        try:
            # Validate required inputs
            validate_fields(["incident_id", "artifact_id"], kwargs)

            # Get the function parameters:
            incident_id = kwargs.get("incident_id")  # number
            artifact_id = kwargs.get("artifact_id")  # number

            LOG.info("incident_id: %s", incident_id)
            LOG.info("artifact_id: %s", artifact_id)

            yield StatusMessage("Reading artifact...")

            # Pull the artifact's metadata and raw bytes from the SOAR server.
            client = self.rest_client()
            metadata = get_file_attachment_metadata(client, incident_id, artifact_id=artifact_id)
            data = get_file_attachment(client, incident_id, artifact_id=artifact_id)

            results = {
                "filename": metadata["name"],
                "content_type": metadata["content_type"],
                "size": metadata["size"],
                "created": metadata["created"]
            }

            # Hashlib provides a list of all "algorithms_available", but there's duplication, so
            # use the standard list: ('md5', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512')
            for algo in algorithms_guaranteed:
                impl = new(algo)
                impl.update(data)
                # shake algorithms require a 'length' parameter
                if algo.startswith("shake_"):
                    results[algo] = impl.hexdigest(int(algo.split('_')[-1]))
                else:
                    results[algo] = impl.hexdigest()

            LOG.info("%s sha1=%s", metadata["name"], results["sha1"])

            # Produce a FunctionResult with the return value
            LOG.debug(dumps(results))
            yield FunctionResult(results)
        except Exception:
            # The framework surfaces FunctionError (with traceback) to the user.
            yield FunctionError()
| [
"travis@example.org"
] | travis@example.org |
11af023167cde8c35bb2c4b22b1dd4d44852c42d | e89164093c99b2be87b201804718aa73a2ffdae3 | /leetcode/783. Minimum Distance Between BST Nodes.py | df5419cd15909bd4d9943cca22830c3f802cb3ea | [] | no_license | gsrr/leetcode | 748d585d0219ad1a1386794910c7410b50ce3c93 | 992bb618b605c3345318a0eeb2d2df4d11f6a2d5 | refs/heads/master | 2021-07-06T12:40:03.052470 | 2021-05-28T17:28:43 | 2021-05-28T17:28:43 | 76,116,620 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 704 | py | # Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
def ldr(node, arr):
    """
    In-order (left, root, right) traversal of a binary tree, appending each
    visited node's value to ``arr``; for a BST this yields sorted values.

    :param node: root TreeNode of the (sub)tree; must not be None.
    :param arr: list that accumulates the visited values in place.
    """
    # 'is not None' (identity) instead of '!= None' (equality) per PEP 8.
    if node.left is not None:
        ldr(node.left, arr)
    arr.append(node.val)
    if node.right is not None:
        ldr(node.right, arr)
class Solution(object):
    def minDiffInBST(self, root):
        """
        :type root: TreeNode
        :rtype: int

        Return the minimum difference between the values of any two nodes of
        the BST. The in-order listing of a BST is sorted, so the minimum
        difference must occur between adjacent elements of that listing.
        Assumes at least two nodes (LeetCode guarantee); returns 0 when the
        tree is empty.
        """
        # 'is None' instead of '== None'; 'range'/'zip' instead of the
        # Python-2-only 'xrange' so the code runs on Python 3 as well.
        if root is None:
            return 0
        arr = []
        ldr(root, arr)  # in-order traversal -> sorted values
        return min(b - a for a, b in zip(arr, arr[1:]))
| [
"jerrycheng1128@gmail.com"
] | jerrycheng1128@gmail.com |
8f9ef0086d4ee19c301005731bf09b20b0fc8a5c | 9c21e49150c99751231ad399bdba1850bb60c88c | /keepers/migrations/0012_auto_20180619_0056.py | 359b76f9d01a20e6c2e0917a4540eb44a4c47177 | [
"MIT"
] | permissive | netvigator/auctions | 3ab4086cb0bfbc736b17ede4e928f3ead2b08a4c | fc3766226cc65ac8694dffc74e893ecff8e7d07c | refs/heads/main | 2023-05-25T15:55:01.249670 | 2023-05-06T14:51:12 | 2023-05-06T14:51:12 | 92,816,101 | 0 | 0 | MIT | 2023-02-16T05:24:34 | 2017-05-30T09:14:39 | Python | UTF-8 | Python | false | false | 669 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-06-19 00:56
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('keepers', '0011_auto_20180615_1818'),
]
operations = [
migrations.AlterField(
model_name='item',
name='cSite',
field=models.CharField(max_length=14, verbose_name='Site'),
),
migrations.AlterField(
model_name='item',
name='tCreate',
field=models.DateTimeField(auto_now_add=True, db_index=True, verbose_name='created on'),
),
]
| [
"gravesricharde@yahoo.com"
] | gravesricharde@yahoo.com |
6bd7ea042d4999a8e460c696c13d9bf95d339f9e | 6f958fa3e9505d9cd0e75f51008de8e2d1c8c12f | /area/utils.py | 7bd1474cb10f69ff99f85048316243a66a11b5b0 | [] | no_license | yoachim/satellite_collisions | 3b59472ae6672dda7ff28916879ce6ed6370a42c | 4b5f475518cef526d117d83873e885a1fbd7aee8 | refs/heads/master | 2022-02-22T21:27:32.106308 | 2022-02-02T16:18:30 | 2022-02-02T16:18:30 | 220,065,836 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,845 | py | import datetime
import numpy as np
from astropy import time
from astropy import units as u
from astropy import constants as const
from astropy.coordinates import EarthLocation
from pycraf import satellite
from rubin_sim.utils import Site
import skyfield.sgp4lib as sgp4lib
from astropy.time import Time
import ephem
from rubin_sim.utils import _angularSeparation, _buildTree, _xyz_from_ra_dec, xyz_angular_radius
from rubin_sim.scheduler.utils import read_fields
import healpy as hp
# adapting from:
# https://github.com/cbassa/satellite_analysis
# https://nbviewer.jupyter.org/github/yoachim/19_Scratch/blob/master/sat_collisions/bwinkel_constellation.ipynb
def grow_hp(inmap, hpids, radius=1.75, replace_val=np.nan):
    """
    Grow a healpix mask by painting a disc of ``radius`` degrees around each
    of the given pixels.

    Parameters
    ----------
    inmap : np.array
        A HEALpix map
    hpids : array
        The healpixel values to grow around
    radius : float (1.75)
        The radius to grow around each point (degrees)
    replace_val : float (np.nan)
        The value to plug into the grown areas
    """
    nside = hp.npix2nside(np.size(inmap))
    centers_theta, centers_phi = hp.pix2ang(nside=nside, ipix=hpids)
    unit_vectors = hp.ang2vec(centers_theta, centers_phi)
    # Collect the disc of pixels around every seed pixel, then deduplicate.
    discs = []
    for vec in unit_vectors:
        discs.append(hp.query_disc(nside=nside, vec=vec, radius=np.radians(radius)))
    grown_pixels = np.unique(np.concatenate(discs))
    result = inmap + 0  # cheap copy so the caller's map is left untouched
    result[grown_pixels] = replace_val
    return result
def satellite_mean_motion(altitude, mu=const.GM_earth, r_earth=const.R_earth):
    '''
    Compute mean motion of satellite at altitude in Earth's gravitational field.

    See https://en.wikipedia.org/wiki/Mean_motion#Formulae
    '''
    # Kepler's third law: period = sqrt(4 pi^2 a^3 / mu); mean motion is 1/period.
    orbital_period = np.sqrt(4.0 * np.pi ** 2 * (altitude + r_earth) ** 3 / mu).to(u.day)
    return 1 / orbital_period
def tle_from_orbital_parameters(sat_name, sat_nr, epoch, inclination, raan,
                                mean_anomaly, mean_motion):
    '''
    Generate TLE strings from orbital parameters.

    Note: epoch has a very strange format: first two digits are the year, next three
    digits are the day from beginning of year, then fraction of a day is given, e.g.
    20180.25 would be 2020, day 180, 6 hours (UT?)
    '''
    # Note: RAAN = right ascention (or longitude) of ascending node

    def checksum(line):
        # TLE checksum: sum every digit, count each '-' as 1, take modulo 10.
        total = sum(int(ch) for ch in line[:-1] if ch.isdigit()) + line[:-1].count('-')
        return '{:s}{:1d}'.format(line[:-1], total % 10)

    tle0 = sat_name
    tle1 = checksum(
        '1 {:05d}U 20001A {:14.8f} .00000000 00000-0 50000-4 '
        '0 0X'.format(sat_nr, epoch))
    tle2 = checksum(
        '2 {:05d} {:8.4f} {:8.4f} 0001000 0.0000 {:8.4f} '
        '{:11.8f} 0X'.format(
            sat_nr, inclination.to_value(u.deg), raan.to_value(u.deg),
            mean_anomaly.to_value(u.deg), mean_motion.to_value(1 / u.day)
        ))

    return '\n'.join([tle0, tle1, tle2])
def create_constellation(altitudes, inclinations, nplanes, sats_per_plane, epoch=22050.1, name='Test'):
    """
    Build TLE strings for a whole constellation, shell by shell.

    :param altitudes: per-shell orbital altitudes (astropy length Quantity).
    :param inclinations: per-shell inclinations (astropy angle Quantity).
    :param nplanes: per-shell number of orbital planes.
    :param sats_per_plane: satellites per plane (1 triggers random placement).
    :param epoch: TLE epoch value shared by all generated satellites.
    :param name: prefix for the satellite names.
    :return: list of TLE strings, one per satellite.
    """
    tles = []
    sat_nr = 8000
    for alt, inc, n_planes, per_plane in zip(
            altitudes, inclinations, nplanes, sats_per_plane):
        if per_plane == 1:
            # random placement for lower orbits
            mean_anomalies = np.random.uniform(0, 360, n_planes) * u.deg
            node_longitudes = np.random.uniform(0, 360, n_planes) * u.deg
        else:
            # evenly spaced in-plane positions with a single random phase offset,
            # crossed with evenly spaced planes
            mean_anomalies = np.linspace(0.0, 360.0, per_plane, endpoint=False) * u.deg
            mean_anomalies += np.random.uniform(0, 360, 1) * u.deg
            node_longitudes = np.linspace(0.0, 360.0, n_planes, endpoint=False) * u.deg
            mean_anomalies, node_longitudes = np.meshgrid(mean_anomalies, node_longitudes)
            mean_anomalies = mean_anomalies.flatten()
            node_longitudes = node_longitudes.flatten()

        mean_motion = satellite_mean_motion(alt)
        for ma, raan in zip(mean_anomalies, node_longitudes):
            tles.append(
                tle_from_orbital_parameters(
                    name + ' {:d}'.format(sat_nr), sat_nr, epoch,
                    inc, raan, ma, mean_motion))
            sat_nr += 1

    return tles
def starlink_constellation(supersize=False, fivek=False, fourk=False):
    """
    Create a list of satellite TLE's

    :param supersize: replicate the shells 4x at staggered altitudes/inclinations.
    :param fivek: subsample the generated list down to roughly 5000 satellites.
    :param fourk: use the alternative ~4k-satellite shell configuration instead.
    :return: list of TLE strings (one per satellite).
    """
    #altitudes = np.array([550, 1110, 1130, 1275, 1325, 345.6, 340.8, 335.9])
    #inclinations = np.array([53.0, 53.8, 74.0, 81.0, 70.0, 53.0, 48.0, 42.0])
    #nplanes = np.array([72, 32, 8, 5, 6, 2547, 2478, 2493])
    #sats_per_plane = np.array([22, 50, 50, 75, 75, 1, 1, 1])

    # new values from Bruce Macintosh from FCC application
    altitudes = np.array([328, 334, 345, 360, 373, 499, 604, 614], dtype=float)
    inclinations = np.array([30., 40., 53., 96.9, 75., 53, 148., 115.7])
    nplanes = np.array([1, 1, 1, 40, 1, 1, 12, 18])
    sats_per_plane = np.array([7178, 7178, 7178, 50, 1998, 4000, 12, 18])

    if fourk:
        # smaller configuration; overrides the FCC-application shells above
        altitudes = np.array([550, 540, 570, 560, 560], dtype=float)
        inclinations = np.array([53, 53.2, 70, 97.6, 97.6])
        nplanes = np.array([72, 72, 36, 6, 4])
        sats_per_plane = np.array([22, 22, 20, 58, 43])

    if supersize:
        # Let's make 4 more altitude and inclinations
        new_altitudes = []
        new_inclinations = []
        new_nplanes = []
        new_sat_pp = []
        for i in np.arange(0, 4):
            new_altitudes.append(altitudes+i*20)
            new_inclinations.append(inclinations+3*i)
            new_nplanes.append(nplanes)
            new_sat_pp.append(sats_per_plane)

        altitudes = np.concatenate(new_altitudes)
        inclinations = np.concatenate(new_inclinations)
        nplanes = np.concatenate(new_nplanes)
        sats_per_plane = np.concatenate(new_sat_pp)

    altitudes = altitudes * u.km
    inclinations = inclinations * u.deg
    my_sat_tles = create_constellation(altitudes, inclinations, nplanes, sats_per_plane, name='Starl')

    if fivek:
        # thin the list with a uniform stride to land near 5000 entries
        stride = round(len(my_sat_tles)/5000)
        my_sat_tles = my_sat_tles[::stride]

    return my_sat_tles
time_J2000 = datetime.datetime(2000, 1, 1, 12, 0)
def _propagate(sat, dt):
    '''
    True equator mean equinox (TEME) position from `sgp4` at given time. Then converted to ITRS

    Parameters
    ----------
    sat : `sgp4.io.Satellite` instance
        Satellite object filled from TLE
    dt : `~datetime.datetime`
        Time

    Returns
    -------
    xs, ys, zs : float
        TEME (=True equator mean equinox) position of satellite [km]
    '''

    # pos [km], vel [km/s]
    position, velocity = sat.propagate(
        dt.year, dt.month, dt.day,
        dt.hour, dt.minute, dt.second + dt.microsecond / 1e6)

    if position is None:
        raise ValueError('Satellite propagation error')

    # I _think_ this is supposed to take time since J2000 in days?
    # looking at https://space.stackexchange.com/questions/25988/sgp4-teme-frame-to-j2000-conversion
    # NOTE(review): jd_ut1 here is days since the J2000 epoch, ignoring dt's
    # microseconds; confirm that TEME_to_ITRF indeed expects this convention.
    jd_ut1 = dt - time_J2000
    jd_ut1 = jd_ut1.days + jd_ut1.seconds/(3600.*24)

    # velocity is scaled by 86400 to convert km/s -> km/day for the conversion
    new_position, new_velocity = sgp4lib.TEME_to_ITRF(jd_ut1, np.array(position), np.array(velocity)*86400)

    return tuple(new_position.tolist())
vec_propagate = np.vectorize(_propagate, excluded=['sat'], otypes=[np.float64] * 3)
def lsst_location():
    """
    Build a pycraf SatelliteObserver located at the LSST (Rubin Observatory) site.

    :return: `pycraf.satellite.SatelliteObserver` positioned at Cerro Pachon.
    """
    lsst_site = Site('LSST')
    location = EarthLocation(lat=lsst_site.latitude, lon=lsst_site.longitude, height=lsst_site.height)
    return satellite.SatelliteObserver(location)
class Constellation(object):
    """
    Have a class to hold ephem satellite objects

    Parameters
    ----------
    sat_tle_list : list of str
        A list of satellite TLEs to be used
    alt_limit : float (30.)
        Altitude limit below which satellites are ignored (degrees)
    fov : float (3.5)
        Field of view diameter used for hit tests (degrees)
    tstep : float (5)
        The time step to use when computing satellite positions in an exposure
    exptime : float (30.)
        Exposure length sampled by the time steps (seconds)
    """
    def __init__(self, sat_tle_list, alt_limit=30., fov=3.5, tstep=5., exptime=30.):
        # Each TLE string is three newline-separated lines: name, line 1, line 2.
        self.sat_list = [ephem.readtle(tle.split('\n')[0], tle.split('\n')[1], tle.split('\n')[2]) for tle in sat_tle_list]
        self.alt_limit_rad = np.radians(alt_limit)
        self.fov_rad = np.radians(fov)
        self._make_observer()
        self._make_fields()
        self.tsteps = np.arange(0, exptime+tstep, tstep)/3600./24.  # to days
        self.radius = xyz_angular_radius(fov)

    def _make_fields(self):
        """
        Make tesselation of the sky
        """
        # RA and dec in radians
        fields = read_fields()

        # crop off so we only worry about things that are up
        good = np.where(fields['dec'] > (self.alt_limit_rad - self.fov_rad))[0]
        self.fields = fields[good]

        self.fields_empty = np.zeros(self.fields.size)

        # we'll use a single tessellation of alt az
        # NOTE: the field coordinates are fed into a kd-tree that is queried
        # in az/alt below, i.e. one fixed tessellation reused for every mjd.
        leafsize = 100
        self.tree = _buildTree(self.fields['RA'], self.fields['dec'], leafsize, scale=None)

    def _make_observer(self):
        # pyephem observer placed at the LSST site.
        telescope = Site(name='LSST')

        self.observer = ephem.Observer()
        self.observer.lat = telescope.latitude_rad
        self.observer.lon = telescope.longitude_rad
        self.observer.elevation = telescope.height

    def advance_epoch(self, advance=100):
        """
        Advance the epoch of all the satellites
        """
        # Because someone went and put a valueError where there should have been a warning
        # I prodly present the hackiest kludge of all time
        for sat in self.sat_list:
            sat._epoch += advance

    def update_mjd(self, mjd):
        """
        Compute alt/az and eclipse state of every satellite at the given MJD.

        Sets self.altitudes_rad, self.azimuth_rad, self.eclip, and
        self.above_alt_limit (indices of up-and-illuminated satellites).
        """
        self.observer.date = ephem.date(time.Time(mjd, format='mjd').datetime)

        self.altitudes_rad = []
        self.azimuth_rad = []
        self.eclip = []
        for sat in self.sat_list:
            try:
                sat.compute(self.observer)
            except ValueError:
                # stale TLE epoch; push epochs forward and retry once
                self.advance_epoch()
                sat.compute(self.observer)
            self.altitudes_rad.append(sat.alt)
            self.azimuth_rad.append(sat.az)
            self.eclip.append(sat.eclipsed)

        self.altitudes_rad = np.array(self.altitudes_rad)
        self.azimuth_rad = np.array(self.azimuth_rad)
        self.eclip = np.array(self.eclip)
        # Keep track of the ones that are up and illuminated
        self.above_alt_limit = np.where((self.altitudes_rad >= self.alt_limit_rad) & (self.eclip == False))[0]

    def fields_hit(self, mjd, fraction=False):
        """
        Return an array that lists the number of hits in each field pointing
        (or, when fraction=True, the fraction of field pointings hit at all).
        """
        mjds = mjd + self.tsteps
        result = self.fields_empty.copy()

        # convert the satellites above the limits to x,y,z and get the neighbors within the fov.
        for mjd in mjds:
            self.update_mjd(mjd)
            x, y, z = _xyz_from_ra_dec(self.azimuth_rad[self.above_alt_limit], self.altitudes_rad[self.above_alt_limit])
            if np.size(x) > 0:
                indices = self.tree.query_ball_point(np.array([x, y, z]).T, self.radius)
                final_indices = []
                for indx in indices:
                    final_indices.extend(indx)

                result[final_indices] += 1
        if fraction:
            n_hit = np.size(np.where(result > 0)[0])
            result = n_hit/self.fields_empty.size
        return result

    def check_pointing(self, pointing_alt, pointing_az, mjd):
        """
        See if a pointing has a satellite in it

        pointing_alt : float
           Altitude of pointing (degrees)
        pointing_az : float
           Azimuth of pointing (degrees)
        mjd : float
           Modified Julian Date at the start of the exposure

        Returns
        -------
        in_fov : float
            Returns the fraction of time there is a satellite in the field of view. Values >1 mean there were
            on average more than one satellite in the FoV. Zero means there was no satllite in the image the entire exposure.
        """
        mjds = mjd + self.tsteps
        in_fov = 0
        for mjd in mjds:
            self.update_mjd(mjd)
            ang_distances = _angularSeparation(self.azimuth_rad[self.above_alt_limit], self.altitudes_rad[self.above_alt_limit],
                                               np.radians(pointing_az), np.radians(pointing_alt))
            in_fov += np.size(np.where(ang_distances <= self.fov_rad)[0])
        in_fov = in_fov/mjds.size
        return in_fov

    def look_ahead(self, pointing_alt, pointing_az, mjds):
        """
        Return 1 if satellite in FoV, 0 if clear
        (one entry per requested mjd)
        """
        result = []
        for mjd in mjds:
            self.update_mjd(mjd)
            ang_distances = _angularSeparation(self.azimuth_rad[self.above_alt_limit], self.altitudes_rad[self.above_alt_limit],
                                               np.radians(pointing_az), np.radians(pointing_alt))
            if np.size(np.where(ang_distances <= self.fov_rad)[0]) > 0:
                result.append(1)
            else:
                result.append(0)
        return result
| [
"yoachim@uw.edu"
] | yoachim@uw.edu |
a694e62f4c790eab767286b4be22a9c5f5e4a41e | 8b20fdc16253b2b4e07ce28f4fd3120db4566783 | /pythainlp/__init__.py | 47bffa93eda32cec984d87f336d8c648c66c28bf | [
"Apache-2.0",
"Swift-exception"
] | permissive | johnnyduo/pythainlp | d8a850fa7b6d9dfed5eb23f84264caea1703f5fb | dbefc4c88ee8051a14e3be1a10a57670f861cd37 | refs/heads/master | 2021-06-19T23:49:43.564140 | 2017-07-06T10:36:58 | 2017-07-06T10:36:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 898 | py | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import six
if six.PY3:
"""
ไว้ใส่ความสามารถที่รองรับเฉพาะ Python 3.4+ เท่านั้น
"""
from pythainlp.sentiment import sentiment
from pythainlp.spell import hunspell,spell
from pythainlp.romanization import romanization,pyicu,royin
from pythainlp.tokenize import word_tokenize,tcc,etcc
from pythainlp.rank import rank
from pythainlp.change import texttothai,texttoeng
from pythainlp.number import nttn,nttt,ntnt,ntt,ttn,ttnt,number_format,numtowords,ReadNumber
from pythainlp.date import now
from pythainlp.tag import old,pos_tag
from pythainlp.collation import collation
from pythainlp.test import TestUM
from pythainlp.Text import Text
from pythainlp.MetaSound import MetaSound
from pythainlp.soundex import LK82,Udom83
from pythainlp.util import ngrams | [
"wannaphong@yahoo.com"
] | wannaphong@yahoo.com |
1b0c242b1b9290150f74b03b84de06e4fe4fb908 | c427e4dbe850eb170cd9bb864bca3e6cd980d6fe | /interface/gui.py | 08b8a7948bf2b77fa65e1e7480282f8179bfd80e | [
"MIT"
] | permissive | awforsythe/melgui | 66cdb6491bc30d58028751542aa613125e09aa24 | 61b677403ea1cd3324744b8f257f66de0bfed00c | refs/heads/master | 2020-06-01T07:45:53.586517 | 2013-12-09T21:08:25 | 2013-12-09T21:08:25 | 15,059,032 | 11 | 3 | null | null | null | null | UTF-8 | Python | false | false | 12,361 | py | """
Provides functionality related to Maya GUI controls.
"""
import re
import maya.cmds as mc
import maya.mel as mel
class Control(object):
"""
Represents a single UI control contained within a Gui. Provides a wrapper
for the MEL command associated with whatever control type, including
methods to edit and query the parameters of the control.
"""
def __init__(self, name, control_type, creation_flags, parent_name):
"""
Initializes a new control declaration with the given name and control
type. creation_flags is a string containing the MEL flags and arguments
used to create the control (excluding the command, the name, and the
-p[arent] flag). parent_name is the name of this control's parent, or
None if no parent is specified.
"""
self.name = name
self.control_type = control_type
self.creation_flags = creation_flags
self.parent_name = parent_name
def delete(self):
"""
Deletes this control and all of its children.
"""
mc.deleteUI(self.name)
def create(self):
"""
Executes the MEL command that creates this control based on its
creation parameters.
"""
# Construct the MEL command to create this control based on its
# parameters
parent_flag = (' -p %s' % self.parent_name) if self.parent_name else ''
command = '%s%s %s %s;' % (
self.control_type,
parent_flag,
self.creation_flags,
self.name)
# Attempt to execute the command as MEL. If unsuccessful, print the
# full command so we can diagnose the problem.
try:
mel.eval(command)
except RuntimeError, exc:
print '// %s //' % command
raise exc
def edit(self, **flags):
"""
Edits this control with the given new flag values. The provided
dictionary of flags need not contain the edit flag.
"""
def thunk_commands(flags):
"""
Modifies and returns the given dictionary so that all function
values associated with command flags are thunked into anonymous
functions that ignore the arguments passed to them by Maya.
"""
for flag, value in flags.iteritems():
if 'command' in flag.lower() and hasattr(value, '__call__'):
flags[flag] = lambda _: value()
return flags
flags['edit'] = True
self._call_command(thunk_commands(flags))
def query(self, flag):
"""
Returns the current value of the specified flag.
"""
return self._call_command({'query': True, flag: True})
def _call_command(self, flags):
"""
Private helper method that calls the MEL command associated with the
relevant type of control, passing in this control's name and the given
set of flag mappings.
"""
command = mc.__dict__[self.control_type]
return command(self.name, **flags)
@classmethod
def from_string(cls, name, command, parent_name):
"""
Instantiates a new Control object from the provided pieces of its
string declaration.
"""
# Capture an explicitly specified parent name in the declaration
parent_name_regex = re.search(r' -p(?:arent)? "?([A-Za-z0-9_]+)"? ?',
command)
# If a parent name has been specified, extract it from the command
if parent_name_regex:
parent_name = parent_name_regex.group(1)
command = command.replace(parent_name_regex.group(0), ' ')
# Split the MEL command used to create the control: the first word is
# the control type, and everything after that represents flags
command_tokens = command.split()
control_type = command_tokens[0]
creation_flags = ' '.join(command_tokens[1:])
# Instantiate a new control declaration from these parameters
return cls(name, control_type, creation_flags, parent_name)
class Gui(object):
"""
Represents a set of controls created from a string declaration via the
from_string classmethod. Once a Gui is created (by calling the create
method after a window has been created), individual controls from the
declaration can be accessed with square-bracket notation to be manipulated
individually. In addition, the edit method can be used to process a batch
of edits in a single call.
"""
def __init__(self, controls):
"""
Initializes a new Gui from the given list of Control objects.
"""
self._controls = []
self._control_lookup = {}
for control in controls:
self.add(control)
def __getitem__(self, key):
"""
Allows individual controls to be accessed by name using array-style
indexing into the Gui object.
"""
return self._control_lookup[key]
def add(self, control):
"""
Adds the specified control object to the Gui.
"""
self._controls.append(control)
self._control_lookup[control.name] = control
def create(self):
"""
Creates the Gui by creating all of its controls.
"""
for control in self._controls:
control.create()
def extend(self, other):
"""
Extends this Gui by adding and creating the controls contained in
another Gui object.
"""
for control in other._controls:
self.add(control)
other.create()
def edit(self, per_control_edits):
"""
Processes an unordered batch of edits for a subset of this Gui's
controls. per_control_edits is a dictionary mapping each control name
with a dictionary containing the flags and values specifying the edits
to be made to that control.
"""
for control_name, edit_flags in per_control_edits.iteritems():
self[control_name].edit(**edit_flags)
@classmethod
def from_string(cls, s):
"""
Instantiates a new Gui object from a string declaration.
"""
def strip_comments(line):
"""
Given a line, returns the same line with any comments stripped away.
Comments begin with a hash character ("#") and continue to the end
of the line thereafter.
"""
# Establish some local state to use in scanning the string.
# quote_open indicates whether the characters over which we're
# currently iterating are contained within a quoted span, and
# quote_chars contains the set of characters currently considered
# valid opening or closing characters for a quoted span.
quote_open = False
quote_chars = ['"', "'"]
def open_quote(quote_char):
"""
Modifies local state to indicate that we're scanning over a
region of the string that's enclosed in quotes. quote_char is
the character that opens the quote.
"""
quote_open = True
quote_chars = [quote_char]
def close_quote():
"""
Modifies local state to indicate that we're no longer scanning
over a quoted region of the string.
"""
quote_open = False
quote_chars = ['"', "'"]
# Iterate over each character in the string. If we encounter an
# unquoted hash character, we can immediately strip it away and
# return the part of the string before it. Otherwise, we keep
# iterating, checking each character to determine if we need to
# open or close a quote.
for i, c in enumerate(line):
if c == '#' and not quote_open:
return line[:i]
elif c in quote_chars:
close_quote() if quote_open else open_quote(c)
# Return the entire line unmodified if we encounter no hashes.
return line
def parse_line(lines):
"""
Parses the given line, returning a triple containing the line's
indentation level, the name of the control declared on that line,
and the creation command associated with that control.
"""
def get_indentation_level(line):
"""
Returns the number of spaces at the beginning of the line.
Treats each tab character as four spaces.
"""
match = re.match(r'[ \t]*', line)
if not match:
return 0
return len(match.group(0).replace('\t', ' '))
def split_control(line):
"""
Splits the given line at the first colon, returning the pair of
the control name and the creation command associated with that
control.
"""
first_colon_index = line.find(':')
return (line[:first_colon_index].strip(),
line[first_colon_index+1:].strip())
declaration_triples = []
for line in lines:
indentation_level = get_indentation_level(line)
name, command = split_control(line)
declaration_triples.append((indentation_level, name, command))
return declaration_triples
class ControlStack(object):
"""
Data structure used to keep track of the controls encountered when
parsing the input string.
"""
def __init__(self):
"""
Initializes an empty control stack.
"""
self._controls = [(-1, None)]
def pop(self, indentation_level):
"""
Pops controls off the top of the stack until the topmost
control is below the given indentation level.
"""
while self._controls[-1][0] >= indentation_level:
self._controls.pop()
def push(self, indentation_level, control_name):
"""
Pushes a new control onto the stack at the given indentation
level.
"""
assert indentation_level > self._controls[-1][0]
self._controls.append((indentation_level, control_name))
@property
def top_control(self):
"""
Returns the topmost control name on the stack.
"""
return self._controls[-1][1]
    # Strip comments and blank lines to give us only the meaningful lines
    # (lines that become empty after comment removal are discarded too).
    commentless_lines = [strip_comments(line) for line in s.splitlines()]
    meaningful_lines = [line for line in commentless_lines if line.strip()]
    # Iterate over each line to collect control declarations, using a stack
    # to infer parent controls based on indentation
    # controls accumulates the completed Control objects in source order.
    controls = []
    control_stack = ControlStack()
    for (indentation_level,
         control_name,
         control_command) in parse_line(meaningful_lines):
        # Slice off the top of the stack so that we're back to the last-seen
        # control that's below the indentation level of the current one
        control_stack.pop(indentation_level)
        # Create a new control declaration, using the new top of the stack
        # as its parent control
        controls.append(Control.from_string(control_name,
                                            control_command,
                                            control_stack.top_control))
        # Push the current control onto the stack, as it's now the last-seen
        # control of its indentation level
        control_stack.push(indentation_level, control_name)
    # Instantiate and return a new Gui object from the parsed controls
    return cls(controls)
| [
"awforsythe@gmail.com"
] | awforsythe@gmail.com |
ffb975e459aadcb6c155d65a8f4faff7915684c8 | d04bb7504bb9c77554b37d3b11d191f9b7e4c5dd | /napalm_Aclospf_Multidev.py | 6102f3f843920c8d893686d9d554f0cefac65870 | [] | no_license | amitk14595/Advance-Network-Automation-Scripts | 768e14702c10b25995297ae7983bc69f96d404dc | 4f288362c299e1d5da5b45ff4f9d07dd06673e34 | refs/heads/master | 2022-04-14T20:34:41.190292 | 2020-04-16T22:05:03 | 2020-04-16T22:05:03 | 256,339,198 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 843 | py | import json
from napalm import get_network_driver
# Inventory of IOS devices to configure; all share the same credentials.
devicelist = ['192.168.122.70',
              '192.168.122.71'
              ]
for ip_address in devicelist:
    print ("Connecting to " + str(ip_address))
    # NOTE(review): credentials are hard-coded in source -- consider
    # prompting for them or reading them from the environment.
    driver = get_network_driver('ios')
    iosv = driver(ip_address, 'amit', 'cisco')
    iosv.open()
    # Stage the ACL snippet, then commit only if it changes the running
    # config; otherwise discard the candidate.
    iosv.load_merge_candidate(filename='ACL1.cfg')
    diffs = iosv.compare_config()
    if len(diffs) > 0:
        print(diffs)
        iosv.commit_config()
    else:
        print('No ACL changes required.')
        iosv.discard_config()
    # Repeat the stage/compare/commit cycle for the OSPF snippet.
    iosv.load_merge_candidate(filename='ospf1.cfg')
    diffs = iosv.compare_config()
    if len(diffs) > 0:
        print(diffs)
        iosv.commit_config()
    else:
        print('No OSPF changes required.')
        iosv.discard_config()
    iosv.close()
| [
"noreply@github.com"
] | noreply@github.com |
069534b71755db5b1b403c9d65cf61f1b0a9f491 | 6b6e20004b46165595f35b5789e7426d5289ea48 | /workers/test/test_exportactionlogsworker.py | 0e4a728b421dd60c2b029851e5dfe04326ee7a91 | [
"Apache-2.0"
] | permissive | anwarchk/quay | 2a83d0ab65aff6a1120fbf3a45dd72f42211633b | 23c5120790c619174e7d36784ca5aab7f4eece5c | refs/heads/master | 2020-09-12T18:53:21.093606 | 2019-11-15T19:29:02 | 2019-11-15T19:29:02 | 222,517,145 | 0 | 0 | Apache-2.0 | 2019-11-18T18:32:35 | 2019-11-18T18:32:35 | null | UTF-8 | Python | false | false | 4,628 | py | import json
import os
from datetime import datetime, timedelta
import boto
from httmock import urlmatch, HTTMock
from moto import mock_s3_deprecated as mock_s3
from app import storage as test_storage
from data import model, database
from data.logs_model import logs_model
from storage import S3Storage, StorageContext, DistributedStorage
from workers.exportactionlogsworker import ExportActionLogsWorker, POLL_PERIOD_SECONDS
from test.fixtures import *
_TEST_CONTENT = os.urandom(1024)
_TEST_BUCKET = 'some_bucket'
_TEST_USER = 'someuser'
_TEST_PASSWORD = 'somepassword'
_TEST_PATH = 'some/cool/path'
_TEST_CONTEXT = StorageContext('nyc', None, None, None, None)
@pytest.fixture(params=['test', 'mock_s3'])
def storage_engine(request):
  """Yield a storage engine for the tests: either the in-process test
  storage or a DistributedStorage backed by a moto-mocked S3 bucket."""
  if request.param == 'test':
    yield test_storage
  else:
    with mock_s3():
      # Create a test bucket and put some test content.
      boto.connect_s3().create_bucket(_TEST_BUCKET)
      engine = DistributedStorage(
        {'foo': S3Storage(_TEST_CONTEXT, 'some/path', _TEST_BUCKET, _TEST_USER, _TEST_PASSWORD)},
        ['foo'])
      yield engine
def test_export_logs_failure(initialized_db):
  """Verify that when the storage upload fails, the worker raises and the
  callback is invoked with a 'failed' status payload."""
  # Make all uploads fail.
  test_storage.put_content('local_us', 'except_upload', 'true')
  repo = model.repository.get_repository('devtable', 'simple')
  user = model.user.get_user('devtable')
  worker = ExportActionLogsWorker(None)
  # Single-element list so the nested handler can record the callback body.
  called = [{}]
  @urlmatch(netloc=r'testcallback')
  def handle_request(url, request):
    called[0] = json.loads(request.body)
    return {'status_code': 200, 'content': '{}'}
  # NOTE(review): the parameter shadows the imported datetime class.
  def format_date(datetime):
    return datetime.strftime("%m/%d/%Y")
  now = datetime.now()
  with HTTMock(handle_request):
    # The failing upload should surface as an IOError from the worker.
    with pytest.raises(IOError):
      worker._process_queue_item({
        'export_id': 'someid',
        'repository_id': repo.id,
        'namespace_id': repo.namespace_user.id,
        'namespace_name': 'devtable',
        'repository_name': 'simple',
        'start_time': format_date(now + timedelta(days=-10)),
        'end_time': format_date(now + timedelta(days=10)),
        'callback_url': 'http://testcallback/',
        'callback_email': None,
      }, test_storage)
  # Restore normal upload behavior for subsequent tests.
  test_storage.remove('local_us', 'except_upload')
  assert called[0]
  assert called[0][u'export_id'] == 'someid'
  assert called[0][u'status'] == 'failed'
@pytest.mark.parametrize('has_logs', [
  True,
  False,
])
def test_export_logs(initialized_db, storage_engine, has_logs):
  """Exercise a successful export: logs (or none) are written to storage and
  the callback receives a 'success' payload with a downloadable URL."""
  # Delete all existing logs.
  database.LogEntry3.delete().execute()
  repo = model.repository.get_repository('devtable', 'simple')
  user = model.user.get_user('devtable')
  now = datetime.now()
  if has_logs:
    # Add new logs over a multi-day period.
    for index in range(-10, 10):
      logs_model.log_action('push_repo', 'devtable', user, '0.0.0.0', {'index': index},
                            repo, timestamp=now + timedelta(days=index))
  worker = ExportActionLogsWorker(None)
  # Single-element list so the nested handler can record the callback body.
  called = [{}]
  @urlmatch(netloc=r'testcallback')
  def handle_request(url, request):
    called[0] = json.loads(request.body)
    return {'status_code': 200, 'content': '{}'}
  # NOTE(review): the parameter shadows the imported datetime class.
  def format_date(datetime):
    return datetime.strftime("%m/%d/%Y")
  with HTTMock(handle_request):
    worker._process_queue_item({
      'export_id': 'someid',
      'repository_id': repo.id,
      'namespace_id': repo.namespace_user.id,
      'namespace_name': 'devtable',
      'repository_name': 'simple',
      'start_time': format_date(now + timedelta(days=-10)),
      'end_time': format_date(now + timedelta(days=10)),
      'callback_url': 'http://testcallback/',
      'callback_email': None,
    }, storage_engine)
  assert called[0]
  assert called[0][u'export_id'] == 'someid'
  assert called[0][u'status'] == 'success'
  # The exported-data URL differs by backend: local test storage vs S3.
  url = called[0][u'exported_data_url']
  if url.find('http://localhost:5000/exportedlogs/') == 0:
    storage_id = url[len('http://localhost:5000/exportedlogs/'):]
  else:
    assert url.find('https://some_bucket.s3.amazonaws.com/some/path/exportedactionlogs/') == 0
    storage_id, _ = url[len('https://some_bucket.s3.amazonaws.com/some/path/exportedactionlogs/'):].split('?')
  created = storage_engine.get_content(storage_engine.preferred_locations,
                                       'exportedactionlogs/' + storage_id)
  created_json = json.loads(created)
  if has_logs:
    # Every generated log index must appear; terminator entries are skipped.
    found = set()
    for log in created_json['logs']:
      if log.get('terminator'):
        continue
      found.add(log['metadata']['index'])
    for index in range(-10, 10):
      assert index in found
  else:
    assert created_json['logs'] == [{'terminator': True}]
| [
"jimmy.zelinskie+git@gmail.com"
] | jimmy.zelinskie+git@gmail.com |
993d0050d5f3731054fe14f269d1ff68186a0aa2 | 591291e2ecfddfcef7488494fe6bdee9b772b6aa | /app/web/wish.py | e9aa82d23bfbbd32a7c39c4eb074ffb8212119ff | [] | no_license | shenghuntianlang/Fisher | a549fffa7f19ac9c46920756a39b5071ab62d272 | 20d32e909cef247175c0fe6a5dfcdd7522c07246 | refs/heads/master | 2020-03-22T05:08:05.155299 | 2018-07-07T06:22:15 | 2018-07-07T06:22:15 | 139,544,366 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,331 | py | """
the view functions about some operations for my wishes
"""
from flask import flash, redirect, url_for, render_template
from flask_login import current_user, login_required
from app.libs.mail import send_mail
from app.models.base import db
from app.models.gift import Gift
from app.models.wish import Wish
from app.view_models.trade import MyTrades
from . import web
__author__ = '七月'
@web.route('/my/wish')
@login_required
def my_wish():
    """Render the current user's wish list, annotated with how many
    gifts are available for each wished book."""
    uid = current_user.id
    wishes_of_mine = Wish.get_user_wishes(uid)
    isbn_list = [wish.isbn for wish in wishes_of_mine]
    # Gift counts are aligned positionally with isbn_list.
    gift_count_list = Wish.get_gift_counts(isbn_list)
    view_model = MyTrades(wishes_of_mine, gift_count_list)
    return render_template('my_wish.html', wishes=view_model.trades)
@web.route('/wish/book/<isbn>')
@login_required
def save_to_wish(isbn):
    """Add the book with the given ISBN to the current user's wish list,
    unless it is already on one of their lists."""
    if current_user.can_save_to_list(isbn):
        # If the database operation raises, roll the session back so
        # that later database operations are not affected.
        with db.auto_commit():
            wish = Wish()
            wish.isbn = isbn
            # current_user works like current_app/request: a proxy
            # object for the currently logged-in user.
            wish.uid = current_user.id
            db.session.add(wish)
    else:
        flash('该书已存在于您的赠送清单或愿望清单,请勿重复添加')
    return redirect(url_for('web.book_detail', isbn=isbn))
@web.route('/satisfy/wish/<int:wid>')
@login_required
def satisfy_wish(wid):
    """Email the owner of wish `wid` offering the current user's matching
    gift; 404 if the wish does not exist."""
    wish = Wish.query.get_or_404(wid)
    # The current user must have uploaded this book as a gift first.
    gift = Gift.query.filter_by(uid=current_user.id, isbn=wish.isbn).first()
    if not gift:
        flash('你还没有上传此书,请点击"加入到赠送清单"添加此书.添加前,请确保自己可以赠送此书')
    else:
        send_mail(wish.user.email, '有人想赠送你一本书',
                  'email/satisify_wish.html', wish=wish, gift=gift)
        flash('已向他/她发送了一封邮件,如果/他/她愿意接受你的赠送,你将会收到一个鱼漂')
    return redirect(url_for('web.book_detail', isbn=wish.isbn))
@web.route('/wish/book/<isbn>/redraw')
@login_required
def redraw_from_wish(isbn):
    """Withdraw a not-yet-launched wish for the given ISBN."""
    # NOTE(review): no uid filter here -- this matches the first
    # un-launched wish for the ISBN from ANY user; confirm whether it
    # should be restricted to current_user.
    wish = Wish.query.filter_by(isbn=isbn, launched=False).first_or_404()
    with db.auto_commit():
        wish.delete()
    return redirect(url_for('web.my_wish'))
| [
"shenghuntianlang@outlook.com"
] | shenghuntianlang@outlook.com |
1b19ddccd1ce2a6b9f10eeaace553d9287da9bc3 | cce1a8382f211f5738402ee4e8f885db6d0ab3ea | /lustre-test/parse-mdtest-results.py | 346b01bdb49707aad288c5a3835c36f7c0962afe | [] | no_license | junhe/exp-script | 91383493770d2a9d9d315972c390131859397606 | e12c451425d01812d0f123198510211e33bf7988 | refs/heads/master | 2021-01-25T07:08:14.008273 | 2015-01-31T18:08:19 | 2015-01-31T18:08:19 | 20,899,345 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 756 | py | import sys
import fileinput
def isdataline(line):
    """Return True if `line` is an mdtest result row, i.e. it starts
    with one of the known operation names."""
    starts = ('Directory creation', 'Directory stat', 'Directory removal',
              'File creation', 'File stat', 'File read', 'File removal',
              'Tree creation', 'Tree removal')
    # str.startswith accepts a tuple of prefixes, replacing the manual loop.
    return line.startswith(starts)
# Read the raw mdtest output from stdin (or files named on the command
# line) into memory, stripping surrounding whitespace from each line.
lines = []
for line in fileinput.input():
    lines.append(line.strip())
# Emit a header row, then one normalized row per recognized result line.
print 'Operation Max Min Mean Std.Dev'
for line in lines:
    if isdataline(line):
        # Result lines look like "File creation : max min mean stddev".
        cols = line.split(':')
        rowname = cols[0].strip()
        # Join multi-word operation names with '.' so each output row is
        # a single whitespace-separated column per value.
        rowname = rowname.replace(' ', '.')
        values = cols[1].split()
        newline = [rowname] + values
        print ' '.join(newline)
    else:
        pass
| [
"jhe@cs.wisc.edu"
] | jhe@cs.wisc.edu |
ca87e2d4a6d85f9a84b735aec448de0ffb39330a | 8ac156c3bfeb4ce28836a1820cb88959424dab14 | /test/test_ocr_page_result_with_lines_with_location.py | db398b1f5b831b331b45a635bf3ed2b22f00da5b | [
"Apache-2.0"
] | permissive | Cloudmersive/Cloudmersive.APIClient.Python.OCR | 7b593464d31d3038663bedca3c085a161e356f20 | 90acf41a9b307213ef79f63ea4c749469ef61006 | refs/heads/master | 2023-04-03T06:03:41.917713 | 2023-03-27T05:30:38 | 2023-03-27T05:30:38 | 138,450,272 | 6 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,161 | py | # coding: utf-8
"""
ocrapi
The powerful Optical Character Recognition (OCR) APIs let you convert scanned images of pages into recognized text. # noqa: E501
OpenAPI spec version: v1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import cloudmersive_ocr_api_client
from cloudmersive_ocr_api_client.models.ocr_page_result_with_lines_with_location import OcrPageResultWithLinesWithLocation # noqa: E501
from cloudmersive_ocr_api_client.rest import ApiException
class TestOcrPageResultWithLinesWithLocation(unittest.TestCase):
    """Unit test stubs for the OcrPageResultWithLinesWithLocation model."""
    def setUp(self):
        """No fixtures are required for these stubs."""
        pass
    def tearDown(self):
        """Nothing to clean up."""
        pass
    def testOcrPageResultWithLinesWithLocation(self):
        """Stub test for OcrPageResultWithLinesWithLocation."""
        # FIXME: construct the object with its mandatory attributes set to
        # example values, e.g.:
        # model = cloudmersive_ocr_api_client.models.ocr_page_result_with_lines_with_location.OcrPageResultWithLinesWithLocation()  # noqa: E501
        pass
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| [
"35204726+Cloudmersive@users.noreply.github.com"
] | 35204726+Cloudmersive@users.noreply.github.com |
816c45d294921e6362d0eaa5cc2305ba0fb01d7f | a2fd604a8ef45b4e08cf832348d20b65e4468a79 | /phoenix/tests/test_caches.py | a4d7e9263d733aae95b47899c92b2a290f0313d0 | [] | no_license | darraes/data_structures | 8ca76a3fc3e961860861cd43f5b866b8e7e50427 | 4ff2c60e05d9275b163db59ed37b9f46ba50f3c0 | refs/heads/master | 2020-04-17T10:19:59.357548 | 2019-02-28T21:42:44 | 2019-02-28T21:42:44 | 166,497,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,679 | py | import unittest
from phoenix.cache import *
class TestFunctions(unittest.TestCase):
    """Behavioral tests for the LRU and LFU cache implementations.

    Note: each put/get sequence is order-dependent -- the access order is
    exactly what the eviction policy is being tested against.
    """
    def test_lru_new_insertions(self):
        # Filling past capacity (3) must evict the least recently used key.
        cache = LRUCache(3)
        cache.put("k1", "v1")
        self.assertEqual("v1", cache.get("k1"))
        cache.put("k2", "v2")
        self.assertEqual("v2", cache.get("k2"))
        cache.put("k3", "v3")
        self.assertEqual("v3", cache.get("k3"))
        cache.put("k4", "v4")
        self.assertEqual("v4", cache.get("k4"))
        self.assertEqual("v3", cache.get("k3"))
        self.assertEqual("v2", cache.get("k2"))
        self.assertEqual(None, cache.get("k1"))
    def test_lru_tail_to_head(self):
        # Re-putting the oldest key (the tail) must move it to the front.
        cache = LRUCache(3)
        cache.put("k1", "v1")
        cache.put("k2", "v2")
        cache.put("k3", "v3")
        cache.put("k1", "v11")
        cache.put("k4", "v4")
        self.assertEqual(None, cache.get("k2"))
        self.assertEqual("v3", cache.get("k3"))
        cache.put("k5", "v5")
        self.assertEqual(None, cache.get("k1"))
    def test_lru_middle_to_head(self):
        # Re-putting a middle entry must move it to the front.
        cache = LRUCache(3)
        cache.put("k1", "v1")
        cache.put("k2", "v2")
        cache.put("k3", "v3")
        cache.put("k2", "v22")
        cache.put("k4", "v4")
        self.assertEqual(None, cache.get("k1"))
        self.assertEqual("v22", cache.get("k2"))
        cache.put("k5", "v5")
        self.assertEqual(None, cache.get("k3"))
    def test_lru_head_to_head(self):
        # Re-putting the most recent entry must keep it at the front.
        cache = LRUCache(3)
        cache.put("k1", "v1")
        cache.put("k2", "v2")
        cache.put("k3", "v3")
        cache.put("k3", "v4")
        cache.put("k4", "v4")
        self.assertEqual(None, cache.get("k1"))
        self.assertEqual("v4", cache.get("k4"))
        cache.put("k5", "v5")
        self.assertEqual(None, cache.get("k2"))
    def test_lfu_4(self):
        # A zero-capacity LFU cache stores nothing.
        cache = LFUCache(0)
        cache.put(0, 0)
        self.assertEqual(None, cache.get(0))
    def test_lfu_3(self):
        # The least frequently used entry is evicted on overflow.
        cache = LFUCache(2)
        cache.put(1, 1)
        cache.put(2, 2)
        self.assertEqual(1, cache.get(1))
        cache.put(3, 3)
        self.assertEqual(None, cache.get(2))
        self.assertEqual(3, cache.get(3))
        cache.put(4, 4)
        self.assertEqual(None, cache.get(1))
        self.assertEqual(3, cache.get(3))
        self.assertEqual(4, cache.get(4))
    def test_lfu_2(self):
        # NOTE(review): this method performs only puts and asserts nothing;
        # it appears truncated -- confirm the intended expected evictions.
        cache = LFUCache(5)
        cache.put("k1", "v1")
        cache.put("k2", "v2")
        cache.put("k3", "v3")
        cache.put("k4", "v4")
        cache.put("k5", "v5")
        cache.put("k2", "v2")
        cache.put("k3", "v3")
        cache.put("k2", "v2")
        cache.put("k6", "v6")
        cache.put("k3", "v3")
| [
"daniel.arraes@gmail.com"
] | daniel.arraes@gmail.com |
60570467f232d79d8b785162fa8abe654121701e | b9dda07897d552466695c735c14d624cf89315bc | /triggerflow/service/eventsources/model.py | 220393130c315f170e96204d7db7a6ce32a801ff | [
"Apache-2.0"
] | permissive | JosepSampe/triggerflow | 02792ba96059f27c2d163ca88d50a10e030026ae | 66d8adcd6b31692663ee861c334608b74fecf884 | refs/heads/master | 2023-01-12T12:12:33.007616 | 2020-10-20T13:14:18 | 2020-10-20T13:14:18 | 264,998,376 | 0 | 0 | Apache-2.0 | 2020-05-18T16:32:06 | 2020-05-18T16:32:05 | null | UTF-8 | Python | false | false | 383 | py | from multiprocessing import Process
from threading import Thread
class EventSourceHook(Thread):
    """Abstract base for event-source hook threads.

    Subclasses are expected to override run(), commit() and stop().
    """
    def __init__(self, name: str, *args, **kwargs):
        # Extra positional/keyword arguments are accepted for subclass
        # flexibility but ignored by this base class.
        super().__init__()
        self.name = name
    def run(self):
        """Main loop of the hook; subclasses must override."""
        raise NotImplementedError
    def commit(self, records):
        """Acknowledge the given processed records; subclasses must override."""
        raise NotImplementedError
    def stop(self):
        """Request the hook to shut down; subclasses must override."""
        raise NotImplementedError
| [
"aitor.a98@gmail.com"
] | aitor.a98@gmail.com |
9c3d97049f7aafe909b5c14e869c372e5914ae54 | cbc3e997925225b43f7973762d4afd318b3daac0 | /Basis Kurs/ChineseChef.py | 25bfe8ce34b49f4ba82aede1d4f13ddcbd202a3d | [] | no_license | Stoorrzi/pythonProject | c95ed245154b3593027bc813a4bcff2a27898823 | 7d6699cef35775ddb9c964b91c727f9a6b6fbe37 | refs/heads/master | 2023-01-28T04:29:40.787938 | 2020-12-09T21:30:07 | 2020-12-09T21:30:07 | 320,084,196 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 116 | py | from Chef import Chef
class ChineseChef(Chef):
def make_fried_rice(self):
print("The chef makes rice") | [
"philipp.storz@t-online.de"
] | philipp.storz@t-online.de |
b07ecb55c6657bb6813f14198dd6c33d0828a5dd | 5366c6cd1be8a41ddbb6558f1ed30dec8dee56bb | /books/migrations/0001_initial.py | ea248dc82b5d9e56767408a5123ba86fb87f27d7 | [] | no_license | chiragkapoorck/local-library | 2a9fc0e41074ed4339f8be84425be51369071453 | b0786dd14e65dac54a6d4f4ba2198cdc15ab9ddd | refs/heads/master | 2021-04-23T17:38:31.881461 | 2020-04-15T15:29:24 | 2020-04-15T15:29:24 | 249,951,313 | 0 | 1 | null | 2020-04-03T10:51:02 | 2020-03-25T10:41:42 | Python | UTF-8 | Python | false | false | 3,264 | py | # Generated by Django 3.0.4 on 2020-03-30 15:08
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
    """Initial schema for the books app: Book, Genre, Language and
    BookInstance models (auto-generated by Django; avoid hand-editing)."""
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Book',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=200)),
                ('authors', models.CharField(help_text='Enter the author(s) name', max_length=1000)),
                ('summary', models.TextField(help_text='Enter a brief description of the book', max_length=1000)),
                ('isbn', models.CharField(help_text='13 Character <a href="https://www.isbn-international.org/content/what-isbn">ISBN number</a>', max_length=13, verbose_name='ISBN')),
            ],
        ),
        migrations.CreateModel(
            name='Genre',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(help_text='Enter a book genre (e.g. Science Fiction)', max_length=200)),
            ],
        ),
        migrations.CreateModel(
            name='Language',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('lang', models.CharField(help_text='Enter the language for this book', max_length=100)),
            ],
        ),
        # One physical copy of a Book; keyed by UUID rather than AutoField.
        migrations.CreateModel(
            name='BookInstance',
            fields=[
                ('id', models.UUIDField(default=uuid.uuid4, help_text='unique id for a book across the whole library', primary_key=True, serialize=False)),
                ('imprint', models.CharField(max_length=200)),
                ('due_back', models.DateField(blank=True, null=True)),
                ('status', models.CharField(blank=True, choices=[('m', 'Maintainance'), ('o', 'On Loan'), ('a', 'Available'), ('r', 'Reserved')], default='m', help_text='book availability', max_length=1)),
                ('book', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='books.Book')),
                ('borrower', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
                ('language', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='books.Language')),
            ],
            options={
                'ordering': ['due_back'],
                'permissions': (('can_mark_returned', 'Set book as returned'),),
            },
        ),
        # M2M and owner FK are added after Book exists.
        migrations.AddField(
            model_name='book',
            name='genre',
            field=models.ManyToManyField(help_text='Select a genre for this book', to='books.Genre'),
        ),
        migrations.AddField(
            model_name='book',
            name='owner',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
    ]
| [
"shubham.singh094@yahoo.com"
] | shubham.singh094@yahoo.com |
156329b156ceca3fece8ee8488fa57e449e5999b | 575579bcec300fb199b17e005421427250319507 | /opinion-mining-api-31.py | 988c119d8f31241d52092d16d6b8db80ed5800ff | [] | no_license | dj-more/opinion-mining | 26a3409ac10ff13f34eb27104bf9a4f5daf13ea9 | 5ad12e1568c2b4cdd75c87e6ff95157c9d42585b | refs/heads/master | 2023-04-17T17:34:08.773872 | 2020-08-04T00:34:42 | 2020-08-04T00:34:42 | 284,834,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 722 | py | import http.client, urllib.request, urllib.parse, urllib.error, base64
headers = {
    # Request headers
    'Content-Type': 'application/json',
    # NOTE(review): the subscription key is hard-coded in source control;
    # consider loading it from an environment variable instead.
    'Ocp-Apim-Subscription-Key': '3f7e7a5c8aaa4e5fa43ae374eb922ffb',
}

# Optional query-string parameters; '{boolean}'/'{string}' are sample
# placeholders and should be replaced with real values before use.
params = urllib.parse.urlencode({
    # Request parameters
    'showStats': '{boolean}',
    'model-version': '{string}',
})

try:
    conn = http.client.HTTPSConnection('southcentral.api.cognitive.microsoft.com')
    # An '&' is required between the fixed opinionMining flag and the
    # encoded parameters (the original fused them into one malformed token).
    conn.request("POST", "/text/analytics/v3.1-preview.1/sentiment?opinionMining=true&%s" % params, "{body}", headers)
    response = conn.getresponse()
    data = response.read()
    print(data)
    conn.close()
except Exception as e:
    # A generic Exception does not carry errno/strerror attributes (the
    # original formatter itself raised AttributeError for HTTP errors).
    print("Request failed: {0}".format(e))
"[dhanaji@gmail.com]"
] | [dhanaji@gmail.com] |
3e651f5d573a022540f057fded102a8593fd9e3e | 56c027afd86601c0a621f6985b435c2ab1757e70 | /summerProject-master/dublinbusjourney/dublinbuspredict/Algorithms/model_prototype_1.py | 9b20f6ff49843d9039c3a43fec335276d44e2ae0 | [] | no_license | redfa/summerbigproject | ed8e8e7b53bedd10756fd7560b3f5d55477bf84d | a523ae95c1b4f2839c518e404f512b49039cf30f | refs/heads/master | 2021-08-31T18:59:39.265032 | 2017-12-22T13:16:44 | 2017-12-22T13:16:44 | 115,116,600 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,872 | py | try:
import pymysql
pymysql.install_as_MySQLdb()
except:
pass
import pandas as pd
pd.options.mode.chained_assignment = None
from dateutil import parser
def model(bus_route, stopid, arrival_time, day, p_holiday, s_holiday, rtr, trip_id):
    """
    Predict a journey duration for one stop of a bus trip.

    bus_route    -- route identifier (unused in this prototype)
    stopid       -- stop whose coordinates and distance are looked up
    arrival_time -- parseable time string for the scheduled arrival
    day          -- weekday index; presumably 0=Monday ... 6=Sunday (TODO confirm)
    p_holiday    -- True on public holidays (treated like Sunday)
    s_holiday    -- True during school holidays
    rtr          -- fitted regressor exposing .predict()
    trip_id      -- sequence whose first element is the trip id
    """
    # 1 request the lon and lat from a query in sql based on the stop id.
    # NOTE(review): credentials are hard-coded and the connection is never
    # closed; consider a config file and a context manager.
    db = pymysql.connect(user='lucas', db='summerProdb', passwd='hello_world', host='csi6220-3-vm3.ucd.ie')
    cursor = db.cursor()
    # NOTE(review): the query is built by string concatenation from
    # trip_id/stopid -- use parameterized queries to avoid SQL injection.
    cursor.execute('SELECT bus_timetable.arrival_time, bus_timetable.direction, bus_timetable.stop_sequence, bus_stops.lat, bus_timetable.dist_nxt_stop, bus_stops.lon '
                   'FROM bus_timetable, bus_stops WHERE bus_timetable.trip_id = "'+ str(trip_id[0]) +\
                   '" AND bus_timetable.stop_id = "' + str(stopid) + \
                   '" ORDER BY bus_timetable.stop_sequence;')
    rows3 = cursor.fetchall()
    lat = rows3[0][3]
    lon = rows3[0][5]
    dist_nxt_stop = rows3[0][4]
    # NOTE(review): 'global direction' leaks per-call state at module
    # level; a plain local would suffice here.
    global direction
    direction = rows3[0][1]
    # 2 convert your arrival time to an integer. Arrival time needs to be replaced with your time variable.
    arrival_time = parser.parse(arrival_time)
    new_arrival_time = (arrival_time.hour*3600) + (arrival_time.minute*60) + (arrival_time.second)
    # Normalize to [0, 1] over the seconds in a day.
    new_arrival_time = new_arrival_time/86399
    # 3 convert your date of the week to business day vs Saturday and Sunday.
    business_day = False
    saturday = False
    sunday = False
    if day < 5:
        business_day = True
    elif day == 5:
        saturday = True
    elif (day == 6) or (p_holiday == True):
        sunday = True
    # Create the row we want to match up against the modelS
    input_data = pd.DataFrame({'lat': [lat],'lon': [lon], 'dist_nxt_stop': [dist_nxt_stop], \
                               'direction': [direction],'arrival_time': [new_arrival_time], 'business_day': [business_day],\
                               'Saturday': [saturday], 'Sunday': [sunday], 'school_holiday': [s_holiday],})
    # Ensure input data columns are in correct order, otherwise results will be incorrect.
    cols = list(input_data)
    cols.insert(0, cols.pop(cols.index('school_holiday')))
    cols.insert(0, cols.pop(cols.index('Sunday')))
    cols.insert(0, cols.pop(cols.index('Saturday')))
    cols.insert(0, cols.pop(cols.index('business_day')))
    cols.insert(0, cols.pop(cols.index('arrival_time')))
    cols.insert(0, cols.pop(cols.index('direction')))
    cols.insert(0, cols.pop(cols.index('dist_nxt_stop')))
    cols.insert(0, cols.pop(cols.index('lon')))
    cols.insert(0, cols.pop(cols.index('lat')))
    input_data = input_data.loc[:, cols]
    # 4 load in the model.
    # 5 predict the delay based on the input.
    predict_duration = rtr.predict(input_data)
    return predict_duration
| [
"noreply@github.com"
] | noreply@github.com |
e378342db455f9d7483d9f6cf7982882e5d2ca99 | b72596aa97a724f9f2cc6947b86a9b972846277f | /setup.py | 8cba9868cc12580e64d54561b344cf8fca1cdca5 | [
"MIT"
] | permissive | dumpmemory/hourglass-transformer-pytorch | 698cfcbc6a1b572efef37b5926d45dd598ff457b | 4be33bb41adfedf1b739cd24bec9481bc83a93e2 | refs/heads/main | 2023-09-03T01:45:41.994192 | 2021-11-10T15:49:06 | 2021-11-10T15:49:06 | 426,081,172 | 0 | 0 | MIT | 2021-11-10T15:55:51 | 2021-11-09T03:41:56 | Python | UTF-8 | Python | false | false | 750 | py | from setuptools import setup, find_packages
# Distribution metadata for the hourglass-transformer-pytorch package.
setup(
  name = 'hourglass-transformer-pytorch',
  packages = find_packages(),
  version = '0.0.6',
  license='MIT',
  description = 'Hourglass Transformer',
  author = 'Phil Wang',
  author_email = 'lucidrains@gmail.com',
  url = 'https://github.com/lucidrains/hourglass-transformer-pytorch',
  keywords = [
    'artificial intelligence',
    'attention mechanism',
    'transformers'
  ],
  # Runtime dependencies: einops for tensor rearrangement, torch >= 1.6.
  install_requires=[
    'einops',
    'torch>=1.6'
  ],
  classifiers=[
    'Development Status :: 4 - Beta',
    'Intended Audience :: Developers',
    'Topic :: Scientific/Engineering :: Artificial Intelligence',
    'License :: OSI Approved :: MIT License',
    'Programming Language :: Python :: 3.6',
  ],
)
| [
"lucidrains@gmail.com"
] | lucidrains@gmail.com |
b089b4b1b74c539953c27bc2d116cba959748d6c | c7910a655d286a0ee8a0d510fef83abcac7e5a2b | /prelabs-ZGuo412-master/Prelab11/Consumer.py | 793ef2bacec489970274e3ba53af5ce4202df945 | [] | no_license | ZGuo412/Software-Engineering | 5437d82516b4e6af9e37f3f657fcc889f3e7fe71 | e1377d8889e65fd6d984d0bf7fa712ad74aeccfa | refs/heads/master | 2021-10-27T22:30:10.266735 | 2021-10-17T07:36:22 | 2021-10-17T07:36:22 | 238,366,268 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,145 | py | #######################################################
# Author: <Ziyu Guo>
# email: <guo412@purdue.edu>
# ID: <ee364d25>
# Date: <2019/3/31>
#######################################################
import sys
from PyQt5.QtWidgets import QMainWindow, QApplication, QFileDialog
from Prelab11.BasicUI import *
import xml.etree.ElementTree as ET
DataPath = '~ee364/DataFolder/Prelab11'
class Consumer(QMainWindow, Ui_MainWindow):
    """Main window: loads student/component data from XML into the form
    and saves the form back out as 'target.xml'."""
    def __init__(self, parent=None):
        """Build the UI, wire signals, and collect the 20 component
        name/count line-edit widgets into lists for bulk access."""
        super(Consumer, self).__init__(parent)
        self.setupUi(self)
        self.btnSave.setEnabled(False)
        # txtCom/txtcname hold attribute-access *strings* used with exec()
        # below. NOTE(review): the com_txt/name_txt widget lists make this
        # exec-based wiring redundant and fragile.
        self.txtCom = ['']*20
        self.txtcname = [''] * 20
        for i in range(0,20):
            self.txtCom[i] = 'self.txtComponentCount_' + str(i + 1)
            self.txtcname[i] = 'self.txtComponentName_' + str(i + 1)
        self.txtStudentName.textChanged.connect(self.entry)
        self.txtStudentID.textChanged.connect(self.entry)
        # Direct references to the 20 count and 20 name widgets.
        self.com_txt = [self.txtComponentCount_1,self.txtComponentCount_2,self.txtComponentCount_3,self.txtComponentCount_4,self.txtComponentCount_5,self.txtComponentCount_6,self.txtComponentCount_7,self.txtComponentCount_8,self.txtComponentCount_9,self.txtComponentCount_10
        ,self.txtComponentCount_11,self.txtComponentCount_12,self.txtComponentCount_13,self.txtComponentCount_14,self.txtComponentCount_15,self.txtComponentCount_16,self.txtComponentCount_17,self.txtComponentCount_18,self.txtComponentCount_19,self.txtComponentCount_20]
        self.name_txt = [self.txtComponentName_1,self.txtComponentName_2,self.txtComponentName_3,self.txtComponentName_4,self.txtComponentName_5,self.txtComponentName_6,self.txtComponentName_7,self.txtComponentName_8,self.txtComponentName_9,self.txtComponentName_10,
                         self.txtComponentName_11,self.txtComponentName_12,self.txtComponentName_13,self.txtComponentName_14,self.txtComponentName_15,self.txtComponentName_16,self.txtComponentName_17,self.txtComponentName_18,self.txtComponentName_19,self.txtComponentName_20]
        # Connect every component field's textChanged to entry() via exec().
        for count in self.txtCom:
            count = count + '.textChanged.connect(self.entry)'
            exec(count)
        for count in self.txtcname:
            count = count + '.textChanged.connect(self.entry)'
            exec(count)
        self.cboCollege.currentIndexChanged.connect(self.entry)
        self.chkGraduate.stateChanged.connect(self.entry)
        self.btnClear.clicked.connect(self.click_clear)
        self.btnLoad.clicked.connect(self.loadData)
        self.btnSave.clicked.connect(self.saveXML)
    def loadData(self):
        """
        *** DO NOT MODIFY THIS METHOD! ***
        Obtain a file name from a file dialog, and pass it on to the loading method. This is to facilitate automated
        testing. Invoke this method when clicking on the 'load' button.
        You must modify the method below.
        """
        filePath, _ = QFileDialog.getOpenFileName(self, caption='Open XML file ...', filter="XML files (*.xml)")
        if not filePath:
            return
        self.loadDataFromFile(filePath)
    def loadDataFromFile(self, filePath):
        """
        Handles the loading of the data from the given file name. This method will be invoked by the 'loadData' method.
        *** YOU MUST USE THIS METHOD TO LOAD DATA FILES. ***
        *** This method is required for unit tests! ***
        """
        tree = ET.parse(filePath)
        root = tree.getroot()
        for child in root:
            # print(child.tag) (StudentName), StudentID, College, Components
            #print(child.attrib) ({'graduate': 'true'})
            if child.tag == 'StudentName':
                self.txtStudentName.setText(child.text)
                # The graduate flag rides on the StudentName element.
                if child.attrib['graduate'] == "true":
                    self.chkGraduate.setChecked(True)
            if child.tag == 'StudentID':
                self.txtStudentID.setText(child.text)
            if child.tag == 'College':
                self.cboCollege.setCurrentIndex(self.cboCollege.findText(child.text))
            if child.tag == 'Components':
                child_name = list()
                child_count = list()
                for component in child:
                    # NOTE(review): relies on attribute insertion order
                    # being (name, count) -- confirm for all input files.
                    ans = list(component.attrib.values())
                    child_name.append(ans[0])
                    child_count.append(ans[1])
                # Only the first 20 components fit in the form.
                for i in range(0,len(child_name)):
                    if i >= 20:
                        break
                    self.name_txt[i].setText(child_name[i])
                    self.com_txt[i].setText(child_count[i])
    def click_clear(self):
        """Reset every field and restore the initial button states."""
        self.txtStudentID.clear()
        self.txtStudentName.clear()
        for count in self.txtCom:
            count = count + '.clear()'
            exec(count)
        for count in self.txtcname:
            count = count + '.clear()'
            exec(count)
        self.cboCollege.setCurrentIndex(0)
        self.chkGraduate.setChecked(False)
        self.btnLoad.setEnabled(True)
        self.btnClear.setEnabled(True)
        self.btnSave.setEnabled(False)
    def entry(self):
        """Enable saving once any field holds data.

        NOTE(review): every condition below is effectively always True --
        `self.txtStudentName.text` is a bound method (never ''), the
        `is not ''` identity checks are unreliable, and exec() returns
        None -- so butt() fires on any signal. Needs a rewrite using
        `widget.text() != ''`.
        """
        if self.txtStudentName.text is not '':
            self.butt()
        if self.txtStudentID.text is not '':
            self.butt()
        for count in self.txtCom:
            count = count + '.text'
            exec(count)
            if exec(count) is not '':
                self.butt()
        for count in self.txtcname:
            count = count + '.text'
            exec(count)
            if exec(count) is not '':
                self.butt()
        if self.chkGraduate.checkState is True:
            self.butt()
        if self.cboCollege.currentIndex is not '':
            self.butt()
    def butt(self):
        """Switch the buttons into 'data entered' mode (save enabled)."""
        self.btnLoad.setEnabled(False)
        self.btnSave.setEnabled(True)
    def saveXML(self):
        """Serialize the form contents to 'target.xml' in the same schema
        that loadDataFromFile() reads."""
        graduate = 'false'
        if self.chkGraduate.isChecked() is True:
            graduate = 'true'
        s_name = self.txtStudentName.text()
        s_id = self.txtStudentID.text()
        college = self.cboCollege.currentText()
        Cname = list()
        Ccount = list()
        # Collect only the non-empty component fields.
        # NOTE(review): `is not ''` identity checks are unreliable; use != ''.
        for name in self.name_txt:
            if name.text() is not '':
                Cname.append(name.text())
        for count in self.com_txt:
            if count.text() is not '':
                Ccount.append(count.text())
        with open('target.xml', 'w') as output:
            output.write('<?xml version="1.0" encoding="UTF-8"?>\n')
            output.write('<Content>\n')
            output.write('    <StudentName graduate="'+graduate+'">'+s_name+'</StudentName>\n')
            output.write('    <StudentID>'+s_id+'</StudentID>\n')
            output.write('    <College>'+college+'</College>\n')
            output.write('    <Components>\n')
            for i in range(0, len(Ccount)):
                output.write('        <Component name="'+Cname[i]+'" count="'+Ccount[i]+'" />\n')
            output.write('    </Components>\n</Content>')
if __name__ == "__main__":
currentApp = QApplication(sys.argv)
currentForm = Consumer()
currentForm.show()
currentApp.exec_()
| [
"noreply@github.com"
] | noreply@github.com |
7dc54daf64f1034f96696ae087a56afedb35f04b | f2b049f68b91841811968c65e06facbdedc2483a | /Practice_exercises/Profit_loss_calculator.py | 7d03a1ec81e9d8b50f2032fc61005a75bb767500 | [] | no_license | Aswinraj-023/Basics_Python | 1cceb4a607b8969906b009f845a8183adc8297b5 | a4e5a6fd30dadf29d9ac10e54630962d06b24c0f | refs/heads/main | 2023-08-28T03:43:48.803279 | 2021-11-13T15:28:18 | 2021-11-13T15:28:18 | 424,990,489 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 649 | py | # 2) Loss calculator
goods_price = 200
profit = 0
counterfiet_money = 1000 #value of counterfiet_money
shopkeeper_1 = counterfiet_money # counterfiet_money given by customer to shopkeeper_1
#Shopkeeper_2 has a change of 1000Rs
shopkeeper_2 = 1000
# counterfiet_money exchanged from shopkeeper_1 to shopkeeper_2
counterfiet_money = shopkeeper_2
shopkeeper_1 = 1000
customer_change = shopkeeper_1 - goods_price
#shopkeeper_2 returns the counterfiet_money
shopkeeper_1 = counterfiet_money
loss = counterfiet_money + goods_price
print("Customer change is : ",customer_change)
print("The Loss Encountered by Shopkeeper_1 is ",loss)
| [
"noreply@github.com"
] | noreply@github.com |
34177aaf3d8e4472f51189bd33d2c6658fe3cd66 | 9b422078f4ae22fe16610f2ebc54b8c7d905ccad | /xlsxwriter/test/comparison/test_image_bytes01.py | 02dba5d0f8a119b040fad480338e187a1031b18b | [
"BSD-2-Clause-Views"
] | permissive | projectsmahendra/XlsxWriter | 73d8c73ea648a911deea63cb46b9069fb4116b60 | 9b9d6fb283c89af8b6c89ad20f72b8208c2aeb45 | refs/heads/master | 2023-07-21T19:40:41.103336 | 2023-07-08T16:54:37 | 2023-07-08T16:54:37 | 353,636,960 | 0 | 0 | NOASSERTION | 2021-04-01T08:57:21 | 2021-04-01T08:57:20 | null | UTF-8 | Python | false | false | 1,466 | py | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2021, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparison_test import ExcelComparisonTest
from ...workbook import Workbook
from io import BytesIO
class TestCompareXLSXFiles(ExcelComparisonTest):
    """
    Test file created by XlsxWriter against a file created by Excel.

    """

    def setUp(self):
        self.set_filename('image01.xlsx')

    def _red_png_data(self):
        # Read the fixture image fully into an in-memory stream. The 'with'
        # block closes the file handle even if the read raises (the original
        # open()/close() pair leaked the handle on error).
        with open(self.image_dir + 'red.png', 'rb') as image_file:
            return BytesIO(image_file.read())

    def test_create_file(self):
        """Test the creation of a simple XlsxWriter file with image(s)."""

        workbook = Workbook(self.got_filename)
        worksheet = workbook.add_worksheet()

        worksheet.insert_image('E9', 'red.png',
                               {'image_data': self._red_png_data()})

        workbook.close()

        self.assertExcelEqual()

    def test_create_file_in_memory(self):
        """Test the creation of a simple XlsxWriter file with image(s),
        with the workbook kept entirely in memory."""

        workbook = Workbook(self.got_filename, {'in_memory': True})
        worksheet = workbook.add_worksheet()

        worksheet.insert_image('E9', 'red.png',
                               {'image_data': self._red_png_data()})

        workbook.close()

        self.assertExcelEqual()
| [
"jmcnamara@cpan.org"
] | jmcnamara@cpan.org |
3c51dcc2e73e3f43318e71887d695fe2532c06b9 | a4ea525e226d6c401fdb87a6e9adfdc5d07e6020 | /src/azure-cli/azure/cli/command_modules/network/aaz/latest/network/virtual_appliance/site/_delete.py | f453c8731d6e69b3932912be786b732d7da64fb3 | [
"MIT",
"BSD-3-Clause",
"LGPL-2.0-or-later",
"GPL-1.0-or-later",
"MPL-2.0",
"LGPL-2.1-only",
"Apache-2.0",
"LGPL-2.1-or-later",
"BSD-2-Clause"
] | permissive | Azure/azure-cli | 13340eeca2e288e66e84d393fa1c8a93d46c8686 | a40fd14ad0b6e89720a2e58d4d9be3a6ce1535ca | refs/heads/dev | 2023-08-17T06:25:37.431463 | 2023-08-17T06:00:10 | 2023-08-17T06:00:10 | 51,040,886 | 4,018 | 3,310 | MIT | 2023-09-14T11:11:05 | 2016-02-04T00:21:51 | Python | UTF-8 | Python | false | false | 5,731 | py | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
@register_command(
    "network virtual-appliance site delete",
    is_preview=True,
    confirmation="Are you sure you want to perform this operation?",
)
class Delete(AAZCommand):
    """Delete an Azure network virtual appliance site.

    :example: Delete an Azure network virtual appliance site.
        az network virtual-appliance site delete -n MySite -g MyRG --appliance-name MyName -y
    """

    # Generated by aaz-dev-tools: maps the command to the ARM resource and
    # API version it operates on.
    _aaz_info = {
        "version": "2021-08-01",
        "resources": [
            ["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.network/networkvirtualappliances/{}/virtualappliancesites/{}", "2021-08-01"],
        ]
    }

    AZ_SUPPORT_NO_WAIT = True

    def _handler(self, command_args):
        # Long-running operation: return a poller over _execute_operations.
        super()._handler(command_args)
        return self.build_lro_poller(self._execute_operations, None)

    _args_schema = None

    @classmethod
    def _build_arguments_schema(cls, *args, **kwargs):
        # Build (once, then cache on the class) the CLI argument schema.
        if cls._args_schema is not None:
            return cls._args_schema
        cls._args_schema = super()._build_arguments_schema(*args, **kwargs)

        # define Arg Group ""

        _args_schema = cls._args_schema
        _args_schema.appliance_name = AAZStrArg(
            options=["--appliance-name"],
            help="The name of Network Virtual Appliance.",
            required=True,
            id_part="name",
        )
        _args_schema.resource_group = AAZResourceGroupNameArg(
            required=True,
        )
        _args_schema.name = AAZStrArg(
            options=["-n", "--name"],
            help="The name of Network Virtual Appliance Site.",
            required=True,
            id_part="child_name_1",
        )
        return cls._args_schema

    def _execute_operations(self):
        self.pre_operations()
        yield self.VirtualApplianceSitesDelete(ctx=self.ctx)()
        self.post_operations()

    @register_callback
    def pre_operations(self):
        pass

    @register_callback
    def post_operations(self):
        pass

    class VirtualApplianceSitesDelete(AAZHttpOperation):
        # HTTP DELETE against the virtualApplianceSites ARM resource,
        # polled as a long-running operation via the Location header.
        CLIENT_TYPE = "MgmtClient"

        def __call__(self, *args, **kwargs):
            request = self.make_request()
            session = self.client.send_request(request=request, stream=False, **kwargs)
            # 202/200 -> deletion accepted/running; 204 -> already gone.
            if session.http_response.status_code in [202]:
                return self.client.build_lro_polling(
                    self.ctx.args.no_wait,
                    session,
                    self.on_200,
                    self.on_error,
                    lro_options={"final-state-via": "location"},
                    path_format_arguments=self.url_parameters,
                )
            if session.http_response.status_code in [200]:
                return self.client.build_lro_polling(
                    self.ctx.args.no_wait,
                    session,
                    self.on_200,
                    self.on_error,
                    lro_options={"final-state-via": "location"},
                    path_format_arguments=self.url_parameters,
                )
            if session.http_response.status_code in [204]:
                return self.client.build_lro_polling(
                    self.ctx.args.no_wait,
                    session,
                    self.on_204,
                    self.on_error,
                    lro_options={"final-state-via": "location"},
                    path_format_arguments=self.url_parameters,
                )
            return self.on_error(session.http_response)

        @property
        def url(self):
            return self.client.format_url(
                "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkVirtualAppliances/{networkVirtualApplianceName}/virtualApplianceSites/{siteName}",
                **self.url_parameters
            )

        @property
        def method(self):
            return "DELETE"

        @property
        def error_format(self):
            return "ODataV4Format"

        @property
        def url_parameters(self):
            parameters = {
                **self.serialize_url_param(
                    "networkVirtualApplianceName", self.ctx.args.appliance_name,
                    required=True,
                ),
                **self.serialize_url_param(
                    "resourceGroupName", self.ctx.args.resource_group,
                    required=True,
                ),
                **self.serialize_url_param(
                    "siteName", self.ctx.args.name,
                    required=True,
                ),
                **self.serialize_url_param(
                    "subscriptionId", self.ctx.subscription_id,
                    required=True,
                ),
            }
            return parameters

        @property
        def query_parameters(self):
            parameters = {
                **self.serialize_query_param(
                    "api-version", "2021-08-01",
                    required=True,
                ),
            }
            return parameters

        def on_200(self, session):
            pass

        def on_204(self, session):
            pass
class _DeleteHelper:
    """Helper class for Delete"""


# Only the Delete command class is part of this module's public API.
__all__ = ["Delete"]
| [
"noreply@github.com"
] | noreply@github.com |
f8e387f4fa2c8a56db352d63b9b902c1ef6170c6 | d41f0ef7b861d32e4c060dfc7c70485cee68a0cc | /binary_tree_traversal.py | 8b1abcfa6997b825e749c9e922e6736fe5919c5e | [] | no_license | guojy8993/common-algotihms-you-shoud-know | 46048e2b5be402e0d8396818b092615c008b01c8 | bc35aff3ac89b73266a0b060bc5df3921e515688 | refs/heads/master | 2021-01-21T05:04:47.115484 | 2017-02-26T10:13:32 | 2017-02-26T10:13:32 | 83,128,576 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,896 | py | #!/usr/bin/env python
# Author guojy8993@163.com
# Date 2017/02/25
# Desc Usage display of Binary-Tree-Traversal
'''
Binary Tree Traversal
http://blog.csdn.net/gfj0814/article/details/51637696
'''
class Node(object):
    """A binary-tree node: a value plus optional left/right children."""

    def __init__(self, data=0, left=None, right=None):
        self.data = data
        self.left = left
        self.right = right

    def __repr__(self):
        # Debug-friendly representation; children are shown by value only.
        return "Node(%r)" % (self.data,)
class BTree(object):
    """Recursive pre/in/post-order traversals that print each node's data.

    Prints use the parenthesized single-argument form, which behaves
    identically under Python 2 (parenthesized expression) and Python 3
    (function call), unlike the original bare ``print x`` statements.
    """

    def __init__(self):
        pass

    def preOrder(self, node):
        """Visit root, then left subtree, then right subtree."""
        if not node:
            return
        print(node.data)
        self.preOrder(node.left)
        self.preOrder(node.right)

    def inOrder(self, node):
        """Visit left subtree, then root, then right subtree."""
        if not node:
            return
        self.inOrder(node.left)
        print(node.data)
        self.inOrder(node.right)

    def postOrder(self, node):
        """Visit left subtree, then right subtree, then root."""
        if not node:
            return
        self.postOrder(node.left)
        self.postOrder(node.right)
        print(node.data)
if __name__ == "__main__":
    # ASCII diagram of the sample tree built below (node 8 and 9 hang
    # under node 4). Python 2 script: bare print statements.
    g = """
                    (1)
        -------------+------------
       +                         +
      (2)                       (3)
 ---------+-----------    ---------+-----------
 +                   +    +                   +
(4)                 (5)  (6)                 (7)
--------
+(8)  +(9)
    """
    print g
    # Build leaves first, then wire parents up to the root (node01).
    node05 = Node(data=5)
    node06 = Node(data=6)
    node07 = Node(data=7)
    node03 = Node(data=3, left=node06, right=node07)
    node09 = Node(data=9)
    node08 = Node(data=8)
    node04 = Node(data=4, left=node08, right=node09)
    node02 = Node(data=2, left=node04, right=node05)
    node01 = Node(data=1, left=node02, right=node03)
    tree = BTree()
    # Demonstrate all three traversal orders on the same tree.
    print "PreOrder traversal:"
    tree.preOrder(node01)
    print "InOrder traversal:"
    tree.inOrder(node01)
    print "PostOrder traversal:"
    tree.postOrder(node01)
| [
"guojy8993@163.com"
] | guojy8993@163.com |
28cdd3828cedfdafaf3fbeba275c59ee2be9fe8d | f8c94ed02cca850dd8dffafa181847986bc7244e | /website/urls.py | da20a08993b590af072bf5690c9c6d14c49c01f4 | [] | no_license | perillaseed/btp | 655e302ffc541b4f4b7e8382ff12c157c52d69f5 | 483cedf4fdfa32f04ef980cd911cb8f09aa55fdb | refs/heads/master | 2021-01-16T20:36:30.115193 | 2013-05-25T18:46:33 | 2013-05-25T18:46:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,017 | py | from django.conf.urls.defaults import *
from django.conf import settings
from django.conf.urls.static import static
import os
# Uncomment the next two lines to enable the admin:
# from django.contrib import admin
# admin.autodiscover()
# Legacy Django URL configuration (django.conf.urls.defaults-era API).
urlpatterns = patterns('',
    # Example:
    (r'^vperf/', include('Video_Performance.urls')),
    (r'^wperf/', include('wperf.urls')),
    #(r'^ctrl/', include('ctrl.urls')),
    #(r'^log/', include('log.urls')),
    # Uncomment the admin/doc line below to enable admin documentation:
    # (r'^admin/doc/', include('django.contrib.admindocs.urls')),
    # Uncomment the next line to enable the admin:
    # (r'^admin/', include(admin.site.urls)),
)
# static files
# NOTE(review): document_root is derived from os.getcwd(), so serving
# depends on the process working directory -- fragile in deployment.
#urlpatterns += patterns('', (r'^static/(.*)$', 'django.views.static.serve', {'document_root': settings.STATIC_ROOT}),)
urlpatterns+=static(settings.STATIC_URL, document_root=os.getcwd()+"/Video_Performance/static/")
# media files
urlpatterns+=static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| [
"harsh@agni.(none)"
] | harsh@agni.(none) |
159750faef8bea05f5ecc528c3f8c8785dbf06ec | c4821ba4050082890e8cc086bd9c4461383e6f18 | /RL-Quadcopter_private-master/quad_controller_rl/src/quad_controller_rl/agents/ddpg_agent_combined.py | e2b5c77fc0f962e9b3bb03ecf66509a7313ddca0 | [] | no_license | ppujari/ReinforcementLearning | ed1093fcdc62b8e1b45964652c9e960dc3abd062 | 6a67540ea70badb78b2243f12af1630f18c9d728 | refs/heads/master | 2021-08-31T00:31:22.205110 | 2017-12-20T00:53:12 | 2017-12-20T00:53:12 | 114,410,495 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,356 | py | """Combined agent."""
import numpy as np
from quad_controller_rl.agents.base_agent import BaseAgent
from .ReplayBuffer import ReplayBuffer
from .ActorNetwork import ActorNetwork
from .CriticNetwork import CriticNetwork
from keras.initializers import Constant
class DDPGCombined(BaseAgent):
    """Task-switching DDPG controller for the quadcopter.

    Loads three pre-trained actor networks (takeoff / hover / landing)
    and routes each observation to the actor matching the requested task.
    No learning happens here; the networks are inference-only.
    """

    def __init__(self, task):
        # task (environment) information
        self.task = task  # should contain observation_space and action_space
        # Only the z-axis is controlled, hence state/action size of 1.
        self.state_size = 1  # np.prod(self.task.observation_space.shape)
        self.state_range = self.task.observation_space.high - self.task.observation_space.low
        self.action_size = 1  # np.prod(self.task.action_space.shape)
        self.action_range = self.task.action_space.high - self.task.action_space.low
        # score tracker parameter
        self.best_score = -np.inf
        # learning parameters (kept for ActorNetwork construction only)
        self.batch_size = 40
        self.buffer_size = 100000
        self.tau = 0.01
        self.lra = 0.0001
        self.hidden_units = 1
        self.hidden_units_hover = 3
        self.bias_initializer_actor = Constant(.5)
        # Paths to pre-trained network weights (hard-coded to the RoboND VM).
        self.actor_path_takeoff = '/home/robond/catkin_ws/src/RL-Quadcopter/takeoff_actor.h5'
        self.actor_path_hover = '/home/robond/catkin_ws/src/RL-Quadcopter/hover_actor.h5'
        self.actor_path_landing = '/home/robond/catkin_ws/src/RL-Quadcopter/landing_actor.h5'
        # create actors for each case
        self.actor_takeoff = ActorNetwork(self.state_size, self.action_size,
            self.batch_size, self.tau, self.lra, self.hidden_units, self.bias_initializer_actor)
        self.actor_hover = ActorNetwork(self.state_size, self.action_size,
            self.batch_size, self.tau, self.lra, self.hidden_units_hover, self.bias_initializer_actor)
        self.actor_landing = ActorNetwork(self.state_size, self.action_size,
            self.batch_size, self.tau, self.lra, self.hidden_units, self.bias_initializer_actor)
        # load pre-trained weights
        self.actor_takeoff.model.load_weights(self.actor_path_takeoff)
        self.actor_hover.model.load_weights(self.actor_path_hover)
        self.actor_landing.model.load_weights(self.actor_path_landing)
        # episode variables
        # self.reset_episode_vars()

    def step(self, state, reward, done, task_id):
        """Map a raw observation to a 6-dof ROS action for *task_id*."""
        # Center the z-position on the target altitude; only state[0, 2]
        # (z) is used by the 1-D actors.
        state = np.array([state[0, 2] - self.task.target_z])
        state = np.expand_dims(state, axis=0)
        # choose an action
        action = self.act(state, task_id)
        # Only the z-force component is driven by the policy.
        ros_action = np.array([0.0, 0.0, action, 0.0, 0.0, 0.0])
        return ros_action

    def act(self, state, task_id):
        """Return the z-force chosen by the actor for *task_id*.

        NOTE(review): an unrecognized task_id leaves action_unscaled
        unbound and raises -- confirm callers only pass the three ids.
        """
        if task_id == 'takeoff':
            action_unscaled = self.actor_takeoff.model.predict(np.expand_dims(state, axis=0))
        elif task_id == 'hover':
            action_unscaled = self.actor_hover.model.predict(np.expand_dims(state, axis=0))
        elif task_id == 'landing':
            action_unscaled = self.actor_landing.model.predict(np.expand_dims(state, axis=0))
        # Rescale the tanh-style output from [-1, 1] to the z action range.
        action = self.task.action_space.low[2] + (((action_unscaled + 1.0) / 2.0) * self.action_range[2])
        return action.reshape((1, 1))
| [
"ppujari@github.com"
] | ppujari@github.com |
8d8e492d19a9cbf8925b418a99430544fae33ad1 | 97b810d45f53546f925bf14e9c14fadf59d3ebb1 | /assembler/test/test_chip8.py | 94059af3518c9d089a514b3b54a93cc12f8f1088 | [
"MIT"
] | permissive | nulano/RPyChip8 | b26bf339fc861536d9cc069852b95ea702ca6406 | d463ff2629bef444961252694c4ac5d2688b3b03 | refs/heads/main | 2023-02-19T20:52:04.816524 | 2021-01-23T16:10:55 | 2021-01-23T16:10:55 | 330,976,821 | 0 | 0 | MIT | 2021-01-22T19:04:35 | 2021-01-19T12:44:46 | Python | UTF-8 | Python | false | false | 4,046 | py | import pytest
from assembler.chip8 import tokenize, assemble
class TestTokenize:
    # Each case: (test id, raw source line, expected token list).
    # Covers comments ('#' and ';'), the ',' delimiter and ':' labels.
    _test_single_line = [
        ("single_token", "RET", ["RET"]),
        ("two_tokens_space", "JMP main", ["JMP", "main"]),
        ("two_tokens_spaces", "JMP 0x200", ["JMP", "0x200"]),
        ("comment_hash_only", "#comment", []),
        ("comment_hash_no_space", "CLS#comment", ["CLS"]),
        ("comment_hash_after_space", "CLS # comment", ["CLS"]),
        ("comment_semi_only", ";comment", []),
        ("comment_semi_no_space", "CLS;comment", ["CLS"]),
        ("comment_semi_after_space", "CLS ; comment", ["CLS"]),
        ("delimiter_no_spaces", "ADD V1,V2", ["ADD", "V1", ",", "V2"]),
        ("delimiter_space_after", "ADD V1, V2", ["ADD", "V1", ",", "V2"]),
        ("delimiter_space_before", "ADD V1 ,V2", ["ADD", "V1", ",", "V2"]),
        ("delimiter_surround_spaces", "ADD V1 , V2", ["ADD", "V1", ",", "V2"]),
        ("delimiter_repeated", "ADD V1,,V2", ["ADD", "V1", ",", ",", "V2"]),
        ("label_no_space", "main:", ["main", ":"]),
        ("label_after_space", "main :", ["main", ":"]),
    ]

    @pytest.mark.parametrize(
        "test_id, line, expected",
        _test_single_line,
        ids=[t[0] for t in _test_single_line]
    )
    def test_single_line(self, test_id, line, expected):
        """tokenize() splits one source line into lexical tokens."""
        assert tokenize(line) == expected
class TestAssemble:
    # Each case: (source line, expected machine code as uppercase hex).
    # The source line doubles as the test id.
    _test_single_line = [
        # empty / comment
        (" ", ""),
        ("\t", ""),
        ("# comment", ""),
        ("; comment", ""),
        # standard instructions
        ("CLS", "00E0"),
        ("RET", "00EE"),
        # test SYS with various number specifications
        ("SYS E0h", "00E0"),
        ("SYS 777o", "01FF"),
        ("SYS 111100001111b", "0F0F"),
        ("SYS 0x3E0", "03E0"),
        ("SYS 255", "00FF"),
        ("SYS 0o555", "016D"),
        ("SYS 0b101010101010", "0AAA"),
        # test JP with various offset specifications ($ = current address, 0x200)
        ("JP $", "1200"),
        ("JP $+A20h", "1C20"),
        ("JP $-0o700", "1040"),
        # continue standard instructions
        ("HLT", "1200"),  # alternative to 'JP $'
        ("JP AAAh", "1AAA"),
        ("CALL 555h", "2555"),
        ("SE V1, 127", "317F"),
        ("IFNE V1, 127", "317F"),  # alternative spelling
        ("SNE V1, 127", "417F"),
        ("IFEQ V1, 127", "417F"),  # alternative spelling
        ("SE VA, V5", "5A50"),
        ("IFNE VA, V5", "5A50"),  # alternative spelling
        ("LD V5, 255", "65FF"),
        ("ADD V6, 255", "76FF"),
        ("LD V5, V6", "8560"),
        ("OR V5, V6", "8561"),
        ("AND V5, V6", "8562"),
        ("XOR V5, V6", "8563"),
        ("ADD V5, V6", "8564"),
        ("SUB V5, V6", "8565"),
        ("SHR V5, V6", "8566"),
        ("SHR V7", "8776"),
        ("SUBN V5, V6", "8567"),
        ("SHL V5, V6", "856E"),
        ("SHL V7", "877E"),
        ("SHL V5, V6", "856E"),
        ("SNE V5, V6", "9560"),
        ("IFEQ V5, V6", "9560"),  # alternative spelling
        ("LD I, BCDh", "ABCD"),
        ("JP V0, CDEh", "BCDE"),
        ("RND V3, ABh", "C3AB"),
        ("DRW V1, V2, 4", "D124"),
        ("SKP V5", "E59E"),
        ("IFUP V5", "E59E"),  # alternative spelling
        ("SKNP V5", "E5A1"),
        ("IFDN V5", "E5A1"),  # alternative spelling
        ("LD V3, DT", "F307"),
        ("LD V3, K", "F30A"),
        ("LD DT, V4", "F415"),
        ("LD ST, V4", "F418"),
        ("ADD I, V5", "F51E"),
        ("LD F, V6", "F629"),
        ("LD B, V6", "F633"),
        ("LD [I], V7", "F755"),
        ("LD V7, [I]", "F765"),
        # constants
        ("DW FEDCh", "FEDC"),
    ]

    @pytest.mark.parametrize(
        "line, expected",
        _test_single_line,
        ids=[t[0] for t in _test_single_line]
    )
    def test_single_line(self, line, expected):
        """assemble() emits the expected opcode for one source line."""
        # NOTE(review): str.encode("hex") is Python 2 only -- this suite
        # assumes a py2 interpreter.
        assert assemble([line]).encode("hex").upper() == expected

    def test_jump_label(self):
        # A label may be defined on its own line and referenced by JP.
        assert assemble(["main:", " ", "JP main"]) == "\x12\x00"

    def test_org(self):
        # .org pads the output with zero bytes up to the requested address.
        assert assemble([".org 0x204", "HLT"]) == "\0\0\0\0\x12\x04"
| [
"nulano@nulano.eu"
] | nulano@nulano.eu |
f037fc07b15d3b262441f8e257e03f8d9f722d1b | f6563a324ad740eebb9f2e07f882ae9ee6494935 | /restler/unit_tests/log_baseline_test_files/abc_test_grammar_invalid_b.py | 98748d1ba4b34f4cf60197143312bc844b4a32cb | [
"MIT",
"LicenseRef-scancode-generic-cla"
] | permissive | victor-li/restler-fuzzer | 3c0b255a50c1c2d03b327c1a9177f130fd641140 | d54678011165787ac594bb4fc8235ffcd891d7d1 | refs/heads/main | 2023-08-18T12:23:20.338493 | 2021-09-30T19:33:15 | 2021-09-30T19:33:15 | 413,392,759 | 0 | 0 | MIT | 2021-10-04T11:29:25 | 2021-10-04T11:29:24 | null | UTF-8 | Python | false | false | 6,190 | py | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# This grammar was created manually.
# There is no corresponding OpenAPI spec.
from __future__ import print_function
import json
from engine import primitives
from engine.core import requests
from engine.errors import ResponseParsingException
from engine import dependencies
# Dynamic variables produced by the PUT requests below (via the parse_*
# response parsers) and consumed by later requests in a sequence.
_post_a = dependencies.DynamicVariable(
    "_post_a"
)

_post_b = dependencies.DynamicVariable(
    "_post_b"
)

_post_d = dependencies.DynamicVariable(
    "_post_d"
)
def parse_A(data):
    """Parse a /A/{A} response: publish its 'name' field as _post_a."""
    temp_123 = None
    try:
        data = json.loads(data)
    except Exception as error:
        raise ResponseParsingException("Exception parsing response, data was not valid json: {}".format(error))
    try:
        temp_123 = str(data["name"])
    except Exception as error:
        # 'name' may be absent; leave the dynamic variable unset.
        pass
    if temp_123:
        dependencies.set_variable("_post_a", temp_123)
def parse_B(data):
    """Parse a /B/{B} response: publish its 'name' field as _post_b."""
    temp_123 = None
    try:
        data = json.loads(data)
    except Exception as error:
        raise ResponseParsingException("Exception parsing response, data was not valid json: {}".format(error))
    try:
        temp_123 = str(data["name"])
    except Exception as error:
        # 'name' may be absent; leave the dynamic variable unset.
        pass
    if temp_123:
        dependencies.set_variable("_post_b", temp_123)
def parse_D(data):
    """Parse a /D/{D} response: publish its 'name' field as _post_d."""
    temp_123 = None
    try:
        data = json.loads(data)
    except Exception as error:
        raise ResponseParsingException("Exception parsing response, data was not valid json: {}".format(error))
    try:
        temp_123 = str(data["name"])
    except Exception as error:
        # 'name' may be absent; leave the dynamic variable unset.
        pass
    if temp_123:
        dependencies.set_variable("_post_d", temp_123)
req_collection = requests.RequestCollection([])

# PUT /A/{A} -- producer of the _post_a dynamic variable (via parse_A).
request = requests.Request([
    primitives.restler_static_string("PUT "),
    primitives.restler_static_string("/A/A"),
    primitives.restler_static_string(" HTTP/1.1\r\n"),
    primitives.restler_static_string("Accept: application/json\r\n"),
    primitives.restler_static_string("Host: restler.unit.test.server.com\r\n"),
    primitives.restler_static_string("Content-Type: application/json\r\n"),
    primitives.restler_refreshable_authentication_token("authentication_token_tag"),
    primitives.restler_static_string("\r\n"),
    {
        'post_send':
        {
            'parser': parse_A,
            'dependencies':
            [
                _post_a.writer()
            ]
        }
    },
],
requestId="/A/{A}"
)
req_collection.add_request(request)

# PUT /B/{B} -- producer of the _post_b dynamic variable (via parse_B).
request = requests.Request([
    primitives.restler_static_string("PUT "),
    primitives.restler_static_string("/BUG/BUG"),
    primitives.restler_static_string(" HTTP/1.1\r\n"),
    primitives.restler_static_string("Accept: application/json\r\n"),
    primitives.restler_static_string("Host: restler.unit.test.server.com\r\n"),
    primitives.restler_static_string("Content-Type: application/json\r\n"),
    primitives.restler_refreshable_authentication_token("authentication_token_tag"),
    primitives.restler_static_string("\r\n"),
    {
        'post_send':
        {
            'parser': parse_B,
            'dependencies':
            [
                _post_b.writer()
            ]
        }
    },
],
requestId="/B/{B}"
)
req_collection.add_request(request)

# GET /C -- consumer of _post_a and _post_b in its JSON body.
request = requests.Request([
    primitives.restler_static_string("GET "),
    primitives.restler_static_string("/C"),
    primitives.restler_static_string(" HTTP/1.1\r\n"),
    primitives.restler_static_string("Accept: application/json\r\n"),
    primitives.restler_static_string("Host: restler.unit.test.server.com\r\n"),
    primitives.restler_static_string("Content-Type: application/json\r\n"),
    primitives.restler_refreshable_authentication_token("authentication_token_tag"),
    primitives.restler_static_string("\r\n"),
    primitives.restler_static_string("{"),
    primitives.restler_static_string('"A": "'),
    primitives.restler_static_string(_post_a.reader()),
    primitives.restler_static_string('", "B": "'),
    primitives.restler_static_string(_post_b.reader()),
    primitives.restler_static_string('"'),
    primitives.restler_static_string("}"),
],
requestId="/C"
)
req_collection.add_request(request)

# PUT /D/{D} -- consumes _post_a/_post_b and produces _post_d (parse_D).
request = requests.Request([
    primitives.restler_static_string("PUT "),
    primitives.restler_static_string("/D/D"),
    primitives.restler_static_string(" HTTP/1.1\r\n"),
    primitives.restler_static_string("Accept: application/json\r\n"),
    primitives.restler_static_string("Host: restler.unit.test.server.com\r\n"),
    primitives.restler_static_string("Content-Type: application/json\r\n"),
    primitives.restler_refreshable_authentication_token("authentication_token_tag"),
    primitives.restler_static_string("\r\n"),
    primitives.restler_static_string("{"),
    primitives.restler_static_string('"A": "'),
    primitives.restler_static_string(_post_a.reader()),
    primitives.restler_static_string('", "B": "'),
    primitives.restler_static_string(_post_b.reader()),
    primitives.restler_static_string('"'),
    primitives.restler_static_string("}"),
    primitives.restler_static_string("\r\n"),
    {
        'post_send':
        {
            'parser': parse_D,
            'dependencies':
            [
                _post_d.writer()
            ]
        }
    },
],
requestId="/D/{D}"
)
req_collection.add_request(request)

# GET /E -- consumer of _post_d in its JSON body.
request = requests.Request([
    primitives.restler_static_string("GET "),
    primitives.restler_static_string("/E"),
    primitives.restler_static_string(" HTTP/1.1\r\n"),
    primitives.restler_static_string("Accept: application/json\r\n"),
    primitives.restler_static_string("Host: restler.unit.test.server.com\r\n"),
    primitives.restler_static_string("Content-Type: application/json\r\n"),
    primitives.restler_refreshable_authentication_token("authentication_token_tag"),
    primitives.restler_static_string("\r\n"),
    primitives.restler_static_string("{"),
    primitives.restler_static_string('"D": "'),
    primitives.restler_static_string(_post_d.reader()),
    primitives.restler_static_string('"'),
    primitives.restler_static_string("}"),
],
requestId="/E"
)
req_collection.add_request(request)
| [
"noreply@github.com"
] | noreply@github.com |
8cececa88fc5db4803974b32211503940569110f | c6fcc7b5769ef8e704fbd8d317a4b64f2bea0b85 | /data_loader.py | 0c26e5f675602a519957fd3a926792e0832f3f9f | [
"MIT"
] | permissive | NegativeMind/mnist-svhn-transfer | 886cb8385cd3335275e22cd690e2b4c346e4981d | 998ce54abe916dca2ffeeb916cd520f94928e93f | refs/heads/master | 2022-04-25T06:34:02.339211 | 2020-04-19T05:19:09 | 2020-04-19T05:19:09 | 254,663,893 | 0 | 0 | null | 2020-04-10T15:05:08 | 2020-04-10T15:05:07 | null | UTF-8 | Python | false | false | 1,160 | py | import torch
from torchvision import datasets
from torchvision import transforms
def get_loader(config):
    """Builds and returns Dataloader for MNIST and SVHN dataset.

    config must provide: image_size, svhn_path, mnist_path, batch_size,
    num_workers.
    """
    # Resize to a common size, convert to tensors and normalize each
    # channel to roughly [-1, 1].
    # NOTE(review): transforms.Scale is the legacy (pre-0.8 torchvision)
    # name for transforms.Resize -- confirm the pinned torchvision version.
    transform = transforms.Compose([
        transforms.Scale(config.image_size),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])

    # Datasets are downloaded on first use into the configured paths.
    svhn = datasets.SVHN(root=config.svhn_path, download=True, transform=transform)
    mnist = datasets.MNIST(root=config.mnist_path, download=True, transform=transform)

    svhn_loader = torch.utils.data.DataLoader(dataset=svhn,
                                              batch_size=config.batch_size,
                                              shuffle=True,
                                              num_workers=config.num_workers)

    mnist_loader = torch.utils.data.DataLoader(dataset=mnist,
                                               batch_size=config.batch_size,
                                               shuffle=True,
                                               num_workers=config.num_workers)
    return svhn_loader, mnist_loader
"yunjey47@naver.com"
] | yunjey47@naver.com |
f47ad89cbb0ecfa5a4f40da82fafb29dcdb9f6de | c3410f88fb8f05793f6c4ca80b8ab6c1b0338b61 | /send-test.py | bd298de8572f2841f654d4a9cfa9977aaa40d5a7 | [
"MIT"
] | permissive | BlurryRoots/checker | f6af0020cd3d4adf671555345281b7294d0acaea | 4a44f23ad33867ff61b1cc65999e5685a7fdf629 | refs/heads/master | 2016-09-06T06:06:40.848483 | 2014-12-17T16:52:39 | 2014-12-17T16:52:39 | 27,002,955 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 920 | py | import requests
import json
from optparse import OptionParser
# Python 2 script: POSTs a JSON report of a server node to the monitor
# service's /report endpoint.
parser = OptionParser()
parser.add_option("-p", "--port", dest="port",
    action='store', type='int', default=8080,
    help="Monitor service port.")
parser.add_option("-i", "--ip", dest="host",
    default='localhost', help="Monitor service ip adress.")
parser.add_option("-s", "--server-host", dest="server_host",
    default='127.0.0.1', help="Sending server node ip.")
parser.add_option("-P", "--server-port", dest="server_port",
    action='store', type='int', default=42,
    help="Sending server node port.")
(options, args) = parser.parse_args()

# Build the monitor endpoint URL and the JSON payload describing the node.
url = 'http://%s:%s/report' % (options.host, options.port)
payload = {
    'host': options.server_host,
    'port': options.server_port
}
ctype = {
    'content-type': 'application/json'
}

r = requests.post (url, data=json.dumps (payload), headers=ctype)
print "response\n\ttext: %s\n\tstatus: %d" % (r.text, r.status_code)
| [
"blurryroots@posteo.de"
] | blurryroots@posteo.de |
36fb2747984856faec0b58c8809b658ee371d0c4 | 395d817d7a11dc3fcbfb50f0561aa0ce43a45d32 | /DQNs/DQN_cnn/dqn_agent.py | 67af84a3948fff40ad6139a08317945555a10732 | [] | no_license | lhonrush/DeepReinforcementLearning_Pytorch | a3ce458b756b9d33797f0703ace64dceae49bc20 | 3a80a41ed1f33ad82807496d43947fc9d670184b | refs/heads/master | 2023-07-08T11:11:29.634566 | 2021-08-19T07:26:58 | 2021-08-19T07:26:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,639 | py | import numpy as np
import random
from collections import namedtuple, deque
import torch
import torch.nn.functional as F
import torch.optim as optim
from cnn_model import CNN_Model
TAU = 1e-3  # interpolation factor for soft target-network updates
EPS_start = 1.0
EPS_end = 0.01
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")


class ReplayBuffer:
    """Fixed-size FIFO store of (state, action, reward, next_state, done)
    transitions with uniform random mini-batch sampling."""

    def __init__(self, action_size, buffer_size, batch_size):
        """Create an empty buffer.

        Params
        ======
            action_size (int): dimension of each action
            buffer_size (int): maximum number of stored transitions
            batch_size (int): size of each sampled training batch
        """
        self.action_size = action_size
        self.batch_size = batch_size
        # deque(maxlen=N) silently evicts the oldest entry once full.
        self.memory = deque(maxlen=buffer_size)
        self.experience = namedtuple(
            "Experience",
            field_names=["state", "action", "reward", "next_state", "done"])

    def add(self, state, action, reward, next_state, done):
        """Append one transition (oldest entry is dropped when full)."""
        self.memory.append(
            self.experience(state, action, reward, next_state, done))

    def clean_buffer(self):
        """Discard every stored transition."""
        self.memory.clear()

    def sample(self):
        """Draw a uniform random mini-batch and stack it into torch tensors."""
        batch = random.sample(self.memory, k=self.batch_size)
        states = torch.tensor(
            [b.state for b in batch if b is not None]).float().to(device)
        # Note the extra list nesting: actions come back with shape (1, batch).
        actions = torch.tensor(
            [[b.action for b in batch if b is not None]]).long().to(device)
        rewards = torch.tensor(
            [b.reward for b in batch if b is not None]).float().to(device)
        next_states = torch.tensor(
            [b.next_state for b in batch if b is not None]).float().to(device)
        dones = torch.from_numpy(
            np.array([b.done for b in batch if b is not None]).astype(
                np.uint8)).float().to(device)
        return (states, actions, rewards, next_states, dones)

    def __len__(self):
        """Number of transitions currently stored."""
        return len(self.memory)
class Agent_dqn():
def __init__(self, input_channel,action_size,learning_rate=5e-3,buffer_size=int(1e4),batch_size=32):
"""Initialize an Agent object.
Params
======
state_size (int): dimension of each state
action_size (int): dimension of each action
seed (int): random seed
"""
self.action_size = action_size
# Q-Network
self.qnetwork_local = CNN_Model(input_channel,action_size).to(device)
self.qnetwork_target = CNN_Model(input_channel,action_size).to(device)
self.optimizer = optim.Adam(self.qnetwork_local.parameters(), learning_rate)
# Replay memory
self.batch_size = batch_size
self.memory = ReplayBuffer(action_size, buffer_size,batch_size)
# Initialize time step (for updating every UPDATE_EVERY steps)
self.t_step = 0
self.episode = 0
self.epsilon = EPS_start
def act(self,state,i_episode,eps_decay):
state = torch.from_numpy(state).float().unsqueeze(0).to(device)
self.qnetwork_local.eval()
with torch.no_grad():
action_values = self.qnetwork_local(state)
self.qnetwork_local.train()
" Epsilon-greedy action selection"
if i_episode>self.episode:
# update EPS every new episode
self.epsilon = max(EPS_end, eps_decay * self.epsilon)
self.episode = i_episode
# epsilon greedy policy
if random.random() > self.epsilon:
return np.argmax(action_values.cpu().data.numpy())
else:
return random.choice(np.arange(self.action_size))
def act_greedy_policy(self,state):
state = torch.from_numpy(state).float().unsqueeze(0).to(device)
self.qnetwork_local.eval()
with torch.no_grad():
action_values = self.qnetwork_local(state)
return np.argmax(action_values.cpu().data.numpy())
def step(self,sarsd,gamma,update_every):
state, action, reward, next_state, done = sarsd
self.t_step += 1
# add an experience for current time step
self.memory.add(state, action, reward, next_state, done)
# Learn every UPDATE_EVERY time steps
if (self.t_step+1) % update_every==0:
if self.memory.__len__()>self.batch_size:
batch_exps = self.memory.sample()
loss = self.learn(batch_exps,gamma)
return loss
def learn(self,exps,gamma):
# fetch the batch (s,a,r,s',done) from experiences batch
states,actions,rewards,next_states,dones = exps
print(states.shape)
# ------------------ calculate loss —------------------------- #
# calculate Q targets
expected_next_max_actions = self.qnetwork_local(next_states).detach().argmax(1).unsqueeze(0)
Q_expected_next = self.qnetwork_target(next_states).gather(1, expected_next_max_actions)
Q_targets = rewards + (gamma * Q_expected_next * (1 - dones))
# get expected Q for current state
Q_expected = self.qnetwork_local(states).gather(1, actions)
loss = F.mse_loss(Q_expected, Q_targets)
# ---------------- update local Q net -------------------- #
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
# print(next(self.qnetwork_local.parameters()).is_cuda)
# ---------------- update target Q net -------------------- #
self.soft_update(self.qnetwork_local, self.qnetwork_target, TAU)
return loss.cpu().detach().numpy()
def soft_update(self, local_model, target_model, tau):
    """Polyak-average target network weights toward the local network.

    θ_target = τ*θ_local + (1 - τ)*θ_target

    Params
    ======
        local_model (PyTorch model): weights will be copied from
        target_model (PyTorch model): weights will be copied to
        tau (float): interpolation parameter
    """
    param_pairs = zip(target_model.parameters(), local_model.parameters())
    for t_param, l_param in param_pairs:
        blended = tau * l_param.data + (1.0 - tau) * t_param.data
        t_param.data.copy_(blended)
| [
"quantumcheese1990@gmail.com"
] | quantumcheese1990@gmail.com |
523b42f752bced31bc63bb710b3b4fded293c9cf | 20e3010608e40a6ec5ea56f69d122a62182e4bdb | /1 - Python-2/4 - strings functions/HW4/3. Make an IP adress unclickable.py | f6b7f30215f124961d64f2ec6f1ae189675582a4 | [] | no_license | LarisaOvchinnikova/Python | ee65eac221cd03563d60110118175692564c5b2d | 9cc86a260828662995dec59a6d69528f96d37e79 | refs/heads/master | 2021-08-22T21:41:02.351589 | 2021-05-25T18:37:09 | 2021-05-25T18:37:09 | 253,842,826 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 149 | py | # Input: address = "1.1.1.1"
# Output: "1[.]1[.]1[.]1"
def ip_address(address):
    """Defang an IP address: every '.' becomes '[.]'."""
    return "[.]".join(address.split("."))

print(ip_address("1.1.1.1"))
"larisaplantation@gmail.com"
] | larisaplantation@gmail.com |
c212488374a2e7a4dcf011707fabc37464e8b920 | f79102231c83674a4c01e56e3953b2a65cb14da2 | /leetcode/base/list/环形链表.py | 31d0d694e9e23ee41583a99337ef25a65410b65f | [] | no_license | Activity00/Python | 4971b177beaf72df0de97f7e78f400d48104dce1 | 166d97f36bbeea74c84ec57466bd0a65b608ed09 | refs/heads/master | 2020-12-24T07:53:06.782982 | 2020-09-29T10:55:43 | 2020-09-29T10:55:43 | 73,362,001 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,237 | py | # coding: utf-8
"""
@author: 武明辉
@time: 19-3-20 下午9:35
"""
"""
给定一个链表,判断链表中是否有环。
为了表示给定链表中的环,我们使用整数 pos 来表示链表尾连接到链表中的位置(索引从 0 开始)。 如果 pos 是 -1,则在该链表中没有环。
示例 1:
输入:head = [3,2,0,-4], pos = 1
输出:true
解释:链表中有一个环,其尾部连接到第二个节点。
示例 2:
输入:head = [1,2], pos = 0
输出:true
解释:链表中有一个环,其尾部连接到第一个节点。
示例 3:
输入:head = [1], pos = -1
输出:false
解释:链表中没有环。
进阶:
你能用 O(1)(即,常量)内存解决此问题吗?
"""
# Definition for singly-linked list.
class ListNode(object):
    """A singly-linked list node: a payload value and a successor pointer."""
    def __init__(self, x):
        # Successor starts unset; caller links nodes after construction.
        self.next = None
        self.val = x
class Solution(object):
    """LeetCode 141 — detect whether a singly linked list contains a cycle."""

    def hasCycle(self, head):
        """Return True iff the list starting at `head` loops back on itself.

        Uses Floyd's tortoise-and-hare: a fast pointer advancing two nodes
        per step meets a slow pointer iff there is a cycle. O(1) extra memory.
        """
        if not head:
            return False
        slow = head
        fast = head
        while fast.next and fast.next.next:
            slow = slow.next
            fast = fast.next.next
            if slow == fast:
                return True
        return False
if __name__ == '__main__':
    # Placeholder entry point: nothing runs when executed as a script.
    pass
| [
"1032662429@qq.com"
] | 1032662429@qq.com |
54d0c850aeea3d8745e9e8793b2c9498017e11c6 | 84403eabc825adc91b47525fed136c7a84449205 | /practice_filerenaming.py | 24082671cf519abd3466202938d65a36e2c79663 | [] | no_license | a-b-v-k/PythonBootCamp | 7ae464a6f47ab7074eb5ee514b8f8250b86128b9 | 3b55ce4dc8cb2232429a671f8cbfc101eb74bdf5 | refs/heads/main | 2023-06-25T00:25:44.827193 | 2021-07-26T13:04:56 | 2021-07-26T13:04:56 | 376,466,219 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 331 | py | import os
try:
filepath = str(input('Enter the path of the file:'))
newname = str(input('Enter the new name:'))
filedir = os.path.dirname(filepath)
filename = os.path.basename(filepath)
newfilepath = filedir + '/' + newname
os.rename(filepath, newfilepath)
except FileNotFoundError as e:
print(e) | [
"vijaykrishnaabv@gmail.com"
] | vijaykrishnaabv@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.