index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
5,400 | acb85a16e45472dac61eed4162dc651f67a0e8ca | import settings
#from django.conf import settings
from django.conf.urls import patterns, include, url
from django.contrib import admin
from django.conf.urls.static import static
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
#admin.autodiscover()
# Uncomment the next two lines to enable the admin:
# from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
    # NOTE(review): the stray `url` below binds to the media tuple on the next
    # line (the admin route itself is commented out), so the media route is a
    # url(...) call while the remaining routes are plain tuples.
    url#(r'^admin/', include(admin.site.urls)),
    # Serve uploaded media files directly (development only).
    (r'^media/(?P<path>.*)$', 'django.views.static.serve',
        {'document_root': settings.MEDIA_ROOT}),
    # Serve collected static files directly (development only).
    (r'^static/(?P<path>.*)$', 'django.contrib.staticfiles.views.serve',
        {'document_root': settings.STATIC_ROOT}),
    # Home page
    (r'^$', 'classify.views.HomePage'),
    # Estimation workflow: input form, then result page
    (r'^estimate/$', 'classify.views.Estimation'),
    (r'^result/$', 'classify.views.Result'),
)
|
5,401 | d472a15d6fa826e50a550996369b00b6c599a1c7 | from .scheduler import Scheduler
MyScheduler = Scheduler()
|
5,402 | e44e19dbeb6e1e346ca371ca8730f53ee5b95d47 | from boa3.builtin import public
@public
def Main() -> int:
    """Return the length of a fixed sample string (always 11)."""
    sample = 'just a test'
    return len(sample)
|
5,403 | 3366d1d4ecc4cc9f971dff0c8adfbadc5511cc9e | #print 'g'
class Client:
    """Submits sensor readings to a remote web form via mechanize (Python 2).

    The mechanize module itself is injected so the transport can be swapped
    or faked in tests.
    """

    def __init__(self, mechanize, WEBSERVICE_IP,WEBSERVICE_PORT, FORM_INPUT_PATH, dat , data_id = 'data', rasp_id_id = 'rasp_id',password = 'pass'):
        self.WEBSERVICE_PORT = WEBSERVICE_PORT
        self.mechanize = mechanize
        self.WEBSERVICE_IP = WEBSERVICE_IP
        self.FORM_INPUT_PATH = FORM_INPUT_PATH
        # Names of the form's text inputs for the value, device id and password.
        self.data_id = data_id
        self.rasp_id_id = rasp_id_id
        self.password = password
        # Side channel used to report success/failure via sent_Host().
        self.dat = dat

    def sent_data(self, data, RASP_ID, PASS):
        """Open the form page, fill the first form, submit; True on success."""
        try:
            br=self.mechanize.Browser()
            br.open('http://' + self.WEBSERVICE_IP +':'+self.WEBSERVICE_PORT+self.FORM_INPUT_PATH, timeout = 5)
            br.select_form(nr=0) # first form on the page; adjust if the site changes
            br[self.data_id] = data
            br[self.rasp_id_id] = RASP_ID
            br[self.password] = PASS
            br.submit()
            self.dat.sent_Host("OK" + data)
            print 'Sent to Webservice: ', data
            return True
        except Exception, e:
            # Best effort: report the failure through `dat` and carry on.
            self.dat.sent_Host("Fail" + data)
            return False
#br.retrieve('https://www.yourfavoritesite.com/pagetoretrieve.html','yourfavoritepage.html')
#import mechanize
#print 'g'
#cli = Client(mechanize,'localhost','55555','/demo_websocket/form.html')
#cli.sent_data('hello world')
#sent_datasent_data(mechanize,'pholly.esy.es','80','/demo_websocket/form.html','helloword')
|
5,404 | 803283c9dac78c821373fa1025008b04919df72c | from turtle import *
from freegames import vector
def line(start, end):
    """Draw a straight line from *start* to *end* (pen lifted while moving)."""
    up()
    goto(start.x, start.y)
    down()
    goto(end.x, end.y)
def square(start, end):
    """Draw a filled square whose side length is end.x - start.x."""
    side = end.x - start.x
    up()
    goto(start.x, start.y)
    down()
    begin_fill()
    for _ in range(4):
        forward(side)
        left(90)
    end_fill()
def circulo(start, end):
    """Draw a filled circle with radius end.x - start.x."""
    begin_fill()
    circle(end.x - start.x)
    end_fill()
#Adrian
def rectangle(start, end):
    """Draw a filled rectangle: width end.x - start.x, height half of that.

    Bound to the 'r' key.
    """
    length = end.x - start.x
    begin_fill()
    # Sides alternate short, long, short, long -- same sequence the original
    # produced with its count % 2 test over counts 0..3.
    for side in (length / 2, length, length / 2, length):
        forward(side)
        left(90)
    end_fill()
def triangle(start, end):
    """Draw a filled equilateral triangle with side end.x - start.x."""
    side = end.x - start.x
    begin_fill()
    # Three sides with 120-degree exterior turns close the triangle.
    for _ in range(3):
        forward(side)
        left(120)
    end_fill()
def tap(x, y):
    """First click stores the start point; the second click draws the
    currently selected shape and clears the stored point."""
    start = state['start']
    if start is None:
        state['start'] = vector(x, y)
        return
    end = vector(x, y)
    state['shape'](start, end)
    state['start'] = None
def store(key, value):
    """Record *value* under *key* in the shared drawing state."""
    state.update({key: value})
# Shared drawing state: the pending start point and the active shape function.
state = {'start': None, 'shape': line}
setup(420, 420, 370, 0)
onscreenclick(tap)
listen()
# Key bindings: 'u' undoes, capital letters pick colors, lowercase pick shapes.
onkey(undo, 'u')
onkey(lambda: color('black'), 'K')
onkey(lambda: color('#F5B7B1'), 'Q')  # teacher's color (pink)
onkey(lambda: color('#00ffff'), 'Y')  # Ricardo's color (cyan)
onkey(lambda: color('white'), 'W')
onkey(lambda: color('green'), 'G')
onkey(lambda: color('blue'), 'B')
onkey(lambda: color('red'), 'R')
onkey(lambda: store('shape', line), 'l')
onkey(lambda: store('shape', square), 's')
onkey(lambda: store('shape', circulo), 'c')
onkey(lambda: store('shape', rectangle), 'r')
onkey(lambda: store('shape', triangle), 't')
done()
|
5,405 | cdc8c8aba384b7b1b5e741ffe4309eaee30aaada | # Generated by Django 3.0.5 on 2020-04-30 06:26
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated schema change: add User and Order, drop Item.stock.

    NOTE(review): generated by Django; comments added only, code untouched.
    """

    dependencies = [
        ('products_app', '0003_auto_20200429_0739'),
    ]

    operations = [
        # New User model with a name and email address.
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
                ('email', models.EmailField(max_length=254)),
            ],
        ),
        migrations.RemoveField(
            model_name='item',
            name='stock',
        ),
        # Order links one user to any number of items.
        migrations.CreateModel(
            name='Order',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('items', models.ManyToManyField(to='products_app.Item')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='products_app.User')),
            ],
        ),
    ]
|
5,406 | b0e4042ac4ed54cafedb9e53244c164527559e39 | rak="hello\n"
n=input()
print(rak * int(n))
|
5,407 | 1857d76b8c68c58d2d721de529811a6aeb09fcbb | from fastapi import FastAPI
from app.router.routes import initRoutes
from app.cors.cors import initCors
# Build the FastAPI application, then attach CORS middleware and the routes.
app = FastAPI(debug=True,title="Recipe API")
initCors(app)
initRoutes(app)
|
5,408 | f5e60f2d384242b9675e756f67391ea09afcc262 | from customer_service.model.customer import Customer
def get_customer(customer_id, customer_repository):
    """Look up and return the customer with the given id."""
    found = customer_repository.fetch_by_id(customer_id)
    return found
def create_customer(first_name, surname, customer_repository):
    """Build a new Customer, persist it, and return its generated id."""
    new_customer = Customer(first_name=first_name, surname=surname)
    customer_repository.store(new_customer)
    return new_customer.customer_id
def update_customer(first_name, surname, cid, customer_repository):
    """Rename the customer with id *cid*, persist it, and return it."""
    existing = customer_repository.fetch_by_id(cid)
    existing.first_name, existing.surname = first_name, surname
    customer_repository.store(existing)
    return existing
|
5,409 | 76348448a658736627efe8fa6b19c752191966e7 | f=open('poem.txt')
# Stream the file line by line; the trailing comma suppresses print's own
# newline since each line already ends with one (Python 2 syntax).
for line in f:
    print line,
|
5,410 | 958d7ec966179d63c6ba0a651e99fff70f0db31a | from collections import defaultdict, deque
import numpy as np
import gym
from chula_rl.policy.base_policy import BasePolicy
from chula_rl.exception import *
from .base_explorer import BaseExplorer
class OneStepExplorerWithTrace(BaseExplorer):
    """one-step explorer but with n-step trace"""

    def __init__(self, n_step: int, n_max_interaction: int, env: gym.Env):
        """Keep a rolling window of the most recent `n_step` transitions."""
        super().__init__(env)
        self.n_step = n_step
        self.n_max_interaction = n_max_interaction
        self.last_s = self.env.reset()
        # Each list key ('s', 'a', 'r', 'done') maps to a deque that holds at
        # most n_step of the most recent values; older entries fall off.
        self.trace = defaultdict(lambda: deque(maxlen=n_step))
        self.n_interaction = 0
        self.n_ep = 0

    def step(self, policy: BasePolicy):
        """Advance the environment one step and return the updated trace.

        Raises InteractionExceeded once the interaction budget is spent.
        Note: 'final_s'/'final_a' are plain values (not deques) holding the
        bootstrap state and the SARSA follow-up action.
        """
        if self.n_interaction > self.n_max_interaction:
            raise InteractionExceeded()
        # explore
        a = policy.step(self.last_s)
        s, r, done, info = self.env.step(a)
        self.n_interaction += 1
        # collect data
        self.trace['s'].append(self.last_s)
        self.trace['a'].append(a)
        self.trace['r'].append(r)
        self.trace['done'].append(done)
        self.trace['final_s'] = s  # for bootstrapping
        self.trace['final_a'] = policy.step(s)  # for SARSA
        self.last_s = s
        # if done reset
        if done:
            self.last_s = self.env.reset()
            self.n_ep += 1
            self._update_stats(self.n_interaction, info['episode']['reward'])
        return self.trace
|
5,411 | 467b919f6953737eedd3f99596df244bd1177575 | import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
# Grouped bar chart comparing PRNG throughput (MB/s) across four machines.
# Each CSV holds per-run throughput, one column per generator.
r_data_df = pd.read_csv('./Shootout Data/Shootout_Mac2017.csv')
em_data_df = pd.read_csv('./Shootout Data/Shootout_Emily.csv')
aishah_data_df = pd.read_csv('./Shootout Data/Shootout_Aishah_Mac2011.csv')
agni_data_df = pd.read_csv('./Shootout Data/Shootout_Agni.csv')
# One row per machine (column means), one column per generator.
df = pd.concat([aishah_data_df.mean(),em_data_df.mean(),r_data_df.mean(),agni_data_df.mean()],axis=1).T
# Base x positions and width of each bar in a group.
pos = list(range(len(df['Mersenne Twister'])))
width = 0.2
fig, ax = plt.subplots(figsize=(10,5))
# One bar series per generator, offset by multiples of `width`.
# NOTE(review): the last two series omit alpha=0.5 (commented out in the
# original), so they render fully opaque -- presumably unintentional.
plt.bar(pos,
    df['Mersenne Twister'],
    width,
    alpha=0.5,
    color='#EE3224')
plt.bar([p + width for p in pos],
    df['Xorshift 128+'],
    width,
    alpha=0.5,
    color='#F78F1E')
plt.bar([p + width*2 for p in pos],
    df['SPCG64'],
    width,
    color='#FFC222')
plt.bar([p + width*3 for p in pos],
    df['Xoroshiro 128+'],
    width,
    color='#FF3300')
ax.set_ylabel('Average MB/s',fontweight='bold')
ax.set_title('Average MBs of Random Numbers Generated in a Second',fontweight='bold')
# Center each tick under its 4-bar group.
ax.set_xticks([p + 1.5 * width for p in pos])
ax.set_xticklabels(['MacBook 2017','MacBook 2015','MacBook 2011','Ubuntu 18.04'])
plt.xlim(min(pos)-width, max(pos)+width*4)
plt.ylim([0, 10000] )
# Legend order must match the plotting order above.
plt.legend(['Mersenne Twister','Xorshift 128+', 'SPCG64','Xoroshiro 128+'], loc='upper left')
plt.grid()
#plt.show()
plt.savefig('barchart_compare.png')
|
5,412 | 5e06dfb7aac64b5b98b4c0d88a86f038baf44feb | import math
import os
import pathfinder as pf
from constants import X_ROBOT_LENGTH, Y_ROBOT_WIDTH, Y_WALL_TO_EXCHANGE_FAR, \
X_WALL_TO_SWITCH_NEAR
from utilities.functions import GeneratePath
class settings():
    """Trajectory-generation parameters passed to GeneratePath."""
    order = pf.FIT_HERMITE_QUINTIC  # spline fit used by pathfinder
    samples = 1000000
    period = 0.02  # trajectory sample period in seconds
    maxVelocity = 6.0
    maxAcceleration = 10
    maxJerk = 30
    # The waypoints are entered as X, Y, and Theta. Theta is measured clockwise from the X-axis and
    # is in units of radians. It is important to generate the paths in a consistent manner to those
    # used by the controller. For example, use either metric or imperial units. Also, use a
    # consistent frame of reference. This means that +X is forward, -X is backward, +Y is right, and
    # -Y is left, +headings are going from +X towards +Y, and -headings are going from +X to -Y.
    waypoints = [
        pf.Waypoint(0, 0, 0),
        pf.Waypoint(96 / 12, -22 / 12, 0),  # inches / 12 -> feet, presumably
    ]
GeneratePath(os.path.dirname(__file__), "first_cube_middle_start_left_switch", waypoints, settings)
|
5,413 | c248d653556ecdf27e56b57930832eb293dfd579 | from ShazamAPI import Shazam
import json
import sys
print("oi")
|
5,414 | 4fb563985bd99599e88676e167ee84a95b018aba | import os
import struct
import sys
import wave
sys.path.insert(0, os.path.dirname(__file__))
# Note frequencies in Hz for the melody.
C5 = 523
B4b = 466
G4 = 392
E5 = 659
F5 = 698
# Peak sample amplitude (16-bit samples range over +/-32767).
VOLUME = 12000
# (volume, frequency) pairs; a volume of 0 encodes a rest.
notes = [
    [VOLUME, C5],
    [VOLUME, C5],
    [VOLUME, B4b],
    [VOLUME, C5],
    [0, C5],
    [VOLUME, G4],
    [0, C5],
    [VOLUME, G4],
    [VOLUME, C5],
    [VOLUME, F5],
    [VOLUME, E5],
    [VOLUME, C5],
]
from fade import fade
from gain import gain
from repeat import repeat
from square import square_wave
all_samples = []
quarter_second = 44100 // 4  # samples per note at 44.1 kHz
for volume, frequency in notes:
    # Build one half-wavelength of square wave (presumably square_wave takes
    # the half-period in samples -- confirm against square.py), scale it,
    # tile it to a quarter second, and fade to avoid clicks.
    samples = square_wave(int(44100 / frequency // 2))
    samples = gain(samples, volume)
    samples = repeat(samples, quarter_second)
    samples = fade(samples, quarter_second)
    all_samples.extend(samples)
all_samples = [int(sample) for sample in all_samples]
# Write mono, 16-bit, 44.1 kHz WAV output.
w = wave.open('music.wav', 'wb')
w.setnchannels(1)
w.setsampwidth(2)
w.setframerate(44100)
w.writeframes(struct.pack('<' + 'h' * len(all_samples), *all_samples))
|
5,415 | 8c11463e35fb32949abbb163a89f874040a33ad0 | import cv2
import numpy as np
import time
from dronekit import connect, VehicleMode
# Record webcam video with the UAV's relative altitude overlaid on each frame.
connection_string = "/dev/ttyACM0"  # serial device of the flight controller
baud_rate = 115200

print(">>>> Connecting with the UAV <<<<")
vehicle = connect(connection_string, baud=baud_rate, wait_ready=True)
vehicle.wait_ready('autopilot_version')
print('ready')

cap = cv2.VideoCapture(0)  # default camera
if (cap.isOpened() == False):
    print("Unable to read camera feed")

frame_width = int(cap.get(3))   # CAP_PROP_FRAME_WIDTH
frame_height = int(cap.get(4))  # CAP_PROP_FRAME_HEIGHT

# MJPG video named after the start timestamp, written at 10 FPS.
t = str(time.time())
out = cv2.VideoWriter(t+'.avi',cv2.VideoWriter_fourcc('M','J','P','G'), 10, (frame_width,frame_height))

while(True):
    # Parse the relative altitude out of the location repr.
    # NOTE(review): vehicle.location.global_relative_frame.alt would be the
    # robust way to read this -- confirm and replace the string parsing.
    posdata = str(vehicle.location.global_relative_frame).split(':')
    _, _, alt = posdata[1].split(',')
    ret, frame = cap.read()
    if ret == True:
        # BUG FIX: draw the overlay only after confirming the frame was read.
        # The original called putText before checking `ret`, which raises on
        # a None frame when the capture fails.
        cv2.putText(frame, str(alt),(0,int(frame_height/2.1)),cv2.FONT_HERSHEY_SIMPLEX, 0.4, (255,255,255), 1)
        print("record..")
        out.write(frame)
        #cv2.imshow('frame',frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    else:
        break

cap.release()
out.release()
cv2.destroyAllWindows()
|
5,416 | db22e568c86f008c9882181f5c1d88d5bca28570 | import sqlite3 as lite
import sys
# Create a tiny Cars table and seed it with three rows.
con = lite.connect("test.db")
with con:
    # `with con` commits on success and rolls back on error (the connection
    # itself stays open afterwards).
    cur = con.cursor()
    # NOTE(review): CREATE TABLE fails if the script runs twice against the
    # same test.db; consider CREATE TABLE IF NOT EXISTS for reruns.
    cur.execute('''CREATE TABLE Cars(Id INT, Name TEXT, Price INT)''')
    cur.execute('''INSERT INTO Cars VALUES(1, 'car1', 10)''')
    cur.execute('''INSERT INTO Cars VALUES(2, 'car2', 20)''')
    cur.execute('''INSERT INTO Cars VALUES(3, 'car3', 30)''')
|
5,417 | c6fdb9c405427a3583a59065f77c75c4aa781405 | from flask import Flask, app
from flask_sqlalchemy import SQLAlchemy
db= SQLAlchemy()
DBNAME = 'database.db'
def create_app():
    """Application factory: build and configure the Flask app.

    Returns a Flask instance with SQLAlchemy bound and the `views` and
    `auth` blueprints registered at the site root.
    """
    app = Flask(__name__)
    app.config['SECRET_KEY'] = 'KARNISINGHSHEKHAWAT'
    # BUG FIX: the config key is SQLALCHEMY_DATABASE_URI (not ..._URL);
    # the misspelled key is silently ignored by Flask-SQLAlchemy.
    app.config['SQLALCHEMY_DATABASE_URI'] = f'sqlite:///{DBNAME}'
    db.init_app(app)

    from .views import views
    from .auth import auth

    # BUG FIX: the keyword is url_prefix (not urlprefix); unknown options are
    # ignored by register_blueprint, so the intended prefix never applied.
    app.register_blueprint(views, url_prefix='/')
    app.register_blueprint(auth, url_prefix='/')
    return app
|
5,418 | 2b88bec388f3872b63d6bfe200e973635bb75054 | from django.shortcuts import render, get_object_or_404, redirect
from django.utils import timezone
from django.core.paginator import Paginator
from .models import post
from django.contrib.auth.decorators import login_required
from .forms import post_fo
from django.db.models import Q
def index(request):
    """List review posts, newest first, with optional text search, site
    filter, rate ordering, and 15-per-page pagination."""
    posts_list = post.objects.all().order_by('-date')
    site = request.GET.get('site')
    search_text = request.GET.get('search')
    if search_text != None:
        # Match the query against either the title or the country field
        # ('contry' is the model's spelling).
        posts_list = posts_list.filter(Q(title__contains=search_text) | Q(contry__contains=search_text))
    if site != 'None' and site != None:
        # The literal string 'None' is excluded -- presumably what the
        # template sends when no site filter is selected; confirm in template.
        posts_list = posts_list.filter(site=request.GET.get('site'))
    if request.GET.get('rate') == 'true':
        posts_list = posts_list.order_by('-rate')
    paginator = Paginator(posts_list, 15)  # 15 posts per page
    page = request.GET.get('page')
    posts = paginator.get_page(page)
    ratelist = [1,2,3,4,5]  # star-rating options for the template
    sitelist = ['All', 'Netfilx', 'Watcha', 'Tving', 'Qoop', 'Etc']
    return render(request, 'index.html',{'posts':posts, 'site':site, 'sitelist':sitelist, 'ratelist':ratelist, 'search':search_text})
def detail(request, post_id):
    """Render a single post; 404 when the id does not exist."""
    po = get_object_or_404(post, pk = post_id)
    ratelist = [1,2,3,4,5]  # star-rating options for the template
    return render(request, 'detail.html', {'post':po, 'ratelist':ratelist})
@login_required(login_url = '/login/')
def delet(request, post_id):
    """Delete the post with the given id and redirect to the index.

    NOTE(review): name is presumably a typo for `delete`; kept unchanged
    because URL configuration elsewhere may reference it.
    """
    po = get_object_or_404(post, pk = post_id)
    po.delete()
    return redirect(index)
@login_required(login_url = '/login/')
def new(request):
    """Create a new review post from the submitted form.

    GET (or an invalid POST) renders the form; a valid POST saves the post
    with the current timestamp and redirects to its detail page.
    """
    if request.method == 'POST':
        form = post_fo(request.POST)
        if form.is_valid():
            # Renamed the local from `post` to `new_post`: the original
            # shadowed the imported `post` model class inside this function.
            new_post = form.save(commit = False)
            new_post.date = timezone.now()
            new_post.save()
            return redirect(detail, new_post.id)
    else:
        form = post_fo()
    return render(request, 'new.html', {'form':form})
@login_required(login_url = '/login/')
def update(request, post_id):
    """Edit an existing post: POST saves the submitted fields and redirects
    to the detail page; GET renders the edit form."""
    po = get_object_or_404(post, pk = post_id)
    if request.method == 'POST':
        # Fields are read straight from the raw POST data (no form validation).
        po.site = request.POST.get("site")
        po.contry = request.POST.get("contry")
        po.genre = request.POST.get("genre")
        po.rate = request.POST.get("rate")
        po.title = request.POST.get("title")
        po.review = request.POST.get("review")
        po.date = timezone.now()
        po.save()
        return redirect(detail, po.id)
    else:
        return render(request, 'update.html', {'post_id':post_id, 'po':po})
|
5,419 | 566dab589cdb04332a92138b1a1faf53cd0f58b8 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from __future__ import annotations
from typing import List, Dict, NamedTuple, Union, Optional
import codecs
import collections
import enum
import json
import re
import struct
from refinery.lib.structures import StructReader
from refinery.units.formats.office.xtdoc import xtdoc, UnpackResult
from refinery.lib import chunks
from refinery.lib.types import ByteStr
from refinery.lib.mime import FileMagicInfo
from refinery.lib.tools import cached_property
class MsiType(enum.IntEnum):
    """
    Known data types for MSI table cell entries.
    """
    Long = 0x104             # 4-byte integer column (see MSITableColumnInfo.length)
    Short = 0x502            # 2-byte integer column
    Binary = 0x900           # reference to a binary stream
    String = 0xD00           # regular string
    StringLocalized = 0xF00  # localized string
    Unknown = 0              # fallback when the attribute bits match no known type

    def __str__(self):
        # Print the bare member name (e.g. "Long"), not "MsiType.Long".
        return self.name
class MSITableColumnInfo(NamedTuple):
"""
Represents information about an MSI table column. See also:
https://doxygen.reactos.org/db/de4/msipriv_8h.html
"""
number: int
attributes: int
@property
def type(self) -> MsiType:
try:
if self.is_integer:
return MsiType(self.attributes & 0xFFF)
else:
return MsiType(self.attributes & 0xF00)
except Exception:
return MsiType.Unknown
@property
def is_integer(self) -> bool:
return self.attributes & 0x0F00 < 0x800
@property
def is_key(self) -> bool:
return self.attributes & 0x2000 == 0x2000
@property
def is_nullable(self) -> bool:
return self.attributes & 0x1000 == 0x1000
@property
def length(self) -> int:
vt = self.type
if vt is MsiType.Long:
return 4
if vt is MsiType.Short:
return 2
return self.attributes & 0xFF
@property
def struct_format(self) -> str:
vt = self.type
if vt is MsiType.Long:
return 'I'
elif vt is MsiType.Short:
return 'H'
else:
return 'H'
class MSIStringData:
    """Parsed contents of the !_StringData / !_StringPool MSI streams.

    Table cells refer to strings by 1-based index; index 0 means "no string".
    """

    def __init__(self, string_data: ByteStr, string_pool: ByteStr):
        data = StructReader(string_data)
        pool = StructReader(string_pool)
        self.strings: List[bytes] = []
        # Reference counts as stored in the pool vs. as observed via ref();
        # the caller compares them to detect inconsistencies.
        self.provided_ref_count: List[int] = []
        self.computed_ref_count: List[int] = []
        self.codepage = pool.u16()
        self._unknown = pool.u16()
        # The pool is a sequence of (size, refcount) pairs; the sizes
        # partition the concatenated string data stream.
        while not pool.eof:
            size, rc = pool.read_struct('<HH')
            string = data.read_bytes(size)
            self.strings.append(string)
            self.provided_ref_count.append(rc)
            self.computed_ref_count.append(0)

    @cached_property
    def codec(self):
        # Map the MSI codepage number to a Python codec, falling back to latin1.
        try:
            return codecs.lookup(F'cp{self.codepage}').name
        except Exception:
            xtmsi.log_info('failed looking up codec', self.codepage)
            return 'latin1'

    def __len__(self):
        return len(self.strings)

    def __iter__(self):
        # Iterate the valid 1-based string indices.
        yield from range(1, len(self) + 1)

    def __contains__(self, index):
        return 0 < index <= len(self)

    def ref(self, index: int, increment=True) -> Union[str, bytes]:
        """Return the string at 1-based `index`, decoded with the pool's
        codec; by default the access is counted in computed_ref_count."""
        assert index > 0
        index -= 1
        if increment:
            self.computed_ref_count[index] += 1
        data = self.strings[index]
        data = data.decode(self.codec)
        return data
class xtmsi(xtdoc):
    """
    Extract files and metadata from Microsoft Installer (MSI) archives. The synthetic file {FN} contains
    parsed MSI table information, similar to the output of the Orca tool. Binary streams are placed in a
    virtual folder called "Binary", and extracted scripts from custom actions are separately extracted in
    a virtual folder named "Action".
    """
    _SYNTHETIC_STREAMS_FILENAME = 'MsiTables.json'

    # https://learn.microsoft.com/en-us/windows/win32/msi/summary-list-of-all-custom-action-types
    _CUSTOM_ACTION_TYPES = {
        0x01: 'DLL file stored in a Binary table stream.',
        0x02: 'EXE file stored in a Binary table stream.',
        0x05: 'JScript file stored in a Binary table stream.',
        0x06: 'VBScript file stored in a Binary table stream.',
        0x11: 'DLL file that is installed with a product.',
        0x12: 'EXE file that is installed with a product.',
        0x13: 'Displays a specified error message and returns failure, terminating the installation.',
        0x15: 'JScript file that is installed with a product.',
        0x16: 'VBScript file that is installed with a product.',
        0x22: 'EXE file having a path referencing a directory.',
        0x23: 'Directory set with formatted text.',
        0x25: 'JScript text stored in this sequence table.',
        0x26: 'VBScript text stored in this sequence table.',
        0x32: 'EXE file having a path specified by a property value.',
        0x33: 'Property set with formatted text.',
        0x35: 'JScript text specified by a property value.',
        0x36: 'VBScript text specified by a property value.',
    }

    def unpack(self, data):
        """Unpack OLE streams via xtdoc, parse the MSI table metadata, and
        yield one UnpackResult per remaining stream plus extracted custom
        action scripts and the synthetic JSON table dump."""
        streams = {result.path: result for result in super().unpack(data)}

        def stream(name: str):
            # Pop so consumed metadata streams are not emitted as results.
            return streams.pop(name).get_data()

        def column_formats(table: Dict[str, MSITableColumnInfo]) -> str:
            # struct format string covering one row of the given table
            return ''.join(v.struct_format for v in table.values())

        def stream_to_rows(data: ByteStr, row_format: str):
            # Table streams store cells column-major; read each column as one
            # array and transpose into row lists.
            row_size = struct.calcsize(F'<{row_format}')
            row_count = int(len(data) / row_size)
            reader = StructReader(data)
            columns = [reader.read_struct(F'<{sc*row_count}') for sc in row_format]
            for i in range(row_count):
                yield [c[i] for c in columns]

        tables: Dict[str, Dict[str, MSITableColumnInfo]] = collections.defaultdict(collections.OrderedDict)
        strings = MSIStringData(stream('!_StringData'), stream('!_StringPool'))

        # The !_Columns stream defines every table's column layout.
        for tbl_name_id, col_number, col_name_id, col_attributes in stream_to_rows(stream('!_Columns'), 'HHHH'):
            tbl_name = strings.ref(tbl_name_id)
            col_name = strings.ref(col_name_id)
            tables[tbl_name][col_name] = MSITableColumnInfo(col_number, col_attributes)

        # Cross-check the table list from !_Tables against !_Columns.
        table_names_given = {strings.ref(k) for k in chunks.unpack(stream('!_Tables'), 2, False)}
        table_names_known = set(tables)
        for name in table_names_known - table_names_given:
            self.log_warn(F'table name known but not given: {name}')
        for name in table_names_given - table_names_known:
            self.log_warn(F'table name given but not known: {name}')

        class ScriptItem(NamedTuple):
            # Row in the CustomAction table plus the script file extension
            # (None means the target must be parsed as formatted text).
            row_index: int
            extension: Optional[str]

        processed_table_data: Dict[str, List[Dict[str, str]]] = {}
        tbl_properties: Dict[str, str] = {}  # Property name -> value
        tbl_files: Dict[str, str] = {}       # File key -> file name
        tbl_components: Dict[str, str] = {}  # Component key -> directory ref
        postprocessing: List[ScriptItem] = []

        def format_string(string: str):
            # Resolve MSI "Formatted" references ([Prop], [#File], [$Comp], [%ENV]):
            # https://learn.microsoft.com/en-us/windows/win32/msi/formatted
            def _replace(match: re.Match[str]):
                _replace.done = False
                prefix, name = match.groups()
                if not prefix:
                    tbl = tbl_properties
                elif prefix in '%':
                    name = name.rstrip('%').upper()
                    return F'%{name}%'
                elif prefix in '!#':
                    tbl = tbl_files
                elif prefix in '$':
                    tbl = tbl_components
                else:
                    raise ValueError
                return tbl.get(name, '')
            # Repeat until no substitution fired, to resolve nested references.
            while True:
                _replace.done = True
                string = re.sub(R'''(?x)
                    \[ # open square brackent
                    (?![~\\]) # not followed by escapes
                    ([%$!#]?) # any of the valid prefix characters
                    ([^[\]{}]+) # no brackets or braces
                    \]''', _replace, string)
                if _replace.done:
                    break
            string = re.sub(r'\[\\(.)\]', r'\1', string)
            string = string.replace('[~]', '\0')
            return string

        for table_name, table in tables.items():
            stream_name = F'!{table_name}'
            if stream_name not in streams:
                continue
            processed = []
            info = list(table.values())
            for r, row in enumerate(stream_to_rows(stream(stream_name), column_formats(table))):
                values = []
                for index, value in enumerate(row):
                    vt = info[index].type
                    if vt is MsiType.Long:
                        # Nonzero integers are stored with a bias of 2^31 / 2^15.
                        if value != 0:
                            value -= 0x80000000
                    elif vt is MsiType.Short:
                        if value != 0:
                            value -= 0x8000
                    elif value in strings:
                        value = strings.ref(value)
                    elif not info[index].is_integer:
                        value = ''
                    values.append(value)
                if table_name == 'Property':
                    tbl_properties[values[0]] = values[1]
                if table_name == 'File':
                    # BUG FIX: was written to tbl_properties, leaving tbl_files
                    # permanently empty so [#Key]/[!Key] references in
                    # format_string always resolved to ''.
                    tbl_files[values[0]] = values[2]
                if table_name == 'Component':
                    # BUG FIX: was written to tbl_properties, leaving
                    # tbl_components permanently empty so [$Key] references
                    # in format_string always resolved to ''.
                    tbl_components[values[0]] = F'%{values[2]}%'
                entry = dict(zip(table, values))
                einfo = {t: i for t, i in zip(table, info)}
                if table_name == 'MsiFileHash':
                    # Undo the sign bias on the four hash dwords and render hex.
                    entry['Hash'] = struct.pack(
                        '<IIII',
                        row[2] ^ 0x80000000,
                        row[3] ^ 0x80000000,
                        row[4] ^ 0x80000000,
                        row[5] ^ 0x80000000,
                    ).hex()
                if table_name == 'CustomAction':
                    code = row[1] & 0x3F
                    try:
                        entry['Comment'] = self._CUSTOM_ACTION_TYPES[code]
                    except LookupError:
                        pass
                    t = einfo.get('Target')
                    # 0x25/0x26 are inline JScript/VBScript; 0x33 is formatted
                    # text that may embed scripts.
                    c = {0x25: 'js', 0x26: 'vbs', 0x33: None}
                    if code in c and t and not t.is_integer:
                        postprocessing.append(ScriptItem(r, c[code]))
                processed.append(entry)
            if processed:
                processed_table_data[table_name] = processed

        # Extract inline scripts from the CustomAction rows collected above.
        ca = processed_table_data.get('CustomAction', None)
        for item in postprocessing:
            entry = ca[item.row_index]
            try:
                path: str = entry['Action']
                data: str = entry['Target']
            except KeyError:
                continue
            root = F'Action/{path}'
            if item.extension:
                path = F'{root}.{item.extension}'
                streams[path] = UnpackResult(path, data.encode(self.codec))
                continue
            # Type 0x33: formatted text; scripts are embedded as
            # name\x02value pairs separated by \x01.
            data = format_string(data)
            parts = [part.partition('\x02') for part in data.split('\x01')]
            if not all(part[1] == '\x02' for part in parts):
                continue
            for name, _, script in parts:
                if not name.lower().startswith('script'):
                    continue
                if not script:
                    continue
                path = F'{root}.{name}'
                streams[path] = UnpackResult(path, script.encode(self.codec))

        for ignored_stream in [
            '[5]SummaryInformation',
            '[5]DocumentSummaryInformation',
            '[5]DigitalSignature',
            '[5]MsiDigitalSignatureEx'
        ]:
            streams.pop(ignored_stream, None)

        # Compare observed vs. stored string reference counts (debug aid).
        inconsistencies = 0
        for k in range(len(strings)):
            c = strings.computed_ref_count[k]
            p = strings.provided_ref_count[k]
            if c != p and not self.log_debug(F'string reference count computed={c} provided={p}:', strings.ref(k + 1, False)):
                inconsistencies += 1
        if inconsistencies:
            self.log_info(F'found {inconsistencies} incorrect string reference counts')

        def fix_msi_path(path: str):
            # Move "Binary.Name" streams into a virtual "Binary/" folder.
            prefix, dot, name = path.partition('.')
            if dot == '.' and prefix.lower() == 'binary':
                path = F'{prefix}/{name}'
            return path

        streams = {fix_msi_path(path): item for path, item in streams.items()}
        ds = UnpackResult(self._SYNTHETIC_STREAMS_FILENAME,
            json.dumps(processed_table_data, indent=4).encode(self.codec))
        streams[ds.path] = ds

        for path in sorted(streams):
            streams[path].path = path
            yield streams[path]

    @classmethod
    def handles(cls, data: bytearray):
        """Accept OLE compound files whose file magic resolves to `msi`."""
        if not data.startswith(B'\xD0\xCF\x11\xE0'):
            return False
        return FileMagicInfo(data).extension == 'msi'
xtmsi.__doc__ = xtmsi.__doc__.format(FN=xtmsi._SYNTHETIC_STREAMS_FILENAME)
|
5,420 | d2da346e11fa9508cab22a3a2fd3ca57a0a755e6 | # Number Guessing Game
# Number Guessing Game
import random

# Secret number in the range 1-10. randint's upper bound is inclusive, so the
# bound must be 10 (the original randint(1, 11) could also yield 11, which
# contradicts the stated 1-10 range).
secret_number = random.randint(1, 10)

# Count the first guess as attempt number one.
num_guesses = 1

# The name is text, not a number: the original wrapped input() in int() and
# was missing a closing quote, which made the whole script a SyntaxError.
name = input("Enter your name: ")
print(name)
guess = int(input("Enter a guess:"))
print(guess)

# Keep prompting until the guess matches, hinting higher/lower each round.
# (The original condition `guess != secrectNumber > 5` was a chained
# comparison that also required the secret to exceed 5; the stray
# `if guess == number: break` referenced an undefined name.)
while guess != secret_number:
    if guess > secret_number:
        print("the secret number is less than " + str(guess))
    else:
        print("the secret number is greater than " + str(guess))
    num_guesses += 1
    guess = int(input("Enter a number"))

print("Zee gives congrats to " + name + "! the number is " + str(secret_number))
print("it took you " + str(num_guesses) + " guesses. Great job!")
|
5,421 | c0b5a0605bdfcb7cb84211d3ad0d24f78f838cdf | import os
import pytest
def get_client():
    """Build a Flask test client with caching configured from the environment.

    Imported lazily so FLASK_ENV (set by the fixtures below) is read when the
    client is built -- presumably is_caching_enabled depends on it; confirm
    in apiserver.
    """
    from apiserver import app, is_caching_enabled
    app.config['TESTING'] = True
    app.enable_cache(is_caching_enabled())
    return app.test_client()
@pytest.fixture
def client():
    """Test client created under FLASK_ENV=testing."""
    os.environ['FLASK_ENV'] = 'testing'
    yield get_client()
@pytest.fixture
def client_with_caching():
    """Test client created under FLASK_ENV=production (caching presumably on)."""
    os.environ['FLASK_ENV'] = 'production'
    yield get_client()
|
5,422 | b76d3b6a4c15833ee2b25fede5923e1fe1dc4dd7 | # stdlib
from typing import Any
# third party
import numpy as np
# syft absolute
import syft as sy
from syft.core.common.uid import UID
from syft.core.node.new.action_object import ActionObject
from syft.core.node.new.action_store import DictActionStore
from syft.core.node.new.context import AuthedServiceContext
from syft.core.node.new.credentials import SIGNING_KEY_FOR
from syft.core.node.new.credentials import SyftSigningKey
from syft.core.node.new.credentials import SyftVerifyKey
from syft.core.node.new.user import User
from syft.core.node.new.user import UserCreate
from syft.core.node.new.user import UserView
from syft.core.node.new.user_service import UserService
from syft.core.node.worker import Worker
test_signing_key_string = (
"b7803e90a6f3f4330afbd943cef3451c716b338b17a9cf40a0a309bc38bc366d"
)
test_verify_key_string = (
"08e5bcddfd55cdff0f7f6a62d63a43585734c6e7a17b2ffb3f3efe322c3cecc5"
)
test_signing_key_string_2 = (
"8f4412396d3418d17c08a8f46592621a5d57e0daf1c93e2134c30f50d666801d"
)
test_verify_key_string_2 = (
"833035a1c408e7f2176a0b0cd4ba0bc74da466456ea84f7ba4e28236e7e303ab"
)
def test_signing_key() -> None:
    """Round-trip SyftSigningKey/SyftVerifyKey through their hex string forms."""
    # we should keep our representation in hex ASCII
    # first convert the string representation into a key
    test_signing_key = SyftSigningKey.from_string(test_signing_key_string)
    assert isinstance(test_signing_key, SyftSigningKey)
    # make sure it converts back to the same string
    assert str(test_signing_key) == test_signing_key_string
    # make a second one and verify that its equal
    test_signing_key_2 = SyftSigningKey.from_string(test_signing_key_string)
    assert test_signing_key == test_signing_key_2
    # get the derived verify key
    test_verify_key = test_signing_key.verify_key
    assert isinstance(test_verify_key, SyftVerifyKey)
    # make sure both types provide the verify key as a string
    assert test_verify_key_string == test_verify_key.verify
    assert test_verify_key_string == test_signing_key.verify
    # make sure that we don't print signing key but instead the verify key
    assert SIGNING_KEY_FOR in test_signing_key.__repr__()
    assert test_verify_key_string in test_signing_key.__repr__()
    # get another verify key from the same string and make sure its equal
    test_verify_key_2 = SyftVerifyKey.from_string(test_verify_key_string)
    assert test_verify_key == test_verify_key_2
def test_action_store() -> None:
    """Store an ActionObject; the owner key can read it, another key cannot."""
    test_signing_key = SyftSigningKey.from_string(test_signing_key_string)
    action_store = DictActionStore()
    uid = UID()
    raw_data = np.array([1, 2, 3])
    test_object = ActionObject.from_obj(raw_data)
    set_result = action_store.set(
        uid=uid, credentials=test_signing_key, syft_object=test_object
    )
    assert set_result.is_ok()
    test_object_result = action_store.get(uid=uid, credentials=test_signing_key)
    assert test_object_result.is_ok()
    assert test_object == test_object_result.ok()
    # a different verify key must be denied access
    test_verift_key_2 = SyftVerifyKey.from_string(test_verify_key_string_2)
    test_object_result_fail = action_store.get(uid=uid, credentials=test_verift_key_2)
    assert test_object_result_fail.is_err()
    assert "denied" in test_object_result_fail.err()
def test_user_transform() -> None:
    """UserCreate -> User hashes the password; User -> UserView drops secrets."""
    new_user = UserCreate(
        email="alice@bob.com",
        name="Alice",
        password="letmein",
        password_verify="letmein",
    )
    # assert new_user.id is None
    assert new_user.email == "alice@bob.com"
    assert new_user.name == "Alice"
    assert new_user.password == "letmein"
    assert new_user.password_verify == "letmein"
    print("new user", new_user)
    user = new_user.to(User)
    print("got a user", user)
    # assert user.id is not None # need to insert / update first
    assert user.email == "alice@bob.com"
    assert user.name == "Alice"
    # the plaintext password must be replaced by a salted hash
    assert user.hashed_password is not None
    assert user.salt is not None
    edit_user = user.to(UserView)
    # assert edit_user.id is not None # need to insert / update first
    assert edit_user.email == "alice@bob.com"
    assert edit_user.name == "Alice"
    # the view must expose no credential material
    assert edit_user.password is None
    assert edit_user.password_verify is None
    assert not hasattr(edit_user, "signing_key")
def test_user_service() -> None:
    """UserService.create returns a persisted view retrievable by its UID."""
    test_signing_key = SyftSigningKey.from_string(test_signing_key_string)
    worker = Worker()
    user_service = worker.get_service(UserService)
    # create a user request payload
    new_user = UserCreate(
        email="alice@bob.com",
        name="Alice",
        password="letmein",
        password_verify="letmein",
    )
    # create a context authenticated with our verify key
    context = AuthedServiceContext(node=worker, credentials=test_signing_key.verify_key)
    # call the create function
    user_view = user_service.create(context=context, user_create=new_user)
    # the returned view mirrors the request
    assert user_view is not None
    assert user_view.email == new_user.email
    assert user_view.name == new_user.name
    # storage assigned a UID
    assert user_view.id is not None
    # querying by that UID returns an equal view
    user_view_2 = user_service.view(context=context, uid=user_view.id)
    assert user_view_2 is not None
    assert user_view == user_view_2
def test_syft_object_serde() -> None:
    """UserCreate survives a bytes round-trip through syft's serde."""
    # syft absolute
    import syft as sy

    original = UserCreate(
        email="alice@bob.com",
        name="Alice",
        password="letmein",
        password_verify="letmein",
    )
    round_tripped = sy.deserialize(sy.serialize(original, to_bytes=True), from_bytes=True)
    assert original == round_tripped
def test_worker() -> None:
    """A Worker can be constructed and is truthy."""
    assert Worker()
def test_action_object_add() -> None:
    """__add__ on ActionObject matches elementwise numpy addition."""
    base = np.array([1, 2, 3])
    wrapped = ActionObject.from_obj(base)
    total = wrapped + wrapped
    # Adding the array to itself equals doubling it.
    assert (total.syft_action_data == base * 2).all()
def test_action_object_hooks() -> None:
    """Pre/post hooks registered on __add__ can rewrite inputs and outputs."""
    raw_data = np.array([1, 2, 3])
    action_object = ActionObject.from_obj(raw_data)

    def pre_add(context: Any, *args: Any, **kwargs: Any) -> Any:
        # Pre-hook: double the hooked operand's payload before the add runs.
        new_value = args[0]
        new_value.syft_action_data = new_value.syft_action_data * 2
        return context, (new_value,), kwargs

    def post_add(context: Any, name: str, new_result: Any) -> Any:
        # Post-hook: collapse the elementwise result into a scalar sum.
        return sum(new_result.syft_action_data)

    action_object._syft_pre_hooks__["__add__"] = [pre_add]
    action_object._syft_post_hooks__["__add__"] = [post_add]
    result = action_object + action_object
    x = result.syft_action_data
    # Expected scalar: sum((raw*2) + raw) = sum([3, 6, 9]) = 18.
    y = sum((raw_data * 2) + raw_data)
    assert y == 18
    assert x == y
    # Clean up so later tests see unhooked arithmetic on this object.
    action_object._syft_pre_hooks__["__add__"] = []
    action_object._syft_post_hooks__["__add__"] = []
def test_worker_serde() -> None:
    """Worker identity (signing key and id) survives a serde round-trip."""
    original = Worker()
    restored = sy.deserialize(sy.serialize(original, to_bytes=True), from_bytes=True)
    assert restored.signing_key == original.signing_key
    assert restored.id == original.id
|
5,423 | 8b4590cf2d8c040b6ab31c63baff0d83ab818641 | '''
config -- config manipulator module for share
@author: shimarin
@copyright: 2014 Walbrix Corporation. All rights reserved.
@license: proprietary
'''
import json,argparse
import oscar,groonga
def parser_setup(parser):
    # Register CLI arguments for this sub-command and bind its handler.
    parser.add_argument("base_dir")
    parser.add_argument("operations", nargs="*")  # 0 args: dump all, 1: get, 2: set
    parser.set_defaults(func=run)
def get(base_dir, config_name = None):
    """Read config rows from the Groonga "Config" table.

    With config_name: return that key's JSON-decoded value, or None.
    Without: return a dict of every key -> JSON-decoded value.
    """
    with oscar.context(base_dir) as context:
        with context.command("select") as command:
            command.add_argument("table", "Config")
            # Narrow to a single row; the key is escaped before embedding.
            if config_name: command.add_argument("filter", "_key == \"%s\"" % command.escape(config_name))
            # Groonga select output: [[count, schema], row, row, ...] -> drop the header.
            rows = json.loads(command.execute())[0][2:]
            if config_name:
                return json.loads(rows[0][2]) if len(rows) > 0 else None
            #else
            result = {}
            for row in rows:
                # row[1] is _key, row[2] is the JSON-encoded value column.
                result[row[1]] = json.loads(row[2])
            return result
def put(base_dir, config_name, value):
    # Upsert a single config row; the value is stored JSON-encoded.
    with oscar.context(base_dir, oscar.min_free_blocks) as context:
        groonga.load(context, "Config", {"_key":config_name,"value":oscar.to_json(value)})
def put_all(base_dir, configs):
    """Upsert many config rows at once (dict of key -> value).

    FIX: the original used a Python-2-only tuple-parameter lambda
    (``lambda (x, y): ...``), a SyntaxError on Python 3.  The equivalent
    list comprehension below behaves identically on Python 2 (where
    ``map`` returned a list) and also runs on Python 3.
    """
    with oscar.context(base_dir, oscar.min_free_blocks) as context:
        rows = [{"_key": key, "value": oscar.to_json(val)} for key, val in configs.items()]
        groonga.load(context, "Config", rows)
def show_one(base_dir, config_name):
    # Print one raw config row (Python 2 print statement, like the rest of this file).
    with oscar.context(base_dir) as context:
        print groonga.get(context, "Config", config_name)
def set_one(base_dir, config_name, value):
    """Store *value* under *config_name* in the Config table.

    BUG FIX: the original loaded the literal strings "config_name" and
    "value" instead of the arguments, so every call overwrote the same
    bogus row.  Now mirrors put(): the key is the argument and the value
    is JSON-encoded.
    """
    with oscar.context(base_dir, oscar.min_free_blocks) as context:
        groonga.load(context, "Config", {"_key": config_name, "value": oscar.to_json(value)})
def run(args):
    # Dispatch on the number of positional operations:
    #   0 -> dump all config, 1 -> get one key, 2 -> set key to a JSON value.
    if len(args.operations) == 0:
        print get(args.base_dir)
    elif len(args.operations) == 1:
        print get(args.base_dir, args.operations[0])
    elif len(args.operations) == 2:
        put(args.base_dir, args.operations[0], json.loads(args.operations[1]))
    else:
        raise Exception("Invalid number of arguments")
if __name__ == "__main__":
    # Stand-alone entry point: parse CLI args and run the selected action.
    parser = argparse.ArgumentParser()
    parser_setup(parser)
    args = parser.parse_args()
    args.func(args)
|
5,424 | df00cc501b7b682cc1f4fbc9ae87a27984e6b5ef | class Boundary():
def __init__(self, py5_inst, x1, y1, x2, y2):
    """Wall segment from (x1, y1) to (x2, y2), drawn via a py5 sketch instance."""
    self.py5 = py5_inst
    # Endpoints stored as py5 vectors so .x/.y access works in show().
    self.a = self.py5.create_vector(x1, y1)
    self.b = self.py5.create_vector(x2, y2)
def show(self):
    # Draw the segment as a white line in the current sketch.
    self.py5.stroke(255)
    self.py5.line(self.a.x, self.a.y, self.b.x, self.b.y)
|
5,425 | bbd421d39894af163b56e7104c3b29a45635d5a3 | from math import *
def heron(a, b, c):
    """Return the area of the triangle with side lengths a, b, c (Heron's formula).

    Raises:
        ValueError: if the triangle inequality is violated (degenerate
        triangles, where the two shorter sides sum to the longest, included).

    FIX: the half-perimeter is now computed with float division; under
    Python 2 (this file uses a Python 2 ``print`` statement) the original
    ``(a + b + c)/2`` truncated odd integer perimeters and returned a
    wrong — possibly zero — area.
    """
    sides = [a, b, c]
    sides.sort()
    if sides[0] + sides[1] <= sides[-1]:
        raise ValueError ("Warunek trojkata jest nie spelniony")
    s = (a + b + c) / 2.0
    return sqrt(s * (s - a) * (s - b) * (s - c))
print heron(7, 4, 3) |
5,426 | cad881dd29be16de8375b3ce6e4a437562a05097 | files = [
"arria2_ddr3.qip"
]
|
5,427 | f8d0cc9cb0e5f8adf9077ffb39dd6abedfedaa12 | a = int(input('점수를 입력하세요'))
if a >= 70 :
print:('통과입니다.')
print:('축하합니다.')
else :
print:('불합격입니다.')
print("안녕")
|
5,428 | 3bb408f2b2ac63a2555258c05844881ccdfc5057 | import math
import pygame
import numpy as np
from main import Snake, SCREEN_WIDTH, SCREEN_HEIGHT, drawGrid, GRIDSIZE
from random import randint
FOOD_REWARD = 5
DEATH_PENALTY = 10
MOVE_PENALTY = 0.1
LIVES = 5
SQUARE_COLOR = (80,80,80)
SNAKE_HEAD_COLOR = ((0,51,0), (0,0,153), (102,0,102))
SNAKE_COLOR = ((154,205,50), (50,50,250), (50,0,250))
FOOD_COLOR = (255,69,0)
class SnakeGame:
    """Grid snake game with a scripted enemy snake, usable as an RL environment.

    Cells are 2-element ``[axis0, axis1]`` lists.  Coordinates 0 and
    ``width``/``height`` act as walls (see check_collisions), so the playable
    area is 1..width-1 x 1..height-1.  ``start()`` and ``step()`` both return
    the observation tuple ``(lives, score, player, enemy, food)``.
    """

    def __init__(self, board_width=10, board_height=10, gui=False, enemy_epsilon=0.1):
        # enemy_epsilon: probability the enemy moves randomly instead of
        # greedily toward the food.
        self.score = 0
        self.board = {'width': board_width, 'height': board_height}
        self.gui = gui
        self.lives = LIVES
        self.player = []
        self.enemy = []
        self.enemy_epsilon = enemy_epsilon
        self.food = []

    def start(self):
        """Initialise both snakes and the food; return the first observation."""
        self.player_init(LIVES)
        self.enemy_init()
        self.generate_food()
        if self.gui: self.render_init()
        return self.generate_observations()

    def player_init(self, lives=LIVES):
        # Spawn a 3-segment player on the left half of the board, clear of
        # the walls, in a random orientation (head at index 0).
        x = randint(3, math.ceil(self.board["width"] / 2) - 1)
        y = randint(3, self.board["height"] - 3)
        self.player = []
        vertical = randint(0, 1) == 0
        for i in range(3):
            point = [x + i, y] if vertical else [x, y + i]
            self.player.insert(0, point)
        self.lives = lives

    def enemy_init(self):
        # Spawn a 3-segment enemy on the right half; retry on overlap with
        # the player's body.
        x = randint(math.ceil(self.board["width"] / 2), self.board["width"] - 3)
        y = randint(3, self.board["height"] - 3)
        self.enemy = []
        vertical = randint(0, 1) == 0
        for i in range(3):
            point = [x + i, y] if vertical else [x, y + i]
            self.enemy.insert(0, point)
        if self.enemy[0] in self.player[1:-1]:
            self.enemy_init()  # retry

    def generate_food(self):
        # Sample until the food lands on a cell no snake occupies.
        # BUG FIX: the upper bounds were randint(1, width)/randint(1, height),
        # which could place food ON the wall coordinate (== width/height) that
        # check_collisions treats as lethal — collecting that food always cost
        # a life.  Bounds are now width-1/height-1.
        food = []
        while not food:
            food = [randint(1, self.board["width"] - 1),
                    randint(1, self.board["height"] - 1)]
            if food in self.enemy: food = []
            elif food in self.player: food = []
        self.food = food

    def get_enemy_movement(self):
        """Greedy move toward the food with epsilon-random exploration.

        Key encoding (see create_new_point):
        0 = UP (-1 on axis 0), 1 = RIGHT (+1 on axis 1),
        2 = DOWN (+1 on axis 0), 3 = LEFT (-1 on axis 1).
        """
        if np.random.random() <= self.enemy_epsilon:
            return randint(0, 3)
        if self.food[0] > self.enemy[0][0]:
            return 2
        elif self.food[0] < self.enemy[0][0]:
            return 0
        elif self.food[1] > self.enemy[0][1]:
            return 1
        elif self.food[1] < self.enemy[0][1]:
            return 3
        return randint(0, 3)

    def step(self, key):
        """Advance one tick: move both snakes, handle food and collisions.

        key: 0=UP, 1=RIGHT, 2=DOWN, 3=LEFT.
        Returns (lives, score, player, enemy, food).
        Raises ``Exception("Game over")`` once all lives are spent.
        """
        if self.is_done():
            self.end_game()
        if not self.food:
            self.generate_food()
        self.create_new_point(self.player, key)
        self.create_new_point(self.enemy, self.get_enemy_movement())
        player_ate = False
        if self.food_eaten(self.player):
            self.score += FOOD_REWARD
            self.generate_food()
            player_ate = True
        else:
            # Not eating: drop the tail (constant length) and pay the move tax.
            self.remove_last_point(self.player)
            self.score -= MOVE_PENALTY
        # The player wins ties: the enemy only grows if the player did not eat.
        if (not player_ate) and self.food_eaten(self.enemy):
            self.generate_food()
        else:
            self.remove_last_point(self.enemy)
        self.check_collisions()
        if not self.food:
            self.generate_food()
        return self.generate_observations()

    def create_new_point(self, snake, key):
        # Prepend a new head one cell in the direction encoded by key.
        new_point = [snake[0][0], snake[0][1]]
        if key == 0:    # UP
            new_point[0] -= 1
        elif key == 1:  # RIGHT
            new_point[1] += 1
        elif key == 2:  # DOWN
            new_point[0] += 1
        elif key == 3:  # LEFT
            new_point[1] -= 1
        snake.insert(0, new_point)

    def food_eaten(self, snake):
        # True if any segment (normally the head after a move) sits on the food.
        return self.food in snake

    def remove_last_point(self, snake):
        # Drop the tail segment.
        snake.pop()

    def check_collisions(self):
        # A snake dies on: wall hit, hitting its own body (current tail
        # excluded — presumably because that cell is treated as vacated this
        # tick; confirm), or hitting the other snake.
        player_collided = False
        enemy_collided = False
        if (self.player[0][0] == 0 or
                self.player[0][0] == self.board["width"] or
                self.player[0][1] == 0 or
                self.player[0][1] == self.board["height"] or
                self.player[0] in self.player[1:-1] or
                self.player[0] in self.enemy):
            player_collided = True
        if (self.enemy[0][0] == 0 or
                self.enemy[0][0] == self.board["width"] or
                self.enemy[0][1] == 0 or
                self.enemy[0][1] == self.board["height"] or
                self.enemy[0] in self.player or
                self.enemy[0] in self.enemy[1:-1]):
            enemy_collided = True
        if player_collided:
            self.lives -= 1
            if not self.is_done():
                self.player_init(self.lives)
        if enemy_collided:
            self.enemy_init()  # enemy moves randomly but has infinite lives

    def generate_observations(self):
        """Return the observation tuple (lives, score, player, enemy, food)."""
        return self.lives, self.score, self.player, self.enemy, self.food

    # --- Rendering (pygame) -------------------------------------------------

    def render_init(self):
        """Create the pygame window, drawing surface and HUD font."""
        pygame.init()
        self.clock = pygame.time.Clock()
        self.screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT), 0, 32)
        self.surface = pygame.Surface(self.screen.get_size())
        self.surface = self.surface.convert()
        drawGrid(self.surface)
        self.myfont = pygame.font.SysFont("bahnschrift", 20)

    def step_render(self, key):
        """step() plus drawing; returns the same observation tuple."""
        self.clock.tick(3)  # cap the frame rate so the game is watchable
        drawGrid(self.surface)
        if not self.food:
            self.generate_food()
        _lives, _score, _player, _enemy, _food = self.step(key)
        self.draw_snake(self.player, self.surface, SNAKE_COLOR[0], SNAKE_HEAD_COLOR[0])
        self.draw_snake(self.enemy, self.surface, SNAKE_COLOR[1], SNAKE_HEAD_COLOR[1])
        if not self.food:
            self.generate_food()
        self.draw_food(self.surface, FOOD_COLOR)
        self.screen.blit(self.surface, (0, 0))
        text1 = self.myfont.render("Score: {0} Lives: {1}".format(round(self.score, 2), self.lives), True, (250, 250, 250))
        self.screen.blit(text1, (5, 10))
        pygame.display.update()
        return _lives, _score, _player, _enemy, _food

    def draw_snake(self, snake, surface, color, head_color):
        # The first segment is the head and gets its own colour.
        drew_head = False
        for p in snake:
            curr_color = color
            if not drew_head:
                curr_color = head_color
                drew_head = True
            r = pygame.Rect((p[0]*GRIDSIZE, p[1]*GRIDSIZE), (GRIDSIZE, GRIDSIZE))
            pygame.draw.rect(surface, curr_color, r)
            pygame.draw.rect(surface, SQUARE_COLOR, r, 1)

    def draw_food(self, surface, color):
        # One filled cell with the grid-line border on top.
        r = pygame.Rect((self.food[0] * GRIDSIZE, self.food[1] * GRIDSIZE), (GRIDSIZE, GRIDSIZE))
        pygame.draw.rect(surface, color, r)
        pygame.draw.rect(surface, SQUARE_COLOR, r, 1)

    def is_done(self):
        # The episode ends when the player has no lives left.
        return self.lives <= 0

    def render_destroy(self):
        print("Snake Player Final Score:", self.score)

    def end_game(self):
        # Signal episode termination to the caller via an exception.
        if self.gui: self.render_destroy()
        raise Exception("Game over")
5,429 | 88e34878cdad908ed4ac30da82355aaa46ed719b | from rest_framework import serializers
from django.contrib import auth
from rest_framework.exceptions import ValidationError
from django.contrib.auth.password_validation import validate_password
from django.utils.translation import gettext as _
from rest_users.utils.api import _build_initial_user
User = auth.get_user_model()
class LoginUserSerializer(serializers.Serializer):
    """Accepts login credentials and resolves the matching user account."""
    login = serializers.CharField()
    password = serializers.CharField()

    def get_authenticated_user(self):
        """Try the login value as the username field first, then as email.

        Returns the authenticated user, or None if no backend accepts the
        credentials under either field.
        """
        login = self.validated_data['login']
        password = self.validated_data['password']
        for field_name in (User.USERNAME_FIELD, User.EMAIL_FIELD):
            user = auth.authenticate(**{field_name: login, 'password': password})
            if user:
                return user
        return None
class LogoutSerializer(serializers.Serializer):
    """Input for the logout endpoint."""
    # Presumably instructs the view to also revoke the auth token on
    # logout — confirm against the consuming view.
    revoke_token = serializers.BooleanField(default=False)
class UserGetProfileSerializer(serializers.ModelSerializer):
    """Read serializer for the user model; the password hash is excluded.

    NOTE(review): identical to UserProfileSerializer below — consider
    consolidating if the two are not meant to diverge.
    """
    class Meta:
        model = User
        exclude = ('password',)
class UserProfileSerializer(serializers.ModelSerializer):
    """Profile serializer for the user model; the password hash is excluded."""
    class Meta:
        model = User
        exclude = ('password',)
class RegisterUserSerializer(serializers.ModelSerializer):
    """Registration serializer: password strength + confirmation checks.

    NOTE(review): ``fields = '__all__'`` means the stored password field is
    also serialized on OUTPUT (it is not write_only here) — confirm this
    serializer never echoes the created user back, or mark the password
    write_only / excluded.
    """
    class Meta:
        model = User
        fields = '__all__'

    def validate_password(self, password):
        # Run Django's password validators against a throwaway user built
        # from the raw input, so similarity checks can see the other fields.
        user = _build_initial_user(self.initial_data)
        validate_password(password, user=user)
        return password

    def get_fields(self):
        # Inject a write-only confirmation field that is not on the model.
        fields = super().get_fields()
        fields['password_confirm'] = serializers.CharField(write_only=True)
        return fields

    def validate(self, attrs):
        # Cross-field check: password and its confirmation must match.
        if attrs['password'] != attrs['password_confirm']:
            raise ValidationError(_("Passwords don't match"))
        return attrs

    def create(self, validated_data):
        # password_confirm is not a model field; strip it before create_user.
        data = validated_data.copy()
        del data['password_confirm']
        return self.Meta.model.objects.create_user(**data)
class ChangePasswordSerializer(serializers.Serializer):
    """Validates a password change: current password plus new password twice."""
    old_password = serializers.CharField()
    password = serializers.CharField()

    def validate_old_password(self, old_password):
        # The requester must prove knowledge of the current password.
        user = self.context['request'].user
        if not user.check_password(old_password):
            raise serializers.ValidationError(_("Old password is not correct"))
        return old_password

    def validate_password(self, password):
        # Apply Django's password validators for the requesting user.
        user = self.context['request'].user
        validate_password(password, user=user)
        return password

    def get_fields(self):
        # Add the confirmation field (not declared above) dynamically.
        fields = super().get_fields()
        fields['password_confirm'] = serializers.CharField()
        return fields

    def validate(self, attrs):
        # New password and its confirmation must match.
        if attrs['password'] != attrs['password_confirm']:
            raise serializers.ValidationError(_("Passwords don't match"))
        return attrs
|
5,430 | d3342507cb1966e14380ff28ae12b5c334abd20a | from datetime import *
dd=int(input("enter number day: "))
nn=int(datetime.now().strftime("%w"))+1
# print(dd)
# print(nn)
print((datetime.now().date())+(timedelta(days=dd-nn)))
|
5,431 | 88cc4ae4137cf9c0e9c39874b36f7a2770550f96 | from app.exceptions import UserAlreadyExist, UserDoesNotExist
class Accounts(object):
    """In-memory registry of users.

    NOTE(review): entries are keyed by ``user.id`` on insert, but removal
    and lookup key by email — the two are only consistent if ``user.id``
    IS the email; confirm against the User model.
    """
    def __init__(self):
        self.users = {}

    def add_user(self, user):
        """Register *user* under its id; reject duplicates."""
        if user.id in self.users:
            raise UserAlreadyExist
        self.users[user.id] = user

    def remove_user(self, email):
        """Remove the user registered under *email*; raise if absent."""
        try:
            del self.users[email]
        except KeyError:
            raise UserDoesNotExist

    def check_user(self, email):
        """Return the user stored under *email*, or None when absent."""
        return self.users.get(email)

    def all_users(self):
        """Return the underlying id -> user mapping."""
        return self.users
5,432 | b90678c8f7ad9b97e13e5603bdf1dc8cb3511ca5 | # PySNMP SMI module. Autogenerated from smidump -f python DOCS-IETF-QOS-MIB
# by libsmi2pysnmp-0.1.3 at Thu May 22 11:57:36 2014,
# Python version sys.version_info(major=2, minor=7, micro=2, releaselevel='final', serial=0)
# Imports
( Integer, ObjectIdentifier, OctetString, ) = mibBuilder.importSymbols("ASN1", "Integer", "ObjectIdentifier", "OctetString")
( NamedValues, ) = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
( ConstraintsIntersection, ConstraintsUnion, SingleValueConstraint, ValueRangeConstraint, ValueSizeConstraint, ) = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsIntersection", "ConstraintsUnion", "SingleValueConstraint", "ValueRangeConstraint", "ValueSizeConstraint")
( DscpOrAny, ) = mibBuilder.importSymbols("DIFFSERV-DSCP-TC", "DscpOrAny")
( InterfaceIndex, ifIndex, ) = mibBuilder.importSymbols("IF-MIB", "InterfaceIndex", "ifIndex")
( InetAddress, InetAddressType, InetPortNumber, ) = mibBuilder.importSymbols("INET-ADDRESS-MIB", "InetAddress", "InetAddressType", "InetPortNumber")
( SnmpAdminString, ) = mibBuilder.importSymbols("SNMP-FRAMEWORK-MIB", "SnmpAdminString")
( ModuleCompliance, ObjectGroup, ) = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "ObjectGroup")
( Bits, Counter32, Counter64, Integer32, Integer32, ModuleIdentity, MibIdentifier, MibScalar, MibTable, MibTableRow, MibTableColumn, TimeTicks, Unsigned32, mib_2, ) = mibBuilder.importSymbols("SNMPv2-SMI", "Bits", "Counter32", "Counter64", "Integer32", "Integer32", "ModuleIdentity", "MibIdentifier", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "TimeTicks", "Unsigned32", "mib-2")
( MacAddress, RowStatus, StorageType, TextualConvention, TimeStamp, TruthValue, ) = mibBuilder.importSymbols("SNMPv2-TC", "MacAddress", "RowStatus", "StorageType", "TextualConvention", "TimeStamp", "TruthValue")
# Types
class DocsIetfQosBitRate(TextualConvention, Unsigned32):
displayHint = "d"
class DocsIetfQosRfMacIfDirection(Integer):
subtypeSpec = Integer.subtypeSpec+SingleValueConstraint(2,1,)
namedValues = NamedValues(("downstream", 1), ("upstream", 2), )
class DocsIetfQosSchedulingType(Integer):
    """Enumerated DOCSIS upstream scheduling service type (values 1-6)."""
    subtypeSpec = Integer.subtypeSpec+SingleValueConstraint(3,1,5,6,2,4,)
    namedValues = NamedValues(("undefined", 1), ("bestEffort", 2), ("nonRealTimePollingService", 3), ("realTimePollingService", 4), ("unsolictedGrantServiceWithAD", 5), ("unsolictedGrantService", 6), )
# Objects
# --- Module identity: DOCS-IETF-QOS-MIB, rooted at mib-2 127 (RFC 4323). ---
# The long setDescription/setContactInfo strings are the MIB's DESCRIPTION
# clauses emitted verbatim by the generator; they are only attached when
# mibBuilder.loadTexts is enabled.
docsIetfQosMIB = ModuleIdentity((1, 3, 6, 1, 2, 1, 127)).setRevisions(("2006-01-23 00:00",))
if mibBuilder.loadTexts: docsIetfQosMIB.setOrganization("IETF IP over Cable Data Network (IPCDN)\nWorking Group")
if mibBuilder.loadTexts: docsIetfQosMIB.setContactInfo("\nCo-Author: Michael Patrick\nPostal: Motorola BCS\n 111 Locke Drive\n Marlborough, MA 01752-7214\n U.S.A.\nPhone: +1 508 786 7563\nE-mail: michael.patrick@motorola.com\n\nCo-Author: William Murwin\nPostal: Motorola BCS\n 111 Locke Drive\n Marlborough, MA 01752-7214\n U.S.A.\nPhone: +1 508 786 7594\nE-mail: w.murwin@motorola.com\n\nIETF IPCDN Working Group\nGeneral Discussion: ipcdn@ietf.org\nSubscribe: http://www.ietf.org/mailman/listinfo/ipcdn\nArchive: ftp://ftp.ietf.org/ietf-mail-archive/ipcdn\nCo-chairs: Richard Woundy, Richard_Woundy@cable.comcast.com\n Jean-Francois Mule, jfm@cablelabs.com")
if mibBuilder.loadTexts: docsIetfQosMIB.setDescription("This is the management information for\nQuality Of Service (QOS) for DOCSIS 1.1 and 2.0.\n\n\n\nCopyright (C) The Internet Society (2006). This version of\nthis MIB module is part of RFC 4323; see the RFC itself for\nfull legal notices.")
# Subtree anchors: notifications under .0, objects under .1.
docsIetfQosNotifications = MibIdentifier((1, 3, 6, 1, 2, 1, 127, 0))
docsIetfQosMIBObjects = MibIdentifier((1, 3, 6, 1, 2, 1, 127, 1))
# --- docsIetfQosPktClassTable (.1.1): packet classifier rules. ---
# Indexed by (ifIndex, docsIetfQosServiceFlowId, docsIetfQosPktClassId);
# each row is one classifier rule mapping matching packets to a Service Flow.
docsIetfQosPktClassTable = MibTable((1, 3, 6, 1, 2, 1, 127, 1, 1))
if mibBuilder.loadTexts: docsIetfQosPktClassTable.setDescription("This table describes the packet classification\nconfigured on the CM or CMTS.\nThe model is that a packet either received\nas input from an interface or transmitted\nfor output on an interface may be compared\nagainst an ordered list of rules pertaining to\nthe packet contents. Each rule is a row of this\ntable. A matching rule provides a Service Flow\nID to which the packet is classified.\nAll rules need to match for a packet to match\na classifier.\n\nThe objects in this row correspond to a set of\nClassifier Encoding parameters in a DOCSIS\nMAC management message. The\ndocsIetfQosPktClassBitMap indicates which\nparticular parameters were present in the\nclassifier as signaled in the DOCSIS message.\nIf the referenced parameter was not present\nin the signaled DOCSIS 1.1 and 2.0 Classifier, the\ncorresponding object in this row reports a\nvalue as specified in the DESCRIPTION section.")
docsIetfQosPktClassEntry = MibTableRow((1, 3, 6, 1, 2, 1, 127, 1, 1, 1)).setIndexNames((0, "IF-MIB", "ifIndex"), (0, "DOCS-IETF-QOS-MIB", "docsIetfQosServiceFlowId"), (0, "DOCS-IETF-QOS-MIB", "docsIetfQosPktClassId"))
if mibBuilder.loadTexts: docsIetfQosPktClassEntry.setDescription("An entry in this table provides a single packet\nclassifier rule. The index ifIndex is an ifType\nof docsCableMaclayer(127).")
# Columns .1-.7: rule identity, direction, priority, and IP TOS/protocol match.
docsIetfQosPktClassId = MibTableColumn((1, 3, 6, 1, 2, 1, 127, 1, 1, 1, 1), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535))).setMaxAccess("noaccess")
if mibBuilder.loadTexts: docsIetfQosPktClassId.setDescription("Index assigned to packet classifier entry by\nthe CMTS, which is unique per Service Flow.")
docsIetfQosPktClassDirection = MibTableColumn((1, 3, 6, 1, 2, 1, 127, 1, 1, 1, 2), DocsIetfQosRfMacIfDirection()).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsIetfQosPktClassDirection.setDescription("Indicates the direction to which the classifier\nis applied.")
docsIetfQosPktClassPriority = MibTableColumn((1, 3, 6, 1, 2, 1, 127, 1, 1, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsIetfQosPktClassPriority.setDescription("The value specifies the order of evaluation\nof the classifiers.\n\nThe higher the value, the higher the priority.\nThe value of 0 is used as default in\nprovisioned Service Flows Classifiers.\nThe default value of 64 is used for dynamic\nService Flow Classifiers.\n\nIf the referenced parameter is not present\nin a classifier, this object reports the default\nvalue as defined above.")
docsIetfQosPktClassIpTosLow = MibTableColumn((1, 3, 6, 1, 2, 1, 127, 1, 1, 1, 4), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 1)).setFixedLength(1)).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsIetfQosPktClassIpTosLow.setDescription("The low value of a range of TOS byte values.\nIf the referenced parameter is not present\nin a classifier, this object reports the value\nof 0.\n\nThe IP TOS octet, as originally defined in RFC 791,\nhas been superseded by the 6-bit Differentiated\nServices Field (DSField, RFC 3260) and the 2-bit\nExplicit Congestion Notification Field (ECN field,\nRFC 3168). This object is defined as an 8-bit\noctet as per the DOCSIS Specification\nfor packet classification.")
docsIetfQosPktClassIpTosHigh = MibTableColumn((1, 3, 6, 1, 2, 1, 127, 1, 1, 1, 5), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 1)).setFixedLength(1)).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsIetfQosPktClassIpTosHigh.setDescription("The 8-bit high value of a range of TOS byte\nvalues.\n\nIf the referenced parameter is not present\nin a classifier, this object reports the\nvalue of 0.\n\nThe IP TOS octet as originally defined in RFC 791\nhas been superseded by the 6-bit Differentiated\nServices Field (DSField, RFC 3260) and the 2-bit\nExplicit Congestion Notification Field (ECN field,\nRFC 3168). This object is defined as an 8-bit\noctet as defined by the DOCSIS Specification\nfor packet classification.")
docsIetfQosPktClassIpTosMask = MibTableColumn((1, 3, 6, 1, 2, 1, 127, 1, 1, 1, 6), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 1)).setFixedLength(1)).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsIetfQosPktClassIpTosMask.setDescription("The mask value is bitwise ANDed with TOS byte\nin an IP packet, and this value is used for\nrange checking of TosLow and TosHigh.\n\nIf the referenced parameter is not present\nin a classifier, this object reports the value\nof 0.\n\nThe IP TOS octet as originally defined in RFC 791\nhas been superseded by the 6-bit Differentiated\nServices Field (DSField, RFC 3260) and the 2-bit\nExplicit Congestion Notification Field (ECN field,\nRFC 3168). This object is defined as an 8-bit\noctet per the DOCSIS Specification for packet\nclassification.")
docsIetfQosPktClassIpProtocol = MibTableColumn((1, 3, 6, 1, 2, 1, 127, 1, 1, 1, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 258))).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsIetfQosPktClassIpProtocol.setDescription("This object indicates the value of the IP\nProtocol field required for IP packets to match\nthis rule.\n\n\n\n\nThe value 256 matches traffic with any IP Protocol\nvalue. The value 257 by convention matches both TCP\nand UDP.\n\nIf the referenced parameter is not present\nin a classifier, this object reports the value\nof 258.")
# Columns .8-.16: IP source/destination address+mask match and TCP/UDP port ranges.
docsIetfQosPktClassInetAddressType = MibTableColumn((1, 3, 6, 1, 2, 1, 127, 1, 1, 1, 8), InetAddressType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsIetfQosPktClassInetAddressType.setDescription("The type of the Internet address for\ndocsIetfQosPktClassInetSourceAddr,\ndocsIetfQosPktClassInetSourceMask,\ndocsIetfQosPktClassInetDestAddr, and\ndocsIetfQosPktClassInetDestMask.\n\nIf the referenced parameter is not present\nin a classifier, this object reports the value of\nipv4(1).")
docsIetfQosPktClassInetSourceAddr = MibTableColumn((1, 3, 6, 1, 2, 1, 127, 1, 1, 1, 9), InetAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsIetfQosPktClassInetSourceAddr.setDescription("This object specifies the value of the IP\nSource Address required for packets to match\nthis rule.\n\nAn IP packet matches the rule when the packet\nIP Source Address bitwise ANDed with the\ndocsIetfQosPktClassInetSourceMask value equals the\ndocsIetfQosPktClassInetSourceAddr value.\n\nThe address type of this object is specified by\ndocsIetfQosPktClassInetAddressType.\n\nIf the referenced parameter is not present\nin a classifier, this object reports the value of\n'00000000'H.")
docsIetfQosPktClassInetSourceMask = MibTableColumn((1, 3, 6, 1, 2, 1, 127, 1, 1, 1, 10), InetAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsIetfQosPktClassInetSourceMask.setDescription("This object specifies which bits of a packet's\nIP Source Address are compared to match\nthis rule.\n\nAn IP packet matches the rule when the packet\nsource address bitwise ANDed with the\ndocsIetfQosPktClassInetSourceMask value equals the\ndocsIetfQosIpPktClassInetSourceAddr value.\n\nThe address type of this object is specified by\ndocsIetfQosPktClassInetAddressType.\n\nIf the referenced parameter is not present\nin a classifier, this object reports the value of\n'FFFFFFFF'H.")
docsIetfQosPktClassInetDestAddr = MibTableColumn((1, 3, 6, 1, 2, 1, 127, 1, 1, 1, 11), InetAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsIetfQosPktClassInetDestAddr.setDescription("This object specifies the value of the IP\nDestination Address required for packets to match\nthis rule.\n\nAn IP packet matches the rule when the packet\nIP Destination Address bitwise ANDed with the\ndocsIetfQosPktClassInetDestMask value\nequals the docsIetfQosPktClassInetDestAddr value.\n\nThe address type of this object is specified by\ndocsIetfQosPktClassInetAddressType.\n\nIf the referenced parameter is not present\nin a classifier, this object reports the value of\n'00000000'H.")
docsIetfQosPktClassInetDestMask = MibTableColumn((1, 3, 6, 1, 2, 1, 127, 1, 1, 1, 12), InetAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsIetfQosPktClassInetDestMask.setDescription("This object specifies which bits of a packet's\nIP Destination Address are compared to\nmatch this rule.\n\nAn IP packet matches the rule when the packet\ndestination address bitwise ANDed with the\ndocsIetfQosPktClassInetDestMask value equals the\ndocsIetfQosIpPktClassInetDestAddr value.\n\nThe address type of this object is specified by\ndocsIetfQosPktClassInetAddressType.\n\nIf the referenced parameter is not present\nin a classifier, this object reports the value of\n'FFFFFFFF'H.")
docsIetfQosPktClassSourcePortStart = MibTableColumn((1, 3, 6, 1, 2, 1, 127, 1, 1, 1, 13), InetPortNumber()).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsIetfQosPktClassSourcePortStart.setDescription("This object specifies the low-end inclusive\nrange of TCP/UDP source port numbers to which\na packet is compared. This object is irrelevant\nfor non-TCP/UDP IP packets.\n\nIf the referenced parameter is not present\nin a classifier, this object reports the value\nof 0.")
docsIetfQosPktClassSourcePortEnd = MibTableColumn((1, 3, 6, 1, 2, 1, 127, 1, 1, 1, 14), InetPortNumber()).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsIetfQosPktClassSourcePortEnd.setDescription("This object specifies the high-end inclusive\nrange of TCP/UDP source port numbers to which\na packet is compared. This object is irrelevant\nfor non-TCP/UDP IP packets.\n\nIf the referenced parameter is not present\nin a classifier, this object reports the value of\n65535.")
docsIetfQosPktClassDestPortStart = MibTableColumn((1, 3, 6, 1, 2, 1, 127, 1, 1, 1, 15), InetPortNumber()).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsIetfQosPktClassDestPortStart.setDescription("This object specifies the low-end inclusive\nrange of TCP/UDP destination port numbers to\nwhich a packet is compared.\n\nIf the referenced parameter is not present\nin a classifier, this object reports the value\nof 0.")
docsIetfQosPktClassDestPortEnd = MibTableColumn((1, 3, 6, 1, 2, 1, 127, 1, 1, 1, 16), InetPortNumber()).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsIetfQosPktClassDestPortEnd.setDescription("This object specifies the high-end inclusive\nrange of TCP/UDP destination port numbers to which\na packet is compared.\n\nIf the referenced parameter is not present\nin a classifier, this object reports the value of\n65535.")
# Columns .17-.24: Ethernet-layer match criteria (MAC addresses, EtherType/DSAP,
# 802.1P/Q user priority and VLAN id).
docsIetfQosPktClassDestMacAddr = MibTableColumn((1, 3, 6, 1, 2, 1, 127, 1, 1, 1, 17), MacAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsIetfQosPktClassDestMacAddr.setDescription("An Ethernet packet matches an entry when its\ndestination MAC address bitwise ANDed with\ndocsIetfQosPktClassDestMacMask equals the value of\ndocsIetfQosPktClassDestMacAddr.\n\n\nIf the referenced parameter is not present\nin a classifier, this object reports the value of\n'000000000000'H.")
docsIetfQosPktClassDestMacMask = MibTableColumn((1, 3, 6, 1, 2, 1, 127, 1, 1, 1, 18), MacAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsIetfQosPktClassDestMacMask.setDescription("An Ethernet packet matches an entry when its\ndestination MAC address bitwise ANDed with\ndocsIetfQosPktClassDestMacMask equals the value of\ndocsIetfQosPktClassDestMacAddr.\n\nIf the referenced parameter is not present\nin a classifier, this object reports the value of\n'000000000000'H.")
docsIetfQosPktClassSourceMacAddr = MibTableColumn((1, 3, 6, 1, 2, 1, 127, 1, 1, 1, 19), MacAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsIetfQosPktClassSourceMacAddr.setDescription("An Ethernet packet matches this entry when its\nsource MAC address equals the value of\nthis object.\n\nIf the referenced parameter is not present\nin a classifier, this object reports the value of\n'FFFFFFFFFFFF'H.")
docsIetfQosPktClassEnetProtocolType = MibTableColumn((1, 3, 6, 1, 2, 1, 127, 1, 1, 1, 20), Integer().subtype(subtypeSpec=SingleValueConstraint(2,0,1,4,3,)).subtype(namedValues=NamedValues(("none", 0), ("ethertype", 1), ("dsap", 2), ("mac", 3), ("all", 4), ))).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsIetfQosPktClassEnetProtocolType.setDescription("This object indicates the format of the layer 3\nprotocol ID in the Ethernet packet. A value of\nnone(0) means that the rule does not use the\nlayer 3 protocol type as a matching criteria.\n\nA value of ethertype(1) means that the rule\napplies only to frames that contain an\nEtherType value. Ethertype values are contained\nin packets using the Dec-Intel-Xerox (DIX)\nencapsulation or the RFC1042 Sub-Network Access\nProtocol (SNAP) encapsulation formats.\n\nA value of dsap(2) means that the rule applies\n\n\n\nonly to frames using the IEEE802.3\nencapsulation format with a Destination Service\nAccess Point (DSAP) other\nthan 0xAA (which is reserved for SNAP).\n\nA value of mac(3) means that the rule applies\nonly to MAC management messages for MAC management\nmessages.\n\nA value of all(4) means that the rule matches\nall Ethernet packets.\n\nIf the Ethernet frame contains an 802.1P/Q Tag\nheader (i.e., EtherType 0x8100), this object\napplies to the embedded EtherType field within\nthe 802.1P/Q header.\n\nIf the referenced parameter is not present in a\nclassifier, this object reports the value of 0.")
docsIetfQosPktClassEnetProtocol = MibTableColumn((1, 3, 6, 1, 2, 1, 127, 1, 1, 1, 21), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsIetfQosPktClassEnetProtocol.setDescription("If docsIetfQosEthPktClassProtocolType is none(0),\nthis object is ignored when considering whether\na packet matches the current rule.\n\nIf dosQosPktClassEnetProtocolType is ethertype(1),\nthis object gives the 16-bit value of the\nEtherType that the packet must match in order to\nmatch the rule.\n\nIf docsIetfQosPktClassEnetProtocolType is dsap(2),\nthe lower 8 bits of this object's value must match\nthe DSAP byte of the packet in order to match the\nrule.\n\nIf docsIetfQosPktClassEnetProtocolType is mac(3),\nthe lower 8 bits of this object's value represent a\nlower bound (inclusive) of MAC management message\ntype codes matched, and the upper 8 bits represent\nthe upper bound (inclusive) of matched MAC message\ntype codes. Certain message type codes are\nexcluded from matching, as specified in the\nreference.\n\n\n\nIf the Ethernet frame contains an 802.1P/Q Tag\nheader (i.e., EtherType 0x8100), this object applies\nto the embedded EtherType field within the 802.1P/Q\nheader.\n\nIf the referenced parameter is not present in the\nclassifier, the value of this object is reported\nas 0.")
docsIetfQosPktClassUserPriLow = MibTableColumn((1, 3, 6, 1, 2, 1, 127, 1, 1, 1, 22), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 7))).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsIetfQosPktClassUserPriLow.setDescription("This object applies only to Ethernet frames\nusing the 802.1P/Q tag header (indicated with\nEtherType 0x8100). Such frames include a 16-bit\nTag that contains a 3-bit Priority field and\na 12-bit VLAN number.\n\nTagged Ethernet packets must have a 3-bit\nPriority field within the range of\ndocsIetfQosPktClassPriLow to\ndocsIetfQosPktClassPriHigh in order to match this\nrule.\n\nIf the referenced parameter is not present in the\nclassifier, the value of this object is reported\nas 0.")
docsIetfQosPktClassUserPriHigh = MibTableColumn((1, 3, 6, 1, 2, 1, 127, 1, 1, 1, 23), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 7))).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsIetfQosPktClassUserPriHigh.setDescription("This object applies only to Ethernet frames\nusing the 802.1P/Qtag header (indicated with\nEtherType 0x8100). Such frames include a 16-bit\nTag that contains a 3-bit Priority field and\na 12-bit VLAN number.\n\nTagged Ethernet packets must have a 3-bit\nPriority field within the range of\ndocsIetfQosPktClassPriLow to\ndocsIetfQosPktClassPriHigh in order to match this\nrule.\n\n\n\nIf the referenced parameter is not present in the\nclassifier, the value of this object is reported\nas 7.")
docsIetfQosPktClassVlanId = MibTableColumn((1, 3, 6, 1, 2, 1, 127, 1, 1, 1, 24), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 4094))).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsIetfQosPktClassVlanId.setDescription("This object applies only to Ethernet frames\nusing the 802.1P/Q tag header.\n\nTagged packets must have a VLAN Identifier that\nmatches the value in order to match the rule.\n\nIf the referenced parameter is not present in the\nclassifier, the value of this object is reported\nas 0.")
# Columns .25-.27: classifier state, match counter, and signaled-parameter bitmap.
docsIetfQosPktClassStateActive = MibTableColumn((1, 3, 6, 1, 2, 1, 127, 1, 1, 1, 25), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsIetfQosPktClassStateActive.setDescription("This object indicates whether or not the classifier\nis enabled to classify packets to a Service Flow.\n\nIf the referenced parameter is not present in the\nclassifier, the value of this object is reported\nas true(1).")
docsIetfQosPktClassPkts = MibTableColumn((1, 3, 6, 1, 2, 1, 127, 1, 1, 1, 26), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsIetfQosPktClassPkts.setDescription("This object counts the number of packets that have\nbeen classified using this entry. This\nincludes all packets delivered to a Service Flow\nmaximum rate policing function, whether or not that\nfunction drops the packets.\n\nThis counter's last discontinuity is the\nifCounterDiscontinuityTime for the same ifIndex that\nindexes this object.")
docsIetfQosPktClassBitMap = MibTableColumn((1, 3, 6, 1, 2, 1, 127, 1, 1, 1, 27), Bits().subtype(namedValues=NamedValues(("rulePriority", 0), ("activationState", 1), ("destPortStart", 10), ("destPortEnd", 11), ("destMac", 12), ("sourceMac", 13), ("ethertype", 14), ("userPri", 15), ("vlanId", 16), ("ipTos", 2), ("ipProtocol", 3), ("ipSourceAddr", 4), ("ipSourceMask", 5), ("ipDestAddr", 6), ("ipDestMask", 7), ("sourcePortStart", 8), ("sourcePortEnd", 9), ))).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsIetfQosPktClassBitMap.setDescription("This object indicates which parameter encodings\nwere actually present in the DOCSIS packet\nclassifier encoding signaled in the DOCSIS message\nthat created or modified the classifier. Note that\nDynamic Service Change messages have replace\nsemantics, so that all non-default parameters must\nbe present whether the classifier is being created\nor changed.\n\nA bit of this object is set to 1 if the parameter\nindicated by the comment was present in the\nclassifier encoding, and to 0 otherwise.\n\nNote that BITS are encoded most significant bit\nfirst, so that if, for example, bits 6 and 7 are\nset, this object is encoded as the octet string\n'030000'H.")
# --- docsIetfQosParamSetTable (.1.2): DOCSIS 1.1/2.0 QOS Parameter Sets. ---
# Indexed by (ifIndex, docsIetfQosServiceFlowId, docsIetfQosParamSetType);
# one row per active/admitted/provisioned parameter set of a Service Flow.
docsIetfQosParamSetTable = MibTable((1, 3, 6, 1, 2, 1, 127, 1, 2))
if mibBuilder.loadTexts: docsIetfQosParamSetTable.setDescription("This table describes the set of DOCSIS 1.1 and 2.0\nQOS parameters defined in a managed device.\n\nThe ifIndex index specifies a DOCSIS MAC Domain.\nThe docsIetfQosServiceFlowId index specifies a\nparticular Service Flow.\nThe docsIetfQosParamSetType index indicates whether\nthe active, admitted, or provisioned QOS Parameter\nSet is being described by the row.\n\nOnly the QOS Parameter Sets of DOCSIS 1.1 and 2.0\nService Flows are represented in this table.\n\nDOCSIS 1.0 QOS service profiles are not\nrepresented in this table.\n\nEach row corresponds to a DOCSIS QOS Parameter Set\nas signaled via DOCSIS MAC management messages.\nEach object in the row corresponds to one or\npart of one DOCSIS 1.1 Service Flow Encoding.\nThe docsIetfQosParamSetBitMap object in the row\nindicates which particular parameters were signaled\nin the original registration or dynamic service\nrequest message that created the QOS Parameter Set.\n\nIn many cases, even if a QOS Parameter Set parameter\nwas not signaled, the DOCSIS specification calls\nfor a default value to be used. That default value\nis reported as the value of the corresponding object\nin this row.\n\nMany objects are not applicable, depending on\nthe Service Flow direction or upstream scheduling\ntype. The object value reported in this case\nis specified in the DESCRIPTION clause.")
docsIetfQosParamSetEntry = MibTableRow((1, 3, 6, 1, 2, 1, 127, 1, 2, 1)).setIndexNames((0, "IF-MIB", "ifIndex"), (0, "DOCS-IETF-QOS-MIB", "docsIetfQosServiceFlowId"), (0, "DOCS-IETF-QOS-MIB", "docsIetfQosParamSetType"))
if mibBuilder.loadTexts: docsIetfQosParamSetEntry.setDescription("A unique set of QOS parameters.")
# Columns .1-.9: service class name, priority, rate/burst limits, timeouts.
docsIetfQosParamSetServiceClassName = MibTableColumn((1, 3, 6, 1, 2, 1, 127, 1, 2, 1, 1), SnmpAdminString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsIetfQosParamSetServiceClassName.setDescription("Refers to the Service Class Name from which the\nparameter set values were derived.\n\nIf the referenced parameter is not present in the\ncorresponding DOCSIS QOS Parameter Set, the default\nvalue of this object is a zero-length string.")
docsIetfQosParamSetPriority = MibTableColumn((1, 3, 6, 1, 2, 1, 127, 1, 2, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 7))).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsIetfQosParamSetPriority.setDescription("The relative priority of a Service Flow.\nHigher numbers indicate higher priority.\nThis priority should only be used to differentiate\n\n\n\nService Flow from identical parameter sets.\n\nIf the referenced parameter is not present in the\ncorresponding DOCSIS QOS Parameter Set, the default\nvalue of this object is 0. If the parameter is\nnot applicable, the reported value is 0.")
docsIetfQosParamSetMaxTrafficRate = MibTableColumn((1, 3, 6, 1, 2, 1, 127, 1, 2, 1, 3), DocsIetfQosBitRate()).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsIetfQosParamSetMaxTrafficRate.setDescription("Maximum sustained traffic rate allowed for this\nService Flow in bits/sec. Must count all MAC frame\ndata PDU from the bytes following the MAC header\nHCS to the end of the CRC. The number of bytes\nforwarded is limited during any time interval.\nThe value 0 means no maximum traffic rate is\nenforced. This object applies to both upstream and\ndownstream Service Flows.\n\nIf the referenced parameter is not present in the\ncorresponding DOCSIS QOS Parameter Set, the default\nvalue of this object is 0. If the parameter is\nnot applicable, it is reported as 0.")
docsIetfQosParamSetMaxTrafficBurst = MibTableColumn((1, 3, 6, 1, 2, 1, 127, 1, 2, 1, 4), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsIetfQosParamSetMaxTrafficBurst.setDescription("Specifies the token bucket size in bytes\nfor this parameter set. The value is calculated\nfrom the byte following the MAC header HCS to\nthe end of the CRC. This object is applied in\nconjunction with docsIetfQosParamSetMaxTrafficRate\nto calculate maximum sustained traffic rate.\n\nIf the referenced parameter is not present in the\ncorresponding DOCSIS QOS Parameter Set, the default\nvalue of this object for scheduling types\nbestEffort (2), nonRealTimePollingService(3),\nand realTimePollingService(4) is 3044.\n\nIf this parameter is not applicable, it is reported\nas 0.")
docsIetfQosParamSetMinReservedRate = MibTableColumn((1, 3, 6, 1, 2, 1, 127, 1, 2, 1, 5), DocsIetfQosBitRate()).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsIetfQosParamSetMinReservedRate.setDescription("Specifies the guaranteed minimum rate in\nbits/sec for this parameter set. The value is\ncalculated from the byte following the MAC\nheader HCS to the end of the CRC. The default\nvalue of 0 means that no bandwidth is reserved.\n\nIf the referenced parameter is not present in the\ncorresponding DOCSIS QOS Parameter Set, the default\nvalue of this object is 0. If the parameter\nis not applicable, it is reported as 0.")
docsIetfQosParamSetMinReservedPkt = MibTableColumn((1, 3, 6, 1, 2, 1, 127, 1, 2, 1, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsIetfQosParamSetMinReservedPkt.setDescription("Specifies an assumed minimum packet size in\nbytes for which the\ndocsIetfQosParamSetMinReservedRate will be\nprovided. The value is calculated from the byte\nfollowing the MAC header HCS to the end of the\nCRC.\n\nIf the referenced parameter is omitted from a\nDOCSIS QOS parameter set, the default value is\nCMTS implementation dependent. In this case, the\nCMTS reports the default value it is using, and the\nCM reports a value of 0. If the referenced\nparameter is not applicable to the direction or\nscheduling type of the Service Flow, both CMTS and\nCM report this object's value as 0.")
docsIetfQosParamSetActiveTimeout = MibTableColumn((1, 3, 6, 1, 2, 1, 127, 1, 2, 1, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsIetfQosParamSetActiveTimeout.setDescription("Specifies the maximum duration in seconds that\nresources remain unused on an active service\nflow before CMTS signals that both active and\nadmitted parameters set are null. The default\nvalue of 0 signifies an infinite amount of time.\n\nIf the referenced parameter is not present in the\ncorresponding DOCSIS QOS Parameter Set, the default\nvalue of this object is 0.")
# NOTE: .clone(200) below sets the DOCSIS-specified default of 200 seconds.
docsIetfQosParamSetAdmittedTimeout = MibTableColumn((1, 3, 6, 1, 2, 1, 127, 1, 2, 1, 8), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535)).clone(200)).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsIetfQosParamSetAdmittedTimeout.setDescription("Specifies the maximum duration in seconds that\nresources remain in admitted state before\nresources must be released.\n\nThe value of 0 signifies an infinite amount\nof time.\n\nIf the referenced parameter is not present in the\ncorresponding DOCSIS QOS Parameter Set, the\ndefault value of this object is 200.")
docsIetfQosParamSetMaxConcatBurst = MibTableColumn((1, 3, 6, 1, 2, 1, 127, 1, 2, 1, 9), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsIetfQosParamSetMaxConcatBurst.setDescription("Specifies the maximum concatenated burst in\nbytes that an upstream Service Flow is allowed.\nThe value is calculated from the FC byte of the\nConcatenation MAC Header to the last CRC byte in\nof the last concatenated MAC frame, inclusive.\nThe value of 0 specifies no maximum burst.\n\nIf the referenced parameter is not present in the\ncorresponding DOCSIS QOS Parameter Set, the default\nvalue of this object for scheduling types\nbestEffort(2), nonRealTimePollingService(3), and\n\n\n\nrealTimePollingService(4) is 1522. If the parameter\nis not applicable, this object's value is reported\nas 0.")
# Columns .10-.16: upstream scheduling type and polling/grant parameters.
docsIetfQosParamSetSchedulingType = MibTableColumn((1, 3, 6, 1, 2, 1, 127, 1, 2, 1, 10), DocsIetfQosSchedulingType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsIetfQosParamSetSchedulingType.setDescription("Specifies the upstream scheduling service used for\nupstream Service Flow.\n\nIf the referenced parameter is not present in the\ncorresponding DOCSIS QOS Parameter Set of an\nupstream Service Flow, the default value of this\nobject is bestEffort(2). For QOS parameter sets of\ndownstream Service Flows, this object's value is\nreported as undefined(1).")
docsIetfQosParamSetNomPollInterval = MibTableColumn((1, 3, 6, 1, 2, 1, 127, 1, 2, 1, 11), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsIetfQosParamSetNomPollInterval.setDescription("Specifies the nominal interval in microseconds\nbetween successive unicast request\nopportunities on an upstream Service Flow.\n\nThis object applies only to upstream Service Flows\nwith DocsIetfQosSchedulingType of value\nnonRealTimePollingService(3),\nrealTimePollingService(4), and\nunsolictedGrantServiceWithAD(5). The parameter is\nmandatory for realTimePollingService(4). If the\nparameter is omitted with\nnonRealTimePollingService(3), the CMTS uses an\nimplementation-dependent value. If the parameter\nis omitted with unsolictedGrantServiceWithAD(5),\nthe CMTS uses as a default value the value of the\nNominal Grant Interval parameter. In all cases,\nthe CMTS reports the value it is using when the\nparameter is applicable. The CM reports the\nsignaled parameter value if it was signaled,\nand 0 otherwise.\n\n\n\nIf the referenced parameter is not applicable to\nthe direction or scheduling type of the\ncorresponding DOCSIS QOS Parameter Set, both\nCMTS and CM report this object's value as 0.")
docsIetfQosParamSetTolPollJitter = MibTableColumn((1, 3, 6, 1, 2, 1, 127, 1, 2, 1, 12), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsIetfQosParamSetTolPollJitter.setDescription("Specifies the maximum amount of time in\nmicroseconds that the unicast request interval\nmay be delayed from the nominal periodic\nschedule on an upstream Service Flow.\n\nThis parameter is applicable only to upstream\nService Flows with a DocsIetfQosSchedulingType of\nrealTimePollingService(4) or\nunsolictedGrantServiceWithAD(5).\n\nIf the referenced parameter is applicable but not\npresent in the corresponding DOCSIS QOS Parameter\nSet, the CMTS uses an implementation-dependent\nvalue and reports the value it is using.\nThe CM reports a value of 0 in this case.\n\nIf the parameter is not applicable to the\ndirection or upstream scheduling type of the\nService Flow, both CMTS and CM report this\nobject's value as 0.")
docsIetfQosParamSetUnsolicitGrantSize = MibTableColumn((1, 3, 6, 1, 2, 1, 127, 1, 2, 1, 13), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsIetfQosParamSetUnsolicitGrantSize.setDescription("Specifies the unsolicited grant size in bytes.\nThe grant size includes the entire MAC frame\ndata PDU from the Frame Control byte to the end\nof the MAC frame.\n\nThe referenced parameter is applicable only\nfor upstream flows with a DocsIetfQosSchedulingType\nof unsolicitedGrantServicewithAD(5) or\nunsolicitedGrantService(6), and it is mandatory\n\n\n\nwhen applicable. Both CMTS and CM report\nthe signaled value of the parameter in this\ncase.\n\nIf the referenced parameter is not applicable to\nthe direction or scheduling type of the\ncorresponding DOCSIS QOS Parameter Set, both\nCMTS and CM report this object's value as 0.")
docsIetfQosParamSetNomGrantInterval = MibTableColumn((1, 3, 6, 1, 2, 1, 127, 1, 2, 1, 14), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsIetfQosParamSetNomGrantInterval.setDescription("Specifies the nominal interval in microseconds\nbetween successive data grant opportunities\non an upstream Service Flow.\n\nThe referenced parameter is applicable only\nfor upstream flows with a DocsIetfQosSchedulingType\nof unsolicitedGrantServicewithAD(5) or\nunsolicitedGrantService(6), and it is mandatory\nwhen applicable. Both CMTS and CM report the\nsignaled value of the parameter in this case.\n\nIf the referenced parameter is not applicable to\nthe direction or scheduling type of the\ncorresponding DOCSIS QOS Parameter Set, both\nCMTS and CM report this object's value as 0.")
docsIetfQosParamSetTolGrantJitter = MibTableColumn((1, 3, 6, 1, 2, 1, 127, 1, 2, 1, 15), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsIetfQosParamSetTolGrantJitter.setDescription("Specifies the maximum amount of time in\nmicroseconds that the transmission opportunities\nmay be delayed from the nominal periodic schedule.\n\nThe referenced parameter is applicable only\nfor upstream flows with a DocsIetfQosSchedulingType\nof unsolicitedGrantServicewithAD(5) or\nunsolicitedGrantService(6), and it is mandatory\nwhen applicable. Both CMTS and CM report the\n\n\n\nsignaled value of the parameter in this case.\n\nIf the referenced parameter is not applicable to\nthe direction or scheduling type of the\ncorresponding DOCSIS QOS Parameter Set, both\nCMTS and CM report this object's value as 0.")
docsIetfQosParamSetGrantsPerInterval = MibTableColumn((1, 3, 6, 1, 2, 1, 127, 1, 2, 1, 16), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 127))).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsIetfQosParamSetGrantsPerInterval.setDescription("Specifies the number of data grants per Nominal\nGrant Interval\n(docsIetfQosParamSetNomGrantInterval).\n\nThe referenced parameter is applicable only\nfor upstream flows with a DocsIetfQosSchedulingType\nof unsolicitedGrantServicewithAD(5) or\nunsolicitedGrantService(6), and it is mandatory\nwhen applicable. Both CMTS and CM report the\nsignaled value of the parameter in this case.\n\nIf the referenced parameter is not applicable to\nthe direction or scheduling type of the\ncorresponding DOCSIS QOS Parameter Set, both\nCMTS and CM report this object's value as 0.")
docsIetfQosParamSetTosAndMask = MibTableColumn((1, 3, 6, 1, 2, 1, 127, 1, 2, 1, 17), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 1)).setFixedLength(1)).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsIetfQosParamSetTosAndMask.setDescription("Specifies the AND mask for the IP TOS byte for\noverwriting IP packet's TOS value. The IP packet\nTOS byte is bitwise ANDed with\ndocsIetfQosParamSetTosAndMask, and the result is\nbitwise ORed with docsIetfQosParamSetTosORMask and\nthe result is written to the IP packet TOS byte.\nA value of 'FF'H for docsIetfQosParamSetTosAndMask\nand a value of '00'H for\ndocsIetfQosParamSetTosOrMask means that the IP\nPacket TOS byte is not overwritten.\n\nThis combination is reported if the referenced\nparameter is not present in a QOS Parameter Set.\n\n\n\nThe IP TOS octet as originally defined in RFC 791\nhas been superseded by the 6-bit Differentiated\nServices Field (DSField, RFC 3260) and the 2-bit\nExplicit Congestion Notification Field (ECN field,\nRFC 3168). Network operators SHOULD avoid\nspecifying values of docsIetfQosParamSetTosAndMask\nand docsIetfQosParamSetTosORMask that would result\nin the modification of the ECN bits.\n\nIn particular, operators should not use values of\ndocsIetfQosParamSetTosAndMask that have either of\nthe least-significant two bits set to 0. Similarly,\noperators should not use values of\ndocsIetfQosParamSetTosORMask that have either of\nthe least-significant two bits set to 1.\n\nEven though this object is only enforced by the\nCable Modem Termination System (CMTS),\nCable Modems MUST report the value as signaled in\nthe referenced parameter.")
docsIetfQosParamSetTosOrMask = MibTableColumn((1, 3, 6, 1, 2, 1, 127, 1, 2, 1, 18), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 1)).setFixedLength(1)).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsIetfQosParamSetTosOrMask.setDescription("Specifies the OR mask for the IP TOS byte.\n\nSee the description of docsIetfQosParamSetTosAndMask\nfor further details.\n\nThe IP TOS octet as originally defined in RFC 791\nhas been superseded by the 6-bit Differentiated\nServices Field (DSField, RFC 3260) and the 2-bit\nExplicit Congestion Notification Field (ECN field,\nRFC 3168). Network operators SHOULD avoid\nspecifying values of docsIetfQosParamSetTosAndMask\nand docsIetfQosParamSetTosORMask that would result\nin the modification of the ECN bits.")
docsIetfQosParamSetMaxLatency = MibTableColumn((1, 3, 6, 1, 2, 1, 127, 1, 2, 1, 19), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsIetfQosParamSetMaxLatency.setDescription("Specifies the maximum latency between the\nreception of a packet by the CMTS on its NSI\nand the forwarding of the packet to the RF\ninterface. A value of 0 signifies no maximum\nlatency is enforced. This object only applies to\ndownstream Service Flows.\n\nIf the referenced parameter is not present in the\ncorresponding downstream DOCSIS QOS Parameter Set,\nthe default value is 0. This parameter is\nnot applicable to upstream DOCSIS QOS Parameter\nSets, and its value is reported as 0 in this case.")
docsIetfQosParamSetType = MibTableColumn((1, 3, 6, 1, 2, 1, 127, 1, 2, 1, 20), Integer().subtype(subtypeSpec=SingleValueConstraint(1,3,2,)).subtype(namedValues=NamedValues(("active", 1), ("admitted", 2), ("provisioned", 3), ))).setMaxAccess("noaccess")
if mibBuilder.loadTexts: docsIetfQosParamSetType.setDescription("Defines the type of the QOS parameter set defined\nby this row. active(1) indicates the Active QOS\nparameter set, describing the service currently\nbeing provided by the DOCSIS MAC domain to the\nService Flow. admitted(2) indicates the Admitted\nQOS Parameter Set, describing services reserved by\nthe DOCSIS MAC domain for use by the service\nflow. provisioned (3) describes the QOS Parameter\nSet defined in the DOCSIS CM Configuration file for\nthe Service Flow.")
docsIetfQosParamSetRequestPolicyOct = MibTableColumn((1, 3, 6, 1, 2, 1, 127, 1, 2, 1, 21), OctetString().subtype(subtypeSpec=ValueSizeConstraint(4, 4)).setFixedLength(4)).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsIetfQosParamSetRequestPolicyOct.setDescription("Specifies which transmit interval opportunities\nthe CM omits for upstream transmission requests and\npacket transmissions. This object takes its\ndefault value for downstream Service Flows.\n\nUnless otherwise indicated, a bit value of 1 means\nthat a CM must not use that opportunity for\nupstream transmission.\n\nIf bit 0 is the least significant bit of the\nleast significant (4th) octet, and if bit number\nis increased with significance, the bit definitions\nare defined as follows:\n\nbroadcastReqOpp(0):\n all CMs broadcast request opportunities\n\npriorityReqMulticastReq(1):\n priority request multicast request\n opportunities\n\nreqDataForReq(2):\n request/data opportunities for requests\n\nreqDataForData(3):\n request/data opportunities for data\n\npiggybackReqWithData(4):\n piggyback requests with data\n\nconcatenateData(5):\n concatenate data\n\nfragmentData(6):\n fragment data\n\nsuppresspayloadheaders(7):\n suppress payload headers\n\n\n\n\ndropPktsExceedUGSize(8):\n A value of 1 means that the Service Flow must\n drop packets that do not fit in the Unsolicited\n Grant size.\n\nIf the referenced parameter is not present in\na QOS Parameter Set, the value of this object is\nreported as '00000000'H.")
docsIetfQosParamSetBitMap = MibTableColumn((1, 3, 6, 1, 2, 1, 127, 1, 2, 1, 22), Bits().subtype(namedValues=NamedValues(("trafficPriority", 0), ("maxTrafficRate", 1), ("nomPollInterval", 10), ("tolPollJitter", 11), ("unsolicitGrantSize", 12), ("nomGrantInterval", 13), ("tolGrantJitter", 14), ("grantsPerInterval", 15), ("tosOverwrite", 16), ("maxLatency", 17), ("maxTrafficBurst", 2), ("minReservedRate", 3), ("minReservedPkt", 4), ("activeTimeout", 5), ("admittedTimeout", 6), ("maxConcatBurst", 7), ("schedulingType", 8), ("requestPolicy", 9), ))).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsIetfQosParamSetBitMap.setDescription("This object indicates the set of QOS Parameter\nSet parameters actually signaled in the\nDOCSIS registration or dynamic service request\nmessage that created or modified the QOS Parameter\nSet. A bit is set to 1 when the parameter described\nby the indicated reference section is present\nin the original request.\n\nNote that when Service Class names are expanded,\nthe registration or dynamic response message may\ncontain parameters as expanded by the CMTS based\n\n\n\non a stored service class. These expanded\nparameters are not indicated by a 1 bit in this\nobject.\n\nNote that even though some QOS Parameter Set\nparameters may not be signaled in a message\n(so that the paramater's bit in this object is 0),\nthe DOCSIS specification requires that default\nvalues be used. These default values are reported\nas the corresponding object's value in the row.\n\nNote that BITS objects are encoded most\nsignificant bit first. For example, if bits\n1 and 16 are set, the value of this object\nis the octet string '400080'H.")
# ---------------------------------------------------------------------------
# docsIetfQosServiceFlowTable (1.3.6.1.2.1.127.1.3)
# One row per DOCSIS QOS Service Flow, indexed by (ifIndex,
# docsIetfQosServiceFlowId).  All columns are read-only; the flow-ID index
# column itself is not-accessible.  Generated pysnmp MIB code: statement
# order and OID tuples must not be altered.
# ---------------------------------------------------------------------------
docsIetfQosServiceFlowTable = MibTable((1, 3, 6, 1, 2, 1, 127, 1, 3))
if mibBuilder.loadTexts: docsIetfQosServiceFlowTable.setDescription("This table describes the set of DOCSIS-QOS\nService Flows in a managed device.")
# Conceptual row; ifIndex is imported from IF-MIB, the flow ID is local.
docsIetfQosServiceFlowEntry = MibTableRow((1, 3, 6, 1, 2, 1, 127, 1, 3, 1)).setIndexNames((0, "IF-MIB", "ifIndex"), (0, "DOCS-IETF-QOS-MIB", "docsIetfQosServiceFlowId"))
if mibBuilder.loadTexts: docsIetfQosServiceFlowEntry.setDescription("Describes a Service Flow.\nAn entry in the table exists for each\nService Flow ID. The ifIndex is an\nifType of docsCableMaclayer(127).")
# Index column: CMTS-assigned flow ID, 1..4294967295, not-accessible.
docsIetfQosServiceFlowId = MibTableColumn((1, 3, 6, 1, 2, 1, 127, 1, 3, 1, 1), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 4294967295))).setMaxAccess("noaccess")
if mibBuilder.loadTexts: docsIetfQosServiceFlowId.setDescription("An index assigned to a Service Flow by CMTS.")
# Service ID (SID), 0..16383; 0 when no SID is associated with the flow.
docsIetfQosServiceFlowSID = MibTableColumn((1, 3, 6, 1, 2, 1, 127, 1, 3, 1, 2), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 16383))).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsIetfQosServiceFlowSID.setDescription("Service Identifier (SID) assigned to an\nadmitted or active Service Flow. This object\nreports a value of 0 if a Service ID is not\nassociated with the Service Flow. Only active\nor admitted upstream Service Flows will have a\nService ID (SID).")
# Upstream/downstream direction (textual convention defined elsewhere).
docsIetfQosServiceFlowDirection = MibTableColumn((1, 3, 6, 1, 2, 1, 127, 1, 3, 1, 3), DocsIetfQosRfMacIfDirection()).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsIetfQosServiceFlowDirection.setDescription("The direction of the Service Flow.")
# TruthValue flag: true when this is the primary (default) Service Flow.
docsIetfQosServiceFlowPrimary = MibTableColumn((1, 3, 6, 1, 2, 1, 127, 1, 3, 1, 4), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsIetfQosServiceFlowPrimary.setDescription("Object reflects whether Service Flow is the primary\nor a secondary Service Flow.\n\nA primary Service Flow is the default Service Flow\nfor otherwise unclassified traffic and all MAC\nmessages.")
# ---------------------------------------------------------------------------
# docsIetfQosServiceFlowStatsTable (1.3.6.1.2.1.127.1.4)
# Per-Service-Flow traffic counters, sharing the (ifIndex,
# docsIetfQosServiceFlowId) index of the service flow table above.
# All columns are read-only counters/timestamps.  Generated pysnmp code.
# ---------------------------------------------------------------------------
docsIetfQosServiceFlowStatsTable = MibTable((1, 3, 6, 1, 2, 1, 127, 1, 4))
if mibBuilder.loadTexts: docsIetfQosServiceFlowStatsTable.setDescription("This table describes statistics associated with the\nService Flows in a managed device.")
docsIetfQosServiceFlowStatsEntry = MibTableRow((1, 3, 6, 1, 2, 1, 127, 1, 4, 1)).setIndexNames((0, "IF-MIB", "ifIndex"), (0, "DOCS-IETF-QOS-MIB", "docsIetfQosServiceFlowId"))
if mibBuilder.loadTexts: docsIetfQosServiceFlowStatsEntry.setDescription("Describes a set of Service Flow statistics.\nAn entry in the table exists for each\nService Flow ID. The ifIndex is an\nifType of docsCableMaclayer(127).")
# 64-bit count of Packet Data PDUs on this flow (excludes MAC management).
docsIetfQosServiceFlowPkts = MibTableColumn((1, 3, 6, 1, 2, 1, 127, 1, 4, 1, 1), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsIetfQosServiceFlowPkts.setDescription("For outgoing Service Flows, this object counts the\nnumber of Packet Data PDUs forwarded to this\nService Flow. For incoming upstream CMTS service\nflows, this object counts the number of Packet\nData PDUs actually received on the Service Flow\nidentified by the SID for which the packet was\nscheduled. CMs not classifying downstream packets\nmay report this object's value as 0 for downstream\nService Flows. This object does not count\nMAC-specific management messages.\n\nParticularly for UGS flows, packets sent on the\nprimary Service Flow in violation of the UGS grant\nsize should be counted only by the instance of this\nobject that is associated with the primary service\n\n\n\nflow.\n\nUnclassified upstream user data packets (i.e., non-\nMAC-management) forwarded to the primary upstream\nService Flow should be counted by the instance of\nthis object that is associated with the primary\nservice flow.\n\nThis object does include packets counted by\ndocsIetfQosServiceFlowPolicedDelayPkts, but does not\ninclude packets counted by\ndocsIetfQosServiceFlowPolicedDropPkts\nand docsIetfQosServiceFlowPHSUnknowns.\n\nThis counter's last discontinuity is the\nifCounterDiscontinuityTime for the same ifIndex that\nindexes this object.")
# 64-bit octet count for the packets counted in ...FlowPkts.
docsIetfQosServiceFlowOctets = MibTableColumn((1, 3, 6, 1, 2, 1, 127, 1, 4, 1, 2), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsIetfQosServiceFlowOctets.setDescription("The number of octets from the byte after the MAC\nheader HCS to the end of the CRC for all packets\ncounted in the docsIetfQosServiceFlowPkts object for\nthis row. Note that this counts the octets after\npayload header suppression and before payload\nheader expansion have been applied.\n\nThis counter's last discontinuity is the\nifCounterDiscontinuityTime for the same ifIndex that\nindexes this object.")
# sysUpTime snapshot at flow creation.
docsIetfQosServiceFlowTimeCreated = MibTableColumn((1, 3, 6, 1, 2, 1, 127, 1, 4, 1, 3), TimeStamp()).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsIetfQosServiceFlowTimeCreated.setDescription("The value of sysUpTime when the service flow\nwas created.")
# Seconds the flow has been active (32-bit counter).
docsIetfQosServiceFlowTimeActive = MibTableColumn((1, 3, 6, 1, 2, 1, 127, 1, 4, 1, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsIetfQosServiceFlowTimeActive.setDescription("The number of seconds that the service flow\nhas been active.\n\nThis counter's last discontinuity is the\nifCounterDiscontinuityTime for the same ifIndex that\nindexes this object.")
# Packets received with an unknown payload-header-suppression index.
docsIetfQosServiceFlowPHSUnknowns = MibTableColumn((1, 3, 6, 1, 2, 1, 127, 1, 4, 1, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsIetfQosServiceFlowPHSUnknowns.setDescription("For incoming upstream CMTS service flows, this\nobject counts the number of packets received\nwith an unknown payload header suppression index.\nThe service flow is identified by the SID for which\nthe packet was scheduled.\n\nOn a CM, only this object's instance for the primary\ndownstream service flow counts packets received with\nan unknown payload header suppression index. All\nother downstream service flows on CM report this\nobjects value as 0.\n\nAll outgoing service flows report this object's\nvalue as 0.\n\nThis counter's last discontinuity is the\nifCounterDiscontinuityTime for the same ifIndex that\nindexes this object.")
# Packets dropped by rate policing / UGS grant-size enforcement.
docsIetfQosServiceFlowPolicedDropPkts = MibTableColumn((1, 3, 6, 1, 2, 1, 127, 1, 4, 1, 6), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsIetfQosServiceFlowPolicedDropPkts.setDescription("For outgoing service flows, this object counts the\nnumber of Packet Data PDUs classified to this\nservice flow dropped due to:\n (1) implementation-dependent excessive delay\n while enforcing the Maximum Sustained\n Traffic Rate; or\n (2) UGS packets dropped due to exceeding the\n Unsolicited Grant Size with a\n Request/Transmission policy that requires\n such packets to be dropped.\n\nClassified packets dropped due to other reasons\n\n\n\nmust be counted in ifOutDiscards for the interface\nof this service flow. This object reports 0 for\nincoming service flows.\n\nThis counter's last discontinuity is the\nifCounterDiscontinuityTime for the same ifIndex that\nindexes this object.")
# Packets delayed (not dropped) by Maximum Sustained Traffic Rate policing.
docsIetfQosServiceFlowPolicedDelayPkts = MibTableColumn((1, 3, 6, 1, 2, 1, 127, 1, 4, 1, 7), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsIetfQosServiceFlowPolicedDelayPkts.setDescription("This object counts only outgoing packets delayed in\norder to maintain the Maximum Sustained Traffic\nRate. This object will always report a value of 0\nfor UGS flows because the Maximum Sustained Traffic\nRate does not apply. This object is 0 for incoming\nservice flows.\n\nThis counter's last discontinuity is the\nifCounterDiscontinuityTime for the same ifIndex that\nindexes this object.")
# ---------------------------------------------------------------------------
# docsIetfQosUpstreamStatsTable (1.3.6.1.2.1.127.1.5)
# Upstream-only fragmentation/concatenation counters, indexed by
# (ifIndex, docsIetfQosSID).  Generated pysnmp code.
# ---------------------------------------------------------------------------
docsIetfQosUpstreamStatsTable = MibTable((1, 3, 6, 1, 2, 1, 127, 1, 5))
if mibBuilder.loadTexts: docsIetfQosUpstreamStatsTable.setDescription("This table describes statistics associated with\nupstream service flows. All counted frames must\nbe received without a Frame Check Sequence (FCS)\nerror.")
docsIetfQosUpstreamStatsEntry = MibTableRow((1, 3, 6, 1, 2, 1, 127, 1, 5, 1)).setIndexNames((0, "IF-MIB", "ifIndex"), (0, "DOCS-IETF-QOS-MIB", "docsIetfQosSID"))
if mibBuilder.loadTexts: docsIetfQosUpstreamStatsEntry.setDescription("Describes a set of upstream service flow\nstatistics. An entry in the table exists for each\nupstream Service Flow in a managed device.\nThe ifIndex is an ifType of\ndocsCableMaclayer(127).")
# Index column: SID of the upstream flow, 1..16383, not-accessible.
docsIetfQosSID = MibTableColumn((1, 3, 6, 1, 2, 1, 127, 1, 5, 1, 1), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 16383))).setMaxAccess("noaccess")
if mibBuilder.loadTexts: docsIetfQosSID.setDescription("Identifies a service ID for an admitted or active\nupstream service flow.")
# Fragmentation headers seen, whether or not reassembly succeeded.
docsIetfQosUpstreamFragments = MibTableColumn((1, 3, 6, 1, 2, 1, 127, 1, 5, 1, 2), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsIetfQosUpstreamFragments.setDescription("The number of fragmentation headers received on an\nupstream service flow, regardless of whether\nthe fragment was correctly reassembled into a\nvalid packet.\n\nThis counter's last discontinuity is the\nifCounterDiscontinuityTime for the same ifIndex that\nindexes this object.")
# Fragments discarded without yielding a valid packet.
docsIetfQosUpstreamFragDiscards = MibTableColumn((1, 3, 6, 1, 2, 1, 127, 1, 5, 1, 3), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsIetfQosUpstreamFragDiscards.setDescription("The number of upstream fragments discarded and not\nassembled into a valid upstream packet.\n\nThis counter's last discontinuity is the\nifCounterDiscontinuityTime for the same ifIndex that\nindexes this object.")
# Concatenation headers received on the flow.
docsIetfQosUpstreamConcatBursts = MibTableColumn((1, 3, 6, 1, 2, 1, 127, 1, 5, 1, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsIetfQosUpstreamConcatBursts.setDescription("The number of concatenation headers received on an\nupstream service flow.\nThis counter's last discontinuity is the\nifCounterDiscontinuityTime for the same ifIndex that\nindexes this object.")
# ---------------------------------------------------------------------------
# docsIetfQosDynamicServiceStatsTable (1.3.6.1.2.1.127.1.6)
# Counters for DOCSIS dynamic-service signaling (DSA/DSC/DSD message and
# transaction counts, plus DCC), two rows per MAC interface — one per
# direction — indexed by (ifIndex, docsIetfQosIfDirection).
# All message/transaction columns are read-only Counter32.  Generated code.
# ---------------------------------------------------------------------------
docsIetfQosDynamicServiceStatsTable = MibTable((1, 3, 6, 1, 2, 1, 127, 1, 6))
if mibBuilder.loadTexts: docsIetfQosDynamicServiceStatsTable.setDescription("This table describes statistics associated with the\nDynamic Service Flows in a managed device.")
docsIetfQosDynamicServiceStatsEntry = MibTableRow((1, 3, 6, 1, 2, 1, 127, 1, 6, 1)).setIndexNames((0, "IF-MIB", "ifIndex"), (0, "DOCS-IETF-QOS-MIB", "docsIetfQosIfDirection"))
if mibBuilder.loadTexts: docsIetfQosDynamicServiceStatsEntry.setDescription("Describes a set of dynamic service flow statistics.\nTwo entries exist for each DOCSIS MAC layer\ninterface for the upstream and downstream\ndirection. On the CMTS, the downstream direction\nrow indicates messages transmitted or transactions\noriginated by the CMTS. The upstream direction row\nindicates messages received or transaction\noriginated by the CM. On the CM, the downstream\ndirection row indicates messages received or\ntransactions originated by the CMTS. The upstream\ndirection row indicates messages transmitted by\nthe CM or transactions originated by the CM.\nThe ifIndex is an ifType of\ndocsCableMaclayer(127).")
# Index column: direction of the row, not-accessible.
docsIetfQosIfDirection = MibTableColumn((1, 3, 6, 1, 2, 1, 127, 1, 6, 1, 1), DocsIetfQosRfMacIfDirection()).setMaxAccess("noaccess")
if mibBuilder.loadTexts: docsIetfQosIfDirection.setDescription("The direction of interface.")
# --- Dynamic Service Addition (DSA) message counters -----------------------
docsIetfQosDSAReqs = MibTableColumn((1, 3, 6, 1, 2, 1, 127, 1, 6, 1, 2), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsIetfQosDSAReqs.setDescription("The number of Dynamic Service Addition Requests,\nincluding retries.\n\nThis counter's last discontinuity is the\nifCounterDiscontinuityTime for the same ifIndex that\nindexes this object.")
docsIetfQosDSARsps = MibTableColumn((1, 3, 6, 1, 2, 1, 127, 1, 6, 1, 3), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsIetfQosDSARsps.setDescription("The number of Dynamic Service Addition Responses,\nincluding retries.\n\nThis counter's last discontinuity is the\nifCounterDiscontinuityTime for the same ifIndex that\n\n\n\nindexes this object.")
docsIetfQosDSAAcks = MibTableColumn((1, 3, 6, 1, 2, 1, 127, 1, 6, 1, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsIetfQosDSAAcks.setDescription("The number of Dynamic Service Addition\nAcknowledgements, including retries.\n\nThis counter's last discontinuity is the\nifCounterDiscontinuityTime for the same ifIndex that\nindexes this object.")
# --- Dynamic Service Change (DSC) message counters -------------------------
docsIetfQosDSCReqs = MibTableColumn((1, 3, 6, 1, 2, 1, 127, 1, 6, 1, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsIetfQosDSCReqs.setDescription("The number of Dynamic Service Change Requests,\nincluding retries.\n\nThis counter's last discontinuity is the\nifCounterDiscontinuityTime for the same ifIndex that\nindexes this object.")
docsIetfQosDSCRsps = MibTableColumn((1, 3, 6, 1, 2, 1, 127, 1, 6, 1, 6), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsIetfQosDSCRsps.setDescription("The number of Dynamic Service Change Responses,\nincluding retries.\n\nThis counter's last discontinuity is the\nifCounterDiscontinuityTime for the same ifIndex that\nindexes this object.")
docsIetfQosDSCAcks = MibTableColumn((1, 3, 6, 1, 2, 1, 127, 1, 6, 1, 7), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsIetfQosDSCAcks.setDescription("The number of Dynamic Service Change\nAcknowledgements, including retries.\n\nThis counter's last discontinuity is the\nifCounterDiscontinuityTime for the same ifIndex that\n\n\n\nindexes this object.")
# --- Dynamic Service Delete (DSD) message counters -------------------------
docsIetfQosDSDReqs = MibTableColumn((1, 3, 6, 1, 2, 1, 127, 1, 6, 1, 8), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsIetfQosDSDReqs.setDescription("The number of Dynamic Service Delete Requests,\nincluding retries.\n\nThis counter's last discontinuity is the\nifCounterDiscontinuityTime for the same ifIndex that\nindexes this object.")
docsIetfQosDSDRsps = MibTableColumn((1, 3, 6, 1, 2, 1, 127, 1, 6, 1, 9), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsIetfQosDSDRsps.setDescription("The number of Dynamic Service Delete Responses,\nincluding retries.\n\nThis counter's last discontinuity is the\nifCounterDiscontinuityTime for the same ifIndex that\nindexes this object.")
# --- Transaction success/failure counters ----------------------------------
docsIetfQosDynamicAdds = MibTableColumn((1, 3, 6, 1, 2, 1, 127, 1, 6, 1, 10), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsIetfQosDynamicAdds.setDescription("The number of successful Dynamic Service Addition\ntransactions.\n\nThis counter's last discontinuity is the\nifCounterDiscontinuityTime for the same ifIndex that\nindexes this object.")
docsIetfQosDynamicAddFails = MibTableColumn((1, 3, 6, 1, 2, 1, 127, 1, 6, 1, 11), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsIetfQosDynamicAddFails.setDescription("The number of failed Dynamic Service Addition\ntransactions.\n\nThis counter's last discontinuity is the\nifCounterDiscontinuityTime for the same ifIndex that\n\n\n\nindexes this object.")
docsIetfQosDynamicChanges = MibTableColumn((1, 3, 6, 1, 2, 1, 127, 1, 6, 1, 12), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsIetfQosDynamicChanges.setDescription("The number of successful Dynamic Service Change\ntransactions.\n\nThis counter's last discontinuity is the\nifCounterDiscontinuityTime for the same ifIndex that\nindexes this object.")
docsIetfQosDynamicChangeFails = MibTableColumn((1, 3, 6, 1, 2, 1, 127, 1, 6, 1, 13), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsIetfQosDynamicChangeFails.setDescription("The number of failed Dynamic Service Change\ntransactions.\n\nThis counter's last discontinuity is the\nifCounterDiscontinuityTime for the same ifIndex that\nindexes this object.")
docsIetfQosDynamicDeletes = MibTableColumn((1, 3, 6, 1, 2, 1, 127, 1, 6, 1, 14), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsIetfQosDynamicDeletes.setDescription("The number of successful Dynamic Service Delete\ntransactions.\n\nThis counter's last discontinuity is the\nifCounterDiscontinuityTime for the same ifIndex that\nindexes this object.")
docsIetfQosDynamicDeleteFails = MibTableColumn((1, 3, 6, 1, 2, 1, 127, 1, 6, 1, 15), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsIetfQosDynamicDeleteFails.setDescription("The number of failed Dynamic Service Delete\ntransactions.\n\nThis counter's last discontinuity is the\nifCounterDiscontinuityTime for the same ifIndex that\n\n\n\nindexes this object.")
# --- Dynamic Channel Change (DCC) counters ---------------------------------
docsIetfQosDCCReqs = MibTableColumn((1, 3, 6, 1, 2, 1, 127, 1, 6, 1, 16), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsIetfQosDCCReqs.setDescription("The number of Dynamic Channel Change Request\nmessages traversing an interface. This count\nis nonzero only on downstream direction rows.\nThis count should include the number of retries.\n\nThis counter's last discontinuity is the\nifCounterDiscontinuityTime for the same ifIndex\nthat indexes this object.")
docsIetfQosDCCRsps = MibTableColumn((1, 3, 6, 1, 2, 1, 127, 1, 6, 1, 17), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsIetfQosDCCRsps.setDescription("The number of Dynamic Channel Change Response\nmessages traversing an interface. This count is\nnonzero only on upstream direction rows. This count\nshould include the number of retries.\n\nThis counter's last discontinuity is the\nifCounterDiscontinuityTime for the same ifIndex that\nindexes this object.")
docsIetfQosDCCAcks = MibTableColumn((1, 3, 6, 1, 2, 1, 127, 1, 6, 1, 18), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsIetfQosDCCAcks.setDescription("The number of Dynamic Channel Change Acknowledgement\nmessages traversing an interface. This count\nis nonzero only on downstream direction rows.\nThis count should include the number of retries.\n\nThis counter's last discontinuity is the\nifCounterDiscontinuityTime for the same ifIndex that\nindexes this object.")
docsIetfQosDCCs = MibTableColumn((1, 3, 6, 1, 2, 1, 127, 1, 6, 1, 19), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsIetfQosDCCs.setDescription("The number of successful Dynamic Channel Change\ntransactions. This count is nonzero only on\ndownstream direction rows.\n\nThis counter's last discontinuity is the\nifCounterDiscontinuityTime for the same ifIndex that\nindexes this object.")
docsIetfQosDCCFails = MibTableColumn((1, 3, 6, 1, 2, 1, 127, 1, 6, 1, 20), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsIetfQosDCCFails.setDescription("The number of failed Dynamic Channel Change\ntransactions. This count is nonzero only on\ndownstream direction rows.\n\nThis counter's last discontinuity is the\nifCounterDiscontinuityTime for the same ifIndex that\nindexes this object.")
# ---------------------------------------------------------------------------
# docsIetfQosServiceFlowLogTable (1.3.6.1.2.1.127.1.7)
# Log of deleted (disconnected) service flows on the CMTS, indexed by a
# single local log index.  Columns snapshot final values of the live
# service-flow objects.  Only the Control column is writable (to delete
# a log row).  Generated pysnmp code.
# ---------------------------------------------------------------------------
docsIetfQosServiceFlowLogTable = MibTable((1, 3, 6, 1, 2, 1, 127, 1, 7))
if mibBuilder.loadTexts: docsIetfQosServiceFlowLogTable.setDescription("This table contains a log of the disconnected\nService Flows in a managed device.")
docsIetfQosServiceFlowLogEntry = MibTableRow((1, 3, 6, 1, 2, 1, 127, 1, 7, 1)).setIndexNames((0, "DOCS-IETF-QOS-MIB", "docsIetfQosServiceFlowLogIndex"))
if mibBuilder.loadTexts: docsIetfQosServiceFlowLogEntry.setDescription("The information regarding a single disconnected\nservice flow.")
# Index column: unique log-entry index, 1..4294967295, not-accessible.
docsIetfQosServiceFlowLogIndex = MibTableColumn((1, 3, 6, 1, 2, 1, 127, 1, 7, 1, 1), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 4294967295))).setMaxAccess("noaccess")
if mibBuilder.loadTexts: docsIetfQosServiceFlowLogIndex.setDescription("Unique index for a logged service flow.")
# Interface the flow lived on (docsCableMaclayer ifIndex).
docsIetfQosServiceFlowLogIfIndex = MibTableColumn((1, 3, 6, 1, 2, 1, 127, 1, 7, 1, 2), InterfaceIndex()).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsIetfQosServiceFlowLogIfIndex.setDescription("The ifIndex of ifType docsCableMaclayer(127)\non the CMTS where the service flow was present.")
# CMTS-assigned Service Flow ID of the deleted flow.
docsIetfQosServiceFlowLogSFID = MibTableColumn((1, 3, 6, 1, 2, 1, 127, 1, 7, 1, 3), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 4294967295))).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsIetfQosServiceFlowLogSFID.setDescription("The index assigned to the service flow by the CMTS.")
# MAC address of the cable modem that owned the flow.
docsIetfQosServiceFlowLogCmMac = MibTableColumn((1, 3, 6, 1, 2, 1, 127, 1, 7, 1, 4), MacAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsIetfQosServiceFlowLogCmMac.setDescription("The MAC address for the cable modem associated with\nthe service flow.")
# Final packet / octet totals (post payload-header suppression).
docsIetfQosServiceFlowLogPkts = MibTableColumn((1, 3, 6, 1, 2, 1, 127, 1, 7, 1, 5), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsIetfQosServiceFlowLogPkts.setDescription("The number of packets counted on this service flow\nafter payload header suppression.")
docsIetfQosServiceFlowLogOctets = MibTableColumn((1, 3, 6, 1, 2, 1, 127, 1, 7, 1, 6), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsIetfQosServiceFlowLogOctets.setDescription("The number of octets counted on this service flow\nafter payload header suppression.")
# Lifetime timestamps: deletion time, creation time, total active seconds.
docsIetfQosServiceFlowLogTimeDeleted = MibTableColumn((1, 3, 6, 1, 2, 1, 127, 1, 7, 1, 7), TimeStamp()).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsIetfQosServiceFlowLogTimeDeleted.setDescription("The value of sysUpTime when the service flow\nwas deleted.")
docsIetfQosServiceFlowLogTimeCreated = MibTableColumn((1, 3, 6, 1, 2, 1, 127, 1, 7, 1, 8), TimeStamp()).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsIetfQosServiceFlowLogTimeCreated.setDescription("The value of sysUpTime when the service flow\nwas created.")
docsIetfQosServiceFlowLogTimeActive = MibTableColumn((1, 3, 6, 1, 2, 1, 127, 1, 7, 1, 9), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsIetfQosServiceFlowLogTimeActive.setDescription("The total time that the service flow was active.")
# Snapshots of the live flow's direction / primary flag / class name.
docsIetfQosServiceFlowLogDirection = MibTableColumn((1, 3, 6, 1, 2, 1, 127, 1, 7, 1, 10), DocsIetfQosRfMacIfDirection()).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsIetfQosServiceFlowLogDirection.setDescription("The value of docsIetfQosServiceFlowDirection\nfor the service flow.")
docsIetfQosServiceFlowLogPrimary = MibTableColumn((1, 3, 6, 1, 2, 1, 127, 1, 7, 1, 11), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsIetfQosServiceFlowLogPrimary.setDescription("The value of docsIetfQosServiceFlowPrimary for the\nservice flow.")
docsIetfQosServiceFlowLogServiceClassName = MibTableColumn((1, 3, 6, 1, 2, 1, 127, 1, 7, 1, 12), SnmpAdminString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsIetfQosServiceFlowLogServiceClassName.setDescription("The value of docsIetfQosParamSetServiceClassName for\nthe provisioned QOS Parameter Set of the\nservice flow.")
# Final policed-drop / policed-delay counter values.
docsIetfQosServiceFlowLogPolicedDropPkts = MibTableColumn((1, 3, 6, 1, 2, 1, 127, 1, 7, 1, 13), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsIetfQosServiceFlowLogPolicedDropPkts.setDescription("The final value of\ndocsIetfQosServiceFlowPolicedDropPkts for the\nservice flow.")
docsIetfQosServiceFlowLogPolicedDelayPkts = MibTableColumn((1, 3, 6, 1, 2, 1, 127, 1, 7, 1, 14), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsIetfQosServiceFlowLogPolicedDelayPkts.setDescription("The final value of\ndocsIetfQosServiceFlowPolicedDelayPkts for the\nservice flow.")
# Read-write control: write destroy(6) to remove the row; reads active(1).
docsIetfQosServiceFlowLogControl = MibTableColumn((1, 3, 6, 1, 2, 1, 127, 1, 7, 1, 15), Integer().subtype(subtypeSpec=SingleValueConstraint(1,6,)).subtype(namedValues=NamedValues(("active", 1), ("destroy", 6), ))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: docsIetfQosServiceFlowLogControl.setDescription("Setting this object to the value destroy(6) removes\nthis entry from the table.\n\nReading this object returns the value active(1).")
# --- docsIetfQosServiceClassTable (OID 1.3.6.1.2.1.127.1.8) ---
# Provisioned Service Class templates on a CMTS, indexed by
# docsIetfQosServiceClassName.  Rows are read-create; each column is a
# template for the corresponding docsIetfQosParamSet* object, and most carry
# a default via .clone(...).
docsIetfQosServiceClassTable = MibTable((1, 3, 6, 1, 2, 1, 127, 1, 8))
if mibBuilder.loadTexts: docsIetfQosServiceClassTable.setDescription("This table describes the set of DOCSIS-QOS\nService Classes in a CMTS.")
docsIetfQosServiceClassEntry = MibTableRow((1, 3, 6, 1, 2, 1, 127, 1, 8, 1)).setIndexNames((0, "DOCS-IETF-QOS-MIB", "docsIetfQosServiceClassName"))
if mibBuilder.loadTexts: docsIetfQosServiceClassEntry.setDescription("A provisioned service class on a CMTS.\nEach entry defines a template for certain\nDOCSIS QOS Parameter Set values. When a CM\ncreates or modifies an Admitted QOS Parameter Set\nfor a Service Flow, it may reference a Service Class\nName instead of providing explicit QOS Parameter\nSet values. In this case, the CMTS populates\nthe QOS Parameter Set with the applicable\ncorresponding values from the named Service Class.\nSubsequent changes to a Service Class row do not\naffect the QOS Parameter Set values of any service\nflows already admitted.\n\nA service class template applies to only\na single direction, as indicated in the\ndocsIetfQosServiceClassDirection object.")
# Index column: 1..15 chars (16th char is DOCSIS's terminating zero, not stored).
docsIetfQosServiceClassName = MibTableColumn((1, 3, 6, 1, 2, 1, 127, 1, 8, 1, 1), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(1, 15))).setMaxAccess("noaccess")
if mibBuilder.loadTexts: docsIetfQosServiceClassName.setDescription("Service Class Name. DOCSIS specifies that the\nmaximum size is 16 ASCII characters including\na terminating zero. The terminating zero is not\nrepresented in this SnmpAdminString syntax object.")
docsIetfQosServiceClassStatus = MibTableColumn((1, 3, 6, 1, 2, 1, 127, 1, 8, 1, 2), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: docsIetfQosServiceClassStatus.setDescription("Used to create or delete rows in this table.\nThere is no restriction on the ability to change\nvalues in this row while the row is active.\nInactive rows need not be timed out.")
docsIetfQosServiceClassPriority = MibTableColumn((1, 3, 6, 1, 2, 1, 127, 1, 8, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 7)).clone(0)).setMaxAccess("readcreate")
if mibBuilder.loadTexts: docsIetfQosServiceClassPriority.setDescription("Template for docsIetfQosParamSetPriority.")
docsIetfQosServiceClassMaxTrafficRate = MibTableColumn((1, 3, 6, 1, 2, 1, 127, 1, 8, 1, 4), DocsIetfQosBitRate().clone('0')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: docsIetfQosServiceClassMaxTrafficRate.setDescription("Template for docsIetfQosParamSetMaxTrafficRate.")
docsIetfQosServiceClassMaxTrafficBurst = MibTableColumn((1, 3, 6, 1, 2, 1, 127, 1, 8, 1, 5), Unsigned32().clone(3044)).setMaxAccess("readcreate")
if mibBuilder.loadTexts: docsIetfQosServiceClassMaxTrafficBurst.setDescription("Template for docsIetfQosParamSetMaxTrafficBurst.")
docsIetfQosServiceClassMinReservedRate = MibTableColumn((1, 3, 6, 1, 2, 1, 127, 1, 8, 1, 6), DocsIetfQosBitRate().clone('0')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: docsIetfQosServiceClassMinReservedRate.setDescription("Template for docsIetfQosParamSEtMinReservedRate.")
docsIetfQosServiceClassMinReservedPkt = MibTableColumn((1, 3, 6, 1, 2, 1, 127, 1, 8, 1, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: docsIetfQosServiceClassMinReservedPkt.setDescription("Template for docsIetfQosParamSetMinReservedPkt.")
docsIetfQosServiceClassMaxConcatBurst = MibTableColumn((1, 3, 6, 1, 2, 1, 127, 1, 8, 1, 8), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535)).clone(1522)).setMaxAccess("readcreate")
if mibBuilder.loadTexts: docsIetfQosServiceClassMaxConcatBurst.setDescription("Template for docsIetfQosParamSetMaxConcatBurst.")
docsIetfQosServiceClassNomPollInterval = MibTableColumn((1, 3, 6, 1, 2, 1, 127, 1, 8, 1, 9), Unsigned32().clone(0)).setMaxAccess("readcreate")
if mibBuilder.loadTexts: docsIetfQosServiceClassNomPollInterval.setDescription("Template for docsIetfQosParamSetNomPollInterval.")
docsIetfQosServiceClassTolPollJitter = MibTableColumn((1, 3, 6, 1, 2, 1, 127, 1, 8, 1, 10), Unsigned32().clone(0)).setMaxAccess("readcreate")
if mibBuilder.loadTexts: docsIetfQosServiceClassTolPollJitter.setDescription("Template for docsIetfQosParamSetTolPollJitter.")
docsIetfQosServiceClassUnsolicitGrantSize = MibTableColumn((1, 3, 6, 1, 2, 1, 127, 1, 8, 1, 11), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535)).clone(0)).setMaxAccess("readcreate")
if mibBuilder.loadTexts: docsIetfQosServiceClassUnsolicitGrantSize.setDescription("Template for docsIetfQosParamSetUnsolicitGrantSize.")
docsIetfQosServiceClassNomGrantInterval = MibTableColumn((1, 3, 6, 1, 2, 1, 127, 1, 8, 1, 12), Unsigned32().clone(0)).setMaxAccess("readcreate")
if mibBuilder.loadTexts: docsIetfQosServiceClassNomGrantInterval.setDescription("Template for docsIetfQosParamSetNomGrantInterval.")
docsIetfQosServiceClassTolGrantJitter = MibTableColumn((1, 3, 6, 1, 2, 1, 127, 1, 8, 1, 13), Unsigned32().clone(0)).setMaxAccess("readcreate")
if mibBuilder.loadTexts: docsIetfQosServiceClassTolGrantJitter.setDescription("Template for docsIetfQosParamSetTolGrantJitter.")
docsIetfQosServiceClassGrantsPerInterval = MibTableColumn((1, 3, 6, 1, 2, 1, 127, 1, 8, 1, 14), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 127)).clone(0)).setMaxAccess("readcreate")
if mibBuilder.loadTexts: docsIetfQosServiceClassGrantsPerInterval.setDescription("Template for docsIetfQosParamSetGrantsPerInterval.")
docsIetfQosServiceClassMaxLatency = MibTableColumn((1, 3, 6, 1, 2, 1, 127, 1, 8, 1, 15), Unsigned32().clone(0)).setMaxAccess("readcreate")
if mibBuilder.loadTexts: docsIetfQosServiceClassMaxLatency.setDescription("Template for docsIetfQosParamSetClassMaxLatency.")
docsIetfQosServiceClassActiveTimeout = MibTableColumn((1, 3, 6, 1, 2, 1, 127, 1, 8, 1, 16), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535)).clone(0)).setMaxAccess("readcreate")
if mibBuilder.loadTexts: docsIetfQosServiceClassActiveTimeout.setDescription("Template for docsIetfQosParamSetActiveTimeout.")
docsIetfQosServiceClassAdmittedTimeout = MibTableColumn((1, 3, 6, 1, 2, 1, 127, 1, 8, 1, 17), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535)).clone(200)).setMaxAccess("readcreate")
if mibBuilder.loadTexts: docsIetfQosServiceClassAdmittedTimeout.setDescription("Template for docsIetfQosParamSetAdmittedTimeout.")
docsIetfQosServiceClassSchedulingType = MibTableColumn((1, 3, 6, 1, 2, 1, 127, 1, 8, 1, 18), DocsIetfQosSchedulingType().clone('bestEffort')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: docsIetfQosServiceClassSchedulingType.setDescription("Template for docsIetfQosParamSetSchedulingType.")
docsIetfQosServiceClassRequestPolicy = MibTableColumn((1, 3, 6, 1, 2, 1, 127, 1, 8, 1, 19), OctetString().subtype(subtypeSpec=ValueSizeConstraint(4, 4)).setFixedLength(4).clone(hexValue='00000000')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: docsIetfQosServiceClassRequestPolicy.setDescription("Template for docsIetfQosParamSetRequestPolicyOct.")
# TOS masks are read-only here: their values are derived from
# docsIetfQosServiceClassDSCPOverwrite (see its description below).
docsIetfQosServiceClassTosAndMask = MibTableColumn((1, 3, 6, 1, 2, 1, 127, 1, 8, 1, 20), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 1)).setFixedLength(1)).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsIetfQosServiceClassTosAndMask.setDescription("Template for docsIetfQosParamSetTosAndMask.\nThe IP TOS octet as originally defined in RFC 791\nhas been superseded by the 6-bit Differentiated\nServices Field (DSField, RFC 3260) and the 2-bit\nExplicit Congestion Notification Field (ECN field,\nRFC 3168). Network operators SHOULD avoid\nspecifying values of\ndocsIetfQosServiceClassTosAndMask and\ndocsIetfQosServiceClassTosOrMask that would result\nin the modification of the ECN bits.\n\n\n\nIn particular, operators should not use values of\ndocsIetfQosServiceClassTosAndMask that have either\nof the least-significant two bits set to 0.\nSimilarly,operators should not use values of\ndocsIetfQosServiceClassTosOrMask that have either\nof the least-significant two bits set to 1.")
docsIetfQosServiceClassTosOrMask = MibTableColumn((1, 3, 6, 1, 2, 1, 127, 1, 8, 1, 21), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 1)).setFixedLength(1)).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsIetfQosServiceClassTosOrMask.setDescription("Template for docsIetfQosParamSetTosOrMask.\nThe IP TOS octet as originally defined in RFC 791\nhas been superseded by the 6-bit Differentiated\nServices Field (DSField, RFC 3260) and the 2-bit\nExplicit Congestion Notification Field (ECN field,\nRFC 3168). Network operators SHOULD avoid\nspecifying values of\ndocsIetfQosServiceClassTosAndMask and\ndocsIetfQosServiceClassTosOrMask that would result\nin the modification of the ECN bits.\n\nIn particular, operators should not use values of\ndocsIetfQosServiceClassTosAndMask that have either\nof the least-significant two bits set to 0.\nSimilarly, operators should not use values of\ndocsIetfQosServiceClassTosOrMask that have either\nof the least-significant two bits set to 1.")
docsIetfQosServiceClassDirection = MibTableColumn((1, 3, 6, 1, 2, 1, 127, 1, 8, 1, 22), DocsIetfQosRfMacIfDirection().clone('upstream')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: docsIetfQosServiceClassDirection.setDescription("Specifies whether the service class template\napplies to upstream or downstream service flows.")
docsIetfQosServiceClassStorageType = MibTableColumn((1, 3, 6, 1, 2, 1, 127, 1, 8, 1, 23), StorageType().clone('nonVolatile')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: docsIetfQosServiceClassStorageType.setDescription("This object defines whether this row is kept in\nvolatile storage and lost upon reboot or whether\nit is backed up by non-volatile or permanent\nstorage. 'permanent' entries need not allow\nwritable access to any object.")
docsIetfQosServiceClassDSCPOverwrite = MibTableColumn((1, 3, 6, 1, 2, 1, 127, 1, 8, 1, 24), DscpOrAny().clone('-1')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: docsIetfQosServiceClassDSCPOverwrite.setDescription("This object allows the overwrite of the DSCP\nfield per RFC 3260.\n\nIf this object is -1, then the corresponding entry's\ndocsIetfQosServiceClassTosAndMask value MUST be\n'FF'H and docsIetfQosServiceClassTosOrMask MUST be\n'00'H. Otherwise, this object is in the range of\n0..63, and the corresponding entry's\ndocsIetfQosServiceClassTosAndMask value MUST be\n'03'H and the docsIetfQosServiceClassTosOrMask MUST\nbe this object's value shifted left by two bit\npositions.")
# --- docsIetfQosServiceClassPolicyTable (OID 1.3.6.1.2.1.127.1.9) ---
# Service Class Name policies referenced by docsDevFilterPolicy rows; maps a
# packet to a named service class at a given rule priority.  Indexed by
# docsIetfQosServiceClassPolicyIndex; rows are read-create.
docsIetfQosServiceClassPolicyTable = MibTable((1, 3, 6, 1, 2, 1, 127, 1, 9))
if mibBuilder.loadTexts: docsIetfQosServiceClassPolicyTable.setDescription("This table describes the set of DOCSIS-QOS\nService Class Policies.\n\nThis table is an adjunct to the\n\n\n\ndocsDevFilterPolicy table. Entries in the\ndocsDevFilterPolicy table can point to\nspecific rows in this table.\n\nThis table permits mapping a packet to a service\nclass name of an active service flow so long as\na classifier does not exist at a higher\npriority.")
docsIetfQosServiceClassPolicyEntry = MibTableRow((1, 3, 6, 1, 2, 1, 127, 1, 9, 1)).setIndexNames((0, "DOCS-IETF-QOS-MIB", "docsIetfQosServiceClassPolicyIndex"))
if mibBuilder.loadTexts: docsIetfQosServiceClassPolicyEntry.setDescription("A service class name policy entry.")
docsIetfQosServiceClassPolicyIndex = MibTableColumn((1, 3, 6, 1, 2, 1, 127, 1, 9, 1, 1), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647))).setMaxAccess("noaccess")
if mibBuilder.loadTexts: docsIetfQosServiceClassPolicyIndex.setDescription("Index value to identify an entry in\nthis table uniquely.")
docsIetfQosServiceClassPolicyName = MibTableColumn((1, 3, 6, 1, 2, 1, 127, 1, 9, 1, 2), SnmpAdminString()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: docsIetfQosServiceClassPolicyName.setDescription("Service Class Name to identify the name of the\nservice class flow to which the packet should be\ndirected.")
docsIetfQosServiceClassPolicyRulePriority = MibTableColumn((1, 3, 6, 1, 2, 1, 127, 1, 9, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: docsIetfQosServiceClassPolicyRulePriority.setDescription("Service Class Policy rule priority for the\nentry.")
docsIetfQosServiceClassPolicyStatus = MibTableColumn((1, 3, 6, 1, 2, 1, 127, 1, 9, 1, 4), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: docsIetfQosServiceClassPolicyStatus.setDescription("Used to create or delete rows in this table.\nThis object should not be deleted if it is\nreferenced by an entry in docsDevFilterPolicy.\nThe reference should be deleted first.\nThere is no restriction on the ability\nto change values in this row while the row is\nactive. Inactive rows need not be timed out.")
docsIetfQosServiceClassPolicyStorageType = MibTableColumn((1, 3, 6, 1, 2, 1, 127, 1, 9, 1, 5), StorageType().clone('nonVolatile')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: docsIetfQosServiceClassPolicyStorageType.setDescription("This object defines whether this row is kept in\nvolatile storage and lost upon reboot or whether\nit is backed up by non-volatile or permanent\nstorage. 'permanent' entries need not allow\nwritable access to any object.")
# --- docsIetfQosPHSTable (OID 1.3.6.1.2.1.127.1.10) ---
# Payload Header Suppression rules, indexed by
# (ifIndex, docsIetfQosServiceFlowId, docsIetfQosPktClassId).
# All columns are read-only.
docsIetfQosPHSTable = MibTable((1, 3, 6, 1, 2, 1, 127, 1, 10))
if mibBuilder.loadTexts: docsIetfQosPHSTable.setDescription("This table describes the set of payload header\nsuppression entries.")
docsIetfQosPHSEntry = MibTableRow((1, 3, 6, 1, 2, 1, 127, 1, 10, 1)).setIndexNames((0, "IF-MIB", "ifIndex"), (0, "DOCS-IETF-QOS-MIB", "docsIetfQosServiceFlowId"), (0, "DOCS-IETF-QOS-MIB", "docsIetfQosPktClassId"))
if mibBuilder.loadTexts: docsIetfQosPHSEntry.setDescription("A payload header suppression entry.\n\nThe ifIndex is an ifType of docsCableMaclayer(127).\nThe index docsIetfQosServiceFlowId selects one\nservice flow from the cable MAC layer interface.\nThe docsIetfQosPktClassId index matches an\nindex of the docsIetfQosPktClassTable.")
docsIetfQosPHSField = MibTableColumn((1, 3, 6, 1, 2, 1, 127, 1, 10, 1, 1), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsIetfQosPHSField.setDescription("Payload header suppression field defines the\nbytes of the header that must be\nsuppressed/restored by the sending/receiving\ndevice.\n\nThe number of octets in this object should be\nthe same as the value of docsIetfQosPHSSize.")
docsIetfQosPHSMask = MibTableColumn((1, 3, 6, 1, 2, 1, 127, 1, 10, 1, 2), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 32))).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsIetfQosPHSMask.setDescription("Payload header suppression mask defines the\nbit mask that is used in combination with the\ndocsIetfQosPHSField. It defines which bytes in\nthe header must be suppressed/restored by the\nsending or receiving device.\n\nEach bit of this bit mask corresponds to a byte\nin the docsIetfQosPHSField, with the least\n\n\n\nsignificant bit corresponding to the first byte\nof the docsIetfQosPHSField.\n\nEach bit of the bit mask specifies whether\nthe corresponding byte should be suppressed\nin the packet. A bit value of '1' indicates that\nthe byte should be suppressed by the sending\ndevice and restored by the receiving device.\nA bit value of '0' indicates that\nthe byte should not be suppressed by the sending\ndevice or restored by the receiving device.\n\nIf the bit mask does not contain a bit for each\nbyte in the docsIetfQosPHSField, then the bit mask\nis extended with bit values of '1' to be the\nnecessary length.")
docsIetfQosPHSSize = MibTableColumn((1, 3, 6, 1, 2, 1, 127, 1, 10, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsIetfQosPHSSize.setDescription("Payload header suppression size specifies the\nnumber of bytes in the header to be suppressed\nand restored.\n\nThe value of this object must match the number\nof bytes in the docsIetfQosPHSField.")
docsIetfQosPHSVerify = MibTableColumn((1, 3, 6, 1, 2, 1, 127, 1, 10, 1, 4), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsIetfQosPHSVerify.setDescription("Payload header suppression verification value. If\n'true', the sender must verify docsIetfQosPHSField\nis the same as what is contained in the packet\nto be suppressed.")
docsIetfQosPHSIndex = MibTableColumn((1, 3, 6, 1, 2, 1, 127, 1, 10, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsIetfQosPHSIndex.setDescription("Payload header suppression index uniquely\n\n\n\nreferences the PHS rule for a given service flow.")
# --- docsIetfQosCmtsMacToSrvFlowTable (OID 1.3.6.1.2.1.127.1.11) ---
# CMTS-side mapping from a cable modem's MAC address to its service-flow IDs,
# letting managers pivot into the ifIndex/ServiceFlowId-indexed tables above.
docsIetfQosCmtsMacToSrvFlowTable = MibTable((1, 3, 6, 1, 2, 1, 127, 1, 11))
if mibBuilder.loadTexts: docsIetfQosCmtsMacToSrvFlowTable.setDescription("This table provides for referencing the service\nflows associated with a particular cable modem.\nThis allows indexing into other docsIetfQos\ntables that are indexed by docsIetfQosServiceFlowId\nand ifIndex.")
docsIetfQosCmtsMacToSrvFlowEntry = MibTableRow((1, 3, 6, 1, 2, 1, 127, 1, 11, 1)).setIndexNames((0, "DOCS-IETF-QOS-MIB", "docsIetfQosCmtsCmMac"), (0, "DOCS-IETF-QOS-MIB", "docsIetfQosCmtsServiceFlowId"))
if mibBuilder.loadTexts: docsIetfQosCmtsMacToSrvFlowEntry.setDescription("An entry is created by CMTS for each service flow\nconnected to this CMTS.")
docsIetfQosCmtsCmMac = MibTableColumn((1, 3, 6, 1, 2, 1, 127, 1, 11, 1, 1), MacAddress()).setMaxAccess("noaccess")
if mibBuilder.loadTexts: docsIetfQosCmtsCmMac.setDescription("The MAC address for the referenced CM.")
docsIetfQosCmtsServiceFlowId = MibTableColumn((1, 3, 6, 1, 2, 1, 127, 1, 11, 1, 2), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 4294967295))).setMaxAccess("noaccess")
if mibBuilder.loadTexts: docsIetfQosCmtsServiceFlowId.setDescription("An index assigned to a service flow by CMTS.")
docsIetfQosCmtsIfIndex = MibTableColumn((1, 3, 6, 1, 2, 1, 127, 1, 11, 1, 3), InterfaceIndex()).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsIetfQosCmtsIfIndex.setDescription("The ifIndex of ifType docsCableMacLayer(127)\non the CMTS that is connected to the Cable Modem.")
# --- Conformance subtree identifiers (OID 1.3.6.1.2.1.127.2) ---
docsIetfQosConformance = MibIdentifier((1, 3, 6, 1, 2, 1, 127, 2))
docsIetfQosGroups = MibIdentifier((1, 3, 6, 1, 2, 1, 127, 2, 1))
docsIetfQosCompliances = MibIdentifier((1, 3, 6, 1, 2, 1, 127, 2, 2))
# Augmentations (none defined in this module)
# Groups: object groups referenced by the compliance statement below.
docsIetfQosBaseGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 127, 2, 1, 1)).setObjects(*(("DOCS-IETF-QOS-MIB", "docsIetfQosPktClassUserPriLow"), ("DOCS-IETF-QOS-MIB", "docsIetfQosPktClassSourcePortStart"), ("DOCS-IETF-QOS-MIB", "docsIetfQosPktClassEnetProtocol"), ("DOCS-IETF-QOS-MIB", "docsIetfQosPktClassIpTosMask"), ("DOCS-IETF-QOS-MIB", "docsIetfQosPktClassInetDestAddr"), ("DOCS-IETF-QOS-MIB", "docsIetfQosServiceFlowTimeActive"), ("DOCS-IETF-QOS-MIB", "docsIetfQosServiceFlowTimeCreated"), ("DOCS-IETF-QOS-MIB", "docsIetfQosPktClassStateActive"), ("DOCS-IETF-QOS-MIB", "docsIetfQosDSAReqs"), ("DOCS-IETF-QOS-MIB", "docsIetfQosDSCAcks"), ("DOCS-IETF-QOS-MIB", "docsIetfQosPktClassInetDestMask"), ("DOCS-IETF-QOS-MIB", "docsIetfQosDCCFails"), ("DOCS-IETF-QOS-MIB", "docsIetfQosPktClassDestPortStart"), ("DOCS-IETF-QOS-MIB", "docsIetfQosPktClassInetSourceMask"), ("DOCS-IETF-QOS-MIB", "docsIetfQosDSDRsps"), ("DOCS-IETF-QOS-MIB", "docsIetfQosDCCReqs"), ("DOCS-IETF-QOS-MIB", "docsIetfQosDCCs"), ("DOCS-IETF-QOS-MIB", "docsIetfQosPktClassPriority"), ("DOCS-IETF-QOS-MIB", "docsIetfQosPHSMask"), ("DOCS-IETF-QOS-MIB", "docsIetfQosPHSVerify"), ("DOCS-IETF-QOS-MIB", "docsIetfQosPHSIndex"), ("DOCS-IETF-QOS-MIB", "docsIetfQosDSARsps"), ("DOCS-IETF-QOS-MIB", "docsIetfQosPktClassEnetProtocolType"), ("DOCS-IETF-QOS-MIB", "docsIetfQosPktClassIpTosLow"), ("DOCS-IETF-QOS-MIB", "docsIetfQosPktClassInetSourceAddr"), ("DOCS-IETF-QOS-MIB", "docsIetfQosPHSField"), ("DOCS-IETF-QOS-MIB", "docsIetfQosDSCReqs"), ("DOCS-IETF-QOS-MIB", "docsIetfQosDynamicChangeFails"), ("DOCS-IETF-QOS-MIB", "docsIetfQosDSDReqs"), ("DOCS-IETF-QOS-MIB", "docsIetfQosPktClassDestPortEnd"), ("DOCS-IETF-QOS-MIB", "docsIetfQosDynamicAdds"), ("DOCS-IETF-QOS-MIB", "docsIetfQosPktClassVlanId"), ("DOCS-IETF-QOS-MIB", "docsIetfQosDynamicDeleteFails"), ("DOCS-IETF-QOS-MIB", "docsIetfQosPktClassPkts"), ("DOCS-IETF-QOS-MIB", "docsIetfQosDynamicDeletes"), ("DOCS-IETF-QOS-MIB", "docsIetfQosPktClassIpProtocol"), ("DOCS-IETF-QOS-MIB", 
"docsIetfQosServiceFlowSID"), ("DOCS-IETF-QOS-MIB", "docsIetfQosServiceFlowPHSUnknowns"), ("DOCS-IETF-QOS-MIB", "docsIetfQosServiceFlowPrimary"), ("DOCS-IETF-QOS-MIB", "docsIetfQosPHSSize"), ("DOCS-IETF-QOS-MIB", "docsIetfQosServiceFlowPkts"), ("DOCS-IETF-QOS-MIB", "docsIetfQosPktClassSourcePortEnd"), ("DOCS-IETF-QOS-MIB", "docsIetfQosDSAAcks"), ("DOCS-IETF-QOS-MIB", "docsIetfQosServiceFlowOctets"), ("DOCS-IETF-QOS-MIB", "docsIetfQosDCCRsps"), ("DOCS-IETF-QOS-MIB", "docsIetfQosPktClassUserPriHigh"), ("DOCS-IETF-QOS-MIB", "docsIetfQosServiceFlowDirection"), ("DOCS-IETF-QOS-MIB", "docsIetfQosDSCRsps"), ("DOCS-IETF-QOS-MIB", "docsIetfQosServiceFlowPolicedDelayPkts"), ("DOCS-IETF-QOS-MIB", "docsIetfQosServiceFlowPolicedDropPkts"), ("DOCS-IETF-QOS-MIB", "docsIetfQosPktClassIpTosHigh"), ("DOCS-IETF-QOS-MIB", "docsIetfQosPktClassSourceMacAddr"), ("DOCS-IETF-QOS-MIB", "docsIetfQosPktClassDestMacMask"), ("DOCS-IETF-QOS-MIB", "docsIetfQosPktClassDirection"), ("DOCS-IETF-QOS-MIB", "docsIetfQosPktClassDestMacAddr"), ("DOCS-IETF-QOS-MIB", "docsIetfQosPktClassBitMap"), ("DOCS-IETF-QOS-MIB", "docsIetfQosDynamicAddFails"), ("DOCS-IETF-QOS-MIB", "docsIetfQosDCCAcks"), ("DOCS-IETF-QOS-MIB", "docsIetfQosPktClassInetAddressType"), ("DOCS-IETF-QOS-MIB", "docsIetfQosDynamicChanges"), ) )
if mibBuilder.loadTexts: docsIetfQosBaseGroup.setDescription("Group of objects implemented in both Cable Modems and\nCable Modem Termination Systems.")
docsIetfQosParamSetGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 127, 2, 1, 2)).setObjects(*(("DOCS-IETF-QOS-MIB", "docsIetfQosParamSetMaxConcatBurst"), ("DOCS-IETF-QOS-MIB", "docsIetfQosParamSetGrantsPerInterval"), ("DOCS-IETF-QOS-MIB", "docsIetfQosParamSetMaxTrafficRate"), ("DOCS-IETF-QOS-MIB", "docsIetfQosParamSetActiveTimeout"), ("DOCS-IETF-QOS-MIB", "docsIetfQosParamSetMinReservedPkt"), ("DOCS-IETF-QOS-MIB", "docsIetfQosParamSetPriority"), ("DOCS-IETF-QOS-MIB", "docsIetfQosParamSetRequestPolicyOct"), ("DOCS-IETF-QOS-MIB", "docsIetfQosParamSetServiceClassName"), ("DOCS-IETF-QOS-MIB", "docsIetfQosParamSetTosOrMask"), ("DOCS-IETF-QOS-MIB", "docsIetfQosParamSetMinReservedRate"), ("DOCS-IETF-QOS-MIB", "docsIetfQosParamSetMaxTrafficBurst"), ("DOCS-IETF-QOS-MIB", "docsIetfQosParamSetBitMap"), ("DOCS-IETF-QOS-MIB", "docsIetfQosParamSetSchedulingType"), ("DOCS-IETF-QOS-MIB", "docsIetfQosParamSetTolPollJitter"), ("DOCS-IETF-QOS-MIB", "docsIetfQosParamSetTosAndMask"), ("DOCS-IETF-QOS-MIB", "docsIetfQosParamSetMaxLatency"), ("DOCS-IETF-QOS-MIB", "docsIetfQosParamSetTolGrantJitter"), ("DOCS-IETF-QOS-MIB", "docsIetfQosParamSetNomPollInterval"), ("DOCS-IETF-QOS-MIB", "docsIetfQosParamSetNomGrantInterval"), ("DOCS-IETF-QOS-MIB", "docsIetfQosParamSetAdmittedTimeout"), ("DOCS-IETF-QOS-MIB", "docsIetfQosParamSetUnsolicitGrantSize"), ) )
if mibBuilder.loadTexts: docsIetfQosParamSetGroup.setDescription("Group of objects implemented in both Cable Modems and\nCable Modem Termination Systems for QOS Parameter Sets.")
docsIetfQosCmtsGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 127, 2, 1, 3)).setObjects(*(("DOCS-IETF-QOS-MIB", "docsIetfQosServiceFlowLogSFID"), ("DOCS-IETF-QOS-MIB", "docsIetfQosUpstreamFragDiscards"), ("DOCS-IETF-QOS-MIB", "docsIetfQosServiceFlowLogPolicedDropPkts"), ("DOCS-IETF-QOS-MIB", "docsIetfQosServiceFlowLogControl"), ("DOCS-IETF-QOS-MIB", "docsIetfQosServiceFlowLogTimeCreated"), ("DOCS-IETF-QOS-MIB", "docsIetfQosServiceFlowLogOctets"), ("DOCS-IETF-QOS-MIB", "docsIetfQosUpstreamConcatBursts"), ("DOCS-IETF-QOS-MIB", "docsIetfQosServiceFlowLogCmMac"), ("DOCS-IETF-QOS-MIB", "docsIetfQosServiceFlowLogPrimary"), ("DOCS-IETF-QOS-MIB", "docsIetfQosCmtsIfIndex"), ("DOCS-IETF-QOS-MIB", "docsIetfQosUpstreamFragments"), ("DOCS-IETF-QOS-MIB", "docsIetfQosServiceFlowLogTimeActive"), ("DOCS-IETF-QOS-MIB", "docsIetfQosServiceFlowLogPkts"), ("DOCS-IETF-QOS-MIB", "docsIetfQosServiceFlowLogIfIndex"), ("DOCS-IETF-QOS-MIB", "docsIetfQosServiceFlowLogDirection"), ("DOCS-IETF-QOS-MIB", "docsIetfQosServiceFlowLogPolicedDelayPkts"), ("DOCS-IETF-QOS-MIB", "docsIetfQosServiceFlowLogServiceClassName"), ("DOCS-IETF-QOS-MIB", "docsIetfQosServiceFlowLogTimeDeleted"), ) )
if mibBuilder.loadTexts: docsIetfQosCmtsGroup.setDescription("Group of objects implemented only in the CMTS.")
docsIetfQosSrvClassPolicyGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 127, 2, 1, 4)).setObjects(*(("DOCS-IETF-QOS-MIB", "docsIetfQosServiceClassPolicyStorageType"), ("DOCS-IETF-QOS-MIB", "docsIetfQosServiceClassPolicyName"), ("DOCS-IETF-QOS-MIB", "docsIetfQosServiceClassPolicyRulePriority"), ("DOCS-IETF-QOS-MIB", "docsIetfQosServiceClassPolicyStatus"), ) )
if mibBuilder.loadTexts: docsIetfQosSrvClassPolicyGroup.setDescription("Group of objects implemented in both Cable Modems and\nCable Modem Termination Systems when supporting policy-based\nservice flows.")
docsIetfQosServiceClassGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 127, 2, 1, 5)).setObjects(*(("DOCS-IETF-QOS-MIB", "docsIetfQosServiceClassSchedulingType"), ("DOCS-IETF-QOS-MIB", "docsIetfQosServiceClassNomGrantInterval"), ("DOCS-IETF-QOS-MIB", "docsIetfQosServiceClassTolGrantJitter"), ("DOCS-IETF-QOS-MIB", "docsIetfQosServiceClassDSCPOverwrite"), ("DOCS-IETF-QOS-MIB", "docsIetfQosServiceClassGrantsPerInterval"), ("DOCS-IETF-QOS-MIB", "docsIetfQosServiceClassDirection"), ("DOCS-IETF-QOS-MIB", "docsIetfQosServiceClassMaxTrafficBurst"), ("DOCS-IETF-QOS-MIB", "docsIetfQosServiceClassPriority"), ("DOCS-IETF-QOS-MIB", "docsIetfQosServiceClassMaxTrafficRate"), ("DOCS-IETF-QOS-MIB", "docsIetfQosServiceClassStorageType"), ("DOCS-IETF-QOS-MIB", "docsIetfQosServiceClassTolPollJitter"), ("DOCS-IETF-QOS-MIB", "docsIetfQosServiceClassTosOrMask"), ("DOCS-IETF-QOS-MIB", "docsIetfQosServiceClassStatus"), ("DOCS-IETF-QOS-MIB", "docsIetfQosServiceClassMaxConcatBurst"), ("DOCS-IETF-QOS-MIB", "docsIetfQosServiceClassTosAndMask"), ("DOCS-IETF-QOS-MIB", "docsIetfQosServiceClassUnsolicitGrantSize"), ("DOCS-IETF-QOS-MIB", "docsIetfQosServiceClassNomPollInterval"), ("DOCS-IETF-QOS-MIB", "docsIetfQosServiceClassRequestPolicy"), ("DOCS-IETF-QOS-MIB", "docsIetfQosServiceClassMinReservedRate"), ("DOCS-IETF-QOS-MIB", "docsIetfQosServiceClassActiveTimeout"), ("DOCS-IETF-QOS-MIB", "docsIetfQosServiceClassMinReservedPkt"), ("DOCS-IETF-QOS-MIB", "docsIetfQosServiceClassAdmittedTimeout"), ("DOCS-IETF-QOS-MIB", "docsIetfQosServiceClassMaxLatency"), ) )
if mibBuilder.loadTexts: docsIetfQosServiceClassGroup.setDescription("Group of objects implemented only in Cable Modem\nTermination Systems when supporting expansion of Service\nClass Names in a QOS Parameter Set")
# Compliances: single module compliance tying together the five groups above.
docsIetfQosCompliance = ModuleCompliance((1, 3, 6, 1, 2, 1, 127, 2, 2, 1)).setObjects(*(("DOCS-IETF-QOS-MIB", "docsIetfQosCmtsGroup"), ("DOCS-IETF-QOS-MIB", "docsIetfQosServiceClassGroup"), ("DOCS-IETF-QOS-MIB", "docsIetfQosSrvClassPolicyGroup"), ("DOCS-IETF-QOS-MIB", "docsIetfQosBaseGroup"), ("DOCS-IETF-QOS-MIB", "docsIetfQosParamSetGroup"), ) )
if mibBuilder.loadTexts: docsIetfQosCompliance.setDescription("The compliance statement for MCNS Cable Modems and\nCable Modem Termination Systems that implement DOCSIS\nService Flows.")
# Exports: register every symbol defined above with the MIB builder so other
# modules (and pysnmp's resolver) can look them up by name.
# Module identity
mibBuilder.exportSymbols("DOCS-IETF-QOS-MIB", PYSNMP_MODULE_ID=docsIetfQosMIB)
# Types
mibBuilder.exportSymbols("DOCS-IETF-QOS-MIB", DocsIetfQosBitRate=DocsIetfQosBitRate, DocsIetfQosRfMacIfDirection=DocsIetfQosRfMacIfDirection, DocsIetfQosSchedulingType=DocsIetfQosSchedulingType)
# Objects
mibBuilder.exportSymbols("DOCS-IETF-QOS-MIB", docsIetfQosMIB=docsIetfQosMIB, docsIetfQosNotifications=docsIetfQosNotifications, docsIetfQosMIBObjects=docsIetfQosMIBObjects, docsIetfQosPktClassTable=docsIetfQosPktClassTable, docsIetfQosPktClassEntry=docsIetfQosPktClassEntry, docsIetfQosPktClassId=docsIetfQosPktClassId, docsIetfQosPktClassDirection=docsIetfQosPktClassDirection, docsIetfQosPktClassPriority=docsIetfQosPktClassPriority, docsIetfQosPktClassIpTosLow=docsIetfQosPktClassIpTosLow, docsIetfQosPktClassIpTosHigh=docsIetfQosPktClassIpTosHigh, docsIetfQosPktClassIpTosMask=docsIetfQosPktClassIpTosMask, docsIetfQosPktClassIpProtocol=docsIetfQosPktClassIpProtocol, docsIetfQosPktClassInetAddressType=docsIetfQosPktClassInetAddressType, docsIetfQosPktClassInetSourceAddr=docsIetfQosPktClassInetSourceAddr, docsIetfQosPktClassInetSourceMask=docsIetfQosPktClassInetSourceMask, docsIetfQosPktClassInetDestAddr=docsIetfQosPktClassInetDestAddr, docsIetfQosPktClassInetDestMask=docsIetfQosPktClassInetDestMask, docsIetfQosPktClassSourcePortStart=docsIetfQosPktClassSourcePortStart, docsIetfQosPktClassSourcePortEnd=docsIetfQosPktClassSourcePortEnd, docsIetfQosPktClassDestPortStart=docsIetfQosPktClassDestPortStart, docsIetfQosPktClassDestPortEnd=docsIetfQosPktClassDestPortEnd, docsIetfQosPktClassDestMacAddr=docsIetfQosPktClassDestMacAddr, docsIetfQosPktClassDestMacMask=docsIetfQosPktClassDestMacMask, docsIetfQosPktClassSourceMacAddr=docsIetfQosPktClassSourceMacAddr, docsIetfQosPktClassEnetProtocolType=docsIetfQosPktClassEnetProtocolType, docsIetfQosPktClassEnetProtocol=docsIetfQosPktClassEnetProtocol, docsIetfQosPktClassUserPriLow=docsIetfQosPktClassUserPriLow, docsIetfQosPktClassUserPriHigh=docsIetfQosPktClassUserPriHigh, docsIetfQosPktClassVlanId=docsIetfQosPktClassVlanId, docsIetfQosPktClassStateActive=docsIetfQosPktClassStateActive, docsIetfQosPktClassPkts=docsIetfQosPktClassPkts, docsIetfQosPktClassBitMap=docsIetfQosPktClassBitMap, 
docsIetfQosParamSetTable=docsIetfQosParamSetTable, docsIetfQosParamSetEntry=docsIetfQosParamSetEntry, docsIetfQosParamSetServiceClassName=docsIetfQosParamSetServiceClassName, docsIetfQosParamSetPriority=docsIetfQosParamSetPriority, docsIetfQosParamSetMaxTrafficRate=docsIetfQosParamSetMaxTrafficRate, docsIetfQosParamSetMaxTrafficBurst=docsIetfQosParamSetMaxTrafficBurst, docsIetfQosParamSetMinReservedRate=docsIetfQosParamSetMinReservedRate, docsIetfQosParamSetMinReservedPkt=docsIetfQosParamSetMinReservedPkt, docsIetfQosParamSetActiveTimeout=docsIetfQosParamSetActiveTimeout, docsIetfQosParamSetAdmittedTimeout=docsIetfQosParamSetAdmittedTimeout, docsIetfQosParamSetMaxConcatBurst=docsIetfQosParamSetMaxConcatBurst, docsIetfQosParamSetSchedulingType=docsIetfQosParamSetSchedulingType, docsIetfQosParamSetNomPollInterval=docsIetfQosParamSetNomPollInterval, docsIetfQosParamSetTolPollJitter=docsIetfQosParamSetTolPollJitter, docsIetfQosParamSetUnsolicitGrantSize=docsIetfQosParamSetUnsolicitGrantSize, docsIetfQosParamSetNomGrantInterval=docsIetfQosParamSetNomGrantInterval, docsIetfQosParamSetTolGrantJitter=docsIetfQosParamSetTolGrantJitter, docsIetfQosParamSetGrantsPerInterval=docsIetfQosParamSetGrantsPerInterval, docsIetfQosParamSetTosAndMask=docsIetfQosParamSetTosAndMask, docsIetfQosParamSetTosOrMask=docsIetfQosParamSetTosOrMask, docsIetfQosParamSetMaxLatency=docsIetfQosParamSetMaxLatency, docsIetfQosParamSetType=docsIetfQosParamSetType, docsIetfQosParamSetRequestPolicyOct=docsIetfQosParamSetRequestPolicyOct, docsIetfQosParamSetBitMap=docsIetfQosParamSetBitMap, docsIetfQosServiceFlowTable=docsIetfQosServiceFlowTable, docsIetfQosServiceFlowEntry=docsIetfQosServiceFlowEntry, docsIetfQosServiceFlowId=docsIetfQosServiceFlowId, docsIetfQosServiceFlowSID=docsIetfQosServiceFlowSID, docsIetfQosServiceFlowDirection=docsIetfQosServiceFlowDirection, docsIetfQosServiceFlowPrimary=docsIetfQosServiceFlowPrimary, docsIetfQosServiceFlowStatsTable=docsIetfQosServiceFlowStatsTable, 
docsIetfQosServiceFlowStatsEntry=docsIetfQosServiceFlowStatsEntry, docsIetfQosServiceFlowPkts=docsIetfQosServiceFlowPkts, docsIetfQosServiceFlowOctets=docsIetfQosServiceFlowOctets, docsIetfQosServiceFlowTimeCreated=docsIetfQosServiceFlowTimeCreated, docsIetfQosServiceFlowTimeActive=docsIetfQosServiceFlowTimeActive, docsIetfQosServiceFlowPHSUnknowns=docsIetfQosServiceFlowPHSUnknowns, docsIetfQosServiceFlowPolicedDropPkts=docsIetfQosServiceFlowPolicedDropPkts, docsIetfQosServiceFlowPolicedDelayPkts=docsIetfQosServiceFlowPolicedDelayPkts, docsIetfQosUpstreamStatsTable=docsIetfQosUpstreamStatsTable, docsIetfQosUpstreamStatsEntry=docsIetfQosUpstreamStatsEntry, docsIetfQosSID=docsIetfQosSID, docsIetfQosUpstreamFragments=docsIetfQosUpstreamFragments, docsIetfQosUpstreamFragDiscards=docsIetfQosUpstreamFragDiscards, docsIetfQosUpstreamConcatBursts=docsIetfQosUpstreamConcatBursts, docsIetfQosDynamicServiceStatsTable=docsIetfQosDynamicServiceStatsTable, docsIetfQosDynamicServiceStatsEntry=docsIetfQosDynamicServiceStatsEntry, docsIetfQosIfDirection=docsIetfQosIfDirection, docsIetfQosDSAReqs=docsIetfQosDSAReqs, docsIetfQosDSARsps=docsIetfQosDSARsps, docsIetfQosDSAAcks=docsIetfQosDSAAcks, docsIetfQosDSCReqs=docsIetfQosDSCReqs, docsIetfQosDSCRsps=docsIetfQosDSCRsps, docsIetfQosDSCAcks=docsIetfQosDSCAcks, docsIetfQosDSDReqs=docsIetfQosDSDReqs, docsIetfQosDSDRsps=docsIetfQosDSDRsps, docsIetfQosDynamicAdds=docsIetfQosDynamicAdds, docsIetfQosDynamicAddFails=docsIetfQosDynamicAddFails, docsIetfQosDynamicChanges=docsIetfQosDynamicChanges, docsIetfQosDynamicChangeFails=docsIetfQosDynamicChangeFails, docsIetfQosDynamicDeletes=docsIetfQosDynamicDeletes, docsIetfQosDynamicDeleteFails=docsIetfQosDynamicDeleteFails, docsIetfQosDCCReqs=docsIetfQosDCCReqs, docsIetfQosDCCRsps=docsIetfQosDCCRsps, docsIetfQosDCCAcks=docsIetfQosDCCAcks, docsIetfQosDCCs=docsIetfQosDCCs, docsIetfQosDCCFails=docsIetfQosDCCFails, docsIetfQosServiceFlowLogTable=docsIetfQosServiceFlowLogTable, 
docsIetfQosServiceFlowLogEntry=docsIetfQosServiceFlowLogEntry, docsIetfQosServiceFlowLogIndex=docsIetfQosServiceFlowLogIndex, docsIetfQosServiceFlowLogIfIndex=docsIetfQosServiceFlowLogIfIndex, docsIetfQosServiceFlowLogSFID=docsIetfQosServiceFlowLogSFID, docsIetfQosServiceFlowLogCmMac=docsIetfQosServiceFlowLogCmMac, docsIetfQosServiceFlowLogPkts=docsIetfQosServiceFlowLogPkts, docsIetfQosServiceFlowLogOctets=docsIetfQosServiceFlowLogOctets, docsIetfQosServiceFlowLogTimeDeleted=docsIetfQosServiceFlowLogTimeDeleted, docsIetfQosServiceFlowLogTimeCreated=docsIetfQosServiceFlowLogTimeCreated, docsIetfQosServiceFlowLogTimeActive=docsIetfQosServiceFlowLogTimeActive, docsIetfQosServiceFlowLogDirection=docsIetfQosServiceFlowLogDirection, docsIetfQosServiceFlowLogPrimary=docsIetfQosServiceFlowLogPrimary, docsIetfQosServiceFlowLogServiceClassName=docsIetfQosServiceFlowLogServiceClassName, docsIetfQosServiceFlowLogPolicedDropPkts=docsIetfQosServiceFlowLogPolicedDropPkts, docsIetfQosServiceFlowLogPolicedDelayPkts=docsIetfQosServiceFlowLogPolicedDelayPkts, docsIetfQosServiceFlowLogControl=docsIetfQosServiceFlowLogControl, docsIetfQosServiceClassTable=docsIetfQosServiceClassTable, docsIetfQosServiceClassEntry=docsIetfQosServiceClassEntry, docsIetfQosServiceClassName=docsIetfQosServiceClassName, docsIetfQosServiceClassStatus=docsIetfQosServiceClassStatus, docsIetfQosServiceClassPriority=docsIetfQosServiceClassPriority, docsIetfQosServiceClassMaxTrafficRate=docsIetfQosServiceClassMaxTrafficRate, docsIetfQosServiceClassMaxTrafficBurst=docsIetfQosServiceClassMaxTrafficBurst, docsIetfQosServiceClassMinReservedRate=docsIetfQosServiceClassMinReservedRate, docsIetfQosServiceClassMinReservedPkt=docsIetfQosServiceClassMinReservedPkt, docsIetfQosServiceClassMaxConcatBurst=docsIetfQosServiceClassMaxConcatBurst)
mibBuilder.exportSymbols("DOCS-IETF-QOS-MIB", docsIetfQosServiceClassNomPollInterval=docsIetfQosServiceClassNomPollInterval, docsIetfQosServiceClassTolPollJitter=docsIetfQosServiceClassTolPollJitter, docsIetfQosServiceClassUnsolicitGrantSize=docsIetfQosServiceClassUnsolicitGrantSize, docsIetfQosServiceClassNomGrantInterval=docsIetfQosServiceClassNomGrantInterval, docsIetfQosServiceClassTolGrantJitter=docsIetfQosServiceClassTolGrantJitter, docsIetfQosServiceClassGrantsPerInterval=docsIetfQosServiceClassGrantsPerInterval, docsIetfQosServiceClassMaxLatency=docsIetfQosServiceClassMaxLatency, docsIetfQosServiceClassActiveTimeout=docsIetfQosServiceClassActiveTimeout, docsIetfQosServiceClassAdmittedTimeout=docsIetfQosServiceClassAdmittedTimeout, docsIetfQosServiceClassSchedulingType=docsIetfQosServiceClassSchedulingType, docsIetfQosServiceClassRequestPolicy=docsIetfQosServiceClassRequestPolicy, docsIetfQosServiceClassTosAndMask=docsIetfQosServiceClassTosAndMask, docsIetfQosServiceClassTosOrMask=docsIetfQosServiceClassTosOrMask, docsIetfQosServiceClassDirection=docsIetfQosServiceClassDirection, docsIetfQosServiceClassStorageType=docsIetfQosServiceClassStorageType, docsIetfQosServiceClassDSCPOverwrite=docsIetfQosServiceClassDSCPOverwrite, docsIetfQosServiceClassPolicyTable=docsIetfQosServiceClassPolicyTable, docsIetfQosServiceClassPolicyEntry=docsIetfQosServiceClassPolicyEntry, docsIetfQosServiceClassPolicyIndex=docsIetfQosServiceClassPolicyIndex, docsIetfQosServiceClassPolicyName=docsIetfQosServiceClassPolicyName, docsIetfQosServiceClassPolicyRulePriority=docsIetfQosServiceClassPolicyRulePriority, docsIetfQosServiceClassPolicyStatus=docsIetfQosServiceClassPolicyStatus, docsIetfQosServiceClassPolicyStorageType=docsIetfQosServiceClassPolicyStorageType, docsIetfQosPHSTable=docsIetfQosPHSTable, docsIetfQosPHSEntry=docsIetfQosPHSEntry, docsIetfQosPHSField=docsIetfQosPHSField, docsIetfQosPHSMask=docsIetfQosPHSMask, docsIetfQosPHSSize=docsIetfQosPHSSize, 
docsIetfQosPHSVerify=docsIetfQosPHSVerify, docsIetfQosPHSIndex=docsIetfQosPHSIndex, docsIetfQosCmtsMacToSrvFlowTable=docsIetfQosCmtsMacToSrvFlowTable, docsIetfQosCmtsMacToSrvFlowEntry=docsIetfQosCmtsMacToSrvFlowEntry, docsIetfQosCmtsCmMac=docsIetfQosCmtsCmMac, docsIetfQosCmtsServiceFlowId=docsIetfQosCmtsServiceFlowId, docsIetfQosCmtsIfIndex=docsIetfQosCmtsIfIndex, docsIetfQosConformance=docsIetfQosConformance, docsIetfQosGroups=docsIetfQosGroups, docsIetfQosCompliances=docsIetfQosCompliances)
# Groups
mibBuilder.exportSymbols("DOCS-IETF-QOS-MIB", docsIetfQosBaseGroup=docsIetfQosBaseGroup, docsIetfQosParamSetGroup=docsIetfQosParamSetGroup, docsIetfQosCmtsGroup=docsIetfQosCmtsGroup, docsIetfQosSrvClassPolicyGroup=docsIetfQosSrvClassPolicyGroup, docsIetfQosServiceClassGroup=docsIetfQosServiceClassGroup)
# Compliances
mibBuilder.exportSymbols("DOCS-IETF-QOS-MIB", docsIetfQosCompliance=docsIetfQosCompliance)
|
5,433 | 2908d34165fac272c9571be623855a0613c952f3 | from django.contrib.auth.models import User
from django_filters import (
NumberFilter,
DateTimeFilter,
AllValuesFilter
)
from rest_framework.response import Response
from rest_framework.reverse import reverse
from rest_framework import permissions
from rest_framework.throttling import ScopedRateThrottle
from rest_framework import filters
from rest_framework.generics import (
ListCreateAPIView,
RetrieveUpdateDestroyAPIView,
GenericAPIView,
ListAPIView,
RetrieveAPIView
)
from games.models import (
GameCategory,
Game,
Player,
PlayerScore
)
from games.serializers import (
GameCategorySerializer,
GameSerializer,
PlayerSerializer,
PlayerScoreSerializer,
)
from games.serializers import UserSerializer
from games.permissions import IsOwnerOrReadOnly
class ApiRoot(GenericAPIView):
    """Entry point of the API: returns navigable links to every collection."""
    name = 'api-root'

    def get(self, request, *args, **kwargs):
        # Resolve each list view's named URL against the current request so
        # the payload carries absolute, clickable links.
        endpoints = {
            'players': PlayerList.name,
            'game-categories': GameCategoryList.name,
            'game': GameList.name,
            'scores': PlayerScoreList.name,
            'users': UserList.name,
        }
        return Response(
            {label: reverse(view_name, request=request)
             for label, view_name in endpoints.items()}
        )
class GameCategoryList(ListCreateAPIView):
    """List all game categories or create a new one (GET/POST)."""
    queryset = GameCategory.objects.all()
    serializer_class = GameCategorySerializer
    name = 'gamecategory-list'
    # Rate-limited under the 'game-categories' scope configured in settings.
    throttle_scope = 'game-categories'
    throttle_classes = (ScopedRateThrottle,)
    filter_fields = ('name',)
    search_fields = ('^name',)  # '^' => starts-with search
    ordering_fields = ('name',)
class GameCategoryDetail(RetrieveUpdateDestroyAPIView):
    """Retrieve, update or delete a single game category (GET/PUT/PATCH/DELETE)."""
    queryset = GameCategory.objects.all()
    serializer_class = GameCategorySerializer
    name = 'gamecategory-detail'
    # Shares the 'game-categories' throttle scope with the list view.
    throttle_scope = 'game-categories'
    throttle_classes = (ScopedRateThrottle,)
class GameList(ListCreateAPIView):
    """List all games or create a new one; writes require authentication."""
    queryset = Game.objects.all()
    serializer_class = GameSerializer
    name = 'game-list'
    permission_classes = (
        permissions.IsAuthenticatedOrReadOnly,
        IsOwnerOrReadOnly
    )
    filter_fields = (
        'name',
        'game_category',
        'release_date',
        'played',
        'owner',
    )
    search_fields = (
        '^name',
    )
    ordering_fields = (
        'name',
        'release_date',
    )
    def perform_create(self, serializer):
        """Persist the new game with the requesting user as its owner."""
        # pass an additional owner field to the create method
        # to set the owner to the user received in the request
        serializer.save(owner=self.request.user)
class GameDetail(RetrieveUpdateDestroyAPIView):
    """Retrieve, update or delete a single game; only its owner may write."""
    queryset = Game.objects.all()
    serializer_class = GameSerializer
    name = 'game-detail'
    permission_classes = (
        permissions.IsAuthenticatedOrReadOnly,
        IsOwnerOrReadOnly
    )
class PlayerList(ListCreateAPIView):
    """List all players or create a new one (GET/POST)."""
    queryset = Player.objects.all()
    serializer_class = PlayerSerializer
    name = 'player-list'
    filter_fields = (
        'name',
        'gender',
    )
    search_fields = (
        '^name',
    )
    ordering_fields = (
        'name',
    )
class PlayerDetail(RetrieveUpdateDestroyAPIView):
    """Retrieve, update or delete a single player."""
    queryset = Player.objects.all()
    serializer_class = PlayerSerializer
    name = 'player-detail'
class PlayerScoreFilter(filters.FilterSet):
    """Filter set for PlayerScore: score/date ranges plus related names.

    NOTE(review): the ``name=`` keyword on these filters was renamed to
    ``field_name=`` in django-filter 2.0, and newer DRF versions no longer
    re-export ``FilterSet`` from ``rest_framework.filters`` — confirm the
    pinned library versions before upgrading.
    """
    # Inclusive score bounds (score >= min_score, score <= max_score).
    min_score = NumberFilter(
        name='score',lookup_expr='gte'
    )
    max_score = NumberFilter(
        name='score',lookup_expr='lte'
    )
    # Inclusive score_date bounds.
    from_score_date = DateTimeFilter(
        name='score_date',
        lookup_expr='gte'
    )
    to_score_date = DateTimeFilter(
        name='score_date',
        lookup_expr='lte'
    )
    # Choice filters over the related player/game names.
    player_name = AllValuesFilter(
        name='player__name'
    )
    game_name = AllValuesFilter(
        name= 'game__name'
    )
    class Meta:
        model = PlayerScore
        fields = (
            'score',
            'from_score_date',
            'to_score_date',
            'min_score',
            'max_score',
            # player__name will be accessed as player_name
            'player_name',
            #game__name will be accessed as game_name
            'game_name'
        )
class PlayerScoreList(ListCreateAPIView):
    """List or create player scores; filterable via PlayerScoreFilter."""
    queryset = PlayerScore.objects.all()
    serializer_class = PlayerScoreSerializer
    name = 'playerscore-list'
    filter_class =PlayerScoreFilter
    ordering_fields = (
        'score',
        'score_date',
    )
class PlayerScoreDetail(RetrieveUpdateDestroyAPIView):
    """Retrieve, update or delete a single player score."""
    queryset = PlayerScore.objects.all()
    serializer_class = PlayerScoreSerializer
    name = 'playerscore-detail'
class UserList(ListAPIView):
    """Read-only list of all users (GET)."""
    queryset = User.objects.all()
    serializer_class = UserSerializer
    name = 'user-list'
class UserDetail(RetrieveAPIView):
    """Read-only detail view for a single user (GET)."""
    queryset = User.objects.all()
    serializer_class = UserSerializer
    name = 'user-detail'
|
5,434 | db93de33f537eeaf64ca8e2b2b79aba1f592305b | import urllib.request
# NOTE(review): username is empty, so the request targets the site root
# https://www.instagram.com/ — fill in a username before running.
username = ''
link = r'https://www.instagram.com/' + username
# Fetch the page and dump the raw response bytes to stdout.
html = urllib.request.urlopen(link)
print(html.read())
# Print the expression "1 + 2 + ... + n" one token per line, then its sum.
n = 7
a = list(range(1, n + 1))
for position, value in enumerate(a, start=1):
    print(value)
    # A '+' separates the terms; none is printed after the last one.
    if position < n:
        print("+")
print("= {}".format(sum(a)))
|
5,436 | b724b04c6303cc9021539ad7df5a198000491029 | # -*- coding: utf-8 -*-
# Project = https://github.com/super-l/search-url.git
# Author = superl
# Blog = www.superl.org QQ:86717375
# Team = Code Security Team(C.S.T) | 铭剑创鼎
import urllib2
import re
import ConfigParser
from lib.filter import *
from lib.getdata import *
from lib.count import *
from lib.status import *
class Baidu():
    """Scraper for Baidu web-search result pages (Python 2 code).

    One call to :meth:`search` fetches a single result page, extracts the
    result URLs and titles with regexes, runs them through the project's
    filter, and optionally appends de-duplicated hits to ``<key>.txt``.
    """
    # Class-level defaults; baidu_page_size is overridden from config.
    baidu_page_size = 50
    search_name = '[baidu]'
    def __init__(self,count) :
        """Load settings from config/setting.conf; *count* accumulates totals."""
        cfg = ConfigParser.ConfigParser()
        cfg.read("config/setting.conf")
        self.baidu_page_size = int(cfg.get("search", "baidu_page_size"))
        self.savefile = cfg.get("global", "savefile")
        # NOTE(review): these stay raw config strings, so the later
        # truthiness checks (`if self.write_title:`) are true for ANY
        # non-empty value, including "False" — confirm intended semantics.
        self.write_title = cfg.get("log", "write_title")
        self.write_name = cfg.get("log", "write_name")
        self.my_filter = SupFilter()
        self.my_data = SupGetData()
        self.my_status = Supstatus()
        self.count = count
    #Get the web page source code
    def search(self,key,page_pn):
        """Scrape one Baidu result page for *key* at result offset *page_pn*."""
        #The number of baidu pages currently viewed
        #page_num = page_pn/baidu_page_size
        # Python 2 integer division: result offset -> 1-based page number.
        page_num = str(page_pn/self.baidu_page_size+1)
        search_url = 'http://www.baidu.com/s?wd=key&rn='+str(self.baidu_page_size)+'&pn='+str(page_pn)
        search_url = search_url.replace('key',key)
        #print search_url
        htmlcontent = self.my_data.get_pagehtml(search_url,'baidu')
        # If the current-page marker is absent, flag the engine as done
        # (presumably the caller checks baidu_search to stop paging).
        regex_page = r'<span class="pc">'+page_num+'</span>'
        page_compile = re.compile(regex_page)
        page_result = page_compile.findall(htmlcontent)
        if page_result:
            pass
        else:
            self.my_status.baidu_search = False
            return
        regex_titleurl = r'<div class="result c-container ".*<h3 class=".*"><a(?:[^\<]*\n[^\<]*)href = "(?P<url>.+?)"(?:[^\<]*\n[^\<]*)target="_blank"(?:[^\<]*\n[^\<]*)>(?P<title>.+?)</a></h3>'
        content = re.compile(regex_titleurl)
        find_result = content.findall(htmlcontent)
        print ("\033[1;37;40m==========================百度 第%s页采集开始================\n"%(page_num))
        if self.savefile == 'True':
            logfile = open(key+'.txt','a')
        for i in range(len(find_result)):
            # Strip any HTML tags left inside the captured title.
            dr = re.compile(r'<[^>]+>',re.S)
            title = dr.sub('',find_result[i][1])
            # Resolve Baidu's redirect link to the real target URL.
            realurl = self.my_data.get_baidu_realurl(find_result[i][0])
            self.count.all_totals+=1
            # filter_data returns the sentinel string "filter" for rejects.
            realurl = self.my_filter.filter_data(realurl,title)
            if realurl != "filter":
                self.count.all_checked_totals+=1
                print ("[ID]:%d [URL]:%s [TITLE]:%s"%(i,realurl,title))
                if self.savefile == 'True':
                    # Re-scan the log file to skip URLs already recorded.
                    have_url = 0
                    with open(key+'.txt','r') as foo:
                        for line in foo.readlines():
                            if realurl in line:
                                have_url = 1
                    if have_url ==0:
                        if self.write_title:
                            if self.write_name:
                                logfile.write(self.search_name+realurl+' '+title+'\n')
                            else:
                                logfile.write(realurl+' '+title+'\n')
                        else:
                            if self.write_name:
                                logfile.write(self.search_name+realurl+'\n')
                            else:
                                logfile.write(realurl+'\n')
                    else:
                        self.count.all_delete_totals+=1
            else:
                self.count.all_filter_totals+=1
        if self.savefile == 'True':
            logfile.close()
        print ("==========================百度 第%s页采集结束================\n"%(page_num))
5,437 | b51e0ee80a2488197470627821204d1f74cd62a1 | # POST API for Red Alert project - NLP and Metalearning components
# Insikt Intelligence S.L. 2019
import pandas as pd
import pickle
from flask import Flask, render_template, request, jsonify
from utilities import load_data, detect_language
from preprocessing import preprocess, Tagger, remove_stopwords
import json
from gensim.models import KeyedVectors
from Embeddings import Embeddings, to_vector_single, to_vector_single_nonzeros
import numpy as np
import os
from analysis import analyze
from probability_terror import probability_terror
from new_terms_no_lang import new_terms
from classifier import classifier
from claslisting import claslisting
from audit import audit
app = Flask(__name__)
# Embedding model name per supported ISO 639-1 language code.
emb_dict = {"en": "embedding-EN", "ar": "embedding-AR", "es": "embedding-ES", "ro": "embedding-RO","fr": "embedding-FR"}
@app.route('/vectorize',methods=['POST'])
def make_vectorize():
    """POST /vectorize

    Body: {"text": str, "lang": optional ISO code}. Embeds the
    stopword-filtered text with the language's word embeddings and returns
    the mean token vector as JSON {"vector": [...]}. The language is
    auto-detected from the text when "lang" is absent.
    """
    try:
        #Load the data
        data = request.get_json()
    except Exception as e:
        raise e
    if data == {}:
        return(bad_request())
    else:
        #Get the text and the language
        try:
            lang = data['lang']
        except:
            # Fall back to automatic language detection from the text.
            try:
                lang=detect_language(data['text'])
                print(lang)
            except:
                responses=jsonify("Error in vectorize: language field is missing")
                return responses
        try:
            text = data['text']
        except:
            responses=jsonify("Error in vectorize: text is missing")
            return responses
        if lang not in ['en','es','ar','ro','fr']:
            responses=jsonify("Language not available. Language must be in ['en','es','ar','ro','fr']")
            return responses
        #Preprocess the text
        print("Vectorize...")
        embeddings = Embeddings(emb_dict[lang])
        processed_text = preprocess(text)
        no_stpw_text = remove_stopwords(processed_text, lang)
        vectorized_tokens=to_vector_single_nonzeros(no_stpw_text, embeddings,len(no_stpw_text))
        if len(vectorized_tokens) > 0:
            vectorized_text = np.mean(vectorized_tokens, axis=0)
        else:
            # No token had an embedding: fall back to the zero vector
            # (assumes 300-dimensional embedding models — TODO confirm).
            vectorized_text =np.zeros((300,)*1)
        print(vectorized_text)
        #Send the response codes
        responses = jsonify(vector=vectorized_text.tolist())
        responses.status_code = 200
        return responses
@app.route('/probability',methods=['POST'])
def make_probability():
    """POST /probability

    Body: {"text": str, "lang": optional ISO code, "classifier": str}.
    Returns JSON {"probability": float}: the probability that *text*
    contains content related to the given classifier. The language is
    auto-detected from the text when "lang" is absent.
    """
    try:
        #Load the data
        data = request.get_json()
    except Exception as e:
        raise e
    if data == {}:
        return(bad_request())
    else:
        #Get the text,language and classifier
        try:
            lang = data['lang']
        except:
            try:
                lang=detect_language(data['text'])
                print(lang)
            except:
                # Fixed copy-paste: this is the /probability handler, so the
                # message must not claim the error happened in /vectorize.
                responses=jsonify("Error in probability: language field is missing")
                return responses
        try:
            text = data['text']
        except:
            responses=jsonify("Error in probability: text is missing")
            return responses
        try:
            cls = data['classifier']
        except:
            responses=jsonify("Error in probability: classifier is missing")
            return responses
        if lang not in ['en','es','ar','ro','fr']:
            responses=jsonify("Language not available. Language must be in ['en','es','ar','ro','fr']")
            return responses
        #Preprocess the text
        print("Computing probability of having content related to "+cls)
        probability = probability_terror(text,lang,cls)
        #Send the response codes
        responses = jsonify(probability=probability)
        responses.status_code = 200
        return responses
@app.route('/analyze',methods=['POST'])
def make_analyze():
    """POST /analyze

    Body: {"text": str, "lang": optional ISO code}. Runs the NLP analysis
    against the registered models and returns JSON with "concepts",
    "key_ideas" and "topics".
    """
    try:
        #Load the data
        data = request.get_json()
    except Exception as e:
        raise e
    if data == {}:
        return(bad_request())
    else:
        #Get the text and the language
        try:
            lang = data['lang']
        except:
            try:
                lang=detect_language(data['text'])
                print(lang)
            except:
                # NOTE(review): message says 'vectorize' — copy-paste from
                # the /vectorize handler; should read 'analyze'.
                responses=jsonify("Error in vectorize: language field is missing")
                return responses
        try:
            text = data['text'] # we assume text is tokenized
        except:
            responses=jsonify("Error in analyze: text is missing")
            return responses
        if lang not in ['en','es','ar','ro','fr']:
            responses=jsonify( message = "Language not available. Language must be in ['en','es','ar','ro','fr']")
            return responses
        # Load the model registry relative to this module's location.
        filename = os.path.join(os.path.dirname(__file__), 'models-registry.json')
        registry = load_data(filename)
        # analyze() returns (concepts, key_ideas, topics).
        analysis = analyze(text, lang, registry)
        #print(analysis[0])
        #Send the response codes
        responses = jsonify(concepts=analysis[0],key_ideas=analysis[1],topics=analysis[2])
        responses.status_code = 200
        return responses
@app.route('/terms',methods=['POST'])
def make_terms():
    """POST /terms

    Body: {"dataset": list of texts}. Returns JSON with new search terms
    suggested from the supplied corpus.
    """
    try:
        #Load the data
        data = request.get_json()
    except Exception as e:
        raise e
    if data == {}:
        return(bad_request())
    else:
        # NOTE(review): unlike the other endpoints, a missing 'dataset' key
        # raises an unhandled KeyError here instead of a JSON error message.
        texts = data['dataset'] # we assume text is tokenized
        #Preprocess the text
        print("Suggesting new terms for search...")
        terms=new_terms(texts)
        #print(terms)
        #Send the response codes
        responses = jsonify(message="Suggested new terms for search: ",terms= list(terms))
        responses.status_code = 200
        return responses
@app.route('/sento',methods=['POST'])
def make_sento():
    """POST /sento

    Body: {"text": str, "lang": optional ISO code, "classifier": str}.
    Combined analysis: classifier probability plus concepts/key ideas/
    topics, with an audit event recorded for the task.
    """
    try:
        #Load the data
        data = request.get_json()
    except Exception as e:
        raise e
    if data == {}:
        return(bad_request())
    else:
        #Get the text, language and classifier
        try:
            lang = data['lang']
        except:
            try:
                lang=detect_language(data['text'])
                print(lang)
            except:
                # NOTE(review): message says 'vectorize' — copy-paste from
                # the /vectorize handler; should read 'sento'.
                responses=jsonify("Error in vectorize: language field is missing")
                return responses
        try:
            text = data['text']
        except:
            responses=jsonify("Error in sento: text is missing")
            return responses
        try:
            cls = data['classifier']
        except:
            responses=jsonify("Error in sento: classifier is missing")
            return responses
        if lang not in ['en','es','ar','ro','fr']:
            responses=jsonify("Language not available. Language must be in ['en','es','ar','ro','fr']")
            return responses
        #Preprocess the text
        print("Sento analysis")
        # Probability
        probability = probability_terror(text,lang,cls)
        print(probability)
        # Analyze
        filename = os.path.join(os.path.dirname(__file__), 'models-registry.json')
        registry = load_data(filename)
        analysis = analyze(text, lang, registry)
        # Record the task in the audit trail (result is not checked).
        data_audit={"auditEventType":"Start task","details":{"sento":"NLP analysis"},"principal":"Analyst"}
        datajson=json.dumps(data_audit)
        results_audit=audit(datajson)
        #Send the response codes
        responses = jsonify(probability=probability,concepts=analysis[0],key_ideas=analysis[1],topics=analysis[2])
        responses.status_code = 200
        return responses
@app.route('/classifier',methods=['POST'])
def make_classifier():
    """POST /classifier

    Body: {"annotated_data": list, "lang": optional ISO code,
    "user_id": str, "case_id": str, "clas_name": str}. Trains and saves a
    new classifier from the user's annotated dataset and returns its
    cross-validated accuracy.
    """
    try:
        #Load the data
        data = request.get_json()
    except Exception as e:
        raise e
    if data == {}:
        return(bad_request("There is no data for the training"))
    else:
        #Get the text and the language
        try:
            lang = data['lang']
        except:
            try:
                lang=detect_language(data['text'])
                print(lang)
            except:
                # NOTE(review): message says 'vectorize' — copy-paste from
                # the /vectorize handler; should read 'classifier'.
                responses=jsonify("Error in vectorize: language field is missing")
                return responses
        try:
            annotated_data = data['annotated_data']
        except:
            responses=jsonify("Error in classifier: annotated data is missing")
            return responses
        try:
            user_id=data['user_id']
        except:
            responses=jsonify("Error in classifier: user_id is missing")
            return responses
        try:
            case_id=data['case_id']
        except:
            responses=jsonify("Error in classifier: case_id is missing")
            return responses
        try:
            clas_name=data['clas_name']
        except:
            responses=jsonify("Error in classifier: classifier name is missing")
            return responses
        print(len(annotated_data))
        # NOTE(review): threshold is 22 total samples, while the message
        # promises >10 per class — confirm which rule is intended.
        if len(annotated_data) < 22:
            responses=jsonify( "Training data set should have more than 10 samples per each class")
            return responses
        if lang not in ['en','es','ar','ro','fr']:
            responses=jsonify("Language not available. Language must be in ['en','es','ar','ro','fr']")
            return responses
        #Train the new classifier
        print("Training a new classifier from the user's annotated dataset ")
        accuracy=classifier(annotated_data,lang,user_id,case_id,clas_name)
        # Record the training task in the audit trail.
        data_audit={"auditEventType":"Start task","details":{"classifier":"Trains a new classifier based on the annotations provided by the user"},"principal":"Analyst"}
        datajson=json.dumps(data_audit)
        results_audit=audit(datajson)
        #Send the response codes
        responses = jsonify(message="Classifier has been saved. Accuracy given in % - calculated using C-10V", accuracy=accuracy)
        responses.status_code = 200
        return responses
@app.route('/claslisting',methods=['POST'])
def make_claslisting():
    """POST /claslisting

    Body: {"user_id": str, "case_id": str}. Returns the classifiers
    available for the given user and case, recording an audit event.
    """
    user_id=None
    case_id=None
    try:
        #Load the data
        data = request.get_json()
    except Exception as e:
        raise e
    if data == {}:
        return(bad_request())
    else:
        try:
            user_id=data['user_id']
        except:
            responses=jsonify(message="Error in classifiers listing: user_id is missing")
            return responses
        try:
            case_id=data['case_id']
        except:
            responses=jsonify(message="Error in classifiers listing: case_id is missing")
            return responses
        available_classifiers=claslisting(user_id,case_id)
        # Record the listing task in the audit trail.
        data_audit={"auditEventType":"Start task","details":{"claslisting":"Lists the available classifiers"},"principal":"Analyst"}
        datajson=json.dumps(data_audit)
        results_audit=audit(datajson)
        #Send the response codes
        responses = jsonify(available_classifiers=available_classifiers)
        responses.status_code = 200
        return responses
@app.route('/my400')
def bad_request(msg=''):
    """Build a 400 Bad Request response; an empty *msg* becomes 'Error'."""
    body = msg if msg != '' else 'Error'
    return body, 400
if __name__ == '__main__':
    #app.run()
    # Bind to all interfaces so the API is reachable from outside the host
    # (e.g. inside a container).
    app.run(host='0.0.0.0',port=5000)
|
5,438 | 5209638ec97a666783c102bec7a2b00991c41a08 | # ----------------------------------------------------------------------------
# Written by Khanh Nguyen Le
# May 4th 2019
# Discord: https://discord.io/skyrst
# ----------------------------------------------------------------------------
import operator
def validInput(x):
    """Return True when *x* is one of the accepted answers: 'a', 'b', 'c', 'd'.

    Replaces the original four-branch if/elif chain with a single
    membership test — same results for every input.
    """
    return x in ('a', 'b', 'c', 'd')
def takeInput():
    """Read stdin until a valid answer (a/b/c/d) appears; return it.

    After every invalid entry a retry prompt is printed.
    """
    while True:
        answer = input()
        if validInput(answer):
            return answer
        print("Invalid input. Try another one:")
def main():
    """Run the eight-question house quiz on stdin and print the winning house.

    The original repeated the print/input/if-elif pattern eight times; the
    questions are now a data table driving one loop, producing exactly the
    same prompts, tallies and result. Also fixes the "the the" typo in the
    intro text.
    """
    stats = {'Council':0, 'United':0, 'Faceless': 0, 'Warband':0}
    print("Welcome to Skyrst's open-source recreation of League of Legends house entrance quiz.")
    print("Answer the following questions by typing a, b, c or d.")
    # One entry per question: (prompt, {answer letter -> house awarded}).
    questions = [
        ("I'd die without my...\na. Freedom\nb. Knowledge\nc. Talent\nd. Hope",
         {'a': 'Faceless', 'b': 'Council', 'c': 'Warband', 'd': 'United'}),
        ("Pick an animal:\na. Owl\nb. Leopard\nc. Elepant\nd. Octopus",
         {'a': 'Warband', 'b': 'Faceless', 'c': 'United', 'd': 'Council'}),
        ("Wars are won...\na. In the heat of battle\nb. In the planning room\nc. With unbreaking resolve\nd. By the unpredictable",
         {'a': 'Warband', 'b': 'Council', 'c': 'United', 'd': 'Faceless'}),
        ("The perfect team would never...\na. Give up\nb. Lose focus\nc. Tell me what to do\nd. Feed my opponent",
         {'a': 'United', 'b': 'Council', 'c': 'Warband', 'd': 'Faceless'}),
        ("The enemy team is winning on all fronts. What do you do?\na. Outmaneuver them to steal some objectives\nb. Rally my team for a final stand\nc. Go pentakill them, like I always do\nd. This is right where I want them--I'll explain later",
         {'a': 'Faceless', 'b': 'United', 'c': 'Warband', 'd': 'Council'}),
        ("What's your favorite time of the day\na. Dawn\nb. Day\nc. Dusk\nd. Night",
         {'a': 'United', 'b': 'Council', 'c': 'Faceless', 'd': 'Warband'}),
        ("Which of these sounds like you\na. \"Can we please group\"\nb. \"Trust me. I'm not trolling\"\nc. \"ez\"\nd. \"WINNABLE\"",
         {'a': 'Council', 'b': 'Faceless', 'c': 'Warband', 'd': 'United'}),
        ("I want to be seen as a(n)...\na. Selfless leader\nb. Brilliant tactician\nc. Crafty wildcard\nd. Elite fighter",
         {'a': 'United', 'b': 'Council', 'c': 'Faceless', 'd': 'Warband'}),
    ]
    for number, (prompt, houses) in enumerate(questions, 1):
        print(str(number) + "/8")
        print(prompt)
        answer = takeInput()
        stats[houses[answer]] += 1
    print("\n")
    # Ties resolve to the first house reaching the maximum, in dict
    # insertion order (Council, United, Faceless, Warband) — same as the
    # original chained-if version.
    result = max(stats.items(), key=operator.itemgetter(1))[0]
    print("Congratulations! You are a " + result)
# Run the quiz as soon as this file is executed (or imported).
main()
5,439 | d287123acdbabdd5a223e774c89945ab888fcbcc | #os for file system
import os
from sys import platform as _platform
import fnmatch
import inspect
# Walk this script's directory tree, count the non-blank lines of every
# C/C++ source or header file, print "<file> <count>" per file, and log
# "<path> <count>" per file plus the file/line totals.
files = 0
lines = 0
# Extensions treated as C/C++ code; str.endswith accepts the whole tuple.
extensions = ('.c', '.cpp', '.h', '.hpp')
filename = inspect.getframeinfo(inspect.currentframe()).filename
startPath = os.path.dirname(os.path.abspath(filename))
with open("files_with_extensions.txt", "w", encoding="utf-8") as filewrite:
    for r, d, f in os.walk(startPath):
        for file in f:
            if file.endswith(extensions):
                if _platform == "linux" or _platform == "linux2":
                    ss = '/'
                elif _platform == "win32" or _platform == "win64":
                    ss = '\\'
                else:
                    # Bug fix: ss was unbound (NameError) on any other
                    # platform, e.g. macOS ("darwin").
                    ss = os.sep
                filePathAndName = r + ss + file
                files += 1
                fileLines = 0
                # Bug fix: the original readline/tell loop also counted the
                # empty read at EOF, overcounting every file by one, and
                # leaked the handle on error; iterate under a context manager.
                with open(filePathAndName, 'r') as fi:
                    for li in fi:
                        # Lines containing only whitespace are not counted.
                        if li.isspace():
                            continue
                        fileLines += 1
                lines += fileLines
                # Bug fix: path and count were written with no separator
                # between them ("path42"), making the log unparseable.
                filewrite.write(f"{filePathAndName} {fileLines}\n")
                print(file + " " + str(fileLines))
    # Bug fix: the totals were written after the `with` block, i.e. to an
    # already-closed file, which raises ValueError at runtime.
    filewrite.write(f"{files}\n")
    filewrite.write(f"{lines}\n")
print(files)
print(lines)
5,440 | 8286407987301ace7af97d6acdcf6299ce3d8525 | from django.db import models
class Book(models.Model):
    """A book identified by a required free-text title."""
    # NOTE(review): max_length on a TextField is enforced only in forms,
    # not at the database level — confirm whether CharField was intended.
    title = models.TextField(max_length=32, blank=False, null=False)
# from django.contrib.auth.models import AbstractBaseUser, PermissionsMixin, BaseUserManager
#
#
# class UserAccountManager(BaseUserManager):
# def create_user(self, email, firstname,lastname, phonenumber, password=None,):
#
# if not email:
# raise ValueError('Users must have an email address')
# email = self.normalize_email(email)
# user = self.model(email=email, name=firstname)
# user.set_password(password)
# user.save()
#
# class UserAccount(AbstractBaseUser, PermissionsMixin):
# email = models.EmailField(max_length=255, unique=True)
# firstname = models.CharField(max_length=255)
# lastname = models.CharField(max_length=255)
# is_active = models.BooleanField(default=True)
# is_staff = models.BooleanField(default=True)
#
# objects = UserAccountManager()
#
# USERNAME_FILED = 'email'
# REQUIRED_FIELDS = ['firstname','lastname','phonenumber']
#
# def get_full_name(self):
# return self.firstname + " " + self.lastname
#
# def get_short_name(self):
# return self.firstname
#
# def __str__(self):
# return self.email
|
5,441 | b4d31fd05f8a9d66dcfffb55d805ab93d7ff9cdf | #Write a function remove_duplicates that takes in a list and removes elements of the list that are the same.
#For example: remove_duplicates([1,1,2,2])
#should return [1,2].
#Do not modify the list you take as input! Instead, return a new list.
def remove_duplicates(lst_of_items):
    """Return a new list with duplicates removed, keeping only the first
    occurrence of each element and preserving order.

    The input list is never modified.
    """
    unique = []
    for item in lst_of_items:
        # Linear membership scan: slower than a set, but keeps the function
        # working for unhashable elements exactly like the original.
        if item not in unique:
            unique.append(item)
    return unique
# Use the print() function form so the demo also runs on Python 3
# (the statement form was Python 2 only; the parenthesized call is
# accepted by both interpreters).
print(remove_duplicates([1, 3, 1, 2, 2, 3, 3, 3]))
|
5,442 | c96a64573fc6cc207ee09be4f4b183d065736ff6 | from collections import deque
'''
Big O
เวลาเรียก queue จะมี2operation 1deque 2enqueue เวลาเอาไปใช้
อยู่ที่การimplementation
โปรแกรมที่ดี 1.ทำงานถูกต้อง 2.ทันใจ 3.ทรัพยากรที่ใช้รันได้ทุกเครื่อง(specคอมกาก)
4.ทำงานได้ตามต้องการ5.ความเสถียรของระบบ 6.Bugs
แพง คือ memory expansive ใช้หน่วยความจำเยอะ
runtime expensive ใช้เวลาเยอะ
เลยเกิด queue linklist
โดยแต่ละอย่าง
- linklist มีcost มาเกี่ยว
- dequeue ใช้ O(1) มันขึ้นกับว่าจำหน่วยตัวข้างในมี10ตัวใช้ 1ms 1ล้านตัวก็ใช้ 1ms
เรียกความเร็วคงที่ อีกชื่อ O(1) โอวัน โอหนึ่ง แต่มันในอุดมคติ
เวลาใใช้ linklist เก็บตัวชี้และ ข้อมูล มันเลยใช้ หน่วยความจำเป็น2เท่าของ list
Big O คือการวิเคราะห์ runing time complexityเปรียบเทียบสองตัวว่าตัวไหนมีประสิทธิภาพดีกว่า
แต่Big O ที่ดีกว่าไม่ได้เร็วกว่า เพราะ ขึ้นอยุกับ ความเร็วspecเครื่อง
n T(n)
1 1ms
10 10ms
1M 1000s
T(N)ผันตามn เรียก O(n)
อีกเคส
n T(n)
1 1
10 100
1M 1Ms
T(N) ผันตาม n^2,n^3,n! จะใช้เวลาเยอะมาก
เช่น ให้ทาย อันไหนเร็วสุด
1. O(1) อันดับ1
2. O(n) อันดับ3
3. O(n^2) อันดับ4
4. O(logn) อันดับ2
เวลาใช้ linklist จะมี3ขั้นตอนในการเชื่อม 1.สร้างnodeใหม่ 2.ลิ้งข้อมูลอันเก่ากะอันใหม่ 3.ลิ้งส่วนfront
radix sort ดูค่าในแต่ละหลัก
1.รับ input เก็บไว้ในqueue
2.หยิบตัวแรกออกไป
3.มันจะหาว่าตัวไหนmax และมีกี่หลัก
4.จะมีการเทียบ3รอบ รอบที่1 เอาข้อมูลที่ดึงออกมา เก็บไว้ตามหลักในรอบนั้นๆเช่น 64 เลขหลักหน่วยตรงกับหลัก4 ก้เก่บไว้ที่4
'''
class Queue:
    """Minimal FIFO queue backed by collections.deque (O(1) at both ends)."""
    def __init__(self):
        self.items = deque()
    def enQueue(self, i):
        """Add *i* at the back of the queue."""
        self.items.append(i)
    def deQueue(self):
        """Remove and return the front item (IndexError when empty)."""
        return self.items.popleft()
    def isEmpty(self):
        """Return True when nothing is queued."""
        return not self.items
    def size(self):
        """Return the number of queued items."""
        return len(self.items)
'''class Queue():
def __init__(self,list=None):
if list==None:
self.items=[]
else:
self.items=list
def enQueue(self,i):
self.items.append(i)
def deQueue(self):
self.items.pop(0)
def isQEmpty(self):
return len(self.items)==0
def size(self):
return len(self.items)
'''
if __name__== '__main__':
    # Quick smoke test: enqueue one item, dequeue it, and show the state
    # of the underlying deque after each operation.
    q=Queue()
    print(q.items)
    q.enQueue('A')
    print(q.items)
    q.deQueue()
    print(q.items)
    print(q.isEmpty())
|
5,443 | 5c315a49ead80e8d8ce057bd774f97bce098de59 | import math
import numpy as np
import h5py
import matplotlib.pyplot as plt
import scipy
from PIL import Image
from scipy import ndimage
import tensorflow as tf
from tensorflow.python.framework import ops
from pathlib import Path
from scipy.io import loadmat
from skimage.transform import resize
from sklearn.model_selection import train_test_split
import cv2
import keras
from keras.models import Model
from keras.optimizers import RMSprop
from keras.layers import Input, Dense, Activation, ZeroPadding2D, BatchNormalization, Flatten, Conv2D
from keras.layers import AveragePooling2D, MaxPooling2D, Dropout, GlobalMaxPooling2D, GlobalAveragePooling2D
from keras.layers.normalization import BatchNormalization
from keras.models import Model,Sequential
from keras.callbacks import ModelCheckpoint
from keras.optimizers import Adadelta, RMSprop,SGD,Adam
from keras import regularizers
from keras import backend as K
from keras.utils import to_categorical
#Load data
# Arrays produced by an earlier preprocessing step: X_* are image tensors,
# Y_* are one-hot label matrices, y_*_orig are the matching integer labels.
X_train = np.load('X_train.npy')
Y_train = np.load('Y_train.npy')
X_test = np.load('X_test.npy')
Y_test = np.load('Y_test.npy')
y_test_orig = np.load('y_test_orig.npy')
y_train_orig = np.load('y_train_orig.npy')
print('X_train shape:' +str(X_train.shape))
print('Y_train shape:' +str(Y_train.shape))
print('X_test shape:' + str(X_test.shape))
print('Y_test shape:' +str(Y_test.shape))
print('y_train_orig shape:' + str(y_train_orig.shape))
print('y_test_orig shape:' + str(y_test_orig.shape))
# Training hyper-parameters: 128x128 single-channel inputs, 7 classes.
batch_size = 32
epochs = 200
inChannel = 1
x, y = 128, 128
input_shape = (x, y, inChannel)
num_classes = 7
def model(input_shape):
    """Build a small three-stage CNN classifier with a 7-way softmax head.

    Each stage is Conv2D -> BatchNorm -> ReLU -> 2x2 MaxPool; every
    convolution uses L2(0.001) weight regularization and "same" padding.
    """
    X_input = Input(input_shape)
    X = X_input
    # (filters, kernel, conv name, batch-norm name, pool name) per stage.
    stages = [
        (8,  (4, 4), 'conv0', 'bn0', 'max_pool'),
        (16, (2, 2), 'conv1', 'bn1', 'max_pool2'),
        (32, (1, 1), 'conv2', 'bn2', 'max_pool3'),
    ]
    for filters, kernel, conv_name, bn_name, pool_name in stages:
        X = Conv2D(filters, kernel, strides=(1, 1), name=conv_name,
                   kernel_regularizer=regularizers.l2(0.001), padding="same")(X)
        X = BatchNormalization(axis=3, name=bn_name)(X)
        X = Activation('relu')(X)
        X = MaxPooling2D((2, 2), name=pool_name)(X)
    # Flatten the feature maps and classify.
    X = Flatten()(X)
    X = Dense(7, activation='softmax', name='fc')(X)
    return Model(inputs=X_input, outputs=X, name='CNN')
# Build, compile and train the CNN, then inspect its test-set behaviour.
CNN_model = model(input_shape)
CNN_model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
CNN_model.summary()
Train = CNN_model.fit(X_train,Y_train,epochs=epochs,batch_size=batch_size,validation_data=(X_test, Y_test))
# Learning curves (train vs validation accuracy per epoch).
plt.plot(Train.history['accuracy'])
plt.plot(Train.history['val_accuracy'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
test_eval = CNN_model.evaluate(X_test,Y_test)
print('Test loss:', test_eval[0])
print('Test accuracy:', test_eval[1])
# Hard class predictions for the test set.
predicted_classes = CNN_model.predict(X_test)
predicted_classes = np.argmax(np.round(predicted_classes),axis=1)
# Indices of correctly / incorrectly classified test samples.
correct = [i for i,item in enumerate(predicted_classes) if item == y_test_orig[i]]
wrong = [i for i,item in enumerate(predicted_classes) if item != y_test_orig[i]]
print(predicted_classes)
print(y_test_orig)
print(correct)
print(wrong)
# Per-class accuracy. NOTE(review): this first loop compares TEST-set
# predictions against y_train_orig — the arrays likely differ in length,
# so this looks like a bug; confirm whether train predictions were meant.
# Also note `all` shadows the builtin and `correct` clobbers the index
# list built above.
accuracy={}
for i in range(7):
    all = np.sum(y_train_orig == i)
    correct = np.array([predicted_classes == y_train_orig]) & np.array([predicted_classes == i])
    correct_count = np.sum(correct)
    accuracy[i] = correct_count/all
    print(all)
    print(correct_count)
# Per-class accuracy on the test set (overwrites the dict above).
accuracy={}
for i in range(7):
    all = np.sum(y_test_orig == i)
    correct = np.array([predicted_classes == y_test_orig]) & np.array([predicted_classes == i])
    correct_count = np.sum(correct)
    accuracy[i] = correct_count/all
    print(all)
    print(correct_count)
print('C0 accuracy = '+ str(accuracy[0]))
print('C1 accuracy = '+ str(accuracy[1]))
print('C2 accuracy = '+ str(accuracy[2]))
print('C3 accuracy = '+ str(accuracy[3]))
print('C4 accuracy = '+ str(accuracy[4]))
print('C5 accuracy = '+ str(accuracy[5]))
print('C6 accuracy = '+ str(accuracy[6]))
#img = correct[1]
#plt.imshow(X_test[img][:,:,0])
#plt.show()
# Show every misclassified test image with truth vs prediction.
for i in range(len(wrong)):
    print(Y_test[wrong[i]], 'ground truth:' +str(y_test_orig[wrong[i]]), 'predict:' +str(predicted_classes[wrong[i]]))
    plt.imshow(X_test[wrong[i]][:,:,0])
    plt.colorbar()
    plt.show()
|
5,444 | b0dbc4e8a2ce41dc9d2040890e3df4d078680fa1 | from collections import deque
def solution(people, limit):
    """Greedy two-pointer count of lifeboats (capacity 2, weight cap *limit*).

    Sorts *people* in place, then tries to pair the lightest remaining
    person with the heaviest; every iteration launches exactly one boat
    carrying the heaviest remaining person, alone or paired.
    """
    people.sort()
    boats = 0
    lo, hi = 0, len(people) - 1
    while lo <= hi:
        # The lightest person rides along only when the pair fits.
        if people[lo] + people[hi] <= limit:
            lo += 1
        hi -= 1
        boats += 1
    return boats
|
5,445 | 4e66fe0485d987da590d11c848009b2e1665b3dc | from flask import Blueprint, render_template, flash, redirect, url_for, request, current_app, g, session
from flask_login import current_user
from app import decorators
from app.models import User, Post, Comment, Tag
from slugify import slugify
from app.main.forms import CommentForm, TagForm, ProfileForm, ContactForm
from app import db
from flask_mail import Message
from app import mail
main = Blueprint('main', __name__)
def manage_prev_page():
    """Remember the page the user came from, skipping account-management
    pages, so a later redirect can return to real content.

    Notes:
    - The original declared ``global session, request``; both are
      module-level imports that are only read / item-assigned, never
      rebound, so the statement was a no-op and has been dropped.
    - ``request.referrer`` is None when the Referer header is absent;
      the guard below prevents the substring checks from raising
      TypeError in that case (the original would crash).
    """
    referrer = request.referrer
    if referrer and 'profile' not in referrer and 'change_password' not in referrer \
            and 'forgot_password' not in referrer \
            and 'request_password' not in referrer:
        session['prev_page'] = referrer
@main.route('/')
def homepage():
    """Render the site landing page."""
    return render_template('main/homepage.html')
@main.route('/about', methods=['GET', 'POST'])
def about():
    """Render the static about page (POST is accepted but not used here)."""
    return render_template('main/about.html')
@main.route('/blog')
def blog():
    """Render the paginated blog index (5 posts per page) with every tag."""
    page = request.args.get('page', 1, type=int)
    posts = Post.query.paginate(page=page, per_page=5)
    tags = Tag.query.all()
    return render_template('main/blog.html', posts=posts, slugify=slugify, tags=tags)
@main.route('/blog/post/<int:post_id>/<string:post_url>', methods=['GET', 'POST'])
def post(post_id, post_url):
    """Display one post with its comments and tags; handle comment submission.

    Anonymous visitors get a flash message instead of a saved comment.
    A redundant initial ``Tag.query.filter_by(id=post_id)`` (which filtered
    on the wrong column and was immediately overwritten) was removed.
    """
    post = Post.query.filter_by(id=post_id).first()
    form = CommentForm()
    # Newest comments first.
    comments = Comment.query.filter_by(post_id=post_id).order_by(Comment.date.desc())
    tags = Tag.query.filter_by(post_id=post_id)
    if form.validate_on_submit():
        if current_user.is_authenticated:
            comment = Comment(content=form.content.data,
                              post_id=post_id, author_id=current_user.id)
            db.session.add(comment)
            db.session.commit()
            flash('Your comment has been published')
            # Redirect so a refresh does not resubmit the comment.
            return redirect(url_for('main.post', post_id=post_id, post_url=post_url))
        else:
            flash('You need to get logged in to comment')
    return render_template('main/post.html', post=post, form=form, comments=comments, tags=tags, post_id=post_id, post_url=post_url )
@main.route('/profile', methods=['GET', 'POST'])
@decorators.login_required
def profile():
    """Show and update the logged-in user's account details.

    On GET the referrer is recorded so that a successful save can redirect
    back to the page the user came from.
    """
    if request.method == 'GET':
        manage_prev_page()
    # NOTE(review): the form is not pre-populated with the current username
    # and email on GET — confirm whether the template fills the fields.
    form = ProfileForm()
    if request.method == 'POST':
        if form.validate_on_submit():
            current_user.username = form.username.data
            current_user.email = form.email.data
            db.session.commit()
            flash('Your account has been changed!')
            return redirect(session['prev_page'])
        else:
            flash('Please check your data!')
    return render_template('main/profile.html', form=form)
|
5,446 | bc5e928305d82c92c10106fe1f69f5979d57e3d2 | import os
from tqdm import tqdm
from system.krl import KRL
from system.utils.format import format_data
from system.oie import OIE
# extract one file
def execute_file(input_fp, output_fp):
oie = OIE()
oie.extract_file(input_fp, output_fp)
# extract one sentence
def execute_sentence():
    """Demo: extract triples from a single hard-coded sample sentence.

    NOTE(review): the first two ``line = {...}`` assignments are dead —
    each is immediately overwritten, so only the final sample is used.
    """
    oie = OIE()
    # test one data
    line = {"text": "印度空军参谋长阿尔琼也提防巴空军的“决定性行动”,并且他致电帕赞科特基地司令苏里上校"}
    line = {"text": "中美两国的人民反对大规模的杀伤性的武器"}
    line = {"id": "6",
            "sysId": "eb88374b30fda925b399e787a927327c",
            "text": "乔治·塞菲里斯,生于小亚细亚的斯弥尔纳城,父亲是雅典大学教授,国际法专家。",
            "event_list": [
                {"event_type": "举办类", "trigger": "举行", "trigger_start_index": "38", "trigger_end_index": "40",
                 "trigger_entity_type": "NONE", "arguments": [
                    {"role": "会议", "argument": "抗议集会", "argument_start_index": "40", "argument_end_index": "44",
                     "argument_entity_type": "Meeting"},
                    {"role": "地点", "argument": "普天间基地", "argument_start_index": "31", "argument_end_index": "36",
                     "argument_entity_type": "ZBGC"},
                    {"role": "时间", "argument": "13日", "argument_start_index": "0", "argument_end_index": "3",
                     "argument_entity_type": "Time"},
                    {"role": "主体", "argument": "冲绳和平运动中心", "argument_start_index": "4", "argument_end_index": "12",
                     "argument_entity_type": "Org"}]}]}
    sample = line['text']
    # Extract and print the human-readable summary of the triples.
    result, quick_look = oie.extract(sample, True, True, True)
    print(quick_look)
    # s += len(result)
    # opobj.write(str(result) + "\n")
    # opobj2.write(str(quick_look) + "\n")
    # print(s)
    # opobj.close()
    # opobj2.close()
def clean_triples(train_fp, output_fp, is_train: bool):
    """Train or load a TransE knowledge-representation model used to score
    (and later filter) extracted triples.

    Parameters:
        train_fp: path to the formatted triples used for training/dev.
        output_fp: accepted for interface stability; not used yet.
        is_train: True to train a fresh model, False to load a saved one.
    """
    krl = KRL()
    # Single assignment — the original re-set the same value inside the
    # if-branch, which was redundant.
    model_type = 'TransE'
    if is_train:
        krl.train(train_fp, model_type=model_type, dev_path=train_fp,
                  save_path='./krl_{}_saves'.format(model_type))
    else:
        krl.load(save_path='./krl_{}_saves'.format(model_type), model_type=model_type)
if __name__ == "__main__":
    # Pipeline: extract triples from raw event data, reformat them, then
    # score/clean them with a knowledge-relation-learning model.
    # 1 extract the triples
    # eg:{"id": "870", "sysId": "3669195fb557cea411d166d353cc194d",
    #     "text": "目前,黎以临时边界“蓝线”沿线,特别是靠近叙利亚戈兰高地的地段局势紧张,黎以军队和联合国驻黎巴嫩南部临时部队(联黎部队)都处于高度戒备状态,以应对以色列空袭叙利亚可能引发的军事冲突。",
    #     "event_list": [...]}
    # -> [['南部临时部队(联黎部队)', '处于', '高度戒备状态'], ['以色列', '空袭', '叙利亚']]
    input_file_path = 'data/all_data.json'
    triples_file_path = 'result/1_after_extract.txt'
    # execute_file(input_file_path, triples_file_path)
    # 2 clean the triples
    # transform the data format
    # [['南部临时部队(联黎部队)', '处于', '高度戒备状态'], ['以色列', '空袭', '叙利亚']] ->
    # 南部临时部队(联黎部队), 处于, 高度戒备状态
    # 以色列, 空袭, 叙利亚
    formatted_fp = 'result/1_after_extract_formatted.txt'
    format_data(triples_file_path, formatted_fp)
    # using Knowledge Relation Learning (KRL) to score the triples
    cleared_file_path = 'result/2_cleared_extract.txt'
    clean_triples(train_fp=formatted_fp, output_fp=cleared_file_path, is_train=True)
|
5,447 | e1968e0d6146ce7656505eeed8e9f31daa4b558a | from django.contrib import admin
# from django.contrib.admin import AdminSite
# class MyAdminSite(AdminSite):
# site_header = 'Finder Administration'
# admin_site = MyAdminSite(name='Finder Admin')
from finder.models import Database, Column, GpsData, Alarm, System
class ColumnInline(admin.TabularInline):
    # Inline tabular editor for the Column rows belonging to a Database.
    model = Column
class GPSInline(admin.TabularInline):
    # Inline editor for GpsData rows; rendered collapsed by default.
    # (Currently not listed in DatabaseAdmin.inlines.)
    model = GpsData
    classes= ('collapse',)
class DatabaseAdmin(admin.ModelAdmin):
    """Admin list/search configuration for Database records.

    A large commented-out ``fieldsets`` layout (profiler / Socrata / GPS
    groupings) was removed as dead code; recover it from version control
    if a grouped change form is ever needed again.
    """
    list_display = ('database_id', 'name', 'category', 'short_profiler_status', 'socrata_status',
                    #'socrata_primary', 'rows', 'columns_count', 'missing_percent',
                    'source_agency',
                    'has_bounding_box')
    search_fields = ('profiler_status','database_id','category','name', 'description','owner','tags',)
    list_filter = ['profiler_status', 'category', 'owner', 'author', 'socrata_status']
    # Pre-fill the name slug from the database id on the add form.
    prepopulated_fields = {'name': ('database_id',)}
    inlines = [ColumnInline
               #, GPSInline
               ]
admin.site.register(Database, DatabaseAdmin)
class AlarmAdmin(admin.ModelAdmin):
    # Alarms listed with a severity filter for quick triage.
    list_display = ['name', 'severity', 'query']
    list_filter = ['severity']
admin.site.register(Alarm, AlarmAdmin)
class SystemAdmin(admin.ModelAdmin):
    # System rows record when and from which file the data was refreshed.
    list_display = ['update_time', 'source_file']
admin.site.register(System, SystemAdmin)
|
5,448 | d3e728bda85d2e72b8e477ab439d4dcffa23d63a | #!/usr/bin/env python
import speech_recognition as sr
from termcolor import colored as color
import apiai
import json
from os import system
import wikipedia as wiki
from time import sleep
import webbrowser as wb
# ANSI escape codes used to emphasise terminal output.
BOLD = "\033[1m" #use to bold the text
END = "\033[0m" #use to close the bold text
# NOTE(review): hard-coded API credential committed to source — move it to
# an environment variable or config file and rotate the token.
CLIENT_ACCESS_TOKEN = "2245d4ab7c99466e806c8986a18234c4"
ai = apiai.ApiAI(CLIENT_ACCESS_TOKEN)
# Base URLs for the assistant's web shortcuts.
google_search = "https://www.google.com/search?q="
youtube_search = "https://www.youtube.com/results?search_query="
google_drive = "https://drive.google.com"
gmail = "https://mail.google.com"
try:
    # Listen on the default microphone until the user presses Ctrl-C.
    r = sr.Recognizer()
    with sr.Microphone() as source:
        system("clear")
        print(color(BOLD+"Hola!\nAsk me anything."+END,"green"))
        while True:
            audio = r.listen(source)
            # while True:
            try:
                # Send the captured audio to Google's speech recognizer.
                query = r.recognize_google(audio)
                print(query)
            except sr.UnknownValueError:
                # Speech was not intelligible; keep listening.
                print (color("Listening","blue"))
except KeyboardInterrupt:
    print (color(BOLD+" Bye!"+END, "cyan"))
5,449 | 5b4a196de60a3a30bc571c559fe5f211563b8999 | # -*- coding: utf-8 -*-
# @File : config.py
# @Author: TT
# @Email : tt.jiaqi@gmail.com
# @Date : 2018/12/4
# @Desc : config file
from utils.general import getchromdriver_version
from chromedriver.path import path
import os
import sys
# Directory of this file; used to locate bundled resources.
_BASE_DIR = os.path.abspath(os.path.dirname(__file__))
# Path to the chromedriver binary matching the installed Chrome version.
# os.path.join keeps this portable instead of hard-coding "\\" separators.
chromedriver = os.path.join(_BASE_DIR, "chromedriver", getchromdriver_version())
# Downloads land next to this file; the trailing separator is kept for
# callers that build paths by plain string concatenation.
download_path = _BASE_DIR + os.sep
# File extensions treated as downloadable archive/binary artifacts.
Suffix_name = ['.bin', '.rar', '.zip', '.7z']
|
5,450 | aa4226c377368d1ece4e556db9b7fdd0134472c9 | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8-80 compliant>
"""
This module contains RestrictBlend context manager.
"""
__all__ = (
"RestrictBlend",
)
import bpy as _bpy
class _RestrictContext:
    """Restricted stand-in for ``bpy.context`` used while scripts are
    loading: only the window manager and preferences stay reachable."""
    __slots__ = ()
    _real_data = _bpy.data
    # safe, the pointer never changes
    _real_pref = _bpy.context.preferences
    @property
    def window_manager(self):
        # First (and only) window manager of the real blend data.
        return self._real_data.window_managers[0]
    @property
    def preferences(self):
        return self._real_pref
class _RestrictData:
    """Restricted stand-in for ``bpy.data`` that exposes nothing at all."""
    __slots__ = ()
# Module-level singletons swapped in by RestrictBlend below.
_context_restrict = _RestrictContext()
_data_restrict = _RestrictData()
class RestrictBlend:
    """Context manager that temporarily replaces ``bpy.data`` and
    ``bpy.context`` with restricted stand-ins, restoring the real
    objects on exit."""
    __slots__ = ("context", "data")

    def __enter__(self):
        # Remember the real objects, then install the restricted ones.
        self.context = _bpy.context
        self.data = _bpy.data
        _bpy.context = _context_restrict
        _bpy.data = _data_restrict

    def __exit__(self, type, value, traceback):
        # Put the real data/context back whatever happened in the body.
        _bpy.data = self.data
        _bpy.context = self.context
|
5,451 | 00051a4087bfcf2e6826e9afa898830dc59aa5ab | # -*-coding:utf-8-*-
# Author: Scott Larter
import pygame
import pygame.draw
import numpy as np
from agent import *
from tools import *
# Display and crowd-model parameters.
SCREENSIZE = [1200, 400] # walls.csv
#SCREENSIZE = [1200, 650] # walls2.csv
RESOLUTION = 180
AGENTSNUM = 12
GROUPSNUM = 2
MAXGROUPSIZE = 6
MAXSUBGROUPSIZE = 3
BACKGROUNDCOLOR = [255, 255, 255]
LINECOLOR = [255,0,0]
AGENTSIZE = 9
AGENTTHICKNESS = 3
WALLSFILE = "walls.csv"
pygame.init()
screen = pygame.display.set_mode(SCREENSIZE)
pygame.display.set_caption('Social Force Model - Crosswalk')
clock = pygame.time.Clock()
# initialize walls
# Each CSV row is one wall segment: x1, y1, x2, y2 in world coordinates.
# NOTE(review): the file handle from open() is never closed — consider a
# `with` block.
walls = []
for line in open(WALLSFILE, newline='', encoding="utf-8-sig"):
    coords = line.split(",")
    wall = []
    wall.append(float(coords[0]))
    wall.append(float(coords[1]))
    wall.append(float(coords[2]))
    wall.append(float(coords[3]))
    walls.append(wall)
# initialize agents
# Nested structure: agents[group][subgroup] -> list of Agent objects.
agents = []
for n in range(AGENTSNUM):
    group_id = (int)(n / MAXGROUPSIZE)
    subgroup_id = (int)((n % MAXGROUPSIZE) / MAXSUBGROUPSIZE)
    if n % MAXGROUPSIZE == 0:
        agents.append([])
    if n % MAXSUBGROUPSIZE == 0:
        agents[group_id].append([])
    agent = Agent(n, group_id, subgroup_id)
    agents[group_id][subgroup_id].append(agent)
# Main simulation loop: handle events, integrate social forces for every
# agent (dt = 0.5 per frame), then redraw walls and agents at ~20 FPS.
running = True
while running:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            running = False
        elif event.type == pygame.MOUSEBUTTONDOWN:
            (mouseX, mouseY) = pygame.mouse.get_pos()
    screen.fill(BACKGROUNDCOLOR)
    # draw walls (world coordinates scaled x10 to pixels)
    for wall in walls:
        startPos = np.array([wall[0],wall[1]])
        endPos = np.array([wall[2],wall[3]])
        startPx = startPos*10 #worldCoord2ScreenCoord(startPos,SCREENSIZE,RESOLUTION)
        endPx = endPos*10 #worldCoord2ScreenCoord(endPos,SCREENSIZE,RESOLUTION)
        pygame.draw.line(screen, LINECOLOR, startPx.astype(int), endPx.astype(int))
    # Force integration for every agent.
    for group in agents:
        for subgroup in group:
            for agent in subgroup:
                agent.direction = normalize(agent.dest - agent.pos)
                agent.desiredV = agent.desiredSpeed * agent.direction
                adapt = agent.adaptVel()
                # initial forces values
                peopleInter = 0.0
                wallInter = 0.0
                groupVis = 0.0
                groupAtt = 0.0
                ownGroupRep = 0.0
                otherGroupRep = 0.0
                # wall interaction
                for wall in walls:
                    wallInter += agent.wallInteraction(wall)
                # people interaction
                for groupj in agents:
                    for subgroupj in groupj:
                        for agentj in subgroupj:
                            if agent.agentId != agentj.agentId:
                                peopleInter += agent.peopleInteraction(agentj)
                # list of group members excluding current ped
                agentGroup = []
                for sub in group:
                    for mem in sub:
                        if mem.agentId != agent.agentId:
                            agentGroup.append(mem)
                # group visual and attraction forces
                if len(agentGroup) > 0:
                    groupVis = agent.groupVisual(agentGroup)
                    groupAtt = agent.groupAttraction(agentGroup + [agent])
                # same group repulsion
                for agentj in agentGroup:
                    ownGroupRep += agent.ownGroupRepulsion(agentj)
                groupInter = groupVis + groupAtt + ownGroupRep
                # other groups repulsion
                for gid,g in enumerate(agents):
                    if gid != agent.groupId:
                        # create list of 'other group' members
                        otherGroup = []
                        for sub in g:
                            otherGroup += sub
                        otherGroupRep += agent.otherGroupRepulsion(otherGroup)
                #print(otherGroupRep)
                # subgroup forces
                subgroupForce = agent.subgroupForces(group)
                # NOTE(review): otherGroupRep and subgroupForce are computed
                # above but deliberately excluded from the force sum below —
                # confirm whether that is intentional.
                sumForce = adapt + wallInter + peopleInter + groupInter# + otherGroupRep + subgroupForce
                accl = sumForce / agent.mass
                agent.actualV = agent.actualV + accl*0.5 # consider dt = 0.5
                agent.pos = agent.pos + agent.actualV*0.5
                # Mark goal reached once, recording the arrival tick.
                if (np.linalg.norm(agent.pos - agent.dest) < 2) & (agent.Goal == 0):
                    agent.Goal = 1
                    agent.timeOut = pygame.time.get_ticks()
                    #agent.timeOut = clock.get_time()/1000.0
                    print('Agent ', agent.agentId, 'reached goal at ', agent.timeOut)
    # Draw agents plus their actual (agent color) and desired (orange)
    # velocity vectors.
    for group in agents:
        for subgroup in group:
            for agent in subgroup:
                scPos = (agent.pos*10).astype(int) #worldCoord2ScreenCoord(agent.pos, SCREENSIZE, RESOLUTION)
                endPos = ((agent.pos + agent.actualV) * 10).astype(int)
                endPosDV = ((agent.pos + agent.desiredV) * 10).astype(int)
                pygame.draw.circle(screen, agent.color, scPos, AGENTSIZE, AGENTTHICKNESS)
                pygame.draw.circle(screen, agent.subgroupColor, scPos, 5, 3)
                pygame.draw.line(screen, agent.color, scPos, endPos, 2)
                pygame.draw.line(screen, [255,60,0], scPos, endPosDV, 2)
    pygame.display.flip()
    clock.tick(20)
    #clock.get_time
5,452 | 519dbe97ce9de30e616d660ef168e686c52b01b5 | #!/usr/bin/env python3
#
# main.py - By Steven Chen Hao Nyeo
# Graphical interface for Socionics Engine
# Created: August 8, 2019
import wx
from cognitive_function import *
from entity import Entity
from function_to_type import Translator
from function_analysis import *
class TypeFrame(wx.Frame):
    """Main window: two rows of cognitive-function buttons (dominant then
    auxiliary).  Once both functions are chosen, the resulting personality
    type is printed to stdout."""

    # Button label -> cognitive-function object; replaces the original
    # eight-branch if/elif chain in labelToFunction.
    _FUNCTIONS_BY_LABEL = {"Ni": Ni, "Ne": Ne, "Si": Si, "Se": Se,
                           "Ti": Ti, "Te": Te, "Fi": Fi, "Fe": Fe}

    def __init__(self, parent, title):
        # Create Frame (fixed size, not resizable)
        wx.Frame.__init__(self, parent, title = title, size = (530, 480), style = wx.DEFAULT_FRAME_STYLE ^ wx.RESIZE_BORDER)
        self.panel = wx.Panel(self)
        # The current list of cognitive functions entered into the system
        self.entityList = []
        # Arrays containing the rows of buttons for dominant and auxiliary functions
        self.domButtons = []
        self.auxButtons = []
        # Keep track of the current row of buttons to enable
        self.rowCount = 0
        # Setup for program interface
        self.row_1_y = 30
        self.row_2_y = 90
        self.row_3_y = 150
        wx.StaticText(self.panel, label = "Dominant Function:", pos = (30, self.row_1_y - 20))
        self.createCogButtons(0)
        wx.StaticText(self.panel, label = "Auxiliary Function:", pos = (30, self.row_2_y - 20))
        self.createCogButtons(1)

    def createCogButtons(self, row):
        """Create the eight cognitive-function buttons for *row*
        (0 = dominant, 1 = auxiliary; auxiliary starts disabled)."""
        # Keeps track of creation of dominant or auxiliary buttons
        cogButtons = self.domButtons if row == 0 else self.auxButtons
        # Create and bind the buttons to the event
        labels = ["N", "S", "T", "F"]
        for i in range(4):
            cogButtons.append(wx.Button(self.panel, label = labels[i] + "i", size = (50, 30) , pos = (30 + 120 * i, self.row_1_y if row == 0 else self.row_2_y)))
            cogButtons.append(wx.Button(self.panel, label = labels[i] + "e", size = (50, 30) , pos = (90 + 120 * i, self.row_1_y if row == 0 else self.row_2_y)))
        for i in range(8):
            self.Bind(wx.EVT_BUTTON, self.onclick_cogFunction, cogButtons[i])
        # The auxiliary buttons are disabled before the dominant function is entered
        if (row == 1):
            for button in self.auxButtons:
                button.Disable()

    def onclick_cogFunction(self, event):
        """Record the clicked function; after the second pick, print the type."""
        btnLabel = event.GetEventObject().GetLabel()
        # First row - dominant function
        if (self.rowCount == 0):
            # Disable the dominant function buttons
            self.rowCount = 1
            self.entityList.append(self.labelToFunction(btnLabel))
            for button in self.domButtons:
                button.Disable()
            # Re-enable only the auxiliary functions compatible with the
            # chosen dominant (opposite attitude, different dichotomy).
            for button in self.auxButtons:
                if (button.Label[1] == self.entityList[0].opposite().sublabel
                    and button.Label[0] != self.entityList[0].opposite_orientation().label
                    and button.Label[0] != self.entityList[0].label):
                    button.Enable()
        # Second row - auxiliary function
        else:
            self.entityList.append(self.labelToFunction(btnLabel))
            for button in self.auxButtons:
                button.Disable()
            if (len(self.entityList) == 2):
                e = Entity(self.entityList)
                print(Translator.translate_orientation(e) +
                      Translator.translate_observing(e) +
                      Translator.translate_decision_making(e) +
                      Translator.translate_perception(e))

    def labelToFunction(self, btnLabel):
        """Return the cognitive-function object for *btnLabel*.

        Raises KeyError for an unknown label (the original if/elif chain
        silently returned None, which would only fail later).
        """
        return self._FUNCTIONS_BY_LABEL[btnLabel]
if __name__ == "__main__":
    # Launch the GUI and hand control to the wx event loop.
    app = wx.App()
    frame = TypeFrame(None, title = "Socionics Engine")
    frame.Show()
    app.MainLoop()
|
5,453 | 81b920ab5417937dc0fc1c9675d393efc6a4d58d | import pandas as pd #@UnusedImport
import matplotlib.pyplot as plt
import matplotlib #@UnusedImport
import numpy as np #@UnusedImport
class Plotter():
    """Plot helpers for detector-measurement / area-per-ion analyses.

    NOTE(review): ``plt.cm.spectral`` was removed in matplotlib >= 2.2
    (renamed ``nipy_spectral``) — confirm the pinned matplotlib version
    before upgrading.
    """

    def __init__(self):
        self.red_hex_code = '#ff0000'
        # Palette used by GenericIndividualPlotMaker. The original code
        # referenced self.color_codes without ever defining it, which
        # raised AttributeError at runtime.
        self.color_codes = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728',
                            '#9467bd', '#8c564b', '#e377c2', '#7f7f7f']

    def AlkDMIonStatsSplitPlot(self, df):
        """Side-by-side API-vs-injection plots, one axis per instrument."""
        PV1_DataSets_lst = df[df['inst'] == 'PV1']['DataSet'].unique()
        PV2_DataSets_lst = df[df['inst'] == 'PV2']['DataSet'].unique()
        inst_sets = [PV1_DataSets_lst,PV2_DataSets_lst]
        ax_title = ['Peg-BT PV1', 'Peg-BT PV2']
        fig = plt.figure(figsize=(25,9))
        ax1 = fig.add_subplot(1,2,1)
        ax2 = fig.add_subplot(1,2,2)
        ax1.set_prop_cycle('color',plt.cm.spectral(np.linspace(0.1,0.9,4))) #@UndefinedVariable
        ax2.set_prop_cycle('color',plt.cm.spectral(np.linspace(0.1,0.9,4))) #@UndefinedVariable
        ax = [ax1,ax2]
        for a in range(2):
            ax[a].spines['right'].set_visible(False)
            ax[a].spines['top'].set_visible(False)
            ax[a].set_ylabel('Area Per Ion via Detector Measurement')
            ax[a].set_xlabel('Alkane Standard\nSample Injection Count')
            ax[a].set_title(ax_title[a])
            # One curve per data set, labelled by its voltage offset.
            for dset in inst_sets[a]:
                df_sliced = df[df['DataSet'] == dset].copy()
                offset = df_sliced['offset_volts'].iloc[2]
                dv = df_sliced['Det_Volts'].iloc[2]
                curve_label = 'Offset: +{v} v = {d} v'.format(v=offset, d=dv)
                ax[a].plot(df_sliced['Cumulative_Inj'], df_sliced['ave_api'], label=curve_label)
            ax[a].legend(loc='center', bbox_to_anchor=(0.17,-0.1))
        # plt.suptitle('Tracking Area Per Ion via Detector Measurement\nOver ~48 Hours of Continuous Sample Acquisition', fontsize=14)
        plt.savefig('DM_API_Analysis', bbox_inches='tight')
        plt.show()

    def AlkDMIonStatsPlot(self, df):
        """All data sets on one axis: average API vs cumulative injections."""
        DataSets_lst = df['DataSet'].unique()
        fig = plt.figure(figsize=(15.5,9))
        ax = fig.add_subplot(1,1,1)
        ax.set_prop_cycle('color',plt.cm.spectral(np.linspace(0.1,1.00,8))) #@UndefinedVariable
        for dset in DataSets_lst:
            df_sliced = df[df['DataSet'] == dset].copy()
            instrument = df_sliced['inst'].iloc[2]
            offset = df_sliced['offset_volts'].iloc[2]
            dv = df_sliced['Det_Volts'].iloc[2]
            curve_label = 'Inst: {i} - Offset: +{v} v = {d} v'.format(i=instrument, v=offset, d=dv)
            ax.plot(df_sliced['Cumulative_Inj'], df_sliced['ave_api'], label=curve_label)
        ax.spines['right'].set_visible(False)
        ax.spines['top'].set_visible(False)
        plt.ylabel('Ave. Aera Per Ion')
        plt.xlabel('Sample Injections')
        plt.title('Tracking Area Per Ion via Detector Measurement\nOver ~48 Hours of Continuous Sample Acquisition')
        legend_h_offset, legend_v_offset = 1.25, 0.75
        plt.legend(loc='center right', bbox_to_anchor=(legend_h_offset, legend_v_offset))
        plt.savefig('DM_API_Analysis', bbox_inches='tight')
        plt.show()

    def GenericIndividualPlotMaker(self, xdata_lst, ydata_lst, legendlbl_lst, xlbl, ylbl, plot_title, png_filename, legend_h_offset=1.25, legend_v_offset=0.75, legend_location='center'):
        """Plot matched x/y series pairs on a single axis and save a PNG.

        xdata_lst and ydata_lst are parallel lists of lists: sublists at
        the same index form one x-vs-y series and must be the same length;
        legendlbl_lst supplies one label per series.
        """
        fig = plt.figure(figsize=(15.5,9))
        ax = fig.add_subplot(1,1,1)
        for i in range(len(xdata_lst)):
            ax.plot(xdata_lst[i], ydata_lst[i], color=self.color_codes[i], label=legendlbl_lst[i])
        ax.spines['right'].set_visible(False)
        ax.spines['top'].set_visible(False)
        plt.ylabel(ylbl)
        plt.xlabel(xlbl)
        plt.title(plot_title)
        plt.legend(loc=legend_location, bbox_to_anchor=(legend_h_offset, legend_v_offset))
        plt.savefig(png_filename, bbox_inches='tight')

    def GenericCombinedPlotMaker(self, xdata_lst, ydata_lst, legendlbl_lst, xlbl, ylbl_lst, fig_title, png_filename, legend_h_offset=0.9, legend_v_offset=2.4, legend_location='center'):
        """2x2 grid of axes sharing x data.

        xdata_lst: list of per-series x lists (shared by all four axes).
        ydata_lst: list (one per axis) of lists of per-series y lists.
        legendlbl_lst: one label per series.  Bottom axes keep x tick
        labels; top axes have them stripped.
        """
        fig = plt.figure(figsize=(25,9))
        ax = []
        for a in range(4):
            ax.append(fig.add_subplot(2,2,1+a))
            ax[a].set_prop_cycle('color',plt.cm.spectral(np.linspace(0.25,0.84,2))) #@UndefinedVariable
            for s in range(len(xdata_lst)):
                ax[a].plot(xdata_lst[s], ydata_lst[a][s], label=legendlbl_lst[s])
                ax[a].spines['right'].set_visible(False)
                ax[a].spines['top'].set_visible(False)
                ax[a].set_ylabel(ylbl_lst[a])
                if (a == 2 or a == 3) and s == 1:
                    plt.xlabel(xlbl)
                elif (a == 0 or a == 1) and s == 1:
                    ax[a].set_xticklabels([])
                    ax[a].spines['bottom'].set_visible(False)
                    ax[a].xaxis.set_ticks_position('none')
        plt.suptitle(fig_title, fontsize=20)
        plt.legend(loc=legend_location, bbox_to_anchor=(legend_h_offset, legend_v_offset))
        plt.savefig(png_filename, bbox_inches='tight')

    def Manual_OFN20fg_IDL(self):
        """Hard-coded plot: OFN 20 fg IDL vs detector-voltage offset."""
        fig = plt.figure(figsize=(25,9))
        ax = fig.add_subplot(1,1,1)
        ax.set_prop_cycle('color',plt.cm.spectral(np.linspace(0.25,0.84,2))) #@UndefinedVariable
        xdata = [0,150,250,350]
        ydata = [[0.036614, 0.009674, 0.0056418, 0.004696],[0.0083151, 0.0044855, 0.0046082, 0.0033099]]
        legendlbl_lst = ['Peg BT - PV1', 'Peg BT - PV2']
        for s in range(len(ydata)):
            ax.plot(xdata, ydata[s], label=legendlbl_lst[s])
        ax.spines['right'].set_visible(False)
        ax.spines['top'].set_visible(False)
        ax.set_ylabel('IDL pg')
        ax.set_xlabel('Optimized Detector Voltage Offset (volts)')
        plt.legend()
        plt.suptitle('IDL vs Detector Voltage Offset\nOFN 0.02 pg On Column\nQuant Mass = 271.99', fontsize=20)
        plt.savefig('OFN_20fg_IDL_Plot', bbox_inches='tight')

    def Manual_GO_Plot(self):
        """Hard-coded plot: optimized-voltage drift vs offset."""
        fig = plt.figure(figsize=(25,9))
        ax = fig.add_subplot(1,1,1)
        ax.set_prop_cycle('color',plt.cm.spectral(np.linspace(0.25,0.84,2))) #@UndefinedVariable
        xdata = [0,150,250,350]
        ydata = [[-7.7, 26.5, 42.8, 66.1],[-8, 4.1, 13.5, 48.4]]
        legendlbl_lst = ['Peg BT - PV1', 'Peg BT - PV2']
        for s in range(len(ydata)):
            ax.plot(xdata, ydata[s], label=legendlbl_lst[s])
        ax.spines['right'].set_visible(False)
        ax.spines['top'].set_visible(False)
        ax.set_ylabel('Change in Optimized Detector Voltage')
        ax.set_xlabel('Optimized Detector Voltage Offset (volts)')
        plt.legend()
        # plt.suptitle('Change in Optimized Detector Voltage\nFrom the Beginning to the End of a Data Set', fontsize=20)
        plt.savefig('GO_Delta_Plot', bbox_inches='tight')
        plt.show()
5,454 | aa952e8f9a1855b5578cb26d6e5aca42605ee585 | # https://leetcode-cn.com/problems/zigzag-conversion/
# 6. Z 字形变换
class Solution:
    def convert(self, s: str, numRows: int) -> str:
        """Read *s* in zigzag order over *numRows* rows and return it row
        by row (LeetCode 6).

        The original body was an empty stub that always returned ''.
        Walks *s* once, bouncing a row index between 0 and numRows-1.
        """
        # With one row (or fewer chars than rows) the zigzag is the string itself.
        if numRows == 1 or numRows >= len(s):
            return s
        rows = [''] * numRows
        row, step = 0, 1
        for ch in s:
            rows[row] += ch
            if row == 0:
                step = 1            # heading down
            elif row == numRows - 1:
                step = -1           # bounced, heading up
            row += step
        return ''.join(rows)
|
5,455 | 0e47a7d9cd6809886674291d6a535dd18205a012 | #!/usr/bin/env python3
def GetDensity(T, P, config):
    """Ideal-gas density rho = P / (R * T), with R taken from the mixture
    section of *config*."""
    R = config["Flow"]["mixture"]["gasConstant"]
    return P / (T * R)
def GetViscosity(T, config):
    """Dynamic viscosity at temperature *T* for the configured model.

    Supported model types: "Constant", "PowerLaw" (fixed exponent 0.7)
    and "Sutherland".  Raises ValueError for an unknown type — the
    original used ``assert False``, which is stripped under ``python -O``
    and would silently fall through.
    """
    model = config["Flow"]["mixture"]["viscosityModel"]
    kind = model["type"]
    if kind == "Constant":
        return model["Visc"]
    if kind == "PowerLaw":
        return model["ViscRef"] * (T / model["TempRef"]) ** 0.7
    if kind == "Sutherland":
        return (model["ViscRef"] * (T / model["TempRef"]) ** 1.5) \
               * (model["TempRef"] + model["SRef"]) / (T + model["SRef"])
    raise ValueError("unknown viscosity model type: {!r}".format(kind))
|
5,456 | 4379d89c2ada89822acbf523d2e364599f996f8c | import numpy as np
import pandas as pd
import sklearn
import sklearn.preprocessing
import matplotlib.pyplot as plt
import tensorflow as tf
from enum import Enum
from pytalib.indicators import trend
from pytalib.indicators import base
class Cell(Enum):
    """Selectable recurrent-cell architectures for the model."""
    BasicRNN = 1
    BasicLSTM = 2
    LSTMCellPeephole = 3
    GRU = 4
# Fractions (in percent) of the windowed data reserved for validation/test.
valid_set_size_percentage = 10
test_set_size_percentage = 10
df = pd.read_csv('data_2019-01-06.csv')
# sort_values returns a NEW frame; the original discarded the result,
# leaving df unsorted — assign it back so downstream windows are in
# chronological order.
df = df.sort_values('Date')
# function for min-max normalization of stock
def normalize_data(df):
    """Scale each OHLCV column of *df* independently to [0, 1] in place.

    The same MinMaxScaler instance is re-fit for every column; that works
    because fit_transform refits from scratch on each call.
    """
    min_max_scaler = sklearn.preprocessing.MinMaxScaler()
    df['Open'] = min_max_scaler.fit_transform(df['Open'].values.reshape(-1, 1))
    df['High'] = min_max_scaler.fit_transform(df['High'].values.reshape(-1, 1))
    df['Low'] = min_max_scaler.fit_transform(df['Low'].values.reshape(-1, 1))
    df['Close'] = min_max_scaler.fit_transform(df['Close'].values.reshape(-1, 1))
    df['Volume'] = min_max_scaler.fit_transform(df['Volume'].values.reshape(-1, 1))
    return df
# function to create train, validation, test data given stock data and sequence length
def load_data(stock, seq_len):
    """Slice *stock* into overlapping windows of length seq_len and split them.

    Uses the module-level valid/test percentages. Each window's first
    seq_len - 1 rows are the input, its last row the target.

    Returns [x_train, y_train, x_valid, y_valid, x_test, y_test].
    """
    raw = stock.values  # convert to numpy array
    # Every possible window of seq_len consecutive rows.
    windows = np.array([raw[i: i + seq_len] for i in range(len(raw) - seq_len)])

    n_valid = int(np.round(valid_set_size_percentage / 100 * windows.shape[0]))
    n_test = int(np.round(test_set_size_percentage / 100 * windows.shape[0]))
    n_train = windows.shape[0] - (n_valid + n_test)

    x_train = windows[:n_train, :-1, :]
    y_train = windows[:n_train, -1, :]

    x_valid = windows[n_train:n_train + n_valid, :-1, :]
    y_valid = windows[n_train:n_train + n_valid, -1, :]

    x_test = windows[n_train + n_valid:, :-1, :]
    y_test = windows[n_train + n_valid:, -1, :]

    return [x_train, y_train, x_valid, y_valid, x_test, y_test]
# show predictions: 0 = open, 1 = close, 2 = highest, 3 = lowest, 4 = volume
def show_predictions(ft, y_test_pred):
    """Plot test targets vs predictions for feature column *ft* and print stats.

    ft indexes the feature: 0 = open, 1 = close, 2 = highest, 3 = lowest,
    4 = volume. Reads the module-level global `y_test`.
    """
    plt.figure(figsize=(15, 5))
    plt.subplot(1, 1, 1)

    plt.plot(np.arange(y_test.shape[0]),
             y_test[:, ft], color='black', label='test target')

    plt.plot(np.arange(y_test_pred.shape[0]),
             y_test_pred[:, ft], color='green', label='test prediction')

    plt.title('future stock prices')
    plt.xlabel('time [days]')
    plt.ylabel('normalized price')
    plt.legend(loc='best')

    # Count predictions whose relative error is below error_percent.
    x = 0
    error_percent = 5
    for index in range(0, len(y_test)):
        if (abs((y_test_pred[:, ft][index] - y_test[:, ft][index])) / abs(y_test[:, ft][index]) * 100) < error_percent:
            x += 1
    print("Percent of predictions which error is less then {}% = {}%".format(error_percent, x / len(y_test) * 100))

    # Calculating the direction between 2 points using true values and predicted values
    # (counts as correct when truth and prediction move the same way over
    # a `distance`-step span; ties count for both directions).
    z = 0
    distance = 10
    for index in range(distance, len(y_test)):
        if (y_test[:, ft][index - distance] <= y_test[:, ft][index] and y_test_pred[:, ft][index - distance] <=
            y_test_pred[:, ft]
    [index]) or (
                y_test[:, ft][index - distance] >= y_test[:, ft][index] and y_test_pred[:, ft][index - distance]
                >= y_test_pred[:, ft][index]):
            z += 1
    print("Percent of correct predicted direction = {}%".format(z / len(y_test) * 100))
    plt.show()
# choose one stock
df_stock = df.copy()
df_stock.drop(['Date'], 1, inplace=True)
cols = list(df_stock.columns.values)
# normalize stock
df_stock_norm = df_stock.copy()
df_stock_norm = normalize_data(df_stock_norm)
# create train, test data
seq_len = 50 # choose sequence length
x_train, y_train, x_valid, y_valid, x_test, y_test = load_data(df_stock_norm, seq_len)
index_in_epoch = 0
perm_array = np.arange(x_train.shape[0])
np.random.shuffle(perm_array)
# function to get the next batch
def get_next_batch(batch_size):
    """Return the next (x, y) training batch, reshuffling at each epoch boundary.

    Mutates the module-level `index_in_epoch` cursor and `perm_array`
    permutation; reads the global `x_train`/`y_train`.
    """
    global index_in_epoch, x_train, perm_array
    start = index_in_epoch
    index_in_epoch += batch_size

    # Epoch exhausted: reshuffle and restart from the beginning.
    if index_in_epoch > x_train.shape[0]:
        np.random.shuffle(perm_array)  # shuffle permutation array
        start = 0  # start next epoch
        index_in_epoch = batch_size

    end = index_in_epoch
    return x_train[perm_array[start:end]], y_train[perm_array[start:end]]
# parameters
CellType = Cell.BasicRNN
n_steps = seq_len - 1
n_inputs = 5
n_neurons = 200
n_outputs = 5
n_layers = 2
learning_rate = 0.001
batch_size = 50
n_epochs = 10
train_set_size = x_train.shape[0]
test_set_size = x_test.shape[0]
tf.reset_default_graph()
X = tf.placeholder(tf.float32, [None, n_steps, n_inputs])
y = tf.placeholder(tf.float32, [None, n_outputs])
if CellType == Cell.BasicRNN:
layers = [tf.contrib.rnn.BasicRNNCell(num_units=n_neurons, activation=tf.nn.elu)
for layer in range(n_layers)]
elif CellType == Cell.BasicLSTM:
layers = [tf.contrib.rnn.BasicLSTMCell(num_units=n_neurons, activation=tf.nn.elu)
for layer in range(n_layers)]
elif CellType == Cell.LSTMCellPeephole:
layers = [tf.contrib.rnn.LSTMCell(num_units=n_neurons,
activation=tf.nn.leaky_relu, use_peepholes=True)
for layer in range(n_layers)]
elif CellType == Cell.GRU:
layers = [tf.contrib.rnn.GRUCell(num_units=n_neurons, activation=tf.nn.leaky_relu)
for layer in range(n_layers)]
multi_layer_cell = tf.contrib.rnn.MultiRNNCell(layers)
rnn_outputs, states = tf.nn.dynamic_rnn(multi_layer_cell, X, dtype=tf.float32)
stacked_rnn_outputs = tf.reshape(rnn_outputs, [-1, n_neurons])
stacked_outputs = tf.layers.dense(stacked_rnn_outputs, n_outputs)
outputs = tf.reshape(stacked_outputs, [-1, n_steps, n_outputs])
outputs = outputs[:, n_steps - 1, :] # keep only last output of sequence
loss = tf.reduce_mean(tf.square(outputs - y)) # loss function = mean squared error
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
training_op = optimizer.minimize(loss)
# run graph
def train_data(model_name):
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
saver = tf.train.Saver()
for iteration in range(int(n_epochs * train_set_size / batch_size)):
x_batch, y_batch = get_next_batch(batch_size) # fetch the next training batch
sess.run(training_op, feed_dict={X: x_batch, y: y_batch})
if iteration % int(5 * train_set_size / batch_size) == 0:
mse_train = loss.eval(feed_dict={X: x_train, y: y_train})
mse_valid = loss.eval(feed_dict={X: x_valid, y: y_valid})
print('%.2f epochs: MSE train/valid = %.6f/%.6f' % (
iteration * batch_size / train_set_size, mse_train, mse_valid))
saver.save(sess, 'train_models/' + model_name)
def test(model_name):
saver = tf.train.Saver()
with tf.Session() as sess:
saver.restore(sess, 'train_models/' + model_name)
y_test_pred = sess.run(outputs, feed_dict={X: x_test})
show_predictions(1, y_test_pred)
model = ['train_model', 'train_model_LSTM', 'train_model_with_batch_500', 'train_model_with_layers_4',
'train_model_with_volume', 'model_seq_len_100', "model_GRU", 'model_LSTM_pipehole']
# train_data(model[0])
# test(model[0])
y_new = []
for i in y_test:
y_new.append(i[1] * 10000)
macd = trend.MovingAverageConvergenceDivergence(y_new)
print(macd.calculate())
macd.validate()
tt = trend.ExponentialMovingAverage(y_new, 10)
print(tt.calculate())
plt.figure(figsize=(15, 5))
plt.subplot(1, 1, 1)
plt.plot(np.arange(len(y_new)), y_new, color='black', label='test target')
plt.plot(np.arange(len(macd.macd)), tt.calculate(), color='green', label='test prediction')
plt.plot(np.arange(len(macd.macd)), macd.macd_signal_line, color='red', label='test prediction')
plt.show()
|
5,457 | 4d82e68faa3102fc2949fd805588504b7d874589 | import os, sys, datetime, pytz, tzlocal, urllib.request, requests, csv, hashlib, json, boto3
uri = 'ftp://ftpcimis.water.ca.gov/pub2/daily/daily107.csv' #Station 107 is Santa Barbara
base_et = 0.15
def main():
    """Fetch yesterday's CIMIS ET, scale the OpenSprinkler water level, notify."""
    # Compute the temp path outside the try so the except message can use it.
    tempfile = tempfile_name()
    try:
        get_datafile(tempfile)
    except Exception:
        print("Could not retrieve datafile " + tempfile)
        exit(-1)

    et = get_yesterdays_et(tempfile)
    if et == -1.0:
        # Bug fix: the original referenced an undefined local `datestr`
        # here, raising NameError on the failure path.
        print("No et found for " + yesterday())
        exit(-1)

    # Water level is the ratio of observed ET to the baseline, as a percent.
    new_water_level = int(et / base_et * 100)
    print("New Water Level will be %d" % new_water_level)
    status = set_os_et(new_water_level)
    notify(status)
    exit(0)
def yesterday():
    """Return yesterday's local date as a string like '6/4/2021' (no zero-pad)."""
    now_local = datetime.datetime.now(datetime.timezone.utc).astimezone(
        tzlocal.get_localzone())
    one_day = datetime.timedelta(1)
    return (now_local - one_day).strftime("%-m/%-d/%Y")
def get_yesterdays_et(tempfile):
    """Scan the downloaded CIMIS CSV for yesterday's row; return its ET value.

    Returns -1.0 when no row matches yesterday's date. Deletes *tempfile*
    either way.
    """
    datestr = yesterday()
    et = -1.0
    with open(tempfile, 'r') as tmp:
        rdr = csv.reader(tmp)
        for r in rdr:
            # NOTE(review): column 1 assumed to be the date and column 3 the
            # daily ET -- confirm against the CIMIS daily CSV layout.
            if r[1] == datestr:
                et = float(r[3])
    # NOTE: this prints even when nothing matched (et stays -1.0).
    print("Found et for " + datestr + ": " + str(et))
    os.remove(tempfile)
    return et
def tempfile_name():
    """Return a unique scratch-file path in /tmp, keyed by the current timestamp."""
    stamp = datetime.datetime.now().timestamp()
    return '/tmp/get_et_rate_{}.csv'.format(stamp)
def get_datafile(tempfile):
    """Download the CIMIS daily CSV (module-level `uri`, FTP) to *tempfile*."""
    global uri
    urllib.request.urlretrieve(uri, tempfile)
def get_password():
    """Read the OpenSprinkler password from the environment, return its MD5 hex.

    The OpenSprinkler HTTP API authenticates with md5(password), so the
    digest -- not the raw password -- is what goes on the wire. Exits the
    process when the env variable is missing.
    """
    try:
        pw = os.environ['OPENSPRINKLER_PASSWORD']
    except KeyError:
        # Narrowed from a bare `except` so unrelated errors aren't swallowed.
        print("OpenSprinkler password not set in env variable OPENSPRINKLER_PASSWORD")
        exit(-1)
    m = hashlib.md5()
    m.update(pw.encode('ascii'))
    return m.hexdigest()
def set_os_et(new_water_level):
    """Set the OpenSprinkler water-level option via HTTP; return a log string.

    Reads the old level (jo), writes option o23 (co), then re-reads it.
    NOTE(review): the controller address 192.168.1.13 is hard-coded.
    """
    hash = get_password()
    status = ""
    r = requests.get('http://192.168.1.13/jo?pw=' + hash)
    res = json.loads(r.text)
    # NOTE(review): `"%s" % {res['wl']}` formats a *set* literal (e.g. "{100}");
    # probably meant `res['wl']` directly -- confirm before changing the log.
    status = status + "Old water level: %s\n" % {res['wl']}
    r = requests.get('http://192.168.1.13/co?pw=%s&o23=%d' % (hash, new_water_level))
    r = requests.get('http://192.168.1.13/jo?pw=' + hash)
    res = json.loads(r.text)
    status = status + "Successfully set to new value %s\n" % {res['wl']}
    return status
def notify(status):
    """Publish *status* to the hard-coded AWS SNS topic (profile 'trbryan')."""
    session = boto3.Session(profile_name='trbryan')
    sns = session.client('sns')
    response = sns.publish(
        TopicArn='arn:aws:sns:us-west-2:509611857908:opensprinkler_et_update',
        Message=status,
        Subject='Daily OpenSprinkler ET Adjustment',
        MessageStructure='string',
    )
if __name__ == "__main__": main() |
5,458 | 382f7119beba81087c497baf170eb6814c26c03e | """byte - property model module."""
from __future__ import absolute_import, division, print_function
class BaseProperty(object):
    """Abstract base class for properties; subclasses implement get/set."""

    def get(self, obj):
        """Get property value from object.

        :param obj: Item
        :type obj: byte.model.Model

        :raises NotImplementedError: always; subclasses must override.
        """
        raise NotImplementedError

    def set(self, obj, value):
        """Set property value on object.

        :param obj: Item
        :type obj: byte.model.Model

        :param value: Value
        :type value: any

        :raises NotImplementedError: always; subclasses must override.
        """
        raise NotImplementedError
|
5,459 | beb9fe8e37a4f342696a90bc624b263e341e4de5 | #!/usr/bin/python
L = ['ABC', 'ABC']

# Transpose the words: con1[i] collects the i-th character of every word.
con1 = []
for col in range(len(L[1])):
    column_chars = []
    for row in range(len(L)):
        print(L[row][col])
        column_chars.append(L[row][col])
    con1.append(column_chars)

# Classify each column: 'a' when both chars are 'A', 'b' when both 'B', else 'n'.
con2 = []
for column_chars in con1:
    if column_chars.count('A') == 2:
        con2.append('a')
    elif column_chars.count('B') == 2:
        con2.append('b')
    else:
        con2.append('n')
|
5,460 | 8f14bbab8b2a4bc0758c6b48feb20f8b0e3e348b | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# File: software/jetson/fastmot/utils/sot.py
# By: Samuel Duclos
# For: Myself
# Description: This file returns detection results from an image.
from cvlib.object_detection import draw_bbox
class ObjectCenter(object):
    """Filters detector output to the biggest object of one category and
    reports its center so a tracker can keep it in frame.

    Predictions are expected as a 3-tuple (bboxes, labels, confs).
    """

    def __init__(self, args):
        """Initialize variables."""
        self.args = args

    def load_classes(self, path):
        """Read class names from *path*, one per line, dropping empties."""
        with open(path, 'r') as names_file:
            names = names_file.read().split('\n')
        return list(filter(None, names))

    def _filter_(self, frame, predictions):
        """Apply object detection."""
        # Unless filtering is disabled, map the configured category name to
        # its index in the names file and keep only that category.
        if not self.args.no_filter_object_category:
            names = self.load_classes(self.args.names)
            object_category = names.index(self.args.object_category)
            predictions = self.filter_inference_results(predictions,
                                                        object_category=object_category)
        return predictions

    def filter_inference_results(self, predictions, object_category='person'):
        """Return bounding box of biggest object of selected category."""
        if predictions is not None and len(predictions) == 3:
            bboxes, labels, confs = predictions

            # Only return bounding boxes for the selected object category.
            category_bboxes = [(bbox,
                                label,
                                conf) for (bbox,
                                           label,
                                           conf) in zip(bboxes,
                                                        labels,
                                                        confs) if (label == object_category).any()]

            if len(category_bboxes) > 0:
                # Choose biggest object of selected category.
                biggest_bbox = None
                biggest_label = None
                biggest_conf = None
                most_pixels = 0
                for (bbox, label, conf) in category_bboxes:
                    (x, y, w, h) = bbox
                    n_pixels = w * h
                    if n_pixels > most_pixels:
                        most_pixels = n_pixels
                        biggest_bbox = bbox
                        biggest_label = label
                        biggest_conf = conf
                category_bboxes = ([biggest_bbox], [biggest_label], [biggest_conf])
            predictions = category_bboxes
        return predictions

    def update(self, predictions, frame, frameCenter):
        """Asynchronous update of detection results to return object center."""
        if len(predictions) > 0:
            # predictions[0][0] is the (x, y, w, h) box of the chosen object.
            (x, y, w, h) = predictions[0][0]
            objectX = int(x + (w / 2.0))
            objectY = int(y + (h / 2.0))
            return ((objectX, objectY), predictions)
        else:
            # Nothing detected: report the frame center so the tracker holds.
            return (frameCenter, None)

    def filter_objects(self, frame, predictions, object_x=None, object_y=None, center_x=None, center_y=None):
        """Apply object detection."""
        # NOTE(review): object_x/object_y/center_x/center_y are written via
        # `.value` -- presumably multiprocessing shared values; confirm.
        predictions = self._filter_(frame, predictions)
        if predictions is not None and len(predictions) > 0:
            if predictions[0][0] is not None and len(predictions) == 3:
                # NOTE(review): predictions[0][0] is a bbox 4-tuple elsewhere in
                # this class; unpacking it into 3 names looks inconsistent --
                # verify the shape actually produced upstream.
                bbox, label, conf = predictions[0][0]
                # Calculate the center of the frame since we will be trying to keep the object there.
                (H, W) = frame.shape[:2]
                center_x.value = W // 2
                center_y.value = H // 2
                object_location = self.update(predictions, frame, (center_x.value, center_y.value))
                ((object_x.value, object_y.value), predictions) = object_location
                if self.args.no_show:
                    return None
                else:
                    # Draw bounding box over detected objects.
                    inferred_image = draw_bbox(frame, bbox, label, conf, write_conf=True)
                    return inferred_image
|
5,461 | bb730606c7357eeb605292d5b9c05e8e8a797ea2 | n,k=[int(input()) for _ in range(2)]
ans=1
# Each of the n steps either doubles the count or adds k, whichever is smaller
# (doubling wins only while ans <= k).
for _ in range(n):
    ans=min(ans*2,ans+k)
print(ans)
|
5,462 | 4c38d0487f99cdc91cbce50079906f7336e51482 | from platypush.message.response import Response
class CameraResponse(Response):
    """Marker response type for camera plugins; adds nothing beyond Response."""
    pass
# vim:sw=4:ts=4:et:
|
5,463 | b72bf00d156862c7bddecb396da3752be964ee66 | # SaveIsawQvector
# NOTE: Python 2 script (bare `print` statements) driving Mantid's
# SaveIsawQvector over a list of TOPAZ run numbers.
import sys
import os
# Pick the Mantid install location: Linux cluster path when present,
# otherwise a Windows PC install.
if os.path.exists("/opt/Mantid/bin"):
    sys.path.append("/opt/mantidnightly/bin")
    #sys.path.append("/opt/Mantid/bin")  # Linux cluster
    #sys.path.append('/opt/mantidunstable/bin')
else:
    sys.path.append("C:/MantidInstall/bin")  # Windows PC

# import mantid
from mantid.simpleapi import *

# First line of SaveIsawQvector.inp: input data directory; second: output dir.
user_input = open('SaveIsawQvector.inp', 'r')
lineString = user_input.readline()
lineList = lineString.split()
data_directory = lineList[0]
lineString = user_input.readline()
lineList = lineString.split()
output_directory = lineList[0]

# One run number per line (first token) in monitorCtsAndAngles.dat.
input_run_nums = open('monitorCtsAndAngles.dat', 'r')

# Event filtering window: TOF in [2000, 16500], full time range.
min_tof = 2000
max_tof = 16500
start_time = 0.0
stop_time = 1.0e06

while True:
    lineString = input_run_nums.readline()
    lineList = lineString.split()
    if len(lineList) == 0: break
    run_num = lineList[0]
    print run_num
    full_name = data_directory + run_num + '_event.nxs'
    event_ws = 'TOPAZ_' + run_num
    # Load, convert to a Q-vector binary file, then free the workspace.
    LoadEventNexus( Filename = full_name, OutputWorkspace = event_ws,
        FilterByTofMin = min_tof, FilterByTofMax = max_tof,
        FilterByTimeStart = start_time, FilterByTimeStop = stop_time )
    outputFile = output_directory + run_num + '_SaveIsawQvector.bin'
    SaveIsawQvector(InputWorkspace = event_ws,
        Filename = outputFile)
    DeleteWorkspace(Workspace = event_ws)

print 'All done!'
|
5,464 | 1599f5e49ec645b6d448e74719e240343077aedd | from django.conf.urls import patterns, include, url
from django.contrib import admin
from metainfo.views import DomainListView
# URL routes: domain list at the site root, the Django admin, and the
# metainfo app mounted under /domains/.
urlpatterns = patterns('',
    # Examples:
    # url(r'^$', 'metapull.views.home', name='home'),
    # url(r'^blog/', include('blog.urls')),
    url(r'^$', DomainListView.as_view()),
    url(r'^admin/', include(admin.site.urls)),
    url(r'^domains/', include('metainfo.urls', namespace = 'domains')),
)
|
5,465 | 1b09b18926dc95d4c4b3088f45088f12c162ccb3 | # -*- coding: utf-8 -*-
a=float(input('Digite um número:'))
# Integer part: a minus its remainder mod 1 (floors toward -inf for negatives).
b=(a-(a%1))
# Fractional part via modulo 1 (always non-negative in Python).
c=(a%1)
print('O valor inteiro é %d' %b)
print('O valor decimal é %.6f' %c)
5,466 | 9e987e057ee5322765415b84e84ef3c4d2827742 | # Copyright (c) 2008-2016 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""Test the `interpolation` module."""
from __future__ import division
import logging
import numpy as np
from numpy.testing import assert_almost_equal, assert_array_almost_equal
import pytest
from scipy.spatial import cKDTree, Delaunay
from scipy.spatial.distance import cdist
from metpy.cbook import get_test_data
from metpy.gridding.gridding_functions import calc_kappa
from metpy.gridding.interpolation import (barnes_point, barnes_weights, cressman_point,
cressman_weights, inverse_distance,
natural_neighbor, nn_point)
from metpy.gridding.triangles import dist_2, find_natural_neighbors
logging.getLogger('metpy.gridding.interpolation').setLevel(logging.ERROR)
@pytest.fixture()
def test_data():
    r"""Return data used for tests in this file."""
    # Ten scattered observation points (x, y) with matching values z.
    x = np.array([8, 67, 79, 10, 52, 53, 98, 34, 15, 58], dtype=float)
    y = np.array([24, 87, 48, 94, 98, 66, 14, 24, 60, 16], dtype=float)
    z = np.array([0.064, 4.489, 6.241, 0.1, 2.704, 2.809, 9.604, 1.156,
                  0.225, 3.364], dtype=float)
    return x, y, z
@pytest.fixture()
def test_grid():
    r"""Return grid locations used for tests in this file."""
    # Grid coordinates are pre-computed and shipped as a test fixture file.
    with get_test_data('interpolation_test_grid.npz') as fobj:
        data = np.load(fobj)
    return data['xg'], data['yg']
def test_natural_neighbor(test_data, test_grid):
    r"""Test natural neighbor interpolation function."""
    xp, yp, z = test_data
    xg, yg = test_grid

    img = natural_neighbor(xp, yp, z, xg, yg)

    # Compare against a stored reference image generated on the same grid.
    with get_test_data('nn_bbox0to100.npz') as fobj:
        truth = np.load(fobj)['img']

    assert_array_almost_equal(truth, img)
interp_methods = ['cressman', 'barnes']
@pytest.mark.parametrize('method', interp_methods)
def test_inverse_distance(method, test_data, test_grid):
    r"""Test inverse distance interpolation function."""
    xp, yp, z = test_data
    xg, yg = test_grid

    # Per-method keyword arguments and matching reference-output file.
    extra_kw = {}
    if method == 'cressman':
        extra_kw['r'] = 20
        extra_kw['min_neighbors'] = 1
        test_file = 'cressman_r20_mn1.npz'
    elif method == 'barnes':
        extra_kw['r'] = 40
        extra_kw['kappa'] = 100
        test_file = 'barnes_r40_k100.npz'

    img = inverse_distance(xp, yp, z, xg, yg, kind=method, **extra_kw)

    with get_test_data(test_file) as fobj:
        truth = np.load(fobj)['img']

    assert_array_almost_equal(truth, img)
def test_nn_point(test_data):
    r"""Test find natural neighbors for a point interpolation function."""
    xp, yp, z = test_data

    # Triangulate the observation points, then interpolate at (30, 30).
    tri = Delaunay(list(zip(xp, yp)))

    sim_gridx = [30]
    sim_gridy = [30]

    members, tri_info = find_natural_neighbors(tri,
                                               list(zip(sim_gridx, sim_gridy)))

    val = nn_point(xp, yp, z, [sim_gridx[0], sim_gridy[0]],
                   tri, members[0], tri_info)

    truth = 1.009

    assert_almost_equal(truth, val, 3)
def test_barnes_weights():
    r"""Test Barnes weights function."""
    kappa = 1000000

    gamma = 0.5

    # Squared distances; weights are scaled by 1e7 so the reference values
    # are representable with fixed decimals.
    dist = np.array([1000, 2000, 3000, 4000])**2

    weights = barnes_weights(dist, kappa, gamma) * 10000000

    truth = [1353352.832366126918939,
             3354.626279025118388,
             .152299797447126,
             .000000126641655]

    assert_array_almost_equal(truth, weights)
def test_cressman_weights():
    r"""Test Cressman weights function."""
    # Influence radius and squared distances of four sample points.
    r = 5000

    dist = np.array([1000, 2000, 3000, 4000])**2

    weights = cressman_weights(dist, r)

    truth = [0.923076923076923,
             0.724137931034482,
             0.470588235294117,
             0.219512195121951]

    assert_array_almost_equal(truth, weights)
def test_cressman_point(test_data):
    r"""Test Cressman interpolation for a point function."""
    xp, yp, z = test_data

    r = 40

    # Gather all observations within radius r of (30, 30) via a KD-tree.
    obs_tree = cKDTree(list(zip(xp, yp)))

    indices = obs_tree.query_ball_point([30, 30], r=r)

    dists = dist_2(30, 30, xp[indices], yp[indices])
    values = z[indices]

    truth = 1.05499444404

    value = cressman_point(dists, values, r)

    assert_almost_equal(truth, value)
def test_barnes_point(test_data):
    r"""Test Barnes interpolation for a point function."""
    xp, yp, z = test_data

    r = 40

    # Gather all observations within radius r of (60, 60) via a KD-tree.
    obs_tree = cKDTree(list(zip(xp, yp)))

    indices = obs_tree.query_ball_point([60, 60], r=r)

    dists = dist_2(60, 60, xp[indices], yp[indices])
    values = z[indices]

    truth = 4.08718241061

    # kappa is derived from the mean pairwise station spacing.
    ave_spacing = np.mean((cdist(list(zip(xp, yp)), list(zip(xp, yp)))))
    kappa = calc_kappa(ave_spacing)

    value = barnes_point(dists, values, kappa)

    assert_almost_equal(truth, value)
|
5,467 | cfc0ca0d8528937526f6c42721870f1739a2ae95 | from turtle import Screen
import time
from snake import Snake
from snake_food import Food
from snake_score import Scoreboard
# Screen setup: 600x600 board, manual redraws (tracer(0) + update()).
screen = Screen()
screen.setup(width=600,height=600)
screen.bgcolor("black")
screen.title("Snake Game")
screen.tracer(0)
snake = Snake()
food=Food()
score=Scoreboard()
# Arrow keys steer the snake.
screen.listen()
screen.onkey(snake.up,"Up")
screen.onkey(snake.down,"Down")
screen.onkey(snake.left,"Left")
screen.onkey(snake.right,"Right")
game_is_on = True
while game_is_on:
    screen.update()
    time.sleep(0.1)
    snake.move()
    # Head within 15px of food: eat, grow, score.
    if (snake.segment[0].distance(food))<15:
        food.refresh()
        # screen.update()
        snake.extend()
        score.increase_score()
    # Wall collision ends the game (board edges at +/-280).
    if snake.segment[0].xcor() > 280 or snake.segment[0].xcor() < -280 or snake.segment[0].ycor() > 280 or snake.segment[0].ycor() < -280:
        game_is_on=False
        score.gameover()
    # Self collision: head touching any body segment ends the game.
    for seg in snake.segment:
        if (seg==snake.segment[0]):
            continue
        if snake.segment[0].distance(seg)<10:
            game_is_on = False
            score.gameover()
screen.exitonclick()
5,468 | 9adf18b3a65bf58dd4c22a6fe026d0dd868533fb | from .models import Stock
from .serializers import StockSerializer
from rest_framework import generics
class StockListCreate(generics.ListCreateAPIView):
    """DRF view: GET lists all Stock rows, POST creates a new one."""
    queryset = Stock.objects.all()
    serializer_class = StockSerializer
|
5,469 | a76a0631c97ba539019790e35136f6fd7573e461 | from google.cloud import pubsub_v1
import os
from flask import Flask, request, jsonify
from google.cloud import pubsub_v1
import os
from gcloud import storage
import json
import datetime
import time
app = Flask(__name__)
os.environ[
"GOOGLE_APPLICATION_CREDENTIALS"] = "/home/vishvesh/Documents/Dal/serverless/api-8566414966874230052-395627-4fca061a25a4.json"
project_id = "api-8566414966874230052-395627"
client = storage.Client()
bucket = client.get_bucket('publisher_files')
@app.route('/publish', methods=['GET', 'POST'])
def publish():
    """Publish `msg` to the recipient's Pub/Sub topic and archive it to GCS.

    Query params: touser (topic), fromuser (subscription), msg (payload).
    Topic/subscription creation failures are ignored (already-exists case).
    """
    topic_user = request.args.get('touser')
    sub_user = request.args.get('fromuser')
    subscription_id = sub_user
    msg = request.args.get('msg')
    topic_id = topic_user
    publisher = pubsub_v1.PublisherClient()
    topic_name = 'projects/{project_id}/topics/{topic}'.format(
        project_id=project_id,
        topic=topic_id,  # Set this to something appropriate.
    )
    try:
        pub = publisher.create_topic(topic_name)
        print("created pub", pub)
    except Exception as e:
        # Most likely AlreadyExists; logged and ignored.
        print(e,"------e------")
        pass
    subscriber = pubsub_v1.SubscriberClient()
    topic_name = 'projects/{project_id}/topics/{topic}'.format(
        project_id=project_id,
        topic=topic_id,
    )
    subscription_name = 'projects/{project_id}/subscriptions/{sub}'.format(
        project_id=project_id,
        sub=subscription_id,
    )
    try:
        sub = subscriber.create_subscription(
            name=subscription_name, topic=topic_name)
        print("created", sub)
    except Exception as e:
        print(e, "--------e----------")
        pass
    pub_msg = publisher.publish(topic_name, str.encode(msg))
    print("msg sent",pub_msg)
    # Archive the message as a timestamped JSON blob in the GCS bucket,
    # then remove the local temp file.
    data = {"msg": str(msg)}
    ts = time.time()
    st = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')
    name = topic_user + str(st)
    json.dump(data, open(name + ".json", 'w'))
    blob = bucket.blob(name + ".json")
    blob.upload_from_filename(name + ".json")
    blob.make_public()
    os.remove(name + ".json")
    return str(data)
# SUBSCRIBER
@app.route('/subscribe', methods=['GET', 'POST'])
def subscribe():
    """Drain up to 4 seconds of messages from the caller's subscription.

    Query params: touser (topic owner, unused beyond naming), fromuser
    (subscription id). Returns the collected messages as a JSON list.
    """
    topic_user = request.args.get('touser')
    sub_user = request.args.get('fromuser')
    topic_id = topic_user
    subscription_id = sub_user
    msg_list = []
    subscriber = pubsub_v1.SubscriberClient()
    # topic_name = 'projects/{project_id}/topics/{topic}'.format(
    #     project_id=project_id,
    #     topic=topic_id,
    # )
    subscription_name = 'projects/{project_id}/subscriptions/{sub}'.format(
        project_id=project_id,
        sub=subscription_id,  # Set this to something appropriate.
    )
    #
    # try:
    #     sub = subscriber.create_subscription(
    #         name=subscription_name, topic=topic_name)
    #     print("created", sub)
    # except Exception as e:
    #     print(e,"--------e----------")
    #     pass
    def callback(message):
        # Collect and ack each streamed message.
        print(message.data)
        msg_list.append(message.data.decode('utf-8'))
        message.ack()
    future = subscriber.subscribe(subscription_name, callback)
    try:
        # Block for up to 4s; the timeout is the normal exit path.
        f = future.result(timeout=4.0)
        print(f,type(f))
    except Exception as e:
        future.cancel()
        pass
    # subscriber.close()
    return jsonify(msg_list)
if __name__ == "__main__":
app.run(debug=True, host='0.0.0.0',
port=int(os.environ.get(
'PORT', 8080)))
|
5,470 | 4cdd5fc15096aac01ad6d97d38ef7397859de18b | import json
import urllib
import urllib.request
# Repeatedly prompt for a URL, fetch it, parse the JSON body and sum the
# "count" field of every comment. An empty URL ends the loop.
while True:
    # Get input URL. Bug fix: the script already used Python 3 style
    # multi-arg print(), but called Python-2-only raw_input().
    url = input("Enter URL: ")
    # Check valid input
    if len(url) < 1:
        break

    # Get data. Bug fix: urllib.urlopen is Python 2; use urllib.request.
    print("Retrieving", url)
    connection = urllib.request.urlopen(url)
    data = connection.read()
    print("Retrieved", len(data), "characters")

    # Parse and deserialize. Bug fix: str(bytes) yields "b'...'" which never
    # parses; json.loads accepts bytes directly.
    try:
        js = json.loads(data)
    except ValueError:
        js = None
    print(json.dumps(js, indent=4))

    comments = js["comments"]
    result = 0
    for comment in comments:
        result += comment["count"]
    print("\n")
    print("Result = {}".format(result))
5,471 | 20c081dc47f541a988bccef89b8e51f446c80f58 | # terminal based game in Python
from random import randint
# Terminal-based guessing game: the player picks 1-9 rounds, then guesses a
# random 1-10 number each round with higher/lower hints.
print('Terminal based number guessing game')

while True:
    try:
        numberOfGames = int(input('Please choose how many games you want to play ---> '))
    except ValueError:  # narrowed from a bare except
        print('Only numbers accepted')  # fixed typo: 'numbes'
        continue
    if 0 < numberOfGames < 10:
        break

randomNumbers = [randint(1, 10) for _ in range(numberOfGames)]

for index, number in enumerate(randomNumbers):
    print('Game %i' % (index + 1))
    guess = 0
    attempts = 0
    while guess != number:
        try:
            guess = int(input('Guess the number ---> '))
        except ValueError:
            print('Only numbers accepted')
            continue
        # Note: a correct guess falls into the 'smaller' branch before the
        # loop exits, matching the original behavior.
        if guess > number:
            print('Your number is bigger!')
        else:
            print('Your number is smaller!')
        attempts += 1
    print('Great you guessed it! Attempts %i' % attempts)
    attempts = 0
|
5,472 | 6bde0ce30f33b155cc4c9ce9aa2ea6a6c5a1231d | """Coroutine utilities."""
from decorator import decorator
@decorator
def coroutine(f, *a, **kw):
    """This decorator starts the coroutine for us.

    It advances the generator to its first yield so callers can send()
    immediately.
    """
    i = f(*a, **kw)
    # Use the builtin next() (Py2.6+/Py3) instead of the Py2-only .next().
    next(i)
    return i
|
5,473 | f1601d3d820b93631f9b1358627a5716016ad135 | import os
def is_admin():
    """
    Return True when the calling process is running with
    administrator/superuser privileges. Works cross-platform on either
    Windows NT systems or Unix-based systems.
    """
    if os.name == 'nt':
        try:
            # Only Windows users with admin privileges can read
            # the C:\windows\temp directory.
            os.listdir(os.sep.join([os.environ.get('SystemRoot', 'C:\\windows'), 'temp']))
        except OSError:
            # Narrowed from a bare `except:` so unrelated errors
            # (e.g. KeyboardInterrupt) aren't reported as "not admin".
            return False
        else:
            return True
    else:
        # Root has UID 0 on Unix systems; the SUDO_USER check preserves the
        # original behavior of requiring a sudo-elevated session.
        return 'SUDO_USER' in os.environ and os.geteuid() == 0
|
5,474 | 021f224d031477bd305644261ad4d79d9eca98b3 | import pytest
from flaat.issuers import IssuerConfig, is_url
from flaat.test_env import FLAAT_AT, FLAAT_ISS, environment
class TestURLs:
    """Unit tests for flaat.issuers.is_url covering the common URL schemes."""

    def test_url_1(self):
        assert is_url("http://heise.de")

    def test_valid_url_http(self):
        assert is_url("http://heise.de")

    def test_valid_url_https(self):
        # Fixed copy-paste: this case is meant to exercise the https scheme
        # (it previously re-tested http://).
        assert is_url("https://heise.de")

    def test_valid_url_ftp(self):
        # Fixed copy-paste: this case is meant to exercise the ftp scheme
        # (it previously re-tested http://).
        assert is_url("ftp://heise.de")

    def test_valid_url_https_path(self):
        assert is_url("https://heise.de/thi_s&is=difficult")

    def test_invalid_url(self):
        assert not is_url("htp://heise.de")
def test_token_introspection():
    """Introspect a known access token against the configured issuer.

    Skipped unless FLAAT_CLIENT_ID / FLAAT_CLIENT_SECRET are set, since
    introspection requires confidential client credentials.
    """
    client_id = environment.get("FLAAT_CLIENT_ID")
    client_secret = environment.get("FLAAT_CLIENT_SECRET")
    if client_id is None or client_secret is None:  # pragma: no cover
        pytest.skip("FLAAT_CLIENT_ID and FLAAT_CLIENT_SECRET are not set")

    issuer_config = IssuerConfig.get_from_string(FLAAT_ISS)
    assert issuer_config is not None

    issuer_config.client_id = client_id
    issuer_config.client_secret = client_secret
    introspection_info = issuer_config._get_introspected_token_info(FLAAT_AT)
    assert introspection_info is not None
|
5,475 | 0f0adde7241898d2efe7e2b5cc218e42ed7b73d8 | from functools import reduce
from collections import defaultdict
def memory(count: int, start_numbers: list):
    """Play the 'memory game' (Van Eck sequence, AoC 2020 day 15).

    Each number maps to its (last, previous) spoken turns; a number seen
    once yields 0, otherwise the gap between its last two turns.

    Args:
        count: how many numbers to speak in total.
        start_numbers: the seed sequence.

    Returns:
        The count-th number spoken (also printed, as before; previously the
        function only printed and returned None, making it unusable/untestable).
    """
    numbers = defaultdict(lambda: (None, None),
                          {el: (idx, None) for idx, el in enumerate(start_numbers)})
    last = start_numbers[-1]
    for idx in range(len(numbers), count):
        most_recent, before_that = numbers[last]
        last = 0 if before_that is None else most_recent - before_that
        numbers[last] = (idx, numbers[last][0])
    print(f"For starting numbers: {start_numbers}, the {count}th number is: {last}")
    return last
[ memory(count, [8,0,17,4,1,12]) for count in [ 2020, 30000000 ] ]
|
5,476 | b794a4cca3303ac7440e9aad7bc210df62648b51 | from pkg.models.board import Board
class BaseAI:
    """Common base for board-game AIs: holds a Board and exposes find_move()."""

    # The board this AI searches; attached via __init__ or set_board().
    _board: Board = None

    def __init__(self, board=None):
        """Optionally attach a board at construction time."""
        if board is not None:
            self.set_board(board)

    def set_board(self, board):
        """Attach the board this AI will operate on."""
        self._board = board

    def find_move(self, for_player):
        """Return a move for *for_player*; concrete subclasses override this."""
        pass
|
5,477 | 957fb1bd34d13b86334da47ac9446e30afd01678 | data = {
'title': 'Dva leteca (gostimo na 2)',
'song': [
'x - - - - - x - - - - -',
'- x - - - x - - - x - -',
'- - x - x - - - x - x -',
'- - - x - - - x - - - x'
],
'bpm': 120,
'timeSignature': '4/4'
}
from prog import BellMusicCreator
exportFile = __file__.replace('.py', '') + '.xml'
# BellMusicCreator().show(data)
BellMusicCreator().write(data, fp=exportFile)
|
5,478 | f75e0ddf42cc9797cdf1c4a4477e3d16441af740 | import openpyxl # 适用于xlsx文件
'''
纯文本文件 student.txt为学生信息, 里面的内容(包括花括号)如下所示:
{
"1":["张三",150,120,100],
"2":["李四",90,99,95],
"3":["王五",60,66,68]
}
请将上述内容写到 student.xls 文件中
'''
def read_file():
    """Parse the dict literal stored in 15.txt and return [key, value] pairs.

    Returns:
        list of [key, value] lists, in the dict's insertion order.
    """
    import ast

    with open('15.txt', 'r') as file:
        content = file.read()
    # Security fix: parse the literal with ast.literal_eval instead of
    # eval(), which would execute arbitrary code found in the file.
    record = ast.literal_eval(content)
    words = [[key, value] for key, value in record.items()]
    print(words)
    return words
def write_list(list):  # 写入excel文件 (write into an Excel file)
    """Write a 2-D list into city.xlsx, one cell per element (stringified)."""
    wb = openpyxl.Workbook()
    sheet = wb.active
    sheet.title = 'test'
    value = list
    # openpyxl cells are 1-indexed, hence the +1 offsets.
    for i in range(0, len(value)):
        for j in range(0, len(value[i])):
            sheet.cell(row=i + 1, column=j + 1, value=str(value[i][j]))
    wb.save('city.xlsx')
    print("写入数据成功!")
if __name__ == '__main__':
# read_file()
write_list(read_file())
|
5,479 | 200552b638d6b1a6879b455837677b82689e0069 |
STATUS_CHOICES = (
(-1, 'Eliminado'),
(0, 'Inactivo'),
(1, 'Activo'),
)
USERTYPES_CHOICES = ()
#-- Activation Request Values
ACTIVATION_CHOICES = (
(1, 'Activacion'),
(2, 'Solicitud Password'),
(3, 'Invitacion'),
)
#-- Activation Status Values
ACTIVATIONSTATUS_CHOICES = (
(-1, 'Expirado'),
(0, 'Enviado'),
(1, 'Activado'),
) |
5,480 | 2c4f27e7d1bfe6d68fd0836094b9e350946913f6 | from django.db import models
class Survey(models.Model):
    """Survey representation.

    A named questionnaire that is open between start_date and end_date.
    """
    name = models.CharField(max_length=255)
    description = models.TextField()
    start_date = models.DateTimeField()
    end_date = models.DateTimeField()

    def __str__(self):
        return self.name
class Question(models.Model):
    """Survey's question respresentation.

    question_type selects the answer widget: free text, single choice,
    or multiple choices.
    """
    QUESTION_TYPE_CHOICES = (
        (1, 'Text answer'),
        (2, 'One choice answer'),
        (3, 'Multiple choices answer')
    )
    survey = models.ForeignKey(
        Survey,
        on_delete=models.CASCADE,
        related_name='questions')
    text = models.TextField()
    question_type = models.IntegerField(choices=QUESTION_TYPE_CHOICES)

    def __str__(self):
        return self.text
class AnswerChoice(models.Model):
    """Represantation of question's
    answer's choice.

    One selectable option belonging to a (choice-typed) Question.
    """
    question = models.ForeignKey(
        Question,
        on_delete=models.CASCADE,
        related_name='choices')
    text = models.TextField()

    def __str__(self):
        return self.text
class CompletedSurvey(models.Model):
    """Representation of survey,
    completed by the user.

    user_id is a plain integer (nullable for anonymous responses) rather
    than a FK; the survey FK survives survey deletion via SET_NULL.
    """
    user_id = models.IntegerField(null=True, blank=True)
    survey = models.ForeignKey(
        Survey,
        on_delete=models.SET_NULL,
        null=True,
        related_name='completed_surveys')

    def __str__(self):
        return f"{self.user_id} - {self.survey.name}"
class Answer(models.Model):
    """Representations of question's answer.

    Holds either free text (text_answer) or selected choices
    (answer_choices), depending on the question's type.
    """
    completed_survey = models.ForeignKey(
        CompletedSurvey,
        on_delete=models.CASCADE,
        related_name='answers')
    question = models.ForeignKey(
        Question,
        on_delete=models.CASCADE,
        related_name='answers')
    text_answer = models.TextField(blank=True)
    answer_choices = models.ManyToManyField(AnswerChoice, blank=True)

    def __str__(self):
        return f"Answer for survey '{str(self.completed_survey)}' made by user {self.completed_survey.user_id}"
5,481 | 9ae9fd6da5c3d519d87af699dd4ea9b564a53d79 | import hashlib
# MD5 mining (Advent-of-Code style): find the smallest integer suffix such
# that md5(secret + suffix) starts with six zero hex digits.
secret = 'yzbqklnj'  # was `hash`, which shadowed the builtin
counter = 0  # was `int`, which shadowed the builtin

while not hashlib.md5("{}{}".format(secret, counter).encode('utf-8')).hexdigest().startswith('000000'):
    print("Nope luck for {}{}".format(secret, counter))
    counter += 1

print("Key: {}{}".format(secret, counter))
# Bug fix: the original wrote `print("Number: {}").format(counter)`, which
# calls .format() on print()'s return value (None) -> AttributeError.
print("Number: {}".format(counter))
5,482 | 36e7398f576aa1d298a20b4d4a27a7b93e3bd992 | import numpy as np
import matplotlib.pyplot as plt
def sigmoid(X):
    """Apply the logistic function 1 / (1 + e^-x) to X, element-wise."""
    denom = 1 + np.exp(-X)
    return 1 / denom
def x_strich(X):
    """Prepend a bias column of ones to X: shape (m, n) -> (m, n + 1)."""
    bias = np.ones(len(X))
    return np.column_stack((bias, X))
def feature_scaling(X):
    """Standardize X column-wise; return (scaled_X, column_means, column_stds)."""
    mu = np.mean(X, axis=0)
    sigma = np.std(X, axis=0)
    scaled = (X - mu) / sigma
    return scaled, mu, sigma
def rescale_model(thetas, mean, std):
    # NOTE(review): only the intercept (count == 0) is ever written; every
    # other entry of thetas_rescaled stays 0. This looks like an unfinished
    # de-normalization (slopes are never copied/rescaled) -- confirm intent.
    thetas_rescaled = np.zeros(thetas.shape[0])
    for count, value in enumerate(thetas):
        if count == 0:
            # Undo feature scaling on the bias: theta0' = theta0 + theta1 * (mean/std).
            thetas_rescaled[0] = value + thetas[1] * (mean / std)
    return thetas_rescaled
def logistic_hypothesis(theta):
    """Build a logistic hypothesis h(X) from the coefficient vector theta.

    Returns:
        A callable mapping a feature matrix X to sigmoid(X' @ theta),
        where X' is X with a prepended bias column.
    """
    def hypothesis(X):
        return sigmoid(x_strich(X).dot(theta))
    return hypothesis
# def regulated_cost(X, y, theta, lambda_reg):
#
# return cross_entropy(X, y)(theta) + L2_regularization_cost(X, theta, lambda_reg)
# def cross_entropy(X, y):
# """
# Computes the cross-entropy for a single logit value and a given target class.
# Parameters
# ----------
# X : float64 or float32
# The logit
# y : int
# The target class
# Returns
# -------
# floatX
# The cross entropy value (negative log-likelihood)
# """
#
# def cost(theta):
# z = x_strich(X).dot(theta)
# mu = np.max([np.zeros(X.shape[0]), -z], axis=0)
# r1 = y * (mu + np.log(np.exp(-mu) + np.exp(-z - mu)))
# mu = np.max([np.zeros(X.shape[0]), z], axis=0)
# r2 = (1 - y) * (mu + np.log(np.exp(-mu) + np.exp(z - mu)))
# return r1 + r2
#
# return cost
def cross_entropy(X, y):
    """Build the per-example cross-entropy cost as a function of theta.

    Args:
        X: features, shape (m_examples, n_features).
        y: ground-truth labels, shape (m_examples).

    Returns:
        costs(theta): per-example cross-entropy values; the 1e-9 offset
        keeps log() finite when the hypothesis saturates at 0 or 1.
    """
    def costs(theta):
        h = logistic_hypothesis(theta)(X)
        return -y * np.log(h + 1e-9) - (1 - y) * np.log(1 - h + 1e-9)
    return costs
def compute_new_theta(X, y, theta, learning_rate, lambda_reg):
    """One L2-regularized gradient-descent step for logistic regression.

    Args:
        X: features, shape (m_examples, n_features).
        y: labels, shape (m_examples).
        theta: current coefficients (intercept first).
        learning_rate: step size.
        lambda_reg: L2 regularization strength.

    Returns:
        The updated coefficient vector.
    """
    m = len(X)
    errors = logistic_hypothesis(theta)(X) - y
    # Gradient sum over examples; x_strich adds the bias column.
    grad_sum = np.sum(errors * x_strich(X).T, axis=1)
    # Weight decay shrinks theta, then the scaled gradient is subtracted.
    return theta * (1 - learning_rate * (lambda_reg / m)) - (learning_rate / m) * grad_sum
def L2_regularization_cost(X, theta, lambda_reg):
    """Ridge penalty (lambda / 2m) * sum(theta_j ** 2).

    Note: matches the original behavior of penalizing ALL coefficients,
    including the intercept theta[0].
    """
    squared = np.sum(np.square(theta))
    return squared * (lambda_reg / (2 * len(X)))
def gradient_descent(X, y, theta, learning_rate, num_iters, lambda_reg):
    """Minimize the regularized cross-entropy cost by gradient descent.

    Args:
        X: features, shape (m_examples, n_features).
        y: labels, shape (m_examples).
        theta: initial coefficients.
        learning_rate: step size passed to compute_new_theta.
        num_iters: number of recorded iterations (the initial theta counts
            as iteration 0).
        lambda_reg: L2 regularization strength.

    Returns:
        (history_cost, history_theta): cost per iteration and the list of
        theta vectors, starting with the initial one.
    """
    J = mean_cross_entropy_costs(X, y, lambda_reg)
    history_theta = [theta]
    history_cost = np.zeros(num_iters)
    history_cost[0] = J(theta)
    for step in range(1, num_iters):
        theta = compute_new_theta(X, y, theta, learning_rate, lambda_reg)
        history_theta.append(theta)
        history_cost[step] = J(theta)
    return history_cost, history_theta
def mean_cross_entropy_costs(X, y, lambda_reg=0.0):
    """Build the scalar training cost J(theta) for the given data.

    J(theta) is the mean per-example cross-entropy plus the L2 penalty.

    Args:
        X: features, shape (m_examples, n_features).
        y: labels, shape (m_examples).
        lambda_reg: L2 regularization strength (0 disables the penalty).

    Returns:
        A callable J(theta) -> float.
    """
    def J(theta):
        data_term = np.mean(cross_entropy(X, y)(theta))
        return data_term + L2_regularization_cost(X, theta, lambda_reg)
    return J
def plot_progress(fig, costs, learning_rate, lambda_reg):
    """Plot the cost history as one line into `fig`.

    Args:
        fig: matplotlib figure to draw into (a new subplot is added).
        costs: sequence of cost values, one per iteration.
        learning_rate: only used for the legend label.
        lambda_reg: only used for the legend label.
    """
    ax = fig.add_subplot(111)
    ax.plot(
        np.arange(len(costs)),
        costs,
        alpha=0.8,
        label="LR: " + str(learning_rate) + " __ Lambda: " + str(lambda_reg),
    )
    # Legend anchored above the axes, stretched over the full width.
    ax.legend(
        bbox_to_anchor=(0.0, 1.02, 1.0, 0.102),
        loc="best",
        ncol=4,
        mode="expand",
        borderaxespad=0.0,
    )
5,483 | 1c60620814a4aea2573caf99cee87590a8d57c18 | #Write by Jess.S 25/1/2019
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
plt.rcParams['font.sans-serif'] = ['FangSong']  # default font (supports the CJK labels used below)
plt.rcParams['axes.unicode_minus'] = False  # render '-' correctly when saving figures with a CJK font
def draw_point(x, y):
    """Scatter-plot the raw (x, y) coordinate series and show the figure."""
    plt.scatter(x, y)
    plt.title('点分布图')  # chart title ("point distribution")
    plt.xlabel('x轴')  # x-axis label
    plt.ylabel('y轴')  # y-axis label
    plt.grid(True)  # show grid lines
    plt.show()
def draw_route(route_list, x, y):
    """Overlay every route as a polyline on top of the point scatter.

    Args:
        route_list: iterable of routes; each route is a sequence of [x, y] pairs.
        x, y: the full coordinate series for the background scatter.
    """
    plt.scatter(x, y)
    for route in route_list:
        route = np.array(route)
        # print(route.shape)
        plt.plot(route[:, 0], route[:, 1])
    plt.title('路径图')  # chart title ("route map")
    plt.xlabel('x轴')  # x-axis label
    plt.ylabel('y轴')  # y-axis label
    plt.grid(True)  # show grid lines
    plt.show()
def read_data(path, node):
    """Load point coordinates from a CSV with 'Easting'/'Southing' columns.

    Appends [x, y] pairs to `node` (mutated in place) and returns
    (nodes sorted by x then y, x series, y series).
    """
    csv_data = pd.read_csv(path)  # read the coordinate table
    # print(csv_data)
    x = csv_data['Easting']
    y = csv_data['Southing']
    # print(x)
    # print(y)
    for i in range(len(x)):
        xy = []
        xy.append(x[i])
        xy.append(y[i])
        node.append(xy)
    # print(node)
    node_sort = sorted(node, key=lambda x: (x[0], x[1]))
    # print(node_sort)
    # Alternative sort using numpy:
    # node = np.array(node)
    # node = node[np.lexsort(node[:,::-1].T)]
    # print(node)
    return node_sort, x, y
#判断前沿面的点是否被更新
# def dominant(prev,current):
# if prev[0]<current[0] & prev[1]<current[1]:
# return True
# return False
#
# #判断两条路径是否有重叠部分
# def judge_line(origin,n1,n2):
# if((n1[1]-origin[1])/(n1[0]-origin[0])==(n2[1]-origin[1])/(n2[0]-origin[0])):
# return True
# return False
def init_routing(route_number, route_list, leading_edge, node_sort):
    """Seed up to route_number two-node routes, all starting at the first
    (lowest-sorted) node.

    Mutates route_list and leading_edge in place; returns route_list once
    route_number routes exist, or None if there were too few nodes.
    """
    origin = node_sort[0]
    for candidate in node_sort:
        if candidate == origin:
            continue
        route_list.append([origin, candidate])
        leading_edge.append(candidate)
        if len(route_list) >= route_number:
            return route_list
    return None
def expand(route_list, leading_edge, node_sort, route_number):
    """Assign every remaining node (past the seeds consumed by init_routing)
    to an existing route, extending the route whose leading edge lies at or
    above the node's y with the smallest gap.  Mutates and returns route_list.
    """
    for i in range(len(node_sort)):
        # Indices 0..route_number were already consumed as seeds.
        if(i<=route_number):
            continue
        # First pass: remember the route whose leading edge has the LARGEST y.
        # NOTE(review): y_min is reused both as that maximum and, below, as the
        # shrinking gap bound — presumably intentional, but confirm.
        y_min = 0
        max_index = 0
        for a in range(len(leading_edge)):
            if(leading_edge[a][1]>y_min):
                y_min = leading_edge[a][1]
                max_index = a
        # Second pass: route whose edge y >= node y with the smallest gap.
        index = -1
        for n in range(len(leading_edge)):
            delta_y = leading_edge[n][1] - node_sort[i][1]
            if((delta_y>=0) & (delta_y<y_min)):
                y_min = delta_y
                index = n
        # Fallback: no edge lies above the node, use the highest edge.
        if(index < 0):
            index = max_index
        route_list[index].append(node_sort[i])
        leading_edge[index] = node_sort[i]
    return route_list
if __name__=='__main__':
    path = 'coordinates v1.csv'
    node = []  # coordinates of every point; sorted in read_data below
    route_list = []  # the routes built so far
    leading_edge = []  # most recently appended node of each route
    route_number = 6
    node_sort, x, y = read_data(path, node)
    route_list = init_routing(route_number, route_list, leading_edge, node_sort)
    route_list = expand(route_list, leading_edge, node_sort, route_number)
    route_list = np.array(route_list)
    draw_route(route_list, x, y)
    print(route_list)
|
5,484 | dfbbbaf6b5f02c60ca48f7864068d59349c547d1 | """AWS CDK application.
See https://docs.aws.amazon.com/cdk/ for details.
"""
from ias_pmi_cdk_common import PMIApp
from stacks import MainStack

# Application name used as the CDK app identifier.
APP_NAME = 'etl-pm-pipeline-be'

# create CDK application
app = PMIApp(APP_NAME)

# add stacks
# NOTE(review): `app` is passed twice — presumably (scope, app-config);
# confirm against MainStack's constructor signature.
MainStack(app, app, 'main')

# synthesize application assembly
app.synth()
|
5,485 | 89881f3cc6703b3f43f5d2dae87fa943d8a21513 | from random import random, randint, choice
from copy import deepcopy
from math import log
"""
Обертка для функций, которые будут находиться в узлах,
представляющих функции. Его члены – имя функции, сама функция
и количество принимаемых параметров.
"""
class fwrapper:
    """Wrapper for a callable stored at a function node: keeps the callable
    itself, its arity (childcount) and a display name."""

    def __init__(self, function, childcount, name):
        self.name = name
        self.function = function
        self.childcount = childcount
"""
Класс функциональных узлов (имеющих потомков). Инициализируется экземпляром класса fwrapper.
Метод evaluate вычисляет значения дочерних узлов и передает их представленной данным узлом
функции в качестве параметров.
"""
class node:
    """Interior tree node: applies its wrapped function to the evaluated
    results of its child nodes."""

    def __init__(self, fw, children):
        self.function = fw.function
        self.name = fw.name
        self.children = children

    def evaluate(self, inp):
        """Evaluate every child on `inp`, then apply this node's function."""
        args = [child.evaluate(inp) for child in self.children]
        return self.function(args)

    def display(self, indent=0):
        """Print an indented textual rendering of the subtree."""
        print((' ' * indent) + self.name)
        for child in self.children:
            child.display(indent + 1)
"""
Класс узлов, которые просто возвращают один из переданных программе параметров.
Его метод evaluate возвращает параметр, соответствующий значению idx.
"""
class paramnode:
    """Leaf node that returns one of the program's input parameters."""

    def __init__(self, idx):
        self.idx = idx

    def evaluate(self, inp):
        # Return the input parameter this leaf is bound to.
        return inp[self.idx]

    def display(self, indent=0):
        """Print the bound parameter index, e.g. 'p0'."""
        print('%sp%d' % (' ' * indent, self.idx))
"""
Узлы, возвращающие константы. Метод evaluate просто возвращает
то значение, которым экземпляр был инициализирован.
"""
class constnode:
    """Leaf node that always evaluates to a fixed constant."""

    def __init__(self, v):
        self.v = v

    def evaluate(self, inp):
        # The input is ignored; the stored constant is the value.
        return self.v

    def display(self, indent=0):
        print('%s%d' % (' ' * indent, self.v))
"""
Простые функции типа add и subtract можно встроить с помощью лямбда-выражений.
Для остальных функцию придется написать в отдельном блоке.
В любом случае функция обертывается в экземпляр класса fwrapper
вместе со своим именем и числом параметров.
"""
# Basic arithmetic operators wrapped for use as GP function nodes.
addw = fwrapper(lambda l: l[0] + l[1], 2, 'add')
subw = fwrapper(lambda l: l[0] - l[1], 2, 'subtract')
mulw = fwrapper(lambda l: l[0] * l[1], 2, 'multiply')


def iffunc(l):
    # Ternary: if l[0] > 0, return the "then" value l[1], else l[2].
    if l[0] > 0:
        return l[1]
    else:
        return l[2]


ifw = fwrapper(iffunc, 3, 'if')


def isgreater(l):
    # Comparison returning 1/0 so results stay numeric for parent nodes.
    if l[0] > l[1]:
        return 1
    else:
        return 0


gtw = fwrapper(isgreater, 2, 'isgreater')

# Pool of function wrappers from which random trees pick their operators.
flist = [addw, mulw, ifw, gtw, subw]
# C помощью класса node можно построить дерево программы (в качестве примера)
def exampletree():
    """Hand-built demo program: if p0 > 3 then p1 + 5 else p1 - 2."""
    condition = node(gtw, [paramnode(0), constnode(3)])
    then_branch = node(addw, [paramnode(1), constnode(5)])
    else_branch = node(subw, [paramnode(1), constnode(2)])
    return node(ifw, [condition, then_branch, else_branch])
"""
Эта функция создает узел, содержащий случайно выбранную функцию, и проверяет,
сколько у этой функции должно быть параметров. Для каждого дочернего узла функция
вызывает себя рекурсивно, чтобы создать новый узел. Так конструируется все дерево,
причем процесс построения ветвей завершается в тот момент, когда у очередного узла
нет дочерних (то есть он представляет либо константу, либо переменную-параметр).
Параметр pc равен числу параметров, принимаемых деревом на входе. Параметр fpr
задает вероятность того, что вновь создаваемый узел будет соответствовать функции,
а ppr – вероятность того, что узел, не являющийся функцией, будет иметь тип paramnode.
"""
def makerandomtree(pc, maxdepth=4, fpr=0.5, ppr=0.6):
    """Build a random program tree.

    Args:
        pc: number of input parameters the program may reference.
        maxdepth: maximum remaining depth for function nodes.
        fpr: probability that a new node is a function node (while depth remains).
        ppr: probability that a non-function node is a paramnode (else constnode).
    """
    if random() < fpr and maxdepth > 0:
        f = choice(flist)
        # Recurse once per argument the chosen function requires.
        children = [makerandomtree(pc, maxdepth - 1, fpr, ppr)
                    for i in range(f.childcount)]
        return node(f, children)
    elif random() < ppr:
        return paramnode(randint(0, pc - 1))
    else:
        return constnode(randint(0, 10))
def hiddenfunction(x, y):
    """Secret target function (x^2 + 3x + 2y + 5) the GP tries to rediscover."""
    quadratic = x ** 2
    linear = 2 * y + 3 * x
    return quadratic + linear + 5
def buildhiddenset():
    """Generate 200 random training rows [x, y, hiddenfunction(x, y)]
    with x and y drawn uniformly from 0..40."""
    rows = []
    for _ in range(200):
        a = randint(0, 40)
        b = randint(0, 40)
        rows.append([a, b, hiddenfunction(a, b)])
    return rows
"""
Эта функция перебирает все строки набора данных, вычисляет функцию от указанных
в ней аргументов и сравнивает с результатом. Абсолютные значения разностей суммируются.
Чем меньше сумма, тем лучше программа, а значение 0 говорит о том, что все результаты
в точности совпали.
"""
def scorefunction(tree, s):
    """Sum of absolute prediction errors of `tree` over dataset `s`.

    Each row of `s` is [x, y, target]; 0 means a perfect fit.
    """
    total = 0
    for row in s:
        predicted = tree.evaluate([row[0], row[1]])
        total += abs(predicted - row[2])
    return total
"""
Эта функция начинает с корня дерева и решает, следует ли изменить
узел. Если нет, она рекурсивно вызывает mutate для дочерних узлов.
Может случиться, что мутации подвергнутся все узлы, а иногда дерево
вообще не изменится.
"""
# Мутация путем замены поддерева
def mutate(t, pc, probchange=0.1):
    """Subtree-replacement mutation.

    With probability probchange the current node is replaced by a fresh
    random tree; otherwise the children are mutated recursively.  Returns a
    new tree — `t` itself is never modified (deepcopy).
    """
    if random() < probchange:
        return makerandomtree(pc)
    else:
        result = deepcopy(t)
        # Leaf nodes (paramnode/constnode) have no children to recurse into.
        if hasattr(t, "children"):
            result.children = [mutate(c, pc, probchange) for c in t.children]
        return result
"""
Функции, выполняющей скрещивание, передаются два дерева, и она
обходит оба. Если случайно выбранное число не превышает пороговой
вероятности, то функция возвращает копию первого дерева, в которой
одна из ветвей заменена какой-то ветвью, взятой из второго дерева.
Поскольку обход выполняется параллельно, то скрещивание произойдет примерно на одном уровне каждого дерева.
"""
# Функция скрещивания. Две успешные программы комбинируются с целью получения новой программы.
def crossover(t1, t2, probswap=0.7, top=1):
    """Crossover of two successful programs.

    Both trees are walked in parallel; below the root (top=0), with
    probability probswap the current t1 subtree is replaced by a copy of a
    subtree from t2.  Returns a new tree; inputs are not modified.
    """
    if random() < probswap and not top:
        return deepcopy(t2)
    else:
        result = deepcopy(t1)
        # Only recurse while both nodes are interior (have children).
        if hasattr(t1, 'children') and hasattr(t2, 'children'):
            result.children = [crossover(c, choice(t2.children), probswap, 0)
                               for c in t1.children]
        return result
# Функция возвращает функцию ранжирования для имеющегося набора данных
def getrankfunction(dataset):
    """Build a ranking function for `dataset`: it scores every program and
    returns (score, program) pairs sorted best (lowest score) first."""
    def rankfunction(population):
        # NOTE(review): ties on score fall back to comparing tree objects,
        # which raises TypeError on Python 3 — confirm scores stay unique.
        scored = [(scorefunction(candidate, dataset), candidate)
                  for candidate in population]
        scored.sort()
        return scored
    return rankfunction
"""
Создание конкурентной среды, в которой программы будут эволюционировать.
Смысл в том, чтобы создать набор случайных программ, отобрать из них
наилучшие для копирования и модификации и повторять процесс, пока не будет
выполнено некое условие останова.
"""
def evolve(pc, popsize, rankfunction, maxgen=500, mutationrate=0.1, breedingrate=0.4, pexp=0.7, pnew=0.05):
    """Evolve a population of random programs.

    Creates a random initial population, then runs up to maxgen
    generations, ranking programs each time with rankfunction.  The two
    best programs always survive unchanged; the rest of the next
    generation is bred via crossover + mutation, with an occasional brand
    new random program for diversity.  Stops early on a perfect score.

    Args:
        pc: number of input parameters available to the programs.
        popsize: population size.
        rankfunction: ranks a list of programs best-to-worst as (score, tree) pairs.
        maxgen: maximum number of generations.
        mutationrate: mutation probability passed to mutate().
        breedingrate: swap probability passed to crossover().
        pexp: decay rate for preferring high-ranked parents; higher = harsher selection.
        pnew: probability of injecting a completely new random program.

    Returns:
        The best program found.
    """
    # Biased random index: prefers small (good) ranks; the smaller pexp,
    # the stronger the preference for the top of the ranking.
    def selectindex():
        return int(log(random()) / log(pexp))
    # Create the random initial population
    population = [makerandomtree(pc) for i in range(popsize)]
    for i in range(maxgen):
        scores = rankfunction(population)
        print(scores[0][0])
        if scores[0][0] == 0: break
        # The two best individuals always survive
        newpop = [scores[0][1], scores[1][1]]
        # Breed the next generation
        while len(newpop) < popsize:
            if random() > pnew:
                newpop.append(mutate(
                    crossover(scores[selectindex()][1],
                              scores[selectindex()][1],
                              probswap=breedingrate),
                    pc, probchange=mutationrate))
            else:
                # Inject a random program to keep diversity up
                newpop.append(makerandomtree(pc))
        population = newpop
    # NOTE(review): `scores` is unbound if maxgen == 0 — confirm callers
    # always pass maxgen >= 1.
    scores[0][1].display()
    return scores[0][1]
#[
# (10, "program1"),
# (17, "program2"),
#]
def gridgame(p):
    """Play one game between two programs on a 4x4 grid.

    Each program receives [own x, own y, opponent x, opponent y, last move]
    and returns a move 0-3 (taken modulo 4).  Repeating the same move twice
    in a row loses; capturing the opponent's square wins.

    Returns:
        0 or 1 for the winning player's index, -1 for a draw after 50 rounds.
    """
    # Board size (max coordinate on each axis)
    max = (3, 3)
    # Remember each player's last move
    lastmove = [-1, -1]
    # Remember the players' positions
    location = [[randint(0, max[0]), randint(0, max[1])]]
    # Place the second player far enough from the first
    location.append([(location[0][0] + 2) % 4, (location[0][1] + 2) % 4])
    # At most 50 moves before declaring a draw
    for o in range(50):
        # For each player
        for i in range(2):
            locs = location[i][:] + location[1 - i][:]
            locs.append(lastmove[i])
            move = p[i].evaluate(locs) % 4
            # Moving in the same direction twice in a row loses
            if lastmove[i] == move: return 1 - i
            lastmove[i] = move
            if move == 0:
                location[i][0] -= 1
                # The board is bounded
                if location[i][0] < 0: location[i][0] = 0
            if move == 1:
                location[i][0] += 1
                if location[i][0] > max[0]: location[i][0] = max[0]
            if move == 2:
                location[i][1] -= 1
                if location[i][1] < 0: location[i][1] = 0
            if move == 3:
                location[i][1] += 1
                if location[i][1] > max[1]: location[i][1] = max[1]
            # Capturing the opponent wins
            if location[i] == location[1 - i]: return i
    return -1
def tournament(pl):
    """Round-robin tournament over the player programs in `pl`.

    Every ordered pair plays one gridgame; a loss costs the loser 2 points
    and a draw costs each player 1 point (lower totals are better).

    Returns:
        List of (losses, player) pairs sorted from fewest to most losses,
        after printing the winner's tree.
    """
    # Tally of penalty points per player
    losses = [0 for p in pl]
    # Every player plays every other player, in both seat orders
    for i in range(len(pl)):
        for j in range(len(pl)):
            if i == j: continue
            # Who won?
            winner = gridgame([pl[i], pl[j]])
            # Two points for a loss, one for a draw
            if winner == 0:
                losses[j] += 2
            elif winner == 1:
                losses[i] += 2
            elif winner == -1:
                losses[i] += 1
                # BUG FIX: the second draw point was applied to player i
                # again; it belongs to player j.
                losses[j] += 1
    # Sort and return the results
    z = list(zip(losses, pl))
    z.sort(key=lambda t: t[0])
    # NOTE(review): display() prints and returns None, so this also
    # prints "None" — kept as-is for behavioral compatibility.
    print(z[0][1].display(indent=4))
    return z
class humanplayer:
    """Interactive stand-in for a program: draws the board on stdout and
    reads the human's move from stdin via the same evaluate() interface."""

    def evaluate(self, board):
        """Render the board encoded in `board` and return the typed move (int)."""
        # Get my position and the positions of the other players
        me = tuple(board[0:2])
        others = [tuple(board[x:x + 2]) for x in range(2, len(board) - 1, 2)]
        # Draw the board
        for i in range(4):
            for j in range(4):
                if (i, j) == me:
                    print('O', end=' ')
                elif (i, j) in others:
                    print('X', end=' ')
                else:
                    print('.', end=' ')
            print()
        # Show the moves, for reference
        print('Your last move was %d' % board[len(board) - 1])
        print(' 0')
        print('2 3')
        print(' 1')
        print('Enter move: ')
        # Return the number entered by the user
        move = int(input())
        return move
class fwrapper:
    """Redefinition of fwrapper (shadows the earlier class of the same
    name): identical data, but the arity argument is named `params`."""

    def __init__(self, function, params, name):
        self.childcount = params
        self.function = function
        self.name = name
# flist={'str':[substringw,concatw],'int':[indexw]}
# NOTE(review): rebuilds flist from wrappers created with the ORIGINAL
# fwrapper class above, even though fwrapper was just redefined — confirm
# the two definitions are meant to stay attribute-compatible.
flist = [addw, mulw, ifw, gtw, subw]
|
5,486 | 2681bd9fe93a4d61214b7c45e5d73097ab73dc07 | import torch as th
from tpp.processes.hawkes.r_terms_recursive_v import get_r_terms
from tpp.utils.test import get_test_events_query
def run_test():
    """Smoke test: build random test events/queries for 3 marks and make
    sure get_r_terms runs without raising."""
    marks = 3
    events, query = get_test_events_query(marks=marks)
    # Random decay-rate matrix, one entry per (mark, mark) pair.
    beta = th.rand([marks, marks])
    get_r_terms(events=events, beta=beta)


if __name__ == '__main__':
    run_test()
|
5,487 | f46dd5217c8e015546d7fff7ee52569ecc2c8e41 | #8
def matrix(m):
    """Square every entry of the 2D list `m` in place.

    The column count is taken from the first row, preserving the
    original's rectangular-matrix assumption.  Returns None.
    """
    width = len(m[0])
    for i in range(len(m)):
        for j in range(width):
            m[i][j] = m[i][j] ** 2
# Demo: square a sample matrix in place and show before/after.
a=[[1,2,3],[4,5,6],[8,9,0]]
print('The matrix is ',a)
matrix(a)  # mutates `a` in place
print('The updated matrix is ',a)
|
5,488 | 1ac0f5c62ee3cb60d4443b65d429f4f0e6815100 | from django.conf.urls import url
from django.contrib.auth import views as auth_views
from django.contrib.auth.forms import SetPasswordForm
from . import views
urlpatterns = [
    # Profile detail / update pages keyed by user primary key.
    url(regex=r'^(?P<pk>\d+)$', view=views.UserDetailView.as_view(), name='user_detail'),
    url(regex=r'^update/(?P<pk>\d+)$', view=views.UserUpdateView.as_view(), name='user_update'),
    url(regex=r'^email/update/(?P<pk>\d+)$', view=views.EmailUpdateView.as_view(), name='email_change'),
    # Password-change flow backed by Django's built-in auth views;
    # SetPasswordForm drops the "old password" field.
    url(regex=r'^password/change$', view=auth_views.password_change,
        kwargs={'template_name': 'accounts/password_change_form.html',
                'current_app': 'accounts', 'password_change_form': SetPasswordForm},
        name='password_change'),
    url(regex=r'^password/change/done$', view=auth_views.password_change_done,
        kwargs={'template_name': 'accounts/password_change_done.html', 'current_app': 'accounts'},
        name='password_change_done'),
    url(regex=r'^switch$', view=views.SwitchUserView.as_view(), name='switch_user'),
    url(regex=r'^all_trainees$', view=views.AllTrainees.as_view(), name='trainee_information'),
]
|
5,489 | b8a41c56a31acab0181ec364f76010ac12119074 | # PDE:
# add_library('hype')
# processing.py:
from hype.core.util import H
from hype.core.interfaces import HCallback
from hype.extended.behavior import HOscillator
from hype.extended.drawable import HCanvas, HRect
from hype.extended.layout import HGridLayout
from hype.extended.util import HDrawablePool
from random import choice
rectRadius = 50  # half the side length of each square (squares are 100 px)
numSquares = 25  # 5x5 grid
canvas = None  # assigned in setup()
pool = None  # assigned in setup()
color1 = 0x406B2B24  # #6B2B24 with 0x40 alpha
color2 = 0xc4831521  # #831521 with 0xc4 alpha
def setup():
    """Processing setup: initialize the HYPE stage, a fading canvas, and a
    5x5 grid pool of rotation-oscillating squares."""
    global canvas, pool
    size(568, 568)
    H.init(this).background(0xffE0DFE2)  # #E0DFE2
    smooth()
    # autoClear(False) + fade(5) leaves slowly fading motion trails.
    canvas = H.add(HCanvas()).autoClear(False).fade(5)
    pool = HDrawablePool(numSquares)
    pool.autoParent(canvas)\
        .add(HRect()
             .size(rectRadius * 2)
             .noStroke())\
        .layout(HGridLayout()
                .startLoc(rectRadius * 2 - 20, rectRadius * 2 - 20)
                .spacing(rectRadius * 2 + 1, rectRadius * 2 + 1)
                .cols(5))\
        .onCreate(Callback())\
        .requestAll()
def draw():
    # Render one frame of the HYPE stage each tick.
    H.drawStage()
class Callback(HCallback):
    """Configures each pooled square on creation: a random fill from the
    two palette colors plus a rocking rotation oscillator."""

    def __init__(self):
        pass

    @staticmethod
    def run(drawable):
        drawable.anchorAt(H.CENTER)\
            .fill(choice([color1, color2]))
        # Rock the square between -5 and +5 degrees; the random per-square
        # phase offset desynchronizes the grid.
        HOscillator()\
            .target(drawable)\
            .property(H.ROTATION)\
            .range(-5, 5)\
            .speed(1)\
            .freq(4)\
            .currentStep(pool.currentIndex() * random(2, 25))
|
5,490 | 957545649e9bf1eaabe42a1caa627d544e68f108 | """
This file is part of GALE,
Copyright Joe Krall, 2014.
GALE is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
GALE is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with GALE. If not, see <http://www.gnu.org/licenses/>.
"""
from Fastmap.Slurp import *
from Fastmap.Moo import *
from jmoo_individual import *
def gale_64_WHERE(problem, population, configuration, values_to_be_passed):
    """The core method behind GALE: cluster the population with WHERE.

    Compiles the population into a WHERE table (objectives unevaluated,
    marked '?'), runs recursive Fastmap division, and counts how many rows
    ended up evaluated.  Returns (non-dominated leaves, evaluation count).
    """
    # Compile population into table form used by WHERE
    t = slurp([[x for x in row.decisionValues] + ["?" for y in problem.objectives] for row in population],
              problem.buildHeader().split(","))
    # Initialize some parameters for WHERE
    The.allowDomination = True
    The.alpha = 1
    for i, row in enumerate(t.rows):
        row.evaluated = False
    # Run WHERE
    m = Moo(problem, t, len(t.rows), N=1).divide(minnie=rstop(t))
    # Organizing
    NDLeafs = m.nonPrunedLeaves()  # The surviving non-dominated leafs
    allLeafs = m.nonPrunedLeaves() + m.prunedLeaves()  # All of the leafs
    # After mutation: check how many rows were actually evaluated
    numEval = 0
    for leaf in allLeafs:
        for row in leaf.table.rows:
            if row.evaluated:
                numEval += 1
    return NDLeafs, numEval
def polynomial_mutation(problem, individual, configuration):
    """Polynomial mutation (Deb) of one individual.

    Each decision variable is mutated with probability 1/n using the
    polynomial distribution controlled by ETA_M_DEFAULT_, then clamped to
    the variable bounds.  Returns a new jmoo_individual.

    NOTE(review): the output shares individual.decisionValues unless
    jmoo_individual copies its argument — confirm.
    """
    from numpy.random import random
    eta_m_ = configuration["NSGAIII"]["ETA_M_DEFAULT_"]
    distributionIndex_ = eta_m_
    output = jmoo_individual(problem, individual.decisionValues)
    # BUG FIX: was 1/len(...) — integer division on Python 2 (this file uses
    # xrange and print statements), so probability was 0 and no variable was
    # ever mutated.
    probability = 1.0 / len(problem.decisions)
    for var in xrange(len(problem.decisions)):
        if random() <= probability:
            y = individual.decisionValues[var]
            yU = problem.decisions[var].up
            yL = problem.decisions[var].low
            # Normalized distances to the lower/upper bound.
            delta1 = (y - yL) / (yU - yL)
            delta2 = (yU - y) / (yU - yL)
            rnd = random()
            mut_pow = 1.0 / (eta_m_ + 1.0)
            if rnd < 0.5:
                xy = 1.0 - delta1
                val = 2.0 * rnd + (1 - 2 * rnd) * (xy ** (distributionIndex_ + 1.0))
                deltaq = val ** mut_pow - 1
            else:
                xy = 1.0 - delta2
                val = 2.0 * (1.0 - rnd) + 2.0 * (rnd - 0.5) * (xy ** (distributionIndex_ + 1.0))
                deltaq = 1.0 - (val ** mut_pow)
            y += deltaq * (yU - yL)
            # Clamp to the variable's bounds.
            if y < yL: y = yL
            if y > yU: y = yU
            output.decisionValues[var] = y
    return output
def sbxcrossover(problem, parent1, parent2, configuration):
    """Simulated Binary Crossover (SBX, Deb & Agrawal).

    With probability SBX_Probability, each decision variable of the two
    parents is recombined using the polynomial spread-factor distribution
    controlled by ETA_C_DEFAULT_; otherwise the offspring keep the parent
    values.  Children are clamped to the variable bounds.

    Returns:
        (offspring1, offspring2) as new jmoo_individuals.
    """
    EPS = 1.0e-14
    distribution_index = configuration["NSGAIII"]["ETA_C_DEFAULT_"]
    probability = configuration["NSGAIII"]["SBX_Probability"]
    from numpy.random import random
    offspring1 = jmoo_individual(problem, parent1.decisionValues)
    offspring2 = jmoo_individual(problem, parent2.decisionValues)
    number_of_variables = len(problem.decisions)
    if random() <= probability:
        for i in xrange(number_of_variables):
            valuex1 = offspring1.decisionValues[i]
            valuex2 = offspring2.decisionValues[i]
            if random() <= 0.5:
                if abs(valuex1 - valuex2) > EPS:
                    # Order the two values so y1 <= y2.
                    if valuex1 < valuex2:
                        y1 = valuex1
                        y2 = valuex2
                    else:
                        y1 = valuex2
                        y2 = valuex1
                    yL = problem.decisions[i].low
                    yU = problem.decisions[i].up
                    rand = random()
                    # First child: spread factor relative to the lower bound.
                    beta = 1.0 + (2.0 * (y1 - yL) / (y2 - y1))
                    alpha = 2.0 - beta ** (-1 * (distribution_index + 1.0))
                    if rand <= 1 / alpha:
                        # BUG FIX: both branches previously computed the
                        # contracting expression; the in-range case must be
                        # (rand * alpha) ** (1/(eta_c + 1)) as in jMetal.
                        betaq = (rand * alpha) ** (1.0 / (distribution_index + 1.0))
                    else:
                        betaq = (1.0 / (2.0 - rand * alpha)) ** (1.0 / (distribution_index + 1.0))
                    c1 = 0.5 * ((y1 + y2) - betaq * (y2 - y1))
                    # Second child: spread factor relative to the upper bound.
                    beta = 1.0 + (2.0 * (yU - y2) / (y2 - y1))
                    alpha = 2.0 - beta ** -(distribution_index + 1.0)
                    if rand <= (1.0 / alpha):
                        betaq = (rand * alpha) ** (1.0 / (distribution_index + 1.0))
                    else:
                        betaq = ((1.0 / (2.0 - rand * alpha)) ** (1.0 / (distribution_index + 1.0)))
                    c2 = 0.5 * ((y1 + y2) + betaq * (y2 - y1))
                    # Clamp both children to the bounds.
                    if c1 < yL: c1 = yL
                    if c2 < yL: c2 = yL
                    if c1 > yU: c1 = yU
                    if c2 > yU: c2 = yU
                    # Randomly assign which child receives which value.
                    if random() <= 0.5:
                        offspring1.decisionValues[i] = c2
                        offspring2.decisionValues[i] = c1
                    else:
                        offspring1.decisionValues[i] = c1
                        offspring2.decisionValues[i] = c2
                else:
                    offspring1.decisionValues[i] = valuex1
                    offspring2.decisionValues[i] = valuex2
            else:
                offspring1.decisionValues[i] = valuex2
                offspring2.decisionValues[i] = valuex1
    return offspring1, offspring2
def variation(problem, individual_index, population, configuration):
    """SBX regeneration technique: breed one mutant from the individual at
    individual_index and a random distinct mate, via SBX crossover followed
    by polynomial mutation.  Only the first child is kept."""
    # Pick a random mate different from the individual itself.
    from random import randint
    another_parent = individual_index
    while another_parent == individual_index: another_parent = randint(0, len(population)-1)
    from copy import deepcopy
    parent1 = deepcopy(population[individual_index])
    parent2 = deepcopy(population[another_parent])
    child1, _ = sbxcrossover(problem, parent1, parent2, configuration)
    mchild1 = polynomial_mutation(problem, child1, configuration)
    return mchild1
def gale_64_Mutate(problem, NDLeafs, configuration):
    """GALE64 mutation phase.

    For each non-dominated leaf: evaluate the two pole rows, score the
    poles, keep half of the leaf's rows, refill back to the original size
    with SBX/PM variants, and guarantee a minimum number of evaluated
    members.  Returns (new population, number of evaluations performed).
    """
    #################
    # Mutation Phase
    #################
    # Keep track of evals
    numEval = 0
    population = []
    for leaf in NDLeafs:
        initial_size = len(leaf.table.rows)
        # print "Number of mutants: ", len(leaf.table.rows)
        # Pull out the Poles
        east = leaf.table.rows[0]
        west = leaf.table.rows[-1]
        # Evaluate those poles if needed
        if not east.evaluated:
            for o, objScore in enumerate(problem.evaluate(east.cells)):
                # Objective scores occupy the trailing cells of the row.
                east.cells[-(len(problem.objectives) - o)] = objScore
            east.evaluated = True
            numEval += 1
        if not west.evaluated:
            for o, objScore in enumerate(problem.evaluate(west.cells)):
                west.cells[-(len(problem.objectives) - o)] = objScore
            west.evaluated = True
            numEval += 1
        # Score the poles
        n = len(problem.decisions)
        weights = []
        for obj in problem.objectives:
            # w is negative when we are maximizing that objective
            if obj.lismore:
                weights.append(+1)
            else:
                weights.append(-1)
        weightedWest = [c * w for c, w in zip(west.cells[n:], weights)]
        weightedEast = [c * w for c, w in zip(east.cells[n:], weights)]
        westLoss = loss(weightedWest, weightedEast, mins=[obj.low for obj in problem.objectives],
                        maxs=[obj.up for obj in problem.objectives])
        eastLoss = loss(weightedEast, weightedWest, mins=[obj.low for obj in problem.objectives],
                        maxs=[obj.up for obj in problem.objectives])
        # Determine better Pole
        # NOTE(review): both branches keep the SAME half (rows[:len/2]);
        # presumably one side should keep rows[int(len/2):] — confirm
        # against the reference GALE implementation.
        if eastLoss < westLoss:
            to_be_mutated = leaf.table.rows[:int(len(leaf.table.rows)/2)]
        else:
            to_be_mutated = leaf.table.rows[:int(len(leaf.table.rows)/2)]
        to_be_mutated_jmoo = []
        for row in to_be_mutated:
            # Wrap surviving rows as jmoo individuals, keeping objective
            # scores only for rows that were actually evaluated.
            if row.evaluated:
                to_be_mutated_jmoo.append(jmoo_individual(problem, [x for x in row.cells[:len(problem.decisions)]],
                                                          [x for x in row.cells[len(problem.decisions):]]))
            else:
                to_be_mutated_jmoo.append(jmoo_individual(problem, [x for x in row.cells[:len(problem.decisions)]], None))
        # Refill the leaf back to its original size with SBX/PM variants.
        for i in xrange(initial_size - len(to_be_mutated)):
            index = i%len(to_be_mutated_jmoo)
            mutant = variation(problem, index, to_be_mutated_jmoo, configuration)
            to_be_mutated_jmoo.append(mutant)
        # Guarantee that at least a few members per leaf are evaluated.
        # NOTE(review): the random index may hit an already-evaluated member,
        # yet the counter still advances — confirm this is intended.
        members_evaluated = sum([1 for i in to_be_mutated_jmoo if i.valid])
        while members_evaluated <= 2:
            from random import randint
            index = randint(0, len(to_be_mutated_jmoo)-1)
            to_be_mutated_jmoo[index].evaluate()
            numEval += 1
            members_evaluated += 1
            print "> ", members_evaluated
        population += to_be_mutated_jmoo
    return population, numEval
def gale_64_Regen(problem, unusedslot, mutants, configuration):
    """Top the mutant pool back up to Population_Size with fresh random,
    unevaluated individuals.  Returns (combined population, 0 evals)."""
    shortfall = configuration["Universal"]["Population_Size"] - len(mutants)
    fillers = [jmoo_individual(problem, problem.generateInput(), None)
               for _ in range(shortfall)]
    return mutants + fillers, 0
|
5,491 | 74de0da708c7eb792dea15afb23713d9d71af520 | #!/usr/bin/env python3
# Created by: Khang Le
# Created on: Dec 2019
# This program uses lists and rotation
def rotation(list_of_number, ratating_time):
    """Return a copy of list_of_number rotated left by ratating_time positions.

    Args:
        list_of_number: the list to rotate (not modified).
        ratating_time: number of left-rotation steps; any non-negative int
            (reduced modulo the list length).

    Returns:
        A new rotated list; [] for an empty input.
    """
    # BUG FIX: the original began with a dead `numbers = list_of_number[0]`
    # assignment, which also crashed (IndexError) on an empty list.
    if not list_of_number:
        return []
    shift = ratating_time % len(list_of_number)
    return list_of_number[shift:] + list_of_number[:shift]
def main():
    """Read a count, a rotation amount, and that many integers from stdin,
    then print the rotated list."""
    lst = []
    # number of elements as input
    user_input = int(input("Enter number of elements : "))
    rotating_time = int(input("Enter how many times you want to rotate: "))
    print("The numbers are:")
    for i in range(0, user_input):
        ele = int(input())
        lst.append(ele)  # adding the element
    numbers = rotation(lst, rotating_time)
    print("Rotated by {0}: {1}".format(rotating_time, numbers))


if __name__ == "__main__":
    main()
|
5,492 | 7b2ca3db44c5f71c2975bd8af701dafca3b3d081 | import math
import numpy as np
class incStat:
    """Incremental (streaming) statistics over one value stream with
    exponential time decay 2^(-Lambda * dt).

    With isTypeJitter=True the tracked value becomes the inter-arrival
    time between consecutive inserts instead of v itself.
    """

    def __init__(self, Lambda, isTypeJitter=False):  # timestamp is creation time
        self.CF1 = 0  # decayed linear sum
        self.CF2 = 0  # decayed sum of squares
        self.w = 0  # decayed weight (effective sample count)
        self.isTypeJitter = isTypeJitter
        self.Lambda = Lambda  # Decay Factor
        self.lastTimestamp = np.nan
        # Cached statistics; nan means "recompute on next access".
        self.cur_mean = np.nan
        self.cur_var = np.nan
        self.cur_std = np.nan

    def insert(self, v, t=0):  # v is a scalar, t is v's arrival timestamp
        """Decay the sums to time t, then fold scalar v into them."""
        if self.isTypeJitter:
            # Track the gap since the previous insert instead of v.
            if not math.isnan(self.lastTimestamp):
                v = t - self.lastTimestamp
            else:
                v = 0
        self.processDecay(t)
        # update with v
        self.CF1 = self.CF1 + v
        self.CF2 = self.CF2 + math.pow(v, 2)
        self.w = self.w + 1
        # Invalidate cached stats; they are recomputed lazily.
        self.cur_mean = np.nan
        self.cur_var = np.nan
        self.cur_std = np.nan

    def processDecay(self, timestamp):
        """Decay all sums by 2^(-Lambda*dt) and advance lastTimestamp.
        Returns the decay factor applied (1 on the first call)."""
        factor = 1
        if not math.isnan(self.lastTimestamp):
            timeDiff = timestamp - self.lastTimestamp
            factor = math.pow(2, (-self.Lambda * timeDiff))
            self.CF1 = self.CF1 * factor
            self.CF2 = self.CF2 * factor
            self.w = self.w * factor
        self.lastTimestamp = timestamp
        return factor

    def weight(self):
        return self.w

    def mean(self):
        if math.isnan(self.cur_mean):  # calculate it only once when necessary
            self.cur_mean = self.CF1 / self.w
        return self.cur_mean

    def var(self):
        if math.isnan(self.cur_var):  # calculate it only once when necessary
            # abs() guards against tiny negative values from decay rounding.
            self.cur_var = abs(self.CF2 / self.w - math.pow(self.mean(), 2))
        return self.cur_var

    def std(self):
        if math.isnan(self.cur_std):  # calculate it only once when necessary
            self.cur_std = math.sqrt(self.var())
        return self.cur_std

    def allstats(self):
        """Recompute and return (weight, mean, variance)."""
        self.cur_mean = self.CF1 / self.w
        self.cur_var = abs(self.CF2 / self.w - math.pow(self.cur_mean, 2))
        return self.w, self.cur_mean, self.cur_var

    def getHeaders(self):
        return "weight", "mean", "variance"


# like incStat, but maintains stats between two streams
class incStat_2D(incStat):
    """Like incStat, but additionally maintains a decayed residual-product
    sum (CF3) so that covariance/correlation with a second, paired stream
    can be approximated (hold-and-wait model).

    Throughout this class the paired stream is passed as a ONE-ELEMENT
    list `istat_ref` wrapping the other incStat_2D.
    NOTE(review): CF3 is only created lazily by processDecay's decay
    branch; presumably the caller assigns/shares the [0] list before the
    first insert2D — confirm against the caller.
    """

    def __init__(self, Lambda):  # timestamp is creation time
        self.CF1 = 0  # linear sum
        self.CF2 = 0  # sum of squares
        self.CF3 = None  # one-element list holding the sum of residual products (A-uA)(B-uB)
        self.w = 0  # weight
        self.Lambda = Lambda  # Decay Factor
        self.lastTimestamp = np.nan
        self.cur_mean = np.nan
        self.cur_var = np.nan
        self.cur_std = np.nan
        self.cur_cov = np.nan
        self.last_residule = 0  # v - mean at the time of the last insert

    # other_incS_decay is the decay factor of the other incstat
    def insert2D(self, v, t, other_incS_lastRes, other_incS_decay=1):
        """Insert v at time t and update the covariance sum using the other
        stream's last residual, scaled by that stream's decay factor."""
        self.processDecay(t)
        # update with v
        self.CF1 = self.CF1 + v
        self.CF2 = self.CF2 + math.pow(v, 2)
        self.w = self.w + 1
        self.cur_mean = np.nan  # force recalculation if called
        self.cur_var = np.nan
        self.cur_std = np.nan
        self.cur_cov = np.nan
        self.last_residule = v - self.mean()
        self.CF3[0] = self.CF3[0] + self.last_residule * other_incS_lastRes * other_incS_decay

    def processDecay(self, timestamp):
        """As incStat.processDecay, but also decays the shared CF3 sum."""
        factor = 1
        # check for decay
        if not math.isnan(self.lastTimestamp):
            timeDiff = timestamp - self.lastTimestamp
            factor = math.pow(2, (-self.Lambda * timeDiff))
            self.CF1 = self.CF1 * factor
            self.CF2 = self.CF2 * factor
            if self.CF3 is None:  # idiom fix: identity check instead of == None
                self.CF3 = [0]
            self.CF3[0] = self.CF3[0] * factor
            self.w = self.w * factor
        self.lastTimestamp = timestamp
        return factor

    def radius(self, istat_ref):  # the radius of two stats
        return math.sqrt(math.pow(self.var(), 2) + math.pow(istat_ref[0].var(), 2))

    def magnitude(self, istat_ref):  # the magnitude of two stats
        return math.sqrt(math.pow(self.mean(), 2) + math.pow(istat_ref[0].mean(), 2))

    # covariance approximation using a hold-and-wait model
    def cov(self, istat_ref):  # assumes 'self.lastTimestamp' is the current time
        if math.isnan(self.cur_cov):
            self.cur_cov = self.CF3[0] / ((self.w + istat_ref[0].w) / 2)
        return self.cur_cov

    # Pearson corl. coef (using a hold-and-wait model)
    def p_cc(self, istat_ref):  # assumes 'self.lastTimestamp' is the current time
        ss = self.std() * istat_ref[0].std()
        if ss != 0:
            # BUG FIX: was self.cov(istat_ref[0]) — cov() expects the
            # one-element list wrapper and indexes [0] itself, so passing
            # the unwrapped incStat raised TypeError whenever ss != 0.
            return self.cov(istat_ref) / ss
        else:
            return 0

    # calculates and pulls all stats
    def allstats2D(self, istat_ref):
        """Recompute everything and return
        (weight, mean, std, magnitude, radius, covariance, pcc)."""
        self.cur_mean = self.CF1 / self.w
        self.cur_var = abs(self.CF2 / self.w - math.pow(self.cur_mean, 2))
        self.cur_std = math.sqrt(self.cur_var)
        if istat_ref[0].w != 0:
            cov = self.CF3[0] / ((self.w + istat_ref[0].w) / 2)
            magnitude = math.sqrt(math.pow(self.cur_mean, 2) + math.pow(istat_ref[0].mean(), 2))
            radius = math.sqrt(math.pow(self.cur_var, 2) + math.pow(istat_ref[0].var(), 2))
            ss = self.cur_std * istat_ref[0].std()
            pcc = 0
            if ss != 0:
                pcc = cov / ss
        else:
            # Degenerate case: no paired stream data yet.
            magnitude = self.cur_mean
            radius = self.cur_var
            cov = 0
            pcc = 0
        return self.w, self.cur_mean, self.cur_std, magnitude, radius, cov, pcc

    def getHeaders(self):
        return "weight", "mean", "std", "magnitude", "radius", "covariance", "pcc"
# A set of incremental statistics windows over a 1-dimensional time-series
class windowed_incStat:
    """Maintains one incStat per decay factor in L over the same stream."""

    # Each lambda in L fixes one incStat's decay window size (factor).
    def __init__(self, L, isTypeJitter=False):
        self.L = sorted(L, reverse=True)  # largest lambda to smallest
        self.incStats = [incStat(lam, isTypeJitter) for lam in self.L]

    # Returns the weight, mean, and variance of each window, concatenated.
    def getStats(self):
        allstats = np.zeros(len(self.L) * 3)  # 3 stats per lambda
        for idx, stat in enumerate(self.incStats):
            allstats[idx * 3:idx * 3 + 3] = stat.allstats()
        return allstats

    def getHeaders(self):
        headers = []
        for lam, stat in zip(self.L, self.incStats):
            headers.extend("L" + str(lam) + "_" + h for h in stat.getHeaders())
        return headers

    # Feed observation 'val' (arriving at 'timestamp') into every window.
    def updateStats(self, val, timestamp):
        for stat in self.incStats:
            stat.insert(val, timestamp)

    # Update first, then return the concatenated (weight, mean, variance) stats.
    def updateAndGetStats(self, val, timestamp):
        self.updateStats(val, timestamp)
        return self.getStats()

    def getMaxW(self, t):
        # Largest decayed weight over all windows, after decaying to time t.
        mx = 0
        for stat in self.incStats:
            stat.processDecay(t)
            if stat.w > mx:
                mx = stat.w
        return mx
# A set of incremental statistics windows over a 2-dimensional time-series
class windowed_incStat_2D:
    """One incStat_2D per decay factor in L, cross-linked (via
    join_with_winStat) to the windowed_incStat_2D of the parallel stream."""

    # Each lambda in L fixes one incStat_2D's decay window size (factor).
    def __init__(self, L):
        self.L = sorted(L, reverse=True)  # largest lambda to smallest
        self.incStats = [incStat_2D(lam) for lam in self.L]
        # Mutable [] reference to the windowed_incStat_2D of the other stream.
        self.other_winStat = None

    # Returns weight, mean, std, magnitude, radius, covariance and pcc per window.
    def getStats(self):
        allstats = np.zeros(len(self.L) * 7)  # 7 stats per lambda
        for idx, stat in enumerate(self.incStats):
            allstats[idx * 7:idx * 7 + 7] = stat.allstats2D([self.other_winStat[0].incStats[idx]])
        return allstats

    def getHeaders(self):
        headers = []
        for lam, stat in zip(self.L, self.incStats):
            headers.extend("L" + str(lam) + "_" + h for h in stat.getHeaders())
        return headers

    # Feed observation 'val' (arriving at 'timestamp') into every window,
    # decaying the paired window first so the residual product is consistent.
    def updateStats(self, val, timestamp):
        for mine, other in zip(self.incStats, self.other_winStat[0].incStats):
            decay = other.processDecay(timestamp)
            mine.insert2D(val, timestamp, other.last_residule, decay)

    # Update first, then return the concatenated 2D stats.
    def updateAndGetStats(self, val, timestamp):
        self.updateStats(val, timestamp)
        return self.getStats()

    # Joins two windowed_incStats (e.g. rx and tx channels) together.
    # other_winStat must be a mutable [] wrapper.
    def join_with_winStat(self, other_winStat):  # protect with mutexes!
        self.other_winStat = other_winStat
        other_winStat[0].other_winStat = [self]
        for mine, other in zip(self.incStats, other_winStat[0].incStats):
            shared = [0]  # one residual-product cell shared by both directions
            mine.CF3 = shared
            other.CF3 = shared

    def getMaxW(self, t):
        # The last window has the smallest lambda, i.e. the slowest decay,
        # so it always carries the largest weight.
        slowest = self.incStats[len(self.incStats) - 1]
        slowest.processDecay(t)
        return slowest.w
class incStatHT:
    """Hash table of windowed_incStats keyed by stream identifier.

    Minimizes the work needed to increment and retrieve statistics on
    time-series in an online manner. Streams are assumed NOT to be sampled
    at the same time (i.e. fused), so each stream is updated individually
    with each corresponding value. Supports 1D or 2D series (for 1D, use an
    empty string for the second key); each series is monitored over several
    decay windows, one per lambda: 2^(-lambda*deltaT).
    """

    def __init__(self):
        self.HT = dict()

    def updateGet_1D(self, key, val, timestamp, L, isTypeJitter=False):  # 1D keeps only mean/variance
        entry = self.HT.get(key)
        if entry is None:
            # First sighting of this stream: create its windowed stats.
            entry = [windowed_incStat(L, isTypeJitter)]
            self.HT[key] = entry
        return entry[0].updateAndGetStats(val, timestamp)

    def getHeaders_1D(self, L):
        # A throwaway instance supplies the header names for these lambdas.
        return windowed_incStat(L).getHeaders()
class incStatHT_2D(incStatHT):
    """incStatHT specialised for paired (src/dst) streams."""

    def updateGet_2D(self, key1, key2, val, timestamp, L):  # src and dst should be strings
        entry = self.HT.get(key1 + key2)  # windowed incremental stat object
        if entry is None:
            entry = self.create_2D_entry(key1, key2, L)
        elif hasattr(entry[0], 'other_winStat') and entry[0].other_winStat == []:
            # Entry exists but was never paired with its reverse stream.
            self.create_1D_entry(key1, key2, L, entry)
        return entry[0].updateAndGetStats(val, timestamp)

    def create_1D_entry(self, key1, key2, L, wis):  # protect with mutexes!
        """Create only the reverse (key2->key1) entry and link it to wis."""
        reverse_entry = [windowed_incStat_2D(L)]
        wis[0].join_with_winStat(reverse_entry)  # connect net stats
        self.HT[key2 + key1] = reverse_entry
        return reverse_entry

    def create_2D_entry(self, key1, key2, L):  # protect with mutexes!
        """Create and cross-link both directions of a new stream pair."""
        forward_entry = [windowed_incStat_2D(L)]
        reverse_entry = [windowed_incStat_2D(L)]
        forward_entry[0].join_with_winStat(reverse_entry)  # connect net stats
        self.HT[key1 + key2] = forward_entry
        self.HT[key2 + key1] = reverse_entry
        return forward_entry

    def getHeaders_2D(self, L):
        # A throwaway instance supplies the header names for these lambdas.
        return windowed_incStat_2D(L).getHeaders()
|
5,493 | e7f511b97f316157a768203afe9f36ea834ebb6c | import requests
import urllib.request
from utilities.read_write_utilities import read_set,write_to_csv
import time
from bs4 import BeautifulSoup
import pickledb
import json
import glob
import csv
# Flatten every per-drug JSON scrape file into one CSV of selected fields.
drugs = read_set('/Users/sandeep.dey/Downloads/2020-02-06_scrape/drugs')
print(drugs)

# Only the descriptive fields are exported; pricing/scrape metadata is skipped.
# fields = ["equiv_name","coupon_network","npi","default_quantity","price_type","scrape_date","price","root","dosage",
#           "generic","drug_id","date","form_name","ncpdp","pharmacy","geo","slug","quantity"]
fields = ["equiv_name","default_quantity","root","dosage","generic","drug_id","form_name","slug"]

output_records = []
for drug in drugs:
    with open('/Users/sandeep.dey/Downloads/2020-02-06_scrape/%s' % drug) as json_file:
        records = json.load(json_file)
    # Missing fields become empty strings so every row has the same columns.
    output_records.extend(
        {field: str(record[field]) if field in record else '' for field in fields}
        for record in records
    )

write_to_csv('/Users/sandeep.dey/Downloads/2020-02-06_scrape/units_of_use_data.csv', output_records)
# filename = '/Users/sdey/Downloads/privia_utilization_data.csv'
# output_filename = '/Users/sdey/Downloads/privia_utilization_raw_data.csv'
#
# with open(filename, 'r') as input_file:
# with open(output_filename, 'w') as output_file:
# reader = csv.DictReader(input_file)
# writer = csv.DictWriter(output_file, fieldnames=reader.fieldnames)
# writer.writeheader()
# number_of_lines = 0
# for row in reader:
# row['Medication Name'] = row['Medication Name'].replace(',',':')
# writer.writerow(row)
# number_of_lines+=1
# if number_of_lines % 10000 == 0 :
# print('%d lines'%number_of_lines)
#
# filename = '/Users/sandeep.dey/Downloads/pricing_nadac_cost_20190515.csv'
# output_filename = '/Users/sandeep.dey/Downloads/pricing_nadac_cost_20190515_output.csv'
#
# with open(filename, 'r') as input_file:
# with open(output_filename, 'w') as output_file:
# reader = csv.DictReader(input_file)
# fieldnames = ['ndc','nadac_per_unit','effective_date','pricing_unit','otc',
# 'explanation_code','classification_for_rate_setting','corresponding_generic_drug_nadac_per_unit',
# 'corresponding_generic_drug_effective_date','as_of_date']
# writer = csv.DictWriter(output_file, fieldnames=fieldnames)
# writer.writeheader()
# number_of_lines = 0
# for row in reader:
# row['explanation_code'] = row['explanation_code'].replace('\"','').replace(',','').replace(' ','')
# row.pop('ndc_description')
# row.pop('pharmacy_type_indicator')
# writer.writerow(row)
# number_of_lines+=1
# if number_of_lines % 10000 == 0 :
# print('%d lines'%number_of_lines)
|
5,494 | 77531233219b76be51aed86536e4d92b8dc5ccad | #!/usr/bin/env python3
# Script qui permet de couper au début ou à la fin d'un fichier audio (.wav)
# un silence ou un passage musical à partir d'un fichier de transcription correspondant.
# Supporte uniquement l'extension audio .wav.
# Supporte les formats de transcriptions suivants :
# - .stm
# - .mlfmanu
# Usage : python cutAudioFile.py audio.wav transcriptFile.* audio_trimmed.wav
import sys
from os import path # Pour couper l'extension de fichier
from subprocess import check_output, CalledProcessError, STDOUT # Pour lancer sox
# Pour parser les arguments
from argparse import ArgumentParser, RawTextHelpFormatter, ArgumentTypeError
import sys
import utils
# Finds the cut's begin and end times in the transcription file (.stm format).
# Returns the (begin, end) times of the cut, in seconds.
def searchBeginAndEndStm(transFileName):
    """Parse an .stm transcript and return (begin, end) cut times in seconds.

    Lines are of the form: <file> <channel> <speaker> <begin> <end> <text...>.
    The cut starts where the leading silence/music ("inter_segment_gap")
    ends, and stops at the end of the last real speech line.
    """
    fileName = path.splitext(path.basename(transFileName))[0]  # stm name without extension
    # Honour an explicit encoding declaration if one sits next to the transcript.
    encod = None
    encodingFile = path.dirname(transFileName) + "/encoding.txt"
    if path.isfile(encodingFile):
        with open(encodingFile, 'r') as e:
            # strip(): the trailing newline would otherwise make the
            # encoding name invalid and open() raise LookupError.
            encod = e.readline().strip()
    # encoding=None is equivalent to omitting the argument entirely.
    with open(transFileName, 'r', encoding=encod) as f:
        # Skip ahead until the first transcription line (starting with the file name).
        currentLine = f.readline()
        while (currentLine.split()[0:1] != [fileName]):
            currentLine = f.readline()
        # If the first line is a silence/music gap, start at its end time;
        # otherwise start at the line's own begin time.
        if (currentLine.split()[2] == "inter_segment_gap"):
            debut = float(currentLine.split()[4])
        else:
            debut = float(currentLine.split()[3])
        # Scan to EOF, remembering the last "real" (non-gap) line.
        nextLine = f.readline()
        while (nextLine != ''):
            parts = nextLine.split()
            # len(parts) >= 5 guards against blank/truncated trailing lines,
            # which crashed the original with IndexError.
            if (len(parts) >= 5 and parts[0] == fileName and parts[2] != "inter_segment_gap"):
                currentLine = nextLine
            nextLine = f.readline()
    # End time of the last kept sentence.
    fin = float(currentLine.split()[4])
    return (debut, fin)
# Finds the cut's begin and end times in the transcription file (.mlfmanu format).
# Returns the (begin, end) times of the cut, in seconds.
def searchBeginAndEndMlfmanu(transFileName):
    """Parse a .mlfmanu transcript and return (begin, end) cut times in seconds.

    Lines are "<start> <end> <label>" with timestamps in 100 ns units
    (hence the 1e7 divisor); '#' lines are comments, '"' lines are label-file
    headers, and a line starting with '.' terminates the segment.
    """
    # 'with' guarantees the handle is closed even if a malformed line raises.
    with open(transFileName, 'r') as f:
        # Skip comments, headers and leading silences ("sil").
        currentLine = f.readline()
        while (currentLine[0] == "#" or currentLine[0] == "\"" or currentLine.split()[2] == "sil"):
            currentLine = f.readline()
        debut = float(currentLine.split()[0]) / 10000000  # convert to seconds
        # Scan until the '.' terminator, remembering the last non-silence line.
        nextLine = f.readline()
        while (nextLine[0] != '.'):
            if (nextLine.split()[2] != "sil"):
                currentLine = nextLine
            nextLine = f.readline()
        fin = float(currentLine.split()[1]) / 10000000  # convert to seconds
    return (debut, fin)
# Cuts the audio file from cutBegin to cutEnd (in seconds).
def cutAudioFile(audioFileName, cutFileName, cutBegin, cutEnd):
    """Trim audioFileName to [cutBegin, cutEnd] seconds into cutFileName using sox.

    Exits the process (status 1) if sox fails.
    """
    duration = cutEnd - cutBegin
    # Pass an argument list with shell=False (the default): file names
    # containing spaces or shell metacharacters are forwarded verbatim
    # instead of being interpreted by a shell (the original concatenated a
    # shell=True command string, which was both fragile and injectable).
    command = ["sox", audioFileName, cutFileName,
               "trim", str(cutBegin), str(duration)]
    try:
        check_output(command, stderr=STDOUT)
    except CalledProcessError as exc:
        utils.eprintCalledProcessError(exc, "à SOX")
        sys.exit(1)
def main(audioFileName, transFileName, outputFileName, beginningTime=None, endTime=None):
    """Compute the cut window from the transcript, clamp it with the optional
    -b/-e bounds, then trim the audio file.

    Raises ValueError for an unsupported transcript extension.
    """
    extension = path.splitext(transFileName)[1]
    if (extension == ".stm"):
        (debut, fin) = searchBeginAndEndStm(transFileName)
    elif (extension == ".mlfmanu"):
        (debut, fin) = searchBeginAndEndMlfmanu(transFileName)
    else:
        # The original fell through with debut/fin unbound here, crashing
        # later with a confusing NameError; fail with a clear message instead.
        raise ValueError("Unsupported transcript extension: " + extension)
    # Keep the most restrictive bounds between transcript and CLI options.
    if (beginningTime is not None and beginningTime > debut):
        debut = beginningTime
    if (endTime is not None and endTime < fin):
        fin = endTime
    cutAudioFile(audioFileName, outputFileName, debut, fin)  # trim the audio file
def parseArgs():
    # Build the CLI: three positional arguments (audio, transcript, output)
    # plus optional -b/-e clamps validated as positive numbers.
    # Help strings are user-facing French text and are left untouched.
    parser = ArgumentParser(description="Programme python permettant de couper un fichier audio en retirant un silence ou un passage musical au début ou à la fin du fichier à l'aide de son fichier de transcription.\n"
                            "Si les options -beginning ou -end sont spécifiées, le temps le plus limitant entre le contenu de la transcription et l'option sera utilisé.", formatter_class=RawTextHelpFormatter)
    parser.add_argument("audioFileName", metavar="audioFile",
                        help="fichier audio (extension wav uniquement).",
                        type=utils.isValidFile)
    parser.add_argument("transFileName", metavar="transcriptFile",
                        help="fichier de transcription (extensions stm et mlfmanu supportées).",
                        type=utils.isValidTranscriptFile)
    parser.add_argument("outputFileName", metavar="outputFile",
                        help="nom du fichier de sortie (coupé).")
    parser.add_argument("-b", "--beginning", dest="beginningTime", required=False,
                        help="le temps de début de la coupe.", metavar="beginningTime",
                        type=utils.isPositiveNumber)
    parser.add_argument("-e", "--end", dest="endTime", required=False,
                        help="le temps de fin de la coupe.", metavar="endTime",
                        type=utils.isPositiveNumber)
    args = parser.parse_args()
    # Returned in the exact positional order expected by main().
    return (args.audioFileName, args.transFileName, args.outputFileName, args.beginningTime, args.endTime)
if __name__ == '__main__':
    args = parseArgs()  # parse the command-line arguments
    main(*args)  # unpack the tuple into main()'s parameters
|
def ehcf(a, b):
    """Extended Euclidean algorithm.

    Returns (p, q, h) such that p*a + q*b == h, where h = gcd(a, b)
    (for positive a, b).
    """
    # Bug fix: the original computed the quotient as math.floor(h1/h2),
    # i.e. via float division, which silently loses precision once the
    # operands exceed 2**53; integer floor division (//) is exact.
    p1, q1, h1 = 1, 0, a
    p2, q2, h2 = 0, 1, b
    while h2 != 0:
        r = h1 // h2
        # Shift the window: (row1, row2) <- (row2, row1 - r*row2).
        p1, q1, h1, p2, q2, h2 = p2, q2, h2, p1 - r * p2, q1 - r * q2, h1 - r * h2
    return (p1, q1, h1)
def findinverse(k, p):
    """Return the modular inverse of k modulo p (valid when gcd(k, p) == 1)."""
    bezout_coeff = ehcf(k, p)[0]  # coefficient of k in p*k + q*p = gcd
    return bezout_coeff % p
5,496 | 1c66ccb80383feeee96b3fb492ff63be1a67a796 | import pytest
from django_swagger_utils.drf_server.exceptions import NotFound
from unittest.mock import create_autospec
from content_management_portal.constants.enums import TextType
from content_management_portal.interactors.storages.storage_interface \
import StorageInterface
from content_management_portal.interactors.presenters. \
question_presenter_interface import PresenterInterface
from content_management_portal.interactors.question_creation_interactor \
import QuestionCreateInteractor
from content_management_portal.interactors.question_updation_interactor \
import QuestionUpdateInteractor
from content_management_portal.interactors.question_deletion_interactor \
import QuestionDeletionInteractor
class TestQuestionInteractor:
    """Unit tests for the question create/update/delete interactors.

    Storage and presenter are replaced with autospec'd mocks, so only the
    interactor -> storage/presenter call contracts are verified.
    'questiondto' is presumably a pytest fixture from conftest - confirm.
    """

    def test_question_create(self, questiondto):
        # Arrange
        user_id = 1
        short_title = "hello"
        content_type = "HTML"
        content = "hi"
        storage = create_autospec(StorageInterface)
        presenter = create_autospec(PresenterInterface)
        interactor = QuestionCreateInteractor(storage=storage, presenter=presenter)
        # Act
        interactor.question_creation(user_id=user_id, short_title=short_title,
                                     content_type=content_type, content=content)
        # Assert
        storage.question_creation.assert_called_once_with(
            user_id=user_id,
            short_title=short_title,
            content_type=content_type,
            content=content
        )
        # NOTE(review): the line below merely *calls* the mock attribute and
        # asserts nothing; presumably
        # presenter.get_question_dto_response.assert_called_once_with(...)
        # was intended - confirm.
        presenter.get_question_dto_response(questiondto=questiondto)

    def test_question_update(self, questiondto):
        # Arrange
        user_id = 1
        question_id = 1
        short_title = "hello"
        content_type = "HTML"
        content = "hi"
        storage = create_autospec(StorageInterface)
        presenter = create_autospec(PresenterInterface)
        interactor = QuestionUpdateInteractor(storage=storage, presenter=presenter)
        # Act
        interactor.question_updation(user_id=user_id,
                                     short_title=short_title,
                                     content_type=content_type,
                                     content=content,
                                     question_id=question_id
                                     )
        # Assert
        storage.question_updation.assert_called_once_with(
            user_id=user_id,
            short_title=short_title,
            content_type=content_type,
            content=content,
            question_id=question_id
        )
        # NOTE(review): same as above - this records a call on the mock but
        # does not assert that the interactor invoked the presenter.
        presenter.get_question_dto_response(questiondto=questiondto)

    def test_question_deletion(self):
        # Arrange
        question_id = 1
        storage = create_autospec(StorageInterface)
        interactor = QuestionDeletionInteractor(storage=storage)
        # Act
        interactor.question_deletion(question_id=question_id)
        # Assert
        storage.question_deletion.assert_called_once_with(question_id=question_id)
5,497 | 06992263599fe3290c87ec00c6cb8af3748920c8 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# jan 2014 bbb garden shield attempt
# AKA
'''
Sensors:
analog level sensor, pin AIN0
TMP102 i2c temperature sensor, address 0x48
(if add0 is grounded) or 0x49 (if pulled up)
Outputs:
Analog RGB LED strip
I2C display(?)
Pump Activate/Deactivate (GPIO pin)
Some measurements as of mid-March 2014:
Tank can be pumped for 15 minutes without sun exposure to liquid.
Seems like after 10 minutes of pumping, the autosiphon engages, though.
Tank takes about 17 minutes to drain from a 15-minute pump
11 gals in reservoir reads as 0.42 on the adc.read scale from 0 to 1
8 gals in reservoir reads as 0.175 on the adc.read scale from 0 to 1
7 gals in reservoir reads as 0.15 on the adc.read scale from 0 to 1
'''
from __future__ import division
import Adafruit_SSD1306 as ssd
import Adafruit_BBIO.UART as uart
import Image
import ImageDraw
import ImageFont
# import Adafruit_GPIO.PWM as pwm
import Adafruit_BBIO.GPIO as gpio
import Adafruit_BBIO.ADC as adc
# import TMP102 as tmp102
import datetime
from dateutil.tz import tzlocal
import time
import serial
import atexit
from math import log
import requests
import key as k
import logging
# --- Thermistor conversion constants (beta/Steinhart-Hart model) ---
BCOEFFICIENT = 3950 # thermistor beta coefficient
THERMISTORNOMINAL = 10000  # thermistor resistance (ohms) at the nominal temperature
TEMPERATURENOMINAL = 25.0  # nominal temperature (deg C) for THERMISTORNOMINAL
SERIESRESISTOR = 10000  # divider series resistor (ohms)
# a1 = blue and white, which is bed temp
# a2 = white and orange, which is tank temp
interval = 60 # seconds between samples
# --- BeagleBone Black pin assignments ---
greenPin = 'P8_13'
bluePin = 'P9_14'
redPin = 'P8_19'
servoPin = 'P9_16'
tankPin = 'P9_39'  # analog reservoir level sensor (AIN0)
photoPin = 'P9_38'  # photocell
thermistor1 = 'P9_40' # AIN1, bed temp
thermistor2 = 'P9_37' # AIN2, reservoir temp
pumpPin = 'P8_10'
# NOTE(review): RST shares pin 'P8_10' with pumpPin - this looks like a
# conflict between the OLED reset line and the pump relay; confirm wiring.
RST = 'P8_10'  # OLED screen reset pin, not always necessary
readings = {}  # latest sensor sample set, filled by do_sensor_read()
PUMP_INTERVAL = 60 # minutes between pump actuations
PUMP_DURATION = 12 # minutes to run pump
def exit_handler():
    # atexit hook: force the pump relay off and release GPIO/UART resources
    # before the process dies, so the pump can never be left running.
    print 'exiting'
    gpio.output(pumpPin,gpio.LOW)
    gpio.cleanup()
    uart.cleanup()
def do_sensor_read():
    # Sample every sensor once and rebuild the module-level 'readings' dict
    # with keys: bedTemp, tankTemp, tankLevel, photocell.
    # NOTE(review): despite the "read twice due to bbio bug" comments, each
    # channel is in fact read only once here - confirm whether the
    # double-read workaround is still required.
    print 'sensor read'
    global readings
    readings = {}
    # value = ADC.read("AIN1")
    # adc returns value from 0 to 1.
    # use read_raw(pin) to get V values
    # tank = adc.read(tankPin)
    tank = adc.read(tankPin) # have to read twice due to bbio bug
    print 'tank is %s' % tank
    time.sleep(1)
    # photo = adc.read(photoPin) # have to read twice due to bbio bug
    photo = 1.0-adc.read(photoPin) # reverse range so that 0 is darkest
    print 'photo is %s' % photo
    time.sleep(1)
    # temp1 = adc.read_raw(thermistor1)
    temp1 = adc.read_raw(thermistor1)
    time.sleep(1)
    print 'temp1 raw %s' % temp1
    # Bed thermistor sits against a mislabeled 3.73k series resistor, hence
    # the "_special" conversion variant.
    temp1 = convert_thermistor_special(temp1)
    readings['bedTemp'] = temp1
    print 'converted bed_temp is %s' % temp1
    # # do conversion per
    # # http://learn.adafruit.com/thermistor/using-a-thermistor
    # temp2 = adc.read_raw(thermistor2)
    temp2 = adc.read_raw(thermistor2)
    time.sleep(1)
    print 'temp2 raw %s' % temp2
    print temp2
    temp2 = convert_thermistor(temp2)
    readings['tankTemp'] = temp2
    print 'converted reservoir_temp is %s' % temp2
    # do conversion per
    # http://learn.adafruit.com/thermistor/using-a-thermistor
    # tmp36reading = adc.read_raw(tmp36Pin)
    # tmp36reading = adc.read_raw(tmp36Pin) # have to read twice due to bbio bug
    # millivolts = tmp36reading * 1800 # 1.8V reference = 1800 mV
    # temp_c = (millivolts - 500) / 10
    # print temp_c
    # ph_val = get_ph()
    # print 'ph_val was thoght to be %s' % ph_val
    readings['tankLevel'] = tank # tank level
    readings['photocell'] = photo # photocell
def convert_thermistor(raw, series_resistor=10000.0, nominal_resistance=10000.0,
                       nominal_temp_c=25.0, b_coefficient=3950.0):
    """Convert a raw ADC reading (1.8 V reference, millivolt scale) from a
    thermistor voltage divider into degrees Celsius.

    Uses the simplified Steinhart-Hart (beta) equation, per
    http://learn.adafruit.com/thermistor/using-a-thermistor

    The divider constants default to this board's values (10k series
    resistor, 10k @ 25 C thermistor, B = 3950), so existing calls are
    unchanged; pass other values to reuse the conversion for a different
    divider (which is how convert_thermistor_special differs).
    """
    # Divider equation: R_therm = R_series / (V_ref/V_out - 1), V_ref = 1800 mV.
    resistance = series_resistor / ((1800.0 / raw) - 1.0)
    print('Thermistor resistance ')
    print(resistance)
    # Beta equation: 1/T = ln(R/Ro)/B + 1/To (temperatures in Kelvin).
    steinhart = log(resistance / nominal_resistance) / b_coefficient
    steinhart += 1.0 / (nominal_temp_c + 273.15)
    steinhart = 1.0 / steinhart
    steinhart -= 273.15  # convert Kelvin to Celsius
    print('we think converted temperature is %s' % steinhart)
    return steinhart
def convert_thermistor_special(raw, series_resistor=3730.0, nominal_resistance=10000.0,
                               nominal_temp_c=25.0, b_coefficient=3950.0):
    """Convert a raw ADC reading into degrees Celsius for the *bed* thermistor.

    Identical to convert_thermistor except that this channel (a1) sits
    against a 3.73k series resistor - "even though it's a properly-labeled
    resistor!" per the original author - hence the different default.
    Defaults keep existing calls unchanged; see
    http://learn.adafruit.com/thermistor/using-a-thermistor
    """
    # Divider equation: R_therm = R_series / (V_ref/V_out - 1), V_ref = 1800 mV.
    resistance = series_resistor / ((1800.0 / raw) - 1.0)
    print('Thermistor resistance ')
    print(resistance)
    # Beta equation: 1/T = ln(R/Ro)/B + 1/To (temperatures in Kelvin).
    steinhart = log(resistance / nominal_resistance) / b_coefficient
    steinhart += 1.0 / (nominal_temp_c + 273.15)
    steinhart = 1.0 / steinhart
    steinhart -= 273.15  # convert Kelvin to Celsius
    print('we think converted temperature is %s' % steinhart)
    return steinhart
def do_db_update():
    # Push the latest sample set to the Phant endpoint (data.sparkfun.com),
    # authenticated via the private key in the local 'key' module.
    print 'db update'
    global readings
    # print readings
    if len(readings) != 0:
        # data.sparkfun.com is expecting:
        #   bedTemp, photo, tankLevel, tankTemp
        # NOTE(review): the two rounded values below are computed but never
        # used - the payload sends the raw readings instead; confirm whether
        # the rounded values were meant to go into the payload.
        bedTemp = float('{0:.2f}'.format(readings['bedTemp']))
        tankTemp = float('{0:.2f}'.format(readings['tankTemp']))
        payload = {
            'photo':readings['photocell'],
            'tankLevel':readings['tankLevel'],
            'bedTemp':readings['bedTemp'],
            'tankTemp':readings['tankTemp']
            }
        h = {'Phant-Private-Key':k.key['phant_private']}
        r = requests.post(k.key['phant_url'], data=payload, headers=h)
        print 'wrote a result set to the DB'
    else:
        # Sensor read failed this cycle; skip the upload rather than post nulls.
        print 'NULL readings, nothing written to DB'
def get_ph():
    # Request one pH sample over UART2 ('R\r' is the probe's read command).
    # Currently unused (the call in do_sensor_read is commented out).
    # NOTE(review): ser.read() returns a single byte, so 'data' is only the
    # first character of the probe's response - confirm whether a full-line
    # read was intended.
    print 'we are in get_ph'
    uart.setup('UART2')
    ser = serial.Serial(port = '/dev/ttyO2', baudrate=38400)
    print 'opened serial port'
    ser.open()
    ser.write('R\r')
    data = ser.read()
    print 'ph received raw as %s' % data
    ser.close()
    uart.cleanup()
    return data
def do_state_display():
    # Render the four current readings as labeled text on the SSD1306 OLED.
    # NOTE(review): raises KeyError if do_sensor_read() has not populated
    # 'readings' this cycle; the main loop catches and logs the exception -
    # confirm that is acceptable.
    print 'state_display'
    width = disp.width
    height = disp.height
    # 1-bit image buffer matching the display's dimensions.
    image = Image.new('1', (width, height))
    # Get drawing object to draw on image.
    draw = ImageDraw.Draw(image)
    # Load default font.
    # font = ImageFont.load_default()
    # Alternatively load a TTF font.
    # Some other nice fonts to try: http://www.dafont.com/bitmap.php
    font = ImageFont.truetype('Vdj.ttf', 8)
    # Draw a black filled box to clear the image.
    draw.rectangle((0,0,width,height), outline=0, fill=0)
    # Draw some shapes.
    # First define some constants to allow easy resizing of shapes.
    padding = 2
    shape_width = 20
    top = padding
    bottom = height-padding
    # Move left to right keeping track of the current x position for drawing shapes.
    x = padding
    # Labels in the left column, values (truncated to 4 chars) at x+64.
    draw.text((x, top), 'photo: ', font=font, fill=255)
    draw.text((x, top+16), 'tankLevel: ', font=font, fill=255)
    draw.text((x, top+32), 'tankTemp: ', font=font, fill=255)
    draw.text((x, top+48), 'bedTemp: ', font=font, fill=255)
    draw.text((x+64, top), str(readings['photocell'])[:4], font=font, fill=255)
    draw.text((x+64, top+16), str(readings['tankLevel'])[:4], font=font, fill=255)
    draw.text((x+64, top+32), str(readings['tankTemp'])[:4], font=font, fill=255)
    draw.text((x+64, top+48), str(readings['bedTemp'])[:4], font=font, fill=255)
    # Draw an ellipse.
    # draw.ellipse((x, top , x+shape_width, bottom), outline=255, fill=0)
    # x += shape_width+padding
    # Draw a rectangle.
    # draw.rectangle((x, top, x+shape_width, bottom), outline=255, fill=0)
    # x += shape_width+padding
    # Draw a triangle.
    # draw.polygon([(x, bottom), (x+shape_width/2, top), (x+shape_width, bottom)], outline=255, fill=0)
    # x += shape_width+padding
    # Draw an X.
    # draw.line((x, bottom, x+shape_width, top), fill=255)
    # draw.line((x, top, x+shape_width, bottom), fill=255)
    # x += shape_width+padding
    # Display image.
    disp.image(image)
    disp.display()
    # so, what will state display be?
    # I2C display of tank temp?
def do_pump_toggle():
    # Drive the pump relay on a fixed schedule: during waking hours
    # (07:00-22:59) the pump runs for the first PUMP_DURATION minutes of
    # every hour, and is held off otherwise.
    # NOTE(review): PUMP_INTERVAL is never consulted - only the aspirational
    # pseudocode below mentions it; confirm the constant can be dropped.
    print 'pump actuate'
    '''
    this should actually work like:
    if currentMinute mod PUMP_DURATION < PUMP_INTERVAL:
        activate pump
    else:
        turn off pump
    '''
    if (datetime.datetime.today().hour>6 and datetime.datetime.today().hour<23):
        print 'within actuating timeframe'
        # changed this to just pump for the first PUMP_DURATION minutes every hour
        if(datetime.datetime.today().minute <= PUMP_DURATION):
            print 'we are in the first %s minutes of the hour, so pump should be on.' % PUMP_DURATION
            gpio.output(pumpPin,gpio.HIGH)
        else:
            print 'shutting off pump at %s' % datetime.datetime.today().minute
            gpio.output(pumpPin,gpio.LOW)
    else:
        # Quiet hours: pump stays off overnight.
        print 'it is the actuator quiet period, between 11pm and 6am'
        gpio.output(pumpPin,gpio.LOW)
# --- One-time hardware setup, then the forever sampling loop ---
print 'starting sampling at'
print datetime.datetime.now(tzlocal())
logging.basicConfig(filename='example.log',level=logging.DEBUG)
# adc.setup(thermistor1)
# adc.setup(thermistor2)
# adc.setup(photoPin)
adc.setup()
# uart.setup('UART2')
# print 'uart setup'
gpio.setup(pumpPin,gpio.OUT)
# t = tmp102.TMP102()
# SSD1306 OLED on I2C address 0x3D; RST shares a pin with the pump relay
# (see the constants block) - confirm wiring.
disp = ssd.SSD1306_128_64(rst=RST,i2c_address=0x3D)
disp.begin()
disp.clear()
disp.display()
# NOTE
# There is currently a bug in the ADC driver.
# You'll need to read the values twice
# in order to get the latest value.
# pwm.start(greenPin, 10.0, 2000.0)
# pwm.start(redPin, 10.0, 2000.0)
# pwm.start(bluePin, 10.0, 2000.0)
atexit.register(exit_handler)
# Main loop: each subsystem runs inside its own try block so that one
# failure (sensor glitch, network outage, display error) cannot kill the
# whole cycle - the remaining steps still execute.
while True:
    try:
        do_sensor_read()
    except Exception, e:
        print e
        print 'sensor_read error!'
    try:
        do_db_update()
    except Exception, e:
        print e
        print 'do_db_update error!'
    try:
        do_state_display()
        # pass
    except Exception, e:
        print e
        print 'do_state_display error!'
    try:
        do_pump_toggle()
    except Exception, e:
        print e
        print 'do_pump_toggle error!'
    print 'done with cycle, now waiting %s' % datetime.datetime.today()
    time.sleep(interval)
5,498 | d081abf3cd9bc323486772b4f6235fbbc9022099 | '''
@mainpage Rat15S Compiler
@section intro_sec Introduction
This will become a Rat15S compiler. Currently working on Lexical Analyzer.
@author Reza Nikoopour
@author Eric Roe
'''
def main(sourcePath=None, outputPath=None):
    """Entry point of the (in-progress) Rat15S compiler.

    The guard below passes sys.argv[1] and sys.argv[2]; the original
    'def main():' took no parameters, so every invocation raised
    TypeError before Lexer was even constructed. Defaults keep a bare
    main() call working too.

    NOTE(review): Lexer's constructor signature lives in Lib/ - confirm
    whether it should receive sourcePath/outputPath; the original call
    passed nothing, so that is preserved here.
    """
    tokens = Lexer()

if __name__ == '__main__':
    # 'sys' was used below but never imported anywhere in this file,
    # which raised NameError before this fix.
    import sys
    sys.path.append('Lib')
    from lexicalanalyzer import Lexer
    main(sys.argv[1], sys.argv[2])
|
5,499 | 2e6bce05c8ba21aa322e306d2cdb8871531d7341 | import random
OPTIONS = ['rock', 'paper', 'scissors']
def get_human_choice():
    """Show the menu, read a 1-based number from stdin, return the choice."""
    menu = '(1) Rock\n(2) Paper\n(3) Scissors'
    print(menu)
    selection = int(input('Enter the number of your choice: '))
    return OPTIONS[selection - 1]
def get_computer_choice():
    """Return a uniformly random option for the computer player."""
    # randrange over the index space is equivalent to random.choice here.
    return OPTIONS[random.randrange(len(OPTIONS))]
def print_choices(human_choice, computer_choice):
    """Echo both players' choices, title-cased."""
    print('You chose ' + human_choice.title())
    print('The computer chose ' + computer_choice.title())
def eval_game_result(human_choice, computer_choice):
    """Return 'draw', 'human', or 'computer' for one round of the game."""
    # Each option beats exactly one other option.
    beats = {'rock': 'scissors', 'paper': 'rock', 'scissors': 'paper'}
    if human_choice == computer_choice:
        return 'draw'
    return 'human' if beats[human_choice] == computer_choice else 'computer'
def compose_output_message(result, human_choice, computer_choice):
    """Build the end-of-round message for the given result string."""
    if result == 'draw':
        return 'Draw!'
    if result == 'human':
        return f'Yes, {human_choice} beat {computer_choice}!'
    return f'Sorry, {computer_choice} beat {human_choice}'
def print_result(message):
    # Thin wrapper so output handling stays swappable in one place.
    print(message)
# Play one round: gather both choices, show them, evaluate, and report.
human_choice = get_human_choice()
computer_choice = get_computer_choice()
print_choices(human_choice, computer_choice)
game_result = eval_game_result(human_choice, computer_choice)
print_result(compose_output_message(game_result, human_choice, computer_choice))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.