blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2
values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313
values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107
values | src_encoding stringclasses 20
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 4 6.02M | extension stringclasses 78
values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
c7c20591f36d3a78bb501c4b60a1b4b5e79f72dd | 8fe983691e012a4e83b73be1d9aac33e0449593a | /manage.py | 736cedcced36af0ad618f1c4fb2558424400df37 | [] | no_license | damnation14/Blog-website | 3b50379f532cff60cf62fbf5f768b04f55b45e71 | 685f5bfb351a8aae54967e48ebed47ed65b36fec | refs/heads/main | 2023-04-19T23:27:08.138264 | 2021-04-30T18:51:02 | 2021-04-30T18:51:02 | 361,385,935 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 625 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run Django administrative tasks from the command line (manage.py entry point)."""
    # NOTE(review): settings module is 'djang.settings' (no trailing 'o') --
    # presumably the project package really is named 'djang'; confirm it is
    # not a typo for the actual settings package.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'djang.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    # Dispatch to the management command named on the command line
    # (runserver, migrate, ...).
    execute_from_command_line(sys.argv)
if __name__ == '__main__':
    main()
| [
"manoj.kartik14@gmail.com"
] | manoj.kartik14@gmail.com |
cef6bd38f4fad1c3701ee6dd6b153b9d00497fe1 | a8d0af54470f5d0b33596c918562bfb18ffceb27 | /Problem Set 8 - Unique BST/AnuKritiWadhwa.py | a9d3b0e523ad16d3fe3e3e1e86d725ff15f1f517 | [] | no_license | AnuKritiW/git-at-soc | 71d2b1040ae2b2731f863bc81af516ef9855d383 | a570c65eb5ede637d086ee2757467aa1da8fef3e | refs/heads/master | 2021-07-16T13:24:01.671210 | 2020-06-07T14:40:20 | 2020-06-07T14:40:20 | 172,067,770 | 2 | 3 | null | 2020-05-23T11:01:33 | 2019-02-22T13:06:00 | Python | UTF-8 | Python | false | false | 862 | py | class Solution:
def numTrees(self, n: int) -> int:
if n < 3:
return n
lst = [0] * (n+1) #create a list of 0s of length 'n+1'
lst[0:3] = [0, 1, 2]
for i in range(3, n+1):
for j in range(2, i):
lst[i] += lst[j-1]*lst[i-j]
lst[i] += lst[i-1]*2
return lst[n]
def main():
    """Read one integer per stdin line and print numTrees(n) for each line."""
    import io
    import sys
    # Decode the raw byte stream as UTF-8 text, one line at a time.
    stream = io.TextIOWrapper(sys.stdin.buffer, encoding='utf-8')
    solver = Solution()
    for raw in stream:
        n = int(raw.strip('\n'))
        print(str(solver.numTrees(n)))
if __name__ == '__main__':
    main()
"anu.kriti.w@gmail.com"
] | anu.kriti.w@gmail.com |
ed642068a76c4955cfea971258841d394062ea8c | d959f73049a19d20dd9a628175dfe2827c3e84ef | /python3/class_python/class2.py | 078360df9593572560cd4d5f04879cc41431d98b | [
"MIT"
] | permissive | fossabot/experiment_code | 5f1071d45f824e0f41e14bb5f3ffced5146e0d1e | de0fdfc4f6cc61cd1941af8df6e39491fada0e6b | refs/heads/master | 2020-03-21T15:16:18.937619 | 2018-06-26T07:46:26 | 2018-06-26T07:46:26 | 138,703,041 | 0 | 0 | MIT | 2018-06-26T07:46:21 | 2018-06-26T07:46:20 | null | UTF-8 | Python | false | false | 502 | py | # -*- coding: utf-8 -*-
# @Author: Zhiwei Yang
# @Date: 2018-04-03 13:46:22
# @Last Modified by: Zhiwei Yang
# @Last Modified time: 2018-04-03 14:48:41
from class1 import Base
class Checkusebasevar(Base):
    """Experiment: try to reuse a value produced by the Base class in a subclass."""
    def __init__(self):
        # NOTE(review): Base.printme() is invoked on the class, not an
        # instance; unless printme is a static/class method in class1.Base
        # this raises TypeError -- confirm against class1.py.
        self.vc = Base.printme()
    def printtwo(self):
        # print (self.a,self.b)
        print (self.vc)
        # print (self.c)
if __name__ == '__main__':
    # NOTE(review): __init__ above takes no arguments, so this two-argument
    # call raises TypeError -- this looks like deliberately broken probe code.
    bb = Checkusebasevar(1,3)
    bb.printtwo()
    # print (bb.printme())
| [
"tencrance@gmail.com"
] | tencrance@gmail.com |
ab1287140a9c366c6cfb75957e93c3de60460ba6 | 9588bd52bcc9fff2ae52e29ea92f6b0120e4313b | /mysite/polls/models.py | 210450915f6ab67f543bc945504f397997b48149 | [] | no_license | neelparmar17/learn-python | 05a03f4b4d8673bcfc8b64e5e658281e6122efa1 | a5d30f8380ca2535909df9d99382dd13a87f17ec | refs/heads/master | 2021-01-25T13:18:29.249108 | 2018-09-17T05:32:37 | 2018-09-17T05:32:37 | 123,553,114 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 674 | py | import datetime
from django.db import models
from django.utils import timezone
# Create your models here.
class Question(models.Model):
    """A poll question together with the date it was published."""
    question_text = models.CharField(max_length=200)
    pub_date = models.DateTimeField('date published')

    def __str__(self):
        return self.question_text

    def was_published_recently(self):
        """Return True if pub_date falls within the last day and is not in the future.

        Bug fix: the original one-sided check (pub_date >= now - 1 day) also
        returned True for questions dated in the future, so the comparison is
        now bounded on both sides.
        """
        now = timezone.now()
        return now - datetime.timedelta(days=1) <= self.pub_date <= now
class Choice(models.Model):
    """One selectable answer for a Question, with a running vote tally."""
    question = models.ForeignKey(Question, on_delete=models.CASCADE)  # parent poll; choices are deleted with it
    choice_text = models.CharField(max_length=200)
    votes = models.IntegerField(default=0)  # incremented each time this choice is voted for
    def __str__(self):
        return self.choice_text
| [
"neel.parmar@mountblue.io"
] | neel.parmar@mountblue.io |
3a074572647edca905c1104c2e82709c859ebddb | 4050f786f3cc505760e25608d66805e3543835f8 | /the_flyer_15147/urls.py | 141a25667c75334ebfabf7887b5c99cfe55f3ff9 | [] | no_license | crowdbotics-apps/the-flyer-15147 | 6fb0a403286d06c5393d9f58b39f76ad5c538312 | e2f62327110f1200c8d4ebf46f127ce4fe903189 | refs/heads/master | 2022-12-11T02:03:31.153849 | 2020-03-28T02:01:50 | 2020-03-28T02:01:50 | 250,693,069 | 0 | 0 | null | 2022-12-08T05:09:49 | 2020-03-28T01:59:48 | Python | UTF-8 | Python | false | false | 2,055 | py | """the_flyer_15147 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from allauth.account.views import confirm_email
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
# URL routing for the project: app URLConfs, admin, auth, and API docs.
urlpatterns = [
    path("", include("home.urls")),
    path("accounts/", include("allauth.urls")),
    path("api/v1/", include("home.api.v1.urls")),
    path("admin/", admin.site.urls),
    path("users/", include("users.urls", namespace="users")),
    path("rest-auth/", include("rest_auth.urls")),
    # Override email confirm to use allauth's HTML view instead of rest_auth's API view
    path("rest-auth/registration/account-confirm-email/<str:key>/", confirm_email),
    path("rest-auth/registration/", include("rest_auth.registration.urls")),
    path("api/v1/", include("event.api.v1.urls")),
    path("event/", include("event.urls")),
    path("home/", include("home.urls")),
]
# Branding for the Django admin site.
admin.site.site_header = "the flyer"
admin.site.site_title = "the flyer Admin Portal"
admin.site.index_title = "the flyer Admin"
# swagger
# drf_yasg schema view: generates the OpenAPI document served at /api-docs/.
schema_view = get_schema_view(
    openapi.Info(
        title="the flyer API",
        default_version="v1",
        description="API documentation for the flyer App",
    ),
    public=True,
    permission_classes=(permissions.IsAuthenticated,),  # docs require a logged-in user
)
urlpatterns += [
    path("api-docs/", schema_view.with_ui("swagger", cache_timeout=0), name="api_docs")
]
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
c0af08e88913629965f51ba0bb0312a9f360c0a5 | 12246c255155df4af076cc75b5085b187d847fb2 | /supervised_algo/AdaBoost_Classifier.py | df02e8fcc6d018d568658811bd9635fcff3c72d0 | [] | no_license | babakaskari/MachineLearning | e349748fa8fae499faf86ff1ae7e0624fea3d6ff | c948b4b6c889bddbb09d0876a9448324e80a9b06 | refs/heads/master | 2023-03-12T07:50:28.616330 | 2021-02-23T12:25:10 | 2021-02-23T12:25:10 | 315,897,013 | 0 | 0 | null | 2020-11-25T11:02:10 | 2020-11-25T09:57:05 | Python | UTF-8 | Python | false | false | 1,590 | py | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import time
from matplotlib import dates as mpl_dates
import sklearn
from sklearn.metrics import roc_auc_score
from sklearn.semi_supervised import LabelPropagation
from sklearn.feature_extraction.text import CountVectorizer
from sklearn import preprocessing
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn import metrics
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestRegressor
from sklearn.multiclass import OneVsRestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import cross_val_predict
from sklearn.model_selection import cross_validate
from sklearn.neighbors import KNeighborsClassifier
import prepossessed_dataset
import evaluator
from sklearn.metrics import roc_curve
import seaborn as sns
from sklearn import metrics
from xgboost import XGBClassifier
sns.set()
# Load the labelled two-feature split produced by the project's
# preprocessing pipeline (train / test / cross-validation partitions).
dataset = prepossessed_dataset.labeled_two_features()
x_train = dataset["x_train"]
y_train = dataset["y_train"]
x_test = dataset["x_test"]
y_test = dataset["y_test"]
x_cv = dataset["x_cv"]
y_cv = dataset["y_cv"]
# Fixed random_state for reproducible boosting results.
clf = AdaBoostClassifier(n_estimators=100, random_state=0)
clf.fit(x_train, y_train)
# Project helper: prints/collects metrics on all three partitions.
evaluator.evaluate_preds(clf, x_train, y_train, x_test, y_test, x_cv, y_cv)
"babakas76@gmail.com"
] | babakas76@gmail.com |
026c8dcdda379049b5cf134c6075b408057c02e2 | cb06f6c7d6ca495374bbef4cb2bb868067920cbc | /solution/q1_100/question_23.py | f70910675bd0cc4158e88a1961bdca09f9c47e2c | [] | no_license | wentjiang/leetcode-py | 7d9d7fe6e59d225a7c0cdf34e42fec612065b2bd | 5b9b214f26d44fe798a1ce382e0f764a6f23a4ee | refs/heads/master | 2022-10-27T06:40:00.469691 | 2022-10-04T03:41:53 | 2022-10-04T03:41:53 | 210,098,584 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 558 | py | # Definition for singly-linked list.
from typing import List
class ListNode:
    """Singly linked list node (LeetCode convention)."""
    def __init__(self, x):
        self.val = x      # node payload
        self.next = None  # successor node, or None at the tail
class Solution:
    """LeetCode 23: merge k sorted linked lists into one sorted list."""
    def mergeKLists(self, lists: List[ListNode]) -> ListNode:
        """Collect every value, sort once, then rebuild a single linked list."""
        values = []
        for node in lists:
            cursor = node
            while cursor is not None:
                values.append(cursor.val)
                cursor = cursor.next
        # Rebuild behind a dummy head so the loop needs no special first case.
        dummy = ListNode(0)
        tail = dummy
        for value in sorted(values):
            tail.next = ListNode(value)
            tail = tail.next
        return dummy.next
| [
"wentjiang@sina.com"
] | wentjiang@sina.com |
9f496144a2fa17d02324a2573e5c8d6dde020f12 | b5fa5c71bd4013cd504c1e383a10a547ca618f8d | /customers/serializers.py | a987ea8bc0906679d6392052a9274cbcc1df05d4 | [] | no_license | Bsaint95/Digireceipt-backend | c785ff0a986c5a0c8e58bac395078099949578c7 | 005d6587df0fafd8a16e79d520f5b3466c9b494b | refs/heads/master | 2022-11-15T15:50:49.571214 | 2020-06-22T21:13:07 | 2020-06-22T21:13:07 | 274,195,045 | 0 | 0 | null | 2020-06-22T16:56:45 | 2020-06-22T16:56:45 | null | UTF-8 | Python | false | false | 218 | py | from .models import customers
from rest_framework import serializers
class customersSerializer(serializers.ModelSerializer):
    """DRF serializer exposing the public fields of the customers model."""
    class Meta:
        model = customers
        fields = ('id', 'issue_no', 'name', 'email', 'platform')
"bsaintdesigns@gmail.com"
] | bsaintdesigns@gmail.com |
68b314221633cc684b3e98955cfb6fb09c89b25b | 2c3ec46e6a3fe4db76ee576b4e52f69106b3171f | /app/app.py | 4599fe0d5cf71b71ecd255eab9557cebc1628b93 | [] | no_license | deepakn01/cs410proj | 9033596c18aacb65a9cdc14131bed31637388006 | cadcbdf15358394b0c4528714b8de90ff2e7ee39 | refs/heads/master | 2021-07-16T11:23:23.775206 | 2017-10-19T21:59:06 | 2017-10-19T21:59:06 | 107,602,723 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 820 | py | from flask import Flask, request, redirect, render_template
import cgi
import os
app = Flask(__name__)
app.config['DEBUG'] = True  # enables debugger/auto-reload; must be off in production
@app.route("/")
def index():
return render_template('index.html')
#@app.route("/results", methods=['post'])
#def results():
# search_term = request.form['search']
# return render_template('results.html',s_t = search_term)
@app.route("/results", methods=['post'])
def results():
results = ['Result 1','Result 2','Result 3']
sentiments = ['Positive','Neutral','Negetive']
search_term = request.form['search']
head1 = '<h1> Search Results for: '+ search_term +'</h1><br>'
head2 = '<br>'
for i, result in enumerate(results):
head2 = head2+'<h2>' +result+ '    '+ sentiments[i] +'</h2>'
return head1+head2
app.run(debug=True)
| [
"noreply@github.com"
] | noreply@github.com |
d484d90a1d4d8c4e0529ddf16eb67ff40f9a0521 | c6e3fdae79621611defdcafddc504a899781fe8f | /cgi-bin/user_avatar.py | 4febaa86a37fa40db08dd13f82ee85e0c3b6bb43 | [
"MIT"
] | permissive | zhchbin/Yagra | a1a458f1f42ec8eab043b8bbc0ff721897ce5652 | b0422aa5d2972c2352d26518c1da686908f2a015 | refs/heads/master | 2021-01-25T10:29:49.125552 | 2015-03-17T00:54:11 | 2015-03-17T00:54:11 | 31,765,829 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 792 | py | #!/usr/bin/python
from config import USER_AVATAR_DIRECTORY
from util import print_html_and_exit, connect_db
from MySQLdb import Error
import cgi
import mimetypes
form = cgi.FieldStorage()
username = form.getvalue('username')
try:
con = connect_db()
cur = con.cursor()
cur.execute("SELECT avatar FROM User WHERE username=%s", (username,))
data = cur.fetchall()
if len(data) == 0:
print_html_and_exit('Invalid username.')
avatar = data[0][0]
suffix = avatar.split('.')[-1].lower()
avatar = USER_AVATAR_DIRECTORY + '/' + avatar
mimetypes.init()
with open(avatar, 'r') as f:
print "Content-type: " + mimetypes.types_map['.' + suffix] + '\n'
print f.read()
except Error, e:
print_html_and_exit(e[1])
finally:
con.close()
| [
"zhchbin@gmail.com"
] | zhchbin@gmail.com |
664fa09f48389314c2a900ac4b98a88fce679aba | 84a617d20424c0e06847ee5230c579d99e85956c | /pytorch/re/pure/entity_models.py | 36fb6f121c714294e0fbd9672ed9362eec3ddb85 | [] | no_license | ienoob/neo_nlp_project | a6b1fce5ba3413e3d8dba174ffba8b8700d03444 | 1009dc9780b8827813ee3e575e8bfcb03aa5d388 | refs/heads/master | 2023-02-22T20:09:06.366958 | 2022-07-14T02:45:12 | 2022-07-14T02:45:12 | 249,361,823 | 8 | 0 | null | 2023-02-16T03:31:49 | 2020-03-23T07:22:13 | Python | UTF-8 | Python | false | false | 12,864 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) ***
import torch
import torch.nn as nn
from transformers import BertTokenizer, BertPreTrainedModel, BertModel
from transformers import AlbertTokenizer, AlbertPreTrainedModel, AlbertModel
from torch.nn import CrossEntropyLoss
import logging
logger = logging.getLogger('root')
class BertForEntity(BertPreTrainedModel):
    """Span-based NER head on top of BERT.

    Each candidate span is represented by the concatenation of its start-token
    embedding, end-token embedding, and a learned width embedding, then scored
    by a small feed-forward classifier.

    NOTE(review): FeedForward, batched_index_select and F (torch.nn.functional)
    are referenced but not defined/imported in this chunk -- presumably they
    come from elsewhere in the original module; confirm.
    """
    def __init__(self, config, num_ner_labels, head_hidden_dim=150, width_embedding_dim=150, max_span_length=8):
        super().__init__(config)
        self.bert = BertModel(config)
        self.hidden_dropout = nn.Dropout(config.hidden_dropout_prob)
        # One embedding per possible span width 0..max_span_length.
        self.width_embedding = nn.Embedding(max_span_length + 1, width_embedding_dim)
        self.ner_classifier = nn.Sequential(
            FeedForward(input_dim=config.hidden_size * 2 + width_embedding_dim,
                        num_layers=2,
                        hidden_dims=head_hidden_dim,
                        activations=F.relu,
                        dropout=0.2),
            nn.Linear(head_hidden_dim, num_ner_labels)
        )
        self.init_weights()
    def _get_span_embeddings(self, input_ids, spans, token_type_ids=None, attention_mask=None):
        """Encode the batch with BERT and build one vector per candidate span."""
        # NOTE(review): tuple unpacking of the model output assumes an older
        # transformers API (pre-ModelOutput) -- confirm library version.
        sequence_output, pooled_output = self.bert(input_ids=input_ids, token_type_ids=token_type_ids,
                                                   attention_mask=attention_mask)
        sequence_output = self.hidden_dropout(sequence_output)
        """
        spans: [batch_size, num_spans, 3]; 0: left_ned, 1: right_end, 2: width
        spans_mask: (batch_size, num_spans, )
        """
        spans_start = spans[:, :, 0].view(spans.size(0), -1)
        spans_start_embedding = batched_index_select(sequence_output, spans_start)
        spans_end = spans[:, :, 1].view(spans.size(0), -1)
        spans_end_embedding = batched_index_select(sequence_output, spans_end)
        spans_width = spans[:, :, 2].view(spans.size(0), -1)
        spans_width_embedding = self.width_embedding(spans_width)
        # Concatenate embeddings of left/right points and the width embedding
        spans_embedding = torch.cat((spans_start_embedding, spans_end_embedding, spans_width_embedding), dim=-1)
        """
        spans_embedding: (batch_size, num_spans, hidden_size*2+embedding_dim)
        """
        return spans_embedding
    def forward(self, input_ids, spans, spans_mask, spans_ner_label=None, token_type_ids=None, attention_mask=None):
        """Score every span; with labels, also return the summed masked CE loss."""
        spans_embedding = self._get_span_embeddings(input_ids, spans, token_type_ids=token_type_ids,
                                                    attention_mask=attention_mask)
        # Run the classifier layer by layer so intermediate activations are kept.
        ffnn_hidden = []
        hidden = spans_embedding
        for layer in self.ner_classifier:
            hidden = layer(hidden)
            ffnn_hidden.append(hidden)
        logits = ffnn_hidden[-1]
        if spans_ner_label is not None:
            loss_fct = CrossEntropyLoss(reduction='sum')
            if attention_mask is not None:
                # Only padded-out spans are excluded from the loss, via the
                # loss function's ignore_index.
                active_loss = spans_mask.view(-1) == 1
                active_logits = logits.view(-1, logits.shape[-1])
                active_labels = torch.where(
                    active_loss, spans_ner_label.view(-1), torch.tensor(loss_fct.ignore_index).type_as(spans_ner_label)
                )
                loss = loss_fct(active_logits, active_labels)
            else:
                loss = loss_fct(logits.view(-1, logits.shape[-1]), spans_ner_label.view(-1))
            return loss, logits, spans_embedding
        else:
            # NOTE(review): the third element duplicates spans_embedding; the
            # caller unpacks it as 'last_hidden' -- presumably a placeholder.
            return logits, spans_embedding, spans_embedding
class EntityModel():
    """High-level wrapper around the span-based BERT/ALBERT entity model.

    Owns the tokenizer and the underlying HuggingFace model, moves the model
    to CUDA, converts samples (tokens + candidate spans + labels) into padded
    batch tensors, and runs training / inference batches.

    NOTE(review): AlbertForEntity and F (torch.nn.functional) are referenced
    but not defined/imported in this chunk -- presumably defined elsewhere in
    the original module; confirm.
    """
    def __init__(self, args, num_ner_labels):
        super().__init__()
        bert_model_name = args.model
        vocab_name = bert_model_name
        if args.bert_model_dir is not None:
            # Load weights and vocabulary from a local directory instead of the hub.
            bert_model_name = str(args.bert_model_dir) + '/'
            # vocab_name = bert_model_name + 'vocab.txt'
            vocab_name = bert_model_name
        logger.info('Loading BERT model from {}'.format(bert_model_name))
        if args.use_albert:
            self.tokenizer = AlbertTokenizer.from_pretrained(vocab_name)
            self.bert_model = AlbertForEntity.from_pretrained(bert_model_name, num_ner_labels=num_ner_labels,
                                                              max_span_length=args.max_span_length)
        else:
            self.tokenizer = BertTokenizer.from_pretrained(vocab_name)
            self.bert_model = BertForEntity.from_pretrained(bert_model_name, num_ner_labels=num_ner_labels,
                                                            max_span_length=args.max_span_length)
        self._model_device = 'cpu'
        self.move_model_to_cuda()
    def move_model_to_cuda(self):
        """Move the model to GPU (DataParallel if several); exit if CUDA is absent."""
        if not torch.cuda.is_available():
            logger.error('No CUDA found!')
            exit(-1)
        logger.info('Moving to CUDA...')
        self._model_device = 'cuda'
        self.bert_model.cuda()
        logger.info('# GPUs = %d' % (torch.cuda.device_count()))
        if torch.cuda.device_count() > 1:
            self.bert_model = torch.nn.DataParallel(self.bert_model)
    def _get_input_tensors(self, tokens, spans, spans_ner_label):
        """Tokenize one sample into wordpieces and remap span indices.

        Spans arrive in word coordinates; start2idx/end2idx translate them to
        wordpiece coordinates ([CLS] prepended, [SEP] appended).
        """
        start2idx = []
        end2idx = []
        bert_tokens = []
        bert_tokens.append(self.tokenizer.cls_token)
        for token in tokens:
            start2idx.append(len(bert_tokens))
            sub_tokens = self.tokenizer.tokenize(token)
            bert_tokens += sub_tokens
            end2idx.append(len(bert_tokens) - 1)
        bert_tokens.append(self.tokenizer.sep_token)
        indexed_tokens = self.tokenizer.convert_tokens_to_ids(bert_tokens)
        tokens_tensor = torch.tensor([indexed_tokens])
        # Span = [wordpiece start, wordpiece end, width-in-words].
        bert_spans = [[start2idx[span[0]], end2idx[span[1]], span[2]] for span in spans]
        bert_spans_tensor = torch.tensor([bert_spans])
        spans_ner_label_tensor = torch.tensor([spans_ner_label])
        return tokens_tensor, bert_spans_tensor, spans_ner_label_tensor
    def _get_input_tensors_batch(self, samples_list, training=True):
        """Tensorize a list of samples and pad tokens/spans to batch maxima.

        NOTE(review): the 'training' argument is currently unused here.
        """
        tokens_tensor_list = []
        bert_spans_tensor_list = []
        spans_ner_label_tensor_list = []
        sentence_length = []
        max_tokens = 0
        max_spans = 0
        # First pass: tensorize each sample and track the padding targets.
        for sample in samples_list:
            tokens = sample['tokens']
            spans = sample['spans']
            spans_ner_label = sample['spans_label']
            tokens_tensor, bert_spans_tensor, spans_ner_label_tensor = self._get_input_tensors(tokens, spans,
                                                                                              spans_ner_label)
            tokens_tensor_list.append(tokens_tensor)
            bert_spans_tensor_list.append(bert_spans_tensor)
            spans_ner_label_tensor_list.append(spans_ner_label_tensor)
            assert (bert_spans_tensor.shape[1] == spans_ner_label_tensor.shape[1])
            if (tokens_tensor.shape[1] > max_tokens):
                max_tokens = tokens_tensor.shape[1]
            if (bert_spans_tensor.shape[1] > max_spans):
                max_spans = bert_spans_tensor.shape[1]
            sentence_length.append(sample['sent_length'])
        sentence_length = torch.Tensor(sentence_length)
        # apply padding and concatenate tensors
        final_tokens_tensor = None
        final_attention_mask = None
        final_bert_spans_tensor = None
        final_spans_ner_label_tensor = None
        final_spans_mask_tensor = None
        for tokens_tensor, bert_spans_tensor, spans_ner_label_tensor in zip(tokens_tensor_list, bert_spans_tensor_list,
                                                                            spans_ner_label_tensor_list):
            # padding for tokens
            num_tokens = tokens_tensor.shape[1]
            tokens_pad_length = max_tokens - num_tokens
            attention_tensor = torch.full([1, num_tokens], 1, dtype=torch.long)
            if tokens_pad_length > 0:
                pad = torch.full([1, tokens_pad_length], self.tokenizer.pad_token_id, dtype=torch.long)
                tokens_tensor = torch.cat((tokens_tensor, pad), dim=1)
                attention_pad = torch.full([1, tokens_pad_length], 0, dtype=torch.long)
                attention_tensor = torch.cat((attention_tensor, attention_pad), dim=1)
            # padding for spans
            num_spans = bert_spans_tensor.shape[1]
            spans_pad_length = max_spans - num_spans
            spans_mask_tensor = torch.full([1, num_spans], 1, dtype=torch.long)
            if spans_pad_length > 0:
                pad = torch.full([1, spans_pad_length, bert_spans_tensor.shape[2]], 0, dtype=torch.long)
                bert_spans_tensor = torch.cat((bert_spans_tensor, pad), dim=1)
                mask_pad = torch.full([1, spans_pad_length], 0, dtype=torch.long)
                spans_mask_tensor = torch.cat((spans_mask_tensor, mask_pad), dim=1)
                spans_ner_label_tensor = torch.cat((spans_ner_label_tensor, mask_pad), dim=1)
            # update final outputs
            if final_tokens_tensor is None:
                final_tokens_tensor = tokens_tensor
                final_attention_mask = attention_tensor
                final_bert_spans_tensor = bert_spans_tensor
                final_spans_ner_label_tensor = spans_ner_label_tensor
                final_spans_mask_tensor = spans_mask_tensor
            else:
                final_tokens_tensor = torch.cat((final_tokens_tensor, tokens_tensor), dim=0)
                final_attention_mask = torch.cat((final_attention_mask, attention_tensor), dim=0)
                final_bert_spans_tensor = torch.cat((final_bert_spans_tensor, bert_spans_tensor), dim=0)
                final_spans_ner_label_tensor = torch.cat((final_spans_ner_label_tensor, spans_ner_label_tensor), dim=0)
                final_spans_mask_tensor = torch.cat((final_spans_mask_tensor, spans_mask_tensor), dim=0)
        # logger.info(final_tokens_tensor)
        # logger.info(final_attention_mask)
        # logger.info(final_bert_spans_tensor)
        # logger.info(final_bert_spans_tensor.shape)
        # logger.info(final_spans_mask_tensor.shape)
        # logger.info(final_spans_ner_label_tensor.shape)
        return final_tokens_tensor, final_attention_mask, final_bert_spans_tensor, final_spans_mask_tensor, final_spans_ner_label_tensor, sentence_length
    def run_batch(self, samples_list, try_cuda=True, training=True):
        """Run one batch: training returns loss + log-likelihoods; eval returns
        per-sample predictions, raw logits, and last-hidden span vectors.

        NOTE(review): 'try_cuda' is currently unused -- the model device was
        fixed in __init__ / move_model_to_cuda.
        """
        # convert samples to input tensors
        tokens_tensor, attention_mask_tensor, bert_spans_tensor, spans_mask_tensor, spans_ner_label_tensor, sentence_length = self._get_input_tensors_batch(
            samples_list, training)
        output_dict = {
            'ner_loss': 0,
        }
        if training:
            self.bert_model.train()
            ner_loss, ner_logits, spans_embedding = self.bert_model(
                input_ids=tokens_tensor.to(self._model_device),
                spans=bert_spans_tensor.to(self._model_device),
                spans_mask=spans_mask_tensor.to(self._model_device),
                spans_ner_label=spans_ner_label_tensor.to(self._model_device),
                attention_mask=attention_mask_tensor.to(self._model_device),
            )
            output_dict['ner_loss'] = ner_loss.sum()
            output_dict['ner_llh'] = F.log_softmax(ner_logits, dim=-1)
        else:
            self.bert_model.eval()
            with torch.no_grad():
                ner_logits, spans_embedding, last_hidden = self.bert_model(
                    input_ids=tokens_tensor.to(self._model_device),
                    spans=bert_spans_tensor.to(self._model_device),
                    spans_mask=spans_mask_tensor.to(self._model_device),
                    spans_ner_label=None,
                    attention_mask=attention_mask_tensor.to(self._model_device),
                )
            _, predicted_label = ner_logits.max(2)
            predicted_label = predicted_label.cpu().numpy()
            last_hidden = last_hidden.cpu().numpy()
            predicted = []
            pred_prob = []
            hidden = []
            # Strip padding: keep only each sample's real spans.
            for i, sample in enumerate(samples_list):
                ner = []
                prob = []
                lh = []
                for j in range(len(sample['spans'])):
                    ner.append(predicted_label[i][j])
                    # prob.append(F.softmax(ner_logits[i][j], dim=-1).cpu().numpy())
                    prob.append(ner_logits[i][j].cpu().numpy())
                    lh.append(last_hidden[i][j])
                predicted.append(ner)
                pred_prob.append(prob)
                hidden.append(lh)
            output_dict['pred_ner'] = predicted
            output_dict['ner_probs'] = pred_prob
            output_dict['ner_last_hidden'] = hidden
        return output_dict
| [
"mostli@qq.com"
] | mostli@qq.com |
1e445cc5bd290315f961eb98d248e02c72584909 | f4653b4bd7528150a53c8f454658c00d7ea0b836 | /cbm/ipycbm/ipy_view/view_main.py | e19d0de98e84ac96843c63b82e5adf468f855f50 | [
"BSD-3-Clause"
] | permissive | mokasini/cbm | ccb09cb8ab96e6b06b0e13d86ff51124538706f6 | 33bd9c8a0d107f6cdc3343953ae9f7c9bd9272cd | refs/heads/main | 2023-02-24T04:44:07.744715 | 2021-02-01T12:29:38 | 2021-02-01T12:29:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 976 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# This file is part of CbM (https://github.com/ec-jrc/cbm).
# Author : Konstantinos Anastasakis
# Credits : GTCAP Team
# Copyright : 2021 European Commission, Joint Research Centre
# License : 3-Clause BSD
from ipywidgets import Tab
from cbm.ipycbm.utils import help_docs
from cbm.ipycbm.ipy_view import view_settings, view_panel
def view_widget_box():
    """Build the main 'View' Tab widget.

    Tries to include the data-view panel; if building it fails, falls back
    to a two-tab layout containing only Help and Settings.

    Returns:
        ipywidgets.Tab: the assembled tab widget.
    """
    try:
        tab_box = Tab(children=[view_panel.view(), help_docs.widget_box(),
                                view_settings.widget_box()])
        tab_box.set_title(0, 'View Data')
        tab_box.set_title(1, 'Help')
        tab_box.set_title(2, 'Settings')
    except Exception as err:
        # Bug fix: the fallback Tab has only two children, at indices 0 and 1.
        # The original code titled indices 1 and 2, leaving the Help tab
        # untitled and addressing a tab that does not exist.
        tab_box = Tab(children=[help_docs.widget_box(),
                                view_settings.widget_box()])
        tab_box.set_title(0, 'Help')
        tab_box.set_title(1, 'Settings')
        print("Could not show 'View panel'.", err)
    return tab_box
| [
"Konstantinos.ANASTASAKIS@ext.ec.europa.eu"
] | Konstantinos.ANASTASAKIS@ext.ec.europa.eu |
12ec702200149989447216a4b0d095a71ab4c438 | e161773a84fe27b1ce7c05101342e9cec42ec28b | /accounts/views.py | 941232652c991c841fa829bd3be4163b460fd60c | [] | no_license | pyorc/pyorc | 7b7852945717b2c06e4955bf04519de805a16d5c | 6248347228c3dfca6760a27f6c03af95c6834a9d | refs/heads/master | 2021-01-24T22:36:28.996534 | 2016-01-22T05:36:11 | 2016-01-22T05:36:11 | 47,625,086 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,520 | py | # coding=utf-8
from django.contrib.auth.models import User
from django.contrib.auth import authenticate
from rest_framework.viewsets import ViewSet
from rest_framework.response import Response
from accounts.serializers import UserSerializer
from accounts.token import Token
class AccountViewSet(ViewSet):
    """Registration and token-based authentication endpoints."""
    queryset = User.objects.all()
    serializer_class = UserSerializer
    def register(self, request):
        """Create a new user and return a freshly issued token (201), or 400 on invalid data."""
        serializer = UserSerializer(data=request.data)
        if serializer.is_valid():
            user = User.objects.create_user(**serializer.validated_data)
            token = Token.create_token(user.id)
            data = {
                'username': user.username,
                'token': token
            }
            return Response(data, status=201)
        return Response(serializer.errors, status=400)
    def authentication(self, request):
        """Validate credentials and exchange them for a token (200), else 400/401."""
        # Missing username or password
        if 'username' not in request.data or 'password' not in request.data:
            return Response(status=400)
        user = authenticate(
            username=request.data['username'],
            password=request.data['password'])
        # Credential check failed
        if not user:
            return Response(status=401)
        token = Token.get_token_by_user_id(user.id)
        # If a token already exists, delete it and issue a fresh one
        if token:
            Token.delete_token(token)
        token = Token.create_token(user.id)
        return Response(data={'token': token}, status=200)
| [
"zhaojames0707@hotmail.com"
] | zhaojames0707@hotmail.com |
ca3f567bca835be25d44fd12966652681f3d321e | c8e763e2870f2245c1598404faf1c79643c88c53 | /con_functions_resting.py | 1c96579c769bb50066f2b7d41f76246d9ec27bda | [] | no_license | KathrineSN/Bachelor-Project | 0b448db665463f048f34d848a9214a49d801de19 | 995a73680ef1e882a6850a6e55b289a7c10b58a2 | refs/heads/master | 2023-04-23T12:07:09.798464 | 2021-05-06T08:09:17 | 2021-05-06T08:09:17 | 333,376,487 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 25,235 | py |
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 24 14:24:09 2021
@author: kathr
"""
import os
import mne
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from hypyp import prep
from hypyp import analyses
from hypyp import stats
from hypyp import viz
from collections import Counter
from collections import OrderedDict
from itertools import groupby
path="C:\\Users\\kathr\\OneDrive\\Documents\\Github\\Bachelor-Project\\Resting condition"
os.chdir(path)
def coh(epochs_a, epochs_b, pair_name, length, drop_list):
event_dict = {'Resting': 101, 'Uncoupled': 102, 'Coupled': 103, 'Leader': 105,
'Follower': 107, 'Control':108 }
conditions = ['Resting','Coupled', 'Uncoupled', 'Leader', 'Control']
if length == 'long':
epochs_a.crop(tmin = 2, tmax = 23)
#epochs_a.plot(n_epochs = 1, n_channels = 10)
epochs_b.crop(tmin = 2, tmax = 23)
for i in conditions:
# Merging the leader and follower
if i == 'Leader':
epo_a_l = epochs_a['Leader']
epo_b_l = epochs_b['Leader']
epo_a_f = epochs_a['Follower']
epo_b_f = epochs_b['Follower']
epo_a = mne.concatenate_epochs([epo_a_l, epo_b_f])
epo_b = mne.concatenate_epochs([epo_b_l, epo_a_f])
i = 'Leader-Follower'
else:
print(i)
epo_a = epochs_a[i]
epo_b = epochs_b[i]
#Defining frequency bands
freq_bands = {'Theta': [4, 7],
'Alpha' :[8, 13],
'Beta': [15, 25]}
freq_bands = OrderedDict(freq_bands)
sampling_rate = epo_a.info['sfreq']
#Connectivity
#Data and storage
data_inter = np.array([epo_a, epo_b])
#result_intra = []
#Analytic signal per frequency band
complex_signal = analyses.compute_freq_bands(data_inter, sampling_rate,
freq_bands)
result = analyses.compute_sync(complex_signal, mode='coh', epochs_average = True)
#Get inter brain part of the matrix
n_ch = len(epochs_a.info['ch_names'])
theta, alpha, beta = result[:, 0:n_ch, n_ch:2*n_ch]
#print(alpha)
plt.figure()
plt.imshow(theta,cmap=plt.cm.hot)
plt.clim(0,0.1)
plt.colorbar()
plt.show()
plt.figure()
plt.imshow(alpha,cmap=plt.cm.hot)
plt.clim(0,0.1)
plt.colorbar()
plt.show()
plt.figure()
plt.imshow(beta,cmap=plt.cm.hot)
plt.clim(0,0.1)
plt.colorbar()
plt.show()
#theta = abs(theta - np.mean(theta[:])) / np.std(theta[:])
#alpha = abs(alpha - np.mean(alpha[:])) / np.std(alpha[:])
#beta = abs(beta - np.mean(beta[:])) / np.std(beta[:])
print('Range of the connectivities:')
print('Theta max:' + str(np.max(theta)))
print('Theta min:' + str(np.min(theta)))
print('Alpha max:' + str(np.max(alpha)))
print('Alpha min:' + str(np.min(alpha)))
print('Beta max:' + str(np.max(beta)))
print('Beta min:' + str(np.min(beta)))
np.save('con matrices/coh/' + 'coh_' + pair_name + '_theta_' + i + '_' + length, theta)
np.save('con matrices/coh/' + 'coh_' + pair_name + '_alpha_' + i + '_' + length, alpha)
np.save('con matrices/coh/' + 'coh_' + pair_name + '_beta_' + i + '_' + length, beta)
if length == 'short':
#conditions = ['Coupled', 'Uncoupled', 'Leader', 'Follower', 'Control']
epo_drop = []
epo_drop.append(0)
epo_drop.append(1)
epo_drop.append(2)
epo_drop.append(24)
epo_drop.append(25)
for i in range(64*5):
epo_drop.append(epo_drop[i]+26)
# Ensuring that already removed epochs are not in list
for i in epo_drop:
Epoch_no = drop_list
if i in Epoch_no:
#print(i)
epo_drop.remove(i)
# Ensuring list is no longer than the number of epochs
while epo_drop[-1]>(len(epochs_b)-1):
epo_drop.pop(-1)
# Dropping the beginning and end of a trial
epo_a = epochs_a.drop(epo_drop)
epo_b = epochs_b.drop(epo_drop)
# Getting the number of epochs of specific condition in a row
a = epo_a.events[:,2]
d = dict()
for k, v in groupby(a):
d.setdefault(k, []).append(len(list(v)))
#print(d)
#equalize number of epochs used to calculate connectivity values
#mne.epochs.equalize_epoch_counts([epo_a, epo_b])
for c in conditions:
# Merging the leader and follower
if c == 'Leader':
epo_a_l = epochs_a['Leader']
epo_b_l = epochs_b['Leader']
epo_a_f = epochs_a['Follower']
epo_b_f = epochs_b['Follower']
epo_a_c = mne.concatenate_epochs([epo_a_l, epo_b_f])
epo_b_c = mne.concatenate_epochs([epo_b_l, epo_a_f])
c = 'Leader-Follower'
freq_bands = {'Theta': [4, 7],
'Alpha' :[8, 13],
'Beta': [15, 25]}
freq_bands = OrderedDict(freq_bands)
sampling_rate = epo_a_c.info['sfreq']
#Connectivity
#Data and storage
data_inter = np.array([epo_a_c, epo_b_c])
#Analytic signal per frequency band
complex_signal = analyses.compute_freq_bands(data_inter, sampling_rate,
freq_bands)
result = analyses.compute_sync(complex_signal, mode='coh', epochs_average = False)
#Defining the number of channels
n_ch = len(epochs_a.info['ch_names'])
#Averaging over the epochs specific to the given trial
trials = []
for j in range(3):
for i in d[event_dict['Leader']] or d[event_dict['Follower']]:
trials.append(sum(result[j,0:i,:,:])/i)
theta = sum(trials[::3])/16
alpha = sum(trials[1::3])/16
beta = sum(trials[2::3])/16
theta = theta[0:n_ch, n_ch:2*n_ch]
alpha = alpha[0:n_ch, n_ch:2*n_ch]
beta = beta[0:n_ch, n_ch:2*n_ch]
theta = abs(theta - np.mean(theta[:])) / np.std(theta[:])
alpha = abs(alpha - np.mean(alpha[:])) / np.std(alpha[:])
beta = abs(beta - np.mean(beta[:])) / np.std(beta[:])
print(c)
print('Range of the connectivities:')
print('Theta max:' + str(np.max(theta)))
print('Theta min:' + str(np.min(theta)))
print('Alpha max:' + str(np.max(alpha)))
print('Alpha min:' + str(np.min(alpha)))
print('Beta max:' + str(np.max(beta)))
print('Beta min:' + str(np.min(beta)))
np.save('con matrices/coh/' + 'coh_' + pair_name + '_theta_' + c + '_' + length, theta)
np.save('con matrices/coh/' + 'coh_' + pair_name + '_alpha_' + c + '_' + length, alpha)
np.save('con matrices/coh/' + 'coh_' + pair_name + '_beta_' + c + '_' + length, beta)
else:
epo_a_c = epo_a[c]
epo_b_c = epo_b[c]
#Defining frequency bands
freq_bands = {'Theta': [4, 7],
'Alpha' :[8, 13],
'Beta': [15, 25]}
freq_bands = OrderedDict(freq_bands)
sampling_rate = epo_a_c.info['sfreq']
#Connectivity
#Data and storage
data_inter = np.array([epo_a_c, epo_b_c])
#Analytic signal per frequency band
complex_signal = analyses.compute_freq_bands(data_inter, sampling_rate,
freq_bands)
result = analyses.compute_sync(complex_signal, mode='coh', epochs_average = False)
#Defining the number of channels
n_ch = len(epochs_a.info['ch_names'])
#Averaging over the epochs specific to the given trial
trials = []
for j in range(3):
for i in d[event_dict[c]]:
trials.append(sum(result[j,0:i,:,:])/i)
theta = sum(trials[::3])/16
alpha = sum(trials[1::3])/16
beta = sum(trials[2::3])/16
theta = theta[0:n_ch, n_ch:2*n_ch]
alpha = alpha[0:n_ch, n_ch:2*n_ch]
beta = beta[0:n_ch, n_ch:2*n_ch]
#theta = abs(theta - np.mean(theta[:])) / np.std(theta[:])
#alpha = abs(alpha - np.mean(alpha[:])) / np.std(alpha[:])
#beta = abs(beta - np.mean(beta[:])) / np.std(beta[:])
print(c)
print('Range of the connectivities:')
print('Theta max:' + str(np.max(theta)))
print('Theta min:' + str(np.min(theta)))
print('Alpha max:' + str(np.max(alpha)))
print('Alpha min:' + str(np.min(alpha)))
print('Beta max:' + str(np.max(beta)))
print('Beta min:' + str(np.min(beta)))
np.save('con matrices/coh/' + 'coh_' + pair_name + '_theta_' + c + '_' + length, theta)
np.save('con matrices/coh/' + 'coh_' + pair_name + '_alpha_' + c + '_' + length, alpha)
np.save('con matrices/coh/' + 'coh_' + pair_name + '_beta_' + c + '_' + length, beta)
return theta, alpha, beta, result
def ccorr(epochs_a, epochs_b, pair_name, length, drop_list):
    """Compute, plot and save inter-brain circular-correlation (ccorr) connectivity.

    For ``length == 'long'`` connectivity is averaged over all epochs of each
    condition; for ``length == 'short'`` it is computed per epoch and then
    averaged trial by trial.  For every condition the theta/alpha/beta
    inter-brain matrices are plotted, z-scored, printed and saved as ``.npy``
    files under ``con matrices/ccorr/``.

    Parameters
    ----------
    epochs_a, epochs_b : mne.Epochs
        Pre-processed epochs of participant A and participant B.
    pair_name : str
        Pair identifier used in the output file names.
    length : str
        ``'long'`` or ``'short'``; selects the analysis branch.
    drop_list : list of int
        Epoch indices already dropped during pre-processing (used by the
        ``'short'`` branch to avoid double-dropping).

    Returns
    -------
    tuple of numpy.ndarray
        ``(theta, alpha, beta)`` inter-brain matrices of the condition
        processed last.
    """
    event_dict = {'Resting': 101, 'Uncoupled': 102, 'Coupled': 103, 'Leader': 105,
                  'Follower': 107, 'Control': 108}
    conditions = ['Resting', 'Coupled', 'Uncoupled', 'Leader', 'Control']
    # Frequency bands of interest, shared by both branches.
    freq_bands = OrderedDict({'Theta': [4, 7],
                              'Alpha': [8, 13],
                              'Beta': [15, 25]})
    if length == 'long':
        # Remove the transition seconds at the beginning/end of each trial.
        epochs_a.crop(tmin=2, tmax=23)
        epochs_a.plot(n_epochs=1, n_channels=10)
        epochs_b.crop(tmin=2, tmax=23)
        for i in conditions:
            if i == 'Leader':
                # Merge leader and follower epochs so that the leading brain is
                # always in position "a" and the following brain in position "b".
                epo_a = mne.concatenate_epochs([epochs_a['Leader'], epochs_b['Follower']])
                epo_b = mne.concatenate_epochs([epochs_b['Leader'], epochs_a['Follower']])
                i = 'Leader-Follower'
            else:
                print(i)
                epo_a = epochs_a[i]
                epo_b = epochs_b[i]
            sampling_rate = epo_a.info['sfreq']
            # Analytic signal per frequency band, then ccorr averaged over epochs.
            data_inter = np.array([epo_a, epo_b])
            complex_signal = analyses.compute_freq_bands(data_inter, sampling_rate,
                                                         freq_bands)
            result = analyses.compute_sync(complex_signal, mode='ccorr', epochs_average=True)
            # Keep only the inter-brain quadrant of the (2*n_ch, 2*n_ch) matrix.
            n_ch = len(epochs_a.info['ch_names'])
            theta, alpha, beta = result[:, 0:n_ch, n_ch:2*n_ch]
            for band in (theta, alpha, beta):
                plt.figure()
                plt.imshow(band, cmap=plt.cm.hot)
                plt.clim(0, 0.8)
                plt.colorbar()
                plt.show()
            # z-score of the absolute deviation from the mean.
            # Fixed operator precedence: the original computed
            # abs(x - mean/std) instead of abs(x - mean)/std (the coh
            # implementation in this file uses the correct form).
            theta = abs(theta - np.mean(theta[:])) / np.std(theta[:])
            alpha = abs(alpha - np.mean(alpha[:])) / np.std(alpha[:])
            beta = abs(beta - np.mean(beta[:])) / np.std(beta[:])
            print('Range of the connectivities:')
            print('Theta max:' + str(np.max(theta)))
            print('Theta min:' + str(np.min(theta)))
            print('Alpha max:' + str(np.max(alpha)))
            print('Alpha min:' + str(np.min(alpha)))
            print('Beta max:' + str(np.max(beta)))
            print('Beta min:' + str(np.min(beta)))
            np.save('con matrices/ccorr/' + 'ccorr_' + pair_name + '_theta_' + i + '_' + length, theta)
            np.save('con matrices/ccorr/' + 'ccorr_' + pair_name + '_alpha_' + i + '_' + length, alpha)
            np.save('con matrices/ccorr/' + 'ccorr_' + pair_name + '_beta_' + i + '_' + length, beta)
    if length == 'short':
        # Indices of the first three and last two epochs of every 26-epoch
        # trial (transition periods that should not enter the analysis).
        epo_drop = [0, 1, 2, 24, 25]
        for i in range(64 * 5):
            epo_drop.append(epo_drop[i] + 26)
        # Remove indices that were already dropped during pre-processing.
        # Fixed: the original removed items from epo_drop while iterating over
        # it, which silently skips the element following every removal.
        epo_drop = [idx for idx in epo_drop if idx not in drop_list]
        # Keep the list within the number of available epochs (also guards
        # against an empty list, which would crash on epo_drop[-1]).
        while epo_drop and epo_drop[-1] > (len(epochs_b) - 1):
            epo_drop.pop(-1)
        # Drop the transition epochs from both participants.
        epo_a = epochs_a.drop(epo_drop)
        epo_b = epochs_b.drop(epo_drop)
        # Run lengths of consecutive identical event codes = number of
        # retained epochs per trial, keyed by event id.
        a = epo_a.events[:, 2]
        d = dict()
        for k, v in groupby(a):
            d.setdefault(k, []).append(len(list(v)))
        for c in conditions:
            if c == 'Leader':
                # Merge leader and follower epochs (taken from the un-dropped
                # data, exactly as in the original implementation).
                epo_a_c = mne.concatenate_epochs([epochs_a['Leader'], epochs_b['Follower']])
                epo_b_c = mne.concatenate_epochs([epochs_b['Leader'], epochs_a['Follower']])
                c = 'Leader-Follower'
                # NOTE(review): `or` keeps only the Leader run lengths whenever
                # they are non-empty; presumably the Follower runs should be
                # included as well — confirm intent.
                run_lengths = d[event_dict['Leader']] or d[event_dict['Follower']]
            else:
                epo_a_c = epo_a[c]
                epo_b_c = epo_b[c]
                run_lengths = d[event_dict[c]]
            sampling_rate = epo_a_c.info['sfreq']
            data_inter = np.array([epo_a_c, epo_b_c])
            complex_signal = analyses.compute_freq_bands(data_inter, sampling_rate,
                                                         freq_bands)
            # Per-epoch connectivity (no averaging) so trials can be averaged
            # separately afterwards.
            result = analyses.compute_sync(complex_signal, mode='ccorr', epochs_average=False)
            n_ch = len(epochs_a.info['ch_names'])
            # Average the per-epoch matrices within each trial, per band.
            # Fixed: the original always summed result[j, 0:i] (the first i
            # epochs) for every trial instead of advancing through the
            # consecutive runs of epochs.
            trials = []
            for j in range(3):
                start = 0
                for run in run_lengths:
                    trials.append(sum(result[j, start:start + run, :, :]) / run)
                    start += run
            # `trials` is band-major: all theta trials first, then alpha, then
            # beta.  Fixed: the original sliced trials[::3], which mixes the
            # three bands whenever there is more than one trial per band.
            n_trials = len(trials) // 3
            # NOTE(review): the divisor 16 assumes 16 trials per condition —
            # confirm; n_trials would be the general choice.
            theta = sum(trials[0:n_trials]) / 16
            alpha = sum(trials[n_trials:2 * n_trials]) / 16
            beta = sum(trials[2 * n_trials:]) / 16
            # Keep only the inter-brain quadrant.
            theta = theta[0:n_ch, n_ch:2*n_ch]
            alpha = alpha[0:n_ch, n_ch:2*n_ch]
            beta = beta[0:n_ch, n_ch:2*n_ch]
            # z-score of the absolute deviation from the mean (operator
            # precedence fixed, see the 'long' branch).
            theta = abs(theta - np.mean(theta[:])) / np.std(theta[:])
            alpha = abs(alpha - np.mean(alpha[:])) / np.std(alpha[:])
            beta = abs(beta - np.mean(beta[:])) / np.std(beta[:])
            print(c)
            print('Range of the connectivities:')
            print('Theta max:' + str(np.max(theta)))
            print('Theta min:' + str(np.min(theta)))
            print('Alpha max:' + str(np.max(alpha)))
            print('Alpha min:' + str(np.min(alpha)))
            print('Beta max:' + str(np.max(beta)))
            print('Beta min:' + str(np.min(beta)))
            np.save('con matrices/ccorr/' + 'ccorr_' + pair_name + '_theta_' + c + '_' + length, theta)
            np.save('con matrices/ccorr/' + 'ccorr_' + pair_name + '_alpha_' + c + '_' + length, alpha)
            np.save('con matrices/ccorr/' + 'ccorr_' + pair_name + '_beta_' + c + '_' + length, beta)
    return theta, alpha, beta
def load_avg_matrix(con_measure, freq_band, cond, length, plot = 1, sep = 0, save = 0):
    """Load every pair's saved connectivity matrix for one combination of
    measure / frequency band / condition / epoch length and return the
    element-wise average across pairs.

    Parameters
    ----------
    con_measure : str
        Connectivity measure ('coh', 'ccorr', ...) — also the sub-folder name.
    freq_band : str
        Frequency band tag used in the file names (e.g. 'alpha').
    cond : str
        Condition name used in the file names (e.g. 'Coupled').
    length : str
        'short' or 'long'; selects both the files and the colour limits.
    plot, sep, save : int
        Flags: plot the average matrix, plot each pair separately, and/or
        save the figures to disk.

    Returns
    -------
    numpy.ndarray
        The average connectivity matrix across all pairs.
    """
    pair_ids = ['pair003_', 'pair004_', 'pair005_', 'pair007_', 'pair009_', 'pair0010_']
    loaded = []
    matrix_dir = "C:\\Users\\kathr\\OneDrive\\Documents\\GitHub\\Bachelor-Project\\Resting condition\\con matrices\\" + con_measure
    for pid in pair_ids:
        # Work from inside the matrix folder so np.load can use bare file names.
        os.chdir(matrix_dir)
        wanted = con_measure + '_' + pid + freq_band + '_' + cond + '_' + length + '.npy'
        for _, _, files in os.walk(matrix_dir):
            for fname in files:
                if fname.startswith(wanted):
                    print(fname)
                    loaded.append(np.load(fname))
    print(loaded)
    # Element-wise mean over all pairs.
    avg_matrix = sum(loaded) / len(loaded)
    os.chdir("C:\\Users\\kathr\\OneDrive\\Documents\\GitHub\\Bachelor-Project\\Resting condition")
    if plot:
        fig = plt.figure()
        plt.title(cond + ' ' + freq_band + ' ' + length)
        plt.imshow(avg_matrix, cmap=plt.cm.Reds)
        plt.clim(*((0.15, 0.35) if length == 'short' else (0, 0.2)))
        plt.colorbar()
        plt.show()
        if save:
            fig.savefig('avg. matrices/' + con_measure + '/' + cond + '_' + freq_band + '_' + length + '.png')
    if sep:
        # One figure per pair, with the pair id in the saved file name.
        for idx, mat in enumerate(loaded):
            fig = plt.figure()
            plt.title(cond + ' ' + freq_band + ' ' + length)
            plt.imshow(mat, cmap=plt.cm.Reds)
            plt.clim(*((0.15, 0.35) if length == 'short' else (0, 0.1)))
            plt.colorbar()
            plt.show()
            if save:
                fig.savefig('sep. matrices/' + con_measure + '/' + str(pair_ids[idx]) + '_' + cond + '_' + freq_band + '_' + length + '.png')
    return avg_matrix
def con_matrix_comparison():
    """Plot pairwise contrasts between the average alpha-band coherence
    matrices of the Coupled, Uncoupled and Control conditions (short epochs).

    Loads the pair-averaged matrices via :func:`load_avg_matrix` and shows
    one figure per contrast.  Returns nothing.
    """
    coupled = load_avg_matrix('coh', 'alpha', 'Coupled', 'short')
    uncoupled = load_avg_matrix('coh', 'alpha', 'Uncoupled', 'short')
    control = load_avg_matrix('coh', 'alpha', 'Control', 'short')
    contrast1 = coupled - uncoupled
    contrast2 = coupled - control
    contrast3 = control - uncoupled
    # Fixed: the third figure plotted contrast2 again while its title said
    # 'control - uncoupled'; contrast3 was computed but never used.
    for title, contrast in (('coupled - uncoupled', contrast1),
                            ('coupled - control', contrast2),
                            ('control - uncoupled', contrast3)):
        plt.figure()
        plt.title(title)
        plt.imshow(contrast, cmap=plt.cm.bwr)
        # NOTE(review): the colour limits start at 0 although contrasts can be
        # negative with a diverging (bwr) colormap — confirm intent.
        plt.clim(0.0, 0.1)
        plt.colorbar()
        plt.show()
    return
| [
"46521720+KathrineSN@users.noreply.github.com"
] | 46521720+KathrineSN@users.noreply.github.com |
56bd58f9bd52e614c848299da4fe59b994d1f0f2 | 13d54ea22049f93637df5267411d50e71f68e739 | /scripts/cable_joint/envs/gym_gazebo_env.py | 3cf4f566d8453ee3a7c62f13e31cfb31a96b8502 | [
"MIT"
] | permissive | deePurrobotics/gazebo_rl | 555c2fe49f410edadf745a546e07daf51a200b88 | 7ec8b135d9334f7d28ce8db84a04599f5506c9e3 | refs/heads/master | 2020-03-31T10:27:34.337469 | 2019-02-07T17:52:17 | 2019-02-07T17:52:17 | 152,135,408 | 16 | 8 | MIT | 2019-02-07T17:52:18 | 2018-10-08T19:33:27 | Python | UTF-8 | Python | false | false | 3,732 | py | import numpy as np
import rospy
import gym # https://github.com/openai/gym/blob/master/gym/core.py
from gym.utils import seeding
from .gazebo_connection import GazeboConnection
class GymGazeboEnv(gym.Env):
  """Bridge between the OpenAI Gym API and a Gazebo simulation.

  Standard gym calls (step/reset/close) are translated into Gazebo commands
  through a :class:`GazeboConnection`.  Two things are needed before any
  topic can be inspected:

  1) the simulation must be unpaused, otherwise no data streams flow;
  2) if the simulation was already running, controllers must be reset,
     because some plugins (e.g. tf) do not survive a simulation reset and
     need to be re-initialised to work properly.

  Robot-specific behaviour (observations, actions, rewards, termination)
  is supplied by subclasses through the ``_*`` hook methods below.
  """

  def __init__(self, start_init_physics_parameters=True, reset_world_or_sim="SIMULATION"):
    rospy.logdebug("START init RobotGazeboEnv")
    # Handles pausing/unpausing and resetting of the Gazebo simulation.
    self.gazebo = GazeboConnection(start_init_physics_parameters, reset_world_or_sim)
    self.seed()
    rospy.logdebug("END init RobotGazeboEnv")

  # Env methods
  def seed(self, seed=None):
    """Seed the environment's random number generator; returns [seed]."""
    self.np_random, seed = seeding.np_random(seed)
    return [seed]

  def step(self, action):
    """Apply *action* for one step and return (obs, reward, done, info)."""
    # The simulation only advances while unpaused, so unpause just long
    # enough to execute the action, then freeze it again for readout.
    self.gazebo.unpauseSim()
    self._take_action(action)
    self.gazebo.pauseSim()
    observation = self._get_observation()
    return (observation,
            self._compute_reward(),
            self._is_done(),
            self._post_information())

  def reset(self):
    """Reset the simulation and return (obs, info)."""
    rospy.logdebug("Reseting RobotGazeboEnvironment")
    self._reset_sim()
    observation = self._get_observation()
    information = self._post_information()
    rospy.logdebug("END Reseting RobotGazeboEnvironment")
    return observation, information

  def close(self):
    """Shut down the environment.

    Use it for closing GUIs and other systems that need closing.
    """
    rospy.logwarn("Closing RobotGazeboEnvironment")
    rospy.signal_shutdown("Closing RobotGazeboEnvironment")

  def _reset_sim(self):
    """Reset the simulation and re-initialise the robot; returns True."""
    rospy.logdebug("START robot gazebo _reset_sim")
    self.gazebo.unpauseSim()
    self.gazebo.resetSim()
    self._set_init()
    self.gazebo.pauseSim()
    # Unpause once more so sensor streams flow while readiness is checked.
    self.gazebo.unpauseSim()
    self._check_all_systems_ready()
    self.gazebo.pauseSim()
    rospy.logdebug("END robot gazebo _reset_sim")
    return True

  def _set_init(self):
    """Set the robot to its initial pose.  Subclass hook."""
    raise NotImplementedError()

  def _check_all_systems_ready(self):
    """Check that all sensors, publishers and other simulation systems are
    operational.  Subclass hook."""
    raise NotImplementedError()

  def _get_observation(self):
    """Return the current observation.  Subclass hook."""
    raise NotImplementedError()

  def _post_information(self):
    """Return the info dictionary.  Subclass hook."""
    raise NotImplementedError()

  def _take_action(self, action):
    """Apply the given action to the simulation.  Subclass hook."""
    raise NotImplementedError()

  def _is_done(self):
    """Return whether the episode has terminated (e.g. the robot has
    fallen).  Subclass hook."""
    raise NotImplementedError()

  def _compute_reward(self):
    """Return the reward for the current observation.  Subclass hook."""
    raise NotImplementedError()

  def _env_setup(self, initial_qpos):
    """Initial configuration of the environment; can be used to set the
    initial state and extract information from the simulation.
    Subclass hook."""
    raise NotImplementedError()
| [
"zhan2838@purdue.edu"
] | zhan2838@purdue.edu |
49307e4030a27ff3a99f09bee2dfa9b7677a0bfa | 6109a95a284891792c35d0d19906ab8d1697f9c7 | /src/datamigration/azext_datamigration/vendored_sdks/datamigration/operations/_database_migrations_sql_mi_operations.py | 442a15827c7342be590b73150c6bde88654f882a | [
"MIT",
"LicenseRef-scancode-generic-cla"
] | permissive | Tatsinnit/azure-cli-extensions | 3e5a1752edced00d7c33660027d2c17fae074569 | a1959b123d4c11149adae2728ab5791949889d54 | refs/heads/master | 2022-10-05T17:40:10.825889 | 2022-03-16T10:33:56 | 2022-03-16T10:33:56 | 250,102,909 | 0 | 0 | MIT | 2020-03-25T22:12:01 | 2020-03-25T22:12:01 | null | UTF-8 | Python | false | false | 26,682 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class DatabaseMigrationsSqlMiOperations(object):
"""DatabaseMigrationsSqlMiOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.datamigration.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
    def __init__(self, client, config, serializer, deserializer):
        # type: (Any, Any, Any, Any) -> None
        # Pipeline client that sends the HTTP requests.
        self._client = client
        # Serializer/deserializer used to build request bodies and URLs and to
        # parse responses into model types.
        self._serialize = serializer
        self._deserialize = deserializer
        # Service client configuration (subscription id, polling interval, ...).
        self._config = config
    def get(
        self,
        resource_group_name,  # type: str
        managed_instance_name,  # type: str
        target_db_name,  # type: str
        migration_operation_id=None,  # type: Optional[str]
        expand=None,  # type: Optional[str]
        **kwargs  # type: Any
    ):
        # type: (...) -> "models.DatabaseMigrationSqlMi"
        """Retrieve the Database Migration resource.

        :param resource_group_name: Name of the resource group that contains the resource. You can
         obtain this value from the Azure Resource Manager API or the portal.
        :type resource_group_name: str
        :param managed_instance_name:
        :type managed_instance_name: str
        :param target_db_name: The name of the target database.
        :type target_db_name: str
        :param migration_operation_id: Optional migration operation ID. If this is provided, then
         details of migration operation for that ID are retrieved. If not provided (default), then
         details related to most recent or current operation are retrieved.
        :type migration_operation_id: str
        :param expand: The child resources to include in the response.
        :type expand: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: DatabaseMigrationSqlMi, or the result of cls(response)
        :rtype: ~azure.mgmt.datamigration.models.DatabaseMigrationSqlMi
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        # Optional hook that lets the caller post-process the raw pipeline response.
        cls = kwargs.pop('cls', None)  # type: ClsType["models.DatabaseMigrationSqlMi"]
        # Status codes with dedicated exception types; anything else unexpected
        # becomes a generic HttpResponseError below.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2021-10-30-preview"
        accept = "application/json"

        # Construct URL
        url = self.get.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'managedInstanceName': self._serialize.url("managed_instance_name", managed_instance_name, 'str'),
            'targetDbName': self._serialize.url("target_db_name", target_db_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters (optional query parameters are only sent when provided)
        query_parameters = {}  # type: Dict[str, Any]
        if migration_operation_id is not None:
            query_parameters['migrationOperationId'] = self._serialize.query("migration_operation_id", migration_operation_id, 'str')
        if expand is not None:
            query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # Only 200 is a documented success for this GET.
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('DatabaseMigrationSqlMi', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/managedInstances/{managedInstanceName}/providers/Microsoft.DataMigration/databaseMigrations/{targetDbName}'}  # type: ignore
    def _create_or_update_initial(
        self,
        resource_group_name,  # type: str
        managed_instance_name,  # type: str
        target_db_name,  # type: str
        parameters,  # type: "models.DatabaseMigrationSqlMi"
        **kwargs  # type: Any
    ):
        # type: (...) -> "models.DatabaseMigrationSqlMi"
        """Issue the initial PUT request of the create-or-update long-running
        operation and deserialize the immediate (200/201) response.

        Internal helper used by :meth:`begin_create_or_update`; not part of
        the public API.
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["models.DatabaseMigrationSqlMi"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2021-10-30-preview"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL
        url = self._create_or_update_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'managedInstanceName': self._serialize.url("managed_instance_name", managed_instance_name, 'str'),
            'targetDbName': self._serialize.url("target_db_name", target_db_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        # Serialize the resource model as the PUT request body.
        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(parameters, 'DatabaseMigrationSqlMi')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # 200 = updated existing resource, 201 = created; both carry the resource body.
        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        if response.status_code == 200:
            deserialized = self._deserialize('DatabaseMigrationSqlMi', pipeline_response)

        if response.status_code == 201:
            deserialized = self._deserialize('DatabaseMigrationSqlMi', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/managedInstances/{managedInstanceName}/providers/Microsoft.DataMigration/databaseMigrations/{targetDbName}'}  # type: ignore
    def begin_create_or_update(
        self,
        resource_group_name,  # type: str
        managed_instance_name,  # type: str
        target_db_name,  # type: str
        parameters,  # type: "models.DatabaseMigrationSqlMi"
        **kwargs  # type: Any
    ):
        # type: (...) -> LROPoller["models.DatabaseMigrationSqlMi"]
        """Create a new database migration to a given SQL Managed Instance.

        :param resource_group_name: Name of the resource group that contains the resource. You can
         obtain this value from the Azure Resource Manager API or the portal.
        :type resource_group_name: str
        :param managed_instance_name:
        :type managed_instance_name: str
        :param target_db_name: The name of the target database.
        :type target_db_name: str
        :param parameters: Details of SqlMigrationService resource.
        :type parameters: ~azure.mgmt.datamigration.models.DatabaseMigrationSqlMi
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: True for ARMPolling, False for no polling, or a
         polling object for personal polling strategy
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either DatabaseMigrationSqlMi or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.datamigration.models.DatabaseMigrationSqlMi]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # polling may be True (default ARM polling), False (no polling) or a
        # caller-supplied PollingMethod instance.
        polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["models.DatabaseMigrationSqlMi"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        # A continuation token rehydrates a poller for an LRO started earlier;
        # in that case no new PUT request is issued.
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            raw_result = self._create_or_update_initial(
                resource_group_name=resource_group_name,
                managed_instance_name=managed_instance_name,
                target_db_name=target_db_name,
                parameters=parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )

        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            # Deserialize the final LRO response into the model type (or hand
            # it to the caller's custom callback).
            deserialized = self._deserialize('DatabaseMigrationSqlMi', pipeline_response)

            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized

        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'managedInstanceName': self._serialize.url("managed_instance_name", managed_instance_name, 'str'),
            'targetDbName': self._serialize.url("target_db_name", target_db_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }

        if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/managedInstances/{managedInstanceName}/providers/Microsoft.DataMigration/databaseMigrations/{targetDbName}'}  # type: ignore
    def _cancel_initial(
        self,
        resource_group_name,  # type: str
        managed_instance_name,  # type: str
        target_db_name,  # type: str
        parameters,  # type: "models.MigrationOperationInput"
        **kwargs  # type: Any
    ):
        # type: (...) -> None
        """Issue the POST ``.../cancel`` request asking the service to stop the
        migration.  A 200 or 202 response is expected; the body is ignored.

        Internal helper used by ``begin_cancel``; not part of the public API.
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2021-10-30-preview"
        content_type = kwargs.pop("content_type", "application/json")

        # Construct URL
        url = self._cancel_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'managedInstanceName': self._serialize.url("managed_instance_name", managed_instance_name, 'str'),
            'targetDbName': self._serialize.url("target_db_name", target_db_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')

        # The request body identifies which migration operation to cancel.
        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(parameters, 'MigrationOperationInput')
        body_content_kwargs['content'] = body_content
        request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # 200 = cancelled synchronously, 202 = cancellation accepted (async).
        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        if cls:
            return cls(pipeline_response, None, {})

    _cancel_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/managedInstances/{managedInstanceName}/providers/Microsoft.DataMigration/databaseMigrations/{targetDbName}/cancel'}  # type: ignore
def begin_cancel(
    self,
    resource_group_name,  # type: str
    managed_instance_name,  # type: str
    target_db_name,  # type: str
    parameters,  # type: "models.MigrationOperationInput"
    **kwargs  # type: Any
):
    # type: (...) -> LROPoller[None]
    """Stop migrations in progress for the database.

    :param resource_group_name: Name of the resource group that contains the resource. You can
     obtain this value from the Azure Resource Manager API or the portal.
    :type resource_group_name: str
    :param managed_instance_name:
    :type managed_instance_name: str
    :param target_db_name: The name of the target database.
    :type target_db_name: str
    :param parameters: Required migration operation ID for which cancel will be initiated.
    :type parameters: ~azure.mgmt.datamigration.models.MigrationOperationInput
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: True for ARMPolling, False for no polling, or a
     polling object for personal polling strategy
    :paramtype polling: bool or ~azure.core.polling.PollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of LROPoller that returns either None or the result of cls(response)
    :rtype: ~azure.core.polling.LROPoller[None]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    if cont_token is None:
        # First invocation: fire the initial POST that starts the LRO.
        # cls=lambda ... keeps the raw pipeline response for the poller.
        raw_result = self._cancel_initial(
            resource_group_name=resource_group_name,
            managed_instance_name=managed_instance_name,
            target_db_name=target_db_name,
            parameters=parameters,
            cls=lambda x,y,z: x,
            **kwargs
        )

        # These kwargs were consumed by the initial call; drop them so the
        # polling method does not re-send them on status requests.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # The operation returns no body; only invoke the user-supplied
        # response hook (cls) if one was given.
        if cls:
            return cls(pipeline_response, None, {})

    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'managedInstanceName': self._serialize.url("managed_instance_name", managed_instance_name, 'str'),
        'targetDbName': self._serialize.url("target_db_name", target_db_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }

    if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = NoPolling()
    else: polling_method = polling
    if cont_token:
        # Resume a previously saved poller instead of starting a new operation.
        return LROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_cancel.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/managedInstances/{managedInstanceName}/providers/Microsoft.DataMigration/databaseMigrations/{targetDbName}/cancel'}  # type: ignore
def _cutover_initial(
    self,
    resource_group_name,  # type: str
    managed_instance_name,  # type: str
    target_db_name,  # type: str
    parameters,  # type: "models.MigrationOperationInput"
    **kwargs  # type: Any
):
    # type: (...) -> None
    """Issue the raw POST .../cutover request that starts the long-running
    cutover operation.  Called by :meth:`begin_cutover`; not public API.
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    # Callers may extend/override the HTTP-status -> exception mapping.
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2021-10-30-preview"
    content_type = kwargs.pop("content_type", "application/json")

    # Construct URL
    url = self._cutover_initial.metadata['url']  # type: ignore
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'managedInstanceName': self._serialize.url("managed_instance_name", managed_instance_name, 'str'),
        'targetDbName': self._serialize.url("target_db_name", target_db_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')

    # Serialize the request body and run it through the client pipeline.
    body_content_kwargs = {}  # type: Dict[str, Any]
    body_content = self._serialize.body(parameters, 'MigrationOperationInput')
    body_content_kwargs['content'] = body_content
    request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    # 200 = completed synchronously, 202 = accepted (poll for completion).
    if response.status_code not in [200, 202]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    if cls:
        return cls(pipeline_response, None, {})
_cutover_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/managedInstances/{managedInstanceName}/providers/Microsoft.DataMigration/databaseMigrations/{targetDbName}/cutover'}  # type: ignore
def begin_cutover(
    self,
    resource_group_name,  # type: str
    managed_instance_name,  # type: str
    target_db_name,  # type: str
    parameters,  # type: "models.MigrationOperationInput"
    **kwargs  # type: Any
):
    # type: (...) -> LROPoller[None]
    """Initiate cutover for online migration in progress for the database.

    :param resource_group_name: Name of the resource group that contains the resource. You can
     obtain this value from the Azure Resource Manager API or the portal.
    :type resource_group_name: str
    :param managed_instance_name:
    :type managed_instance_name: str
    :param target_db_name: The name of the target database.
    :type target_db_name: str
    :param parameters: Required migration operation ID for which cutover will be initiated.
    :type parameters: ~azure.mgmt.datamigration.models.MigrationOperationInput
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: True for ARMPolling, False for no polling, or a
     polling object for personal polling strategy
    :paramtype polling: bool or ~azure.core.polling.PollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of LROPoller that returns either None or the result of cls(response)
    :rtype: ~azure.core.polling.LROPoller[None]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    if cont_token is None:
        # First invocation: fire the initial POST that starts the LRO.
        # cls=lambda ... keeps the raw pipeline response for the poller.
        raw_result = self._cutover_initial(
            resource_group_name=resource_group_name,
            managed_instance_name=managed_instance_name,
            target_db_name=target_db_name,
            parameters=parameters,
            cls=lambda x,y,z: x,
            **kwargs
        )

        # These kwargs were consumed by the initial call; drop them so the
        # polling method does not re-send them on status requests.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # The operation returns no body; only invoke the user-supplied
        # response hook (cls) if one was given.
        if cls:
            return cls(pipeline_response, None, {})

    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'managedInstanceName': self._serialize.url("managed_instance_name", managed_instance_name, 'str'),
        'targetDbName': self._serialize.url("target_db_name", target_db_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }

    if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = NoPolling()
    else: polling_method = polling
    if cont_token:
        # Resume a previously saved poller instead of starting a new operation.
        return LROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_cutover.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/managedInstances/{managedInstanceName}/providers/Microsoft.DataMigration/databaseMigrations/{targetDbName}/cutover'}  # type: ignore
| [
"noreply@github.com"
] | noreply@github.com |
27c35318c6b5f8212dd449e282c2b081d6dc4c61 | 046c1141399890afa13fd243e55da3dbf31085c5 | /test/test22.py | 05c20e2d7892acce138d4df0ab6d184be9b7d49e | [] | no_license | carusyte/tflab | 1d0edf87282352aeb5a38b83c58ab9c0189bbb1a | 2324c3b0ad22d28c50a4fd8db56e36a2836735c3 | refs/heads/master | 2021-05-12T06:58:26.270868 | 2019-03-24T14:57:44 | 2019-03-24T14:57:44 | 117,232,451 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,165 | py | from __future__ import print_function
# Path hack.
import sys
import os
sys.path.append(os.path.dirname(os.path.abspath(__file__)) + "/..")
import tensorflow as tf
from pstk.model import model11
from time import strftime
from pstk.data import data as data0
from pstk.data import data12
from test import collect_summary
import os
import numpy as np
import math
# Training hyperparameters for the DRnnPredictorV6 run below.
EPOCH_SIZE = 444        # number of epochs; each epoch trains 50 batches
RNN_LAYERS = 1          # stacked RNN layers in the model
FCN_LAYERS = 3          # fully-connected layers after the RNN
LAYER_WIDTH = 256       # units per layer
MAX_STEP = 50           # maximum sequence length fed to the RNN
TIME_SHIFT = 9          # look-back shift used by the data loader
DROP_OUT = math.e / 10.0    # dropout rate (~0.272) — deliberate e/10 choice
LEARNING_RATE = 1e-3
LOG_DIR = 'logdir'      # root directory for summaries and checkpoints
def run():
    """Train DRnnPredictorV6 on the pstk stock data.

    For each of EPOCH_SIZE epochs: evaluate on the held-out test set,
    then train 50 batches, log summaries, save a checkpoint, and record
    the worst (highest-entropy) prediction of each step via
    data0.save_worst_rec.  Uses the TensorFlow 1.x session API.
    """
    tf.logging.set_verbosity(tf.logging.INFO)
    loader = data12.DataLoader(TIME_SHIFT)
    print('{} loading test data...'.format(strftime("%H:%M:%S")))
    tuuids, tdata, tlabels, tseqlen = loader.loadTestSet(MAX_STEP)
    print(tdata.shape)
    print(tlabels.shape)
    featSize = tdata.shape[2]
    nclass = tlabels.shape[1]
    # Class labels are centered around 0, e.g. nclass=5 -> [-2,-1,0,1,2].
    classes = [i-nclass//2 for i in range(nclass)]
    # Graph placeholders fed for both training and evaluation.
    data = tf.placeholder(tf.float32, [None, MAX_STEP, featSize], "input")
    target = tf.placeholder(tf.float32, [None, nclass], "labels")
    seqlen = tf.placeholder(tf.int32, [None], "seqlen")
    dropout = tf.placeholder(tf.float32, [], name="dropout")
    training = tf.placeholder(tf.bool, [], name="training")
    with tf.Session() as sess:
        model = model11.DRnnPredictorV6(
            data=data,
            target=target,
            seqlen=seqlen,
            classes=classes,
            rnn_layers=RNN_LAYERS,
            fcn_layers=FCN_LAYERS,
            layer_width=LAYER_WIDTH,
            dropout=dropout,
            training=training,
            learning_rate=LEARNING_RATE)
        stime = '{}'.format(strftime("%Y-%m-%d %H:%M:%S"))
        model_name = model.getName()
        # Derive the log directory name from this script's file name.
        f = __file__
        fbase = f[f.rfind('/')+1:f.rindex('.py')]
        base_dir = '{}/{}_{}/{}'.format(LOG_DIR, fbase,
                                        model_name, strftime("%Y%m%d_%H%M%S"))
        print('{} using model: {}'.format(strftime("%H:%M:%S"), model_name))
        if tf.gfile.Exists(base_dir):
            tf.gfile.DeleteRecursively(base_dir)
        tf.gfile.MakeDirs(base_dir)
        # Isolate the variables stored behind the scenes by the metric operation
        # so precision/recall accumulators can be reset between batches.
        metric_local_vars = tf.get_collection(
            tf.GraphKeys.LOCAL_VARIABLES, scope="Precisions") + tf.get_collection(
            tf.GraphKeys.LOCAL_VARIABLES, scope="Recalls")
        metric_vars_initializer = tf.variables_initializer(
            var_list=metric_local_vars)
        sess.run(tf.group(tf.global_variables_initializer(),
                          metric_vars_initializer))
        summary, train_writer, test_writer = collect_summary(
            sess, model, base_dir)
        saver = tf.train.Saver()
        bno = 0
        for epoch in range(EPOCH_SIZE):
            bno = epoch*50
            # Evaluate on the full test set before this epoch's training.
            print('{} running on test set...'.format(strftime("%H:%M:%S")))
            feeds = {data: tdata, target: tlabels,
                     seqlen: tseqlen, dropout: 0, training: False}
            # Extra fetches (precisions/recalls/f_score) update streaming
            # metrics; only the first three results are used here.
            accuracy, worst, test_summary_str = sess.run(
                [model.accuracy, model.worst, summary, model.precisions[1], model.recalls[1], model.f_score], feeds)[:3]
            bidx, max_entropy, predict, actual = worst[0], worst[1], worst[2], worst[3]
            print('{} Epoch {} test accuracy {:3.3f}% max_entropy {:3.4f} predict {} actual {} uuid {}'.format(
                strftime("%H:%M:%S"), epoch, 100. * accuracy, max_entropy, predict, actual, tuuids[bidx]))
            data0.save_worst_rec(model_name, stime, "test", epoch,
                                 tuuids[bidx], max_entropy, predict, actual)
            summary_str = None
            for i in range(50):
                # Reset streaming precision/recall accumulators per batch.
                sess.run(metric_vars_initializer)
                bno = bno+1
                print('{} loading training data for batch {}...'.format(
                    strftime("%H:%M:%S"), bno))
                truuids, trdata, labels, trseqlen = loader.loadTrainingData(
                    bno, MAX_STEP)
                print('{} training...'.format(strftime("%H:%M:%S")))
                feeds = {data: trdata, target: labels,
                         seqlen: trseqlen, dropout: DROP_OUT, training: True}
                summary_str, worst = sess.run(
                    [summary, model.worst, model.optimize, model.precisions[1], model.recalls[1], model.f_score], feeds)[:2]
                bidx, max_entropy, predict, actual = worst[0], worst[1], worst[2], worst[3]
                print('{} bno {} max_entropy {:3.4f} predict {} actual {}'.format(
                    strftime("%H:%M:%S"), bno, max_entropy, predict, actual))
                data0.save_worst_rec(model_name, stime, "train", bno,
                                     truuids[bidx], max_entropy, predict, actual)
            # NOTE(review): only the last training batch's summary is written
            # per epoch — confirm this is intentional.
            train_writer.add_summary(summary_str, bno)
            test_writer.add_summary(test_summary_str, bno)
            train_writer.flush()
            test_writer.flush()
            checkpoint_file = os.path.join(base_dir, 'model.ckpt')
            saver.save(sess, checkpoint_file, global_step=bno)
            sess.run(metric_vars_initializer)
        # test last epoch
        print('{} running on test set...'.format(strftime("%H:%M:%S")))
        feeds = {data: tdata, target: tlabels, seqlen: tseqlen,
                 dropout: 0, training: False}
        accuracy, worst, test_summary_str = sess.run(
            [model.accuracy, model.worst, summary, model.precisions[1], model.recalls[1], model.f_score], feeds)[:3]
        bidx, max_entropy, predict, actual = worst[0], worst[1], worst[2], worst[3]
        print('{} Epoch {} test accuracy {:3.3f}% max_entropy {:3.4f} predict {} actual {}'.format(
            strftime("%H:%M:%S"), EPOCH_SIZE, 100. * accuracy, max_entropy, predict, actual))
        data0.save_worst_rec(model_name, stime, "test", EPOCH_SIZE,
                             tuuids[bidx], max_entropy, predict, actual)
        train_writer.add_summary(summary_str, bno)
        test_writer.add_summary(test_summary_str, bno)
        train_writer.flush()
        test_writer.flush()
if __name__ == '__main__':
    run()
| [
"carusyte@163.com"
] | carusyte@163.com |
4fb7f3f8c8810c58b87853feb8ee750250f4d42e | a4efbdbef086fe1822601d87ef04a605d8e993d4 | /problem-solving/bon-appetit.py | 06afecbe9d7e50eb8053cc89e537b6b7c5c8bdb1 | [] | no_license | JMSchietekat/hackerrank | 618270e45dcf2f3cac0aa46b9356e914ac44cbb4 | 704192d5fc81ec9497cdcd9d2d1414817988c5bd | refs/heads/master | 2022-11-30T02:22:14.338578 | 2020-08-12T20:08:11 | 2020-08-12T20:08:11 | 274,339,065 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 494 | py | import math
import os
import random
import re
import sys
def bonAppetit(bill, k, b):
    """HackerRank "Bon Appetit": check whether Brian charged Anna fairly.

    Anna did not eat item ``k``, so her fair share is half of the bill
    excluding that item.

    :param bill: list of item prices.
    :param k: index of the item Anna did not eat.
    :param b: amount Brian actually charged Anna.
    :return: the string ``'Bon Appetit'`` if the charge was fair,
             otherwise the (integer) amount Anna is owed back.
    """
    # sum() replaces the original manual accumulation loop, which also
    # redundantly recomputed the fair share on every iteration.
    fair_share = (sum(bill) - bill[k]) / 2
    return 'Bon Appetit' if b == fair_share else int(b - fair_share)
if __name__ == '__main__':
    # Input format (HackerRank): first line "n k" (item count and the index
    # of the item Anna skipped), second line the n prices, third line the
    # amount Brian charged Anna.  Prints her refund, or 'Bon Appetit'.
    nk = input().rstrip().split()
    n = int(nk[0])
    k = int(nk[1])
    bill = list(map(int, input().rstrip().split()))
    b = int(input().strip())
    print(bonAppetit(bill, k, b))
| [
"justinschietekat@gmail.com"
] | justinschietekat@gmail.com |
d954433fc734887cf2bed62499ea0205cefd66a3 | 30b97efb2f36f81aa684d16d19e0e2db17f2967d | /기타/2468.py | 05d75b8ff59e165f6298ad243ba4d49c20202b24 | [] | no_license | jmseb3/bakjoon | 0a784a74c6476ef51864e2ada9d2551c7c7979eb | a38db54e851372059b0e45add92e43e556835e62 | refs/heads/main | 2023-08-25T08:43:04.579785 | 2021-10-01T08:40:37 | 2021-10-01T08:40:37 | 362,287,450 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 956 | py | from collections import deque
# BOJ 2468 (safe areas): read an N x N height map and track the tallest cell.
N = int(input())
maps = []       # N x N grid of terrain heights
max_len = 0     # maximum height seen; bounds the water levels to try
for _ in range(N):
    temp = list(map(int, input().split()))
    max_len = max(max_len, max(temp))
    maps.append(temp)

moves = [(-1, 0), (1, 0), (0, 1), (0, -1)]  # 4-neighbourhood offsets
ans = 0
def bfs(y, x, ck, visited):
    """Flood-fill (BFS) from (y, x) over all connected cells whose height
    is at least ``ck``, marking them in ``visited``.

    Relies on the module-level ``maps`` (height grid), ``N`` (grid size)
    and ``moves`` (4-neighbour offsets).
    """
    frontier = deque([(y, x)])
    visited[y][x] = True
    while frontier:
        cy, cx = frontier.popleft()
        for dy, dx in moves:
            ny, nx = cy + dy, cx + dx
            # Stay inside the grid.
            if not (0 <= ny < N and 0 <= nx < N):
                continue
            # Expand only to unvisited cells still above the water level.
            if maps[ny][nx] >= ck and not visited[ny][nx]:
                visited[ny][nx] = True
                frontier.append((ny, nx))
# For every candidate water level ck (0 floods nothing, so the original
# map is included), count the connected regions of cells with height >= ck.
# The answer is the maximum region count over all levels.
for ck in range(max_len+1):
    tmp = 0  # number of safe regions at this water level
    visited = [[False]*N for _ in range(N)]
    for y in range(N):
        for x in range(N):
            if maps[y][x] >= ck and not visited[y][x]:
                bfs(y, x, ck, visited)
                tmp += 1
    ans = max(tmp, ans)
print(ans)
| [
"jmseb3@naver.com"
] | jmseb3@naver.com |
d75eff24f288cc9555e4bedb4e88a7c5a17eac8b | 8c648a15d5e97a4d8449ca9120d7989d69f4d7fd | /leaderboard/endpoints/__init__.py | 1abb177bd9671ac3b99813cd25df5ef1ec417c7a | [] | no_license | robclewley/flask-leaderboard | ef3e951b9f61845978efded72a6f0d9a1d90adab | c738edee5069e9da2d28ca7a73d41d61868e6bc8 | refs/heads/master | 2021-05-29T16:02:53.656242 | 2015-07-21T18:26:46 | 2015-07-21T18:26:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 498 | py | import pkgutil
from flask import g, session
from leaderboard import app
from leaderboard.database import User
__all__ = []
# Import every module in this package so that their Flask routes register
# on `app` as a side effect, and expose each module as a package attribute.
for (loader, module_name, is_pkg,) in pkgutil.walk_packages(__path__):
    __all__.append(module_name)
    module = loader.find_module(module_name).load_module(module_name)
    globals()[module_name] = module
@app.before_request
def before_request():
    """Resolve the session's user_id (if any) to a User row and stash it
    on ``flask.g`` for the duration of the request; otherwise g.user is None.
    """
    g.user = (
        User.query.filter(User.id == session['user_id']).first()
        if 'user_id' in session
        else None
    )
"mykolasmith@gmail.com"
] | mykolasmith@gmail.com |
4ecf47ca7e7b37620817c44064a35600aa63affa | dfc2c18053b8e7576f88e7b2524d7ca3a8f47282 | /ch03/session3/63.py | a3458fac0a02818719ccecbeba2d2a88982ce7e0 | [] | no_license | Xoozi/tchomework | a6eed3bbf697ff12af8d42249ec58a139aed0c4c | 627c98b0b652ef20fd93025a17341bba76fbfce6 | refs/heads/master | 2021-01-23T21:18:15.793703 | 2018-10-21T11:05:55 | 2018-10-21T11:05:55 | 57,583,655 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,067 | py | #一族三次曲线
#(a)对k = 0, 及其邻近的k的正和负值, 把f(x) = x**3 + k*x的图形画在一个公共屏幕上.
#k的值是怎么影响到图形的形状的
#k小于0时, 函数递减, 只有一个根
#k向0移动, 函数图像开始逆时针旋转, 并且开始弯曲, 靠近0时开始有多个根
#k大于0时, 又开始伸展,
#(b)求f'(x). 正如你知道的, f'(x)是一个二次函数, 求该二次函数的判别式. 对什么样的k值, 该判别式
#为正, 为零, 为负? 对什么k值f'有两个零点, 一个或,没有零点?
#说明k的值对f图形的形状有什么影响?
#f'(x) = 3*x**2 + k
#Δ = -4*3*k = -12k
#k>0时Δ<0, f'无零点
#k<0时Δ>0, f'有两个零点
#k=0时Δ=0, f'有一个零点
#说明k值影响了f是否有极值
def f(x, k):
    """Evaluate the cubic family member f(x) = x**3 + k*x.

    Works element-wise when x is a numpy array.
    """
    cubic_term = x ** 3
    linear_term = k * x
    return cubic_term + linear_term
def ddd(s, e, a):
    """Plot f(x, k) for k from -1280 to 1280 (step 20) over x in [s, e].

    The red colour channel increases with k so the family of curves can
    be told apart; the x-axis is drawn in black first.

    :param s: left end of the x range.
    :param e: right end of the x range.
    :param a: number of sample points for linspace.
    """
    r = 0
    g = 0
    b = 0
    k = -1280
    plot([s, e], [0, 0], '-k')  # draw the x-axis
    x = linspace(s, e, a)
    while(k <= 1280):
        y = f(x, k)
        # Bug fix: the loop runs 129 times, so r reaches 2*128 = 256 on the
        # final iteration and '%02X' would emit the invalid 7-digit colour
        # string '#1000000'.  Clamp the channel to the valid 0-255 range.
        plot(x, y, '#%02X%02X%02X' % (min(r, 255), g, b))
        r += 2
        k += 20
| [
"wwfxtt@gmail.com"
] | wwfxtt@gmail.com |
27cee5430e7698de53fc8eb0c7e228edb4d834a4 | be7ff4e7d995cfd2c28b2b93321931f886994653 | /project/stack_overflow/migrations/0006_auto_20190209_1310.py | 7493b227b034913c612d5f3ea72c0a03660844fe | [] | no_license | j7sai/Pregunta | 26f96ce41a870d9d4724fac80ba6a9388fc95d7d | 0255347bb1d3945c21d5dcaae70cfa4efff6c828 | refs/heads/master | 2022-12-09T20:47:47.773368 | 2019-03-10T09:26:12 | 2019-03-10T09:26:12 | 174,516,925 | 0 | 0 | null | 2022-12-08T01:41:12 | 2019-03-08T10:26:22 | Python | UTF-8 | Python | false | false | 805 | py | # Generated by Django 2.1.5 on 2019-02-09 07:40
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated schema migration: renames Questions.questionid to
    # question_id, adds a Questions.ans many-to-many to Answers, and
    # repoints the Answers.ans_id foreign key at User.

    dependencies = [
        ('stack_overflow', '0005_auto_20190209_1228'),
    ]

    operations = [
        migrations.RenameField(
            model_name='questions',
            old_name='questionid',
            new_name='question_id',
        ),
        migrations.AddField(
            model_name='questions',
            name='ans',
            field=models.ManyToManyField(to='stack_overflow.Answers'),
        ),
        migrations.AlterField(
            model_name='answers',
            name='ans_id',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='stack_overflow.User'),
        ),
    ]
| [
"sairohanj@gmail.com"
] | sairohanj@gmail.com |
5db9f1bb82aaada88a79243dab0be796299f41e9 | a1d8fefb84ce2f69ebce5fedcdf5262ba0005a5f | /zvt/recorders/eastmoney/meta/china_stock_category_recorder.py | a0a6b9c47e284491ad80a9fade92202ba428d1f4 | [
"MIT"
] | permissive | vinhphu3000/FinanceCenter | f2c827ffe268421011682ed45375f55ac6ddc54a | 1511751fe6d7d1f1fb940ae66d29b45eb0782fea | refs/heads/master | 2023-03-30T19:55:17.124679 | 2021-03-27T11:40:18 | 2021-03-27T11:40:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,630 | py | # -*- coding: utf-8 -*-
import pandas as pd
from numba import njit
from zvt import zvt_config
from zvt.api.data_type import Region, Provider, EntityType
from zvt.api.quote import china_stock_code_to_id
from zvt.domain import BlockStock, BlockCategory, Block
from zvt.contract.api import df_to_db
from zvt.contract.recorder import RecorderForEntities, TimeSeriesDataRecorder
from zvt.networking.request import sync_get
from zvt.utils.time_utils import now_pd_timestamp, PD_TIME_FORMAT_DAY
from zvt.utils.utils import json_callback_param
class EastmoneyChinaBlockRecorder(RecorderForEntities):
    """Record China stock industry/concept block (sector) listings from
    EastMoney and persist them as ``Block`` entities via ``df_to_db``.
    """
    provider = Provider.EastMoney
    data_schema = Block
    region = Region.CHN

    # 用于抓取行业/概念/地域列表 — URLs for the industry / concept / (disabled) area listings.
    category_map_url = {
        BlockCategory.industry: 'https://nufm.dfcfw.com/EM_Finance2014NumericApplication/JS.aspx?type=CT&cmd=C._BKHY&sty=DCRRBKCPAL&st=(ChangePercent)&sr=-1&p=1&ps=200&lvl=&cb=jsonp_F1A61014DE5E45B7A50068EA290BC918&token=4f1862fc3b5e77c150a2b985b12db0fd&_=08766',
        BlockCategory.concept: 'https://nufm.dfcfw.com/EM_Finance2014NumericApplication/JS.aspx?type=CT&cmd=C._BKGN&sty=DCRRBKCPAL&st=(ChangePercent)&sr=-1&p=1&ps=300&lvl=&cb=jsonp_3071689CC1E6486A80027D69E8B33F26&token=4f1862fc3b5e77c150a2b985b12db0fd&_=08251',
        # BlockCategory.area: 'https://nufm.dfcfw.com/EM_Finance2014NumericApplication/JS.aspx?type=CT&cmd=C._BKDY&sty=DCRRBKCPAL&st=(ChangePercent)&sr=-1&p=1&ps=200&lvl=&cb=jsonp_A597D4867B3D4659A203AADE5B3B3AD5&token=4f1862fc3b5e77c150a2b985b12db0fd&_=02443'
    }

    def init_entities(self):
        # Only industry and concept blocks are recorded (area is disabled above).
        self.entities = [BlockCategory.industry, BlockCategory.concept]

    def process_loop(self, entity, http_session):
        """Fetch one block listing, parse it into records and persist them.

        :param entity: a BlockCategory member selecting which listing to fetch.
        :param http_session: session object passed through to sync_get.
        """
        text = sync_get(http_session, self.category_map_url[entity], return_type='text')
        if text is None:
            return
        results = json_callback_param(text)

        # Bug fix: this helper was decorated with @njit(nopython=True), but
        # numba's nopython mode cannot compile code that builds Python dicts
        # with string keys or closes over the `entity` enum, so the decorator
        # raised a TypingError on first call.  Run it as plain Python — the
        # sibling helper in EastmoneyChinaBlockStockRecorder.record() already
        # has the identical decorator commented out for the same reason.
        def numba_boost_up(results):
            the_list = []
            for result in results:
                items = result.split(',')
                code = items[1]
                name = items[2]
                entity_id = f'block_cn_{code}'
                the_list.append({
                    'id': entity_id,
                    'entity_id': entity_id,
                    'entity_type': EntityType.Block.value,
                    'exchange': 'cn',
                    'code': code,
                    'name': name,
                    'category': entity.value
                })
            return the_list

        the_list = numba_boost_up(results)
        if the_list:
            df = pd.DataFrame.from_records(the_list)
            df_to_db(df=df, ref_df=None, region=Region.CHN, data_schema=self.data_schema, provider=self.provider)

        self.logger.info(f"finish record sina blocks:{entity.value}")
class EastmoneyChinaBlockStockRecorder(TimeSeriesDataRecorder):
    """Record the constituent stocks of each EastMoney block as BlockStock rows."""
    region = Region.CHN
    provider = Provider.EastMoney
    entity_schema = Block
    data_schema = BlockStock

    # 用于抓取行业包含的股票 — listing of the stocks contained in a block;
    # formatted with (block code, page suffix).
    category_stocks_url = 'https://nufm.dfcfw.com/EM_Finance2014NumericApplication/JS.aspx?type=CT&cmd=C.{}{}&sty=SFCOO&st=(Close)&sr=-1&p=1&ps=300&cb=jsonp_B66B5BAA1C1B47B5BB9778045845B947&token=7bc05d0d4c3c22ef9fca8c2a912d779c'

    def __init__(self, exchanges=None, entity_ids=None, codes=None, batch_size=10, force_update=False, sleeping_time=5,
                 default_size=zvt_config['batch_size'], real_time=False, fix_duplicate_way='add',
                 start_timestamp=None, end_timestamp=None, close_hour=0, close_minute=0) -> None:
        # Pins the recorded entity type to Block; all other arguments are
        # forwarded unchanged to TimeSeriesDataRecorder.
        super().__init__(EntityType.Block, exchanges, entity_ids, codes, batch_size, force_update, sleeping_time,
                         default_size, real_time, fix_duplicate_way, start_timestamp, end_timestamp, close_hour,
                         close_minute)

    def generate_domain_id(self, entity, df, time_fmt=PD_TIME_FORMAT_DAY):
        # Row id is "<block entity id>_<stock entity id>".
        return entity.id + '_' + df['stock_id']

    def record(self, entity, start, end, size, timestamps, http_session):
        """Fetch the stock list of one block and return it as a DataFrame
        (or None when the request fails or yields no rows)."""
        url = self.category_stocks_url.format(entity.code, '1')
        text = sync_get(http_session, url, return_type='text')
        if text is None:
            return None
        results = json_callback_param(text)

        # Left un-jitted: numba nopython mode cannot compile list-of-dict
        # construction (see the comment in EastmoneyChinaBlockRecorder).
        # @njit(nopython=True)
        def numba_boost_up(results):
            the_list = []
            for result in results:
                items = result.split(',')
                stock_code = items[1]
                stock_id = china_stock_code_to_id(stock_code)
                the_list.append({
                    'stock_id': stock_id,
                    'stock_code': stock_code,
                    'stock_name': items[2],
                })
            return the_list

        the_list = numba_boost_up(results)
        if the_list:
            df = pd.DataFrame.from_records(the_list)
            return df

        # No rows parsed: back off before the caller retries the next entity.
        self.sleep()
        return None

    def format(self, entity, df):
        # Stamp the rows with the recording time and the owning block's metadata.
        df['timestamp'] = now_pd_timestamp(Region.CHN)
        df['entity_id'] = entity.id
        df['provider'] = self.provider.value
        df['code'] = entity.code
        df['name'] = entity.name
        df['level'] = self.level.value
        df['exchange'] = entity.exchange
        df['entity_type'] = EntityType.Block.value
        df['id'] = self.generate_domain_id(entity, df)
        return df
__all__ = ['EastmoneyChinaBlockRecorder', 'EastmoneyChinaBlockStockRecorder']

if __name__ == '__main__':
    # Ad-hoc smoke run: record the constituents of block BK0727 only.
    # init_log('china_stock_category.log')
    recorder = EastmoneyChinaBlockStockRecorder(codes=['BK0727'])
    recorder.run()
| [
"doncat99@gmail.com"
] | doncat99@gmail.com |
c2b307ea65c6d7a19d34a4f1085403b21d38e02f | 874fcbf6b35226a9b61205630b2eaeed5f884303 | /decision_tree/studentMain.py | a93089a87af6670df4b488330a2e82d86e304941 | [] | no_license | pieces201020/ud120-projects | c8236c8597a5e7c626478ac6ef565e2cbf4fdd22 | 3ebee19f66a6f85f11a669a3ecad597a0ebec5c0 | refs/heads/master | 2021-01-18T16:14:14.145015 | 2017-04-04T01:18:57 | 2017-04-04T01:18:57 | 86,730,863 | 0 | 0 | null | 2017-03-30T17:38:21 | 2017-03-30T17:38:20 | null | UTF-8 | Python | false | false | 710 | py | #!/usr/bin/python
""" lecture and example code for decision tree unit """
import sys
from class_vis import prettyPicture, output_image
from prep_terrain_data import makeTerrainData
import matplotlib.pyplot as plt
import numpy as np
import pylab as pl
from classifyDT import classify
# Build the synthetic terrain dataset, fit the student's decision tree,
# and visualize the resulting decision boundary.
features_train, labels_train, features_test, labels_test = makeTerrainData()

### the classify() function in classifyDT is where the magic
### happens--fill in this function in the file 'classifyDT.py'!
clf = classify(features_train, labels_train)

#### grader code, do not modify below this line
prettyPicture(clf, features_test, labels_test)
plt.show()
# output_image("test.png", "png", open("test.png", "rb").read())
| [
"zbw.zhangbowei@gmail.com"
] | zbw.zhangbowei@gmail.com |
931dd466a1df578c537a95a690a9c0529161c7ba | 6553f98336fa296ca4faa6e3e247c0a343d883f3 | /yolo/utils.py | c639349f2c4306f0ce792157149cee22126cc8ce | [] | no_license | DableUTeeF/algea | 5341a529534e26f1d7ae4ad71d064f32f8f0aba5 | 3a34c796bdddd07f2ab17811fe472cdce6d9207a | refs/heads/master | 2020-08-29T12:27:39.566480 | 2019-11-08T02:41:03 | 2019-11-08T02:41:03 | 218,030,814 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 51,045 | py | import numpy as np
import os
import xml.etree.ElementTree as ET
import csv
import cv2
# from keras.optimizers import Optimizer
# from keras import backend as K
import copy
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
from six import raise_from
import csv
import sys
import os.path
def _parse(value, function, fmt):
"""
Parse a string into a value, and format a nice ValueError if it fails.
Returns `function(value)`.
Any `ValueError` raised is catched and a new `ValueError` is raised
with message `fmt.format(e)`, where `e` is the caught `ValueError`.
"""
try:
return function(value)
except ValueError as e:
raise_from(ValueError(fmt.format(e)), None)
def _read_classes(csv_reader):
    """ Parse the classes file given by csv_reader.

    Each row is (class_name, class_id); returns {class_name: class_id}.
    Raises ValueError on malformed rows or duplicate class names.
    """
    result = {}
    # Line numbers are 1-based for error messages.
    for line, row in enumerate(csv_reader, 1):
        try:
            class_name, class_id = row
        except ValueError:
            raise_from(ValueError('line {}: format should be \'class_name,class_id\''.format(line)), None)

        class_id = _parse(class_id, int, 'line {}: malformed class ID: {{}}'.format(line))

        if class_name in result:
            raise ValueError('line {}: duplicate class name: \'{}\''.format(line, class_name))
        result[class_name] = class_id
    return result
def _read_annotations(csv_reader, classes):
    """ Read annotations from the csv_reader.

    Each row is (img_file, x1, y1, x2, y2, class_name); a row with empty
    coordinate/class fields registers an image with no annotations.
    Returns {img_file: [{'x1':..., 'x2':..., 'y1':..., 'y2':..., 'class':...}, ...]}.
    """
    result = {}
    # Line numbers are 1-based for error messages.
    for line, row in enumerate(csv_reader, 1):
        try:
            img_file, x1, y1, x2, y2, class_name = row[:6]
        except ValueError:
            raise_from(ValueError(
                'line {}: format should be \'img_file,x1,y1,x2,y2,class_name\' or \'img_file,,,,,\''.format(line)),
                None)

        result.setdefault(img_file, [])

        # If a row contains only an image path, it's an image without annotations.
        if (x1, y1, x2, y2, class_name) == ('', '', '', '', ''):
            continue

        x1 = _parse(x1, int, 'line {}: malformed x1: {{}}'.format(line))
        y1 = _parse(y1, int, 'line {}: malformed y1: {{}}'.format(line))
        x2 = _parse(x2, int, 'line {}: malformed x2: {{}}'.format(line))
        y2 = _parse(y2, int, 'line {}: malformed y2: {{}}'.format(line))

        # Check that the bounding box is valid.
        if x2 <= x1:
            raise ValueError('line {}: x2 ({}) must be higher than x1 ({})'.format(line, x2, x1))
        if y2 <= y1:
            raise ValueError('line {}: y2 ({}) must be higher than y1 ({})'.format(line, y2, y1))

        # check if the current class name is correctly present
        if class_name not in classes:
            raise ValueError('line {}: unknown class name: \'{}\' (classes: {})'.format(line, class_name, classes))

        result[img_file].append({'x1': x1, 'x2': x2, 'y1': y1, 'y2': y2, 'class': class_name})
    return result
def _open_for_csv(path):
""" Open a file with flags suitable for csv.reader.
This is different for python2 it means with mode 'rb',
for python3 this means 'r' with "universal newlines".
"""
if sys.version_info[0] < 3:
return open(path, 'rb')
else:
return open(path, 'r', newline='')
class CocoGenerator:
    """ Generate data from the COCO dataset.

    See https://github.com/cocodataset/cocoapi/tree/master/PythonAPI for more information.
    """

    def __init__(self, json_path, image_dir):
        """ Initialize a COCO data generator.

        Args
            json_path: Path to the COCO annotation json file.
            image_dir: Directory holding the corresponding images.
        """
        self.image_dir = image_dir
        self.coco = COCO(json_path)
        self.image_ids = self.coco.getImgIds()

        self.load_classes()

    def load_classes(self):
        """ Loads the class to label mapping (and inverse) for COCO.
        """
        # load class names (name -> label)
        categories = self.coco.loadCats(self.coco.getCatIds())
        # Sort by COCO category id so label assignment is deterministic.
        categories.sort(key=lambda x: x['id'])

        # classes:             name -> contiguous network label (0..79)
        # coco_labels:         network label -> COCO category id
        # coco_labels_inverse: COCO category id -> network label
        self.classes = {}
        self.coco_labels = {}
        self.coco_labels_inverse = {}
        for c in categories:
            self.coco_labels[len(self.classes)] = c['id']
            self.coco_labels_inverse[c['id']] = len(self.classes)
            self.classes[c['name']] = len(self.classes)

        # also load the reverse (label -> name)
        self.labels = {}
        for key, value in self.classes.items():
            self.labels[value] = key

    def size(self):
        """ Size of the COCO dataset.
        """
        return len(self.image_ids)

    def num_classes(self):
        """ Number of classes in the dataset. For COCO this is 80.
        """
        return len(self.classes)

    def has_label(self, label):
        """ Return True if label is a known label.
        """
        return label in self.labels

    def has_name(self, name):
        """ Returns True if name is a known class.
        """
        return name in self.classes

    def name_to_label(self, name):
        """ Map name to label.
        """
        return self.classes[name]

    def label_to_name(self, label):
        """ Map label to name.
        """
        return self.labels[label]

    def coco_label_to_label(self, coco_label):
        """ Map COCO label to the label as used in the network.

        COCO has some gaps in the order of labels. The highest label is 90, but there are 80 classes.
        """
        return self.coco_labels_inverse[coco_label]

    def coco_label_to_name(self, coco_label):
        """ Map COCO label to name.
        """
        return self.label_to_name(self.coco_label_to_label(coco_label))

    def label_to_coco_label(self, label):
        """ Map label as used by the network to labels as used by COCO.
        """
        return self.coco_labels[label]

    def image_aspect_ratio(self, image_index):
        """ Compute the aspect ratio for an image with image_index.
        """
        image = self.coco.loadImgs(self.image_ids[image_index])[0]
        return float(image['width']) / float(image['height'])

    def load_annotations(self, image_index):
        """ Load annotations for an image_index.

        Returns a dict with 'labels' (N,) and 'bboxes' (N, 4) numpy arrays;
        boxes are converted from COCO [x, y, w, h] to [x1, y1, x2, y2].
        """
        # get ground truth annotations (iscrowd annotations are excluded)
        annotations_ids = self.coco.getAnnIds(imgIds=self.image_ids[image_index], iscrowd=False)
        annotations = {'labels': np.empty((0,)), 'bboxes': np.empty((0, 4))}

        # some images appear to miss annotations (like image with id 257034)
        if len(annotations_ids) == 0:
            return annotations

        # parse annotations
        coco_annotations = self.coco.loadAnns(annotations_ids)
        for idx, a in enumerate(coco_annotations):
            # some annotations have basically no width / height, skip them
            if a['bbox'][2] < 1 or a['bbox'][3] < 1:
                continue

            annotations['labels'] = np.concatenate(
                [annotations['labels'], [self.coco_label_to_label(a['category_id'])]], axis=0)
            annotations['bboxes'] = np.concatenate([annotations['bboxes'], [[
                a['bbox'][0],
                a['bbox'][1],
                a['bbox'][0] + a['bbox'][2],
                a['bbox'][1] + a['bbox'][3],
            ]]], axis=0)

        return annotations
def parse_annotation(ann_dir, img_dir, labels=()):
    """Parse a folder of PASCAL-VOC style XML annotation files.

    Args:
        ann_dir: directory containing the .xml annotation files.
        img_dir: directory holding the images; joined with each <filename>.
        labels: optional whitelist of class names; an object whose name is
            not listed is dropped (and the rest of its tags are skipped).

    Returns:
        (all_imgs, seen_labels): a list of per-image dicts (filename, width,
        height, object list) and a dict counting every class name seen,
        including names that were filtered out by `labels`.
    """
    all_imgs = []
    seen_labels = {}
    for ann_file in sorted(os.listdir(ann_dir)):
        img = {'object': []}
        tree = ET.parse(os.path.join(ann_dir, ann_file))
        for elem in tree.iter():
            tag = elem.tag
            if 'filename' in tag:
                img['filename'] = os.path.join(img_dir, elem.text)
            if 'width' in tag:
                img['width'] = int(elem.text)
            if 'height' in tag:
                img['height'] = int(elem.text)
            if 'object' in tag or 'part' in tag:
                obj = {}
                for attr in list(elem):
                    if 'name' in attr.tag:
                        obj['name'] = attr.text
                        seen_labels[obj['name']] = seen_labels.get(obj['name'], 0) + 1
                        if len(labels) > 0 and obj['name'] not in labels:
                            break  # unwanted class: skip the rest of this object
                        img['object'] += [obj]
                    if 'bndbox' in attr.tag:
                        for dim in list(attr):
                            if 'xmin' in dim.tag:
                                obj['xmin'] = int(round(float(dim.text)))
                            if 'ymin' in dim.tag:
                                obj['ymin'] = int(round(float(dim.text)))
                            if 'xmax' in dim.tag:
                                obj['xmax'] = int(round(float(dim.text)))
                            if 'ymax' in dim.tag:
                                obj['ymax'] = int(round(float(dim.text)))
        if len(img['object']) > 0:
            all_imgs += [img]
    return all_imgs, seen_labels
def parse_voc_annotation(ann_dir, img_dir, labels=()):
    """Parse PASCAL-VOC XML annotations into a dict keyed by image stem.

    Args:
        ann_dir: directory with the .xml annotation files.
        img_dir: directory holding the images; joined with each <filename>.
        labels: optional whitelist of class names; other objects are dropped.

    Returns:
        (all_imgs, seen_labels, max_box_per_image): dict mapping image stem
        (filename without extension) -> instance dict, dict of class-name
        counts, and the largest number of boxes in a single image.
    """
    all_imgs = {}
    seen_labels = {}
    max_box_per_image = 0
    for ann in sorted(os.listdir(ann_dir)):
        img = {'object': []}
        # Reset per file: previously `filename` leaked across iterations, so a
        # file missing its <filename> tag raised NameError (first file) or
        # silently overwrote the entry of the *previous* file's stem.
        filename = None
        tree = ET.parse(os.path.join(ann_dir, ann))
        for elem in tree.iter():
            if 'filename' in elem.tag:
                filename = elem.text[:-4]  # strip the 3-char extension, e.g. '.jpg'
                img['filename'] = os.path.join(img_dir, elem.text)
            if 'width' in elem.tag:
                img['width'] = int(elem.text)
            if 'height' in elem.tag:
                img['height'] = int(elem.text)
            if 'object' in elem.tag or 'part' in elem.tag:
                obj = {}
                for attr in list(elem):
                    if 'name' in attr.tag:
                        obj['name'] = attr.text
                        if obj['name'] in seen_labels:
                            seen_labels[obj['name']] += 1
                        else:
                            seen_labels[obj['name']] = 1
                        if len(labels) > 0 and obj['name'] not in labels:
                            break  # unwanted class: skip the rest of this object
                        else:
                            img['object'] += [obj]
                    if 'bndbox' in attr.tag:
                        for dim in list(attr):
                            if 'xmin' in dim.tag:
                                obj['xmin'] = int(round(float(dim.text)))
                            if 'ymin' in dim.tag:
                                obj['ymin'] = int(round(float(dim.text)))
                            if 'xmax' in dim.tag:
                                obj['xmax'] = int(round(float(dim.text)))
                            if 'ymax' in dim.tag:
                                obj['ymax'] = int(round(float(dim.text)))
        if filename is not None and len(img['object']) > 0:
            all_imgs[filename] = img
        if len(img['object']) > max_box_per_image:
            max_box_per_image = len(img['object'])
    return all_imgs, seen_labels, max_box_per_image
def create_voc_training_instances(voc_folder):
    """Build train/valid instance lists from a standard VOC folder layout.

    Expects `voc_folder` to contain Annotations/, JPEGImages/ and
    ImageSets/Main/{train,val}.txt. The txt files list image stems (one per
    line) used to select instances from the parsed annotations; a stem
    missing from the parsed dict raises KeyError.

    Returns (train_ints, valid_ints, sorted_labels, max_box_per_image).
    """
    # parse annotations of the training set
    ints, labels, max_box_per_image = parse_voc_annotation(os.path.join(voc_folder, 'Annotations'),
                                                           os.path.join(voc_folder, 'JPEGImages'))
    # [:-1] drops the empty string produced by the trailing newline
    train_txt = open(os.path.join(voc_folder, 'ImageSets/Main/train.txt')).read().split('\n')[:-1]
    val_txt = open(os.path.join(voc_folder, 'ImageSets/Main/val.txt')).read().split('\n')[:-1]
    train_ints = [ints[train] for train in train_txt]
    valid_ints = [ints[val] for val in val_txt]
    # for instance in ints:
    #     filename = os.path.split(instance['filename'])[-1][:-4]
    #     if filename in train_txt:
    #         train_ints.append(instance)
    #     else:
    #         valid_ints.append(instance)
    return train_ints, valid_ints, sorted(labels), max_box_per_image
def _csv_image_data_to_instances(image_data_by_file, with_wh, max_box_per_image):
    """Convert per-file CSV annotation dicts into instance dicts.

    Returns (instances, updated max_box_per_image). When `with_wh` is set
    the image file is read just to record its width/height.
    """
    instances = []
    for path, image_data in image_data_by_file.items():
        inst = {'filename': path, 'object': []}
        for i, obj in enumerate(image_data):
            o = {'xmin': obj['x1'], 'xmax': obj['x2'], 'ymin': obj['y1'], 'ymax': obj['y2'], 'name': obj['class']}
            if with_wh:
                # read the image only to record its dimensions
                image = cv2.imread(path)
                height, width, _ = image.shape
                o['width'] = width
                o['height'] = height
            inst['object'].append(o)
            if i + 1 > max_box_per_image:
                max_box_per_image = i + 1
        instances.append(inst)
    return instances, max_box_per_image
def create_csv_training_instances(train_csv, test_csv, class_csv, with_wh=False):
    """Build train/valid instance lists from retinanet-style CSV files.

    Args:
        train_csv, test_csv: annotation CSVs (path, x1, y1, x2, y2, class).
        class_csv: class-name -> id mapping CSV.
        with_wh: if True, record each image's width/height (reads the file).

    Returns (train_ints, valid_ints, sorted_labels, max_box_per_image).
    """
    with _open_for_csv(class_csv) as file:
        classes = _read_classes(csv.reader(file, delimiter=','))
    with _open_for_csv(train_csv) as file:
        train_image_data = _read_annotations(csv.reader(file, delimiter=','), classes)
    with _open_for_csv(test_csv) as file:
        test_image_data = _read_annotations(csv.reader(file, delimiter=','), classes)
    labels = list(classes)
    # the two splits previously used duplicated loops; share one helper and
    # thread max_box_per_image through both calls
    train_ints, max_box_per_image = _csv_image_data_to_instances(train_image_data, with_wh, 0)
    valid_ints, max_box_per_image = _csv_image_data_to_instances(test_image_data, with_wh, max_box_per_image)
    return train_ints, valid_ints, sorted(labels), max_box_per_image
def _coco_generator_to_instances(coco_gen, with_empty, max_box_per_image):
    """Convert one CocoGenerator's annotations into instance dicts.

    Returns (instances, updated max_box_per_image). Images with no objects
    are kept only when `with_empty` is True.
    """
    instances = []
    for image_index in range(len(coco_gen.image_ids)):
        ann = coco_gen.load_annotations(image_index)
        image_info = coco_gen.coco.loadImgs(coco_gen.image_ids[image_index])[0]
        impath = os.path.join(coco_gen.image_dir, image_info['file_name'])
        instance = {'filename': impath,
                    'object': [],
                    'width': image_info['width'],
                    'height': image_info['height']}
        for j in range(len(ann['labels'])):
            obj = {'xmin': int(ann['bboxes'][j][0]),
                   'ymin': int(ann['bboxes'][j][1]),
                   'xmax': int(ann['bboxes'][j][2]),
                   'ymax': int(ann['bboxes'][j][3]),
                   'name': coco_gen.labels[ann['labels'][j]]}
            instance['object'].append(obj)
        if with_empty or len(instance['object']) > 0:
            instances.append(instance)
        if len(instance['object']) > max_box_per_image:
            max_box_per_image = len(instance['object'])
    return instances, max_box_per_image
def create_coco_training_instances(train_json,
                                   val_json,
                                   train_image_dir,
                                   val_image_dir,
                                   with_empty=True
                                   ):
    """Build train/valid instance lists from COCO json annotation files.

    Args:
        train_json, val_json: paths to the COCO annotation json files.
        train_image_dir, val_image_dir: image folders for each split.
        with_empty: keep images that have no (non-degenerate) annotations.

    Returns (train_ints, valid_ints, sorted_labels, max_box_per_image).
    """
    train_coco = CocoGenerator(train_json, train_image_dir)
    val_coco = CocoGenerator(val_json, val_image_dir)
    assert sorted(val_coco.labels) == sorted(
        train_coco.labels), r"Something's wrong, the labels in val and train seem to not the same"
    labels = {}
    for label in val_coco.labels:
        labels[val_coco.labels[label]] = 0
    # the two splits previously used duplicated loops; share one helper and
    # thread max_box_per_image through both calls
    train_ints, max_box_per_image = _coco_generator_to_instances(train_coco, with_empty, 0)
    valid_ints, max_box_per_image = _coco_generator_to_instances(val_coco, with_empty, max_box_per_image)
    return train_ints, valid_ints, sorted(labels), max_box_per_image
def create_training_instances(train_annot_folder,
                              train_image_folder,
                              valid_annot_folder,
                              valid_image_folder,
                              labels,
                              ):
    """Build train/valid instance lists from VOC-style annotation folders.

    If `valid_annot_folder` does not exist, the training set is split 80/20
    instead (deterministically, via a fixed shuffle seed). When `labels` is
    non-empty it is intersected with the labels actually seen; otherwise all
    seen labels are used.

    Returns (train_ints, valid_ints, sorted_labels, max_box_per_image).
    """
    # parse annotations of the training set
    train_ints, train_labels = parse_annotation(train_annot_folder, train_image_folder, labels)
    # parse annotations of the validation set, if any, otherwise split the training set
    if os.path.exists(valid_annot_folder):
        valid_ints, valid_labels = parse_annotation(valid_annot_folder, valid_image_folder, labels)
    else:
        print("valid_annot_folder not exists. Spliting the trainining set.")
        train_valid_split = int(0.8 * len(train_ints))
        # seed 0 makes the split reproducible, then reseed from the OS so
        # later random draws are not deterministic
        np.random.seed(0)
        np.random.shuffle(train_ints)
        np.random.seed()
        valid_ints = train_ints[train_valid_split:]
        train_ints = train_ints[:train_valid_split]
    # compare the seen labels with the given labels in config.json
    if len(labels) > 0:
        overlap_labels = set(labels).intersection(set(train_labels.keys()))
        print('Seen labels: \t' + str(train_labels) + '\n')
        print('Given labels: \t' + str(labels))
        # return None, None, None if some given label is not in the dataset
        if len(overlap_labels) < len(labels):
            print('\033[33m\nThese labels has no image')
            for label in labels:
                if label not in overlap_labels:
                    print(label)
            print('\033[0m')
        # keep only the labels that actually occur in the data
        labels = list(overlap_labels)
    else:
        print('No labels are provided. Train on all seen labels.')
        # print(train_labels)
        labels = train_labels.keys()
    max_box_per_image = max([len(inst['object']) for inst in (train_ints + valid_ints)])
    return train_ints, valid_ints, sorted(labels), max_box_per_image
class BoundBox:
    """Axis-aligned detection box with lazily-computed best class/score."""

    def __init__(self, xmin, ymin, xmax, ymax, c=None, classes=None):
        self.xmin, self.ymin = xmin, ymin
        self.xmax, self.ymax = xmax, ymax
        self.c = c              # objectness / confidence
        self.classes = classes  # per-class scores
        self.label = -1         # cache; -1 == not yet computed
        self.score = -1         # cache; -1 == not yet computed

    def get_label(self):
        """Return (and cache) the index of the best-scoring class."""
        if self.label == -1:
            self.label = np.argmax(self.classes)
        return self.label

    def get_score(self):
        """Return (and cache) the score of the best-scoring class."""
        if self.score == -1:
            self.score = self.classes[self.get_label()]
        return self.score
class WeightReader:
    """Sequential reader over a darknet-style weight file.

    The file is loaded as a flat float32 array; the 4 leading values are
    treated as a header and skipped.
    """

    def __init__(self, weight_file):
        self.offset = 4  # cursor into all_weights; starts past the header
        self.all_weights = np.fromfile(weight_file, dtype='float32')

    def read_bytes(self, size):
        """Return the next `size` float32 values and advance the cursor."""
        start = self.offset
        self.offset = start + size
        return self.all_weights[start:self.offset]

    def reset(self):
        """Rewind the cursor to just after the header."""
        self.offset = 4
def bbox_iou(box1, box2):
    """Intersection-over-union of two BoundBox-like objects.

    Args:
        box1, box2: objects exposing xmin/ymin/xmax/ymax attributes.

    Returns:
        IoU as a float. When both boxes are degenerate (zero union) 0.0 is
        returned instead of raising ZeroDivisionError as before.
    """
    intersect_w = _interval_overlap([box1.xmin, box1.xmax], [box2.xmin, box2.xmax])
    intersect_h = _interval_overlap([box1.ymin, box1.ymax], [box2.ymin, box2.ymax])
    intersect = intersect_w * intersect_h
    w1, h1 = box1.xmax - box1.xmin, box1.ymax - box1.ymin
    w2, h2 = box2.xmax - box2.xmin, box2.ymax - box2.ymin
    union = w1 * h1 + w2 * h2 - intersect
    if union == 0:
        # two zero-area boxes: define IoU as 0 rather than dividing by zero
        return 0.0
    return float(intersect) / union
def draw_boxes(image, boxes, labels):
    """Draw boxes (normalized [0, 1] coordinates) and captions on `image`.

    Coordinates are scaled to the image size and clipped to it; the color
    cycles through a fixed 6-color palette by class label. Returns the
    (mutated) image.
    """
    image_h, image_w, _ = image.shape
    palette = [(0, 255, 0), (0, 255, 255), (255, 255, 0), (0, 0, 255), (255, 0, 255), (255, 0, 0)]
    for box in boxes:
        x1 = max(0, int(box.xmin * image_w))
        y1 = max(0, int(box.ymin * image_h))
        x2 = min(int(box.xmax * image_w), image_w)
        y2 = min(int(box.ymax * image_h), image_h)
        box_color = palette[box.get_label() % 6]
        cv2.rectangle(image, (x1, y1), (x2, y2), box_color, 3)
        caption = labels[box.get_label()] + ' ' + str(box.get_score())
        cv2.putText(image,
                    caption,
                    (x1, y1 - 13),
                    cv2.FONT_HERSHEY_SIMPLEX,
                    1e-3 * image_h,
                    box_color, 1)
    return image
def decode_netout(netout, anchors, nb_class, obj_threshold=0.3, nms_threshold=0.3):
    """Decode a raw YOLOv2-style output grid into a list of BoundBox.

    Args:
        netout: array of shape (grid_h, grid_w, nb_box, 4 + 1 + nb_class);
            modified IN PLACE (sigmoid/softmax applied below).
        anchors: flat list of anchor sizes [w0, h0, w1, h1, ...].
        nb_class: number of classes.
        obj_threshold: minimum class score for a box to survive.
        nms_threshold: IoU at/above which overlapping boxes are suppressed.

    Returns:
        BoundBox list with coordinates normalized to [0, 1] of the image.
    """
    grid_h, grid_w, nb_box = netout.shape[:3]
    boxes = []
    # decode the output by the network:
    # objectness -> sigmoid; class scores -> objectness * softmax,
    # then zero out class scores at/below the threshold
    netout[..., 4] = _sigmoid(netout[..., 4])
    netout[..., 5:] = netout[..., 4][..., np.newaxis] * _softmax(netout[..., 5:])
    netout[..., 5:] *= netout[..., 5:] > obj_threshold
    for row in range(grid_h):
        for col in range(grid_w):
            for b in range(nb_box):
                # from 4th element onwards are confidence and class classes
                classes = netout[row, col, b, 5:]
                if np.sum(classes) > 0:
                    # first 4 elements are x, y, w, and h
                    x, y, w, h = netout[row, col, b, :4]
                    x = (col + _sigmoid(x)) / grid_w  # center position, unit: image width
                    y = (row + _sigmoid(y)) / grid_h  # center position, unit: image height
                    w = anchors[2 * b + 0] * np.exp(w) / grid_w  # unit: image width
                    h = anchors[2 * b + 1] * np.exp(h) / grid_h  # unit: image height
                    confidence = netout[row, col, b, 4]
                    box = BoundBox(x - w / 2, y - h / 2, x + w / 2, y + h / 2, confidence, classes)
                    boxes.append(box)
    # suppress non-maximal boxes: per class, walk boxes in descending score
    # order and zero the class score of any box overlapping a kept one
    for c in range(nb_class):
        sorted_indices = list(reversed(np.argsort([box.classes[c] for box in boxes])))
        for i in range(len(sorted_indices)):
            index_i = sorted_indices[i]
            if boxes[index_i].classes[c] == 0:
                continue
            else:
                for j in range(i + 1, len(sorted_indices)):
                    index_j = sorted_indices[j]
                    if bbox_iou(boxes[index_i], boxes[index_j]) >= nms_threshold:
                        boxes[index_j].classes[c] = 0
    # remove the boxes which are less likely than a obj_threshold
    boxes = [box for box in boxes if box.get_score() > obj_threshold]
    return boxes
def decode_netoutv3(netout, anchors, obj_thresh, net_h, net_w):
    """Decode one YOLOv3 output scale into a list of BoundBox.

    Args:
        netout: one scale's raw output, reshaped below to
            (grid_h, grid_w, 3, 4 + 1 + num_classes); modified IN PLACE.
        anchors: flat anchor sizes for this scale [w0, h0, w1, h1, w2, h2].
        obj_thresh: objectness threshold below which cells are skipped.
        net_h, net_w: network input size used to normalize box w/h.

    Returns:
        BoundBox list with coordinates normalized to the network input.
        NMS is NOT applied here.
    """
    grid_h, grid_w = netout.shape[:2]
    nb_box = 3  # YOLOv3 uses 3 anchors per scale
    netout = netout.reshape((grid_h, grid_w, nb_box, -1))
    boxes = []
    # x, y and objectness -> sigmoid; class scores -> objectness * softmax,
    # then zero out class scores at/below the threshold
    netout[..., :2] = _sigmoid(netout[..., :2])
    netout[..., 4] = _sigmoid(netout[..., 4])
    netout[..., 5:] = netout[..., 4][..., np.newaxis] * _softmax(netout[..., 5:])
    netout[..., 5:] *= netout[..., 5:] > obj_thresh
    for i in range(grid_h * grid_w):
        row = i // grid_w
        col = i % grid_w
        for b in range(nb_box):
            # 4th element is objectness score
            objectness = netout[row, col, b, 4]
            if objectness <= obj_thresh:
                continue
            # first 4 elements are x, y, w, and h
            x, y, w, h = netout[row, col, b, :4]
            x = (col + x) / grid_w  # center position, unit: image width
            y = (row + y) / grid_h  # center position, unit: image height
            w = anchors[2 * b + 0] * np.exp(w) / net_w  # unit: image width
            h = anchors[2 * b + 1] * np.exp(h) / net_h  # unit: image height
            # last elements are class probabilities
            classes = netout[row, col, b, 5:]
            box = BoundBox(x - w / 2, y - h / 2, x + w / 2, y + h / 2, objectness, classes)
            boxes.append(box)
    return boxes
def compute_overlap(a, b):
    """
    Pairwise IoU between two sets of [x1, y1, x2, y2] boxes.
    Code originally from https://github.com/rbgirshick/py-faster-faster_rcnn.
    Parameters
    ----------
    a: (N, 4) ndarray of float
    b: (K, 4) ndarray of float
    Returns
    -------
    overlaps: (N, K) ndarray of overlap between boxes and query_boxes
    """
    area_b = (b[:, 2] - b[:, 0]) * (b[:, 3] - b[:, 1])
    # pairwise intersection widths/heights via broadcasting, clamped at zero
    iw = np.clip(np.minimum(a[:, 2:3], b[:, 2]) - np.maximum(a[:, 0:1], b[:, 0]), 0, None)
    ih = np.clip(np.minimum(a[:, 3:4], b[:, 3]) - np.maximum(a[:, 1:2], b[:, 1]), 0, None)
    intersection = iw * ih
    area_a = ((a[:, 2] - a[:, 0]) * (a[:, 3] - a[:, 1]))[:, None]
    # epsilon floor guards against division by a zero union
    union = np.maximum(area_a + area_b - intersection, np.finfo(float).eps)
    return intersection / union
def compute_ap(recall, precision):
    """ Compute average precision from recall/precision curves (VOC-style).
    Code originally from https://github.com/rbgirshick/py-faster-faster_rcnn.
    # Arguments
        recall:    The recall curve (list).
        precision: The precision curve (list).
    # Returns
        The average precision: area under the interpolated PR step curve.
    """
    # append sentinel values so the envelope and recall deltas are well defined
    mrec = np.concatenate(([0.], recall, [1.]))
    mpre = np.concatenate(([0.], precision, [0.]))
    # precision envelope: make mpre[j] = max(mpre[j:]) by a right-to-left
    # running maximum (vectorized form of the original backward loop)
    mpre = np.maximum.accumulate(mpre[::-1])[::-1]
    # indices where the recall (X axis) actually changes value
    changed = np.where(mrec[1:] != mrec[:-1])[0]
    # area under the step curve: sum of (delta recall) * precision
    return np.sum((mrec[changed + 1] - mrec[changed]) * mpre[changed + 1])
def _interval_overlap(interval_a, interval_b):
x1, x2 = interval_a
x3, x4 = interval_b
if x3 < x1:
if x4 < x1:
return 0
else:
return min(x2, x4) - x1
else:
if x2 < x3:
return 0
else:
return min(x2, x4) - x3
def _sigmoid(x):
return 1. / (1. + np.exp(-x))
def _softmax(x, axis=-1, t=-100.):
x = x - np.max(x)
if np.min(x) < t:
x = x / np.min(x) * t
e_x = np.exp(x)
return e_x / e_x.sum(axis, keepdims=True)
def _rand_scale(scale):
scale = np.random.uniform(1, scale)
return scale if (np.random.randint(2) == 0) else 1. / scale
def _constrain(min_v, max_v, value):
if value < min_v:
return min_v
if value > max_v:
return max_v
return value
def random_flip(image, flip):
    """Horizontally mirror `image` when flip == 1; otherwise return it as-is."""
    return cv2.flip(image, 1) if flip == 1 else image
def correct_bounding_boxes(boxes, new_w, new_h, net_w, net_h, dx, dy, flip, image_w, image_h):
    """Transform ground-truth boxes to match an augmented image.

    Boxes (dicts with xmin/xmax/ymin/ymax) are deep-copied and shuffled,
    scaled from (image_w, image_h) to (new_w, new_h), shifted by (dx, dy),
    clipped to the network input size, and mirrored when flip == 1.
    Boxes that collapse to zero area after clipping are dropped.
    """
    boxes = copy.deepcopy(boxes)
    np.random.shuffle(boxes)  # randomize boxes' order
    sx, sy = float(new_w) / image_w, float(new_h) / image_h
    kept = []
    for box in boxes:
        box['xmin'] = int(_constrain(0, net_w, box['xmin'] * sx + dx))
        box['xmax'] = int(_constrain(0, net_w, box['xmax'] * sx + dx))
        box['ymin'] = int(_constrain(0, net_h, box['ymin'] * sy + dy))
        box['ymax'] = int(_constrain(0, net_h, box['ymax'] * sy + dy))
        if box['xmax'] <= box['xmin'] or box['ymax'] <= box['ymin']:
            continue  # degenerate after clipping: drop
        if flip == 1:
            box['xmin'], box['xmax'] = net_w - box['xmax'], net_w - box['xmin']
        kept.append(box)
    return kept
def random_distort_image(image, hue=18, saturation=1.5, exposure=1.5):
    """Randomly jitter hue / saturation / exposure of an RGB image.

    Works in HSV space: saturation and value are multiplied by random
    scales, hue is shifted and wrapped into OpenCV's [0, 180] range.
    """
    # draw the per-channel distortion factors
    dhue = np.random.uniform(-hue, hue)
    dsat = _rand_scale(saturation)
    dexp = _rand_scale(exposure)
    hsv = cv2.cvtColor(image, cv2.COLOR_RGB2HSV).astype('float')
    # scale saturation and exposure (value) channels
    hsv[:, :, 1] *= dsat
    hsv[:, :, 2] *= dexp
    # shift hue and wrap it back into [0, 180]
    hsv[:, :, 0] += dhue
    hsv[:, :, 0] -= (hsv[:, :, 0] > 180) * 180
    hsv[:, :, 0] += (hsv[:, :, 0] < 0) * 180
    return cv2.cvtColor(hsv.astype('uint8'), cv2.COLOR_HSV2RGB)
def apply_random_scale_and_crop(image, new_w, new_h, net_w, net_h, dx, dy):
    """Resize `image` to (new_w, new_h), then place it at offset (dx, dy)
    inside a (net_h, net_w) canvas.

    Positive dx/dy pad the left/top with gray (127); negative values crop
    instead. The right/bottom are padded out to the network size as needed.
    Returns an array of exactly (net_h, net_w, 3).
    """
    try:
        im_sized = cv2.resize(image, (new_w, new_h))
    except cv2.error as e:
        # resize can fail (e.g. non-positive target size); surface the sizes
        print('something')
        print(new_w, new_h)
        raise cv2.error('{}, {} {}'.format(new_w, new_h, e.__cause__))
    # horizontal: pad the left when dx > 0, otherwise crop the left edge
    if dx > 0:
        im_sized = np.pad(im_sized, ((0, 0), (dx, 0), (0, 0)), mode='constant', constant_values=127)
    else:
        im_sized = im_sized[:, -dx:, :]
    # pad the right edge out to the network width if still short
    if (new_w + dx) < net_w:
        im_sized = np.pad(im_sized, ((0, 0), (0, net_w - (new_w + dx)), (0, 0)), mode='constant', constant_values=127)
    # vertical: pad the top when dy > 0, otherwise crop the top edge
    if dy > 0:
        im_sized = np.pad(im_sized, ((dy, 0), (0, 0), (0, 0)), mode='constant', constant_values=127)
    else:
        im_sized = im_sized[-dy:, :, :]
    # pad the bottom edge out to the network height if still short
    if (new_h + dy) < net_h:
        im_sized = np.pad(im_sized, ((0, net_h - (new_h + dy)), (0, 0), (0, 0)), mode='constant', constant_values=127)
    return im_sized[:net_h, :net_w, :]
def makedirs(path):
    """Create `path` (including parents) if it does not exist.

    An already-existing directory is not an error; any other failure
    (e.g. the path exists as a file, or a permission problem) raises
    OSError, matching the original try/except semantics.
    """
    os.makedirs(path, exist_ok=True)
def label_to_coco_label(label):
    """Map a contiguous 0..79 network label to the (gappy) COCO category id.

    Raises KeyError for unknown labels, like the original literal dict.
    """
    coco_ids = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18,
                19, 20, 21, 22, 23, 24, 25, 27, 28, 31, 32, 33, 34, 35, 36,
                37, 38, 39, 40, 41, 42, 43, 44, 46, 47, 48, 49, 50, 51, 52,
                53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 67, 70,
                72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 84, 85, 86, 87,
                88, 89, 90]
    return dict(enumerate(coco_ids))[label]
def coco_label_to_label(coco_label):
    """Map a (gappy) COCO category id back to the contiguous 0..79 label.

    Returns -1 when the id is not a valid COCO category, preserving the
    original contract — but via an O(1) inverted-dict lookup instead of a
    linear scan over the forward mapping.
    """
    coco_ids = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18,
                19, 20, 21, 22, 23, 24, 25, 27, 28, 31, 32, 33, 34, 35, 36,
                37, 38, 39, 40, 41, 42, 43, 44, 46, 47, 48, 49, 50, 51, 52,
                53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 67, 70,
                72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 84, 85, 86, 87,
                88, 89, 90]
    inverse = {coco_id: label for label, coco_id in enumerate(coco_ids)}
    return inverse.get(coco_label, -1)
def boundbox2cocobox(boxes, scale):
    """
    :param scale: resize scale previously applied to the image; coordinates
        are divided by it to map back into the original image.
    :param boxes: [Bndbox(), Bndbox(),...]
    :return: boxes: [[x, y, w, h]]
             scores: float
             labels: int
    """
    cocoboxes = [[b.xmin / scale,
                  b.ymin / scale,
                  (b.xmax - b.xmin) / scale,
                  (b.ymax - b.ymin) / scale] for b in boxes]
    scores = [b.get_score() for b in boxes]
    labels = [b.get_label() for b in boxes]
    assert len(cocoboxes) == len(scores) == len(labels)
    return cocoboxes, scores, labels
def compute_resize_scale(image_shape, min_side=800, max_side=1333):
    """ Compute a scale so the image's small side becomes min_side, capped so
    the large side never exceeds max_side.
    Args
        min_side: The image's min side will be equal to min_side after resizing.
        max_side: Upper bound on the resized image's max side.
    Returns
        A resizing scale.
    """
    rows, cols, _ = image_shape
    scale = min_side / min(rows, cols)
    # large aspect ratios: cap the scale so the long side fits max_side
    if max(rows, cols) * scale > max_side:
        scale = max_side / max(rows, cols)
    return scale
def resize_image(img, min_side=800, max_side=1333):
    """ Resize `img` so its small side equals min_side, capped by max_side.
    Args
        min_side: The image's min side will be equal to min_side after resizing.
        max_side: Upper bound on the resized image's max side.
    Returns
        (resized_image, scale) where `scale` is the factor that was applied.
    """
    scale = compute_resize_scale(img.shape, min_side=min_side, max_side=max_side)
    resized = cv2.resize(img, None, fx=scale, fy=scale)
    return resized, scale
# noinspection PyTypeChecker
def evaluate_coco(generator, model, anchors, json_path, imsize=448, threshold=0.5):
    """ Use the pycocotools to evaluate a COCO model on a dataset.
    Args
        generator : The generator for generating the evaluation data.
        model     : The model to evaluate.
        anchors   : Anchor sizes passed through to get_yolo_boxes.
        json_path : Path to the COCO ground-truth annotation json.
        imsize    : Network input size (and resize upper bound).
        threshold : The score threshold to use.
    Returns
        coco_eval.stats, or None when no detections were produced.
    Side effects: writes coco_eval_temp.pk and two json files to the CWD.
    """
    # start collecting results
    import pickle
    # NOTE(review): an existing coco_eval_temp.pk is reused unconditionally —
    # delete the file to force detections to be recomputed.
    if os.path.exists('coco_eval_temp.pk'):
        results, image_ids = pickle.load(open('coco_eval_temp.pk', 'rb'))
    else:
        results = []
        image_ids = []
        for index in range(generator.size()):
            # if index % 50 == 0:
            #     print()
            print(index, end='\r')
            image = generator.load_image(index)
            image, scale = resize_image(image, 360, imsize)
            image = np.expand_dims(image, 0)
            boxes = get_yolo_boxes(model,
                                   image,
                                   imsize, imsize,
                                   anchors,
                                   0.5,
                                   0.5,
                                   preprocess=True)[0]
            # convert to COCO [x, y, w, h] in original-image coordinates
            boxes, scores, labels = boundbox2cocobox(boxes, scale)
            # assert len(boxes) > 0
            # compute predicted labels and scores
            # the image id is parsed from the numeric file stem
            image_id = int(os.path.split(generator.instances[index]['filename'])[-1][:-4])
            for box, score, label in zip(boxes, scores, labels):
                # scores are sorted, so we can break
                if score < threshold:
                    break
                # append detection for each positively labeled class
                image_result = {
                    'image_id': image_id,
                    'category_id': label_to_coco_label(label),  # todo:
                    'score': float(score),
                    'bbox': box,
                }
                # append detection to results
                results.append(image_result)
            # append image to list of processed images
            image_ids.append(image_id)
        # cache detections so subsequent runs skip straight to scoring
        with open('coco_eval_temp.pk', 'wb') as wr:
            pickle.dump([results, image_ids], wr)
    if not len(results):
        return
    import json
    # write output
    json.dump(results, open('{}_bbox_results.json'.format('val2017'), 'w'), indent=4)
    json.dump(image_ids, open('{}_processed_image_ids.json'.format('val2017'), 'w'), indent=4)
    # load results in COCO evaluation tool
    coco_true = COCO(json_path)
    coco_pred = coco_true.loadRes('{}_bbox_results.json'.format('val2017'))
    # run COCO evaluation
    coco_eval = COCOeval(coco_true, coco_pred, 'bbox')
    coco_eval.params.imgIds = image_ids
    coco_eval.evaluate()
    coco_eval.accumulate()
    coco_eval.summarize()
    return coco_eval.stats
# noinspection PyTypeChecker
def evaluate(model,
             generator,
             iou_threshold=0.5,
             obj_thresh=0.5,
             nms_thresh=0.45,
             net_h=416,
             net_w=416,
             save_path=None):
    """ Evaluate a given dataset using a given model.
    code originally from https://github.com/fizyr/keras-retinanet
    # Arguments
        model         : The model to evaluate.
        generator     : The generator that represents the dataset to evaluate.
        iou_threshold : The threshold used to consider when a detection is positive or negative.
        obj_thresh    : The threshold used to distinguish between object and non-object
        nms_thresh    : The threshold used to determine whether two detections are duplicates
        net_h         : The height of the input image to the model, higher value results in better accuracy
        net_w         : The width of the input image to the model
        save_path     : The path to save images with visualized detections to.
    # Returns
        A dict mapping class names to mAP scores.
    """
    # gather all detections and annotations, indexed [image][class]
    all_detections = [[None for _ in range(generator.num_classes())] for _ in range(generator.size())]
    all_annotations = [[None for _ in range(generator.num_classes())] for _ in range(generator.size())]
    for i in range(generator.size()):
        print(i, end='\r')
        raw_image = [generator.load_image(i)]
        # make the boxes and the labels
        pred_boxes = get_yolo_boxes(model, raw_image, net_h, net_w, generator.get_anchors(), obj_thresh, nms_thresh)[0]
        score = np.array([box.get_score() for box in pred_boxes])
        pred_labels = np.array([box.label for box in pred_boxes])
        if len(pred_boxes) > 0:
            # each detection row is [x1, y1, x2, y2, score]
            pred_boxes = np.array([[box.xmin, box.ymin, box.xmax, box.ymax, box.get_score()] for box in pred_boxes])
        else:
            pred_boxes = np.array([[]])
        # sort the boxes and the labels according to scores (descending)
        score_sort = np.argsort(-score)
        pred_labels = pred_labels[score_sort]
        pred_boxes = pred_boxes[score_sort]
        # copy detections to all_detections
        for label in range(generator.num_classes()):
            all_detections[i][label] = pred_boxes[pred_labels == label, :]
        annotations = generator.load_annotation(i)
        # copy detections to all_annotations
        for label in range(generator.num_classes()):
            try:
                # column 4 of the annotation array holds the class label
                all_annotations[i][label] = annotations[annotations[:, 4] == label, :4].copy()
            except IndexError:
                pass
    # compute mAP by comparing all detections and all annotations
    average_precisions = {}
    for label in range(generator.num_classes()):
        print()
        false_positives = np.zeros((0,))
        true_positives = np.zeros((0,))
        scores = np.zeros((0,))
        num_annotations = 0.0
        for i in range(generator.size()):
            print(i, end='\r')
            detections = all_detections[i][label]
            annotations = all_annotations[i][label]
            num_annotations += annotations.shape[0]
            detected_annotations = []
            for d in detections:
                scores = np.append(scores, d[4])
                if annotations.shape[0] == 0:
                    # detection on an image without this class: false positive
                    false_positives = np.append(false_positives, 1)
                    true_positives = np.append(true_positives, 0)
                    continue
                overlaps = compute_overlap(np.expand_dims(d, axis=0), annotations)
                assigned_annotation = np.argmax(overlaps, axis=1)
                max_overlap = overlaps[0, assigned_annotation]
                # a true positive needs enough IoU AND an unclaimed annotation
                if max_overlap >= iou_threshold and assigned_annotation not in detected_annotations:
                    false_positives = np.append(false_positives, 0)
                    true_positives = np.append(true_positives, 1)
                    detected_annotations.append(assigned_annotation)
                else:
                    false_positives = np.append(false_positives, 1)
                    true_positives = np.append(true_positives, 0)
        # no annotations -> AP for this class is 0 (is this correct?)
        if num_annotations == 0:
            average_precisions[label] = 0
            continue
        # sort by score
        indices = np.argsort(-scores)
        false_positives = false_positives[indices]
        true_positives = true_positives[indices]
        # compute false positives and true positives
        false_positives = np.cumsum(false_positives)
        true_positives = np.cumsum(true_positives)
        # compute recall and precision
        recall = true_positives / num_annotations
        precision = true_positives / np.maximum(true_positives + false_positives, np.finfo(np.float64).eps)
        # compute average precision
        average_precision = compute_ap(recall, precision)
        average_precisions[label] = average_precision
    return average_precisions
# noinspection PyTypeChecker
def evaluate_acc(model,
                 generator,
                 iou_threshold=0.5,
                 obj_thresh=0.5,
                 nms_thresh=0.45,
                 net_h=416,
                 net_w=416,
                 save_path=None):
    """ Evaluate a given dataset using a given model.
    code originally from https://github.com/fizyr/keras-retinanet

    NOTE(review): this function is byte-identical to evaluate() despite its
    name suggesting an accuracy metric — consider delegating to or removing
    one of the two.
    # Arguments
        model         : The model to evaluate.
        generator     : The generator that represents the dataset to evaluate.
        iou_threshold : The threshold used to consider when a detection is positive or negative.
        obj_thresh    : The threshold used to distinguish between object and non-object
        nms_thresh    : The threshold used to determine whether two detections are duplicates
        net_h         : The height of the input image to the model, higher value results in better accuracy
        net_w         : The width of the input image to the model
        save_path     : The path to save images with visualized detections to.
    # Returns
        A dict mapping class names to mAP scores.
    """
    # gather all detections and annotations, indexed [image][class]
    all_detections = [[None for _ in range(generator.num_classes())] for _ in range(generator.size())]
    all_annotations = [[None for _ in range(generator.num_classes())] for _ in range(generator.size())]
    for i in range(generator.size()):
        print(i, end='\r')
        raw_image = [generator.load_image(i)]
        # make the boxes and the labels
        pred_boxes = get_yolo_boxes(model, raw_image, net_h, net_w, generator.get_anchors(), obj_thresh, nms_thresh)[0]
        score = np.array([box.get_score() for box in pred_boxes])
        pred_labels = np.array([box.label for box in pred_boxes])
        if len(pred_boxes) > 0:
            # each detection row is [x1, y1, x2, y2, score]
            pred_boxes = np.array([[box.xmin, box.ymin, box.xmax, box.ymax, box.get_score()] for box in pred_boxes])
        else:
            pred_boxes = np.array([[]])
        # sort the boxes and the labels according to scores (descending)
        score_sort = np.argsort(-score)
        pred_labels = pred_labels[score_sort]
        pred_boxes = pred_boxes[score_sort]
        # copy detections to all_detections
        for label in range(generator.num_classes()):
            all_detections[i][label] = pred_boxes[pred_labels == label, :]
        annotations = generator.load_annotation(i)
        # copy detections to all_annotations
        for label in range(generator.num_classes()):
            try:
                # column 4 of the annotation array holds the class label
                all_annotations[i][label] = annotations[annotations[:, 4] == label, :4].copy()
            except IndexError:
                pass
    # compute mAP by comparing all detections and all annotations
    average_precisions = {}
    for label in range(generator.num_classes()):
        print()
        false_positives = np.zeros((0,))
        true_positives = np.zeros((0,))
        scores = np.zeros((0,))
        num_annotations = 0.0
        for i in range(generator.size()):
            print(i, end='\r')
            detections = all_detections[i][label]
            annotations = all_annotations[i][label]
            num_annotations += annotations.shape[0]
            detected_annotations = []
            for d in detections:
                scores = np.append(scores, d[4])
                if annotations.shape[0] == 0:
                    # detection on an image without this class: false positive
                    false_positives = np.append(false_positives, 1)
                    true_positives = np.append(true_positives, 0)
                    continue
                overlaps = compute_overlap(np.expand_dims(d, axis=0), annotations)
                assigned_annotation = np.argmax(overlaps, axis=1)
                max_overlap = overlaps[0, assigned_annotation]
                # a true positive needs enough IoU AND an unclaimed annotation
                if max_overlap >= iou_threshold and assigned_annotation not in detected_annotations:
                    false_positives = np.append(false_positives, 0)
                    true_positives = np.append(true_positives, 1)
                    detected_annotations.append(assigned_annotation)
                else:
                    false_positives = np.append(false_positives, 1)
                    true_positives = np.append(true_positives, 0)
        # no annotations -> AP for this class is 0 (is this correct?)
        if num_annotations == 0:
            average_precisions[label] = 0
            continue
        # sort by score
        indices = np.argsort(-scores)
        false_positives = false_positives[indices]
        true_positives = true_positives[indices]
        # compute false positives and true positives
        false_positives = np.cumsum(false_positives)
        true_positives = np.cumsum(true_positives)
        # compute recall and precision
        recall = true_positives / num_annotations
        precision = true_positives / np.maximum(true_positives + false_positives, np.finfo(np.float64).eps)
        # compute average precision
        average_precision = compute_ap(recall, precision)
        average_precisions[label] = average_precision
    return average_precisions
def normalize(image):
MEAN_RGB = [0.485 * 255, 0.456 * 255, 0.406 * 255]
STDDEV_RGB = [0.229 * 255, 0.224 * 255, 0.225 * 255]
image = np.subtract(image.astype('float32'), MEAN_RGB)
image = np.divide(image, STDDEV_RGB)
return image # effnet use this instead of image/255.
def draw_boxesv3(image, boxes, labels, obj_thresh):
    """Annotate *image* in place with detection rectangles and class labels.

    Args:
        image: BGR numpy image to draw on.
        boxes: detections exposing xmin/ymin/xmax/ymax, per-class scores in
            ``classes``, and ``get_label()`` / ``get_score()``.
        labels: class names indexed by class id.
        obj_thresh: minimum per-class score for a label to be rendered.

    Returns:
        The same image array, annotated.
    """
    palette = [(255, 0, 0), (0, 255, 0), (0, 0, 255),
               (255, 0, 255), (255, 255, 0), (0, 255, 255),
               (0, 0, 0), (255, 255, 255),
               ]
    for box in boxes:
        label_str = ''
        label = -1
        # Concatenate every class name whose score clears the threshold;
        # the last such index marks the box as worth drawing.
        for i in range(len(labels)):
            if box.classes[i] > obj_thresh:
                label_str += labels[i]
                label = i
        if label >= 0:
            # Consistency fix: rectangle previously used ``% 6`` while the
            # text used ``% 8``, so labels 6/7 got mismatched colors. Both
            # now cycle through the full palette.
            box_color = palette[box.get_label() % len(palette)]
            cv2.rectangle(image, (box.xmin, box.ymin), (box.xmax, box.ymax), box_color, 1)
            cv2.putText(image,
                        label_str + ' ' + str(box.get_score()),
                        (box.xmin, box.ymin - 13),
                        cv2.FONT_HERSHEY_SIMPLEX,
                        1e-3 * image.shape[0],  # scale font with image height
                        box_color, 1)
    return image
def preprocess_input(image, net_h, net_w):
    """Resize a BGR image into a normalized, letterboxed network input batch."""
    img_h, img_w, _ = image.shape
    # Fit the image inside (net_h, net_w) keeping aspect ratio: the tighter
    # dimension dictates the scale.
    if float(net_w) / img_w < float(net_h) / img_h:
        scaled_w = net_w
        scaled_h = (img_h * net_w) // img_w
    else:
        scaled_h = net_h
        scaled_w = (img_w * net_h) // img_h
    # BGR -> RGB, normalize, then resize to the computed size.
    resized = cv2.resize(normalize(image[:, :, ::-1]), (scaled_w, scaled_h))
    # Paste centered into a mid-gray letterbox canvas and add a batch axis.
    canvas = np.ones((net_h, net_w, 3)) * 0.5
    top = (net_h - scaled_h) // 2
    left = (net_w - scaled_w) // 2
    canvas[top:top + scaled_h, left:left + scaled_w, :] = resized
    return np.expand_dims(canvas, 0)
def get_yolo_boxes(model, images, net_h, net_w, anchors, obj_thresh, nms_thresh, preprocess=True):
    """Run the YOLOv3 model on a batch of images and return per-image boxes.

    Args:
        model: Keras model with three output scales (predict_on_batch).
        images: list of BGR images.
        net_h, net_w: network input size used for letterboxing.
        anchors: flat anchor list; sliced 6-at-a-time per output scale.
        obj_thresh: objectness threshold for decoding.
        nms_thresh: IoU threshold for non-maximum suppression.
        preprocess: skip letterbox/normalization if the caller already did it.

    Returns:
        list (one entry per image) of decoded, rescaled, NMS-filtered boxes.

    NOTE(review): all boxes are rescaled with images[0]'s dimensions, so this
    assumes every image in the batch has the same shape -- confirm at callers.
    """
    image_h, image_w, _ = images[0].shape
    nb_images = len(images)
    batch_input = np.zeros((nb_images, net_h, net_w, 3))
    # preprocess the input
    if preprocess:
        for i in range(nb_images):
            batch_input[i] = preprocess_input(images[i], net_h, net_w)
    # run the prediction
    batch_output = model.predict_on_batch(batch_input)
    batch_boxes = [None] * nb_images
    for i in range(nb_images):
        yolos = [batch_output[0][i], batch_output[1][i], batch_output[2][i]]
        boxes = []
        # decode the output of the network
        for j in range(len(yolos)):
            # Scales are ordered large->small; anchors are stored small->large,
            # hence the reversed (2 - j) slice of 6 values (3 anchor pairs).
            yolo_anchors = anchors[(2 - j) * 6:(3 - j) * 6] # config['model']['anchors']
            boxes += decode_netoutv3(yolos[j], yolo_anchors, obj_thresh, net_h, net_w)
        # correct the sizes of the bounding boxes
        correct_yolo_boxes(boxes, image_h, image_w, net_h, net_w)
        # suppress non-maximal boxes
        do_nms(boxes, nms_thresh)
        batch_boxes[i] = boxes
    return batch_boxes
def correct_yolo_boxes(boxes, image_h, image_w, net_h, net_w):
    """Map boxes from normalized letterboxed net space back to image pixels.

    Boxes arrive with coordinates in [0, 1] relative to the (net_h, net_w)
    letterboxed input; they are rewritten in place as integer pixel
    coordinates of the original (image_h, image_w) image.
    """
    # Recompute the aspect-preserving size the image was resized to before
    # letterboxing (must mirror the rule used in preprocess_input).
    if (float(net_w) / image_w) < (float(net_h) / image_h):
        new_w = net_w
        new_h = (image_h * net_w) / image_w
    else:
        # Bug fix: the scaled height is bounded by net_h, not net_w.
        # (Only mattered for non-square network inputs.)
        new_h = net_h
        new_w = (image_w * net_h) / image_h
    # Letterbox padding offsets/scales in normalized net coordinates;
    # loop-invariant, so computed once instead of per box.
    x_offset, x_scale = (net_w - new_w) / 2. / net_w, float(new_w) / net_w
    y_offset, y_scale = (net_h - new_h) / 2. / net_h, float(new_h) / net_h
    for i in range(len(boxes)):
        boxes[i].xmin = int((boxes[i].xmin - x_offset) / x_scale * image_w)
        boxes[i].xmax = int((boxes[i].xmax - x_offset) / x_scale * image_w)
        boxes[i].ymin = int((boxes[i].ymin - y_offset) / y_scale * image_h)
        boxes[i].ymax = int((boxes[i].ymax - y_offset) / y_scale * image_h)
def do_nms(boxes, nms_thresh):
    """Per-class non-maximum suppression; zeroes suppressed scores in place."""
    if not boxes:
        return
    n_classes = len(boxes[0].classes)
    for c in range(n_classes):
        # Box indices ordered by descending score for this class.
        order = np.argsort([-b.classes[c] for b in boxes])
        for pos, idx in enumerate(order):
            if boxes[idx].classes[c] == 0:
                continue
            # Suppress every lower-ranked box that overlaps this one too much.
            for other in order[pos + 1:]:
                if bbox_iou(boxes[idx], boxes[other]) >= nms_thresh:
                    boxes[other].classes[c] = 0
| [
"palm22180@gmail.com"
] | palm22180@gmail.com |
0c5ddd754402e381d583db718376e182f1a98e59 | b86a1c66449bdb7ce8c14605cb37e96de9c8d89b | /util.py | 778b7b760a83d395d6a003fab7b31cd440926e25 | [] | no_license | offmeplz/9game | bbfc273e57ee0c231a278e8bcc40564839ba1f31 | 8b9a0e610126778939d6900e8c6b039861ee49e1 | refs/heads/master | 2021-01-24T04:09:40.290376 | 2012-09-16T14:31:47 | 2012-09-16T14:31:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,836 | py | #!/usr/bin/env python
#vim:fileencoding=utf-8
import itertools
import math
import os
import pygame
from pygame import Rect
from cfg import *
def load_image(name):
    '''Return: image, image rectangle pair.'''
    path = os.path.join(RESOURCE_PATH, name)
    surface = pygame.image.load(path)
    # Pick the conversion that matches whether the file has per-pixel alpha.
    if surface.get_alpha() is not None:
        surface = surface.convert_alpha()
    else:
        surface = surface.convert()
    return surface, surface.get_rect()
def load_image_array(name):
    """Load a horizontal sprite strip and split it into square frames.

    The image's width must be an integer multiple of its height; each
    height x height tile becomes one subsurface (shares pixels, no copy).
    """
    fullpath = os.path.join(RESOURCE_PATH, name)
    image = pygame.image.load(fullpath)
    if image.get_alpha() is None:
        image = image.convert()
    else:
        image = image.convert_alpha()
    width, height = image.get_size()
    if width % height != 0:
        # Python 2 raise syntax (file is Python 2 throughout).
        raise ValueError, 'Image has invalid dimensions'
    image_list = []
    rect = Rect((0, 0), (height, height))
    for i in xrange(width / height):
        rect.left = i * height
        image_list.append(image.subsurface(rect))
    return image_list
def screen2game(screen_pos):
    """Convert a screen pixel position to integer game-cell coordinates."""
    px, py = screen_pos
    return px / GAME_CELL_SIZE, py / GAME_CELL_SIZE
def screen2fgame(screen_pos):
    """Convert a screen pixel position to fractional game coordinates
    (origin shifted by half a cell so cell centers land on integers)."""
    cell = float(GAME_CELL_SIZE)
    px, py = screen_pos
    half = cell / 2
    return (px - half) / cell, (py - half) / cell
def game2screen(game_pos):
    """Convert game coordinates to the top-left screen pixel of the cell."""
    gx, gy = game_pos
    return int(gx * GAME_CELL_SIZE), int(gy * GAME_CELL_SIZE)
# Top-left conversion is the default screen mapping.
game2tlscreen = game2screen
def game2cscreen(game_pos):
    """Convert game coordinates to the screen pixel at the cell's center."""
    left, top = game2screen(game_pos)
    half = GAME_CELL_SIZE / 2
    return (int(left + half), int(top + half))
def game2screencellrect(g_rect):
    """Scale a cell-based Rect into pixel units; returns a scaled copy."""
    s_rect = g_rect.copy()
    # Position first, then size, matching Rect attribute semantics.
    for attr in ('top', 'left', 'width', 'height'):
        setattr(s_rect, attr, getattr(s_rect, attr) * GAME_CELL_SIZE)
    return s_rect
def screen2gamecellrect(s_rect):
    """Convert a pixel Rect to cell units.

    Raises ValueError if any edge or dimension is not aligned to the
    cell grid (i.e. not divisible by GAME_CELL_SIZE).
    """
    if s_rect.top % GAME_CELL_SIZE or \
       s_rect.left % GAME_CELL_SIZE or \
       s_rect.width % GAME_CELL_SIZE or \
       s_rect.height % GAME_CELL_SIZE:
        # Python 2 raise syntax (file is Python 2 throughout).
        raise ValueError, "Rect %s is not cellrect" % s_rect
    g_rect = s_rect.copy()
    g_rect.top /= GAME_CELL_SIZE
    g_rect.left /= GAME_CELL_SIZE
    g_rect.width /= GAME_CELL_SIZE
    g_rect.height /= GAME_CELL_SIZE
    return g_rect
def signum(num):
    """Return 1, -1 or 0 according to the sign of *num*."""
    if num == 0:
        return 0
    return 1 if num > 0 else -1
def collideline(rect, line):
    """
    Check if line collide rectangle.
    rect - Rect object.
    line - a pair of points.
    """
    p1, p2 = line
    # Degenerate segment: reduces to a point-in-rect test.
    if p1 == p2:
        return rect.collidepoint(p1)
    # Check if rect with (p1,p2) as diagonal collides rect.
    linerect = Rect(
            (min(p1[0], p2[0]), min(p1[1], p2[1])),
            (abs(p1[0] - p2[0]), abs(p1[1] - p2[1])))
    if not rect.colliderect(linerect):
        return False
    # Check if both half planes (formed by line) have at least one rect corner.
    sides = [False, False]
    for p in (rect.topleft, rect.topright, rect.bottomleft, rect.bottomright):
        # 2D cross product: its sign tells which side of line (p1,p2) p is on.
        v = (p2[0] - p1[0]) * (p[1] - p1[1]) - (p2[1] - p1[1]) * (p[0] - p1[0])
        if v >= 0:
            sides[0] = True
        if v <= 0:
            sides[1] = True
    return sides[0] and sides[1]
def anycollideline(rects, line):
    '''
    Check if any of rectangles collides line.
    rects - iterable of Rect.
    line - a pair of points.

    Inlined version of collideline applied to each rect (bounding-box
    rejection first, then the corner/half-plane test).
    '''
    p1, p2 = line
    if p1 == p2:
        return any(r.collidepoint(p1) for r in rects)
    # Bounding rect of the segment, used as a cheap first filter.
    linerect = Rect(
            (min(p1[0], p2[0]), min(p1[1], p2[1])),
            (abs(p1[0] - p2[0]), abs(p1[1] - p2[1])))
    for rect in rects:
        if rect.colliderect(linerect):
            sides = [False, False]
            for p in (rect.topleft, rect.topright, rect.bottomleft, rect.bottomright):
                # Cross product sign -> which half-plane the corner lies in.
                v = (p2[0] - p1[0]) * (p[1] - p1[1]) - (p2[1] - p1[1]) * (p[0] - p1[0])
                if v >= 0:
                    sides[0] = True
                if v <= 0:
                    sides[1] = True
            if sides[0] and sides[1]:
                return True
    return False
def is_walkable(begin, end, radius, sprites):
    """Check that a walker of the given radius can move from begin to end
    without hitting any sprite rect.

    Tests the two edge lines of the swept corridor (center line offset by
    +/- radius along the perpendicular) against all sprite rects.
    """
    if begin == end:
        # Python 2 raise syntax (file is Python 2 throughout).
        raise ValueError, 'begin and end are the same'
    begin = Vec(begin)
    end = Vec(end)
    linevec = end - begin
    # Perpendicular of the travel direction, scaled to the walker's radius.
    shift = linevec.perpendicular()
    shift *= radius / abs(shift)
    line1 = (begin + shift, end + shift)
    if anycollideline((s.rect for s in sprites), line1):
        return False
    line2 = (begin - shift, end - shift)
    if anycollideline((s.rect for s in sprites), line2):
        return False
    return True
def placeintsegment(approxcenter, size):
size = int(size)
if size % 2 == 0:
return int(math.ceil(approxcenter)) - size / 2
else:
return int(round(approxcenter)) - size / 2
def placeintrect(approxcenter, sizes):
    """Return the integer top-left corner of a rect of *sizes* roughly
    centered at *approxcenter* (per-axis placeintsegment)."""
    return (placeintsegment(approxcenter[0], sizes[0]),
            placeintsegment(approxcenter[1], sizes[1]))
def iterpoints(x, y=None):
    """Iterate integer grid points.

    Called with one Rect: yields all points of the rect, INCLUDING both
    far edges. Called with two pairs (origin, sizes): yields the half-open
    product range origin <= p < origin + sizes.
    """
    if y == None:
        rect = x
        return itertools.product(
                xrange(rect.left, rect.left + rect.width + 1),
                xrange(rect.top, rect.top + rect.height + 1))
    else:
        return itertools.product(
                xrange(x[0], x[0] + y[0]), xrange(x[1], x[1] + y[1]))
class Vec(object):
    """Mutable 2D vector with arithmetic, sequence access and equality.

    Constructible from another Vec, any 2-sequence, or two scalars.
    Note: the file is Python 2 (``__div__``, comma-``raise``).
    """
    # Fixed attribute set keeps instances small; many are created per frame.
    __slots__ = ['x', 'y']
    def __init__(self, x, y=None):
        if y is None:
            if isinstance(x, Vec):
                self.x = x.x
                self.y = x.y
            else:
                self.x, self.y = x
        else:
            self.x, self.y = x, y
    def __add__(self, other):
        return Vec(self.x + other.x, self.y + other.y)
    def __sub__(self, other):
        return Vec(self.x - other.x, self.y - other.y)
    def __iadd__(self, other):
        self.x += other.x
        self.y += other.y
        return self
    def __isub__(self, other):
        self.x -= other.x
        self.y -= other.y
        return self
    def __mul__(self, a):
        # Scalar multiplication only.
        return Vec(self.x * a, self.y * a)
    def __imul__(self, a):
        self.x *= a
        self.y *= a
        return self
    def __div__(self, a):
        # Python 2 division operator.
        return Vec(self.x / a, self.y / a)
    def __idiv__(self, a):
        self.x /= a
        self.y /= a
        return self
    def __neg__(self):
        return Vec(-self.x, -self.y)
    def __len__(self):
        # Always 2: enables tuple-style unpacking of a Vec.
        return 2
    def __iter__(self):
        return iter((self.x, self.y))
    def __getitem__(self, i):
        if i == 0:
            return self.x
        elif i == 1:
            return self.y
        else:
            raise IndexError, "Index is out of range."
    def __setitem__(self, i, x):
        if i == 0:
            self.x = x
        elif i == 1:
            self.y = x
        else:
            raise IndexError, "Index is out of range."
    def __str__(self):
        return "Vec(%s,%s)" % (str(self.x), str(self.y))
    def __repr__(self):
        return str(self)
    def __abs__(self):
        # Euclidean length.
        return math.sqrt(self.x ** 2 + self.y ** 2)
    def __eq__(self, other):
        # Equal to any 2-sequence with matching components (not just Vec).
        if hasattr(other, '__getitem__') and len(other) == 2:
            return self.x == other[0] and self.y == other[1]
        else:
            return False
    def __ne__(self, other):
        return not self == other
    def perpendicular(self):
        # 90-degree rotation; preserves length.
        return Vec(self.y, -self.x)
class MessageQueue(object):
    """Holds at most one on-screen message with an expiry time (in ticks)."""
    def __init__(self):
        self.msg = None       # current message text, or None
        self.endtime = None   # tick count at which the message expires
    def get_message(self):
        """Return the current message, dropping it once it has expired."""
        expired = (self.msg is not None
                   and self.endtime < pygame.time.get_ticks())
        if expired:
            self.msg = None
        return self.msg
    def post_message(self, msg, time):
        """Show *msg* for *time* seconds starting now."""
        self.msg = msg
        self.endtime = pygame.time.get_ticks() + time * 1000
| [
"epdmitry@yandex.ru"
] | epdmitry@yandex.ru |
8c07bb997a9c57cbe78d33f9d4148796158ad0c6 | 45f38bb8754d5144fbbdc72dbe0cc70b43cde8a7 | /migration/trash/synonym2.py | 957e0ba474b5cfd17c869281404d9e6aeabacada | [] | no_license | koyoru12/ShizuokaTrashNavi | aa2bec1e2b92fd0a6669840c373d7c715bc727e4 | ed443c701f1250f9867e2d2c19f7ae9280ba663f | refs/heads/master | 2020-04-27T07:46:29.644108 | 2019-03-21T02:16:57 | 2019-03-21T02:16:57 | 174,147,248 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,817 | py | import re
import csv
import uuid
from dbconn import DatabaseConnection
from settings import migrate_settings as SETTINGS
# Module-wide connection/cursor to the trash DB; the helpers below share
# them and commit explicitly at the end of the migration.
trash_conn = DatabaseConnection.get_connection(SETTINGS['database_path'])
trash_cursor = trash_conn.cursor()
def import_csv(file_path):
    """Read a CSV file and return its rows as dicts keyed by header.

    Fix: the original opened the file and never closed it (the handle
    leaked with the returned DictReader). The rows are now materialized
    inside a ``with`` block; the result is still iterable, so callers
    that loop over it are unaffected.
    """
    with open(file_path, newline='') as csv_file:
        return list(csv.DictReader(csv_file))
def import_synonym_data():
    """Populate the synonym tables for every trash item.

    For each row of the trash table, strips any parenthesized suffix from
    its name, looks up registered synonyms in the CSV, inserts the links,
    and commits once at the end.

    Cleanup: removed the never-used counters (count_exist, count_nonexist,
    count_synosym) and replaced ``== None`` with ``is None``.
    """
    trash_list = trash_cursor.execute('SELECT * FROM trash').fetchall()
    for trash in trash_list:
        # Strip a trailing parenthesized note, e.g. "bottle(plastic)".
        # The character class matches both ASCII '(' and full-width '('.
        trash_name = trash['name']
        m = re.match('([^\((]+)[\((]', trash_name)
        trash_name = trash_name if m is None else m.group(1)
        for synonym in find_synonym(trash_name):
            insert_synonym(trash['id'], synonym)
    trash_conn.commit()
    print('synonym table migrated successfully')
def find_synonym(trash_name):
    """Collect every synonym registered for *trash_name* in the synonym CSV."""
    rows = import_csv(SETTINGS['csv_synonym_path'])
    return [row['synonym'] for row in rows if row['trash_name'] == trash_name]
def insert_synonym(trash_id, synonym):
    """Ensure *synonym* exists in the synonym table and link it to *trash_id*.

    Does not commit; the caller is responsible for committing.
    """
    trash_cursor.execute('SELECT id FROM synonym WHERE name = ?', (synonym,))
    row = trash_cursor.fetchone()
    if row == None:
        # Synonym not registered yet: insert it with a fresh UUID.
        synonym_id = str(uuid.uuid4())
        trash_cursor.execute("""
        INSERT INTO synonym(
            id, name
        )VALUES(
            ?, ?
        ) """, (synonym_id, synonym))
    else:
        synonym_id = row['id']
    # Register the trash <-> synonym link.
    trash_cursor.execute("""
    INSERT INTO trash_synonym(
        trash_id, synonym_id
    )VALUES(
        ?, ?
    )
    """, (trash_id, synonym_id))
| [
"koyoru12@yahoo.co.jp"
] | koyoru12@yahoo.co.jp |
aca881e30875093e445b415e2338ebc303c5b3d1 | 30364152b551754ae4ef122e0ff9b4eb6c82d295 | /user/migrations/0008_user_register_time.py | 1ab56daa333270f291fe372f970ee7536c7cde52 | [
"MIT"
] | permissive | darkliang/JudeeBE | fa16d2dcccf276a082200d5750ed02f929e85224 | 7e0ef9c9a69215c8ffd20cd09ddd300df1dec729 | refs/heads/master | 2020-08-18T09:49:45.997545 | 2020-03-08T04:49:46 | 2020-03-08T04:49:46 | 215,775,981 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 494 | py | # Generated by Django 2.2.7 on 2019-11-21 02:06
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    """Add an auto-populated ``register_time`` timestamp to the User model."""
    dependencies = [
        ('user', '0007_auto_20191121_0205'),
    ]
    operations = [
        migrations.AddField(
            model_name='user',
            name='register_time',
            # auto_now_add stamps new rows at creation; the one-off default
            # (timezone.now) backfills existing rows during the migration,
            # hence preserve_default=False.
            field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),
            preserve_default=False,
        ),
    ]
| [
"11710911@mail.sustech.edu.cn"
] | 11710911@mail.sustech.edu.cn |
e0b1b9862bfdcbcd85808a3da492258a9d3be3b4 | 05148c0ea223cfc7ed9d16234ab3e6bb40885e9d | /Packages/matplotlib-2.2.2/examples/units/basic_units.py | be07f0c9fce57fe2c456d751270dfbe77b983ac9 | [
"MIT"
] | permissive | NightKirie/NCKU_NLP_2018_industry3 | 9ee226e194287fd9088429f87c58c874e050a8b3 | 23ac13644b140587e23cfeffb114c7c6f46f17a2 | refs/heads/master | 2021-06-05T05:33:09.510647 | 2018-07-05T10:19:47 | 2018-07-05T10:19:47 | 133,680,341 | 1 | 4 | MIT | 2020-05-20T16:29:54 | 2018-05-16T14:43:38 | Python | UTF-8 | Python | false | false | 10,875 | py | """
===========
Basic Units
===========
"""
import six
import math
import numpy as np
import matplotlib.units as units
import matplotlib.ticker as ticker
from matplotlib.axes import Axes
from matplotlib.cbook import iterable
class ProxyDelegate(object):
    """Descriptor that builds a proxy object for a named method on access."""
    def __init__(self, fn_name, proxy_type):
        self.fn_name = fn_name        # name of the method being proxied
        self.proxy_type = proxy_type  # proxy class to instantiate on access
    def __get__(self, obj, objtype=None):
        # Each attribute access yields a fresh proxy bound to the instance.
        return self.proxy_type(self.fn_name, obj)
class TaggedValueMeta(type):
    """Metaclass that installs ProxyDelegate descriptors for proxied ops.

    For every name in the class's ``_proxies`` table that the class does
    not already define, a ProxyDelegate is attached so attribute access on
    instances yields a unit-aware proxy for that operation.
    """
    def __init__(cls, name, bases, dict):
        for fn_name in cls._proxies:
            try:
                # Probe for an existing attribute; only install the
                # delegate when lookup fails.
                dummy = getattr(cls, fn_name)
            except AttributeError:
                setattr(cls, fn_name,
                        ProxyDelegate(fn_name, cls._proxies[fn_name]))
class PassThroughProxy(object):
    """Callable that forwards to the named method of ``obj.proxy_target``."""
    def __init__(self, fn_name, obj):
        self.fn_name = fn_name
        self.target = obj.proxy_target
    def __call__(self, *args):
        # Look the method up at call time on the stored target.
        method = getattr(self.target, self.fn_name)
        return method(*args)
class ConvertArgsProxy(PassThroughProxy):
    """Proxy that converts every argument to the owner's unit before calling."""
    def __init__(self, fn_name, obj):
        PassThroughProxy.__init__(self, fn_name, obj)
        self.unit = obj.unit
    def __call__(self, *args):
        converted_args = []
        for a in args:
            try:
                converted_args.append(a.convert_to(self.unit))
            except AttributeError:
                # Plain (unit-less) value: tag it with our unit as-is.
                converted_args.append(TaggedValue(a, self.unit))
        # Unwrap to raw values before invoking the underlying method.
        converted_args = tuple([c.get_value() for c in converted_args])
        return PassThroughProxy.__call__(self, *converted_args)
class ConvertReturnProxy(PassThroughProxy):
    """Pass-through proxy that re-tags the return value with the owner's unit."""
    def __init__(self, fn_name, obj):
        PassThroughProxy.__init__(self, fn_name, obj)
        self.unit = obj.unit
    def __call__(self, *args):
        result = PassThroughProxy.__call__(self, *args)
        if result is NotImplemented:
            return NotImplemented
        return TaggedValue(result, self.unit)
class ConvertAllProxy(PassThroughProxy):
    """Proxy that converts all arguments to the owner's unit before calling.

    Collects each argument's unit, converts values where possible, invokes
    the wrapped method, and resolves the result's unit via ``unit_resolver``.
    Returns NotImplemented when an argument carries a unit that cannot take
    part in the operation.
    """
    def __init__(self, fn_name, obj):
        PassThroughProxy.__init__(self, fn_name, obj)
        self.unit = obj.unit

    def __call__(self, *args):
        converted_args = []
        arg_units = [self.unit]
        for a in args:
            if hasattr(a, 'get_unit') and not hasattr(a, 'convert_to'):
                # if this arg has a unit type but no conversion ability,
                # this operation is prohibited
                return NotImplemented
            if hasattr(a, 'convert_to'):
                try:
                    a = a.convert_to(self.unit)
                except Exception:
                    # Conversion is best-effort: keep the original unit and
                    # let unit_resolver arbitrate below. (Fix: was a bare
                    # ``except:``, which also swallowed KeyboardInterrupt
                    # and SystemExit.)
                    pass
                arg_units.append(a.get_unit())
                converted_args.append(a.get_value())
            else:
                converted_args.append(a)
                if hasattr(a, 'get_unit'):
                    arg_units.append(a.get_unit())
                else:
                    arg_units.append(None)
        converted_args = tuple(converted_args)
        ret = PassThroughProxy.__call__(self, *converted_args)
        if ret is NotImplemented:
            return NotImplemented
        ret_unit = unit_resolver(self.fn_name, arg_units)
        if ret_unit is NotImplemented:
            return NotImplemented
        return TaggedValue(ret, ret_unit)
class TaggedValue(six.with_metaclass(TaggedValueMeta)):
    """A value paired with a BasicUnit.

    Arithmetic comes from the ``_proxies`` table: the metaclass installs a
    ProxyDelegate per listed dunder, so operations convert operands to a
    common unit and re-tag the result.
    """
    _proxies = {'__add__': ConvertAllProxy,
                '__sub__': ConvertAllProxy,
                '__mul__': ConvertAllProxy,
                '__rmul__': ConvertAllProxy,
                '__cmp__': ConvertAllProxy,
                '__lt__': ConvertAllProxy,
                '__gt__': ConvertAllProxy,
                '__len__': PassThroughProxy}
    def __new__(cls, value, unit):
        # generate a new subclass for value
        value_class = type(value)
        try:
            subcls = type('TaggedValue_of_%s' % (value_class.__name__),
                          tuple([cls, value_class]),
                          {})
            if subcls not in units.registry:
                units.registry[subcls] = basicConverter
            return object.__new__(subcls)
        except TypeError:
            # value_class cannot be subclassed (e.g. bool); fall back to a
            # plain TaggedValue and register that instead.
            if cls not in units.registry:
                units.registry[cls] = basicConverter
            return object.__new__(cls)
    def __init__(self, value, unit):
        self.value = value
        self.unit = unit
        # Target that the PassThroughProxy-based operations forward to.
        self.proxy_target = self.value
    def __getattribute__(self, name):
        if name.startswith('__'):
            return object.__getattribute__(self, name)
        # Delegate unknown attributes to the wrapped value.
        variable = object.__getattribute__(self, 'value')
        if hasattr(variable, name) and name not in self.__class__.__dict__:
            return getattr(variable, name)
        return object.__getattribute__(self, name)
    def __array__(self, dtype=object):
        return np.asarray(self.value).astype(dtype)
    def __array_wrap__(self, array, context):
        # Results of numpy ufuncs keep this value's unit.
        return TaggedValue(array, self.unit)
    def __repr__(self):
        return 'TaggedValue(' + repr(self.value) + ', ' + repr(self.unit) + ')'
    def __str__(self):
        return str(self.value) + ' in ' + str(self.unit)
    def __len__(self):
        return len(self.value)
    def __iter__(self):
        # Return a generator expression rather than use `yield`, so that
        # TypeError is raised by iter(self) if appropriate when checking for
        # iterability.
        return (TaggedValue(inner, self.unit) for inner in self.value)
    def get_compressed_copy(self, mask):
        # Drop masked entries; the unit is preserved.
        new_value = np.ma.masked_array(self.value, mask=mask).compressed()
        return TaggedValue(new_value, self.unit)
    def convert_to(self, unit):
        if unit == self.unit or not unit:
            return self
        new_value = self.unit.convert_value_to(self.value, unit)
        return TaggedValue(new_value, unit)
    def get_value(self):
        return self.value
    def get_unit(self):
        return self.unit
class BasicUnit(object):
    """A named unit that can tag values and convert to registered units."""
    def __init__(self, name, fullname=None):
        self.name = name
        if fullname is None:
            fullname = name
        self.fullname = fullname
        # Maps target unit -> conversion callable.
        self.conversions = dict()
    def __repr__(self):
        return 'BasicUnit(%s)' % self.name
    def __str__(self):
        return self.fullname
    def __call__(self, value):
        # unit(value) tags the value with this unit.
        return TaggedValue(value, self)
    def __mul__(self, rhs):
        value = rhs
        unit = self
        if hasattr(rhs, 'get_unit'):
            value = rhs.get_value()
            unit = rhs.get_unit()
            unit = unit_resolver('__mul__', (self, unit))
        if unit is NotImplemented:
            return NotImplemented
        return TaggedValue(value, unit)
    def __rmul__(self, lhs):
        return self*lhs
    def __array_wrap__(self, array, context):
        return TaggedValue(array, self)
    def __array__(self, t=None, context=None):
        # Behaves as the one-element array [1] so numpy ops are defined.
        ret = np.array([1])
        if t is not None:
            return ret.astype(t)
        else:
            return ret
    def add_conversion_factor(self, unit, factor):
        # Linear conversion: multiply by a constant factor.
        def convert(x):
            return x*factor
        self.conversions[unit] = convert
    def add_conversion_fn(self, unit, fn):
        # Arbitrary conversion function (e.g. reciprocal for s -> Hz).
        self.conversions[unit] = fn
    def get_conversion_fn(self, unit):
        return self.conversions[unit]
    def convert_value_to(self, value, unit):
        # Raises KeyError when no conversion to `unit` was registered.
        conversion_fn = self.conversions[unit]
        ret = conversion_fn(value)
        return ret
    def get_unit(self):
        return self
class UnitResolver(object):
    """Resolves the result unit of a binary operation from operand units."""
    def addition_rule(self, units):
        # Addition-like ops require every operand to share the same unit.
        for unit_1, unit_2 in zip(units[:-1], units[1:]):
            if (unit_1 != unit_2):
                return NotImplemented
        return units[0]
    def multiplication_rule(self, units):
        # Multiplication allows at most one unit-bearing operand.
        non_null = [u for u in units if u]
        if (len(non_null) > 1):
            return NotImplemented
        return non_null[0]
    # Operation name -> resolution rule (unbound functions, called with self).
    op_dict = {
        '__mul__': multiplication_rule,
        '__rmul__': multiplication_rule,
        '__add__': addition_rule,
        '__radd__': addition_rule,
        '__sub__': addition_rule,
        '__rsub__': addition_rule}
    def __call__(self, operation, units):
        if (operation not in self.op_dict):
            return NotImplemented
        return self.op_dict[operation](self, units)
# Shared resolver used by the proxies to pick result units.
unit_resolver = UnitResolver()
# Length units with mutual conversions.
cm = BasicUnit('cm', 'centimeters')
inch = BasicUnit('inch', 'inches')
inch.add_conversion_factor(cm, 2.54)
cm.add_conversion_factor(inch, 1/2.54)
# Angle units.
radians = BasicUnit('rad', 'radians')
degrees = BasicUnit('deg', 'degrees')
radians.add_conversion_factor(degrees, 180.0/np.pi)
degrees.add_conversion_factor(radians, np.pi/180.0)
# Time/frequency units; seconds -> Hz is a reciprocal, not a factor.
secs = BasicUnit('s', 'seconds')
hertz = BasicUnit('Hz', 'Hertz')
minutes = BasicUnit('min', 'minutes')
secs.add_conversion_fn(hertz, lambda x: 1./x)
secs.add_conversion_factor(minutes, 1/60.0)
# radians formatting
def rad_fn(x, pos=None):
    """Tick formatter rendering multiples of pi/2 as TeX labels."""
    # Nearest multiple of pi/2 (+0.25 rounds to the closest half-pi).
    half_pis = int((x / np.pi) * 2.0 + 0.25)
    if half_pis == 0:
        return '0'
    if half_pis == 1:
        return r'$\pi/2$'
    if half_pis == 2:
        return r'$\pi$'
    if half_pis % 2 == 0:
        return r'$%s\pi$' % (half_pis // 2,)
    return r'$%s\pi/2$' % (half_pis,)
class BasicUnitConverter(units.ConversionInterface):
    """matplotlib units interface for BasicUnit/TaggedValue axis data."""
    @staticmethod
    def axisinfo(unit, axis):
        'return AxisInfo instance for x and unit'
        if unit == radians:
            # Ticks at multiples of pi/2, rendered as TeX fractions.
            return units.AxisInfo(
                majloc=ticker.MultipleLocator(base=np.pi/2),
                majfmt=ticker.FuncFormatter(rad_fn),
                label=unit.fullname,
            )
        elif unit == degrees:
            return units.AxisInfo(
                majloc=ticker.AutoLocator(),
                majfmt=ticker.FormatStrFormatter(r'$%i^\circ$'),
                label=unit.fullname,
            )
        elif unit is not None:
            # Fallback: only the axis label is unit-derived.
            if hasattr(unit, 'fullname'):
                return units.AxisInfo(label=unit.fullname)
            elif hasattr(unit, 'unit'):
                return units.AxisInfo(label=unit.unit.fullname)
        return None
    @staticmethod
    def convert(val, unit, axis):
        if units.ConversionInterface.is_numlike(val):
            # Already plain numbers; nothing to convert.
            return val
        if iterable(val):
            return [thisval.convert_to(unit).get_value() for thisval in val]
        else:
            return val.convert_to(unit).get_value()
    @staticmethod
    def default_units(x, axis):
        'return the default unit for x or None'
        if iterable(x):
            for thisx in x:
                # The first element's unit stands for the whole sequence.
                return thisx.unit
        return x.unit
def cos(x):
    """Cosine of a unit-tagged angle (or iterable of them), via radians."""
    if iterable(x):
        return [math.cos(val.convert_to(radians).get_value()) for val in x]
    else:
        return math.cos(x.convert_to(radians).get_value())
basicConverter = BasicUnitConverter()
# Register the converter for both the unit and tagged-value types so
# matplotlib axes know how to convert them.
units.registry[BasicUnit] = basicConverter
units.registry[TaggedValue] = basicConverter
| [
"qwer55113322@gmail.com"
] | qwer55113322@gmail.com |
928da74d5675d0c79228f461deb1e13f55008135 | 0ffa373c5dfe3f20739d6a646d5b0f5ca0f66b54 | /utils/dataset.py | e0006687f823de977ad3c757857e3a357e74201d | [] | no_license | Mahmudulazamshohan/vgg16-image-classification | e31612336ea7dd823d0bcb2e2564ee9467a63182 | eb45260db73d46c744f8e2d90d6b515e901c8b55 | refs/heads/master | 2022-09-02T12:54:58.908981 | 2020-05-31T16:49:01 | 2020-05-31T16:49:01 | 267,092,650 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 285 | py | import numpy as np
def pretrain_dataset():
    """Load the cached training-data array from the weights directory."""
    train_path = 'weights/train.npy'
    return np.load(train_path)
def load_labels():
    """Load the cached label array from the weights directory."""
    labels_path = 'weights/labels.npy'
    return np.load(labels_path)
def save_dataset(data):
    """Persist the training-data array to the weights cache."""
    train_path = 'weights/train.npy'
    return np.save(train_path, data)
def save_labels(labels):
    """Persist the label array to the weights cache."""
    labels_path = 'weights/labels.npy'
    return np.save(labels_path, labels)
"mahmudulazamshohan7@gmail.com"
] | mahmudulazamshohan7@gmail.com |
ad0fac08a240ed3589c725d9032cea898daf0b13 | 99b116d1c3f64823e0867beb4d9a0568fb0e8eb8 | /test_model.py | 373a2da56af176eb783e7a664f3d393f0d856c57 | [] | no_license | 0205yes/softdrink-classifier | 04917e0303568e6ae1f4cca4bb4805c12917c4a9 | 212f7f06556f031b16bdb5eb8948089c70fd608b | refs/heads/main | 2023-04-21T03:33:36.084118 | 2021-05-06T17:00:30 | 2021-05-06T17:00:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,337 | py | import os
import tensorflow as tf
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.preprocessing import image
from tensorflow.python.keras.models import load_model
import numpy as np
from set_vars import classes, test_dir, new_model_dir
def test_model(model, files):
    """Classify each image in *files* with *model* and print per-file results.

    Accuracy is judged by comparing the first two characters of the file
    name with the first two characters of the predicted class name; a
    summary accuracy line is printed at the end.
    """
    acc_count = 0
    for fname in files:
        img_path = os.path.join(test_dir, fname)
        img = image.load_img(img_path, target_size=(150, 150))
        x = image.img_to_array(img)
        x = x / 255. # rescale pixel values to [0, 1], matching training
        x = np.expand_dims(x, axis=0)
        images = np.vstack([x])
        predict = model.predict(images, batch_size=4, verbose=0)
        np.set_printoptions(precision=3, suppress=True)
        result = predict.argmax()
        print("IMAGE NAME: {:13}, RESULT: {:7}, PROBA: {:.3f}"
              .format(fname, classes[result], predict.max()), end='')
        # File names are expected to start with the class prefix.
        if fname[:2] == classes[result][:2]:
            acc_count += 1
            print()
        else:
            print(" !!!")  # mark misclassified files
    print("\nTotal Accuracy: {:.3f}" .format(acc_count/len(files)))
if __name__ == '__main__':
    # Gather test images, ignoring macOS folder metadata.
    test_files = os.listdir(test_dir)
    if '.DS_Store' in test_files:
        test_files.remove('.DS_Store')
    # compile=False: weights only; no optimizer state needed for inference.
    model = load_model(new_model_dir, compile=False)
    test_model(model, test_files)
| [
"noreply@github.com"
] | noreply@github.com |
8e3fa73798e6a5f4db6926aeb15688606a866cb0 | f33cb8fbe1ba67e57288ade2fc59909f5fc48db0 | /Client/venv/Scripts/easy_install-3.7-script.py | 1ac2c5e93f3cd16cb0ce5b5bc26295e1679a9dc7 | [] | no_license | Zombinn/VoteSysterm | cd0c3f289e8aa7ec4f1b68f93a99fc910148dfe0 | 212eccc86e04c5edb15bf128a9e06c9ca32f8060 | refs/heads/master | 2023-01-09T23:19:22.291579 | 2020-11-22T11:24:09 | 2020-11-22T11:24:09 | 312,867,463 | 0 | 0 | null | null | null | null | WINDOWS-1252 | Python | false | false | 461 | py | #!E:\¼ÆËã»úÍøÂç¿ÎÉè\Client\venv\Scripts\python.exe -x
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==39.1.0','console_scripts','easy_install-3.7'
__requires__ = 'setuptools==39.1.0'
import re
import sys
from pkg_resources import load_entry_point
# setuptools-generated console-script wrapper; normally not edited by hand.
if __name__ == '__main__':
    # Strip the "-script.py"/".exe" suffix so argv[0] shows the command name.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('setuptools==39.1.0', 'console_scripts', 'easy_install-3.7')()
    )
| [
"zyb5603@qq.com"
] | zyb5603@qq.com |
84f29f68b65af4c479188bad5fe13eb540caa362 | 7fac5e7216c8f4328f21a14f9e222005890a57f8 | /11_Actor_Critic_Advantage/refactor/CartPole/network.py | a91406a51ae6f89da620c63ac4298837c272d612 | [] | no_license | ZhangRui111/MorvanRL | bee77d644df50ce9900be6ec7d702c395238fae4 | ad443d56314427aa9ebe4af552dde0f5470da967 | refs/heads/master | 2021-04-14T12:05:14.657272 | 2019-03-26T02:28:31 | 2019-03-26T02:28:31 | 126,663,363 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,786 | py | import numpy as np
import tensorflow as tf
def build_actor_network(n_features, n_actions, lr):
    """Build the TF1 actor graph (policy network) for actor-critic.

    Returns [[state, action, td_error placeholders],
             [action-probabilities op, exp_v loss op, train op]].
    The policy-gradient loss is log pi(a|s) weighted by the critic's
    TD error (advantage).
    """
    s = tf.placeholder(tf.float32, [1, n_features], "state")
    a = tf.placeholder(tf.int32, None, "act")
    td_error = tf.placeholder(tf.float32, None, "td_error") # TD_error
    with tf.variable_scope('Actor'):
        l1 = tf.contrib.layers.fully_connected(s, 20, activation_fn=tf.nn.relu)
        acts_prob = tf.contrib.layers.fully_connected(l1, n_actions, activation_fn=tf.nn.softmax)
    with tf.variable_scope('exp_v'):
        log_prob = tf.log(acts_prob[0, a])
        # log_prob = tf.exp(acts_prob[0, a]) # tf.exp can also convergent
        exp_v = tf.reduce_mean(log_prob * td_error) # advantage (TD_error) guided loss
    with tf.variable_scope('train'):
        train_op = tf.train.AdamOptimizer(lr).minimize(-exp_v) # minimize(-exp_v) = maximize(exp_v)
    return [[s, a, td_error], [acts_prob, exp_v, train_op]]
    # # debug mode # #
    # return [[s, a, td_error], [acts_prob, exp_v, train_op], [log_prob, l1]]
    # # debug mode # #
def build_critic_network(n_features, lr, discount):
    """Build the TF1 critic graph (state-value network) for actor-critic.

    Returns [[state, v_next, reward placeholders],
             [value op, td_error op, loss op, train op]].
    The critic minimizes the squared TD error (r + gamma * V(s') - V(s)).
    """
    s = tf.placeholder(tf.float32, [1, n_features], "state")
    v_ = tf.placeholder(tf.float32, [1, 1], "v_next")
    r = tf.placeholder(tf.float32, None, 'r')
    with tf.variable_scope('Critic'):
        l1 = tf.contrib.layers.fully_connected(s, 20, activation_fn=tf.nn.relu)
        v = tf.contrib.layers.fully_connected(l1, 1, activation_fn=None)
    with tf.variable_scope('squared_TD_error'):
        td_error = r + discount * v_ - v
        loss = tf.square(td_error) # TD_error = (r+gamma*V_next) - V_eval
    with tf.variable_scope('train'):
        train_op = tf.train.AdamOptimizer(lr).minimize(loss)
    return [[s, v_, r], [v, td_error, loss, train_op]]
| [
"zhangruisg111@163.com"
] | zhangruisg111@163.com |
418acbd366d52090308359359fd700121fa0ab19 | f89bd745e96223513c03ab1a59d67a1a27eec715 | /Python-Project/firefighter/gwrobolib/__init__.py | e2d19d122d3805c3b57c6be734b8d804858c3ae5 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | Hakum-Lartey/2017-Firefighter | a462ac7320b0fb88cb7a3ab9dab69c5c686b0f2d | 0d42eb37d7a0e19d7bef907f73837d53a11cd638 | refs/heads/master | 2021-06-22T10:43:35.442520 | 2017-04-05T20:07:59 | 2017-04-05T20:07:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 189 | py | # gwrobolib
# Copyright 2017 GW Robotics
# See LICENSE for details
"""
GW Robotics Library
"""
# Package metadata.
__version__ = '0.0.1'
__author__ = 'GW Robotics'
__license__ = 'MIT'
# NOTE(review): this imports the package into itself and is likely a no-op;
# presumably a submodule re-export was intended -- confirm.
from gwrobolib import *
"tranngocnam97@yahoo.com"
] | tranngocnam97@yahoo.com |
b0b88e277c56283992b1ba38b41328a47c44402a | bf890b0fd6e4aab1544d96d444ed5e28ac27bba8 | /pythalesians_examples/markets/paralleldata_examples.py | da8d1594d5d8fffe70687c0742f0ae7a69c7e782 | [
"Apache-2.0"
] | permissive | femtotrader/pythalesians | 24cfbb88d28a468d58498219d09d441f2209c2c7 | febdfbabc9f99b0ae3eb0b20ce4c5ed6a34c0bbb | refs/heads/master | 2021-01-17T23:59:10.220652 | 2016-08-20T15:46:54 | 2016-08-20T15:46:54 | 67,237,312 | 27 | 11 | null | 2016-09-02T16:15:34 | 2016-09-02T16:15:33 | null | UTF-8 | Python | false | false | 9,279 | py | __author__ = 'saeedamen' # Saeed Amen / saeed@thalesians.com
#
# Copyright 2015 Thalesians Ltd. - http//www.thalesians.com / @thalesians
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and limitations under the License.
#
"""
paralleldata_examples
Shows how we can change settings to increase the parallel downloading of data from sources like Bloomberg.
"""
# for logging
from pythalesians.util.loggermanager import LoggerManager
# to download market data
from pythalesians.market.requests.timeseriesrequest import TimeSeriesRequest
from pythalesians.market.loaders.lighttimeseriesfactory import LightTimeSeriesFactory
if __name__ == '__main__':
    # On Windows calling this function is necessary (to prevent repeated
    # respawning of multiprocess functions). On Linux/OSX it does nothing.
    # Only necessary if you use the multiprocessing_on_dill code path.
    try:
        # the standard multiprocessing library fails in pickling the market data classes
        import multiprocessing_on_dill as multiprocessing
        multiprocessing.freeze_support()
    except Exception:
        # best-effort: multiprocessing_on_dill may simply not be installed
        pass

    def _benchmark_threading(ltsf, time_series_request, logger):
        """Download the requested time series repeatedly, varying the threading
        technique ("thread" vs "multiprocessor") and the thread count (1-4),
        and log the wall-clock duration of each run.

        Note that download speed depends on many factors, such as the length
        of the time series, so threading is not always the quickest option.
        """
        import time
        from pythalesians.util.constants import Constants

        thread_no = [1, 2, 3, 4]
        thread_technique = ["thread", "multiprocessor"]

        diag = []

        for tech in thread_technique:
            # change the static variables in Constants which govern the threading we use
            Constants.time_series_factory_thread_technique = tech

            for no in thread_no:
                for key in Constants.time_series_factory_thread_no:
                    Constants.time_series_factory_thread_no[key] = no

                start = time.time()
                ltsf.harvest_time_series(time_series_request)
                duration = time.time() - start

                diag.append("With " + str(no) + " " + tech + " no: " + str(duration) + " seconds")

        for d in diag:
            logger.info(d)

    if True:
        logger = LoggerManager().getLogger(__name__)

        import datetime

        # just change "False" to "True" to run any of the below examples

        ###### download daily data from Bloomberg for FX, with different threading techniques
        if False:
            time_series_request = TimeSeriesRequest(
                start_date = "01 Jan 1999",                     # start date
                finish_date = datetime.date.today(),            # finish date
                freq = 'daily',                                 # daily data
                data_source = 'bloomberg',                      # use Bloomberg as data source
                tickers = ['EURUSD',                            # ticker (Thalesians)
                           'GBPUSD',
                           'USDJPY',
                           'AUDUSD'],
                fields = ['close', 'high', 'low'],              # which fields to download
                vendor_tickers = ['EURUSD BGN Curncy',          # ticker (Bloomberg)
                                  'GBPUSD BGN Curncy',
                                  'USDJPY BGN Curncy',
                                  'AUDUSD BGN Curncy'],
                vendor_fields = ['PX_LAST', 'PX_HIGH', 'PX_LOW'],   # which Bloomberg fields to download
                cache_algo = 'internet_load_return')            # how to return data

            _benchmark_threading(LightTimeSeriesFactory(), time_series_request, logger)

        ###### download intraday data from Bloomberg for FX, with different threading techniques
        if True:
            from datetime import timedelta

            time_series_request = TimeSeriesRequest(
                start_date = datetime.date.today() - timedelta(days=10),    # start date
                finish_date = datetime.date.today(),            # finish date
                freq = 'intraday',                              # intraday data
                data_source = 'bloomberg',                      # use Bloomberg as data source
                tickers = ['EURUSD',                            # ticker (Thalesians)
                           'GBPUSD',
                           'USDJPY',
                           'AUDUSD'],
                fields = ['close', 'high', 'low'],              # which fields to download
                vendor_tickers = ['EURUSD BGN Curncy',          # ticker (Bloomberg)
                                  'GBPUSD BGN Curncy',
                                  'USDJPY BGN Curncy',
                                  'AUDUSD BGN Curncy'],
                vendor_fields = ['PX_LAST', 'PX_HIGH', 'PX_LOW'],   # which Bloomberg fields to download
                cache_algo = 'internet_load_return')            # how to return data

            _benchmark_threading(LightTimeSeriesFactory(), time_series_request, logger)

        ##### load FX data using Quandl
        if True:
            tickers = ['EURUSD', 'USDJPY', 'GBPUSD', 'AUDUSD', 'USDCAD',
                       'NZDUSD', 'USDCHF', 'USDNOK', 'USDSEK']
            vendor_tickers = ['FRED/DEXUSEU', 'FRED/DEXJPUS', 'FRED/DEXUSUK', 'FRED/DEXUSAL', 'FRED/DEXCAUS',
                              'FRED/DEXUSNZ', 'FRED/DEXSZUS', 'FRED/DEXNOUS', 'FRED/DEXSDUS']

            time_series_request = TimeSeriesRequest(
                start_date = "01 Jan 1999",                     # start date
                finish_date = datetime.date.today(),            # finish date
                freq = 'daily',                                 # daily data
                data_source = 'quandl',                         # use Quandl as data source
                tickers = tickers,                              # ticker (Thalesians)
                fields = ['close'],                             # which fields to download
                vendor_tickers = vendor_tickers,                # ticker (Quandl)
                vendor_fields = ['close'],                      # which Quandl fields to download
                cache_algo = 'internet_load_return')            # how to return data

            _benchmark_threading(LightTimeSeriesFactory(), time_series_request, logger)
"saeedamen@hotmail.com"
] | saeedamen@hotmail.com |
e1ecdb9dddc1bcdc4da805d75772b02eead18e04 | 85a9ffeccb64f6159adbd164ff98edf4ac315e33 | /pysnmp-with-texts/LIVINGSTON-PM4-MIB.py | aa942223a96f63f216f498a166e8bc9c5381dac9 | [
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-proprietary-license",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | agustinhenze/mibs.snmplabs.com | 5d7d5d4da84424c5f5a1ed2752f5043ae00019fb | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | refs/heads/master | 2020-12-26T12:41:41.132395 | 2019-08-16T15:51:41 | 2019-08-16T15:53:57 | 237,512,469 | 0 | 0 | Apache-2.0 | 2020-01-31T20:41:36 | 2020-01-31T20:41:35 | null | UTF-8 | Python | false | false | 135,200 | py | #
# PySNMP MIB module LIVINGSTON-PM4-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/LIVINGSTON-PM4-MIB
# Produced by pysmi-0.3.4 at Wed May 1 14:07:28 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
Integer, ObjectIdentifier, OctetString = mibBuilder.importSymbols("ASN1", "Integer", "ObjectIdentifier", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueSizeConstraint, ConstraintsIntersection, SingleValueConstraint, ConstraintsUnion, ValueRangeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "ConstraintsIntersection", "SingleValueConstraint", "ConstraintsUnion", "ValueRangeConstraint")
lucentPM4, = mibBuilder.importSymbols("LIVINGSTON-ROOT-MIB", "lucentPM4")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
sysName, = mibBuilder.importSymbols("SNMPv2-MIB", "sysName")
Counter32, Gauge32, Counter64, IpAddress, ModuleIdentity, Unsigned32, Integer32, TimeTicks, ObjectIdentity, MibScalar, MibTable, MibTableRow, MibTableColumn, Bits, NotificationType, NotificationType, MibIdentifier, iso = mibBuilder.importSymbols("SNMPv2-SMI", "Counter32", "Gauge32", "Counter64", "IpAddress", "ModuleIdentity", "Unsigned32", "Integer32", "TimeTicks", "ObjectIdentity", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Bits", "NotificationType", "NotificationType", "MibIdentifier", "iso")
DisplayString, TextualConvention, PhysAddress = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention", "PhysAddress")
class PMUnitType(Integer32):
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 255))
namedValues = NamedValues(("mrgmodule", 1), ("quadt1", 2), ("trie1", 3), ("modem", 4), ("serialport", 5), ("ether0", 6), ("ether1", 7), ("console", 8), ("acpwrsup", 9), ("fan", 10), ("dcpwrsup", 11), ("allunits", 255))
class PMEquipPRIStatus(Integer32):
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))
namedValues = NamedValues(("up", 1), ("down", 2), ("loopback", 3), ("fault", 4))
class PMEquipStatus(Integer32):
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))
namedValues = NamedValues(("up", 1), ("down", 2), ("maintenance", 3), ("fault", 4), ("other", 5))
class PMDiagCmdStatus(Integer32):
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))
namedValues = NamedValues(("success", 1), ("fail", 2), ("inprogress", 3), ("notsupported", 4), ("aborted", 5), ("other", 6))
class PMDiagTestCntrl(Integer32):
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))
namedValues = NamedValues(("normal", 1), ("start", 2), ("stop", 3), ("abort", 4))
class PMAlarmType(Integer32):
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))
namedValues = NamedValues(("informational", 1), ("warning", 2), ("minor", 3), ("major", 4), ("critical", 5))
lucentPM4Mib = MibIdentifier((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1))
lucentPM4Traps = MibIdentifier((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 2))
lucentPM4MibRev = MibScalar((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 1), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 6))).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4MibRev.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4MibRev.setDescription('This object specifies the current MIB revision number. Example of the MIB revision can be PM4xxx for PM4 product and PM3xxx for PM3 products etc. Where xxx can be any combination of alpha-numeric characters.')
lucentPM4SWRev = MibScalar((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 10))).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4SWRev.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4SWRev.setDescription('This object specifies the ComOS revision number. Example of the ComOS revision can be ComOS4.xx. Where xxx can be any combination of alpha-numeric characters.')
lucentPM4Chassis = MibIdentifier((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 3))
lucentPM4ChasSummary = MibScalar((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 3, 1), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(55, 55)).setFixedLength(55)).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4ChasSummary.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4ChasSummary.setDescription("This object provides general information about the PortMaster 4 chassis operational status. This object is read-only. The DisplayString represents a summary of all the devices in the chassis as follows: Bytes 1 - 2 '0''0' Byte 3 'U', 'D', 'M', 'F', 'O' Byte 4 space Bytes 5 - 6 '0''1' Byte 7 'U', 'D', 'M', 'F', 'O' Byte 8 space Bytes 9 - 10 '0''2' Byte 11 'U', 'D', 'M', 'F', 'O' Byte 12 space Bytes 13 - 14 '0''3' Byte 15 'U', 'D', 'M', 'F', 'O' Byte 16 space Bytes 17 - 18 '0''4' Byte 19 'U', 'D', 'M', 'F', 'O' Byte 20 space Bytes 21 - 22 '0''5' Byte 23 'U', 'D', 'M', 'F', 'O' Byte 24 space Bytes 25 - 26 '0''6' Byte 27 'U', 'D', 'M', 'F', 'O' Byte 28 space Bytes 29 - 30 '0''7' Byte 31 'U', 'D', 'M', 'F', 'O' Byte 32 space Bytes 33 - 34 '0''8' Byte 35 'U', 'D', 'M', 'F', 'O' Byte 36 space Bytes 37 - 38 '0''9' Byte 39 'U', 'D', 'M', 'F', 'O' Byte 40 space Bytes 41 - 42 '1''0' Byte 43 'U', 'D', 'M', 'F', 'O' Byte 44 space Bytes 45 - 46 '1''1' Byte 47 'U', 'D', 'M', 'F', 'O' Byte 48 space Bytes 49 - 50 '1''2' Byte 51 'U', 'D', 'M', 'F', 'O' Byte 52 space Bytes 53 - 54 '1''3' Byte 55 'U', 'D', 'M', 'F', 'O' Byte 56 space Bytes 57 - 58 '1''4' Byte 59 'U', 'D', 'M', 'F', 'O' Byte 60 space Bytes 61 - 62 '1''5' Byte 63 'U', 'D', 'M', 'F', 'O' Byte 64 space Bytes 65 - 66 'P''1' Byte 67 'U', 'D', 'M', 'F', 'O' Byte 68 space Bytes 69 - 70 'P''2' Byte 71 'U', 'D', 'M', 'F', 'O' Byte 72 space Bytes 73 - 74 'P''3' Byte 75 'U', 'D', 'M', 'F', 'O' Byte 76 space Bytes 77 - 78 'D''1' Byte 79 'U', 'D', 'M', 'F', 'O' Byte 80 space Bytes 81 - 82 'D''2' Byte 83 'U', 'D', 'M', 'F', 'O' Byte 84 space Bytes 85 - 86 'F''1' Byte 87 'U', 'D', 'M', 'F', 'O' Byte 88 space Bytes 89 - 90 'F''2' Byte 91 'U', 'D', 'M', 'F', 'O' Byte 92 space Bytes 93 - 94 'F''3' Byte 95 'U', 'D', 'M', 'F', 'O' Byte 96 space Bytes 97 - 98 'F''4' Byte 99 'U', 'D', 'M', 'F', 'O' Legend '#''#' Represents the board number 'F''#' Represents the Fan # 'P''#' 
Represents the Power Supply # 'D''#' Represents the DC Power Supply # 'U' Up 'D' Down 'M' Maintenance 'F' Failed 'O' Other unknown state.")
lucentPM4ChasCmdTable = MibTable((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 3, 2), )
if mibBuilder.loadTexts: lucentPM4ChasCmdTable.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4ChasCmdTable.setDescription('Table describing the commands that can be issued to the agent to perform specific actions to any card, port or device in the system. For example to erase the flash or a particular file from the flash. Note that only a station configured with the appropriate authentication string can issue commands to the agent.')
lucentPM4ChasCmdEntry = MibTableRow((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 3, 2, 1), ).setIndexNames((0, "LIVINGSTON-PM4-MIB", "lucentPM4ChasCmdIndex"))
if mibBuilder.loadTexts: lucentPM4ChasCmdEntry.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4ChasCmdEntry.setDescription('Entries in the command table for the chassis commands. This describes one entry or row in the command table.')
lucentPM4ChasCmdIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 3, 2, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 10))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: lucentPM4ChasCmdIndex.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4ChasCmdIndex.setDescription('This object specifies the index in the command table. The values for this object is limited to the size of the command table on the network element. The size of the command table is set to 10 which can be changed if and when users need to schedule more than 10 commands at a given time.')
lucentPM4ChasCmdBoardId = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 3, 2, 1, 2), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: lucentPM4ChasCmdBoardId.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4ChasCmdBoardId.setDescription('This object specifies the board for which the command is to be applied. The values for this object is limited to the Max number of boards.')
lucentPM4ChasCmdUnitType = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 3, 2, 1, 3), PMUnitType()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: lucentPM4ChasCmdUnitType.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4ChasCmdUnitType.setDescription('This object specifies the type of the device to which this command must apply.')
lucentPM4ChasCmdUnitIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 3, 2, 1, 4), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: lucentPM4ChasCmdUnitIndex.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4ChasCmdUnitIndex.setDescription('This object specifies the interface index.')
lucentPM4ChasCmdDevId = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 3, 2, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 96))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: lucentPM4ChasCmdDevId.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4ChasCmdDevId.setDescription('This object specifies the sub-unit id for which the command must be applied to. This value will be used by the agent to index to the correct sub-device on a board. For example, this object can have values from 1 - 96 for the modems or 1 - 4 for T1 or 1 - 3 for the E1.')
lucentPM4ChasCmdId = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 3, 2, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29))).clone(namedValues=NamedValues(("eraseflashfile", 1), ("eraseallflash", 2), ("saveall", 3), ("resetport", 4), ("resetfilter", 5), ("adduser", 6), ("deleteuser", 7), ("addlocation", 8), ("diallocation", 9), ("addfilter", 10), ("deletefilter", 11), ("addmodem", 12), ("resetvirtport", 13), ("addospfarea", 14), ("resetospf", 15), ("addprop", 16), ("deleteprop", 17), ("resetprop", 18), ("resetether0", 19), ("resetether1", 20), ("resetall", 21), ("resetconsole", 22), ("version", 23), ("traceroutes", 24), ("ptrace", 25), ("ifconfig", 26), ("eraseconfig", 27), ("erasecomos", 28), ("reboot", 29)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: lucentPM4ChasCmdId.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4ChasCmdId.setDescription('This object specifies the command. Each command takes a unique value. The completion status of this command is set in the result object of the table.')
lucentPM4ChasCmdParams = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 3, 2, 1, 7), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: lucentPM4ChasCmdParams.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4ChasCmdParams.setDescription("This object specifies the command parameters. Each parameter must be seperated by a blank space. The last parameter is terminated with a ';'.")
lucentPM4ChasCmdResult = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 3, 2, 1, 8), PMDiagCmdStatus()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4ChasCmdResult.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4ChasCmdResult.setDescription('This object specifies the command result. The result for each of the previous 10 commands will be stored in a table, which can be retrieved by the client when needed.')
lucentPM4ConfigMgmt = MibIdentifier((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4))
lucentPM4CmInterfaces = MibIdentifier((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1))
lucentPM4CmSerial = MibIdentifier((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 1))
lucentPM4SerialTable = MibTable((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 1, 1), )
if mibBuilder.loadTexts: lucentPM4SerialTable.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4SerialTable.setDescription('A list of serial interface entries.')
lucentPM4SerialEntry = MibTableRow((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 1, 1, 1), ).setIndexNames((0, "LIVINGSTON-PM4-MIB", "lucentPM4SerialBoardIndex"), (0, "LIVINGSTON-PM4-MIB", "lucentPM4SerialIndex"))
if mibBuilder.loadTexts: lucentPM4SerialEntry.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4SerialEntry.setDescription('A serial interface entry containing objects at the physical and session layer.')
lucentPM4SerialBoardIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 1, 1, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4SerialBoardIndex.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4SerialBoardIndex.setDescription('A unique value for each board in the PortMaster chassis. The Max value of this variable is limited by the number of boards in the chassis. This value is limited to 255.')
lucentPM4SerialUnitType = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 1, 1, 1, 2), PMUnitType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4SerialUnitType.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4SerialUnitType.setDescription('Unit type indicates the serial port. The interface table ifIndex is a combination of board index, unit type and unit index.')
lucentPM4SerialIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 1, 1, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4SerialIndex.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4SerialIndex.setDescription('A unique value for each serial interface on a given board.')
lucentPM4ModemId = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 1, 1, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4ModemId.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4ModemId.setDescription('This object is the cross reference to the modem interface table index. The value is dynamically assigned when the call is established.')
lucentPM4SerialPortNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 1, 1, 1, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4SerialPortNumber.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4SerialPortNumber.setDescription('A serial port to which this modem is assigned for this call.')
lucentPM4SerialPhysType = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 1, 1, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("other", 1), ("async", 2), ("sync", 3), ("isdn", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4SerialPhysType.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4SerialPhysType.setDescription('The type of physical serial interface, distinguished according to the physical/link protocol(s) being currently used on the interface. When this object is set to asyn(2), then the service types dial-in, dial- out, login, and device are valid. When this object is set to sync(3), the serial service types dial-in, dial- out and hardwired are valid.')
lucentPM4SerialPortStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 1, 1, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))).clone(namedValues=NamedValues(("idle", 1), ("connecting", 2), ("established", 3), ("disconnecting", 4), ("command", 5), ("noservice", 6)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4SerialPortStatus.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4SerialPortStatus.setDescription('The status of the serial interface.')
lucentPM4SerialDS0State = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 1, 1, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("notavailable", 1), ("busyout", 2), ("havecomport", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: lucentPM4SerialDS0State.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4SerialDS0State.setDescription('Cross reference value for each DS0 for a given T1/E1 line and a given board in the PM4 chassis.')
lucentPM4SerialUser = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 1, 1, 1, 9), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4SerialUser.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4SerialUser.setDescription('Name of the active user. Blank if not active.')
lucentPM4SerialSessionId = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 1, 1, 1, 10), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4SerialSessionId.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4SerialSessionId.setDescription('A unique Session Identifier which matches the RADIUS session ID. Blank when not using RADIUS.')
lucentPM4SerialTypeHardwired = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 1, 1, 1, 11), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("disable", 1), ("enable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: lucentPM4SerialTypeHardwired.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4SerialTypeHardwired.setDescription('The active type of service being provided by the serial interface.')
lucentPM4SerialTypeNwDialIn = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 1, 1, 1, 12), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("disable", 1), ("enable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: lucentPM4SerialTypeNwDialIn.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4SerialTypeNwDialIn.setDescription('The active type of service being provided by the serial interface.')
lucentPM4SerialTypeNwDialout = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 1, 1, 1, 13), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("disable", 1), ("enable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: lucentPM4SerialTypeNwDialout.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4SerialTypeNwDialout.setDescription('The active type of service being provided by the serial interface.')
lucentPM4SerialTypeLogin = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 1, 1, 1, 14), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("disable", 1), ("enable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: lucentPM4SerialTypeLogin.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4SerialTypeLogin.setDescription('The active type of service being provided by the serial interface.')
lucentPM4SerialTypeDevice = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 1, 1, 1, 15), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("disable", 1), ("enable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: lucentPM4SerialTypeDevice.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4SerialTypeDevice.setDescription('The active type of service being provided by the serial interface.')
lucentPM4SerialTypeDeviceName = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 1, 1, 1, 16), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: lucentPM4SerialTypeDeviceName.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4SerialTypeDeviceName.setDescription('Device name if the lucentPM4SerialTypeDevice is enabled. This is a string of characters (e.g. /dev/tty1) indicating the device name.')
lucentPM4SerialDirection = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 1, 1, 1, 17), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("in", 1), ("out", 2), ("inout", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4SerialDirection.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4SerialDirection.setDescription('The direction the active session was initiated.')
lucentPM4SerialStarted = MibScalar((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 1, 1, 1, 18), TimeTicks()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4SerialStarted.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4SerialStarted.setDescription('The amount of time this session has been active.')
lucentPM4SerialIdle = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 1, 1, 1, 19), TimeTicks()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4SerialIdle.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4SerialIdle.setDescription('The amount of time this session has been idle.')
lucentPM4SerialInSpeed = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 1, 1, 1, 20), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4SerialInSpeed.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4SerialInSpeed.setDescription("An estimate of the serial interface's current inbound bandwidth in bits per second.")
lucentPM4SerialOutSpeed = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 1, 1, 1, 21), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4SerialOutSpeed.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4SerialOutSpeed.setDescription("An estimate of the serial interface's current outbound bandwidth in bits per second.")
lucentPM4SerialIpAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 1, 1, 1, 22), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4SerialIpAddress.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4SerialIpAddress.setDescription("The IP address associated with the serial interface. If being used as a network type port, this is the remote user's IP address. If being used as a device or login, this is the IP address of the host the user is connected to.")
lucentPM4SerialifDescr = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 1, 1, 1, 23), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4SerialifDescr.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4SerialifDescr.setDescription('A textual string containing information about the network interface bound to the serial interface.')
lucentPM4SerialInOctets = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 1, 1, 1, 24), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4SerialInOctets.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4SerialInOctets.setDescription('The total number of octets received on the serial interface.')
lucentPM4SerialOutOctets = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 1, 1, 1, 25), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4SerialOutOctets.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4SerialOutOctets.setDescription('The total number of octets transmitted on the serial interface.')
lucentPM4SerialQOctets = MibScalar((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 1, 1, 1, 26), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4SerialQOctets.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4SerialQOctets.setDescription('The total number of octets queued on the serial interface.')
lucentPM4CmT1E1 = MibIdentifier((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 2))
lucentPM4T1E1Table = MibTable((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 2, 1), )
if mibBuilder.loadTexts: lucentPM4T1E1Table.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1Table.setDescription('A list of T1/E1 interface entries.')
lucentPM4T1E1Entry = MibTableRow((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 2, 1, 1), ).setIndexNames((0, "LIVINGSTON-PM4-MIB", "lucentPM4T1E1BoardIndex"), (0, "LIVINGSTON-PM4-MIB", "lucentPM4T1E1Index"))
if mibBuilder.loadTexts: lucentPM4T1E1Entry.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1Entry.setDescription('A T1/E1 entry containing objects at the physical layer.')
lucentPM4T1E1BoardIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 2, 1, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4T1E1BoardIndex.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1BoardIndex.setDescription('A unique value for each board in the PM4 chassis.')
lucentPM4T1E1UnitType = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 2, 1, 1, 2), PMUnitType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4T1E1UnitType.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1UnitType.setDescription('This object specifies the type of the unit as the T1/E1 line. This value is a part of the interface table ifIndex.')
lucentPM4T1E1Index = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 2, 1, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4T1E1Index.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1Index.setDescription('A unique value for each T1/E1 interface.')
# --- lucentPM4T1E1 table: remaining columns (rows indexed per T1/E1 port) ---
# Auto-generated (pysmi-style) MIB objects. Each object follows the same
# three-line pattern: definition (OID, SMI type, max-access), then STATUS and
# DESCRIPTION clauses guarded by mibBuilder.loadTexts. STATUS 'mandatory' is
# SMIv1 phrasing, preserved from the source MIB.
lucentPM4T1E1SerialIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 2, 1, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4T1E1SerialIndex.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1SerialIndex.setDescription('The value of the instance for the serial port. This object provides a cross reference from the T1/E1 interface to the serial port to which it is mapped.')
lucentPM4T1E1SerialCount = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 2, 1, 1, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4T1E1SerialCount.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1SerialCount.setDescription('The number of serial ports assigned to this interface. If this is a Channelized T1/E1, then the count is 24/32. If this is a fractional T1/E1, then the count can be any number between 1 and a number less than 24/32.')
# Configuration columns: physical type, function, framing, encoding, PCM law
# and signalling. Note the last two (SuperSignal, StartMode) are read-write.
lucentPM4T1E1PhysType = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 2, 1, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("t1", 1), ("e1", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4T1E1PhysType.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1PhysType.setDescription('The type of interface (T1 or E1).')
lucentPM4T1E1Status = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 2, 1, 1, 7), PMEquipPRIStatus()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4T1E1Status.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1Status.setDescription('The current operational status of the interface.')
lucentPM4T1E1Function = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 2, 1, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("isdn", 1), ("channelized", 2), ("clear", 3), ("fractional", 4), ("isdnfrac", 5)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4T1E1Function.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1Function.setDescription('The configured function of the interface.')
lucentPM4T1E1Framing = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 2, 1, 1, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("esf", 1), ("d4", 2), ("crc4", 3), ("fas", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4T1E1Framing.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1Framing.setDescription('The configured line framing.')
lucentPM4T1E1Encoding = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 2, 1, 1, 10), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("ami", 1), ("b8zs", 2), ("hdb3", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4T1E1Encoding.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1Encoding.setDescription('The configured line signal encoding.')
lucentPM4T1E1PCM = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 2, 1, 1, 11), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("ulaw", 1), ("alaw", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4T1E1PCM.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1PCM.setDescription('The configured voice modulation.')
lucentPM4T1E1SuperSignal = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 2, 1, 1, 12), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("em", 1), ("groundstart", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: lucentPM4T1E1SuperSignal.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1SuperSignal.setDescription('The configured supervisory signalling mode for this interface.')
lucentPM4T1E1StartMode = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 2, 1, 1, 13), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("wink", 1), ("immediate", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: lucentPM4T1E1StartMode.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1StartMode.setDescription('The configured start mode for this interface.')
# Status/measurement columns (read-only gauges and timestamps).
lucentPM4T1E1ChangeTime = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 2, 1, 1, 14), TimeTicks()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4T1E1ChangeTime.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1ChangeTime.setDescription('The amount of time since the last status change.')
lucentPM4T1E1RecvLevel = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 2, 1, 1, 15), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4T1E1RecvLevel.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1RecvLevel.setDescription("An estimate of the serial interface's current recieve signal level in DB.")
# Alarm and error counters (monotonically increasing Counter32 objects).
lucentPM4T1E1BlueAlarms = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 2, 1, 1, 16), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4T1E1BlueAlarms.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1BlueAlarms.setDescription('The total number of Blue Alarms on the interface.')
lucentPM4T1E1YellowAlarms = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 2, 1, 1, 17), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4T1E1YellowAlarms.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1YellowAlarms.setDescription('The total number of Yellow Alarms on the interface.')
lucentPM4T1E1CarrierLoss = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 2, 1, 1, 18), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4T1E1CarrierLoss.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1CarrierLoss.setDescription('The total number of times the interface has lost carrier.')
lucentPM4T1E1SyncLoss = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 2, 1, 1, 19), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4T1E1SyncLoss.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1SyncLoss.setDescription('The total number of times the interface has lost frame synchronization.')
lucentPM4T1E1BipolarErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 2, 1, 1, 20), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4T1E1BipolarErrors.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1BipolarErrors.setDescription('The total number of bipolar violations detected on the interface.')
lucentPM4T1E1CRCErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 2, 1, 1, 21), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4T1E1CRCErrors.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1CRCErrors.setDescription('The total number of frame level CRC errors detected on the interface.')
lucentPM4T1E1SyncErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 2, 1, 1, 22), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4T1E1SyncErrors.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1SyncErrors.setDescription('The total number of frame synchronization errors detected on the interface.')
# --- lucentPM4CmModem subtree: per-modem status/statistics table -------------
# Table is indexed by (BoardIndex, UnitType, ModemIndex); see the
# setIndexNames() call on lucentPM4ModemEntry below.
lucentPM4CmModem = MibIdentifier((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 3))
lucentPM4ModemTable = MibTable((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 3, 1), )
if mibBuilder.loadTexts: lucentPM4ModemTable.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4ModemTable.setDescription('A list of modem entries.')
lucentPM4ModemEntry = MibTableRow((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 3, 1, 1), ).setIndexNames((0, "LIVINGSTON-PM4-MIB", "lucentPM4ModemBoardIndex"), (0, "LIVINGSTON-PM4-MIB", "lucentPM4ModemUnitType"), (0, "LIVINGSTON-PM4-MIB", "lucentPM4ModemIndex"))
if mibBuilder.loadTexts: lucentPM4ModemEntry.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4ModemEntry.setDescription('A modem entry containing objects at the session layer.')
# Index columns (all read-only).
lucentPM4ModemBoardIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 3, 1, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4ModemBoardIndex.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4ModemBoardIndex.setDescription('A unique value for each modem interface.')
lucentPM4ModemUnitType = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 3, 1, 1, 2), PMUnitType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4ModemUnitType.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4ModemUnitType.setDescription('Unit type specifies the type of device or interface.')
lucentPM4ModemIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 3, 1, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4ModemIndex.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4ModemIndex.setDescription('A unique value for each modem interface. The value of this object can be 1 - 96 for a Quad T1, 1 - 94 for a Tri E1 card.')
# Modem session state and negotiated link parameters.
lucentPM4ModemPortName = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 3, 1, 1, 4), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 8))).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4ModemPortName.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4ModemPortName.setDescription('A textual string containing the name of the serial interface (ie. S0, S1, etc).')
lucentPM4ModemStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 3, 1, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9))).clone(namedValues=NamedValues(("none", 1), ("bound", 2), ("connecting", 3), ("active", 4), ("test", 5), ("down", 6), ("ready", 7), ("halt", 8), ("admin", 9)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4ModemStatus.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4ModemStatus.setDescription('A current state of the modem.')
lucentPM4ModemProtocol = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 3, 1, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))).clone(namedValues=NamedValues(("none", 1), ("lapm", 2), ("mnp", 3), ("bufferd", 4), ("direct", 5), ("cellular", 6)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4ModemProtocol.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4ModemProtocol.setDescription('The error correcting protocol being used in the modem.')
lucentPM4ModemCompression = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 3, 1, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("none", 1), ("v42bis", 2), ("mnp5", 3), ("stac", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4ModemCompression.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4ModemCompression.setDescription('The compression being used in the modem interface.')
lucentPM4ModemInSpeed = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 3, 1, 1, 8), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4ModemInSpeed.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4ModemInSpeed.setDescription("An estimate of the modem interface's current inbound bandwidth in bits per second.")
lucentPM4ModemOutSpeed = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 3, 1, 1, 9), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4ModemOutSpeed.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4ModemOutSpeed.setDescription("An estimate of the modem interface's current outbound bandwidth in bits per second.")
# Traffic and call counters (Counter32, read-only).
lucentPM4ModemInByteCount = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 3, 1, 1, 10), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4ModemInByteCount.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4ModemInByteCount.setDescription('The total number of bytes received on the serial interface.')
lucentPM4ModemOutByteCount = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 3, 1, 1, 11), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4ModemOutByteCount.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4ModemOutByteCount.setDescription('The total number of bytes transmitted on the serial interface.')
lucentPM4ModemRetrains = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 3, 1, 1, 12), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4ModemRetrains.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4ModemRetrains.setDescription('The number of retrains attempted by the modem.')
lucentPM4ModemRenegotiates = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 3, 1, 1, 13), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4ModemRenegotiates.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4ModemRenegotiates.setDescription('The number of renegotiates attempted by the modem.')
lucentPM4ModemCalls = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 3, 1, 1, 14), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4ModemCalls.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4ModemCalls.setDescription('The number of times a call received by the modem.')
lucentPM4ModemDetects = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 3, 1, 1, 15), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4ModemDetects.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4ModemDetects.setDescription('The number of analog calls received by the modem.')
lucentPM4ModemConnects = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 3, 1, 1, 16), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4ModemConnects.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4ModemConnects.setDescription('The number of successful calls received by the modem.')
# --- lucentPM4CmEther subtree: ethernet interface table ----------------------
# Table indexed by (EtherBoardIndex, EtherIfType, EtherIndex); covers the
# manager card's two ethernet ports (10 Base-T and 10/100 Base-T).
lucentPM4CmEther = MibIdentifier((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 4))
lucentPM4EtherTable = MibTable((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 4, 1), )
if mibBuilder.loadTexts: lucentPM4EtherTable.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4EtherTable.setDescription('A list of ethernet interface entries. This object is not accessible')
lucentPM4EtherEntry = MibTableRow((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 4, 1, 1), ).setIndexNames((0, "LIVINGSTON-PM4-MIB", "lucentPM4EtherBoardIndex"), (0, "LIVINGSTON-PM4-MIB", "lucentPM4EtherIfType"), (0, "LIVINGSTON-PM4-MIB", "lucentPM4EtherIndex"))
if mibBuilder.loadTexts: lucentPM4EtherEntry.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4EtherEntry.setDescription('Ethernet interface entry containing objects at the Session/Physical layers.')
# Index columns and ifIndex cross-reference.
lucentPM4EtherBoardIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 4, 1, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4EtherBoardIndex.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4EtherBoardIndex.setDescription('A unique value for each ethernet board. The manager card has two ethernet interfaces at present. The ethernet interface in slot 4 has a board ID 10 and if there is a manager card in slot 5, the board ID for the interface would be 11.')
lucentPM4EtherIfType = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 4, 1, 1, 2), PMUnitType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4EtherIfType.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4EtherIfType.setDescription('The interface type which together with the board ID and the interface number will uniquely identify the interface.')
lucentPM4EtherIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 4, 1, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("ether0", 1), ("ether1", 2), ("other", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4EtherIndex.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4EtherIndex.setDescription('A unique value for each ethernet interface. The manager card has two ethernet interfaces at present. ether0(1) represents 10 Base-T interface and ether1(2) specifies the 10/100 Base-T auto-sensing ethernet interface.')
lucentPM4EtherIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 4, 1, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(67436545, 168099841))).clone(namedValues=NamedValues(("ether0", 67436545), ("ether1", 168099841)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4EtherIfIndex.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4EtherIfIndex.setDescription('IfIndex cross reference value for the ethernet interfaces. The manager card has two ethernet interfaces at present. ether0(67436545) represents 10 Base-T interface which corresponds to board 4 and interface 1. The enumerated value ether1(168099841) specifies the 10/100 Base-T auto-sensing ethernet interface which corresponds to board 4 and interface 2. We can add the standby manager card ethernet interfaces when they are available.')
lucentPM4EtherPortName = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 4, 1, 1, 5), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 8))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: lucentPM4EtherPortName.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4EtherPortName.setDescription('A printable ASCII string specifying the name of the ethernet port.')
# FIX: these three objects live at column OIDs inside lucentPM4EtherEntry
# ((...,4,1,4,1,1,6) through (...,4,1,4,1,1,8)) but were generated as
# MibScalar, unlike every sibling column in this table. Declaring them as
# MibTableColumn makes the SNMP agent treat them as row-indexed columns so
# GetNext/GetBulk table walks traverse them like the rest of the entry.
lucentPM4EtherMacAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 4, 1, 1, 6), PhysAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4EtherMacAddress.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4EtherMacAddress.setDescription('Physical address of the interface.')
lucentPM4EtherIpAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 4, 1, 1, 7), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: lucentPM4EtherIpAddress.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4EtherIpAddress.setDescription('IP address of the interface.')
lucentPM4EtherIpGateway = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 4, 1, 1, 8), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: lucentPM4EtherIpGateway.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4EtherIpGateway.setDescription('IP address of the gateway machine.')
# Network configuration columns (read-write).
lucentPM4EtherPriNameServer = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 4, 1, 1, 9), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: lucentPM4EtherPriNameServer.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4EtherPriNameServer.setDescription('IP address of the primary name server.')
lucentPM4EtherAltNameServer = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 4, 1, 1, 10), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: lucentPM4EtherAltNameServer.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4EtherAltNameServer.setDescription('IP address of the alternate name server.')
lucentPM4EtherSubnetMask = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 4, 1, 1, 11), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: lucentPM4EtherSubnetMask.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4EtherSubnetMask.setDescription('Subnet mask of the interface. Used to partition the network into different branches.')
lucentPM4EtherInFilter = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 4, 1, 1, 12), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 16))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: lucentPM4EtherInFilter.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4EtherInFilter.setDescription('IP Input packet filter. Used to control the type of IP packets reaching the interface.')
lucentPM4EtherOutFilter = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 4, 1, 1, 13), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 16))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: lucentPM4EtherOutFilter.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4EtherOutFilter.setDescription('IP output packet filter. Used to control the type of packets sent out of the interface.')
# Boolean-style option columns: all share the disable(1)/enable(2) enumeration
# and read-write access; only the option toggled differs.
lucentPM4EtherOptRip = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 4, 1, 1, 14), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("disable", 1), ("enable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: lucentPM4EtherOptRip.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4EtherOptRip.setDescription('The RIP protocol enable/disable option.')
lucentPM4EtherOptSlip = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 4, 1, 1, 15), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("disable", 1), ("enable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: lucentPM4EtherOptSlip.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4EtherOptSlip.setDescription('The SLIP protocol enable/disable option.')
lucentPM4EtherOptEtherDown = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 4, 1, 1, 16), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("disable", 1), ("enable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: lucentPM4EtherOptEtherDown.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4EtherOptEtherDown.setDescription('Ethernet interface down enable/disable option.')
lucentPM4EtherOptBcastHigh = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 4, 1, 1, 17), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("disable", 1), ("enable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: lucentPM4EtherOptBcastHigh.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4EtherOptBcastHigh.setDescription('Use high one(s) broadcast address enable/disable option.')
lucentPM4EtherOptSnmp = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 4, 1, 1, 18), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("disable", 1), ("enable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: lucentPM4EtherOptSnmp.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4EtherOptSnmp.setDescription('Default SNMP protocol enable/disable option.')
lucentPM4EtherOptNoListen = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 4, 1, 1, 19), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("disable", 1), ("enable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: lucentPM4EtherOptNoListen.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4EtherOptNoListen.setDescription('Do not listen to RIP on the ether interface.')
lucentPM4EtherOptDefaultRip = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 4, 1, 1, 20), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("disable", 1), ("enable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: lucentPM4EtherOptDefaultRip.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4EtherOptDefaultRip.setDescription('Default RIP protocol enable/disable option.')
lucentPM4EtherOptDefaultListen = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 4, 1, 1, 21), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("disable", 1), ("enable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: lucentPM4EtherOptDefaultListen.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4EtherOptDefaultListen.setDescription('Default listen enable/disable option.')
lucentPM4EtherOptIPFilter = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 4, 1, 1, 22), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("disable", 1), ("enable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: lucentPM4EtherOptIPFilter.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4EtherOptIPFilter.setDescription('IP filter enable/disable option.')
lucentPM4EtherOptDns = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 4, 1, 1, 23), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("disable", 1), ("enable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: lucentPM4EtherOptDns.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4EtherOptDns.setDescription('DNS enable/disable option.')
lucentPM4EtherOptPmeMsg = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 4, 1, 1, 24), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("disable", 1), ("enable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: lucentPM4EtherOptPmeMsg.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4EtherOptPmeMsg.setDescription('PME Msg. enable/disable option. Whatever that means.')
lucentPM4EtherOptNoClip = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 4, 1, 1, 25), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("disable", 1), ("enable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: lucentPM4EtherOptNoClip.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4EtherOptNoClip.setDescription('No Clip enable/disable option. Whatever that means.')
lucentPM4EtherOptEtherIpx = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 4, 1, 1, 26), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("disable", 1), ("enable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: lucentPM4EtherOptEtherIpx.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4EtherOptEtherIpx.setDescription('Ether IPX enable/disable option.')
lucentPM4EtherOptNetBIOS = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 4, 1, 1, 27), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("disable", 1), ("enable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: lucentPM4EtherOptNetBIOS.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4EtherOptNetBIOS.setDescription('Net BIOS enable/disable option.')
lucentPM4EtherOptAccounting = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 4, 1, 1, 28), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("disable", 1), ("enable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: lucentPM4EtherOptAccounting.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4EtherOptAccounting.setDescription('Accounting enable/disable option.')
lucentPM4EtherOptNoPAP = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 4, 1, 4, 1, 1, 29), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("disable", 1), ("enable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: lucentPM4EtherOptNoPAP.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4EtherOptNoPAP.setDescription('PAP enable/disable option.')
# --- lucentPM4FaultMgmt subtree: trap history table --------------------------
# Per its DESCRIPTION, the agent keeps a history of past traps (up to 500
# entries per lucentPM4FMChasTrapIndex) that managers can poll later.
lucentPM4FaultMgmt = MibIdentifier((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 5))
lucentPM4FaultMgmtIsolation = MibIdentifier((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 5, 1))
lucentPM4FaultMgmtChasTrap = MibTable((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 5, 1, 1), )
if mibBuilder.loadTexts: lucentPM4FaultMgmtChasTrap.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4FaultMgmtChasTrap.setDescription('Chassis Trap table which indicates one of several Traps. The chassis trap table stores the previous history of the traps which can be retrieved by the management stations at a later time. This object is not-accessible and present for MIB clarity.')
lucentPM4FMChasTrapEntry = MibTableRow((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 5, 1, 1, 1), ).setIndexNames((0, "LIVINGSTON-PM4-MIB", "lucentPM4FMChasTrapIndex"))
if mibBuilder.loadTexts: lucentPM4FMChasTrapEntry.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4FMChasTrapEntry.setDescription('Entry in the chassis Trap table. Each trap is uniquely identified by an index. This object is not accessible and present for MIB clarity.')
lucentPM4FMChasTrapIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 5, 1, 1, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4FMChasTrapIndex.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4FMChasTrapIndex.setDescription('Index into the Trap table on the agent. This table stores the previous 500 traps which can be retrieved at any given time.')
# FIX: this object sits at a column OID inside lucentPM4FMChasTrapEntry
# ((...,5,1,1,1,2)) but was generated as MibScalar, unlike the other columns
# of the trap table (e.g. UnitType/UnitIndex/Status below, all
# MibTableColumn). Declare it as MibTableColumn so it is served per-row
# during table walks like its siblings.
lucentPM4FMChasTrapBoardID = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 5, 1, 1, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4FMChasTrapBoardID.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4FMChasTrapBoardID.setDescription('Board ID is the board number for which this trap is stored. If the trap is for an auxillary device such as a power supply or fan, this value is set to zero.')
lucentPM4FMChasTrapUnitType = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 5, 1, 1, 1, 3), PMUnitType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4FMChasTrapUnitType.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4FMChasTrapUnitType.setDescription('Uniquely specifies the unit type for this trap. The unit can be a board or any other device in the chassis such as a fan or a power supply.')
lucentPM4FMChasTrapUnitIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 5, 1, 1, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4FMChasTrapUnitIndex.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4FMChasTrapUnitIndex.setDescription('Uniquely specifies the unit index. The unit index is same as the ifIndex for T1/E1 interfaces, or the modemIndex for the modems or fan or power supply index for fan or power supplies.')
lucentPM4FMChasTrapStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 5, 1, 1, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))).clone(namedValues=NamedValues(("other", 1), ("online", 2), ("offline", 3), ("maintenance", 4), ("fault", 5), ("notinstalled", 6)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4FMChasTrapStatus.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4FMChasTrapStatus.setDescription('Trap status specifies the associated object in the Trap is online(2), offline(3), maintenance(4) or fault(5).')
lucentPM4FMChasTrapSeverity = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 5, 1, 1, 1, 6), PMAlarmType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4FMChasTrapSeverity.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4FMChasTrapSeverity.setDescription('Trap severity specifies the severity of the Trap for the associated object.')
lucentPM4FMChasTrapTimeStamp = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 5, 1, 1, 1, 7), TimeTicks()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4FMChasTrapTimeStamp.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4FMChasTrapTimeStamp.setDescription('This object stores the timestamp of this trap.')
lucentPM4FMChasTrapState = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 5, 1, 1, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("trapsent", 1), ("ackdue", 2), ("acked", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: lucentPM4FMChasTrapState.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4FMChasTrapState.setDescription('This object stores the Trap state of this trap.')
# --- Trap varbind objects (…307.1.1.2.1.5.1.x) ---
# Per their DESCRIPTIONs these objects are never reachable via GET/GET-NEXT/SET;
# they exist only as varbinds carried inside trap PDUs, hence plain MibScalar
# with no max-access set.
lucentPM4FMBoardIndex = MibScalar((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 5, 1, 2), Integer32())
if mibBuilder.loadTexts: lucentPM4FMBoardIndex.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4FMBoardIndex.setDescription('Board index uniquely specifies the board in the chassis. This object is set to zero for power supplies, fans and other auxillary devices. This object is not accessible through Get, Get-Next or Set PDUs. It is sent out as part of the Trap.')
lucentPM4FMUnitIndex = MibScalar((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 5, 1, 3), Integer32())
if mibBuilder.loadTexts: lucentPM4FMUnitIndex.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4FMUnitIndex.setDescription('Unit index uniquely specifies the T1/E1 line, or modem or any device (logical/physical) in the chassis. This object is not accessible through Get, Get-Next or Set PDUs. It is sent out as part of the Trap.')
lucentPM4FMUnitType = MibScalar((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 5, 1, 4), PMUnitType())
if mibBuilder.loadTexts: lucentPM4FMUnitType.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4FMUnitType.setDescription('Unit type specifies the T1/E1 line, or modem or any device in the chassis. This object is not accessible through Get, Get-Next or Set PDUs. It is sent out as part of the Trap.')
# Enumerated status value identifying which condition the trap reports.
lucentPM4FMUnitTrapStatus = MibScalar((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 5, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28))).clone(namedValues=NamedValues(("other", 1), ("offline", 2), ("online", 3), ("failed", 4), ("restored", 5), ("pwrwarn", 6), ("tempwarn", 7), ("temphot", 8), ("dtrloss", 9), ("carrierloss", 10), ("renegotiation", 11), ("los", 12), ("ais", 13), ("redalarm", 14), ("yellowalarm", 15), ("cv", 16), ("crc", 17), ("bpv", 18), ("fer", 19), ("pll", 20), ("es", 21), ("ses", 22), ("sefs", 23), ("uas", 24), ("dm", 25), ("les", 26), ("css", 27), ("bes", 28))))
if mibBuilder.loadTexts: lucentPM4FMUnitTrapStatus.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4FMUnitTrapStatus.setDescription('Trap status specifies the associated object in the Trap. This object is not accessible other than when produced as the result of a trap.')
lucentPM4FMUnitTrapSeverity = MibScalar((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 5, 1, 6), PMAlarmType())
if mibBuilder.loadTexts: lucentPM4FMUnitTrapSeverity.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4FMUnitTrapSeverity.setDescription('Trap severity specifies the severity of the Trap for the associated object. This object is not accessible except when produced as the result of a trap.')
# --- Equipment trap configuration table (…307.1.1.2.1.5.2.1) ---
# Rows are indexed by (board index, unit type, unit index); the table enables or
# disables traps per board / interface / auxiliary device.
lucentPM4FMTrapConfig = MibIdentifier((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 5, 2))
lucentPM4FMEqpTrapCfg = MibTable((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 5, 2, 1), )
if mibBuilder.loadTexts: lucentPM4FMEqpTrapCfg.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4FMEqpTrapCfg.setDescription('Equipment Trap configuration table configure Traps. The objects in this table are used to enable or disable traps on a per board/interface/device basis. This object is not-accessible and present for MIB clarity.')
lucentPM4FMEqpTrapCfgEntry = MibTableRow((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 5, 2, 1, 1), ).setIndexNames((0, "LIVINGSTON-PM4-MIB", "lucentPM4FMEqpBoardIndex"), (0, "LIVINGSTON-PM4-MIB", "lucentPM4FMEqpUnitType"), (0, "LIVINGSTON-PM4-MIB", "lucentPM4FMEqpUnitIndex"))
if mibBuilder.loadTexts: lucentPM4FMEqpTrapCfgEntry.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4FMEqpTrapCfgEntry.setDescription('Entry in the equipment Trap config table. Each trap is uniquely identified by the board ID, Unit type and unit index. For auxillary devices such as power supplies and fans, the board index will be zero, the unit index identifies the units and the unit type specifies if the unit is a fan, power supplies etc. This object is not accessible and present for MIB clarity.')
# Index columns (read-only, as is conventional for index objects).
lucentPM4FMEqpBoardIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 5, 2, 1, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4FMEqpBoardIndex.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4FMEqpBoardIndex.setDescription('Board ID for which the Trap configuration need to apply. The board ID is zero if this trap configuration is for an auxillary device such as fans or power supplies.')
lucentPM4FMEqpUnitType = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 5, 2, 1, 1, 2), PMUnitType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4FMEqpUnitType.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4FMEqpUnitType.setDescription('Unit type alongwith the board index and unit index specifies uniquely the device/interface which is being configured.')
# Third index column of lucentPM4FMEqpTrapCfgEntry. The generated module declared
# this as MibScalar even though its OID (…,5,2,1,1,3) lies under the table row,
# it is named in the row's setIndexNames, and its sibling columns are
# MibTableColumn — corrected to MibTableColumn for consistency.
lucentPM4FMEqpUnitIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 5, 2, 1, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4FMEqpUnitIndex.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4FMEqpUnitIndex.setDescription('Unit index refers to the interface or sub-device such as a modem, serial port etc. For devices such as power supplies and fans this object is zero.')
# Writable columns selecting which trap the row configures and whether it is
# enabled (disable=1 suppresses the trap, enable=2 sends it to all configured
# management stations).
lucentPM4FMEqpTrapId = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 5, 2, 1, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15))).clone(namedValues=NamedValues(("boardoffline", 1), ("boardonline", 2), ("pwrsupfail", 3), ("pwrsuprestored", 4), ("fanfail", 5), ("fanrestored", 6), ("boardtempwarn", 7), ("boardtempnormal", 8), ("boardtoohot", 9), ("modemfail", 10), ("linedown", 11), ("lineup", 12), ("linethresh", 13), ("boardpwrshutdown", 14), ("radiusauthfail", 15)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: lucentPM4FMEqpTrapId.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4FMEqpTrapId.setDescription('Trap ID indicating the trap for which the configuration must apply.')
lucentPM4FMEqpTrapCtl = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 5, 2, 1, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("disable", 1), ("enable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: lucentPM4FMEqpTrapCtl.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4FMEqpTrapCtl.setDescription('Trap control which configures the trap off(1) or on(2). When the trap is configured as off(1), the trap is not sent out to the management station. When configures as on(2), the trap is sent to all the management stations configured to receive the trap.')
# Repeat-interval column of lucentPM4FMEqpTrapCfgEntry (0 = non-repeating trap).
# The generated module declared this as MibScalar even though its OID
# (…,5,2,1,1,6) lies under the table row and every sibling is a
# MibTableColumn — corrected to MibTableColumn for consistency.
lucentPM4FMEqpRepTimer = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 5, 2, 1, 1, 6), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: lucentPM4FMEqpRepTimer.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4FMEqpRepTimer.setDescription('If the trap is to be repeated, this object specifies the time in seconds. When this object value is set to 0, it indicates the trap is non-repeat trap.')
# --- T1/E1 threshold trap configuration table (…307.1.1.2.1.5.2.2) ---
# Writable per-line thresholds (ES, SES, SEFS, UAS, CSS, PCV, LES, BES, DM);
# exceeding a threshold raises the corresponding trap.
# NOTE(review): the row's setIndexNames reference lucentPM4FMThreshBoardIndex /
# lucentPM4FMThreshUnitType / lucentPM4FMThreshUnitIndex, but the columns below
# are named lucentPM4FMT1E1Thresh*. This mismatch is carried over from the
# source MIB — confirm against the original LIVINGSTON-PM4-MIB before relying
# on index resolution for this table.
lucentPM4FMT1E1ThreshTrapCfg = MibTable((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 5, 2, 2), )
if mibBuilder.loadTexts: lucentPM4FMT1E1ThreshTrapCfg.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4FMT1E1ThreshTrapCfg.setDescription('T1/E1 Threshold Trap configuration table to configure the thresholds for various T1/E1 traps. This object is not-accessible and present for MIB clarity.')
lucentPM4FMT1E1ThreshTrapCfgEntry = MibTableRow((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 5, 2, 2, 1), ).setIndexNames((0, "LIVINGSTON-PM4-MIB", "lucentPM4FMThreshBoardIndex"), (0, "LIVINGSTON-PM4-MIB", "lucentPM4FMThreshUnitType"), (0, "LIVINGSTON-PM4-MIB", "lucentPM4FMThreshUnitIndex"))
if mibBuilder.loadTexts: lucentPM4FMT1E1ThreshTrapCfgEntry.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4FMT1E1ThreshTrapCfgEntry.setDescription('Entry in the T1/E1 threshold trap config table. Each trap is uniquely identified by the board index, unit type and unit index which is the T1/E1 interface number. This object is not accessible and present for MIB clarity.')
# Index columns.
lucentPM4FMT1E1ThreshBoardIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 5, 2, 2, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4FMT1E1ThreshBoardIndex.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4FMT1E1ThreshBoardIndex.setDescription('Board ID for which the Trap threshold configuration must apply. It includes boards 1 - 10 and other devices such as power supplies and fans etc.')
lucentPM4FMT1E1ThreshUnitType = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 5, 2, 2, 1, 2), PMUnitType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4FMT1E1ThreshUnitType.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4FMT1E1ThreshUnitType.setDescription('Unit type for which the Trap threshold configuration must be applied.')
# Writable threshold columns, one per RFC 1406-style error statistic.
lucentPM4FMT1E1ThreshESs = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 5, 2, 2, 1, 3), Gauge32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: lucentPM4FMT1E1ThreshESs.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4FMT1E1ThreshESs.setDescription('The threshold for errored seconds on the interface. A trap is issued when this set limit is exceeded. When this threshold exceeds, the performance of the interface will degrade. A trap is generated to notify the adminstrator to take appropriate action.')
lucentPM4FMT1E1ThreshSESs = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 5, 2, 2, 1, 4), Gauge32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: lucentPM4FMT1E1ThreshSESs.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4FMT1E1ThreshSESs.setDescription('The threshold for severely errored seconds on the interface. A trap is issued when this limit is exceeded. A severely errored seconds is a second with 320 or more path code violation error events or one or more out of frame defects or detected AIS defect.')
lucentPM4FMT1E1ThreshSEFSs = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 5, 2, 2, 1, 5), Gauge32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: lucentPM4FMT1E1ThreshSEFSs.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4FMT1E1ThreshSEFSs.setDescription('The threshold for severely errored framing seconds. A trap is issued when this threshold is exceeded. A severely errored framing second is a second with one or more frame defects or detected AIS defect.')
lucentPM4FMT1E1ThreshUASs = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 5, 2, 2, 1, 6), Gauge32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: lucentPM4FMT1E1ThreshUASs.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4FMT1E1ThreshUASs.setDescription('The threshold for unavailable seconds. A trap is issued when this set threshold is exceeded. Unavailable seconds are calculated by counting the number of seconds that the interface is unavailable from the onset of 10 SESs. Once unavailable, the interface becomes available at the onset of 10 contiguous no SESs.')
lucentPM4FMT1E1ThreshCSSs = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 5, 2, 2, 1, 7), Gauge32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: lucentPM4FMT1E1ThreshCSSs.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4FMT1E1ThreshCSSs.setDescription('The threshold for controlled slip seconds on the interface. A trap is issued when this set threshold is exceeded. A controlled slip second is a one-second interval containing one or more controlled slips.')
lucentPM4FMT1E1ThreshPCVs = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 5, 2, 2, 1, 8), Gauge32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: lucentPM4FMT1E1ThreshPCVs.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4FMT1E1ThreshPCVs.setDescription('The threshold for path code violations on the interface. A trap is issued when this set threshold is exceeded. PCV is a frame syncronization bit error in the D4 and E1-noCRC format interfaces or a CRC error in the ESF (extended super frame) and E1-CRC interface formats.')
lucentPM4FMT1E1ThreshLESs = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 5, 2, 2, 1, 9), Gauge32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: lucentPM4FMT1E1ThreshLESs.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4FMT1E1ThreshLESs.setDescription('The threshold for line errored seconds on the interface. A trap is sent to the manager when this set threshold is exceeded. A line errored second, according to T1M1.3 is a second in which one or more line code violations were detected.')
lucentPM4FMT1E1ThreshBESs = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 5, 2, 2, 1, 10), Gauge32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: lucentPM4FMT1E1ThreshBESs.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4FMT1E1ThreshBESs.setDescription('The threshold for bursty errored seconds on the interface. A trap is sent to the manager when this set threshold is exceeded. A bursty errored second is a second with fewer than 320 and more than 1 path code violations.')
lucentPM4FMT1E1ThreshDMs = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 5, 2, 2, 1, 11), Gauge32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: lucentPM4FMT1E1ThreshDMs.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4FMT1E1ThreshDMs.setDescription('The threshold for degraded minutes on the interface. A trap is sent to the manager when this set threshold is exceeded. Degraded minutes are determined by collecting all of the available seconds, after removing any severely errored seconds. The resulting seconds is grouped into 60 second intervals and if the cumulative errors during the seconds present in the group exceeds 1E-6.')
# Repeat-interval column of lucentPM4FMT1E1ThreshTrapCfgEntry (0 = non-repeating
# trap). The generated module declared this as MibScalar even though its OID
# (…,5,2,2,1,12) lies under the table row and every sibling is a
# MibTableColumn — corrected to MibTableColumn for consistency.
lucentPM4FMT1E1ThreshRepTimer = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 5, 2, 2, 1, 12), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: lucentPM4FMT1E1ThreshRepTimer.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4FMT1E1ThreshRepTimer.setDescription('If the trap is to be repeated, this object specifies the time in seconds. When this object value is set to 0, it indicates the trap is non-repeat trap.')
# Acknowledgement column: writing ack(3) clears the trap condition.
lucentPM4FMT1E1ThreshTrapAck = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 5, 2, 2, 1, 13), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("other", 1), ("noack", 2), ("ack", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: lucentPM4FMT1E1ThreshTrapAck.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4FMT1E1ThreshTrapAck.setDescription('If set to ack(3), clears the trap condition. If the value is set to noack(2), leaves the trap condition unchanged.')
# --- Environment trap configuration table (…307.1.1.2.1.5.2.3) ---
# Per-unit temperature and power-level set points plus allowed deviation ranges;
# a trap fires when measurements leave the configured window.
lucentPM4FMEnvTrapCfg = MibTable((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 5, 2, 3), )
if mibBuilder.loadTexts: lucentPM4FMEnvTrapCfg.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4FMEnvTrapCfg.setDescription('Environment Trap configuration table. This table enables configuration of voltage, power levels and temperature ranges for different units in the chassis. This object is not-accessible and present for MIB clarity.')
lucentPM4FMEnvTrapCfgEntry = MibTableRow((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 5, 2, 3, 1), ).setIndexNames((0, "LIVINGSTON-PM4-MIB", "lucentPM4FMEnvBoardID"), (0, "LIVINGSTON-PM4-MIB", "lucentPM4FMEnvUnitType"), (0, "LIVINGSTON-PM4-MIB", "lucentPM4FMEnvUnitIndex"))
if mibBuilder.loadTexts: lucentPM4FMEnvTrapCfgEntry.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4FMEnvTrapCfgEntry.setDescription('Entry in the environment trap config table. Each trap is uniquely identified by the board index, unit type and unit index. This object is not accessible and present for MIB clarity.')
# Index columns.
lucentPM4FMEnvBoardID = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 5, 2, 3, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4FMEnvBoardID.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4FMEnvBoardID.setDescription('Board ID specifies the board identifier for this trap. If the trap configuration is for an auxillary device such as a power supply or fan, this object will be set to zero. The unit type and the unit index will uniquely identify the auxillary devices.')
lucentPM4FMEnvUnitType = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 5, 2, 3, 1, 2), PMUnitType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4FMEnvUnitType.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4FMEnvUnitType.setDescription('Unit for which the Trap configuration must to apply.')
lucentPM4FMEnvUnitIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 5, 2, 3, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4FMEnvUnitIndex.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4FMEnvUnitIndex.setDescription('Unit index specifies the interface or sub-unit for this trap (modem or T1/E1 interface etc.). The unit type and the unit index will uniquely identify the auxillary devices.')
# Writable set points / ranges (temperature in degrees Fahrenheit, power in volts
# per the DESCRIPTION texts).
lucentPM4FMEnvOptUnitTemp = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 5, 2, 3, 1, 4), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: lucentPM4FMEnvOptUnitTemp.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4FMEnvOptUnitTemp.setDescription('The optimum temperature for this unit. A trap is generated when the temperature deviates from the specified range. The temperature is specified as an integer in degrees farenheit.')
lucentPM4FMEnvUnitTempRange = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 5, 2, 3, 1, 5), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: lucentPM4FMEnvUnitTempRange.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4FMEnvUnitTempRange.setDescription('The temperature range above which a trap is generated. The temperature must be specified as an integer in degree Farenhiet (for example +/- 5 degree Far.).')
lucentPM4FMEnvOptUnitPwrLvl = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 5, 2, 3, 1, 6), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: lucentPM4FMEnvOptUnitPwrLvl.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4FMEnvOptUnitPwrLvl.setDescription('The optimal power level that is appropriate for this unit. A trap is generated when the power level fluctuates outside the limits set.')
lucentPM4FMEnvUnitPwrRange = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 5, 2, 3, 1, 7), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: lucentPM4FMEnvUnitPwrRange.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4FMEnvUnitPwrRange.setDescription('The power range specified in volts. A trap is generated when the power level fluctuates outside the Opt Pwr +/- Range set.')
lucentPM4FMEnvTrapCtl = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 5, 2, 3, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("disable", 1), ("enable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: lucentPM4FMEnvTrapCtl.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4FMEnvTrapCtl.setDescription('The trap control used to turn the environment traps on or off for the specified unit(s).')
# --- T1/E1 performance management, current 15-minute interval table
# (…307.1.1.2.1.6.1.1), modeled after RFC 1406 ---
# NOTE(review): the row's setIndexNames reference lucentPM4T1E1PMBoardID /
# lucentPM4T1E1PMUnitType / lucentPM4T1E1PMLineNum, but the columns below are
# named lucentPM4T1E1PMCur*. This mismatch is carried over from the source MIB —
# confirm against the original LIVINGSTON-PM4-MIB.
lucentPM4PerfMgmt = MibIdentifier((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 6))
lucentPM4T1E1PerfMgmt = MibIdentifier((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 6, 1))
lucentPM4T1E1PMCur = MibTable((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 6, 1, 1), )
if mibBuilder.loadTexts: lucentPM4T1E1PMCur.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1PMCur.setDescription('Performance management table representing the performance statistics of T1/E1 interfaces in the box. This table represents the current 15 mins statistics. This object is not accessible and present for clarity purpose. This table is part of RFC 1406.')
lucentPM4T1E1PMCurEntry = MibTableRow((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 6, 1, 1, 1), ).setIndexNames((0, "LIVINGSTON-PM4-MIB", "lucentPM4T1E1PMBoardID"), (0, "LIVINGSTON-PM4-MIB", "lucentPM4T1E1PMUnitType"), (0, "LIVINGSTON-PM4-MIB", "lucentPM4T1E1PMLineNum"))
if mibBuilder.loadTexts: lucentPM4T1E1PMCurEntry.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1PMCurEntry.setDescription('Performance management table entries for all the T1/E1 interfaces in the box. This table represents the current 15 mins statistics. This object is not accessible and present for clarity purpose.')
# First column of lucentPM4T1E1PMCurEntry. The generated module declared this as
# MibScalar even though its OID (…,6,1,1,1,1) lies under the table row and its
# sibling columns (CurUnitType, CurLineNum, …) are MibTableColumn — corrected to
# MibTableColumn for consistency.
lucentPM4T1E1PMCurBoard = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 6, 1, 1, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4T1E1PMCurBoard.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1PMCurBoard.setDescription('Board number of the interface. The global interface number is computed by using the Most Significant byte of the ifIndex and the Least Significant 2 bytes represents the interface index. Byte 3 will represent the unit type which would be a T1 or E1.')
# Remaining index/cross-reference columns and the first error counters of the
# current-interval table.
lucentPM4T1E1PMCurUnitType = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 6, 1, 1, 1, 2), PMUnitType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4T1E1PMCurUnitType.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1PMCurUnitType.setDescription('Unit type indicates the type of interface as T1/E1 or T3/E3 in future. This is part of the interface table ifIndex which is constructed with boardID, unit type and unit index.')
lucentPM4T1E1PMCurLineNum = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 6, 1, 1, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4T1E1PMCurLineNum.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1PMCurLineNum.setDescription('Line number uniquely identifies the T1/E1 interface on a given board.')
lucentPM4T1E1PMCurIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 6, 1, 1, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4T1E1PMCurIfIndex.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1PMCurIfIndex.setDescription('Interface table ifIndex cross reference. The global interface number is computed by using the Most Significant byte as the board ID and the Least Significant 2 bytes represents the interface index. The third byte represents the unit type which will be a T1 or E1. Thus board 0 interface 3 is represented as 0x00050003. The global interface number corresponds to the IfIndex in MIB II.')
lucentPM4T1E1PMCurESs = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 6, 1, 1, 1, 5), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4T1E1PMCurESs.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1PMCurESs.setDescription('The number of errored seconds, encountered by the line in the current 15 mins interval.')
lucentPM4T1E1PMCurSESs = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 6, 1, 1, 1, 6), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4T1E1PMCurSESs.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1PMCurSESs.setDescription('The number of Severely Errored Seconds encountered by the line in the current 15 mins interval.')
# SEFS counter column of lucentPM4T1E1PMCurEntry. The generated module declared
# this as MibScalar even though its OID (…,6,1,1,1,7) lies under the table row
# and every sibling counter is a MibTableColumn — corrected to MibTableColumn
# for consistency.
lucentPM4T1E1PMCurSEFSs = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 6, 1, 1, 1, 7), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4T1E1PMCurSEFSs.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1PMCurSEFSs.setDescription('The number of Severely Errored Framing Seconds encountered by the line in the current 15 mins interval.')
# Remaining current-interval counters.
lucentPM4T1E1PMCurUASs = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 6, 1, 1, 1, 8), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4T1E1PMCurUASs.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1PMCurUASs.setDescription('The number of Unavailable Seconds encountered by the line in the current 15 mins interval.')
lucentPM4T1E1PMCurCSSs = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 6, 1, 1, 1, 9), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4T1E1PMCurCSSs.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1PMCurCSSs.setDescription('The number of Controlled Slip Seconds encountered by the line in the current 15 mins interval.')
lucentPM4T1E1PMCurPCVs = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 6, 1, 1, 1, 10), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4T1E1PMCurPCVs.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1PMCurPCVs.setDescription('The number of Path Coding Violations encountered by the line in the current 15 mins interval.')
lucentPM4T1E1PMCurLESs = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 6, 1, 1, 1, 11), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4T1E1PMCurLESs.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1PMCurLESs.setDescription('The number of Line Errored Seconds encountered by the line in the current 15 mins interval.')
lucentPM4T1E1PMCurBESs = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 6, 1, 1, 1, 12), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4T1E1PMCurBESs.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1PMCurBESs.setDescription('The number of Bursty Errored Seconds encountered by the line in the current 15 mins interval.')
lucentPM4T1E1PMCurDMs = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 6, 1, 1, 1, 13), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4T1E1PMCurDMs.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1PMCurDMs.setDescription('The number of Degraded Minutes encountered by the line in the current 15 mins interval.')
lucentPM4T1E1PMCurLCVs = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 6, 1, 1, 1, 14), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4T1E1PMCurLCVs.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1PMCurLCVs.setDescription('The number of Line Code Violations encountered by the line in the current 15 mins interval.')
# --- T1/E1 performance management, per-interval history table
# (…307.1.1.2.1.6.2): 24 hours split into 96 fifteen-minute intervals,
# interval 1 being the most recent ---
lucentPM4T1E1PMInt = MibTable((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 6, 2), )
if mibBuilder.loadTexts: lucentPM4T1E1PMInt.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1PMInt.setDescription('Performance management table representing the performance statistics of T1/E1 interfaces in the box. This table represents the 24 hr statistics divided into 96 15 mins intervals. This object is not accessible and present for clarity purpose. This table is part of RFC 1406.')
lucentPM4T1E1PMIntEntry = MibTableRow((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 6, 2, 1), ).setIndexNames((0, "LIVINGSTON-PM4-MIB", "lucentPM4T1E1PMIntBoard"), (0, "LIVINGSTON-PM4-MIB", "lucentPM4T1E1PMIntUnitType"), (0, "LIVINGSTON-PM4-MIB", "lucentPM4T1E1PMIntLineNum"), (0, "LIVINGSTON-PM4-MIB", "lucentPM4T1E1PMIntInterval"))
if mibBuilder.loadTexts: lucentPM4T1E1PMIntEntry.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1PMIntEntry.setDescription('Performance management table entries for all the T1/E1 interfaces in the box. This table represents the 24 hr statistics divided into 96 15 mins intervals. This object is not accessible and present for clarity purpose.')
# Index columns of the interval table.
lucentPM4T1E1PMIntBoard = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 6, 2, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4T1E1PMIntBoard.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1PMIntBoard.setDescription('Board number of the interface. The global interface number is computed by using the Most Significant nibble of the ifIndex and the Least Significant nibble represents the interface index. Thus board 0 interface 3 is represented as 0x03 or 03 decimal and board 10 interface 3 is represented as 0xa3 or 163 decimal. In an integer, of 4 bytes wide, the 3 MSBytes will all be zeros. The global interface number corresponds to the IfIndex of MIB II.')
lucentPM4T1E1PMIntUnitType = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 6, 2, 1, 2), PMUnitType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4T1E1PMIntUnitType.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1PMIntUnitType.setDescription('Unit type indicates the type of physical or logical device. The unit type for this table is either T1 or E1.')
lucentPM4T1E1PMIntLineNum = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 6, 2, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4T1E1PMIntLineNum.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1PMIntLineNum.setDescription('Line number uniquely identifies the T1/E1 interface for this board.')
lucentPM4T1E1PMIntInterval = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 6, 2, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 96))).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4T1E1PMIntInterval.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1PMIntInterval.setDescription('Interval number for a given board. The 24 hr period is divided into 96 15 min intervals, where 1 is the most recent and 96 is the least recent.')
lucentPM4T1E1PMIntIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 6, 2, 1, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4T1E1PMIntIfIndex.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1PMIntIfIndex.setDescription('Interface table ifIndex cross reference. The global interface number is computed by using the Most Significant byte as the board ID and the Least Significant 2 bytes represents the interface index. The third byte represents the unit type which will be a T1 or E1. Thus board 0 interface 3 is represented as 0x00050003. The global interface number corresponds to the IfIndex in MIB II.')
lucentPM4T1E1PMIntESs = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 6, 2, 1, 6), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4T1E1PMIntESs.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1PMIntESs.setDescription('The number of errored seconds, encountered by the line in the last 24 hrs divided into 96 15 mins intervals.')
lucentPM4T1E1PMIntSESs = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 6, 2, 1, 7), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4T1E1PMIntSESs.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1PMIntSESs.setDescription('The number of Severely Errored Seconds encountered by the line for one of the 96 15 mins intervals.')
# SEFS counter column of lucentPM4T1E1PMIntEntry. The generated module declared
# this as MibScalar even though its OID (…,6,2,1,8) lies under the table row and
# every sibling counter is a MibTableColumn — corrected to MibTableColumn for
# consistency (same fix as lucentPM4T1E1PMCurSEFSs in the current-interval table).
lucentPM4T1E1PMIntSEFSs = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 6, 2, 1, 8), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4T1E1PMIntSEFSs.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1PMIntSEFSs.setDescription('The number of Severely Errored Framing Seconds encountered by the line for one of the 96 15 mins intervals.')
lucentPM4T1E1PMIntUASs = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 6, 2, 1, 9), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4T1E1PMIntUASs.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1PMIntUASs.setDescription('The number of Unavailable Seconds encountered by the line for one of the 96 15 mins intervals.')
lucentPM4T1E1PMIntCSSs = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 6, 2, 1, 10), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4T1E1PMIntCSSs.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1PMIntCSSs.setDescription('The number of Controlled Slip Seconds encountered by the line for one of the 96 15 mins intervals.')
lucentPM4T1E1PMIntPCVs = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 6, 2, 1, 11), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4T1E1PMIntPCVs.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1PMIntPCVs.setDescription('The number of Path Coding Violations encountered by the line for one of the 96 15 mins intervals.')
lucentPM4T1E1PMIntLESs = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 6, 2, 1, 12), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4T1E1PMIntLESs.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1PMIntLESs.setDescription('The number of Line Errored Seconds encountered by the line for one of the 96 15 mins intervals.')
lucentPM4T1E1PMIntBESs = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 6, 2, 1, 13), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4T1E1PMIntBESs.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1PMIntBESs.setDescription('The number of Bursty Errored Seconds encountered by the line for one of the 96 15 mins intervals.')
lucentPM4T1E1PMIntDMs = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 6, 2, 1, 14), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4T1E1PMIntDMs.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1PMIntDMs.setDescription('The number of Degraded Minutes encountered by the line for one of the 96 15 mins intervals.')
lucentPM4T1E1PMIntLCVs = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 6, 2, 1, 15), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4T1E1PMIntLCVs.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1PMIntLCVs.setDescription('The number of Line Code Violations encountered by the line for one of the 96 15 mins intervals.')
# --- T1/E1 24-hour-total PM table ---
# Table of cumulative 24 h performance statistics per T1/E1 line (RFC 1406
# style).  Rows are indexed by board, unit type, line number and interval.
lucentPM4T1E1PMTotal = MibTable((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 6, 3), )
if mibBuilder.loadTexts: lucentPM4T1E1PMTotal.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1PMTotal.setDescription('Performance management table representing the performance statistics of T1/E1 interfaces in the box. This table represents the 24 hr statistics divided into 96 15 mins intervals. This object is not accessible and present for clarity purpose. This table is part of RFC 1406.')
# Row definition; index columns are declared below the entry.
lucentPM4T1E1PMTotalEntry = MibTableRow((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 6, 3, 1), ).setIndexNames((0, "LIVINGSTON-PM4-MIB", "lucentPM4T1E1PMTotalBoard"), (0, "LIVINGSTON-PM4-MIB", "lucentPM4T1E1PMTotalUnitType"), (0, "LIVINGSTON-PM4-MIB", "lucentPM4T1E1PMTotalLineNum"), (0, "LIVINGSTON-PM4-MIB", "lucentPM4T1E1PMTotalInterval"))
if mibBuilder.loadTexts: lucentPM4T1E1PMTotalEntry.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1PMTotalEntry.setDescription('Performance management table entries for all the T1/E1 interfaces in the box. This table represents the 24 hr statistics divided into 96 15 mins intervals. This object is not accessible and present for clarity purpose.')
# Index and leading counter columns of the 24 h total table.
# NOTE(review): the row is indexed by "lucentPM4T1E1PMTotalInterval" (see
# setIndexNames above), but no column of that name is declared in this
# chunk — presumably defined elsewhere or omitted by the MIB compiler;
# verify against the original MIB.
lucentPM4T1E1PMTotalBoard = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 6, 3, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4T1E1PMTotalBoard.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1PMTotalBoard.setDescription('Board number of the interface. The global interface number is computed by using the Most Significant nibble of the ifIndex and the Least Significant nibble represents the interface index. Thus board 0 interface 3 is represented as 0x03 or 03 decimal and board 10 interface 3 is represented as 0xa3 or 163 decimal. In an integer, of 4 bytes wide, the 3 MSBytes will all be zeros. The global interface number corresponds to the IfIndex of MIB II. This table stores the cumulative values for the past 24 hr period.')
lucentPM4T1E1PMTotalUnitType = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 6, 3, 1, 2), PMUnitType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4T1E1PMTotalUnitType.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1PMTotalUnitType.setDescription('Unit type indicates the type of physical or logical device. The unit type for this table is either T1 or E1.')
lucentPM4T1E1PMTotalLineNum = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 6, 3, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4T1E1PMTotalLineNum.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1PMTotalLineNum.setDescription('Interface number for a given board. The global interface number is computed by using the Most Significant nibble of the ifIndex and the Least Significant nibble represents the interface index. Thus board 0 interface 3 is represented as 0x03 or 03 decimal and board 10 interface 3 is represented as 0xa3 or 163 decimal. In an integer, of 4 bytes wide, the 3 MSBytes will all be zeros. The global interface number corresponds to the IfIndex in MIB II.')
# Cross-reference to the MIB-II ifIndex (board nibble | line nibble).
lucentPM4T1E1PMTotalIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 6, 3, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4T1E1PMTotalIfIndex.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1PMTotalIfIndex.setDescription('IfIndex cross reference value. This value is obtained from the Board/board number and interface number by combining them into the LSByte. The upper nibble represents the board and the lower nibble represents the line number.')
# Cumulative error-second counters (24 h window).
lucentPM4T1E1PMTotalESs = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 6, 3, 1, 5), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4T1E1PMTotalESs.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1PMTotalESs.setDescription('The cumulative value of errored seconds, encountered by the line in the last 24 hrs.')
lucentPM4T1E1PMTotalSESs = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 6, 3, 1, 6), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4T1E1PMTotalSESs.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1PMTotalSESs.setDescription('The cumulative value Severely Errored Seconds encountered by the line for the 24 hr period.')
# Severely Errored Framing Seconds, 24 h cumulative total.
# FIX: this object was declared as MibScalar, but its OID
# (...1.6.3.1.7) places it inside lucentPM4T1E1PMTotalEntry and every
# sibling at the same depth is a MibTableColumn — declare it as a
# MibTableColumn so it registers as a proper column of the total table.
lucentPM4T1E1PMTotalSEFSs = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 6, 3, 1, 7), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4T1E1PMTotalSEFSs.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1PMTotalSEFSs.setDescription('The cumulative value of Severely Errored Framing Seconds encountered by the line for the 24 hr period.')
# Remaining cumulative 24 h counters: UAS, CSS, PCV, LES, BES, DM, LCV.
lucentPM4T1E1PMTotalUASs = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 6, 3, 1, 8), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4T1E1PMTotalUASs.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1PMTotalUASs.setDescription('The cumulative value of Unavailable Seconds encountered by the line for the 24 hr period.')
lucentPM4T1E1PMTotalCSSs = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 6, 3, 1, 9), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4T1E1PMTotalCSSs.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1PMTotalCSSs.setDescription('The cumulative value of Controlled Slip Seconds encountered by the line for the 24 hr period.')
lucentPM4T1E1PMTotalPCVs = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 6, 3, 1, 10), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4T1E1PMTotalPCVs.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1PMTotalPCVs.setDescription('The cumulative value of Path Coding Violations encountered by the line for the 24 hr period.')
lucentPM4T1E1PMTotalLESs = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 6, 3, 1, 11), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4T1E1PMTotalLESs.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1PMTotalLESs.setDescription('The cumulative value of Line Errored Seconds encountered by the line for the 24 hr period.')
lucentPM4T1E1PMTotalBESs = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 6, 3, 1, 12), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4T1E1PMTotalBESs.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1PMTotalBESs.setDescription('The cumulative value of Bursty Errored Seconds encountered by the line for the 24 hr period.')
lucentPM4T1E1PMTotalDMs = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 6, 3, 1, 13), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4T1E1PMTotalDMs.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1PMTotalDMs.setDescription('The cumulative value of Degraded Minutes encountered by the line for the 24 hr period.')
lucentPM4T1E1PMTotalLCVs = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 6, 3, 1, 14), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4T1E1PMTotalLCVs.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4T1E1PMTotalLCVs.setDescription('The cumulative value of Line Code Violations encountered by the line for the 24 hr period.')
# OID subtree anchors for the security and accounting management branches.
lucentPM4SecurityMgmt = MibIdentifier((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 7))
lucentPM4AcctMgmt = MibIdentifier((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 8))
lucentPM4AcctMgmtComm = MibIdentifier((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 8, 1))
# --- SNMP community table: restricts get/set access per management station ---
lucentPM4SnmpCommTable = MibTable((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 8, 1, 1), )
if mibBuilder.loadTexts: lucentPM4SnmpCommTable.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4SnmpCommTable.setDescription('The SNMP Community Table. This table contains entries to restrict the SNMP get and set operations.')
# Row is indexed by the community NAME (a string), not the numeric index.
lucentPM4SnmpCommEntry = MibTableRow((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 8, 1, 1, 1), ).setIndexNames((0, "LIVINGSTON-PM4-MIB", "lucentPM4SnmpCommName"))
if mibBuilder.loadTexts: lucentPM4SnmpCommEntry.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4SnmpCommEntry.setDescription('The entries in the community table.')
# Numeric slot number 1..10 (max 10 management stations).
lucentPM4SnmpCommIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 8, 1, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 10))).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4SnmpCommIndex.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4SnmpCommIndex.setDescription('The index of the command in the command table. A MAX of 10 network management stations must be specified along with their community names.')
# NOTE(review): this column declares no setMaxAccess, unlike every other
# column here — presumably intentional for an index column, but verify
# against the original MIB source.
lucentPM4SnmpCommName = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 8, 1, 1, 1, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 32)))
if mibBuilder.loadTexts: lucentPM4SnmpCommName.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4SnmpCommName.setDescription('The name of the SNMP Community for SNMP readers and writers. The size of the string is limited to 32 characters. All characters in the string must be printable.')
lucentPM4SnmpCommIpAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 8, 1, 1, 1, 3), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: lucentPM4SnmpCommIpAddr.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4SnmpCommIpAddr.setDescription('The IP Address of the remote community.')
# Per-community access flags (read / write / trap forwarding) plus row status.
# NOTE(review): the enum label "ensable" looks like a typo for "enable", but
# it is repeated consistently and presumably reproduces the original MIB
# definition verbatim — do not "fix" it without checking the source MIB, as
# the label is part of the compiled MIB's external vocabulary.
lucentPM4SnmpCommReadAccess = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 8, 1, 1, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("disable", 1), ("ensable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: lucentPM4SnmpCommReadAccess.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4SnmpCommReadAccess.setDescription('Read access enable or disable for this community. When enabled, it allows read-only variable access using this community string by the SNMP client.')
lucentPM4SnmpCommWriteAccess = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 8, 1, 1, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("disable", 1), ("ensable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: lucentPM4SnmpCommWriteAccess.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4SnmpCommWriteAccess.setDescription('Write access enable or disable for this community. When enabled, the agent allows write access to the parameters on the agent by the SNMP clients.')
lucentPM4SnmpCommTraps = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 8, 1, 1, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("disable", 1), ("ensable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: lucentPM4SnmpCommTraps.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4SnmpCommTraps.setDescription('Traps receiving capability enable or disable for this community. When enabled, the SNMP agent forwards the traps generated in the box to this SNMP client.')
# Row status: setting delete(2) discards requests from this client.
lucentPM4SnmpCommStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 8, 1, 1, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("normal", 1), ("delete", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: lucentPM4SnmpCommStatus.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4SnmpCommStatus.setDescription('The status of the entry for this community. If the status is set to normal, it allows requests from this SNMP client else it discards the requests from this client.')
# Free-text last-error message for this community's most recent request.
lucentPM4SnmpCommLastError = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 8, 1, 1, 1, 8), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 511))).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4SnmpCommLastError.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4SnmpCommLastError.setDescription('If there is an error on a request, this variable may contain a message indicating the error.')
# --- Accounting: call event table (billing records), indexed by event index ---
lucentPM4AcctMgmtCallEvent = MibIdentifier((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 8, 2))
lucentPM4AMCallEventTable = MibTable((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 8, 2, 1), )
if mibBuilder.loadTexts: lucentPM4AMCallEventTable.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4AMCallEventTable.setDescription('Call accounting table containing a list of call events, which may be used for billing purposes.')
lucentPM4AMCallEventEntry = MibTableRow((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 8, 2, 1, 1), ).setIndexNames((0, "LIVINGSTON-PM4-MIB", "lucentPM4AMCEIndex"))
if mibBuilder.loadTexts: lucentPM4AMCallEventEntry.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4AMCallEventEntry.setDescription('The entries in the accounting/billing table.')
lucentPM4AMCEIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 8, 2, 1, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4AMCEIndex.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4AMCEIndex.setDescription('Call event index used as an index into the call event table. The table stores call events which may be used for billing.')
# Event timestamp: seconds since last reboot (not wall-clock time).
lucentPM4AMCETimeStamp = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 8, 2, 1, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4AMCETimeStamp.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4AMCETimeStamp.setDescription('Time stamp for this event in seconds since the last reboot.')
# Call-event detail columns: event type, service, user, modem identity,
# party IDs, traffic octets and call charge.  All read-only.
lucentPM4AMCEType = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 8, 2, 1, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))).clone(namedValues=NamedValues(("calloriginated", 1), ("callanswered", 2), ("callcleared", 3), ("servicechanged", 4), ("namechanged", 5), ("baudratechanged", 6)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4AMCEType.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4AMCEType.setDescription('Specifies the type of event associated with this entry in the call event table.')
# Service type is meaningful only for servicechanged/namechanged events;
# otherwise none(1) per the description below.
lucentPM4AMCESvcType = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 8, 2, 1, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16))).clone(namedValues=NamedValues(("none", 1), ("ppp", 2), ("slip", 3), ("mpp", 4), ("x25", 5), ("combinet", 6), ("frameRelay", 7), ("euraw", 8), ("euui", 9), ("telnet", 10), ("telnetBinary", 11), ("rawTcp", 12), ("terminalServer", 13), ("mp", 14), ("virtualConnect", 15), ("x25DChannel", 16)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4AMCESvcType.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4AMCESvcType.setDescription('The type of service provided to the user. This field is meaningful if the event type is servicechanged(4), or namechanged(5) events. In all other cases, this object must return none(1).')
lucentPM4AMCEUName = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 8, 2, 1, 1, 5), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4AMCEUName.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4AMCEUName.setDescription('User name of the dialed in user. This object returns the valid user name when the event type is servicechanged(4) or namechanged(5). In all other cases, it returns a NULL.')
lucentPM4AMCEModemBoard = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 8, 2, 1, 1, 6), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4AMCEModemBoard.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4AMCEModemBoard.setDescription('Board ID of the modem which handled this call. This value can be used to diagnose modem related problems (dropping the call, retraining too frequently etc.).')
lucentPM4AMCEModemID = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 8, 2, 1, 1, 7), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4AMCEModemID.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4AMCEModemID.setDescription('Identifies the specific modem on a board which handled this call. Can be used to diagnose modem related problems.')
lucentPM4AMCEModemPort = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 8, 2, 1, 1, 8), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4AMCEModemPort.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4AMCEModemPort.setDescription('A textual string containing the name of the serial interface (ie. S0, S1, etc).')
lucentPM4AMCEDataRate = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 8, 2, 1, 1, 9), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4AMCEDataRate.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4AMCEDataRate.setDescription('Specifies the speed of this connection. Speed is specified as baud rate for modem calls and receive data rate for ISDN calls. This object returns a 0 for call answered and call cleared events.')
lucentPM4AMCECallingPartyID = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 8, 2, 1, 1, 10), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4AMCECallingPartyID.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4AMCECallingPartyID.setDescription('Calling party ID. This object is valid only for call answered, call originated, and call cleared events. For all invalid event types, this object is set to NULL.')
lucentPM4AMCECalledPartyID = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 8, 2, 1, 1, 11), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4AMCECalledPartyID.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4AMCECalledPartyID.setDescription('Called party ID. This object is valid only for call answered, call originated, and call cleared events. For all invalid event types, this object is set to NULL.')
# Per-call octet counters, cleared at the end of each call.
lucentPM4AMCEInOctets = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 8, 2, 1, 1, 12), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4AMCEInOctets.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4AMCEInOctets.setDescription('Total octets received during this call. This object is cleared at the end of each call.')
lucentPM4AMCEOutOctets = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 8, 2, 1, 1, 13), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4AMCEOutOctets.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4AMCEOutOctets.setDescription('Total octets sent out during this call. This object is cleared at the end of each call.')
lucentPM4AMCECallCharge = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 8, 2, 1, 1, 14), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4AMCECallCharge.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4AMCECallCharge.setDescription('Call charge for this call. This object is valid only when the event is call cleared. For all other events this object is set to zero.')
# Disconnect-reason enumeration (66 named values) for the call event.
# Generated verbatim from the MIB; the value set is sparse, hence the long
# SingleValueConstraint list matching the NamedValues below.
lucentPM4AMCEDisconnReason = MibTableColumn((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 1, 8, 2, 1, 1, 15), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 9, 10, 11, 12, 13, 14, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 35, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 60, 61, 62, 63, 64, 65, 66, 67, 68, 100, 101, 102, 120, 150, 151, 152, 160, 170, 180, 185, 190, 195, 201, 210))).clone(namedValues=NamedValues(("notApplicable", 1), ("unknown", 2), ("disconnected", 3), ("clidAuthFailed", 4), ("clidAuthServTimeout", 5), ("clidAuthRequestCallback", 6), ("preT310Timeout", 7), ("noModemAvailable", 9), ("noModemNoCarrier", 10), ("noModemLossCarrier", 11), ("noModemResultCodes", 12), ("noModemOpenFailed", 13), ("noModemOpenFailedDiag", 14), ("tsUserExit", 20), ("tsIdleTimeout", 21), ("tsExitTelnet", 22), ("tsNoIPAddr", 23), ("tsExitTcp", 24), ("tsPassWordFail", 25), ("tsRawTCPDisable", 26), ("tsControlC", 27), ("tsDestroyed", 28), ("tsClosedVirtualConnect", 29), ("tsVirtualConnectDestroyed", 30), ("tsExitRlogin", 31), ("tsRloginBadOption", 32), ("tsErrorResource", 33), ("mpNullMessageTimeout", 35), ("pppLcpTimeout", 40), ("pppLcpNegotiateFail", 41), ("pppPAPAuthFail", 42), ("pppCHAPAuthFail", 43), ("pppRemoteAuthFail", 44), ("pppRcvTerminate", 45), ("pppCloseEvent", 46), ("pppCloseNoNcpsOpened", 47), ("pppCloseUnknownMpBundle", 48), ("pppCloseMpAddChanFail", 49), ("tsExitErrTooMany", 50), ("tsExitErrResource", 51), ("tsExitErrInvalidIP", 52), ("tsExitErrHostName", 53), ("tsExitErrBadPort", 54), ("tsExitErrHostReset", 60), ("tsExitErrConnRefused", 61), ("tsExitErrTimedOut", 62), ("tsExitErrClosed", 63), ("tsExitErrNetUnreach", 64), ("tsExitErrHostUnreach", 65), ("tsExitErrNetAdminUnreach", 66), ("tsExitErrHostAdminUnreach", 67), ("tsExitErrPortUnreach", 68), ("sessTimeOut", 100), ("sessFailSecurity", 101), ("sessCallback", 102), ("invalidProtocol", 120), ("requestByRadiusClient", 150), ("localAdmin", 151), 
("localSnmp", 152), ("v110Timeout", 160), ("pppAuthTimeout", 170), ("userCallClearRequest", 180), ("remoteEndHungUp", 185), ("resourceQuiesced", 190), ("maxCallDurationReached", 195), ("lowMemory", 201), ("boardDied", 210)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: lucentPM4AMCEDisconnReason.setStatus('mandatory')
if mibBuilder.loadTexts: lucentPM4AMCEDisconnReason.setDescription('Reason for the disconnect.')
# --- Trap (NotificationType) definitions, sub-ids 1..16 under ...2.1.1.2.2 ---
# Each trap binds sysName plus the fault-management unit/board/status
# variables so a manager can identify the affected component.
lucentPM4BoardOfflineTrap = NotificationType((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 2) + (0,1)).setObjects(("SNMPv2-MIB", "sysName"), ("LIVINGSTON-PM4-MIB", "lucentPM4FMBoardIndex"), ("LIVINGSTON-PM4-MIB", "lucentPM4FMUnitType"), ("LIVINGSTON-PM4-MIB", "lucentPM4FMUnitTrapStatus"))
if mibBuilder.loadTexts: lucentPM4BoardOfflineTrap.setDescription('Board down trap. The variable bindings in the Trap packet provide information about the chassis name, board number and the trap status. This Trap must be cleared manually.')
lucentPM4BoardOnlineTrap = NotificationType((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 2) + (0,2)).setObjects(("SNMPv2-MIB", "sysName"), ("LIVINGSTON-PM4-MIB", "lucentPM4FMBoardIndex"), ("LIVINGSTON-PM4-MIB", "lucentPM4FMUnitType"), ("LIVINGSTON-PM4-MIB", "lucentPM4FMUnitTrapStatus"))
if mibBuilder.loadTexts: lucentPM4BoardOnlineTrap.setDescription('Board up trap. The variable bindings in the Trap packet provide information about the chassis name, board number and the trap status. This Trap must be cleared manually.')
# Power supply traps (fail / warning / restored) bind unit type+index.
lucentPM4PwrSupFailTrap = NotificationType((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 2) + (0,3)).setObjects(("SNMPv2-MIB", "sysName"), ("LIVINGSTON-PM4-MIB", "lucentPM4FMUnitType"), ("LIVINGSTON-PM4-MIB", "lucentPM4FMUnitIndex"), ("LIVINGSTON-PM4-MIB", "lucentPM4FMUnitTrapStatus"))
if mibBuilder.loadTexts: lucentPM4PwrSupFailTrap.setDescription('Power supply failed trap. The variable bindings in the Trap packet provide information about the chassis name, power supply unit and the trap status. This Trap must be cleared manually.')
lucentPM4PwrSupWarnTrap = NotificationType((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 2) + (0,4)).setObjects(("SNMPv2-MIB", "sysName"), ("LIVINGSTON-PM4-MIB", "lucentPM4FMUnitType"), ("LIVINGSTON-PM4-MIB", "lucentPM4FMUnitIndex"), ("LIVINGSTON-PM4-MIB", "lucentPM4FMUnitTrapStatus"))
if mibBuilder.loadTexts: lucentPM4PwrSupWarnTrap.setDescription('Power supply warning trap. The variable bindings in the Trap packet provide information about the chassis name, power supply unit and the trap status. This Trap is issued when the power supply fluctuates between a set threshold. This Trap must be cleared manually.')
lucentPM4PwrSupRestoredTrap = NotificationType((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 2) + (0,5)).setObjects(("SNMPv2-MIB", "sysName"), ("LIVINGSTON-PM4-MIB", "lucentPM4FMUnitType"), ("LIVINGSTON-PM4-MIB", "lucentPM4FMUnitIndex"), ("LIVINGSTON-PM4-MIB", "lucentPM4FMUnitTrapStatus"))
if mibBuilder.loadTexts: lucentPM4PwrSupRestoredTrap.setDescription('Power supply restored trap. The variable bindings in the Trap packet provide information about the chassis name, power supply unit and the trap status. This Trap is issued when a failed power supply is restored. This must be cleared manually.')
# Fan traps.
lucentPM4FanFailTrap = NotificationType((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 2) + (0,6)).setObjects(("SNMPv2-MIB", "sysName"), ("LIVINGSTON-PM4-MIB", "lucentPM4FMUnitType"), ("LIVINGSTON-PM4-MIB", "lucentPM4FMUnitIndex"), ("LIVINGSTON-PM4-MIB", "lucentPM4FMUnitTrapStatus"))
if mibBuilder.loadTexts: lucentPM4FanFailTrap.setDescription('Fan failure trap. The variable bindings in the Trap packet provide information about the chassis name, fan number and the trap status. This Trap must be cleared manually.')
lucentPM4FanRestoredTrap = NotificationType((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 2) + (0,7)).setObjects(("SNMPv2-MIB", "sysName"), ("LIVINGSTON-PM4-MIB", "lucentPM4FMUnitType"), ("LIVINGSTON-PM4-MIB", "lucentPM4FMUnitIndex"), ("LIVINGSTON-PM4-MIB", "lucentPM4FMUnitTrapStatus"))
if mibBuilder.loadTexts: lucentPM4FanRestoredTrap.setDescription('Fan restored trap. The variable bindings in the Trap packet provide information about the chassis name, fan number and the trap status. This Trap is issued when the failed fan is restored. This trap must be cleared manually.')
# Board temperature traps.
lucentPM4BoardTempWarnTrap = NotificationType((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 2) + (0,8)).setObjects(("SNMPv2-MIB", "sysName"), ("LIVINGSTON-PM4-MIB", "lucentPM4FMBoardIndex"), ("LIVINGSTON-PM4-MIB", "lucentPM4FMUnitType"), ("LIVINGSTON-PM4-MIB", "lucentPM4FMUnitTrapStatus"))
if mibBuilder.loadTexts: lucentPM4BoardTempWarnTrap.setDescription('Board temperature warning trap. The variable bindings in the Trap packet provide information about the chassis name, unit and the trap status. This Trap is issued when the board temperature exceeds a set threshold value. This trap must be cleared manually.')
lucentPM4BoardTempNormalTrap = NotificationType((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 2) + (0,9)).setObjects(("SNMPv2-MIB", "sysName"), ("LIVINGSTON-PM4-MIB", "lucentPM4FMBoardIndex"), ("LIVINGSTON-PM4-MIB", "lucentPM4FMUnitType"), ("LIVINGSTON-PM4-MIB", "lucentPM4FMUnitTrapStatus"))
if mibBuilder.loadTexts: lucentPM4BoardTempNormalTrap.setDescription('Board temperature normal trap. The variable bindings in the Trap packet provide information about the chassis name, unit and the trap status. This Trap is issued when the board temperature returns to normal. This trap must be cleared manually.')
lucentPM4BoardTooHotTrap = NotificationType((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 2) + (0,10)).setObjects(("SNMPv2-MIB", "sysName"), ("LIVINGSTON-PM4-MIB", "lucentPM4FMBoardIndex"), ("LIVINGSTON-PM4-MIB", "lucentPM4FMUnitType"), ("LIVINGSTON-PM4-MIB", "lucentPM4FMUnitTrapStatus"))
if mibBuilder.loadTexts: lucentPM4BoardTooHotTrap.setDescription('Board trap. The variable bindings in the Trap packet provide information about the chassis name, board number and the trap status. This Trap must be cleared manually.')
# Modem and T1/E1 line traps additionally bind the unit index.
lucentPM4ModemFailTrap = NotificationType((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 2) + (0,11)).setObjects(("SNMPv2-MIB", "sysName"), ("LIVINGSTON-PM4-MIB", "lucentPM4FMBoardIndex"), ("LIVINGSTON-PM4-MIB", "lucentPM4FMUnitType"), ("LIVINGSTON-PM4-MIB", "lucentPM4FMUnitIndex"), ("LIVINGSTON-PM4-MIB", "lucentPM4FMUnitTrapStatus"))
if mibBuilder.loadTexts: lucentPM4ModemFailTrap.setDescription('Modem failure trap. The variable bindings in the Trap packet provide information about the chassis name, modem number and the trap status. This Trap must be cleared manually.')
lucentPM4T1E1LineDownTrap = NotificationType((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 2) + (0,12)).setObjects(("SNMPv2-MIB", "sysName"), ("LIVINGSTON-PM4-MIB", "lucentPM4FMBoardIndex"), ("LIVINGSTON-PM4-MIB", "lucentPM4FMUnitType"), ("LIVINGSTON-PM4-MIB", "lucentPM4FMUnitIndex"), ("LIVINGSTON-PM4-MIB", "lucentPM4FMUnitTrapStatus"))
if mibBuilder.loadTexts: lucentPM4T1E1LineDownTrap.setDescription('T1/E1 Line trap. The variable bindings in the Trap packet provide all the information for the clients to display the Board ID, Line ID and the status of the line. This Trap could be generated when the line comes up or goes down once. It must be cleared manually.')
lucentPM4T1E1LineUpTrap = NotificationType((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 2) + (0,13)).setObjects(("SNMPv2-MIB", "sysName"), ("LIVINGSTON-PM4-MIB", "lucentPM4FMBoardIndex"), ("LIVINGSTON-PM4-MIB", "lucentPM4FMUnitType"), ("LIVINGSTON-PM4-MIB", "lucentPM4FMUnitIndex"), ("LIVINGSTON-PM4-MIB", "lucentPM4FMUnitTrapStatus"))
if mibBuilder.loadTexts: lucentPM4T1E1LineUpTrap.setDescription('T1/E1 Line trap. The variable bindings in the Trap packet provide all the information for the clients to display the Board ID, Line ID and the status of the line. This Trap could be generated when the line comes up or goes down once. It must be cleared manually.')
lucentPM4T1E1LineThreshTrap = NotificationType((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 2) + (0,14)).setObjects(("SNMPv2-MIB", "sysName"), ("LIVINGSTON-PM4-MIB", "lucentPM4FMBoardIndex"), ("LIVINGSTON-PM4-MIB", "lucentPM4FMUnitType"), ("LIVINGSTON-PM4-MIB", "lucentPM4FMUnitIndex"), ("LIVINGSTON-PM4-MIB", "lucentPM4FMUnitTrapStatus"))
if mibBuilder.loadTexts: lucentPM4T1E1LineThreshTrap.setDescription('T1/E1 Line trap. The variable bindings in the Trap packet provide all the information for the clients to display the Board ID, Line ID and the trap type. This Trap could be generated when the thresholds for the various performance statistics (ES, SES etc.) exceed. It must be cleared manually.')
lucentPM4BoardPwrOffTrap = NotificationType((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 2) + (0,15)).setObjects(("SNMPv2-MIB", "sysName"), ("LIVINGSTON-PM4-MIB", "lucentPM4FMBoardIndex"), ("LIVINGSTON-PM4-MIB", "lucentPM4FMUnitType"), ("LIVINGSTON-PM4-MIB", "lucentPM4FMUnitTrapStatus"))
if mibBuilder.loadTexts: lucentPM4BoardPwrOffTrap.setDescription('This trap is issued when the power supply to the board is not enough. The variable bindings in the Trap packet provide information about the chassis name, board/board number and the trap status. This Trap must be cleared manually.')
lucentPM4RadiusAuthFailTrap = NotificationType((1, 3, 6, 1, 4, 1, 307, 1, 1, 2, 2) + (0,16)).setObjects(("SNMPv2-MIB", "sysName"), ("LIVINGSTON-PM4-MIB", "lucentPM4FMBoardIndex"), ("LIVINGSTON-PM4-MIB", "lucentPM4FMUnitIndex"))
if mibBuilder.loadTexts: lucentPM4RadiusAuthFailTrap.setDescription('This trap is issued when the Radius authentication fails. This Trap must be cleared manually. The trap provides information about the board and the modem number.')
mibBuilder.exportSymbols("LIVINGSTON-PM4-MIB", lucentPM4FaultMgmt=lucentPM4FaultMgmt, lucentPM4SerialUser=lucentPM4SerialUser, lucentPM4T1E1ChangeTime=lucentPM4T1E1ChangeTime, lucentPM4EtherOptNoClip=lucentPM4EtherOptNoClip, lucentPM4EtherOutFilter=lucentPM4EtherOutFilter, lucentPM4FMEnvOptUnitPwrLvl=lucentPM4FMEnvOptUnitPwrLvl, lucentPM4T1E1PMIntLESs=lucentPM4T1E1PMIntLESs, lucentPM4ModemUnitType=lucentPM4ModemUnitType, PMAlarmType=PMAlarmType, lucentPM4SerialIndex=lucentPM4SerialIndex, lucentPM4SerialQOctets=lucentPM4SerialQOctets, lucentPM4ModemIndex=lucentPM4ModemIndex, lucentPM4SerialPortNumber=lucentPM4SerialPortNumber, lucentPM4SerialTypeDevice=lucentPM4SerialTypeDevice, lucentPM4EtherMacAddress=lucentPM4EtherMacAddress, lucentPM4EtherOptSlip=lucentPM4EtherOptSlip, lucentPM4FMEnvUnitPwrRange=lucentPM4FMEnvUnitPwrRange, lucentPM4T1E1PMCurUASs=lucentPM4T1E1PMCurUASs, lucentPM4CmT1E1=lucentPM4CmT1E1, lucentPM4EtherOptNetBIOS=lucentPM4EtherOptNetBIOS, lucentPM4EtherSubnetMask=lucentPM4EtherSubnetMask, lucentPM4EtherOptAccounting=lucentPM4EtherOptAccounting, lucentPM4SerialInSpeed=lucentPM4SerialInSpeed, lucentPM4T1E1Encoding=lucentPM4T1E1Encoding, lucentPM4Traps=lucentPM4Traps, lucentPM4FMT1E1ThreshTrapAck=lucentPM4FMT1E1ThreshTrapAck, lucentPM4T1E1PMTotal=lucentPM4T1E1PMTotal, lucentPM4Chassis=lucentPM4Chassis, lucentPM4T1E1PMCurDMs=lucentPM4T1E1PMCurDMs, lucentPM4FMChasTrapState=lucentPM4FMChasTrapState, lucentPM4AMCECallCharge=lucentPM4AMCECallCharge, lucentPM4SerialStarted=lucentPM4SerialStarted, PMUnitType=PMUnitType, lucentPM4T1E1StartMode=lucentPM4T1E1StartMode, lucentPM4FMUnitTrapSeverity=lucentPM4FMUnitTrapSeverity, lucentPM4T1E1PMCurCSSs=lucentPM4T1E1PMCurCSSs, lucentPM4T1E1BlueAlarms=lucentPM4T1E1BlueAlarms, lucentPM4ChasCmdId=lucentPM4ChasCmdId, lucentPM4BoardTempWarnTrap=lucentPM4BoardTempWarnTrap, lucentPM4T1E1Status=lucentPM4T1E1Status, lucentPM4FMUnitTrapStatus=lucentPM4FMUnitTrapStatus, lucentPM4FMT1E1ThreshTrapCfg=lucentPM4FMT1E1ThreshTrapCfg, 
lucentPM4FMT1E1ThreshPCVs=lucentPM4FMT1E1ThreshPCVs, lucentPM4FMEnvBoardID=lucentPM4FMEnvBoardID, lucentPM4FMChasTrapEntry=lucentPM4FMChasTrapEntry, lucentPM4FMT1E1ThreshBESs=lucentPM4FMT1E1ThreshBESs, lucentPM4EtherOptDns=lucentPM4EtherOptDns, lucentPM4FMT1E1ThreshSEFSs=lucentPM4FMT1E1ThreshSEFSs, lucentPM4T1E1PMIntIfIndex=lucentPM4T1E1PMIntIfIndex, lucentPM4FMEnvUnitIndex=lucentPM4FMEnvUnitIndex, lucentPM4ModemCalls=lucentPM4ModemCalls, lucentPM4AMCEIndex=lucentPM4AMCEIndex, lucentPM4SerialTypeLogin=lucentPM4SerialTypeLogin, lucentPM4FMTrapConfig=lucentPM4FMTrapConfig, lucentPM4T1E1LineDownTrap=lucentPM4T1E1LineDownTrap, lucentPM4SerialTypeNwDialIn=lucentPM4SerialTypeNwDialIn, lucentPM4FMT1E1ThreshDMs=lucentPM4FMT1E1ThreshDMs, lucentPM4T1E1PMTotalCSSs=lucentPM4T1E1PMTotalCSSs, lucentPM4FMT1E1ThreshCSSs=lucentPM4FMT1E1ThreshCSSs, lucentPM4MibRev=lucentPM4MibRev, lucentPM4T1E1PMCurSEFSs=lucentPM4T1E1PMCurSEFSs, lucentPM4SnmpCommTraps=lucentPM4SnmpCommTraps, lucentPM4AMCEModemBoard=lucentPM4AMCEModemBoard, lucentPM4T1E1PMCurESs=lucentPM4T1E1PMCurESs, lucentPM4FMT1E1ThreshESs=lucentPM4FMT1E1ThreshESs, lucentPM4ModemRetrains=lucentPM4ModemRetrains, lucentPM4SerialSessionId=lucentPM4SerialSessionId, lucentPM4SerialEntry=lucentPM4SerialEntry, lucentPM4ChasCmdResult=lucentPM4ChasCmdResult, lucentPM4EtherOptBcastHigh=lucentPM4EtherOptBcastHigh, lucentPM4ChasCmdDevId=lucentPM4ChasCmdDevId, lucentPM4T1E1SuperSignal=lucentPM4T1E1SuperSignal, PMDiagCmdStatus=PMDiagCmdStatus, lucentPM4CmInterfaces=lucentPM4CmInterfaces, lucentPM4T1E1PMCurBESs=lucentPM4T1E1PMCurBESs, lucentPM4SnmpCommIpAddr=lucentPM4SnmpCommIpAddr, lucentPM4AMCETimeStamp=lucentPM4AMCETimeStamp, lucentPM4PerfMgmt=lucentPM4PerfMgmt, PMDiagTestCntrl=PMDiagTestCntrl, lucentPM4T1E1PMTotalBoard=lucentPM4T1E1PMTotalBoard, lucentPM4SnmpCommReadAccess=lucentPM4SnmpCommReadAccess, lucentPM4ModemPortName=lucentPM4ModemPortName, lucentPM4AcctMgmt=lucentPM4AcctMgmt, lucentPM4T1E1PMTotalEntry=lucentPM4T1E1PMTotalEntry, 
lucentPM4SerialUnitType=lucentPM4SerialUnitType, lucentPM4ChasCmdTable=lucentPM4ChasCmdTable, lucentPM4ModemOutSpeed=lucentPM4ModemOutSpeed, lucentPM4FMEnvTrapCfg=lucentPM4FMEnvTrapCfg, lucentPM4SerialTypeHardwired=lucentPM4SerialTypeHardwired, lucentPM4FMT1E1ThreshSESs=lucentPM4FMT1E1ThreshSESs, lucentPM4BoardOfflineTrap=lucentPM4BoardOfflineTrap, lucentPM4T1E1PMIntPCVs=lucentPM4T1E1PMIntPCVs, lucentPM4FMEqpTrapCfg=lucentPM4FMEqpTrapCfg, lucentPM4T1E1UnitType=lucentPM4T1E1UnitType, lucentPM4SnmpCommStatus=lucentPM4SnmpCommStatus, lucentPM4CmSerial=lucentPM4CmSerial, lucentPM4T1E1BipolarErrors=lucentPM4T1E1BipolarErrors, lucentPM4ChasCmdUnitType=lucentPM4ChasCmdUnitType, lucentPM4T1E1PMInt=lucentPM4T1E1PMInt, lucentPM4ModemStatus=lucentPM4ModemStatus, lucentPM4ChasCmdParams=lucentPM4ChasCmdParams, lucentPM4AMCallEventEntry=lucentPM4AMCallEventEntry, lucentPM4ChasCmdIndex=lucentPM4ChasCmdIndex, lucentPM4EtherOptNoPAP=lucentPM4EtherOptNoPAP, lucentPM4FMEqpTrapCfgEntry=lucentPM4FMEqpTrapCfgEntry, lucentPM4T1E1PMIntCSSs=lucentPM4T1E1PMIntCSSs, lucentPM4T1E1PMTotalUASs=lucentPM4T1E1PMTotalUASs, lucentPM4EtherBoardIndex=lucentPM4EtherBoardIndex, lucentPM4T1E1PMIntESs=lucentPM4T1E1PMIntESs, lucentPM4EtherOptNoListen=lucentPM4EtherOptNoListen, lucentPM4ModemFailTrap=lucentPM4ModemFailTrap, lucentPM4FMChasTrapSeverity=lucentPM4FMChasTrapSeverity, lucentPM4FMEqpUnitIndex=lucentPM4FMEqpUnitIndex, lucentPM4EtherPriNameServer=lucentPM4EtherPriNameServer, lucentPM4SerialIpAddress=lucentPM4SerialIpAddress, lucentPM4SerialBoardIndex=lucentPM4SerialBoardIndex, lucentPM4FMEnvUnitType=lucentPM4FMEnvUnitType, lucentPM4AMCEModemID=lucentPM4AMCEModemID, lucentPM4T1E1PMIntLCVs=lucentPM4T1E1PMIntLCVs, lucentPM4SecurityMgmt=lucentPM4SecurityMgmt, lucentPM4T1E1PMIntUASs=lucentPM4T1E1PMIntUASs, lucentPM4T1E1PMIntBESs=lucentPM4T1E1PMIntBESs, lucentPM4T1E1PMTotalSESs=lucentPM4T1E1PMTotalSESs, lucentPM4T1E1PMCurPCVs=lucentPM4T1E1PMCurPCVs, 
lucentPM4T1E1PMTotalLineNum=lucentPM4T1E1PMTotalLineNum, lucentPM4AMCEDisconnReason=lucentPM4AMCEDisconnReason, lucentPM4T1E1PMCurIfIndex=lucentPM4T1E1PMCurIfIndex, lucentPM4SnmpCommWriteAccess=lucentPM4SnmpCommWriteAccess, lucentPM4FMEnvUnitTempRange=lucentPM4FMEnvUnitTempRange, lucentPM4FMT1E1ThreshUnitType=lucentPM4FMT1E1ThreshUnitType, lucentPM4EtherOptDefaultRip=lucentPM4EtherOptDefaultRip, lucentPM4AMCEUName=lucentPM4AMCEUName, lucentPM4FMBoardIndex=lucentPM4FMBoardIndex, lucentPM4BoardOnlineTrap=lucentPM4BoardOnlineTrap, lucentPM4T1E1SerialCount=lucentPM4T1E1SerialCount, lucentPM4AMCECallingPartyID=lucentPM4AMCECallingPartyID, lucentPM4SerialOutSpeed=lucentPM4SerialOutSpeed, lucentPM4AMCEOutOctets=lucentPM4AMCEOutOctets, lucentPM4T1E1Table=lucentPM4T1E1Table, lucentPM4ModemTable=lucentPM4ModemTable, lucentPM4EtherTable=lucentPM4EtherTable, lucentPM4T1E1LineUpTrap=lucentPM4T1E1LineUpTrap, lucentPM4EtherOptIPFilter=lucentPM4EtherOptIPFilter, lucentPM4EtherOptEtherIpx=lucentPM4EtherOptEtherIpx, lucentPM4AMCESvcType=lucentPM4AMCESvcType, lucentPM4T1E1PMIntSESs=lucentPM4T1E1PMIntSESs, lucentPM4T1E1PMIntDMs=lucentPM4T1E1PMIntDMs, lucentPM4T1E1SyncErrors=lucentPM4T1E1SyncErrors, lucentPM4T1E1PMTotalUnitType=lucentPM4T1E1PMTotalUnitType, lucentPM4T1E1PMTotalBESs=lucentPM4T1E1PMTotalBESs, lucentPM4T1E1PMCurSESs=lucentPM4T1E1PMCurSESs, lucentPM4T1E1PMTotalPCVs=lucentPM4T1E1PMTotalPCVs, lucentPM4T1E1PMIntUnitType=lucentPM4T1E1PMIntUnitType, lucentPM4EtherIfType=lucentPM4EtherIfType, lucentPM4AMCEModemPort=lucentPM4AMCEModemPort, PMEquipPRIStatus=PMEquipPRIStatus, lucentPM4CmModem=lucentPM4CmModem, lucentPM4ModemConnects=lucentPM4ModemConnects, lucentPM4SerialIdle=lucentPM4SerialIdle, lucentPM4PwrSupRestoredTrap=lucentPM4PwrSupRestoredTrap, lucentPM4AMCEType=lucentPM4AMCEType, lucentPM4AcctMgmtComm=lucentPM4AcctMgmtComm, lucentPM4EtherOptDefaultListen=lucentPM4EtherOptDefaultListen, lucentPM4EtherOptPmeMsg=lucentPM4EtherOptPmeMsg, 
lucentPM4T1E1PMCurEntry=lucentPM4T1E1PMCurEntry, lucentPM4EtherIndex=lucentPM4EtherIndex, lucentPM4ModemBoardIndex=lucentPM4ModemBoardIndex, lucentPM4FMChasTrapIndex=lucentPM4FMChasTrapIndex, lucentPM4T1E1PMTotalSEFSs=lucentPM4T1E1PMTotalSEFSs, lucentPM4FMChasTrapStatus=lucentPM4FMChasTrapStatus, lucentPM4FMEqpRepTimer=lucentPM4FMEqpRepTimer, lucentPM4FMT1E1ThreshLESs=lucentPM4FMT1E1ThreshLESs, lucentPM4T1E1SyncLoss=lucentPM4T1E1SyncLoss, lucentPM4T1E1YellowAlarms=lucentPM4T1E1YellowAlarms, lucentPM4FanFailTrap=lucentPM4FanFailTrap, lucentPM4FMT1E1ThreshTrapCfgEntry=lucentPM4FMT1E1ThreshTrapCfgEntry, lucentPM4SnmpCommName=lucentPM4SnmpCommName, lucentPM4T1E1PMIntSEFSs=lucentPM4T1E1PMIntSEFSs, lucentPM4ChasSummary=lucentPM4ChasSummary, lucentPM4T1E1PhysType=lucentPM4T1E1PhysType, lucentPM4EtherPortName=lucentPM4EtherPortName, lucentPM4T1E1PMCurLESs=lucentPM4T1E1PMCurLESs, lucentPM4ChasCmdEntry=lucentPM4ChasCmdEntry, lucentPM4FMEqpBoardIndex=lucentPM4FMEqpBoardIndex, lucentPM4T1E1RecvLevel=lucentPM4T1E1RecvLevel, lucentPM4SnmpCommIndex=lucentPM4SnmpCommIndex, lucentPM4FMChasTrapTimeStamp=lucentPM4FMChasTrapTimeStamp, lucentPM4AMCEDataRate=lucentPM4AMCEDataRate, lucentPM4T1E1PCM=lucentPM4T1E1PCM, lucentPM4EtherEntry=lucentPM4EtherEntry, lucentPM4ConfigMgmt=lucentPM4ConfigMgmt, lucentPM4FMEnvTrapCfgEntry=lucentPM4FMEnvTrapCfgEntry, lucentPM4FMChasTrapUnitIndex=lucentPM4FMChasTrapUnitIndex, lucentPM4FMEqpUnitType=lucentPM4FMEqpUnitType, lucentPM4SerialPhysType=lucentPM4SerialPhysType, lucentPM4FMChasTrapUnitType=lucentPM4FMChasTrapUnitType, lucentPM4FMEnvOptUnitTemp=lucentPM4FMEnvOptUnitTemp, lucentPM4T1E1LineThreshTrap=lucentPM4T1E1LineThreshTrap, lucentPM4FaultMgmtChasTrap=lucentPM4FaultMgmtChasTrap, lucentPM4ModemId=lucentPM4ModemId, lucentPM4CmEther=lucentPM4CmEther, lucentPM4T1E1CarrierLoss=lucentPM4T1E1CarrierLoss, lucentPM4T1E1Framing=lucentPM4T1E1Framing, lucentPM4T1E1PMIntEntry=lucentPM4T1E1PMIntEntry, lucentPM4T1E1Index=lucentPM4T1E1Index, 
lucentPM4T1E1PMCur=lucentPM4T1E1PMCur, lucentPM4ModemInByteCount=lucentPM4ModemInByteCount, lucentPM4T1E1Entry=lucentPM4T1E1Entry, lucentPM4T1E1BoardIndex=lucentPM4T1E1BoardIndex, lucentPM4EtherOptSnmp=lucentPM4EtherOptSnmp, lucentPM4FMUnitType=lucentPM4FMUnitType, lucentPM4ModemRenegotiates=lucentPM4ModemRenegotiates, lucentPM4FMEnvTrapCtl=lucentPM4FMEnvTrapCtl, lucentPM4T1E1PMCurBoard=lucentPM4T1E1PMCurBoard, lucentPM4EtherAltNameServer=lucentPM4EtherAltNameServer, lucentPM4T1E1PMCurLineNum=lucentPM4T1E1PMCurLineNum, lucentPM4BoardTempNormalTrap=lucentPM4BoardTempNormalTrap, lucentPM4ModemCompression=lucentPM4ModemCompression, lucentPM4EtherInFilter=lucentPM4EtherInFilter, lucentPM4SerialOutOctets=lucentPM4SerialOutOctets, lucentPM4T1E1PMTotalIfIndex=lucentPM4T1E1PMTotalIfIndex, lucentPM4T1E1PMTotalLCVs=lucentPM4T1E1PMTotalLCVs, lucentPM4SerialDirection=lucentPM4SerialDirection, lucentPM4T1E1PMTotalLESs=lucentPM4T1E1PMTotalLESs, lucentPM4FanRestoredTrap=lucentPM4FanRestoredTrap, lucentPM4SWRev=lucentPM4SWRev, lucentPM4PwrSupFailTrap=lucentPM4PwrSupFailTrap, lucentPM4FMT1E1ThreshBoardIndex=lucentPM4FMT1E1ThreshBoardIndex, lucentPM4ModemOutByteCount=lucentPM4ModemOutByteCount, lucentPM4SerialifDescr=lucentPM4SerialifDescr, lucentPM4T1E1SerialIndex=lucentPM4T1E1SerialIndex, lucentPM4ChasCmdBoardId=lucentPM4ChasCmdBoardId, lucentPM4ModemEntry=lucentPM4ModemEntry, lucentPM4FMT1E1ThreshRepTimer=lucentPM4FMT1E1ThreshRepTimer, lucentPM4T1E1PMCurLCVs=lucentPM4T1E1PMCurLCVs, lucentPM4EtherOptRip=lucentPM4EtherOptRip, lucentPM4T1E1PMTotalESs=lucentPM4T1E1PMTotalESs, lucentPM4FMEqpTrapCtl=lucentPM4FMEqpTrapCtl, lucentPM4BoardPwrOffTrap=lucentPM4BoardPwrOffTrap, lucentPM4RadiusAuthFailTrap=lucentPM4RadiusAuthFailTrap, lucentPM4AMCallEventTable=lucentPM4AMCallEventTable, PMEquipStatus=PMEquipStatus, lucentPM4T1E1PMIntLineNum=lucentPM4T1E1PMIntLineNum, lucentPM4PwrSupWarnTrap=lucentPM4PwrSupWarnTrap, lucentPM4T1E1CRCErrors=lucentPM4T1E1CRCErrors, 
lucentPM4SerialInOctets=lucentPM4SerialInOctets, lucentPM4ModemDetects=lucentPM4ModemDetects, lucentPM4FaultMgmtIsolation=lucentPM4FaultMgmtIsolation, lucentPM4T1E1PMTotalDMs=lucentPM4T1E1PMTotalDMs, lucentPM4T1E1PMCurUnitType=lucentPM4T1E1PMCurUnitType, lucentPM4SnmpCommLastError=lucentPM4SnmpCommLastError, lucentPM4SerialTable=lucentPM4SerialTable, lucentPM4AMCEInOctets=lucentPM4AMCEInOctets, lucentPM4FMUnitIndex=lucentPM4FMUnitIndex)
mibBuilder.exportSymbols("LIVINGSTON-PM4-MIB", lucentPM4AcctMgmtCallEvent=lucentPM4AcctMgmtCallEvent, lucentPM4ModemInSpeed=lucentPM4ModemInSpeed, lucentPM4EtherIpAddress=lucentPM4EtherIpAddress, lucentPM4T1E1PerfMgmt=lucentPM4T1E1PerfMgmt, lucentPM4AMCECalledPartyID=lucentPM4AMCECalledPartyID, lucentPM4BoardTooHotTrap=lucentPM4BoardTooHotTrap, lucentPM4SerialTypeNwDialout=lucentPM4SerialTypeNwDialout, lucentPM4SnmpCommTable=lucentPM4SnmpCommTable, lucentPM4SerialDS0State=lucentPM4SerialDS0State, lucentPM4SerialPortStatus=lucentPM4SerialPortStatus, lucentPM4EtherIpGateway=lucentPM4EtherIpGateway, lucentPM4ChasCmdUnitIndex=lucentPM4ChasCmdUnitIndex, lucentPM4ModemProtocol=lucentPM4ModemProtocol, lucentPM4T1E1Function=lucentPM4T1E1Function, lucentPM4FMT1E1ThreshUASs=lucentPM4FMT1E1ThreshUASs, lucentPM4T1E1PMIntBoard=lucentPM4T1E1PMIntBoard, lucentPM4Mib=lucentPM4Mib, lucentPM4SerialTypeDeviceName=lucentPM4SerialTypeDeviceName, lucentPM4T1E1PMIntInterval=lucentPM4T1E1PMIntInterval, lucentPM4EtherIfIndex=lucentPM4EtherIfIndex, lucentPM4SnmpCommEntry=lucentPM4SnmpCommEntry, lucentPM4FMChasTrapBoardID=lucentPM4FMChasTrapBoardID, lucentPM4EtherOptEtherDown=lucentPM4EtherOptEtherDown, lucentPM4FMEqpTrapId=lucentPM4FMEqpTrapId)
| [
"dcwangmit01@gmail.com"
] | dcwangmit01@gmail.com |
38369a60ad247f19f2f3c5a6ca6ea3730df8b4ea | 58259290601e1a35bf2303743269f577f58ad4e7 | /audio.py | 68fa89b0f162463303272623c7f8d63025ffdf7f | [] | no_license | notGivingUp/PYTHON-DSP | 45b7063eefc486a63e461caed0b023f142a4991f | 9e3050176d059c34cb54f01001d28603b99ae8db | refs/heads/master | 2021-05-10T12:05:40.931158 | 2018-01-22T08:58:02 | 2018-01-22T08:58:02 | 118,430,939 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,181 | py | from tkinter import *
from tkinter.ttk import Frame, Button, Style, Label
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.figure import Figure
import numpy
import numpy as np
import wave
import contextlib
from scipy.io import wavfile
# import sounddevice as sd
class Example(Frame):
def __init__(self, parent):
Frame.__init__(self, parent)
self.parent = parent
self.canvas1 = None
self.initUI()
def initUI(self):
self.parent.title("Audio Signal")
self.style = Style()
self.style.theme_use("default")
self.pack(fill=BOTH, expand=True)
frame1 = Frame(self, relief = RAISED, borderwidth=1)
frame1.pack(fill=X)
browseButton=Button(frame1, text="Chọn file", command=self.browseFile)
browseButton.pack(side=LEFT, padx=5, pady=5)
self.label1 = Label(frame1)
self.label1.pack(side=LEFT)
frame2 = Frame(self, relief=RAISED, borderwidth=1)
frame2.pack(fill=X)
label1 = Label(frame2, text='Chiều dài frame (số mẫu)')
label1.pack(side=LEFT, padx = 20)
self.entry1 = Entry(frame2)
self.entry1.pack(side=LEFT)
frame3 = Frame(self, relief=RAISED, borderwidth=1)
frame3.pack(fill=X)
label2 = Label(frame3, text='Mức năng lượng')
label2.pack(side=LEFT, padx = 43)
self.entry2 = Entry(frame3)
self.entry2.pack(side=LEFT)
frame6 = Frame(self, relief=RAISED, borderwidth=1)
frame6.pack(fill=X)
label3 = Label(frame6, text='Ngưỡng thời gian(ms)')
label3.pack(side=LEFT, padx = 28)
self.entry3 = Entry(frame6)
self.entry3.pack(side=LEFT)
frame5 = Frame(self, relief=RAISED, borderwidth=1)
frame5.pack(fill=X)
label14 = Label(frame5, text='Số thứ tự âm được tách')
label14.pack(side=LEFT, padx=25)
self.entry4 = Entry(frame5)
self.entry4.pack(side=LEFT)
frame4 = Frame(self, relief=RAISED, borderwidth=1)
frame4.pack(fill=X)
self.pack(fill=BOTH, expand=True)
drawButton = Button(frame4, text="Vẽ đồ thị", command=self.draw_handle_file)
drawButton.pack(side=LEFT, padx=5, pady=5)
# drawButton2 = Button(frame4, text="F0", command=self.draw_f0)
# drawButton2.pack(side=LEFT, padx=5, pady=5)
drawButton3 = Button(frame4, text="Nghe âm cơ bản", command=self.listen)
drawButton3.pack(side=LEFT, padx=5, pady=5)
drawButton4 = Button(frame4,text="Nghe hamming", command=self.listen_hamming)
drawButton4.pack(side=LEFT, padx=5, pady=5)
self.frame5 = Frame(self, relief=RAISED, borderwidth=1)
self.frame5.pack(fill=BOTH)
self.pack(fill=BOTH, expand=True)
def browseFile(self):
from tkinter.filedialog import askopenfilename
self.filename = askopenfilename()
self.label1['text'] = self.filename
def draw_handle_file(self):
# global sound
if(self.canvas1 != None):
self.canvas1.get_tk_widget().destroy()
self.a = Figure()
self.canvas1 = FigureCanvasTkAgg(self.a, self.frame5)
self.b = self.a.add_subplot(131)
self.c = self.a.add_subplot(132)
self.d = self.a.add_subplot(133)
self.fs, data = wavfile.read(self.filename)
self.data = data
self.n = len(data)
self.frame_len= int(self.entry1.get())
self.i = int(self.entry4.get())
number_frame = np.floor(self.n / self.frame_len) + 1
data_add = [0 for i in range(int(number_frame * self.frame_len) - self.n)]
data = numpy.append(data, data_add)
# self.data = data
energy = int(self.entry2.get())
i = 0
no_signal = 1 # khong co tin hieu
y = [max(data), min(data)]
time_limit = int(self.entry3.get())
self.b.plot(data)
time = (self.b.get_xticks() * 1000 / self.fs)
time = (self.b.get_xticks()*1000/self.fs).astype(int)
self.b.set_xticklabels(time)
self.b.set_xlabel('T(ms)')
j = 0
k = 0
k1= 0
self.sound = []
#================đánh dấu khoảng lặng âm thanh=================
while i <= self.n - self.frame_len:
frame = data[int(i):int(i + self.frame_len - 1)]
nang_luong = np.mean(np.square(frame))
if (nang_luong > energy):
if (no_signal == 1):# nang luong > energy va truoc do k co tin hieu
# x = [i, i]
j = i
# self.b.plot(x, y, 'r') # ve mau do
no_signal = 0 #set trang thai co tin hieu
else:
if (no_signal == 0): # nang luong < energy va truoc do co tin hieu
# x = [i, i]
k = i
# self.b.plot(x, y, 'g') # ve mau xanh
no_signal = 1 # set ve trang thai k co tin hieu
if ((k-j > time_limit) and (k!=k1)):
x1 = [j, j]
x2 = [k, k]
# self.sound = [j, k]
self.sound = self.sound + [j, k]# sound lưu các điểm đánh dấu của âm thanh
self.b.plot(x1, y, 'r')
self.b.plot(x2, y, 'g')
k1 = k
i = i + self.frame_len / 2
#===xong===
#===========tìm tần số cơ bản f0 của âm thanh=============
# print(self.sound)
self.sound1 = data[int(self.sound[2*self.i-2]):int(self.sound[2*self.i-1])]
# sound1 là mảng lưu trữ giá trị các mẫu của âm thanh đâu tiên
self.f0=[] # mảng f lưu giá trị tần số f0 của các frame của âm thanh đầu tiên
n = 0# giống tài liệu
k = 0
# N = frame_len = 350
# K = 150
i = 0*self.frame_len # frame thu 20
rk = 0
j = 0
sum = 0 # tổng giá trị các rk, sum = R(k)
while i < len(self.sound1)-self.frame_len:
self.Rk = [] # mảng lưu giá trị mẫu của frame đầu tiên của âm thanh đâu tiên
for k in range(0,151,1):# k chay tu 0 den K = 150
for n in range(0, self.frame_len - 1 - k,1):
# print(self.sound[n+k])
rk= (self.sound1[n+i]*self.sound1[n+k+i]).astype(float)
sum = sum + rk
# hết vòng lặp for n thì có 1 giá trị Rk, sum = rk
self.Rk.append(sum)# them phan tu sum vao mang Rk
sum = 0
# hết vòng lặp for k có mảng self.Rk gồm các Rk của frame đầu tiên
Rk_max = 0
j_max = 0
for j in range(1,150):
if (self.Rk[j] > self.Rk[j - 1] and self.Rk[j] > self.Rk[j + 1]):
if(Rk_max<self.Rk[j]):
Rk_max = self.Rk[j]
j_max = j
tanso = self.fs/j_max
if(tanso > 80 and tanso<400):
self.f0.append(tanso)
i = i + self.frame_len
self.c.set_title("F0 của âm thanh cơ bản")
self.c.set_ylim(0,800)
self.c.plot(self.f0)
# ==============nhân tín hiệu với hàm hamming================
self.hammingArr = []
i=600
multi=np.hamming(self.frame_len)
while i < len(self.sound1)-self.frame_len:
hamArr = []
hamArr = list(self.sound1[i:i+self.frame_len]*multi)#nhân tín hiệu với hamming
print(i)
self.hammingArr = self.hammingArr + hamArr
i = i + self.frame_len
# c.set_ylim(0, 800)
#==========tìm f0 của hamming============
self.f0_hamming = [] # mảng f lưu giá trị tần số f0 của các frame của âm thanh đầu tiên
n = 0 # giống tài liệu
k = 0
# N = frame_len = 350
# K = 150
i = 0 * self.frame_len # frame thu 20
rk = 0
j = 0
sum = 0 # tổng giá trị các rk, sum = R(k)
while i < len(self.hammingArr) - self.frame_len:
self.Rk_hamming = [] # mảng lưu giá trị mẫu của frame đầu tiên của âm thanh đâu tiên
for k in range(0, 151, 1): # k chay tu 0 den K = 150
for n in range(0, self.frame_len - 1 - k, 1):
# print(self.sound[n+k])
rk = (self.hammingArr[n + i] * self.hammingArr[n + k + i]).astype(float)
sum = sum + rk
# hết vòng lặp for n thì có 1 giá trị Rk, sum = rk
self.Rk_hamming.append(sum) # them phan tu sum vao mang Rk
sum = 0
# hết vòng lặp for k có mảng self.Rk gồm các Rk của frame đầu tiên
Rk_max = 0
j_max = 0
for j in range(1, 150):
if (self.Rk_hamming[j] > self.Rk_hamming[j - 1] and self.Rk_hamming[j] > self.Rk_hamming[j + 1]):
if (Rk_max < self.Rk_hamming[j]):
Rk_max = self.Rk_hamming[j]
j_max = j
tanso = self.fs / j_max
if (tanso > 80 and tanso < 400):
self.f0_hamming.append(tanso)
i = i + self.frame_len
# self.d.plot(self.hammingArr)
self.d.set_title('Tần số F0 của hàm hamming')
self.d.set_ylim(0, 800)
self.d.plot(self.f0_hamming)
self.canvas1.get_tk_widget().pack(fill=BOTH, expand=True)
self.canvas1.draw()
def listen(self):
import sounddevice as sd
sd.play(self.data[int(self.sound[self.i*2-2]):int(self.sound[self.i*2-1])], self.fs)
# sd.play(self.hammingArr,self.fs)
def listen_hamming(self):
import sounddevice as sd
# sd.play(self.data[int(self.sound[self.i*2-2]):int(self.sound[self.i*2-1])], self.fs)
sd.play(self.hammingArr,self.fs)
root = Tk()
# root.geometry("800x600+300+300")
root.state("zoomed")
app = Example(root)
root.mainloop()
| [
"hoxuanduc172@gmail.com"
] | hoxuanduc172@gmail.com |
a25196a8f29cc48a0abcab0af5d74810790319c3 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/470/usersdata/281/112524/submittedfiles/Av2_Parte3.py | a4b0c34470239c11ef1a33686d04422e6413ad37 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 477 | py | # -*- coding: utf-8 -*-
m=int(input('Digite a quantidade de listas desejada: '))
for i in range(0,m,1):
lista=[]
n=int(input('Digite a quantidade de elementos da %d lista: ' %(i+1)))
for i in range(0,n,1):
lista.append(int(input('Digite o %d elemento dessa lista: ' %(i+1))))
media=sum(lista)/len(lista)
for i in range(0,n,1):
soma=0
soma(i-media)**2
dp=((1/(n-1))*soma)**(1/2)
print(media)
print(dp)
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
a71ef68374913762fe7a2464d2a06799e0304138 | 135a93aa1c521c80d83038367c9293cad010ee47 | /detect_mask_video.py | cc43cf07c9c6dd1b8643773d09e9bc7f9ea08cdd | [] | no_license | Kikyo1264/selectoptic | 74eb8ff97a840f013722386153a15fe59b403b62 | 60ef79009e1a574cb2b41a60d4328539fe2ea1d2 | refs/heads/main | 2023-01-22T11:47:50.178621 | 2020-12-05T14:58:15 | 2020-12-05T14:58:15 | 318,801,299 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,210 | py | # import the necessary packages
from tensorflow.keras.applications.mobilenet_v2 import preprocess_input
from tensorflow.keras.preprocessing.image import img_to_array
from tensorflow.keras.models import load_model
from imutils.video import VideoStream
import numpy as np
import imutils
import time
import cv2
import os
def detect_and_predict_mask(frame, faceNet, maskNet):
# grab the dimensions of the frame and then construct a blob
# from it
(h, w) = frame.shape[:2]
blob = cv2.dnn.blobFromImage(frame, 1.0, (224, 224),
(104.0, 177.0, 123.0))
# pass the blob through the network and obtain the face detections
faceNet.setInput(blob)
detections = faceNet.forward()
print(detections.shape)
# initialize our list of faces, their corresponding locations,
# and the list of predictions from our face mask network
faces = []
locs = []
preds = []
# loop over the detections
for i in range(0, detections.shape[2]):
# extract the confidence (i.e., probability) associated with
# the detection
confidence = detections[0, 0, i, 2]
# filter out weak detections by ensuring the confidence is
# greater than the minimum confidence
if confidence > 0.5:
# compute the (x, y)-coordinates of the bounding box for
# the object
box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
(startX, startY, endX, endY) = box.astype("int")
# ensure the bounding boxes fall within the dimensions of
# the frame
(startX, startY) = (max(0, startX), max(0, startY))
(endX, endY) = (min(w - 1, endX), min(h - 1, endY))
# extract the face ROI, convert it from BGR to RGB channel
# ordering, resize it to 224x224, and preprocess it
face = frame[startY:endY, startX:endX]
face = cv2.cvtColor(face, cv2.COLOR_BGR2RGB)
face = cv2.resize(face, (224, 224))
face = img_to_array(face)
face = preprocess_input(face)
# add the face and bounding boxes to their respective
# lists
faces.append(face)
locs.append((startX, startY, endX, endY))
# only make a predictions if at least one face was detected
if len(faces) > 0:
# for faster inference we'll make batch predictions on *all*
# faces at the same time rather than one-by-one predictions
# in the above `for` loop
faces = np.array(faces, dtype="float32")
preds = maskNet.predict(faces, batch_size=32)
# return a 2-tuple of the face locations and their corresponding
# locations
return (locs, preds)
# load our serialized face detector model from disk
prototxtPath = r"face_detector\deploy.prototxt"
weightsPath = r"face_detector\res10_300x300_ssd_iter_140000.caffemodel"
faceNet = cv2.dnn.readNet(prototxtPath, weightsPath)
# load the face mask detector model from disk
maskNet = load_model("mask_detector.model")
# initialize the video stream
print("[INFO] starting video stream...")
vs = VideoStream(src=0).start()
# loop over the frames from the video stream
while True:
# grab the frame from the threaded video stream and resize it
# to have a maximum width of 400 pixels
frame = vs.read()
frame = imutils.resize(frame, width=1080)
# detect faces in the frame and determine if they are wearing a
# face mask or not
(locs, preds) = detect_and_predict_mask(frame, faceNet, maskNet)
# loop over the detected face locations and their corresponding
# locations
for (box, pred) in zip(locs, preds):
# unpack the bounding box and predictions
(startX, startY, endX, endY) = box
(mask, withoutMask) = pred
# determine the class label and color we'll use to draw
# the bounding box and text
label = "Mask" if mask > withoutMask else "No Mask"
color = (0, 255, 0) if label == "Mask" else (0, 0, 255)
# include the probability in the label
label = "{}: {:.2f}%".format(label, max(mask, withoutMask) * 100)
# display the label and bounding box rectangle on the output
# frame
cv2.putText(frame, label, (startX, startY - 10),
cv2.FONT_HERSHEY_SIMPLEX, 0.45, color, 2)
cv2.rectangle(frame, (startX, startY), (endX, endY), color, 2)
# show the output frame
cv2.imshow("Frame", frame)
key = cv2.waitKey(1) & 0xFF
# if the `q` key was pressed, break from the loop
if key == ord("q"):
break
# do a bit of cleanup
cv2.destroyAllWindows()
vs.stop() | [
"noreply@github.com"
] | noreply@github.com |
75b561b5406589aa4620fdd70b2b1aa41f7f173b | 49d94aae03550cd4cfc4590e617bafc1dffa7f7e | /scripts/intersect-tail-with-head.py | b13760ebe74e7c16b2185a53765e54c553121ebb | [] | no_license | vmanisha/entityAnalysis | 01e0b95a1ba29c93ac44567a351bfc1b6f1eca03 | e9209e6930a23198fb14a58e06ae8a6ed0c0f690 | refs/heads/master | 2020-06-06T14:46:08.402658 | 2014-10-31T15:48:08 | 2014-10-31T15:48:08 | 22,752,195 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 438 | py | #!/usr/bin/env python
import sys
def main():
head_queries = {}
with open(sys.argv[1]) as head:
for line in head:
head_queries[line.split("\t")[0]]=1
with open(sys.argv[2]) as tail:
with open(sys.argv[3],"w") as output:
for line in tail:
if (line.split("\t")[0] in head_queries):
output.write(line)
if __name__ == "__main__":
main() | [
"manisha.verma@research.iiit.ac.in"
] | manisha.verma@research.iiit.ac.in |
7fb852b04434c3fa237af3b7bd89c067bec8d328 | 62dd63d1c0fab618575d00882122004b301d978d | /RTS/techRender.py | f1ae9756bc9b63eea1e9e34525ecb4d773533ae4 | [] | no_license | benjamintomad/modo-scripts | 4cb241ce9b7614a597581d3f6bef9e8ad55f9c36 | a1b27eb7c3f7e5b2671ab4e8e871dea0a5835abc | refs/heads/master | 2021-01-16T19:16:51.897211 | 2016-01-13T16:33:36 | 2016-01-13T16:33:36 | 21,005,240 | 0 | 0 | null | 2015-03-24T16:27:02 | 2014-06-19T15:21:41 | Python | UTF-8 | Python | false | false | 1,441 | py | # python
import lx
import os
import modo
import tank
reload(tank)
# scene = modo.scene.current()
#
# tk = tank.tank_from_path(r"w:\rts")
# temp = tk.template_from_path(scene.filename)
#
# step = temp.get_fields(scene.filename)['Step']
# assetName = temp.get_fields(scene.filename)['Asset']
# assetType = temp.get_fields(scene.filename)['sg_asset_type']
# version = temp.get_fields(scene.filename)['version']
#
'''
scene.importReference "W:\RTS\Caches\tch\q340\q340_s210\publish\maya\q340_s210-richardOld-001_tch_tch_v001.abc" true false false false false
'''
scene = modo.scene.current()
shotList = []
# sceneFolder = ("W:/RTS/Sequences/q340/%s/tch/work/modo" % shot)
def createscenefiles(shot):
lx.eval('scene.open "W:/RTS/People/Btomad/_pipeline/techRender/q340-base_tch_tch_v001.lxo"')
lx.eval('scene.saveAs "W:/RTS/Sequences/q340/%s/tch/work/modo/%s_tch_tch_v001.lxo"' % (shot, shot))
def techimportcam():
def techrendergroups():
feathers = []
eyes = []
# identify the groups
for g in scene.groups:
if g.name == "feathers_GRP":
feathersGrp = g
if g.name == "eyes_GRP":
eyesGrp = g
# feed the groups with feathers and eyes
for i in scene.items(itype="mesh"):
if "Feather" in i.name or "_lock_" in i.name:
feathers.append(i)
if "eyeCornea" in i.name:
i.channel('visible').set("allOff")
if "_eye_" in i.name:
eyes.append(i)
eyesGrp.addItems(eyes)
feathersGrp.addItems(feathers)
| [
"benjamin.tomad@gmail.com"
] | benjamin.tomad@gmail.com |
10bede8a4f0b3d9d7746c90716585439ccf491dc | 9dc137725be8ab131359820b6fee0b087da09ab7 | /clan_unit.py | 28fe4c216882717fc8479125d6f5cab9dacab40f | [] | no_license | swoogles/ClashOfClans | 88f5842b08933c0b7d176258e01dbd94a1035051 | 71cee3532cf61d36ab39eecbb4d4f5e119e0becd | refs/heads/master | 2021-01-01T05:32:38.493168 | 2014-08-30T03:05:37 | 2014-08-30T03:05:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,832 | py | from numpy import array, random, linalg
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
GREEN = (0, 255, 0)
RED = (255, 0, 0)
class Unit(object):
# Define some colors
_table = "unit"
name = "Abstract Unit"
width = 1
color = WHITE
fill = 0
def __init__(self, level=1):
self.level = level
self.hp_max = max(self._prop_levels_hp[level],1)
self.hp_cur = self.hp_max
self.cost = self._prop_levels_cost[level]
self.pos = random.randint(0, 40)
self.pos_3d = array(
[random.randint(0, 40) * 1.0, random.randint(0, 40) * 1.0, 0])
def copy_from_json(self, json):
for key, value in json.items():
setattr(self, key, value)
def print_cost_levels(self):
self.print_levels(self._prop_levels_cost)
def print_levels(self, prop_levels):
print("Values: ", prop_levels.values())
def distance_from(self, target):
return linalg.norm(target.pos_3d - self.pos_3d)
def unit_vec_to(self, target):
return (target.pos_3d - self.pos_3d) / linalg.norm(target.pos_3d - self.pos_3d)
def is_alive(self):
return (self.hp_cur > 0)
def sql_get_table(self):
return self._table
def repr_json(self):
return dict(
[(var, getattr(self, var)) for var in vars(self)
if var != 'target']
)
_prop_levels_cost = {
1: 0,
2: 0,
3: 0,
4: 0,
5: 0,
6: 0,
}
_prop_levels_dps = {
1: 1,
2: 0,
3: 0,
4: 0,
5: 0,
6: 0,
}
_prop_levels_hp = {
1: 0,
2: 0,
3: 0,
4: 0,
5: 0,
6: 0,
}
_prop_levels_cost_list = [
0,
0,
0,
0,
0,
0,
]
| [
"bill.frasure@gmail.com"
] | bill.frasure@gmail.com |
6aba3ced73c931303b47e07e10a5cae357364b9e | 31ab01a0e8f8fe37ce1b7561de77916c0e0d5223 | /optimisers/timer.py | 6c2bd479a381fcdc10c03cc4158a6272b48bf8a8 | [] | no_license | jakelevi1996/backprop2 | 1e392404432e6a18d55347050a82eeda8c41f37c | 389dbb3c4f84f8498ea879980b82e2cf543e5441 | refs/heads/master | 2022-11-14T08:13:00.379250 | 2022-10-23T17:06:26 | 2022-10-23T17:06:26 | 162,126,784 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,306 | py | """ This module contains the Timer and TimedObject classes """
from time import perf_counter
class Timer:
""" Timer object, which can be shared between different classes, for
example Result, Evaluator, Terminator, which all require access to a shared
timer """
def __init__(self, t_lim=None):
""" Initialise a Timer object """
self._start_time = None
self._t_lim = t_lim
def begin(self):
""" Begin the Timer object by recording the current time """
self._start_time = perf_counter()
def time_elapsed(self):
""" Return the time elapsed in seconds since the begin method of this
Timer obejct was called. The time elapsed is returned as a float. The
Timer.begin method of this timer object must have been called before
this method is called """
return perf_counter() - self._start_time
def time_remaining(self):
""" Return the time remaining in seconds until t_lim seconds after the
begin method of this object was last called (assuming t_lim is not
None) """
return self._start_time + self._t_lim - perf_counter()
class TimedObject:
""" Class representing an object which has a timer. This timer can be
shared between multiple objects (EG a Result, an Evaluator, and a
Terminator). This class is intended to be inherited from, but not
instantiated directly """
def _init_timer(self):
""" Initialise a _timer attrinute for this TimedObject object. This
method MUST be called by all subclasses of TimedObject when they are
initialised """
self._timer = None
def set_timer(self, timer):
""" Set the timer for this object """
self._timer = timer
def has_timer(self):
""" Check if this object has been set with a valid timer object """
return isinstance(self._timer, Timer)
def time_elapsed(self):
""" Return the time elapsed in seconds since the begin method of this
obejct's timer was called. The time elapsed is returned as a float. The
set_timer method of this object and the Timer.begin method of this
object's timer must have been called before this method is called """
return self._timer.time_elapsed()
| [
"jakelevi@hotmail.co.uk"
] | jakelevi@hotmail.co.uk |
641a500e6b10de6d078f37e63879a26490cda3da | ddbae4e3616ce77216c9d085b6ad831fe213232e | /tests/deepspeed/test_deepspeed.py | 5f8cab68003f772d5d37922dadb279b3028a3976 | [
"Apache-2.0"
] | permissive | asg0451/transformers | 84e32f8815cf5bec02c71f2a85e64435f2d5c823 | 32290d87f6e1550cf251e318b9543dd1fdf54fd2 | refs/heads/master | 2023-08-21T16:26:00.326475 | 2021-06-08T15:36:15 | 2021-06-08T15:36:15 | 375,081,599 | 0 | 0 | Apache-2.0 | 2023-09-06T17:32:57 | 2021-06-08T16:49:10 | null | UTF-8 | Python | false | false | 37,168 | py | # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import dataclasses
import io
import json
import os
import unittest
from copy import deepcopy
from parameterized import parameterized
from transformers import AutoModel, TrainingArguments, is_torch_available, logging
from transformers.deepspeed import HfDeepSpeedConfig, is_deepspeed_available
from transformers.file_utils import WEIGHTS_NAME
from transformers.testing_utils import (
CaptureLogger,
CaptureStderr,
ExtendSysPath,
LoggingLevel,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed
# Make the sibling test_trainer module importable regardless of the cwd pytest runs from.
bindir = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(f"{bindir}/.."):
    from test_trainer import TrainerIntegrationCommon # noqa
if is_torch_available():
    # These helpers require torch, hence the conditional import.
    from test_trainer import RegressionModelConfig, RegressionPreTrainedModel, get_regression_trainer # noqa
# Fix the RNG seed so regression-model weights are reproducible across runs.
set_seed(42)
# Hub model ids used by the tests below: tiny models for speed, t5-small for quality checks.
MBART_TINY = "sshleifer/tiny-mbart"
T5_SMALL = "t5-small"
T5_TINY = "patrickvonplaten/t5-tiny-random"
def load_json(path):
    """Load and return the parsed contents of the JSON file at ``path``.

    The encoding is pinned to UTF-8 (JSON documents are UTF-8 by
    specification) instead of relying on the platform locale default,
    which previously could break on non-ASCII metric values.
    """
    with open(path, encoding="utf-8") as f:
        return json.load(f)
# a candidate for testing_utils
def require_deepspeed(test_case):
    """
    Decorator marking a test that requires deepspeed
    """
    if is_deepspeed_available():
        return test_case
    return unittest.skip("test requires deepspeed")(test_case)
def require_deepspeed_aio(test_case):
    """
    Decorator marking a test that requires deepspeed async-io (NVMe) support.

    Skips when deepspeed itself is missing, or when it is installed without
    a compatible compiled async-io op.
    """
    if not is_deepspeed_available():
        return unittest.skip("test requires deepspeed")(test_case)
    # Only safe to import after the availability check above.
    import deepspeed
    from deepspeed.ops.aio import AsyncIOBuilder
    aio_ok = deepspeed.ops.__compatible_ops__[AsyncIOBuilder.NAME]
    if aio_ok:
        return test_case
    return unittest.skip("test requires deepspeed async-io")(test_case)
if is_deepspeed_available():
    # Only import deepspeed-dependent helpers when the package is installed;
    # tests that use them are gated by @require_deepspeed anyway.
    from deepspeed.utils import logger as deepspeed_logger # noqa
    from transformers.deepspeed import deepspeed_config, is_deepspeed_zero3_enabled # noqa
# ZeRO stage identifiers; these double as suffixes of the ds_config_*.json fixture files.
ZERO2 = "zero2"
ZERO3 = "zero3"
# Parameterization list: most tests run once per ZeRO stage.
stages = [ZERO2, ZERO3]
@require_deepspeed
@require_torch_gpu
class CoreIntegrationDeepSpeed(TestCasePlus, TrainerIntegrationCommon):
    """
    Testing non-Trainer DeepSpeed integration
    """
    def setUp(self):
        """Prepare env vars that emulate a 1-gpu torch.distributed launch."""
        super().setUp()
        self.dist_env_1_gpu = dict(
            MASTER_ADDR="localhost", MASTER_PORT="10999", RANK="0", LOCAL_RANK="0", WORLD_SIZE="1"
        )
    def test_init_zero3(self):
        # test that zero.Init() works correctly under zero3
        ds_config = {
            "train_batch_size": 1,
            "zero_optimization": {
                "stage": 3,
            },
        }
        # NOTE: `dschf` must stay alive — transformers tracks it via a weakref,
        # and `from_pretrained` consults that global to decide on zero.Init().
        dschf = HfDeepSpeedConfig(ds_config)
        self.assertTrue(dschf.is_zero3())
        self.assertTrue(is_deepspeed_zero3_enabled())
        with LoggingLevel(logging.INFO):
            with mockenv_context(**self.dist_env_1_gpu):
                logger = logging.get_logger("transformers.modeling_utils")
                with CaptureLogger(logger) as cl:
                    AutoModel.from_pretrained(T5_TINY)
                # the zero3 code path should announce itself in the logs
                self.assertIn("Detected DeepSpeed ZeRO-3", cl.out)
        # now remove zero optimization
        del ds_config["zero_optimization"]
        # re-creating the config object resets the global zero3 detection
        dschf = HfDeepSpeedConfig(ds_config)
        self.assertFalse(dschf.is_zero3())
        self.assertFalse(is_deepspeed_zero3_enabled())
        with LoggingLevel(logging.INFO):
            with mockenv_context(**self.dist_env_1_gpu):
                logger = logging.get_logger("transformers.modeling_utils")
                with CaptureLogger(logger) as cl:
                    AutoModel.from_pretrained(T5_TINY)
                self.assertNotIn("Detected DeepSpeed ZeRO-3", cl.out)
@require_deepspeed
@require_torch_gpu
class TrainerIntegrationDeepSpeed(TestCasePlus, TrainerIntegrationCommon):
    """
    This class is for testing directly via get_regression_trainer
    It mixes in `TrainerIntegrationCommon` which already has a lot of helper validation methods
    which we can re-use here.
    Important: this class' setup can only work with a single gpu because it runs within the current
    pytest worker. For multi-gpu tests use TestDeepSpeedWithLauncher.
    Note: if any of the tests of this class get run there will be at least one gpu occupied by them
    until this pytest worker exits. This is because the gpu memory allocated by the cuda-kernels
    won't be released until this pytest worker exits.
    This may appear as some run-away tests if you watch `nvidia-smi` while other tests that fork new
    processes are run. So there will be one or two "stale" processes reported in `nvidia-smi`. This
    is not a bug.
    """
    def setUp(self):
        """Load the zero2/zero3 config fixtures and emulate a 1-gpu distributed env."""
        super().setUp()
        args = TrainingArguments(".")
        self.n_epochs = args.num_train_epochs
        self.batch_size = args.train_batch_size
        self.dist_env_1_gpu = dict(
            MASTER_ADDR="localhost", MASTER_PORT="10999", RANK="0", LOCAL_RANK="0", WORLD_SIZE="1"
        )
        self.ds_config_file = dict(
            zero2=f"{self.test_file_dir_str}/ds_config_zero2.json",
            zero3=f"{self.test_file_dir_str}/ds_config_zero3.json",
        )
        # use self.get_config_dict(stage) to use these to ensure the original is not modified
        with io.open(self.ds_config_file[ZERO2], "r", encoding="utf-8") as f:
            config_zero2 = json.load(f)
            # by default use fp16
            config_zero2["fp16"]["enabled"] = True
        with io.open(self.ds_config_file[ZERO3], "r", encoding="utf-8") as f:
            config_zero3 = json.load(f)
            # by default use fp16
            config_zero3["fp16"]["enabled"] = True
            # This setting slows things down, so don't enable it by default unless needed by a test.
            # It's in the file as a demo for users since we want everything to work out of the box even if slower.
            config_zero3["zero_optimization"]["stage3_gather_fp16_weights_on_model_save"] = False
        self.ds_config_dict = dict(
            zero2=config_zero2,
            zero3=config_zero3,
        )
    def get_config_dict(self, stage):
        # As some tests modify the dict, always make a copy
        return deepcopy(self.ds_config_dict[stage])
    # --- These tests are enough to run on one of zero stages --- #
    def test_hf_ds_config_mismatch(self):
        """Mismatched TrainingArguments vs ds_config values must raise and name every offending key."""
        ds_config = self.get_config_dict(ZERO2)
        # Purposefully configure these values to mismatch TrainingArguments values.
        # This currently doesn't cover all keys (but it could)
        per_device_train_batch_size = 2
        ds_config["train_micro_batch_size_per_gpu"] = per_device_train_batch_size + 2
        ds_config["train_batch_size"] = 1000
        gradient_accumulation_steps = 2
        ds_config["gradient_accumulation_steps"] = gradient_accumulation_steps + 2
        max_grad_norm = 1.0
        ds_config["gradient_clipping"] = max_grad_norm + 0.1
        adam_beta1, adam_beta2 = 0.9, 0.99
        ds_config["optimizer"]["params"]["betas"] = [adam_beta1 - 0.1, adam_beta2 - 0.1]
        fp16 = True
        ds_config["fp16"]["enabled"] = not fp16
        keys = [
            "per_device_train_batch_size",
            "train_batch_size",
            "gradient_accumulation_steps",
            "max_grad_norm",
            "betas",
            "fp16",
        ]
        with mockenv_context(**self.dist_env_1_gpu):
            trainer = get_regression_trainer(
                local_rank=0,
                fp16=fp16,
                deepspeed=ds_config,
                per_device_train_batch_size=per_device_train_batch_size,
                gradient_accumulation_steps=gradient_accumulation_steps,
                max_grad_norm=max_grad_norm,
                adam_beta1=adam_beta1,
                adam_beta2=adam_beta2,
            )
            with self.assertRaises(Exception) as context:
                trainer.train()
            # every mismatched key must be reported in the exception text
            for key in keys:
                self.assertTrue(
                    key in str(context.exception),
                    f"{key} is not in the exception message:\n{context.exception}",
                )
    # Test various combos
    # 1. DS scheduler + DS optimizer: this is already tested by most other tests
    # 2. HF scheduler + HF optimizer:
    # 3. DS scheduler + HF optimizer:
    # 4. HF scheduler + DS optimizer:
    def test_hf_scheduler_hf_optimizer(self):
        """Combo 2: HF scheduler + HF optimizer — training must run and update weights."""
        a = 0
        with mockenv_context(**self.dist_env_1_gpu):
            ds_config_zero2_dict = self.get_config_dict(ZERO2)
            del ds_config_zero2_dict["optimizer"]  # force default HF Trainer optimizer
            del ds_config_zero2_dict["scheduler"]  # force default HF Trainer scheduler
            ds_config_zero2_dict["zero_optimization"]["offload_optimizer"]["device"] = "none"
            ds_config_zero2_dict["fp16"]["initial_scale_power"] = 1  # force optimizer on the first step
            trainer = get_regression_trainer(a=a, local_rank=0, fp16=True, deepspeed=ds_config_zero2_dict)
            trainer.train()
        new_a = trainer.model.a.item()
        self.assertNotEqual(new_a, a)
    def test_ds_scheduler_hf_optimizer(self):
        """Combo 3: DS scheduler + HF optimizer — training must run and update weights."""
        a = 0
        with mockenv_context(**self.dist_env_1_gpu):
            ds_config_zero2_dict = self.get_config_dict(ZERO2)
            del ds_config_zero2_dict["optimizer"]  # force default HF Trainer optimizer
            ds_config_zero2_dict["zero_optimization"]["offload_optimizer"]["device"] = "none"
            ds_config_zero2_dict["fp16"]["initial_scale_power"] = 1  # force optimizer on the first step
            trainer = get_regression_trainer(a=a, local_rank=0, fp16=True, deepspeed=ds_config_zero2_dict)
            trainer.train()
        new_a = trainer.model.a.item()
        self.assertNotEqual(new_a, a)
    def test_hf_scheduler_ds_optimizer(self):
        """Combo 4: HF scheduler + DS optimizer — must fail with an explanatory error."""
        # this combo is not possible at the moment
        with mockenv_context(**self.dist_env_1_gpu):
            ds_config_zero2_dict = self.get_config_dict(ZERO2)
            del ds_config_zero2_dict["scheduler"]  # force default HF Trainer scheduler
            ds_config_zero2_dict["zero_optimization"]["offload_optimizer"]["device"] = "none"
            ds_config_zero2_dict["fp16"]["initial_scale_power"] = 1  # force optimizer on the first step
            trainer = get_regression_trainer(local_rank=0, fp16=True, deepspeed=ds_config_zero2_dict)
            with self.assertRaises(Exception) as context:
                trainer.train()
        self.assertTrue(
            "HF scheduler + DeepSpeed optimizer combination is not possible" in str(context.exception),
            f"got exception: {context.exception}",
        )
    @require_deepspeed_aio
    def test_stage3_nvme_offload(self):
        """ZeRO-3 with optimizer+param offload pointed at an 'nvme' path must train cleanly."""
        with mockenv_context(**self.dist_env_1_gpu):
            # this actually doesn't have to be on NVMe, any storage will do since this test only
            # runs a simple check that we can use some directory as if it were NVMe
            nvme_path = self.get_auto_remove_tmp_dir()
            nvme_config = dict(device="nvme", nvme_path=nvme_path)
            ds_config_zero3_dict = self.get_config_dict(ZERO3)
            ds_config_zero3_dict["zero_optimization"]["offload_optimizer"] = nvme_config
            ds_config_zero3_dict["zero_optimization"]["offload_param"] = nvme_config
            trainer = get_regression_trainer(local_rank=0, fp16=True, deepspeed=ds_config_zero3_dict)
            with CaptureLogger(deepspeed_logger) as cl:
                trainer.train()
            self.assertIn("DeepSpeed info", cl.out, "expected DeepSpeed logger output but got none")
    # --- These tests need to run on both zero stages --- #
    @parameterized.expand(stages)
    def test_hf_optimizer_with_offload(self, stage):
        # must not allow non-DS optimizer when using ZERO-offload
        ds_config_dict = self.get_config_dict(stage)
        del ds_config_dict["optimizer"]  # force default HF Trainer optimizer
        # force cpu offload
        ds_config_dict["zero_optimization"]["offload_optimizer"]["device"] = "cpu"
        with mockenv_context(**self.dist_env_1_gpu):
            trainer = get_regression_trainer(local_rank=0, fp16=True, deepspeed=ds_config_dict)
            with self.assertRaises(Exception) as context:
                trainer.train()
            self.assertIn(
                "ZeRO Offload can only work with DeepSpeed optimizers",
                str(context.exception),
                f"got exception: {context.exception}",
            )
    @parameterized.expand(stages)
    def test_fake_notebook_no_launcher(self, stage):
        # this setup emulates a notebook where a launcher needs to be emulated by hand
        # note that unittest resets sys.stdout each test, so `CaptureStd` will work here to capture
        # DeepSpeed log if this test happens to run first in this pytest worker. But it will fail if
        # it's run not as a first test as `sys.stdout` will no longer be the same. So we either have
        # to reset `deepspeed_logger.handlers[0].setStream(sys.stdout)` or directly capture from the deepspeed_logger.
        with mockenv_context(**self.dist_env_1_gpu):
            trainer = get_regression_trainer(local_rank=0, fp16=True, deepspeed=self.get_config_dict(stage))
            with CaptureLogger(deepspeed_logger) as cl:
                trainer.train()
            self.assertIn("DeepSpeed info", cl.out, "expected DeepSpeed logger output but got none")
    @parameterized.expand(stages)
    def test_early_get_last_lr(self, stage):
        # with deepspeed's fp16 and dynamic loss scale enabled the optimizer/scheduler steps may
        # not run for the first few dozen steps while loss scale is too large, and thus during
        # that time `get_last_lr` will fail if called during that warm up stage,
        #
        # setting `logging_steps=1` forces an early `trainer._maybe_log_save_evaluate()` which calls
        # `self.lr_scheduler.get_last_lr()` and originally it'd fail on the very first step.
        with mockenv_context(**self.dist_env_1_gpu):
            a = b = 0.0
            trainer = get_regression_trainer(
                a=a,
                b=b,
                local_rank=0,
                train_len=8,
                fp16=True,
                deepspeed=self.get_config_dict(stage),
                per_device_train_batch_size=8,
                logging_steps=1,
            )
            trainer.train()
            post_train_a = trainer.model.a.item()
            # XXX: for some reason the following check fails with zero3 - not a broken but a
            # different qualitative outcome - as if optimizer did run
            # oddly getting 1.0 for both a and b from 0.0 - there is a bug somewhere
            # print(trainer.model.a.item())
            # print(trainer.model.b.item())
            # need to investigate at some point
            if stage == ZERO3:
                return
            # it's enough that train didn't fail for this test, but we must check that
            # optimizer/scheduler didn't run (since if it did this test isn't testing the right thing)
            self.assertEqual(post_train_a, a)
    @parameterized.expand(stages)
    def test_gradient_accumulation(self, stage):
        # this test measures that we get identical weights and similar loss with:
        # 1. per_device_train_batch_size=8, gradient_accumulation_steps=1
        # 2. per_device_train_batch_size=4, gradient_accumulation_steps=2
        # since the 2nd should produce the effective batch of 1st, with the same results
        #
        # I can get an identical loss for a small train_len=32, plus the power of the initial
        # dynamic loss scale value set to:
        #   "fp16.initial_scale_power": 1
        # plus having the same WarmupLR's warmup_min_lr == warmup_max_lr in the config file
        # but for some reason going to train_len=64 the weights, weights start to mismatch with this setup.
        # the culprit seems to be `initial_scale_power` - putting it back to its default 32 keeps the weights identical
        train_len = 64
        a = b = 0.0
        with mockenv_context(**self.dist_env_1_gpu):
            no_grad_accum_trainer = get_regression_trainer(
                a=a,
                b=b,
                local_rank=0,
                train_len=train_len,
                fp16=True,
                deepspeed=self.get_config_dict(stage),
                per_device_train_batch_size=8,
                gradient_accumulation_steps=1,
            )
            no_grad_accum_result = no_grad_accum_trainer.train()
            no_grad_accum_loss = no_grad_accum_result.training_loss
            no_grad_accum_a = no_grad_accum_trainer.model.a.item()
            no_grad_accum_b = no_grad_accum_trainer.model.b.item()
            # make sure the optimizer kicked in - if it hasn't changed from the original value of a then make train_len bigger
            self.assertNotEqual(no_grad_accum_a, a)
        with mockenv_context(**self.dist_env_1_gpu):
            yes_grad_accum_trainer = get_regression_trainer(
                a=a,
                b=b,
                local_rank=0,
                train_len=train_len,
                fp16=True,
                deepspeed=self.get_config_dict(stage),
                per_device_train_batch_size=4,
                gradient_accumulation_steps=2,
            )
            yes_grad_accum_result = yes_grad_accum_trainer.train()
            yes_grad_accum_loss = yes_grad_accum_result.training_loss
            yes_grad_accum_a = yes_grad_accum_trainer.model.a.item()
            yes_grad_accum_b = yes_grad_accum_trainer.model.b.item()
            self.assertNotEqual(yes_grad_accum_a, a)
        # training with half the batch size but accumulation steps as 2 should give the same
        # weights, but sometimes get a slight difference still of 1e-6
        self.assertAlmostEqual(no_grad_accum_a, yes_grad_accum_a, places=5)
        self.assertAlmostEqual(no_grad_accum_b, yes_grad_accum_b, places=5)
        # see the note above how to get identical loss on a small bs
        self.assertAlmostEqual(no_grad_accum_loss, yes_grad_accum_loss, places=5)
    def check_saved_checkpoints_deepspeed(self, output_dir, freq, total, stage):
        """Assert that every expected HF + DeepSpeed checkpoint file exists under output_dir."""
        # adapted from TrainerIntegrationCommon.check_saved_checkpoints
        file_list = [WEIGHTS_NAME, "training_args.bin", "trainer_state.json", "config.json"]
        if stage == ZERO2:
            ds_file_list = ["mp_rank_00_model_states.pt"]
        elif stage == ZERO3:
            ds_file_list = ["zero_pp_rank_0_mp_rank_00_model_states.pt"]
        else:
            raise ValueError(f"unknown stage {stage}")
        # XXX: this can be recoded and then removed once we require deepspeed>0.3.13
        from packaging import version
        import deepspeed
        if version.parse(deepspeed.__version__) > version.parse("0.3.13"):
            ds_file_list.append("zero_pp_rank_0_mp_rank_00_optim_states.pt")
        else:
            # older deepspeed releases named this file without the underscore
            ds_file_list.append("zero_pp_rank_0_mp_rank_00optim_states.pt")
        for step in range(freq, total, freq):
            checkpoint = os.path.join(output_dir, f"checkpoint-{step}")
            self.assertTrue(os.path.isdir(checkpoint), f"[{stage}] {checkpoint} dir is not found")
            # common files
            for filename in file_list:
                path = os.path.join(checkpoint, filename)
                self.assertTrue(os.path.isfile(path), f"[{stage}] {path} is not found")
            # ds files
            ds_path = os.path.join(checkpoint, f"global_step{step}")
            for filename in ds_file_list:
                # filename = os.path.join(path, filename)
                # print(filename)
                path = os.path.join(ds_path, filename)
                self.assertTrue(os.path.isfile(path), f"[{stage}] {path} is not found")
    @parameterized.expand(stages)
    def test_save_checkpoints(self, stage):
        """Train with periodic saving and verify all checkpoint artifacts are written."""
        # adapted from  TrainerIntegrationTest.test_save_checkpoints
        freq = 5
        output_dir = self.get_auto_remove_tmp_dir()
        ds_config_dict = self.get_config_dict(stage)
        ds_config_dict["fp16"]["initial_scale_power"] = 1  # force optimizer on the first step
        if stage == ZERO3:
            ds_config_dict["zero_optimization"]["stage3_gather_fp16_weights_on_model_save"] = True
        # save checkpoints
        with mockenv_context(**self.dist_env_1_gpu):
            trainer = get_regression_trainer(
                output_dir=output_dir,
                save_steps=freq,
                fp16=True,
                deepspeed=ds_config_dict,
            )
            trainer.train()
        total = int(self.n_epochs * 64 / self.batch_size)
        self.check_saved_checkpoints_deepspeed(output_dir, freq, total, stage)
    @parameterized.expand(stages)
    def test_can_resume_training_errors(self, stage):
        """Resuming from a missing or bogus checkpoint must raise a clear error."""
        with mockenv_context(**self.dist_env_1_gpu):
            ds_config_dict = self.get_config_dict(stage)
            output_dir = self.get_auto_remove_tmp_dir()
            trainer = get_regression_trainer(output_dir=output_dir, fp16=True, deepspeed=ds_config_dict)
            # 1. fail to find any checkpoint - due a fresh output_dir
            with self.assertRaises(Exception) as context:
                trainer.train(resume_from_checkpoint=True)
            self.assertTrue(
                "No valid checkpoint found in output directory" in str(context.exception),
                f"got exception: {context.exception}",
            )
            # 2. fail to find a bogus checkpoint
            with self.assertRaises(Exception) as context:
                checkpoint = os.path.join(output_dir, "checkpoint-5")
                trainer.train(resume_from_checkpoint=f"{checkpoint}-bogus")
            self.assertTrue(
                "Can't find a valid checkpoint at" in str(context.exception), f"got exception: {context.exception}"
            )
    @parameterized.expand(stages)
    def test_can_resume_training_normal(self, stage):
        # adapted from TrainerIntegrationTest.test_can_resume_training
        # test normal resume for each stage separately, error-handling is tested in a different test
        output_dir = self.get_auto_remove_tmp_dir()
        ds_config_dict = self.get_config_dict(stage)
        ds_config_dict["fp16"]["initial_scale_power"] = 1  # force optimizer on the first step
        if stage == ZERO3:
            ds_config_dict["zero_optimization"]["stage3_gather_fp16_weights_on_model_save"] = True
        kwargs = dict(
            output_dir=output_dir, train_len=128, save_steps=5, learning_rate=0.1, fp16=True, deepspeed=ds_config_dict
        )
        with mockenv_context(**self.dist_env_1_gpu):
            trainer = get_regression_trainer(**kwargs)
            trainer.train()
            (a, b) = trainer.model.a.item(), trainer.model.b.item()
            state = dataclasses.asdict(trainer.state)
            checkpoint = os.path.join(output_dir, "checkpoint-5")
            # Reinitialize trainer
            trainer = get_regression_trainer(**kwargs)
            trainer.train(resume_from_checkpoint=checkpoint)
            (a1, b1) = trainer.model.a.item(), trainer.model.b.item()
            state1 = dataclasses.asdict(trainer.state)
            self.assertEqual(a, a1)
            self.assertEqual(b, b1)
            self.check_trainer_state_are_the_same(state, state1)
            # Now check with a later checkpoint that it also works when we span over one epoch
            checkpoint = os.path.join(output_dir, "checkpoint-15")
            # Reinitialize trainer and load model
            trainer = get_regression_trainer(**kwargs)
            trainer.train(resume_from_checkpoint=checkpoint)
            (a1, b1) = trainer.model.a.item(), trainer.model.b.item()
            state1 = dataclasses.asdict(trainer.state)
            self.assertEqual(a, a1)
            self.assertEqual(b, b1)
            self.check_trainer_state_are_the_same(state, state1)
    def test_config_object(self):
        # test that we can switch from zero2 to zero3 in the same process for example
        # test is_zero, etc.
        output_dir = self.get_auto_remove_tmp_dir()
        kwargs = dict(output_dir=output_dir, train_len=8, fp16=True)
        ds_config_zero3_dict = self.get_config_dict("zero3")
        ds_config_zero2_dict = self.get_config_dict("zero2")
        with mockenv_context(**self.dist_env_1_gpu):
            trainer = get_regression_trainer(deepspeed=ds_config_zero3_dict, **kwargs)
            self.assertTrue(is_deepspeed_zero3_enabled())
            # test we can repeat that and with train this time
            trainer = get_regression_trainer(deepspeed=ds_config_zero3_dict, **kwargs)
            trainer.train()
            self.assertTrue(is_deepspeed_zero3_enabled())
            # test zero3 is disabled
            trainer = get_regression_trainer(deepspeed=ds_config_zero2_dict, **kwargs)
            self.assertFalse(is_deepspeed_zero3_enabled())
            # check config obj
            config = deepspeed_config()
            self.assertTrue(bool(config), "Deepspeed config should be accessible")
            del trainer
            # now weakref should gc the global and we shouldn't get anything here
            config = deepspeed_config()
            self.assertFalse(is_deepspeed_zero3_enabled())
            self.assertFalse(bool(config), "Deepspeed config should not be accessible")
@slow
@require_deepspeed
@require_torch_gpu
class TestDeepSpeedWithLauncher(TestCasePlus):
    """This class is for testing via an external script - can do multiple gpus"""
    # Tests to devise #
    #
    # 1. predict_with_generate on multigpu - need to figure out how to give input sequences so that
    # the 2 gpus will generate prediction sequences that aren't of the same length - this is because
    # we had to code a special feature to sync the gpus when the predicted sequences aren't of the
    # same length. In general this will tested as a side-effect through a variety of other tests -
    # it'll simply hang trying to synchronize with other gpus if this problem is encountered. So as
    # long as we have a few full tests running on zero3 + predict_with_generate this should be
    # mostly covered.
    #
    # but there are 5 variations on beam search in `generate`- with identical code branched with `if
    # synced_gpus`
    #
    # 2. most tests should probably be run on both: zero2 and zero3 configs
    #
    @require_torch_multi_gpu
    @parameterized.expand(stages)
    def test_basic_distributed(self, stage):
        """Smoke test: distributed train+eval completes for each ZeRO stage."""
        self.run_and_check(stage=stage, distributed=True)
    @parameterized.expand(stages)
    def test_do_eval_no_train(self, stage):
        # we should not fail if train is skipped
        self.run_and_check(
            stage=stage,
            eval_steps=1,
            distributed=False,
            do_train=False,
            do_eval=True,
        )
    @parameterized.expand(stages)
    def test_fp32_non_distributed(self, stage):
        # real model needs too much GPU memory under stage2+fp32, so using tiny random model here -
        # therefore no quality checks, just basic completion checks are done
        self.run_and_check(
            stage=stage,
            model_name=T5_TINY,
            distributed=False,
            do_train=True,
            do_eval=True,
            quality_checks=False,
            fp16=False,
        )
    @require_torch_multi_gpu
    @parameterized.expand(stages)
    def test_fp32_distributed(self, stage):
        # real model needs too much GPU memory under stage2+fp32, so using tiny random model here -
        # therefore no quality checks, just basic completion checks are done
        self.run_and_check(
            stage=stage,
            model_name=T5_TINY,
            distributed=True,
            do_train=True,
            do_eval=True,
            quality_checks=False,
            fp16=False,
        )
    @parameterized.expand(stages)
    def test_resume_train_not_from_ds_checkpoint(self, stage):
        # do normal training and then resume not from the deepspeed checkpoint but explicitly from
        # the saved model dir
        do_train = True
        do_eval = False
        kwargs = dict(stage=stage, eval_steps=1, distributed=True, do_train=do_train, do_eval=do_eval)
        # 1. normal training
        output_dir = self.run_and_check(**kwargs)
        # 2. now resume explicitly from the saved weights, by passing --model_name_or_path output_dir
        # - i.e. the same path the model was saved to in step 1
        output_dir = self.run_trainer(**kwargs, model_name=output_dir)
        self.do_checks(output_dir, do_train=do_train, do_eval=do_eval)
    def do_checks(self, output_dir, do_train=True, do_eval=True, quality_checks=True):
        """Validate that the expected metric files were written (and, optionally, their quality).

        Args:
            output_dir: directory the launched script wrote its results into.
            do_train/do_eval: which metric files to look for.
            quality_checks: when True, also assert minimal speed/BLEU thresholds.
        """
        if do_train:
            train_metrics = load_json(os.path.join(output_dir, "train_results.json"))
            self.assertIn("train_samples_per_second", train_metrics)
            if quality_checks:
                self.assertGreater(train_metrics["train_samples_per_second"], 0.5)
        if do_eval:
            eval_metrics = load_json(os.path.join(output_dir, "eval_results.json"))
            self.assertIn("eval_bleu", eval_metrics)
            if quality_checks:
                self.assertGreater(eval_metrics["eval_bleu"], 1)
    # XXX: need to do better validation beyond just that the run was successful
    def run_and_check(
        self,
        stage,
        model_name: str = T5_SMALL,
        eval_steps: int = 10,
        distributed: bool = True,
        do_train: bool = True,
        do_eval: bool = True,
        quality_checks: bool = True,
        fp16: bool = True,
        extra_args_str: str = None,
        remove_args_str: str = None,
    ):
        """Launch a training/eval run via run_trainer and validate its outputs.

        Returns the output dir so callers can chain runs (e.g. resume tests).
        """
        # we are doing quality testing so using a small real model
        output_dir = self.run_trainer(
            stage=stage,
            model_name=model_name,
            eval_steps=eval_steps,
            num_train_epochs=1,
            do_train=do_train,
            do_eval=do_eval,
            distributed=distributed,
            fp16=fp16,
            extra_args_str=extra_args_str,
            remove_args_str=remove_args_str,
        )
        self.do_checks(output_dir, do_train=do_train, do_eval=do_eval, quality_checks=quality_checks)
        return output_dir
    def run_trainer(
        self,
        stage: str,
        model_name: str,
        eval_steps: int = 10,
        num_train_epochs: int = 1,
        do_train: bool = False,
        do_eval: bool = True,
        distributed: bool = True,
        fp16: bool = True,
        extra_args_str: str = None,
        remove_args_str: str = None,
    ):
        """Build the run_translation.py command line and execute it via the deepspeed launcher.

        Returns the output dir the script wrote to.
        """
        max_len = 32
        data_dir = self.test_file_dir / "../fixtures/tests_samples/wmt_en_ro"
        output_dir = self.get_auto_remove_tmp_dir()
        # BUGFIX: the three sequence-length args previously carried the garbled
        # literal "70,794", which argparse rejects for an int-typed option; the
        # intended value is the `max_len` defined above (and previously unused).
        args = f"""
            --model_name_or_path {model_name}
            --train_file {data_dir}/train.json
            --validation_file {data_dir}/val.json
            --output_dir {output_dir}
            --overwrite_output_dir
            --max_source_length {max_len}
            --max_target_length {max_len}
            --val_max_target_length {max_len}
            --warmup_steps 8
            --predict_with_generate
            --logging_steps 0
            --save_steps 0
            --eval_steps {eval_steps}
            --group_by_length
            --label_smoothing_factor 0.1
            --adafactor
            --source_lang en
            --target_lang ro
            --report_to none
        """.split()
        args.extend(["--source_prefix", '"translate English to Romanian: "'])
        if fp16:
            args.extend(["--fp16"])
        actions = 0
        if do_train:
            actions += 1
            args.extend(
                f"""
            --do_train
            --num_train_epochs {str(num_train_epochs)}
            --max_train_samples 16
            --per_device_train_batch_size 2
            --learning_rate 3e-3
            """.split()
            )
        if do_eval:
            actions += 1
            args.extend(
                """
            --do_eval
            --max_eval_samples 16
            --per_device_eval_batch_size 2
            """.split()
            )
        assert actions > 0, "need at least do_train or do_eval for the test to run"
        if extra_args_str is not None:
            args.extend(extra_args_str.split())
        # currently only works for bool args
        if remove_args_str is not None:
            remove_args = remove_args_str.split()
            args = [x for x in args if x not in remove_args]
        ds_args = f"--deepspeed {self.test_file_dir_str}/ds_config_{stage}.json".split()
        script = [f"{self.examples_dir_str}/pytorch/translation/run_translation.py"]
        launcher = self.get_launcher(distributed)
        cmd = launcher + script + args + ds_args
        # keep for quick debug
        # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
        execute_subprocess_async(cmd, env=self.get_env())
        return output_dir
    @parameterized.expand(stages)
    def test_clm(self, stage):
        # this test exercises model.resize_token_embeddings() which requires param gathering outside
        # of forward - it's not used by `run_translation.py`, but it is in `run_clm.py`
        data_dir = self.tests_dir / "fixtures"
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"""
            --model_name_or_path sshleifer/tiny-gpt2
            --train_file {data_dir}/sample_text.txt
            --validation_file {data_dir}/sample_text.txt
            --output_dir {output_dir}
            --overwrite_output_dir
            --do_train
            --do_eval
            --max_train_samples 16
            --max_eval_samples 16
            --per_device_train_batch_size 2
            --per_device_eval_batch_size 2
            --num_train_epochs 1
            --warmup_steps 8
            --block_size 64
            --fp16
            --report_to none
            """.split()
        ds_args = f"--deepspeed {self.test_file_dir_str}/ds_config_{stage}.json".split()
        script = [f"{self.examples_dir_str}/pytorch/language-modeling/run_clm.py"]
        launcher = self.get_launcher(distributed=True)
        cmd = launcher + script + args + ds_args
        # keep for quick debug
        # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
        execute_subprocess_async(cmd, env=self.get_env())
    def test_clm_from_config_zero3(self):
        # this test exercises AutoModel.from_config(config) - to ensure zero.Init is called
        data_dir = self.tests_dir / "fixtures"
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"""
            --model_type gpt2
            --tokenizer_name sshleifer/tiny-gpt2
            --train_file {data_dir}/sample_text.txt
            --validation_file {data_dir}/sample_text.txt
            --output_dir {output_dir}
            --overwrite_output_dir
            --do_train
            --max_train_samples 4
            --per_device_train_batch_size 2
            --num_train_epochs 1
            --warmup_steps 8
            --block_size 8
            --fp16
            --report_to none
            """.split()
        ds_args = f"--deepspeed {self.test_file_dir_str}/ds_config_zero3.json".split()
        script = [f"{self.examples_dir_str}/pytorch/language-modeling/run_clm.py"]
        launcher = self.get_launcher(distributed=True)
        cmd = launcher + script + args + ds_args
        # keep for quick debug
        # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
        with CaptureStderr() as cs:
            execute_subprocess_async(cmd, env=self.get_env())
        assert "Detected DeepSpeed ZeRO-3" in cs.err
    def get_launcher(self, distributed=False):
        """Return the deepspeed launcher command as an argv list."""
        # 1. explicitly set --num_nodes=1 just in case these tests end up run on a multi-node setup
        # - it won't be able to handle that
        # 2. for now testing with just 2 gpus max (since some quality tests may give different
        # results with mode gpus because we use very little data)
        num_gpus = min(2, get_gpu_count()) if distributed else 1
        return f"deepspeed --num_nodes 1 --num_gpus {num_gpus}".split()
| [
"noreply@github.com"
] | noreply@github.com |
2c095792fbb4ab3f50aaabdd4912343dfe6bba50 | de711dd6fbd6e30c75f2da329c3949e561e37af0 | /Kniznica/games/views.py | 615c6c1597758a15d1ced0d5151ca4f7f1fccb6c | [] | no_license | zurek11/Kniznica | 47be816327065ba1f56566b3d8a7fd5cf42aff3e | 97fba293eae65cd8d2a2935bfb0f6b7818f1fa2b | refs/heads/master | 2022-12-15T16:20:40.985991 | 2020-06-25T08:19:35 | 2020-06-25T08:19:35 | 131,394,918 | 0 | 0 | null | 2022-12-08T09:34:47 | 2018-04-28T09:26:24 | Python | UTF-8 | Python | false | false | 3,600 | py | import logging
import re
import unidecode
from django.core.paginator import Paginator
from django.http import Http404
from django.shortcuts import render
from register.models import Type, Product, Category, StatisticsProductUse
def index(request):
    """Render the games landing page with the paginated list of game products."""
    paged_products = None
    game_products = None
    try:
        game_type = Type.objects.get(name='game')
        game_products = Product.objects.filter(type=game_type).order_by('pk')
    except Type.DoesNotExist:
        logging.error('Game category does not exist!')
    available_categories = Category.objects.filter(products__type__name='game').distinct()
    if game_products:
        requested_page = request.GET.get('page')
        paged_products = Paginator(game_products, 8).get_page(requested_page)
    is_logged_in = False
    current_user = ''
    if request.user.is_authenticated:
        is_logged_in = True
        current_user = request.user
    return render(
        request,
        'games.html',
        {
            'logged': is_logged_in,
            'user_name': current_user,
            'products': paged_products,
            'categories_list': available_categories,
        },
    )
def play(request, index):
    """Render a single game's page and record a play count for logged-in users.

    Args:
        request: the incoming HTTP request.
        index: primary key of the Product to display (the parameter name
            shadows the module-level ``index`` view but is kept because the
            URLconf may pass it by keyword).

    Raises:
        Http404: when no Product with the given id exists.
    """
    logged = False
    user_name = ''
    try:
        product = Product.objects.get(id=index)
    except Product.DoesNotExist:
        # BUGFIX: the lookup raises Product.DoesNotExist, not Type.DoesNotExist;
        # the old handler let a missing product escape as a 500 instead of a 404.
        raise Http404('Stránka neexistuje!')
    categories_list = Category.objects.filter(products__type__name='game').distinct()
    if request.user.is_authenticated:
        logged = True
        user_name = request.user
        # Only track play statistics for authenticated users; previously the
        # get/create ran for anonymous visitors with user_name='' which cannot
        # be assigned to the user foreign key.
        try:
            statistic = StatisticsProductUse.objects.get(user=user_name, product=product)
            statistic.counter += 1
            statistic.save()
        except StatisticsProductUse.DoesNotExist:
            StatisticsProductUse.objects.create(
                user=user_name,
                product=product,
                counter=1
            )
    return render(
        request,
        'game.html',
        {'logged': logged, 'user_name': user_name, 'product': product, 'categories_list': categories_list}
    )
def search(request):
logged = False
user_name = ''
input_text = request.GET.get('input')
input_voice = request.GET.get('input_voice')
input_type = request.GET.get('input_type')
categories = request.GET.getlist('categories')
try:
type_obj = Type.objects.get(name='game')
except Type.DoesNotExist:
raise Http404('Stránka neexistuje!')
products = Product.objects.filter(type=type_obj).order_by('pk')
categories_list = Category.objects.filter(products__type__name='game').distinct()
if input_text:
if input_type == 'title':
products = products.filter(title__icontains=input_text)
elif input_type == 'author':
products = products.filter(author__icontains=input_text)
elif input_type == 'publisher':
products = products.filter(publlisher__icontains=input_text)
elif input_voice:
input_voice = input_voice.lower()
input_voice = re.findall(r"[\w']+", input_voice)
for voice_word in input_voice:
products = products.filter(category__name__icontains=unidecode.unidecode(voice_word))
if categories:
products = products.filter(category__name__in=categories)
paginator = Paginator(products, 8)
page = request.GET.get('page')
products = paginator.get_page(page)
if request.user.is_authenticated:
logged = True
user_name = request.user
return render(
request,
'games.html',
{'logged': logged, 'user_name': user_name, 'products': products, 'categories_list': categories_list}
)
| [
"adamzurek14@gmail.com"
] | adamzurek14@gmail.com |
e53efbe049c094e5be678421fe7672e4b5d7d7ad | 89b74e49ae1fd52aa3445c474fd5293c4e6fd1f5 | /personal_potfolio_project/portfolio/migrations/0002_auto_20210117_1804.py | ce63ec21a387f2fd29955bb56d38d06bb579b95a | [] | no_license | romainquere29/django | f3b749fdb488d3918d9ff24163fc0582d24dbd78 | fa5243fa9eb45efdbc6f901317c5a60a65583d91 | refs/heads/main | 2023-03-02T13:21:32.930178 | 2021-02-13T09:50:45 | 2021-02-13T09:50:45 | 338,542,654 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 342 | py | # Generated by Django 3.1.5 on 2021-01-17 17:04
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('portfolio', '0001_initial'),
]
operations = [
migrations.RenameModel(
old_name='Project',
new_name='MyProject',
),
]
| [
"rquere@tribvn-hc.com"
] | rquere@tribvn-hc.com |
8097cd76dfdb17a5f56a6669d5f06e4bfe40892f | 733d611026eb6c816a8ac35a7f5b397cd07e4e55 | /guestbook/gb_con.py | fb18b056ca979654cf8ba6e5d28f66e2dab0293f | [] | no_license | DeVeom/pyweb | 9bfb26e3256ca255ca7e27520d79b50068ca567a | 8c21eb7171b64fcfa826116b9b37cf605ce488ef | refs/heads/master | 2022-04-03T02:46:40.644838 | 2020-01-14T05:11:52 | 2020-01-14T05:11:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,324 | py | from flask import Flask, request, render_template, session, redirect, url_for
from db_con import *
app = Flask(__name__)
app.config["SECRET_KEY"] = "3vdTWLLW25b05EZCbd_gIQ"
#게시판 컨트롤러
#글쓰기
@app.route('/write/')
def write_con()->'html':
return render_template('write.html')
#글쓰기 컨트롤
@app.route('/write_db/', methods=["POST"])
def write_db_con()->'html':
id = session.get('LOGINID')
newName = request.form['name']
newTitle = request.form['title']
newContent = request.form['content']
Db.writeContent(Db, id, newName, newTitle, newContent)
return redirect(url_for("list_con"))
#글읽기
@app.route('/read/', methods=["GET"])
def read_con()->'html':
no = request.args.get('no')
Db.viewControl(Db, no)
contentData = Db.readContent(Db, no)
return render_template('read.html',
nameHtml = contentData[3],
titleHtml = contentData[4],
contentHtml = contentData[5],
dateHtml = contentData[6],
viewHtml = contentData[7])
#글목록
@app.route('/list/')
def list_con()->'html':
return render_template('list.html', listData = Db.listContent(Db))
#글검색
@app.route('/search/', methods=["POST"])
def search_con()->'html':
searchTarget = request.form['searchTarget']
searchData = request.form['searchData']
return render_template('list.html', listData = Db.searchContent(Db, searchTarget, searchData))
# 글수정
@app.route('/update/', methods=["GET", "POST"])
def update_con()->'html':
if request.method == 'GET':
no = request.args.get('no')
contentData = Db.readContent(Db, no)
return render_template('update.html',
noHtml = contentData[2],
nameHtml = contentData[3],
titleHtml = contentData[4],
contentHtml = contentData[5])
else:
newName = request.form['name']
newTitle = request.form['title']
newContent = request.form['content']
no = request.form['no']
Db.updateContent(Db, newName, newTitle, newContent, no)
return redirect(url_for("read_con", no = no))
#글삭제
@app.route('/delete/', methods=["GET"])
def delete_con()->'html':
no = request.args.get('no')
Db.deleteContent(Db, no)
return redirect(url_for("list_con"))
#로그인 페이지
@app.route('/login/')
def login_con()->'html':
return render_template('login.html')
#로그인 컨트롤
@app.route('/login_db/', methods=["POST"])
def login_db_con()->'html':
inputId = request.form['id']
inputPw = request.form['password']
log = Db.loginCheck(Db, inputId)
if log is False :
print("등록되지 않은 아이디입니다.")
else :
if inputPw != log :
print("비밀번호를 잘못 입력하셨습니다.")
else :
print("로그인 성공!")
session["LOGINID"] = inputId
return redirect(url_for("list_con"))
return redirect(url_for("login_con"))
#로그아웃
@app.route('/logout/')
def logout_con()->'html':
session.pop("LOGINID", None)
return redirect(url_for("list_con"))
app.run(debug = True) | [
"geekhaam@gmail.com"
] | geekhaam@gmail.com |
4243066e38fc996fe0cc5a456c6976896605c8e7 | ebb12c398ec1ecf9abe79ec2a8f7048b0bfaeb36 | /server.py | b74b0425b9d60ec85fb50b169d1d90b93be6430d | [] | no_license | shiyayonn/unscrambl_assessment | ebc95fef9aea1df9996ae460cd39fb46b29603da | 10361428e577ec0d29b80d3172c2aae3a1f3e559 | refs/heads/master | 2022-12-09T07:20:08.159806 | 2020-09-28T10:58:03 | 2020-09-28T10:58:03 | 299,276,631 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,157 | py | import json
from flask import Flask
from flask import jsonify
from methods import *
app = Flask(__name__)
@app.route("/assignment/transaction/<int:id>")
def getTransactionById(id):
data = {}
record = TransactionById(id)
if(record == False):
return jsonify({"Message":"No record was found"}), 404
return record,200
@app.route("/assignment/transactionSummaryByProducts/<int:last_n_days>")
def getTransactionSummaryByProducts(last_n_days):
data = {}
record = TransactionSummaryByProductsByDays(last_n_days)
if(record == False):
return jsonify({"Message":"No record was found"}), 404
return jsonify(summary=json.loads(record)),200
@app.route("/assignment/transactionSummaryByManufacturingCity/<int:last_n_days>")
def getTransactionSummaryByManufacturingCity(last_n_days):
data = {}
record = TransactionSummaryByManufacturingCityByDays(last_n_days)
if(record == False):
return jsonify({"Message":"No record was found"}), 404
return jsonify(summary=json.loads(record)),200
if __name__ == "__main__":
app.run(host='0.0.0.0', port=8080) | [
"ysonconcepcion@gmail.com"
] | ysonconcepcion@gmail.com |
f3bb31c5b3e0097237eea1155694ed19ff7e560d | 176e6ffbf4187cabf0271b446d3a201db4be8c8f | /bodb/authorization.py | 3deb28e077f63cce5afc14a4577e4ff84fe64713 | [] | no_license | uscbp/bodb | 49af42a2a75fae01be0a699b5942466c3959f610 | bf4920f2af813636aef913f1eedbc0e3cdc2ce1e | refs/heads/master | 2021-03-27T11:52:44.372581 | 2016-11-17T14:38:14 | 2016-11-17T14:38:14 | 15,534,301 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,739 | py | from tastypie.authorization import Authorization
from tastypie.exceptions import Unauthorized
from bodb.models import *
class BODBAPIAuthorization(Authorization):
def read_list(self, object_list, bundle):
q=Document.get_security_q(bundle.request.user)
return object_list.filter(q)
def read_detail(self, object_list, bundle):
if bundle.request.user.is_superuser:
return True
else:
if bundle.obj.collator.id==bundle.request.user.id or bundle.obj.public==1:
return True
elif bundle.obj.draft==0:
for group in bundle.request.user.groups.all():
if bundle.obj.collator.groups.filter(id=group.id).exists():
return True
def create_list(self, object_list, bundle):
raise Unauthorized("Sorry, you are not allowed to perform this operation.")
def create_detail(self, object_list, bundle):
usr = bundle.request.user
if usr.is_superuser:
return True
elif isinstance(bundle.obj, SED):
usr.has_perm('bodb.add_sed')
elif isinstance(bundle.obj, BOP):
usr.has_perm('bodb.add_bop')
elif isinstance(bundle.obj, Model):
usr.has_perm('bodb.add_model')
else:
raise Unauthorized("Sorry, you are not allowed to perform this operation.")
def update_list(self, object_list, bundle):
raise Unauthorized("Sorry, you are not allowed to perform this operation.")
def update_detail(self, object_list, bundle):
usr = bundle.request.user
#print usr.is_superuser
if usr.is_superuser:
return True
elif isinstance(bundle.obj, SED):
usr.has_perm('bodb.change_sed')
elif isinstance(bundle.obj, BOP):
usr.has_perm('bodb.change_bop')
elif isinstance(bundle.obj, Model):
usr.has_perm('bodb.change_model')
else:
raise Unauthorized("Sorry, you are not allowed to perform this operation.")
def delete_list(self, object_list, bundle):
raise Unauthorized("Sorry, you are not allowed to perform this operation.")
def delete_detail(self, object_list, bundle):
usr = bundle.request.user
print usr.is_superuser
if usr.is_superuser:
return True
elif isinstance(bundle.obj, SED):
usr.has_perm('bodb.delete_sed')
elif isinstance(bundle.obj, BOP):
usr.has_perm('bodb.delete_bop')
elif isinstance(bundle.obj, Model):
usr.has_perm('bodb.delete_model')
else:
raise Unauthorized("Sorry, you are not allowed to perform this operation.") | [
"mwinter@unboundedpress.org"
] | mwinter@unboundedpress.org |
64b3b19617750c864e2a4ba97f8d5091be077485 | 23bd415bbe8a4c714a0f59b42ff3c533d7dffbb2 | /vot_siamfc_3D_v1_2/siamfc_mine/dataloader.py | 1ab3a9b8f6c8c557a9f9f196e0751ff69ad59c24 | [] | no_license | khw11044/PlenOpticVot_Siamfc_2020 | 04269e1881ceba550975c0a84d489b6a5f0244f8 | 6315eda7e54e5ee8e77e9eb55ffddeef0fc9b1b2 | refs/heads/master | 2023-07-04T02:11:19.375740 | 2021-07-31T12:59:19 | 2021-07-31T12:59:19 | 312,847,472 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,768 | py | import os
import argparse
import cv2
import numpy as np
from glob import glob
from PIL import Image
def dataLoader_img(video_name,locateframe):
filelist = []
root_dir = video_name
for (root, dirs, files) in os.walk(root_dir):
if len(dirs) > 0:
for dir_name in dirs:
if dir_name == 'images' :
file = root +'/'+dir_name + '/' + locateframe +'.png'
#print(" root : " + file)
filelist.append(file)
return filelist
def dataLoader_focal(video_name,locateframe):
filelist = []
root_dir = video_name
for (root, dirs, files) in os.walk(root_dir):
#print(" root : " + root)
if len(dirs) > 0:
for dir_name in dirs:
#print(" root : " + dir_name)
if dir_name == 'focal' :
file = root +'/'+dir_name + '/' + locateframe +'.png'
#print(" root : " + file)
filelist.append(file)
return filelist
def listdirLoader(root):
files = []
#root = '../siamfc-pytorch/tools/data/NonVideo4_tiny'
path = os.listdir(root)
return path
def AllfocalLoader(root):
local = []
for f, frame in enumerate(os.listdir(root)):
local.append(root + '/' + frame + '/focal') # + '/focal'
return local
def AllframeLoader(root): #모든 프레임 폴더 [NonVideo4/000 001 002 003 .....]
local = []
for f, frame in enumerate(os.listdir(root)):
local.append(root + '/' + frame) # + '/focal'
return local
if __name__ == "__main__":
video_name = '../siamfc-pytorch/tools/data/NonVideo4_tiny'
locateframe ='005'
#dataLoader(locateframe)
print(dataLoader_img(video_name,locateframe)) | [
"khw11044@gmail.com"
] | khw11044@gmail.com |
e49ef0beb9df247be1e2e07453f35d25f74ddbbc | 52b6560a9bc096df7803459c92a261d4878e3377 | /earleyparser.py | d404628055ed13996510e51af5279cd662583d3a | [] | no_license | ajeya-bhat/new_earley | e870a94791132737b06466ab06bd1df926abe4cc | 83fc9d326268a628e32f5087edd39fb85d846d4d | refs/heads/master | 2023-04-09T08:57:22.267629 | 2021-04-08T16:52:21 | 2021-04-08T16:52:21 | 355,962,516 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,285 | py | import argparse
import sys
import os
import string
from collections import defaultdict
from nltk.tree import Tree
class Rule(object):
"""
Represents a CFG rule.
"""
def __init__(self, lhs, rhs):
# Represents the rule 'lhs -> rhs', where lhs is a non-terminal and
# rhs is a list of non-terminals and terminals.
self.lhs, self.rhs = lhs, rhs
def __contains__(self, sym):
return sym in self.rhs
def __eq__(self, other):
if type(other) is Rule:
return self.lhs == other.lhs and self.rhs == other.rhs
return False
def __getitem__(self, i):
return self.rhs[i]
def __len__(self):
return len(self.rhs)
def __repr__(self):
return self.__str__()
def __str__(self):
return self.lhs + ' -> ' + ' '.join(self.rhs)
class Grammar(object):
"""
Represents a CFG.
"""
def __init__(self):
# The rules are represented as a dictionary from L.H.S to R.H.S.
self.rules = defaultdict(list)
def add(self, rule):
"""
Adds the given rule to the grammar.
"""
self.rules[rule.lhs].append(rule)
@staticmethod
def load_grammar(fpath):
"""
Loads the grammar from file (from the )
"""
grammar = Grammar()
with open(fpath,"r") as f:
for line in f:
line = line.strip()
if len(line) == 0:
continue
entries = line.split('->')
lhs = entries[0].strip()
for rhs in entries[1].split('|'):
grammar.add(Rule(lhs, rhs.strip().split()))
return grammar
def __repr__(self):
return self.__str__()
def __str__(self):
s = [str(r) for r in self.rules['S']]
for nt, rule_list in self.rules.iteritems():
if nt == 'S':
continue
s += [str(r) for r in rule_list]
return '\n'.join(s)
# Returns the rules for a given Non-terminal.
def __getitem__(self, nt):
return self.rules[nt]
def is_terminal(self, sym):
"""
Checks is the given symbol is terminal.
"""
return len(self.rules[sym]) == 0
def is_tag(self, sym):
"""
Checks whether the given symbol is a tag, i.e. a non-terminal with rules
to solely terminals.
"""
if not self.is_terminal(sym):
return all(self.is_terminal(s) for r in self.rules[sym] for s in
r.rhs)
return False
class EarleyState(object):
"""
Represents a state in the Earley algorithm.
"""
GAM = '<GAM>'
def __init__(self, rule, dot=0, sent_pos=0, chart_pos=0, back_pointers=[]):
# CFG Rule.
self.rule = rule
# Dot position in the rule.
self.dot = dot
# Sentence position.
self.sent_pos = sent_pos
# Chart index.
self.chart_pos = chart_pos
# Pointers to child states (if the given state was generated using
# Completer).
self.back_pointers = back_pointers
def __eq__(self, other):
if type(other) is EarleyState:
return self.rule == other.rule and self.dot == other.dot and \
self.sent_pos == other.sent_pos
return False
def __len__(self):
return len(self.rule)
def __repr__(self):
return self.__str__()
def __str__(self):
def str_helper(state):
return ('(' + state.rule.lhs + ' -> ' +
' '.join(state.rule.rhs[:state.dot] + ['*'] +
state.rule.rhs[state.dot:]) +
(', [%d, %d])' % (state.sent_pos, state.chart_pos)))
return (str_helper(self) +
' (' + ', '.join(str_helper(s) for s in self.back_pointers) + ')')
def next(self):
"""
Return next symbol to parse, i.e. the one after the dot
"""
if self.dot < len(self):
return self.rule[self.dot]
def is_complete(self):
"""
Checks whether the given state is complete.
"""
return len(self) == self.dot
@staticmethod
def init():
"""
Returns the state used to initialize the chart in the Earley algorithm.
"""
return EarleyState(Rule(EarleyState.GAM, ['S']))
class ChartEntry(object):
"""
Represents an entry in the chart used by the Earley algorithm.
"""
def __init__(self, states):
# List of Earley states.
self.states = states
def __iter__(self):
return iter(self.states)
def __len__(self):
return len(self.states)
def __repr__(self):
return self.__str__()
def __str__(self):
return '\n'.join(str(s) for s in self.states)
def add(self, state):
"""
Add the given state (if it hasn't already been added).
"""
if state not in self.states:
self.states.append(state)
class Chart(object):
"""
Represents the chart used in the Earley algorithm.
"""
def __init__(self, entries):
# List of chart entries.
self.entries = entries
def __getitem__(self, i):
return self.entries[i]
def __len__(self):
return len(self.entries)
def __repr__(self):
return self.__str__()
def __str__(self):
return '\n\n'.join([("Chart[%d]:\n" % i) + str(entry) for i, entry in
enumerate(self.entries)])
@staticmethod
def init(l):
"""
Initializes a chart with l entries (Including the dummy start state).
"""
return Chart([(ChartEntry([]) if i > 0 else
ChartEntry([EarleyState.init()])) for i in range(l)])
class EarleyParse(object):
"""
Represents the Earley-generated parse for a given sentence according to a
given grammar.
"""
def __init__(self, sentence, grammar):
self.words = sentence.split()
self.grammar = grammar
self.chart = Chart.init(len(self.words) + 1)
def predictor(self, state, pos):
"""
Earley Predictor.
"""
for rule in self.grammar[state.next()]:
self.chart[pos].add(EarleyState(rule, dot=0,
sent_pos=state.chart_pos, chart_pos=state.chart_pos))
def scanner(self, state, pos):
"""
Earley Scanner.
"""
if state.chart_pos < len(self.words):
word = self.words[state.chart_pos]
if any((word in r) for r in self.grammar[state.next()]):
self.chart[pos + 1].add(EarleyState(Rule(state.next(), [word]),
dot=1, sent_pos=state.chart_pos,
chart_pos=(state.chart_pos + 1)))
def completer(self, state, pos):
"""
Earley Completer.
"""
for prev_state in self.chart[state.sent_pos]:
if prev_state.next() == state.rule.lhs:
self.chart[pos].add(EarleyState(prev_state.rule,
dot=(prev_state.dot + 1), sent_pos=prev_state.sent_pos,
chart_pos=pos,
back_pointers=(prev_state.back_pointers + [state])))
def parse(self):
"""
Parses the sentence by running the Earley algorithm and filling out the
chart.
"""
# Checks whether the next symbol for the given state is a tag.
def is_tag(state):
return self.grammar.is_tag(state.next())
for i in range(len(self.chart)):
for state in self.chart[i]:
if not state.is_complete():
if is_tag(state):
self.scanner(state, i)
else:
self.predictor(state, i)
else:
self.completer(state, i)
def has_parse(self):
"""
Checks whether the sentence has a parse.
"""
for state in self.chart[-1]:
if state.is_complete() and state.rule.lhs == 'S' and \
state.sent_pos == 0 and state.chart_pos == len(self.words):
return True
return False
def get(self):
"""
Returns the parse if it exists, otherwise returns None.
"""
def get_helper(state):
if self.grammar.is_tag(state.rule.lhs):
return Tree(state.rule.lhs, [state.rule.rhs[0]])
return Tree(state.rule.lhs,
[get_helper(s) for s in state.back_pointers])
for state in self.chart[-1]:
if state.is_complete() and state.rule.lhs == 'S' and \
state.sent_pos == 0 and state.chart_pos == len(self.words):
return get_helper(state)
return None
def main():
"""
Main.
"""
parser_description = ("Runs the Earley parser according to a given "
"grammar.")
parser = argparse.ArgumentParser(description=parser_description)
parser.add_argument('draw', nargs='?', default=False)
parser.add_argument('grammar_file', help="Filepath to grammer file")
args = parser.parse_args()
print(args.grammar_file)
grammar = Grammar.load_grammar(args.grammar_file)
def run_parse(sentence):
parse = EarleyParse(sentence, grammar)
parse.parse()
return parse.get()
while True:
try:
sentence = input()
# Strip the sentence of any puncutation.
stripped_sentence = sentence
for p in string.punctuation:
stripped_sentence = stripped_sentence.replace(p, '')
parse = run_parse(stripped_sentence)
if parse is None:
print(sentence + '\n')
else:
if args.draw:
parse.draw()
else:
parse.pretty_print()
except EOFError:
sys.exit()
if args.draw:
sys.exit()
if __name__ == '__main__':
main()
| [
"ajeyabs@gmail.com"
] | ajeyabs@gmail.com |
eb8c10b039bfd5279a3863efc1aa2b22a486d2c9 | 971348f300d55be3c83580fa17a86939ffd00de1 | /assets/migrations/0002_auto_20171208_1359.py | 1d7f98f9864002a55f3196e4ca769b34cae6cbfb | [] | no_license | UnicornSymbol/hxoms | 6eef8c04e4eda81b2d3c59a21ba3c3c3c87910c9 | 6b12b1926df46d77c779cdd3009ae0246447b66f | refs/heads/master | 2021-05-14T20:23:52.037501 | 2018-03-16T07:42:11 | 2018-03-16T07:42:11 | 113,306,083 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,898 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2017-12-08 13:59
from __future__ import unicode_literals
import assets.models
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('assets', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='supplier',
name='contact',
),
migrations.RemoveField(
model_name='supplier',
name='phone',
),
migrations.AddField(
model_name='service',
name='backstage',
field=models.CharField(blank=True, max_length=128, null=True, verbose_name='\u540e\u53f0\u5730\u5740'),
),
migrations.AddField(
model_name='supplier',
name='bus_phone',
field=models.CharField(blank=True, max_length=20, null=True, verbose_name='\u4e1a\u52a1\u8054\u7cfb\u4eba\u7535\u8bdd'),
),
migrations.AddField(
model_name='supplier',
name='business',
field=models.CharField(blank=True, max_length=50, null=True, verbose_name='\u4e1a\u52a1\u8054\u7cfb\u4eba'),
),
migrations.AddField(
model_name='supplier',
name='contract',
field=models.FileField(blank=True, null=True, upload_to=assets.models.contract_dir_path, verbose_name='\u5408\u540c'),
),
migrations.AddField(
model_name='supplier',
name='tec_phone',
field=models.CharField(blank=True, max_length=20, null=True, verbose_name='\u6280\u672f\u8054\u7cfb\u4eba\u7535\u8bdd'),
),
migrations.AddField(
model_name='supplier',
name='technical',
field=models.CharField(blank=True, max_length=50, null=True, verbose_name='\u4e1a\u52a1\u8054\u7cfb\u4eba'),
),
]
| [
"623882484@qq.com"
] | 623882484@qq.com |
5c40044346818fa4a6f0a6df9b47198cae9db72d | 965a4651890f8ac0a9f4c7f315529f36ab659268 | /etwist/etwist.py | 924c1eda1affbcb9bdaf1daaff560f100aa818c7 | [
"Apache-2.0"
] | permissive | sabaini/emkuu | d27aba3e31ea8734d6b1bebc13f8e48db00ad7da | 4953ff96e15eeb1659c16400e5f61be0daf277b4 | refs/heads/master | 2021-03-12T22:15:47.372700 | 2010-08-13T17:26:29 | 2010-08-13T17:26:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,689 | py | import logging
from xml import sax
from zope.interface import implements
from zope.component import getGlobalSiteManager, getUtility, queryUtility
import twisted
from twisted.internet.protocol import Protocol, ClientFactory
from twisted.internet import reactor, defer
from twisted.python import components
import emkuupy, emkuupy.ct
import interfaces
class NullHandler(logging.Handler):
def emit(self, record):
pass
log = logging.getLogger("etwist")
log.addHandler(NullHandler())
class EmkuuProtocol(Protocol):
implements(interfaces.IEmkuuMessageSource, interfaces.IEmkuuMessageSink)
# NOP; override or set this
messageCallback = lambda a: a
# The messageCallback may return a Deferred in which case ack of the message
# will be sent when the Deferred fires
def __init__(self, parser):
self.parser = parser
self.okmap = {}
self.cbmap = {}
def connectionMade(self):
log.debug("con made")
self.transport.write(emkuupy.ct.STREAMHEAD)
def initCallback(self, inimsg):
log.debug("inimsg: %s", inimsg)
appname, cnt = inimsg.split(':')
self.msgid = [appname, int(cnt)]
def _sendOk(self, msg):
okmsg = emkuupy.messages.OkMsg()
okmsg.corrid = None
okmsg.msgid = msg.msgid
log.debug("send: %s : %s", okmsg, msg)
self.transport.write(str(okmsg.to_xml()))
def msgCallback(self, msg):
if isinstance(msg, emkuupy.messages.OkMsg):
msgid = msg.msgid
log.debug("got ok %s", msgid)
if self.okmap.has_key(msgid):
d = self.okmap.get(msgid)
d.callback(msgid)
del self.okmap[msgid]
else:
log.debug("regular msg: %s", msg)
# handle message callbacks
corrid = msg.corrid
if self.cbmap.has_key(corrid):
origmsg, cb = self.cbmap[corrid]
cb(origmsg, msg)
if not isinstance(emkuupy.messages.SubscribeMsg):
del self.cbmap[corrid]
# pass it on to the message receiver
d = defer.maybeDeferred(self.messageCallback, msg)
d.pause()
d.addCallback(self._sendOk)
d.unpause()
def dataReceived(self, data):
log.debug("datarcvd: %s", data)
self.parser.feed(data)
def send(self, msg, correlationCallback=None):
log.debug("sending %s, msgid: %s", msg, self.msgid)
msg.msgid = "%s:%s" % tuple(self.msgid)
if msg.corrid < 0:
msg.corrid = self.msgid[1]
self.msgid[1] += 1
self.transport.write(str(msg.to_xml()))
if correlationCallback:
self.cbmap[msg.corrid] = (msg, correlationCallback)
ackDefer = defer.Deferred()
self.okmap[msg.msgid] = ackDefer
return ackDefer
class EmkuuClientFactory(ClientFactory):
appname = "(set me)"
def buildProtocol(self, addr):
log.debug("buildproto: %s", addr)
parser = sax.make_parser()
parser.setFeature(sax.handler.feature_namespaces, 1)
self.proto = proto = EmkuuProtocol(parser)
proto.contentHandler = emkuupy.ct.EmkuuContentHandler(proto.msgCallback, proto.initCallback)
parser.setContentHandler(proto.contentHandler)
gsm = getGlobalSiteManager()
gsm.registerUtility(proto, interfaces.IEmkuuMessageSink, self.appname)
gsm.registerUtility(proto, interfaces.IEmkuuMessageSource, self.appname)
return proto
if __name__ == '__main__':
LOGFILE="/tmp/etwist.log"
FORMAT="%(levelname)s %(asctime)s %(process)d %(name)s %(funcName)s %(message)s"
logging.basicConfig(filename=LOGFILE,
level=logging.DEBUG,
format=FORMAT)
log = logging.getLogger("etwist")
log.debug("etwist start")
observer = twisted.python.log.PythonLoggingObserver()
observer.start()
f = EmkuuClientFactory()
f.appname = 'etwist'
reactor.connectTCP("localhost", 2354, f)
def later():
def g(msg):
log.debug("he said: %s", getattr(msg, 'body', 'no body'))
return msg
sink = getUtility(interfaces.IEmkuuMessageSink, f.appname)
source = getUtility(interfaces.IEmkuuMessageSource, f.appname)
sink.messageCallback = g
msg = emkuupy.messages.GetMsg()
msg.to = emkuupy.messages.Uri("/emkuu/foo")
d = source.send(msg)
d.addCallback(log.debug)
reactor.callLater(2, later)
reactor.run()
| [
"peter@sabaini.at"
] | peter@sabaini.at |
27da08cfa78217f9a5c0fc73b6cccf72ff2e25ac | 69a2f0c4419d0bf39d2fe46e8ff2ee117eaf237a | /mutilprocess/test.py | 5b992fe9cd521106dc327da20aafd0555f827fc5 | [] | no_license | lxy5513/python | 7da339e8ef6e2fa827e2da723c0f4e3595e11e04 | 228c3e631e642228de659e68f98ea938bcb2509f | refs/heads/master | 2020-03-27T03:21:03.582842 | 2020-01-17T00:39:57 | 2020-01-17T00:39:57 | 145,855,837 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 817 | py | import multiprocessing
import time
import collections
Queue = collections.deque(maxlen=10)
def consume(interval):
while True:
print("Queue: ", Queue)
if len(Queue) == 0:
print("no data")
time.sleep(0.5)
else:
num = Queue.pop()
print("Num: ", num)
time.sleep(0.5)
print("worker_1")
time.sleep(interval)
print("end worker_1")
def productor(interval):
while True:
print("productor")
time.sleep(interval)
Queue.append(1)
print("length of queue is: ", len(Queue))
print("end worker_2")
if __name__ == "__main__":
p1 = multiprocessing.Process(target = consume, args = (2,))
p2 = multiprocessing.Process(target = productor, args = (3,))
p1.start()
p2.start()
| [
"lxy5513@gmail.com"
] | lxy5513@gmail.com |
d47788e04577cf96cfacf288e50dc9c0ec8ba4ad | 5075934450c3b267a347a01604b68e6300dd9a63 | /Tarea_4/Array.py | 66fe1f0a5f92f8ea3323b62afebb97b95d4a6274 | [] | no_license | JarethManrique/edd_1310_2021 | b68a7002bf46ad25a5c91e6b1cb09870de13a3d7 | 5a01948cc39c2a5a05873d5f5abcf4b8004bd4e1 | refs/heads/master | 2023-02-22T14:09:48.163755 | 2021-01-26T18:55:11 | 2021-01-26T18:55:11 | 299,690,191 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,053 | py | class Array:
def __init__(self,tam):
self.__info = [0 for x in range(tam) ]
def get_item(self,posicion):
dato = -1
try:
dato = self.__info[posicion]
except Exception as e:
print("error de posicion")
dato = "error"
return dato
def set_item(self,indice,posicion):
try:
self.__info[posicion]
except Exception as e:
print("error de posicion")
def get_length(self):
return len(self.__info)
def __iter__(self):
return _IteradorArreglo(self.__info)
def Clear(self,dato):
self.__info=[dato for x in range(len(self.__info))]
class _IteradorArreglo():
def __init__(self,arr):
self.__arr = arr
self.__indice = 0
def __iter__(self):
return self
def __next__(self):
if self.__indice < len(self.__arr):
dato = self.__arr[self.__indice]
self.__indice += 1
return dato
else :
raise StopIteration
| [
"yarethmanrique@gmail.com"
] | yarethmanrique@gmail.com |
9da339758ae02de67a2fc145c94fe84fa5a78291 | b540881f6d6db128f1cc30e6ab88d1d686a8bb59 | /train_tb.py | 6fe4a357d58acf1c82474b95fdace042f8f26a34 | [
"Apache-2.0",
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause"
] | permissive | TracelessLe/stylegan2-pytorch | 2a1ec2c6db32d516437d2c821104dcf90ecf73cf | 484237d2b971906cfaabaf6bca6d225afb99a754 | refs/heads/master | 2023-08-30T14:20:03.299685 | 2021-10-18T13:31:07 | 2021-10-18T13:31:07 | 416,154,508 | 0 | 0 | NOASSERTION | 2021-10-12T02:26:45 | 2021-10-12T02:26:44 | null | UTF-8 | Python | false | false | 18,862 | py | import argparse
import math
import random
import os
import time
import numpy as np
import torch
from torch import nn, autograd, optim
from torch.nn import functional as F
from torch.utils import data
import torch.distributed as dist
from torchvision import transforms, utils
from tqdm import tqdm
from tensorboardX import SummaryWriter
try:
import wandb
except ImportError:
wandb = None
from dataset import MultiResolutionDataset
from distributed import (
get_rank,
synchronize,
reduce_loss_dict,
reduce_sum,
get_world_size,
)
from op import conv2d_gradfix
from non_leaking import augment, AdaptiveAugment
def data_sampler(dataset, shuffle, distributed):
    """Pick the sampler matching the run configuration.

    Distributed runs always get a ``DistributedSampler``; single-process
    runs fall back to random or sequential ordering based on ``shuffle``.
    """
    if distributed:
        return data.distributed.DistributedSampler(dataset, shuffle=shuffle)

    sampler_cls = data.RandomSampler if shuffle else data.SequentialSampler
    return sampler_cls(dataset)
def requires_grad(model, flag=True):
    """Enable or disable gradient tracking for every parameter of *model*."""
    for param in model.parameters():
        param.requires_grad = flag
def accumulate(model1, model2, decay=0.999):
    """Update *model1* in place as an exponential moving average of *model2*.

    Each parameter of ``model1`` becomes ``decay * p1 + (1 - decay) * p2``;
    used to maintain the EMA generator (``g_ema``) during training.
    """
    ema_params = dict(model1.named_parameters())
    src_params = dict(model2.named_parameters())

    for name, p_ema in ema_params.items():
        p_ema.data.mul_(decay).add_(src_params[name].data, alpha=1 - decay)
def sample_data(loader):
    """Yield batches from *loader* forever, restarting it at each epoch end."""
    while True:
        yield from loader
def d_logistic_loss(real_pred, fake_pred):
    """Logistic discriminator loss (StyleGAN2 training objective).

    Softplus pushes discriminator scores on real samples up and scores on
    generated samples down; the two terms are averaged over the batch.
    """
    loss_real = F.softplus(-real_pred).mean()
    loss_fake = F.softplus(fake_pred).mean()
    return loss_real + loss_fake
def d_r1_loss(real_pred, real_img):
    """R1 gradient penalty on real images.

    Returns the batch-mean squared norm of d(real_pred)/d(real_img).
    Weight gradients are suppressed inside the context manager so only the
    input gradient is materialized.
    """
    with conv2d_gradfix.no_weight_gradients():
        (grad_real,) = autograd.grad(
            outputs=real_pred.sum(), inputs=real_img, create_graph=True
        )

    batch = grad_real.shape[0]
    return grad_real.pow(2).reshape(batch, -1).sum(1).mean()
def g_nonsaturating_loss(fake_pred):
    """Non-saturating generator loss: mean softplus(-D(G(z))) over the batch."""
    return F.softplus(-fake_pred).mean()
def g_path_regularize(fake_img, latents, mean_path_length, decay=0.01):
    """Path-length regularizer for the generator (StyleGAN2).

    Estimates the Jacobian norm of the generator w.r.t. its latents via a
    random image-space projection, tracks a running mean of the path
    lengths, and penalizes deviation from that mean.

    Returns (path_penalty, detached updated mean, per-sample path lengths).
    """
    height, width = fake_img.shape[2], fake_img.shape[3]
    noise = torch.randn_like(fake_img) / math.sqrt(height * width)

    (grad,) = autograd.grad(
        outputs=(fake_img * noise).sum(), inputs=latents, create_graph=True
    )
    path_lengths = grad.pow(2).sum(2).mean(1).sqrt()

    # Exponential moving average of the observed path length.
    path_mean = mean_path_length + decay * (path_lengths.mean() - mean_path_length)
    path_penalty = (path_lengths - path_mean).pow(2).mean()

    return path_penalty, path_mean.detach(), path_lengths
def make_noise(batch, latent_dim, n_noise, device):
    """Sample latent noise for the generator.

    Returns a single ``(batch, latent_dim)`` tensor when ``n_noise`` is 1,
    otherwise a tuple of ``n_noise`` such tensors.
    """
    if n_noise == 1:
        return torch.randn(batch, latent_dim, device=device)

    return torch.randn(n_noise, batch, latent_dim, device=device).unbind(0)
def mixing_noise(batch, latent_dim, prob, device):
    """Draw latent codes for style-mixing regularization.

    With probability ``prob`` two independent latent codes are returned
    (mixing path); otherwise a single code wrapped in a list.
    """
    if prob > 0 and random.random() < prob:
        return make_noise(batch, latent_dim, 2, device)

    return [make_noise(batch, latent_dim, 1, device)]
def set_grad_none(model, targets):
    """Clear (set to None) the gradients of the parameters named in *targets*."""
    for name, param in model.named_parameters():
        if name in targets:
            param.grad = None
def train(args, loader, generator, discriminator, g_optim, d_optim, g_ema, device):
    """StyleGAN2 training loop.

    Alternates discriminator and generator steps, applies lazy R1 and
    path-length regularization every ``d_reg_every`` / ``g_reg_every``
    iterations, keeps an exponential moving average of the generator in
    ``g_ema``, and (on rank 0) logs to TensorBoard/wandb and writes
    checkpoints.
    """
    loader = sample_data(loader)  # infinite batch iterator
    pbar = range(args.iter)
    # tensorboard
    date = time.strftime("%Y%m%d%H", time.localtime())
    writer = SummaryWriter(f'runs/{args.arch}_{date}')
    if get_rank() == 0:
        pbar = tqdm(pbar, initial=args.start_iter, dynamic_ncols=True, smoothing=0.01)
    mean_path_length = 0
    d_loss_val = 0
    r1_loss = torch.tensor(0.0, device=device)
    g_loss_val = 0
    path_loss = torch.tensor(0.0, device=device)
    path_lengths = torch.tensor(0.0, device=device)
    mean_path_length_avg = 0
    loss_dict = {}
    # Unwrap DDP wrappers so EMA accumulation / checkpointing use the bare modules.
    if args.distributed:
        g_module = generator.module
        d_module = discriminator.module
    else:
        g_module = generator
        d_module = discriminator
    accum = 0.5 ** (32 / (10 * 1000))  # g_ema decay per step
    ada_aug_p = args.augment_p if args.augment_p > 0 else 0.0
    r_t_stat = 0
    if args.augment and args.augment_p == 0:
        # augment_p == 0 means "adaptive": tune the probability from D's output.
        ada_augment = AdaptiveAugment(args.ada_target, args.ada_length, 8, device)
    # Fixed latents so preview grids are comparable across iterations.
    sample_z = torch.randn(args.n_sample, args.latent, device=device)
    for idx in pbar:
        i = idx + args.start_iter
        if i > args.iter:
            print("Done!")
            break
        real_img = next(loader)
        real_img = real_img.to(device)
        # ---- Discriminator step ----
        requires_grad(generator, False)
        requires_grad(discriminator, True)
        noise = mixing_noise(args.batch, args.latent, args.mixing, device)
        fake_img, _ = generator(noise)
        if args.augment:
            real_img_aug, _ = augment(real_img, ada_aug_p)
            fake_img, _ = augment(fake_img, ada_aug_p)
        else:
            real_img_aug = real_img
        fake_pred = discriminator(fake_img)
        real_pred = discriminator(real_img_aug)
        d_loss = d_logistic_loss(real_pred, fake_pred)
        loss_dict["d"] = d_loss
        loss_dict["real_score"] = real_pred.mean()
        loss_dict["fake_score"] = fake_pred.mean()
        discriminator.zero_grad()
        d_loss.backward()
        d_optim.step()
        if args.augment and args.augment_p == 0:
            ada_aug_p = ada_augment.tune(real_pred)
            r_t_stat = ada_augment.r_t_stat
        # Lazy R1 regularization on real images (every d_reg_every steps).
        d_regularize = i % args.d_reg_every == 0
        if d_regularize:
            real_img.requires_grad = True
            if args.augment:
                real_img_aug, _ = augment(real_img, ada_aug_p)
            else:
                real_img_aug = real_img
            real_pred = discriminator(real_img_aug)
            r1_loss = d_r1_loss(real_pred, real_img)
            discriminator.zero_grad()
            # "+ 0 * real_pred[0]" keeps the output in the graph for DDP.
            (args.r1 / 2 * r1_loss * args.d_reg_every + 0 * real_pred[0]).backward()
            d_optim.step()
        loss_dict["r1"] = r1_loss
        # ---- Generator step ----
        requires_grad(generator, True)
        requires_grad(discriminator, False)
        noise = mixing_noise(args.batch, args.latent, args.mixing, device)
        fake_img, _ = generator(noise)
        if args.augment:
            fake_img, _ = augment(fake_img, ada_aug_p)
        fake_pred = discriminator(fake_img)
        g_loss = g_nonsaturating_loss(fake_pred)
        loss_dict["g"] = g_loss
        generator.zero_grad()
        g_loss.backward()
        g_optim.step()
        # Lazy path-length regularization (smaller batch to save memory).
        g_regularize = i % args.g_reg_every == 0
        if g_regularize:
            path_batch_size = max(1, args.batch // args.path_batch_shrink)
            noise = mixing_noise(path_batch_size, args.latent, args.mixing, device)
            fake_img, latents = generator(noise, return_latents=True)
            path_loss, mean_path_length, path_lengths = g_path_regularize(
                fake_img, latents, mean_path_length
            )
            generator.zero_grad()
            weighted_path_loss = args.path_regularize * args.g_reg_every * path_loss
            if args.path_batch_shrink:
                # Dummy term so every output participates in backward (DDP).
                weighted_path_loss += 0 * fake_img[0, 0, 0, 0]
            weighted_path_loss.backward()
            g_optim.step()
            mean_path_length_avg = (
                reduce_sum(mean_path_length).item() / get_world_size()
            )
        loss_dict["path"] = path_loss
        loss_dict["path_length"] = path_lengths.mean()
        # EMA of generator weights into g_ema.
        accumulate(g_ema, g_module, accum)
        # Average the logged losses across ranks.
        loss_reduced = reduce_loss_dict(loss_dict)
        d_loss_val = loss_reduced["d"].mean().item()
        g_loss_val = loss_reduced["g"].mean().item()
        r1_val = loss_reduced["r1"].mean().item()
        path_loss_val = loss_reduced["path"].mean().item()
        real_score_val = loss_reduced["real_score"].mean().item()
        fake_score_val = loss_reduced["fake_score"].mean().item()
        path_length_val = loss_reduced["path_length"].mean().item()
        if get_rank() == 0:
            pbar.set_description(
                (
                    f"d: {d_loss_val:.4f}; g: {g_loss_val:.4f}; r1: {r1_val:.4f}; "
                    f"path: {path_loss_val:.4f}; mean path: {mean_path_length_avg:.4f}; "
                    f"augment: {ada_aug_p:.4f}"
                )
            )
            if wandb and args.wandb:
                wandb.log(
                    {
                        "Generator": g_loss_val,
                        "Discriminator": d_loss_val,
                        "Augment": ada_aug_p,
                        "Rt": r_t_stat,
                        "R1": r1_val,
                        "Path Length Regularization": path_loss_val,
                        "Mean Path Length": mean_path_length,
                        "Real Score": real_score_val,
                        "Fake Score": fake_score_val,
                        "Path Length": path_length_val,
                    }
                )
            if i % 100 == 0:
                # Periodic preview from the EMA generator on the fixed latents,
                # plus scalar logging to TensorBoard.
                with torch.no_grad():
                    g_ema.eval()
                    sample, _ = g_ema([sample_z])
                    #utils.save_image(
                    #    sample,
                    #    f"sample/{str(i).zfill(6)}.png",
                    #    nrow=int(args.n_sample ** 0.5),
                    #    normalize=True,
                    #    range=(-1, 1),
                    #)
                    writer.add_scalar(
                        'loss/g_loss', g_loss_val, i)
                    writer.add_scalar(
                        'loss/d_loss', d_loss_val, i)
                    writer.add_scalar(
                        'loss/path_loss', path_loss_val, i)
                    writer.add_scalar(
                        'loss/r1', r1_val, i)
                    writer.add_scalar(
                        'loss/mean_path_length_avg', mean_path_length_avg, i)
                    writer.add_scalar(
                        'loss/real_score', real_score_val, i)
                    writer.add_scalar(
                        'loss/fake_score', fake_score_val, i)
                    writer.add_scalar(
                        'loss/path_length', path_length_val, i)
                    # NOTE(review): make_grid's "range" kwarg was renamed
                    # "value_range" in newer torchvision -- confirm the pinned
                    # torchvision version still accepts it.
                    img_grid = utils.make_grid(sample, nrow=int(args.n_sample ** 0.5), normalize=True, range=(-1, 1))
                    writer.add_image('result', img_grid, i)
            if i % 10000 == 0:
                torch.save(
                    {
                        "g": g_module.state_dict(),
                        "d": d_module.state_dict(),
                        "g_ema": g_ema.state_dict(),
                        "g_optim": g_optim.state_dict(),
                        "d_optim": d_optim.state_dict(),
                        "args": args,
                        "ada_aug_p": ada_aug_p,
                    },
                    f"checkpoint/{str(i).zfill(6)}.pt",
                )
if __name__ == "__main__":
    device = "cuda"
    parser = argparse.ArgumentParser(description="StyleGAN2 trainer")
    # ---- CLI arguments ----
    parser.add_argument("path", type=str, help="path to the lmdb dataset")
    parser.add_argument('--arch', type=str, default='stylegan2_gpen', help='model architectures (stylegan2_gpen | stylegan2 | swagan)')
    parser.add_argument(
        "--iter", type=int, default=800000, help="total training iterations"
    )
    parser.add_argument(
        "--batch", type=int, default=16, help="batch sizes for each gpus"
    )
    parser.add_argument(
        "--n_sample",
        type=int,
        default=64,
        help="number of the samples generated during training",
    )
    parser.add_argument(
        "--size", type=int, default=256, help="image sizes for the model"
    )
    parser.add_argument(
        "--r1", type=float, default=10, help="weight of the r1 regularization"
    )
    parser.add_argument(
        "--path_regularize",
        type=float,
        default=2,
        help="weight of the path length regularization",
    )
    parser.add_argument(
        "--path_batch_shrink",
        type=int,
        default=2,
        help="batch size reducing factor for the path length regularization (reduce memory consumption)",
    )
    parser.add_argument(
        "--d_reg_every",
        type=int,
        default=16,
        help="interval of the applying r1 regularization",
    )
    parser.add_argument(
        "--g_reg_every",
        type=int,
        default=4,
        help="interval of the applying path length regularization",
    )
    parser.add_argument(
        "--mixing", type=float, default=0.9, help="probability of latent code mixing"
    )
    parser.add_argument(
        "--ckpt",
        type=str,
        default=None,
        help="path to the checkpoints to resume training",
    )
    parser.add_argument("--lr", type=float, default=0.002, help="learning rate")
    parser.add_argument(
        "--channel_multiplier",
        type=int,
        default=2,
        help="channel multiplier factor for the model. config-f = 2, else = 1",
    )
    parser.add_argument(
        "--narrow",
        type=float,
        default=1,
        help="channel narrow factor for the model. default = 1",
    )
    parser.add_argument(
        "--isconcat",
        dest='isconcat',
        action='store_true',
        default=False,
        help="noise isconcat for the model. default = False",
    )
    parser.add_argument(
        "--wandb", action="store_true", help="use weights and biases logging"
    )
    parser.add_argument(
        "--local_rank", type=int, default=0, help="local rank for distributed training"
    )
    parser.add_argument(
        "--augment", action="store_true", help="apply non leaking augmentation"
    )
    parser.add_argument(
        "--augment_p",
        type=float,
        default=0,
        help="probability of applying augmentation. 0 = use adaptive augmentation",
    )
    parser.add_argument(
        "--ada_target",
        type=float,
        default=0.6,
        help="target augmentation probability for adaptive augmentation",
    )
    parser.add_argument(
        "--ada_length",
        type=int,
        default=500 * 1000,
        help="target duraing to reach augmentation probability for adaptive augmentation",
    )
    parser.add_argument(
        "--ada_every",
        type=int,
        default=256,
        help="probability update interval of the adaptive augmentation",
    )
    args = parser.parse_args()
    # torch.distributed launchers export WORLD_SIZE; more than one process
    # enables DDP mode below.
    n_gpu = int(os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1
    args.distributed = n_gpu > 1
    if args.distributed:
        torch.cuda.set_device(args.local_rank)
        torch.distributed.init_process_group(backend="nccl", init_method="env://")
        synchronize()
    args.latent = 512
    args.n_mlp = 8
    args.start_iter = 0
    narrow_flag = False # whether use narrow
    # Pick the architecture; only the GPEN variant takes narrow/isconcat.
    if args.arch == 'stylegan2_gpen':
        from stylegan2_gpen import Generator, Discriminator
        narrow_flag = True
    elif args.arch == 'stylegan2':
        from model import Generator, Discriminator
    elif args.arch == 'swagan':
        from swagan import Generator, Discriminator
    if narrow_flag:
        generator = Generator(
            args.size, args.latent, args.n_mlp, channel_multiplier=args.channel_multiplier,
            isconcat=args.isconcat, narrow=args.narrow
        ).to(device) # size=256, channel_multiplier=1, narrow=0.5
        discriminator = Discriminator(
            args.size, channel_multiplier=args.channel_multiplier, narrow=args.narrow
        ).to(device)
        g_ema = Generator(
            args.size, args.latent, args.n_mlp, channel_multiplier=args.channel_multiplier,
            isconcat=args.isconcat, narrow=args.narrow
        ).to(device)
        g_ema.eval()
        accumulate(g_ema, generator, 0)  # decay 0: copy generator weights into g_ema
    else:
        generator = Generator(
            args.size, args.latent, args.n_mlp, channel_multiplier=args.channel_multiplier
        ).to(device)
        discriminator = Discriminator(
            args.size, channel_multiplier=args.channel_multiplier
        ).to(device)
        g_ema = Generator(
            args.size, args.latent, args.n_mlp, channel_multiplier=args.channel_multiplier
        ).to(device)
        g_ema.eval()
        accumulate(g_ema, generator, 0)
    # Lazy-regularization correction of lr and Adam betas (StyleGAN2 paper).
    g_reg_ratio = args.g_reg_every / (args.g_reg_every + 1)
    d_reg_ratio = args.d_reg_every / (args.d_reg_every + 1)
    g_optim = optim.Adam(
        generator.parameters(),
        lr=args.lr * g_reg_ratio,
        betas=(0 ** g_reg_ratio, 0.99 ** g_reg_ratio),
    )
    d_optim = optim.Adam(
        discriminator.parameters(),
        lr=args.lr * d_reg_ratio,
        betas=(0 ** d_reg_ratio, 0.99 ** d_reg_ratio),
    )
    if args.ckpt is not None:
        print("load model:", args.ckpt)
        ckpt = torch.load(args.ckpt, map_location=lambda storage, loc: storage)
        try:
            # Resume the iteration counter from the checkpoint file name
            # (e.g. "012345.pt"); non-numeric names keep start_iter at 0.
            ckpt_name = os.path.basename(args.ckpt)
            args.start_iter = int(os.path.splitext(ckpt_name)[0])
        except ValueError:
            pass
        generator.load_state_dict(ckpt["g"])
        discriminator.load_state_dict(ckpt["d"])
        g_ema.load_state_dict(ckpt["g_ema"])
        g_optim.load_state_dict(ckpt["g_optim"])
        d_optim.load_state_dict(ckpt["d_optim"])
    if args.distributed:
        generator = nn.parallel.DistributedDataParallel(
            generator,
            device_ids=[args.local_rank],
            output_device=args.local_rank,
            broadcast_buffers=False,
        )
        discriminator = nn.parallel.DistributedDataParallel(
            discriminator,
            device_ids=[args.local_rank],
            output_device=args.local_rank,
            broadcast_buffers=False,
        )
    # Random flip + normalization to [-1, 1].
    transform = transforms.Compose(
        [
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5), inplace=True),
        ]
    )
    dataset = MultiResolutionDataset(args.path, transform, args.size)
    loader = data.DataLoader(
        dataset,
        batch_size=args.batch,
        sampler=data_sampler(dataset, shuffle=True, distributed=args.distributed),
        drop_last=True,
    )
    if get_rank() == 0 and wandb is not None and args.wandb:
        wandb.init(project="stylegan 2")
    train(args, loader, generator, discriminator, g_optim, d_optim, g_ema, device)
| [
"tracelessle@163.com"
] | tracelessle@163.com |
1b611b721f8c8b246e16af69e02d8583d26b2d75 | 44bf3fc2af5de97c3bb4803b2048f4f5fac3f4b1 | /learning_templates/basicapp/views.py | cf29b043d79f4c8c50227cc1987055c845acf900 | [] | no_license | gyanvardhan7/django-deployment-example | 20e7e5855dfe2107418088f2f26cd33e71bf54f5 | 8ea9aaef1f47719325cc606458b9da1a5fd5ea29 | refs/heads/master | 2021-01-02T08:18:30.632452 | 2017-08-01T17:41:58 | 2017-08-01T17:41:58 | 98,991,918 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 363 | py | from django.shortcuts import render
# Create your views here.
def index(request):
    """Render the app's index page with a small demo context."""
    context_dict = {'text':'hello world','number':100}
    return render(request,'basicapp/index.html',context=context_dict)

def other(request):
    """Render the 'other' demo template (no extra context)."""
    return render(request,'basicapp/other.html')

def relative(request):
    """Render the relative-URL-tag demo template."""
    return render(request,'basicapp/rel_url_templates.html')
| [
"vardhangyan3@gmail.com"
] | vardhangyan3@gmail.com |
e41e207d7e1effbd92680d3cc30d14ea00d3cf54 | b4dad46c4b2639e68effa88a08612f9997c6c573 | /project/src/api/ticket_feedback.py | 099ee93ae468d794aacead617b71a6c557022a04 | [] | no_license | AutograderUCSD/Queues-Backend | 1cc5c03892c94580c8b8ad544dfc110256e45b6f | 64b69182b0aafe353b1ab314e8b7647f8ff73ba0 | refs/heads/master | 2023-04-20T01:51:51.733761 | 2021-05-17T22:39:34 | 2021-05-17T22:39:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,829 | py | from flask_cors import CORS
from flask_login import login_required, current_user
from flask import Blueprint, request, jsonify
from ..models.ticket import Ticket
from ..models.ticket_feedback import TicketFeedback, Rating
feedback_api_bp = Blueprint('feedback_api', __name__)
CORS(feedback_api_bp, supports_credentials=True)
@feedback_api_bp.route('/add_feedback', methods=['POST'])
@login_required
def add_ticket_feedback():
    """
    Add ticket feedback related to a ticket by student.

    Expects JSON with ticket_id, rating, feedback and an optional
    is_anonymous flag (defaults to True). Feedback can only be added
    once the ticket is resolved; otherwise responds with HTTP 400.
    """
    ticket_id = int(request.json['ticket_id'])
    rating = Rating(request.json['rating'])
    feedback = request.json['feedback']
    # Anonymous unless the client explicitly says otherwise.
    is_anonymous = bool(request.json['is_anonymous']) if 'is_anonymous' \
        in request.json else True
    t = Ticket.get_ticket_by_id(ticket_id=ticket_id)
    if not t.is_resolved():
        return jsonify({'reason': 'Ticket has not been resolved'}), 400
    else:
        fb = TicketFeedback.add_feedback(ticket_id=ticket_id,
                                         rating=rating,
                                         feedback=feedback,
                                         anonymous=is_anonymous)
        return jsonify({'reason': 'feedback added',
                        'feedback': fb.to_json()}), 200
@feedback_api_bp.route('/get_feedback', methods=['GET'])
@login_required
def get_ticket_feedback():
    """
    Get ticket feedback from a ticket.

    Returns the feedback list as JSON when the current user may view the
    ticket; otherwise HTTP 200 with 'permission denied'.
    """
    t_id = request.args.get('ticket_id', type=int)
    t = Ticket.get_ticket_by_id(t_id)
    if not t.can_view_by(current_user.id):
        # NOTE(review): 'feedbacks' is "" here but a list below -- clients
        # must handle both shapes; confirm the asymmetry is intended.
        return {'reason': 'permission denied', 'feedbacks': ""}, 200
    else:
        feedbacks = TicketFeedback.get_ticket_feedback(ticket_id=t_id)
        feedbacks = [feedback.to_json() for feedback in feedbacks]
        return {'reason': 'success', 'feedbacks': feedbacks}, 200
| [
"noreply@github.com"
] | noreply@github.com |
1648baed214078a8926589e49711518dd6f5a517 | 49f61714a6f78d984fd2194d6064d84e891bc5b7 | /2019-1/220/users/4258/codes/1647_2445.py | e74ad3e0112c554a5f274784d793ec04694fb134 | [] | no_license | psbarros/Variaveis3 | b5c4e1517e7d94a846ee03791d25d5821a1c651c | 3dcf6f810709ce03c78335acf9533e008a2ae125 | refs/heads/master | 2023-06-13T07:05:00.878430 | 2021-07-06T17:51:37 | 2021-07-06T17:51:37 | 383,549,597 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 207 | py | escala = input("Escolha C para Celsius, ou F para Fahrenheit: ")
temp = float(input("Temperatura: "))
# Compute both conversions; only the one matching the chosen scale prints.
c = (5/9)*(temp - 32)   # Fahrenheit -> Celsius
f = ((9/5)*temp) + 32   # Celsius -> Fahrenheit
# 'escala' (read earlier) names the scale of the input; print the value
# converted to the other scale. Any other choice prints nothing.
if(escala == "C"):
    print(f)
if(escala == "F"):
    print(c)
"psb@icomp.ufam.edu.br"
] | psb@icomp.ufam.edu.br |
aca102ba379f86d774530313c359be0ea25547c8 | 747f759311d404af31c0f80029e88098193f6269 | /extra-addons/hr_attendance_analysis/interface.py | 01271421ed420fa708a35f11eb536752ed1a9217 | [] | no_license | sgeerish/sirr_production | 9b0d0f7804a928c0c582ddb4ccb7fcc084469a18 | 1081f3a5ff8864a31b2dcd89406fac076a908e78 | refs/heads/master | 2020-05-19T07:21:37.047958 | 2013-09-15T13:03:36 | 2013-09-15T13:03:36 | 9,648,444 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,712 | py | # -*- encoding: utf-8 -*-
##############################################################################
#
# Clock Reader for OpenERP
# Copyright (C) 2004-2009 Moldeo Interactive CT
# (<http://www.moldeointeractive.com.ar>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import timeutils as tu
class Interface(object):
    """Read-only, lazily converting wrapper around an OpenERP record.

    Raw field values are read once via the ORM; on attribute/item access
    they are converted: scalars pass through, date/datetime strings become
    objects (via the timeutils helpers), and relational fields are wrapped
    in further Interface instances. Uses legacy Python 2 raise syntax.
    """
    def __init__(self, cr, uid, pool, oid, otype):
        # Keep the ORM context so related records can be wrapped later.
        self._parms = (cr, uid, pool)
        self._cache = pool.get(otype).read(cr, uid, oid)   # raw field values
        self._field = pool.get(otype).fields_get(cr, uid)  # field metadata
        self._local_cache = {}                             # converted values
    def __getitem__(self, name):
        if name in self._local_cache:
            return self._local_cache[name]
        if name in self._cache:
            ret = self._cache[name]
            # The OpenERP ORM uses False for "unset"; return it untouched.
            if isinstance(ret, bool): return ret
            field = self._field[name]
            if field['type'] in ['char','int','float', 'selection']:
                _r = ret
            elif field['type'] in ['datetime']:
                _r = tu.dt(ret)
            elif field['type'] in ['date']:
                _r = tu.d(ret)
            elif field['type'] in ['many2one']:
                # ret is (id, display_name); wrap the id.
                _r = Interface(*(self._parms + (ret[0] ,field['relation'])))
            elif field['type'] in ['many2many', 'one2many']:
                # ret is a list of ids; wrap each one.
                _r = map(lambda a: Interface(*(self._parms + a))
                        , zip(ret, [field['relation']]*len(ret)))
            else:
                raise NotImplementedError, \
                    "Not implemented for %s of type %s (%s)." % (name,
                                                                 field['type'],
                                                                 str(ret))
            self._local_cache[name] = _r
            return _r
        else:
            # raise ValueError, "Not exists %s in object." % name
            return False
    def __getattr__(self, name):
        # Allow record.field_name as an alias for record['field_name'].
        return self[name]
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| [
"geerish@omerp.net"
] | geerish@omerp.net |
769afb7623da0289c6dc97015f9e4fa301f95254 | acb8e84e3b9c987fcab341f799f41d5a5ec4d587 | /langs/5/mzm.py | 3ec62cfc159dc28f0f1be15728991241acff646f | [] | no_license | G4te-Keep3r/HowdyHackers | 46bfad63eafe5ac515da363e1c75fa6f4b9bca32 | fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2 | refs/heads/master | 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | import sys
def printFunction(lineRemaining):
    # Print the tokens enclosed in double quotes, or a blank line when the
    # quotes are empty. (Python 2 print statements.)
    if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
        if len(lineRemaining) > 2:
            #data to print
            lineRemaining = lineRemaining[1:-1]
            print ' '.join(lineRemaining)
        else:
            print

def main(fileName):
    # Interpret each line of the file: lines whose first token is 'mZM'
    # print their remainder; anything else prints ERROR and stops.
    with open(fileName) as f:
        for line in f:
            data = line.split()
            if data[0] == 'mZM':
                printFunction(data[1:])
            else:
                print 'ERROR'
                return

if __name__ == '__main__':
    main(sys.argv[1])
"juliettaylorswift@gmail.com"
] | juliettaylorswift@gmail.com |
3c564b99140d76bb4a4319fc75ad1861e7dc024e | cb66762a849455d5955afd69db21586ba61c1b9c | /blog/sitemaps.py | be45d99b7b0e16383d21f05a865b8b09cfbcda9c | [] | no_license | ilnurgi/website | e4ad35061d0edeb93a1aa020ff57ab0a9fb366ba | ca54fcfd2e67b97634e8cb802a4cfed314f1a396 | refs/heads/master | 2021-01-24T10:35:42.985453 | 2017-12-16T17:14:55 | 2017-12-16T17:14:55 | 39,440,066 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 497 | py | # coding: utf-8
from django.contrib.sitemaps import Sitemap
from .models import Post
class BlogSitemap(Sitemap):
    """Sitemap over published blog posts."""
    changefreq = "weekly"
    priority = 0.5
    def items(self):
        # Only published posts appear in the sitemap.
        return Post.objects.filter(published=True)
    def lastmod(self, obj):
        """
        Return the post's last modification time.

        :param obj:
        :type obj: Post
        """
        return obj.modified
    def location(self, obj):
        """
        Return the post's canonical URL.

        :param obj:
        :type obj: Post
        """
        return obj.get_absolute_url()
| [
"ilgayfut@mera.ru"
] | ilgayfut@mera.ru |
d7dbf831295933c7978f3753cabb23510160f5f5 | 88165c107bacc215d9c3eaee89f1d31d8c736a51 | /Pygame/test_dirty/test_dirty_v6.py | eacf8171a5e2733e97c78cb7f78dbd40ffd5d909 | [
"MIT"
] | permissive | Pharaoh00/My-Experiments | 9490afcaa71a69a949ee90efbd03cbe594917a28 | 2c761d04500e2ba9778feb4a66641d4565da5040 | refs/heads/master | 2021-04-28T08:07:25.735514 | 2018-03-15T14:04:43 | 2018-03-15T14:04:43 | 122,241,822 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,463 | py | #-*- coding:utf-8 -*-
#test_dirty_v6.py
# Minimal pygame demo of dirty-rect rendering: a keyboard-movable square
# (DirtySprite) redrawn via LayeredDirty and display.update(rects).
import pygame
from pygame.locals import *

pygame.init()
#pygame.key.set_repeat(250, 25)

s_width = 600
s_height = 600
clock = pygame.time.Clock()
fps = 30
running = True

game_screen = pygame.display.set_mode((s_width, s_height), pygame.DOUBLEBUF)
background = pygame.Surface(game_screen.get_size()) # Create a new Surface
#background = background.convert() # Convert the pixels for "pygame"
background.fill((255,0,0)) # Background colour

class Player(pygame.sprite.DirtySprite): # Inherit from DirtySprite
    def __init__(self):
        pygame.sprite.DirtySprite.__init__(self) # Initialise the base class
        #self.image = pygame.image.load("poo1.png") # placeholder image
        self.image = pygame.Surface((50,50)) # Create a new Surface
        self.rect = self.image.get_rect() # Rect sized to the Surface
        self.rect = pygame.draw.rect(self.image, (0,255,0), self.rect)
        self.speed_x = 0
        self.speed_y = 0
        # Draw a square on the screen.
        self.check_press_x = False
        self.check_press_y = False
    def start_move_x(self, x):
        # Begin horizontal movement at speed x (key pressed).
        self.speed_x = x
        self.check_press_x = True
    def stop_move_x(self):
        self.check_press_x = False
    def start_move_y(self, y):
        # Begin vertical movement at speed y (key pressed).
        self.speed_y = y
        self.check_press_y = True
    def stop_move_y(self):
        self.check_press_y = False
    def update(self):
        #print(self.rect)
        # Move only while the corresponding key is held down.
        if self.check_press_x == True:
            self.rect.x += self.speed_x
        if self.check_press_x == False:
            self.rect.x += 0
        if self.check_press_y == True:
            self.rect.y += self.speed_y
        if self.check_press_y == False:
            self.rect.y += 0
        # Undo the move when it would leave the window.
        if self.rect.left < 0 or self.rect.right > s_width:
            self.rect.x -= self.speed_x
        if self.rect.top < 0 or self.rect.bottom > s_height:
            self.rect.y -= self.speed_y
        print(self.check_press_y)
        self.dirty = 1 # Mark the sprite dirty so it is redrawn

player = Player()
allsprites = pygame.sprite.LayeredDirty(player) # Add the object to the "group"
allsprites.clear(game_screen, background) # Clear the screen

speed = 10
while(running):
    # Key down starts movement along an axis; key up stops it.
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            running = False
        if event.type == pygame.KEYDOWN:
            if event.key == pygame.K_LEFT:
                player.start_move_x(-speed)
            if event.key == pygame.K_RIGHT:
                player.start_move_x(speed)
            if event.key == pygame.K_UP:
                player.start_move_y(-speed)
            if event.key == pygame.K_DOWN:
                player.start_move_y(speed)
        if event.type == pygame.KEYUP:
            if event.key == pygame.K_LEFT:
                player.stop_move_x()
            if event.key == pygame.K_RIGHT:
                player.stop_move_x()
            if event.key == pygame.K_UP:
                player.stop_move_y()
            if event.key == pygame.K_DOWN:
                player.stop_move_y()
    allsprites.update()
    rects = allsprites.draw(game_screen) # Draw the group's objects
    pygame.display.update(rects) # Update only the dirty rectangles
    clock.tick(fps)
    #print(clock.get_fps())

pygame.quit()
| [
"noreply@github.com"
] | noreply@github.com |
883182e36ae3c57c73a7b281ee795b79299603a9 | 191fbcc96b9f0c74b88b001003f024064c973753 | /gateware/rtl/platform/syzygy/boson.py | 8ca2d666a8022c603a68c64631c4c2278825ce82 | [
"BSD-2-Clause"
] | permissive | gregdavill/boson-eth-firmware | f0b5895469260e414b90cd7e7e0fad37a5728159 | 056843c43fac6486114bfb916fb78a4f7d38e87c | refs/heads/main | 2023-08-24T12:55:10.648741 | 2021-10-15T00:28:16 | 2021-10-15T00:28:16 | 374,504,758 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,166 | py | # This file is Copyright (c) 2020 Gregory Davill <greg.davill@gmail.com>
# License: BSD
from litex.build.generic_platform import *
def boson_syzygy_r0d1(syzygy_id=0):
    """LiteX IO definition for a FLIR Boson camera on a SYZYGY pod (r0d1).

    *syzygy_id* selects which SYZYGY connector the data-bus pins come
    from; returns an extension-IO list suitable for platform.add_extension.

    NOTE(review): the data Pins f-string ends with a bare "{_id}:" entry,
    several Subsignals use fixed pin names (e.g. "A17") instead of the
    connector, and the HyperRAM-style signals below hard-code SYZYGY1
    regardless of syzygy_id -- looks unfinished; verify against the board
    netlist before use.
    """
    _id = f'SYZYGY{syzygy_id}'
    return [
        ("Boson", 0,
            Subsignal("data", Pins(f'{_id}:S27 {_id}:P2C_CLKN {_id}:D5P {_id}:S26 \
                                     {_id}:D7N {_id}:D2P {_id}:D2N {_id}:S17 \
                                     {_id}:D1N {_id}:S16 {_id}:D5N {_id}:S18 \
                                     {_id}:C2P_CLKN {_id}:S25 {_id}:D1P {_id}:D6P \
                                     {_id}:D4P {_id}:D0P {_id}:D6N {_id}:S23 \
                                     {_id}:'),IOStandard("LVCMOS18"),Misc("SLEWRATE=SLOW")),
            Subsignal("clk", Pins("A17"),IOStandard("LVCMOS18"),Misc("SLEWRATE=SLOW")),
            Subsignal("vsync", Pins("A13"),IOStandard("LVCMOS18"),Misc("SLEWRATE=SLOW")),
            Subsignal("hsync", Pins("D16"),IOStandard("LVCMOS18"),Misc("SLEWRATE=SLOW")),
            Subsignal("valid", Pins("C16"),IOStandard("LVCMOS18"),Misc("SLEWRATE=SLOW")),
            Subsignal("tx", Pins("A3"),IOStandard("LVCMOS18"),Misc("SLEWRATE=SLOW")),
            Subsignal("rx", Pins("B9"),IOStandard("LVCMOS18"),Misc("SLEWRATE=SLOW")),
            Subsignal("reset", Pins("B2"),IOStandard("LVCMOS18"),Misc("SLEWRATE=SLOW")),
            Subsignal("ext_sync", Pins("B18"),IOStandard("LVCMOS18"),Misc("SLEWRATE=SLOW")),
            Subsignal("rst_n", Pins("SYZYGY1:D5N"), IOStandard("LVCMOS18"),Misc("SLEWRATE=FAST TERMINATION=OFF")),
            Subsignal("clk_p", Pins("SYZYGY1:D4P"), IOStandard("LVCMOS18"),Misc("SLEWRATE=FAST TERMINATION=OFF")),
            Subsignal("clk_n", Pins("SYZYGY1:D4N"), IOStandard("LVCMOS18"),Misc("SLEWRATE=FAST TERMINATION=OFF")),
            Subsignal("cs_n", Pins("SYZYGY1:D6P"), IOStandard("LVCMOS18"),Misc("SLEWRATE=SLOW TERMINATION=OFF")),
            Subsignal("dq", Pins("SYZYGY1:D2N SYZYGY1:D0N SYZYGY1:D5P SYZYGY1:D2P SYZYGY1:D3P SYZYGY1:D1N SYZYGY1:D1P SYZYGY1:D0P"), IOStandard("LVCMOS18"),Misc("SLEWRATE=FAST TERMINATION=OFF")),
            Subsignal("rwds", Pins("SYZYGY1:D3N"), IOStandard("LVCMOS18"),Misc("SLEWRATE=FAST TERMINATION=OFF")),
        ),
    ]
"greg.davill@gmail.com"
] | greg.davill@gmail.com |
fbdf8f7eb7a94a0c622ef52e62168d56f6834aa6 | d3f167a5c5e114c51a4dda0037117c7cb9733410 | /book/urls.py | 90f62a97d31fecff6910cab488c1538e4b43fe52 | [] | no_license | BaselAllam/books | 3fb41bc566726e2ceef2595267fe4ecba186fcc8 | 63acfe154237206af79186bd86fbaade8729c471 | refs/heads/master | 2023-03-04T03:29:25.596466 | 2021-02-12T15:59:47 | 2021-02-12T15:59:47 | 329,945,857 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 523 | py | from django.urls import path
from . import views
from django.conf.urls.static import static
from django.conf import settings
from .views import BookListView, AddBook
# URL routes for the book app; media files are served via the static()
# helper appended below (development setups).
urlpatterns = [
    path('books/', views.first_view),
    path('search/<int:id>/', views.search),
    path('bookListView/', BookListView.as_view()),
    # NOTE(review): reuses views.search as the detail view -- confirm intended.
    path('bookDetails/<int:id>/', views.search, name='book_details'),
    path('addBook/', AddBook.as_view(), name='add_book')
]
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| [
"baseljahen@gmail.com"
] | baseljahen@gmail.com |
21b8c9f44927459be125440bea1eff530f530da0 | 040236bf3bb45826c0bbc39e7432512ff420a0d1 | /geomk/api/serializers.py | 6c022a859e6e149bbc1d0f638e27c128eb57e92b | [] | no_license | ThiagoDiasV/parking-lot-api | 2768baf8921b9dc087616def8c93ccc4f2fe8cf5 | 5cb3f687099bea59740b0034aeebf9a65b791358 | refs/heads/master | 2022-12-13T02:25:50.754524 | 2020-02-12T12:08:32 | 2020-02-12T12:08:32 | 232,959,041 | 4 | 3 | null | 2022-03-08T21:10:08 | 2020-01-10T03:36:52 | Python | UTF-8 | Python | false | false | 991 | py | from .models import Car
from rest_framework import serializers
class CarSerializer(serializers.ModelSerializer):
    """Serializer for Car parking records; state fields are server-managed."""
    class Meta:
        model = Car
        fields = "__all__"
        read_only_fields = ["entry_time", "left_time", "time", "paid", "left"]

    def create(self, validated_data: dict) -> Car:
        """
        Overriding create function to avoid POST with cars that already
        are at parking lot and don't left yet.
        Cars with plate registered can only enter if they already left the last
        time.
        """
        try:
            # Look at the most recent record for this plate.
            cars = Car.objects.filter(plate=validated_data.get("plate"))
            last_register = cars.last()
            if last_register:
                if not last_register.left:
                    raise serializers.ValidationError(
                        "Car already at parking lot and don't left yet."
                    )
        # NOTE(review): filter()/last() do not raise IndexError, so this
        # handler looks unreachable -- confirm and consider removing.
        except IndexError:
            pass
        return Car.objects.create(**validated_data)
| [
"thiago76ers@gmail.com"
] | thiago76ers@gmail.com |
4e57e9f8afef32608fce5b2ef3be8fcca3d039e4 | 2f1fd98a76e54320815f5155e10e29937f53fcae | /ex20.py | dacfba32ece4e6030f2b3ee8a27a4aa47cc6a57f | [] | no_license | 260964595/homework-yichun | 354980b31e871ef201559d30ede478918aa43a59 | efa96a62f1cb7dbaeff6107baa849c6ee448553c | refs/heads/master | 2016-09-05T15:13:36.476980 | 2014-02-14T07:28:53 | 2014-02-14T07:28:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 585 | py | from sys import argv
script, input_file = argv  # usage: python ex20.py <file> (Python 2 script)

def print_all(f):
    """Print the file's entire contents."""
    print f.read()

def rewind(f):
    """Seek back to the start of the file."""
    f.seek(0)

def print_a_line(line_count, f):
    """Print the given line number followed by the next line of *f*."""
    print line_count, f.readline()

current_file = open(input_file)

print "First let's print the whole file:\n"
print_all(current_file)

print "Now let's rewind, kind of like a tape."
rewind(current_file)

print "Let's print three lines:"
current_line = 1
print_a_line(current_line, current_file)
current_line = current_line + 1
print_a_line(current_line, current_file)
current_line = current_line + 1
print_a_line(current_line, current_file)
"260964595@qq.com"
] | 260964595@qq.com |
2a6b069781e6adb8ea16670f2ff1a3dde7106cb2 | f34beda2769fb98aaf118d13319da541a2203963 | /recipes/migrations/0002_recipe_image.py | 8f3067e956a56b53d31b89dc7e279ac391e73ce6 | [] | no_license | MacLure/recipeapp | 4839a6d5b49f3b50aeaea31885e1bbe14b2ec9eb | 5480ba45fe6d40e69d8a2dc2862088e6eaf17d80 | refs/heads/master | 2020-05-07T10:35:02.192346 | 2019-04-20T01:16:01 | 2019-04-20T01:16:01 | 180,423,555 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 407 | py | # Generated by Django 2.0.7 on 2019-04-13 15:17
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add an optional 'image' field (placeholder default) to Recipe."""

    dependencies = [
        ('recipes', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='recipe',
            name='image',
            field=models.ImageField(blank=True, default='default.png', upload_to=''),
        ),
    ]
| [
"malcolm.maclure@gmail.com"
] | malcolm.maclure@gmail.com |
719262b20195418ddb0703cd539e1a2d6e1886cf | e19214b0aab18702ce9eb1e805d9506a080d9be8 | /Python/print_neat.py | ba0ce81fb234b3b1a05bd78b4df415ccf4c6ebdb | [] | no_license | titanspeed/PDX-Code-Guild-Labs | 8bb62ba1efe4e85e61ac77325e4d86079eb52d17 | c32dc22fd92d1e27ba0e522a9a8da95c38df4a53 | refs/heads/master | 2021-01-19T00:02:51.407727 | 2017-06-07T03:53:36 | 2017-06-07T03:53:36 | 87,141,638 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 271 | py | def pretty_print(str):
phonum = str
phonum1 = list(phonum)
phonum1.insert(0, '(')
phonum1.insert(4, ')')
phonum1.insert(5, ' ')
phonum1.insert(-4, '-')
phonum2 = ''.join(phonum1)
print(phonum2)
pretty_print(input('What phone number?: ')) | [
"titanspeed@gmail.com"
] | titanspeed@gmail.com |
7f62f140a16b71334fc63c2066e776c649adea4c | 2fb266cb1860008187cdec533d0970183d107e58 | /server/jencode.py | 6a546f1e97143abc96e21c3693576df8f3cafc95 | [
"MIT"
] | permissive | bizet/xpmanager | d12fe1e16bee227da825297ec4f5f7290182fa2a | 3f5e62e894c5a4df42d610016a22179726b9d621 | refs/heads/master | 2021-01-22T21:32:16.865883 | 2013-11-04T08:18:35 | 2013-11-04T08:18:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 364 | py | import os,sys
sys.path.append(os.path.dirname(os.path.realpath(__file__)))
import json
import datetime
class JEncoder(json.JSONEncoder):
    """JSON encoder serializing datetimes as 'YYYY-MM-DD' strings and
    timedeltas via str(); other unknown types use the default behavior."""
    def default(self, obj):
        if isinstance(obj, datetime.datetime):
            return obj.strftime('%Y-%m-%d')
        if isinstance(obj, datetime.timedelta):
            return str(obj)
        return super(JEncoder, self).default(obj)
| [
"bizet.cn@hotmail.com"
] | bizet.cn@hotmail.com |
6bd46a4f4244ba797acee64d6fc5e3adb9325864 | 5fe35ca981886c8ec689af788ac6b8db8291b4a0 | /SEBEphen/Spring_scenarios.py | 4e4d93b8e67033680ffa80f2af8dbf0e8d4abfe9 | [] | no_license | DienesB/VG2.0 | cba5e04181f2b5dbd07cdc47fc272f3a87493976 | ad9d5fed803e8b1ae28c5a1aa9dbf1cc67809ebe | refs/heads/master | 2020-04-26T05:56:48.964595 | 2019-08-23T07:45:14 | 2019-08-23T07:45:14 | 173,349,210 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,093 | py | # Author: Balazs Dienes
# Contact: dienes.balazs88@gmail.com
# Incorporating mean spring temperature for the calculation of leaf density and transmissivity.
# Three scenarios are possible: "warm spring", "cold spring", and "no spring temperature".
#Import libraries
import numpy as np
def scenario():
print '\nscenario method is running...'
# Read lookup table as a numpy array.
# The lookup table was prepared as a result of literature review.
# The lookup table contains the days on which tree genera frequent in Berlin enter new phenological phases.
# The lookup table also includes the winter and summer transmissivity of tree genera frequent in Berlin.
mainfolder = "C:/Users/Balazs Dienes/PycharmProjects/SEBEphen/mainfolder/"
lookupfolder = mainfolder + 'input_phenology/'
springScenarios = True
springTemperature = 11.27 # threshold:9.53
if (springScenarios == True):
if (springTemperature >= 9.53):
lookuptable = np.genfromtxt(lookupfolder + 'lookup_table_warm_scenario.txt', skip_header=1,
usecols=(1, 2, 3, 4, 5, 6, 7, 8),
delimiter="\t", missing_values="NA")
print "Warm spring scenario is applied as the temperature is: ", springTemperature, "C.\n"
else:
lookuptable = np.genfromtxt(lookupfolder + 'lookup_table_cold_scenario.txt', skip_header=1,
usecols=(1, 2, 3, 4, 5, 6, 7, 8),
delimiter="\t", missing_values="NA")
print "Cold spring scenario is applied as the temperature is: ", springTemperature, "C.\n"
else:
lookuptable = np.genfromtxt(lookupfolder + 'lookup_table_no_scenario.txt', skip_header=1,
usecols=(1, 2, 3, 4, 5, 6, 7, 8),
delimiter="\t", missing_values="NA")
print "Spring temperature is unknown, no climate scenario is applied.\n"
return lookuptable
| [
"noreply@github.com"
] | noreply@github.com |
00d0a14e123abd54a6e59a43184ae690361ef49d | acbb6e1e33cf2c5dae45c73e3d07723ce21f1cf9 | /migrations/versions/ad4630b5d9d4_followers.py | 6b6dd161cc165a486f3c1637ff7b444302d21143 | [] | no_license | Tur-4000/microblog | 24edde54599937bc97bf782861868fea0f57814e | 24de02ed7c1d417b68171079dc366833f7d2e6c7 | refs/heads/master | 2022-05-25T22:16:10.609591 | 2018-08-02T20:34:40 | 2018-08-02T20:34:40 | 141,682,858 | 1 | 0 | null | 2022-05-25T00:20:33 | 2018-07-20T08:05:00 | Python | UTF-8 | Python | false | false | 840 | py | """followers
Revision ID: ad4630b5d9d4
Revises: 6f99f9ee47c0
Create Date: 2018-07-24 17:35:58.696784
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'ad4630b5d9d4'
down_revision = '6f99f9ee47c0'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('followers',
sa.Column('follower_id', sa.Integer(), nullable=True),
sa.Column('followed_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['followed_id'], ['user.id'], ),
sa.ForeignKeyConstraint(['follower_id'], ['user.id'], )
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('followers')
# ### end Alembic commands ###
| [
"tur.4000@gmail.com"
] | tur.4000@gmail.com |
a5605a7eed477479e64e9c5d8da412bf6716d2cc | 05a5431bdc547cfa762d8af680e7c5e9a660324a | /clock.py | ff4a780d7b350422cdd8aa2487c3085fe7f540f6 | [] | no_license | CWRatliff/Rover | 4f1ee0f0d5496cd8474b370973b560d431f536a3 | 84330a43f1bafdbb657dec68694cca2271d56a31 | refs/heads/master | 2023-09-03T20:00:40.993436 | 2023-08-25T16:45:40 | 2023-08-25T16:45:40 | 157,921,594 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,753 | py | import tkinter as tk
import tkinter.font as tkFont
import time
###############################################################################
# Parameters and global variables
# Default font size
font_size = -24
# Declare global variables
root = None
dfont = None
frame = None
dtime = None
# Global variable to remember if we are fullscreen or windowed
fullscreen = False
###############################################################################
# Functions
# Toggle fullscreen
def toggle_fullscreen(event=None):
global root
global fullscreen
# Toggle between fullscreen and windowed modes
fullscreen = not fullscreen
root.attributes('-fullscreen', fullscreen)
resize()
# Return to windowed mode
def end_fullscreen(event=None):
global root
global fullscreen
# Turn off fullscreen mode
fullscreen = False
root.attributes('-fullscreen', False)
resize()
# Automatically resize font size based on window size
def resize(event=None):
global time_dfont
global button_dfont
global frame
# Resize font based on frame height (minimum size of 12)
# Use negative number for "pixels" instead of "points"
new_size = -max(12, int((frame.winfo_height() / 2)))
time_dfont.configure(size=new_size)
new_size = -max(12, int((frame.winfo_height() / 30)))
button_dfont.configure(size=new_size)
# Read values from the sensors at regular intervals
def update():
global root
global dtime
# Get local time
local_time = time.localtime()
# Convert time to 12 hour clock
hours = local_time.tm_hour
if hours > 12:
hours -= 12
# Add leading 0s
shours = str(hours)
smin = str(local_time.tm_min)
if hours < 10:
shours = '0' + shours
if local_time.tm_min < 10:
smin = '0' + smin
# Construct string out of time
dtime.set(shours + ':' + smin)
# Schedule the poll() function for another 500 ms from now
root.after(500, update)
###############################################################################
# Main script
# Create the main window
root = tk.Tk()
root.title("My Clock")
# Create the main container
frame = tk.Frame(root, bg='black')
# Lay out the main container (expand to fit window)
frame.pack(fill=tk.BOTH, expand=1)
# Variables for holding temperature and light data
dtime = tk.StringVar()
# Create dynamic font for text
time_dfont = tkFont.Font(family='Courier New', size=font_size)
button_dfont = tkFont.Font(size=font_size)
# Create widgets
label_time = tk.Label( frame,
textvariable=dtime,
font=time_dfont,
fg='red',
bg='black')
button_quit = tk.Button(frame,
text="Quit",
font=button_dfont,
command=root.destroy,
borderwidth=0,
highlightthickness=0,
fg='gray10',
bg='black')
# Lay out widgets in a grid in the frame
label_time.grid(row=0, column=0, padx=20, pady=20)
button_quit.grid(row=1, column=0, padx=5, pady=5, sticky=tk.E)
# Make it so that the grid cells expand out to fill window
frame.rowconfigure(0, weight=10)
frame.rowconfigure(1, weight=1)
frame.columnconfigure(0, weight=1)
# Bind F11 to toggle fullscreen and ESC to end fullscreen
root.bind('<F11>', toggle_fullscreen)
root.bind('<Escape>', end_fullscreen)
# Have the resize() function be called every time the window is resized
root.bind('<Configure>', resize)
# Schedule the poll() function to be called periodically
root.after(20, update)
# Start in fullscreen mode and run
toggle_fullscreen()
root.mainloop()
| [
"noreply@github.com"
] | noreply@github.com |
b54dbcbba5b87d5e208a4286878095d159ab7260 | 4f75cc33b4d65d5e4b054fc35b831a388a46c896 | /test_watchlist.py | 395cd36afd93c199d6f54cfb098279bd0d6044b4 | [] | no_license | Lr-2002/newpage | c3fe2acc451e24f6408996ea1271c61c321de702 | c589ad974e7100aa9b1c2ccc095a959ff68069b6 | refs/heads/main | 2023-09-03T06:13:53.428236 | 2021-11-23T10:41:21 | 2021-11-23T10:41:21 | 402,606,000 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,048 | py | from os import name
from re import A, T
import unittest
from app import app, db, Movie, User
class WatchlistTestCase(unittest.TestCase):
def setUp(self):
app.config.update(
TESTING = True,
SQLALCHEMY_DATABASE_URI = 'sqlite:///:memory:'
)
db.create_all()
user = User(name = 'test', username = 'test')
user.set_password('123')
movie = Movie(title ='Test Movie Title', year = 2000)
db.session.add_all([user,movie])
db.session.commit()
self.client = app.test_client() # create client to test
self.runner = app.test_cli_runner()
# app.test_cli_runner app.test_client
# both of them are built-in test function oin falsk
def tearDown(self):
""" close app and clean everything"""
db.session.remove()
db.drop_all()
def test_app_exist(self):
""" exist_testing by none (if app not exist then the object is nono)"""
self.assertIsNotNone(app)
def test_app_is_testing(self):
""" test_app_is_testing by give app.config"""
self.assertTrue(app.config['TESTING'])
def test_404_page(self):
response = self.client.get('/nothing')
data = response.get_data(as_text=True)
self.assertIn('Page Not Found - 404',data)
# test the response of 404_page
self.assertEqual(response.status_code, 404)
def test_index_page(self):
response = self.client.get('/')
data = response.get_data(as_text=True)
self.assertEqual(response.status_code, 200)
def login(self):
self.client.post('/login', data=dict(
username = 'test',
password = '123'
),follow_redirects = True)
def test_create_item(self):
print(1)
self.login()
print(4)
response = self.client.post('/', data=dict(
title='New Movie',
year='2019'
), follow_redirects=True)
print(2)
data = response.get_data(as_text=True)
self.assertIn('Item created.', data)
self.assertIn('New Movie', data)
print(3)
# 测试创建条目操作,但电影标题为空
response = self.client.post('/', data=dict(
title='',
year='2019'
), follow_redirects=True)
data = response.get_data(as_text=True)
self.assertNotIn('Item created.', data)
self.assertIn('Invalid input.', data)
# 测试创建条目操作,但电影年份为空
response = self.client.post('/', data=dict(
title='New Movie',
year=''
), follow_redirects=True)
data = response.get_data(as_text=True)
self.assertNotIn('Item created.', data)
self.assertIn('Invalid input.', data)
def test_update_item(self):
self.login()
# 测试更新页面
response = self.client.get('/movie/edit/1')
data = response.get_data(as_text=True)
self.assertIn('Edit', data)
self.assertIn('Test Movie Title', data)
self.assertIn('2000', data)
# 测试更新条目操作
response = self.client.post('/movie/edit/1', data=dict(
title='New Movie Edited',
year='2019'
), follow_redirects=True)
data = response.get_data(as_text=True)
self.assertIn('Item updated.', data)
self.assertIn('New Movie Edited', data)
# 测试更新条目操作,但电影标题为空
response = self.client.post('/movie/edit/1', data=dict(
title='',
year='2019'
), follow_redirects=True)
data = response.get_data(as_text=True)
self.assertNotIn('Item updated.', data)
self.assertIn('Invalid input.', data)
# 测试更新条目操作,但电影年份为空
response = self.client.post('/movie/edit/1', data=dict(
title='New Movie Edited Again',
year=''
), follow_redirects=True)
data = response.get_data(as_text=True)
self.assertNotIn('Item updated.', data)
self.assertNotIn('New Movie Edited Again', data)
self.assertIn('Invalid input.', data)
# 测试删除条目
def test_delete_item(self):
self.login()
response = self.client.post('/movie/delete/1', follow_redirects=True)
data = response.get_data(as_text=True)
self.assertIn('Item Deleted', data)
self.assertNotIn('Test Movie Title', data)
def test_login_protect(self):
response = self.client.get('/')
data = response.get_data(as_text=True)
self.assertNotIn('Logout', data)
self.assertIn('Settings', data)
self.assertIn("<form method='post'>", data)
self.assertIn('Delete', data)
self.assertIn('Edit', data)
# 测试登录
def test_login(self):
response = self.client.post('/login', data=dict(
username='test',
password='123'
), follow_redirects=True)
data = response.get_data(as_text=True)
self.assertIn('Successfully', data)
self.assertIn('logout', data)
self.assertIn('Settings', data)
self.assertIn('Delete', data)
self.assertIn('Edit', data)
self.assertIn("<form method='post'>", data)
# 测试使用错误的密码登录
response = self.client.post('/login', data=dict(
username='test',
password='456'
), follow_redirects=True)
data = response.get_data(as_text=True)
self.assertNotIn('Successfully', data)
self.assertIn('Invalid username or password', data)
# 测试使用错误的用户名登录
response = self.client.post('/login', data=dict(
username='wrong',
password='123'
), follow_redirects=True)
data = response.get_data(as_text=True)
self.assertNotIn('Successfully', data)
self.assertIn('Invalid username or password', data)
# 测试使用空用户名登录
response = self.client.post('/login', data=dict(
username='',
password='123'
), follow_redirects=True)
data = response.get_data(as_text=True)
self.assertNotIn('Successfully', data)
self.assertIn('Invalid username or password', data)
# 测试使用空密码登录
response = self.client.post('/login', data=dict(
username='test',
password=''
), follow_redirects=True)
data = response.get_data(as_text=True)
self.assertNotIn('Successfully', data)
self.assertIn('Invalid username or password', data)
# 测试登出
def test_logout(self):
self.login()
response = self.client.get('/logout', follow_redirects=True)
data = response.get_data(as_text=True)
self.assertIn('logged out', data)
# self.assertIn('Logout', data)
self.assertIn('Settings', data)
self.assertIn('Delete', data)
self.assertIn('Edit', data)
self.assertIn("<form method='post'>", data)
# 测试设置
def test_settings(self):
self.login()
# 测试设置页面
response = self.client.get('/settings')
data = response.get_data(as_text=True)
self.assertIn('Settings', data)
self.assertIn('Your Name', data)
# 测试更新设置
response = self.client.post('/settings', data=dict(
name='Grey Li',
), follow_redirects=True)
data = response.get_data(as_text=True)
self.assertIn('changed', data)
self.assertIn('Grey Li', data)
# 测试更新设置,名称为空
response = self.client.post('/settings', data=dict(
name='',
), follow_redirects=True)
data = response.get_data(as_text=True)
self.assertNotIn('changed', data)
# self.assertIn('Invalid input.', data)
if __name__ == '__main__':
unittest.main() | [
"2629651228@qq.com"
] | 2629651228@qq.com |
422a6f41954d22664da8ac08cf79aa26bcbc3704 | dc420a558136bfb98df8f36b68fc84fe01e38547 | /Step 3 - Splice video and Find TenOfTen.py | 547e33c19c40d381871bc4134d8bf22ac6eea633 | [] | no_license | NukeWolf/CNN-10-Pun-Compilation-Editor-Scripts | da8bdb527fcd900d78dd53e44bf66e42d3de7195 | dbbb956e63b90f08e1466714c0e4e609ccd517cc | refs/heads/master | 2023-02-16T13:28:58.467972 | 2021-01-19T03:43:23 | 2021-01-19T03:43:23 | 330,854,611 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,423 | py | #Purpose: After downloading the videos and placing each video in its own folder,
#With names corresponding to integers, this code will go into each folder and splice
# Each video into the last 300 seconds of each video.
import pytesseract
import cv2
import time
import os
import yaml
with open('videos.yaml','r') as f:
compiledVideos = yaml.load(f,Loader=yaml.FullLoader)
#Final Videos Object
videos = []
#Process the Yaml file into a list in order by Date. The ID value represents the chronological order
for x in range(len(compiledVideos)+1):
videos.append(1)
for x in compiledVideos:
print(compiledVideos[x]['id'])
videos[compiledVideos[x]['id']]=compiledVideos[x]
videos.reverse()
#IDs go from most recent video to oldest video. We reverse so we start in january.
root = os.getcwd()
def saveConfig(videosDict):
cwd = os.getcwd()
os.chdir(root)
config = open('videos.yaml','w')
yaml.dump(videosDict,config)
os.chdir(cwd)
#Select main directory as videos. This is created from the last step.
os.chdir("videos")
cwd = os.getcwd()
print(cwd)
pytesseract.pytesseract.tesseract_cmd = r'C:\Program Files\Tesseract-OCR\tesseract.exe'
def splice(mp4):
vidcap = cv2.VideoCapture(mp4)
success,image = vidcap.read()
for x in range(9000):
success,image = vidcap.read()
count = 300
success = True
while success:
cv2.imwrite("frame%d.jpg" % count, image) # save frame as JPEG file
for x in range(30):
success,image = vidcap.read()
#print ('Read a new frame: ', success + time.time()-start)
count += 1
def find10(endFrame):
for count in reversed(range(300,endFrame)):
img = cv2.imread('frame%d.jpg' % count)
text = pytesseract.image_to_string(img)
if ("10 OUT OF 10" in text):
return count
return -1
for index in range(195):
os.chdir(str(index))
files = os.listdir()
print(files[0],index,"Splicing!")
#Takes the downloaded video in the folder and splices it into 300 jpegs
splice(files[0])
print("Finding 10")
#After splicing, it then proceeds to find the frame where it says 10 out of 10 with OCR and then saves it.
frameStart = find10(videos[index]['endFrame'])
#Saves the frame where it starts into the config
compiledVideos[videos[index]['date']]['startFrame'] = frameStart
saveConfig(compiledVideos)
os.chdir(cwd)
| [
"smart.alex.huang@gmail.com"
] | smart.alex.huang@gmail.com |
f5a58c9ab3eef65eed0d84473d1c97fa5542387a | c4a5052afa22f900d3f15adf3a4545149b43e044 | /elomerchant/src/models/train_model_4.py | da08d4c3a25d534fb021ead9ae3aefa73df4c40a | [
"MIT"
] | permissive | nehaboob/Kaggle | e888ff50a19411fb0f92751324236542b694d6b3 | 84e70960371b81120e8f299f1f7d5732d6d280c4 | refs/heads/master | 2020-09-26T04:46:22.902956 | 2019-12-06T19:56:35 | 2019-12-06T19:56:35 | 226,168,128 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,942 | py | import click
import logging
from pathlib import Path
from dotenv import find_dotenv, load_dotenv
import pandas as pd
import numpy as np
from sklearn.model_selection import StratifiedKFold, KFold
from sklearn.metrics import confusion_matrix,accuracy_score, roc_curve, auc
from sklearn.metrics import mean_squared_error
from sklearn.metrics import make_scorer, auc, confusion_matrix, accuracy_score, f1_score, precision_score, recall_score, roc_auc_score, roc_curve
import lightgbm as lgb
from math import sqrt
import csv
from random import randint
from hyperopt import hp
from hyperopt import tpe
from hyperopt import Trials
from hyperopt import fmin
from hyperopt import STATUS_OK
from timeit import default_timer as timer
import pickle
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import GridSearchCV
# reg_lambda higher for overfitting
# reg_alpha higher for overfitting
global ITERATION
def get_my_config():
exp_desc = 'WIth SKF files and lag level features'
algo = 'LGBM'
return exp_desc, algo
###################################
# logging and helper function
###################################
def cal_rmse(y_true, y_predict):
rms = sqrt(mean_squared_error(y_true, y_predict))
return rms
def get_scores(y_true, y_predict, mode):
scores_df = pd.Series()
scores_df[mode+'_rmse'] = cal_rmse(y_true, y_predict)
return scores_df
def log_metrics(scores_df, train_scores, feature_importances, lgb_params, fold, exp_id):
n = randint(0, 10000)
exp_desc, algo = get_my_config()
with open('results_metrics.csv', 'a') as f:
spamwriter = csv.writer(f)
spamwriter.writerow([
exp_id,
n,
exp_desc,
algo,
lgb_params,
fold,
train_scores['train_rmse'],
scores_df['test_rmse']
])
with open('results_params_imps.csv', 'a') as f:
spamwriter = csv.writer(f)
spamwriter.writerow([
exp_id,
n,
exp_desc,
algo,
lgb_params,
fold,
feature_importances
])
##################################################
# preprocessing and training
##################################################
# XGB training
# take data and params and return the loss to minimize
# log the metrics
def train_XGB(X, y, params, splits, store_results=False, exp_id=None, test_results=False, X_test=None):
start = timer()
exp_desc, algo = get_my_config()
oof_reg_preds = np.zeros(X.shape[0])
if(test_results):
test_pred = np.zeros(X_test.shape[0])
print(params)
feature_imp=pd.Series(index= X.columns.values, data=0)
# other scikit-learn modules
estimator = lgb.LGBMRegressor(boosting='gbdt', objective='regression')
param_grid = {
'n_estimators': [8000, 10000, 15000],
'max_depth': [4, 8, -1],
'num_leaves': [31],
'subsample': [0.6, 0.8, 1.0],
'colsample_bytree': [0.6],
'early_stopping_rounds':['100']
}
gbm = GridSearchCV(estimator, param_grid, cv=5, scoring='neg_mean_squared_error', verbose=10)
gbm.fit(X, y)
print('Best parameters found by grid search are:', gbm.best_params_)
print("BEST CV SCORE: " + str(gbm.best_score_))
return True
################################
# main function
################################
@click.command()
@click.argument('type')
def main(type):
""" Runs data processing scripts to turn raw data from (../raw) into
cleaned data ready to be analyzed (saved in ../processed).
"""
logger = logging.getLogger(__name__)
logger.info('building features and target variable')
train=pd.read_csv('./data/processed/train_transactions_agg_v9.csv')
train=train.fillna(0)
test=pd.read_csv('./data/processed/test_transactions_agg_v9.csv')
test=test.fillna(0)
cols = [col for col in train.columns if col not in ['first_active_month', 'card_id', 'target', 'Unnamed: 0']]
cols = ['new_category_1_sum','new_category_1_mean','new_subsect_purchase_mean_ratio','dayofweek','new_purchase_mean_month_diff','new_merch_purchase_mean_ratio','hist_merchant_id_nunique_-9','hist_merch_month_diff','hist_weekend_mean','purchase_amount_sum_-7','hist_purchase_amount_max_-2','feature_1','new_category_2_5.0_mean','diff_purchase_sum_6_5','hist_purchase_amount_min_0','new_subsector_id_nunique_1','new_purchase_amount_max_2','hist_first_buy','hist_merchant_id_count_-9','merch_purchase_diff_5_6','hist_purchase_amount_max','hist_purchase_amount_max_-1','new_merch_avg_sales_lag3_std','new_purchase_amount_min_1','hist_purchase_date_uptonow','diff_purchase_lag_mean','hist_installments_sum_0','hist_subsector_id_nunique_0','diff_purchase_sum_7_6','purchase_amount_sum_0','new_merch_numerical_2_min','new_merchant_category_id_nunique','hist_authorized_flag_mean','new_purchase_date_min','hist_purchase_amount_min_-4','hist_category_3_B_sum','new_purchase_month_mean','merch_purchase_diff_lag_mean','merch_purchase_diff_4_5','new_month_diff_mean','hist_purchase_amount_std_-4','hist_month_lag_nunique','hist_purchase_amount_mean_-3','new_purchase_date_ptp_ptp','hist_purchase_amount_mean_0','hist_merch_avg_purchases_lag3_max','hist_purchase_amount_sum_-5','hist_merchant_id_nunique_-3','new_purchase_amount_std','purchase_amount_total','purchase_amount_sum_-3','diff_subsector_id_nunique_5_4','hist_merchant_category_id_nunique_-7','purchase_amount_sum_-5','hist_subsector_id_nunique_-9','hist_purchase_amount_std_-3','hist_merchant_category_id_nunique_-5','diff_purchase_sum_9_8','hist_purchase_amount_min','hist_purchase_month_std','hist_category_1_sum','new_purchase_amount_max','hist_purchase_amount_max_0','hist_merchant_id_nunique_-10','hist_purchase_amount_sum_-4','diff_purchase_sum_8_7','hist_purchase_amount_max_-3','hist_installments_mean_0','hist_purchase_amount_mean_-6','hist_installments_mean_-11','hist_category_2_1.0_mean','hist_installments_mean_-4','diff_merchant_id_nuni
que_1_0','diff_purchase_sum_10_9','new_first_buy','hist_purchase_date_average','hist_merch_avg_purchases_lag12_max','new_merch_purchase_sum_ratio','hist_purchase_amount_mean_-5','new_city_id_nunique','hist_purchase_amount_min_-5','hist_merch_purchase_sum_ratio','hist_merchant_id_nunique_-4','hist_merchant_category_id_nunique_-9','hist_merch_purchase_mean_ratio','hist_purchase_date_min','hist_purchase_amount_min_-3','hist_merch_avg_sales_lag3_mean','hist_installments_sum','hist_installments_mean_-3','hist_purchase_amount_sum_-1','new_merch_avg_sales_lag6_sum','new_purchase_date_uptonow','hist_installments_mean_-8','hist_installments_std_-6','hist_purchase_amount_sum_-10','hist_purchase_amount_sum_0','elapsed_time','new_category_3_A_sum','new_purchase_date_average','hist_purchase_amount_max_-4','hist_category_3_C_mean','hist_subsector_id_nunique_-11','hist_merchant_id_count_-1','hist_category_1_mean','new_purchase_date_max','new_installments_max_2','new_month_lag_mean','new_merch_avg_sales_lag12_sum','hist_installments_sum_-4','hist_installments_max_-10','diff_purchase_by_merchant','new_purchase_date_diff','new_purchase_amount_min_2','hist_installments_mean_-5','new_purchase_month_min','hist_installments_mean_-6','diff_purchase_sum_1_0','new_purchase_amount_mean_2','hist_installments_max_0','hist_merch_most_recent_sales_range_mean','hist_month_diff_mean','hist_card_id_size','hist_installments_mean_-12','hist_purchase_amount_min_-1','hist_purchase_date_max','feature_3','hist_installments_std','diff_merchant_id_nunique_4_3','diff_purchase_sum_5_4','hist_installments_sum_-6','new_purchase_amount_std_1','feature_2','diff_subsector_id_lag_mean','hist_subsector_id_nunique_-6','hist_purchase_amount_max_-5','purchase_amount_sum_-6','hist_purchase_amount_mean_-2','hist_merch_tran_ratio','purchase_amount_sum_-13','hist_purchase_amount_min_-8','hist_installments_std_-4','hist_merch_numerical_2_mean','hist_installments_sum_-5','hist_subsector_id_nunique_-10','new_merch_most_recen
t_sales_range_sum','hist_purchase_amount_max_-8','hist_year_nunique','new_month_diff_nunique','hist_merch_avg_sales_lag6_max','hist_purchase_amount_mean_-1','hist_purchase_amount_min_-6','new_purchase_amount_max_1','hist_purchase_amount_std_-1','hist_merchant_category_id_nunique_-3','hist_purchase_amount_mean_-4','purchase_amount_sum_-4','hist_month_nunique','hist_merch_category_4_sum','new_purchase_amount_mean','new_purchase_month_max', 'hist_purchase_amount_std_-5']
X=train[cols]
y=train['target']
X_test = test[cols]
exp_id = randint(0, 10000)
params = {
'boosting_type': 'gbdt',
'colsample_bytree': 0.5170233956444398,
'learning_rate': 0.01143545759270505,
'min_child_weight': 200,
'min_split_gain': 0.24720655947734432,
'n_estimators': 8700,
'num_leaves': 31,
'reg_alpha': 0.14446986536955275,
'reg_lambda': 0.8821246705659376,
'subsample': 0.9444712859527173,
'subsample_for_bin': 140000.0}
train_XGB(X, y, params, 5, False, exp_id, True, X_test)
#test_pred = pd.read_csv('./data/interim/test_pred.csv')
#pd.DataFrame({'card_id':test['card_id'], 'target':test_pred['test_pred']}).to_csv('./data/processed/submission_10.csv', index=False)
if __name__ == '__main__':
log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
logging.basicConfig(level=logging.INFO, format=log_fmt)
# not used in this stub but often useful for finding various files
project_dir = Path(__file__).resolve().parents[2]
# find .env automagically by walking up directories until it's found, then
# load up the .env entries as environment variables
load_dotenv(find_dotenv())
main() | [
"noreply@github.com"
] | noreply@github.com |
7b11a217c464081c230ff8cc59f4bce2964fcaed | 6f791fe957d702a7de9030c1e75edff435ad941f | /Opdracht 8/Opdracht 8.3.py | 0cfd7b0a8aff99c4d9e9295c088c91e647a9900d | [] | no_license | brunhildevink/Python-exercises | 10386830c998771500b16ce778d566bd178945d2 | 58a008737d9434f52f21a1bef0d99f0606d93b8e | refs/heads/master | 2021-08-10T12:06:42.410011 | 2017-11-12T15:01:00 | 2017-11-12T15:01:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 196 | py | def code(input):
input = input('Geef je naam en bestemming: ')
data = list(input)
output = str(''.join(str(ord(c) + 3) for c in data))
print(output)
return output
code(input)
| [
"brunhilde.vink@student.hu.nl"
] | brunhilde.vink@student.hu.nl |
6be33a60eeaa0a45b7f2af9452a5253e7dc448f9 | ca7450c44c593b4261b8be73c482cf223519ce76 | /venv/bin/rst2odt_prepstyles.py | bc7e0f2cff2e81ab34ea08f9e18ca331d5ca4cbe | [
"MIT"
] | permissive | danielbandeir/ExpoPoisson | 57fa931a70f770ca518575544db49e8799ccbe03 | 861289f7d9df9ea66fcedbafc3fc9f764b4aa5fd | refs/heads/master | 2020-03-19T02:23:18.307185 | 2018-11-06T19:48:23 | 2018-11-06T19:48:23 | 135,624,413 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,738 | py | #!/home/daniel/Documentos/git/ExpoPoisson/venv/bin/python3
# $Id: rst2odt_prepstyles.py 5839 2009-01-07 19:09:28Z dkuhlman $
# Author: Dave Kuhlman <dkuhlman@rexx.com>
# Copyright: This module has been placed in the public domain.
"""
Fix a word-processor-generated styles.odt for odtwriter use: Drop page size
specifications from styles.xml in STYLE_FILE.odt.
"""
#
# Author: Michael Schutte <michi@uiae.at>
from lxml import etree
import sys
import zipfile
from tempfile import mkstemp
import shutil
import os
NAMESPACES = {
"style": "urn:oasis:names:tc:opendocument:xmlns:style:1.0",
"fo": "urn:oasis:names:tc:opendocument:xmlns:xsl-fo-compatible:1.0"
}
def prepstyle(filename):
zin = zipfile.ZipFile(filename)
styles = zin.read("styles.xml")
root = etree.fromstring(styles)
for el in root.xpath("//style:page-layout-properties",
namespaces=NAMESPACES):
for attr in el.attrib:
if attr.startswith("{%s}" % NAMESPACES["fo"]):
del el.attrib[attr]
tempname = mkstemp()
zout = zipfile.ZipFile(os.fdopen(tempname[0], "w"), "w",
zipfile.ZIP_DEFLATED)
for item in zin.infolist():
if item.filename == "styles.xml":
zout.writestr(item, etree.tostring(root))
else:
zout.writestr(item, zin.read(item.filename))
zout.close()
zin.close()
shutil.move(tempname[1], filename)
def main():
args = sys.argv[1:]
if len(args) != 1:
print >> sys.stderr, __doc__
print >> sys.stderr, "Usage: %s STYLE_FILE.odt\n" % sys.argv[0]
sys.exit(1)
filename = args[0]
prepstyle(filename)
if __name__ == '__main__':
main()
# vim:tw=78:sw=4:sts=4:et:
| [
"bandeirapbdaniel@gmail.com"
] | bandeirapbdaniel@gmail.com |
da085176e2dd3c04b3ee9aeea6d02d5e23df5e7d | 2b33418f6f375792a0c95cb07706b4de55a35178 | /meregistro/apps/consulta_validez/forms.py | 1f99508c81de5852c8b90e726bc43433874b0284 | [
"BSD-3-Clause"
] | permissive | MERegistro/meregistro | 23afc8fdb48eb855fa611d87c6bcfee1bcbed6d6 | 6cde3cab2bd1a8e3084fa38147de377d229391e3 | refs/heads/master | 2021-01-19T01:23:57.227561 | 2017-12-05T18:45:02 | 2017-12-05T18:45:02 | 1,250,928 | 0 | 0 | null | 2012-09-05T13:54:43 | 2011-01-13T14:55:42 | Python | UTF-8 | Python | false | false | 4,801 | py | # -*- coding: UTF-8 -*-
"""
from django import forms
from apps.registro.models import Jurisdiccion, TipoGestion
from apps.consulta_validez.models import UnidadEducativa, Titulo
"""
from django import forms
from apps.validez_nacional.models import ValidezNacional
from apps.registro.models import Establecimiento, Anexo, Jurisdiccion, TipoGestion
from apps.titulos.models import Cohorte, Carrera, TituloNacional
from itertools import chain
sedes = Establecimiento.objects.order_by('cue').values_list('id', 'cue', 'nombre')
anexos = Anexo.objects.order_by('cue').values_list('id', 'cue', 'nombre')
unidades_educativas = [('', '---------')] + [(ue[0], ue[1] + " - " + ue[2]) for ue in list(chain(sedes, anexos))]
titulos = TituloNacional.objects.order_by('nombre').values_list('nombre')
titulos = [('', '---------')] + [(t[0], t[0]) for t in titulos.distinct('nombre')]
class ConsultaValidezFormFilters(forms.Form):
jurisdiccion = forms.ModelChoiceField(queryset=Jurisdiccion.objects.order_by('nombre'), label='Jurisdiccion', required=False)
tipo_gestion = forms.ModelChoiceField(queryset=TipoGestion.objects.order_by('nombre'), label='Tipo de Gestión', required=False)
cue = forms.CharField(max_length=40, label='Cue', required=False)
unidad_educativa = forms.ChoiceField(choices=unidades_educativas, label='Nombre del ISFD', required=False)
carrera = forms.CharField(max_length=50, label='Carrera', required=False)
titulo = forms.CharField(max_length=50, label='Título', required=False)
cohorte = forms.CharField(max_length=4, label='Cohorte', required=False)
nroinfd = forms.CharField(label='Número de INFD', required=False)
normativas_nacionales = forms.CharField(max_length=40, label='Normativa Nacional', required=False)
def buildQuery(self, q=None):
"""
Crea o refina un query de búsqueda.
"""
if q is None:
q = ValidezNacional.objects.all()
if self.is_valid():
def filter_by(field):
return self.cleaned_data.has_key(field) and self.cleaned_data[field] != '' and self.cleaned_data[field] is not None
if filter_by('jurisdiccion'):
# Puede ser sede o anexo, determinarlo según tipo_unidad_educativa
from django.db.models import Q
q = q.filter(
(Q(tipo_unidad_educativa='Sede') & Q(unidad_educativa_id__in=[e.pk for e in Establecimiento.objects.filter(dependencia_funcional__jurisdiccion=self.cleaned_data['jurisdiccion'])])) |
(Q(tipo_unidad_educativa='Anexo') & Q(unidad_educativa_id__in=[a.pk for a in Anexo.objects.filter(establecimiento__dependencia_funcional__jurisdiccion=self.cleaned_data['jurisdiccion'])]))
)
if filter_by('tipo_gestion'):
# Puede ser sede o anexo, determinarlo según tipo_unidad_educativa
from django.db.models import Q
q = q.filter(
(Q(tipo_unidad_educativa='Sede') & Q(unidad_educativa_id__in=[e.pk for e in Establecimiento.objects.filter(dependencia_funcional__tipo_gestion=self.cleaned_data['tipo_gestion'])])) |
(Q(tipo_unidad_educativa='Anexo') & Q(unidad_educativa_id__in=[a.pk for a in Anexo.objects.filter(establecimiento__dependencia_funcional__tipo_gestion=self.cleaned_data['tipo_gestion'])]))
)
if filter_by('cue'):
q = q.filter(cue__icontains=self.cleaned_data['cue'])
if filter_by('unidad_educativa'):
# Puede ser sede o anexo, determinarlo según tipo_unidad_educativa
from django.db.models import Q
q = q.filter(
(Q(tipo_unidad_educativa='Sede') & Q(unidad_educativa_id__in=[e.pk for e in Establecimiento.objects.filter(pk=self.cleaned_data['unidad_educativa'])])) |
(Q(tipo_unidad_educativa='Anexo') & Q(unidad_educativa_id__in=[a.pk for a in Anexo.objects.filter(pk=self.cleaned_data['unidad_educativa'])]))
)
if filter_by('carrera'):
q = q.filter(carrera__icontains=self.cleaned_data['carrera'])
if filter_by('titulo'):
q = q.filter(titulo_nacional__icontains=self.cleaned_data['titulo'])
if filter_by('cohorte'):
q = q.filter(primera_cohorte__lte=self.cleaned_data['cohorte'], ultima_cohorte__gte=self.cleaned_data['cohorte'])
if filter_by('nroinfd'):
q = q.filter(nro_infd__icontains=self.cleaned_data['nroinfd'].strip())
if filter_by('normativas_nacionales'):
q = q.filter(normativas_nacionales__icontains=self.cleaned_data['normativas_nacionales'])
return q
| [
"luciano.baraglia@gmail.com"
] | luciano.baraglia@gmail.com |
2997912da47e388f1a2e1ac36aac7890aec2dbb0 | 37e84e4169a43f1299f4955c5ce02d6c84ebd08d | /yohack/migrations/0003_auto_20200426_1011.py | 12f23d4c877a8845a91fb9ab400217767faf1381 | [] | no_license | evgenii-ivanov/yohack-2020-scrapping-psychlogy | 0588aa92d4a3de9103c1aaa436e8d5a4c7cec96d | 7df86b01f1ffd36e07e6edaba4ee584cb806afa6 | refs/heads/master | 2022-04-25T05:20:23.452088 | 2020-04-26T14:27:16 | 2020-04-26T14:27:16 | 258,707,697 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,553 | py | # Generated by Django 3.0.5 on 2020-04-26 07:11
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated schema migration for the ``yohack`` app.

    Adds an ``activity_score`` column to ``UserCharacteristics`` and creates
    two user-pair join tables, ``Matchings`` and ``Like``.
    """

    # Must run after the swappable user model and the previous app migration.
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('yohack', '0002_userinterest'),
    ]
    operations = [
        # New integer column; existing rows get the default score of 500.
        migrations.AddField(
            model_name='usercharacteristics',
            name='activity_score',
            field=models.IntegerField(default=500),
        ),
        # Pair table linking two users; distinct related_names keep the two
        # reverse relations from the user model apart.
        migrations.CreateModel(
            name='Matchings',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('user1', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='user1', to=settings.AUTH_USER_MODEL)),
                ('user2', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='user2', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        # Same shape as Matchings but records one-directional likes.
        migrations.CreateModel(
            name='Like',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('user1', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='like_user1', to=settings.AUTH_USER_MODEL)),
                ('user2', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='like_user2', to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| [
"timeescapenow@gmail.com"
] | timeescapenow@gmail.com |
2731797098fb1c664c1170bf19c72783be7e9407 | a0a4bda67eacbc1fe922591e12fdc5bf1f13a9c0 | /power_method.py | 4297c28b21357dce495b58529353cf33820a75a4 | [] | no_license | zzag/codesamples | cdd713a62e08480988f3372d33d0e8ca1c326d7b | de3496cd572d4b27f9781078c69b23df22ad7c18 | refs/heads/master | 2023-01-21T05:44:34.628273 | 2023-01-09T11:20:39 | 2023-01-09T11:20:39 | 133,409,760 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 549 | py | import numpy as np
def power_method(M, num_iters=10):
    """Estimate the dominant eigenpair of a square matrix by power iteration.

    Parameters
    ----------
    M : np.ndarray
        Square matrix whose dominant eigenvalue/eigenvector are sought.
    num_iters : int, optional
        Number of power-iteration steps. Defaults to 10, preserving the
        previously hard-coded iteration count.

    Returns
    -------
    (float, np.ndarray)
        Rayleigh-quotient estimate of the dominant eigenvalue, and the
        corresponding unit-norm eigenvector estimate.
    """
    b = np.ones(M.shape[0])
    for _ in range(num_iters):
        # Rescale by the largest-magnitude entry each step so repeated
        # multiplication cannot overflow or underflow.
        _b = b / abs(b).max()
        b = M.dot(_b)
    eigenvalue = b.dot(M.dot(b)) / b.dot(b)  # Rayleigh quotient
    eigenvector = b / np.linalg.norm(b)
    return eigenvalue, eigenvector
M = np.array([
[1, 2],
[3, 4]
], dtype="float32")
eigenvalue, eigenvector = power_method(M)
print("Dominant eigenvalue:", eigenvalue)
print("Eigenvector:", eigenvector)
print("M*v =", M.dot(eigenvector))
print("lambda*v =", eigenvector * eigenvalue)
| [
"vladzzag@gmail.com"
] | vladzzag@gmail.com |
3dacc33f7215617274d1918dacbcf4c4e3788fb8 | 6f76510dee731e86d6ae553f060bb6e1149bb570 | /lmtpd/tests.py | b9b309b0c26de0b7786f12d9acafbc07787b97e2 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"Python-2.0"
] | permissive | moggers87/lmtpd | da154e5972082e0524767a8c5b378d3d02d3c438 | d9aad65fa4e2c3d29fe7dd81d449718e395254d0 | refs/heads/master | 2021-03-13T00:02:19.282293 | 2020-07-05T05:05:41 | 2020-07-05T05:05:41 | 9,945,190 | 1 | 1 | MIT | 2019-12-06T15:54:06 | 2013-05-08T20:24:29 | Python | UTF-8 | Python | false | false | 8,350 | py | # -*- coding: utf-8 -*-
#
# See LICENSE for copyright notices
from __future__ import print_function, unicode_literals
import asyncore
import lmtpd
import os
import shutil
import socket
import tempfile
import threading
import unittest
TO = b"mrs.smoker@example.com"
FROM = b"mrs.non-smoker@example.com"
MSG = b"""Subject: I keep falling off!
Oh! Well I never!
"""
TEST_PORT = int(os.environ.get("LMTPD_TEST_POST", 8899))
class LMTPTestServer(lmtpd.LMTPServer):
    """Test double for an LMTP server that accepts every message.

    Tests may set ``reply`` to a full status string (e.g. "451 ...") to
    force a specific response; leaving it None yields the default 250 OK.
    """

    # Value returned from process_message(); None means "use default 250 OK".
    reply = None

    def process_message(self, *args, **kwargs):
        """Do nothing, server will return 250 OK"""
        return self.reply
class LMTPTestAF(unittest.TestCase):
def test_ipv4(self):
first_line = ""
conn = None
socket_file = None
server = None
loop = None
try:
server = LMTPTestServer(("127.0.0.1", TEST_PORT))
loop = threading.Thread(target=asyncore.loop, kwargs={'timeout': 1})
loop.start()
# connect to server
conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
conn.connect(("127.0.0.1", TEST_PORT))
socket_file = conn.makefile('rb')
first_line = socket_file.readline()
finally:
getattr(conn, "close", lambda: None)()
getattr(socket_file, "close", lambda: None)()
getattr(server, "close", lambda: None)()
getattr(loop, "join", lambda: None)()
# just check that something sensible has been given
self.assertEqual(first_line[:4], b"220 ")
@unittest.skipIf(os.environ.get("TRAVIS", False), "Travis CI does not support IPv6")
def test_ipv6(self):
first_line = ""
conn = None
socket_file = None
server = None
loop = None
try:
server = LMTPTestServer(("::1", TEST_PORT))
loop = threading.Thread(target=asyncore.loop, kwargs={'timeout': 1})
loop.start()
# connect to server
conn = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
conn.connect(("::1", TEST_PORT))
socket_file = conn.makefile('rb')
first_line = socket_file.readline()
finally:
getattr(conn, "close", lambda: None)()
getattr(socket_file, "close", lambda: None)()
getattr(server, "close", lambda: None)()
getattr(loop, "join", lambda: None)()
# just check that something sensible has been given
self.assertEqual(first_line[:4], b"220 ")
def test_unix(self):
first_line = ""
conn = None
socket_file = None
server = None
loop = None
try:
tempdir = tempfile.mkdtemp()
socket_name = os.path.join(tempdir, "lmtp")
server = LMTPTestServer(socket_name)
loop = threading.Thread(target=asyncore.loop, kwargs={'timeout': 1})
loop.start()
# connect to server
conn = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
conn.connect(socket_name)
socket_file = conn.makefile('rb')
first_line = socket_file.readline()
finally:
getattr(conn, "close", lambda: None)()
getattr(socket_file, "close", lambda: None)()
getattr(server, "close", lambda: None)()
getattr(loop, "join", lambda: None)()
shutil.rmtree(tempdir, ignore_errors=True)
# just check that something sensible has been given
self.assertEqual(first_line[:4], b"220 ")
class LMTPTester(unittest.TestCase):
"""Test cases that connect to a server over a socket"""
def setUp(self):
self.tempdir = tempfile.mkdtemp()
self.socket_name = os.path.join(self.tempdir, "lmtp")
self.server = LMTPTestServer(self.socket_name)
self.loop = threading.Thread(target=asyncore.loop, kwargs={'timeout': 1})
self.loop.start()
# connect to server
self.conn = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self.conn.connect(self.socket_name)
self.file = self.conn.makefile('rb')
def tearDown(self):
self.conn.close()
self.file.close()
self.server.close()
self.loop.join()
shutil.rmtree(self.tempdir)
def reply(self):
line = self.file.readline()
if len(line) == 0:
return (0, b'')
try:
code = int(line[:3])
reply = line[4:]
except (IndexError, ValueError):
code = None
reply = None
return (code, reply)
def do_cmd(self, cmd, flush=False):
if flush:
self.reply()
self.conn.send(cmd)
self.conn.send(b"\r\n")
return self.reply()
def test_conversation(self):
"""Test a basic conversation between client and server"""
code, reply = self.reply()
self.assertEqual(code, 220, reply)
code, reply = self.do_cmd(b"LHLO localhost")
self.assertEqual(code, 250, reply)
code, reply = self.reply()
self.assertEqual(code, 250)
self.assertEqual(reply, b"ENHANCEDSTATUSCODES\r\n")
code, reply = self.reply()
self.assertEqual(reply, b"PIPELINING\r\n")
self.assertEqual(code, 250)
code, reply = self.do_cmd(b"MAIL FROM:<" + FROM + b">")
self.assertEqual(code, 250, reply)
code, reply = self.do_cmd(b"RCPT TO:<" + TO + b">")
self.assertEqual(code, 250, reply)
code, reply = self.do_cmd(b"DATA")
self.assertEqual(code, 354, reply)
self.conn.send(MSG)
self.conn.send(b"\r\n.\r\n")
code, reply = self.reply()
self.assertEqual(code, 250, reply)
def test_MAIL_RCPT_order(self):
"""Test that RCPT can't be used before MAIL"""
code, reply = self.do_cmd(b"RCPT TO:<" + TO + b">", flush=True)
self.assertNotEqual(code, 250)
self.assertEqual(code, 503)
def test_address(self):
"""Test accepting of addresses with and without <>"""
code, reply = self.do_cmd(b"MAIL FROM:<" + FROM + b">", flush=True)
self.assertEqual(code, 250, reply)
self.do_cmd(b"RSET")
code, reply = self.do_cmd(b"MAIL FROM:" + FROM)
self.assertEqual(code, 250)
def test_DATA_after(self):
"""Test DATA can't be used before MAIL and RCPT"""
code, reply = self.do_cmd(b"DATA", flush=True)
self.assertNotEqual(code, 354, b"DATA command accepted before MAIL")
self.do_cmd(b"MAIL FROM:<" + FROM + b">")
code, reply = self.do_cmd(b"DATA")
self.assertNotEqual(code, 354, b"DATA command accepted before RCPT")
def test_RSET(self):
"""Test resetting the state of the connection"""
self.do_cmd(b"MAIL FROM:<" + FROM + b">", flush=True)
code, reply = self.do_cmd(b"RSET")
self.assertEqual(code, 250)
code, reply = self.do_cmd(b"RCPT TO:<" + TO + b">")
self.assertEqual(code, 503, reply)
def test_not_implemented(self):
"""Test that unknown commands get rejected"""
code, reply = self.do_cmd(b"HELO", flush=True)
self.assertEqual(code, 502, reply)
def test_pipeline(self):
"""Test command pipelining works"""
self.reply() # manaully flush message on connect
self.conn.send(b"MAIL FROM:<" + FROM + b">\r\nRCPT TO:<" + TO + b">\r\nDATA\r\n")
# MAIL FROM reply
code, reply = self.reply()
self.assertEqual(code, 250, reply)
# RCPT TO reply
code, reply = self.reply()
self.assertEqual(code, 250, reply)
# DATA reply
code, reply = self.reply()
self.assertEqual(code, 354, reply)
def test_process_message_returns_str(self):
self.server.reply = "451 No you don't"
code, reply = self.reply()
code, reply = self.do_cmd(b"LHLO localhost")
code, reply = self.reply()
code, reply = self.reply()
code, reply = self.do_cmd(b"MAIL FROM:<" + FROM + b">")
code, reply = self.do_cmd(b"RCPT TO:<" + TO + b">")
code, reply = self.do_cmd(b"DATA")
self.conn.send(MSG)
self.conn.send(b"\r\n.\r\n")
code, reply = self.reply()
self.assertEqual(code, 451, reply)
| [
"moggers87+git@moggers87.co.uk"
] | moggers87+git@moggers87.co.uk |
c789e5f2f069242cc4df33612fe2e753488ff350 | 6f5db09bc4db30bd9e6c647ca8e338c62c3f4b6c | /HI/20200623_4_1.py | 962c7b2fb36e0ea7e3e1f278d1c4dcae582e7ffb | [] | no_license | zzarbttoo/TMT | 96cb64caf4603d6c6eb454ea7f3d649d4b2ca9fb | f63800662b705e5661ce416ebb9d64978b0a84d0 | refs/heads/master | 2022-11-29T07:14:27.087788 | 2020-08-09T13:54:16 | 2020-08-09T13:54:16 | 272,377,222 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 604 | py | # 1주자 4번_가장 큰 수
import itertools
numb = [3, 30, 34, 5, 9]
def solution(numbers):
    """Return (as a string) the largest number formed by concatenating *numbers*.

    The original implementation enumerated every permutation (O(n!)) and, as
    the author's own note says, exceeded the time limit. Sorting the digit
    strings with the pairwise comparator "a+b vs b+a" produces the same
    result in O(n log n).
    """
    import functools

    digits = list(map(str, numbers))

    def compare(a, b):
        # a should precede b exactly when the concatenation a+b beats b+a.
        if a + b > b + a:
            return -1
        if a + b < b + a:
            return 1
        return 0

    digits.sort(key=functools.cmp_to_key(compare))
    # The int() round-trip collapses an all-zero result such as "000" to "0",
    # matching the old behaviour of taking max() over integer permutations.
    return str(int(''.join(digits)))
print(solution(numb))
""" 순열 함수 permutations으로 모든 조합을 만들어 그 중 가장 큰 값을 return 하도록 했는데,
sample test는 통과하였으나 가능한 모든 순열을 모두 만들어 비교하기 때문에 수행 시간 초과""" | [
"junghi5487@gmail.com"
] | junghi5487@gmail.com |
d21b3482eeb0fc664fba87cef3b29a57a58ae907 | bad1906d8ef6b4b11fb1a4f5553482242174fd21 | /randomforest.py | 3004f72d6d24a5fb3cdc7402cea56f9473827bad | [] | no_license | simonxmh/MachineLearningClassifiers | 4d5132579ac1ce4b4c5c54ed6332ef13212d88d9 | 14bbfb02070c4bb975e42c6b2eb9081a2370db48 | refs/heads/master | 2021-03-24T12:01:44.138391 | 2018-01-28T21:22:40 | 2018-01-28T21:22:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,151 | py | from sklearn.ensemble import RandomForestClassifier
import pandas as pd
import numpy as np
import time
np.random.seed(0)
# print(clf.predict_proba(test[features])[0:10])
def loadTrainingDataset(filename, split):
    """Load a CSV and randomly split its rows into train/test subsets.

    Each row lands in the training set with probability *split*.
    Returns (trainingSet, testSet, full DataFrame); note the helper
    'is_train' column remains present in all three frames.
    """
    df = pd.read_csv(filename,header=0)
    # Drop the unnamed index column written by a previous to_csv() call.
    df.drop('Unnamed: 0', axis=1, inplace=True)
    df['is_train'] = np.random.uniform(0, 1, len(df)) <= split
    trainingSet, testSet = df[df['is_train']==True], df[df['is_train']==False]
    return trainingSet, testSet,df
def loadTestingDataset(filename):
    """Load the held-out test CSV, dropping its unnamed index column."""
    df = pd.read_csv(filename,header=0)
    df.drop('Unnamed: 0', axis=1, inplace=True)
    return df
def trainModel(df,features):
    """Fit a 100-tree random forest on *df* using the given feature columns.

    Assumes *df* contains a 'label' column with the target classes.
    Returns the fitted classifier.
    """
    y = df['label']
    # Fixed random_state for reproducibility; n_jobs=-1 uses all CPU cores.
    clf = RandomForestClassifier(n_estimators=100,n_jobs=-1, random_state=0)
    clf.fit(df[features], y)
    return clf
# finaldf = pd.DataFrame(clf.predict(test[features]))
# finaldf.to_csv('results.csv')
def getAccuracy(testArray, result):
    """Return the percentage of predictions in *result* matching *testArray*."""
    total = len(testArray)
    matches = sum(1 for i in range(total) if testArray[i] == result[i])
    return (matches / float(total)) * 100.0
def main():
    """Train on a random split of the training CSV and print the accuracy."""
    split = 0.67
    trainingSet, testSet, fullSet = loadTrainingDataset('trainsetprocessed.csv', split)
    print('Train: ' + repr(len(trainingSet)))
    print('Test: ' + repr(len(testSet)))
    # Skip the first column and the last two (presumably 'label' and the
    # 'is_train' helper added by loadTrainingDataset — TODO confirm).
    features = trainingSet.columns[1:-2]
    # Cross validated set
    t = time.time()
    model = trainModel(trainingSet,features)
    print(np.round_(time.time() - t, 3), 'sec taken for training')
    result = model.predict(testSet[features])
    testArray = testSet['label'].as_matrix()
    accuracy = getAccuracy(testArray,result)
    print('Accuracy: ' + repr(accuracy) + '%')
    # The commented-out block below instead trains on the full set and writes
    # predictions for the separate test file to CSV.
    # print("Training Model...")
    # t = time.time()
    # model = trainModel(fullSet,features)
    # print("Finished Training")
    # print(np.round_(time.time() - t, 3), 'sec taken for training')
    #
    # testingSet = loadTestingDataset('testsetprocessed.csv')
    # print("Making Predictions...")
    # result = model.predict(testingSet)
    # print("Finished Predictions")
    #
    # output = pd.DataFrame(data=result)
    # output.to_csv('output5000trees2.csv')
main()
| [
"simh@live.ca"
] | simh@live.ca |
8c1a08628fbe4512d1ff02ecc142ce0ecd80fe1b | f03472a1b928c4db2378f92b051f400dc756a89e | /bruteparser.py | 74719abd7bbf0732b448a5406e7fd9d5a1efd365 | [] | no_license | 10ego/Brute-Parser | 2fca989fac1fd5ee147e6588fe21ae321bc94b66 | 7a978a0422a48d2738b0addd088a0dc8bd849c78 | refs/heads/master | 2020-04-20T08:59:04.302190 | 2019-03-25T15:00:02 | 2019-03-25T15:00:02 | 168,754,649 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,527 | py | from PyPDF2 import PdfFileReader
import docx2txt
from docx import *
import csv
import subprocess
import os
subprocess.run("ls > filelist.csv", shell=True) #subprocess to create a list of files to parse in folder
script_dir = os.path.dirname(__file__)
output_dir = "RAW_RESULTS" #This is the subdirectory where the output raw .txt files will be saved
try:
subprocess.run("mkdir {}".format(output_dir), shell=True)
except:
pass
counter = 0
with open("filelist.csv") as flist:
l = csv.reader(flist)
filelist = [row[0] for row in l]
docxlist = [files for files in filelist if files[-5:] == ".docx"] #array of .docx files
doclist = [files for files in filelist if files[-4:] == ".doc"] #array of .doc files
pdflist = [files for files in filelist if files[-4:] == ".pdf"] #array of .pdf files
def parsedocx(filename):
    """Extract text from a .docx file and save it as <name>.txt in output_dir."""
    global counter
    doc = docx2txt.process(filename)
    # filename[:-5] strips the ".docx" extension.
    with open(os.path.join(script_dir, output_dir + "/" + filename[:-5] + ".txt"), 'w') as save:
        save.write(doc)
    counter+=1
def parsedoc(filename):
    """Convert a legacy .doc file to text via the external `antiword` tool."""
    global counter
    # NOTE(review): filename is interpolated into a shell string; a name
    # containing a single quote would break (or inject into) the command.
    cmd = "antiword '{}' > '{}/{}.txt'".format(filename, output_dir, filename[:-4])
    subprocess.run(cmd, shell=True)
    counter+=1
def parsepdf(filename):
    """Extract text from every page of a PDF into <name>.txt in output_dir."""
    global counter
    p = PdfFileReader(filename)
    total_pages = p.getNumPages()
    # Truncate any previous output file before appending page by page.
    with open(os.path.join(script_dir, output_dir + "/" + filename[:-4] + ".txt"), "w") as clr:
        clr.write('')
    for page in range(total_pages):
        # Prefix each page's text with a "(Page i/N)" marker.
        page_content = "\n(Page {}/{})\n".format(page, total_pages)+str(p.getPage(page).extractText())
        with open(os.path.join(script_dir, output_dir + "/" + filename[:-4] + ".txt"), "a+") as save:
            save.write(page_content)
    counter+=1
for docx in docxlist:
print("Parsing {} of {} files: docx file {}...".format(counter+1, len(filelist), docx))
parsedocx(docx)
print("Completed processing file {}".format(docx))
for doc in doclist:
print("Parsing {} of {} files: doc file {}...".format(counter+1, len(filelist), doc))
parsedoc(doc)
print("Completed processing file {}".format(doc))
for pdf in pdflist:
print("Parsing {} of {} files: pdf file {}...".format(counter+1, len(filelist), pdf))
parsepdf(pdf)
print("Completed processing file {}".format(pdf))
print("Completed parsing total of {} files".format(len(filelist)))
| [
"noreply@github.com"
] | noreply@github.com |
55f3d48b8bedb08c75415c504602832b5b5c9760 | 0e461c8d2b99493afb884a80c2b851fc5602c4bf | /tools/parse-log.py | 893b45e0c351eca256676b72b6bf8b189848d125 | [] | no_license | ontiyonke/smokey | f6d75757f2b0cfcb7a089f7bfed61839c9c83d28 | 01b0996724891e7fdc3a5912753b50e5246b184e | refs/heads/master | 2020-05-22T18:11:13.596914 | 2018-02-28T15:05:17 | 2018-02-28T15:07:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,071 | py | #!/usr/bin/env python
import argparse
import re
def extract_python_exceptions(log_file):
"""
Searches a log file for references to Python exceptions
and returns an array of lines describing the type of
exception that occurred.
"""
for line in log_file:
match = re.match('^(\s+)Traceback', line)
if match:
indent = len(match.group(1))
for line in log_file:
line_indent = len(re.match('^\s*', line).group(0))
if line_indent == indent:
yield line.strip()
break
def main():
parser = argparse.ArgumentParser(description=
"""Extracts a list of exceptions from Smokey logs.
Given a log file from Smokey, this script parses
out and prints details of exceptions that occurred.
""")
parser.add_argument('log_file', help="The Smokey log file")
args = parser.parse_args()
for exception in extract_python_exceptions(open(args.log_file)):
print('{}'.format(exception))
if __name__ == '__main__':
main()
| [
"robertknight@gmail.com"
] | robertknight@gmail.com |
9dedcdee6a2d68515c547bd4a1b13efe3b23bdce | 3a891a79be468621aae43defd9a5516f9763f36e | /desktop/core/ext-py/dnspython-1.15.0/examples/ddns.py | f351524ee738290cfe64177208bb0df88bbff61f | [
"LicenseRef-scancode-warranty-disclaimer",
"ISC",
"Apache-2.0"
] | permissive | oyorooms/hue | b53eb87f805063a90f957fd2e1733f21406269aa | 4082346ef8d5e6a8365b05752be41186840dc868 | refs/heads/master | 2020-04-15T20:31:56.931218 | 2019-01-09T19:02:21 | 2019-01-09T19:05:36 | 164,998,117 | 4 | 2 | Apache-2.0 | 2019-01-10T05:47:36 | 2019-01-10T05:47:36 | null | UTF-8 | Python | false | false | 1,204 | py | #!/usr/bin/env python
#
# Use a TSIG-signed DDNS update to update our hostname-to-address
# mapping.
#
# usage: ddns.py <ip-address>
#
# On linux systems, you can automatically update your DNS any time an
# interface comes up by adding an ifup-local script that invokes this
# python code.
#
# E.g. on my systems I have this
#
# #!/bin/sh
#
# DEVICE=$1
#
# if [ "X${DEVICE}" == "Xeth0" ]; then
# IPADDR=`LANG= LC_ALL= ifconfig ${DEVICE} | grep 'inet addr' |
# awk -F: '{ print $2 } ' | awk '{ print $1 }'`
# /usr/local/sbin/ddns.py $IPADDR
# fi
#
# in /etc/ifup-local.
#
import sys
import dns.update
import dns.query
import dns.tsigkeyring
#
# Replace the keyname and secret with appropriate values for your
# configuration.
#
keyring = dns.tsigkeyring.from_text({
'keyname.' : 'NjHwPsMKjdN++dOfE5iAiQ=='
})
#
# Replace "example." with your domain, and "host" with your hostname.
#
update = dns.update.Update('example.', keyring=keyring)
update.replace('host', 300, 'A', sys.argv[1])
#
# Replace "10.0.0.1" with the IP address of your master server.
#
response = dns.query.tcp(update, '10.0.0.1', timeout=10)
| [
"yingchen@cloudera.com"
] | yingchen@cloudera.com |
499388b2165572001dc1138029488a7777cf7e8c | 45fdc51cf264bbd50e59655440eefc91451c50ea | /text/src/textwrap_dedent.py | 5084ccaedb083bd8a9ae1878d3c3217339c0efd4 | [] | no_license | blindij/python3_stl | 2163043f3a9113eac21a48a35685a4a01987e926 | ea138e25f8b5bbf7d8f78e4b1b7e2ae413de4735 | refs/heads/master | 2021-12-24T20:37:54.055116 | 2021-09-29T13:37:38 | 2021-09-29T13:37:38 | 191,508,648 | 0 | 0 | null | 2019-08-27T15:45:53 | 2019-06-12T06:10:30 | Python | UTF-8 | Python | false | false | 142 | py | import textwrap
from textwrap_example import sample_text
dedented_text = textwrap.dedent(sample_text)
print('Dedented')
print(dedented_text)
| [
"blindij@users.noreply.github.com"
] | blindij@users.noreply.github.com |
49bde105991fc66de00af0bad32ba7c993b35317 | e2c08424f2d6fc8bc5a63430824d739df657ee5e | /turtles/starter/ifs.py | 9ac6b3a8da4cf55be766e661c2228ef7aa5a08bb | [] | no_license | emlbarnes/she_codes_python | ca71c9555c6876d69b0e6cf34ae83ea6942af106 | 13859cb4591e52deaa5deff5295c28a945dc404b | refs/heads/main | 2023-07-06T18:52:45.961848 | 2021-08-10T06:32:58 | 2021-08-10T06:32:58 | 374,647,631 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 179 | py |
mylist = [None, 6, None, 1, 2, 5, 0]
for item in mylist:
print()
print (item)
if item is None:
print ('None!')
elif item < 3:
print ('Little!')
| [
"33987921+emlbarnes@users.noreply.github.com"
] | 33987921+emlbarnes@users.noreply.github.com |
3f5d5a2261db4dba1672d87cd75edaf13c28353b | a23716501d5b5c9495f43e8e69c1090a9ea4daae | /aws_boto3.py | 4280cee3d8a457859433ac4ffee3afae1f8541c0 | [] | no_license | jmortega/python_ciberseguridad_2021 | 0b539f399bc176daf00cda58076606168e14b8b5 | 5bf507b0989c19f3512824be54f36d06a39c0215 | refs/heads/main | 2023-07-15T11:54:10.440026 | 2021-08-26T14:06:15 | 2021-08-26T14:06:15 | 400,169,791 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 483 | py | import boto3
aws_access_key_id = ''
aws_secret_access_key = ''
region_name = 'ap-southeast-2'
session = boto3.session.Session(aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
region_name=region_name)
ec2client = session.client('ec2')
client_instance = ec2client.run_instances(
ImageId='ami-30041c53',
KeyName='Keys',
MinCount=1,
MaxCount=1,
InstanceType='t2.micro')
| [
"noreply@github.com"
] | noreply@github.com |
2d23576a7b5ecdc8e2788f1a60990a07cd2f57a2 | 950a56dec9220698c329c013f5c09a1c8113658b | /Week 03 - Python/Extra_Content/ADVANCED_Stu_Resume_Analysis/Solved/resume_analysis.py | d9aa72160b5d4be5ef613c991b9d46cf090cfa68 | [] | no_license | Yannahhh/git-lab-content | 6e6eabdcc9a0fd3c60f10f10b509e3644684f53f | 264e92c2c5b330231d366bdf9390edd7616fccca | refs/heads/master | 2023-03-25T02:52:15.798135 | 2020-04-13T01:38:37 | 2020-04-13T01:38:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,062 | py | # -*- coding: UTF-8 -*-
"""Resume Analysis Module."""
import os
import string
# Counter is used for the bonus solution
from collections import Counter
# Paths
resume_path = os.path.join('resume.md')
# Skills to match
REQUIRED_SKILLS = {"excel", "python", "mysql", "statistics"}
DESIRED_SKILLS = {"r", "git", "html", "css", "leaflet"}
def load_file(filepath):
"""Helper function to read a file and return the data."""
with open(filepath, "r") as resume_file_handler:
return resume_file_handler.read().lower().split()
# Grab the text for a Resume
word_list = load_file(resume_path)
# Create a set of unique words from the resume
resume = set()
# Remove trailing punctuation from words
for token in word_list:
resume.add(token.split(',')[0].split('.')[0])
print(resume)
# Remove Punctuation that were read as whole words
punctuation = set(string.punctuation)
resume = resume - punctuation
print(resume)
# Calculate the Required Skills Match using Set Intersection
print(resume & REQUIRED_SKILLS)
# Calculate the Desired Skills Match using Set Intersection
print(resume & DESIRED_SKILLS)
# Bonus: Resume Word Count
# ==========================
# Initialize a dictionary with default values equal to zero
word_count = {}.fromkeys(word_list, 0)
# Loop through the word list and count each word.
for word in word_list:
word_count[word] += 1
print(word_count)
# Bonus using collections.Counter
word_counter = Counter(word_list)
print(word_counter)
# Comparing both word count solutions
print(word_count == word_counter)
# Top 10 Words
print("Top 10 Words")
print("=============")
# Clean Punctuation
_word_count = [word for word in word_count if word not in string.punctuation]
# Clean Stop Words
stop_words = ["and", "with", "using", "##", "working", "in", "to"]
_word_count = [word for word in _word_count if word not in stop_words]
# Sort words by count and print the top 10
sorted_words = []
for word in sorted(_word_count, key=word_count.get, reverse=True)[:10]:
print(f"Token: {word:20} Count: {word_count[word]}")
| [
"raakeshtrip@gmail.com"
] | raakeshtrip@gmail.com |
43f1faa94723a423196ae617e13bd6ae27f0dc76 | 50e9d0d60bed361d78fe71845e63cc6bf2f4e315 | /stats_info.py | 3cb67cbf02d2bf14114d6dcc6300f5fcbe680f43 | [] | no_license | gigpir/Tesi | fce8a9026c25781d3923810f61f9021a9ea19c61 | dae53e2bfe289bbf91dd936af8c279c9b8e095db | refs/heads/master | 2023-03-05T21:10:48.831197 | 2021-02-16T16:03:42 | 2021-02-16T16:03:42 | 326,991,671 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,254 | py | import numpy as np
from matplotlib import pyplot as plt
def stats_artists_songs(artists):
#given a dictionary<artist.id><artist>
#print information about number of songs
avg_songs=0
d = dict()
for a in artists:
avg_songs += len(artists[a].song_list)
if len(artists[a].song_list) not in d:
d[len(artists[a].song_list)] = 1
else:
d[len(artists[a].song_list)] += 1
try:
avg_songs /= len(artists.keys())
except ZeroDivisionError:
avg_songs = 0
var = 0
for a in artists:
var += (len(artists[a].song_list)-avg_songs)**2
try:
var /= len(artists.keys())
except ZeroDivisionError:
var = 0
y = np.array(list(d.values()))
x = np.array(list(d.keys()))
# plot the data
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.scatter(x, y, zorder=1, c='c', s=1, alpha=0.1)
ax.set_title('Distribution of Number of songs per artist')
ax.set_xlim([0, max(x)])
ax.set_xlabel('N of songs')
ax.set_ylim([0, max(y)])
ax.set_ylabel('Count')
fname = "./stats_plot/songsNumber.png"
plt.savefig(fname, dpi=400)
print("avg number of song per artist = %f\nVariance = %f\n" % (avg_songs,var)) | [
"lpgpirisi@gmail.com"
] | lpgpirisi@gmail.com |
abcfc4a925460837c5467bc95c3e4a0a2944ec26 | bffb3e1df31bd705d362510f06590fbe2e438cae | /ML_Hw1/HW1/class_work.py | 85931f4a0f1abec85e13ccacde736939659d8353 | [] | no_license | Giulero/Machine_Learning | 3fba72f5bac6ebaf72a91400869a3b4820c556d2 | 718fea08903d4e3f2b3525ec883381e113ac727d | refs/heads/master | 2020-03-28T23:22:25.757799 | 2018-11-19T15:58:43 | 2018-11-19T15:58:43 | 149,288,397 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,265 | py | # -*- coding: utf-8 -*-
"""
Created on Sat Dec 24 17:03:34 2016
@author: Giulero"""
from PIL import Image
import numpy as np
import glob
import matplotlib.pyplot as plt
from sklearn import preprocessing
from sklearn.decomposition import PCA
from sklearn.cross_validation import train_test_split
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import StandardScaler
from matplotlib.colors import ListedColormap
#import seaborn as sns
class HW1:
def __init__(self, img_folder, n_img):
print("---- Path loaded -----")
for i in n_img:
print(" ----- Image n.", i, "-----")
self.img_folder = img_folder+'obj'
self.n_img = n_img
self.X = []
self.X_std = []
self.y = []
self.img_list = []
def X_(self):
img_list = []
img_data_raveled = []
for num in self.n_img:
self.img_list += glob.glob(self.img_folder+str(num)+'_*')
for filename in self.img_list:
im = np.asarray(Image.open(filename).convert("RGB"))
im_raveled = np.ravel(im)
img_data_raveled.append(im_raveled)
#for filename in self.img_list:
# img_data_raveled.append(np.ravel(np.asarray(Image.open(filename),'r').convert("RGB")))
self.X = np.array(img_data_raveled).reshape((len(self.img_list)), -1)
self.X_std = preprocessing.scale(self.X)
return self
def Y(self):
for num in range(0,len(self.n_img)):
self.y += [num]*int(len(self.img_list)/len(self.n_img))
return self
def PCA(self, value):
self.value_ = value
print("\n----- "+str(value-1)+"-"+str(value)+" principal component -----")
pca = PCA(value)
self.X_PCA1 = pca.fit_transform(self.X_std)
self.X_PCA = self.X_PCA1[:,value-2:value]
return self
def split_data(self):
self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(self.X_PCA, self.y, test_size=0.5)
return self
def fit(self):
X_train, X_test, y_train, y_test = train_test_split(self.X_PCA, self.y, test_size=0.5, random_state=55)
self.clf = GaussianNB()
self.clf.fit(X_train, y_train)
return self
def covariance_plot(self):
# cm = np.corrcoef(self.X_PCA1)
# sns.set(font_scale=1.5)
# hm = sns.heatmap(cm, cbar=True, annot=True, square=True, fmt='.2f', annot_kws={'size':15})
# sns.set(style='whitegrid', context='notebook')
cov_mat = np.cov(self.X_PCA1.T)
eig_val, eig_vec = np.linalg.eig(cov_mat)
# cm = np.corrcoef(eig_vec)
# hm = sns.heatmap(cm)
cum_exp = np.cumsum(eig_val/np.sum(eig_val))
# plt.plot(range(1, len(eig_val)+1), eig_val/np.sum(eig_val), marker='o')
plt.bar(range(1, len(eig_val)+1), eig_val/np.sum(eig_val), align='center', label='Individual explained variance')
plt.step(range(1, len(eig_val)+1), cum_exp, where='mid', label='Cumulative explained variance')
plt.ylabel('Explained variance ratio')
plt.xlabel('Principal components')
plt.legend(loc='upper left')
plt.savefig('components.pdf', format='pdf', dpi=2000)
plt.show()
# sns.reset_orig()
def accuracy(self):
self.pred = self.clf.predict(self.X_test)
print ("----- Accuracy:", accuracy_score(self.y_test, self.pred),"-----")
return accuracy_score(self.y_test, self.pred)
def scatter_plot(self):
markers = ('o', 'x', 's', '^', 'v')
colors = ('red', 'blue', 'lightgreen', 'gray', 'cyan')
cmap = ListedColormap(colors[:len(np.unique(self.y))])
for idx, cl in enumerate(np.unique(self.y)):
plt.scatter(x=self.X_PCA[self.y == cl, 0], y=self.X_PCA[self.y == cl, 1], alpha=0.8, c=cmap(idx), label='Obj '+str(self.n_img[cl]))
plt.legend(loc='upper left')
#plt.show()
    def plt(self):
        """Plot the classifier's decision regions over the 2-D PCA plane,
        overlay the sample scatter, and save the figure as a PDF.

        NOTE(review): the method name shadows the module-level pyplot alias
        'plt'; inside this body 'plt' still resolves to the module (the
        method is only a class attribute), but the name is confusing.
        """
        # NOTE(review): `markers` is unused.
        markers = ('o', 'x', 's', '^', 'v')
        colors = ('red', 'blue', 'lightgreen', 'gray', 'cyan')
        cmap = ListedColormap(colors[:len(np.unique(self.y))])
        # Bounding box of the projected data, padded by 1 on every side.
        x1_min, x1_max = self.X_PCA[:,0].min() - 1, self.X_PCA[:,0].max()+1
        x2_min, x2_max = self.X_PCA[:,1].min() - 1, self.X_PCA[:,1].max()+1
        xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, 0.2), np.arange(x2_min, x2_max, 0.2))
        # builds a meshgrid between the min and max at the given resolution; arange creates a vector with that step
        Z = self.clf.predict(np.array([xx1.ravel(), xx2.ravel()]).T) # .T -> transpose; ravel flattens the grid into rows of (x1, x2) points
        Z = Z.reshape(xx1.shape) # fold the per-point predictions back into the grid's shape
        plt.contourf(xx1, xx2, Z, alpha=0.4, cmap=cmap)
        self.scatter_plot()
        #plt.title('Imgs classification with GaussianNB clf')
        plt.title('Accuracy : '+ str(self.accuracy()))
        plt.savefig('PCA'+str(self.value_)+'.pdf', format='pdf', dpi = 2000)
        #plt.legend(loc='upper left')
        plt.show()
| [
"gl.giuseppelerario@gmail.com"
] | gl.giuseppelerario@gmail.com |
0c43f3db6d2706d724e78202eb40dc195b033170 | 4bba0615e2ae5110e8e6198722767dc07dba8e73 | /main/click.py | f4591fbfda6ab700b9e966b4c4b5a0894d4da634 | [] | no_license | Lun4rIum/autoclick | 1cfd9c45f9df3039043959e5e2fca3206e16d76e | b087c707f04051510ad48c8ee0694d7a2bdf7dd8 | refs/heads/main | 2023-05-30T00:03:25.930487 | 2021-06-19T18:13:50 | 2021-06-19T18:13:50 | 378,475,348 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 719 | py | from pynput.mouse import Button, Controller
import keyboard
import time
mouse = Controller()
while True:
if keyboard.read_key() == "n":
while True:
time.sleep(0.01)
mouse.press(Button.right)
mouse.release(Button.right)
if keyboard.is_pressed('n'):
break
if keyboard.read_key() == "v":
while True:
time.sleep(0.01)
mouse.press(Button.left)
mouse.release(Button.left)
if keyboard.is_pressed('v'):
break
| [
"noreply@github.com"
] | noreply@github.com |
2eb81e6bc89d77f0ee7640edaec9543348a8f465 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/380/usersdata/315/101991/submittedfiles/minha_bib.py | 37dfe39276605bb65b991985c20942c3898a1b93 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,182 | py | # -*- coding: utf-8 -*-
import random
#Simbolo que o Jogador quer utilizar
def solicitaSimbolodoHumano(a):
    """Ask the player which symbol to use and return it padded as ' X ' or ' O '.

    The incoming argument is ignored; the answer is read from stdin and
    re-prompted until it is one of O/X in either case.
    """
    escolha = input('\nQual símbolo você deseja utilizar no jogo? ')
    while escolha not in ('O', 'X', 'o', 'x'):
        escolha = input('\nQual símbolo você deseja utilizar no jogo? ')
    return ' X ' if escolha in ('x', 'X') else ' O '
#Sorteio de quem ira começar jogando
def sorteioPrimeiraJogada(jogador, nome):
    """Flip a coin for who starts: 1 means the human *nome*, 0 the computer.

    The incoming *jogador* value is ignored; the draw result is returned.
    """
    sorteado = random.choice((0, 1))
    vencedor = nome if sorteado == 1 else 'Computador'
    print('\nVencedor do sorteio para inicio do jogo : %s' % vencedor)
    return sorteado
#Printa o tabuleiro
def mostraTabuleiro(tabuleiro):
    """Print a blank line followed by the 3x3 board, cells joined by '|'."""
    print('')
    for linha in tabuleiro[:3]:
        print(linha[0] + '|' + linha[1] + '|' + linha[2])
#Jogada do computador TA COM ERRO
def JogadaComputador(smbPC, tabuleiro):
    """Place the computer's symbol *smbPC* on a random empty cell.

    Draws random (row, col) pairs until an empty cell (' ') is found,
    writes the symbol there and returns the mutated board.  The original
    redundantly rolled a second, immediately-discarded pair on every miss.
    """
    while True:
        ti = random.choice((0, 1, 2))
        tj = random.choice((0, 1, 2))
        if tabuleiro[ti][tj] == ' ':
            break
    tabuleiro[ti][tj] = smbPC
    return tabuleiro
#Verifica se a jogada é valida
def validaJogada(a, tabuleiro, nome):
    """Return a move string whose target cell is free.

    *a* is a string whose characters 0 and 2 are the row and column
    digits.  While the addressed cell is occupied, warn the player and
    read a replacement move from stdin.
    """
    while tabuleiro[int(a[0])][int(a[2])] != ' ':
        print('\nOPS!!! Essa jogada não está disponível. Tente novamente!')
        a = input('\nQual a sua jogada, %s? ' % nome)
    return a
#sua jogada
def JogadaHumana(smbH,tabuleiro, nome):
    """Show the board, read the player's move from stdin, validate it and
    place the symbol *smbH*.  Returns the mutated board.
    """
    mostraTabuleiro(tabuleiro)
    # Moves are strings like '0 2': character 0 is the row, 2 the column.
    n = input('\nQual a sua jogada, %s? ' %nome)
    n = validaJogada(n, tabuleiro, nome)
    tabuleiro[int(n[0])][int(n[2])] = smbH
    return tabuleiro
#Verifica se alguem ganhou
def verificaVencedor(simbolo, tabuleiro):
    """Return True if *simbolo* has three in a row on the 3x3 board.

    Checks every row, every column and both diagonals.  If nobody won and
    no empty cell (' ') remains, announces the draw ('Velha') and returns
    None (falsy), matching the original's no-win result.
    """
    t = tabuleiro
    # Rows and columns.  The original checked rows 1 and 2 twice instead of
    # columns 1 and 2, so wins in those columns went undetected.
    for k in range(3):
        if t[k][0] == simbolo and t[k][1] == simbolo and t[k][2] == simbolo:
            return True
        if t[0][k] == simbolo and t[1][k] == simbolo and t[2][k] == simbolo:
            return True
    # Diagonals.
    if t[0][0] == simbolo and t[1][1] == simbolo and t[2][2] == simbolo:
        return True
    if t[0][2] == simbolo and t[1][1] == simbolo and t[2][0] == simbolo:
        return True
    # Draw check.  The original tested "' ' not in tabuleiro", which
    # compares the space against the *rows* (lists) and never matched.
    if not any(' ' in linha for linha in t):
        print('Velha')
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
157696dff38f89d8e82a94dc51c66681fbf9d197 | ae7652c7cdbf624340e7e2e83e89592895fad456 | /selenium-explicit_wait2.py | bfa05fc4c311fc31ef1152d3c999df40613702e7 | [] | no_license | sw88tch/stepik-auto-tests-course | 799acbdccb38ce0a26c9c1ec033c60770c31f01e | 1d7060f0619d326d05019855495fcf5bf2d7a59d | refs/heads/main | 2023-04-18T11:07:48.878907 | 2021-05-13T10:49:25 | 2021-05-13T10:49:25 | 363,947,053 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 870 | py | from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium import webdriver
import time
import math
def calc(x):
    """Return log(|12 * sin(x)|) for the integer value of *x*, as a string."""
    value = math.log(abs(12 * math.sin(int(x))))
    return str(value)
# Open the exercise page and wait (up to 12 s) for the advertised price
# to drop to $100 before the booking button is clicked.
browser = webdriver.Chrome()
browser.get("http://suninjuly.github.io/explicit_wait2.html")
WebDriverWait(browser, 12).until(
    EC.text_to_be_present_in_element((By.ID, "price"), "$100"))
button1 = browser.find_element_by_css_selector("#book")
button1.click()
# Read the challenge value x, compute the answer and type it into the form.
x_element = browser.find_element_by_xpath("/html/body/form/div/div/div/label/span[2]")
x = x_element.text
y = calc(x)
# NOTE(review): 'input' shadows the builtin of the same name from here on.
input = browser.find_element_by_xpath("/html/body/form/div/div/div/input")
input.send_keys(y)
button2 = browser.find_element_by_css_selector("#solve")
button2.click()
# Leave the result page visible briefly before closing the browser.
time.sleep(10)
browser.quit()
"fcukmycopyrights@gmail.com"
] | fcukmycopyrights@gmail.com |
ee57f682e295cbfd9747da50306e7deadad5f554 | b66e70a8bb3c53595acd01dceb23298694884b67 | /cloudy/cloudy/models.py | 92cd7f135177bdbc3b72c907c8741df29eb2c148 | [] | no_license | flupke/cloudy-release | d7735a38d79f816c52da3d983c714512a32919b1 | 6b160188a7067f125b107eb68dc8db4bbb4bfdf4 | refs/heads/master | 2016-09-06T05:23:40.856287 | 2013-02-23T18:17:16 | 2013-02-23T18:17:16 | 8,377,854 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 936 | py | from django.db import models
class SshIdentity(models.Model):
    """A named SSH key pair used when connecting to deployment hosts."""
    name = models.CharField(max_length=256)
    # Key material in text form.
    public = models.TextField()
    # NOTE(review): the private key is stored in plain text -- consider
    # encrypting at rest.
    private = models.TextField()
class HostsGroup(models.Model):
    """A named group of hosts sharing default SSH connection settings."""
    name = models.CharField(max_length=256)
    # Optional default login user for hosts in this group.
    ssh_user = models.CharField(max_length=32, blank=True)
    # blank=True alone only relaxes *form* validation; without null=True the
    # database column stays NOT NULL, so saving a group with no identity
    # raised an IntegrityError.  null=True makes the relation truly optional.
    ssh_identity = models.ForeignKey(SshIdentity, blank=True, null=True)
class Host(models.Model):
    """A single deploy target, optionally overriding its group's SSH settings."""
    hostname = models.CharField(max_length=256)
    # Optional human-friendly display name.
    alias = models.CharField(max_length=256, blank=True)
    group = models.ForeignKey(HostsGroup)
    # Per-host overrides; empty means "use the group's value".
    ssh_user = models.CharField(max_length=32, blank=True)
    # null=True added: blank=True alone left the DB column NOT NULL, so a
    # host without its own identity could not actually be saved.
    ssh_identity = models.ForeignKey(SshIdentity, blank=True, null=True)
class Project(models.Model):
    """A deployable project, released onto one group of hosts."""
    name = models.CharField(max_length=64)
    # NOTE(review): ForeignKey means one group per project; the plural name
    # 'hosts' suggests ManyToMany may have been intended -- verify.
    hosts = models.ForeignKey(HostsGroup)
class Check(models.Model):
    """A named command associated with a project release check."""
    project = models.ForeignKey(Project)
    name = models.CharField(max_length=64)
    # Command text executed by the check runner -- presumably on the
    # project's hosts (TODO confirm against the runner code).
    command = models.TextField()
"luper.rouch@gmail.com"
] | luper.rouch@gmail.com |
b8e6f22145dd002a5ed4f938f018506b9434a57b | c746227453bed96ba2c940c32fed96fdd5258ace | /code_base/layers_bkp.py | 530f4290aaa3b290998722b54b83ee43a2a66b71 | [
"Apache-2.0"
] | permissive | dixantmittal/numpy-cnn-implementation | 48675d0699ca7d08935879cc953204c39e70fab4 | 0de844a5f8d12fceae19aa36f9dff31ced05f1c7 | refs/heads/master | 2021-07-19T19:35:38.203425 | 2017-10-13T14:55:58 | 2017-10-13T14:55:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,329 | py | import numpy as np
import datetime
from code_base.matrix_transformation import *
from code_base.im2col import *
from code_base.classifiers import *
def affine_forward(x, w, b):
    """
    Computes the forward pass for an affine (fully-connected) layer.

    Each of the N input examples x[i] (shape (d_1, ..., d_k)) is flattened
    to a vector of dimension D = d_1 * ... * d_k and mapped to M outputs.

    Inputs:
    - x: A numpy array containing input data, of shape (N, d_1, ..., d_k)
    - w: A numpy array of weights, of shape (D, M)
    - b: A numpy array of biases, of shape (M,)

    Returns a tuple of:
    - out: output, of shape (N, M)
    - cache: (x, w, b) saved for the backward pass
    """
    N = x.shape[0]
    # Flatten every example to a row; -1 infers D, so the original's unused
    # np.prod(...) bookkeeping is unnecessary.
    x_rs = np.reshape(x, (N, -1))
    out = x_rs.dot(w) + b
    cache = (x, w, b)
    return out, cache
def affine_backward(dout, cache):
    """
    Computes the backward pass for an affine layer.

    Inputs:
    - dout: upstream derivative, shape (N, M)
    - cache: (x, w, b) saved by affine_forward

    Returns (dx, dw, db) with the same shapes as x, w and b.
    """
    x, w, b = cache
    num = x.shape[0]
    flat = x.reshape(num, -1)

    db = dout.sum(axis=0)                    # bias gradient sums every example
    dw = np.dot(flat.T, dout)                # (D, N) @ (N, M) -> (D, M)
    dx = np.dot(dout, w.T).reshape(x.shape)  # back to the input's shape
    return dx, dw, db
def relu_forward(x):
    """
    Forward pass for rectified linear units: out = max(x, 0) elementwise.

    Input:
    - x: inputs, of any shape

    Returns (out, cache) where cache is simply x, kept for the backward pass.
    """
    return np.maximum(x, 0), x
def relu_backward(dout, cache):
    """
    Backward pass for a layer of rectified linear units.

    Inputs:
    - dout: upstream derivatives, any shape
    - cache: the input x fed to relu_forward

    Returns dx: the upstream gradient gated by where the input was
    non-negative (this implementation passes gradient at x == 0, matching
    the original's (x >= 0) mask).
    """
    x = cache
    return np.where(x >= 0, dout, 0)
def dropout_forward(x, dropout_param):
    """
    Inverted-dropout forward pass.

    Inputs:
    - x: input data, any shape.
    - dropout_param: dict with keys
      - 'p': keep probability -- each activation survives with probability p
        and is pre-scaled by 1/p so expectations match test time.
      - 'mode': 'train' applies the random mask; 'test' is the identity.
      - 'seed' (optional): seeds numpy's RNG for reproducibility.

    Returns (out, (dropout_param, mask)); mask is None in test mode.
    """
    p, mode = dropout_param['p'], dropout_param['mode']
    if 'seed' in dropout_param:
        np.random.seed(dropout_param['seed'])

    out, mask = None, None
    if mode == 'train':
        # Keep with probability p; dividing by p is the "inverted" scaling.
        mask = (np.random.rand(*x.shape) < p) / p
        out = x * mask
    elif mode == 'test':
        out = x

    cache = (dropout_param, mask)
    return out.astype(x.dtype, copy=False), cache
def dropout_backward(dout, cache):
    """
    Inverted-dropout backward pass.

    Inputs:
    - dout: upstream derivatives, any shape.
    - cache: (dropout_param, mask) produced by dropout_forward.

    Returns dx, the gradient with respect to x.
    """
    dropout_param, mask = cache
    mode = dropout_param['mode']
    if mode == 'train':
        return dout * mask   # only the units kept in the forward pass carry gradient
    if mode == 'test':
        return dout          # identity at test time
    return None              # unknown mode: mirror the forward pass's None
def conv_forward(x, w, b, conv_param):
    """
    Forward pass for a convolutional layer via im2col.

    The input consists of N data points, each with C channels, height H and
    width W.  Each input is convolved with F filters spanning all C
    channels, each of height HH and width WW.

    Input:
    - x: Input data of shape (N, C, H, W)
    - w: Filter weights of shape (F, C, HH, WW)
    - b: Biases, of shape (F,)
    - conv_param: A dictionary with the following keys:
      - 'stride': pixels between adjacent receptive fields, both directions.
      - 'pad': zero-padding applied on each side in both x and y.

    Returns a tuple of:
    - out: Output data, of shape (N, F, H', W') where
      H' = 1 + (H + pad - HH) / stride
      W' = 1 + (W + pad - WW) / stride
    - cache: (x, w, b, conv_param, x_flat); x_flat is the im2col matrix,
      cached so conv_backward can reuse it when computing dw.
    """
    N, C, H, W = x.shape
    F, _, HH, WW = w.shape
    stride = conv_param['stride']
    pad = conv_param['pad']

    # Output spatial extent.
    _H = 1 + (H + pad - HH) // stride
    _W = 1 + (W + pad - WW) // stride

    # Lower the convolution to a single matrix multiply:
    # w_flat is (F, C*HH*WW); x_flat stacks each receptive field as a column.
    w_flat = get_flattened_filters(w)
    x_flat = im2col_indices(x, HH, WW, pad, stride)
    activations = np.dot(w_flat, x_flat) + b[:, None]

    # The reshape implies columns ordered (_H, _W, N) with N fastest; this
    # must match im2col_indices' column layout -- TODO verify.
    out = activations.reshape(F, _H, _W, N).transpose(3, 0, 1, 2)

    cache = (x, w, b, conv_param, x_flat)
    return out, cache
def conv_backward(dout, cache):
    """
    Backward pass for a convolutional layer (im2col formulation).

    Inputs:
    - dout: Upstream derivatives, shape (N, F, H', W')
    - cache: (x, w, b, conv_param, x_flat) as produced by conv_forward

    Returns a tuple of:
    - dx: Gradient with respect to x
    - dw: Gradient with respect to w
    - db: Gradient with respect to b
    """
    x, w, b, conv_param, x_flat = cache
    F = w.shape[0]
    hh, ww = w.shape[2], w.shape[3]

    # db: sum each filter's gradient over every image and position.
    db = np.sum(dout, axis=(0, 2, 3))

    # Match the (F, _H*_W*N) column layout used by the forward pass.
    dout_res = dout.transpose(1, 2, 3, 0).reshape(F, -1)

    # dw via the cached im2col matrix, then restore the filter shape.
    dw = np.dot(dout_res, x_flat.T).reshape(w.shape)

    # dx: propagate through the flattened filters, then fold the columns
    # back into image form.  NOTE(review): assumes get_flattened_filters(w)
    # used in the forward pass equals w.reshape(F, -1) -- verify.
    w_res = w.reshape(F, -1)
    dx_flat = np.dot(w_res.T, dout_res)
    dx = col2im_indices(dx_flat, x.shape, hh, ww, conv_param['pad'], conv_param['stride'])
    return dx, dw, db
def max_pool_forward(x, pool_param):
    """
    Forward pass for a max pooling layer.

    Inputs:
    - x: input of shape (N, C, H, W)
    - pool_param: dict with 'pool_height', 'pool_width' and 'stride'

    Returns (out, cache) where out has shape (N, C, H', W') with
    H' = 1 + (H - pool_height) // stride (and likewise for W'), and
    cache is (x, pool_param).
    """
    N, C, H, W = x.shape
    ph = pool_param['pool_height']
    pw = pool_param['pool_width']
    stride = pool_param['stride']
    out_h = 1 + (H - ph) // stride
    out_w = 1 + (W - pw) // stride

    out = np.zeros((N, C, out_h, out_w))
    for r in range(out_h):
        for c in range(out_w):
            window = x[:, :, r * stride:r * stride + ph, c * stride:c * stride + pw]
            out[:, :, r, c] = window.max(axis=(2, 3))

    return out, (x, pool_param)
def max_pool_backward(dout, cache):
    """
    Backward pass for max pooling: gradient flows only to the maximal
    element(s) of each pooling window.

    Inputs:
    - dout: upstream derivatives, shape (N, C, H', W')
    - cache: (x, pool_param) from the forward pass

    Returns dx with the same shape as x.  Ties inside a window each
    receive the full upstream gradient, matching the original mask logic.
    """
    x, pool_param = cache
    ph = pool_param['pool_height']
    pw = pool_param['pool_width']
    stride = pool_param['stride']
    N, C, H, W = x.shape
    out_h = 1 + (H - ph) // stride
    out_w = 1 + (W - pw) // stride

    dx = np.zeros_like(x)
    for r in range(out_h):
        for c in range(out_w):
            window = x[:, :, r * stride:r * stride + ph, c * stride:c * stride + pw]
            peak = window.max(axis=(2, 3))
            winners = (window == peak[:, :, None, None])
            dx[:, :, r * stride:r * stride + ph, c * stride:c * stride + pw] += \
                winners * dout[:, :, r, c][:, :, None, None]
    return dx
def softmax_loss(x, y):
    """
    Softmax classification loss and gradient.

    Inputs:
    - x: scores, shape (N, C); x[i, j] is the score of class j for input i.
    - y: labels, shape (N,), with 0 <= y[i] < C.

    Returns (loss, dx) where loss is a scalar and dx has the shape of x.
    """
    # Shift by the row maximum for numerical stability before exponentiating.
    shifted = x - x.max(axis=1, keepdims=True)
    log_norm = np.log(np.exp(shifted).sum(axis=1, keepdims=True))
    log_probs = shifted - log_norm
    probs = np.exp(log_probs)

    num = x.shape[0]
    rows = np.arange(num)
    loss = -log_probs[rows, y].sum() / num

    # d(loss)/d(scores): softmax probabilities minus the one-hot targets.
    dx = probs.copy()
    dx[rows, y] -= 1
    dx /= num
    return loss, dx
| [
"dixant.mittal@gmail.com"
] | dixant.mittal@gmail.com |
76262bda20e1e68938ed5ca28d16e8ac0a7c1760 | d9af2ba0d001173ac55a97adcf003e66f49de80d | /starbase.py | 2456ce33ceec16ef3c95cfebd0d32e597ed2391f | [] | no_license | bamcleod/skycover | ae1063ab483c5a8d47e9c481da04987df05d14ce | c46d74351d3c3f14ac7dd7c66437754e01b178a9 | refs/heads/master | 2022-11-09T10:00:21.411785 | 2022-10-27T19:47:42 | 2022-10-27T19:47:42 | 67,826,878 | 0 | 2 | null | 2022-10-27T19:47:43 | 2016-09-09T19:34:41 | C++ | UTF-8 | Python | false | false | 17,367 | py |
import sys,os,re, subprocess, tempfile
from threading import Thread
"""
Add colnum
Add insert header before, after other header value
Add add/del header
Add add/del row
Add add/del col
Add row formatting
"""
try:
    # Prefer the compiled/optimised reader when the helper module exists.
    import starbase_data
    Starbase_readdata = starbase_data.readdata
except ImportError:
    def Starbase_readdata(fp, type, strip) :
        """Pure-python fallback: read tab-separated rows from *fp*, applying
        the per-column converters in *type*; strip whitespace when asked.
        """
        if ( strip ) :
            return [[typ(val.strip()) for (typ, val) in zip(type, row.rstrip("\n").split("\t"))] for row in fp.readlines()]
        else :
            return [[typ(val) for (typ, val) in zip(type, row.rstrip("\n").split("\t"))] for row in fp.readlines()]
class Starbase(object):
"""
# Starbase Data Tables in python
#
# Read a table from disk:
#
>>> tab = Starbase("input.tab")
#
# A table may be read from a command pipeline by placing "|" as the
# first character in the filename:
#
>>> tab = Starbase('| row < input.tab "X > 3"')
#
# A table may be read from an open file descriptor:
#
>>> fp = open("input.tab")
>>> tab = Starbase(fp)
#
# The data is stored as a list of lists. It can be accessed directly
# from the Starbase object. Row and column indexing is zero based as
# in python. Columns may be indexed by integer or column name string:
#
>>> tab[0][0] # get value at 0, 0
'1'
#
# The values are stored as strings by default. An optional keyword
# patameter "types" may be used to add data types to columns. This
# makes using tables in expressions less painful.
#
tab = Starbase("| jottable 10 x y z", types = { "x" : float, "y" : int })
x = tab[0].x + tab[0].y
print x
2
>>> tab[2][1] = 5 # set value at 0, 0
>>> tab[1]["Y"] # get value at row 1 col "Y"
'2'
>>> tab[4]["Y"] = 9 # set value at row 5 col "Y"
#
# Rows can be dotted too.
#
>>> tab[4].Y
'9'
>>> tab[4].Y = 8
#
# Table header values may be accessed by using python "dot" notation or
# indexing the Starbase object with a string. Note that header values are
# arrays and thus need to be indexed just like rows:
#
>>> tab.label = "label string" # set header value
>>> tab.label[0] # or
'label string'
>>> tab["label"][0]
'label string'
#
#
# Iterating over the table returns each row of the table in turn:
#
>>> for row in tab :
... print row
['1', '1', '1']
['2', '2', '2']
['3', '5', '3']
['4', '4', '4']
['5', '8', '5']
#
# Rows can be sliced and iterated over:
#
>>> for row in tab[0:2] :
... print row
['1', '1', '1']
['2', '2', '2']
#
# The row itself can be iterated of:
#
>>> for val in tab[2] :
... print val
3
5
3
#
# Slice the rows of a data column. This works as an a notation to
# select a column vector for input to numpy array().
#
>>> tab[:].X
['1', '2', '3', '4', '5']
#
# Or slice a few rows of the column:
#
>>> tab[0:2].X
['1', '2']
#
# Alternative "arrays" constructor can be used to create
# a starbase object from a bunch of python list or numpy
# array data.
#
>>> print Starbase.arrays("XXX", [1, 2, 3], "Y", [3, 4, 5]) # doctest: +NORMALIZE_WHITESPACE
XXX Y
--- -
1 3
2 4
3 5
#
# Keyword arguments are also supported, but the order of
# columns in the starbase table is determined by the
# python hash not the order passed to the constructor.
#
>>> print Starbase.arrays(X=[1, 2, 3], Y=[3, 4, 5]) # doctest: +NORMALIZE_WHITESPACE
Y X
- -
3 1
4 2
5 3
#
# The Starbase table may be printed directly. This can safely be used
# for "small" tables (less than several megabytes). For truely huge
# tables, the ">" operator will iterativly print the table to a file
# descriptor and may be faster.
#
>>> print tab # print table # doctest: +NORMALIZE_WHITESPACE
label label string
X Y Z
- - -
1 1 1
2 2 2
3 5 3
4 4 4
5 8 5
>>> fp = open('/tmp/output', 'w')
>>> print >> fp, tab # print to open file fp
#
>>> tab > sys.stdout # write table to sys.stdout # doctest: +NORMALIZE_WHITESPACE
label label string
X Y Z
- - -
1 1 1
2 2 2
3 5 3
4 4 4
5 8 5
>>> tab > "output.tab" # write table to file named "output.tab"
#
# If the output file name passed to ">" begins with "|" the table will be
# filtered through a shell pipeline:
#
>>> t = (Starbase("| jottable 10 x") > "| row 'x <= 3'")
>>> t > sys.stdout
x
-
1
2
3
#
# Rows and header values have an independent existence and can be
# selected and assigned:
#
>>> row = tab[4]
>>> row[2] = 4 # set column 3 of row 6 to 4
>>> tab[0] = tab[3] # copy row 3 to row 0
# Union of two tables
#
>>> print (t + t)
x
-
1
2
3
1
2
3
# Difference of two tables
#
>>> print (tab > "| sorttable -u X") % "X=x" - t # doctest: +NORMALIZE_WHITESPACE
label label string
x
-
4
5
"""
    class StarbaseHdr(object):
        """Wrap one header-value list so callers can index the values 0-based.

        Each header line is stored as [keyword, value1, value2, ...]; this
        wrapper hides the keyword at slot 0, so hdr[0] is the first value.
        """
        def __init__(self, data) :
            # Keep a *reference* to the table's list: writes through this
            # wrapper update the owning Starbase in place.
            self.data = data
        def __getitem__(self, indx) :
            if ( type(indx) == slice ) :
                # Slice over the values only (keyword dropped).
                return self.data[1:][indx]
            return self.data[indx+1]
        def __setitem__(self, indx, value) :
            # Header values are always stored as strings.
            self.data[indx+1] = str(value)
        def __str__(self) :
            return "\t".join(self.data[1:])
# StarbaseRow is a friend of Starbase and accesses its __Private members.
#
class StarbaseRow(object):
""" Enables column lookup by string value. Holds the reference to a row
and the column dictionary.
"""
def __init__(self, tabl, indx) :
self.__tabl = tabl
self.__indx = indx
self.__initialized = 1
def __str__(self) :
if ( type(self.__indx) == list or type(self.__indx) == tuple ) :
reply = ""
for i in self.__indx:
reply = reply + "\t".join((str(item) for item in self.__tabl._Starbase__data[i])) + "\n"
return reply
return "\t".join((str(item) for item in self.__tabl._Starbase__data[self.__indx]))
def __getitem__(self, indx) :
if ( type(indx) == list or type(indx) == tuple ) :
xidx = []
for i in indx :
if ( type(i) == str ) :
i = self.__tabl._Starbase__indx[i]
xidx.append(i)
indx = xidx
if ( type(self.__indx) == slice ) :
if ( type(indx) == list or type(indx) == tuple ) :
return [[row[i] for i in indx] for row in self.__tabl._Starbase__data.__getitem__(self.__indx)]
return [row[indx] for row in self.__tabl._Starbase__data.__getitem__(self.__indx)]
if ( type(self.__indx) == list or type(self.__indx) == tuple ) :
if ( type(indx) == list or type(indx) == tuple ) :
reply = []
for row in self.__indx:
reply.append([self.__tabl._Starbase__data[row][i] for i in indx])
return reply
return [row[indx] for row in self.__tabl._Starbase__data.__getitem__(self.__indx)]
if ( type(indx) == list or type(indx) == tuple ) :
return [self.__tabl._Starbase__data[self.__indx][i] for i in indx]
return self.__tabl._Starbase__data[self.__indx][indx]
def __setitem__(self, indx, value) :
if ( type(indx) == str ) :
indx = self.__tabl._Starbase__indx[indx]
self.__tabl._Starbase__data[self.__indx][indx] = self.__tabl._Starbase__type[indx](value)
def __getattr__(self, indx) :
return self.__getitem__(indx)
def __iter__(self) :
return self.__tabl._Starbase__data[self.__indx].__iter__()
def __setattr__(self, indx, value) :
if ( not self.__dict__.has_key("_StarbaseRow__initialized") \
or self.__dict__.has_key(indx) ) :
self.__dict__[indx] = value
return
self.__setitem__(indx, value)
def __length_hint__(self) :
return 0
def where(self, expr):
if ( type(self.__indx) == list ) :
i = 0
indx = []
for i in self.__indx :
row = self.__tabl._Starbase__data.__getitem__(i)
if eval(expr, {} , dict(zip(self.__tabl._Starbase__cols, row))):
indx.append(i)
i = i + 1
if ( len(indx) == 1 ) :
indx = indx[0]
return Starbase.StarbaseRow(self.__tabl, indx)
if ( type(self.__indx) == slice ) :
i = 0
indx = []
for row in self.__tabl._Starbase__data.__getitem__(self.__indx) :
if eval(expr, {} , dict(zip(self.__tabl._Starbase__cols, row))):
indx.append(i)
i = i + 1
if ( len(indx) == 1 ) :
indx = indx[0]
return Starbase.StarbaseRow(self.__tabl, indx)
    class StarbasePipeWriter(Thread) :
        """Background thread that streams a table into one end of a pipe.

        Used by __gt__ so a table can be fed to a subprocess while the
        parent simultaneously reads the filtered result, avoiding deadlock
        on full pipe buffers.
        """
        def __init__(self, table, write) :
            Thread.__init__(self)
            self.table = table
            self.write = write
        def run(self) :
            # "table > fp" serialises the table; closing signals EOF.
            self.table > self.write
            self.write.close()
    def __init__(self, fp=None, types={}, dtype=str, strip=False) :
        """Read a Starbase table from *fp* (path, "| pipeline", or open file).

        types maps column name -> conversion callable; dtype is the default
        converter; strip trims whitespace from every data value.
        NOTE(review): the mutable default types={} is only read, never
        mutated, so it is harmless here -- but fragile if edited.
        """
        # A bare Starbase() is an empty shell used by the arrays() constructor.
        if ( fp == None ) :
            return
        if ( type(fp) == str ) :
            # A leading "|" means "read from this shell pipeline".
            fp = open(fp, "rU") if ( fp[0:1] != "|" ) else os.popen(fp[1:], "rU")
        self.__head = {}
        self.__line = []
        self.__type = []
        self.__cols = []
        self.__headline = fp.readline().rstrip().split("\t")
        self.__dashline = fp.readline().rstrip().split("\t")
        dashes = len([s for s in self.__dashline if re.match('-+' , s.strip())])
        # Read lines until the dashline is found; everything above it is
        # header keyword/value lines, recorded in self.__line / self.__head.
        #
        while ( not dashes \
                or dashes != len([s for s in self.__headline if re.match('\w+', s.strip())]) ) :
            if ( re.match('\w+', self.__headline[0].strip()) ) :
                self.__head[self.__headline[0].strip()] = len(self.__line)
                self.__line.append(self.__headline)
            self.__headline = self.__dashline
            self.__dashline = fp.readline().rstrip().split("\t")
            dashes = 0
            for s in self.__dashline:
                if re.match('^-+$' , s.strip()):
                    dashes += 1
        # Build the column-name -> position map and per-column converters.
        i = 0
        self.__indx = {}
        for col in self.__headline :
            col = col.strip()
            self.__indx[col] = i
            self.__type.append(types[col] if ( types.has_key(col) ) else dtype)
            self.__cols.append(col)
            i += 1
        # Read the data in, converting to types
        #
        self.__data = Starbase_readdata(fp, self.__type, strip)
        # Must be set last: __setattr__ redirects writes to headers once set.
        self.__initialized = 1
@classmethod
def arrays(self, *args, **kwargs) :
self = Starbase()
args = list(args)
self.__head = {}
self.__line = []
self.__type = []
self.__headline = []
self.__dashline = []
for col in kwargs :
args.append(col)
args.append(kwargs[col])
i = 0
vals = []
self.__indx = {}
while i < len(args) :
col = args[i]
val = args[i+1]
self.__indx[col] = i
self.__headline.append(col)
self.__dashline.append("-" * len(col))
vals.append(val)
i += 2
arry = [val for val in vals]
self.__data = zip(*arry)
self.__initialized = 1
return self
    def __str__(self) :
        # Cast the table as a string: header lines, column names, dashes,
        # then one tab-separated line per data row.
        #
        return ( "\n".join(["\t".join(row) for row in self.__line]) + "\n" if self.__line else "" ) \
            + "\t".join(self.__headline) + "\n" \
            + "\t".join(self.__dashline) + "\n" \
            + "\n".join(("\t".join((str(item) for item in row)) for row in self.__data))
    def __iter__(self) :
        # Iterating a table yields its raw data rows.
        return self.__data.__iter__()
    def __getitem__(self, indx) :
        # A string index returns a header-value wrapper; anything else
        # (int, slice, list) returns a lazy row view.
        if ( type(indx) == str ) :
            return Starbase.StarbaseHdr(self.__line[self.__head[indx]])
        return Starbase.StarbaseRow(self, indx)
    def __setitem__(self, indx, value) :
        # A string index writes (or creates) a header keyword line.
        if ( type(indx) == str ) :
            if ( not self.__head.has_key(indx) ) :
                self.__head[indx] = len(self.__line)
                self.__line.append([indx])
            if ( type(value) == list ) :
                self.__line[self.__head[indx]][1:] = [str(v) for v in value]
            else :
                self.__line[self.__head[indx]][1:] = [str(value)]
            return
        # Row assignment: accept another row view or a plain list, then
        # coerce each value through its column's declared type.
        if ( value.__class__.__name__ == 'StarbaseRow' ) :
            value = value._StarbaseRow__tabl.__data[value._StarbaseRow__indx]
        if ( type(value) != list ) :
            raise TypeError("Starbase set row expected list")
        if ( len(self.__headline) != len(value) ) :
            raise TypeError("Starbase set row expected list of " + str(len(self.__headline)) )
        self.__data[indx] = [typ(val) for (typ, val) in zip(self.__type, value)]
    def __getattr__(self, indx) :
        # tab.label is sugar for tab["label"] (header access only).
        return Starbase.StarbaseHdr(self.__line[self.__head[indx]])
    def __setattr__(self, indx, value) :
        # Real attribute writes happen only during __init__ (before the
        # __initialized sentinel exists) or for already-present attributes;
        # every other dotted assignment becomes a header write.
        if ( not self.__dict__.has_key("_Starbase__initialized") \
             or self.__dict__.has_key(indx) ) :
            self.__dict__[indx] = value
            return
        self.__setitem__(indx, value)
    def __binop(self, other, command) :
        # Spill both operands to temp files and run a table-algebra command
        # over them.  NOTE(review): the temp files are never removed.
        fd, file1 = tempfile.mkstemp()
        os.close(fd)
        fd, file2 = tempfile.mkstemp()
        os.close(fd)
        self > file1
        other > file2
        return Starbase(command + " " + file1 + " " + file2)
    def __add__(self, other) :
        # Union of two tables.
        return self.__binop(other, "| uniontable")
    def __or__(self, other) :
        # Alias of +: union.
        return self.__binop(other, "| uniontable")
    def __sub__(self, other) :
        # Row difference.
        return self.__binop(other, "| diffrtable")
    def __and__(self, other) :
        # Row intersection.
        return self.__binop(other, "| intertable")
    def __mod__(self, columns) :
        # Column projection: tab % "X Y" or tab % ["X", "Y"].
        if ( type(columns) == list or type(columns) == tuple ) :
            columns = " ".join(columns)
        return (self > ("| column " + columns))
    def __invert__(self) :
        # Transpose.
        return self > "| transposetable"
    def __floordiv__(self, columns) :
        # Unique-sort on the given column(s).
        if ( type(columns) == list or type(columns) == tuple ) :
            columns = " ".join(columns)
        return self > ("| sorttable -u " + columns)
    def __gt__(self, file) :
        """Write the table to *file*: an open file object, a path, or a
        "| pipeline" string whose filtered output is read back as a table.
        """
        if ( type(file) == str ) :
            if ( file[0:1] == "|" ) :
                # NOTE(review): the duplicated inner test looks like a
                # leftover from the commented-out socket branch below.
                if ( file[0:1] == "|" ) :
                    p = subprocess.Popen(file[1:], shell=True, bufsize=1 \
                        , stdin=subprocess.PIPE \
                        , stdout=subprocess.PIPE \
                        , stderr=subprocess.STDOUT, close_fds=True)
                    writer = Starbase.StarbasePipeWriter(self, p.stdin)
                #if ( file[0:2] == ".:" ) :
                #	writer = Starbase.StarbaseSokWrite(self, file)
                # Feed the pipe from a thread while this thread reads the
                # result, so large tables cannot deadlock on pipe buffers.
                writer.start()
                reply = Starbase(p.stdout)
                writer.join()
                return reply
            file = open(file, "w")
        # Plain serialisation: header lines, column names, dashes, rows.
        for line in self.__line :
            print >> file, "\t".join(line)
        print >> file, "\t".join(self.__headline)
        print >> file, "\t".join(self.__dashline)
        for row in self.__data :
            print >> file, "\t".join((str(item) for item in row))
        return None
    def __rshift__(self, file) :
        # Append the table to the named file.
        file = open(file, "a")
        self > file
if __name__ == '__main__':
    # Run the doctests embedded in the Starbase docstring.  Regenerate the
    # expected fixture first with:
    # jottable 5 X Y Z > input.tab
    #
    import doctest
    doctest.testmod()
| [
"mpr56@drexel.edu"
] | mpr56@drexel.edu |
aef62cffcdd6a3983978fe05e6b113ce21db958c | fe776380d46b0273c5448a8f42cd6b78202d1518 | /nota_fiscal/__init__.py | bb6fe859062ec7b9e905dc3b50a214cb9718c454 | [] | no_license | frac/calculos_nota_fiscal | fade5995c242ef009ec7d35964e49b16cb3dfbd3 | 5c36c12291efcc804a62fbb3e429ebc6fa3980a3 | refs/heads/master | 2020-05-19T17:46:11.484553 | 2012-05-02T15:11:53 | 2012-05-02T15:11:53 | 4,204,192 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 28 | py | from calculo import calcula
| [
"petrich@gmail.com"
] | petrich@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.