blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2
values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313
values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107
values | src_encoding stringclasses 20
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 4 6.02M | extension stringclasses 78
values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
c3c77889d9fd2d9198ae6452a80b9c1abe9d8faf | 24341c76b907e75cc9f4f01ceece7edd0853474b | /productList/app/models.py | abb11857b8274546b2fcea898773d2f81f30f0bb | [] | no_license | yasirdis/ProductList | a7e265e2c604e0744e45a76304271b1e526f1494 | e99a50d9bee1f05c43b06ada073affcba2274d4b | refs/heads/master | 2022-12-02T23:28:13.255658 | 2020-08-16T08:51:40 | 2020-08-16T08:51:40 | 287,905,183 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 515 | py | from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class Product(models.Model):
    """A product shown in the product list."""
    # Natural primary key: externally supplied product code (max 9 chars).
    Id = models.CharField(primary_key = True, max_length = 9)
    Name = models.CharField(max_length=20)
    Quantity = models.IntegerField()
    # Image source path/URL for the product (max 50 chars).
    ImgSrc = models.CharField(max_length = 50)
class UserDetails(models.Model):
    """Extra per-user profile data attached to Django's built-in User."""
    # One profile row per auth user; deleted together with the user.
    user = models.OneToOneField(User, on_delete=models.CASCADE)
    Phone=models.CharField(max_length=11)
    def __str__(self):
        # Shown in admin/listings: the linked account's username.
        return self.user.username
| [
"yasir.dis@gmail.com"
] | yasir.dis@gmail.com |
b54740991dcdeb374d0c865b1317978ac7ed872b | 0edfbe9aa843b89544273e1a19bb707b7dec87fd | /academiya/migrations/0005_master.py | e45bf2cfc29eb602586aa5de6e059451e6cc2e1a | [] | no_license | SmirnovConstantine/Academiya | d5e68735f2b2b395436f4ca4a1f4b4d6424efbaf | 3bdedd57e93916212b3e89538ee07fb732c751f6 | refs/heads/master | 2020-04-28T12:00:56.422285 | 2019-03-12T18:03:15 | 2019-03-12T18:03:15 | 175,262,468 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 811 | py | # Generated by Django 2.1.5 on 2019-03-01 18:27
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: creates the ``Master`` model with foreign
    keys to ``academiya.Test_task`` (``orden``) and ``academiya.Planet``
    (``planet``)."""

    dependencies = [
        ('academiya', '0004_auto_20190227_2143'),
    ]

    operations = [
        migrations.CreateModel(
            name='Master',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(db_index=True, max_length=100)),
                ('orden', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='academiya.Test_task')),
                ('planet', models.ForeignKey(max_length=50, on_delete=django.db.models.deletion.CASCADE, to='academiya.Planet')),
            ],
        ),
    ]
| [
"smirnov.konstantin.93@mail.ru"
] | smirnov.konstantin.93@mail.ru |
47e3d13e46c6dbfa3ee99c75aab6a2c114ad1664 | c522a14d47c38dad9ea45aa319e8e2984fb2b739 | /MachineLearning/DeepLearning/DeepLearningWithPython/chapter4/demo3.py | d2fe606b5dd67241c073461029c0870c228e1513 | [] | no_license | motein/Pocketin | 9d4daf262cd21f8d724e3e5059119ae4fd981125 | 32e698180a449b38c4c8bf2b58338566856ba06e | refs/heads/master | 2022-02-03T21:28:48.714659 | 2022-01-27T02:05:10 | 2022-01-27T02:05:10 | 41,902,995 | 0 | 0 | null | 2021-01-14T07:48:01 | 2015-09-04T07:20:09 | Jupyter Notebook | UTF-8 | Python | false | false | 675 | py | '''
Created on Aug 3, 2018
@author: xiongan2
'''
# Demo: build a symbolic Theano expression over matrices and scalars,
# compile it, and compare the result against the equivalent NumPy math.
import numpy
import theano.tensor as T
from theano import function
# Symbolic double-precision matrix inputs.
a = T.dmatrix('a')
b = T.dmatrix('b')
c = T.dmatrix('c')
d = T.dmatrix('d')
# Symbolic double-precision scalar inputs.
p = T.dscalar('p')
q = T.dscalar('q')
r = T.dscalar('r')
s = T.dscalar('s')
u = T.dscalar('u')
# Combined expression: (((a*p) + (b-q) - (c+r)) * d/s) * u
e = (((a * p) + (b - q) - (c + r )) * d/s) * u
# Compile the symbolic graph into a callable function.
f = function([a,b,c,d,p,q,r,s,u], e)
a_data = numpy.array([[1,1],[1,1]])
b_data = numpy.array([[2,2],[2,2]])
c_data = numpy.array([[5,5],[5,5]])
d_data = numpy.array([[3,3],[3,3]])
# Plain-NumPy reference result; should match the Theano output below.
print("Expected:", (((a_data * 1.0) + (b_data - 2.0) - (c_data + 3.0 )) * d_data/4.0) * 5.0)
print("Via Theano:", f(a_data,b_data,c_data,d_data,1,2,3,4,5)) | [
"motein@qq.com"
] | motein@qq.com |
6820e6f568cc17fde6e20801dcf10f907e45a876 | ee896644af3107622584e7ba7eb30da7aa1edd3a | /OpenClassrooms/Apprennez-a-programmer-en-Python/Partie_1/leap-year_tester.py | 78dc37db790b092d79de218f37d0dbd91fbf1ad9 | [] | no_license | ChocolateCharlie/Python-exercises | 956c47c765508a0255d18776dcd3df88ab059d50 | d0032242a3004925b5b0552b28b3f4d916f53ef7 | refs/heads/master | 2021-06-06T19:15:19.617492 | 2020-05-29T16:37:00 | 2020-05-29T16:37:00 | 96,107,389 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 321 | py | # -*-coding:Latin-1 -*
def is_leap(year):
    """Return True if *year* is a leap year in the Gregorian calendar.

    Leap years are divisible by 4, except century years, which must also
    be divisible by 400 (so 2000 is a leap year but 1900 is not).
    """
    return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)


if __name__ == "__main__":
    # Ask the user to enter a year (guarded so importing this module
    # does not block on stdin).
    year = int(input("Please enter a year : "))
    # Print whether the given year is a leap year or not.
    if not is_leap(year):
        print(str(year) + " is not a leap year.")
    else:
        print(str(year) + " is a leap year.")
| [
"aurore.amrit@gmail.com"
] | aurore.amrit@gmail.com |
a3f2a5a005d26ab9af467662fd50ff955da9a329 | 381612e57ef807e573b40b2dfaf062c8fe7a43f7 | /nesi/softbox/api/models/route_models.py | 7aea30390ad3f30d7155b1f369e6370d70560810 | [
"BSD-2-Clause",
"BSD-3-Clause"
] | permissive | zcf900/NESi | 1635a405660bb9390843468f34105dd2ef45bd75 | 0db169dd6378fbd097380280cc41440e652de19e | refs/heads/master | 2023-01-31T23:21:02.799923 | 2020-12-18T13:37:43 | 2020-12-18T13:37:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 762 | py | # This file is part of the NESi software.
#
# Copyright (c) 2020
# Original Software Design by Ilya Etingof <https://github.com/etingof>.
#
# Software adapted by inexio <https://github.com/inexio>.
# - Janis Groß <https://github.com/unkn0wn-user>
# - Philip Konrath <https://github.com/Connyko65>
# - Alexander Dincher <https://github.com/Dinker1996>
#
# License: https://github.com/inexio/NESi/LICENSE.rst
import uuid
from nesi.softbox.api import db
class Route(db.Model):
    """Database model for a static route entry belonging to a box."""
    id = db.Column(db.Integer(), primary_key=True)
    # Destination address (string form; format not enforced here).
    dst = db.Column(db.String(23))
    # Gateway / next-hop address.
    gw = db.Column(db.String(23))
    # Route metric; lower is preferred. Defaults to 1.
    metric = db.Column(db.Integer(), default=1)
    # Owning simulated device.
    box_id = db.Column(db.Integer, db.ForeignKey('box.id'))
    # Subnet mask value; None when unspecified.
    sub_mask = db.Column(db.Integer(), default=None)
| [
"janis.gross.jg@gmail.com"
] | janis.gross.jg@gmail.com |
b346c06471c05248ec00056a0c1471aae1296317 | e71335877c7fa75ba399167f3b1ac8d484e2d11b | /code/microblog.10/app/routes.py | a588919e6c238cf793cb5fd93bc52b39d3a2be80 | [] | no_license | malzahr9/microblog | c07f62a48a95e884a6d5193b0df8707c050aef5b | 8b9432e6f9aa9e6824eec5c4288043b62b32c1ec | refs/heads/master | 2020-03-16T20:51:51.778738 | 2018-05-03T02:03:27 | 2018-05-03T02:03:27 | 132,975,563 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,254 | py | from flask import render_template, flash, redirect, request
from flask import url_for
from werkzeug.urls import url_parse
from app import app
from app import db
from app.forms import LoginForm
from app.forms import RegistrationForm
from app.forms import EditProfileForm
from app.forms import PostForm
from app.forms import ResetPasswordRequestForm
from app.forms import ResetPasswordForm
from app.models import Post
from flask_login import current_user, login_user, logout_user
from flask_login import login_required
from app.models import User
from datetime import datetime
from app.email import send_password_reset_email
@app.before_request
def before_request():
    """Stamp the logged-in user's ``last_seen`` time before every request."""
    if current_user.is_authenticated:
        current_user.last_seen = datetime.utcnow()
        db.session.commit()
@app.route('/', methods=['GET', 'POST'])
@app.route('/index', methods=['GET', 'POST'])
@login_required
def index():
    """Home page: new-post form plus a paginated feed of followed posts."""
    form = PostForm()
    if form.validate_on_submit():
        post = Post(body=form.post.data, author=current_user)
        db.session.add(post)
        db.session.commit()
        flash('Your post is now live!')
        # Post/Redirect/Get so a browser refresh does not resubmit the form.
        return redirect(url_for('index'))
    page = request.args.get('page', 1, type=int)
    posts = current_user.followed_posts().paginate(
        page, app.config['POSTS_PER_PAGE'], False)
    next_url = url_for('index', page=posts.next_num) \
        if posts.has_next else None
    prev_url = url_for('index', page=posts.prev_num) \
        if posts.has_prev else None
    return render_template('index.html', title='Home', form=form,
                           posts=posts.items, next_url=next_url,
                           prev_url=prev_url)
@app.route('/login', methods=['GET', 'POST'])
def login():
    """Log a user in; on success honour a safe ``?next=`` redirect target."""
    if current_user.is_authenticated:
        return redirect(url_for('index'))
    form = LoginForm()
    if form.validate_on_submit():
        user = User.query.filter_by(username=form.username.data).first()
        if user is None or not user.check_password(form.password.data):
            flash('Invalid username or password')
            return redirect(url_for('login'))
        login_user(user, remember=form.remember_me.data)
        next_page = request.args.get('next')
        # Only follow relative URLs (empty netloc) to avoid open redirects.
        if not next_page or url_parse(next_page).netloc != '':
            next_page = url_for('index')
        return redirect(next_page)
    return render_template('login.html', title='Sign In', form=form)
@app.route('/logout')
@login_required
def logout():
    """End the current session and return to the home page."""
    logout_user()
    return redirect(url_for('index'))
@app.route('/register', methods=['GET', 'POST'])
def register():
    """Create a new user account, then send the user to the login page."""
    if current_user.is_authenticated:
        return redirect(url_for('index'))
    form = RegistrationForm()
    if form.validate_on_submit():
        user = User(username=form.username.data, email=form.email.data)
        user.set_password(form.password.data)
        db.session.add(user)
        db.session.commit()
        flash('Congratulations, you are now a registered user!')
        return redirect(url_for('login'))
    return render_template('register.html', title='Register', form=form)
@app.route('/user/<username>')
@login_required
def user(username):
    """Profile page for *username* with their posts, newest first, paginated.

    Returns 404 when the username does not exist.
    """
    user = User.query.filter_by(username=username).first_or_404()
    page = request.args.get('page', 1, type=int)
    posts = user.posts.order_by(Post.timestamp.desc()).paginate(
        page, app.config['POSTS_PER_PAGE'], False)
    next_url = url_for('user', username=user.username, page=posts.next_num) \
        if posts.has_next else None
    prev_url = url_for('user', username=user.username, page=posts.prev_num) \
        if posts.has_prev else None
    return render_template('user.html', user=user, posts=posts.items,
                           next_url=next_url, prev_url=prev_url)
@app.route('/edit_profile', methods=['GET', 'POST'])
@login_required
def edit_profile():
    """Let the current user edit their username and "about me" text.

    On GET the form is pre-populated with the current values; on a valid
    POST the changes are saved and the page redisplayed.
    """
    form = EditProfileForm(current_user.username)
    if form.validate_on_submit():
        current_user.username = form.username.data
        current_user.about_me = form.about_me.data
        db.session.commit()
        flash('Your changes have been saved.')
        return redirect(url_for('edit_profile'))
    elif request.method == 'GET':
        form.username.data = current_user.username
        form.about_me.data = current_user.about_me
    return render_template('edit_profile.html', title='Edit Profile',
                           form=form)
@app.route('/follow/<username>')
@login_required
def follow(username):
    """Make the current user follow *username* (no-op with a flash message
    when the target is missing or is the current user)."""
    user = User.query.filter_by(username=username).first()
    if user is None:
        flash('User {} not found.'.format(username))
        return redirect(url_for('index'))
    if user == current_user:
        flash('You cannot follow yourself!')
        return redirect(url_for('user', username=username))
    current_user.follow(user)
    db.session.commit()
    flash('You are following {}!'.format(username))
    return redirect(url_for('user', username=username))
@app.route('/unfollow/<username>')
@login_required
def unfollow(username):
    """Make the current user unfollow *username* (mirror of ``follow``)."""
    user = User.query.filter_by(username=username).first()
    if user is None:
        flash('User {} not found.'.format(username))
        return redirect(url_for('index'))
    if user == current_user:
        flash('You cannot unfollow yourself!')
        return redirect(url_for('user', username=username))
    current_user.unfollow(user)
    db.session.commit()
    flash('You are not following {}.'.format(username))
    return redirect(url_for('user', username=username))
@app.route('/explore')
@login_required
def explore():
    """Global feed: all posts from all users, newest first, paginated.

    Reuses the index template but without the post form.
    """
    page = request.args.get('page', 1, type=int)
    posts = Post.query.order_by(Post.timestamp.desc()).paginate(
        page, app.config['POSTS_PER_PAGE'], False)
    next_url = url_for('explore', page=posts.next_num) \
        if posts.has_next else None
    prev_url = url_for('explore', page=posts.prev_num) \
        if posts.has_prev else None
    return render_template("index.html", title='Explore', posts=posts.items,
                           next_url=next_url, prev_url=prev_url)
@app.route('/reset_password_request', methods=['GET', 'POST'])
def reset_password_request():
    """Ask for an email address and send a password-reset link.

    The flash message is shown regardless of whether the address exists,
    so the form does not reveal which emails are registered.
    """
    if current_user.is_authenticated:
        return redirect(url_for('index'))
    form = ResetPasswordRequestForm()
    if form.validate_on_submit():
        user = User.query.filter_by(email=form.email.data).first()
        if user:
            send_password_reset_email(user)
        flash('Check your email for the instructions to reset your password')
        return redirect(url_for('login'))
    return render_template('reset_password_request.html',
                           title='Reset Password', form=form)
@app.route('/reset_password/<token>', methods=['GET', 'POST'])
def reset_password(token):
    """Set a new password for the user identified by a valid reset *token*.

    An invalid/expired token silently redirects to the home page.
    """
    if current_user.is_authenticated:
        return redirect(url_for('index'))
    user = User.verify_reset_password_token(token)
    if not user:
        return redirect(url_for('index'))
    form = ResetPasswordForm()
    if form.validate_on_submit():
        user.set_password(form.password.data)
        db.session.commit()
        flash('Your password has been reset.')
        return redirect(url_for('login'))
    return render_template('reset_password.html', form=form)
| [
"gregdelozier@gmail.com"
] | gregdelozier@gmail.com |
c05cb60b29e99cb7f641740eadd1e391584f1f41 | b369a398ef95a734ce687c9846dc3c3b452cbd5e | /logJogo.py | 0f30bbd2deeb355e0bf06be8923364f3f8f2bcad | [] | no_license | diegomardu/jogo_dados_01 | 65676f9c16dd2bfacf00cf2164f312ca16d814dc | bc73d747960c26516af1e2adaa144c322619d3c9 | refs/heads/master | 2020-05-26T07:19:31.035756 | 2019-06-03T11:19:14 | 2019-06-03T11:19:14 | 188,147,556 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 514 | py | class logJogo:
    def __init__(self,nome="",pontos=0,resultado=""):
        # Log entry for one game round: player name, score and outcome.
        self._nome = nome
        self._pontos = pontos
        self._resultado = resultado
    def getNome(self):
        # Return the player's name.
        return self._nome
    def setNome(self,nome):
        # Set the player's name.
        self._nome = nome
    def getPontos(self):
        # Return the score for this round.
        return self._pontos
    def setPontos(self,pontos):
        # Set the score for this round.
        self._pontos = pontos
    def getResultado(self):
        # Return the round outcome string.
        return self._resultado
    def setResultado(self,resultado):
        # Set the round outcome string.
        self._resultado = resultado
"diegomartins836@gmail.com"
] | diegomartins836@gmail.com |
e6d3db434bb944520c7c67f22da57dc052a000e0 | aa35303b9bebd1ab8042d678e63094c9dc2d780f | /humidity_monitor/__init__.py | 838d4a59235a912fc9f503152dcb2c9dfbf952c1 | [] | no_license | ruaridhwatt/microscripts | b030e9c180c6e19cf147d1c3c609bae05d23e83f | 1ec702d15ca812bc60b030a71509d42581207e18 | refs/heads/main | 2023-07-18T15:59:40.281633 | 2021-09-01T16:55:11 | 2021-09-01T16:55:11 | 393,965,686 | 0 | 0 | null | 2021-08-17T19:33:27 | 2021-08-08T13:17:54 | Python | UTF-8 | Python | false | false | 110 | py | from humidity_monitor.LowHumidity import LowHumidity
from humidity_monitor.HumidityAlarm import HumidityAlarm
| [
"ruaridh.watt@gmail.com"
] | ruaridh.watt@gmail.com |
4581696b15439fb4973aec8ec9df6b1433f267c4 | 325987981dd00ef79589c61b587d66f5537f85a5 | /algorithms/hackerrank/compare-the-triplets/python/main.py | 9e48b95a950ae5b2b4184cd5e651261e6a7e805e | [] | no_license | wfelipe3/learning | 9ab7e5ea756ef8b58739914854e7858dd562cef8 | 06c9c12de0edad7f3e4de856c61a2c8b4ebe0011 | refs/heads/master | 2021-01-19T19:02:45.098461 | 2018-08-27T04:24:40 | 2018-08-27T04:24:40 | 101,184,481 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 647 | py | def get_values():
s = input()
return s.split(' ')
def to_ints(values):
    """Convert each string in *values* to an int, preserving order."""
    return [int(value) for value in values]
def get_int_values():
    # Read one whitespace-separated line from stdin and return it as ints.
    return to_ints(get_values())
def have_same_size(x, y):
    """Return True when *x* and *y* contain the same number of elements.

    Uses ``==`` rather than ``is``: the original identity comparison of
    two int objects only works for CPython's cached small ints, so equal
    lengths above 256 could compare as different.
    """
    return len(x) == len(y)
def get_points(x, y):
    """Compare two equally sized score lists element-wise.

    Each side earns one point per position where its value is strictly
    greater than the other's; ties score nothing.

    Returns:
        tuple: ``(x_points, y_points)``.

    Raises:
        ValueError: if the lists differ in length.
    """
    if len(x) != len(y):
        raise ValueError('not the same size')
    xpoints = 0
    ypoints = 0
    # Iterate the paired values instead of a hard-coded range(3), so the
    # function works for any (equal) length, not only triplets.
    for a, b in zip(x, y):
        if a > b:
            xpoints = xpoints + 1
        elif a < b:
            ypoints = ypoints + 1
    return (xpoints, ypoints)
# Read both players' triplets from stdin, score them, and print "x y".
(x, y)= get_points(get_int_values(), get_int_values())
print(format("{} {}".format(x, y)))
| [
"wfelipe3@gmail.com"
] | wfelipe3@gmail.com |
1f0c844a961102816b1b465c43073eb91f394e79 | 4dd54710e927ff9b9c944aaa4a911a9167c3b5de | /cargonext/organizations/doctype/warehouse_company/warehouse_company.py | e46798131f949b87fba3d77ca6f3fd71b1e6f63d | [
"MIT"
] | permissive | jryandechavez/cargonext | f8d7dc044988a8d3aff6f5267f0d11bba547109f | 2438f21bc69c20b0fdf3560b077a9ea7bc17ec8c | refs/heads/master | 2020-03-20T13:48:24.601665 | 2018-06-15T09:12:58 | 2018-06-15T09:12:58 | 137,466,788 | 0 | 0 | null | 2018-06-15T09:12:23 | 2018-06-15T09:12:23 | null | UTF-8 | Python | false | false | 285 | py | # -*- coding: utf-8 -*-
# Copyright (c) 2017, Opensource Solutions Philippines and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class WarehouseCompany(Document):
    """Frappe DocType controller for Warehouse Company; no custom behaviour."""
    pass
| [
"root@ossphdev.localdomain"
] | root@ossphdev.localdomain |
88ff8118af3ff04831d32976774beb4883963270 | af434a5b0d449f003fd1accf1864a0ba69548610 | /candidates/migrations/0010_auto_20180511_1417.py | c9b1b69ba8efcdc12e811340b13561a2700322f1 | [] | no_license | ajarvis3gs/recruiting | 97e1a694bd001cd1ecede25b4926420bfc699eb9 | 30bbc5914b410c2b177287d8bbeb30c28d1f577d | refs/heads/master | 2021-10-11T13:24:31.072970 | 2019-01-26T13:50:53 | 2019-01-26T13:50:53 | 104,217,948 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,778 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2018-05-11 14:17
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: adds ``Candidate.work_status`` and creates
    the ``CandidateResponse`` model (linking a candidate to a job) together
    with per-qualification response models for mandatory and requested
    qualifications."""

    dependencies = [
        ('jobs', '0022_auto_20180505_1302'),
        ('candidates', '0009_auto_20180510_1342'),
    ]

    operations = [
        migrations.CreateModel(
            name='CandidateResponse',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('last_modified', models.DateTimeField(auto_now=True)),
                ('created', models.DateTimeField(auto_now_add=True)),
            ],
        ),
        migrations.CreateModel(
            name='CandidateResponseMandatoryQualification',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('responseText', models.TextField(blank=True, null=True)),
                ('candidateResponse', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='mandatoryQualifications', to='candidates.CandidateResponse')),
                ('mandatoryQualification', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='candidateResponses', to='jobs.JobMandatoryQualification')),
            ],
        ),
        migrations.CreateModel(
            name='CandidateResponseRequestedQualification',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('responseText', models.TextField(blank=True, null=True)),
                ('candidateResponse', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='requestedQualifications', to='candidates.CandidateResponse')),
                ('requestedQualification', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='candidateResponses', to='jobs.JobRequestedQualification')),
            ],
        ),
        migrations.AddField(
            model_name='candidate',
            name='work_status',
            field=models.CharField(blank=True, max_length=100),
        ),
        migrations.AddField(
            model_name='candidateresponse',
            name='candidate',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='responses', to='candidates.Candidate'),
        ),
        migrations.AddField(
            model_name='candidateresponse',
            name='job',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='responses', to='jobs.Job'),
        ),
    ]
| [
"aaron-jarvis@idexx.com"
] | aaron-jarvis@idexx.com |
3773ecdd5ab252e3f156de78a6ec7a761ae3b570 | 895b70141b0502bd6f20d5ddb40a0e2e27226279 | /PythonPractice/EnterpriseWechatWeb/page/basePage.py | fd58e70c39366c324c764ce6e53b5bf13ea0b40b | [] | no_license | lqin007/testDemo | 2e774970045b308aa702dd280ef65841a507ce4d | 64efbc8a62660ab80c5ccd53b38d094ea1dc4bd7 | refs/heads/master | 2023-06-14T06:15:16.963446 | 2021-07-16T02:19:43 | 2021-07-16T02:19:43 | 351,643,165 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,213 | py | from selenium import webdriver
from selenium.webdriver.chrome.options import Options
class BasePage:
    """Base page object wrapping a Selenium WebDriver session."""
    # If a subclass does not override __init__, instantiating it calls this
    # parent constructor; base_driver defaults to None.
    def __init__(self,base_driver=None):
        if base_driver is None:
            # base_url would define the landing page to open.
            #base_url = "https://work.weixin.qq.com/wework_admin/frame#index"
            # Browser-reuse mode: attach to an already-running Chrome
            # started with remote debugging on port 9222.
            chrome_arg = Options()
            chrome_arg.debugger_address = "127.0.0.1:9222"
            self.driver = webdriver.Chrome(options=chrome_arg)
            #self.driver.get(base_url)
            # Implicit wait (seconds) applied to element lookups.
            self.driver.implicitly_wait(5)
        elif base_driver is not None:
            # Reuse the driver created earlier instead of instantiating again.
            self.driver = base_driver
    # Wrap element lookup so the underlying automation stack can be swapped
    # without touching page classes. Locators are (by, value) tuples,
    # unpacked with *.
    def find(self,locator):
        return self.driver.find_element(*locator)
    def finds(self,locator):
        return self.driver.find_elements(*locator)
    def end(self):
        # Close the browser and end the WebDriver session.
        self.driver.quit()
| [
"1197964844@qq.com"
] | 1197964844@qq.com |
b4e5d65cf41c5b676b3c8fea171d87bae446ba4b | 88608583e66b2084a8fe010d18a4bc779a9ea8eb | /torcv/links/model/inception/__init__.py | 06148b1d3907003901ba7919852b9854ece3df5b | [] | no_license | UdonDa/torcv | c4f1f1cac99d49a5fe0d3edef6293659d807f292 | 49e548d538933f5eb5a4ffe1cb529914b180dae2 | refs/heads/master | 2020-04-28T05:25:09.183032 | 2019-03-19T02:39:52 | 2019-03-19T02:39:52 | 175,019,302 | 1 | 0 | null | 2019-03-19T02:39:53 | 2019-03-11T14:37:18 | Python | UTF-8 | Python | false | false | 51 | py | from torcv.links.model.inception.inception import * | [
"udoooon0727@gmail.com"
] | udoooon0727@gmail.com |
7c18cecf00315c32eb38a70e12917f1ea4a78653 | 5befd324597df48b068a7ebda221db12a591f895 | /yotta/lib/registry_access.py | 69e96096e2a266ace3511bc79832141695333703 | [
"Apache-2.0"
] | permissive | parisk/yotta | 6616002c709e30e44234118ecf3dcbe7b9433f53 | 496b0994aa77854709782d8cc23032aabd50bb59 | refs/heads/master | 2021-01-22T19:04:02.966265 | 2014-10-21T08:40:55 | 2014-10-21T08:40:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,462 | py | # standard library modules, , ,
import re
import logging
from collections import OrderedDict
import uuid
import functools
import json
import binascii
import calendar
import datetime
import hashlib
import itertools
import urllib
import base64
import webbrowser
# restkit, MIT, HTTP client library for RESTful APIs, pip install restkit
from restkit import Resource, BasicAuth, errors as restkit_errors
from restkit.forms import multipart_form_encode
# PyJWT, MIT, Jason Web Tokens, pip install PyJWT
import jwt
# pycrypto, Public Domain, Python Crypto Library, pip install pyCRypto
import Crypto
from Crypto.PublicKey import RSA
# settings, , load and save settings, internal
import settings
# connection_pool, , shared connection pool, internal
import connection_pool
# access_common, , things shared between different component access modules, internal
import access_common
# version, , represent versions and specifications, internal
import version
# Ordered JSON, , read & write json, internal
import ordered_json
# Github Access, , access repositories on github, internal
import github_access
# !!! FIXME get SSL cert for main domain, then use HTTPS
# Base endpoints for the yotta registry API and the public website.
# !!! FIXME get SSL cert for main domain, then use HTTPS
Registry_Base_URL = 'http://registry.yottabuild.org'
Website_Base_URL = 'http://yottabuild.org'
# Strips the "ssh-xxx " type prefix and trailing "user@host" comment from
# an OpenSSH-format public key, leaving only the base64 payload.
_OpenSSH_Keyfile_Strip = re.compile("^(ssh-[a-z0-9]*\s+)|(\s+.+\@.+)|\n", re.MULTILINE)

logger = logging.getLogger('access')
class _BearerJWTFilter(object):
    """restkit request filter that attaches a short-lived RS256 JSON Web
    Token (signed with the user's private key) to every outgoing request."""
    def __init__(self, private_key):
        super(_BearerJWTFilter, self).__init__()
        # Token expiry: two hours from now, as a unix timestamp.
        expires = calendar.timegm((datetime.datetime.utcnow() + datetime.timedelta(hours=2)).timetuple())
        # "prn" (principal) identifies the signing key by its fingerprint.
        prn = _fingerprint(private_key.publickey())
        logger.debug('fingerprint: %s' % prn)
        token_fields = {
            "iss": 'yotta',
            "aud": Registry_Base_URL,
            "prn": prn,
            "exp": str(expires)
        }
        logger.debug('token fields: %s' % token_fields)
        self.token = jwt.encode(token_fields, private_key, 'RS256')
        logger.debug('encoded token: %s' % self.token)
    def on_request(self, request):
        # Called by restkit for each outgoing request.
        request.headers['Authorization'] = 'Bearer ' + self.token
def _pubkeyWireFormat(pubkey):
    # Strip the OpenSSH key-type prefix and user@host comment, then
    # URL-quote the remaining base64 so the key can travel in a URL.
    return urllib.quote(_OpenSSH_Keyfile_Strip.sub('', pubkey.exportKey('OpenSSH')))
def _fingerprint(pubkey):
    # MD5 of the decoded key material, formatted as colon-separated hex
    # byte pairs (the traditional OpenSSH fingerprint presentation).
    stripped = _OpenSSH_Keyfile_Strip.sub('', pubkey.exportKey('OpenSSH'))
    decoded = base64.b64decode(stripped)
    khash = hashlib.md5(decoded).hexdigest()
    return ':'.join([khash[i:i+2] for i in xrange(0, len(khash), 2)])
def _registryAuthFilter():
    """Build the request filter used to authenticate registry requests."""
    # basic auth until we release publicly, to prevent outside registry access,
    # after that this will be removed
    return _BearerJWTFilter(_getPrivateKeyObject())
def _returnRequestError(fn):
    ''' Decorator that captures un-caught restkit_errors.RequestFailed errors
        and returns them as an error message. If no error occurs the return
        value of the wrapped function is returned (normally None). '''
    @functools.wraps(fn)
    def wrapped(*args, **kwargs):
        try:
            return fn(*args, **kwargs)
        except restkit_errors.RequestFailed as e:
            # "server" was previously misspelled "sever" in this
            # user-visible message.
            return "server returned status %s: %s" % (e.status_int, e.message)
    return wrapped
def _handleAuth(fn):
    ''' Decorator to re-try API calls after asking the user for authentication. '''
    @functools.wraps(fn)
    def wrapped(*args, **kwargs):
        try:
            return fn(*args, **kwargs)
        except restkit_errors.Unauthorized as e:
            github_access.authorizeUser()
            # Pass the token as a lazy %-style logging argument: the
            # original passed it as an extra positional arg to a format
            # string with no placeholder, which raises a formatting error
            # inside the logging machinery.
            logger.debug('trying with authtoken: %s', settings.getProperty('github', 'authtoken'))
            return fn(*args, **kwargs)
    return wrapped
def _friendlyAuthError(fn):
    ''' Decorator to print a friendly you-are-not-authorised message. Use
        **outside** the _handleAuth decorator to only print the message after
        the user has been given a chance to login. '''
    @functools.wraps(fn)
    def wrapped(*args, **kwargs):
        try:
            return fn(*args, **kwargs)
        except restkit_errors.Unauthorized as e:
            # Swallow the exception and return None; the user has already
            # had a chance to authenticate via _handleAuth.
            logger.error('insufficient permission')
            return None
    return wrapped
def _listVersions(namespace, name):
    """Fetch all published versions of *name* from the registry.

    Returns a list of RegistryThingVersion objects. Raises
    access_common.ComponentUnavailable when the registry responds 404.
    """
    # list versions of the package:
    url = '%s/%s/%s/versions' % (
        Registry_Base_URL,
        namespace,
        name
    )
    headers = { }

    auth = _registryAuthFilter()

    resource = Resource(url, pool=connection_pool.getPool(), filters=[auth])
    try:
        logger.info('get versions for ' + name)
        response = resource.get(
            headers = headers
        )
    except restkit_errors.ResourceNotFound as e:
        raise access_common.ComponentUnavailable(
            '%s does not exist in the %s registry' % (name, namespace)
        )
    body_s = response.body_string()
    return [RegistryThingVersion(x, namespace, name) for x in ordered_json.loads(body_s)]
def _tarballURL(namespace, name, version):
    """Build the registry URL for a specific version's tarball."""
    parts = (Registry_Base_URL, namespace, name, 'versions', version, 'tarball')
    return '/'.join(parts)
def _getTarball(url, directory, sha256):
    """Download a tarball from *url* and unpack it into *directory*,
    verifying its sha256 hash (a missing hash is only warned about)."""
    auth = _registryAuthFilter()

    logger.debug('registry: get: %s' % url)
    if not sha256:
        logger.warn('tarball %s has no hash to check' % url)

    resource = Resource(url, pool=connection_pool.getPool(), filters=[auth])
    #resource = Resource('http://blobs.yottos.org/targets/stk3700-0.0.0.tar.gz', pool=connection_pool.getPool(), follow_redirect=True)
    response = resource.get()
    # there seems to be an issue with following the redirect using restkit:
    # follow redirect manually
    if response.status_int == 302 and 'Location' in response.headers:
        redirect_url = response.headers['Location']
        logger.debug('registry: redirect to: %s' % redirect_url)
        resource = Resource(redirect_url, pool=connection_pool.getPool())
        response = resource.get()
    return access_common.unpackTarballStream(response.body_stream(), directory, ('sha256', sha256))
def _generateAndSaveKeys():
    """Generate a new 2048-bit RSA key pair, persist both halves to
    settings as hex-encoded DER, and return (public_hex, private_hex)."""
    k = RSA.generate(2048)
    privatekey_hex = binascii.hexlify(k.exportKey('DER'))
    settings.setProperty('keys', 'private', privatekey_hex)
    pubkey_hex = binascii.hexlify(k.publickey().exportKey('DER'))
    settings.setProperty('keys', 'public', pubkey_hex)
    return pubkey_hex, privatekey_hex
def _getPrivateKeyObject():
    """Return the stored RSA private key object, generating and saving a
    new key pair first if none exists yet."""
    privatekey_hex = settings.getProperty('keys', 'private')
    if not privatekey_hex:
        pubkey_hex, privatekey_hex = _generateAndSaveKeys()
    return RSA.importKey(binascii.unhexlify(privatekey_hex))
# API
class RegistryThingVersion(access_common.RemoteVersion):
    """One published version of a registry component/target, able to
    download and unpack its own tarball."""
    def __init__(self, data, namespace, name):
        logger.debug('RegistryThingVersion %s/%s data: %s' % (namespace, name, data))
        version = data['version']
        self.namespace = namespace
        self.name = name
        # The registry may omit the hash; in that case the download is
        # unverified (a warning is logged by _getTarball).
        if 'hash' in data and 'sha256' in data['hash']:
            self.sha256 = data['hash']['sha256']
        else:
            self.sha256 = None
        url = _tarballURL(self.namespace, self.name, version)
        super(RegistryThingVersion, self).__init__(version, url)
    def unpackInto(self, directory):
        """Download this version's tarball and unpack it into *directory*."""
        assert(self.url)
        _getTarball(self.url, directory, self.sha256)
class RegistryThing(access_common.RemoteComponent):
    """A named component/target in the public registry, resolved against a
    semantic-version specification."""
    def __init__(self, name, version_spec, namespace):
        self.name = name
        self.spec = version.Spec(version_spec)
        self.namespace = namespace

    @classmethod
    def createFromNameAndSpec(cls, version_spec, name, registry):
        ''' returns a registry component for anything that's a valid package
            name (this does not guarantee that the component actually exists in
            the registry: use availableVersions() for that).
        '''
        # we deliberately allow only lowercase, hyphen, and (unfortunately)
        # numbers in package names, to reduce the possibility of confusingly
        # similar names: if the name doesn't match this then escalate to make
        # the user fix it
        name_match = re.match('^([a-z0-9-]+)$', name)
        if not name_match:
            logger.warning(
                'Dependency name "%s" is not valid (must contain only lowercase letters, hyphen, and numbers)' % name
            )
            return None
        try:
            spec = version.Spec(version_spec)
            return RegistryThing(name, version_spec, registry)
        except ValueError, e:
            # invalid version specification: fall through and return None
            pass
        return None

    def versionSpec(self):
        """Return the version.Spec this component was created with."""
        return self.spec

    def availableVersions(self):
        ''' return a list of Version objects, each able to retrieve a tarball '''
        return _listVersions(self.namespace, self.name)

    def tipVersion(self):
        # Not supported for registry components.
        raise NotImplementedError()

    @classmethod
    def remoteType(cls):
        """Identify this remote-component type as 'registry'."""
        return 'registry'
@_returnRequestError
@_handleAuth
def publish(namespace, name, version, description_file, tar_file, readme_file, readme_file_ext):
    ''' Publish a tarblob to the registry, if the request fails, an exception
        is raised, which either triggers re-authentication, or is turned into a
        return value by the decorators. (If successful, the decorated function
        returns None)

        Raises:
            ValueError: if readme_file_ext is neither '.md' nor ''.
    '''
    url = '%s/%s/%s/versions/%s' % (
        Registry_Base_URL,
        namespace,
        name,
        version
    )

    if readme_file_ext == '.md':
        readme_section_name = 'readme.md'
    elif readme_file_ext == '':
        readme_section_name = 'readme'
    else:
        # NB: previously referenced the misspelled name "readne_file_ext",
        # which raised NameError whenever this branch was hit.
        raise ValueError('unsupported readme type: "%s"' % readme_file_ext)

    # description file is in place as text (so read it), tar file is a file
    body = OrderedDict([('metadata',description_file.read()), ('tarball',tar_file), (readme_section_name, readme_file)])
    headers = { }
    body, headers = multipart_form_encode(body, headers, uuid.uuid4().hex)

    auth = _registryAuthFilter()
    resource = Resource(url, pool=connection_pool.getPool(), filters=[auth])
    response = resource.put(
        headers = headers,
        payload = body
    )
    return None
@_friendlyAuthError
@_handleAuth
def listOwners(namespace, name):
    ''' List the owners of a module or target (owners are the people with
        permission to publish versions and add/remove the owners).

        Returns the parsed JSON owner list, or None when the module/target
        does not exist (a 404 is logged, not raised).
    '''
    url = '%s/%s/%s/owners' % (
        Registry_Base_URL,
        namespace,
        name
    )

    auth = _registryAuthFilter()
    resource = Resource(url, pool=connection_pool.getPool(), filters=[auth])
    try:
        response = resource.get()
    except restkit_errors.ResourceNotFound as e:
        logger.error('no such %s, "%s"' % (namespace, name))
        return None

    return ordered_json.loads(response.body_string())
@_friendlyAuthError
@_handleAuth
def addOwner(namespace, name, owner):
    ''' Add an owner for a module or target (owners are the people with
        permission to publish versions and add/remove the owners).

        A missing module/target (404) is logged rather than raised.
    '''
    url = '%s/%s/%s/owners/%s' % (
        Registry_Base_URL,
        namespace,
        name,
        owner
    )

    auth = _registryAuthFilter()
    resource = Resource(url, pool=connection_pool.getPool(), filters=[auth])
    try:
        response = resource.put()
    except restkit_errors.ResourceNotFound as e:
        logger.error('no such %s, "%s"' % (namespace, name))
@_friendlyAuthError
@_handleAuth
def removeOwner(namespace, name, owner):
    ''' Revoke *owner*'s ownership of a module or target. Owners are the
        people allowed to publish versions and to add/remove owners.
        Logs an error (and returns None) if the module/target is unknown.
    '''
    owners_url = '%s/%s/%s/owners/%s' % (Registry_Base_URL, namespace, name, owner)
    resource = Resource(owners_url, pool=connection_pool.getPool(),
                        filters=[_registryAuthFilter()])
    try:
        resource.delete()
    except restkit_errors.ResourceNotFound:
        logger.error('no such %s, "%s"' % (namespace, name))
def deauthorize():
    ''' Discard any locally stored key pair (clears both halves of the
        'keys' setting, private first, then public). '''
    for key_name in ('private', 'public'):
        if settings.getProperty('keys', key_name):
            settings.setProperty('keys', key_name, '')
def getPublicKey():
    ''' Return the user's public key (generating and saving a new key pair if necessary) '''
    pubkey_hex = settings.getProperty('keys', 'public')
    if not pubkey_hex:
        # BUG FIX: the previous version generated a key pair inline here and
        # then unconditionally regenerated it via _generateAndSaveKeys(),
        # doing the (expensive) RSA key generation twice and discarding the
        # first pair. Generate once, only when no key is stored.
        pubkey_hex, privatekey_hex = _generateAndSaveKeys()
    # wire format expects the DER-decoded public key
    return _pubkeyWireFormat(RSA.importKey(binascii.unhexlify(pubkey_hex)))
def testLogin():
    ''' Perform an authenticated GET of /users/me on the registry to
        exercise the stored credentials. '''
    url = '%s/users/me' % (
        Registry_Base_URL
    )
    resource = Resource(url, pool=connection_pool.getPool(),
                        filters=[_registryAuthFilter()])
    logger.debug('test login...')
    response = resource.get(headers={})
def getAuthData():
    ''' Poll the registry to get the result of a completed authentication
        (which, depending on the authentication the user chose or was directed
        to, will include a github or other access token)
    '''
    url = '%s/tokens' % (
        Registry_Base_URL
    )
    resource = Resource(url, pool=connection_pool.getPool(),
                        filters=[_registryAuthFilter()])
    try:
        logger.debug('poll for tokens...')
        response = resource.get(headers={})
    except (restkit_errors.Unauthorized,
            restkit_errors.ResourceNotFound,
            restkit_errors.RequestFailed) as e:
        # any of these simply means "no tokens available yet"
        logger.debug(str(e))
        return None
    body = response.body_string()
    logger.debug('auth data response: %s' % body)
    tokens = {}
    for token in ordered_json.loads(body):
        if token['provider'] == 'github':
            tokens['github'] = token['accessToken']
            break
    logger.debug('parsed auth tokens %s' % tokens)
    return tokens
def openBrowserLogin(provider=None):
    ''' Open the web browser on the registry login page, optionally
        pre-selecting the given authentication provider. '''
    if provider:
        # BUG FIX: the query string previously hard-coded 'github',
        # silently ignoring whichever provider was passed in.
        query = '?provider=' + provider
    else:
        query = ''
    webbrowser.open(Website_Base_URL + '/#login/' + getPublicKey() + query)
| [
"James.Crosby@arm.com"
] | James.Crosby@arm.com |
cbf9d28017e06e9d320aa9a46bf5ac75ae0e0080 | ddf0e4e91da6e10f0fa91961c57e8778cf002b42 | /03/tags.py | 69e11335920d7795384a197d780c7b0f9520eada | [] | no_license | joshsteveth/challenges | f0f189f212b99c37d75cdff71084a1ea2e144491 | 64bf50ead275e2248ed1347abb9219fcc8a58361 | refs/heads/master | 2020-03-27T10:45:02.950475 | 2018-09-07T07:45:54 | 2018-09-07T07:45:54 | 146,442,540 | 0 | 0 | null | 2018-09-07T07:45:55 | 2018-08-28T12:14:27 | Python | UTF-8 | Python | false | false | 1,686 | py | TOP_NUMBER = 10
RSS_FEED = 'rss.xml'  # local RSS dump parsed by get_tags()
SIMILAR = 0.87  # similarity-ratio threshold used by get_similarities()
import xml.etree.ElementTree as ET
import re
import collections
from itertools import product
from difflib import SequenceMatcher
# NOTE(review): TAG_HTML is compiled but never used in the rest of this
# script (tags are extracted via ElementTree) -- candidate for removal.
TAG_HTML = re.compile(r'<category>([^<]+)</category>')
def get_tags(feed=None):
    """Find all tags in *feed* (default: RSS_FEED).

    A tag is the text of a <category> element, lowercased and with
    dashes replaced by whitespace. *feed* may be a filename or any
    file-like object accepted by ElementTree.parse().
    """
    if feed is None:
        feed = RSS_FEED
    root = ET.parse(feed).getroot()
    # iter('category') yields exactly the elements whose tag matches,
    # in document order -- same result as filtering root.iter() manually.
    return [el.text.lower().replace('-', ' ') for el in root.iter('category')]
def get_top_tags(tags, n=None):
    """Get the *n* most common tags (default: TOP_NUMBER).

    Idiom fix: Counter counts directly; the previous manual
    dict-increment loop duplicated what Counter already does.
    Tie-breaking (first-seen order) is unchanged.
    """
    if n is None:
        n = TOP_NUMBER
    return collections.Counter(tags).most_common(n)
def get_similarities(tags, threshold=None):
    """Find tag pairs with similarity ratio >= *threshold* (default SIMILAR).

    Returns a list of sorted two-element lists, deduplicated, in the
    order pairs are first found. The threshold is now a parameter
    (backward compatible: callers passing only *tags* keep the old
    SIMILAR cutoff).
    """
    if threshold is None:
        threshold = SIMILAR
    result = []
    for a, b in product(tags, repeat=2):
        # skip identical strings
        if a == b:
            continue
        pair = sorted((a, b))
        # skip pairs already recorded as similar
        if pair in result:
            continue
        if SequenceMatcher(None, pair[0], pair[1]).ratio() >= threshold:
            result.append(pair)
    return result
# Entry point: print the most common tags, then pairs of near-duplicate tags.
if __name__ == "__main__":
    tags = get_tags()
    top_tags = get_top_tags(tags)
    print('* Top {} tags:'.format(TOP_NUMBER))
    for tag, count in top_tags:
        print('{:<20} {}'.format(tag, count))
    # NOTE(review): converting the pair list to a dict keeps only the last
    # partner when one tag is similar to several others -- confirm that is
    # acceptable before relying on this output.
    similar_tags = dict(get_similarities(tags))
    print()
    print('* Similar tags:')
    for singular, plural in similar_tags.items():
        print('{:<20} {}'.format(singular, plural))
| [
"joshua-steven.theopillus@tu-ilmenau.de"
] | joshua-steven.theopillus@tu-ilmenau.de |
9b35be5ea6f3061ce4b5a90c2ada6c6ad59995f5 | a0cda18d0e73c61f9ccbbf2d14f278bb0a07cc31 | /snakai/strategy/qlearning/state_encoder/__init__.py | e5a34bd486d5faa28d0f68adab5f558f31af7806 | [
"Apache-2.0"
] | permissive | fseasy/snakai | c029d6c04abf65635e30b4bd06d0391dacc154b0 | 361ee03680a6cf44a6df6b644bfe96a2d48149cd | refs/heads/master | 2023-08-29T22:05:25.640539 | 2021-11-06T13:22:08 | 2021-11-06T13:22:08 | 70,814,697 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 63 | py | # -*- coding: utf-8 -*-
from .state_encoder import StateEncoder | [
"readonlyfile@hotmail.com"
] | readonlyfile@hotmail.com |
484abace33c9beb118a8272843f86bd68c449ba9 | eba28541c3778a0c0ef7179a19e6d81ff012e614 | /Kivy - Interactive Applications and Games in Python Second Edition_Code/Chapter_03_code/08 - Kivy and its properties/drawingspace.py | 728b5853dce6abb6c8df2d50085f2e1abf2cd873 | [] | no_license | dzendjo/WMBot | af8219b71a99c9e9045cc7b3e9371ed0cb406aca | db463cf5fd6ae48566f7cd1773a3171bcde8b8c2 | refs/heads/master | 2021-05-10T12:21:50.932529 | 2018-01-24T16:40:32 | 2018-01-24T16:40:32 | 118,439,040 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 249 | py | # File name: drawingspace.py
import kivy
kivy.require('1.9.0')
from kivy.uix.relativelayout import RelativeLayout
class DrawingSpace(RelativeLayout):
    """Relative layout used as the application's drawing area."""

    def on_children(self, instance, value):
        # Kivy property-change callback: keep the status bar counter in
        # sync with the number of widgets in the drawing space.
        # NOTE(review): assumes `self.status_bar` is assigned elsewhere
        # (e.g. from the kv file) -- confirm before reuse.
        self.status_bar.counter = len(self.children)
| [
"andrii.belon@gmail.com"
] | andrii.belon@gmail.com |
660d3a7974d1dfa4ecbaaca6f579b2be184fe0ed | be8f128c4008a16361af50f4bca1d4f66a603555 | /111-data_ana_new/Fig5h_S11_codes.py | 5ef16a876b44fa410eada3a0448069c2f24f0961 | [] | no_license | Ji2020/my_python_code | e290e72b4f9128921d6cc73a4dd80fe68a63d4c3 | aa05f1ccb39313eced05a84004f38230e3d045f9 | refs/heads/master | 2020-12-28T15:05:58.420951 | 2020-02-03T08:14:24 | 2020-02-03T08:14:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 24,731 | py | #!/usr/bin/env python
# coding: utf-8
# 整理一下师兄MOSD文章中我在Fig5h和S11数据分析中用到的代码
#
# - 备注:代码还没来得及清理得好看一点;
# - 备注2:是的中间很多步没必要手动做,但是需要强迫自己检查一遍数据有没有哪里代码错漏了特殊情况...所以就保留了几个手动步骤
# In[ ]:
import pandas as pd
import numpy as np
from scipy import stats
import seaborn as sns
import matplotlib.pyplot as plt
# 可能需要先把原始数据拼起来嗯
# In[ ]:
# Concatenate the raw area tables from the two recording batches
# (mice 22-24 and 25-28) into one CSV for the downstream analysis.
data_22_24 = pd.read_csv("area_22_24_Feb22.csv")
data_25_28 = pd.read_csv("area_25_28_Feb23.csv")
# NOTE(review): DataFrame.append is deprecated in recent pandas;
# pd.concat([...], ignore_index=True) is the modern equivalent.
data_merge = data_22_24.append(data_25_28, ignore_index=True)
data_merge.to_csv("area_22_28_toFeb23.csv")
# 将师兄给的原始数据分开channel并添加MuscleName:
#
# - 注意!other tags在mouse20开始是整型,之前都是字符串形式,记得改引号!
# In[ ]:
#分开channel1和channel2,去掉噪音对应的channel,并加上对应的肌肉名的 转换格式的代码
def procdf(data_input):
    """Split the raw two-channel table into per-channel rows with muscle names.

    Channel1/Channel2 columns become a single 'Result' column (with a
    'ChannelNum' tag), and 'MuscleName' is filled in from mouse-specific
    rules; rows that match no rule keep an empty MuscleName and are
    treated as noise and dropped.
    """
    #data_output = pd.DataFrame(columns=['VoltNum', 'Trial', 'leftorright', 'Strain', 'miceNum', 'elePosition', 'ledNum', 'MuscleName', 'Result'])
    # Make two copies of the input: one will keep channel-1 data, the other channel-2.
    data_channel1 = data_input.copy()
    data_channel1["MuscleName"] = ""
    data_channel2 = data_channel1.copy()
    # process channel 1
    data_channel1 = data_channel1.drop(columns=['Channel2']) # drop the channel-2 column
    data_channel1 = data_channel1.rename(columns={"Channel1": "Result"})
    data_channel1['ChannelNum'] = 1
    ################### NOTE: from mouse 20 onward, otherTags is stored as an integer;
    # earlier recordings used strings -- remember to adjust the quotes accordingly!
    data_channel1.loc[(data_channel1.elePosition == 'C7') & ((data_channel1.miceNum == 1)|(data_channel1.miceNum == 3)|(data_channel1.miceNum == 4)), 'MuscleName'] = 'tricep'
    data_channel1.loc[(data_channel1.elePosition == 'C7') & (data_channel1.miceNum > 4) & (data_channel1.otherTags == 1), 'MuscleName'] = 'pectoralis'
    data_channel1.loc[(data_channel1.elePosition == 'C7') & (data_channel1.miceNum > 4) & (data_channel1.otherTags == 2), 'MuscleName'] = 'tricep'
    data_channel1.loc[(data_channel1.elePosition == 'C7') & (data_channel1.miceNum == 8)& (data_channel1.otherTags == 3), 'MuscleName'] = 'tricep'
    data_channel1.loc[(data_channel1.elePosition == 'C7') & (data_channel1.miceNum == 9)& (data_channel1.otherTags == 3), 'MuscleName'] = 'flexor carpi'
    # data_channel1.loc[(data_channel1.elePosition == 'Sc') & (data_channel1.miceNum != 2), 'MuscleName'] = 'tibialis anterior'
    # print(data_channel1)
    # process channel 2
    data_channel2 = data_channel2.drop(columns=['Channel1'])
    data_channel2 = data_channel2.rename(columns={"Channel2": "Result"}) #index=str,
    data_channel2['ChannelNum'] = 2
    data_channel2.loc[(data_channel2.elePosition == 'C7') & ((data_channel2.miceNum == 1)|(data_channel2.miceNum == 3)|(data_channel2.miceNum == 4)), 'MuscleName'] = 'extensor carpi'
    data_channel2.loc[(data_channel2.elePosition == 'C7') & (data_channel2.miceNum == 2) & (data_channel2.otherTags == 2), 'MuscleName'] = 'extensor carpi'
    data_channel2.loc[(data_channel2.elePosition == 'C7') & (data_channel2.miceNum == 2) & (data_channel2.otherTags == 1), 'MuscleName'] = 'tricep'
    data_channel2.loc[(data_channel2.elePosition == 'C7') & (data_channel2.miceNum > 4) & (data_channel2.otherTags == 1), 'MuscleName'] = 'extensor carpi'
    data_channel2.loc[(data_channel2.elePosition == 'C7') & (data_channel2.miceNum > 4) & (data_channel2.otherTags == 2), 'MuscleName'] = 'digitorum'
    data_channel2.loc[(data_channel2.elePosition == 'C7') & (data_channel2.miceNum == 8) & (data_channel2.otherTags == 3), 'MuscleName'] = 'flexor carpi'
    data_channel2.loc[(data_channel2.elePosition == 'C7') & (data_channel2.miceNum == 9) & (data_channel2.otherTags == 3), 'MuscleName'] = 'extensor carpi'
    data_channel2.loc[(data_channel2.elePosition == 'C7') & (data_channel2.miceNum > 9) & (data_channel2.otherTags == 3), 'MuscleName'] = 'flexor carpi' # rule added Feb 21: for otherTags == 3, channel 2 is always flexor carpi
    # data_channel2.loc[(data_channel2.elePosition == 'Sc') & (data_channel2.miceNum == 2) & ((data_channel2.otherTags == 1)|(data_channel2.otherTags == 'jingqian')), 'MuscleName'] = 'tibialis anterior'
    # data_channel2.loc[(data_channel2.elePosition == 'Sc') & (data_channel2.miceNum == 2) & ((data_channel2.otherTags == 2)|(data_channel2.otherTags == 'feichang')), 'MuscleName'] = 'gastrocnemius'
    # data_channel2.loc[(data_channel2.elePosition == 'Sc') & (data_channel2.miceNum != 2), 'MuscleName'] = 'gastrocnemius'
    # join rows
    data_output = data_channel1.append(data_channel2, ignore_index=True) # ignore_index=True renumbers rows consecutively instead of keeping each frame's own 0-based index
    data_output = data_output[data_output.MuscleName != ""] # rows with no muscle name assigned are, in principle, all noise
    return(data_output)
########################### main script starts here #########################################
# Read the merged raw table, split it by channel / muscle, and save the result.
data_input = pd.read_csv("Area_result_all-0220.csv")
# print(data_input.otherTags.unique())
data_1 = procdf(data_input)
data_1.to_csv("Area_result_all-0220_spltchan.csv") #, sep='\t'
#print(datareshape_area)
# 然后加trial平均并做归一化(记得归一化根据想要如何解读结果可能需要改)
# In[ ]:
# about 'ledall3': re-run and verified, safe to use
# Input is the CSV produced by the channel-splitting step above.
data1 = pd.read_csv("area_22_28_spltchan.csv") # the file name here must match the one saved by the previous cell's .to_csv()
# if the ledNum-to-number conversion code was run, the file name is "Area_result_to_mat.csv" instead
# drop the 'ledall*' conditions (all-LED stimulation) -- only single-LED data is kept
data1 = data1[(data1.elePosition == "C7") & (data1.ledNum != 'ledall') & (data1.ledNum != 'ledall1') & (data1.ledNum != 'ledall2') & (data1.ledNum != 'led1all3') & (data1.ledNum != 'ledall3')] #
mouse = data1.miceNum.unique() # unique() returns each distinct value of a column exactly once
# print(mouse)
muscle = data1.MuscleName.unique()
# print(muscle)
led = data1.ledNum.unique()
# print(led)
volt = data1.VoltNum.unique()
# Add one synthetic row with Trails == 0 per (mouse, muscle, voltage, LED)
# combination, holding the across-trial mean of Result.
for m in range(len(mouse)):
    for ms in range(len(muscle)):
        for v in range(len(volt)):
            for l in range(len(led)):
                data_temp = data1[(data1.ledNum == led[l]) & (data1.VoltNum == volt[v]) & (data1.miceNum == mouse[m]) & (data1.MuscleName == muscle[ms])]
                if data_temp.empty:
                    continue
                new_row = data_temp.loc[data_temp.Trails == 1,:] # copy an arbitrary trial row just to fill in every column other than Result and Trails
                new_row["Trails"] = 0
                new_row["Result"] = data_temp.Result.mean()
                data1 = data1.append(new_row,ignore_index=True)
# Normalization: add a new column.
data1["normResponse_eachmouse_muscle"]= 5 # initialization: fixes the dtype and uses a value outside [0, 1] to mark "not computed" (only the averaged rows get normalized; other rows keep the sentinel)
for m in range(len(mouse)):
    for ms in range(len(muscle)):
        data_temp = data1[(data1.miceNum == mouse[m]) & (data1.MuscleName == muscle[ms])] # no "& (data1.Trails == 0)" here: we want the maximum activation seen anywhere in the experiment
        max_temp = data_temp.Result.unique().max() # largest value for this mouse and muscle, used as the normalization denominator
        data1.loc[(data1.miceNum == mouse[m]) & (data1.MuscleName == muscle[ms]), "normResponse_eachmouse_muscle"] = data1.Result/max_temp
# save the result
data1.to_csv("area_22_28_added_avg_n_norm.csv")
# 算selectivity index(最大-第二大)/(最大+第二大),算最大与第二大之间的Wilcoxon P值
#
# (这个应该可以用;不过下面这段代码我做的时候清理过,清理后的暂时找不到了,等找到补上来...)
# In[ ]:
# Compute a selectivity index comparing the largest vs. second-largest
# muscle response: SI = (max - max2) / (max + max2), plus a Wilcoxon
# rank-sum test between the two muscles' per-trial values.
data_alltrials = pd.read_csv("area_upto28_added_avg_n_norm.csv")
# collect the distinct values of each grouping column for the loops below
mouse =data_alltrials.miceNum.unique()
led = data_alltrials.ledNum.unique()
volt = data_alltrials.VoltNum.unique()
muscle = data_alltrials.MuscleName.unique()
# (removed for readability: previously commented-out initializations for
# one-way ANOVA / Kruskal-Wallis / Levene result columns)
data_alltrials["SelectivityIndex_2"] = -1
data_alltrials["wilcoxon_P"] = -1
data_alltrials["wilcoxon_statistic"] = -1
data_alltrials["wilcoxon_1-P"] = -1
# also record which muscle had the largest / second-largest response
data_alltrials["optimalMuscle"] = "none"
data_alltrials["secondaryMuscle"] = "none"
for m in range(len(mouse)):
    for l in range(len(led)):
        for v in range(len(volt)):
            data_temp = data_alltrials[(data_alltrials.miceNum == mouse[m]) & (data_alltrials.ledNum == led[l]) & (data_alltrials.VoltNum == volt[v])]
            # only combinations where all 5 muscles were recorded are analyzed
            if (len(data_temp.MuscleName.unique()) != 5):
                continue
            # copy an arbitrary averaged row just to fill the other columns
            # (only the columns looped over matter; the rest are dropped later)
            new_row = data_temp.loc[((data_temp.Trails == 0)&(data_temp.MuscleName == "tricep")),:]
            new_row["MuscleName"] = "selectivity_2"
            # (removed for readability: previously commented-out one-way
            # ANOVA, Kruskal-Wallis and Levene computations over the five
            # muscles' per-trial values)
            # Take the trial-averaged rows (Trails == 0) to find the
            # best/second-best muscles and compute the DSI-like index.
            temp_avg = data_temp[(data_temp.Trails == 0)] # does not include new_row, since new_row was never appended to data_temp
            all_temp = np.sort(temp_avg.normResponse_eachmouse_muscle.unique())
            # NOTE(review): this compares the loop index v, not the voltage
            # value (VoltNum == 3), so the debug print fires for the 4th
            # voltage level regardless of its value -- the author's trailing
            # note at the bottom of this cell confirms this was unintended.
            if v==3:
                print(all_temp)
            max_temp = all_temp[4]
            # min_temp = temp_avg.normResponse_eachmouse_muscle.unique().min()
            max2_temp = all_temp[3]
            new_row["SelectivityIndex_2"] = (max_temp - max2_temp)/(max_temp + max2_temp)
            # also record which muscles the max / second max belong to
            # (there may be a better way, but this is fast enough)
            temp_max = temp_avg[temp_avg["normResponse_eachmouse_muscle"] == max_temp]
            new_row["optimalMuscle"] = temp_max.MuscleName.unique()[0] # [0] because there can be duplicate rows (apparently different otherTags values are counted separately)
            temp_max2 = temp_avg[temp_avg["normResponse_eachmouse_muscle"] == max2_temp]
            new_row["secondaryMuscle"] = temp_max2.MuscleName.unique()[0]
            # Wilcoxon rank-sum between the best and second-best muscles'
            # per-trial values (Trails != 0)
            wilcoxon_temp = stats.ranksums(data_temp[(data_temp['MuscleName'] == temp_max.MuscleName.unique()[0]) & (data_temp.Trails != 0)].normResponse_eachmouse_muscle.unique(),
                            data_temp[(data_temp['MuscleName'] == temp_max2.MuscleName.unique()[0]) & (data_temp.Trails != 0)].normResponse_eachmouse_muscle.unique())
            new_row["wilcoxon_1-P"] = 1-wilcoxon_temp[1]
            new_row["wilcoxon_P"] = wilcoxon_temp[1]
            new_row["wilcoxon_statistic"] = wilcoxon_temp[0]
            data_alltrials = data_alltrials.append(new_row,ignore_index=True)
# keep only the synthetic selectivity rows and the relevant columns
data_anovas1 = data_alltrials[(data_alltrials.MuscleName == "selectivity_2")].copy()
data_anovas = data_anovas1[["miceNum", "ledNum", "VoltNum", "SelectivityIndex_2", "optimalMuscle", "secondaryMuscle",
                "wilcoxon_P", "wilcoxon_statistic", "wilcoxon_1-P"]].copy()
# the two lines above could probably be merged, but merging them raised an
# error, so they are kept separate for now
data_anovas.to_csv("area_upto28_norm_selectivity2_dsi.csv")
# one value of this output was checked by hand and is correct; the printed
# debug output looked odd, though...
# aha! because v==3 is not the same as VoltNum==3
# 顺便看一下所有小鼠所有肌肉所有给光组合的selectivity index的分布情况
# In[ ]:
# 做直方图:
data_hist = pd.read_csv("area_upto28_norm_selectivity2_dsi.csv")
all_si2 = data_hist.SelectivityIndex_2.unique()
all_si2_median = np.median(all_si2)
all_si2_std = np.std(all_si2)
all_si2_mean = np.std(all_si2)
print("median:")
print(all_si2_median)
print("mean:")
print(all_si2_mean)
print("std:")
print(all_si2_std)
cutoff = all_si2_median + 2 * all_si2_std
sns.distplot(all_si2, kde=False, bins=20, fit=stats.expon, norm_hist = True)
plt.axvline(x=cutoff, color = "b")
plt.xlim(0,1)
plt.ylabel("Density")
plt.xlabel("selectivity index = (max-second_max)/(max+second_max)")
print("cutoff = median + 2 * std:")
print(cutoff)
# (mu, sigma) = stats.norm.fit(e_t_hat)
# print "mu={0}, sigma={1}".format(mu, sigma)
plt.savefig("selectivity2_distribution_expon-fitted_median2std.png")
plt.savefig("selectivity2_distribution_expon-fitted_median2std.eps")
plt.show()
# 做Fig 5h的柱状图,顺便算ANOVA
# In[ ]:
# One subplot per "optimal" muscle group (Fig 5h style bar plots), followed
# by a one-way ANOVA across the five muscles within each group.
#
# The original cell repeated the same plotting and ANOVA code five times,
# once per muscle; it is collapsed into loops here. The printed output and
# saved figures are unchanged.
data_grouped = pd.read_csv("selectivity2_groupbyoptmuscle.csv")
f, axarr = plt.subplots(5,1,figsize=(5.5,10)) # layout: rows, cols, figsize=(width, height)
f.tight_layout() # keep subplots from overlapping
muscle = ["pectoralis", "tricep", "extensor carpi", "flexor carpi", "digitorum"]
print(muscle)
# Plot each optimal-muscle group; keep the subset for the ANOVA below.
subsets = {}
for idx, opt in enumerate(muscle):
    plt.subplot(5, 1, idx + 1)
    subset = data_grouped[(data_grouped.optimalMuscle == opt)]
    subsets[opt] = subset
    # ci=68 approximates mean +/- SEM (the default is a bootstrapped 95% CI)
    sns.barplot(x="MuscleName", y="normResponse_eachmouse_muscle", data=subset,
                ci=68, color="lightcoral", errwidth=2, capsize=.1, order=muscle)
    sns.stripplot(x="MuscleName", y="normResponse_eachmouse_muscle", data=subset,
                  color="dimgrey", size=3, order=muscle)
    plt.xlabel(opt)
    plt.ylabel("normalized area")
plt.savefig("groupbymuscle.png")
plt.savefig("groupbymuscle.eps")
plt.show()
# One-way ANOVA per group, comparing the per-trial normalized responses
# (Trails != 0) across the five recorded muscles.
for opt in muscle:
    print(opt)
    group_data = subsets[opt]
    groups = [group_data[(group_data['MuscleName'] == ms) & (group_data.Trails != 0)].normResponse_eachmouse_muscle.unique()
              for ms in muscle]
    print(stats.f_oneway(*groups))
| [
"1733473428@qq.com"
] | 1733473428@qq.com |
788bb56b328d68ec06846bebf6bfaf2328a15e4c | 6c129c996d20bc5740aa7c69b52b5d714543bc91 | /otaa_node.py | c5953b781f0785b631cb5ef7a4344833a190aab9 | [] | no_license | John-Shan/EECE-PROJECT | b49e79989fcc46f621cff229bd7dd5e661617952 | 04f31e9c8398bfab8b432f7e66d9b436b033d635 | refs/heads/master | 2022-05-21T17:27:59.065027 | 2020-04-23T01:40:07 | 2020-04-23T01:40:07 | 258,064,139 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,077 | py | #!/usr/bin/env python
#
# Copyright (c) 2019, Pycom Limited.
#
# This software is licensed under the GNU GPL version 3 or any
# later version, with permitted additional terms. For more information
# see the Pycom Licence v1.0 document supplied with this file, or
# available at https://www.pycom.io/opensource/licensing
#
""" OTAA Node example compatible with the LoPy Nano Gateway """
from network import LoRa
import socket
import binascii
import struct
import time
import config
# initialize LoRa in LORAWAN mode.
# Please pick the region that matches where you are using the device:
# Asia = LoRa.AS923
# Australia = LoRa.AU915
# Europe = LoRa.EU868
# United States = LoRa.US915
lora = LoRa(mode=LoRa.LORAWAN, region=LoRa.EU868)
# create an OTA authentication params
# NOTE(review): real device credentials (dev EUI / app EUI / app key)
# should not be hard-coded and committed to version control.
dev_eui = binascii.unhexlify('30AEA4FFFE74C2D0')
app_eui = binascii.unhexlify('70B3D57ED002535E')
app_key = binascii.unhexlify('FF4A690B2C496494582A3AB1C6ECBC1D')
# set the 3 default channels to the same frequency (must be before sending the OTAA join request)
lora.add_channel(0, frequency=config.LORA_FREQUENCY, dr_min=0, dr_max=5)
lora.add_channel(1, frequency=config.LORA_FREQUENCY, dr_min=0, dr_max=5)
lora.add_channel(2, frequency=config.LORA_FREQUENCY, dr_min=0, dr_max=5)
# join a network using OTAA
lora.join(activation=LoRa.OTAA, auth=(dev_eui, app_eui, app_key), timeout=0, dr=config.LORA_NODE_DR)
# wait until the module has joined the network
while not lora.has_joined():
    time.sleep(2.5)
    print('Not joined yet...')
# remove all the non-default channels
for i in range(3, 16):
    lora.remove_channel(i)
# create a LoRa socket
s = socket.socket(socket.AF_LORA, socket.SOCK_RAW)
# set the LoRaWAN data rate
s.setsockopt(socket.SOL_LORA, socket.SO_DR, config.LORA_NODE_DR)
# make the socket non-blocking
s.setblocking(False)
time.sleep(5.0)
# Send 200 numbered test packets; after each send, wait 4 s and check for
# a downlink (the socket is non-blocking, so recvfrom returns immediately).
for i in range (200):
    pkt = b'PKT #' + bytes([i])
    print('Sending:', pkt)
    s.send(pkt)
    time.sleep(4)
    rx, port = s.recvfrom(256)
    if rx:
        print('Received: {}, on port: {}'.format(rx, port))
    time.sleep(6)
| [
"noreply@github.com"
] | noreply@github.com |
c8c8edf00fcd24eaf70bbbe8536996f048844169 | a42bddd6499e77f0dac4aecaa30d0eafaabbcdc6 | /backend/fan_zone/tests.py | db7906a213cacaac36f2397dc314803ea0fa2fd0 | [] | no_license | miljanic/cinetubbies | 2277bb20388046fbb9ecf466b6c4d86ae8fee725 | 2dbbda36db14853ee3f51c2ac5c3988d7401cae7 | refs/heads/master | 2021-09-17T04:34:13.424442 | 2018-06-27T18:33:03 | 2018-06-27T18:33:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 30,470 | py | from django.test import TestCase
# Create your tests here.
from rest_framework.test import APITestCase
from authentication.serializers import TheaterAdminSerializer
from authentication.serializers import AdminSerializer
from theaters.serializers import AdministrationSerializer as TheaterSerializer
from .categories.models import Category
from .categories.serializers import AdministrationSerializer as \
AdminCategorySerializer
from .props.models import Prop
from .props.official.serializers import RestrictedSerializer as \
RestrictedOfficialPropSerializer
from .props.used.serializers import MemberSerializer as MemberUsedPropSerializer
class PublicCategoryAPI(APITestCase):
  """Tests for the unauthenticated (public) category endpoints."""
  # Fixture payloads: a root category and a subcategory pointing at it (id 1).
  test_category = {
    'name': 'cat',
    'supercategory': None
  }
  test_subcategory = {
    'name': 'cat',
    'supercategory': 1
  }
  def setUp(self):
    """Create one root category (id 1) and one subcategory (id 2)."""
    serializer = AdminCategorySerializer(data=self.test_category)
    if not serializer.is_valid():
      raise Exception(serializer.errors)
    serializer.save()
    serializer = AdminCategorySerializer(data=self.test_subcategory)
    if not serializer.is_valid():
      raise Exception(serializer.errors)
    serializer.save()
  def test_list(self):
    """GET /categories/ returns every category; an empty DB yields an empty list."""
    response = self.client.get('http://localhost:8000/api/props/categories/')
    self.assertEqual(response.status_code, 200)
    self.assertTrue(response.data)
    self.assertEqual(len(response.data), 2)
    Category.objects.all().delete()
    response = self.client.get('http://localhost:8000/api/props/categories/')
    self.assertEqual(response.status_code, 200)
    self.assertFalse(response.data)
  def test_retrieve(self):
    """GET /categories/<id> returns that category; an unknown id gives 404."""
    response = self.client.get('http://localhost:8000/api/props/categories/1')
    self.assertEqual(response.status_code, 200)
    self.assertTrue(response.data)
    self.assertEqual(self.test_category['name'], response.data['name'])
    response = self.client.get('http://localhost:8000/api/props/categories/2')
    self.assertEqual(response.status_code, 200)
    self.assertTrue(response.data)
    self.assertEqual(self.test_subcategory['name'], response.data['name'])
    response = self.client.get('http://localhost:8000/api/props/categories/99')
    self.assertEqual(response.status_code, 404)
class AdminCategoryAPI(APITestCase):
    """Permission tests for the category administration endpoints.

    Only the system administrator may create, update or delete categories;
    theater admins and fan-zone admins get 403 and anonymous callers 401.

    Fix: the put() helper previously called self.client.delete(), so
    test_update was silently exercising the DELETE handler instead of PUT.
    It now issues a real PUT request.
    """
    # Fixture category; expected auto pk 1.
    test_category = {
        'name': 'cat',
        'supercategory': None
    }
    test_fan_zone_admin = {
        'username': 'admin2',
        'password': '123456',
        'email': 'admin2@test.com',
        'role': 'fan_zone_admin',
        'theater': '',
    }
    test_theater_admin = {
        'username': 'admin',
        'password': '123456',
        'email': 'admin@test.com',
        'role': 'cinema_admin',
        'theater': '',
    }
    test_system_admin = {
        'username': 'sysadmin',
        'password': '123456',
        'role': 'admin',
        'email': 'sysadmin@test.com',
    }
    def login(self, user):
        """Authenticate the test client as *user* via the JWT login endpoint."""
        response = self.client.post(
            path='http://localhost:8000/api/auth/login/',
            data={
                'username': user['username'],
                'password': user['password']
            },
            format='json'
        )
        self.client.credentials(HTTP_AUTHORIZATION='JWT ' + response.data['token'])
    def post(self, data):
        """POST a category payload."""
        return self.client.post(
            path='http://localhost:8000/api/props/categories/',
            data=data,
            format='json'
        )
    def delete(self, id):
        """DELETE the category with the given pk."""
        return self.client.delete(
            path="http://localhost:8000/api/props/categories/" + str(id)
        )
    def put(self, id, data):
        """PUT a category payload to the given pk.

        Was self.client.delete() — a copy/paste bug that made the update
        tests hit the DELETE handler.
        """
        return self.client.put(
            path="http://localhost:8000/api/props/categories/" + str(id),
            data=data,
            format='json'
        )
    def setUp(self):
        # One category plus one user per role; serializer errors abort
        # the test run immediately.
        serializer = AdminCategorySerializer(data=self.test_category)
        if not serializer.is_valid():
            raise Exception(serializer.errors)
        serializer.save()
        serializer = AdminSerializer(data=self.test_fan_zone_admin)
        if not serializer.is_valid():
            raise Exception(serializer.errors)
        serializer.save()
        serializer = TheaterAdminSerializer(data=self.test_theater_admin)
        if not serializer.is_valid():
            raise Exception(serializer.errors)
        serializer.save()
        serializer = AdminSerializer(data=self.test_system_admin)
        if not serializer.is_valid():
            raise Exception(serializer.errors)
        serializer.save()
    def test_create(self):
        subcategory = {
            'name': 'cat',
            'supercategory': 1
        }
        # Anonymous -> 401; non-system admins -> 403; system admin -> 200.
        response = self.post(subcategory)
        self.assertEqual(response.status_code, 401)
        self.login(self.test_fan_zone_admin)
        response = self.post(subcategory)
        self.assertEqual(response.status_code, 403)
        self.login(self.test_theater_admin)
        response = self.post(subcategory)
        self.assertEqual(response.status_code, 403)
        self.login(self.test_system_admin)
        response = self.post(subcategory)
        self.assertEqual(response.status_code, 200)
        self.assertTrue(response.data)
        # Incomplete payloads must be rejected with 400.
        subcategory = {
            'supercategory': 1
        }
        response = self.post(subcategory)
        self.assertEqual(response.status_code, 400)
        subcategory = {
            'name': 'cat',
        }
        response = self.post(subcategory)
        self.assertEqual(response.status_code, 400)
        subcategory = {
        }
        response = self.post(subcategory)
        self.assertEqual(response.status_code, 400)
    def test_destroy(self):
        response = self.delete(1)
        self.assertEqual(response.status_code, 401)
        self.login(self.test_fan_zone_admin)
        response = self.delete(1)
        self.assertEqual(response.status_code, 403)
        self.login(self.test_theater_admin)
        response = self.delete(1)
        self.assertEqual(response.status_code, 403)
        self.login(self.test_system_admin)
        response = self.delete(1)
        self.assertEqual(response.status_code, 200)
        self.assertTrue(response.data)
        # Deleting a nonexistent category is a 404.
        response = self.delete(99)
        self.assertEqual(response.status_code, 404)
    def test_update(self):
        category = {
            'name': 'cat2',
            'supercategory': None
        }
        response = self.put(1, category)
        self.assertEqual(response.status_code, 401)
        self.login(self.test_fan_zone_admin)
        response = self.put(1, category)
        self.assertEqual(response.status_code, 403)
        self.login(self.test_theater_admin)
        response = self.put(1, category)
        self.assertEqual(response.status_code, 403)
        self.login(self.test_system_admin)
        response = self.put(1, category)
        self.assertEqual(response.status_code, 200)
        self.assertTrue(response.data)
class PublicOfficialPropAPI(APITestCase):
    """Tests for the public (unauthenticated) official-props endpoints.

    setUp creates one theater, two categories and two official props
    attached to theater 1; the tests rely on those auto-assigned pks.
    """
    test_theater_admin = {
        'username': 'admin',
        'password': '123456',
        'email': 'admin@test.com',
        'role': 'cinema_admin',
        'theater': '',
    }
    test_fan_zone_admin = {
        'username': 'admin2',
        'password': '123456',
        'email': 'admin2@test.com',
        'role': 'fan_zone_admin',
    }
    # Theater administered by user 1 (the theater admin created first).
    test_theater = {
        'name': 'theater1',
        'address': 'some street',
        'kind': 'p',
        'admins': [1],
    }
    test_category_1 = {
        'name': 'cat',
        'supercategory': None
    }
    test_category_2 = {
        'name': 'cat2',
        'supercategory': None
    }
    # Official ('O') props sold by theater 1, one per category.
    test_prop_1 = {
        'title': 'Prop1',
        'description': 'some profound text here',
        'categoryId': 1,
        'quantity': 2,
        'price': 99.9,
        'theaterId': 1,
        'imageId': None,
        'kind': 'O'
    }
    test_prop_2 = {
        'title': 'Prop2',
        'description': 'some profound text here',
        'categoryId': 2,
        'quantity': 5,
        'price': 59.9,
        'theaterId': 1,
        'imageId': None,
        'kind': 'O'
    }
    def setUp(self):
        # Fixture creation order matters: it fixes the auto pks the
        # prop payloads refer to (theaterId=1, categoryId=1/2).
        serializer = TheaterAdminSerializer(data=self.test_theater_admin)
        if not serializer.is_valid():
            raise Exception(serializer.errors)
        serializer.save()
        serializer = AdminSerializer(data=self.test_fan_zone_admin)
        if not serializer.is_valid():
            raise Exception(serializer.errors)
        serializer.save()
        serializer = TheaterSerializer(data=self.test_theater)
        if not serializer.is_valid():
            raise Exception(serializer.errors)
        serializer.save()
        serializer = AdminCategorySerializer(data=self.test_category_1)
        if not serializer.is_valid():
            raise Exception(serializer.errors)
        serializer.save()
        serializer = AdminCategorySerializer(data=self.test_category_2)
        if not serializer.is_valid():
            raise Exception(serializer.errors)
        serializer.save()
        serializer = RestrictedOfficialPropSerializer(data=self.test_prop_1)
        if not serializer.is_valid():
            raise Exception(serializer.errors)
        serializer.save()
        serializer = RestrictedOfficialPropSerializer(data=self.test_prop_2)
        if not serializer.is_valid():
            raise Exception(serializer.errors)
        serializer.save()
    def test_list(self):
        # Both props are listed without authentication.
        response = self.client.get(path='http://localhost:8000/api/props/official/')
        self.assertEqual(response.status_code, 200)
        self.assertTrue(response.data)
        self.assertEqual(len(response.data), 2)
        # Per-category listing is expected to 404 here — presumably no
        # such route is registered; confirm against the URL conf.
        response = self.client.get(path='http://localhost:8000/api/props/categories/1/official/')
        self.assertEqual(response.status_code, 404)
        Prop.official.all().delete()
        response = self.client.get(path='http://localhost:8000/api/props/official/')
        self.assertEqual(response.status_code, 200)
        self.assertFalse(response.data)
    def test_count(self):
        # The count endpoint returns a bare integer (2 props).
        response = self.client.get(path='http://localhost:8000/api/props/official/count')
        self.assertEqual(response.status_code, 200)
        self.assertTrue(response.data)
        self.assertEqual(response.data, 2)
        # Per-category count route also expected to be absent (404).
        response = self.client.get(path='http://localhost:8000/api/props/categories/1/official/count')
        self.assertEqual(response.status_code, 404)
        Prop.official.all().delete()
        # Count of zero is falsy, hence assertFalse.
        response = self.client.get(path='http://localhost:8000/api/props/official/count')
        self.assertEqual(response.status_code, 200)
        self.assertFalse(response.data)
class RestrictedOfficialPropAPITests(APITestCase):
    """Permission tests for the restricted official-props endpoints.

    Official props may be created/updated by fan-zone admins and system
    admins; theater admins get 403 and anonymous callers 401.  User pks
    assigned in setUp: 1=theater admin, 2=fan-zone admin, 3=fan-zone
    admin 2, 4=system admin; theater pks: 1 and 2.
    """
    test_theater_admin = {
        'username': 'admin',
        'password': '123456',
        'email': 'admin@test.com',
        'role': 'cinema_admin',
        'theater': '',
    }
    test_fan_zone_admin = {
        'username': 'admin2',
        'password': '123456',
        'email': 'admin2@test.com',
        'role': 'fan_zone_admin',
    }
    test_fan_zone_admin_2 = {
        'username': 'admin3',
        'password': '123456',
        'email': 'admin3@test.com',
        'role': 'fan_zone_admin',
        'theater': '',
    }
    test_system_admin = {
        'username': 'sysadmin',
        'password': '123456',
        'role': 'admin',
        'email': 'sysadmin@test.com',
    }
    # NOTE(review): the 'theater' key below looks like a stray duplicate
    # of 'admins' — the sibling fixture test_theater_2 has no such key;
    # confirm against TheaterSerializer's accepted fields.
    test_theater = {
        'name': 'theater1',
        'address': 'some street',
        'kind': 'p',
        'theater': [1],
        'admins': [1]
    }
    test_theater_2 = {
        'name': 'theater1',
        'address': 'some street',
        'kind': 'p',
        'admins': [1],
    }
    test_category_1 = {
        'name': 'cat',
        'supercategory': None
    }
    test_category_2 = {
        'name': 'cat2',
        'supercategory': None
    }
    # Official prop attached to theater 1; expected auto pk 1.
    test_prop_1 = {
        'title': 'Prop1',
        'description': 'some profound text here',
        'categoryId': 1,
        'quantity': 2,
        'price': 99.9,
        'theaterId': 1,
        'imageId': None,
        'kind': 'O'
    }
    def login(self, user):
        """Authenticate the test client as *user* via the JWT login endpoint."""
        response = self.client.post(
            path='http://localhost:8000/api/auth/login/',
            data = {
                'username': user['username'],
                'password': user['password']
            },
            format='json'
        )
        self.client.credentials(HTTP_AUTHORIZATION='JWT ' + response.data['token'])
    def post(self, data):
        """POST an official-prop payload."""
        return self.client.post(
            path='http://localhost:8000/api/props/official/',
            data=data,
            format='json'
        )
    def delete(self, id):
        """DELETE the official prop with the given pk."""
        return self.client.delete(
            path="http://localhost:8000/api/props/official/" + str(id)
        )
    def put(self, id, data):
        """PUT an official-prop payload to the given pk."""
        return self.client.put(
            path="http://localhost:8000/api/props/official/" + str(id),
            data=data,
            format='json'
        )
    def setUp(self):
        # Creation order fixes the auto pks the tests below rely on.
        serializer = TheaterAdminSerializer(data=self.test_theater_admin)
        if not serializer.is_valid():
            raise Exception(serializer.errors)
        serializer.save()
        serializer = AdminSerializer(data=self.test_fan_zone_admin)
        if not serializer.is_valid():
            raise Exception(serializer.errors)
        serializer.save()
        serializer = AdminSerializer(data=self.test_fan_zone_admin_2)
        if not serializer.is_valid():
            raise Exception(serializer.errors)
        serializer.save()
        serializer = AdminSerializer(data=self.test_system_admin)
        if not serializer.is_valid():
            raise Exception(serializer.errors)
        serializer.save()
        serializer = TheaterSerializer(data=self.test_theater)
        if not serializer.is_valid():
            raise Exception(serializer.errors)
        serializer.save()
        serializer = TheaterSerializer(data=self.test_theater_2)
        if not serializer.is_valid():
            raise Exception(serializer.errors)
        serializer.save()
        serializer = AdminCategorySerializer(data=self.test_category_1)
        if not serializer.is_valid():
            raise Exception(serializer.errors)
        serializer.save()
        serializer = AdminCategorySerializer(data=self.test_category_2)
        if not serializer.is_valid():
            raise Exception(serializer.errors)
        serializer.save()
        serializer = RestrictedOfficialPropSerializer(data=self.test_prop_1)
        if not serializer.is_valid():
            raise Exception(serializer.errors)
        serializer.save()
    def test_create(self):
        test_prop_2 = {
            'title': 'Prop2',
            'description': 'some profound text here',
            'categoryId': 2,
            'quantity': 5,
            'price': 59.9,
            'theaterId': 1,
            'imageId': None,
            'kind': 'O'
        }
        # Anonymous -> 401, theater admin -> 403, fan-zone/system admin -> 200.
        response = self.post(test_prop_2)
        self.assertEqual(response.status_code, 401)
        self.login(self.test_theater_admin)
        response = self.post(test_prop_2)
        self.assertEqual(response.status_code, 403)
        self.login(self.test_fan_zone_admin)
        response = self.post(test_prop_2)
        self.assertEqual(response.status_code, 200)
        self.assertTrue(response.data)
        self.login(self.test_system_admin)
        response = self.post(test_prop_2)
        self.assertEqual(response.status_code, 200)
    def test_destroy(self):
        # Prop pk 2 belongs to theater 2.
        test_prop_2 = {
            'title': 'Prop2',
            'description': 'some profound text here',
            'categoryId': 2,
            'quantity': 5,
            'price': 59.9,
            'theaterId': 2,
            'imageId': None,
            'kind': 'O'
        }
        serializer = RestrictedOfficialPropSerializer(data=test_prop_2)
        if not serializer.is_valid():
            raise Exception(serializer.errors)
        serializer.save()
        response = self.delete(2)
        self.assertEqual(response.status_code, 401)
        self.login(self.test_theater_admin)
        response = self.delete(2)
        self.assertEqual(response.status_code, 403)
        self.login(self.test_fan_zone_admin_2)
        response = self.delete(2)
        self.assertEqual(response.status_code, 200)
        # Recreate twice (pks 3 and 4) so the system admin has props to delete.
        serializer = RestrictedOfficialPropSerializer(data=test_prop_2)
        if not serializer.is_valid():
            raise Exception(serializer.errors)
        serializer.save()
        serializer = RestrictedOfficialPropSerializer(data=test_prop_2)
        if not serializer.is_valid():
            raise Exception(serializer.errors)
        serializer.save()
        self.login(self.test_system_admin)
        response = self.delete(3)
        self.assertEqual(response.status_code, 200)
        self.assertTrue(response.data)
        self.login(self.test_system_admin)
        response = self.delete(1)
        self.assertEqual(response.status_code, 200)
        self.assertTrue(response.data)
        # Deleting a nonexistent prop is a 404.
        response = self.delete(99)
        self.assertEqual(response.status_code, 404)
    def test_update(self):
        test_prop_2 = {
            'title': 'Prop2',
            'description': 'some profound text here',
            'categoryId': 2,
            'quantity': 5,
            'price': 59.9,
            'theaterId': 1,
            'imageId': None,
            'kind': 'O'
        }
        serializer = RestrictedOfficialPropSerializer(data=test_prop_2)
        if not serializer.is_valid():
            raise Exception(serializer.errors)
        serializer.save()
        response = self.put(2, test_prop_2)
        self.assertEqual(response.status_code, 401)
        self.login(self.test_theater_admin)
        response = self.put(2, test_prop_2)
        self.assertEqual(response.status_code, 403)
        self.login(self.test_fan_zone_admin)
        response = self.put(2, test_prop_2)
        self.assertEqual(response.status_code, 200)
        self.assertTrue(response.data)
class PublicUsedAPI(APITestCase):
    """Tests for the public (unauthenticated) used-props endpoints.

    setUp creates two used props, one per category, so list/count can be
    filtered by ?category=.

    Fix: test_count queried ?category=1 twice; the second query now uses
    ?category=2, mirroring test_list, so category 2's count is actually
    covered.
    """
    # Declared but never saved in setUp — kept for parity with the other
    # test classes; prop ownerId=1 refers to the theater admin below.
    test_user = {
        'username': 'user',
        'password': '123456',
        'email': 'user@test.com',
        'role': 'user',
    }
    test_theater_admin = {
        'username': 'admin',
        'password': '123456',
        'email': 'admin@test.com',
        'role': 'cinema_admin',
        'theater': '',
    }
    test_fan_zone_admin = {
        'username': 'admin2',
        'password': '123456',
        'email': 'admin2@test.com',
        'role': 'fan_zone_admin',
    }
    test_theater = {
        'name': 'theater1',
        'address': 'some street',
        'kind': 'p',
        'admins': [1],
    }
    test_category_1 = {
        'name': 'cat',
        'supercategory': None
    }
    test_category_2 = {
        'name': 'cat2',
        'supercategory': None
    }
    # Used ('U') props, one in each category.
    test_prop_1 = {
        'title': 'Prop1',
        'description': 'some profound text here',
        'ownerId': 1,
        'categoryId': 1,
        'imageId': None,
        'expirationDate': '2018-06-01',
        'kind': 'U'
    }
    test_prop_2 = {
        'title': 'Prop2',
        'description': 'some profound text here',
        'ownerId': 1,
        'categoryId': 2,
        'imageId': None,
        'expirationDate': '2018-06-06',
        'kind': 'U'
    }
    def setUp(self):
        # Creation order fixes the auto pks referenced by the fixtures.
        serializer = TheaterAdminSerializer(data=self.test_theater_admin)
        if not serializer.is_valid():
            raise Exception(serializer.errors)
        serializer.save()
        serializer = AdminSerializer(data=self.test_fan_zone_admin)
        if not serializer.is_valid():
            raise Exception(serializer.errors)
        serializer.save()
        serializer = TheaterSerializer(data=self.test_theater)
        if not serializer.is_valid():
            raise Exception(serializer.errors)
        serializer.save()
        serializer = AdminCategorySerializer(data=self.test_category_1)
        if not serializer.is_valid():
            raise Exception(serializer.errors)
        serializer.save()
        serializer = AdminCategorySerializer(data=self.test_category_2)
        if not serializer.is_valid():
            raise Exception(serializer.errors)
        serializer.save()
        serializer = MemberUsedPropSerializer(data=self.test_prop_1)
        if not serializer.is_valid():
            raise Exception(serializer.errors)
        serializer.save()
        serializer = MemberUsedPropSerializer(data=self.test_prop_2)
        if not serializer.is_valid():
            raise Exception(serializer.errors)
        serializer.save()
    def test_list(self):
        # Unfiltered: both props; filtered by category: one each;
        # unknown category: empty list but still 200.
        response = self.client.get(path='http://localhost:8000/api/props/used/?all=true')
        self.assertEqual(response.status_code, 200)
        self.assertTrue(response.data)
        self.assertEqual(len(response.data), 2)
        response = self.client.get(path='http://localhost:8000/api/props/used/?category=1&all=true')
        self.assertEqual(response.status_code, 200)
        self.assertTrue(response.data)
        self.assertEqual(len(response.data), 1)
        response = self.client.get(path='http://localhost:8000/api/props/used/?category=2&all=true')
        self.assertEqual(response.status_code, 200)
        self.assertTrue(response.data)
        self.assertEqual(len(response.data), 1)
        response = self.client.get(path='http://localhost:8000/api/props/used/?category=99&all=true')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(len(response.data), 0)
        Prop.used.all().delete()
        response = self.client.get(path='http://localhost:8000/api/props/used/?all=true')
        self.assertEqual(response.status_code, 200)
        self.assertFalse(response.data)
    def test_count(self):
        response = self.client.get(path='http://localhost:8000/api/props/used/count?all=true')
        self.assertEqual(response.status_code, 200)
        self.assertTrue(response.data)
        self.assertEqual(response.data, 2)
        response = self.client.get(path='http://localhost:8000/api/props/used/count?category=1&all=true')
        self.assertEqual(response.status_code, 200)
        self.assertTrue(response.data)
        self.assertEqual(response.data, 1)
        # Was a duplicate of the category=1 query above; now checks
        # category 2 as test_list does.
        response = self.client.get(path='http://localhost:8000/api/props/used/count?category=2&all=true')
        self.assertEqual(response.status_code, 200)
        self.assertTrue(response.data)
        self.assertEqual(response.data, 1)
        response = self.client.get(path='http://localhost:8000/api/props/used/count?category=99&all=true')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.data, 0)
        Prop.used.all().delete()
        # Count of zero is falsy, hence assertFalse.
        response = self.client.get(path='http://localhost:8000/api/props/used/count?all=true')
        self.assertEqual(response.status_code, 200)
        self.assertFalse(response.data)
class MemberUsedPropAPITests(APITestCase):
    """Permission tests for member-level used-props endpoints.

    Any authenticated user may create a used prop.  The destroy/update
    tests at the bottom are commented out (disabled); they are left
    verbatim pending a decision on the intended permission matrix.
    """
    test_user = {
        'username': 'user',
        'password': '123456',
        'email': 'user@test.com',
        'role': 'user',
    }
    test_theater_admin = {
        'username': 'admin',
        'password': '123456',
        'email': 'admin@test.com',
        'role': 'cinema_admin',
        'theater': '',
    }
    test_fan_zone_admin = {
        'username': 'admin2',
        'password': '123456',
        'email': 'admin2@test.com',
        'role': 'fan_zone_admin',
    }
    test_system_admin = {
        'username': 'sysadmin',
        'password': '123456',
        'role': 'admin',
        'email': 'sysadmin@test.com',
    }
    test_category_1 = {
        'name': 'cat',
        'supercategory': None
    }
    test_category_2 = {
        'name': 'cat2',
        'supercategory': None
    }
    # Used prop owned by user pk 1 (the plain user created first in setUp).
    test_prop_1 = {
        'title': 'Prop1',
        'description': 'some profound text here',
        'ownerId': 1,
        'categoryId': 1,
        'imageId': None,
        'expirationDate': '2018-06-01',
        'kind': 'U'
    }
    def login(self, user):
        """Authenticate the test client as *user* via the JWT login endpoint."""
        response = self.client.post(
            path='http://localhost:8000/api/auth/login/',
            data = {
                'username': user['username'],
                'password': user['password']
            },
            format='json'
        )
        self.client.credentials(HTTP_AUTHORIZATION='JWT ' + response.data['token'])
    def post(self, data):
        """POST a used-prop payload."""
        return self.client.post(
            path='http://localhost:8000/api/props/used/',
            data=data,
            format='json'
        )
    def delete(self, id):
        """DELETE the used prop with the given pk."""
        return self.client.delete(
            path="http://localhost:8000/api/props/used/" + str(id)
        )
    def put(self, id, data):
        """PUT a used-prop payload to the given pk."""
        return self.client.put(
            path="http://localhost:8000/api/props/used/" + str(id),
            data=data,
            format='json'
        )
    def setUp(self):
        # Creation order fixes the auto pks referenced by the fixtures.
        serializer = AdminSerializer(data=self.test_user)
        if not serializer.is_valid():
            raise Exception(serializer.errors)
        serializer.save()
        serializer = TheaterAdminSerializer(data=self.test_theater_admin)
        if not serializer.is_valid():
            raise Exception(serializer.errors)
        serializer.save()
        serializer = AdminSerializer(data=self.test_fan_zone_admin)
        if not serializer.is_valid():
            raise Exception(serializer.errors)
        serializer.save()
        serializer = AdminSerializer(data=self.test_system_admin)
        if not serializer.is_valid():
            raise Exception(serializer.errors)
        serializer.save()
        serializer = AdminCategorySerializer(data=self.test_category_1)
        if not serializer.is_valid():
            raise Exception(serializer.errors)
        serializer.save()
        serializer = AdminCategorySerializer(data=self.test_category_2)
        if not serializer.is_valid():
            raise Exception(serializer.errors)
        serializer.save()
        serializer = MemberUsedPropSerializer(data=self.test_prop_1)
        if not serializer.is_valid():
            raise Exception(serializer.errors)
        serializer.save()
    def test_create(self):
        test_prop_2 = {
            'title': 'Prop2',
            'description': 'some profound text here',
            'ownerId': 1,
            'categoryId': 2,
            'imageId': None,
            'expirationDate': '2018-06-01',
            'kind': 'U'
        }
        # Only anonymous callers are rejected; every authenticated role
        # may create a used prop.
        response = self.post(test_prop_2)
        self.assertEqual(response.status_code, 401)
        self.login(self.test_user)
        response = self.post(test_prop_2)
        self.assertEqual(response.status_code, 200)
        self.login(self.test_theater_admin)
        response = self.post(test_prop_2)
        self.assertEqual(response.status_code, 200)
        self.login(self.test_fan_zone_admin)
        response = self.post(test_prop_2)
        self.assertEqual(response.status_code, 200)
        self.assertTrue(response.data)
        self.login(self.test_system_admin)
        response = self.post(test_prop_2)
        self.assertEqual(response.status_code, 200)
    # Disabled tests below — kept for reference.
    # def test_destroy(self):
    #     test_prop_2 = {
    #         'title': 'Prop2',
    #         'description': 'some profound text here',
    #         'ownerId': 1,
    #         'categoryId': 2,
    #         'imageId': None,
    #         'expirationDate': '2018-06-01',
    #         'kind': 'U'
    #     }
    #     serializer = MemberUsedPropSerializer(data=test_prop_2)
    #     if not serializer.is_valid():
    #         raise Exception(serializer.errors)
    #     serializer.save()
    #     response = self.delete(2)
    #     self.assertEqual(response.status_code, 401)
    #     self.login(self.test_user)
    #     response = self.delete(2)
    #     self.assertEqual(response.status_code, 200)
    #     self.login(self.test_theater_admin)
    #     response = self.delete(2)
    #     self.assertEqual(response.status_code, 403)
    #     self.login(self.test_fan_zone_admin)
    #     response = self.delete(2)
    #     self.assertEqual(response.status_code, 403)
    #     self.login(self.test_user)
    #     response = self.delete(2)
    #     self.assertEqual(response.status_code, 200)
    #     serializer = MemberUsedPropSerializer(data=test_prop_2)
    #     if not serializer.is_valid():
    #         raise Exception(serializer.errors)
    #     serializer.save()
    #     serializer = MemberUsedPropSerializer(data=test_prop_2)
    #     if not serializer.is_valid():
    #         raise Exception(serializer.errors)
    #     serializer.save()
    #     self.login(self.test_system_admin)
    #     response = self.delete(3)
    #     self.assertEqual(response.status_code, 200)
    #     self.assertTrue(response.data)
    #     self.login(self.test_system_admin)
    #     response = self.delete(1)
    #     self.assertEqual(response.status_code, 200)
    #     self.assertTrue(response.data)
    #     response = self.delete(99)
    #     self.assertEqual(response.status_code, 404)
    # def test_update(self):
    #     test_prop_2 = {
    #         'title': 'Prop2',
    #         'description': 'some profound text here',
    #         'ownerId': 1,
    #         'categoryId': 2,
    #         'imageId': None,
    #         'expirationDate': '2018-06-01',
    #         'kind': 'U'
    #     }
    #     serializer = MemberUsedPropSerializer(data=test_prop_2)
    #     if not serializer.is_valid():
    #         raise Exception(serializer.errors)
    #     serializer.save()
    #     response = self.put(2, test_prop_2)
    #     self.assertEqual(response.status_code, 401)
    #     self.login(self.test_theater_admin)
    #     response = self.put(2, test_prop_2)
    #     self.assertEqual(response.status_code, 403)
    #     self.login(self.test_fan_zone_admin)
    #     response = self.put(2, test_prop_2)
    #     self.assertEqual(response.status_code, 403)
    #     self.login(self.test_user)
    #     response = self.put(2, test_prop_2)
    #     self.assertEqual(response.status_code, 200)
class RestrictedUsedPropAPITests(APITestCase):
    """Permission tests for the used-prop review (approval) endpoint.

    Only fan-zone admins and system admins may approve a used prop;
    plain users and theater admins get 403, anonymous callers 401.
    """
    test_user = {
        'username': 'user',
        'password': '123456',
        'email': 'user@test.com',
        'role': 'user',
    }
    test_theater_admin = {
        'username': 'admin',
        'password': '123456',
        'email': 'admin@test.com',
        'role': 'cinema_admin',
        'theater': '',
    }
    test_fan_zone_admin = {
        'username': 'admin2',
        'password': '123456',
        'email': 'admin2@test.com',
        'role': 'fan_zone_admin',
    }
    test_system_admin = {
        'username': 'sysadmin',
        'password': '123456',
        'role': 'admin',
        'email': 'sysadmin@test.com',
    }
    # Theater administered by user pk 2 (the theater admin).
    test_theater = {
        'name': 'theater1',
        'address': 'some street',
        'kind': 'p',
        'admins': [2],
    }
    test_category = {
        'name': 'cat',
        'supercategory': None
    }
    # Used prop owned by user pk 1; expected prop pk 1.
    test_prop = {
        'title': 'Prop1',
        'description': 'some profound text here',
        'ownerId': 1,
        'categoryId': 1,
        'imageId': None,
        'expirationDate': '2018-06-01',
        'kind': 'U'
    }
    def login(self, user):
        """Authenticate the test client as *user* via the JWT login endpoint."""
        response = self.client.post(
            path='http://localhost:8000/api/auth/login/',
            data = {
                'username': user['username'],
                'password': user['password']
            },
            format='json'
        )
        self.client.credentials(HTTP_AUTHORIZATION='JWT ' + response.data['token'])
    def put(self, id, data):
        """PUT a review decision for the used prop with the given pk."""
        return self.client.put(
            path="http://localhost:8000/api/props/used/" + str(id) + "/review",
            data=data,
            format='json'
        )
    def setUp(self):
        # Creation order fixes the auto pks referenced by the fixtures.
        serializer = AdminSerializer(data=self.test_user)
        if not serializer.is_valid():
            raise Exception(serializer.errors)
        serializer.save()
        serializer = TheaterAdminSerializer(data=self.test_theater_admin)
        if not serializer.is_valid():
            raise Exception(serializer.errors)
        serializer.save()
        serializer = AdminSerializer(data=self.test_fan_zone_admin)
        if not serializer.is_valid():
            raise Exception(serializer.errors)
        serializer.save()
        serializer = AdminSerializer(data=self.test_system_admin)
        if not serializer.is_valid():
            raise Exception(serializer.errors)
        serializer.save()
        serializer = TheaterSerializer(data=self.test_theater)
        if not serializer.is_valid():
            raise Exception(serializer.errors)
        serializer.save()
        serializer = AdminCategorySerializer(data=self.test_category)
        if not serializer.is_valid():
            raise Exception(serializer.errors)
        serializer.save()
        serializer = MemberUsedPropSerializer(data=self.test_prop)
        if not serializer.is_valid():
            raise Exception(serializer.errors)
        serializer.save()
    def test_update(self):
        data = {
            'approve': True
        }
        # Anonymous -> 401; user and theater admin -> 403;
        # fan-zone and system admin -> 200.
        response = self.put(1, data)
        self.assertEqual(response.status_code, 401)
        self.login(self.test_user)
        response = self.put(1, data)
        self.assertEqual(response.status_code, 403)
        self.login(self.test_theater_admin)
        response = self.put(1, data)
        self.assertEqual(response.status_code, 403)
        self.login(self.test_fan_zone_admin)
        response = self.put(1, data)
        self.assertEqual(response.status_code, 200)
        self.login(self.test_system_admin)
        response = self.put(1, data)
        self.assertEqual(response.status_code, 200)
| [
"aleksandar.varga@uns.ac.rs"
] | aleksandar.varga@uns.ac.rs |
bc9256b1d485797fbe0fdf38559ee1bc5e3b67f9 | 283fa090697198f801f49ebaaff8fda78e75c98b | /ass_a/main.py | 499650ba780f358fe8b656e04ebeb3abcedebba6 | [] | no_license | Biggy54321/AVL-BST-Tress-Comparison | fe579a4b5705526069e82b2879979b2a31038db0 | d12f401d4df36a8d045db9e1970d24bf19b98f23 | refs/heads/master | 2022-11-24T20:48:19.585684 | 2020-08-01T08:16:57 | 2020-08-01T08:16:57 | 284,217,698 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,743 | py | import time
import time

from Avl import AvlTree
from Bst import BstTree

# Benchmark script: compare search time of an AVL tree vs a plain BST.
# The "test" file holds one keyword per line, sorted ascending — the
# worst case for a BST (it degenerates into a linked list) while the
# AVL tree stays balanced.  No meanings are inserted for this test.
with open("test", "r") as key_words_file:
    # Strip the trailing newline from each keyword.
    key_words = [word.strip() for word in key_words_file.readlines()]

avl = AvlTree()
bst = BstTree()

# Insert every keyword into both trees (placeholder meanings only).
for word in key_words:
    avl.insert(word, "AVL-tree-key-found")
    bst.insert(word, "BST-tree-key-found")

def timed_search(tree, key):
    """Return (search result, elapsed milliseconds) for one lookup.

    Uses time.perf_counter() — a high-resolution monotonic clock — and
    times only the search itself; the original version used time.time()
    and included the print() call inside the timed region.
    """
    start = time.perf_counter()
    result = tree.search(key)
    elapsed_ms = (time.perf_counter() - start) * 1000
    return result, elapsed_ms

# Search for the last keyword of the test file (deepest node in the BST).
result, elapsed_ms = timed_search(avl, "lysates")
print(result)
print("** Time for AVL search", elapsed_ms, "milli secs", end="\n\n")

result, elapsed_ms = timed_search(bst, "lysates")
print(result)
print("** Time for BST search", elapsed_ms, "milli secs", end="\n\n")

# Print the conclusion
print("INFERENCE: Hence in case of height balanced tree the time required to search for a key is O(lg(n)) and in case of binary search tree the time required is O(n), so maximum number of comparison that may require in case of height balanced is again O(lg(n)), while in case of binary search tree is O(n)")
| [
"biggy.pardeshi@gmail.com"
] | biggy.pardeshi@gmail.com |
a75a02b604904c812799bbe215c35fb925538e5c | a29b234c19643341e5ea1d964cc81accfe9524e4 | /EmailRemover.py | 9c7fccb4be93a705554f23a14302633988935416 | [] | no_license | SilviaF/BlacklistEmailRemover | afca08f67b5b7c7fe0e847454320bd3f9a3a6a82 | b2aa16fa87a84d98c1d06764f2e99ce55eeb18fa | refs/heads/master | 2021-01-10T15:39:37.349475 | 2016-02-29T13:20:48 | 2016-02-29T13:20:48 | 52,792,261 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 418 | py | import sys
import sys

# Usage: python EmailRemover.py <email_file> <blacklist_file>
# Rewrites <email_file> in place, dropping every line that contains any
# blacklist entry as a substring.
emailFile = sys.argv[1]
blacklistFile = sys.argv[2]

# Read everything up front so the email file can be reopened for writing.
with open(emailFile, "r") as f:
    lines = f.readlines()

with open(blacklistFile, "r") as g:
    blacklist = g.readlines()

# NOTE(review): blacklist entries keep their trailing newline, so
# matching is sensitive to exact line endings (e.g. CRLF vs LF, or a
# missing final newline) — confirm whether entries should be .strip()ped
# before matching.
kept = [line for line in lines if not any(bad in line for bad in blacklist)]

# The original opened the output file without ever closing it, so the
# rewrite could be lost at interpreter teardown; 'with' guarantees
# flush+close.  The never-called deleteContent() helper was removed —
# mode "w" already truncates the file.
with open(emailFile, "w") as f:
    f.writelines(kept)
| [
"silvia.figueroa.ardila@gmail.com"
] | silvia.figueroa.ardila@gmail.com |
f0881d7f2e8edcf749f6b4ed87b7980c6c4e57f4 | 4a283ec6af9748d95bb0f20e12bbc0c6eacb70b4 | /pelicanconf.py | a2996341625f81483958a1cc37ea85431f30e51d | [] | no_license | tschleining/opsech.io | 797d9666647561f3b4a5ff50a45ae5b9809b3477 | 372b9364101dc1e195c90f654f3279a4cb4f556f | refs/heads/master | 2021-01-11T19:52:49.384008 | 2017-01-19T05:01:21 | 2017-01-19T05:01:21 | 79,416,899 | 0 | 0 | null | 2017-01-19T04:54:00 | 2017-01-19T04:54:00 | null | UTF-8 | Python | false | false | 2,106 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*- #
from __future__ import unicode_literals
AUTHOR = u'Mike'
SITENAME = u'#> opsech.io'
SITEURL = ''
SITESUBTITLE = "Random wanderings of a Linux traveller"
PATH = 'content'
PLUGIN_PATHS = ["plugins/pelican-plugins"]
STATIC_PATHS = ['images','extra','favs']
IGNORE_FILES = ['*.swp','*.kate-swp']
#PLUGINS = ["better_codeblock_line_numbering","better_figures_and_images"]
PLUGINS = ["better_codeblock_line_numbering"]
CHECK_MODIFIED_METHOD = "mtime"
TIMEZONE = 'America/New_York'
DEFAULT_LANG = u'en'
ARTICLE_URL = 'posts/{date:%Y}/{date:%b}/{date:%d}/{slug}.html'
ARTICLE_SAVE_AS = 'posts/{date:%Y}/{date:%b}/{date:%d}/{slug}.html'
PAGE_URL = 'pages/{slug}.html'
PAGE_SAVE_AS = 'pages/{slug}.html'
#NEWEST_FIRST_ARCHIVES = True
#FIGURE_NUMBERS = True
RESPONSIVE_IMAGES = True
# https://github.com/ingwinlu/pelican-twitchy
THEME = 'themes/pelican-twitchy'
PYGMENTS_STYLE = "monokai"
BOOTSTRAP_THEME = "slate"
SHARE = True
CUSTOM_CSS = "extra/custom.css"
SOCIAL = (('Bitbucket','https://bitbucket.org/xenithorb'),
('Github','https://github.com/xenithorb'))
EXPAND_LATEST_ON_INDEX = True
DISQUS_LOAD_LATER = True
DISPLAY_TAGS_ON_MENU = True
#DISPLAY_TAGS_INLINE = True
DISPLAY_RECENT_POSTS_ON_MENU = True
CC_LICENSE = "CC-BY-NC-SA"
# End pelican-twitchy specific settings
# Feed generation is usually not desired when developing
FEED_ALL_ATOM = None
CATEGORY_FEED_ATOM = None
TRANSLATION_FEED_ATOM = None
AUTHOR_FEED_ATOM = None
AUTHOR_FEED_RSS = None
DEFAULT_PAGINATION = False
# Uncomment following line if you want document-relative URLs when developing
RELATIVE_URLS = True
# Typogrify
TYPOGRIFY = True
# For better_codeblock_line_numbering plugin
#MD_EXTENSIONS = [
# 'codehilite(css_class=highlight,linenums=False)',
# 'extra',
# ]
from markdown.extensions.codehilite import CodeHiliteExtension
from markdown.extensions.toc import TocExtension
MD_EXTENSIONS = [
CodeHiliteExtension(css_class='highlight', linenums=False),
TocExtension(permalink=True),
'markdown.extensions.extra',
'markdown.extensions.figureAltCaption',
]
| [
"xenithorb@users.noreply.github.com"
] | xenithorb@users.noreply.github.com |
7c04685e8ec462c670aa49c19d84ef8baaa47261 | 529d1ab687753e519487287e7644fc1da8a2990a | /addTwoNumbers.py | d7dd5a491a5bcda0de66a0ffd003c6d1a97028e6 | [] | no_license | kau96kim/algorithm | b71ef800c365f8101090292003d7e065eb13168e | 007dafb03d6bf1fb3729b65f4f0825ef5eb1900e | refs/heads/master | 2023-02-12T11:49:59.388553 | 2020-12-29T14:53:14 | 2020-12-29T14:53:14 | 275,054,288 | 1 | 1 | null | 2020-07-11T17:07:27 | 2020-06-26T02:06:04 | Python | UTF-8 | Python | false | false | 1,301 | py | # Definition for singly-linked list.
class ListNode:
    """A single node of a singly linked list."""

    def __init__(self, val=0, next=None):
        # Payload plus pointer to the following node (None marks the end).
        self.val, self.next = val, next
class LinkedList:
    """Minimal singly linked list built from ListNode objects.

    A freshly constructed list holds a single placeholder node whose
    value is None; the first add() replaces that placeholder instead of
    appending after it.
    """

    def __init__(self, val=None):
        # Placeholder head until the first value is added.
        self.head = ListNode(val)

    def add(self, val=0):
        """Append *val*, replacing the placeholder head if still present."""
        if self.head.val is None:
            self.head = ListNode(val)
            return
        cursor = self.head
        while cursor.next is not None:
            cursor = cursor.next
        cursor.next = ListNode(val)

    def display(self):
        """Print each node's value, one per line, from head to tail."""
        cursor = self.head
        while cursor:
            print(cursor.val)
            cursor = cursor.next
class Solution:
    def addTwoNumbers(self, l1: ListNode, l2: ListNode) -> ListNode:
        """Add two numbers stored as digit lists, least significant first.

        Each input list spells a number with its digits reversed; the sum
        is returned as a new list in the same reversed-digit form.
        """
        def digit_string(node):
            # Concatenate node values head-to-tail as text.
            chunks = []
            while node:
                chunks.append(str(node.val))
                node = node.next
            return "".join(chunks)

        # Reverse each digit string to recover the real number, add, then
        # reverse the sum back into least-significant-first order.
        total = str(int(digit_string(l1)[::-1]) + int(digit_string(l2)[::-1]))[::-1]
        head = ListNode(val=int(total[0]))
        tail = head
        for digit in total[1:]:
            tail.next = ListNode(val=int(digit))
            tail = tail.next
        return head
firstInput = [2, 4, 3]
SecondInput = [5, 6, 4]
firstList = LinkedList()
SecondList = LinkedList()
for i in firstInput:
firstList.add(i)
for i in SecondInput:
SecondList.add(i)
Solution.addTwoNumbers(Solution, firstList.head, SecondList.head)
| [
"kau96kim@gmail.com"
] | kau96kim@gmail.com |
c24afcaf775187b005efbf779c0da4629082645b | 3fa0ffa43a6f852741780950cf55a944120ac4e7 | /blog_project/settings.py | ce94317224484f6c5c9fdb8b5a5f8b36e54652df | [] | no_license | GreenUpOu/Django-BLOG- | a86039825393ac684c2cbdc3005d4e0d626c5045 | d54aeaeef6ceec38f28c2a58ff3c32627bdd711c | refs/heads/master | 2023-07-10T08:40:17.916674 | 2021-08-11T18:24:33 | 2021-08-11T18:24:33 | 395,077,852 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,687 | py | """
Django settings for blog_project project.
Generated by 'django-admin startproject' using Django 3.2.6.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path, os
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-j(3)a_&iduqvd-73zzelijh9h8e57!$v@w&%0q$bjffvc)vz+w'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
#my apps
'blog',
'accounts',
#default
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'whitenoise.runserver_nostatic',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'blog_project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'blog_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATIC_URL = '/static/'
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'static')]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage' \
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
LOGIN_REDIRECT_URL = 'home'
LOGOUT_REDIRECT_URL = 'home'
| [
"27062015grisha@gmail.com"
] | 27062015grisha@gmail.com |
c1389065e88b1cdbc62f034dee3346f8f0b03279 | 1251ed3b76c8b44077c6db3914fcd88671303247 | /scripts/fake.py | 6eb31c2c446774074f8ea574a386c5be1c012ef9 | [] | no_license | zhushaojun/student_server | 4fd4f4da41fe4f74be2380007edc9faedd779ec7 | 9dcfe6fb8dc2a7bdf0a9e274388c65d3f369e059 | refs/heads/master | 2021-01-05T11:59:49.075769 | 2020-04-17T02:06:54 | 2020-04-17T02:06:54 | 241,017,019 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,670 | py | import os
import pathlib
import random
import sys
from datetime import timedelta
import django
# import faker
from django.utils import timezone
# 将项目根目录添加到 Python 的模块搜索路径中
back = os.path.dirname
BASE_DIR = back(back(os.path.abspath(__file__)))
sys.path.append(BASE_DIR)
random.seed()
xing = [
'赵', '钱', '孙', '李', '周', '吴', '郑', '王', '冯', '陈', '褚', '卫', '蒋', '沈', '韩', '杨', '朱', '秦', '尤', '许',
'何', '吕', '施', '张', '孔', '曹', '严', '华', '金', '魏', '陶', '姜', '戚', '谢', '邹', '喻', '柏', '水', '窦', '章',
'云', '苏', '潘', '葛', '奚', '范', '彭', '郎', '鲁', '韦', '昌', '马', '苗', '凤', '花', '方', '俞', '任', '袁', '柳',
'酆', '鲍', '史', '唐', '费', '廉', '岑', '薛', '雷', '贺', '倪', '汤', '滕', '殷', '罗', '毕', '郝', '邬', '安', '常',
'乐', '于', '时', '傅', '皮', '卞', '齐', '康', '伍', '余', '元', '卜', '顾', '孟', '平', '黄', '和', '穆', '萧', '尹',
'姚', '邵', '堪', '汪', '祁', '毛', '禹', '狄', '米', '贝', '明', '臧', '计', '伏', '成', '戴', '谈', '宋', '茅', '庞',
'熊', '纪', '舒', '屈', '项', '祝', '董', '梁']
ming = [
'的', '一', '是', '了', '我', '不', '人', '在', '他', '有', '这', '个', '上', '们', '来', '到', '时', '大', '地', '为',
'子', '中', '你', '说', '生', '国', '年', '着', '就', '那', '和', '要', '她', '出', '也', '得', '里', '后', '自', '以',
'会', '家', '可', '下', '而', '过', '天', '去', '能', '对', '小', '多', '然', '于', '心', '学', '么', '之', '都', '好',
'看', '起', '发', '当', '没', '成', '只', '如', '事', '把', '还', '用', '第', '样', '道', '想', '作', '种', '开', '美',
'总', '从', '无', '情', '己', '面', '最', '女', '但', '现', '前', '些', '所', '同', '日', '手', '又', '行', '意', '动',
'方', '期', '它', '头', '经', '长', '儿', '回', '位', '分', '爱', '老', '因', '很', '给', '名', '法', '间', '斯', '知',
'世', '什', '两', '次', '使', '身', '者', '被', '高', '已', '亲', '其', '进', '此', '话', '常', '与', '活', '正', '感',
'见', '明', '问', '力', '理', '尔', '点', '文', '几', '定', '本', '公', '特', '做', '外', '孩', '相', '西', '果', '走',
'将', '月', '十', '实', '向', '声', '车', '全', '信', '重', '三', '机', '工', '物', '气', '每', '并', '别', '真', '打',
'太', '新', '比', '才', '便', '夫', '再', '书', '部', '水', '像', '眼', '等', '体', '却', '加', '电', '主', '界', '门',
'利', '海', '受', '听', '表', '德', '少', '克', '代', '员', '许', '稜', '先', '口', '由', '死', '安', '写', '性', '马',
'光', '白', '或', '住', '难', '望', '教', '命', '花', '结', '乐', '色', '更', '拉', '东', '神', '记', '处', '让', '母',
'父', '应', '直', '字', '场', '平', '报', '友', '关', '放', '至', '张', '认', '接', '告', '入', '笑', '内', '英', '军',
'候', '民', '岁', '往', '何', '度', '山', '觉', '路', '带', '万', '男', '边', '风', '解', '叫', '任', '金', '快', '原',
'吃', '妈', '变', '通', '师', '立', '象', '数', '四', '失', '满', '战', '远', '格', '士', '音', '轻', '目', '条', '呢',
'病', '始', '达', '深', '完', '今', '提', '求', '清', '王', '化', '空', '业', '思', '切', '怎', '非', '找', '片', '罗',
'钱', '紶', '吗', '语', '元', '喜', '曾', '离', '飞', '科', '言', '干', '流', '欢', '约', '各', '即', '指', '合', '反',
'题', '必', '该', '论', '交', '终', '林', '请', '医', '晚', '制', '球', '决', '窢', '传', '画', '保', '读', '运', '及',
'则', '房', '早', '院', '量', '苦', '火', '布', '品', '近', '坐', '产', '答', '星', '精', '视', '五', '连', '司', '巴',
'奇', '管', '类', '未', '朋', '且', '婚', '台', '夜', '青', '北', '队', '久', '乎', '越', '观', '落', '尽', '形', '影',
'红', '爸', '百', '令', '周', '吧', '识', '步', '希', '亚', '术', '留', '市', '半', '热', '送', '兴', '造', '谈', '容',
'极', '随', '演', '收', '首', '根', '讲', '整', '式', '取', '照', '办', '强', '石', '古', '华', '諣', '拿', '计', '您',
'装', '似', '足', '双', '妻', '尼', '转', '诉', '米', '称', '丽', '客', '南', '领', '节', '衣', '站', '黑', '刻', '统',
'断', '福', '城', '故', '历', '惊', '脸', '选', '包', '紧', '争', '另', '建', '维', '绝', '树', '系', '伤', '示', '愿',
'持', '千', '史', '谁', '准', '联', '妇', '纪', '基', '买', '志', '静', '阿', '诗', '独', '复', '痛', '消', '社', '算',
'义', '竟', '确', '酒', '需', '单', '治', '卡', '幸', '兰', '念', '举', '仅', '钟', '怕', '共', '毛', '句', '息', '功',
'官', '待', '究', '跟', '穿', '室', '易', '游', '程', '号', '居', '考', '突', '皮', '哪', '费', '倒', '价', '图', '具',
'刚', '脑', '永', '歌', '响', '商', '礼', '细', '专', '黄', '块', '脚', '味', '灵', '改', '据', '般', '破', '引', '食',
'仍', '存', '众', '注', '笔', '甚', '某', '沉', '血', '备', '习', '校', '默', '务', '土', '微', '娘', '须', '试', '怀',
'料', '调', '广', '蜖', '苏', '显', '赛', '查', '密', '议', '底', '列', '富', '梦', '错', '座', '参', '八', '除', '跑',
'亮', '假', '印', '设', '线', '温', '虽', '掉', '京', '初', '养', '香', '停', '际', '致', '阳', '纸', '李', '纳', '验',
'助', '激', '够', '严', '证', '帝', '饭', '忘', '趣', '支', '春', '集', '丈', '木', '研', '班', '普', '导', '顿', '睡',
'展', '跳', '获', '艺', '六', '波', '察', '群', '皇', '段', '急', '庭', '创', '区', '奥', '器', '谢', '弟', '店', '否',
'害', '草', '排', '背', '止', '组', '州', '朝', '封', '睛', '板', '角', '况', '曲', '馆', '育', '忙', '质', '河', '续',
'哥', '呼', '若', '推', '境', '遇', '雨', '标', '姐', '充', '围', '案', '伦', '护', '冷', '警', '贝', '著', '雪', '索',
'剧', '啊', '船', '险', '烟', '依', '斗', '值', '帮', '汉', '慢', '佛', '肯', '闻', '唱', '沙', '局', '伯', '族', '低',
'玩', '资', '屋', '击', '速', '顾', '泪', '洲', '团', '圣', '旁', '堂', '兵', '七', '露', '园', '牛', '哭', '旅', '街',
'劳', '型', '烈', '姑', '陈', '莫', '鱼', '异', '抱', '宝', '权', '鲁', '简', '态', '级', '票', '怪', '寻', '杀', '律',
'胜', '份', '汽', '右', '洋', '范', '床', '舞', '秘', '午', '登', '楼', '贵', '吸', '责', '例', '追', '较', '职', '属',
'渐', '左', '录', '丝', '牙', '党', '继', '托', '赶', '章', '智', '冲', '叶', '胡', '吉', '卖', '坚', '喝', '肉', '遗',
'救', '修', '松', '临', '藏', '担', '戏', '善', '卫', '药', '悲', '敢', '靠', '伊', '村', '戴', '词', '森', '耳', '差',
'短', '祖', '云', '规', '窗', '散', '迷', '油', '旧', '适', '乡', '架', '恩', '投', '弹', '铁', '博', '雷', '府', '压',
'超', '负', '勒', '杂', '醒', '洗', '采', '毫', '嘴', '毕', '九', '冰', '既', '状', '乱', '景', '席', '珍', '童', '顶',
'派', '素', '脱', '农', '疑', '练', '野', '按', '犯', '拍', '征', '坏', '骨', '余', '承', '置', '臓', '彩', '灯', '巨',
'琴', '免', '环', '姆', '暗', '换', '技', '翻', '束', '增', '忍', '餐', '洛', '塞', '缺', '忆', '判', '欧', '层', '付',
'阵', '玛', '批', '岛', '项', '狗', '休', '懂', '武', '革', '良', '恶', '恋', '委', '拥', '娜', '妙', '探', '呀', '营',
'退', '摇', '弄', '桌', '熟', '诺', '宣', '银', '势', '奖', '宫', '忽', '套', '康', '供', '优', '课', '鸟', '喊', '降',
'夏', '困', '刘', '罪', '亡', '鞋', '健', '模', '败', '伴', '守', '挥', '鲜', '财', '孤', '枪', '禁', '恐', '伙', '杰',
'迹', '妹', '藸', '遍', '盖', '副', '坦', '牌', '江', '顺', '秋', '萨', '菜', '划', '授', '归', '浪', '听', '凡', '预',
'奶', '雄', '升', '碃', '编', '典', '袋', '莱', '含', '盛', '济', '蒙', '棋', '端', '腿', '招', '释', '介', '烧', '误',
'乾', '坤']
def get_name():
x = random.randint(0, len(xing)-1)
m1 = random.randint(0, len(ming)-1)
m2 = random.randint(0, len(ming)-1)
if random.randint(0, 10) < 7:
return xing[x] + ming[m1] + ming[m2]
else:
return xing[x] + ming[m1]
def get_id():
year = str((2015 + random.randint(0, 4))).zfill(4)
college = str(random.randint(0, 15)).zfill(2)
major = str(random.randint(0, 40)).zfill(2)
sid = str(random.randint(0, 50)).zfill(2)
return year + college + major + sid
if __name__ == '__main__':
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "student.settings")
django.setup()
from api.models import Student
from users.models import CustomUser
CustomUser.objects.create_superuser(email="admin@a.cn", password="adminpass").save()
CustomUser.objects.create_user(email="user1@a.cn", password="userpass").save()
CustomUser.objects.create_user(email="user2@a.cn", password="userpass").save()
# Student.objects.all().delete()
# fake = faker.Faker('zh_CN')
for i in range(100):
s = Student.objects.create(name=get_name(), gender=random.choice(('男', '女')), number=get_id())
s.save()
| [
"shj.zhu@gmail.com"
] | shj.zhu@gmail.com |
c3da14028c2e9a9520a1c79f139fc8065132f62d | ccd6258ee2e199806ccc5dbb1063a00bbad7d52f | /SWIFT_XTS_API/pyswift-spacevr-stable/Scripts/spacevr/receive_file.py | 9103c08f4767851f8eb5f165029a15d06b169539 | [
"CC-BY-3.0",
"LicenseRef-scancode-public-domain",
"MIT"
] | permissive | SpaceVR-O1/OverviewOne | d87a7eb7d7554d0d0b2fe6c14c783db2d5887ee0 | 457cc7665854b0fdbf3f203c2f4a66218bbde28b | refs/heads/master | 2021-05-04T10:30:07.120653 | 2017-09-20T02:40:14 | 2017-09-20T02:40:14 | 53,979,990 | 8 | 3 | null | null | null | null | UTF-8 | Python | false | false | 1,988 | py | #!/usr/bin/env python
import os, sys, time, traceback, argparse
sys.path.insert(1, "../../Packages")
from swiftradio.clients import SwiftRadioEthernet
from swiftradio.clients import SwiftUDPClient
import swiftradio
__author__ = "Ethan Sharratt"
__email__ = "sharratt@tethers.com"
__company__ = "Tethers Unlimited Inc."
__status__ = "Development"
__date__ = "Late Updated: 08/02/16"
__doc__ = "Script for sending a file to the radio to be downlinked."
# Create command line parser
parser = argparse.ArgumentParser(prog = "SpaceVR Downlink File", description = __doc__, add_help=True)
parser.add_argument("-i", "--ip_addr", type=str, default="192.168.1.42", help="IPv4 address of the radio.")
parser.add_argument("-p", "--port", type=int, default=30000, help="Port number on the radio to forward data from.")
parser.add_argument("-b", "--bind_port", type=int, default=30500, help="Port number on the Flight Computer to forward data to.")
parser.add_argument("-f", "--filename", type=str, default="rxfile_{}.bin".format(time.strftime("%m%d%Y%H%M")), help="File to save received data to.")
parser.add_argument("-l", "--loop", type=int, default=0, help="Set to 1 to loop file.")
args = parser.parse_args()
SRX_PKTSIZE = 1024
if __name__ == "__main__":
try:
# Open the receive data file
try:
f = open("sampleData.txt", 'wb')
#f = open(args.filename, 'wb')
except:
print "Could not open {}, ensure the filepath is correct.".format(args.filename)
sys.exit(1)
# Instantiate a UDP connection to the uplink port.
try:
udp = SwiftUDPClient(args.ip_addr, args.bind_port, args.port)
udp.connect()
except:
print "Could not open a udp client for the provided IPv4 address and port."
sys.exit(1)
# Send file to radio.
bytes = 0
print "Press CTRL+C to stop receiving data."
while True:
data = udp.read(SRX_PKTSIZE)
if data:
f.write(''.join(data))
except KeyboardInterrupt:
f.close()
udp.disconnect()
except:
traceback.print_exc()
| [
"blaze@spacevr.co"
] | blaze@spacevr.co |
208410d3e358a10f563e5f103349fd22130cf43d | aae3d55b9d2004e04c5917a31408384a4269a425 | /astrodash/save_binned_templates_as_arrays.py | 46d645225b46ac3a4d4829533285989d5f651758 | [
"MIT"
] | permissive | daniel-muthukrishna/astrodash | 5b1ee330d2ae2d9cc43f5c52d0765359aa40673f | acc241ad73133894d93ef16733cf0f1fb4ca7b87 | refs/heads/master | 2023-04-04T03:27:07.480846 | 2023-03-22T17:02:08 | 2023-03-22T17:02:08 | 75,250,754 | 23 | 12 | MIT | 2019-04-26T15:27:30 | 2016-12-01T03:19:18 | Python | UTF-8 | Python | false | false | 4,018 | py | import numpy as np
import pickle
import os
from astrodash.create_arrays import AgeBinning
from astrodash.helpers import temp_list
from astrodash.combine_sn_and_host import BinTemplate
def create_sn_and_host_arrays(snTemplateDirectory, snTempFileList, galTemplateDirectory, galTempFileList, paramsFile):
snTemplates = {}
galTemplates = {}
snList = temp_list(snTempFileList)
galList = temp_list(galTempFileList)
with open(paramsFile, 'rb') as f:
pars = pickle.load(f)
w0, w1, nw, snTypes, galTypes, minAge, maxAge, ageBinSize = pars['w0'], pars['w1'], pars['nw'], pars['typeList'], \
pars['galTypeList'], pars['minAge'], pars['maxAge'], \
pars['ageBinSize']
ageBinning = AgeBinning(minAge, maxAge, ageBinSize)
ageLabels = ageBinning.age_labels()
# Create dictionary of dictionaries for type and age of SN
for snType in snTypes:
snTemplates[snType] = {}
for ageLabel in ageLabels:
snTemplates[snType][ageLabel] = {}
snTemplates[snType][ageLabel]['snInfo'] = []
snTemplates[snType][ageLabel]['names'] = []
for galType in galTypes:
galTemplates[galType] = {}
galTemplates[galType]['galInfo'] = []
galTemplates[galType]['names'] = []
for snFile in snList:
snBinTemplate = BinTemplate(snTemplateDirectory + snFile, 'sn', w0, w1, nw)
nAges = snBinTemplate.nCols
ages = snBinTemplate.ages
snType = snBinTemplate.tType
filename = snBinTemplate.filename
for ageIdx in range(nAges):
age = ages[ageIdx]
if minAge < age < maxAge:
ageBin = ageBinning.age_bin(age)
ageLabel = ageLabels[ageBin]
snInfo = snBinTemplate.bin_template(ageIdx)
snTemplates[snType][ageLabel]['snInfo'].append(snInfo)
snTemplates[snType][ageLabel]['names'].append("%s_%s" % (filename, age))
print("Reading {} {} out of {}".format(snFile, ageIdx, nAges))
for galFile in galList:
galBinTemplate = BinTemplate(galTemplateDirectory + galFile, 'gal', w0, w1, nw)
galType = galBinTemplate.tType
filename = galBinTemplate.filename
galInfo = galBinTemplate.bin_template()
galTemplates[galType]['galInfo'].append(galInfo)
galTemplates[galType]['names'].append(filename)
print("Reading {}".format(galFile))
# Convert lists in dictionaries to numpy arrays
for snType in snTypes:
for ageLabel in ageLabels:
snTemplates[snType][ageLabel]['snInfo'] = np.array(snTemplates[snType][ageLabel]['snInfo'])
snTemplates[snType][ageLabel]['names'] = np.array(snTemplates[snType][ageLabel]['names'])
for galType in galTypes:
galTemplates[galType]['galInfo'] = np.array(galTemplates[galType]['galInfo'])
galTemplates[galType]['names'] = np.array(galTemplates[galType]['names'])
return snTemplates, galTemplates
def save_templates():
scriptDirectory = os.path.dirname(os.path.abspath(__file__))
parameterFile = 'models_v06/models/zeroZ/training_params.pickle'
snTemplateDirectory = os.path.join(scriptDirectory, "../templates/training_set/")
snTempFileList = snTemplateDirectory + 'templist.txt'
galTemplateDirectory = os.path.join(scriptDirectory, "../templates/superfit_templates/gal/")
galTempFileList = galTemplateDirectory + 'gal.list'
saveFilename = 'models_v06/models/sn_and_host_templates.npz'
snTemplates, galTemplates = create_sn_and_host_arrays(snTemplateDirectory, snTempFileList, galTemplateDirectory,
galTempFileList, parameterFile)
np.savez_compressed(saveFilename, snTemplates=snTemplates, galTemplates=galTemplates)
return saveFilename
if __name__ == "__main__":
unCombinedTemplates = save_templates()
| [
"daniel.muthukrishna@gmail.com"
] | daniel.muthukrishna@gmail.com |
ff85b831b8dfe0bcee14cbae20135ccf3d9dfd32 | 5460691645f561c30681299cd132e8ae3ccfa847 | /utils/nms_wrapper.py | 6336096a4c8505c995584b648f11a5b40b28d60c | [] | no_license | praveena2j/Object-Detecion-Based-on-RCNN | f8689a284724e8eb70ebf27549bfb3e52095df8a | f3763c8c73829062b1abefeda3700735cf9ab669 | refs/heads/master | 2022-11-10T00:27:25.375987 | 2019-05-01T15:23:49 | 2019-05-01T15:23:49 | 183,558,249 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,170 | py | # Copyright (c) Microsoft. All rights reserved.
# Licensed under the MIT license. See LICENSE.md file in the project root
# for full license information.
# ==============================================================================
import numpy as np
#import pyximport; pyximport.install()
#from utils.nms.py_cpu_nms import py_cpu_nms
from utils.nms.cpu_nms import cpu_nms, cpu_soft_nms
from utils.cython_modules.cpu_nms import cpu_nms
try:
from utils.cython_modules.gpu_nms import gpu_nms
gpu_nms_available = True
except ImportError:
gpu_nms_available = False
def nms(dets, thresh, use_gpu_nms=False, device_id=0):
'''
Dispatches the call to either CPU or GPU NMS implementations
'''
if dets.shape[0] == 0:
return []
if gpu_nms_available and use_gpu_nms:
return gpu_nms(dets, thresh, device_id=device_id)
else:
return cpu_nms(dets, thresh)
def soft_nms(dets, sigma=0.5, Nt=0.8, threshold=0.001, method=1):
keep = cpu_soft_nms(np.ascontiguousarray(dets, dtype=np.float32),
np.float32(sigma), np.float32(Nt),
np.float32(threshold),
np.uint8(method))
return keep
def apply_nms_to_single_image_results(coords, labels, scores, use_gpu_nms, device_id, nms_threshold=0.5, conf_threshold=0.0):
'''
Applies nms to the results for a single image.
Args:
coords: (x_min, y_min, x_max, y_max) coordinates for n rois. shape = (n, 4)
labels: the predicted label per roi. shape = (n, 1)
scores: the predicted score per roi. shape = (n, 1)
nms_threshold: the threshold for discarding overlapping ROIs in nms
conf_threshold: a minimum value for the score of an ROI. ROIs with lower score will be discarded
Returns:
nmsKeepIndices - the indices of the ROIs to keep after nms
'''
# generate input for nms
allIndices = []
nmsRects = [[[]] for _ in range(max(labels) + 1)]
coordsWithScores = np.hstack((coords, np.array([scores]).T))
for i in range(max(labels) + 1):
indices = np.where(np.array(labels) == i)[0]
nmsRects[i][0] = coordsWithScores[indices,:]
allIndices.append(indices)
# call nms
_, nmsKeepIndicesList = apply_nms_to_test_set_results(nmsRects, nms_threshold, conf_threshold, use_gpu_nms, device_id)
# map back to original roi indices
nmsKeepIndices = []
for i in range(max(labels) + 1):
for keepIndex in nmsKeepIndicesList[i][0]:
nmsKeepIndices.append(allIndices[i][keepIndex]) # for keepIndex in nmsKeepIndicesList[i][0]]
assert (len(nmsKeepIndices) == len(set(nmsKeepIndices))) # check if no roi indices was added >1 times
return nmsKeepIndices
def apply_nms_to_test_set_results(all_boxes, nms_threshold, conf_threshold, use_gpu_nms, device_id):
'''
Applies nms to the results of multiple images.
Args:
all_boxes: shape of all_boxes: e.g. 21 classes x 4952 images x 58 rois x 5 coords+score
nms_threshold: the threshold for discarding overlapping ROIs in nms
conf_threshold: a minimum value for the score of an ROI. ROIs with lower score will be discarded
Returns:
nms_boxes - the reduced set of rois after nms
nmsKeepIndices - the indices of the ROIs to keep after nms
'''
num_classes = len(all_boxes)
num_images = len(all_boxes[0])
nms_boxes = [[[] for _ in range(num_images)]
for _ in range(num_classes)]
nms_keepIndices = [[[] for _ in range(num_images)]
for _ in range(num_classes)]
for cls_ind in range(num_classes):
#print(cls_ind)
for im_ind in range(num_images):
dets = all_boxes[cls_ind][im_ind]
if len(dets) == 0:
continue
if len(dets) == 1:
keep = [0]
else:
#print(nms_threshold)
keep = nms(dets.astype(np.float32), nms_threshold, use_gpu_nms, device_id)
#keep = soft_nms(dets.astype(np.float32), sigma=0.5, Nt=0.3, threshold=nms_threshold, method=1)
# also filter out low confidences
if conf_threshold > 0:
keep_conf_idx = np.where(dets[:, -1] > conf_threshold)
keep = list(set(keep_conf_idx[0]).intersection(keep))
if len(keep) == 0:
continue
nms_boxes[cls_ind][im_ind] = dets[keep, :].copy()
nms_keepIndices[cls_ind][im_ind] = keep
return nms_boxes, nms_keepIndices
| [
"noreply@github.com"
] | noreply@github.com |
c64b4f28b319eb751bfaa33a16a1ae60a775ece1 | 58d270726e4bbafebe7c3b4233939f1ff1a47864 | /Aquisition/MVC/object_delegate.py | 7f8696d69ed3c348bad2105cd0abe64045b2a15c | [] | no_license | zigorrom/PyFANS | 9c85f5e5c7ede9f48573aff3619f43810c7e2bb0 | 7cdfd13cff562393fdef6d78db57cf1672de477e | refs/heads/master | 2020-09-21T22:45:24.030663 | 2019-06-22T08:55:29 | 2019-06-22T08:55:29 | 67,825,964 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 36,675 | py | # -*- coding: utf-8 -*-
#pylint: disable-msg=C0103,C0301,W0511,C0111,C0321,W0614,W0401,W0611,W0212
"""
Created on 29.11.2010
@author: popravko
"""
from PyQt4 import QtGui, QtCore
from object_model_view_constants import ObjectModelViewConstants
from object_model_view_utils import ObjectModelViewUtils
from monument.db.data_pool import DataPool
from monument.ui.ui_utils import UiUtils
from monument.app_utils import file_icon
from dialog_editor import DialogEditorForm
from list_dialog_editor import ListDialogEditorForm
class TextEditor(QtGui.QTextEdit):
def keyPressEvent(self, e):
if (e.modifiers() == QtCore.Qt.ShiftModifier and e.key() == QtCore.Qt.Key_Return or
e.modifiers() == QtCore.Qt.ControlModifier and e.key() == QtCore.Qt.Key_Return):
e.ignore()
return QtGui.QTextEdit.keyPressEvent(self, QtGui.QKeyEvent(QtCore.QEvent.KeyPress, QtCore.Qt.Key_Return, QtCore.Qt.NoModifier))
if e.key() == QtCore.Qt.Key_Return:
self.clearFocus()
e.accept()
return
return QtGui.QTextEdit.keyPressEvent(self, e)
class TextBrowser(QtGui.QTextBrowser):
def __init__(self, parent = None):
QtGui.QTextBrowser.__init__(self, parent)
self.setTextInteractionFlags(QtCore.Qt.TextSelectableByMouse | QtCore.Qt.TextSelectableByKeyboard | QtCore.Qt.LinksAccessibleByMouse | QtCore.Qt.LinksAccessibleByKeyboard | QtCore.Qt.TextEditable)
self.setOpenExternalLinks(True)
self.setOpenLinks(True)
def keyPressEvent(self, e):
if (e.modifiers() == QtCore.Qt.ShiftModifier and e.key() == QtCore.Qt.Key_Return or
e.modifiers() == QtCore.Qt.ControlModifier and e.key() == QtCore.Qt.Key_Return):
e.ignore()
return QtGui.QTextEdit.keyPressEvent(self, QtGui.QKeyEvent(QtCore.QEvent.KeyPress, QtCore.Qt.Key_Return, QtCore.Qt.NoModifier))
if e.key() == QtCore.Qt.Key_Return:
self.clearFocus()
e.accept()
return
return QtGui.QTextEdit.keyPressEvent(self, e)
class ObjectDelegate(QtGui.QStyledItemDelegate):
"""
Делегат предназначен для редактирования и отображения иерархических объектов,
сопоставленных сущностям БД и полученных из sqlalchemy
"""
def __init__(self, parent = None, classifiers = None, additionalValues = None, typeEditor = None): #IGNORE:W0102
QtGui.QStyledItemDelegate.__init__(self, parent)
self._classifiers = {} if not classifiers else classifiers
self._additionalValues = {} if not additionalValues else additionalValues
self._typeEditor = typeEditor
self._notNullableColumns = []
self._helpers = {
None: NoneEditorHelper(),
ObjectModelViewConstants.TextColumnType: TextEditorHelper(),
ObjectModelViewConstants.HtmlTextColumnType: HtmlTextEditorHelper(),
ObjectModelViewConstants.RestrictedTextColumnType: RestrictedTextEditorHelper(),
ObjectModelViewConstants.ClassifierColumnType: ClassifierEditorHelper(self._classifiers, self._additionalValues),
ObjectModelViewConstants.ImageColumnType: ImageEditorHelper(),
ObjectModelViewConstants.NumberColumnType: NumberEditorHelper(),
ObjectModelViewConstants.DecimalColumnType: NumberEditorHelper(decimal = True),
ObjectModelViewConstants.DateColumnType: DateEditorHelper(),
ObjectModelViewConstants.BooleanFlagColumnType: BooleanFlagEditorHelper(),
ObjectModelViewConstants.CheckImageColumnType: CheckImageHelper(),
ObjectModelViewConstants.FileFormatIconColumnType: FileEditorHelper()
}
def editorEvent(self, event, model, option, ind):
return self._getHelper(ind).editor_event(self, event, model, option, ind)
def paint(self, painter, option, ind):
self._getHelper(ind).paint(self, painter, option, ind)
def setNotNullableColumns(self, columnList = None):
self._notNullableColumns = columnList if columnList is not None else []
def createEditor(self, parent, option, index):
return self._getHelper(index).create_editor(parent, option, index)
def setEditorData(self, editor, index):
return self._getHelper(index).set_editor_data(editor, index)
def setModelData(self, editor, model, index):
model.blockSignals(True)
res = self._getHelper(index).set_model_data(editor, model, index)
if res is not None and not res:
model.blockSignals(False)
return
if not index.isValid():
model.blockSignals(False)
return
objIndex = index
obj = model.data(objIndex, ObjectModelViewConstants.ItemObjectRole).toPyObject()
if obj is None:
objIndex = index.sibling(index.row(), 0)
obj = model.data(objIndex, ObjectModelViewConstants.ItemObjectRole).toPyObject()
if obj is None:
model.blockSignals(False)
return
# изменившиеся данные
attrVal = model.data(index, ObjectModelViewConstants.ValueRole)
valType = attrVal.type()
attrVal = attrVal.toPyObject()
if attrVal is not None:
if valType == QtCore.QVariant.String:
attrVal = attrVal.toLocal8Bit().data()
# dirty workaround for QByteArray handling
elif hasattr(attrVal, 'sanctuary'):
attrVal = attrVal.sanctuary.data()
elif valType == QtCore.QVariant.ByteArray:
attrVal = attrVal.data()
attrName = str(model.data(index, ObjectModelViewConstants.BindingRole).toPyObject())
ObjectModelViewUtils.set_attribute(obj, attrName, attrVal)
model.blockSignals(False)
model.setData(objIndex, obj, ObjectModelViewConstants.ItemObjectRole)
def sizeHint(self, option, index):
return self._getHelper(index).size_hint(self, option, index)
def _getHelper(self, index):
editable = index.model().data(index, ObjectModelViewConstants.EditableRole).toPyObject()
columnType = index.model().data(index, ObjectModelViewConstants.ColumnTypeRole).toPyObject()
funcRoleValue = index.model().data(index, ObjectModelViewConstants.UserFuncRole).toPyObject()
if self._typeEditor:
columnType = self._typeEditor
editable = True
if funcRoleValue:
return NoneEditorHelper(funcRoleValue)
simple_mapped_helper = columnType in self._helpers
if simple_mapped_helper:
return self._helpers[columnType]
elif columnType == ObjectModelViewConstants.DialogColumnType:
return DialogEditorHelper()
elif columnType == ObjectModelViewConstants.ListDialogColumnType:
return ListDialogEditorHelper(not editable)
else:
return self._helpers[None]
class NoneEditorHelper(object):
    #pylint: disable-msg=C0103,C0301,W0511,C0111,W0613
    """Fallback editor helper.

    Painting, event handling and sizing fall through to the stock
    QStyledItemDelegate behaviour; no inline editor widget is ever created.
    An optional ``user_func`` callable may be supplied and is invoked in
    place of editor creation (used for action-style cells).
    """
    def __init__(self, user_func = None):
        self._user_func = user_func

    def editor_event(self, delegate, event, model, option, ind):
        """Forward the event to the base delegate implementation."""
        return QtGui.QStyledItemDelegate.editorEvent(delegate, event, model, option, ind)

    def paint(self, delegate, painter, option, ind):
        """Render with the stock delegate painting."""
        QtGui.QStyledItemDelegate.paint(delegate, painter, option, ind)

    def size_hint(self, delegate, option, index):
        """Use the stock delegate size hint."""
        return QtGui.QStyledItemDelegate.sizeHint(delegate, option, index)

    def create_editor(self, parent, option, index): #IGNORE:W0613
        """Run the user callback (if any) and report that no editor exists."""
        if self._user_func:
            self._user_func()
        return None

    def set_editor_data(self, editor, index):
        """Nothing to load: there is no editor widget."""
        pass

    def set_model_data(self, editor, model, index):
        """Report that no data was written back to the model."""
        return False
class CheckImageHelper(NoneEditorHelper):
    #pylint: disable-msg=C0103 ,C0301,W0511,C0111,W0201
    """Helper that renders a boolean cell value as a centred check box.

    Editing behaviour is inherited from NoneEditorHelper (no inline editor);
    only the painting is customised.
    """
    def paint(self, delegate, painter, option, ind):
        """Draw a check box reflecting ValueRole; enabled state follows EditableRole."""
        checked = ind.model().data(ind, ObjectModelViewConstants.ValueRole).toPyObject()
        editable = ind.model().data(ind, ObjectModelViewConstants.EditableRole).toPyObject()
        check_box_style_option = QtGui.QStyleOptionButton()
        if editable:
            check_box_style_option.state |= QtGui.QStyle.State_Enabled
        if checked:
            check_box_style_option.state |= QtGui.QStyle.State_On
        else:
            check_box_style_option.state |= QtGui.QStyle.State_Off
        # Fix: use this class's own rect helper. The original referenced
        # BooleanFlagEditorHelper.get_check_box_rect, which left the local
        # classmethod below as dead code and coupled this helper to a sibling.
        check_box_style_option.rect = self.get_check_box_rect(option)
        QtGui.QApplication.style().drawControl(QtGui.QStyle.CE_CheckBox, check_box_style_option, painter)

    @classmethod
    def get_check_box_rect(cls, view_item_style_options):
        """Return a rect centring a check-box indicator inside the item rect."""
        check_box_style_options = QtGui.QStyleOptionButton()
        check_box_rect = QtGui.QApplication.style().subElementRect(
            QtGui.QStyle.SE_CheckBoxIndicator,
            check_box_style_options)
        check_box_point = QtCore.QPoint(
            view_item_style_options.rect.x() +
            view_item_style_options.rect.width() / 2 -
            check_box_rect.width() / 2,
            view_item_style_options.rect.y() +
            view_item_style_options.rect.height() / 2 -
            check_box_rect.height() / 2)
        return QtCore.QRect(check_box_point, check_box_rect.size())
class ImageEditorHelper(NoneEditorHelper):
    """Delegate helper that lets the user pick an image file and stores the
    raw bytes, the format id, and two scaled previews on the bound object."""
    #pylint: disable-msg=C0103,C0301,W0511,C0111,W0201,W0212,W0622,C0321
    # Fallback pixmap used when the chosen file cannot be loaded as an image.
    PreviewNotAvailableImage = QtGui.QPixmap(':main/preview_is_not_available.png')
    class ImageDialog(QtGui.QFileDialog):
        changed = QtCore.pyqtSignal(object)  # emitted with the image object on accept
        closed = QtCore.pyqtSignal()  # emitted when the dialog is dismissed
        def __init__(self, heritageImage, parent = None):
            decoder = QtCore.QString.fromUtf8
            cap = decoder('Выберите изображение')
            dir = decoder('')
            fltr = decoder(
                'Все поддерживаемые форматы (*.jpg *.jpeg *.bmp *.tiff *.tif *.raw);; \
                Изображения JPEG (*.jpg *.jpeg);; \
                Изображения TIFF (*.tiff *.tif);; \
                Изображения BMP (*.bmp);; \
                Необработанные файлы в формате съёмки (*.raw)')
            QtGui.QFileDialog.__init__(self, parent, cap, dir, fltr)
            self._image = heritageImage
        def show(self):
            # Run modally; on accept remember the chosen path on the image object.
            self._accepted = False
            res = self.exec_()
            if res == QtGui.QDialog.Accepted:
                self._accepted = True
                res = True
                self._image.text = self.selectedFiles()[0]
                self.changed.emit(self._image)
                self.accept()
            else:
                self.closed.emit()
                self.reject()
            return res
    def create_editor(self, parent, option, index): #IGNORE:W0613
        """Open the image dialog bound to the row's object (column 0)."""
        item = index.model()
        objIndex = index.sibling(index.row(), 0)
        val = item.data(objIndex, ObjectModelViewConstants.ItemObjectRole).toPyObject()
        val.text = ''
        return DialogEditorForm(ImageEditorHelper.ImageDialog, val, 'text', True, parent, False)
    def set_model_data(self, editor, model, index): #IGNORE:W0613
        """Read the chosen file and push bytes/format/previews into model & object.

        Returns False on rejection or unsupported extension.
        NOTE(review): falls through without an explicit return on success
        (implicitly None/falsy) unlike FileEditorHelper which returns True —
        confirm the delegate treats that as intended.
        """
        if editor.dialogResult() == QtGui.QDialog.Rejected: return False
        decoder = lambda qstr: qstr.toPyObject().toLocal8Bit().data() if qstr.toPyObject() is not None else None
        item = index.model()
        # Names of the object's attributes that receive the image payload.
        imageDataAttr = decoder(item.data(index, ObjectModelViewConstants.ImageDataRole))
        imageFormatAttr = decoder(item.data(index, ObjectModelViewConstants.ImageFormatAttributeRole))
        imagePreviewAttr = decoder(item.data(index, ObjectModelViewConstants.ImagePreviewAttributeRole))
        imageSmallPreviewAttr = decoder(item.data(index, ObjectModelViewConstants.ImageSmallPreviewAttributeRole))
        indexObj = index.sibling(index.row(), 0)
        obj = item.data(indexObj, ObjectModelViewConstants.ItemObjectRole).toPyObject()
        text = editor.text()
        fi = QtCore.QFileInfo(text)
        format = fi.suffix()
        format = format.toLocal8Bit().data()
        # The extension must match a known document format; its id is stored.
        dfs = [df for df in DataPool.document_formats.items if df.ext.lower() == format.lower()]
        if len(dfs):
            ObjectModelViewUtils.set_attribute(obj, imageFormatAttr, dfs[0].id)
        else:
            return False
        f = QtCore.QFile(text)
        f.open(QtCore.QIODevice.ReadWrite)
        val = f.readAll()
        f.close()  # fix: the file handle was previously never closed (leak)
        # Wrap the bytes so the model stores an opaque handle in ValueRole.
        class wrapper():
            def __init__(self, data):
                self.sanctuary = data
        wrapped_val = wrapper(val)
        ObjectModelViewUtils.set_attribute(obj, imageDataAttr, val.data())
        item.setData(index, wrapped_val, ObjectModelViewConstants.ValueRole)
        pixmap = QtGui.QPixmap()
        ok = pixmap.load(text)
        if ok:
            # Large preview, scaled to the standard preview width.
            pixmap = pixmap.scaled(
                UiUtils.compute_new_dimensions(
                    pixmap,
                    ObjectModelViewConstants.PreviewSize.width()),
                QtCore.Qt.KeepAspectRatio,
                QtCore.Qt.SmoothTransformation)
            ba = QtCore.QByteArray()
            buffer = QtCore.QBuffer(ba)
            buffer.open(QtCore.QIODevice.WriteOnly)
            pixmap.save(buffer, format)
            ObjectModelViewUtils.set_attribute(obj, imagePreviewAttr, ba.data())
            # Small (grid) preview, scaled down from the large one.
            pixmap = pixmap.scaled(
                UiUtils.compute_new_dimensions(
                    pixmap,
                    ObjectModelViewConstants.PreviewInGridSize.width()),
                QtCore.Qt.KeepAspectRatio,
                QtCore.Qt.SmoothTransformation)
            ba = QtCore.QByteArray()
            buffer = QtCore.QBuffer(ba)
            buffer.open(QtCore.QIODevice.WriteOnly)
            pixmap.save(buffer, format)
            ObjectModelViewUtils.set_attribute(obj, imageSmallPreviewAttr, ba.data())
        else:
            # Could not decode the image: store the placeholder as both previews.
            pixmap = ImageEditorHelper.PreviewNotAvailableImage
            ba = QtCore.QByteArray()
            buffer = QtCore.QBuffer(ba)
            buffer.open(QtCore.QIODevice.WriteOnly)
            pixmap.save(buffer, 'PNG')
            ObjectModelViewUtils.set_attribute(obj, imagePreviewAttr, ba.data())
            ObjectModelViewUtils.set_attribute(obj, imageSmallPreviewAttr, ba.data())
        item.setData(index, pixmap, QtCore.Qt.DecorationRole)
class FileEditorHelper(NoneEditorHelper):
    """Delegate helper that edits a binary-file attribute via a file dialog.

    Reads the chosen file into the bound object's data attribute, resolves the
    file format against an optional list of acceptable formats, and stores a
    file-type icon as the cell decoration.
    """
    #pylint: disable-msg=C0103,C0301,W0511,C0111,W0201,W0212,W0622,C0321
    class FileDialog(QtGui.QFileDialog):
        changed = QtCore.pyqtSignal(object)  # emitted with the bound object on accept
        closed = QtCore.pyqtSignal()  # emitted when the dialog is dismissed
        def __init__(self, binarySemantic, parent = None, f = '*.*'):
            decoder = QtCore.QString.fromUtf8
            cap = decoder('Выберите файл')
            dir = decoder('')
            fltr = QtCore.QString.fromLocal8Bit(f)
            QtGui.QFileDialog.__init__(self, parent, cap, dir, fltr)
            self._binarySemantic = binarySemantic
        def show(self):
            # Run modally; on accept remember the chosen path on the bound object.
            self._accepted = False
            res = self.exec_()
            if res == QtGui.QDialog.Accepted:
                self._accepted = True
                res = True
                self._binarySemantic.text = self.selectedFiles()[0].toLocal8Bit().data()
                self.changed.emit(self._binarySemantic)
                self.accept()
            else:
                self.closed.emit()
                self.reject()
            return res
    def paint(self, delegate, painter, option, ind):
        # Center the display text; otherwise default styled-item painting.
        option.displayAlignment = QtCore.Qt.AlignCenter
        QtGui.QStyledItemDelegate.paint(delegate, painter, option, ind)
#        if ind.model().data(ind, ObjectModelViewConstants.ValueRole).toPyObject():
#            pixmap = ind.model().data(ind, QtCore.Qt.DecorationRole)
#            pixmap = pixmap.toPyObject()
#            if pixmap is not None:
#                QtGui.QItemDelegate.drawDecoration(delegate, painter, option, option.rect, pixmap)
    def create_editor(self, parent, option, index): #IGNORE:W0613
        """Build a file dialog, optionally restricted to acceptable formats."""
        item = index.model()
        objIndex = index.sibling(index.row(), 0)
        val = item.data(objIndex, ObjectModelViewConstants.ItemObjectRole).toPyObject()
        val.text = ''
        acceptable_formats = item.data(index, ObjectModelViewConstants.FileAcceptableFormatsRole).toPyObject()
        if acceptable_formats is not None:
            # Turn the format objects into a Qt name-filter string.
            acceptable_formats = [f.name + ' (*.' + f.ext + ')' for f in acceptable_formats]
            acceptable_formats = ';;'.join(acceptable_formats)
        return DialogEditorForm(FileEditorHelper.FileDialog, val, 'text', True, parent, False, acceptable_formats)
    def set_model_data(self, editor, model, index):
        """Validate the chosen file's format, store its bytes, set an icon.

        Returns True on success, False on rejection/empty path/bad format.
        """
        text = QtCore.QString.fromLocal8Bit(editor.text())
        if editor.dialogResult() == QtGui.QDialog.Rejected: return False
        decoder = lambda qstr: qstr.toPyObject().toLocal8Bit().data() if qstr.toPyObject() is not None else None
        item = index.model()
        # Names of the object's attributes that receive data and format.
        fileDataAttr = decoder(item.data(index, ObjectModelViewConstants.FileDataAttributeRole))
        fileFormatAttr = decoder(item.data(index, ObjectModelViewConstants.FileFormatAttributeRole))
        indexObj = index.sibling(index.row(), 0)
        obj = item.data(indexObj, ObjectModelViewConstants.ItemObjectRole).toPyObject()
        assert ObjectModelViewUtils.test_attribute(obj, fileDataAttr)
        assert ObjectModelViewUtils.test_attribute(obj, fileFormatAttr)
        if not text:
            return False
        fi = QtCore.QFileInfo(text)
        fileExt = fi.suffix()
        acceptableFormats = item.data(index, ObjectModelViewConstants.FileAcceptableFormatsRole)
        acceptableFormats = acceptableFormats.toPyObject()
        if acceptableFormats is not None:
            # Match the file's extension (case-insensitive) against the
            # acceptable format entities; store the matching entity.
            found = False
            fileFormatEntityExtAttribute = decoder(item.data(index, ObjectModelViewConstants.FileFormatEntityExtAttributeRole))
            assert fileFormatEntityExtAttribute is not None
            for format in acceptableFormats:
                assert ObjectModelViewUtils.test_attribute(format, fileFormatEntityExtAttribute)
                ext = ObjectModelViewUtils.get_attribute(format, fileFormatEntityExtAttribute)
                if ext.upper() == fileExt.toUtf8().data().upper():
                    ObjectModelViewUtils.set_attribute(obj, fileFormatAttr, format)
                    found = True
                    break
            if not found:
                codec = QtCore.QString.fromUtf8
                UiUtils.show_error_without_parent(codec('Ошибка!'), codec('Формат данного файла не поддерживается'))
                return False
        else:
            # No restriction: store the raw extension string as the format.
            ObjectModelViewUtils.set_attribute(obj, fileFormatAttr, fileExt.toLocal8Bit().data())
        f = QtCore.QFile(text)
        f.open(QtCore.QIODevice.ReadWrite)
        ba = f.readAll()
        f.close()
        # Wrap the bytes so the model stores an opaque handle in ValueRole.
        class wrapper():
            def __init__(self, data):
                self.sanctuary = data
        wrapped_val = wrapper(ba)
        ObjectModelViewUtils.set_attribute(obj, fileDataAttr, ba.data())
        item.setData(index, wrapped_val, ObjectModelViewConstants.ValueRole)
        pixmap = file_icon('.' + fileExt.toUtf8().data())
        item.setData(index, pixmap, QtCore.Qt.DecorationRole)
        return True
class TextEditorHelper(NoneEditorHelper):
    """Plain-text delegate helper backed by a TextEditor widget."""
    #pylint: disable-msg=C0103,C0301,W0511,C0111
    def create_editor(self, parent, option, index): #IGNORE:W0613
        return TextEditor(parent)
    def set_model_data(self, editor, model, index):
        """Push the editor's text into the model; report whether it changed."""
        text = editor.toPlainText()
        previous = index.model().data(index, ObjectModelViewConstants.ValueRole).toPyObject()
        model.setData(index, text, QtCore.Qt.EditRole)
        model.setData(index, text, QtCore.Qt.ToolTipRole)
        model.setData(index, text, ObjectModelViewConstants.ValueRole)
        return previous != text
    def set_editor_data(self, editor, index):
        """Load the stored value (empty string for None) and select it all."""
        stored = index.model().data(index, ObjectModelViewConstants.ValueRole).toPyObject()
        editor.setPlainText('' if stored is None else stored)
        editor.selectAll()
class HtmlTextEditorHelper(NoneEditorHelper):
    """Delegate helper that renders and edits HTML-formatted cell text."""
    #pylint: disable-msg=C0103,C0301,W0511,C0111
    def paint(self, delegate, painter, option, index):
        # Render the cell text as rich text via QTextDocument: draw the item
        # background with empty text, then paint the document on top.
        options = QtGui.QStyleOptionViewItemV4(option)
        delegate.initStyleOption(options,index)
        style = QtGui.QApplication.style() if options.widget is None else options.widget.style()
        doc = QtGui.QTextDocument()
        doc.setHtml(options.text)
        options.text = ""
        style.drawControl(QtGui.QStyle.CE_ItemViewItem, options, painter)
        ctx = QtGui.QAbstractTextDocumentLayout.PaintContext()
        # Highlighting text if item is selected
        #if (optionV4.state & QStyle::State_Selected)
        #ctx.palette.setColor(QPalette::Text, optionV4.palette.color(QPalette::Active, QPalette::HighlightedText));
        textRect = style.subElementRect(QtGui.QStyle.SE_ItemViewItemText, options)
        painter.save()
        painter.translate(textRect.topLeft())
        painter.setClipRect(textRect.translated(-textRect.topLeft()))
        doc.documentLayout().draw(painter, ctx)
        painter.restore()
    def size_hint(self, delegate, option, index):
        # Size the cell from the laid-out HTML document.
        options = QtGui.QStyleOptionViewItemV4(option)
        delegate.initStyleOption(options,index)
        doc = QtGui.QTextDocument()
        doc.setHtml(options.text)
        doc.setTextWidth(options.rect.width())
        return QtCore.QSize(doc.idealWidth(), doc.size().height())
    def create_editor(self, parent, option, index): #IGNORE:W0613
        te = TextBrowser(parent)
        return te
    def set_model_data(self, editor, model, index):
        # Store the editor's HTML into edit/tooltip/value roles; report change.
        new_value = editor.toHtml()
        model_data = index.model().data(index, ObjectModelViewConstants.ValueRole).toPyObject()
        changed = model_data != new_value
        model.setData(index, new_value, QtCore.Qt.EditRole)
        model.setData(index, new_value, QtCore.Qt.ToolTipRole)
        model.setData(index, new_value, ObjectModelViewConstants.ValueRole)
        return changed
    def set_editor_data(self, editor, index):
        # Load the stored HTML (empty for None) and select it all.
        model_data = index.model().data(index, ObjectModelViewConstants.ValueRole).toPyObject()
        editor.setHtml(model_data if model_data is not None else '')
        editor.selectAll()
class BooleanFlagEditorHelper(NoneEditorHelper):
    """Delegate helper that renders a boolean cell as a toggleable check box.

    Toggling writes the negated value back onto the bound object's attribute
    (named by BindingRole) as well as into the model.
    """
    #pylint: disable-msg=C0103,C0301,W0511,C0111,W0613
    def paint(self, delegate, painter, option, ind):
        # Draw a centered native check box reflecting the cell's ValueRole.
        checked = ind.model().data(ind, ObjectModelViewConstants.ValueRole).toPyObject()
        editable = ind.model().data(ind, ObjectModelViewConstants.EditableRole).toPyObject()
        check_box_style_option = QtGui.QStyleOptionButton()
        if editable:
            check_box_style_option.state |= QtGui.QStyle.State_Enabled
        if checked:
            check_box_style_option.state |= QtGui.QStyle.State_On
        else:
            check_box_style_option.state |= QtGui.QStyle.State_Off
        check_box_style_option.rect = BooleanFlagEditorHelper.get_check_box_rect(option)
        QtGui.QApplication.style().drawControl(QtGui.QStyle.CE_CheckBox, check_box_style_option, painter)
    @classmethod
    def get_check_box_rect(cls, view_item_style_options):
        # Center a native check-box indicator inside the cell rectangle.
        check_box_style_options = QtGui.QStyleOptionButton()
        check_box_rect = QtGui.QApplication.style().subElementRect(
            QtGui.QStyle.SE_CheckBoxIndicator,
            check_box_style_options)
        check_box_point = QtCore.QPoint(
            view_item_style_options.rect.x() +
            view_item_style_options.rect.width() / 2 -
            check_box_rect.width() / 2,
            view_item_style_options.rect.y() +
            view_item_style_options.rect.height() / 2 -
            check_box_rect.height() / 2)
        return QtCore.QRect(check_box_point, check_box_rect.size())
    def editor_event(self, delegate, event, model, option, ind):
        """Toggle the flag on left-click inside the box or Space/Select key."""
        editable = ind.model().data(ind, ObjectModelViewConstants.EditableRole).toPyObject()
        if not editable: return False
        if event.type() in (QtCore.QEvent.MouseButtonRelease, QtCore.QEvent.MouseButtonDblClick):
            if (event.button() != QtCore.Qt.LeftButton or
                not BooleanFlagEditorHelper.get_check_box_rect(option).contains(event.pos())):
                return False
        elif event.type() == QtCore.QEvent.KeyPress:
            if event.key() not in (QtCore.Qt.Key_Space, QtCore.Qt.Key_Select):
                return False
        else:
            return False
        if not ind.isValid():
            # NOTE(review): unblocks signals that were never blocked here —
            # presumably defensive; confirm intent.
            model.blockSignals(False)
            return False
        # Block model signals while mutating value + object roles, then emit
        # a single object update via the final setData below.
        model.blockSignals(True)
        obj_index = ind
        obj = model.data(obj_index, ObjectModelViewConstants.ItemObjectRole).toPyObject()
        if obj is None:
            # Fall back to the row's object stored in column 0.
            obj_index = ind.sibling(ind.row(), 0)
            obj = model.data(obj_index, ObjectModelViewConstants.ItemObjectRole).toPyObject()
            if obj is None:
                model.blockSignals(False)
                return False
        # The changed data
        attr_val = model.data(ind, ObjectModelViewConstants.ValueRole).toPyObject()
        if attr_val is None:
            # NOTE(review): early return leaves signals blocked — confirm.
            return False
        attr_name = str(model.data(ind, ObjectModelViewConstants.BindingRole).toPyObject())
        ObjectModelViewUtils.set_attribute(obj, attr_name, not attr_val)
        model.setData(ind, not attr_val, ObjectModelViewConstants.ValueRole)
        model.blockSignals(False)
        return model.setData(obj_index, obj, ObjectModelViewConstants.ItemObjectRole)
class RestrictedTextEditorHelper(TextEditorHelper):
    """Text helper that rejects blank input when the cell is marked not-null."""
    #pylint: disable-msg=C0103,C0301,W0511,C0111
    def set_model_data(self, editor, model, index):
        """Store trimmed text; refuse blanks for not-null cells."""
        required = index.model().data(index, ObjectModelViewConstants.notNullRole).toBool()
        text = editor.toPlainText().trimmed()
        if required and text.simplified().isEmpty():
            return False  # blank value not allowed for a not-null cell
        previous = index.model().data(index, ObjectModelViewConstants.ValueRole).toPyObject()
        for role in (QtCore.Qt.EditRole, QtCore.Qt.ToolTipRole, ObjectModelViewConstants.ValueRole):
            model.setData(index, text, role)
        return previous != text
class NumberEditorHelper(NoneEditorHelper):
    """Spin-box helper for integer (default) or decimal numeric cells."""
    #pylint: disable-msg=C0103,C0301,W0511,C0111,W0702
    def __init__(self, decimal = False):
        NoneEditorHelper.__init__(self)
        # True -> QDoubleSpinBox editor, False -> QSpinBox editor.
        self._decimal = decimal
    def create_editor(self, parent, option, index): #IGNORE:W0613
        """Create a spin box in [0, 500000] matching the decimal flag."""
        if self._decimal:
            spin = QtGui.QDoubleSpinBox(parent)
            spin.setSingleStep(0.1)
            spin.setDecimals(5)
        else:
            spin = QtGui.QSpinBox(parent)
            spin.setSingleStep(1)
        spin.setMinimum(0)
        spin.setMaximum(500000)
        return spin
    def set_model_data(self, editor, model, index):
        """Write the spin-box value into edit (as text) and value roles."""
        value = editor.value()
        model.setData(index, str(value), QtCore.Qt.EditRole)
        model.setData(index, value, ObjectModelViewConstants.ValueRole)
        return True
    def set_editor_data(self, editor, index):
        """Load the stored value into the editor (0 when unset)."""
        stored = index.model().data(index, ObjectModelViewConstants.ValueRole).toPyObject()
        editor.setValue(stored or 0)
class DateEditorHelper(NoneEditorHelper):
    """Date-edit helper; the editor's minimum date is the "no date" sentinel."""
    #pylint: disable-msg=C0103,C0301,W0511,C0111
    def create_editor(self, parent, option, index): #IGNORE:W0613
        editor = QtGui.QDateEdit(parent)
        UiUtils.setup_date_edit(editor)
        return editor
    def set_model_data(self, editor, model, index):
        """Store the picked date, or clear the cell if it is the sentinel."""
        picked = editor.date()
        if picked == editor.minimumDate():
            # Minimum date means "not set": clear both roles.
            model.setData(index, '', QtCore.Qt.EditRole)
            model.setData(index, None, ObjectModelViewConstants.ValueRole)
        else:
            model.setData(index, picked.toString("dd.MM.yyyy"), QtCore.Qt.EditRole)
            model.setData(index, picked.toPyDate(), ObjectModelViewConstants.ValueRole)
        return True
    def set_editor_data(self, editor, index):
        """Load the stored date, or the sentinel minimum when unset."""
        stored = index.model().data(index, ObjectModelViewConstants.ValueRole).toPyObject()
        editor.setDate(editor.minimumDate() if stored is None else QtCore.QDate(stored))
class ClassifierEditorHelper(NoneEditorHelper):
    """Combo-box helper whose choices come from a named classifier plus a
    per-classifier list of additional free-form items."""
    #pylint: disable-msg=C0103,C0301,W0511,C0111,C0321
    def __init__(self, classifiers, additional_items):
        # Both arguments are dicts keyed by classifier name.
        NoneEditorHelper.__init__(self)
        self._classifiers = classifiers
        self._additional_items = additional_items
    def create_editor(self, parent, option, index): #IGNORE:W0613
        """Build an editable combo with additional items first, then items."""
        # The classifier to use is named by the cell's TypeNameRole.
        classifier_name = index.model().data(index, ObjectModelViewConstants.TypeNameRole).toPyObject().toUtf8().data()
        assert classifier_name in self._classifiers and classifier_name in self._additional_items
        items = self._classifiers[classifier_name]
        additional_items = self._additional_items[classifier_name]
        decoder_from_local = QtCore.QString.fromLocal8Bit
        decoder_from_utf = QtCore.QString.fromUtf8
        # Attribute of a classifier item displayed as the combo text.
        ref_col = index.model().data(index, ObjectModelViewConstants.ReferenceAttributeRole).toPyObject().toUtf8().data()
        combo = QtGui.QComboBox(parent)
        combo.view().setSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Maximum)
        combo.view().setTextElideMode(QtCore.Qt.ElideRight)
        combo.setEditable(True)
        combo.setInsertPolicy(QtGui.QComboBox.NoInsert)
        # Additional items carry non-positive pseudo ids (-i) as UserRole data.
        # NOTE(review): -0 == 0 for the first additional item — confirm real
        # classifier ids never equal 0, otherwise they collide.
        for i in range(len(additional_items)):
            combo.addItem(decoder_from_utf(additional_items[i]), -i)
        for i in items:
            assert hasattr(i, 'id') and hasattr(i, ref_col)
            combo.addItem(decoder_from_local(i.__getattribute__(ref_col)), i.id)
        return combo
    def set_model_data(self, editor, model, index):
        """Resolve the selection to a classifier item or an additional item.

        Classifier hits store the item in ValueRole; additional hits clear
        the value. Returns False when nothing is selected or resolvable.
        """
        classifier_name = index.model().data(index, ObjectModelViewConstants.TypeNameRole).toPyObject().toUtf8().data()
        assert classifier_name in self._classifiers and classifier_name in self._additional_items
        classifier = self._classifiers[classifier_name]
        additional_items = self._additional_items[classifier_name]
        current_index = editor.currentIndex()
        if current_index == -1: return False
        new_value = editor.itemData(current_index, QtCore.Qt.EditRole).toPyObject().toLocal8Bit().data()
        new_index = editor.itemData(current_index, QtCore.Qt.UserRole).toPyObject()
        def find_in_classifier(ind):
            # Look the id up among the classifier items.
            for i in classifier:
                assert hasattr(i, 'id')
                if i.id == ind: return i #IGNORE:C0321
            return None
        def find_in_additional(ind):
            # Look the display text up among the additional items.
            if ind in additional_items:
                return additional_items[additional_items.index(ind)]
            return None
        decoder = QtCore.QString.fromLocal8Bit
        new_classifier_value = find_in_classifier(new_index)
        new_additional_value = None
        if new_classifier_value is None:
            new_additional_value = find_in_additional(str(decoder(new_value).toUtf8().data()))
        if new_classifier_value is not None:
            model.setData(index, new_classifier_value, ObjectModelViewConstants.ValueRole)
            model.setData(index, decoder(new_value), QtCore.Qt.EditRole)
            model.setData(index, new_index, ObjectModelViewConstants.ClassifierIndexRole)
        elif new_additional_value is not None:
            # Additional item: remember only the pseudo index, clear the value.
            model.setData(index, None, QtCore.Qt.EditRole)
            model.setData(index, new_index, ObjectModelViewConstants.ClassifierIndexRole)
            model.setData(index, None, ObjectModelViewConstants.ValueRole)
        else: return False
        return True
    def set_editor_data(self, editor, index):
        # Re-select the previously stored classifier index, if still present.
        current_index = index.model().data(index, ObjectModelViewConstants.ClassifierIndexRole).toPyObject()
        editor_index = editor.findData(current_index)
        if editor_index != -1:
            editor.setCurrentIndex(editor_index)
class DialogEditorHelper(NoneEditorHelper):
    """Helper that edits a cell through a dedicated dialog form."""
    #pylint: disable-msg=C0103,C0301,W0511,C0111
    def __init__(self, read_only = True):
        """Constructor.

        @param read_only: whether direct editing of the text field is disabled
        """
        NoneEditorHelper.__init__(self)
        self._read_only = read_only
    def set_editable(self, editable):
        self._read_only = not editable
    def create_editor(self, parent, option, index): #IGNORE:W0613
        """Build a DialogEditorForm from the dialog-related model roles."""
        def to_local_str(qvar):
            payload = qvar.toPyObject()
            return payload.toLocal8Bit().data() if payload is not None else None
        mdl = index.model()
        dialog_type = mdl.data(index, ObjectModelViewConstants.DialogTypeRole).toPyObject()
        ref_col = to_local_str(mdl.data(index, ObjectModelViewConstants.ReferenceAttributeRole))
        bound_object = mdl.data(index, ObjectModelViewConstants.ValueRole).toPyObject()
        view_mode_args = mdl.data(index, ObjectModelViewConstants.DialogViewModeArgsRole).toPyObject()
        return DialogEditorForm(dialog_type, bound_object, ref_col, self._read_only, parent, viewModeArgs = view_mode_args)
    def set_model_data(self, editor, model, index): #IGNORE:W0613
        """Store the dialog's text and result object back into the model."""
        decoder = QtCore.QString.fromLocal8Bit
        dialog_result = editor.result()
        dialog_text = editor.text()
        mdl = index.model()
        mdl.setData(index, decoder(dialog_text), QtCore.Qt.EditRole)
        mdl.setData(index, decoder(dialog_text), QtCore.Qt.ToolTipRole)
        mdl.setData(index, dialog_result, ObjectModelViewConstants.ValueRole)
        return editor.dialogResult()
    def set_editor_data(self, editor, index):
        pass
class ListDialogEditorHelper(NoneEditorHelper):
    """Helper that edits a cell through a list-selection dialog form."""
    #pylint: disable-msg=C0103,C0301,W0511,C0111,W0142
    def __init__(self, readOnly = False):
        """
        Constructor
        @param readOnly: whether direct editing of the text field is disabled
        """
        NoneEditorHelper.__init__(self)
        self._readOnly = readOnly
    def create_editor(self, parent, option, index): #IGNORE:W0613
        # Assemble the list dialog from dialog-related model roles; extra
        # keyword parameters come decoded from DialogViewModeArgsRole.
        local_encoder = lambda qvar: qvar.toPyObject().toLocal8Bit().data() if qvar.toPyObject() is not None else None
        dialog_type = index.model().data(index, ObjectModelViewConstants.DialogTypeRole).toPyObject()
        ref_col = local_encoder(index.model().data(index, ObjectModelViewConstants.ReferenceAttributeRole))
        bound_object = index.model().data(index, ObjectModelViewConstants.ValueRole).toPyObject()
        model = index.model().data(index, ObjectModelViewConstants.DialogModelRole).toPyObject()
        columns = index.model().data(index, ObjectModelViewConstants.DialogColumnsRole).toPyObject()
        multiple = index.model().data(index, ObjectModelViewConstants.DialogMultipleRole).toPyObject()
        viewModeArgs = index.model().data(index, ObjectModelViewConstants.DialogViewModeArgsRole).toPyObject()
        params = {}
        if viewModeArgs is not None:
            # Decode QString keys/values into plain utf-8 strings for **kwargs.
            for k, v in viewModeArgs.iteritems():
                params[k.toUtf8().data()] = v.toUtf8().data()
        return ListDialogEditorForm(dialog_type, bound_object, ref_col, self._readOnly, parent, model, columns, multiple, **params)
    def set_model_data(self, editor, model, index): #IGNORE:W0613
        # Store the dialog's text and result object back into the model.
        decoder = QtCore.QString.fromLocal8Bit
        result = editor.result()
        text = editor.text()
        index.model().setData(index, decoder(text), QtCore.Qt.EditRole)
        index.model().setData(index, decoder(text), QtCore.Qt.ToolTipRole)
        index.model().setData(index, result, ObjectModelViewConstants.ValueRole)
        return editor.dialogResult()
    def set_editor_data(self, editor, index):
        pass
| [
"https://github.com/zigorrom/DigitalAnalyzer.git"
] | https://github.com/zigorrom/DigitalAnalyzer.git |
fe859e987e409c2143886230f1b8476b384bfbf2 | 3663768129f930919451a3721b3dd081bf82512c | /selectionSort.py | bbbacdc193f1046a2f9b3039e220162a75b632ac | [] | no_license | dodo5575/scripts | 07b1e4219991319eead42499d0f492aeebbc2b35 | 864a83591df3c593b761af2a1a3e12bf7db806ee | refs/heads/master | 2020-04-14T02:27:45.729595 | 2016-10-24T20:09:17 | 2016-10-24T20:09:17 | 50,631,009 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,377 | py | #!/usr/bin/env python
# This script sorts a (N x 2) array.
# Usage: python selectionSort.py input
# Chen-Yu Li cli56@illinois.edu
# 2014/3/5
## Standard module
import sys, os
import numpy as np
import re
## User's own module
sys.path.append('/home/cli56/scripts')
import ReadData
## Read input
print "Start loading data..."
array=ReadData.loadAscii(sys.argv[1])
print "Finished loading data!"
## set output file name
inputPrefix = re.split('\.', sys.argv[1])
inputPrefix_noType = ''
for i in range(len(inputPrefix)-1):
inputPrefix_noType = inputPrefix_noType + inputPrefix[i]
if i < (len(inputPrefix)-2):
inputPrefix_noType = inputPrefix_noType + '.'
s="_sorted.dat"
outputPrefix = inputPrefix_noType+s
## Compare and sort
print "Start sorting..."
for j in range(0,(len(array)-1)):
imin = j
for i in range(j+1,len(array)):
if array[i][0] < array[imin][0]:
imin = i
if imin != j:
tmp_value1 = array[imin][0]
tmp_value2 = array[imin][1]
array[imin][0] = array[j][0]
array[imin][1] = array[j][1]
array[j][0] = tmp_value1
array[j][1] = tmp_value2
print "Finished sorting!"
## Write output
output = open(outputPrefix,'w')
for i in range(0,len(array)):
output.write('%f\t%f\n' % (array[i][0],array[i][1]))
print "Finished writing output!"
output.close()
| [
"dodo5575@gmail.com"
] | dodo5575@gmail.com |
2cfa6771adfad25d67580278d8e90dd45946b759 | 046207f434966462fff55f634ba5a450d2208534 | /CodeUp/1277_0200.py | d529aa1563e5cd2f9c1f8347f1cc93d651e5b6cf | [] | no_license | sungsikyang92/pythonStudy | e293e1ac8af443809f840ccee7052a8f57480b70 | 26522b5e232ccd9ab25c52122d254aa7249a8fdf | refs/heads/master | 2023-07-04T16:58:40.318976 | 2021-08-04T02:00:27 | 2021-08-04T02:00:27 | 365,398,522 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 102 | py | count = int(input())
nums = list(map(int, input().split()))  # parse the whitespace-separated integers
print(nums[0],nums[int(count/2)],nums[-1])  # first, middle, and last element
"sungsik.yang92@gmail.com"
] | sungsik.yang92@gmail.com |
ecaafa8cae35fafaa86eced2f153ffe26d0043c8 | 22cee2ff56a1a27cd0e3df3d15f88f038359e04d | /mysqlcheck/wsgi.py | c24d25362d6774b402de41e459a1e59425757aa9 | [] | no_license | linux-devops/inception_web | 432363cc5f71b66521b4a53ccbb078ed4eafac21 | 0eca72d8565a26f81fce8474b2ead17f5a3b3cce | refs/heads/master | 2021-01-12T18:06:03.613166 | 2016-10-20T01:38:31 | 2016-10-20T01:38:31 | 71,324,580 | 1 | 0 | null | 2016-10-19T06:09:47 | 2016-10-19T06:09:46 | null | UTF-8 | Python | false | false | 1,142 | py | """
WSGI config for mysqlcheck project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os

# Point Django at the project settings before the application is created.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "mysqlcheck.settings")

# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| [
"chengzhen.yu@weimob.com"
] | chengzhen.yu@weimob.com |
991bcaf8cb990d983c035fe00885e2558db45090 | 8ac3c90d814d3b6702190f6f1b6eef49ba35bcae | /tmp1.py | 573da33b1b018c0c86e377929a6f9bb0d18ebf6b | [
"MIT"
] | permissive | oskaerd/BLE_Mesh | 67b73f8be80dd974846811463c988e41aaad79cb | 1a980b525dca915beecd32af19259099adf2322f | refs/heads/master | 2021-11-29T07:53:57.183402 | 2019-01-28T21:12:27 | 2019-01-28T21:12:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,310 | py | import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
import socket
import _thread
import atexit
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
print('socet created')
PORT = 9001
IP_ADDR = '192.168.0.38'
s.bind((IP_ADDR, PORT))
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
app.layout = html.Div([
dcc.Input(id='my-id', value='initial value', type='text'),
html.Div(id='my-div')
])
@app.callback(
Output(component_id='my-div', component_property='children'),
[Input(component_id='my-id', component_property='value')]
)
def update_output_div(input_value):
    """Dash callback: echo the text-box contents into the output div."""
    template = 'You\'ve entered "%s"'
    return template % (input_value,)
def on_close():
    # atexit hook: release the module-level UDP socket on interpreter exit.
    global s
    print('port closed')
    s.close()
def get_data(dummy):
    # Background-thread loop: block on the module-level UDP socket and dump
    # each received datagram as a list of byte values.
    # NOTE(review): loops forever; relies on process exit to stop — confirm.
    while True:
        data, addr = s.recvfrom(1024)
        print('received: ' + str(list(data)))
def run_app(dummy):
    # Intended to host the Dash server on a worker thread; currently unused —
    # the __main__ block starts the server directly instead.
    while True:
        app.run_server(debug=True)
if __name__ == '__main__':
    # Register socket cleanup, start the UDP reader thread, then run Dash.
    atexit.register(on_close)
    _thread.start_new_thread( get_data, ('data',))
    #_thread.start_new_thread( run_app, ('app',) )
    app.run_server(debug=True, host = '127.0.0.1')
    # NOTE(review): unreachable until run_server returns; presumably meant to
    # keep the process alive for the reader thread — confirm.
    while True:
        pass
| [
"oskbag@gmail.com"
] | oskbag@gmail.com |
c288c52f8ee60885fe587a639279c2976ed3966e | 9d67cd5f8d3e0ffdd4334a6b9b67c93f8deca100 | /configs/example_old_map_1228.py | 5fa96132f84104c6dbd4a5e0ca228a05b0c82a8a | [] | no_license | SiyuanLee/caps | 0c300a8e5a9a661eca4b2f59cd38125ddc35b6d3 | 476802e18ca1c7c88f1e29ed66a90c350aa50c1f | refs/heads/master | 2021-06-20T22:48:16.230354 | 2021-02-22T13:21:57 | 2021-02-22T13:21:57 | 188,695,489 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 5,754 | py | """
This is the example config file
larger lr
beta no bias
lower explr
comment: too small!
not target beta
"""
import numpy as np
# More one-char representation will be added in order to support
# other objects.
# The following a=10 is an example although it does not work now
# as I have not included a '10' object yet.
a = 10
# This is the map array that represents the map
# You have to fill the array into a (m x n) matrix with all elements
# not None. A strange shape of the array may cause malfunction.
# Currently available object indices are # they can fill more than one element in the array.
# 0: nothing
# 1: wall
# 2: ladder
# 3: coin
# 4: spike
# 5: triangle -------source
# 6: square ------ source
# 7: coin -------- target
# 8: princess -------source
# 9: player # elements(possibly more than 1) filled will be selected randomly to place the player
# unsupported indices will work as 0: nothing
map_array = [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 0, 0, 5, 1, 0, 0, 0, 6, 0, 1],
[1, 9, 9, 9, 1, 9, 9, 9, 9, 9, 1],
[1, 1, 2, 1, 1, 1, 2, 1, 1, 1, 1],
[1, 0, 2, 0, 0, 0, 2, 0, 7, 0, 1],
[1, 0, 2, 0, 0, 0, 2, 0, 0, 0, 1],
[1, 9, 2, 9, 9, 9, 2, 9, 9, 9, 1],
[1, 2, 1, 1, 1, 2, 1, 1, 1, 2, 1],
[1, 2, 0, 1, 0, 2, 0, 1, 0, 2, 1],
[1, 2, 9, 1, 9, 2, 8, 1, 9, 2, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
# set to true -> win when touching the object
# 0, 1, 2, 3, 4, 9 are not possible
end_game = {
8: True,
}
rewards = {
"positive": 0, # when collecting a coin
"win": 1, # endgame (win)
"negative": -25, # endgame (die)
"tick": 0 # living
}
######### dqn only #########
# ensure correct import
import os
import sys
__file_path = os.path.abspath(__file__)
__dqn_dir = '/'.join(str.split(__file_path, '/')[:-2]) + '/'
sys.path.append(__dqn_dir)
__cur_dir = '/'.join(str.split(__file_path, '/')[:-1]) + '/'
from dqn_utils import PiecewiseSchedule
# load the random sampled obs
import pickle
pkl_file = __cur_dir + 'eval_obs_array_random_old_map.pkl'
with open(pkl_file, 'rb') as f:
eval_obs_array = pickle.loads(f.read())
def seed_func():
    """Return a fresh random seed in [0, 1000); overrides the game settings."""
    upper = 1000
    return np.random.randint(0, upper)
num_timesteps = 2e6 # 40 epoch
learning_freq = 4
# training iterations to go
num_iter = num_timesteps / learning_freq
# piecewise learning rate
lr_multiplier = 1.0
learning_rate = PiecewiseSchedule([
(0, 1e-4 * lr_multiplier),
(num_iter / 10, 1e-4 * lr_multiplier),
(num_iter / 2, 5e-5 * lr_multiplier),
], outside_value=5e-5 * lr_multiplier)
learning_rate_term = PiecewiseSchedule([
(0, 2e-4 * lr_multiplier),
(num_iter / 40, 1e-3 * lr_multiplier),
(num_iter / 20, 1e-2 * lr_multiplier),
(num_iter / 10, 5e-2 * lr_multiplier),
(num_iter * 3 / 4, 5e-3 * lr_multiplier),
(num_iter * 7 / 8, 5e-4 * lr_multiplier),
], outside_value=5e-4 * lr_multiplier)
# piecewise exploration rate
exploration = PiecewiseSchedule([
(0, 1.0),
(num_iter / 40, 0.97),
(num_iter * 3 / 8, 0.7),
(num_iter * 7 / 8, 0.05),
], outside_value=0.05)
######### transfer only #########
import tensorflow as tf
source_dirs = [
# an old map policy
'/home/beeperman/Project/ple-monsterkong/examples/dqn_new/logs/old_map_mod_target_1c_12_07_17_22:15:51/dqn',
'/home/beeperman/Project/ple-monsterkong/examples/dqn_new/logs/old_map_mod_target_2_12_13_17_19:12:07/dqn',
#'/home/beeperman/Project/ple-monsterkong/examples/dqn_new/logs/old_map_mod_target_3_12_13_17_19:13:03/dqn',
'/home/beeperman/Project/ple-monsterkong/examples/dqn_new/logs/old_map_mod_target_4_12_23_17_16:20:56/dqn',
]
transfer_config = {
'source_dirs': source_dirs,
'online_q_omega': False, # default false off policy with experience replay
'q_omega_uniform_sample': False, # default false
'four_to_two': True, # default false frame_history_len must be 4!
'source_noop': False, # default false (false means source policies HAS noop action)
'no_share_para': True, # default false set to true to stop sharing parameter between q network and q_omega/term
'xi': 0.005, # default none you may specify a constant. none means xi = 0.5 (q_omega_val - q_omega_second_max)
'target_beta': False, # default false (true means using target beta)
'termination_stop': True, # default false train cnn when training beta online
'learning_rate_term': learning_rate_term,
'beta_no_bias': True, # default false prune bias for termination function
}
dqn_config = {
'seed': seed_func, # will override game settings
'num_timesteps': num_timesteps,
'replay_buffer_size': 1000000,
'batch_size': 32,
'gamma': 0.99,
'learning_starts': 50000,
'learning_freq': learning_freq,
'frame_history_len': 4,
'target_update_freq': 10000,
'grad_norm_clipping': 10,
'learning_rate': learning_rate,
'exploration': exploration,
'eval_obs_array': eval_obs_array, # TODO: construct some eval_obs_array
'room_q_interval': 1e5, # q_vals will be evaluated every room_q_interval steps
'epoch_size': 5e4, # you decide any way
'config_name': str.split(__file_path, '/')[-1].replace('.py', ''), # the config file name
'transfer_config': transfer_config,
}
map_config = {
'map_array': map_array,
'rewards': rewards,
'end_game': end_game,
'init_score': 0,
'init_lives': 1, # please don't change, not going to work
# configs for dqn
'dqn_config': dqn_config,
# work automatically only for aigym wrapped version
'fps': 1000,
'frame_skip': 1,
'force_fps': True, # set to true to make the game run as fast as possible
'display_screen': True,
'episode_length': 1200,
'episode_end_sleep': 0., # sec
} | [
"lisiyuan@bupt.edu.cn"
] | lisiyuan@bupt.edu.cn |
fb30f63ea2395b0dcca9405b88c567a7a4bb60d6 | e7dd192123f404367e9623a357366643742fa723 | /kubernetes/test/test_scheduling_v1beta1_api.py | 700a3b463994104380586917c18869a0959fe020 | [
"Apache-2.0"
] | permissive | itholic/python | 1772725582f28af445efb233eca6c9139da3ae49 | dffe577a062e17057270ae80fa677ffd83e9d183 | refs/heads/master | 2020-09-12T08:59:16.847326 | 2019-11-15T20:40:32 | 2019-11-15T20:40:32 | 222,375,164 | 0 | 0 | Apache-2.0 | 2019-11-18T06:05:45 | 2019-11-18T06:05:43 | null | UTF-8 | Python | false | false | 1,718 | py | # coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
OpenAPI spec version: v1.15.7
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import kubernetes.client
from kubernetes.client.api.scheduling_v1beta1_api import SchedulingV1beta1Api # noqa: E501
from kubernetes.client.rest import ApiException
class TestSchedulingV1beta1Api(unittest.TestCase):
"""SchedulingV1beta1Api unit test stubs"""
def setUp(self):
self.api = kubernetes.client.api.scheduling_v1beta1_api.SchedulingV1beta1Api() # noqa: E501
def tearDown(self):
pass
def test_create_priority_class(self):
"""Test case for create_priority_class
"""
pass
def test_delete_collection_priority_class(self):
"""Test case for delete_collection_priority_class
"""
pass
def test_delete_priority_class(self):
"""Test case for delete_priority_class
"""
pass
def test_get_api_resources(self):
"""Test case for get_api_resources
"""
pass
def test_list_priority_class(self):
"""Test case for list_priority_class
"""
pass
def test_patch_priority_class(self):
"""Test case for patch_priority_class
"""
pass
def test_read_priority_class(self):
"""Test case for read_priority_class
"""
pass
def test_replace_priority_class(self):
"""Test case for replace_priority_class
"""
pass
if __name__ == '__main__':
unittest.main()
| [
"haoweic@google.com"
] | haoweic@google.com |
14a9a439e418179da03d34626f60523f515e90d1 | 99689d79e95d4bab14934fe0f4399e7fe28dbaf4 | /plot3d/plot3d.py | 070496649709e4031ca5f4728cda3fa617b1a0da | [] | no_license | darksun190/plot3d_Vitesco | 926d4092e65df0e80f2ef309bba3a9a04b9f4a0f | 684dfe5d5cb3bb1d26d563163634c84099ba5f45 | refs/heads/master | 2020-08-16T04:19:25.062995 | 2019-10-16T04:02:07 | 2019-10-16T04:02:07 | 215,453,680 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 762 | py |
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
n_angles = 180
radius = 65
angles = np.linspace(0,np.pi / 6,n_angles)
x1 = radius * np.cos(angles)
y1 = radius * np.sin(angles)
n_z1 = 80
n_z2 = 40
n_z3 = 60
h_z1 = 10
h_z2 = 100
h_z3 = 120
z1 = np.concatenate((np.linspace(1,h_z1,n_z1) , np.linspace(h_z1,h_z2,n_z2),np.linspace(h_z2,h_z3,n_z3)),axis=0)
x2 = (radius-5) * np.cos(angles)
y2 = (radius-5) * np.sin(angles)
z2 = np.concatenate((np.linspace(1,h_z1,n_z1) , np.linspace(h_z1,h_z2,n_z2),np.linspace(h_z2,h_z3,n_z3)),axis=0)
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.scatter(x1,y1,z1,linewidth=0.1)
ax.scatter(x2,y2,z2,linewidth=0.1)
plt.xlim(0,radius * 1.2)
plt.ylim(0,radius * 1.2)
plt.show()
| [
"darksun190@hotmail.com"
] | darksun190@hotmail.com |
1d477bdc2d24efe805ae12ada9589a200b99ac7d | f2658c4bd7f833ace25ac2b63e88317b05f4602d | /2017 July/2017-July-11/st_rdf_test/model2/RelationsConstruction.py | 80db921a3a777c4028c6f12a17dbc2aa3c535f55 | [] | no_license | xiaochao00/telanav_diary | e4c34ac0a14b65e4930e32012cc2202ff4ed91e2 | 3c583695e2880322483f526c98217c04286af9b2 | refs/heads/master | 2022-01-06T19:42:55.504845 | 2019-05-17T03:11:46 | 2019-05-17T03:11:46 | 108,958,763 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,410 | py | #-------------------------------------------------------------------------------
# Name: RelationsConstruction model
# Purpose: this model is used to mapping the
# columns: [ ]
#
# Author: rex
#
# Created: 2016/01/20
# Copyright: (c) rex 2016
# Licence: <your licence>
#-------------------------------------------------------------------------------
from record import Record
from constants import *
import os
import sys
import datetime
import json
ROOT_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)),"..")
GLOBAL_KEY_PREFIX = "relations_construction_"
CSV_SEP = '`'
LF = '\n'
#(key, category, function)
STATISTIC_KEYS = (
("type",False,"type"),
)
class RelationsConstruction(Record):
def __init__(self, region):
Record.__init__(self)
self.dump_file = os.path.join(ROOT_DIR, "temporary", self.__class__.__name__)
self.stat = {}
self.region = region
def dump2file(self):
cmd = "SELECT \
DISTINCT(rc.condition_id), \
rc.condition_type \
FROM \
public.rdf_condition AS rc LEFT JOIN public.rdf_nav_strand AS rns ON rns.nav_strand_id=rc.nav_strand_id \
LEFT JOIN public.rdf_nav_link AS rnl ON rns.link_id = rnl.link_id \
WHERE rc.condition_type='3' AND rnl.iso_country_code IN (%s)"%(REGION_COUNTRY_CODES(self.region, GLOBAL_KEY_PREFIX))
print cmd
self.cursor.copy_expert("COPY (%s) TO STDOUT DELIMITER '`'"%(cmd),open(self.dump_file,"w"))
def get_statistic(self):
try:
self.dump2file()
except:
print "Oops! Some table or schema don't exist! Please check the upper sql"
return {}
processcount = 0
with open(self.dump_file, "r",1024*1024*1024) as csv_f:
for line in csv_f:
line = line.rstrip()
line_p = line.split(CSV_SEP)
if len(line_p) < 1:
continue
self.__statistic(line_p)
processcount += 1
if processcount%5000 == 0:
print "\rProcess index [ "+str(processcount)+" ]",
print "\rProcess index [ "+str(processcount)+" ]",
# write to file
with open(os.path.join(ROOT_DIR, "output", "stat", self.__class__.__name__), 'w') as stf:
stf.write(json.dumps(self.stat))
return self.stat
def __statistic(self,line):
for keys in STATISTIC_KEYS:
try:
getattr(self,'_RelationsConstruction__get_'+keys[2])(keys,line)
except:
print "The statistic [ %s ] didn't exist"%(keys[2])
print ("Unexpected error:[ RelationsConstruction.py->__statistic] "+str(sys.exc_info()))
def __count(self,key):
if self.stat.has_key(key):
self.stat[key] += 1
else:
self.stat[key] = 1
# all statistic method
def __get_type(self,keys,line):
if '\N' != line[0]:
self.__count("%s%s"%(GLOBAL_KEY_PREFIX,keys[0]))
if __name__ == "__main__":
# use to test this model
bg = datetime.datetime.now()
stat = RelationsConstruction('na').get_statistic()
keys = stat.keys()
print "==>"
print "{%s}"%(",".join(map(lambda px: "\"%s\":%s"%(px,stat[px]) ,keys)))
print "<=="
ed = datetime.datetime.now()
print "Cost time:"+str(ed - bg)
| [
"1363180272@qq.com"
] | 1363180272@qq.com |
2b9c41b7c02effb049ec061e9907bd1df67be817 | 0d97fe84550145e0a80fd0fd336857caac724ea3 | /scrapy_58_ganji/源码/ganji/ganji_jiangsu/6/ganji/settings.py | 1ebf3fb95926dabe0e24e8e772e6b29f08767dd4 | [] | no_license | caiyingyi/scrapy_58_ganji_ | 311f9061c903ba34821967ece970dd2d946dc1bb | 23567902399cb06e5e76232401b4eaa066ebb757 | refs/heads/master | 2021-01-21T20:56:25.963166 | 2017-05-24T12:53:36 | 2017-05-24T12:53:36 | 92,291,279 | 2 | 5 | null | null | null | null | UTF-8 | Python | false | false | 3,380 | py | # -*- coding: utf-8 -*-
# Scrapy settings for ganji project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
# http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
# http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'ganji'
SPIDER_MODULES = ['ganji.spiders']
NEWSPIDER_MODULE = 'ganji.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
USER_AGENT = ["Mozilla/5.0 (X11; Linux i686; U;) Gecko/20070322 Kazehakase/0.4.5",]
# Obey robots.txt rules
ROBOTSTXT_OBEY = False
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
DOWNLOAD_DELAY = 1
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
DEFAULT_REQUEST_HEADERS = {
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Accept-Encoding':'gzip, deflate',
'Accept-Language':'zh-CN,zh;q=0.8,en-US;q=0.5,en;q=0.3',
'Cache-Control':'private',
'Connection': 'keep-alive',
}
# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'ganji.middlewares.GanjiSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'ganji.middlewares.MyCustomDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
'ganji.pipelines.GanjiPipeline': 300,
}
MONGODB_SERVER = "localhost"
MONGODB_PORT = 27017
MONGODB_DB = "jiangsu"
#更改数据库名称
MONGODB_COLLECTION = "6"
# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
| [
"caiyingyi902@163.com"
] | caiyingyi902@163.com |
d706b671b513d6c46ac6d9c2c739f89f1b6c2853 | 7a88fc18f30d5dd3ac935877d4d9268a56c296be | /di_website/publications/migrations/0039_audiovisualmedia_featured_media.py | 1fb52c70bc4c2b00925255998041d9bd5bcf5036 | [] | no_license | devinit/DIwebsite-redesign | 745a480b7ba0feffa34dc664548ee4c5a7b4d470 | 9ec46823c67cdd4f35be255896bf30d8f6362666 | refs/heads/develop | 2023-08-30T04:06:20.951203 | 2023-08-07T12:06:07 | 2023-08-07T12:06:07 | 184,287,370 | 1 | 0 | null | 2023-08-28T14:34:57 | 2019-04-30T15:29:25 | HTML | UTF-8 | Python | false | false | 611 | py | # Generated by Django 2.2.4 on 2020-05-03 16:38
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('wagtailmedia', '0003_copy_media_permissions_to_collections'),
('publications', '0038_merge_20200503_1638'),
]
operations = [
migrations.AddField(
model_name='audiovisualmedia',
name='featured_media',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailmedia.Media'),
),
]
| [
"debukali@gmail.com"
] | debukali@gmail.com |
f2501939e36bbdece2eb8e51d00a77b47c7b8aa2 | 9cca5377b6a124158604255c6aa2e03be70209ac | /script/term_count/extract_feature_multi.py | 246b3c617da5956ef5c7711feeb43a960f23582e | [
"Apache-2.0"
] | permissive | Trietptm-on-Coding-Algorithms/flash_feature_extraction | dd9fd36f590f3c9b4d1c44594a0cc90104b62e24 | 29226a4c0e81240fd5c53fd9c80b0f6b0f5a8f95 | refs/heads/master | 2020-05-07T22:02:11.923829 | 2018-04-28T09:41:39 | 2018-04-28T09:41:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,777 | py | import os
import time
import pdb
import subprocess
import json
import collections
import sys
import time
import datetime
from multiprocessing import Process, Queue
import decimal
from decimal import getcontext, Decimal
#from split import SplitHelper
queue = Queue()
def produce_file(dir_path):
all_files = []
for root,dirs,files in os.walk(dir_path):
for file in files:
if file.endswith('.json') or file.endswith('.txt'):
continue
file_path = os.path.join(root,file)
all_files.append(file_path)
return all_files
def extract(file_path,out_folder,label,feature_file):
filename=os.path.basename(file_path)
out_path=out_folder+"\\"+filename+".json"
if not os.path.exists(out_path):
cmd="FlashML.exe -l {0} -p {1} -o {2} -f {3}".format(label,file_path,out_path,feature_file)
#print cmd
print file_path
subprocess.call(cmd,shell=True)
def worker(queue,out_folder,label,feature_file):
for file_path in iter(queue.get,"STOP"):
extract(file_path,out_folder,label,feature_file)
#insert_mysql(file_path,file_md5)
return True
def main(label,out_folder,flash_folder,feature_file):
all_files = produce_file(flash_folder)
for file_path in all_files:
queue.put(file_path)
workers = 4
processes = []
for w in xrange(workers):
p = Process(target=worker,args=(queue,out_folder,label,feature_file))
p.start()
processes.append(p)
queue.put("STOP")
for p in processes:
p.join()
def splitfile(out_file):
if os.path.exists(out_file):
try:
s = SplitHelper()
s.analyze(out_file, 0.7)
except Exception,e:
print help_msg
print 'Exception: {}'.format(str(e))
#def pre_process(label,out_folder):
#for file in os.listdir(folder):
#if file.endswith('.txt'):
if __name__ == "__main__":
if len(sys.argv)<3:
print_help()
exit(0)
label=sys.argv[1]
flash_folder=sys.argv[2]
feature_file=sys.argv[3]
out=os.path.basename(feature_file)
if '.txt' in out:
out=out[0:-4]
global out_folder
out_folder=os.path.join(os.getcwd(),out)
#out_file=os.path.join(flash_folder,"out_json")+"\\"+"out_file.txt"
if not os.path.exists(out_folder):
os.mkdir(out_folder)
start_time = time.strftime('%c', time.localtime(time.time()))
main(label,out_folder,flash_folder,feature_file)
end_time = time.strftime('%c', time.localtime(time.time()))
#merge(label,out_folder,flash_folder)
print "start_time",start_time
print "end_time",end_time
#splitfile(out_file)
#pre_process(label,out_folder)
| [
"michael_du@ubuntu-16.04-server-md"
] | michael_du@ubuntu-16.04-server-md |
4db43b3627ce71b65078c3610a3ad71319c4c739 | a512b8893b0d2de827d6292e810f3a98b41e132c | /Week4/Day6/Solutions/Python/prog3.py | e8ebb18c93788e86255dbf1b31875bd34116bfa1 | [] | no_license | Audarya07/Daily-Flash-Codes | d771079fd0d470e2d3e05679f17f32fb64b4f426 | cf96ca2b1676b038e243fac67be778381492ffeb | refs/heads/master | 2022-11-06T15:37:47.180729 | 2020-06-25T16:20:55 | 2020-06-25T16:20:55 | 274,960,651 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 183 | py | for num in range(1,101):
sum = 0
for i in range(1,num):
if num%i==0:
sum+=i
if sum==num:
continue
else:
print(num,end=" ")
print()
| [
"audiuttarwar2000@gmail.com"
] | audiuttarwar2000@gmail.com |
ad52f3a7125dd0292edf58325bcec219f7496f8f | 820792aa7c20a6947e63987bb3aea6bc0b36ece6 | /simulaDecorator.py | 59941f7e2420489424f8f47d5c47699d713c132f | [] | no_license | lualmeidasouza/python | 30febd04e3fe68dbe5b112741a98342a43897a25 | e705db936ddc4bf7664017afd6a1dc3a40103a17 | refs/heads/master | 2020-12-10T12:07:30.575844 | 2020-10-28T02:20:55 | 2020-10-28T02:20:55 | 233,589,181 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 409 | py | # -*- coding: utf-8 -*-
"""
Created on Thu Aug 20 10:11:46 2020
@author: Luciana
"""
def meu_decorador(func):
def empacotador():
print('Antes da chamada da função')
func()
print('Depois da chamada da função')
return empacotador
@meu_decorador
def diga_onde():
print('diga_onde() function')
#estas três linhas acima, nada
diga_onde()
| [
"noreply@github.com"
] | noreply@github.com |
5abefd1028b6cccfdaa7eb87f9bf76914e4e80f7 | ef187d259d33e97c7b9ed07dfbf065cec3e41f59 | /work/atcoder/abc/abc063/A/answers/322689_irin0890.py | 4d6bb223af0fd8d70f6f64a17ba23573afd95cd4 | [] | no_license | kjnh10/pcw | 847f7295ea3174490485ffe14ce4cdea0931c032 | 8f677701bce15517fb9362cc5b596644da62dca8 | refs/heads/master | 2020-03-18T09:54:23.442772 | 2018-07-19T00:26:09 | 2018-07-19T00:26:09 | 134,586,379 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 86 | py | a, b = map(int, input().split())
if a+b >= 10:
print('error')
else:
print(a+b) | [
"kojinho10@gmail.com"
] | kojinho10@gmail.com |
f72184d6b4bb44ad7c3ada178c36203581a0126a | 582a4cc9a5a98b68ae4c9554d9f2509218f01b2b | /FortexSrl/FortexSrl/wsgi.py | 24fd13592a801ae503ee59502b3fc94316d78cd1 | [] | no_license | sbritos58/FortexSrl. | 82979faf0ec208f07ae8e1159edc0e578d35540c | cec2ffbd638acd749d7adf623b537a07a0fd31be | refs/heads/master | 2022-12-26T22:06:26.366216 | 2020-09-02T19:53:32 | 2020-09-02T19:53:32 | 253,104,346 | 0 | 0 | null | 2022-12-08T07:28:16 | 2020-04-04T21:43:40 | Python | UTF-8 | Python | false | false | 395 | py | """
WSGI config for FortexSrl project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'FortexSrl.settings')
application = get_wsgi_application()
| [
"s.britos@hotmail.com"
] | s.britos@hotmail.com |
c2857c69467516c3483fe9bcbd8bdc698496353f | 82d7bfc98ee4531c06e0f6dd37fc9675cd9b6726 | /presentation.py | afa8acbf1d7001a8b8b15f32cf24abea040ac516 | [] | no_license | fonstelien/windows-exploder | 4eb2ad557fe0390bbaf20e545769f2b82e12ed82 | 9fe5de6fe7e1f8cc4358d07d17cdef1b4763785b | refs/heads/master | 2020-05-31T12:23:28.438185 | 2019-08-28T19:20:30 | 2019-08-28T19:20:30 | 190,278,760 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,165 | py | #!/usr/bin/env python3
import urwid
import walker
def init_widget():
banner = urwid.BigText(markup=("programpresentation", "Windows Exploder"),
font=urwid.HalfBlock5x4Font())
banner = urwid.Padding(w=banner, align="center", width="clip")
signature = urwid.Text(
markup=("programpresentation", "V.0.1 by Olav Fønstelien"),
align="center")
divider = [urwid.Divider()]*5
return urwid.SimpleListWalker(divider+[banner, signature])
class PresentationWidget(walker.Walker):
def __init__(self, get_markup):
self.get_markup = get_markup # A function object
super(PresentationWidget, self).__init__()
self._selectable = False
self.update()
def update(self, presentation="", force=False):
if not (presentation or force):
return
presentation = presentation.splitlines()
markup_list = list()
for line in presentation:
markup_list.append(self.get_markup(line))
self.set_content(markup_list)
def reset_widget(self):
if self.focus is None:
return
self.focus_position = 0
| [
"olav.fonstelien@gmail.com"
] | olav.fonstelien@gmail.com |
400feaffc50ee73a60b61ba5d967b5143b6c12d4 | b9fe26fd53475c5b60eece092712a20822e95c43 | /tools/create_test_alarms.py | 8253284cc3717b13b631cef51fb8285ccd319c3a | [
"Apache-2.0",
"MIT",
"ISC",
"CC0-1.0"
] | permissive | ifit/aws-media-services-application-mapper | f2fa065691fb00ad0e6b957662e137548b72bcd6 | e508836f9eac318f3da26f4c57cc40cc272a5e94 | refs/heads/master | 2022-11-28T07:04:23.208576 | 2020-07-13T23:23:59 | 2020-08-04T00:12:23 | 279,688,382 | 0 | 0 | Apache-2.0 | 2020-08-03T23:22:16 | 2020-07-14T20:39:21 | null | UTF-8 | Python | false | false | 1,217 | py | import boto3
import copy
import time
import json
ALARM_TEMPLATE = {
"AlarmName": "1193839-0 Active Alerts",
"AlarmDescription": "1193839-0 Active Alerts",
"ActionsEnabled": True,
"OKActions": [
],
"AlarmActions": [
],
"InsufficientDataActions": [],
"MetricName": "ActiveAlerts",
"Namespace": "MediaLive",
"Statistic": "Maximum",
"Dimensions": [{
"Name": "ChannelId",
"Value": "1193839"
},
{
"Name": "Pipeline",
"Value": "0"
}
],
"Period": 10,
"EvaluationPeriods": 1,
"DatapointsToAlarm": 1,
"Threshold": 1.0,
"ComparisonOperator": "GreaterThanOrEqualToThreshold",
"TreatMissingData": "missing"
}
TOTAL_ALARMS = 500
client = boto3.client("cloudwatch")
for index in range(TOTAL_ALARMS):
print(index)
alarm_configuration = copy.deepcopy(ALARM_TEMPLATE)
alarm_configuration["AlarmName"] = f"MSAM Test Alarm {time.time()}"
alarm_configuration["AlarmDescription"] = "MSAM Testing Only, Do Not Use"
print(json.dumps(alarm_configuration))
response = client.put_metric_alarm(**alarm_configuration)
print(json.dumps(response))
time.sleep(0.25)
| [
"timt@ifit.com"
] | timt@ifit.com |
07ff6980884d70cacc711dfc287bfbf96c7c733e | f4b694982027ac362de1e9d6755f2943d0355a06 | /DECSKS-03 -- Convergence of FD formulation of high order CS/pyfiles/plots_df9_comparison.py | 6ef2da6f2b41cabe799772b8df49ec3244e370d7 | [] | no_license | dsirajud/IPython-notebooks | 55275e44191c16f5393571522787993f931cfd98 | 6ad9d978c611558525fc9d716af101dc841a393b | refs/heads/master | 2021-01-15T15:33:57.119172 | 2016-07-13T20:08:29 | 2016-07-13T20:08:29 | 35,054,473 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 645 | py | import matplotlib.pyplot as plt
import numpy as np
from convergence_routines import *
Nx = 2488
x, dx, L = domain(_Nx = Nx)
L2error, df9_approx = FD_derivative_matrix_formulation(_dn = 9, _p = 3, _Nx = Nx)
df9_exact = df9(x)
plt.plot(x,df9_exact, label = 'exact df9', linewidth = 3)
plt.hold('on')
plt.plot(x,df9_approx, label = 'approx df9', linewidth = 1, color = "red")
# compare with the function whose derivative this is
df8_exact = df8(x)
plt.plot(x,df8_exact * np.abs(np.min(df9_approx)) / np.abs(np.min(df8_exact)), label = 'exact df4', linewidth = 1, color = "cyan")
plt.hold('off')
plt.legend(loc = 'best')
plt.grid()
plt.show()
| [
"sirajuddin@wisc.edu"
] | sirajuddin@wisc.edu |
c8e8653a9579078797ccc3d7b99b52d659ca1637 | f087f6be63379ef532ae4125cc5fc377ea4a606e | /admin.py | f39b1822720649f958a405e4f1439e002d0e8b66 | [] | no_license | itchybumDev/EmployMeBot | e6a926a91f7f8f31c7cac4e8cae54379bce6dfcb | 5233a05f3fd15fda78548b1c74d883da81a114d2 | refs/heads/master | 2023-01-12T06:01:13.760866 | 2020-11-18T14:09:00 | 2020-11-18T14:09:00 | 305,191,301 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,462 | py | import csv
import pickle
from model.Job import Job
from model.Seeker import Seeker
from model.User import User
user_dict = {}
dev_team = []
job_dict = {}
seeker_dict = {}
def loadDataOnStartup():
loadUserDict()
loadDevTeam()
loadJobDict()
loadSeekerDict()
def saveDataOnShutDown():
saveUserDict()
saveDevTeam()
saveJobDict()
saveSeekerDict()
def isAdmin(chatId):
return str(chatId) in dev_team
def getSeeker(chatId):
return seeker_dict.get(int(chatId))
def getSeekerDict():
return seeker_dict
def getJobDict():
return job_dict
def getJob(id) -> Job:
return job_dict.get(id)
def getUserDict():
return user_dict
def getUser(chatId) -> User:
return user_dict.get(chatId)
def isSeekerRegistered(chatId):
return chatId in seeker_dict.keys()
def isJobAvailableForTaking(jobId):
return jobId in job_dict.keys() and job_dict.get(jobId).isPublish()
def addNewJob(job: Job):
print("Attempt to add new job")
if job.id in job_dict:
print('Job is already in the database')
return job_dict.get(job.id)
else:
print('New Job added')
job_dict.setdefault(job.id, job)
saveJobDict()
return job
def updateJob(job: Job):
print('Updating job')
job_dict[job.id] = job
saveJobDict()
return job
def saveJobDict():
global job_dict
with open("./db/jobData.pickle", 'wb') as handle:
pickle.dump(job_dict, handle, protocol=pickle.HIGHEST_PROTOCOL)
with open("./db/jobData.csv", 'w', newline='') as file:
fieldnames = Job.getFields_name()
writer = csv.DictWriter(file, delimiter=';', fieldnames=fieldnames)
writer.writeheader()
for v in job_dict.values():
writer.writerow(v.toExcelRow())
return True
def loadJobDict():
global job_dict
try:
with open('./db/jobData.pickle', 'rb') as handle:
job_dict = pickle.load(handle)
print(job_dict)
except IOError:
print("Job Dict data is not found, initialize to empty")
def addUser(user: User):
print("Attempt to add new user")
if user.id in user_dict:
print('User is already in the database')
return user_dict.get(user.id)
else:
print('New User added')
user_dict.setdefault(user.id, user)
saveUserDict()
return user
def saveUserDict():
global user_dict
with open("./db/userData.pickle", 'wb') as handle:
pickle.dump(user_dict, handle, protocol=pickle.HIGHEST_PROTOCOL)
with open("./db/userData.csv", 'w', newline='') as file:
writer = csv.writer(file)
for v in user_dict.values():
writer.writerow(v.toExcelRow())
return True
def loadUserDict():
global user_dict
try:
with open('./db/userData.pickle', 'rb') as handle:
user_dict = pickle.load(handle)
except IOError:
print("User Dict data is not found, initialize to empty")
def addSeeker(seeker: Seeker):
print("Attempt to add new job seeker")
if seeker.id in seeker_dict:
print('Job Seeker is already in the database')
return seeker_dict.get(seeker.id)
else:
print('New Job Seeker added')
seeker_dict.setdefault(seeker.id, seeker)
saveSeekerDict()
return seeker
def saveSeekerDict():
global seeker_dict
with open("./db/seekerData.pickle", 'wb') as handle:
pickle.dump(seeker_dict, handle, protocol=pickle.HIGHEST_PROTOCOL)
with open("./db/seekerData.csv", 'w', newline='') as file:
fieldnames = Seeker.getFields_name()
writer = csv.DictWriter(file, delimiter=';', fieldnames=fieldnames)
writer.writeheader()
for v in seeker_dict.values():
writer.writerow(v.toExcelRow())
return True
def loadSeekerDict():
global seeker_dict
try:
with open('./db/seekerData.pickle', 'rb') as handle:
seeker_dict = pickle.load(handle)
except IOError:
print("Seeker dict data is not found, initialize to empty")
def loadDevTeam():
with open("./db/dev_team.csv", newline='') as csvfile:
reader = csv.reader(csvfile)
for row in reader:
dev_team.append(str(row[0]))
def saveDevTeam():
with open("./db/dev_team.csv", 'w', newline='') as file:
writer = csv.writer(file)
for v in dev_team:
writer.writerow([v])
return True
| [
"buffolio.bot@gmail.com"
] | buffolio.bot@gmail.com |
6ee2719c8747fe7dbd0c2eee592c976c2dc46ea6 | bede4adbf0438fd36131bfe200f218a585b7eecf | /www/downloads/py/python_light_control/main.py | c2512114c0d4951aeead9b4eaddd24e85f7f269c | [] | no_license | tomasmo-dev/tomasmo-dev.github.io | 380d5fdbcdc93f227d11675b2fdf7f3a75846240 | 59bbfef4e3e1f11413b900edf4535de38fb4355b | refs/heads/master | 2023-03-05T09:04:37.592484 | 2021-02-16T19:12:33 | 2021-02-16T19:12:33 | 333,893,917 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 841 | py | import threading
import time
import serial
from pynput.keyboard import Key, Listener
lastData = None
ar = serial.Serial('COM3', 9600, timeout=0.1)
def on_press(ser):
global lastData
if lastData != 'S':
lastData = 'S'
ser.write(b'S')
def on_release(ser):
global lastData
if lastData != 'E':
lastData = 'E'
ser.write(b'E')
def Listen(instance):
print("Listening serial")
while True:
data = ar.readline()
if data:
print(data.decode("ASCII"))
time.sleep(2)
listening = threading.Thread(target=Listen, args=(ar, ), daemon=True)
listening.start()
time.sleep(2)
print("Listening to keyboard")
with Listener(on_press=lambda press: on_press(ar), on_release=lambda _ : on_release(ar)) as listen:
listen.join()
| [
"tomasmo@email.cz"
] | tomasmo@email.cz |
6a90cbd5a6753bbcd0f5304a4dbf439941b2315a | bcc86ffc98d542c4c993823e6a03cdaa51640081 | /softmax.py | 8c25a611839cf26d2a0ccee98c1daa9125b171d3 | [] | no_license | Ikerzly/zly | 8ec5192c6ef7142a7330acdc0a49e32541b9bccb | b8b5b633057c7a73194024005f0a5bc8702bf69d | refs/heads/master | 2020-03-28T19:49:33.137249 | 2018-09-16T14:53:31 | 2018-09-16T16:47:10 | 149,013,103 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 359 | py | #!/usr/bin/env python3
#-*-coding:utf-8 -*-
import numpy
def softmax(inMatrix):
m,n=numpy.shape(inMatrix)
outMatrix=numpy.mat(numpy.zeros((m,n)))
soft_sum=0
for idx in range(0,n):
outMatrix[0,idx]=math.exp(inMatrix[0,idx])
soft_sum+=outMatrix[0,idx]
for idx in range(0,n):
outMatrix[0,idx]=outMatrix[0,idx]/soft_sum
return outMatrix
| [
"lmk881111@yeah.net"
] | lmk881111@yeah.net |
edcaf8f371f0398ce72d8397dff48c5609fbabfb | dcba99bde52c0d9bd4727e3217030ce0b54622e3 | /listings/migrations/0001_initial.py | 95a8d09cf558d5f42ae796d9e56ea8fa7c2b4a65 | [] | no_license | waldo7/btre_project | 8ebfcdc8e3fbec79f805a144145bfb1d5b1c7382 | abbf4cc7d9f4748217c4e665f25ed30f5a693c12 | refs/heads/master | 2020-04-16T03:59:37.615841 | 2019-01-11T14:30:17 | 2019-01-11T14:30:17 | 165,251,906 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,276 | py | # Generated by Django 2.1.4 on 2019-01-04 01:12
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial migration: create the ``Listing`` model.

    Auto-generated by Django 2.1.4 (see header comment); avoid hand-editing
    the field list, since the recorded migration state must match the
    database schema it produced.
    """

    # First migration of this app.
    initial = True

    # Must run after the realtors app's initial migration because of the
    # ``realtor`` foreign key below.
    dependencies = [
        ('realtors', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Listing',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=200)),
                ('address', models.CharField(max_length=200)),
                ('city', models.CharField(max_length=200)),
                ('state', models.CharField(max_length=200)),
                ('zipcode', models.CharField(max_length=200)),
                ('description', models.TextField(blank=True, null=True)),
                ('price', models.IntegerField()),
                ('bedrooms', models.IntegerField()),
                # e.g. 2.5 bathrooms; max_digits=2 caps this at 9.9.
                ('bathrooms', models.DecimalField(decimal_places=1, max_digits=2)),
                ('garage', models.IntegerField(default=0)),
                ('sqft', models.IntegerField()),
                ('lot_size', models.DecimalField(decimal_places=1, max_digits=5)),
                ('is_published', models.BooleanField(default=True)),
                ('list_date', models.DateTimeField(auto_now_add=True)),
                ('modified_at', models.DateTimeField(auto_now=True)),
                # Main photo is required; photos 1-6 are optional.
                ('photo_main', models.ImageField(upload_to='photos/%Y/%m/%d/')),
                ('photo_1', models.ImageField(blank=True, null=True, upload_to='photos/%Y/%m/%d/')),
                ('photo_2', models.ImageField(blank=True, null=True, upload_to='photos/%Y/%m/%d/')),
                ('photo_3', models.ImageField(blank=True, null=True, upload_to='photos/%Y/%m/%d/')),
                ('photo_4', models.ImageField(blank=True, null=True, upload_to='photos/%Y/%m/%d/')),
                ('photo_5', models.ImageField(blank=True, null=True, upload_to='photos/%Y/%m/%d/')),
                ('photo_6', models.ImageField(blank=True, null=True, upload_to='photos/%Y/%m/%d/')),
                # DO_NOTHING: deleting a Realtor leaves listings pointing at the
                # missing row (no cascade at the Django level).
                ('realtor', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='realtors.Realtor')),
            ],
        ),
    ]
| [
"waldo.ramones@gmail.com"
] | waldo.ramones@gmail.com |
12b757cc2f3e235b84708079863eabbca176c0ea | 78ba7bb51845bbe3f57dff251665b21ad2605e3b | /allennlp/data/dataset_readers/semantic_dependency_parsing.py | ee68a452db5b49a7aff358491b310f47e8a6541d | [
"Apache-2.0"
] | permissive | shaleenx/allennlp | 4352f6d79585a5690c22e81746b07f2dafdb5a69 | 410402c041366c905aa4a236363b374f0d314633 | refs/heads/master | 2020-09-21T04:40:00.063999 | 2020-01-15T18:48:25 | 2020-01-15T18:48:25 | 224,680,882 | 0 | 0 | Apache-2.0 | 2019-11-28T15:23:32 | 2019-11-28T15:23:31 | null | UTF-8 | Python | false | false | 4,438 | py | from typing import Dict, List, Tuple
import logging
from overrides import overrides
from allennlp.common.file_utils import cached_path
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
from allennlp.data.fields import AdjacencyField, MetadataField, SequenceLabelField
from allennlp.data.fields import Field, TextField
from allennlp.data.token_indexers import SingleIdTokenIndexer, TokenIndexer
from allennlp.data.tokenizers import Token
from allennlp.data.instance import Instance
logger = logging.getLogger(__name__)
FIELDS = ["id", "form", "lemma", "pos", "head", "deprel", "top", "pred", "frame"]


def parse_sentence(
    sentence_blob: str,
) -> Tuple[List[Dict[str, str]], List[Tuple[int, int]], List[str]]:
    """
    Parse a single sentence written in the SemEval SDP format.

    Every token becomes a dictionary keyed by ``FIELDS``, e.g.::

        'id': '1',
        'form': 'Pierre',
        'lemma': 'Pierre',
        'pos': 'NNP',
        'head': '2',      # the `syntactic` head
        'deprel': 'nn',
        'top': '-',
        'pred': '+',
        'frame': 'named:x-c'

    Returns the token dictionaries together with the semantic arcs and
    their tags. Because semantic dependency graphs are not trees, a word
    may have several heads, so the arc and tag lists are decoupled from
    the sentence length.
    """
    rows = [
        candidate.split("\t")
        for candidate in sentence_blob.split("\n")
        if candidate and not candidate.strip().startswith("#")
    ]

    annotated_sentence = []
    predicates = []
    for position, columns in enumerate(rows):
        token = dict(zip(FIELDS, columns))
        if token["pred"] == "+":
            predicates.append(position)
        annotated_sentence.append(token)

    arc_indices = []
    arc_tags = []
    base_width = len(FIELDS)
    for position, columns in enumerate(rows):
        # Columns past the fixed fields hold one argument slot per predicate.
        for predicate_rank, argument in enumerate(columns[base_width:]):
            if argument != "_":
                arc_indices.append((position, predicates[predicate_rank]))
                arc_tags.append(argument)
    return annotated_sentence, arc_indices, arc_tags
def lazy_parse(text: str):
    """Lazily yield parsed sentences from *text*, skipping empty chunks.

    Sentences are separated by blank lines (``\\n\\n``); each non-empty
    chunk is handed to :func:`parse_sentence`.
    """
    for chunk in text.split("\n\n"):
        if not chunk:
            continue
        yield parse_sentence(chunk)
@DatasetReader.register("semantic_dependencies")
class SemanticDependenciesDatasetReader(DatasetReader):
    """
    Reads semantic dependency parses in the SemEval 2015 Task 18
    (Broad-coverage Semantic Dependency Parsing) format.

    # Parameters

    token_indexers : ``Dict[str, TokenIndexer]``, optional (default=``{"tokens": SingleIdTokenIndexer()}``)
        The token indexers to be applied to the words TextField.
    """

    def __init__(self, token_indexers: Dict[str, TokenIndexer] = None, lazy: bool = False) -> None:
        super().__init__(lazy)
        # Falsy (None or empty) indexer mappings fall back to the default.
        self._token_indexers = token_indexers or {"tokens": SingleIdTokenIndexer()}

    @overrides
    def _read(self, file_path: str):
        # URLs are transparently resolved through the local cache.
        file_path = cached_path(file_path)

        logger.info("Reading semantic dependency parsing data from: %s", file_path)

        with open(file_path) as sdp_file:
            parsed_sentences = lazy_parse(sdp_file.read())
            for annotated_sentence, directed_arc_indices, arc_tags in parsed_sentences:
                # Sentences without arcs carry no supervision; skip them.
                if not directed_arc_indices:
                    continue
                words = [token["form"] for token in annotated_sentence]
                tags = [token["pos"] for token in annotated_sentence]
                yield self.text_to_instance(words, tags, directed_arc_indices, arc_tags)

    @overrides
    def text_to_instance(
        self,  # type: ignore
        tokens: List[str],
        pos_tags: List[str] = None,
        arc_indices: List[Tuple[int, int]] = None,
        arc_tags: List[str] = None,
    ) -> Instance:
        text_field = TextField([Token(word) for word in tokens], self._token_indexers)
        fields: Dict[str, Field] = {
            "tokens": text_field,
            "metadata": MetadataField({"tokens": tokens}),
        }
        if pos_tags is not None:
            fields["pos_tags"] = SequenceLabelField(pos_tags, text_field, label_namespace="pos")
        if arc_indices is not None and arc_tags is not None:
            fields["arc_tags"] = AdjacencyField(arc_indices, text_field, arc_tags)
        return Instance(fields)
| [
"noreply@github.com"
] | noreply@github.com |
4e5b3fd3ae5d0943fdd2fedb0332f1298f319bb1 | c0d52f073d5d0bb115332ac568ee7b6047a01d13 | /flask_websub/errors.py | 41cc39ef023e93f056a4ab40b6c1a68e017636fa | [
"ISC"
] | permissive | marten-de-vries/Flask-WebSub | 3008335107fc929a30645cd37fa1cb9b046917cb | 1853582a7f60e79c9ac1bd03b0bf30b36dab77f6 | refs/heads/master | 2023-08-21T03:42:07.637228 | 2021-03-06T22:02:13 | 2021-03-06T22:05:47 | 101,514,291 | 20 | 4 | ISC | 2019-02-19T22:18:33 | 2017-08-26T21:29:45 | Python | UTF-8 | Python | false | false | 408 | py | class FlaskWebSubError(Exception):
"""Base class for flask_websub errors"""
class DiscoveryError(FlaskWebSubError):
    """Raised on errors during canonical topic URL and hub URL discovery."""
class SubscriberError(FlaskWebSubError):
    """Raised on errors while subscribing to a hub."""
class NotificationError(FlaskWebSubError):
    """Raised when the input of the send_change_notification task is invalid"""
| [
"m@rtendevri.es"
] | m@rtendevri.es |
c627cb646fdba505642d346dcd6284dd907bc54f | 2b82dba50d0e54eafbc17c63dd3c8fee070b04cf | /config/settings/local.py | a66e78c7052825e4cf5b4d833b61dc458c94da96 | [
"MIT"
] | permissive | venkat0708/aphdstreaming | 58c80bec32703d2561860d1384bd6794d884679e | 8171cad90cf3abded74d5346a8a0a0a09207da87 | refs/heads/master | 2020-10-01T04:33:50.055516 | 2019-12-11T21:03:44 | 2019-12-11T21:03:44 | 227,456,441 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,380 | py | from .base import * # noqa
from .base import env
# GENERAL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = True
# https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# The hard-coded default below is a development-only fallback; production
# must supply DJANGO_SECRET_KEY via the environment.
SECRET_KEY = env(
    "DJANGO_SECRET_KEY",
    default="65QKKPPf7X0CdDHkG36LdM8YkXL7PdBkhySz2jpOn395EmoupsdvAfwWligVPwss",
)
# https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
ALLOWED_HOSTS = ["localhost", "0.0.0.0", "127.0.0.1"]

# CACHES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#caches
# In-memory cache: per-process, cleared on restart — fine for local dev.
CACHES = {
    "default": {
        "BACKEND": "django.core.cache.backends.locmem.LocMemCache",
        "LOCATION": "",
    }
}

# EMAIL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#email-host
# "mailhog" is the dev SMTP capture service (see docker setup below).
EMAIL_HOST = env("EMAIL_HOST", default="mailhog")
# https://docs.djangoproject.com/en/dev/ref/settings/#email-port
EMAIL_PORT = 1025

# django-debug-toolbar
# ------------------------------------------------------------------------------
# https://django-debug-toolbar.readthedocs.io/en/latest/installation.html#prerequisites
INSTALLED_APPS += ["debug_toolbar"]  # noqa F405
# https://django-debug-toolbar.readthedocs.io/en/latest/installation.html#middleware
MIDDLEWARE += ["debug_toolbar.middleware.DebugToolbarMiddleware"]  # noqa F405
# https://django-debug-toolbar.readthedocs.io/en/latest/configuration.html#debug-toolbar-config
DEBUG_TOOLBAR_CONFIG = {
    "DISABLE_PANELS": ["debug_toolbar.panels.redirects.RedirectsPanel"],
    "SHOW_TEMPLATE_CONTEXT": True,
}
# https://django-debug-toolbar.readthedocs.io/en/latest/installation.html#internal-ips
INTERNAL_IPS = ["127.0.0.1", "10.0.2.2"]
if env("USE_DOCKER") == "yes":
    import socket

    # Derive the docker gateway address (x.y.z.1) from the container's own
    # IPs so the debug toolbar shows up for requests from the host.
    hostname, _, ips = socket.gethostbyname_ex(socket.gethostname())
    INTERNAL_IPS += [ip[:-1] + "1" for ip in ips]

# django-extensions
# ------------------------------------------------------------------------------
# https://django-extensions.readthedocs.io/en/latest/installation_instructions.html#configuration
INSTALLED_APPS += ["django_extensions"]  # noqa F405
# Your stuff...
# ------------------------------------------------------------------------------
| [
"eswar0077@gmail.com"
] | eswar0077@gmail.com |
d59800358316a58679932c187a9225e40f43364e | b08d42933ac06045905d7c005ca9c114ed3aecc0 | /src/learningCurve/leaveOneOut/lrClassifierF.py | 36c020e785dfac7d8a00613b3398404787143651 | [] | no_license | TanemuraKiyoto/PPI-native-detection-via-LR | d148d53f5eb60a4dda5318b371a3048e3f662725 | 897e7188b0da94e87126a4acc0c9a6ff44a64574 | refs/heads/master | 2022-12-05T11:59:01.014309 | 2020-08-10T00:41:17 | 2020-08-10T00:41:17 | 225,272,083 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,134 | py | # 9 September 2019
# Kiyoto Aramis Tanemura
# I modified the rfClassifier.py script to implement a logistic regression classifier. This classifier runs faster than the random forest classifier and Jun previously observed comparable results between logistic regression and random forest classifiers for the protein folding system. Due to the lesser time cost, I may sample a greater hyperparameter space using the logistic regression classifier. If the sampling yields a region in which overfitting is not observed, then I can refine the search. If the results are similar to that of the random forest classifier, then I may have exhausted the dataset for generalizability.
# Modified 26 October 2019 by Kiyoto Aramis Tanemura. Apply logistic regression classifier to CASF-PPI dataset.
# Modified 2020-02-09 by KAT. Code generalized for public use on GitHub.
import pandas as pd
import numpy as np
import os
import json
import pickle
#from multiprocessing import Pool
from time import time
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import RandomizedSearchCV
from sklearn.preprocessing import StandardScaler
from random import shuffle, random
#os.chdir('/mnt/scratch/tanemur1/')
toc = time()  # wall-clock start, used for progress timing below

# Randomize input file orders
pathToInput = 'data/comparison_descriptors/'
pathToOutput = 'results/learningCurve/'
fileNames = [x for x in os.listdir(pathToInput) if '.csv' in x]
shuffle(fileNames) # note: shuffle is in-place. Do not assign to variable

# Specify training set fraction
train_fraction = 0.99
# Manual ceiling: use the exact product when it is an integer, otherwise
# round up so at least train_fraction of the files land in the training set.
if len(fileNames) * train_fraction == int(len(fileNames) * train_fraction):
    train_file_number = int(len(fileNames) * train_fraction)
else:
    train_file_number = int(len(fileNames) * train_fraction + 1)

x_train = pd.DataFrame()
y_train = pd.DataFrame()

# Read individual csv for comparison descriptors, append to train_data, and partition to x_train, y_train
fileNamesWithPath = [pathToInput + fileName for fileName in fileNames]
def read_csv(filePath):
    """Load one comparison-descriptor CSV, using its first column as the index."""
    return pd.read_csv(filePath, index_col = 0)
print('begin read training set')
#with Pool(np.min([train_file_number, 28])) as p:
#    train_dataList = list(p.map(read_csv, fileNamesWithPath[:train_file_number]))
train_dataList = list(map(read_csv, fileNamesWithPath[:train_file_number]))
print('begin append DF | ', (time() - toc) / 60, ' min')
# Append DataFrames into one. While loop used to reduce append operations. Iteratively, DFs in a list are appended
# to the following DF.
# Pairwise merge: each pass appends DF[2i+1] onto DF[2i], then deletes the
# odd-indexed entries (del list[j+1] for j = 0..n-1 removes exactly the
# original odd positions because the list shifts after every deletion),
# halving the list until a single combined DataFrame remains.
while len(train_dataList) != 1:
    number = int(len(train_dataList) / 2)
    for i in range(number):
        train_dataList[2 * i] = train_dataList[2 * i].append(train_dataList[2 * i + 1], sort = True)
    for j in range(number):
        del train_dataList[j + 1]

x_train = train_dataList[0]
del train_dataList  # free memory; the merged frame can be large
print('train_data dimensions', x_train.shape, ' | ', (time() - toc) / 60, ' min')
y_train = x_train['class']
x_train = x_train.drop('class', axis = 1) # x_train contains only nonbonding descriptors
feature_names = x_train.columns
# Standardize features to zero mean / unit variance before fitting.
scaler = StandardScaler()
scaler.fit(x_train)
x_train = scaler.transform(x_train)
y_train = y_train.values
print('Dimensions x_train ', x_train.shape, ' | y_train', y_train.shape)

# Define a logistic regression classifier along with pertinent hyperparameters. Here, default values are used.
clf = LogisticRegression(penalty='l2', verbose = 1)
def sampleRationalVals(minVal, maxVal):
    """Draw a single value log-uniformly (base 2) from [minVal, maxVal)."""
    low = np.log2(minVal)
    high = np.log2(maxVal)
    return 2 ** (random() * (high - low) + low)
def sampleRationalList(minVal, maxVal):
    """Return log-uniform samples from [minVal, maxVal); the number of
    samples grows with log2 of the range width."""
    count = int(2 * np.log2(maxVal - minVal) + 1)
    return [sampleRationalVals(minVal, maxVal) for _ in range(count)]
# Hyperparameter space for the randomized search; empty means a plain
# five-fold cross validation with the classifier's default values.
parameters = {
    # include any hyperparameters to sample. Otherwise, leave empty to perform five fold cross validation with default values. For example:
    # 'C': sampleRationalList(0.001, 1000),
    # 'solver': ['newton-cg', 'lbfgs', 'sag','saga']
}

print('begin RandomizedSearchCV | ' + str((time() - toc)/60) + ' mins')
# n_iter=1 with an empty grid: effectively one 5-fold CV run, refit on all data.
randomized_search = RandomizedSearchCV(estimator = clf, param_distributions = parameters, n_iter = 1, scoring = 'accuracy', refit = True, cv = 5, verbose = 1, n_jobs = 1, pre_dispatch = 'n_jobs', return_train_score=True)
randomized_search.fit(x_train, y_train)
print('begin output | ', (time() - toc) / 60 / 60, ' hours')
tic = time()

# Persist best hyperparameters, the fitted search object, the training-set
# membership, the fitted scaler, and the model coefficients.
with open(pathToOutput + 'bestParamF.json', 'w') as g:
    json.dump(randomized_search.best_estimator_.get_params(), g)

with open(pathToOutput + 'modelF.pkl', 'wb') as h:
    pickle.dump(randomized_search, h)

with open(pathToOutput + 'trainingSetF.txt', 'w') as i:
    i.write('Training set:\n')
    for pdbID in fileNames[:train_file_number]:
        i.write(pdbID + '\n')
    i.write('\nJob time: ' + str((tic - toc) / 60 / 60) + ' hours')

with open(pathToOutput + 'standardScalerF.pkl', 'wb') as j:
    pickle.dump(scaler, j)

# Coefficients labelled by feature name, for later interpretation.
bestCoefficient = randomized_search.best_estimator_.coef_
coefDf = pd.DataFrame(bestCoefficient, columns = feature_names)
with open(pathToOutput + 'coefficientsF.csv', 'w') as f:
    coefDf.to_csv(f)
| [
"tanemur1@msu.edu"
] | tanemur1@msu.edu |
d0b85feedea312ffebdaa1d99294f604d982637e | 14bd724db9e1d8062abc6636414db4950920d2fa | /client_raspberry_pi/conect.py | b8a084d52f37354a4efeb84188a631868dc6a54d | [] | no_license | php5185/Electronic_Voting_System | 188cc2967e4384ef7127aff88914e34c639cd169 | 8269f8a05eafbdb585e50722245bc989ee348e61 | refs/heads/master | 2023-03-04T06:24:29.807155 | 2021-02-18T23:53:45 | 2021-02-18T23:53:45 | 340,203,842 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,780 | py | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'conect.ui'
#
# Created by: PyQt4 UI code generator 4.11.4
#
# WARNING! All changes made in this file will be lost!
import time
import _thread
import mysql.connector as mariadb
import subprocess
import sys
import psutil
from random import randint
import os
import shlex
from subprocess import call, PIPE, STDOUT
Nointernet = None
from PyQt4 import QtCore, QtGui
from PyQt4.QtGui import QDialog, QLineEdit, QApplication, QMessageBox#*
from PyQt4.QtCore import SIGNAL#*
# PyQt4 API-version compatibility shims: under API v2 QString/UnicodeUTF8
# do not exist, so fall back to identity / 3-argument translate.
try:
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    def _fromUtf8(s):
        return s

try:
    _encoding = QtGui.QApplication.UnicodeUTF8
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig)
class extQLineEdit(QLineEdit):
    """QLineEdit that emits a custom 'clicked()' signal on mouse press,
    used below to pop up the on-screen keyboard when a field is tapped."""
    def __init__(self,parent):
        QLineEdit.__init__(self,parent)

    def mousePressEvent(self,QMouseEvent):
        self.emit(SIGNAL("clicked()"))
##class Ui_Dialog1(object):
##
## def setupUi(self, Dialog):
class Ui_Dialog(QDialog):
    """Full-screen Wi-Fi configuration dialog for the voting kiosk.

    Builds the form (SSID + password fields, connect/cancel buttons,
    status label), starts a background thread that reports connectivity,
    and on "Conectar" rewrites /etc/wpa_supplicant/wpa_supplicant.conf
    and reboots the machine.
    """
    #def setupUi(self, Dialog):
    def __init__(self):
        QDialog.__init__(self, parent=None)
        self.setObjectName(_fromUtf8("Dialog"))
        #self.resize(1280, 740)
        # Fixed, frameless, full-screen-style window (kiosk mode).
        self.setFixedSize(1280, 740)
        self.setWindowFlags(QtCore.Qt.FramelessWindowHint)
        self.setGeometry(QtCore.QRect(0,-5,1280,740))
        self.setStyleSheet(_fromUtf8("background-color: rgb(255, 255, 255);"))
        # SSID row label.
        self.label = QtGui.QLabel(self)
        self.label.setGeometry(QtCore.QRect(450, 295, 91, 33))
        self.label.setObjectName(_fromUtf8("label"))
        # Password row label.
        self.label_2 = QtGui.QLabel(self)
        self.label_2.setGeometry(QtCore.QRect(450, 375, 91, 33))
        self.label_2.setObjectName(_fromUtf8("label_2"))
        # SSID input: clicking it launches the on-screen keyboard.
        self.txtUser = extQLineEdit(self)
        self.txtUser.setGeometry(QtCore.QRect(610, 295, 265, 33))
        self.txtUser.setObjectName(_fromUtf8("txtUser"))
        self.connect(self.txtUser,SIGNAL("clicked()"), self.printText)
        # Password input, masked.
        self.textPass = extQLineEdit(self) #QtGui.QLineEdit(self)
        self.textPass.setGeometry(QtCore.QRect(610, 375, 265, 33))
        self.textPass.setObjectName(_fromUtf8("textPass"))
        self.textPass.setEchoMode(QtGui.QLineEdit.Password)
        self.connect(self.textPass,SIGNAL("clicked()"), self.printText)
        self.btnlogin = QtGui.QPushButton(self)
        self.btnlogin.setGeometry(QtCore.QRect(515, 440, 140, 31))
        self.btnlogin.setStyleSheet(_fromUtf8("background-color: rgb(170, 170, 255);"))
        self.btnlogin.setObjectName(_fromUtf8("btnlogin"))
        self.btncancel = QtGui.QPushButton(self)
        self.btncancel.setGeometry(QtCore.QRect(685, 440, 140, 31))
        self.btncancel.setStyleSheet(_fromUtf8("background-color: rgb(170, 170, 255);"))
        self.btncancel.setObjectName(_fromUtf8("btncancel"))
        # Decorative header: national shield (left), JCE banner, shield (right).
        self.lbl_escudo_1 = QtGui.QLabel(self)
        self.lbl_escudo_1.setGeometry(QtCore.QRect(30, 20, 61, 61))
        self.lbl_escudo_1.setStyleSheet(_fromUtf8("image: url(:/JCE/EscudoDom.png);"))
        self.lbl_escudo_1.setText(_fromUtf8(""))
        self.lbl_escudo_1.setObjectName(_fromUtf8("lbl_escudo_1"))
        self.lbl_JCE = QtGui.QLabel(self)
        self.lbl_JCE.setGeometry(QtCore.QRect(400, 10, 500, 71))
        self.lbl_JCE.setStyleSheet(_fromUtf8("image: url(:/JCE/JCEpag1.jpg);"))
        self.lbl_JCE.setText(_fromUtf8(""))
        self.lbl_JCE.setObjectName(_fromUtf8("lbl_JCE"))
        self.lbl_escudo_3 = QtGui.QLabel(self)
        self.lbl_escudo_3.setGeometry(QtCore.QRect(1180, 20, 61, 61))
        self.lbl_escudo_3.setStyleSheet(_fromUtf8("image: url(:/JCE/EscudoDom.png);"))
        self.lbl_escudo_3.setText(_fromUtf8(""))
        self.lbl_escudo_3.setObjectName(_fromUtf8("lbl_escudo_3"))
        self.lbl_info = QtGui.QLabel(self)
        self.lbl_info.setGeometry(QtCore.QRect(260, 90, 841, 41))
        self.lbl_info.setObjectName(_fromUtf8("lbl_info"))
        self.label_7 = QtGui.QLabel(self)
        self.label_7.setGeometry(QtCore.QRect(0, 150, 1280, 70))
        self.label_7.setStyleSheet(_fromUtf8("background-color: rgb(0, 85, 255);"))
        self.label_7.setObjectName(_fromUtf8("label_7"))
        self.label_3 = QtGui.QLabel(self)
        self.label_3.setGeometry(QtCore.QRect(450, 230, 111, 21))
        self.label_3.setObjectName(_fromUtf8("label_3"))
        # Live connection-status label, updated by the Internet() thread.
        self.lblestado = QtGui.QLabel(self)
        self.lblestado.setGeometry(QtCore.QRect(610, 230, 250, 21))
        self.lblestado.setText(_fromUtf8("Desconectado"))
        self.lblestado.setObjectName(_fromUtf8("lblestado"))
        self.label_13 = QtGui.QLabel(self)
        self.label_13.setGeometry(QtCore.QRect(900, 440, 411, 21))
        self.label_13.setObjectName(_fromUtf8("label_13"))

        self.retranslateUi(self)
        QtCore.QMetaObject.connectSlotsByName(self)
        # Check connectivity once in the background without blocking the UI.
        _thread.start_new_thread(self.Internet, ())
        self.btncancel.clicked.connect(self.closeE)
        self.btnlogin.clicked.connect(self.login)

    def Internet(self): # background thread (original comment: "un hilo")
        """Probe connectivity once and update the status label and the
        module-level Nointernet flag accordingly."""
        global Nointernet
        if is_network_alive()!=True:
            try:
                #self.Etiqueta_1_8.SetLabel("Desconectado")
                # NOTE(review): `stat` is assigned but never used.
                stat="Desconectado"
                self.lblestado.setText(_fromUtf8("Desconectado"))
                Nointernet =1
                #print ('no hay')
            except:
                pass
                #print ('Error no hay internet ')
        else:
            try:
                #self.Etiqueta_1_8.SetLabel("Conectado")
                # iwgetid -r prints the SSID of the associated network.
                command = ("iwgetid -r")
                p = subprocess.Popen(command, universal_newlines=True,
                        shell=True, stdout=subprocess.PIPE,
                        stderr=subprocess.PIPE)
                text = p.stdout.read()
                retcode = p.wait()
                estado="Conectado a la red "+text
                self.lblestado.setText(_fromUtf8(estado))
                Nointernet =0
                #print ('hay')
            except:
                pass
                #print ('Error hay internet')

    def closeE(self, event):
        """Cancel: kill the first running 'florence' on-screen keyboard
        process (if any) and exit the application."""
        y=0
        for pid in psutil.pids():
            try:
                p = psutil.Process(pid)
                if p.name() == "florence" and y==0:
                    p.terminate()
                    #p.wait()
                    y=1
            except:
                pass
        sys.exit()

    def printText(self):
        """Field-click handler: launch the 'florence' on-screen keyboard
        unless one is already running."""
        y=0
        for pid in psutil.pids():
            try:
                p = psutil.Process(pid)
                if p.name() == "florence" and y==0:
                    #_thread.start_new_thread(self.colegio, ())
                    y=1
            except:
                pass
        if(y==0):
            _thread.start_new_thread(self.colegio, ())

    def colegio(self):
        """Blocking launch of the florence on-screen keyboard (run in a thread)."""
        subprocess.check_output(["florence"])

    def login(self, event):
        """Connect: close the on-screen keyboard, write the entered SSID and
        password into wpa_supplicant.conf, install it and reboot; show an
        error box when either field is empty."""
        y=0
        for pid in psutil.pids():
            try:
                p = psutil.Process(pid)
                if p.name() == "florence" and y==0:
                    p.terminate()
                    #p.wait()
                    y=1
            except:
                pass
        if self.txtUser.text() and self.textPass.text():
            ssid=self.txtUser.text()
            clave = self.textPass.text()
            # Write a fresh wpa_supplicant config with the entered network.
            f=open("wpa_supplicant.conf","w")
            f.write("ctrl_interface=DIR=/var/run/wpa_supplicant GROUP=netdev\n"+
                    "update_config=1\n"+
                    "country=GB\n"+
                    "\n"+
                    "network={\n" +
                    "ssid="+ "\""+ssid +"\""+"\n"+
                    "psk="+ "\""+clave +"\""+"\n"+
                    "key_mgmt=WPA-PSK\n"+
                    "}")
            f.close()
            # Replace the system config; changes take effect after reboot.
            os.system('sudo rm /etc/wpa_supplicant/wpa_supplicant.conf')
            os.system('sudo mv wpa_supplicant.conf /etc/wpa_supplicant/')
            #print(111)
            os.system('sudo service mysql stop')
            time.sleep(1)
            #print(123)
            os.system('sudo reboot')
        else:
            msg = QMessageBox()
            msg.setIcon(QMessageBox.Information)
            msg.setText("SSID o Contraseña Incorrecto")
            msg.setWindowTitle("Elecciones 2020")
            msg.setStandardButtons(QMessageBox.Ok)
            retval = msg.exec_()

    def retranslateUi(self, QDialog):
        """Assign all user-visible (Spanish) strings to the widgets."""
        QDialog.setWindowTitle(_translate("QDialog", "Login", None))
        self.label.setText(_translate("QDialog", "SSID:", None))
        self.label_2.setText(_translate("QDialog", "Contraseña:", None))
        self.label_13.setText(_translate("QDialog", "*Los Cambios Toman Efecto Después de Reiniciar", None))
        self.btnlogin.setText(_translate("QDialog", "Conectar*", None))
        self.btncancel.setText(_translate("QDialog", "Cancelar", None))
        self.label_3.setText(_translate("QDialog", "Estado Actual:", None))
        self.label_7.setText(_translate("QDialog", "<html><head/><body><p align=\"center\"><span style=\" font-size:18pt; font-weight:600; color:#ffffff;\">Conexión a Internet</span></p>", None))#"<p align=\"center\"><span style=\" font-size:18pt; font-weight:600; color:#ffffff;\"> </span></p></body></html>", None))
        self.lbl_info.setText(_translate("Dialog", "<html><head/><body><p><span style=\" font-size:10pt; font-weight:600;\">ELECCIONES ORDINARIAS GENERALES DEL 17 DE MAYO DEL 2020 PARA ELEGIR AL PRESIDENTE Y VICEPRESIDENTE DE LA REPÚBLICA</span></p></body></html>", None))
def get_return_code_of_simple_cmd(cmd, stderr=STDOUT):
    """Execute a simple external command and return its exit status.

    The command string is tokenised with shlex.split; no shell is involved.

    Bug fix: the original passed stdout=PIPE to subprocess.call without
    ever reading the pipe, which can deadlock once the OS pipe buffer
    fills on a chatty command. Output is now discarded via DEVNULL.

    :param cmd: command line as a single string, e.g. "ping -c 1 host"
    :param stderr: destination for stderr; the default STDOUT merges it
        into the (discarded) stdout stream.
    :return: the process's integer return code.
    """
    args = shlex.split(cmd)
    return call(args, stdout=subprocess.DEVNULL, stderr=stderr)
def is_network_alive():
    """Return True when a single ping to www.google.com exits with code 0."""
    cmd = "ping -c 1 www.google.com"
    return get_return_code_of_simple_cmd(cmd) == 0
# JCE: presumably the compiled Qt resource module providing the ':/JCE/...'
# image URLs used above — TODO confirm.
import JCE
if __name__ == "__main__":
    import sys
##    app = QtGui.QApplication(sys.argv)
##    Dialog = QtGui.QDialog()
##    ui = Ui_Dialog1()
##    ui.setupUi(Dialog)
##    Dialog.show()
##    sys.exit(app.exec_())
    # Launch the Wi-Fi configuration dialog and block until it closes.
    app= QApplication(sys.argv)
    form = Ui_Dialog()
    form.show()
    sys.exit(app.exec_())
"php5185@rit.edu"
] | php5185@rit.edu |
18a28d5e4e839646f65336d3d49006c5a957223d | de0584cdd6a0b452efa3c8bd0e1e43286853c814 | /preprocess/huff/clean_huffpost.py | a2a2d91bc756e5a1c5826ea7fe1277733daea635 | [] | no_license | johnsonice/triplet-loss | a325ecd229b5346aaca4cb0556bbc18e9e4eae26 | 71c13dfa7631ec93c564d9dc9da4fcf667eb9500 | refs/heads/master | 2023-08-24T17:49:01.593415 | 2021-10-23T16:27:26 | 2021-10-23T16:27:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,823 | py | import json
from random import shuffle
#cleaning up text
import re
def get_only_chars(line):
    """Normalise a headline to lowercase ASCII letters and single spaces.

    Apostrophes are deleted, hyphens/tabs/newlines become spaces, every
    remaining character outside [a-z ] becomes a space, runs of spaces
    collapse to one, and a single leading space is stripped. Note a
    trailing space may remain (original behavior, preserved).

    Bug fix: the original indexed clean_line[0] unconditionally, which
    raised IndexError when the input string was empty.
    """
    clean_line = ""

    line = line.replace("’", "")
    line = line.replace("'", "")
    line = line.replace("-", " ") #replace hyphens with spaces
    line = line.replace("\t", " ")
    line = line.replace("\n", " ")
    line = line.lower()

    for char in line:
        if char in 'qwertyuiopasdfghjklzxcvbnm ':
            clean_line += char
        else:
            clean_line += ' '

    clean_line = re.sub(' +',' ',clean_line) #delete extra spaces
    if clean_line.startswith(' '):
        clean_line = clean_line[1:]
    return clean_line
def clean_dataset(file_path, output_path_train, output_path_test):
    """Build train/test splits from the HuffPost JSON-lines dump.

    Reads one JSON object per line, groups headlines by category, maps
    categories to stable integer ids (alphabetical order), and writes
    tab-separated "<id>\\t<cleaned headline>" lines: per category, 300
    shuffled headlines go to the test file and up to the next 700 to the
    train file.
    """
    lines = open(file_path, 'r').readlines()
    category_to_headlines = {}
    for line in lines:
        # line[:-1] drops the trailing newline before parsing.
        d = json.loads(line[:-1])
        category = d['category']
        headline = d['headline']
        # Very short headlines are discarded.
        if len(headline) > 10:
            if category in category_to_headlines:
                category_to_headlines[category].append(headline)
            else:
                category_to_headlines[category] = [headline]
    # Deterministic id per category: alphabetical rank.
    category_to_id = {category: i for i, category in enumerate(list(sorted(list(category_to_headlines.keys()))))}
    # NOTE(review): writers are never closed/flushed explicitly; relies on
    # interpreter exit to flush buffers.
    train_writer = open(output_path_train, 'w')
    test_writer = open(output_path_test, 'w')
    for category, headlines in category_to_headlines.items():
        _id = category_to_id[category]
        shuffle(headlines)
        test_headlines = headlines[:300]
        train_headlines = headlines[300:1000]
        for train_headline in train_headlines:
            train_writer.write('\t'.join([str(_id), get_only_chars(train_headline)]) + '\n')
        for test_headline in test_headlines:
            test_writer.write('\t'.join([str(_id), get_only_chars(test_headline)]) + '\n')
if __name__ == "__main__":
clean_dataset('News_Category_dataset_v2.json', 'huffpost/train.txt', 'huffpost/test.txt') | [
"jason.weng.wei@gmail.com"
] | jason.weng.wei@gmail.com |
e736dc68900dfd50718377d0095f797ff560d0e3 | c0799281d43614bf1344c4cb045e69a449a7ed24 | /celeryconfig.py | 94b6dfb5a3303f7f1974b283615780e2175d8ee4 | [] | no_license | wonderjar/celery-example | 2daf7129cdf261abf146e74e5e6bf3de961cf098 | 2bc771a457cc9692d183cb0192722504e256779b | refs/heads/master | 2020-04-08T11:24:40.137445 | 2018-11-27T09:54:55 | 2018-11-27T09:54:55 | 159,304,906 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 326 | py | broker_url = 'amqp://XXXX:XXXX@XXXX:XXXX//'
# Celery configuration (new-style lowercase settings). The broker URL is
# defined just above with placeholder credentials.
result_backend = 'rpc://'

# JSON everywhere: serializers and the accepted content types.
task_serializer = 'json'
result_serializer = 'json'
accept_content = ['json']
timezone = 'Asia/Shanghai'
enable_utc = True

# Periodic task schedule for celery beat: run tasks.add(16, 16) every
# 2 seconds. NOTE(review): the entry name says "every-30-seconds" but the
# schedule is 2.0 seconds — confirm which was intended.
beat_schedule = {
    'add-every-30-seconds': {
        'task': 'tasks.add',
        'schedule': 2.0,
        'args': (16, 16)
    },
}
"jarancn@gmail.com"
] | jarancn@gmail.com |
a0f042399c854efeeae2f22745708993359d89e0 | 8a11814f757b22cacd89ae618265d6705393ba78 | /amplify/agent/data/statsd.py | 8c17a990d29c16671f7bda85bf50d173b786d17e | [
"BSD-2-Clause"
] | permissive | ngonsol/nginx-amplify-agent | e763bfcc82cf103b4eb2ce49269dfccaec0cb9af | c711579208465578b03dda5db40ccc7dc8f31b81 | refs/heads/master | 2021-01-18T03:17:04.494068 | 2016-05-18T20:17:25 | 2016-05-18T20:17:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,307 | py | # -*- coding: utf-8 -*-
import copy
import time
from collections import defaultdict
__author__ = "Mike Belov"
__copyright__ = "Copyright (C) Nginx, Inc. All rights reserved."
__credits__ = ["Mike Belov", "Andrei Belov", "Ivan Poluyanov", "Oleg Mamontov", "Andrew Alexeev", "Grant Hulegaard"]
__license__ = ""
__maintainer__ = "Mike Belov"
__email__ = "dedm@nginx.com"
class StatsdClient(object):
    def __init__(self, address=None, port=None, interval=None, object=None):
        """Create a statsd-style in-process metrics accumulator.

        :param address: destination address (stored only in this chunk)
        :param port: destination port (stored only)
        :param interval: flush interval in seconds; used by incr() to size
            rate-based counter slots
        :param object: owning object the metrics belong to (stored only)
        """
        # Import context as a class object to avoid circular import on statsd. This could be refactored later.
        from amplify.agent.common.context import context
        self.context = context

        self.address = address
        self.port = port
        self.object = object
        self.interval = interval
        # metric type ('counter'/'gauge'/'timer'/'average') -> {name -> samples}
        self.current = defaultdict(dict)
        # presumably holds the snapshot being delivered after flush() —
        # not exercised in this chunk, TODO confirm.
        self.delivery = defaultdict(dict)
def average(self, metric_name, value):
"""
Same thing as histogram but without p95
:param metric_name: metric name
:param value: metric value
"""
if metric_name in self.current['average']:
self.current['average'][metric_name].append(value)
else:
self.current['average'][metric_name] = [value]
def timer(self, metric_name, value):
"""
Histogram with 95 percentile
The algorithm is as follows:
Collect all the data samples for a period of time (commonly a day, a week, or a month).
Sort the data set by value from highest to lowest and discard the highest 5% of the sorted samples.
The next highest sample is the 95th percentile value for the data set.
:param metric_name: metric name
:param value: metric value
"""
if metric_name in self.current['timer']:
self.current['timer'][metric_name].append(value)
else:
self.current['timer'][metric_name] = [value]
    def incr(self, metric_name, value=None, rate=None, stamp=None):
        """
        Simple counter with rate

        :param metric_name: metric name
        :param value: metric value (defaults to 1)
        :param rate: fraction of the flush interval that one counter slot
            spans; only honoured when self.interval is set
        :param stamp: timestamp (current timestamp will be used if this is not specified)
        """
        timestamp = stamp or int(time.time())
        if value is None:
            value = 1

        # new metric
        if metric_name not in self.current['counter']:
            # Slots are mutable [timestamp, value] lists (not tuples) so the
            # in-place updates below can rewrite them.
            self.current['counter'][metric_name] = [[timestamp, value]]
            return

        # metric exists
        slots = self.current['counter'][metric_name]
        last_stamp, last_value = slots[-1]

        # if rate is set then check it's time
        if self.interval and rate:
            sample_duration = self.interval * rate
            # write to current slot
            if timestamp < last_stamp + sample_duration:
                self.current['counter'][metric_name][-1] = [last_stamp, last_value + value]
            else:
                # NOTE(review): the new slot is stamped with last_stamp, not
                # the current timestamp — looks suspicious; confirm whether
                # this is intentional before changing it.
                self.current['counter'][metric_name].append([last_stamp, value])
        else:
            # No rate bucketing: accumulate everything in the latest slot.
            self.current['counter'][metric_name][-1] = [last_stamp, last_value + value]
def agent(self, metric_name, value, stamp=None):
"""
Agent metrics
:param metric_name: metric
:param value: value
:param stamp: timestamp (current timestamp will be used if this is not specified)
"""
timestamp = stamp or int(time.time())
self.current['gauge'][metric_name] = [(timestamp, value)]
def gauge(self, metric_name, value, delta=False, prefix=False, stamp=None):
"""
Gauge
:param metric_name: metric name
:param value: metric value
:param delta: metric delta (applicable only if we have previous values)
:param stamp: timestamp (current timestamp will be used if this is not specified)
"""
timestamp = stamp or int(time.time())
if metric_name in self.current['gauge']:
if delta:
last_stamp, last_value = self.current['gauge'][metric_name][-1]
new_value = last_value + value
else:
new_value = value
self.current['gauge'][metric_name].append((timestamp, new_value))
else:
self.current['gauge'][metric_name] = [(timestamp, value)]
def flush(self):
if not self.current:
return
results = {}
delivery = copy.deepcopy(self.current)
self.current = defaultdict(dict)
# histogram
if 'timer' in delivery:
timers = {}
timestamp = int(time.time())
for metric_name, metric_values in delivery['timer'].iteritems():
if len(metric_values):
metric_values.sort()
length = len(metric_values)
timers['G|%s' % metric_name] = [[timestamp, sum(metric_values) / float(length)]]
timers['C|%s.count' % metric_name] = [[timestamp, length]]
timers['G|%s.max' % metric_name] = [[timestamp, metric_values[-1]]]
timers['G|%s.median' % metric_name] = [[timestamp, metric_values[int(round(length / 2 - 1))]]]
timers['G|%s.pctl95' % metric_name] = [[timestamp, metric_values[-int(round(length * .05))]]]
results['timer'] = timers
# counters
if 'counter' in delivery:
counters = {}
for k, v in delivery['counter'].iteritems():
# Aggregate all observed counters into a single record.
last_stamp = v[-1][0] # Use the oldest timestamp.
total_value = 0
for timestamp, value in v:
total_value += value
# Condense the list of lists 'v' into a list of a single element. Remember that we are using lists
# instead of tuples because we need mutability during self.incr().
counters['C|%s' % k] = [[last_stamp, total_value]]
results['counter'] = counters
# gauges
if 'gauge' in delivery:
gauges = {}
for k, v in delivery['gauge'].iteritems():
# Aggregate all observed gauges into a single record.
last_stamp = v[-1][0] # Use the oldest timestamp.
total_value = 0
for timestamp, value in v:
total_value += value
# Condense list of tuples 'v' into a list of a single tuple using an average value.
gauges['G|%s' % k] = [(last_stamp, float(total_value)/len(v))]
results['gauge'] = gauges
# avg
if 'average' in delivery:
averages = {}
timestamp = int(time.time()) # Take a new timestamp here because it is not collected previously.
for metric_name, metric_values in delivery['average'].iteritems():
if len(metric_values):
length = len(metric_values)
averages['G|%s' % metric_name] = [[timestamp, sum(metric_values) / float(length)]]
results['average'] = averages
return {
'metrics': copy.deepcopy(results),
'object': self.object.definition
}
| [
"dedm@nginx.com"
] | dedm@nginx.com |
22e373cdfc187ba4af35252c0e3b1eec310a6a88 | bfa8b019ae083d093616b933c03c6664ea484f92 | /hello_devops/urls.py | 9da648dd20bd16590f9df700e8fb46930d1b2e95 | [] | no_license | eugenedotn/hello_devops | bfd6ea2707d38a4bca57eec210d3abb7ff45e0e0 | e68d224b19ca7f5c97b9dbd584e95b870afd729f | refs/heads/master | 2016-08-11T06:58:28.883422 | 2015-12-24T16:41:24 | 2015-12-24T16:41:24 | 48,550,197 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 903 | py | """hello_devops URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Import the include() function: from django.conf.urls import url, include
3. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import url
from django.contrib import admin
from hello_devops import views
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^$', views.hello_index, name='hello'),
]
| [
"1fornothing1forme@gmail.com"
] | 1fornothing1forme@gmail.com |
3d725712e172cee8591768772262237bc21dcaae | 830465731dfda87b4141546262f20d74c29297bf | /GENERAL/RADARCTF/Logo/sol.py | d32c2f2933fdf57751dd6485d243603bc52c9566 | [] | no_license | jchen8tw-research/CTF | f559d7ca0e16a730335b11caeeae208c42e8bf17 | f49615c24437a9cc6a2c20d6b30cb5abf7a32b71 | refs/heads/master | 2023-03-17T12:29:08.630613 | 2021-03-23T06:31:26 | 2021-03-23T06:31:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 249 | py | import os
import binascii
import struct
misc = open("logo.png","rb").read()
for i in range(1024):
data = misc[12:16] + struct.pack('>i',i)+ misc[20:29]
crc32 = binascii.crc32(data) & 0xffffffff
if crc32 == 0xB65879B0:
print i | [
"cpr1014@gmail.com"
] | cpr1014@gmail.com |
132bffaf19feddd933acb81afabaaa8c2cefbf98 | bdb763414a8b35341deef2f2363c13a039ce2c32 | /mysite/views.py | 2a6b8dc81e359dc46067c8ee1d537d1e4a83257c | [] | no_license | sanketdeshmane/textUtils | 0cc463fc656538eff271799ef7fbc2fd456af87c | 7c9b9ba14667c9707f4884948352f51d4e39a506 | refs/heads/main | 2023-04-07T09:29:38.934689 | 2021-04-18T08:58:41 | 2021-04-18T08:58:41 | 359,091,291 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,130 | py | # I have created this file - Harry video #17
from django.http import HttpResponse
from django.shortcuts import render
def analyze(request):
#Get the text
djtext = request.POST.get('text', 'default')
# Check checkbox values
removepunc = request.POST.get('removepunc', 'off')
fullcaps = request.POST.get('cap', 'off')
newlineremover = request.POST.get('nlrm', 'off')
extraspaceremover = request.POST.get('sprm', 'off')
#Check which checkbox is on
if removepunc == "on":
punctuations = '''!()-[]{};:'"\,<>./?@#$%^&*_~'''
analyzed = ""
for char in djtext:
if char not in punctuations:
analyzed = analyzed + char
params = {'purpose':'Removed Punctuations', 'analyzed_text': analyzed}
djtext = analyzed
# return render(request, 'analyze.html', params)
if(fullcaps=="on"):
analyzed = ""
for char in djtext:
analyzed = analyzed + char.upper()
params = {'purpose': 'Changed to Uppercase', 'analyzed_text': analyzed}
djtext = analyzed
# Analyze the text
# return render(request, 'analyze.html', params)
if(extraspaceremover=="on"):
analyzed = ""
for index, char in enumerate(djtext):
if not(djtext[index] == " " and djtext[index+1]==" "):
analyzed = analyzed + char
params = {'purpose': 'Removed NewLines', 'analyzed_text': analyzed}
djtext = analyzed
# Analyze the text
# return render(request, 'analyze.html', params)
if (newlineremover == "on"):
analyzed = ""
for char in djtext:
if char != "\n" and char!="\r":
analyzed = analyzed + char
else:
print("no")
print("pre", analyzed)
params = {'purpose': 'Removed NewLines', 'analyzed_text': analyzed}
if(removepunc != "on" and newlineremover!="on" and extraspaceremover!="on" and fullcaps!="on"):
return HttpResponse("please select any operation and try again")
return render(request, 'analuze.html', params)
| [
"sanketdeshmane@gmail.com"
] | sanketdeshmane@gmail.com |
1da7a6a5dfc37dfc0b476535430b0390648ecc19 | 7344690ce790d0d12fc073f00633ba6d0addfd3d | /app.py | a45f2b31bf19cad14af9a4c32a2c2a47d9412622 | [] | no_license | prateek-mehra/codeforces-suggester | ceaf9d6a5f129d53bb070ca740d4bc980e9f767a | db8ae5033325b8d677a65927100b68f4490c7f44 | refs/heads/main | 2023-03-04T17:53:00.464007 | 2021-02-23T13:08:08 | 2021-02-23T13:08:08 | 304,351,326 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,811 | py | from flask import Flask
from flask import render_template
from flask import request,redirect,url_for
app = Flask(__name__)
@app.route('/')
def home():
return render_template('index.html')
@app.route('/login',methods=["POST","GET"])
def login():
if request.method=="POST":
name=request.form["username"]
return redirect(url_for("user",usr=name))
else:
return render_template('login.html')
@app.route('/<usr>')
def user(usr):
if usr is None:
return "ERROR"
import matplotlib.pyplot as plt
from pprint import pprint
import requests,random
from collections import defaultdict
verdicts=defaultdict(list)
page="https://codeforces.com/api/user.status?handle="+usr
page+="&from=1&count=450"
open=requests.get(page).json()
res=open["result"]
nm=[]
problems=[]
probset=[]
tagset=[]
tagle=[]
strong_topics=[]
strength={'2-sat':0,'chinese remainder theorem':0,'greedy':0,'binary search':0,'brute force':0,'combinatorics':0,'constructive algorithms':0,'data structures':0,'dfs and similar':0,'bitmasks':0,'*special':0
,'divide and conquer':0,'dp':0,'dsu':0,'fft':0,'expression parsing':0,'flows':0,'games':0,'geometry':0,'graph matchings':0,'implementation':0,'hashing':0,'graphs':0,'interactive':0,'math':0,'matrices':0,'meet-in-the-middle':0,'number theory':0, 'probabilities':0,'schedules':0,'shortest paths':0,'sortings':0,'string suffix structures':0,'strings':0,'ternary search':0,'trees':0,'two pointers':0}
total={'2-sat':0,'chinese remainder theorem':0,'greedy':0,'binary search':0,'brute force':0,'combinatorics':0,'constructive algorithms':0,'data structures':0,'dfs and similar':0,'bitmasks':0,'*special':0
,'divide and conquer':0,'dp':0,'dsu':0,'fft':0,'expression parsing':0,'flows':0,'games':0,'geometry':0,'graph matchings':0,'implementation':0,'hashing':0,'graphs':0,'interactive':0,'math':0,'matrices':0,'meet-in-the-middle':0,'number theory':0, 'probabilities':0,'schedules':0,'shortest paths':0,'sortings':0,'string suffix structures':0,'strings':0,'ternary search':0,'trees':0,'two pointers':0}
final={'2-sat':0,'chinese remainder theorem':0,'greedy':0,'binary search':0,'brute force':0,'combinatorics':0,'constructive algorithms':0,'data structures':0,'dfs and similar':0,'bitmasks':0,'*special':0
,'divide and conquer':0,'dp':0,'dsu':0,'fft':0,'expression parsing':0,'flows':0,'games':0,'geometry':0,'graph matchings':0,'implementation':0,'hashing':0,'graphs':0,'interactive':0,'math':0,'matrices':0,'meet-in-the-middle':0,'number theory':0, 'probabilities':0,'schedules':0,'shortest paths':0,'sortings':0,'string suffix structures':0,'strings':0,'ternary search':0,'trees':0,'two pointers':0}
#storing all the problems and tags from the json file
for item in res:
problems.append(item["problem"]["name"])
verdicts[item["problem"]["name"]].append(item["verdict"])
nm.append(item["problem"]["tags"])
#making sets of unique problems and tags
for i in range(len(problems)-1):
if(problems[i]!=problems[i+1]):
probset.append(problems[i])
tagset.append(nm[i])
#calculating the user's strength in a particular topic by storing the topics of which the user has solved a problem in the first try.
for i in range(len(probset)):
if(len(verdicts[probset[i]])==1 and verdicts[probset[i]][0]=="OK"):
for j in range(len(tagset[i])):
strength[tagset[i][j]]+=1
#storing the total attempts per topic
for i in range(len(probset)):
for j in range(len(tagset[i])):
total[tagset[i][j]]+=1
#storing the ratio of strength to total attempts for each topic
for i in total:
if(total[i]>0):
final[i]=strength[i]/total[i]
#sorting the ratios in descending order
sort_strength = sorted(final.items(), key=lambda x: x[1], reverse=True)
ctr2=0
strongest=[]
#storing the strong(top 10) and strongest(top 5) topics
for i in sort_strength:
ctr2+=1
if(ctr2<=10):
strong_topics.append(i[0])
if(ctr2<=5):
strongest.append(i[0])
weak=strong_topics.copy()
weak.reverse()
links=[]
sample="http://codeforces.com/problemset?tags="
ctr=0
#storing the comparitively weaker topics
for i in range(len(weak)):
if ctr<5:
ctr+=1
links.append(weak[i])
improve=links.copy()
linknew=[]
for i in range(len(links)):
linknew.append(links[i].replace(" ","%20"))
#creating links for strongest topics
stronglinks=[]
weaklinks=linknew.copy()
for i in range(len(strongest)):
stronglinks.append(sample+strongest[i].replace(" ","%20"))
#creating links for weaker topics
for i in range(len(weaklinks)):
weaklinks[i]=sample+weaklinks[i]
#fetching user rating
page2="https://codeforces.com/api/user.info?handles="
page2+=usr
res2=requests.get(page2).json()["result"]
rank=res2[0]["rating"]
ctr=0
probid=[]
indices=[]
for i in range(len(linknew)):
if ctr<5:
ctr+=1
page3="https://codeforces.com/api/problemset.problems?tags="
page3+=linknew[i]
res3=requests.get(page3).json()["result"]
#storing problem details of problems relating to weaker topics, around the user's rating
for i in res3["problems"]:
if "rating" in i:
if i["rating"]>=rank and i["rating"]-rank<=200:
probid.append(i["contestId"])
indices.append(i["index"])
if rank>=3500:
if rank-i["rating"]<=200:
probid.append(i["contestId"])
indices.append(i["index"])
#Creating final direct links to 25 problems randomly generated out of the list of problems fetched according to the above criteria
finallinks=[]
sample2="https://codeforces.com/problemset/problem"
for i in range(25):
a=random.randrange(1,len(probid))
finallinks.append(sample2+"/"+str(probid[a])+"/"+str(indices[a]))
return render_template('suggester.html',Myname=usr,topics=strongest,links=finallinks,improve=improve,stronglinks=stronglinks,weaklinks=weaklinks)
if __name__=="__main__":
name=None
app.run(debug=True)
| [
"noreply@github.com"
] | noreply@github.com |
28cced1675a0ffae2d057b019aba08fb43a1d2ed | 1cf103334377383a0631d22124b622eb153ff2a8 | /portafolio/urls.py | 84bf34dc05b589bba69968af0c3bf074ce307724 | [] | no_license | rarivero12/backend-A | c706ef0824edcdda4214d240d22db7bf5bcf4576 | e81e7b2887036ea6e7667a57864ee740caa055aa | refs/heads/master | 2020-04-25T01:23:55.588800 | 2019-02-26T15:28:40 | 2019-02-26T15:28:40 | 171,700,740 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,466 | py | from django.urls import path
from .restviews import *
from rest_framework.routers import DefaultRouter
from django.conf import settings
from django.conf.urls.static import static
app_name = 'portafolio'
#Rutas predeterminadas de django rest
router = DefaultRouter()
router.register('polls', PollViewSet, base_name='polls')
router.register('contacto', ContactoViewSet, base_name='contacto')
router.register('imagen', ImagenViewSet, base_name='imagen')
router.register('archivo', ArchivoViewSet, base_name='archivo')
router.register('categoriaport', CategoriaPortafolioViewSet, base_name='categoriaport')
router.register('colaborador', ColaboradorViewSet, base_name='colaborador')
router.register('portafolio', PortafolioViewSet, base_name='portafolio')
router.register('tipoServicio',TipoServicioViewSet,base_name='tipo_servicio')
# Servicios
router.register('servicio', ServicioViewSet, base_name='servicio')
#rutas por mi
urlpatterns = [
path("pol/", PollList.as_view(), name="polls_list"),
path("choices/", ChoiceList.as_view(), name="choice_list"),
path("users/", UserViewSet.as_view(), name="user_list"),
path("groups/", GroupViewSet.as_view(), name="group_list"),
path("polls/<int:pk>/", PollDetail.as_view(), name="polls_detail"),
path("vote/", CreateVote.as_view(), name="create_vote"),
]
urlpatterns += router.urls
#Para poder ver las imagnes
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| [
"rafaelarturo16@gmail.com"
] | rafaelarturo16@gmail.com |
047edf652bc638394b4c798d7e16353af18c1600 | 3c74da0c1cb7f59e3dadb62bb6925d88e3174e65 | /src/cheapatlas/pipelines/buildings_classification/__init__.py | 5cfdc4101f0056485f9c776a4e71502f2d230423 | [
"MIT"
] | permissive | o0oBluePhoenixo0o/CheapAtlas | c248cfe22c25e3eb73c8060626ffb76599cd93e6 | 467c8673b62e83514025e141c4c83efe65533a6b | refs/heads/main | 2023-02-21T08:02:54.641907 | 2021-01-14T22:59:46 | 2021-01-14T22:59:46 | 314,072,759 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,568 | py | # Copyright 2020 QuantumBlack Visual Analytics Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND
# NONINFRINGEMENT. IN NO EVENT WILL THE LICENSOR OR OTHER CONTRIBUTORS
# BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF, OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# The QuantumBlack Visual Analytics Limited ("QuantumBlack") name and logo
# (either separately or in combination, "QuantumBlack Trademarks") are
# trademarks of QuantumBlack. The License does not grant you any right or
# license to the QuantumBlack Trademarks. You may not use the QuantumBlack
# Trademarks or any confusingly similar mark as a trademark for your product,
# or use the QuantumBlack Trademarks in any other manner that might cause
# confusion in the marketplace, including but not limited to in advertising,
# on websites, or on software.
#
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This is a boilerplate pipeline 'buildings_classification'
generated using Kedro 0.16.6
"""
from .pipeline import create_pipeline # NOQA
| [
"trungnnn1908@gmail.com"
] | trungnnn1908@gmail.com |
7e1b2877ce3f5524c2c098b0c9778489b2b16919 | 6ba4a5649f375b27e0c5cf7ddc7641d661020102 | /Server.py | beee2a033f668c4bbd2dc85f754c53dbbe2032f0 | [] | no_license | jwall-tech/Basic-Networking-PyApp | fda85e64475025b062a079c8abc9cab7cec14235 | 8265e4bc947f613e2c65f8317a7c3eb716d72277 | refs/heads/main | 2023-07-24T09:13:24.433885 | 2021-09-09T19:16:16 | 2021-09-09T19:16:16 | 404,841,902 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,462 | py | import time
import threading
import logging
import socket
class Connection():
def __init__(self,server,address,connection):
self.address = address
self.ident = "USER "+str(address[0])
self.connection = connection
self.server = server
self.requestCache = {}
self.connected = True
def func(data):
print("from "+str(self.ident)+" data: "+str(data))
if data.startswith("setuser"):
self.ident = data[8:]
msg = "changed username to "+self.ident
self.connection.send(msg.encode())
else:
self.connection.send(data.encode())
self.DataFunc = func
self.addRequestCache(time.time(),"Connection")
def conLoop():
while True:
data = self.connection.recv(1024).decode()
if not data:
break
self.DataFunc(data)
self.conLoopThread = threading.Thread(target=conLoop)
self.conLoopThread.start()
def addRequestCache(self,Timestamp,RequestData):
self.requestCache[str(Timestamp)] = RequestData
def disconnect(self):
self.connected = False
self.connection = None
self.server.connsInt -= 1
def newIdent(self,ident):
self.ident = ident
def clearCache(self):
self.requestCache = {}
def newDataFunc(self,newFunc):
self.DataFunc = newFunc
class Server():
def __init__(self,host,port,maxConnections):
self.host = host
self.port = port
self.maxConns = maxConnections
self.connsInt = 0
self.connections = {}
self.socket = socket.socket()
self.socket.bind((host,port))
def ListenForConnection():
self.socket.listen(2)
conn,addr = self.socket.accept()
print("conn from: "+str(addr))
self.newConnection(conn,addr)
while True:
if self.connsInt < self.maxConns:
ListenForConnection()
def newConnection(self,conn,addr):
self.connsInt =+ 1
myCon = Connection(self,addr,conn)
self.connections[str(addr)] = myCon
myServ = Server("IP","PORT","MAXUSERS")
| [
"noreply@github.com"
] | noreply@github.com |
fcc001c385ef8e0f79744742ed86cd4dc56be772 | 2878ea8c4d64842917c4ad9da7599db1b424a007 | /test/conftest.py | fec74a890a8496f71cd0f215f8cb2ca4c1de26c5 | [
"MIT"
] | permissive | eukaryote/pytest-tornasync | 664fbb219533b90f092d93110a727fd390bd45ef | 9f1bdeec3eb5816e0183f975ca65b5f6f29fbfbb | refs/heads/master | 2023-06-08T17:56:18.373347 | 2019-07-16T01:59:33 | 2019-07-16T01:59:33 | 59,433,062 | 24 | 3 | MIT | 2022-12-11T03:45:38 | 2016-05-22T20:33:06 | Python | UTF-8 | Python | false | false | 182 | py | import pytest
import tornado.web
from test import MainHandler
pytest_plugins = ["pytester"]
@pytest.fixture
def app():
return tornado.web.Application([(r"/", MainHandler)])
| [
"sapientdust+github@gmail.com"
] | sapientdust+github@gmail.com |
56890908acd23b1f2928c6955e447115bdf7bb0c | f3836aefad8f2565508602a5cf6a04952a2b8b8a | /underliner/z-Archive/canvas-test.py | 2a4fd9762d12dcc61fb14cdbcf5b0ad5512f0466 | [] | no_license | jtanadi/robofontScripts | c53f41806d4ace7a2cca858722b036cb2912ccca | 3ac46077541125f3120c56992681d68451292b78 | refs/heads/master | 2021-01-20T10:55:12.811172 | 2018-11-02T12:06:09 | 2018-11-02T12:06:09 | 101,657,892 | 8 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,167 | py | from mojo.canvas import Canvas
from vanilla import *
from fontTools.pens.cocoaPen import CocoaPen
from mojo.drawingTools import *
f = CurrentFont()
g = f["g"]
class GlyphWindow:
def __init__(self):
self.size = 0.25
self.w = Window((400, 400))
self.w.slider = Slider((10, 5, -10, 22),
minValue = 0.1,
maxValue = 0.4,
value=self.size,
callback=self.sliderCallback)
self.w.canvas = Canvas((10, 30, -10, -10),
hasHorizontalScroller = False,
hasVerticalScroller = False,
delegate=self)
self.w.open()
def sliderCallback(self, sender):
self.size = sender.get()
self.w.canvas.update()
def draw(self):
#rect(10, 10, self.size, self.size)
pen = CocoaPen(f)
fill(0,0,0, 0.5)
stroke(0,0,0,1)
translate(0,80)
scale(self.size)
g.draw(pen)
drawPath(pen.path)
GlyphWindow() | [
"jesentanadi@JesenTanadi.local"
] | jesentanadi@JesenTanadi.local |
aa47fb7c1e07c0d30da339dcd48ee80d105fa369 | 1965f5f6170c2ac250136bf6b250a4823224dd1c | /info/modules/index/views.py | 051a534a50496cb1e9aae85f74a6eb74318e965a | [] | no_license | ctxuege/information27 | fa815e9a88358116b6c516cea176c893bf9d979c | fecc5bc42a01261b24abaf5b6b4eeb769b83c69a | refs/heads/master | 2020-09-30T13:46:15.748943 | 2019-12-13T09:46:12 | 2019-12-13T09:46:12 | 227,299,710 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 190 | py | from . import index_blu
from info import redis_store
@index_blu.route('/')
def index():
# 向redis中保存一个值 name itcast
redis_store.set('name','itcast')
return 'index' | [
"c921967783@qq.com"
] | c921967783@qq.com |
a93537878ffd806c18c1610a412cc93ae33cd137 | ef91b74131b8791800d2365982edbfaf515ef54a | /day5/02_mechanicalsoup/ex04_ms_bruteforce.py | 0dc8e5c55602be49f9e89abf437c845b588d7a1a | [] | no_license | akatkar/python-training-examples | ec749f58698fc1cfa458246ce11069f94d25027e | 8afa2e347c079a84740f4559e78c1be55eed5226 | refs/heads/master | 2020-05-16T03:32:22.613770 | 2019-04-22T09:15:14 | 2019-04-22T09:25:17 | 182,726,662 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,188 | py | import argparse
from getpass import getpass
import mechanicalsoup
def bruteforce(username, password):
browser = mechanicalsoup.StatefulBrowser()
# browser.set_verbose(2)
browser.open("https://github.com")
browser.follow_link("login")
browser.select_form('#login form')
browser["login"] = username
browser["password"] = password
resp = browser.submit_selected()
# Uncomment to launch a web browser on the current page:
# browser.launch_browser()
# verify we are now logged in
page = browser.get_current_page()
messages = page.find("div", class_="flash-messages")
if messages:
print(messages.text)
assert page.select(".logout-form")
print(page.title.text)
# verify we remain logged in (thanks to cookies) as we browse the rest of
# the site
page3 = browser.open("https://github.com/MechanicalSoup/MechanicalSoup")
assert page3.soup.select(".logout-form")
def main():
with open('pass.txt') as fp:
passList = fp.readlines()
for item in passList:
username, password = item.strip().split(",")
bruteforce(username, password)
if __name__ == '__main__':
main() | [
"alikatkar@gmail.com"
] | alikatkar@gmail.com |
b01c493823be9345a6df864e89c3add6851675a8 | ff4d64f0a349ecef865c75ba4fc955eec2d407db | /app/parser_engine/pySBD/pysbd/lang/hindi.py | e29bf38f6e416832886e3524123d45eaf6241a4c | [
"MIT"
] | permissive | SemanticSearching/SSApp | 87117fee17c223ffba8ca410289f3a0911b9834a | 571fab6409febd4a670351b3cd25a4196a260a50 | refs/heads/master | 2023-08-04T15:00:29.818858 | 2021-09-20T22:42:46 | 2021-09-20T22:42:46 | 339,630,045 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 371 | py | # -*- coding: utf-8 -*-
from pysbd.abbreviation_replacer import AbbreviationReplacer
from pysbd.lang.common import Common, Standard
class Hindi(Common, Standard):
iso_code = 'hi'
SENTENCE_BOUNDARY_REGEX = r'.*?[।\|!\?]|.*?$'
Punctuations = ['।', '|', '.', '!', '?']
class AbbreviationReplacer(AbbreviationReplacer):
SENTENCE_STARTERS = []
| [
"wy_cloud@hotmail.com"
] | wy_cloud@hotmail.com |
adf96ea220c291a473afde77e02ed5098c2524d5 | 2b074ab6e281ff0806711358060d2f93386625f9 | /bin/test_bnn.py | cca849e47b95799028656c0fa8112996aeb9293f | [] | no_license | egstatsml/esBPS | 7c1942fed00be45297d15489d03d650f84e6fa31 | aeb7ccc94fd0d0e5584ba92ad7b3c3e5b5b3f209 | refs/heads/main | 2023-05-09T00:34:17.712951 | 2021-06-06T23:30:59 | 2021-06-06T23:30:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,428 | py | import math
from datetime import datetime
from functools import partial
import matplotlib.pyplot as plt
import numpy as np
import sklearn.model_selection
import sklearn.preprocessing
import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp
import arviz as az
tfd = tfp.distributions
from tbnn.pdmp.bps import BPSKernel, CovPBPSKernel, PBPSKernel
from tbnn.pdmp.poisson_process import AdaptivePSBPSampler
tf.enable_v2_behavior()
def dense(X, W, b, activation):
    """Apply one fully-connected layer: ``activation(X @ W + b)``."""
    pre_activation = tf.matmul(X, W) + b
    return activation(pre_activation)
def build_network(weights_list, biases_list, activation=tf.nn.tanh):
    """Build a feed-forward network closure from explicit weight/bias tensors.

    Args:
        weights_list: list of weight matrices, one per layer.
        biases_list: list of bias vectors, one per layer.
        activation: nonlinearity applied after every layer except the last.

    Returns:
        Callable mapping an input batch `X` to a `tfd.Normal` predictive
        distribution over the scalar targets, with a fixed observation
        scale of 0.20.
    """
    # FIX: removed leftover debug prints (ran on every forward pass) and a
    # loop counter `i` that was printed but never incremented.
    def model(X):
        net = X
        # hidden layers with the nonlinearity
        for weights, biases in zip(weights_list[:-1], biases_list[:-1]):
            net = dense(net, weights, biases, activation)
        # final linear layer (no activation)
        net = tf.matmul(net, weights_list[-1]) + biases_list[-1]
        # preds has size N = X.shape[0]; observation noise is fixed at 0.20
        preds = net[:, 0]
        return tfd.Normal(loc=preds, scale=0.20)
    return model
def network_forward(X, weights_list, biases_list, activation=tf.nn.tanh):
    """Deterministic forward pass returning point predictions.

    Same architecture as the closure built by `build_network`, but returns
    the raw predictions instead of wrapping them in a distribution.

    Args:
        X: input batch.
        weights_list: list of weight matrices, one per layer.
        biases_list: list of bias vectors, one per layer.
        activation: nonlinearity applied after every layer except the last.

    Returns:
        Tensor of per-sample predictions with size N = X.shape[0].
    """
    # FIX: removed leftover debug prints and the never-incremented counter.
    net = X
    for weights, biases in zip(weights_list[:-1], biases_list[:-1]):
        net = dense(net, weights, biases, activation)
    # final linear layer (no activation)
    net = tf.matmul(net, weights_list[-1]) + biases_list[-1]
    preds = net[:, 0]
    return preds
def get_initial_state(weight_prior_fns, bias_prior_fns, num_features=1, num_hidden=20, layers=None):
    """Generate a starting point for the Markov chain over FCNN parameters.

    Weights and biases for each layer are sampled from the corresponding
    prior distributions.

    Args:
        weight_prior_fns: per-layer weight prior distributions (objects
            exposing ``.sample(shape)``).
        bias_prior_fns: per-layer bias prior distributions.
        num_features: input dimensionality (used only when `layers` is None).
        num_hidden: width of the first hidden layer when `layers` is None;
            the second hidden layer gets half this width.
        layers: optional explicit tuple of layer sizes; must end in 1.

    Returns:
        list -- alternating weight and bias tensors for each layer.
    """
    # FIX: removed leftover debug prints and commented-out dead code
    # (zero-initialisation variants); fixed `weigths` typo in local names.
    if layers is not None:
        assert layers[-1] == 1
    else:
        layers = (
            num_features,
            num_hidden,
            num_hidden // 2,
            1,
        )
    architecture = []
    for idx in range(len(layers) - 1):
        weights = weight_prior_fns[idx].sample((layers[idx], layers[idx + 1]))
        # NOTE: `(layers[idx + 1])` is an int, not a 1-tuple, matching the
        # original call signature expected by the prior's sample().
        biases = bias_prior_fns[idx].sample((layers[idx + 1]))
        architecture.extend((weights, biases))
    return architecture
def bnn_joint_log_prob_fn(weight_prior_fns, bias_prior_fns, X, y, *args):
    """Joint log-probability of the BNN: log prior plus log likelihood.

    Args:
        weight_prior_fns: per-layer weight prior distributions.
        bias_prior_fns: per-layer bias prior distributions.
        X: training inputs (cast to float32 before the forward pass).
        y: training targets.
        *args: flat alternating sequence of weight and bias tensors, as
            produced by `get_initial_state`.

    Returns:
        Scalar tensor holding the joint log-probability.
    """
    # FIX: removed commented-out dead code (`lp = lp * 0.1` prior scaling).
    weights_list = args[::2]
    biases_list = args[1::2]
    # prior log-prob over all weights and biases
    lp = sum(
        tf.reduce_sum(fn.log_prob(w)) for fn, w in zip(weight_prior_fns, weights_list)
    )
    lp += sum(tf.reduce_sum(fn.log_prob(b)) for fn, b in zip(bias_prior_fns, biases_list))
    # likelihood of the observed labels under the network's predictive dist
    network = build_network(weights_list, biases_list)
    labels_dist = network(X.astype("float32"))
    lp += tf.reduce_sum(labels_dist.log_prob(y))
    return lp
def bnn_neg_joint_log_prob_fn(weight_prior, bias_prior, X, y, *args):
    """Negative joint log-probability, suitable as a minimisation target."""
    return -1.0 * bnn_joint_log_prob_fn(weight_prior, bias_prior, X, y, *args)
def bnn_likelihood_log_prob_fn(X, y, *args):
    """Log-likelihood of targets `y` under the network defined by `*args`.

    `*args` is the flat alternating weight/bias sequence used throughout
    this module; the prior terms are deliberately excluded here.
    """
    layer_weights = args[0::2]
    layer_biases = args[1::2]
    predictive = build_network(layer_weights, layer_biases)(X.astype("float32"))
    return tf.reduce_sum(predictive.log_prob(y))
def trace_fn(current_state, results, summary_freq=100):
    """Trace callback for `tfp.mcmc.sample_chain`: record kernel results only.

    `current_state` and `summary_freq` are unused but kept so the signature
    matches the ``(state, previous_kernel_results)`` contract expected by
    ``sample_chain`` and the `partial` application in `run_hmc`.
    """
    # FIX: removed a commented-out dead block that wrote per-parameter
    # histogram summaries; it referenced `results.step` and was disabled.
    return results
@tf.function
def graph_hmc(*args, **kwargs):
    """Compile a static graph for ``tfp.mcmc.sample_chain``.

    Sampling is the bulk of the computation, so wrapping it in
    ``tf.function`` significantly improves performance (empirically about
    ~5x). All arguments are forwarded unchanged to
    ``tfp.mcmc.sample_chain``.
    """
    return tfp.mcmc.sample_chain(*args, **kwargs)
def nest_concat(*args):
    """Concatenate matching leaves of the given nested structures on axis 0."""
    def _concat_leaves(*parts):
        return tf.concat(parts, axis=0)
    return tf.nest.map_structure(_concat_leaves, *args)
def run_hmc(
    target_log_prob_fn,
    step_size=0.01,
    num_leapfrog_steps=3,
    num_burnin_steps=1000,
    num_adaptation_steps=800,
    num_results=1000,
    num_steps_between_results=0,
    current_state=None,
    logdir="/tmp/data/output/hmc/",
    resume=None):
    """Populates a Markov chain by performing `num_results` gradient-informed steps with a
    Hamiltonian Monte Carlo transition kernel to produce a Metropolis proposal. Either
    that or the previous state is appended to the chain at each step.

    Step size is adapted for the first `num_adaptation_steps` via
    `SimpleStepSizeAdaptation`. TensorBoard summaries/trace are written to
    a timestamped subdirectory of `logdir`.

    Arguments:
        target_log_prob_fn {callable} -- Determines the HMC transition kernel
        and thereby the stationary distribution that the Markov chain will approximate.
        current_state -- initial chain state; required unless `resume` is given.
        resume -- optional (prev_chain, prev_trace, prev_kernel_results) tuple
        from a previous call; sampling restarts from the last chain state and
        the new samples/trace are concatenated onto the previous ones.

    Returns:
        (chain(s), trace, final_kernel_result) -- The Markov chain(s), the trace created by `trace_fn`
        and the kernel results of the last step.
    """
    # At least one of current_state / resume must be provided.
    assert (current_state, resume) != (None, None)
    # Set up logging.
    stamp = datetime.now().strftime("%Y%m%d-%H%M%S")
    logdir = logdir + stamp
    summary_writer = tf.summary.create_file_writer(logdir)
    kernel = tfp.mcmc.HamiltonianMonteCarlo(
        target_log_prob_fn, step_size=step_size, num_leapfrog_steps=num_leapfrog_steps
    )
    kernel = tfp.mcmc.SimpleStepSizeAdaptation(
        kernel, num_adaptation_steps=num_adaptation_steps
    )
    #kernel = tfp.mcmc.MetropolisAdjustedLangevinAlgorithm(target_log_prob_fn=target_log_prob_fn, step_size=0.01, volatility_fn = lambda *args: 0.)
    if resume is None:
        # fresh chain: bootstrap kernel results from the provided state
        prev_kernel_results = kernel.bootstrap_results(current_state)
        step = 0
    else:
        # continue a previous run from its last state
        prev_chain, prev_trace, prev_kernel_results = resume
        step = len(prev_chain)
        current_state = tf.nest.map_structure(lambda chain: chain[-1], prev_chain)
    tf.summary.trace_on(graph=True, profiler=True)
    with summary_writer.as_default():
        tf.summary.trace_export(
            name="mcmc_sample_trace", step=step, profiler_outdir=logdir
        )
        # NOTE(review): num_results is passed as num_burnin_steps + num_results
        # while num_burnin_steps is also set, so burn-in appears to be counted
        # twice in the total work — confirm against sample_chain's semantics.
        chain, trace, final_kernel_results = graph_hmc(
            kernel=kernel,
            current_state=current_state,
            num_burnin_steps=num_burnin_steps,
            num_results=num_burnin_steps + num_results,
            previous_kernel_results=prev_kernel_results,
            num_steps_between_results=num_steps_between_results,
            trace_fn=partial(trace_fn, summary_freq=20),
            return_final_kernel_results=True,
        )
    summary_writer.close()
    if resume:
        # stitch new samples/trace onto the previous run's
        chain = nest_concat(prev_chain, chain)
        trace = nest_concat(prev_trace, trace)
    return chain, trace, final_kernel_results
def run_bps(target_log_prob_fn,
            num_results=1000,
            current_state=None):
    """Sample with the Bouncy Particle Sampler (BPS) kernel.

    Args:
        target_log_prob_fn: callable returning the target log-probability.
        num_results: number of samples to draw.
        current_state: initial state of the chain.

    Returns:
        (samples, kernel_results, []) -- the trailing empty list keeps the
        return arity consistent with `run_hmc`'s (chain, trace, results).
    """
    kernel = BPSKernel(
        target_log_prob_fn=target_log_prob_fn,
        store_parameters_in_results=True,
        lambda_ref=1.0)
    # BUG FIX: previously started the chain from the module-level global
    # `initial_state`, silently ignoring the `current_state` argument.
    samples, kernel_results = graph_hmc(
        num_results=num_results,
        current_state=current_state,
        kernel=kernel)
    return samples, kernel_results, []
def run_bps_test(target_log_prob_fn,
                 num_results=1000,
                 current_state=None):
    """Sample with the preconditioned BPS kernel (adaptive IPP sampler).

    Uses `CovPBPSKernel` with `AdaptivePSBPSampler` as the inhomogeneous
    Poisson process sampler.

    Args:
        target_log_prob_fn: callable returning the target log-probability.
        num_results: number of samples to draw.
        current_state: initial state of the chain.

    Returns:
        (samples, kernel_results, []) -- `kernel_results` is the final
        kernel state, usable to initialise a subsequent sampling call; the
        trailing empty list keeps the return arity consistent with
        `run_hmc`.
    """
    kernel = CovPBPSKernel(
        target_log_prob_fn=target_log_prob_fn,
        store_parameters_in_results=True,
        ipp_sampler=AdaptivePSBPSampler,
        lambda_ref=1.0)
    # BUG FIX: previously started the chain from the module-level global
    # `initial_state`, silently ignoring the `current_state` argument.
    # Also removed commented-out dead code (preconditioner re-estimation
    # and a second sampling pass).
    bps_results = graph_hmc(
        num_results=num_results,
        current_state=current_state,
        return_final_kernel_results=True,
        kernel=kernel)
    samples = bps_results.all_states
    # final kernel results can seed the next call of the sampling loop
    kernel_results = bps_results.final_kernel_results
    return samples, kernel_results, []
def get_data(num_data=100, test_size=0.1, random_state=0):
    """Generate a standardised noisy-sine regression data set.

    Train inputs cover [0, 2*pi]; test inputs extend slightly beyond
    ([-0.5, 2*pi + 0.5]) to probe extrapolation. Targets are sin(x) plus
    N(0, 0.2^2) noise.

    Args:
        num_data: number of samples in each of the train and test sets.
        test_size, random_state: unused; retained for backward compatibility
            with callers of the earlier sklearn train_test_split version.

    Returns:
        ``((X_train, X_test), (y_train, y_test), (X_scaler, y_scaler))``
        where arrays are column vectors standardised by the returned
        sklearn StandardScaler objects (fit on the training split).
    """
    X_train = np.linspace(0, 2 * np.pi, num_data)
    y_train = np.sin(X_train) + 0.2 * np.random.randn(*X_train.shape)
    X_test = np.linspace(-.5, 2 * np.pi + 0.5, num_data)
    y_test = np.sin(X_test) + 0.2 * np.random.randn(*X_test.shape)
    # Sort by x (a no-op for linspace inputs, kept so the pipeline also
    # works for unsorted inputs) and reshape to column vectors.
    # Leftover debug print statements removed.
    train_sort = np.argsort(X_train)
    test_sort = np.argsort(X_test)
    X_train = X_train[train_sort].reshape(-1, 1)
    y_train = y_train[train_sort].reshape(-1, 1)
    X_test = X_test[test_sort].reshape(-1, 1)
    y_test = y_test[test_sort].reshape(-1, 1)
    # Standardise inputs and targets with scalers fit on the training data.
    X_scaler = sklearn.preprocessing.StandardScaler().fit(X_train)
    y_scaler = sklearn.preprocessing.StandardScaler().fit(y_train)
    X_train = X_scaler.transform(X_train)
    X_test = X_scaler.transform(X_test)
    y_train = y_scaler.transform(y_train)
    y_test = y_scaler.transform(y_test)
    return (X_train, X_test), (y_train, y_test), (X_scaler, y_scaler)
def get_map(target_log_prob_fn, state, num_iters=1000, save_every=100):
    """Adam-optimise the state towards a MAP estimate.

    Args:
        target_log_prob_fn: callable; its negation is the loss minimised.
        state: list of tensors giving the starting point.
        num_iters: total optimisation steps.
        save_every: snapshot interval for the returned traces.

    Returns:
        List of numpy arrays, one per state component, holding the
        snapshots taken every ``save_every`` iterations.
    """
    params = [tf.Variable(s) for s in state]
    optimizer = tf.optimizers.Adam()

    def neg_log_prob():
        return -target_log_prob_fn(*params)

    @tf.function
    def step():
        optimizer.minimize(neg_log_prob, params)

    history = [[] for _ in params]
    for it in range(num_iters):
        if it % save_every == 0:
            # Record the current value of every parameter tensor.
            for hist, var in zip(history, params):
                hist.append(var.numpy())
        step()
    return [np.array(h) for h in history]
def plot_curves(chain, name='plot_curves'):
    """Plot train/test negative log-likelihood over the sampled networks.

    Relies on module-level X_train/X_test/y_train/y_test and build_network.
    ``chain`` interleaves weight and bias arrays (weights at even indices).
    """
    weights = chain[::2]
    biases = chain[1::2]
    nll_train = []
    nll_test = []
    for s in range(len(weights[0])):
        w_s = [w[s] for w in weights]
        b_s = [b[s] for b in biases]
        dist = build_network(w_s, b_s)(X_train.astype(np.float32))
        nll_train.append(-tf.reduce_mean(dist.log_prob(y_train[:, 0])).numpy())
        dist = build_network(w_s, b_s)(X_test.astype(np.float32))
        nll_test.append(-tf.reduce_mean(dist.log_prob(y_test[:, 0])).numpy())
    plt.plot(nll_train, label='train')
    plt.plot(nll_test, label='test')
    plt.legend(loc='best')
    plt.savefig(name + '.png')
def run_bps_and_plot(initial_state, num_results=1000, plot_name='bps'):
    """Run the preconditioned BPS, print ESS diagnostics, return the chain.

    Uses the module-level ``bnn_neg_joint_log_prob`` as the target.
    Returns the raw chain components converted to numpy arrays.
    """
    chain, trace, final_kernel_results = run_bps_test(
        bnn_neg_joint_log_prob,
        num_results=num_results,
        current_state=initial_state)
    print(chain)
    print('ESS full chain')
    for comp in chain:
        # Effective sample size per step over the last 1000 samples.
        ess = tfp.mcmc.effective_sample_size(comp[-1000:, ...]) / 1000
        print("min ESS/step", tf.reduce_min(ess).numpy())
        print("max ESS/step", tf.reduce_max(ess).numpy())
        print("mean ESS/step", tf.reduce_mean(ess).numpy())
    # Midpoints of consecutive samples (BPS trajectories are piecewise
    # linear, so the midpoint is a valid point on the path).
    weights_list = []
    for k in range(len(chain)):
        midpoints = []
        for s in range(num_results - 1):
            a = chain[k][s, ...]
            b = chain[k][s + 1, ...]
            midpoints.append((a.numpy() + b.numpy()) / 2.0)
        weights_list.append(np.reshape(np.vstack(midpoints), [-1, *chain[k].shape[1:]]))
    print('ESS subsampled chain')
    sub_idx = np.arange(0, num_results - 1, 50)
    subsampled = [x[sub_idx, ...] for x in weights_list]
    for comp in subsampled:
        ess = tfp.mcmc.effective_sample_size(comp[-1000:, ...]) / 1000
        print("sub min ESS/step", tf.reduce_min(ess))
        print("sub max ESS/step", tf.reduce_max(ess))
        print("sub mean ESS/step", tf.reduce_mean(ess))
    return [x.numpy() for x in chain]
def run_hmc_and_plot(initial_state, num_results=1000, plot_name='hmc'):
    """Run adaptive HMC from the given state and plot basic diagnostics.

    Saves '<plot_name>_chains.png' (ten traces of the fifth chain
    component) and '<plot_name>_step_size.png', and returns the chain.
    """
    chain, trace, final_kernel_results = run_hmc(
        bnn_joint_log_prob,
        num_burnin_steps=5000,
        num_leapfrog_steps=10,
        num_adaptation_steps=10000,
        num_results=num_results,
        step_size=1e-4,
        current_state=initial_state)
    print("Acceptance rate:",
          trace.inner_results.is_accepted[-1000:].numpy().mean())
    for comp in chain:
        print("ESS/step", tf.reduce_min(tfp.mcmc.effective_sample_size(comp[-1000:]) / 1000).numpy())
    for comp in chain:
        print(comp.shape)
    plt.figure()
    plt.title("Chains")
    for idx in range(10):
        plt.plot(chain[4][:, idx, 0])
    plt.savefig(plot_name + '_chains.png')
    plt.figure()
    plt.title("Step size")
    plt.plot(trace.inner_results.accepted_results.step_size)
    plt.savefig(plot_name + '_step_size.png')
    return chain
def build_prior(layer_num_units):
    """Build per-layer Normal priors with scale 0.5/sqrt(num_units).

    Args:
        layer_num_units: iterable of layer widths.

    Returns:
        ``(weights_prior, bias_prior)`` — two lists of tfd.Normal
        distributions, one entry per layer.
    """
    weights_prior = []
    bias_prior = []
    for units in layer_num_units:
        scale = 0.5 * tf.sqrt(1.0 / tf.cast(units, dtype=tf.float32))
        weights_prior.append(tfd.Normal(loc=0., scale=scale))
        bias_prior.append(tfd.Normal(loc=0., scale=scale))
    return weights_prior, bias_prior
def get_layer_units(num_features=1, num_hidden=200):
    """Return the per-layer unit counts of the regression network.

    The architecture is: input -> num_hidden -> num_hidden//2 -> 1 output.
    """
    return (num_features, num_hidden, num_hidden // 2, 1)
def examine_rate(model, bnn_neg_joint_log_prob,
                 state, X_train, y_train, num_samp=1000):
    """Probe and plot the BPS event intensity (IPP rate) along trajectories.

    For ten kernel steps, evaluates the event intensity at ``num_samp``
    points spaced ``time_dt`` apart along the current trajectory and saves
    one figure per step. The final time/intensity arrays are saved as
    'time_array.npy' / 'test_array.npy'.

    Args:
        model: unused here; kept for interface compatibility with callers.
        bnn_neg_joint_log_prob: negative joint log-prob callable (target).
        state: list of tensors, current chain state.
        X_train, y_train: unused here; kept for interface compatibility.
        num_samp: number of intensity evaluations per trajectory.
    """
    kernel = CovPBPSKernel(
        target_log_prob_fn=bnn_neg_joint_log_prob,
        store_parameters_in_results=True,
        lambda_ref=0.0001)
    bps_results = kernel.bootstrap_results(state)
    for test_iter in range(0, 10):
        # NOTE(review): one_step output is not fed back into bps_results,
        # so every iteration restarts from the bootstrap results, and the
        # velocity below is read from the (stale) bootstrap results rather
        # than bps_kernel_results — confirm this is intended.
        state, bps_kernel_results = kernel.one_step(state, bps_results)
        velocity = bps_kernel_results.velocity
        print(bps_results)
        velocity = bps_results.velocity
        preconditioner = bps_results.preconditioner
        # Evaluate the event intensity at num_samp points along the ray.
        time_dt = tf.constant(0.0001, dtype=tf.float32)
        time = tf.Variable(0.0, dtype=tf.float32)
        test = np.zeros(num_samp)
        for i in range(0, num_samp):
            test[i] = kernel.examine_event_intensity(state, velocity, preconditioner, time).numpy()
            time = time + time_dt
        # BUGFIX: the time axis must have num_samp points to match 'test';
        # the hard-coded 1000 crashed plt.plot whenever num_samp != 1000.
        time_arr = np.linspace(0, time_dt.numpy() * num_samp, num_samp)
        plt.figure()
        plt.plot(time_arr, test)
        plt.xlabel('time')
        plt.ylabel('IPP intensity')
        plt.savefig('regression_ipp_test_{}.png'.format(test_iter))
        plt.savefig('regression_ipp_test_{}.pdf'.format(test_iter))
    np.save('time_array.npy', time_arr)
    np.save('test_array.npy', test)
# Script entry point: builds priors and data, finds a MAP starting point
# with Adam, runs the preconditioned BPS from it, and saves prediction and
# trace/autocorrelation figures. Relies on module-level helpers
# (bnn_*_log_prob_fn, get_initial_state, network_forward) and on az/plt.
if __name__ == '__main__':
    num_results = 20000
    layer_num_units = get_layer_units()
    weight_prior_fns, bias_prior_fns = build_prior(layer_num_units)
    (X_train, X_test), (y_train, y_test), scalers = get_data(num_data=1000)
    # Joint log prob with priors and data partially applied.
    bnn_joint_log_prob = partial(
        bnn_joint_log_prob_fn, weight_prior_fns, bias_prior_fns, X_train, y_train[:, 0]
    )
    print('l = {}'.format(layer_num_units))
    initial_state = get_initial_state(weight_prior_fns, bias_prior_fns, layers=layer_num_units)
    bnn_likelihood_log_prob = partial(
        bnn_likelihood_log_prob_fn, X_train, y_train[:, 0]
    )
    bnn_neg_joint_log_prob = partial(
        bnn_neg_joint_log_prob_fn, weight_prior_fns, bias_prior_fns, X_train, y_train[:, 0]
    )
    # Count total number of trainable parameters across state components.
    z = 0
    #print(initial_state)
    for s in initial_state:
        print("State shape", s.shape)
        z += s.shape.num_elements()
    print("Total params", z)
    # run HMC
    # hmc_chain = run_hmc_and_plot(initial_state, 'default_hmc')
    # plot_curves([c[::50] for c in hmc_chain], name='hmc_chains')
    # plt.ylim(-1, 2)
    # plt.yticks(np.linspace(-1, 2, 16));
    # get MAP
    map_trace = get_map(bnn_joint_log_prob, initial_state, num_iters=1000, save_every=100)
    # Last snapshot of the optimisation trace is the MAP starting state.
    map_initial_state = [tf.constant(t[-1]) for t in map_trace]
    for x in map_initial_state:
        print(x.shape)
    # HMC from MAP
    #hmc_from_map_chain = run_bps_and_plot(map_initial_state, num_results=num_results,
    #                                      plot_name='hmc_from_map')
    weights_list = map_initial_state[::2]
    biases_list = map_initial_state[1::2]
    # Plot the MAP prediction against the test data before sampling.
    pred = network_forward(X_train.astype(np.float32), weights_list, biases_list)
    plt.plot(X_train, pred, color='k')
    plt.scatter(X_test, y_test, color='b', alpha=0.5)
    plt.savefig('pred_map.png')
    print(map_initial_state)
    # model = build_network(weights_list, biases_list)
    # examine_rate(model, bnn_neg_joint_log_prob,
    #              map_initial_state, X_train, y_train, num_samp=1000)
    hmc_from_map_chain = run_bps_and_plot(map_initial_state, num_results=num_results,
                                          plot_name='hmc_from_map')
    weights_chain = hmc_from_map_chain[::2]
    biases_chain = hmc_from_map_chain[1::2]
    num_returned_samples = weights_chain[0].shape[0]
    # perform prediction for each iteration
    # (skip 500 burn-in samples, then thin by 10)
    sample_idx = np.arange(500, num_returned_samples, 10)
    num_plot = sample_idx.size
    pred = np.zeros([num_plot, y_test.size])
    plt.figure()
    pred_idx = 0
    for i in sample_idx:
        weights_list = [x[i, ...] for x in weights_chain]
        biases_list = [x[i, ...] for x in biases_chain]
        pred[pred_idx, :] = network_forward(X_test.astype(np.float32), weights_list, biases_list)
        plt.plot(X_test, pred[pred_idx, :], alpha=0.05, color='k')
        pred_idx += 1
    plt.scatter(X_train, y_train, color='b', alpha=0.01)
    plt.savefig('pred.png')
    plt.savefig('pred.pdf')
    #print(pred)
    print(weights_chain[0].shape)
    # samples = np.array(weights_chain[0]).reshape(weights_chain[0].size, -1).T
    # corr = np.corrcoef(samples)
    # fig, ax = plt.subplots()
    # ax0 = ax.matshow(corr)
    # fig.colorbar(ax0, ax=ax)
    # plt.savefig('corr.pdf')
    #plot_curves([c[::50] for c in hmc_from_map_chain])
    #plt.ylim(-1, 2)
    #plt.yticks(np.linspace(-1, 2, 16));
    for i in range(0, len(weights_chain)):
        print('weight_chain[{}] shape = {}'.format(i, weights_chain[i].shape))
    plt.figure()
    # Per-parameter trace and autocorrelation plots via ArviZ, thinned to
    # every 10th parameter; assumes ./bnn_test_figs/ exists beforehand.
    for layer_idx in range(0, len(weights_chain)):
        for param_idx in np.arange(0, weights_chain[layer_idx].shape[1], 10):
            sample = np.reshape(weights_chain[layer_idx][1000:,param_idx, 0], [1, num_returned_samples - 1000])
            sample_az = az.from_tfp(posterior=sample)
            print(sample_az.posterior)
            az.plot_trace(sample_az)
            plt.savefig('./bnn_test_figs/trace_test_{}_{}.png'.format(layer_idx, param_idx))
            plt.clf()
            az.plot_autocorr(sample_az, max_lag=sample.size)
            plt.savefig('./bnn_test_figs/autocorr_test_{}_{}.png'.format(layer_idx, param_idx))
            plt.clf()
| [
"ethanjgoan@gmail.com"
] | ethanjgoan@gmail.com |
6fa0120c0dac223ee1b37a073e42edc072381ee1 | 617ecebd2647be1bdedf518cdb916720c828f1ea | /cfg.py | 744002bcb533124765bfb1614a95e185d1f6e62c | [] | no_license | DmitryOdinoky/myAudioClassification | 6756881a4f606cfeb63ab8fce8338122551c6d10 | e7257dcf551419a86623ff3204985a9db9cfcac5 | refs/heads/master | 2020-11-29T13:34:59.181699 | 2019-12-26T03:00:53 | 2019-12-26T03:00:53 | 230,124,766 | 0 | 0 | null | 2019-12-26T02:44:59 | 2019-12-25T16:06:24 | Python | UTF-8 | Python | false | false | 398 | py | import os
class config:
    """Feature-extraction / model configuration for the audio classifier.

    Derives the window step (one tenth of a second of audio) and the
    model/pickle artefact paths from the chosen mode.
    """
    def __init__(self, mode='conv', nfilt=26, nfeat=13, nfft=512, rate=16000):
        # Core feature-extraction settings.
        self.mode, self.nfilt, self.nfeat = mode, nfilt, nfeat
        self.nfft, self.rate = nfft, rate
        # Samples per analysis step: 1/10 s of audio at the given rate.
        self.step = int(rate / 10)
        # Artefact locations keyed by the mode name.
        self.model_path = os.path.join('models', mode + '.model')
        self.p_path = os.path.join('pickles', mode + '.p')
| [
"Dmitrijs.Odinokijs@edu.rtu.lv"
] | Dmitrijs.Odinokijs@edu.rtu.lv |
b7e354e619441ed94e5d37f443d710fd7e20347c | b78fa7520e6ec806fb00f64162d8e704616ac3a9 | /fractals.py | c492500ba873a31971833c981361f3c7ca5a4dc0 | [] | no_license | Dragneel7/fractal-assignment | 252b9bd4bc44f28ae3c7ce833da71be28f24f5d1 | e648c5bf0d9e85160950a82576bf85fa307daa20 | refs/heads/master | 2020-04-20T16:29:29.354696 | 2019-02-03T12:59:30 | 2019-02-03T12:59:30 | 168,960,298 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,202 | py | import pygame, math, sys, time
# Number of animation frames; the script requires exactly one CLI argument.
iterations = int(sys.argv[1]) # No. of iterations to run the fractal generating algorithm.
pygame.init()
# Create a new surface and window to display the fractal tree pattern.
surface_height, surface_width = 1200, 1000
main_surface = pygame.display.set_mode((surface_height,surface_width))
pygame.display.set_caption("My Fractal Tree Pattern")
def draw_tree(order, theta, sz, posn, heading, color=(0,0,0), depth=0):
    """Recursively draw a fractal tree on the module-level surface.

    :param order: int, remaining branching levels (0 = draw trunk only)
    :param theta: float, rotation angle of each child branch (radians)
    :param sz: number, overall size budget of this subtree
    :param posn: (x, y) pixel position where the trunk starts
    :param heading: float, direction of the trunk (radians)
    :param color: RGB tuple used for this branch
    :param depth: int, recursion depth (0 at the root)
    """
    trunk_ratio = 0.3  # fraction of the subtree length taken by the trunk
    trunk_len = sz * trunk_ratio
    x0, y0 = posn
    endpos = (x0 + trunk_len * math.cos(heading), y0 + trunk_len * math.sin(heading))
    pygame.draw.line(main_surface, color, posn, endpos)
    if order > 0:
        # The two root branches are coloured red/blue; deeper branches
        # simply inherit their parent's colour.
        if depth == 0:
            color_left = (255, 0, 0)
            color_right = (0, 0, 255)
        else:
            color_left = color
            color_right = color
        # Recurse into the two child branches with the remaining size.
        child_sz = sz*(1 - trunk_ratio)
        draw_tree(order-1, theta, child_sz, endpos, heading-theta, color_left, depth+1)
        draw_tree(order-1, theta, child_sz, endpos, heading+theta, color_right, depth+1)
def main():
    """Animate the fractal tree by sweeping the branch angle each frame."""
    angle = 0
    for _ in range(iterations):
        angle += 0.01  # advance the branch rotation per frame
        main_surface.fill((255, 255, 0))
        draw_tree(9, angle, surface_height*0.9, (surface_width//2, surface_width-50), -math.pi/2)
        pygame.display.flip()
    time.sleep(20)  # keep the final frame on screen for 20 s
main()  # run the animation when the script is executed
| [
"sainisurya1@gmail.com"
] | sainisurya1@gmail.com |
905a3a7e7df6433808cdaee54c2a2cd0898303da | a3dea9386e6d061b09de7774a57d80f465470463 | /pyLib/footprintTools.py | fb3b66f59247454a20e0460e9fadb6ebae608ec0 | [
"MIT"
] | permissive | biglimp/P4UL | 37934122f3315fdf809817d62d8515c577bc6315 | e08c7952d0b851b61ed802356383d79ebe616592 | refs/heads/master | 2020-05-18T23:35:26.112077 | 2019-01-04T15:58:15 | 2019-01-04T15:58:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,182 | py | import operator
import numpy as np
import sys
'''
Description:
Author: Mikko Auvinen
mikko.auvinen@helsinki.fi
University of Helsinki &
Finnish Meteorological Institute
'''
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def writeNumpyZFootprintRaw( filename, arr ):
  '''
  Write raw footprint particle data into a compressed <filename>.npz file.
  arr must have 9 columns: origin coords (xO,yO,zO), terminal coords
  (xt,yt,zt) and terminal velocity components (ut,vt,wt).
  '''
  # BUGFIX: use split, not strip. str.strip('.npz') removes *characters*
  # from both ends (e.g. 'zones.npz' -> 'ones'), not the suffix.
  fstr = filename.split('.npz')[0]
  print(' Writing raw footprint data to file {}.npz ...'.format(fstr))
  dims = np.shape(arr)
  if( dims[1] != 9 ):
    sys.exit(" Error: dims[1] does not equal 9. Exiting ...")
  np.savez_compressed(fstr, \
    xO=arr[:,0], yO=arr[:,1], zO=arr[:,2], \
    xt=arr[:,3], yt=arr[:,4], zt=arr[:,5], \
    ut=arr[:,6], vt=arr[:,7], wt=arr[:,8] )
  print(' ... done! ')
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def writeNumpyZFootprintIJK(fn, xO, yO, zO, xt, yt, zt, ut, vt, wt, dxyz):
  '''
  Save footprint particle data (origin coords, terminal coords, terminal
  velocities and grid spacing dxyz) into a compressed <fn>.npz file.
  '''
  base = fn.split('.npz')[0]
  payload = dict(xO=xO, yO=yO, zO=zO, xt=xt, yt=yt, zt=zt,
                 ut=ut, vt=vt, wt=wt, dxyz=dxyz)
  np.savez_compressed( base, **payload )
  print(' {}.npz saved successfully!'.format(base) )
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def readNumpyZFootprintRaw( filename ):
  '''
  Read a raw footprint .npz file written by writeNumpyZFootprintRaw.
  Returns (xO, yO, zO, xt, yt, zt, ut, vt, wt).
  Exits the program if the file cannot be opened.
  '''
  # BUGFIX: the original used a Python 2 print *statement*, which is a
  # syntax error under Python 3 (the rest of this module uses print()).
  print(' Read raw footprint file {} ...'.format(filename))
  try: dat = np.load(filename)
  except Exception: sys.exit(' Cannot read file {}. Exiting ...'.format(filename))
  xO = dat['xO']; yO = dat['yO']; zO = dat['zO']
  xt = dat['xt']; yt = dat['yt']; zt = dat['zt']
  ut = dat['ut']; vt = dat['vt']; wt = dat['wt']
  dat.close()
  return xO, yO, zO, xt, yt, zt, ut, vt, wt
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def writeNumpyZFootprint(filename, F, X, Y, Z, C, Ids=None ):
  '''
  Save a gridded footprint F with its mesh coords (X, Y), heights (Z)
  and hit counts (C) into a compressed <filename>.npz file.
  Ids (optional dict) is stored as an extra entry when given.
  '''
  fstr = filename.split('.npz')[0]
  # BUGFIX: identity test. 'Ids != None' is an equality comparison and
  # misbehaves for array-like values; 'is not None' is the intended check.
  if( Ids is not None ):
    np.savez_compressed( fstr , F=F, X=X, Y=Y, Z=Z, C=C, Ids=Ids )
  else:
    np.savez_compressed( fstr , F=F, X=X, Y=Y, Z=Z, C=C )
  print(' {}.npz saved successfully!'.format(fstr))
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def readNumpyZFootprint( filename, IdsOn=False ):
  '''
  Read a footprint .npz file written by writeNumpyZFootprint.
  Returns (F, X, Y, Z, C) and, when IdsOn is True, additionally the Ids
  dict (None if the file contains no 'Ids' entry).
  Exits the program if the file cannot be opened.
  '''
  # BUGFIX: the original used a Python 2 print *statement*, which is a
  # syntax error under Python 3 (the rest of this module uses print()).
  print(' Read footprint file {} ...'.format(filename))
  try: dat = np.load(filename)
  except Exception: sys.exit(' Cannot read file {}. Exiting ...'.format(filename))
  F = dat['F']; X = dat['X']; Y = dat['Y']; Z = dat['Z']; C = dat['C']
  if( IdsOn ):
    try: Ids = dat['Ids'].item() # .item() returns the dict inside 0-array.
    except Exception: Ids = None
  dat.close()
  if( IdsOn ):
    return F, X, Y, Z, C, Ids
  else:
    return F, X, Y, Z, C
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def fp2mshIJ(pxO, pyO, pzO, xG, yG, dx, dy ): # IJ as in indecies.
  '''
  Bin particle origins onto the footprint grid by direct index lookup.
  pxO/pyO/pzO: particle origin coordinates; xG/yG: grid coordinates;
  dx/dy: grid spacing. Returns (T, X, Y, Z) where T holds per-cell hit
  counts and Z the (last-written) particle origin heights.
  '''
  # First, create meshgrid from the grid coords.
  X, Y = np.meshgrid( xG, yG )
  T = np.zeros( np.shape(X) ) # Target
  Z = np.zeros( np.shape(X) ) # Heights
  ix = ( pxO / dx ).astype(int); iy = ( pyO / dy ).astype(int)
  # BUGFIX: the original loop used Python 2 xrange (NameError on Python 3).
  # np.add.at performs unbuffered accumulation, so cells hit by several
  # particles are counted correctly (plain fancy-indexed '+=' would not).
  np.add.at(T, (iy, ix), 1.)
  Z[iy[:],ix[:]] = pzO[:]
  return T, X, Y, Z
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def fp2mshBM( pxO, pyO, pzO, xG, yG, dx, dy ): # BM as in boolean matrix.
  '''
  Bin particle origins onto the footprint grid with boolean masks.
  Elegant routine, but awfully slow compared to fp2mshIJ. Don't use this!
  Returns (T, X, Y, Z): per-cell hit counts, mesh coords and mean origin
  height of the particles landing in each cell.
  '''
  # pxO: particle x-origin, xG: x-grid coordinates.
  # First, create meshgrid from the grid coords.
  X, Y = np.meshgrid( xG, yG )
  # Then a mesh variable for storing the hits and the topography height.
  T = np.zeros( np.shape(X) )
  Z = np.zeros( np.shape(X) )
  for xi in xG:
    print(' Grid x-coord = {} '.format(xi))
    x1 = xi-dx/2.; x2 = xi+dx/2.
    PXb = ((x1 <= pxO) * (pxO < x2))
    if( PXb.any() ):
      for yi in yG:
        y1 = yi-dy/2.; y2 = yi+dy/2.
        # Particles falling into this (xi, yi) cell.
        PXYb = PXb * ((y1 <= pyO) * (pyO < y2))
        if( PXYb.any()):
          # Boolean matrix isolating (with True) the desired grid cell.
          MXb = ((x1 <= X) * (X < x2) )
          MXYb = MXb * ((y1 <= Y) * (Y < y2) )
          Z[MXYb] = np.mean( pzO[ PXYb ] )
          T += np.sum( PXYb.astype(int) ) * MXYb.astype(int)
    # BUGFIX: the clear was inside the y-loop, so the second y iteration
    # evaluated 'None * array' and raised TypeError whenever len(yG) > 1.
    # Clear once per x iteration instead.
    PXb = None
  return T, X, Y, Z
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def coordsFootprintGrid( NxG, dxG, pxO, pyO, verbose=False ):
  '''
  Cell-center coordinates (xD, yD) of the footprint grid, extended
  backward in x by an integer number of domain lengths so that all
  particle origins (pxO) with negative x fall inside the grid.

  Note: At this point we assume non-cyclic boundary cond. for the
  south/north boundaries, so the grid is never extended in y.
  '''
  # Max dimensions.
  xG_max = NxG[0]*dxG[0]
  yG_max = NxG[1]*dxG[1]
  # Smallest and largest x/y-value recorded:
  x_min = np.min( pxO ); y_min = np.min( pyO )
  x_max = np.max( pxO ); y_max = np.max( pyO )
  if(verbose):
    print( ' min(xO) = {}, max(xO) = {}'.format(x_min, x_max))
    print( ' min(yO) = {}, max(yO) = {}'.format(y_min, y_max))
  # Integer factor for domain multiplication/extension.
  # BUGFIX: fx must be an int and the linspace sample count below an int;
  # np.linspace raises TypeError when its 'num' argument is a float.
  fx = 0
  if( x_min < 0. ):
    fx = int( abs(x_min) / xG_max ) + 1
  # Coordinates for the extended footprint grid. Cell-centers.
  xD = np.linspace(-fx*xG_max+dxG[0]/2., xG_max-dxG[0]/2., int(fx*NxG[0]+NxG[0]))
  yD = np.linspace(dxG[1]/2. , yG_max-dxG[1]/2., NxG[1] )
  return xD, yD
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def idAppendices(fstring, ijkOn=False):
  '''
  Derive (fileId, varId) identifier strings from a footprint file name.
  In ijk mode, fileId is the last 13 chars of the basename and varId its
  last 8 chars with '.' replaced by '_' (usable as a variable name).
  Otherwise fileId is empty and varId is the file name itself.
  '''
  if( ijkOn ):
    # split, not strip: strip('.npz') removes characters, not the suffix.
    fileId = fstring.split('.npz')[0] # file ID string.
    fileId = fileId[-13:]
    varId = fileId[-8:]; varId = varId.replace('.','_') # variable ID string.
  else:
    fileId = str()
    # BUGFIX: the original referenced an undefined name 'fn' (NameError).
    varId = str(fstring)
  return fileId, varId
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def percentileFootprintIds( F , p ):
  '''
  Boolean mask of the footprint cells whose summed contribution reaches
  p percent (e.g. 50, 75, 90) of the total of F. The threshold level is
  lowered from half the maximum in small steps until the selected cells
  account for the requested fraction (within tol).
  '''
  frac   = p/100.
  Ftot   = np.sum(F)
  target = frac*Ftot
  fmax   = np.max(F)       # maximum value.
  level  = 0.5*fmax        # starting threshold
  dlevel = fmax/350.       # threshold decrement per iteration
  tol    = Ftot/2000.
  niter  = 0
  while True:
    niter += 1
    level -= dlevel
    mask  = (F > level)
    accum = np.sum(F[mask])
    if( (target - accum) < tol ):
      print(' i={}) TARGET vs. CURRENT: {} vs. {}'.format(niter, target, accum))
      break
  return mask
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def writeCrossWindSum( F , X, fname, idx=None ):
  '''
  Integrate the footprint F across the crosswind (row) direction, smooth
  the non-zero entries with a Gaussian filter, and optionally write the
  result next to the grid x-coords into <fname>_ysum.dat.
  idx: optional mask/weight array applied to F before summation.
  Returns the smoothed crosswind-summed 1D array Fm.
  '''
  import scipy.ndimage as sn # contains the filters
  nY, nX = np.shape(F) # nRows, nCols
  # BUGFIX: 'idx != None' is an elementwise comparison for numpy masks and
  # raises/misbehaves; identity is the intended check here.
  if( idx is not None ): Fx = F*idx
  else:                  Fx = F.copy()
  # Column sums; replaces the Python 2 xrange loop (NameError on Python 3).
  Fm = np.sum( Fx, axis=0 )
  Fx = None
  idx = (np.abs(Fm) > 0.) # Select only non-zero entries
  Fm[idx] = sn.gaussian_filter( Fm[idx], sigma=2.5 )
  if( fname ):
    np.savetxt(fname+'_ysum.dat', np.c_[X[0,:],Fm] ) # x,y,z equal sized 1D arrays
  return Fm
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
# =*=*=*=*=*= BEGIN KORMANN & MEIXNER =*=*=*=*=*=*=*=*
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def kormann_and_meixner_fpr(z_0, z_m, u, sigma_v, L, X, Y, x_off=0., y_off=0. ):
  '''
  Analytical flux-footprint estimate, apparently following the
  Kormann & Meixner (2001) model — the equation numbers in the comments
  seem to refer to that paper (TODO confirm against the reference).

  z_0: roughness length, z_m: measurement height, u: mean wind at z_m,
  sigma_v: crosswind velocity std, L: Obukhov length, X/Y: meshgrid
  target coordinates, x_off/y_off: sensor position within the grid.
  Returns the 2D footprint phi on the (X, Y) grid, mirrored in x.
  '''
  from scipy.optimize import fsolve
  from scipy.special import gamma
  Kappa = 0.41 # Von Karman const.
  # Bounds of integration for Eq. 42 to 46, defined on p.218
  z_1 = 3.*z_0
  z_2 = (1.+Kappa)*z_m
  # Data tuple for passing information to fsolve.
  data =(L, z_0, z_1, z_2, z_m)
  # Final roots for m and n
  m0 = 0.5
  m = fsolve( feqn_m, m0, args=data )[0]
  n = fsolve( feqn_n, m0, args=data )[0]
  # Inversion of Eq 31
  u_star = u * Kappa / (np.log(z_m/z_0) + fopt1(L, z_m, z_m))
  # Eq (41), part 1
  U = u_star/Kappa * ( Iz_n(m , L, z_0/z_m, z_1, z_2, z_m, 2 ) + \
    + Iz_n(m , L, z_0, z_1, z_2, z_m, 4, fopt1) ) \
    / ( Iz_n(2.*m, L, z_0, z_1, z_2, z_m, 1 ) * z_m**m )
  # Eq (41), part 2
  K = Kappa*u_star * Iz_n(n, L, z_0, z_1, z_2, z_m, 4, fopt2)\
    / ( Iz_n(2.*n, L, z_0, z_1, z_2, z_m, 1 ) * z_m**(n-1.))
  # r is defined at the top of p.213, mu after Eq. 18
  r = 2.+m-n
  mu = (1.+m)/r
  # Eq. 19
  xsi = U * z_m**r /( r**2 * K )
  # Eq. 21. Idm masks out the upwind half-plane (x <= sensor position).
  Xm = np.abs(X-x_off)
  Ym = np.abs(Y-y_off)
  Idm = (X-x_off)>0.
  # BUGFIX: np.max(Xm, 1e-10) passed 1e-10 as the *axis* argument and
  # raised TypeError; the intent is an elementwise floor to avoid
  # division by zero, i.e. np.maximum.
  phi_x = ( gamma(mu)**(-1) * xsi**(mu)/( Xm**(1.+mu) ) * np.exp(-xsi/np.maximum(Xm,1e-10)) )* Idm
  # Cross wind diffusion
  # Eq. 18
  u_bar = gamma(mu)/gamma(1./r) * (r**2*K/U)**(m/r)*U*Xm**(m/r)
  # Eq. 9, definition of sig right after it
  sig = sigma_v*Xm/u_bar
  D_y = (np.sqrt(2.*np.pi)*sig)**(-1) * np.exp(-Ym**2./(2.*sig**2))
  phi = D_y * phi_x
  return phi[:,::-1]
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def fopt1(L, z, z_m=None):
  '''
  Diabatic (stability) correction term psi_m used in Eq. 39 with J1/J2.
  z_m is unused; it is kept only for call-signature symmetry with fopt2.
  '''
  if( L>0 ):
    # Stable stratification: linear correction.
    return 5.*z/L
  # Unstable stratification: Businger-Dyer style form.
  zeta = (1. - 16.*z/L)**(0.25)
  return ( (-2.)*np.log((1.+zeta)/2.) - np.log((1.+zeta**2)/2.)
           + 2.*np.arctan(zeta) - np.pi/2. )
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def fopt2(L, z, z_m):
  '''
  Ratio z / (phi_c * z_m) used in Eq. 40 with J1/J2, where phi_c is the
  dimensionless stability function for scalars.
  '''
  if( L>0 ):
    phi_c = 1. + 5.*z/L
  else:
    phi_c = (1. - (16. * z/L))**(-0.5)
  return z/(phi_c * z_m)
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
'''Following integrals (Eq 42-46) are solved numerically.
They're all bundled within the same function to form a unified
interface. This reduces code duplication. '''
def Iz_n(P, L, z_0, z_1, z_2, z_m, opt=1, fuser=None):
  '''
  Midpoint-rule (1000 subintervals) evaluation of the integrals of
  Eqs. 42-46 over [z_1/z_m, z_2/z_m].
  opt selects the integrand: 1 -> I_1, 2 -> I_2, 3 -> I_3,
  4 -> J_1 (uses fuser), 5 -> J_2 (uses fuser).
  '''
  a1 = (z_1/z_m); a2 = (z_2/z_m)
  h = (a2-a1)/1000.
  a = np.arange(a1, a2, h) + h/2.  # midpoints of the subintervals
  if( opt == 1 ):   # I_1
    vals = a**P * h
  elif( opt == 2 ): # I_2
    vals = a**P * np.log(a/z_0) *h
  elif( opt == 3 ): # I_3
    vals = a**P * np.log(a) * np.log(a/z_0) *h
  elif( opt == 4 ): # J_1
    vals = a**P * fuser(L, a*z_m, z_m) * h
  elif( opt == 5 ): # J_2
    vals = a**P * fuser(L, a*z_m, z_m)*np.log(a) * h
  return np.sum(vals)
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def feqn_m( M, *data ):
  '''
  Root function for the wind-profile exponent m: returns the residual
  B - A of the implicit equation built from the I/J integrals (Iz_n)
  with the psi_m stability term (fopt1). data = (L, z_0, z_1, z_2, z_m).
  '''
  L, z_0, z_1, z_2, z_m = data
  lhs = Iz_n(2*M, L, z_0 , z_1, z_2, z_m, 1 ) * \
        ( Iz_n( M, L, z_0/z_m, z_1, z_2, z_m, 3 ) + Iz_n(M, L, z_0, z_1, z_2, z_m, 5, fopt1) )
  rhs = Iz_n(2*M, L, 1 , z_1, z_2, z_m, 2 ) * \
        ( Iz_n( M, L, z_0/z_m, z_1, z_2, z_m, 2 ) + Iz_n(M, L, z_0, z_1, z_2, z_m, 4, fopt1) )
  return (rhs - lhs)
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def feqn_n( N, *data ):
  '''
  Root function for the eddy-diffusivity exponent n: returns the residual
  B - A of the implicit equation built from the I/J integrals (Iz_n)
  with the phi_c stability term (fopt2). data = (L, z_0, z_1, z_2, z_m).
  '''
  L, z_0, z_1, z_2, z_m = data
  lhs = Iz_n(2*N, L, z_0, z_1, z_2, z_m, 1 ) * Iz_n(N, L, z_0, z_1, z_2, z_m, 5, fopt2)
  rhs = Iz_n(2*N, L, 1 , z_1, z_2, z_m, 2 ) * Iz_n(N, L, z_0, z_1, z_2, z_m, 4, fopt2)
  return (rhs - lhs)
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
# =*=*=*=*=*= END KORMANN & MEIXNER =*=*=*=*=*=*=*=*
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
# =*=*=*=*=*= BEGIN KLJUN =*=*=*=*=*=*=*=*
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def kljun_fpr(z_0, z_m, u_mean, sigma_v, L, Xt, Yt, z_i, us, x_off, y_off, nx=4000):
  # UNFINISHED — always returns None ("Do not use yet" below). Intended to
  # evaluate a Kljun-style parameterised footprint via an external FFP()
  # helper (not defined in this module — presumably imported elsewhere;
  # TODO confirm) and remap it onto the (Xt, Yt) target grid.
  rs=[1.]; wd = 0.; crop = True
  fdict = FFP(z_m, z_0, u_mean, z_i, L, sigma_v, us, None, rs, wd, nx, crop)
  fp = fdict['f_2d'].copy(); Xp = fdict['x_2d']; Yp = fdict['y_2d']
  # Grid spacings of the target (t) and FFP-provided (p) grids.
  dXt = Xt[0,2]-Xt[0,1]; dYt = Yt[2,0]-Yt[1,0]
  # NOTE(review): dXp mixes Xp and Xt (Xp[0,2]-Xt[0,1]) — looks like a
  # typo for Xp[0,1]; dXp/dYp/jpt are currently unused anyway. Verify
  # before finishing this function.
  dXp = Xp[0,2]-Xt[0,1]; dYp = Yp[2,0]-Yt[1,0]
  ipt = (Xp[0,:]/dXt).astype(int); jpt = (Yp[:,0]/dYt).astype(int)
  print(' Xp = {} '.format(Xp[0,:]))
  print(' ipt = {} '.format(ipt))
  fdict = None
  # To be finalized ...
  return None # Do not use yet
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
# =*=*=*=*=*= END KLJUN =*=*=*=*=*=*=*=*
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
| [
"mikko.auvinen@gmail.com"
] | mikko.auvinen@gmail.com |
9cdc5953fa52c0c13a48fd139f2abf63be4bcdb2 | 95777f5257f00aa982d94812f46658ace2e92bd2 | /pytorch/pytorchcv/models/model_store.py | 3ddbf0ceb2ababece6f08a706124b80d1c589957 | [
"MIT"
] | permissive | yangkang779/imgclsmob | ea2c1f9223a3419375e8339c7e941daba69a56a7 | 9d189eae8195d045dfb4b25bec2501b2c42a154a | refs/heads/master | 2020-05-07T08:16:23.658714 | 2019-04-08T16:20:33 | 2019-04-08T16:20:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 33,684 | py | """
Model store which provides pretrained models.
"""
__all__ = ['get_model_file', 'load_model', 'download_model', 'calc_num_params']
import os
import zipfile
import logging
import hashlib
_model_sha1 = {name: (error, checksum, repo_release_tag) for name, error, checksum, repo_release_tag in [
('alexnet', '2093', '6429d865d917d57d1198e89232dd48a117ddb4d5', 'v0.0.108'),
('vgg11', '1137', '8a64fe7a143dca1d9031475cb6bea5379f4bac3d', 'v0.0.109'),
('vgg13', '1075', '24178cabf4864a238086c7f6f625261acdcbb35c', 'v0.0.109'),
('vgg16', '0892', '10f44f684420e4278427a764f96f5aa9b91ec766', 'v0.0.109'),
('vgg19', '0839', 'd4e69a0d393f4d46f1d9c4d4ba96f5a83de3399c', 'v0.0.109'),
('bn_vgg11b', '1019', '98d7e914a32f1022618ffa390e78c6a523dfcdc1', 'v0.0.110'),
('bn_vgg13b', '0963', 'cf9352f47805c18798c0f80ab0e158ec5401331e', 'v0.0.110'),
('bn_vgg16b', '0874', 'af4f2d0bbfda667e6b7b3ad4cda5ca331021cd18', 'v0.0.110'),
('bn_vgg19b', '0840', 'b6919f7f74b3174a86818062b2d1d4cf5a110b8b', 'v0.0.110'),
('bninception', '0804', '99ff87081fbd04cfe4193910674ffef7cc84b4b0', 'v0.0.139'),
('resnet10', '1436', '67d9a618e8670497386af806564f7ac1a4dbcd76', 'v0.0.248'),
('resnet12', '1328', 'd7d2f4d6c7fcf3aff0458533ae5204b7f0eee2d7', 'v0.0.253'),
('resnet14', '1246', 'd5b55c113168c02f1b39b65f8908b0db467a2d74', 'v0.0.256'),
('resnet16', '1118', 'd54bc41afa244476ca28380111f66d188905ecbc', 'v0.0.259'),
('resnet18_wd4', '1785', 'fe79b31f56e7becab9c014dbc14ccdb564b5148f', 'v0.0.262'),
('resnet18_wd2', '1327', '6654f50ad357f4596502b92b3dca2147776089ac', 'v0.0.263'),
('resnet18_w3d4', '1106', '3636648b504e1ba134947743eb34dd0e78feda02', 'v0.0.266'),
('resnet18', '0982', '0126861b4cd7f7b14196b1e01827da688f8bab6d', 'v0.0.153'),
('resnet34', '0780', '3f775482a327e5fc4850fbb77785bfc55e171e5f', 'v0.0.291'),
('resnet50', '0658', '828686d7a4b0bef906d7bcc115efd894fc5c1e0a', 'v0.0.147'),
('resnet50b', '0645', 'a53df64c736194427d0bd01eadf468e95d45fd35', 'v0.0.146'),
('resnet101', '0622', 'ab0cf005bbe9b17e53f9e3c330c6147a8c80b3a5', 'v0.0.1'),
('resnet101b', '0561', '9fbf0696ed7fe3dbe496d70fff56118674dd0d83', 'v0.0.145'),
('resnet152', '0550', '800b2cb1959a0d3648483e86917502b8f63dc37e', 'v0.0.144'),
('resnet152b', '0534', 'e02a8bf77357f553d57086c3f351f914c765e187', 'v0.0.143'),
('preresnet10', '1421', 'b3973cd4461287d61df081d6f689d293eacf2248', 'v0.0.249'),
('preresnet12', '1348', '563066fa8fcf8b5f19906b933fea784965d68192', 'v0.0.257'),
('preresnet14', '1239', '4be725fd3f06c99c46817fce3b69caf2ebc62414', 'v0.0.260'),
('preresnet16', '1108', '06d8c87e29284dac19a9019485e210541532411a', 'v0.0.261'),
('preresnet18_wd4', '1811', '41135c15210390e9a564b14e8ae2ebda1a662ec1', 'v0.0.272'),
('preresnet18_wd2', '1340', 'c1fe4e314188eeb93302432d03731a91ce8bc9f2', 'v0.0.273'),
('preresnet18_w3d4', '1105', 'ed2f9ca434b6910b92657eefc73ad186396578d5', 'v0.0.274'),
('preresnet18', '0972', '5651bc2dbb200382822a6b64375d240f747cc726', 'v0.0.140'),
('preresnet34', '0774', 'fd5bd1e883048e29099768465df2dd9e891803f4', 'v0.0.300'),
('preresnet50', '0685', 'd81a7aca0384c6d65ee0e5c1f3ba854591466346', 'v0.0.2'),
('preresnet50b', '0687', '65be98fbe7b82c79bccd9c794ce9d9a3482aec9c', 'v0.0.2'),
('preresnet101', '0591', '4bacff796e113562e1dfdf71cfa7c6ed33e0ba86', 'v0.0.2'),
('preresnet101b', '0603', 'b1e37a09424dde15ecba72365d46b1f59abd479b', 'v0.0.2'),
('preresnet152', '0555', 'c842a030abbcc21a0f2a9a8299fc42204897a611', 'v0.0.14'),
('preresnet152b', '0591', '2c91ab2c8d90f3990e7c30fd6ee2184f6c2c3bee', 'v0.0.2'),
('preresnet200b', '0588', 'f7104ff306ed5de2c27f3c855051c22bda167981', 'v0.0.45'),
('preresnet269b', '0581', '1a7878bb10923b22bda58d7935dfa6e5e8a7b67d', 'v0.0.239'),
('resnext101_32x4d', '0611', 'cf962440f11fe683fd02ec04f2102d9f47ce38a7', 'v0.0.10'),
('resnext101_64x4d', '0575', '651abd029bcc4ce88c62e1d900a710f284a8281e', 'v0.0.10'),
('seresnet50', '0640', '8820f2af62421ce2e1df989d6e0ce7916c78ff86', 'v0.0.11'),
('seresnet101', '0589', '5e6e831b7518b9b8a049dd60ed1ff82ae75ff55e', 'v0.0.11'),
('seresnet152', '0576', '814cf72e0deeab530332b16fb9b609e574afec61', 'v0.0.11'),
('seresnext50_32x4d', '0554', '99e0e9aa4578af9f15045c1ceeb684a2e988628a', 'v0.0.12'),
('seresnext101_32x4d', '0505', '0924f0a2c1de90dc964c482b7aff6232dbef3600', 'v0.0.12'),
('senet154', '0461', '6512228c820897cd09f877527a553ca99d673956', 'v0.0.13'),
('ibn_resnet50', '0641', 'e48a1fe5f7e448d4b784ef4dc0f33832f3370a9b', 'v0.0.127'),
('ibn_resnet101', '0561', '5279c78a0dbfc722cfcfb788af479b6133920528', 'v0.0.127'),
('ibnb_resnet50', '0686', 'e138995e6acda4b496375beac6d01cd7a9f79876', 'v0.0.127'),
('ibn_resnext101_32x4d', '0542', 'b5233c663a4d207d08c21107d6c951956e910be8', 'v0.0.127'),
('ibn_densenet121', '0725', 'b90b0615e6ec5c9652e3e553e27851c8eaf01adf', 'v0.0.127'),
('ibn_densenet169', '0651', '96dd755e0df8a54349278e0cd23a043a5554de08', 'v0.0.127'),
('airnet50_1x64d_r2', '0590', '3ec422128d17314124c02e3bb0f77e26777fb385', 'v0.0.120'),
('airnet50_1x64d_r16', '0619', '090179e777f47057bedded22d669bf9f9ce3169c', 'v0.0.120'),
('airnext50_32x4d_r2', '0551', 'c68156e5e446a1116b1b42bc94b3f881ab73fe92', 'v0.0.120'),
('bam_resnet50', '0658', '96a37c82bdba821385b29859ad1db83061a0ca5b', 'v0.0.124'),
('cbam_resnet50', '0605', 'a1172fe679622224dcc88c00020936ad381806fb', 'v0.0.125'),
('pyramidnet101_a360', '0620', '3a24427baf21ee6566d7e4c7dee25da0e5744f7f', 'v0.0.104'),
('diracnet18v2', '1170', 'e06737707a1f5a5c7fe4e57da92ed890b034cb9a', 'v0.0.111'),
('diracnet34v2', '0993', 'a6a661c0c3e96af320e5b9bf65a6c8e5e498a474', 'v0.0.111'),
('densenet121', '0803', 'f994107a83aed162916ff89e2ded4c5af5bc6457', 'v0.0.3'),
('densenet161', '0644', 'c0fb22c83e8077a952ce1a0c9703d1a08b2b9e3a', 'v0.0.3'),
('densenet169', '0719', '271391051775ba9bbf458a6bd77af4b3007dc892', 'v0.0.3'),
('densenet201', '0663', '71ece4ad7be5d1e2aa4bbf6f1a6b32ac2562d847', 'v0.0.3'),
('condensenet74_c4_g4', '0828', '5ba550494cae7081d12c14b02b2a02365539d377', 'v0.0.4'),
('condensenet74_c8_g8', '1006', '3574d874fefc3307f241690bad51f20e61be1542', 'v0.0.4'),
('peleenet', '1151', '9c47b80297ac072a923cda763b78e7218cd52d3a', 'v0.0.141'),
('wrn50_2', '0641', '83897ab9f015f6f988e51108e12518b08e1819dd', 'v0.0.113'),
('drnc26', '0755', '35405bd52a0c721f3dc64f18d433074f263b7339', 'v0.0.116'),
('drnc42', '0657', '7c99c4608a9a5e5f073f657b92f258ba4ba5ac77', 'v0.0.116'),
('drnc58', '0601', '70ec1f56c23da863628d126a6ed0ad10f037a2ac', 'v0.0.116'),
('drnd22', '0823', '5c2c6a0cf992409ab388e04e9fbd06b7141bdf47', 'v0.0.116'),
('drnd38', '0695', '4630f0fb3f721f4a2296e05aacb1231ba7530ae5', 'v0.0.116'),
('drnd54', '0586', 'bfdc1f8826027b247e2757be45b176b3b91b9ea3', 'v0.0.116'),
('drnd105', '0548', 'a643f4dcf9e4b69eab06b76e54ce22169f837592', 'v0.0.116'),
('dpn68', '0727', '438492331840612ff1700e7b7d52dd6c0c683b47', 'v0.0.17'),
('dpn98', '0553', '52c55969835d56185afa497c43f09df07f58f0d3', 'v0.0.17'),
('dpn131', '0548', '0c53e5b380137ccb789e932775e8bd8a811eeb3e', 'v0.0.17'),
('darknet_tiny', '1784', '4561e1ada619e33520d1f765b3321f7f8ea6196b', 'v0.0.69'),
('darknet_ref', '1718', '034595b49113ee23de72e36f7d8a3dbb594615f6', 'v0.0.64'),
('darknet53', '0564', 'b36bef6b297055dda3d17a3f79596511730e1963', 'v0.0.150'),
('irevnet301', '0841', '95dc8d94257bf16027edd7077b785a8676369fca', 'v0.0.251'),
('bagnet9', '2961', 'cab1179284e9749697f38c1c7e5f0e172be12c89', 'v0.0.255'),
('bagnet17', '1884', '6b2a100f8d14d4616709586483f625743ed04769', 'v0.0.255'),
('bagnet33', '1301', '4f17b6e837dacd978b15708ffbb2c1e6be3c371a', 'v0.0.255'),
('dla34', '0794', '04698d78b16f2d08e4396b5b0c9f46cb42542242', 'v0.0.202'),
('dla46c', '1323', 'efcd363642a4b479892f47edae7440f0eea05edb', 'v0.0.282'),
('dla46xc', '1269', '00d3754ad0ff22636bb1f4b4fb8baebf4751a1ee', 'v0.0.293'),
('dla60', '0669', 'b2cd6e51a322512a6cb45414982a2ec71285daad', 'v0.0.202'),
('dla60x', '0598', '88547d3f81c4df711b15457cfcf37e2b703ed895', 'v0.0.202'),
('dla60xc', '1091', '0f6381f335e5bbb4c69b360be61a4a08e5c7a9de', 'v0.0.289'),
('dla102', '0605', '11df13220b44f51dc8c925fbd9fc334bc8d115b4', 'v0.0.202'),
('dla102x', '0577', '58331655844f9d95bcf2bb90de6ac9cf3b66bd5e', 'v0.0.202'),
('dla102x2', '0536', '079361117045dc661b63ce4b14408d403bc91844', 'v0.0.202'),
('dla169', '0566', 'ae0c6a82acfaf9dc459ac5a032106c2727b71d4f', 'v0.0.202'),
('fishnet150', '0604', 'f5af4873ff5730f589a6c4a505ede8268e6ce3e3', 'v0.0.168'),
('espnetv2_wd2', '2015', 'd234781f81e5d1b5ae6070fc851e3f7bb860b9fd', 'v0.0.238'),
('espnetv2_w1', '1345', '550d54229d7fd8f7c090601c2123ab3ca106393b', 'v0.0.238'),
('espnetv2_w5d4', '1218', '85d97b2b1c9ebb176f634949ef5ca6d7fe70f09c', 'v0.0.238'),
('espnetv2_w3d2', '1129', '3bbb49adaa4fa984a67f82862db7dcfc4998429e', 'v0.0.238'),
('espnetv2_w2', '0961', '13ba0f7200eb745bacdf692905fde711236448ef', 'v0.0.238'),
('squeezenet_v1_0', '1766', 'afdbcf1aef39237300656d2c5a7dba19230e29fc', 'v0.0.128'),
('squeezenet_v1_1', '1772', '25b77bc39e35612abbe7c2344d2c3e1e6756c2f8', 'v0.0.88'),
('squeezeresnet_v1_0', '1809', '25bfc02edeffb279010242614e7d73bbeacc0170', 'v0.0.178'),
('squeezeresnet_v1_1', '1821', 'c27ed88f1b19eb233d3925efc71c71d25e4c434e', 'v0.0.70'),
('sqnxt23_w1', '1906', '97b74e0c4d6bf9fc939771d94b2f6dd97de34024', 'v0.0.171'),
('sqnxt23v5_w1', '1785', '2fe3ad67d73313193a77690b10c17cbceef92340', 'v0.0.172'),
('sqnxt23_w3d2', '1350', 'c2f21bce669dbe50fba544bcc39bc1302f63e1e8', 'v0.0.210'),
('sqnxt23v5_w3d2', '1301', 'c244844ba2f02dadd350dddd74e21360b452f9dd', 'v0.0.212'),
('sqnxt23_w2', '1100', 'b9bb7302824f89f16e078f0a506e3a8c0ad9c74e', 'v0.0.240'),
('sqnxt23v5_w2', '1066', '229b0d3de06197e399eeebf42dc826b78f0aba86', 'v0.0.216'),
('shufflenet_g1_wd4', '3729', '47dbd0f279da6d3056079bb79ad39cabbb3b9415', 'v0.0.134'),
('shufflenet_g3_wd4', '3653', '6abdd65e087e71f80345415cdf7ada6ed2762d60', 'v0.0.135'),
('shufflenet_g1_wd2', '2261', 'dae4bdadd7d48bee791dff2a08cd697cff0e9320', 'v0.0.174'),
('shufflenet_g3_wd2', '2080', 'ccaacfc8d9ac112c6143269df6e258fd55b662a7', 'v0.0.167'),
('shufflenet_g1_w3d4', '1711', '161cd24aa0b2e2afadafa69b44a28af222f2ec7a', 'v0.0.218'),
('shufflenet_g3_w3d4', '1650', '3f3b0aef0ce3174c78ff42cf6910c6e34540fc41', 'v0.0.219'),
('shufflenet_g1_w1', '1389', '4cfb65a30761fe548e0b5afbb5d89793ec41e4e9', 'v0.0.223'),
('shufflenet_g2_w1', '1363', '07256203e217a7b31f1c69a5bd38a6674bce75bc', 'v0.0.241'),
('shufflenet_g3_w1', '1348', 'ce54f64ecff87556a4303380f46abaaf649eb308', 'v0.0.244'),
('shufflenet_g4_w1', '1335', 'e2415f8270a4b6cbfe7dc97044d497edbc898577', 'v0.0.245'),
('shufflenet_g8_w1', '1342', '9a979b365424addba75c559a61a77ac7154b26eb', 'v0.0.250'),
('shufflenetv2_wd2', '1865', '9c22238b5fa9c09541564e8ed7f357a5f7e8cd7c', 'v0.0.90'),
('shufflenetv2_w1', '1163', 'c71dfb7a814c8d8ef704bdbd80995e9ea49ff4ff', 'v0.0.133'),
('shufflenetv2_w3d2', '0942', '26a9230405d956643dcd563a5a383844c49b5907', 'v0.0.288'),
('shufflenetv2_w2', '1249', 'b9f9e84cbf49cf63fe2a89e9c48a9fb107f591d7', 'v0.0.84'),
('shufflenetv2b_wd2', '1822', '01d18d6fa1a6136f605a4277f47c9a757f9ede3b', 'v0.0.157'),
('shufflenetv2b_w1', '1125', '6a5d3dc446e6a00cf60fe8aa2f4139d74d766305', 'v0.0.161'),
('shufflenetv2b_w3d2', '0911', 'f2106fee0748d7f0d40db16b228782b6d7636737', 'v0.0.203'),
('shufflenetv2b_w2', '0834', 'cb36b92ca4ca3bee470b739021d01177e0601c5f', 'v0.0.242'),
('menet108_8x1_g3', '2076', '6acc82e46dfc1ce0dd8c59668aed4a464c8cbdb5', 'v0.0.89'),
('menet128_8x1_g4', '1959', '48fa80fc363adb88ff580788faa8053c9d7507f3', 'v0.0.103'),
('menet160_8x1_g8', '2084', '0f4fce43b4234c5bca5dd76450b698c2d4daae65', 'v0.0.154'),
('menet228_12x1_g3', '1316', '5b670c42031d0078e2ae981829358d7c1b92ee30', 'v0.0.131'),
('menet256_12x1_g4', '1252', '14c6c86df96435c693eb7d0fcd8d3bf4079dd621', 'v0.0.152'),
('menet348_12x1_g3', '0958', 'ad50f635a1f7b799a19a0a9c71aa9939db8ffe77', 'v0.0.173'),
('menet352_12x1_g8', '1200', '4ee200c5c98c64a2503cea82ebf62d1d3c07fb91', 'v0.0.198'),
('menet456_24x1_g3', '0799', '826c002244f1cdc945a95302b1ce5c66d949db74', 'v0.0.237'),
('mobilenet_wd4', '2249', '1ad5e8fe8674cdf7ffda8450095eb96d227397e0', 'v0.0.62'),
('mobilenet_wd2', '1355', '41a21242c95050407df876cfa44bb5d3676aa751', 'v0.0.156'),
('mobilenet_w3d4', '1076', 'd801bcaea83885b16a0306b8b77fe314bbc585c3', 'v0.0.130'),
('mobilenet_w1', '0895', '7e1d739f0fd4b95c16eef077c5dc0a5bb1da8ad5', 'v0.0.155'),
('fdmobilenet_wd4', '3098', '2b22b709a05d7ca6e43acc6f3a9f27d0eb2e01cd', 'v0.0.177'),
('fdmobilenet_wd2', '2015', '414dbeedb2f829dcd8f94cd7fef10aae6829f06f', 'v0.0.83'),
('fdmobilenet_w3d4', '1641', '5561d58aa8889d8d93f2062a2af4e4b35ad7e769', 'v0.0.159'),
('fdmobilenet_w1', '1338', '9d026c04112de9f40e15fa40457d77941443c327', 'v0.0.162'),
('mobilenetv2_wd4', '2451', '05e1e3a286b27c17ea11928783c4cd48b1e7a9b2', 'v0.0.137'),
('mobilenetv2_wd2', '1493', 'b82d79f6730eac625e6b55b0618bff8f7a1ed86d', 'v0.0.170'),
('mobilenetv2_w3d4', '1082', '8656de5a8d90b29779c35c5ce521267c841fd717', 'v0.0.230'),
('mobilenetv2_w1', '0887', '13a021bca5b679b76156829743f7182da42e8bb6', 'v0.0.213'),
('igcv3_wd4', '2871', 'c9f28301391601e5e8ae93139431a9e0d467317c', 'v0.0.142'),
('igcv3_wd2', '1732', '8c504f443283d8a32787275b23771082fcaab61b', 'v0.0.132'),
('igcv3_w3d4', '1140', '63f43cf8d334111d55d06f2f9bf7e1e4871d162c', 'v0.0.207'),
('igcv3_w1', '0920', '12385791681f09adb3a08926c95471f332f538b6', 'v0.0.243'),
('mnasnet', '1174', 'e8ec017ca396dc7d39e03b383776b8cf9ad20a4d', 'v0.0.117'),
('darts', '0874', '74f0c7b690cf8bef9b54cc5afc2cb0f2a2a83630', 'v0.0.118'),
('xception', '0549', 'e4f0232c99fa776e630189d62fea18e248a858b2', 'v0.0.115'),
('inceptionv3', '0565', 'cf4061800bc1dc3b090920fc9536d8ccc15bb86e', 'v0.0.92'),
('inceptionv4', '0529', '5cb7b4e4b8f62d6b4346855d696b06b426b44f3d', 'v0.0.105'),
('inceptionresnetv2', '0490', '1d1b4d184e6d41091c5ac3321d99fa554b498dbe', 'v0.0.107'),
('polynet', '0452', '6a1b295dad3f261b48e845f1b283e4eef3ab5a0b', 'v0.0.96'),
('nasnet_4a1056', '0816', 'd21bbaf5e937c2e06134fa40e7bdb1f501423b86', 'v0.0.97'),
('nasnet_6a4032', '0421', 'f354d28f4acdde399e081260c3f46152eca5d27e', 'v0.0.101'),
('pnasnet5large', '0428', '65de46ebd049e494c13958d5671aba5abf803ff3', 'v0.0.114'),
('resnetd50b', '0565', 'ec03d815c0f016c6517ed7b4b40126af46ceb8a4', 'v0.0.296'),
('resnetd101b', '0473', 'f851c920ec1fe4f729d339c933535d038bf2903c', 'v0.0.296'),
('resnetd152b', '0482', '112e216da50eb20d52c509a28c97b05ef819cefe', 'v0.0.296'),
('nin_cifar10', '0743', '795b082470b58c1aa94e2f861514b7914f6e2f58', 'v0.0.175'),
('nin_cifar100', '2839', '627a11c064eb44c6451fe53e0becfc21a6d57d7f', 'v0.0.183'),
('nin_svhn', '0376', '1205dc06a4847bece8159754033f325f75565c02', 'v0.0.270'),
('resnet20_cifar10', '0597', '9b0024ac4c2f374cde2c5052e0d0344a75871cdb', 'v0.0.163'),
('resnet20_cifar100', '2964', 'a5322afed92fa96cb7b3453106f73cf38e316151', 'v0.0.180'),
('resnet20_svhn', '0343', '8232e6e4c2c9fac1200386b68311c3bd56f483f5', 'v0.0.265'),
('resnet56_cifar10', '0452', '628c42a26fe347b84060136212e018df2bb35e0f', 'v0.0.163'),
('resnet56_cifar100', '2488', 'd65f53b10ad5d124698e728432844c65261c3107', 'v0.0.181'),
('resnet56_svhn', '0275', '6e08ed929b8f0ee649f75464f06b557089023290', 'v0.0.265'),
('resnet110_cifar10', '0369', '4d6ca1fc02eaeed724f4f596011e391528536049', 'v0.0.163'),
('resnet110_cifar100', '2280', 'd8d397a767db6d22af040223ec8ae342a088c3e5', 'v0.0.190'),
('resnet110_svhn', '0245', 'c971f0a38943d8a75386a60c835cc0843c2f6c1c', 'v0.0.265'),
('resnet164bn_cifar10', '0368', '74ae9f4bccb7fb6a8f3f603fdabe8d8632c46b2f', 'v0.0.179'),
('resnet164bn_cifar100', '2044', '8fa07b7264a075fa5add58f4c676b99a98fb1c89', 'v0.0.182'),
('resnet164bn_svhn', '0242', '549413723d787cf7e96903427a7a14fb3ea1a4c1', 'v0.0.267'),
('resnet1001_cifar10', '0328', '77a179e240808b7aa3534230d39b845a62413ca2', 'v0.0.201'),
('resnet1001_cifar100', '1979', '2728b558748f9c3e70db179afb6c62358020858b', 'v0.0.254'),
('resnet1202_cifar10', '0353', '1d5a21290117903fb5fd6ba59f3f7e7da7c08836', 'v0.0.214'),
('preresnet20_cifar10', '0651', '76cec68d11de5b25be2ea5935681645b76195f1d', 'v0.0.164'),
('preresnet20_cifar100', '3022', '3dbfa6a2b850572bccb28cc2477a0e46c24abcb8', 'v0.0.187'),
('preresnet20_svhn', '0322', 'c3c00fed49c1d6d9deda6436d041c5788d549299', 'v0.0.269'),
('preresnet56_cifar10', '0449', 'e9124fcf167d8ca50addef00c3afa4da9f828f29', 'v0.0.164'),
('preresnet56_cifar100', '2505', 'ca90a2be6002cd378769b9d4e7c497dd883d31d9', 'v0.0.188'),
('preresnet56_svhn', '0280', 'b51b41476710c0e2c941356ffe992ff883a3ee87', 'v0.0.269'),
('preresnet110_cifar10', '0386', 'cc08946a2126a1224d1d2560a47cf766a763c52c', 'v0.0.164'),
('preresnet110_cifar100', '2267', '3954e91581b7f3e5f689385d15f618fe16e995af', 'v0.0.191'),
('preresnet110_svhn', '0279', 'aa49e0a3c4a918e227ca2d5a5608704f026134c3', 'v0.0.269'),
('preresnet164bn_cifar10', '0364', '429012d412e82df7961fa071f97c938530e1b005', 'v0.0.196'),
('preresnet164bn_cifar100', '2018', 'a8e67ca6e14f88b009d618b0e9b554312d862174', 'v0.0.192'),
('preresnet164bn_svhn', '0258', '94d42de440d5f057a38f4c8cdbdb24acfee3981c', 'v0.0.269'),
('preresnet1001_cifar10', '0265', '9fedfe5fd643e7355f1062a6db68da310c8962be', 'v0.0.209'),
('preresnet1001_cifar100', '1841', '88f14ed9df1573e98b0ec2a07009a15066855fda', 'v0.0.283'),
('preresnet1202_cifar10', '0339', '6fc686b02191226f39e25a76fc5da26857f7acd9', 'v0.0.246'),
('resnext29_32x4d_cifar10', '0315', '30413525cd4466dbef759294eda9b702bc39648f', 'v0.0.169'),
('resnext29_32x4d_cifar100', '1950', '13ba13d92f6751022549a3b370ae86d3b13ae2d1', 'v0.0.200'),
('resnext29_32x4d_svhn', '0280', 'e85c5217944cdfafb0a538dd7cc817cffaada7c4', 'v0.0.275'),
('resnext29_16x64d_cifar10', '0241', '4133d3d04f9b10b132dcb959601d36f10123f8c2', 'v0.0.176'),
('pyramidnet110_a48_cifar10', '0372', 'eb185645cda89e0c3c47b11c4b2d14ff18fa0ae1', 'v0.0.184'),
('pyramidnet110_a48_cifar100', '2095', '95da1a209916b3cf4af7e8dc44374345a88c60f4', 'v0.0.186'),
('pyramidnet110_a48_svhn', '0247', 'd48bafbebaabe9a68e5924571752b3d7cd95d311', 'v0.0.281'),
('pyramidnet110_a84_cifar10', '0298', '7b835a3cf19794478d478aced63ca9e855c3ffeb', 'v0.0.185'),
('pyramidnet110_a84_cifar100', '1887', 'ff711084381f217f84646c676e4dcc90269dc516', 'v0.0.199'),
('pyramidnet110_a270_cifar10', '0251', '31bdd9d51ec01388cbb2adfb9f822c942de3c4ff', 'v0.0.194'),
('pyramidnet164_a270_bn_cifar10', '0242', 'daa2a402c1081323b8f2239f2201246953774e84', 'v0.0.264'),
('pyramidnet200_a240_bn_cifar10', '0244', '44433afdd2bc32c55dfb1e8347bc44d1c2bf82c7', 'v0.0.268'),
('pyramidnet236_a220_bn_cifar10', '0247', 'daa91d74979c451ecdd8b59e4350382966f25831', 'v0.0.285'),
('pyramidnet272_a200_bn_cifar10', '0239', '586b1ecdc8b34b69dcae4ba57f71c24583cca9b1', 'v0.0.284'),
('densenet40_k12_cifar10', '0561', '8b8e819467a2e4c450e4ff72ced80582d0628b68', 'v0.0.193'),
('densenet40_k12_cifar100', '2490', 'd182c224d6df2e289eef944d54fea9fd04890961', 'v0.0.195'),
('densenet40_k12_svhn', '0305', 'ac0de84a1a905b768c66f0360f1fb9bd918833bf', 'v0.0.278'),
('densenet40_k12_bc_cifar10', '0643', '6dc86a2ea1d088f088462f5cbac06cc0f37348c0', 'v0.0.231'),
('densenet40_k12_bc_cifar100', '2841', '1e9db7651a21e807c363c9f366bd9e91ce2f296f', 'v0.0.232'),
('densenet40_k12_bc_svhn', '0320', '320760528b009864c68ff6c5b874e9f351ea7a07', 'v0.0.279'),
('densenet40_k24_bc_cifar10', '0452', '669c525548a4a2392c5e3c380936ad019f2be7f9', 'v0.0.220'),
('densenet40_k24_bc_cifar100', '2267', '411719c0177abf58eddaddd05511c86db0c9d548', 'v0.0.221'),
('densenet40_k24_bc_svhn', '0290', 'f4440d3b8c974c9e1014969f4d5832c6c90195d5', 'v0.0.280'),
('densenet40_k36_bc_cifar10', '0404', 'b1a4cc7e67db1ed8c5583a59dc178cc7dc2c572e', 'v0.0.224'),
('densenet40_k36_bc_cifar100', '2050', 'cde836fafec1e5d6c8ed69fd3cfe322e8e71ef1d', 'v0.0.225'),
('densenet100_k12_cifar10', '0366', '26089c6e70236e8f25359de6fda67b84425888ab', 'v0.0.205'),
('densenet100_k12_cifar100', '1964', '5e10cd830c06f6ab178e9dd876c83c754ca63f00', 'v0.0.206'),
('densenet100_k24_cifar10', '0313', '397f0e39b517c05330221d4f3a9755eb5e561be1', 'v0.0.252'),
('densenet100_k12_bc_cifar10', '0416', 'b9232829b13c3f3f2ea15f4be97f500b7912c3c2', 'v0.0.189'),
('densenet100_k12_bc_cifar100', '2119', '05a6f02772afda51a612f5b92aadf19ffb60eb72', 'v0.0.208'),
('densenet190_k40_bc_cifar10', '0252', '2896fa088aeaef36fcf395d404d97ff172d78943', 'v0.0.286'),
('densenet250_k24_bc_cifar10', '0267', 'f8f9d3052bae1fea7e33bb1ce143c38b4aa5622b', 'v0.0.290'),
('xdensenet40_2_k24_bc_cifar10', '0531', 'b91a9dc35877c4285fe86f49953d1118f6b69e57', 'v0.0.226'),
('xdensenet40_2_k24_bc_cifar100', '2396', '0ce8f78ab9c6a4786829f816ae0615c7905f292c', 'v0.0.227'),
('xdensenet40_2_k36_bc_cifar10', '0437', 'ed264a2060836c7440f0ccde57315e1ec6263ff0', 'v0.0.233'),
('xdensenet40_2_k36_bc_cifar100', '2165', '6f68f83dc31dea5237e6362e6c6cfeed48a8d9e3', 'v0.0.234'),
('wrn16_10_cifar10', '0293', 'ce810d8a17a2deb73eddb5bec8709f93278bc53e', 'v0.0.166'),
('wrn16_10_cifar100', '1895', 'bef9809c845deb1b2bb0c9aaaa7c58bd97740504', 'v0.0.204'),
('wrn16_10_svhn', '0278', '5ab2a4edd5398a03d2e28db1b075bf0313ae5828', 'v0.0.271'),
('wrn28_10_cifar10', '0239', 'fe97dcd6d0dd8dda8e9e38e6cfa320cffb9955ce', 'v0.0.166'),
('wrn28_10_svhn', '0271', 'd62b6bbaef7228706a67c2c8416681f97c6d4688', 'v0.0.276'),
('wrn40_8_cifar10', '0237', '8dc84ec730f35c4b8968a022bc045c0665410840', 'v0.0.166'),
('wrn40_8_svhn', '0254', 'dee59602c10e5d56bd9c168e8e8400792b9a8b08', 'v0.0.277'),
('ror3_56_cifar10', '0543', '44f0f47d2e1b609880ee1b623014c52a9276e2ea', 'v0.0.228'),
('ror3_56_cifar100', '2549', '34be6719cd128cfe60ba93ac6d250ac4c1acf0a5', 'v0.0.229'),
('ror3_56_svhn', '0269', '5a9ad66c8747151be1d2fb9bc854ae382039bdb9', 'v0.0.287'),
('ror3_110_cifar10', '0435', 'fb2a2b0499e4a4d92bdc1d6792bd5572256d5165', 'v0.0.235'),
('ror3_110_cifar100', '2364', 'd599e3a93cd960c8bfc5d05c721cd48fece5fa6f', 'v0.0.236'),
('ror3_110_svhn', '0257', '155380add8d351d2c12026d886a918f1fc3f9fd0', 'v0.0.287'),
('ror3_164_cifar10', '0393', 'de7b6dc60ad6a297bd55ab65b6d7b1225b0ef6d1', 'v0.0.294'),
('ror3_164_cifar100', '2234', 'd37483fccc7fc1a25ff90ef05ecf1b8eab3cc1c4', 'v0.0.294'),
('ror3_164_svhn', '0273', 'ff0d9af0d40ef204393ecc904b01a11aa63acc01', 'v0.0.294'),
('rir_cifar10', '0328', '414c3e6088ae1e83aa1a77c43e38f940c18a0ce2', 'v0.0.292'),
('rir_cifar100', '1923', 'de8ec24a232b94be88f4208153441f66098a681c', 'v0.0.292'),
('rir_svhn', '0268', '12fcbd3bfc6b4165e9b23f3339a1b751b4b8f681', 'v0.0.292'),
('shakeshakeresnet20_2x16d_cifar10', '0515', 'ef71ec0d5ef928ef8654294114a013895abe3f9a', 'v0.0.215'),
('shakeshakeresnet20_2x16d_cifar100', '2922', '4d07f14234b1c796b3c1dfb24d4a3220a1b6b293', 'v0.0.247'),
('shakeshakeresnet20_2x16d_svhn', '0317', 'a693ec24fb8fe2c9f15bcc6b1050943c0c5d595a', 'v0.0.295'),
('shakeshakeresnet26_2x32d_cifar10', '0317', 'ecd1f8337cc90b5378b4217fb2591f2ed0f02bdf', 'v0.0.217'),
('shakeshakeresnet26_2x32d_cifar100', '1880', 'b47e371f60c9fed9eaac960568783fb6f83a362f', 'v0.0.222'),
('shakeshakeresnet26_2x32d_svhn', '0262', 'c1b8099ece97e17ce85213e4ecc6e50a064050cf', 'v0.0.295'),
('pspnet_resnetd101b_voc', '8144', 'c22f021948461a7b7ab1ef1265a7948762770c83', 'v0.0.297'),
('pspnet_resnetd50b_ade20k', '3687', '13f22137d7dd06c6de2ffc47e6ed33403d3dd2cf', 'v0.0.297'),
('pspnet_resnetd101b_ade20k', '3797', '115d62bf66477221b83337208aefe0f2f0266da2', 'v0.0.297'),
('pspnet_resnetd101b_cityscapes', '7172', '0a6efb497bd4fc763d27e2121211e06f72ada7ed', 'v0.0.297'),
('pspnet_resnetd101b_coco', '6741', 'c8b13be65cb43402fce8bae945f6e0d0a3246b92', 'v0.0.297'),
('deeplabv3_resnetd101b_voc', '8024', 'fd8bf74ffc96c97b30bcd3b6ce194a2daed68098', 'v0.0.298'),
('deeplabv3_resnetd152b_voc', '8120', 'f2dae198b3cdc41920ea04f674b665987c68d7dc', 'v0.0.298'),
('deeplabv3_resnetd50b_ade20k', '3713', 'bddbb458e362e18f5812c2307b322840394314bc', 'v0.0.298'),
('deeplabv3_resnetd101b_ade20k', '3784', '977446a5fb32b33f168f2240fb6b7ef9f561fc1e', 'v0.0.298'),
('deeplabv3_resnetd101b_coco', '6773', 'e59c1d8f7ed5bcb83f927d2820580a2f4970e46f', 'v0.0.298'),
('deeplabv3_resnetd152b_coco', '6899', '7e946d7a63ed255dd38afacebb0a0525e735da64', 'v0.0.298'),
('fcn8sd_resnetd101b_voc', '8040', '66edc0b073f0dec66c18bb163c7d6de1ddbc32a3', 'v0.0.299'),
('fcn8sd_resnetd50b_ade20k', '3339', 'e1dad8a15c2a1be1138bd3ec51ba1b100bb8d9c9', 'v0.0.299'),
('fcn8sd_resnetd101b_ade20k', '3588', '30d05ca42392a164ea7c93a9cbd7f33911d3c1af', 'v0.0.299'),
('fcn8sd_resnetd101b_coco', '6011', 'ebe2ad0bc1de5b4cecade61d17d269aa8bf6df7f', 'v0.0.299'),
]}
imgclsmob_repo_url = 'https://github.com/osmr/imgclsmob'
def get_model_name_suffix_data(model_name):
    """
    Look up the registry record for a pretrained model.

    Parameters
    ----------
    model_name : str
        Name of the model.

    Returns
    -------
    tuple of (str, str, str)
        Top-1 error string, sha1 checksum, and repo release tag.

    Raises
    ------
    ValueError
        If no pretrained weights are registered under `model_name`.
    """
    record = _model_sha1.get(model_name)
    if record is None:
        raise ValueError('Pretrained model for {name} is not available.'.format(name=model_name))
    return record
def get_model_file(model_name,
                   local_model_store_dir_path=os.path.join('~', '.torch', 'models')):
    """
    Return location for the pretrained on local file system. This function will download from online model zoo when
    model cannot be found or has mismatch. The root directory will be created if it doesn't exist.

    Parameters
    ----------
    model_name : str
        Name of the model.
    local_model_store_dir_path : str, default $TORCH_HOME/models
        Location for keeping the model parameters.

    Returns
    -------
    file_path
        Path to the requested pretrained model file.

    Raises
    ------
    ValueError
        If the model is unknown, or the downloaded file fails the sha1 check.
    """
    error, sha1_hash, repo_release_tag = get_model_name_suffix_data(model_name)
    short_sha1 = sha1_hash[:8]
    # File name encodes the reported error rate and a hash prefix, e.g.
    # 'resnet18-0982-0126861b.pth'.
    file_name = '{name}-{error}-{short_sha1}.pth'.format(
        name=model_name,
        error=error,
        short_sha1=short_sha1)
    local_model_store_dir_path = os.path.expanduser(local_model_store_dir_path)
    file_path = os.path.join(local_model_store_dir_path, file_name)
    if os.path.exists(file_path):
        if _check_sha1(file_path, sha1_hash):
            # Cached copy is intact -- nothing to download.
            return file_path
        else:
            logging.warning('Mismatch in the content of model file detected. Downloading again.')
    else:
        logging.info('Model file not found. Downloading to {}.'.format(file_path))

    # exist_ok=True avoids the TOCTOU race of the previous exists()/makedirs()
    # pair when several processes fetch models concurrently.
    os.makedirs(local_model_store_dir_path, exist_ok=True)

    # Releases store a zip wrapping the .pth; download, extract, then clean up.
    zip_file_path = file_path + '.zip'
    _download(
        url='{repo_url}/releases/download/{repo_release_tag}/{file_name}.zip'.format(
            repo_url=imgclsmob_repo_url,
            repo_release_tag=repo_release_tag,
            file_name=file_name),
        path=zip_file_path,
        overwrite=True)
    with zipfile.ZipFile(zip_file_path) as zf:
        zf.extractall(local_model_store_dir_path)
    os.remove(zip_file_path)

    if _check_sha1(file_path, sha1_hash):
        return file_path
    else:
        raise ValueError('Downloaded file has different hash. Please try again.')
def _download(url, path=None, overwrite=False, sha1_hash=None, retries=5, verify_ssl=True):
    """
    Download an given URL

    Parameters
    ----------
    url : str
        URL to download
    path : str, optional
        Destination path to store downloaded file. By default stores to the
        current directory with same name as in url.
    overwrite : bool, optional
        Whether to overwrite destination file if already exists.
    sha1_hash : str, optional
        Expected sha1 hash in hexadecimal digits. Will ignore existing file when hash is specified
        but doesn't match.
    retries : integer, default 5
        The number of times to attempt the download in case of failure or non 200 return codes
    verify_ssl : bool, default True
        Verify SSL certificates.

    Returns
    -------
    str
        The file path of the downloaded file.
    """
    import warnings
    # requests is imported lazily so this module stays importable without it;
    # the placeholder class makes any actual download attempt fail loudly.
    try:
        import requests
    except ImportError:
        class requests_failed_to_import(object):
            pass
        requests = requests_failed_to_import
    # Resolve the destination file name: derive it from the URL when no path
    # is given, or append the URL's basename when path is a directory.
    if path is None:
        fname = url.split('/')[-1]
        # Empty filenames are invalid
        assert fname, 'Can\'t construct file-name from this URL. ' \
                      'Please set the `path` option manually.'
    else:
        path = os.path.expanduser(path)
        if os.path.isdir(path):
            fname = os.path.join(path, url.split('/')[-1])
        else:
            fname = path
    assert retries >= 0, "Number of retries should be at least 0"

    if not verify_ssl:
        warnings.warn(
            'Unverified HTTPS request is being made (verify_ssl=False). '
            'Adding certificate verification is strongly advised.')

    # Skip the download entirely when a valid file already exists (unless
    # overwrite is forced or the existing file fails the sha1 check).
    if overwrite or not os.path.exists(fname) or (sha1_hash and not _check_sha1(fname, sha1_hash)):
        dirname = os.path.dirname(os.path.abspath(os.path.expanduser(fname)))
        if not os.path.exists(dirname):
            os.makedirs(dirname)
        # retries counts the remaining attempts; the loop makes at most
        # retries + 1 tries and re-raises the last error when exhausted.
        while retries + 1 > 0:
            # Disable pyling too broad Exception
            # pylint: disable=W0703
            try:
                print('Downloading {} from {}...'.format(fname, url))
                # Streamed GET so large files are written in 1 KiB chunks
                # instead of being buffered wholly in memory.
                r = requests.get(url, stream=True, verify=verify_ssl)
                if r.status_code != 200:
                    raise RuntimeError("Failed downloading url {}".format(url))
                with open(fname, 'wb') as f:
                    for chunk in r.iter_content(chunk_size=1024):
                        if chunk:  # filter out keep-alive new chunks
                            f.write(chunk)
                # A hash mismatch also counts as a failed attempt and is retried.
                if sha1_hash and not _check_sha1(fname, sha1_hash):
                    raise UserWarning('File {} is downloaded but the content hash does not match.'
                                      ' The repo may be outdated or download may be incomplete. '
                                      'If the "repo_url" is overridden, consider switching to '
                                      'the default repo.'.format(fname))
                break
            except Exception as e:
                retries -= 1
                if retries <= 0:
                    raise e
                else:
                    print("download failed, retrying, {} attempt{} left"
                          .format(retries, 's' if retries > 1 else ''))

    return fname
def _check_sha1(file_name, sha1_hash):
"""
Check whether the sha1 hash of the file content matches the expected hash.
Parameters
----------
file_name : str
Path to the file.
sha1_hash : str
Expected sha1 hash in hexadecimal digits.
Returns
-------
bool
Whether the file content matches the expected hash.
"""
sha1 = hashlib.sha1()
with open(file_name, 'rb') as f:
while True:
data = f.read(1048576)
if not data:
break
sha1.update(data)
return sha1.hexdigest() == sha1_hash
def load_model(net,
               file_path,
               ignore_extra=True):
    """
    Load model state dictionary from a file.

    Parameters
    ----------
    net : Module
        Network in which weights are loaded.
    file_path : str
        Path to the file.
    ignore_extra : bool, default True
        Whether to silently ignore parameters from the file that are not present in this Module.
    """
    import torch

    state = torch.load(file_path)
    if ignore_extra:
        # Drop entries whose keys the target network does not declare.
        own_keys = net.state_dict()
        state = {key: value for key, value in state.items() if key in own_keys}
    net.load_state_dict(state)
def download_model(net,
                   model_name,
                   local_model_store_dir_path=os.path.join('~', '.torch', 'models'),
                   ignore_extra=True):
    """
    Load model state dictionary from a file with downloading it if necessary.

    Parameters
    ----------
    net : Module
        Network in which weights are loaded.
    model_name : str
        Name of the model.
    local_model_store_dir_path : str, default $TORCH_HOME/models
        Location for keeping the model parameters.
    ignore_extra : bool, default True
        Whether to silently ignore parameters from the file that are not present in this Module.
    """
    # Resolve (and download if needed) the weight file first, then load it.
    weights_path = get_model_file(
        model_name=model_name,
        local_model_store_dir_path=local_model_store_dir_path)
    load_model(
        net=net,
        file_path=weights_path,
        ignore_extra=ignore_extra)
def calc_num_params(net):
    """
    Calculate the count of trainable parameters for a model.

    Parameters
    ----------
    net : Module
        Analyzed model.

    Returns
    -------
    int
        Total number of elements across parameters with requires_grad=True.
    """
    # Tensor.numel() gives an exact int element count. The previous
    # numpy-based np.prod(param.size()) returned a float for scalar
    # parameters (np.prod(()) == 1.0) and added a needless dependency.
    return sum(param.numel() for param in net.parameters() if param.requires_grad)
| [
"osemery@gmail.com"
] | osemery@gmail.com |
db1d4fa197ecdb2072bea925cb065fb2266a9d7c | 1d81fbc4e62528ae3aed1bd378f1885216b93fe2 | /parrot_mania/parrots/models.py | bca58c5d80b2c80b5b109aa1f57f4dd661e698b2 | [] | no_license | JoosepAlviste/parrot-mania | 20fbb0da1ed8a3841d6955bb8bfd0e7bf02fdd15 | 88100cfddd8ee968d2a7871829e908025a11915c | refs/heads/master | 2022-12-10T20:37:32.031277 | 2019-04-08T07:39:52 | 2019-04-08T07:39:52 | 161,609,321 | 2 | 0 | null | 2022-12-08T05:01:34 | 2018-12-13T08:42:48 | JavaScript | UTF-8 | Python | false | false | 202 | py | from django.db import models
from accounts.models import User
class Parrot(models.Model):
    """A named parrot link saved by a specific user."""

    name = models.CharField(max_length=255)
    link = models.TextField()
    # on_delete is required from Django 2.0 onward; CASCADE reproduces the
    # pre-2.0 default (a user's parrots are removed with the user).
    user = models.ForeignKey(User, on_delete=models.CASCADE)
| [
"joosep.alviste@gmail.com"
] | joosep.alviste@gmail.com |
4fde42aa00afae2430b0304d259e2ba046fa2021 | 439e4f5624c6dd03cb9bcab8ab995e4fc0711d4b | /WebScrape.py | 753cde2d0bf6a8d2dabeef5b73923e1d7a9a21ee | [] | no_license | IamAono/PriceChecker | dabb2b46513ee92b4a874ae29f783763a2d5992e | 1367ee7f77e891d8aede4c703f49d66356651772 | refs/heads/main | 2023-03-26T04:18:18.916244 | 2021-03-27T01:39:20 | 2021-03-27T01:39:20 | 310,997,761 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,188 | py | from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from Item import Item
import pickle
import time
# Configure a headless Chrome instance shared by all scraping helpers below.
options = webdriver.ChromeOptions()
options.add_argument('--headless')  # so that the web browser doesn't open
options.add_argument("--log-level=3")  # to ignore deprecation error
driver_path = "C:\\Drivers\\chromedriver.exe"
driver = webdriver.Chrome(executable_path = driver_path, chrome_options = options)

# Items tracked from a previous session; start empty when no save file exists.
items = [] # list of items
try:
    # Fixed: the file handle was previously leaked (pickle.load(open(...)))
    # and a bare `except:` swallowed every error, including KeyboardInterrupt.
    with open("C:\\Github\\PriceChecker\\save.p", "rb") as save_file:
        items = pickle.load(save_file)
    print("exists")
except (OSError, pickle.UnpicklingError, EOFError):
    # Best effort: no readable save file yet, keep the empty list.
    pass
# will return the list of items whose price has changed
def price_change():
    """Re-scrape every tracked item's page and return a list of
    ``[name, old_price, new_price]`` entries for items whose price changed.

    Side effect: items whose price changed are updated via ``newPrice``.
    """
    updated = []
    for tracked in items:
        driver.get(tracked.link)
        try:
            # Wait (up to 10s) for the price element to appear before reading it.
            WebDriverWait(driver, 10).until(
                EC.presence_of_element_located((By.XPATH, tracked.xPath))
            )
            current = driver.find_element_by_xpath(tracked.xPath).text
            if current != tracked.price:
                updated.append([tracked.name, tracked.price, current])
                tracked.newPrice(current)  # record the new price on the item
        finally:
            pass
    return updated
# scrapes the current price at xPath on the given page and appends a new Item to the saved items list
def add(name, link, xPath):
    """Open ``link``, read the text of the element at ``xPath`` as the current
    price, and append a new Item to the tracked ``items`` list."""
    driver.get(link)
    try:
        # Wait (up to 10s) for the price element before reading it.
        WebDriverWait(driver, 10).until(
            EC.presence_of_element_located((By.XPATH, xPath))
        )
        current = driver.find_element_by_xpath(xPath).text
        items.append(Item(name, link, xPath, current))
    finally:
        pass
if __name__ == "__main__":
    # On startup, re-scrape every saved item and report any price changes.
    changes = price_change()
    if(len(changes) == 0):
        print("No change in prices")
    else:
        for c in changes:
            print(c[0], "went from", c[1], "to", c[2])
    # Interactive menu loop; state is persisted (pickled) only on clean exit.
    while True:
        print("1. add item\n2. remove item\n3. view items\n4. price history\n5. exit")
        r = input()
        if r == '1':
            # Register a new item to track: name, page URL, and the XPath of
            # the element that holds the price on that page.
            print("Name: ")
            name = input()
            print("Link: ")
            link = input()
            print("xPath: ")
            xPath = input()
            add(name, link, xPath)
        elif r == '2':
            # List saved items with their indices, then delete the chosen one.
            print("These are the items currently saved")
            for i in range(0, len(items)):
                print(i, items[i].name)
            print("Enter in the number of the item you want to remove")
            remove = int(input())
            if remove >= 0 and remove < len(items):
                del items[remove]
                print("Successfully removed")
            else:
                print("That is not a valid number")
        elif r == '3':
            # Show each tracked item with its last known price.
            for item in items:
                print("Name:", item.name, "price:", item.price)
        elif r == '4':
            # Dump the recorded price history of every item.
            for item in items:
                print("Price history for", item.name)
                item.viewPriceHist()
        elif r == '5':
            # Persist the tracked items, release the browser, and exit.
            pickle.dump(items, open("C:\\Github\\PriceChecker\\save.p", "wb"))
            driver.quit()
            break
        else:
            print("That is not a valid response.")
| [
"43483411+IamAono@users.noreply.github.com"
] | 43483411+IamAono@users.noreply.github.com |
5c7e847bfc504d35d735736a91de4edeb6ad63e0 | 112a3e68276cbf1ac3b1d62c62689ebb9fba6d33 | /src/main/domain/APICall.py | 350bfb95478a20460fd250216b8f5030871e594e | [] | no_license | continueing/python_based_cohort_analysis_system | c2865f62b7dab4c2d92505df4e09b3ab5660a968 | e296feb1c0806a9a2e895dcd0b4d9911408f2db3 | refs/heads/master | 2021-01-19T03:11:22.526883 | 2014-06-19T04:39:28 | 2014-06-19T04:39:28 | 20,987,775 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,148 | py | from datetime import datetime
import json
from src.main.util.TimeFormatter import TimeFormatter
__author__ = 'continueing'
class APICall():
    """One logged API request, classified into a request type from its URL
    and request parameters."""

    # Keys used when parsing a serialized log entry.
    JSON_PARM_URL = 'path_name'
    JSON_PARM_CREATED_DATE_TIME = 'created'
    JSON_PARM_PARM = 'request_params'

    # Request-type codes assigned by determineType().
    TYPE_RECOMMENDATION_CLASSES = 4
    TYPE_RECOMMENDATION_SUBCATEGORY = 5
    TYPE_PROMOTION = 6
    TYPE_SHALLOW_SEARCH = 0
    TYPE_DETAIL_VIEW = 1
    TYPE_FILTER = 2
    TYPE_PAYMENT = 3

    @staticmethod
    def fromJson(aJsonString):
        """Build an APICall from a JSON-encoded log line."""
        return APICall.fromDict(json.loads(aJsonString))

    @staticmethod
    def fromDict(aJsonDict):
        """Build an APICall from an already-parsed log dictionary."""
        return APICall(
            anUrl=aJsonDict[APICall.JSON_PARM_URL],
            aCreatedDateTime=TimeFormatter.toDatetime(
                aJsonDict[APICall.JSON_PARM_CREATED_DATE_TIME]),
            aParm=str(aJsonDict[APICall.JSON_PARM_PARM]))

    def __init__(self, anUrl, aCreatedDateTime, aParm):
        self.url = anUrl
        self.createdDateTime = aCreatedDateTime
        self.parm = str(aParm)
        self.type = self.determineType()
        self.checkFormat()

    def checkFormat(self):
        """Raise if any attribute has an unexpected type."""
        constraints = (
            (self.url, str, "url should be string"),
            (self.createdDateTime, datetime, "aCreatedDateTime should be datetime"),
            (self.parm, str, "aParm should be string"),
        )
        for value, expected_type, message in constraints:
            if not isinstance(value, expected_type):
                raise Exception(message)

    def determineType(self):
        """Return the first matching TYPE_* code, or None when nothing matches."""
        # Order matters: e.g. a filter URL also matches the summary-search shape.
        ordered_checks = (
            (self.checkShallowSearch, APICall.TYPE_SHALLOW_SEARCH),
            (self.checkDetailView, APICall.TYPE_DETAIL_VIEW),
            (self.checkFilter, APICall.TYPE_FILTER),
            (self.checkPayment, APICall.TYPE_PAYMENT),
            (self.checkRecommendationClasses, APICall.TYPE_RECOMMENDATION_CLASSES),
            (self.checkRecommendationSubcategory, APICall.TYPE_RECOMMENDATION_SUBCATEGORY),
            (self.checkPromotion, APICall.TYPE_PROMOTION),
        )
        for matches, type_code in ordered_checks:
            if matches():
                return type_code
        return None

    def typeToString(self):
        """Human-readable label for this call's type ("None" when unclassified)."""
        labels = {
            APICall.TYPE_PAYMENT: 'payment',
            APICall.TYPE_SHALLOW_SEARCH: 'shallow search',
            APICall.TYPE_DETAIL_VIEW: 'detail view',
            APICall.TYPE_FILTER: 'filter',
            APICall.TYPE_RECOMMENDATION_CLASSES: 'recommendation classes',
            APICall.TYPE_RECOMMENDATION_SUBCATEGORY: 'recommendation subcategory',
            APICall.TYPE_PROMOTION: 'promotion',
        }
        return labels.get(self.type, "None")

    def __str__(self):
        return '\n'.join([
            self.url,
            self.parm,
            TimeFormatter.toDatetimeString(self.createdDateTime),
            self.typeToString(),
        ])

    def checkRecommendationClasses(self):
        # example of url : /classes/recommend/classes
        segments = str(self.url).split('/')
        return len(segments) == 4 and segments[1:] == ['classes', 'recommend', 'classes']

    def checkRecommendationSubcategory(self):
        # example of url : /classes/recommend/subcategory
        segments = str(self.url).split('/')
        return len(segments) == 4 and segments[1:] == ['classes', 'recommend', 'subcategory']

    def checkPromotion(self):
        # example of url : /classes/promotion
        segments = str(self.url).split('/')
        return len(segments) == 3 and segments[1:] == ['classes', 'promotion']

    def checkClassSummarySearch(self):
        # example of url : /classes/dance/etc/1
        segments = str(self.url).split('/')
        return (len(segments) == 5 and segments[1] == 'classes'
                and segments[2] in ('dance', 'music'))

    def checkFilter(self):
        # Summary search carrying a non-empty request-params payload.
        return self.checkClassSummarySearch() and self.parm != "{}"

    def checkPayment(self):
        # example of url : /foradmin/before_payment
        segments = str(self.url).split('/')
        return len(segments) == 3 and segments[1:] == ['foradmin', 'before_payment']

    def checkShallowSearch(self):
        # Summary search with an empty request-params payload.
        return self.checkClassSummarySearch() and self.parm == "{}"

    def checkDetailView(self):
        # example of url: /classes/12/13 (both trailing segments numeric)
        segments = str(self.url).split('/')
        if len(segments) != 4 or segments[1] != 'classes':
            return False
        try:
            int(segments[2])
            int(segments[3])
        except ValueError:
            return False
        return True
"continueing@gmail.com"
] | continueing@gmail.com |
afe80cd11a5038eaaccd37b223d1d65bc39aef6c | 5c09245bbb1018e9f06e322593dcb3d18d35e968 | /Backend/apps/user_operation/migrations/0001_initial.py | a620767ac3ca88f904589d1d76124492443cd302 | [] | no_license | alexeliecohen/Ecommerce_Platform | f8da53ba624128062dc7e42d74523778c04a8713 | 68aaeb099f24173868e48270e885459e9f441b2d | refs/heads/master | 2022-03-17T22:10:09.040355 | 2019-12-04T08:43:18 | 2019-12-04T08:43:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,028 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2019-10-10 21:37
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.datetime_safe
class Migration(migrations.Migration):
    # Auto-generated by Django (makemigrations); this file is part of the
    # recorded schema history -- prefer a new migration over editing this one.

    initial = True

    dependencies = [
    ]

    operations = [
        # Delivery address book entry.
        # NOTE(review): no user FK is created in this migration; presumably
        # the relation is added in a later migration -- verify.
        migrations.CreateModel(
            name='UserAddress',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('province', models.CharField(default='', max_length=100, verbose_name='Province')),
                ('city', models.CharField(default='', max_length=100, verbose_name='City')),
                ('post_code', models.CharField(default='', max_length=100, verbose_name='Post Code')),
                ('address', models.CharField(default='', max_length=100, verbose_name='Detail Address')),
                ('signer_name', models.CharField(default='', max_length=100, verbose_name='Signer')),
                ('signer_mobile', models.CharField(default='', max_length=10, verbose_name='Phone Number')),
                ('add_time', models.DateTimeField(default=django.utils.datetime_safe.datetime.now, verbose_name='Add Time')),
            ],
            options={
                'verbose_name': 'Delivery Address',
                'verbose_name_plural': 'Delivery Address',
            },
        ),
        # User favorite marker (only id + timestamp here).
        migrations.CreateModel(
            name='UserFav',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('add_time', models.DateTimeField(default=django.utils.datetime_safe.datetime.now, verbose_name='Add Time')),
            ],
            options={
                'verbose_name': 'User Favorite',
                'verbose_name_plural': 'User Favorite',
            },
        ),
        # User feedback/message with typed category and optional attachment.
        migrations.CreateModel(
            name='UserLeavingMessage',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('message_type', models.IntegerField(choices=[(1, 'Comment'), (2, 'Complaint'), (3, 'Inquiry'), (4, 'After Sales'), (5, 'Suggestion')], default=1, help_text='Message Type: 1(Comment),2(Complaint),3(Inquiry),4(After Sales),5(Suggestion)', verbose_name='Message Type')),
                ('subject', models.CharField(default='', max_length=100, verbose_name='Subject')),
                ('message', models.TextField(default='', help_text='Content', verbose_name='Content')),
                ('file', models.FileField(help_text='Uploaded File', upload_to='message/images/', verbose_name='Uploaded File')),
                ('add_time', models.DateTimeField(default=django.utils.datetime_safe.datetime.now, verbose_name='Add Time')),
            ],
            options={
                'verbose_name': 'User Comments',
                'verbose_name_plural': 'User Comments',
            },
        ),
    ]
| [
"mtldong@gmail.com"
] | mtldong@gmail.com |
841f2ef6833614c53073ef654bf3a5e560e16c38 | 078be050b62b1179341a4775383b82b77f132d83 | /src/ml_model/ml_model/pipeline.py | 88f6abba30ec3731303eeb12c7a00b929b5235c3 | [] | no_license | creyesp/ml_to_production | 9b479940f50a2742bdc037734f0ecc932bb5aa01 | 33fe61701a338e7c6f516784cd8b1c9fd4e20d00 | refs/heads/main | 2023-02-19T22:30:49.131647 | 2021-01-11T18:34:44 | 2021-01-11T18:34:44 | 321,346,807 | 1 | 0 | null | 2021-01-11T18:34:45 | 2020-12-14T12:52:59 | Python | UTF-8 | Python | false | false | 2,243 | py | from sklearn.compose import ColumnTransformer
from sklearn.ensemble import RandomForestRegressor
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OneHotEncoder, StandardScaler
from sklearn.impute import SimpleImputer
from sklearn.base import clone
from ml_model.preprocessing import preprocessors as pp
from ml_model.config import config
import logging
_logger = logging.getLogger(__name__)

# Numeric columns: median-impute missing values, then standardize.
numeric_transformer = Pipeline(
    steps=[
        ('imputer', SimpleImputer(strategy='median')),
        ('scaler', StandardScaler()),
    ])

# Categorical columns: fill missing with the literal 'missing', then one-hot
# encode (unknown categories at transform time are ignored, not errors).
categorical_transformer = Pipeline(
    steps=[
        ('imputer', SimpleImputer(strategy='constant', fill_value='missing')),
        ('onehot', OneHotEncoder(categories='auto', handle_unknown='ignore')),
    ])

# Preprocessor variant that drops the categorical features entirely
# (numeric transform + binary passthrough only).
preprocessor_num = ColumnTransformer(
    transformers=[
        ('num', numeric_transformer, config.NUMERICAL_FEATURES),
        ('passthrough', 'passthrough', config.BINARY_FEATURES),
        ('drop', 'drop', config.DROP_FEATURES + config.CATEGORICAL_FEATURES),
    ],
    n_jobs=-1)

# Preprocessor variant that keeps and one-hot encodes the categorical features.
preprocessor_cat = ColumnTransformer(
    transformers=[
        ('num', numeric_transformer, config.NUMERICAL_FEATURES),
        ('cat', categorical_transformer, config.CATEGORICAL_FEATURES),
        ('passthrough', 'passthrough', config.BINARY_FEATURES),
        ('drop', 'drop', config.DROP_FEATURES),
    ],
    n_jobs=-1)

# Random-forest regression pipeline over the numeric-only preprocessor.
rfp_num = Pipeline(
    [
        ('preprocessor', preprocessor_num),
        ('rf_regressor', RandomForestRegressor(n_estimators=100,
                                               min_samples_leaf=5,
                                               n_jobs=-1))
    ]
)

# Random-forest regression pipeline over the full (numeric + categorical)
# preprocessor; this is the variant served by get_model('rf').
rfp = Pipeline(
    [
        ('preprocessor', preprocessor_cat),
        ('rf_regressor', RandomForestRegressor(n_estimators=100,
                                               min_samples_leaf=5,
                                               n_jobs=-1))
    ]
)
def get_model(name: str = 'rf'):
    """Return a fresh (unfitted) clone of the pipeline registered under ``name``.

    Raises
    ------
    ValueError
        If ``name`` is not a known model name.
    """
    if name != 'rf':
        raise ValueError('invalid model name')
    return clone(rfp)
class Model_Factory:
    """Factory stub for named model pipelines.

    NOTE(review): currently only stores the requested model type;
    ``from_name`` is an unimplemented placeholder.
    """

    def __init__(self, model_type):
        # Fixed: the attribute was previously spelled ``model_tyṕe`` with a
        # mojibake accented "p", making it effectively untypeable by callers.
        self.model_type = model_type

    @classmethod
    def from_name(cls):
        # Placeholder -- not implemented yet; returns None.
        pass
| [
"noreply@github.com"
] | noreply@github.com |
8221dde14b547b99f9faed2041f581a2b8211915 | f576f0ea3725d54bd2551883901b25b863fe6688 | /sdk/storage/azure-mgmt-storage/generated_samples/queue_operation_get.py | 2fe4ed0f330180c8f1b2f904034727d53e124075 | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | Azure/azure-sdk-for-python | 02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c | c2ca191e736bb06bfbbbc9493e8325763ba990bb | refs/heads/main | 2023-09-06T09:30:13.135012 | 2023-09-06T01:08:06 | 2023-09-06T01:08:06 | 4,127,088 | 4,046 | 2,755 | MIT | 2023-09-14T21:48:49 | 2012-04-24T16:46:12 | Python | UTF-8 | Python | false | false | 1,547 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
from azure.mgmt.storage import StorageManagementClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-storage
# USAGE
python queue_operation_get.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
    """Fetch one Azure Storage queue and print the service response."""
    # DefaultAzureCredential picks up AZURE_CLIENT_ID / AZURE_TENANT_ID /
    # AZURE_CLIENT_SECRET from the environment (see module docstring above).
    client = StorageManagementClient(
        credential=DefaultAzureCredential(),
        subscription_id="{subscription-id}",
    )

    # Sample values for resource group / storage account / queue name,
    # taken from the service example referenced below.
    response = client.queue.get(
        resource_group_name="res3376",
        account_name="sto328",
        queue_name="queue6185",
    )
    print(response)


# x-ms-original-file: specification/storage/resource-manager/Microsoft.Storage/stable/2023-01-01/examples/QueueOperationGet.json
if __name__ == "__main__":
    main()
| [
"noreply@github.com"
] | noreply@github.com |
93e3bd4b1afc9e703e039cc480c877dd003ab3e5 | 81d98c6ea1eb5f710a02e4f12c9e3cc044056ad8 | /passwordGenerator.py | d27bdd02efd4fa13628cca31fe97f7e691c7eda6 | [] | no_license | Robzabel/Portfolio-Projects | bf0170763a238ee6079c0ad6aa136a46f7860201 | 199cd744579268e648b7c85e5ea6acc0bf0a9db0 | refs/heads/master | 2023-05-07T08:15:47.747708 | 2021-05-26T20:14:50 | 2021-05-26T20:14:50 | 357,875,081 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,999 | py | import string
import random
def gen():
    """Prompt for a length, build one random password, and print it three
    times -- once for each of three string-building techniques."""
    # Candidate characters: upper/lower letters, digits and punctuation.
    pool = []
    for charset in (string.ascii_uppercase, string.ascii_lowercase,
                    string.digits, string.punctuation):
        pool.extend(charset)
    length = int(input('Please enter a password length: '))
    random.shuffle(pool)
    # Technique 1: slice the shuffled pool and join it.
    print(''.join(pool[:length]))
    # Technique 2: build the same password via string concatenation.
    concatenated = ''
    for position in range(length):
        concatenated += pool[position]
    print(concatenated)
    # Technique 3: collect characters in a list, then join once.
    collected = []
    for position in range(length):
        collected.append(pool[position])
    print(''.join(collected))


gen()
| [
"robzabel88@gmail.com"
] | robzabel88@gmail.com |
6f4c7736b4f7b3b3be54a806fa5fed52f9e446db | e3c6dcf5a77ae0b930087bb5849352a088dbc2e4 | /hamon_shu/segments/segment_03/.handlers.py | 4687984f7cf378e302baf025675dc29baf63361d | [] | no_license | Catsvilles/hamon_shu | 684cda44661ba18724af6719e4efc5f763c3cf61 | 35b377074cff9900193018446668aeb5440475be | refs/heads/master | 2022-12-04T08:00:46.779614 | 2020-08-26T21:25:57 | 2020-08-26T21:25:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,913 | py | import abjad
# Recorded consumption state for segment 03's handlers, keyed by handler name,
# so later segments can resume each handler from the same position.  Every
# pitch handler records a pitch_count plus the same two chord counters; the
# dynamic/articulation handlers record their own counter sets.

def _pitch_state(pitch_count):
    """State mapping recorded for one pitch handler."""
    return abjad.OrderedDict(
        [
            ('pitch_count', pitch_count),
            ('chord_boolean_count', -1),
            ('chord_groups_count', -1),
        ]
    )


def _counts_state(pairs):
    """State mapping built from explicit (key, value) pairs."""
    return abjad.OrderedDict(list(pairs))


# (handler name, pitch_count) in the original recorded order.
_PITCH_HANDLER_COUNTS = (
    ('violin_1_pitch_handler_three', 38),
    ('violin_1_pitch_handler_one', 45),
    ('violin_1_pitch_handler_two', 59),
    ('violin_1_pitch_handler_four', 34),
    ('violin_2_pitch_handler_three', 45),
    ('violin_2_pitch_handler_one', 25),
    ('violin_2_pitch_handler_two', 52),
    ('violin_2_pitch_handler_four', 26),
    ('viola_pitch_handler_three', 72),
    ('viola_pitch_handler_one', 24),
    ('viola_pitch_handler_two', 57),
    ('viola_pitch_handler_four', 38),
    ('cello_pitch_handler_three', 44),
    ('cello_pitch_handler_one', 34),
    ('cello_pitch_handler_two', 55),
    ('cello_pitch_handler_four', 14),
)

handler_to_value = abjad.OrderedDict(
    [(name, _pitch_state(count)) for name, count in _PITCH_HANDLER_COUNTS]
    + [
        (
            'dynamic_handler_one',
            _counts_state(
                [
                    ('count_1', 39),
                    ('count_2', 12),
                    ('count_3', 26),
                    ('count_4', 12),
                    ('count_5', 39),
                ]
            ),
        ),
        (
            'dynamic_handler_two',
            _counts_state(
                [
                    ('count_1', 10),
                    ('count_2', 3),
                    ('count_3', 6),
                    ('count_4', 3),
                    ('count_5', 10),
                ]
            ),
        ),
        (
            'articulation_handler_three',
            _counts_state([('count', 92), ('vector_count', 92)]),
        ),
        (
            'articulation_handler_two',
            _counts_state([('count', 19), ('vector_count', 19)]),
        ),
    ]
)
"gregoryrowlandevans@gmail.com"
] | gregoryrowlandevans@gmail.com |
a5383df810265e4945475896f6da240aa1c41b9e | 4caf6f897a358409a9ceaa3d6175d465f1bc5383 | /FYP/India/Future Forecast Model/confirm/indiafutureRNN.py | 9198fd4fe2ddffe130a83ac8ba4c9aca42bc0852 | [] | no_license | meenakshiramesh/FYP | b5b15bfa10b7c424331c041642419be466a37777 | bb17b15bd6fd92092465fe094bbbe32c50df00de | refs/heads/master | 2023-02-15T03:45:54.913957 | 2021-01-07T13:15:30 | 2021-01-07T13:15:30 | 326,727,518 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,849 | py | # -*- coding: utf-8 -*-
"""
Created on Fri Apr 10 10:55:42 2020
@author: Aparajita Das
"""
# Part 1 -Preprocessing
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
np.random.seed(36)  # fixes NumPy's RNG only; Keras/TF randomness is not seeded here
# Importing the training set
dataset_train = pd.read_csv('indiafull.csv')
# Column 4 holds the series being modeled (referred to as 'Confirmed' below --
# verify against the CSV layout).
training_set = dataset_train.iloc[:, 4:5].values
# Feature Scaling
from sklearn.preprocessing import MinMaxScaler
sc = MinMaxScaler(feature_range = (0, 1))
training_set_scaled = sc.fit_transform(training_set)
# Creating a data structure: each sample is an 85-day lookback window and the
# label is the next day's value.  The bounds 85..102 are hard-coded to the
# length of the training file.
X_train = []
y_train = []
for i in range(85, 102):
    X_train.append(training_set_scaled[i-85:i, 0])
    y_train.append(training_set_scaled[i, 0])
X_train, y_train = np.array(X_train), np.array(y_train)
# Reshaping to (samples, timesteps, 1 feature) as expected by Keras LSTM layers
X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], 1))
# Part 2 - Building the RNN
# Importing the Keras libraries and packages
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers import Dropout
# Initialising the RNN
regressor = Sequential()
# Adding the first LSTM layer and some Dropout regularisation
regressor.add(LSTM(units = 45, return_sequences = True, input_shape = (X_train.shape[1], 1)))
regressor.add(Dropout(0.2))
# Adding a second LSTM layer and some Dropout regularisation
regressor.add(LSTM(units = 45, return_sequences = True))
regressor.add(Dropout(0.2))
# Adding a third LSTM layer and some Dropout regularisation
regressor.add(LSTM(units = 45, return_sequences = True))
regressor.add(Dropout(0.2))
# Adding a fourth LSTM layer and some Dropout regularisation
regressor.add(LSTM(units = 45))
regressor.add(Dropout(0.2))
# Adding the output layer
regressor.add(Dense(units = 1))
# Compiling the RNN
regressor.compile(optimizer = 'adam', loss = 'mean_squared_error')
# Fitting the RNN to the Training set
regressor.fit(X_train, y_train, epochs = 80 , batch_size = 20)
# Part 3 - Making the predictions
# Getting the real data
dataset_test = pd.read_csv('indiatest.csv')
real_confirmed_rate = dataset_test.iloc[:, 4:5].values
# Getting the predicted data
dataset_total = pd.concat((dataset_train['Confirmed'], dataset_test['Confirmed']), axis = 0)
# Keep the last 85 training days so every test day has a full lookback window.
inputs = dataset_total[len(dataset_total) - len(dataset_test) - 85:].values
inputs = inputs.reshape(-1,1)
inputs = sc.transform(inputs)  # reuse the scaler fitted on the training data
X_test = []
for i in range(85, 100):
    X_test.append(inputs[i-85:i, 0])
X_test = np.array(X_test)
X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], 1))
predicted_confirmed_rate = regressor.predict(X_test)
# Invert the 0-1 scaling back to absolute case counts.
predicted_confirmed_rate = sc.inverse_transform(predicted_confirmed_rate)
# Part 4 - Visualising the results
# Making structure for visualising: real history followed by predicted days
df_old = pd.read_csv('indiafull.csv', usecols = ['Date', 'Confirmed'])
df_pred = pd.read_csv('indiatest.csv', usecols = ['Date'])
df_pred['Confirmed'] = predicted_confirmed_rate
frames = [df_old, df_pred]
df_result = pd.concat(frames)
# NOTE(review): 'copy' shadows the builtin name and initially aliases df_result
copy = df_result
copy = copy.drop('Date', axis=1)
copy_df_date = df_result
copy_df_date = copy_df_date.drop('Confirmed', axis=1)
# Visualizing predicted Data
datelist2 = list(copy_df_date.iloc[:, 0].values)
copy['Date'] = datelist2
copy = copy.set_index(['Date'])
copy.plot()
dates = list(dataset_test.iloc[:, 0].values)
df_3 = pd.DataFrame(predicted_confirmed_rate)
df_4 = dates
df_3['Date'] = df_4
df_3 = df_3.set_index(['Date'])
df_54 = copy[:102].copy(deep = True)  # first 102 rows = the real (training) series
df_54.plot()
# visualization of future forecast/prediction
plt.plot(df_54, color = 'blue', label = 'Real Covid19 Confirmed Case')
plt.plot(copy, color = 'red', label = 'Predicted Covid19 Confirmed Case', alpha = 0.4)
plt.title('India Covid19 Daywise Confirm Prediction')
plt.xticks(rotation=60)
plt.gca().xaxis.set_major_locator(plt.MultipleLocator(4))
plt.tight_layout()
plt.xlabel('Days')
plt.ylabel('Cases')
plt.legend()
plt.show()
"meenakshir17048@it.ssn.edu.in"
] | meenakshir17048@it.ssn.edu.in |
560ba1036cedaa7535985e80ac45ea83e6c5361e | 5e6d8b9989247801718dd1f10009f0f7f54c1eb4 | /sdk/python/pulumi_azure_native/compute/v20201201/availability_set.py | 25d7c983e0cf8a6ed9d97e871af5b85db623d5b4 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | vivimouret29/pulumi-azure-native | d238a8f91688c9bf09d745a7280b9bf2dd6d44e0 | 1cbd988bcb2aa75a83e220cb5abeb805d6484fce | refs/heads/master | 2023-08-26T05:50:40.560691 | 2021-10-21T09:25:07 | 2021-10-21T09:25:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21,366 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._inputs import *
__all__ = ['AvailabilitySetArgs', 'AvailabilitySet']
# NOTE: generated by the Pulumi SDK Generator (see file header); prefer
# regenerating over hand edits.  The @pulumi.input_type decorator and the
# getter/setter property pattern below are required by the Pulumi runtime.
@pulumi.input_type
class AvailabilitySetArgs:
    def __init__(__self__, *,
                 resource_group_name: pulumi.Input[str],
                 availability_set_name: Optional[pulumi.Input[str]] = None,
                 location: Optional[pulumi.Input[str]] = None,
                 platform_fault_domain_count: Optional[pulumi.Input[int]] = None,
                 platform_update_domain_count: Optional[pulumi.Input[int]] = None,
                 proximity_placement_group: Optional[pulumi.Input['SubResourceArgs']] = None,
                 sku: Optional[pulumi.Input['SkuArgs']] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 virtual_machines: Optional[pulumi.Input[Sequence[pulumi.Input['SubResourceArgs']]]] = None):
        """
        The set of arguments for constructing a AvailabilitySet resource.
        :param pulumi.Input[str] resource_group_name: The name of the resource group.
        :param pulumi.Input[str] availability_set_name: The name of the availability set.
        :param pulumi.Input[str] location: Resource location
        :param pulumi.Input[int] platform_fault_domain_count: Fault Domain count.
        :param pulumi.Input[int] platform_update_domain_count: Update Domain count.
        :param pulumi.Input['SubResourceArgs'] proximity_placement_group: Specifies information about the proximity placement group that the availability set should be assigned to. <br><br>Minimum api-version: 2018-04-01.
        :param pulumi.Input['SkuArgs'] sku: Sku of the availability set, only name is required to be set. See AvailabilitySetSkuTypes for possible set of values. Use 'Aligned' for virtual machines with managed disks and 'Classic' for virtual machines with unmanaged disks. Default value is 'Classic'.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags
        :param pulumi.Input[Sequence[pulumi.Input['SubResourceArgs']]] virtual_machines: A list of references to all virtual machines in the availability set.
        """
        # Only the required argument is set unconditionally; every optional
        # argument is stored only when a value was supplied.
        pulumi.set(__self__, "resource_group_name", resource_group_name)
        if availability_set_name is not None:
            pulumi.set(__self__, "availability_set_name", availability_set_name)
        if location is not None:
            pulumi.set(__self__, "location", location)
        if platform_fault_domain_count is not None:
            pulumi.set(__self__, "platform_fault_domain_count", platform_fault_domain_count)
        if platform_update_domain_count is not None:
            pulumi.set(__self__, "platform_update_domain_count", platform_update_domain_count)
        if proximity_placement_group is not None:
            pulumi.set(__self__, "proximity_placement_group", proximity_placement_group)
        if sku is not None:
            pulumi.set(__self__, "sku", sku)
        if tags is not None:
            pulumi.set(__self__, "tags", tags)
        if virtual_machines is not None:
            pulumi.set(__self__, "virtual_machines", virtual_machines)

    @property
    @pulumi.getter(name="resourceGroupName")
    def resource_group_name(self) -> pulumi.Input[str]:
        """
        The name of the resource group.
        """
        return pulumi.get(self, "resource_group_name")

    @resource_group_name.setter
    def resource_group_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "resource_group_name", value)

    @property
    @pulumi.getter(name="availabilitySetName")
    def availability_set_name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the availability set.
        """
        return pulumi.get(self, "availability_set_name")

    @availability_set_name.setter
    def availability_set_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "availability_set_name", value)

    @property
    @pulumi.getter
    def location(self) -> Optional[pulumi.Input[str]]:
        """
        Resource location
        """
        return pulumi.get(self, "location")

    @location.setter
    def location(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "location", value)

    @property
    @pulumi.getter(name="platformFaultDomainCount")
    def platform_fault_domain_count(self) -> Optional[pulumi.Input[int]]:
        """
        Fault Domain count.
        """
        return pulumi.get(self, "platform_fault_domain_count")

    @platform_fault_domain_count.setter
    def platform_fault_domain_count(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "platform_fault_domain_count", value)

    @property
    @pulumi.getter(name="platformUpdateDomainCount")
    def platform_update_domain_count(self) -> Optional[pulumi.Input[int]]:
        """
        Update Domain count.
        """
        return pulumi.get(self, "platform_update_domain_count")

    @platform_update_domain_count.setter
    def platform_update_domain_count(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "platform_update_domain_count", value)

    @property
    @pulumi.getter(name="proximityPlacementGroup")
    def proximity_placement_group(self) -> Optional[pulumi.Input['SubResourceArgs']]:
        """
        Specifies information about the proximity placement group that the availability set should be assigned to. <br><br>Minimum api-version: 2018-04-01.
        """
        return pulumi.get(self, "proximity_placement_group")

    @proximity_placement_group.setter
    def proximity_placement_group(self, value: Optional[pulumi.Input['SubResourceArgs']]):
        pulumi.set(self, "proximity_placement_group", value)

    @property
    @pulumi.getter
    def sku(self) -> Optional[pulumi.Input['SkuArgs']]:
        """
        Sku of the availability set, only name is required to be set. See AvailabilitySetSkuTypes for possible set of values. Use 'Aligned' for virtual machines with managed disks and 'Classic' for virtual machines with unmanaged disks. Default value is 'Classic'.
        """
        return pulumi.get(self, "sku")

    @sku.setter
    def sku(self, value: Optional[pulumi.Input['SkuArgs']]):
        pulumi.set(self, "sku", value)

    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        Resource tags
        """
        return pulumi.get(self, "tags")

    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "tags", value)

    @property
    @pulumi.getter(name="virtualMachines")
    def virtual_machines(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SubResourceArgs']]]]:
        """
        A list of references to all virtual machines in the availability set.
        """
        return pulumi.get(self, "virtual_machines")

    @virtual_machines.setter
    def virtual_machines(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SubResourceArgs']]]]):
        pulumi.set(self, "virtual_machines", value)
class AvailabilitySet(pulumi.CustomResource):
    # Generated Pulumi resource for Azure compute API version 2020-12-01.
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 availability_set_name: Optional[pulumi.Input[str]] = None,
                 location: Optional[pulumi.Input[str]] = None,
                 platform_fault_domain_count: Optional[pulumi.Input[int]] = None,
                 platform_update_domain_count: Optional[pulumi.Input[int]] = None,
                 proximity_placement_group: Optional[pulumi.Input[pulumi.InputType['SubResourceArgs']]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 sku: Optional[pulumi.Input[pulumi.InputType['SkuArgs']]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 virtual_machines: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SubResourceArgs']]]]] = None,
                 __props__=None):
        """
        Specifies information about the availability set that the virtual machine should be assigned to. Virtual machines specified in the same availability set are allocated to different nodes to maximize availability. For more information about availability sets, see [Manage the availability of virtual machines](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-manage-availability?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json). <br><br> For more information on Azure planned maintenance, see [Planned maintenance for virtual machines in Azure](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-planned-maintenance?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json) <br><br> Currently, a VM can only be added to availability set at creation time. An existing VM cannot be added to an availability set.

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] availability_set_name: The name of the availability set.
        :param pulumi.Input[str] location: Resource location
        :param pulumi.Input[int] platform_fault_domain_count: Fault Domain count.
        :param pulumi.Input[int] platform_update_domain_count: Update Domain count.
        :param pulumi.Input[pulumi.InputType['SubResourceArgs']] proximity_placement_group: Specifies information about the proximity placement group that the availability set should be assigned to. <br><br>Minimum api-version: 2018-04-01.
        :param pulumi.Input[str] resource_group_name: The name of the resource group.
        :param pulumi.Input[pulumi.InputType['SkuArgs']] sku: Sku of the availability set, only name is required to be set. See AvailabilitySetSkuTypes for possible set of values. Use 'Aligned' for virtual machines with managed disks and 'Classic' for virtual machines with unmanaged disks. Default value is 'Classic'.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags
        :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SubResourceArgs']]]] virtual_machines: A list of references to all virtual machines in the availability set.
        """
        ...

    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: AvailabilitySetArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Specifies information about the availability set that the virtual machine should be assigned to. Virtual machines specified in the same availability set are allocated to different nodes to maximize availability. For more information about availability sets, see [Manage the availability of virtual machines](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-manage-availability?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json). <br><br> For more information on Azure planned maintenance, see [Planned maintenance for virtual machines in Azure](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-planned-maintenance?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json) <br><br> Currently, a VM can only be added to availability set at creation time. An existing VM cannot be added to an availability set.

        :param str resource_name: The name of the resource.
        :param AvailabilitySetArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...

    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Dispatch between the two overloads above: either an args object or
        # individual keyword arguments were supplied.
        resource_args, opts = _utilities.get_resource_args_opts(AvailabilitySetArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)

    def _internal_init(__self__,
                       resource_name: str,
                       opts: Optional[pulumi.ResourceOptions] = None,
                       availability_set_name: Optional[pulumi.Input[str]] = None,
                       location: Optional[pulumi.Input[str]] = None,
                       platform_fault_domain_count: Optional[pulumi.Input[int]] = None,
                       platform_update_domain_count: Optional[pulumi.Input[int]] = None,
                       proximity_placement_group: Optional[pulumi.Input[pulumi.InputType['SubResourceArgs']]] = None,
                       resource_group_name: Optional[pulumi.Input[str]] = None,
                       sku: Optional[pulumi.Input[pulumi.InputType['SkuArgs']]] = None,
                       tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                       virtual_machines: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SubResourceArgs']]]]] = None,
                       __props__=None):
        # Normalize and validate the resource options before registration.
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            # Creating a new resource: build the property bag from the arguments.
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = AvailabilitySetArgs.__new__(AvailabilitySetArgs)

            __props__.__dict__["availability_set_name"] = availability_set_name
            __props__.__dict__["location"] = location
            __props__.__dict__["platform_fault_domain_count"] = platform_fault_domain_count
            __props__.__dict__["platform_update_domain_count"] = platform_update_domain_count
            __props__.__dict__["proximity_placement_group"] = proximity_placement_group
            # resource_group_name is the only required input (unless looking
            # up an existing resource by URN).
            if resource_group_name is None and not opts.urn:
                raise TypeError("Missing required property 'resource_group_name'")
            __props__.__dict__["resource_group_name"] = resource_group_name
            __props__.__dict__["sku"] = sku
            __props__.__dict__["tags"] = tags
            __props__.__dict__["virtual_machines"] = virtual_machines
            # Output-only properties, filled in by the engine after creation.
            __props__.__dict__["name"] = None
            __props__.__dict__["statuses"] = None
            __props__.__dict__["type"] = None
        # Register aliases for every other API version of this resource so
        # upgrading the provider does not cause a replace.
        alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:compute/v20201201:AvailabilitySet"), pulumi.Alias(type_="azure-native:compute:AvailabilitySet"), pulumi.Alias(type_="azure-nextgen:compute:AvailabilitySet"), pulumi.Alias(type_="azure-native:compute/v20150615:AvailabilitySet"), pulumi.Alias(type_="azure-nextgen:compute/v20150615:AvailabilitySet"), pulumi.Alias(type_="azure-native:compute/v20160330:AvailabilitySet"), pulumi.Alias(type_="azure-nextgen:compute/v20160330:AvailabilitySet"), pulumi.Alias(type_="azure-native:compute/v20160430preview:AvailabilitySet"), pulumi.Alias(type_="azure-nextgen:compute/v20160430preview:AvailabilitySet"), pulumi.Alias(type_="azure-native:compute/v20170330:AvailabilitySet"), pulumi.Alias(type_="azure-nextgen:compute/v20170330:AvailabilitySet"), pulumi.Alias(type_="azure-native:compute/v20171201:AvailabilitySet"), pulumi.Alias(type_="azure-nextgen:compute/v20171201:AvailabilitySet"), pulumi.Alias(type_="azure-native:compute/v20180401:AvailabilitySet"), pulumi.Alias(type_="azure-nextgen:compute/v20180401:AvailabilitySet"), pulumi.Alias(type_="azure-native:compute/v20180601:AvailabilitySet"), pulumi.Alias(type_="azure-nextgen:compute/v20180601:AvailabilitySet"), pulumi.Alias(type_="azure-native:compute/v20181001:AvailabilitySet"), pulumi.Alias(type_="azure-nextgen:compute/v20181001:AvailabilitySet"), pulumi.Alias(type_="azure-native:compute/v20190301:AvailabilitySet"), pulumi.Alias(type_="azure-nextgen:compute/v20190301:AvailabilitySet"), pulumi.Alias(type_="azure-native:compute/v20190701:AvailabilitySet"), pulumi.Alias(type_="azure-nextgen:compute/v20190701:AvailabilitySet"), pulumi.Alias(type_="azure-native:compute/v20191201:AvailabilitySet"), pulumi.Alias(type_="azure-nextgen:compute/v20191201:AvailabilitySet"), pulumi.Alias(type_="azure-native:compute/v20200601:AvailabilitySet"), pulumi.Alias(type_="azure-nextgen:compute/v20200601:AvailabilitySet"), pulumi.Alias(type_="azure-native:compute/v20210301:AvailabilitySet"), pulumi.Alias(type_="azure-nextgen:compute/v20210301:AvailabilitySet"), pulumi.Alias(type_="azure-native:compute/v20210401:AvailabilitySet"), pulumi.Alias(type_="azure-nextgen:compute/v20210401:AvailabilitySet"), pulumi.Alias(type_="azure-native:compute/v20210701:AvailabilitySet"), pulumi.Alias(type_="azure-nextgen:compute/v20210701:AvailabilitySet")])
        opts = pulumi.ResourceOptions.merge(opts, alias_opts)
        super(AvailabilitySet, __self__).__init__(
            'azure-native:compute/v20201201:AvailabilitySet',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None) -> 'AvailabilitySet':
        """
        Get an existing AvailabilitySet resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))

        __props__ = AvailabilitySetArgs.__new__(AvailabilitySetArgs)

        # All properties start as None; the engine populates them from the
        # live resource identified by `id`.
        __props__.__dict__["location"] = None
        __props__.__dict__["name"] = None
        __props__.__dict__["platform_fault_domain_count"] = None
        __props__.__dict__["platform_update_domain_count"] = None
        __props__.__dict__["proximity_placement_group"] = None
        __props__.__dict__["sku"] = None
        __props__.__dict__["statuses"] = None
        __props__.__dict__["tags"] = None
        __props__.__dict__["type"] = None
        __props__.__dict__["virtual_machines"] = None
        return AvailabilitySet(resource_name, opts=opts, __props__=__props__)

    @property
    @pulumi.getter
    def location(self) -> pulumi.Output[str]:
        """
        Resource location
        """
        return pulumi.get(self, "location")

    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        Resource name
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="platformFaultDomainCount")
    def platform_fault_domain_count(self) -> pulumi.Output[Optional[int]]:
        """
        Fault Domain count.
        """
        return pulumi.get(self, "platform_fault_domain_count")

    @property
    @pulumi.getter(name="platformUpdateDomainCount")
    def platform_update_domain_count(self) -> pulumi.Output[Optional[int]]:
        """
        Update Domain count.
        """
        return pulumi.get(self, "platform_update_domain_count")

    @property
    @pulumi.getter(name="proximityPlacementGroup")
    def proximity_placement_group(self) -> pulumi.Output[Optional['outputs.SubResourceResponse']]:
        """
        Specifies information about the proximity placement group that the availability set should be assigned to. <br><br>Minimum api-version: 2018-04-01.
        """
        return pulumi.get(self, "proximity_placement_group")

    @property
    @pulumi.getter
    def sku(self) -> pulumi.Output[Optional['outputs.SkuResponse']]:
        """
        Sku of the availability set, only name is required to be set. See AvailabilitySetSkuTypes for possible set of values. Use 'Aligned' for virtual machines with managed disks and 'Classic' for virtual machines with unmanaged disks. Default value is 'Classic'.
        """
        return pulumi.get(self, "sku")

    @property
    @pulumi.getter
    def statuses(self) -> pulumi.Output[Sequence['outputs.InstanceViewStatusResponse']]:
        """
        The resource status information.
        """
        return pulumi.get(self, "statuses")

    @property
    @pulumi.getter
    def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
        """
        Resource tags
        """
        return pulumi.get(self, "tags")

    @property
    @pulumi.getter
    def type(self) -> pulumi.Output[str]:
        """
        Resource type
        """
        return pulumi.get(self, "type")

    @property
    @pulumi.getter(name="virtualMachines")
    def virtual_machines(self) -> pulumi.Output[Optional[Sequence['outputs.SubResourceResponse']]]:
        """
        A list of references to all virtual machines in the availability set.
        """
        return pulumi.get(self, "virtual_machines")
| [
"noreply@github.com"
] | noreply@github.com |
566658173d08058551db7cfbad6ea922522ea588 | 892b4d1b7d415d6fbdba57e9e3e7d777b63b3264 | /doc11.py | f6458f84383b316f78b04134ab36ea6cd27ede62 | [] | no_license | mohseenxor/python-example | ea613485c8f855ce774c65d4e839ff1a8a220387 | c39f936c554675eec4c92ffa10947448852da4f1 | refs/heads/master | 2022-07-28T09:27:54.721573 | 2020-05-21T05:03:02 | 2020-05-21T05:03:02 | 265,751,402 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 108 | py | def func1():
# n = 5
print("n value is ",n)
def func2():
    """Demonstrate a function-local binding: sets a local to 10 and prints it."""
    value = 10
    print(f"n value is {value}")
func1()  # NOTE(review): raises NameError — func1 reads `n`, but its `n = 5` line is commented out and no global `n` exists
func2()  # prints "n value is 10" from func2's local variable
| [
"mohseenkhan.pathan@xoriant.com"
] | mohseenkhan.pathan@xoriant.com |
963e5689a23aee34972f31ee5ca3518d6869306f | 76ff6ffe16fdb7ba10b61eaed7009f7e2e0aa40e | /gemah_ripah/merchants/forms.py | 15b92122dc9a6f302277066e241116b2bfe37a3a | [] | no_license | rpribadi/GemahRipah | c5d15b07dd7c1e55497cf9ab2c70dddaeaf13be9 | f9c7ddd076deed1110928844aa7f2744018846e5 | refs/heads/master | 2021-01-10T20:47:56.564351 | 2016-08-13T13:33:05 | 2016-08-13T13:33:05 | 26,705,652 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 160 | py | from django.forms import ModelForm
from models import Merchant
class MerchantForm(ModelForm):
    """Django ModelForm for :class:`Merchant`, exposing all of its fields."""

    class Meta:
        model = Merchant
        exclude = ()  # empty tuple: exclude nothing, so every model field is included
| [
"pribadi.riki@gmail.com"
] | pribadi.riki@gmail.com |
f38cf335b8fab60a2d1b2f67a4620fe3e0c47847 | dfe3191eee14251b958589f9b383fd5f8798d47e | /habanero/__init__.py | 7fae4d724c918f2b4ae26eb4c4d44980330b35cc | [
"MIT"
] | permissive | kyleniemeyer/habanero | 39257428cc442ec764edd3616749db10af783262 | 6338f22f06912a4f1af5f0459ff8329906442489 | refs/heads/master | 2021-01-14T11:53:45.396972 | 2016-07-11T15:59:41 | 2016-07-11T15:59:41 | 66,299,090 | 0 | 0 | null | 2016-08-22T18:47:17 | 2016-08-22T18:47:16 | Python | UTF-8 | Python | false | false | 1,192 | py | # -*- coding: utf-8 -*-
# habanero
'''
habanero library
~~~~~~~~~~~~~~~~~~~~~
habanero is a low level client for the Crossref search API.
Usage::
from habanero import Crossref
cr = Crossref()
# setup a different base URL
Crossref(base_url = "http://some.other.url")
# setup an api key
Crossref(api_key = "123456")
# Make request against works route
cr.works(ids = '10.1371/journal.pone.0033693')
# curl options
## For example, set a timeout
cr.works(query = "ecology", timeout=0.1)
## advanced logging
### setup first
import requests
import logging
import httplib as http_client
http_client.HTTPConnection.debuglevel = 1
logging.basicConfig()
logging.getLogger().setLevel(logging.DEBUG)
requests_log = logging.getLogger("requests.packages.urllib3")
requests_log.setLevel(logging.DEBUG)
requests_log.propagate = True
### then make request
cr.works(query = "ecology")
'''
__title__ = 'habanero'
__version__ = '0.2.6'
__author__ = 'Scott Chamberlain'
__license__ = 'MIT'
from .crossref import Crossref
from .cn import content_negotiation, csl_styles
from .counts import citation_count
from .exceptions import *
| [
"myrmecocystus@gmail.com"
] | myrmecocystus@gmail.com |
7fb3bf54b02d30240863a6b82c4d95ea5980a4e8 | 9eb2d422156e1beed4a8b60cbced979f8aa375de | /src/AppHistory.py | 77f5002e5f19e3491e031a11ab9ae416c3e7c98d | [] | no_license | songdingxx/JobApplicationHistoryManager | e7701f5b07d84f6a3dc83f32343eecb2576c4553 | 56f48caf2c48a384a2535c08dcfd72b8564c2481 | refs/heads/main | 2023-03-05T13:19:59.444398 | 2021-02-16T09:47:20 | 2021-02-16T09:47:20 | 339,284,482 | 1 | 0 | null | 2021-02-16T09:47:20 | 2021-02-16T04:27:28 | Python | UTF-8 | Python | false | false | 1,692 | py | from ProcessInput import processInput
from Util import processString, processStringArray
from ConnDB import getConnection, getDatabase, getCollection, closeConnection
MONGODB_HOST = "localhost"
MONGODB_PORT = 27017
DB_NAME = "ApplicationHistory"
COLLECTION_NAME = "histories"
VALID_OPERATION = {"insert", "count", "updatestatus", "findjobs", "total"}
def main():
conn = getConnection(MONGODB_HOST, MONGODB_PORT)
db = getDatabase(conn, DB_NAME)
collection = getCollection(db, COLLECTION_NAME)
while(True):
command = input("Please type your command - Type \"help\" for help\n")
if command == "quit":
closeConnection(conn)
break
if command == "help":
print()
print("Operation, [Parameters] Seperated by comma")
print("[insert, Company Name, JobID,Position]")
print("[count,Parameter, Value]")
print("[updateStatus, Company Name, param, Status, Option (default = 0) (0: param = JobID, 1:param = Position)]")
print("[findJobs, Company Name, param, Option (default = 0) (0: param = JobID, 1:param = Position)]")
print("[total]")
print("[quit]")
print()
else:
commands = command.split(",")
try:
c_ = processString(commands[0])
params = processStringArray(commands[1:])
except Exception as e:
print(e.args)
if c_ not in VALID_OPERATION:
print("\nInvalid operation!\n")
continue
processInput(c_, params, collection)
if __name__ == "__main__":
main() | [
"songdingxx@gmail.com"
] | songdingxx@gmail.com |
bebc257d0dcb6e0ea2cfb787f7693981a21da8a0 | 9880c13cbad091c22d29a83f26e65a6ee29192e9 | /tmp/.venv/bin/pip3.6 | 14c7d811ba7308ca6e97fe975da49b575077100b | [] | no_license | kgrozis/python_automation | f2296fe50fa0e11aa48a511440aca5e4d56bc9b5 | 94101c3f9b34d871975c781339b87d91efe1bf80 | refs/heads/master | 2020-04-08T08:48:34.532264 | 2018-11-26T15:58:03 | 2018-11-26T15:58:03 | 159,194,940 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 246 | 6 | #!/Volumes/kgrozis/python/automation/tmp/.venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from pip import main
if __name__ == '__main__':
    # Normalize argv[0]: strip a trailing "-script.py"/"-script.pyw" or ".exe"
    # suffix that installer-generated wrapper scripts carry on some platforms.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    # Delegate to pip's entry point and propagate its exit status.
    sys.exit(main())
| [
"kevingrozis@juniper.net"
] | kevingrozis@juniper.net |
0e37bf6580d3248893e8a1c0e5dd6d1ebbe57409 | 8efe56ee34c455a6b1336897f6d457acbc9c10f9 | /tests/metarl/tf/models/test_cnn_model.py | 6d8f599ade02b39db52ef608f933167b30287246 | [
"MIT"
] | permissive | neurips2020submission11699/metarl | ab18d11e708bf569d76cb2fab2bcce089badd111 | ae4825d21478fa1fd0aa6b116941ea40caa152a5 | refs/heads/master | 2022-10-15T22:03:09.948673 | 2020-06-11T19:22:55 | 2020-06-11T19:30:58 | 268,410,657 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,567 | py | import pickle
import numpy as np
import pytest
import tensorflow as tf
from metarl.tf.models import CNNModel
from metarl.tf.models import CNNModelWithMaxPooling
from tests.fixtures import TfGraphTestCase
class TestCNNModel(TfGraphTestCase):
    """Tests for CNNModel / CNNModelWithMaxPooling output values and pickling."""

    def setup_method(self):
        """Create a batch of all-ones 10x10 RGB inputs and a feed placeholder."""
        super().setup_method()
        self.batch_size = 5
        self.input_width = 10
        self.input_height = 10
        # (N, H, W, C) batch of ones so expected conv outputs are easy to compute.
        self.obs_input = np.ones(
            (self.batch_size, self.input_width, self.input_height, 3))
        input_shape = self.obs_input.shape[1:]  # height, width, channel
        self._input_ph = tf.compat.v1.placeholder(tf.float32,
                                                  shape=(None, ) + input_shape,
                                                  name='input')

    # yapf: disable
    @pytest.mark.parametrize('filters, in_channels, strides', [
        (((32, (1, 1)),), (3, ), (1, )),  # noqa: E122
        (((32, (3, 3)),), (3, ), (1, )),
        (((32, (3, 3)),), (3, ), (2, )),
        (((32, (1, 1)), (64, (1, 1))), (3, 32), (1, 1)),
        (((32, (3, 3)), (64, (3, 3))), (3, 32), (1, 1)),
        (((32, (3, 3)), (64, (3, 3))), (3, 32), (2, 2)),
    ])
    # yapf: enable
    def test_output_value(self, filters, in_channels, strides):
        """With all-ones input and weights, every output equals the kernel volume."""
        model = CNNModel(filters=filters,
                         strides=strides,
                         name='cnn_model',
                         padding='VALID',
                         hidden_w_init=tf.constant_initializer(1),
                         hidden_nonlinearity=None)
        outputs = model.build(self._input_ph)
        output = self.sess.run(outputs,
                               feed_dict={self._input_ph: self.obs_input})

        filter_sum = 1
        # filter value after 3 layers of conv
        for filter_iter, in_channel in zip(filters, in_channels):
            filter_sum *= filter_iter[1][0] * filter_iter[1][1] * in_channel

        # VALID padding output size per dim: floor((in - kernel) / stride) + 1
        height_size = self.input_height
        width_size = self.input_width
        for filter_iter, stride in zip(filters, strides):
            height_size = int((height_size - filter_iter[1][0]) / stride) + 1
            width_size = int((width_size - filter_iter[1][1]) / stride) + 1
        flatten_shape = height_size * width_size * filters[-1][0]

        # flatten
        expected_output = np.full((self.batch_size, flatten_shape),
                                  filter_sum,
                                  dtype=np.float32)
        assert np.array_equal(output, expected_output)

    # yapf: disable
    @pytest.mark.parametrize(
        'filters, in_channels, strides, pool_strides, pool_shapes',
        [
            (((32, (1, 1)), ), (3, ), (1, ), (1, 1), (1, 1)),  # noqa: E122
            (((32, (3, 3)), ), (3, ), (1, ), (2, 2), (1, 1)),
            (((32, (3, 3)), ), (3, ), (1, ), (1, 1), (2, 2)),
            (((32, (3, 3)), ), (3, ), (1, ), (2, 2), (2, 2)),
            (((32, (3, 3)), ), (3, ), (2, ), (1, 1), (2, 2)),
            (((32, (3, 3)), ), (3, ), (2, ), (2, 2), (2, 2)),
            (((32, (1, 1)), (64, (1, 1))), (3, 32), (1, 1), (1, 1), (1, 1)),
            (((32, (3, 3)), (64, (3, 3))), (3, 32), (1, 1), (1, 1), (1, 1)),
            (((32, (3, 3)), (64, (3, 3))), (3, 32), (2, 2), (1, 1), (1, 1)),
        ])
    # yapf: enable
    def test_output_value_max_pooling(self, filters, in_channels, strides,
                                      pool_strides, pool_shapes):
        """Same all-ones check, with max pooling applied after each conv layer."""
        model = CNNModelWithMaxPooling(
            filters=filters,
            strides=strides,
            name='cnn_model',
            padding='VALID',
            pool_strides=pool_strides,
            pool_shapes=pool_shapes,
            hidden_w_init=tf.constant_initializer(1),
            hidden_nonlinearity=None)
        outputs = model.build(self._input_ph)
        output = self.sess.run(outputs,
                               feed_dict={self._input_ph: self.obs_input})

        filter_sum = 1
        # filter value after 3 layers of conv
        for filter_iter, in_channel in zip(filters, in_channels):
            filter_sum *= filter_iter[1][0] * filter_iter[1][1] * in_channel

        # Each layer shrinks the spatial dims twice: once for the conv,
        # once for the max-pool that follows it.
        height_size = self.input_height
        width_size = self.input_width
        for filter_iter, stride in zip(filters, strides):
            height_size = int((height_size - filter_iter[1][0]) / stride) + 1
            height_size = int(
                (height_size - pool_shapes[0]) / pool_strides[0]) + 1
            width_size = int((width_size - filter_iter[1][1]) / stride) + 1
            width_size = int(
                (width_size - pool_shapes[1]) / pool_strides[1]) + 1
        flatten_shape = height_size * width_size * filters[-1][0]

        # flatten
        expected_output = np.full((self.batch_size, flatten_shape),
                                  filter_sum,
                                  dtype=np.float32)
        assert np.array_equal(output, expected_output)

    # yapf: disable
    @pytest.mark.parametrize('filters, strides', [
        (((32, (1, 1)),), (1, )),  # noqa: E122
        (((32, (3, 3)),), (1, )),
        (((32, (3, 3)),), (2, )),
        (((32, (1, 1)), (64, (1, 1))), (1, 1)),
        (((32, (3, 3)), (64, (3, 3))), (1, 1)),
        (((32, (3, 3)), (64, (3, 3))), (2, 2)),
    ])
    # yapf: enable
    def test_is_pickleable(self, filters, strides):
        """A pickled-and-restored model must reproduce the original outputs."""
        model = CNNModel(filters=filters,
                         strides=strides,
                         name='cnn_model',
                         padding='VALID',
                         hidden_w_init=tf.constant_initializer(1),
                         hidden_nonlinearity=None)
        outputs = model.build(self._input_ph)
        # Perturb a variable so the comparison is not against default weights.
        with tf.compat.v1.variable_scope('cnn_model/cnn/h0', reuse=True):
            bias = tf.compat.v1.get_variable('bias')
            bias.load(tf.ones_like(bias).eval())

        output1 = self.sess.run(outputs,
                                feed_dict={self._input_ph: self.obs_input})

        h = pickle.dumps(model)
        # Restore in a fresh graph/session to prove the pickle is self-contained.
        with tf.compat.v1.Session(graph=tf.Graph()) as sess:
            model_pickled = pickle.loads(h)
            input_shape = self.obs_input.shape[1:]  # height, width, channel
            input_ph = tf.compat.v1.placeholder(tf.float32,
                                                shape=(None, ) + input_shape,
                                                name='input')
            outputs = model_pickled.build(input_ph)
            output2 = sess.run(outputs, feed_dict={input_ph: self.obs_input})
            assert np.array_equal(output1, output2)
| [
"neurips2020submission11699@gmail.com"
] | neurips2020submission11699@gmail.com |
e12d86d370dcf165b72e5e841e08d996d6f90d10 | e65d16ea1e8d412bac75a809be6d390126bdf528 | /homeassistant/components/hassio/issues.py | 2af0a6ed76485bddc49bf5c1300208967512a4f9 | [
"Apache-2.0"
] | permissive | syssi/home-assistant | 6347d57866cb16ab9d4499ad38e2be6f0399077f | fd43687833741b21221769d46b4d1ecef8a94711 | refs/heads/dev | 2023-08-17T09:31:52.680518 | 2023-06-11T14:22:12 | 2023-06-11T14:22:12 | 97,874,495 | 6 | 16 | Apache-2.0 | 2023-09-13T06:31:21 | 2017-07-20T20:12:37 | Python | UTF-8 | Python | false | false | 11,530 | py | """Supervisor events monitor."""
from __future__ import annotations
import asyncio
from dataclasses import dataclass, field
import logging
from typing import Any, TypedDict
from typing_extensions import NotRequired
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.issue_registry import (
IssueSeverity,
async_create_issue,
async_delete_issue,
)
from .const import (
ATTR_DATA,
ATTR_HEALTHY,
ATTR_ISSUES,
ATTR_SUGGESTIONS,
ATTR_SUPPORTED,
ATTR_UNHEALTHY,
ATTR_UNHEALTHY_REASONS,
ATTR_UNSUPPORTED,
ATTR_UNSUPPORTED_REASONS,
ATTR_UPDATE_KEY,
ATTR_WS_EVENT,
DOMAIN,
EVENT_HEALTH_CHANGED,
EVENT_ISSUE_CHANGED,
EVENT_ISSUE_REMOVED,
EVENT_SUPERVISOR_EVENT,
EVENT_SUPERVISOR_UPDATE,
EVENT_SUPPORTED_CHANGED,
ISSUE_KEY_SYSTEM_DOCKER_CONFIG,
PLACEHOLDER_KEY_REFERENCE,
UPDATE_KEY_SUPERVISOR,
SupervisorIssueContext,
)
from .handler import HassIO, HassioAPIError
ISSUE_KEY_UNHEALTHY = "unhealthy"
ISSUE_KEY_UNSUPPORTED = "unsupported"
ISSUE_ID_UNHEALTHY = "unhealthy_system"
ISSUE_ID_UNSUPPORTED = "unsupported_system"
INFO_URL_UNHEALTHY = "https://www.home-assistant.io/more-info/unhealthy"
INFO_URL_UNSUPPORTED = "https://www.home-assistant.io/more-info/unsupported"
PLACEHOLDER_KEY_REASON = "reason"
UNSUPPORTED_REASONS = {
"apparmor",
"connectivity_check",
"content_trust",
"dbus",
"dns_server",
"docker_configuration",
"docker_version",
"cgroup_version",
"job_conditions",
"lxc",
"network_manager",
"os",
"os_agent",
"restart_policy",
"software",
"source_mods",
"supervisor_version",
"systemd",
"systemd_journal",
"systemd_resolved",
}
# Some unsupported reasons also mark the system as unhealthy. If the unsupported reason
# provides no additional information beyond the unhealthy one then skip that repair.
UNSUPPORTED_SKIP_REPAIR = {"privileged"}
UNHEALTHY_REASONS = {
"docker",
"supervisor",
"setup",
"privileged",
"untrusted",
}
# Keys (type + context) of issues that when found should be made into a repair
ISSUE_KEYS_FOR_REPAIRS = {
"issue_mount_mount_failed",
"issue_system_multiple_data_disks",
"issue_system_reboot_required",
ISSUE_KEY_SYSTEM_DOCKER_CONFIG,
}
_LOGGER = logging.getLogger(__name__)
class SuggestionDataType(TypedDict):
    """Suggestion dictionary as received from supervisor."""

    uuid: str  # unique id of the suggestion
    type: str  # suggestion type slug
    context: str  # raw context string; parsed into SupervisorIssueContext by Suggestion.from_dict
    reference: str | None  # optional reference; null when the suggestion has none
@dataclass(slots=True, frozen=True)
class Suggestion:
"""Suggestion from Supervisor which resolves an issue."""
uuid: str
type: str
context: SupervisorIssueContext
reference: str | None = None
@property
def key(self) -> str:
"""Get key for suggestion (combination of context and type)."""
return f"{self.context}_{self.type}"
@classmethod
def from_dict(cls, data: SuggestionDataType) -> Suggestion:
"""Convert from dictionary representation."""
return cls(
uuid=data["uuid"],
type=data["type"],
context=SupervisorIssueContext(data["context"]),
reference=data["reference"],
)
class IssueDataType(TypedDict):
    """Issue dictionary as received from supervisor."""

    uuid: str  # unique id of the issue
    type: str  # issue type slug
    context: str  # raw context string; parsed into SupervisorIssueContext by Issue.from_dict
    reference: str | None  # optional reference; null when the issue has none
    suggestions: NotRequired[list[SuggestionDataType]]  # may be absent; Issue.from_dict defaults it to []
@dataclass(slots=True, frozen=True)
class Issue:
"""Issue from Supervisor."""
uuid: str
type: str
context: SupervisorIssueContext
reference: str | None = None
suggestions: list[Suggestion] = field(default_factory=list, compare=False)
@property
def key(self) -> str:
"""Get key for issue (combination of context and type)."""
return f"issue_{self.context}_{self.type}"
@classmethod
def from_dict(cls, data: IssueDataType) -> Issue:
"""Convert from dictionary representation."""
suggestions: list[SuggestionDataType] = data.get("suggestions", [])
return cls(
uuid=data["uuid"],
type=data["type"],
context=SupervisorIssueContext(data["context"]),
reference=data["reference"],
suggestions=[
Suggestion.from_dict(suggestion) for suggestion in suggestions
],
)
class SupervisorIssues:
"""Create issues from supervisor events."""
def __init__(self, hass: HomeAssistant, client: HassIO) -> None:
"""Initialize supervisor issues."""
self._hass = hass
self._client = client
self._unsupported_reasons: set[str] = set()
self._unhealthy_reasons: set[str] = set()
self._issues: dict[str, Issue] = {}
@property
def unhealthy_reasons(self) -> set[str]:
"""Get unhealthy reasons. Returns empty set if system is healthy."""
return self._unhealthy_reasons
@unhealthy_reasons.setter
def unhealthy_reasons(self, reasons: set[str]) -> None:
"""Set unhealthy reasons. Create or delete repairs as necessary."""
for unhealthy in reasons - self.unhealthy_reasons:
if unhealthy in UNHEALTHY_REASONS:
translation_key = f"{ISSUE_KEY_UNHEALTHY}_{unhealthy}"
translation_placeholders = None
else:
translation_key = ISSUE_KEY_UNHEALTHY
translation_placeholders = {PLACEHOLDER_KEY_REASON: unhealthy}
async_create_issue(
self._hass,
DOMAIN,
f"{ISSUE_ID_UNHEALTHY}_{unhealthy}",
is_fixable=False,
learn_more_url=f"{INFO_URL_UNHEALTHY}/{unhealthy}",
severity=IssueSeverity.CRITICAL,
translation_key=translation_key,
translation_placeholders=translation_placeholders,
)
for fixed in self.unhealthy_reasons - reasons:
async_delete_issue(self._hass, DOMAIN, f"{ISSUE_ID_UNHEALTHY}_{fixed}")
self._unhealthy_reasons = reasons
@property
def unsupported_reasons(self) -> set[str]:
"""Get unsupported reasons. Returns empty set if system is supported."""
return self._unsupported_reasons
@unsupported_reasons.setter
def unsupported_reasons(self, reasons: set[str]) -> None:
"""Set unsupported reasons. Create or delete repairs as necessary."""
for unsupported in reasons - UNSUPPORTED_SKIP_REPAIR - self.unsupported_reasons:
if unsupported in UNSUPPORTED_REASONS:
translation_key = f"{ISSUE_KEY_UNSUPPORTED}_{unsupported}"
translation_placeholders = None
else:
translation_key = ISSUE_KEY_UNSUPPORTED
translation_placeholders = {PLACEHOLDER_KEY_REASON: unsupported}
async_create_issue(
self._hass,
DOMAIN,
f"{ISSUE_ID_UNSUPPORTED}_{unsupported}",
is_fixable=False,
learn_more_url=f"{INFO_URL_UNSUPPORTED}/{unsupported}",
severity=IssueSeverity.WARNING,
translation_key=translation_key,
translation_placeholders=translation_placeholders,
)
for fixed in self.unsupported_reasons - (reasons - UNSUPPORTED_SKIP_REPAIR):
async_delete_issue(self._hass, DOMAIN, f"{ISSUE_ID_UNSUPPORTED}_{fixed}")
self._unsupported_reasons = reasons
@property
def issues(self) -> set[Issue]:
"""Get issues."""
return set(self._issues.values())
def add_issue(self, issue: Issue) -> None:
"""Add or update an issue in the list. Create or update a repair if necessary."""
if issue.key in ISSUE_KEYS_FOR_REPAIRS:
placeholders: dict[str, str] | None = None
if issue.reference:
placeholders = {PLACEHOLDER_KEY_REFERENCE: issue.reference}
async_create_issue(
self._hass,
DOMAIN,
issue.uuid,
is_fixable=bool(issue.suggestions),
severity=IssueSeverity.WARNING,
translation_key=issue.key,
translation_placeholders=placeholders,
)
self._issues[issue.uuid] = issue
async def add_issue_from_data(self, data: IssueDataType) -> None:
"""Add issue from data to list after getting latest suggestions."""
try:
data["suggestions"] = (
await self._client.get_suggestions_for_issue(data["uuid"])
)[ATTR_SUGGESTIONS]
except HassioAPIError:
_LOGGER.error(
"Could not get suggestions for supervisor issue %s, skipping it",
data["uuid"],
)
return
self.add_issue(Issue.from_dict(data))
def remove_issue(self, issue: Issue) -> None:
"""Remove an issue from the list. Delete a repair if necessary."""
if issue.uuid not in self._issues:
return
if issue.key in ISSUE_KEYS_FOR_REPAIRS:
async_delete_issue(self._hass, DOMAIN, issue.uuid)
del self._issues[issue.uuid]
def get_issue(self, issue_id: str) -> Issue | None:
"""Get issue from key."""
return self._issues.get(issue_id)
async def setup(self) -> None:
"""Create supervisor events listener."""
await self.update()
async_dispatcher_connect(
self._hass, EVENT_SUPERVISOR_EVENT, self._supervisor_events_to_issues
)
async def update(self) -> None:
    """Update issues from Supervisor resolution center."""
    data = await self._client.get_resolution_info()
    self.unhealthy_reasons = set(data[ATTR_UNHEALTHY])
    self.unsupported_reasons = set(data[ATTR_UNSUPPORTED])
    # Remove any cached issues that weren't returned
    for issue_id in set(self._issues.keys()) - {
        issue["uuid"] for issue in data[ATTR_ISSUES]
    }:
        self.remove_issue(self._issues[issue_id])
    # Add/update any issues that came back (suggestions fetched concurrently)
    await asyncio.gather(
        *[self.add_issue_from_data(issue) for issue in data[ATTR_ISSUES]]
    )
@callback
def _supervisor_events_to_issues(self, event: dict[str, Any]) -> None:
    """Create issues from supervisor events."""
    if ATTR_WS_EVENT not in event:
        return
    if (
        event[ATTR_WS_EVENT] == EVENT_SUPERVISOR_UPDATE
        and event.get(ATTR_UPDATE_KEY) == UPDATE_KEY_SUPERVISOR
    ):
        # Supervisor itself updated: re-pull the full resolution info.
        self._hass.async_create_task(self.update())
    elif event[ATTR_WS_EVENT] == EVENT_HEALTH_CHANGED:
        # Healthy again -> clear reasons; otherwise take the reported set.
        self.unhealthy_reasons = (
            set()
            if event[ATTR_DATA][ATTR_HEALTHY]
            else set(event[ATTR_DATA][ATTR_UNHEALTHY_REASONS])
        )
    elif event[ATTR_WS_EVENT] == EVENT_SUPPORTED_CHANGED:
        self.unsupported_reasons = (
            set()
            if event[ATTR_DATA][ATTR_SUPPORTED]
            else set(event[ATTR_DATA][ATTR_UNSUPPORTED_REASONS])
        )
    elif event[ATTR_WS_EVENT] == EVENT_ISSUE_CHANGED:
        self.add_issue(Issue.from_dict(event[ATTR_DATA]))
    elif event[ATTR_WS_EVENT] == EVENT_ISSUE_REMOVED:
        self.remove_issue(Issue.from_dict(event[ATTR_DATA]))
| [
"noreply@github.com"
] | noreply@github.com |
df3e45e1b81e8ebf74ecd66d807b74856d4d4161 | 2672ad4d130a01fe81e3291558babd8da23a997f | /day-5 work/CLASS.py | 1639825ff4c08780c222bc64880c4745328eb81a | [] | no_license | Naganandhinisri/python-beginner | 04d6a577ccb5a1d847ee302c310d866111a37b8a | 0cfa19077dfaf2f74f39c81062ee675e5659e02e | refs/heads/master | 2020-04-19T04:11:59.041153 | 2019-03-29T04:37:19 | 2019-03-29T04:37:19 | 167,956,299 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 109 | py | class Fruit:
def apple(self):
    """Print a short note about apples."""
    message = " good for health"
    print(message)
# Quick demo: instantiate Fruit and invoke its only method.
f = Fruit()
f.apple()
# print(hex(id(f))) | [
"techops@Intern4-MacBook-Pro.local"
] | techops@Intern4-MacBook-Pro.local |
5d6438c6384680b41f7cbf38f5587a3135e42999 | df5bf9ab4f9b80f7ecc04838e6e26c7eff9893d0 | /8ball.py | a865c8c0eb72d5da61d29c017dd5420aeeb30d33 | [] | no_license | Alaska47/8ball-manager | abdbf58d97aa58ccfbe4c0c294a6ce4317b38986 | dadd737af930c6aa29bea300c15bead7373b1276 | refs/heads/master | 2020-08-31T05:45:36.688831 | 2017-06-22T04:59:01 | 2017-06-22T04:59:01 | 94,393,021 | 0 | 0 | null | 2017-06-15T02:51:18 | 2017-06-15T02:51:17 | null | UTF-8 | Python | false | false | 3,195 | py | #!/usr/local/bin/python3
def selectplayer(prompt, playerlist):
    """Interactively pick one player from *playerlist* by substring search.

    Returns the chosen player entry (a [name, pfp, elo] list), or None
    when the user cancels by entering an empty search string.
    """
    print(prompt)
    while True:
        query = input("Search: ").lower()
        if not query:
            return None  # empty search cancels the selection
        hits = [entry for entry in playerlist if query in entry[0].lower()]
        if not hits:
            print("Sorry, no matches were found, please search again")
        elif len(hits) == 1:
            print("Found <" + hits[0][0] + ">, is this correct? (Y/n)")
            answer = input("")
            if len(answer) == 0 or answer[0].lower() == "y":
                return hits[0]
            # 'n' falls through and the search loop restarts
        else:
            print("Found %d matches, please be more specific" % len(hits))
            for entry in hits:
                print("<%s>" % entry[0])
#game constants:
k = 150  # Elo K-factor: maximum rating swing for a single game

## on start, load up data file
print("Loading in player data from 8ball.dat...")
# NOTE(review): file handle is never closed and reopened for write later —
# consider `with open(...)`; confirm no other process reads 8ball.dat.
f = open("8ball.dat")
#format:
#Name,pfp-link,elo
#ideally sorted by increasing elo, but will handle that later
players = []
for line in f.read().split("\n"):
    if(len(line)<1):
        continue
    pdat = line.split(",")
    # each player entry is [name, profile-photo-link, elo:int]
    players.append([pdat[0],pdat[1],int(pdat[2])])
print("Player data of %d players loaded"%len(players))

# Main REPL: any command starting with 'q' (or empty input) exits.
command = "none"
while len(command)>0 and command[0]!="q":
    print("Enter a command:")
    command = input("> ").lower()
    if len(command)==0 or command[0]=="q":
        continue
    if(command[0]=="h"):
        print("===Help Menu===")
        print("display - shows all current player dat")
        print("add - add a new player")
        print("update - add a new game result")
        print("quit")
    elif(command[0]=="d"):
        # column width: longest name (or "Name" header) plus 2 spaces
        padsize = max([len(p[0]) for p in players] + [4])+2
        print("Name" + " "*(padsize-4) + "Elo")
        for player in players:
            print(player[0] + " "*(padsize-len(player[0])) + str(player[2]))
    elif(command[0]=="a"):
        nname = input("Name? ")
        npfp = input("Profile Photo Link? ")
        nelo = 1000  # every new player starts at 1000 Elo
        players.append([nname, npfp, nelo])
    elif(command[0]=="u"):
        p1 = selectplayer("Select Player 1", players)
        if p1==None:
            continue
        p2 = selectplayer("Select Player 2", players)
        if p2==None:
            continue
        print("Who won? [1/2]")
        inp = ""
        while inp not in ["1","2"]:
            inp = input("> ")
            if inp not in ["1","2"]:
                print("Please type '1' or '2'")
        # update elo according to formula
        winner = p1 if inp=="1" else p2
        loser = p2 if inp=="1" else p1
        # standard Elo expected-score formula, clamped to a minimum gain of 5
        ratingchange = max(5,int(k / ((10**((winner[2]-loser[2])/400))+1)))
        print("%s, %d -> %d"%(winner[0],winner[2],winner[2]+ratingchange))
        print("%s, %d -> %d"%(loser[0],loser[2],loser[2]-ratingchange))
        winner[2] += ratingchange
        loser[2] -= ratingchange

# Persist the (possibly updated) roster, sorted by descending elo.
print("Updating player data in 8ball.dat...")
f = open("8ball.dat","w")
# negate elo so a plain ascending sort yields highest-rated first
players2 = [(-p[2], p[0], p[1]) for p in players]
players2.sort()
for p in players2:
    f.write("%s,%s,%d\n"%(p[1],p[2],-p[0]))
f.close()
print("Done updating data!")
print("Thanks for using 8-ball Rating Manager 1.0")
| [
"neilt1111@yahoo.com"
] | neilt1111@yahoo.com |
e31f5d2437e8a36b3cdacb33ba5746494464b856 | 5c6dc206c690feb4de8599b33b442cf8ba597220 | /Code Test Documents/RetrieveDataFromDatabase.py | 27f4eeb95eb83858bef6ef5d5ae067128aad19c6 | [] | no_license | GeorgeZaf7/Patient-Registration-System-PyQt5 | 3bd0eb711f8d1296c13c2ad344ff2936b239a89c | b57ae0c55097f21a78c8b9f18133334327fb9c5d | refs/heads/master | 2022-07-07T03:22:12.222358 | 2020-03-26T12:49:24 | 2020-03-26T12:49:24 | 245,228,140 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,214 | py | import sqlite3
conn = sqlite3.connect("C:/Users/Georgios/PycharmProjects/Patient_Registration_System/Patient_Medical_Records/John_Smith.db")
cur = conn.cursor()
#=========This is to return dates===================
'''validMonth = False
while not validMonth:
lookForMonth = input('Which months data? (Enter 1 to 12): ')
try:
validMonth = 1<=int(lookForMonth)<=12
except:
pass
sqlCmd = 'SELECT date from pat_med_rec WHERE SUBSTR(date,4,2)="%.2i"' % int(lookForMonth)
for row in conn.execute(sqlCmd):
print(row)'''
# ====================This is to return everything based on a date ==========================
'''dates = '%%/%%/2020'
cur.execute("SELECT * from pat_med_rec WHERE date LIKE '%%/%%/2019'")
alpha = cur.fetchall()
print(dates)
print(alpha)'''
#===================This is to return dates but all in one list================
'''lookForMonth = '1'
cur.execute('SELECT date from pat_med_rec WHERE SUBSTR(date,4,2)="%.2i"' % int(lookForMonth))
alpha = cur.fetchall()
print(alpha)'''
test = '2020-01-%%'
cur.execute("SELECT * FROM pat_med_rec WHERE date > ?", (test,))
alpha = cur.fetchall()
print(alpha)
conn.commit()
conn.close() | [
"noreply@github.com"
] | noreply@github.com |
398bcbd622db6e4f7f487cbcc40cedad7c9e518e | ab4e5c960c48a751d066adfad93d6bfda23271b8 | /ccs/api_methods.py | 91bc10dbccacf43ba3095d0a6b23ed6845718445 | [] | no_license | iburunat/doc_suomi | 23cef5f4ed5000e362e0d66904b6aef9b1aea541 | e9debe662df7e6be591a29943e11ba83db248c01 | refs/heads/main | 2023-04-17T21:22:12.268736 | 2021-05-13T22:45:25 | 2021-05-13T22:45:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,379 | py | #%%
from youtube_dl import YoutubeDL
import requests
from spotify_auth import *
import pandas as pd
#%%
# Download audio from youtube.
def youtube_to_audio(link: str, format: str):
    """Download a YouTube video's audio track and convert it with FFmpeg.

    link: URL of the video to download.
    format: preferred audio codec for the FFmpeg post-processor.
    """
    postprocessor = {
        'key': 'FFmpegExtractAudio',
        'preferredcodec': format,
    }
    options = {
        'format': 'bestaudio/best',
        'postprocessors': [postprocessor],
    }
    with YoutubeDL(options) as downloader:
        downloader.download([link])
def string_formatter(s: str) -> str:
    """Return *s* lowercased with spaces URL-encoded as '%20'.

    Used to build Spotify search-query fragments.
    """
    # str.replace does the substitution in one pass; the original rebuilt
    # the string character by character via a list.
    return s.replace(' ', '%20').lower()
#Get respective spotify IDs for each song
def to_spotify_id(artist: str, track: str, header: dict):
    """Search Spotify for a track and return its href/id/popularity.

    Returns a one-row pandas DataFrame; on any failure a placeholder
    DataFrame filled with "fail" is returned so callers can keep going.
    """
    try:
        artist = string_formatter(artist)
        track = string_formatter(track)
        query = f"https://api.spotify.com/v1/search?q=track:{track}%20artist:{artist}&type=track&limit=1"
        obj = requests.get(query, headers = header).json()
        return pd.DataFrame(obj['tracks']['items'])[['href', 'id', 'popularity']]
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit are
        # no longer swallowed; any request/parse error still yields the
        # sentinel row below.
        return pd.DataFrame({'href': ["fail"], "id": ["fail"], "popularity": ["fail"]})
def audio_analysis(tracks: list):
    """Fetch Spotify audio features for every track id in *tracks*."""
    descriptions = []
    for track_id in tracks:
        response = requests.get(
            f"https://api.spotify.com/v1/audio-features/{track_id}",
            headers=header,
        )
        descriptions.append(response.json())
    return descriptions
# %%
# Ad-hoc smoke check of the Spotify lookup; runs at import time.
to_spotify_id("Joao Bosco", "jade", header)
# %%
| [
"pasoneto@gmail.com"
] | pasoneto@gmail.com |
e86ffabd307890dae29d32af4e4daac4fadb1fc7 | aeb06702da8b0161b87c5e7fbdd493b57c41121b | /utils_.py | e285f30e25302aabd581f30cb6978df0f142a6e3 | [
"MIT"
] | permissive | 1061700625/HardNet_MultiDataset | 8f4dcc992f00ac53e1a873df3e8ab30da22626f8 | ed9363f3561955274fa534aeff04558f230adf2f | refs/heads/master | 2023-08-11T17:53:23.336536 | 2020-06-18T14:12:18 | 2020-06-18T14:12:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 24,249 | py | # import math, numpy as np, sys, PIL, random, os, smtplib, socket, shutil, subprocess, cv2
import math, PIL, random, shutil, cv2
import torch.nn.init
# import torchvision.transforms as transforms
# from email.mime.text import MIMEText
from PIL import Image
from fire import Fire
from glob import glob
from os.path import join as pjoin
from os.path import dirname as getdir
from os.path import basename as getbase
from os.path import splitext
from tqdm.auto import tqdm
import torch
import torch.nn.functional as F
from read_write_model import *
import time
def CircularGaussKernel(kernlen=None, circ_zeros=False, sigma=None, norm=True):
    """Build a square 2D Gaussian kernel.

    kernlen: kernel side length; derived from sigma (and forced odd) when omitted.
    circ_zeros: if True, zero values outside the inscribed circle of radius kernlen/2.
    sigma: Gaussian std dev; when omitted the variance is derived from kernlen.
    norm: if True, normalize the kernel to sum to 1.
    """
    assert (kernlen is not None) or (sigma is not None)
    if kernlen is None:
        kernlen = int(2.0 * 3.0 * sigma + 1.0)
        if kernlen % 2 == 0:
            # keep an odd size so the kernel has a single center sample
            kernlen = kernlen + 1
    halfSize = kernlen / 2
    r2 = float(halfSize * halfSize)
    if sigma is None:
        sigma2 = 0.9 * r2
    else:
        sigma2 = 2.0 * sigma * sigma
    x = np.linspace(-halfSize, halfSize, kernlen)
    xv, yv = np.meshgrid(x, x, sparse=False, indexing='xy')
    distsq = (xv) ** 2 + (yv) ** 2
    kernel = np.exp(-(distsq / (sigma2)))
    if circ_zeros:
        kernel *= (distsq <= r2).astype(np.float32)
    if norm:
        kernel /= np.sum(kernel)
    return kernel
class GaussianBlur(torch.nn.Module):
    """Fixed-sigma Gaussian blur as a conv2d with a precomputed kernel buffer."""

    def __init__(self, sigma=1.6):
        super(GaussianBlur, self).__init__()
        self.register_buffer('buf', self.calculate_weights(sigma))

    def calculate_weights(self, sigma):
        """Build the (1, 1, h, w) float32 kernel and remember the padding width."""
        kernel = CircularGaussKernel(sigma=sigma, circ_zeros=False)
        h, w = kernel.shape
        self.pad = int(np.floor(float(h) / 2.))
        return torch.from_numpy(kernel.astype(np.float32)).view(1, 1, h, w)

    def forward(self, x):
        """Blur single-channel input *x*; replicate padding keeps the spatial size."""
        weight = self.buf.cuda() if x.is_cuda else self.buf
        padded = F.pad(x, [self.pad, self.pad, self.pad, self.pad], 'replicate')
        return F.conv2d(padded, weight, padding=0)
def batched_forward(model, data, batch_size, **kwargs):
    """Run *model* over *data* in chunks of *batch_size* to bound memory use.

    Output rows are written into a preallocated tensor whose trailing shape
    is taken from the first batch's output.
    """
    n_patches = len(data)
    if n_patches > batch_size:
        bs = batch_size
        n_batches = int(n_patches / bs + 1)
        for batch_idx in range(n_batches):
            st = batch_idx * bs
            if batch_idx == n_batches - 1:
                # last batch: clamp the end index to the data length
                if (batch_idx + 1) * bs > n_patches:
                    end = n_patches
                else:
                    end = (batch_idx + 1) * bs
            else:
                end = (batch_idx + 1) * bs
            if st >= end:
                continue
            if batch_idx == 0:
                # first batch determines the output shape
                # first_batch_out = model(data[st:end], kwargs)
                first_batch_out = model(data[st:end])
                out_size = torch.Size([n_patches] + list(first_batch_out.size()[1:]))
                # out_size[0] = n_patches
                out = torch.zeros(out_size)
                if data.is_cuda:
                    out = out.cuda()
                out[st:end] = first_batch_out
            else:
                # out[st:end, :, :] = model(data[st:end], kwargs)
                out[st:end, :, :] = model(data[st:end])
        return out
    else:
        # NOTE(review): this path passes `kwargs` as a positional dict while
        # the batched path above drops it entirely — looks inconsistent;
        # confirm the intended call form (likely `model(data)` or **kwargs).
        return model(data, kwargs)
def generate_2dgrid(h, w, centered=True):
    """Return an (h*w, 2) tensor of (y, x) coordinates in row-major order.

    centered=True places the origin near the grid center; otherwise
    coordinates run from 0 to size-1.
    """
    if centered:
        xs = torch.linspace(-w / 2 + 1, w / 2, w)
        ys = torch.linspace(-h / 2 + 1, h / 2, h)
    else:
        xs = torch.linspace(0, w - 1, w)
        ys = torch.linspace(0, h - 1, h)
    y_col = ys.repeat(w, 1).t().contiguous().view(-1)
    x_col = xs.repeat(h)
    return torch.stack([y_col, x_col], 1)
def generate_3dgrid(d, h, w, centered=True):
    """Return an (dl*h*w, 3) tensor of (z, y, x) coordinates.

    d may be an int (z values are a linspace over the depth) or an explicit
    list of z values to use directly.
    """
    if type(d) is not list:
        if centered:
            z = torch.linspace(-d / 2 + 1, d / 2, d)
        else:
            z = torch.linspace(0, d - 1, d)
        dl = d
    else:
        # explicit list of depth values
        z = torch.FloatTensor(d)
        dl = len(d)
    grid2d = generate_2dgrid(h, w, centered=centered)
    # repeat each z for every 2d cell, and tile the 2d grid over all depths
    grid3d = torch.cat([z.repeat(w * h, 1).t().contiguous().view(-1, 1), grid2d.repeat(dl, 1)], dim=1)
    return grid3d
def zero_response_at_border(x, b):
    """Zero a border of width *b* in the spatial dims of x (N, C, H, W), in place.

    When the border width reaches either spatial size, an all-zero tensor
    is returned instead.
    """
    h, w = x.size(2), x.size(3)
    if b >= w or b >= h:
        return x * 0
    x[:, :, :b, :] = 0
    x[:, :, h - b:, :] = 0
    x[:, :, :, :b] = 0
    x[:, :, :, w - b:] = 0
    return x
def batch_eig2x2(A):
    """Eigenvalues of a batch of 2x2 matrices A, shape (B, 2, 2).

    Returns (l1, l2). Where the discriminant is non-positive the pair is
    replaced by the sentinel values (1000, 0.0001).
    """
    trace = A[:, 0, 0] + A[:, 1, 1]
    det = A[:, 0, 0] * A[:, 1, 1] - A[:, 1, 0] * A[:, 0, 1]
    delta1 = trace * trace - 4 * det
    mask = (delta1 > 0).float()
    delta = torch.sqrt(torch.abs(delta1))
    l1 = mask * (trace + delta) / 2.0 + 1000.0 * (1.0 - mask)
    l2 = mask * (trace - delta) / 2.0 + 0.0001 * (1.0 - mask)
    return l1, l2
def normal_df(x, mu=0, sigma=0.01):
    """Gaussian probability density of *x* under N(mu, sigma^2).

    sigma should be a tensor for torch.sqrt to accept the variance term.
    """
    var = sigma * sigma
    coeff = 1.0 / torch.sqrt(2.0 * math.pi * var)
    return coeff * torch.exp(-(x - mu) * (x - mu) / (2 * var))
def get_rotation_matrix(angles_in_radians):
    """Return a (B, 2, 2) batch of 2D rotation matrices for a (B,) angle array."""
    s = np.sin(angles_in_radians)
    c = np.cos(angles_in_radians)
    first = np.stack([c, s], 1)
    second = np.stack([-s, c], 1)
    return np.stack([first, second], axis=2)
def rectifyAffineTransformationUpIsUpNP(A, eps=1e-10):
    """Normalize a batch of 2x2 affine frames A (B, 2, 2) to an "up is up" form.

    The result preserves the (absolute) determinant while zeroing the
    (0, 1) entry; eps guards the square roots and divisions.
    """
    det = np.sqrt(np.abs(A[:, 0, 0] * A[:, 1, 1] - A[:, 0, 1] * A[:, 1, 0]) + eps)
    b2a2 = np.sqrt(A[:, 0, 0] ** 2 + A[:, 0, 1] ** 2 + eps)
    aux = A[:, 0, 1] * A[:, 1, 1] + A[:, 0, 0] * A[:, 1, 0]
    first = np.stack([b2a2 / det, aux / (b2a2 * det)], 1)
    second = np.stack([np.zeros(det.shape), det / b2a2], 1)
    return np.stack([first, second], axis=2)
def get_good_sets(info, count_thr=3, loss_thr=sys.maxsize):
    """Return row indices (into patch_sets) that are well sampled and low loss.

    info: dict with same-shaped 2D arrays 'losses' and 'counts'; one row per set.
    count_thr: every column of a row must have been sampled at least this often.
    loss_thr: the row's mean loss (summed losses / summed counts) must be below this.
    """
    losses = info['losses']
    counts = info['counts']
    summed_losses = np.sum(losses, 1)
    summed_counts = np.sum(counts, 1)
    # minimum over columns: the least-sampled entry of each row
    all_sampled = np.min(counts, 1)
    a = (all_sampled >= count_thr)
    b = (summed_losses / summed_counts) < loss_thr
    mask = a * b  # logical AND of both criteria
    idxs = np.arange(mask.shape[0])
    # (removed a block of commented-out "### DELETE" experiment code)
    return idxs[mask]
def get_patches_loss(info):
    """Return per-patch mean losses (2D array, same shape as info['losses']).

    Each entry with a positive count is divided by its count; zero-count
    entries are left as the raw sums. Note: mutates info['losses'] in place.
    """
    losses = info['losses']
    counts = info['counts']
    losses[counts > 0] /= counts[counts > 0]
    # (removed a block of commented-out "### DELETE" experiment code)
    return losses
# def send_email(recipient='milan.pultar@gmail.com', ignore_host='milan-XPS-15-9560', text=''): # you can use for longer training
# msg = MIMEText(text)
#
# if socket.gethostname() == ignore_host:
# return
# msg["Subject"] = socket.gethostname() + " just finished running a job "# + os.path.basename(__main__.__file__)
# msg["From"] = "clustersgpu@gmail.com"
# msg["To"] = recipient
#
# s = smtplib.SMTP_SSL("smtp.gmail.com", 465)
# s.ehlo()
# s.login("clustersgpu@gmail.com", "4c46bc24732")
# s.sendmail("clustersgpu@gmail.com", recipient, msg.as_string())
# s.quit()
class printc:
    """ANSI color helpers: print the given items space-joined in a color."""

    HEADER = "\033[95m"
    OKBLUE = "\033[94m"
    OKGREEN = "\033[92m"
    WARNING = "\033[93m"
    FAIL = "\033[91m"
    END = "\033[0m"
    BOLD = "\033[1m"
    UNDERLINE = "\033[4m"
    BLACK = "\033[1;30m"
    RED = "\033[1;31m"
    GREEN = "\033[1;32m"
    YELLOW = "\033[1;33m"
    BLUE = "\033[1;34m"
    PURPLE = "\033[1;35m"
    CYAN = "\033[1;36m"
    WHITE = "\033[1;37m"

    @staticmethod
    def blue(*text):
        printc.uni(printc.BLUE, text)

    @staticmethod
    def green(*text):
        printc.uni(printc.GREEN, text)

    @staticmethod
    def yellow(*text):
        printc.uni(printc.YELLOW, text)

    @staticmethod
    def red(*text):
        printc.uni(printc.RED, text)

    @staticmethod
    def uni(color, text: tuple):
        """Print all items of *text*, space-separated, wrapped in color codes."""
        body = ' '.join(str(item) for item in text)
        print(color + body + printc.END)
def get_laf_scale(LAF: torch.Tensor) -> torch.Tensor:
    """Scale of local affine frames: sqrt(|det + eps|) of the 2x2 linear part.

    Slicing with 0:1 ranges keeps singleton dims, so the output shape is
    (..., 1, 1).
    """
    eps = 1e-10
    a = LAF[..., 0:1, 0:1]
    b = LAF[..., 0:1, 1:2]
    c = LAF[..., 1:2, 0:1]
    d = LAF[..., 1:2, 1:2]
    determinant = a * d - c * b + eps
    return determinant.abs().sqrt()
def lookslikeimage(f):
    """Return True if *f* has a common image file extension (case-insensitive)."""
    # str.endswith accepts a tuple of suffixes, replacing the manual
    # sum-of-booleans loop over extensions.
    return f.lower().endswith(('.ppm', '.jpg', '.jpeg', '.png', '.bmp'))
def become_deterministic(seed=0):
    """Seed the python, numpy and torch RNGs and force deterministic cuDNN."""
    for seeder in (random.seed, np.random.seed, torch.manual_seed,
                   torch.cuda.manual_seed, torch.cuda.manual_seed_all):
        seeder(seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
def dict_add(dictionary: dict, key, value, acc='list'):
    """Insert *value* under *key*, creating a list or set accumulator on demand.

    acc: 'list' (append; keeps order and duplicates) or 'set' (add; unique).
    """
    if key not in dictionary.keys():
        if acc == 'list':
            dictionary[key] = []
        elif acc == 'set':
            dictionary[key] = set()
        else:
            assert False, 'only list or set'
    if isinstance(dictionary[key], set):
        # BUGFIX: the original did `dictionary[key] += [value]` for both
        # accumulators, which raises TypeError for sets (set does not
        # support += with a list).
        dictionary[key].add(value)
    else:
        dictionary[key].append(value)
class measure_time():
    """Context manager that prints the wall-clock time spent in its block."""

    def __init__(self):
        pass

    def __enter__(self):
        self.start_time = time.time()

    def __exit__(self, type, value, traceback):
        elapsed = time.time() - self.start_time
        print('time elapsed', time.strftime("%H:%M:%S", time.gmtime(elapsed)))
class Interface:
    """CLI entry points (exposed via python-fire) for dataset preparation:
    image resizing, depth extraction, and patch-dataset building/filtering."""

    def resize_all(self,
                   # dir_in='Datasets/AMOS-views/AMOS-test-1',
                   dir_in='/home/milan/School/2019-2020/B4M33TDV/HWs/Inputs/Room',
                   dir_out='/home/milan/School/2019-2020/B4M33TDV/HWs/Inputs-resized/Room',
                   maxsize=(1000, 1000),
                   mdepth=sys.maxsize,
                   rotate=False,
                   ):  # resizes all images indirectory recursively
        """Recursively downsize every image under dir_in into dir_out (capped at maxsize)."""
        for (dirpath, dirnames, filenames) in os.walk(dir_in):
            # depth of the current dir relative to dir_in
            d = len([c for c in dirpath.replace(dir_in, '').split('/') if c != ''])
            if d > mdepth:
                continue
            for f in filenames:
                if not lookslikeimage(f):
                    continue
                in_path = os.path.join(dirpath, f)
                out_path = in_path.replace(dir_in, dir_out)
                os.makedirs(os.path.dirname(out_path), exist_ok=True)
                print(out_path)
                img = Image.open(in_path)
                if rotate:  # for photos taken vertically
                    img = img.rotate(-90, expand=1)
                # NOTE(review): Image.ANTIALIAS was removed in Pillow 10 —
                # confirm the pinned Pillow version or switch to LANCZOS.
                img.thumbnail(maxsize, Image.ANTIALIAS)
                img.save(out_path)

    def upscale_all(self,
                    dir_in='/home/milan/School/2019-2020/B4M33TDV/HWs/Inputs',
                    dir_out='/home/milan/School/2019-2020/B4M33TDV/HWs/Inputs-resized',
                    # dir_in='Datasets/AMOS-views/AMOS-views-v4',
                    # dir_out='Datasets/AMOS-views/AMOS-views-v4-upscaled',
                    scale=2,
                    mdepth=sys.maxsize,
                    rotate=False,
                    ):
        """Recursively upscale every image under dir_in by an integer *scale* factor."""
        for (dirpath, dirnames, filenames) in os.walk(dir_in):
            d = len([c for c in dirpath.replace(dir_in, '').split('/') if c != ''])
            if d > mdepth:
                continue
            for f in sorted(filenames):
                if not lookslikeimage(f):
                    continue
                in_path = os.path.join(dirpath, f)
                out_path = in_path.replace(dir_in, dir_out)
                os.makedirs(os.path.dirname(out_path), exist_ok=True)
                print(out_path)
                img = Image.open(in_path)
                if rotate:  # for photos taken vertically
                    img = img.rotate(-90, expand=1)
                img = img.resize([c * scale for c in (img.width, img.height)], PIL.Image.BILINEAR)
                img.save(out_path)

    def one_img_per_folder(self,
                           dir_in='Datasets/AMOS-views/AMOS-test-1-downsized',
                           dir_out='Datasets/AMOS-views/AMOS-test-1-downsized-split',
                           ):
        """Copy each image of every view folder into its own '<view>-<image>' folder."""
        for p in glob(os.path.join(dir_in, '*')):
            if not os.path.isdir(p):
                continue
            print(p)
            for f in glob(os.path.join(p, '*')):
                p_out = p + '-' + os.path.splitext(os.path.basename(f))[0]
                p_out = os.path.join(p_out, os.path.basename(f))
                p_out = p_out.replace(dir_in, dir_out)
                os.makedirs(os.path.dirname(p_out), exist_ok=True)
                shutil.copyfile(f, p_out)

    def get_depths(self, dir_in='Datasets/AMOS-views/AMOS-views-v4-upscaled', dir_mega='../MegaDepth/demo.py'):
        """Run MegaDepth's demo.py on every image under dir_in to produce depth maps."""
        dir_in = os.path.relpath(dir_in, os.path.dirname(dir_mega))
        dir_out = os.path.join(os.path.dirname(dir_in), os.path.basename(dir_in) + '-depths')
        print('dir_out', dir_out)
        dirs = glob(os.path.join(dir_in, '*'))
        dirs = [c for c in dirs if os.path.isdir(c)]
        # demo.py expects to run from its own directory
        os.chdir(os.path.dirname(dir_mega))
        os.makedirs(dir_out, exist_ok=True)
        for d in dirs:
            paths = glob(os.path.join(d, '*'))
            for pin in paths:
                pout = pin.replace(dir_in, dir_out)
                os.makedirs(os.path.dirname(pout), exist_ok=True)
                os.system(' '.join(['python demo.py --p_in', pin, '--p_out', pout]))

    def transPS(self, dir_in='/home/pultami1/PS-Dataset/PS-Dataset'):
        # transform PS-DS to liberty format
        """Convert each PS-Dataset folder into a liberty-style (patches, ids) .pt file."""
        dout = pjoin(getdir(dir_in), getbase(dir_in) + '_trans')
        print(dout)
        folders = glob(pjoin(dir_in, '*'))
        os.makedirs(dout, exist_ok=True)
        for f in tqdm(folders):
            info = open(pjoin(f, 'patch_info.txt'), 'r').readlines()
            # first comma-separated field of each whitespace token is the set id
            ids = [int(c.split(',')[0]) for c in ''.join(info).strip().split()]
            ids = torch.tensor(ids)
            patches = torch.load(pjoin(f, 'patchImg.bin'))
            patches = patches.squeeze(1)
            pout = pjoin(dout, 'PS-' + getbase(f) + '.pt')
            torch.save((patches, ids), pout)

    def extract_patches(self,
                        dir_in='Datasets/Phototourism/trevi_fountain/dense',
                        dir_out='Datasets/Phototourism',
                        ):
        """Cut 64x64 grayscale patches around every observation of each 3D point
        of a COLMAP-style sparse model and save them with labels/camera ids."""
        printc.yellow('\n'.join(['Input arguments:'] + [str(x) for x in sorted(locals().items()) if x[0] != 'self']))
        cams, imgs, pts = read_model(path=pjoin(dir_in, 'sparse'), ext='.bin')
        print('found points', len(pts))
        paths = glob(pjoin(dir_in, 'images', '*'))
        images = {}
        for p in tqdm(paths):
            images[getbase(p)] = np.asarray(PIL.Image.open(p))
        ids3D = []
        patches = []
        ids_cam = []
        for i in tqdm(list(pts.keys())):
            image_ids = pts[i].image_ids
            point2D_idxs = pts[i].point2D_idxs
            patches_one = []
            ids3D_one = []
            ids_cam_one = []
            for a, b in zip(image_ids, point2D_idxs):
                img_data = imgs[a]
                D2pt = img_data.xys[b]
                img = PIL.Image.fromarray(images[img_data.name], 'RGB').convert('L')
                w, h = img.size[0], img.size[1]
                left, top, right, bottom = D2pt[0] - 32, D2pt[1] - 32, D2pt[0] + 32, D2pt[1] + 32
                if not (left > 0 and top > 0 and right < w - 1 and bottom < h - 1):  # no black rectangles
                    continue
                patch = img.crop((left, top, right, bottom))
                patch = torch.tensor(np.asarray(patch))
                ids3D_one += [i]
                ids_cam_one += [a]
                patches_one += [patch]
            # keep only points observed in at least two usable views
            if len(patches_one) > 1:
                patches += patches_one
                ids3D += ids3D_one
                ids_cam += ids_cam_one
        print('stacking')
        patches = torch.stack(patches, 0)
        ids3D = torch.tensor(ids3D)
        ids_cam = torch.tensor(ids_cam)
        save_path = pjoin(dir_out, getbase(getdir(dir_in)) + '.pt')
        print('saving to', save_path)
        torch.save({'patches': patches, 'labels': ids3D, 'cam_ids': ids_cam}, save_path)

    def extract_patches_rgb(self,
                            dir_in='Datasets/Phototourism/colosseum_exterior/dense',
                            dir_out='Datasets/Phototourism',
                            which='labelscolo.pt',
                            ):
        """Like extract_patches but keeps RGB and only the 3D-point ids listed in *which*."""
        printc.yellow('\n'.join(['Input arguments:'] + [str(x) for x in sorted(locals().items()) if x[0] != 'self']))
        cams, imgs, pts = read_model(path=pjoin(dir_in, 'sparse'), ext='.bin')
        print('found points', len(pts))
        paths = glob(pjoin(dir_in, 'images', '*'))
        images = {}
        for p in tqdm(paths):
            images[getbase(p)] = np.asarray(PIL.Image.open(p))
        ids3D = []
        patches = []
        ids_cam = []
        subset = torch.load(which)
        for i in tqdm(list(pts.keys())):
            if i not in subset:
                continue
            image_ids = pts[i].image_ids
            point2D_idxs = pts[i].point2D_idxs
            patches_one = []
            ids3D_one = []
            ids_cam_one = []
            for a, b in zip(image_ids, point2D_idxs):
                img_data = imgs[a]
                D2pt = img_data.xys[b]
                img = PIL.Image.fromarray(images[img_data.name], 'RGB')
                w, h = img.size[0], img.size[1]
                left, top, right, bottom = D2pt[0] - 32, D2pt[1] - 32, D2pt[0] + 32, D2pt[1] + 32
                if not (left > 0 and top > 0 and right < w - 1 and bottom < h - 1):  # no black rectangles
                    continue
                patch = img.crop((left, top, right, bottom))
                # patch = torch.tensor(np.asarray(patch))
                patch = torch.as_tensor(np.asarray(patch))
                ids3D_one += [i]
                ids_cam_one += [a]
                patches_one += [patch]
            if len(patches_one) > 1:
                patches += patches_one
                ids3D += ids3D_one
                ids_cam += ids_cam_one
        print('stacking')
        patches = torch.stack(patches, 0)
        ids3D = torch.tensor(ids3D)
        ids_cam = torch.tensor(ids_cam)
        save_path = pjoin(dir_out, getbase(getdir(dir_in)) + '_RGB.pt')
        print('saving to', save_path)
        torch.save({'patches': patches, 'labels': ids3D, 'cam_ids': ids_cam}, save_path)

    def filter_sets(self,
                    # path_ds='Datasets/AMOS-views/AMOS-views-v4/AMOS-views-v4_maxsets:2000_sigmas-v:e011_thr:0.00016_WF:Hessian_PG:new_masks:AMOS-masks.pt',
                    path_ds='Datasets/Phototourism/hagia_sophia_interior.pt',
                    # path_stats='Models/id:0_arch:h7_ds:v4_loss:tripletMargin_mpos:1.0_mneg:1.0_lr:0.0_maxsets:2000_sigmas-v:e011_thr:0.00016_WF:Hessian_PG:new_masks:AMOS-masks_tps:5000000_CamsB:5_resume_ep:1_bs:3072_pos:2/stats_0.npy',
                    path_stats='Models/id:0_arch:h7_ds:hagia_sophia_interior_loss:tripletMargin_mpos:1.0_mneg:1.0_lr:0.0_maxsets:2000_sigmas-v:e011_thr:0.00016_WF:Hessian_PG:new_masks:AMOS-masks_tps:10000000_CamsB:5_resume_ep:1_bs:3072_pos:2/stats_0.npy',
                    # path_stats='Models/id:0_arch:h7_ds:liberty_loss:tripletMargin_mpos:1.0_mneg:1.0_lr:0.0_maxsets:2000_sigmas-v:e011_thr:0.00016_WF:Hessian_PG:new_masks:AMOS-masks_tps:10000000_CamsB:5_resume_ep:1_bs:3072_pos:2/stats_0.npy',
                    # path_stats='Models/id:0_arch:h7_ds:brandenburg_gate_loss:tripletMargin_mpos:1.0_mneg:1.0_lr:0.0_maxsets:2000_sigmas-v:e011_thr:0.00016_WF:Hessian_PG:new_masks:AMOS-masks_tps:10000000_CamsB:5_resume_ep:1_bs:3072_pos:2/stats_0.npy',
                    fraction=0.5,
                    higher=False,
                    middle=False,
                    ):
        """Keep a *fraction* of patch sets ranked by mean per-set edge stats
        (lowest by default, highest with higher=True, near-zero with middle=True)
        and save the filtered dataset next to the original."""
        printc.yellow('\n'.join(['Input arguments:'] + [str(x) for x in sorted(locals().items()) if x[0] != 'self']))
        ds = torch.load(path_ds)
        stats = np.load(path_stats, allow_pickle=True).item()
        e = stats.get('edges_sets')
        c = stats.get('counts_sets')
        if type(ds) == type({}):  # AMOS
            if middle:
                # NOTE(review): should likely be NotImplementedError —
                # NotImplemented is a constant and is not callable.
                raise NotImplemented()
            idxs = np.argsort(e / c)
            if higher:
                idxs = idxs[::-1].copy()
            idxs = idxs[:int(fraction * len(idxs))]
            ds['patch_sets'] = ds['patch_sets'][idxs]
            ds['LAFs'] = ds['LAFs'][idxs]
            ds['cam_idxs'] = ds['cam_idxs'][idxs]
            if 'collisions' in ds.keys():
                ds['collisions'] = [ds['collisions'][i] for i in idxs]
        else:  # liberty
            all_set_ids = np.sort(torch.unique(ds[1]).data.cpu().numpy())
            print('found sets:', len(all_set_ids))
            c = c[all_set_ids]
            e = e[all_set_ids]
            mean_e = e / c  # priority
            if middle:
                mean_e[np.isnan(mean_e)] = -999  # do not pick nans
                aux_idxs = np.logical_and((mean_e > -0.1), (mean_e < 0.1))
                print(aux_idxs)
            else:
                if higher:
                    mean_e[np.isnan(mean_e)] = -999  # do not pick nans
                    aux_idxs = np.argsort(mean_e)[::-1].copy()
                else:
                    mean_e[np.isnan(mean_e)] = 999
                    aux_idxs = np.argsort(mean_e).copy()
                aux_idxs = aux_idxs[:int(fraction * len(aux_idxs))]  # idxs to all_set_ids
            set_ids = set(all_set_ids[aux_idxs])  # picked set_ids
            print('found patches:', len(ds[1]))
            print('selected sets:', len(set_ids))
            idxs = [i for i, c in enumerate(ds[1].data.cpu().numpy()) if c in set_ids]  # idxs to data
            print('selected patches:', len(idxs))
            ds = (ds[0][idxs], ds[1][idxs])
        if middle:
            p_out = splitext(path_ds)[0] + '_middleedges.pt'
        else:
            p_out = splitext(path_ds)[0] + '_fraction:' + str(fraction) + '_higher:' + str(int(higher)) + '.pt'
        print('saving to', p_out)
        torch.save(ds, open(p_out, 'wb'))

    def extract_hps(self,  # extract hpatches
                    dir_in='../hpatches-release',
                    dir_out='Datasets/HPatches',
                    # splits=('illum', 'view'), # only illum, view available
                    splits=['illum'],  # only illum, view available
                    types=("e1", "e2", "e3", "e4", "e5", "ref", "h1", "h2", "h3", "h4", "h5", "t1", "t2", "t3", "t4", "t5"),
                    suffix='all',
                    # types = ("e1", "e2", "e3", "e4", "e5"),
                    # suffix='easy',
                    # types=("h1", "h2", "h3", "h4", "h5"),
                    # suffix='hard',
                    # types=("t1", "t2", "t3", "t4", "t5"),
                    # suffix='tough',
                    exclude=set(),
                    ):
        """Pack HPatches sequences into a single (patches, labels, types) .pt file;
        each sequence image is a vertical strip of square patches."""
        printc.yellow('\n'.join(['Input arguments:'] + [str(x) for x in sorted(locals().items()) if x[0] != 'self']))
        save_path = pjoin(dir_out, '_'.join(["HPs", "-".join(splits), suffix + ".pt"]))
        print("save_path:", save_path)
        print("splits:", splits)
        patches, labels, offset = [], [], 0
        txts = []
        hpatches_sequences = [x[1] for x in os.walk(dir_in)][0]
        pbar = tqdm(hpatches_sequences, total=len(hpatches_sequences))
        for dir in pbar:
            pbar.set_description(dir)
            name = getbase(dir)
            if sum([c[0] + "_" in name for c in splits]) == 0:  # checks for i_, v_
                continue
            if name in exclude:
                print("XXXXX", name)
                continue
            # NOTE: `type` shadows the builtin inside this loop.
            for type in types:
                sequence_path = pjoin(dir_in, dir, type) + ".png"
                image = cv2.imread(sequence_path, 0)
                h, w = image.shape
                # the strip is n_patches squares of side w stacked vertically
                n_patches = int(h / w)
                for i in range(n_patches):
                    patch = image[i * (w): (i + 1) * (w), 0:w]
                    patch = np.array(cv2.resize(patch, (64, 64)), dtype=np.uint8)
                    patches += [patch]
                    labels += [offset + i]
                    txts += [type]
                offset += n_patches
        patches = torch.ByteTensor(np.array(patches, dtype=np.uint8))
        labels = torch.LongTensor(labels)
        print('patches.shape:', patches.shape)
        res = (patches, labels, txts)
        os.makedirs(dir_out, exist_ok=True)
        print("saving to ", save_path)
        torch.save(res, open(save_path, "wb"))
if __name__ == "__main__":
Fire(Interface) | [
"milan.pultar@gmail.com"
] | milan.pultar@gmail.com |
69eab65119326b650d6f33273acdd49888f14e68 | 529425dd1536b6601210aa55a14a844832e06a25 | /0x0A-python-inheritance/7-base_geometry.py | 771287fcfd6e62fc0ee5fad51167604669f5eace | [] | no_license | guilmeister/holbertonschool-higher_level_programming | a6e6985deea72d21af65f05a5c814463287a6de4 | 6024909b7a4fc142f88159b021b5d482111648fc | refs/heads/master | 2020-07-22T21:52:27.344312 | 2020-02-12T04:46:34 | 2020-02-12T04:46:34 | 207,340,511 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 530 | py | #!/usr/bin/python3
class BaseGeometry:
    """Base class providing geometry validation helpers.

    Fixes: removed a redundant `pass` statement and corrected the class and
    area() docstrings (the class is not empty, and area() raises rather
    than prints).
    """

    def area(self):
        """Raise Exception: subclasses must implement area()."""
        raise Exception("area() is not implemented")

    def integer_validator(self, name, value):
        """Validate that *value* is a positive integer.

        Raises:
            TypeError: if value is not exactly of type int (bools rejected).
            ValueError: if value is <= 0.
        """
        if type(value) != int:
            raise TypeError("{} must be an integer".format(name))
        if value <= 0:
            raise ValueError("{} must be greater than 0".format(name))
| [
"ebg.edwardbguillermo@yahoo.com"
] | ebg.edwardbguillermo@yahoo.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.