blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2 values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313 values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107 values | src_encoding stringclasses 20 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.02M | extension stringclasses 78 values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
85a20a5685d762ddec4eeda36978c63036c74206 | 6a01a9287a4c23c7f11b7c5399cfb96bbe42eba8 | /python/scripts/make_id_table_with_diff_expr.py | 314e25e631430921796b32ad7d8d52c104d61aff | [
"MIT"
] | permissive | xguse/gmm-to-gff-transcripts-vs-snps | 3c25bf2752aee76174d5dab92060fe7269caf99f | 75337135ab8ff6d840af3cfccfe6404a06777a54 | refs/heads/master | 2021-01-19T01:50:33.473897 | 2016-08-02T20:31:18 | 2016-08-02T20:31:18 | 54,731,430 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,455 | py | """Describe here what this rule accomplishes."""
import pandas as pd
import numpy as np
# Settings
edger_results_labels = snakemake.params.edger_results_labels
cufflinks_results_labels = snakemake.params.cufflinks_results_labels
# input
edger_results = snakemake.input.edger_results
cufflinks_results = snakemake.input.cufflinks_results
ids_no_diff_expr = snakemake.input.ids_no_diff_expr
#output
ids_with_diff_expr = snakemake.output.ids_with_diff_expr
def load_and_filter_diff_expr_data(path,ids,comparison,program,fdr_thresh):
"""Return new dataframe that has standardized and filtered the DE input tables.
`path` (str):
location of input file
`ids` (dataframe):
with the following columns
- tcons_id
- xloc_id
- gene_id_external
- gene_id_internal
`comparison` (str):
describe the RNA-seq analysis run ('midgut', 'salivary gland', etc)
`program` (str):
one of ['edger', 'cufflinks']
`fdr_thresh` (float):
defining multiple testing significance threshold above which DE tests should NOT be reported
"""
column_conversions = {'edger': {'Gene_Name': 'gene_id_external',
'Gene_ID': 'xloc_id',
'logFC': 'lg2_fc',
'PValue': 'p',
'FDR': 'fdr'},
'cufflinks': {'gene': 'gene_id_external',
'gene_id': 'xloc_id',
'log2.fold_change.': 'lg2_fc',
'p_value': 'p',
'q_value': 'fdr'},
}
keep_columns = ["de_id", "xloc_id", "tcons_id","gene_id_external","gene_id_internal","lg2_fc","p","fdr","comparison","program"]
de_id_program_map = {'edger': 'EDGR',
'cufflinks': 'CUFF',
}
# Load
df = pd.read_csv(path, sep='\t')
# Convert Columns
df = df.rename(columns=column_conversions[program])
# Make missing fdr values NaN
df['fdr'] = df.fdr.apply(lambda i: np.nan if i == '-' else i)
# Filter for fdr
df = df.query(""" fdr <= 0.05 """).copy()
# Add Columns
df['program'] = program
df['comparison'] = comparison
df['de_id'] = generate_de_ids(df=df,
de_type=de_id_program_map[program],
type_mod='|{comparison}'.format(comparison=comparison),
nlen=7)
# Join external and internal IDS
df = pd.merge(left=df, right=ids_no_diff_expr,
how='left',
on=None, left_on=None, right_on=None,
left_index=False, right_index=False,
sort=False, suffixes=('_x', '_y'), copy=True, indicator=False).fillna('-')
# Retain only needed columns
df = df[keep_columns]
# Return dataframe
return df.copy()
def generate_de_ids(df,de_type,type_mod='',nlen=7):
"""Generate unique tracking IDs for each statistical test of diff expr."""
template = '{de}{mod}_{{0:0{nlen}d}}'.format(de=de_type, mod=type_mod, nlen=nlen)
return [template.format(n) for n in range(1,len(df)+1)]
ids_no_diff_expr = pd.read_csv(ids_no_diff_expr)
table_list = []
# Load EDGER DE reults
for name, path in zip(edger_results_labels, edger_results):
df = load_and_filter_diff_expr_data(path=path,
ids=ids_no_diff_expr,
comparison=name,
program='edger',
fdr_thresh=0.05)
table_list.append(df)
# Load CUFFLINKS DE reults
for name, path in zip(cufflinks_results_labels, cufflinks_results):
df = load_and_filter_diff_expr_data(path=path,
ids=ids_no_diff_expr,
comparison=name,
program='cufflinks',
fdr_thresh=0.05)
table_list.append(df)
# Concat all result files into single dataframe
combined = pd.concat(objs=table_list, axis=0)
# Write out the resulting dataframe
combined.to_csv(path_or_buf=ids_with_diff_expr,
sep=',',
header=True, index=False,)
| [
"wadunn83@gmail.com"
] | wadunn83@gmail.com |
1aac5178d925ba1ca51b4526bd8a401ea7bd5b41 | cfa4a826e91ae08244b5f7d1e4039f45bda3c8c5 | /main.py | 918f12774f0492c0dad4d289586617dbde44c2f6 | [] | no_license | avikrakshit/Bloogging | 32ac75709ee6a72bddb8894a5e1ae067dc617c95 | ef949ead082c2fb83ba3c29b3772b1765a6e4b0a | refs/heads/main | 2023-06-02T23:30:28.776755 | 2021-06-22T05:10:35 | 2021-06-22T05:10:35 | 378,813,027 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,922 | py | from flask import Flask, render_template, redirect, url_for, flash, request, session, json
import requests, json
from flask_sqlalchemy import SQLAlchemy
import sqlalchemy
from wtforms.validators import InputRequired, Email, Length
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, BooleanField, SelectField
import base64
import os
import os.path
import boto3
import uuid
import json
import datetime
UPLOAD_FOLDER = 'C:/Users/AVIK/PycharmProjects/Blogging_Website/static/data_stored/'
ALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg', 'JPG', 'PNG', 'JPEG'}
app = Flask(__name__)
app.config['SECRET_KEY'] = 'bloggingwebsitesecretkey'
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql://root:' '@localhost/blogging_project'
db = SQLAlchemy(app)
class Admin_panel(FlaskForm):
username = StringField('uname', validators=[InputRequired()])
password = PasswordField('pswd', validators=[InputRequired()])
class Blog_data(db.Model):
sl_no = db.Column(db.Integer, primary_key = True)
id_no = db.Column(db.String(50), nullable=False)
title = db.Column(db.String(10), nullable=True)
user = db.Column(db.String(10), nullable=True)
created_on = db.Column(db.String(10), nullable=True)
status = db.Column(db.Integer)
date = db.Column(db.String(10), nullable=False)
class Admin_data(db.Model):
user_id = db.Column(db.Integer, primary_key = True)
user_name = db.Column(db.String(10), nullable=True)
password = db.Column(db.String(10), nullable=True)
date_time = db.Column(db.String(10), nullable=True)
@app.route("/")
def home():
return render_template('Home_page.html')
@app.route("/blog")
def blog():
blog = Blog_data.query.filter_by(status=1)
return render_template('blog_page.html', blog=blog)
@app.route("/blog_details/<id_no>")
def blog_details(id_no):
tt = Blog_data.query.filter_by(id_no=id_no)
for i in tt:
title = i.title
s3 = boto3.client(
's3',
aws_access_key_id="ACCESSKEY_ID",
aws_secret_access_key="YOUR_ACCESS_KEY"
)
data = s3.get_object(
Bucket = 'us-east-2.german-bakery.blog',
Key = str(id_no)+'.content'
)['Body'].read()
currdata = json.loads(data.decode('utf-8'))
# actual_data = str(json.dumps(parsed_data, indent=4, sort_keys=True))
# title = currdata["title"]
content = currdata["content"].replace("textarea", "p")
return render_template('blog_details.html', content=content, title=title)
@app.route("/contact_us", methods=['GET', 'POST'])
def contact_us():
return render_template('Contact_Us.html')
@app.route("/about_us")
def about_us():
return render_template('About_Us.html')
@app.route('/admin_panel', methods=['GET', 'POST'])
def admin_panel():
if request.method == 'POST':
username = request.form.get('uname')
password = request.form.get('pswd')
if username == 'admin' and password == 'admin' or username == 'avik' and password == 'avik':
session['username'] = username
return redirect(url_for('admin_blog'))
else:
return"<h1>Wrong USername and Password</h1>"
return render_template('admin_panel.html')
@app.route('/admin_blog', methods=['POST', 'GET'])
def admin_blog():
if 'username' in session:
if request.method == 'POST':
title = request.form.get('title')
print(title)
return render_template('admin_blog.html')
else:
return"<h1>Unauthorised Users</h1>"
@app.route('/create_new_blog', methods=['POST', 'GET'])
def create_new_blog():
if 'username' in session:
if request.method == 'POST':
blog_id = uuid.uuid4()
user = session["username"]
requestBody = request.get_json(force=True)
title = requestBody["title"]
blog = Blog_data(id_no=blog_id, user=user, title=title)
db.session.add(blog)
s3 = boto3.client(
's3',
aws_access_key_id="AKIATP5QTEPV7YAVXCWN",
aws_secret_access_key="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"
)
s3.put_object(
ACL = 'public-read',
Body = '',
Bucket = 'us-east-2.german-bakery.blog',
Key = str(blog_id)+'.content'
)
db.session.commit()
return json.dumps({
"success": 1,
"id": str(blog_id)
})
else:
return "<h1>Unauthorised Users</h1>"
@app.route('/my_blogs', methods=['POST', 'GET'])
def my_blogs():
if 'username' in session:
user = session["username"]
blog = Blog_data.query.filter_by(user=user)
return render_template('admin_my_blogs.html', blog=blog)
else:
return"<h1>Unauthorised Users</h1>"
@app.route('/write_blogs', methods=['POST', 'GET'])
def write_blogs():
if 'username' in session:
sl_no = 'avik'
if request.method == 'POST':
# title = Blog_data.query.filter_by(id_no=id_no)
data = request.form['data']
'''content = request.form.get('data')
author = request.form.get('author')'''
# if 'file' not in request.files:
# print('No file part')
# return redirect(request.url)
# files = request.files.getlist('files[]')
# for file in data :
# if file.data == '':
# return redirect(request.url)
# if file and allowed_file(file.filename):
# file.filename = sl_no
# file.save(os.path.join(app.config['UPLOAD_FOLDER'], str(sl_no)))
# return ('Uploaded Successfully')
# else:
# print("File Allowed", allowed_file(file.filename))
# return ('Upload Failed')
blog = Blog_data(title=title, data=content, author=author)
print(blog)
# db.session.add(blog)
# db.session.commit()
return render_template('write_blog.html')
else:
return"<h1>Unauthorised Users</h1>"
@app.route('/write_blogs_save', methods=['POST','GET'])
def write_blogs_save():
if 'username' in session:
if request.method == 'POST':
requestBody = request.get_json(force=True)
content = requestBody["content"]
blogId = requestBody["id"]
x = datetime.datetime.now()
date = x.strftime("%B") + " " + x.strftime("%d") +","+ " " + x.strftime("%Y")
print(x.strftime("%B") + " " + x.strftime("%d") +","+ " " + x.strftime("%Y"))
data = Blog_data.query.filter_by(id_no = blogId).first()
data.date = date
# dt = Blog_data(date=date)
db.session.add(data)
#bucket: us-east-2.german-bakery.blog
#key: data/<id>.content
#Access Key Id: AKIATP5QTEPV7YAVXCWN
#Secret: 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
s3 = boto3.client(
's3',
aws_access_key_id="AKIATP5QTEPV7YAVXCWN",
aws_secret_access_key="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"
)
s3.put_object(
ACL = 'public-read',
Body = json.dumps(requestBody),
Bucket = 'us-east-2.german-bakery.blog',
Key = blogId+'.content'
)
# dt = Blog_data(date=date)
# db.session.add(dt)
db.session.commit()
# f = open("myfile.txt", "r")
# print(f.read())
# f.close()
#data = request.json
# value =data.json()
#print(data)
return "<h1> submitted successfully"
else:
return "unauthorised users"
@app.route('/write_blogs_publish', methods=['POST','GET'])
def write_blogs_publish():
if 'username' in session:
if request.method == 'POST':
requestBody = request.get_json(force=True)
blogId = requestBody["id"]
data = Blog_data.query.filter_by(id_no = blogId).first()
data.status = 1
db.session.add(data)
db.session.commit()
return "<h1> Published Successfully </h1>"
else:
return "unauthorised users"
@app.route('/write_blogs_unpublish', methods=['POST','GET'])
def write_blogs_unpublish():
if 'username' in session:
if request.method == 'POST':
requestBody = request.get_json(force=True)
blogId = requestBody["id"]
data = Blog_data.query.filter_by(id_no = blogId).first()
data.status = 2
db.session.add(data)
db.session.commit()
return "<h1> Blog has been Unpublished successfully. </h1>"
else:
return "unauthorised users"
@app.route('/edit_blogs/<int:sl_no>', methods=['POST', 'GET'])
def edit_blogs(sl_no):
if 'username' in session:
if request.method == 'POST':
title = request.form.get('title')
print(title)
data = request.form.get('content')
print(datatitle)
author = request.form.get('author')
print(author)
blog = Blog_data.query.filter_by(sl_no=sl_no)
blog.title = title
blog.data = data
blog.author = author
return redirect(url_for('my_blogs'))
else:
return"<h1>Unauthorised Users</h1>"
#this is for upload images
@app.route('/image_upload/<int:sl_no>', methods=['GET', 'POST'])
def image_upload(sl_no):
if 'username' in session:
# check if the post request has the file part
if 'file' not in request.files:
print('No file part')
return redirect(request.url)
file = request.files['file']
# if user does not select file, browser also
# submit a empty part without filename
if file.filename == '':
print('No selected file')
return redirect(request.url)
print("Filename bfore validation: ", file.filename)
if file and allowed_file(file.filename):
filename = secure_filename(file.filename)
file.filename = sl_no
file.save(os.path.join(app.config['UPLOAD_FOLDER'], str(sl_no)))
print(filename)
return ('Uploaded Successfully')
else:
print("File Allowed", allowed_file(file.filename))
return ('Upload Failed')
else:
return "<h1> You are not allowed to enter.</h1>"
return render_template('upload.html')
@app.route('/logout', methods=['GET', 'POST'])
def logout():
if 'username' in session:
session.pop('username', None)
return redirect(url_for('admin_panel'))
if __name__ == '__main__':
app.run(debug=True, use_reloader=False)
| [
"noreply@github.com"
] | noreply@github.com |
b59ce65b85cf6eb61a55b8b0051b39656e0ef0cf | e113983ca070653992187c49e1bc4b0f19e5a61a | /hw4/NEW_hw4_2.py | 55572354c28f906fa0053ad51704095e27e78ecd | [] | no_license | dada8704/ML | b9640e0f9327737a37abee93915567341ee64839 | d1578b61aab95da80f98bfab50826320c3562728 | refs/heads/master | 2023-04-20T03:56:37.504892 | 2020-12-28T13:40:47 | 2020-12-28T13:40:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,134 | py | import numpy as np
#import argparse
from gaussian import *
from UTIL import *
from MNIST import *
from New_EM import *
train_label_file = "file/train-labels-idx1-ubyte"
train_image_file = "file/train-images-idx3-ubyte"
input_N = 60000
lamBda = np.ones(10)
probability = np.random.rand(28 * 28, 10)
hidden_W = np.ones((input_N, 10))
#Binomial_matrix = Binomial_matrix
jimmy = 100 # big number!!
Label_fptr = Get_label_fptr(label_file = "file/train-labels-idx1-ubyte")
Binomial_matrix = Get_Binomial(train_image_file = train_image_file)
old_lamBda = np.zeros(10)
for iter_ in range(30):
E_step(Binomial_matrix, input_N = input_N, lamBda = lamBda, probability = probability, hidden_W = hidden_W)
M_step(Binomial_matrix, lamBda, hidden_W, probability, input_N)
print_P(probability)
norm = np.linalg.norm(old_lamBda - lamBda)
print("No. of Iteration: ", iter_, " , Difference:", norm, "\n")
print("-" * 73)
old_lamBda = copy.deepcopy(lamBda)
if iter_ > 5 and norm < 0.001:
break
#GroundTruth = Test(Binomial_matrix, Label_fptr, label, probability = probability)
gt = Cal_from_W(hidden_W, Label_fptr, input_N)
for iter_y in range(10):
for iter_x in range(10):
print(int(gt[iter_y][iter_x]), end = " ")
print()
print("GTTTT", gt)
tmp = copy.deepcopy(gt)
RRRow = np.zeros(10)
CCCol = np.zeros(10)
for i in range(10):
now = tmp.argmax()
row = int(now / 10)
col = now % 10
print(row, " --> ", col)
RRRow[i] = row
CCCol[i] = col
tmp[row] = 0
tmp[:, col] = 0
for i in range(10):
RrR = (np.where(RRRow == i))[0][0]
CcC = int(CCCol[RrR])
A = gt[i][CcC]
B = gt[:, CcC].sum() - A
C = gt[i].sum() - A
D = 60000 - (B + C) + A
print("Confusion Matrix", i, ":")
print("Confusion Matrix:\n Predict number ", i, " Predict cluster ", i)
print("Is number ", i, " ", A, " ", B)
print("Isn't number ", i, " ", C, " ", D, "\n")
print("Sensitivity (Successfully predict cluster 1): ", A / (A + B))
print("Specificity (Successfully predict cluster 2): ", D / (C + D))
| [
"dada0423.cs05@g2.nctu.edu.tw"
] | dada0423.cs05@g2.nctu.edu.tw |
f258f81afafb2186624f0028d7416f7aca37869d | 3114430ce15c18281117459e26eea4b774e3998a | /day4/accounts/models.py | 1fd9d1bf8a13f354846f792bd07b42ea810b5486 | [
"MIT"
] | permissive | Joseamica/Easily-written-Django | c02e7333e84ca2257b7b8bfae3f6732898c5000a | 0b746638751702c453db9490fe29ef6d34e4a3bc | refs/heads/master | 2021-05-27T20:25:41.341149 | 2014-05-25T08:25:53 | 2014-05-25T08:25:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 445 | py | from django.db import models
from django.contrib.auth.models import User
import hashlib
# Create your models here.
class Account(models.Model):
user = models.OneToOneField(User)
def gravatar_url(self):
return "http://www.gravatar.com/avatar/%s?s=50" % hashlib.md5(self.user.email).hexdigest()
def __unicode__(self):
return self.user
User.profile = property(lambda u: UserProfile.objects.get_or_create(user=u)[0])
| [
"carpedm20@gmail.com"
] | carpedm20@gmail.com |
538fc3e6a7b554c75a45025f802bf9fb341dae19 | d6e287bbba11be4906e599d1362c9ef89c4fb9de | /modules/utils/datasets/__init__.py | 53671ef9604559f6da0848293411281007d9f83b | [
"MIT"
] | permissive | bityangke/WSDDN.pytorch-1 | 67d52f158238f2d5b234ddefeb7f05f06bf6b123 | 9a67323c80566cacc762c68021824aa80a82c524 | refs/heads/master | 2022-11-15T00:22:44.903418 | 2020-07-06T13:43:22 | 2020-07-06T13:43:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 201 | py | '''import all'''
from .Sampler import GroupSampler
from .VOCDataset import VOCDataset
from .Builder import buildDataloader
'''define alll'''
__all__ = ['GroupSampler', 'VOCDataset', 'buildDataloader'] | [
"1159254961@qq.com"
] | 1159254961@qq.com |
d611e87b266429bc40bdd207cf867a0748666af8 | 414015d52ccbb062397ec3d8d7fdc0b0241c35c6 | /hejasverige/kollkoll/eventsubscribers.py | 742f627aa31343bafd0009722843bfade9c8e10d | [] | no_license | Adniel/hejasverige.kollkoll | d57e2e6c8b748226e7ce137a59961049b6027b6f | c9268a3b8616e1fdcf4b3edbe3370aca5bec1ff6 | refs/heads/master | 2021-01-01T19:15:03.457302 | 2014-08-21T15:08:52 | 2014-08-21T15:08:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,733 | py | # -*- coding: utf-8 -*-
from five import grok
from Products.PluggableAuthService.interfaces.events import IUserLoggedInEvent
from hejasverige.kollkoll.kollkoll import Kollkoll
import logging
logger = logging.getLogger(__name__)
@grok.subscribe(IUserLoggedInEvent)
def getKollkollAccount(event):
"""Creates an account in Kollkoll
when users signs in (if not already present).
"""
logger.info("Will make sure %s has a kollkoll account." % event.principal.getId())
kollkoll = Kollkoll()
#import pdb; pdb.set_trace()
personal_id = event.principal.getProperty('personal_id')
if type(personal_id).__name__ == 'object':
personal_id = None
fullname = event.principal.getProperty('fullname')
if type(fullname).__name__ == 'object':
personal_id = None
email = event.principal.getProperty('email')
if type(email).__name__ == 'object':
personal_id = None
if personal_id:
try:
result = kollkoll.listUsers()
personal_ids = [x.get('uid') for x in result]
#import pdb; pdb.set_trace()
if personal_id in personal_ids:
logger.info('User present in kollkoll. Nothing is added')
else:
# user had no account in Kollkoll
# create account
logger.info('User not present in kollkoll. User %s added.' % (personal_id))
result = kollkoll.addUser(fn=fullname, ln='', uid=personal_id, email=email)
except Exception, ex:
logger.exception('Unable to access Kollkoll. User account (%s) could not be checked: %s' % (personal_id, str(ex)))
# problems accessing the bank
pass
return
| [
"daniel.grindelid@gmail.com"
] | daniel.grindelid@gmail.com |
a6b84c2d10e63e38d26824559d3c6ed3808ab651 | a4224c0b25cdb864a62a8d05cfc5579424e57326 | /06_number_checker_v1.py | 2080df98d9ee01c820f7e474e9731bd05a850d47 | [] | no_license | ST17013/01_Temperature_Convertor | 53e951b646c59a87900e36f047f19f5dc647bd97 | 0d9bf35572c127cf4bec886c49288908731b1039 | refs/heads/master | 2022-12-10T00:57:51.925027 | 2020-09-07T00:04:58 | 2020-09-07T00:04:58 | 286,612,740 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 560 | py | #Code to check that number is valid...
def temp_check(low):
valid = False
while not valid:
try:
response = float(input("Enter a number: "))
if response < low:
print("Too Cold!")
else:
return response
except ValueError:
print("Please eneter a number")
#main routine
#run this code twice (for two valid responses in test plan)
number = temp_check(-273)
print("You chose {}".format(number))
number = temp_check(-459)
print("You chose {}".format(number)) | [
"69490737+ST17013@users.noreply.github.com"
] | 69490737+ST17013@users.noreply.github.com |
9c0ea2723caeb9ce706976b7c6a0f26a4b82e84a | 13febd474a8149d7036621cc1b423f61f433d5d7 | /dicoms/logging_indexer.py | 371b0b2c79f68c3c76a419745af6e37cc0296a4f | [
"BSD-3-Clause"
] | permissive | carpensa/dicom-harpooner | 6d9c4a56256b097e5039b4d88e528838e94f65db | 2d998c22c51e372fb9b5f3508c900af6f4405cd3 | refs/heads/master | 2022-12-29T17:43:08.906780 | 2020-06-27T00:45:51 | 2020-06-27T00:45:51 | 282,060,497 | 1 | 0 | BSD-3-Clause | 2020-07-23T21:37:40 | 2020-07-23T21:37:39 | null | UTF-8 | Python | false | false | 11,679 | py | import os
import pathlib
import sys
import time
import traceback
# importing django specific modules
import django
import pydicom
import pytz
from dateutil import parser as dateparser
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'ImageSearcher.ImageSearcher.settings')
django.setup()
#from ImageSearcher.dicoms.models import Session, Subject, Series
from dicoms.models import Session, Subject, Series
from django.template.defaultfilters import slugify
from django.utils import timezone
path_log_file = '/group_shares/fnl/bulk/code/internal/GUIs/ImageSearcher_dev/paths_log_'
subject_log_file = '/group_shares/fnl/bulk/code/internal/GUIs/ImageSearcher_dev/subjects_log_'
def clean_name(name):
return name.replace('_', '').replace('-', '').lower()
def index_dicoms(root_dir):
"""Index Structure:
Key: PatientID
Value: {}, keys are AcquisitionDate of series, values are
{} with keys:
path
dcmcount: count of dcm files found
desc: SeriesDescription from the .dcm metadata
time: AcquisitionTime from the .dcm metadata
series: SeriesNumber from the .dcm metadata
"""
try:
start_time = time.time()
dcms_read_count = 0
session_dict = {}
len_session_dict = len(session_dict)
directories_searched_count = 0
old_subject_id = ''
fast_mode = True
do_once = True
print("indexing {}".format(root_dir))
for root, dirs, files in os.walk(root_dir):
directories_searched_count += 1
log_path = str(path_log_file) + str(root_dir).replace('/', '_') + str('.txt')
with open(log_path, 'a+') as outfile:
print(root, file=outfile)
if directories_searched_count % 10 == 0:
print("Directories Searched: {}".format(directories_searched_count))
for f in files:
try:
pydicom = pydicom.dcmread(os.path.join(root, f))
isdicom = True
# this is a new one, some of these dicoms don't have a PatientID attribute
check_patient_id = dicom.PatientID
log_path2 = str(subject_log_file) + str(root_dir) + str('.txt')
with open(log_path2, 'a+') as outfile:
print(check_patient_id, file=outfile)
except (dicom.errors.InvalidDicomError, FileNotFoundError, PermissionError, AttributeError, OSError):
isdicom = False
if isdicom:
#for key, value in dicom.items():
# print(key, ':', value)# initializing this variable once.
if do_once:
old_subject_entry = pydicom.dcmread(os.path.join(root, f))
do_once = False
dcms_read_count += 1
test_subject_entry = pydicom.dcmread(os.path.join(root, f))
new_subject_id = test_subject_entry.PatientID
len_session_dict = len(session_dict)
if (new_subject_id != old_subject_id and session_dict != {}) \
or (len(files) == 0 and session_dict != {}):
average_paths = []
for k, v in session_dict.items():
average_paths.append(k)
# finding the average path of all fo the series in a session
average_path = os.path.commonpath(average_paths)
session_folder = pathlib.Path(average_path)
# finding folder permissions
session_group = session_folder.group()
session_owner = session_folder.owner()
# **************************************************************************************************
# Insert populating models here
try:
# """This is hacky, you need to fix this."""
new_subject, sc = Subject.objects.get_or_create(SubjectID=old_subject_id,
slug=slugify(old_subject_id))
except django.db.utils.IntegrityError:
traceback.print_exc(file=sys.stdout)
pass
# creating a session object
try:
new_session, new_ses_status = Session.objects.get_or_create(Subject=new_subject,
Path=average_path,
owner=session_owner,
group=session_group)
aware = dateparser.parse(old_subject_entry.SeriesDate).replace(tzinfo=pytz.UTC)
new_session.SessionDate = aware
new_session.StudyDescription = old_subject_entry.StudyDescription
new_session.save()
except (AttributeError, django.db.utils.IntegrityError) as err:
# traceback.print_exc(file=sys.stdout)
new_session.save()
pass
# creating each series that took place during a session
for kwargs in session_dict.values():
try:
new_series, ns = Series.objects.get_or_create(**kwargs, Session=new_session, Subject=new_subject)
new_series.save()
except django.db.utils.IntegrityError:
traceback.print_exc(file=sys.stdout)
# **************************************************************************************************
# End populating models
session_dict = {}
old_subject_id = new_subject_id
old_subject_entry = test_subject_entry
else:
old_subject_id = new_subject_id
old_subject_entry = test_subject_entry
session_dict[root] = {}
new_len_session_dict = len(session_dict) # keeping track of the session dict, if it stops changing then
# we want to make sure to update the database with an entry.
# Below is a list of data that we're trying to extract from the dicom,
# we'll pass this list into a getattr try except, because occasionally
# some dicom's won't have the data we're looking for
dicom_data_we_want = ['SeriesDescription',
'StudyDescription',
'ImageType',
'SeriesNumber',
'PatientID',
'SeriesDate',
'StudyDate'
]
for attribute in dicom_data_we_want:
try:
if attribute is 'ImageType':
value = getattr(test_subject_entry, attribute)
value = str(value._list)
session_dict[root][attribute] = value
elif 'Date' in attribute:
value = getattr(test_subject_entry, attribute)
value = dateparser.parse(str(value)).replace(tzinfo=pytz.UTC)
session_dict[root][attribute] = value
else:
value = getattr(test_subject_entry, attribute)
session_dict[root][attribute] = str(value)
except (AttributeError, ValueError):
pass
finally:
session_dict[root]['IndexedDate'] = timezone.now()
session_dict[root]['Path'] = root
if fast_mode:
break
#### MODULARIZE THIS!!! this code previously failed to update the database if there was only one
# subject
#if len(files) == 0 and session_dict != {}:
if session_dict != {}:
print(files, session_dict)
average_paths = []
for k, v in session_dict.items():
average_paths.append(k)
# finding the average path of all fo the series in a session
average_path = os.path.commonpath(average_paths)
session_folder = pathlib.Path(average_path)
# finding folder permissions
session_group = session_folder.group()
session_owner = session_folder.owner()
# **************************************************************************************************
# Insert populating models here
try:
# """This is hacky, you need to fix this."""
new_subject, sc = Subject.objects.get_or_create(SubjectID=old_subject_id,
slug=slugify(old_subject_id))
print("Found new subject {}".format(new_subject))
except django.db.utils.IntegrityError:
traceback.print_exc(file=sys.stdout)
pass
# creating a session object
try:
new_session, new_ses_status = Session.objects.get_or_create(Subject=new_subject,
Path=average_path,
owner=session_owner,
group=session_group)
aware = dateparser.parse(old_subject_entry.SeriesDate).replace(tzinfo=pytz.UTC)
new_session.SessionDate = aware
new_session.StudyDescription = old_subject_entry.StudyDescription
new_session.save()
print("Found new session: {}".format(new_session.Path))
except (AttributeError, django.db.utils.IntegrityError) as err:
# traceback.print_exc(file=sys.stdout)
new_session.save()
pass
# creating each series that took place during a session
for kwargs in session_dict.values():
try:
new_series, ns = Series.objects.get_or_create(**kwargs, Session=new_session, Subject=new_subject)
new_series.save()
except django.db.utils.IntegrityError:
traceback.print_exc(file=sys.stdout)
print("Elapsed Time: {}, {} dicoms checked in {} directories".format(
time.time() - start_time,
dcms_read_count,
directories_searched_count))
except (ValueError, IsADirectoryError):
pass
return 1
if __name__ == "__main__":
index_dicoms("/dicom/2013")
index_dicoms("/dicom/2014")
index_dicoms("/dicom/2015")
| [
"anthony.e.galassi@gmail.com"
] | anthony.e.galassi@gmail.com |
0a390ae66c096ec7b6e7b0aff70e9f8e2f83aec5 | 68d38b305b81e0216fa9f6769fe47e34784c77f2 | /alascrapy/spiders/tomsguide_fr.py | 512927ada4008f9bf18b66205719a64e70329068 | [] | no_license | ADJet1437/ScrapyProject | 2a6ed472c7c331e31eaecff26f9b38b283ffe9c2 | db52844411f6dac1e8bd113cc32a814bd2ea3632 | refs/heads/master | 2022-11-10T05:02:54.871344 | 2020-02-06T08:01:17 | 2020-02-06T08:01:17 | 237,448,562 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,269 | py | # -*- coding: utf8 -*-
from datetime import datetime
import re
from scrapy.http import Request, HtmlResponse
from scrapy.selector import Selector
from alascrapy.spiders.base_spiders.ala_spider import AlaSpider
from alascrapy.spiders.base_spiders.bazaarvoice_spider import BVNoSeleniumSpider
from alascrapy.lib.generic import get_full_url, date_format
import alascrapy.lib.dao.incremental_scraping as incremental_utils
from alascrapy.items import CategoryItem, ProductItem, ReviewItem, ProductIdItem
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from alascrapy.lib.selenium_browser import SeleniumBrowser
class Tomsguide_frSpider(AlaSpider):
    """Spider that scrapes product reviews from tomsguide.fr.

    ``parse`` walks the paginated listing of review articles and
    ``level_2`` extracts a category, a product and a review item from
    each article page.
    """
    name = 'tomsguide_fr'
    allowed_domains = ['tomsguide.fr']
    start_urls = ['http://www.tomsguide.fr/articles/tests/']

    def parse(self, response):
        """Follow the pagination link and queue every review article.

        The template-generated filter blocks that matched URLs against
        an empty regular expression were removed: with an empty pattern
        they unconditionally hit their ``return``/``continue`` branches,
        so the spider never yielded any request.
        """
        original_url = response.url
        product = response.meta.get("product", {})
        review = response.meta.get("review", {})

        # Follow the "next page" link of the listing, if any.
        next_url = self.extract(response.xpath(
            "(//ul[@class='pager']//li)[last()]/a/@href"))
        if next_url:
            request = Request(get_full_url(original_url, next_url),
                              callback=self.parse)
            request.meta["product"] = product
            request.meta["review"] = review
            yield request

        # Queue each review article found on this listing page.
        urls_xpath = "//ul[@class='listing-items']/li/div/div[1]/a/@href"
        for single_url in self.extract_list(response.xpath(urls_xpath)):
            request = Request(get_full_url(original_url, single_url),
                              callback=self.level_2)
            request.meta["product"] = product
            request.meta["review"] = review
            yield request

    def level_2(self, response):
        """Extract a category, a product and a review from an article page."""
        original_url = response.url

        category = CategoryItem()
        category['category_url'] = original_url
        category['category_leaf'] = self.extract(response.xpath(
            "(//ul[@class='breadcrumb']/li//text())[last()-1]"))
        category['category_path'] = self.extract_all(response.xpath(
            "//ul[@class='breadcrumb']/li//text()"), ' | ')
        if self.should_skip_category(category):
            return
        yield category

        product_xpaths = {
            "source_internal_id": "//link[@rel='canonical']/@href",
            "ProductName": "//span[@class='sbs-header-title']//text()",
            "OriginalCategoryName": "//ul[@class='breadcrumb']/li//text()",
        }
        product = self.init_item_by_xpaths(response, "product", product_xpaths)
        product['TestUrl'] = original_url

        # Normalise protocol-relative and site-relative picture URLs.
        # (elif prevents a "//host/..." URL from being resolved twice.)
        picurl = product.get("PicURL", "")
        if picurl and picurl[:2] == "//":
            product["PicURL"] = "https:" + picurl
        elif picurl and picurl[:1] == "/":
            product["PicURL"] = get_full_url(original_url, picurl)

        # Prefer the assembled breadcrumb path as the category name.
        # (The template's fallback assigned the XPath string itself to
        # OriginalCategoryName, which was clearly wrong and is removed.)
        if category.get('category_path'):
            product["OriginalCategoryName"] = category['category_path']

        review_xpaths = {
            "source_internal_id": "//link[@rel='canonical']/@href",
            "ProductName": "//span[@class='sbs-header-title']//text()",
            "SourceTestRating": "//div[@class='p-u-1-3 inner10 review-bar-rating']//text()",
            "TestDateText": "//div[@class='author nolinks']//time[@itemprop='datePublished']//text()",
            "TestPros": "(//div[@class='sbs-advice-title'])[1]/following-sibling::ul/li//text()",
            "TestCons": "(//div[@class='sbs-advice-title'])[2]/following-sibling::ul/li//text()",
            "TestVerdict": "//span[@class='sbc-advice-text']//text()",
            "Author": "//div[@class='author nolinks']//span[@itemprop='author']//text()",
            "TestTitle": "//h1[@itemprop='headline']//text()",
        }
        review = self.init_item_by_xpaths(response, "review", review_xpaths)
        review['TestUrl'] = original_url
        if 'ProductName' in product:
            review['ProductName'] = product['ProductName']
        if 'source_internal_id' in product:
            review['source_internal_id'] = product['source_internal_id']

        # Normalise the award picture URL the same way as PicURL.
        awpic_link = review.get("AwardPic", "")
        if awpic_link and awpic_link[:2] == "//":
            review["AwardPic"] = "https:" + awpic_link
        elif awpic_link and awpic_link[:1] == "/":
            review["AwardPic"] = get_full_url(original_url, awpic_link)

        # The canonical URL ends ".../<numeric id>.html"; keep the number.
        for item in (product, review):
            sid = item.get("source_internal_id", "")
            if sid:
                matches = re.search(r"(\d+)(?=\.html)", sid, re.IGNORECASE)
                if matches:
                    item["source_internal_id"] = matches.group(1)

        # A rating like "8/10": keep the numerator; the scale is fixed below.
        rating = review.get("SourceTestRating", "")
        if rating:
            matches = re.search(r"(\d+)(?=\/)", rating, re.IGNORECASE)
            if matches:
                review["SourceTestRating"] = matches.group(1)

        # Drop the trailing "HH:MM"-style time from the date text, then
        # strip dots/whitespace and parse it as a French "%d %B %Y" date.
        date_text = review.get("TestDateText", "")
        if date_text:
            matches = re.search(r"(.*)(?=\d{2}:)", date_text, re.IGNORECASE)
            if matches:
                review["TestDateText"] = matches.group(1)
        if review.get("TestDateText"):
            cleaned = review["TestDateText"].lower().replace(".", "").strip()
            review["TestDateText"] = date_format(cleaned, "%d %B %Y", ["fr"])

        review["SourceTestScale"] = "10"
        review["DBaseCategoryName"] = "PRO"
        yield product
        yield review
| [
"liangzijie1437@gmail.com"
] | liangzijie1437@gmail.com |
70d7dccba6ea9c322a07043d842c08b429beaa8b | 24b6d039268249fefdd18a8644a7dff4d6a80108 | /checks/resolve-db-numbers-2019-02-22/check_all_timeseries_fails_excluded.py | cf20c6dde48f52136875def0a8b073a2a2c5ce4d | [] | no_license | cp4cds/qcapp | d774a80145abf4eee0182d6451e0f70c40f16d1e | 636ef525514202e679af12e27729a8aaab304e7f | refs/heads/master | 2021-01-12T07:22:41.172118 | 2020-03-20T16:43:58 | 2020-03-20T16:43:58 | 76,947,327 | 0 | 0 | null | 2020-03-11T12:53:18 | 2016-12-20T10:31:19 | Python | UTF-8 | Python | false | false | 2,640 | py |
from setup_django import *
import os
import sys
import re
import settings
import subprocess
import utils
import shutil
# Root of the quality-controlled (QC'd) data archive.
QCD_DIR = '/alpha/c3scmip5/'
# Root of the raw CMIP5 archive that failing datasets are moved back to.
RAW_DIR = '/cmip5_raw/'
def check_timeseries_errors():
    """Report files of TIME-SERIES-failing datasets not yet under /cmip5_raw.

    For every dataset that has at least one QC error of check type
    'TIME-SERIES', print each of its data files whose ``gws_path`` does
    not already point into the raw archive.
    """
    for ds in Dataset.objects.all():
        # Example of narrowing to a single dataset while debugging:
        # ds = Dataset.objects.filter(variable='pr', frequency='day', model='IPSL-CM5A-LR', experiment='amip', ensemble='r2i1p1').first()
        dfs = ds.datafile_set.all()
        ts_error = list(ds.qcerror_set.filter(check_type='TIME-SERIES'))
        if ts_error:
            for df in dfs:
                # Files already under a 'cmip5_raw' path are assumed handled.
                if not re.search('cmip5_raw', df.gws_path):
                    print df
def convert_paths():
    """Collapse logged file paths to their unique parent directories.

    Reads one file path per line from ``timeseries_error_list.log``,
    strips the last two path components from each, and appends the
    resulting unique directory paths to ``dir_errors.log``, one per
    line.
    """
    with open('timeseries_error_list.log') as r:
        filepaths = [line.strip() for line in r]

    # Drop the final two components (e.g. version dir and filename --
    # TODO confirm the layout) and deduplicate with a set.
    uniq_paths = {'/'.join(path.split('/')[:-2]) for path in filepaths}

    # Open the output file once, instead of re-opening it per path.
    with open('dir_errors.log', 'a+') as w:
        w.writelines("{}\n".format(up) for up in uniq_paths)
def move_timeseries_fail_datasets():
    """Move failing dataset directories from the QC'd to the raw archive.

    Reads directory paths from ``ds_to_be_retracted_esgf_2019-02-13.log``
    (the first line is skipped -- presumably a header; TODO confirm),
    moves each directory from QCD_DIR to the mirrored RAW_DIR location,
    and updates the database records.  Directories that already exist at
    the destination are reported rather than moved.
    """
    with open('ds_to_be_retracted_esgf_2019-02-13.log') as r:
        dirs = [line.strip() for line in r]
    for src in dirs[1:]:
        # Mirror the source path under the raw archive root.
        dst = src.replace(QCD_DIR, RAW_DIR)
        if not os.path.isdir(dst):
            print "MOVE ", src, dst
            shutil.move(src, dst)
            # Flag the dataset and repoint its files in the database.
            update_database_records(src)
        else:
            # Destination exists: requires manual cleanup of the source.
            print "ERROR dir exists ", dst
            print "ERROR rm src ", src
def update_database_records(path):
    """Mark the dataset at ``path`` as QC-failed and repoint its files.

    The DRS facets (model, experiment, frequency, realm, table,
    ensemble, variable) are parsed from fixed path components, the
    matching dataset is flagged ``qc_passed=False``, and each of its
    files is re-flagged from its remaining QC errors and has its
    ``gws_path`` rewritten from QCD_DIR to RAW_DIR.
    """
    # Components 9.. hold the facets -- assumes the fixed /alpha/c3scmip5/
    # directory depth; TODO confirm for paths under other roots.
    model, exp, freq, realm, table, ens, var = path.split('/')[9:]
    # NOTE(review): version 'v20181201' is excluded from the match --
    # presumably a replacement version that must keep its QC status.
    ds = Dataset.objects.filter(model=model, experiment=exp, frequency=freq, realm=realm, cmor_table=table,
                                ensemble=ens, variable=var).exclude(version='v20181201').first()
    print ds
    ds.qc_passed = False
    ds.save()
    for df in ds.datafile_set.all():
        # Axis-attribute errors (code 4) are apparently treated as benign
        # and do not fail a file on their own.
        errors = df.qcerror_set.exclude(error_msg__icontains='ERROR (4): Axis attribute')
        if errors: df.qc_passed = False
        else: df.qc_passed = True
        df.gws_path = df.gws_path.replace(QCD_DIR, RAW_DIR)
        df.save()
def convert_dirs_to_datset_ids():
    """Print the c3s-cmip5 dataset id for each directory in the retract log."""
    with open('ds_to_be_retracted_esgf_2019-02-13.log') as r:
        dirs = [line.strip() for line in r]
    # NOTE(review): unlike move_timeseries_fail_datasets() the first line
    # is NOT skipped here -- confirm that is intended.
    for dir in dirs:
        # Look the directory up under its raw-archive location.
        dir = dir.replace(QCD_DIR, RAW_DIR)
        df = DataFile.objects.filter(gws_path__icontains=dir).first()
        ds = df.dataset
        print ds.dataset_id.replace("CMIP5", "c3s-cmip5")
if __name__ == "__main__":
    # One-off maintenance steps; earlier stages are kept commented out
    # for reference and can be re-enabled individually.
    # convert_paths()
    # check_timeseries_errors()
    # move_timeseries_fail_datasets()
    convert_dirs_to_datset_ids()
"rpetrie@jasmin-sci4.ceda.ac.uk"
] | rpetrie@jasmin-sci4.ceda.ac.uk |
c166e51af99086d7817c7710559fdd0ec02d5968 | 00fc8b7c733f332fd3d744b53e111be88bbebcb4 | /Bricks/paddle.py | 46fc1d25b409c16da23bf236cc6d50691b5f281c | [] | no_license | MartinSlaa/Retrobia | 484e47a54b928a714c5ac522a59d0176b409a626 | f4009dc0b7d33abe4f15d92fdc7ddf987b67c47a | refs/heads/master | 2023-03-30T19:19:36.631553 | 2021-04-07T10:46:05 | 2021-04-07T10:46:05 | 339,413,790 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,260 | py | import pygame
# RGB black: used both as the sprite surface fill and as the transparent
# colour key below.
BLACK = (0, 0, 0)
class Paddle(pygame.sprite.Sprite):
    """A rectangular player paddle sprite.

    The paddle is drawn once onto its own surface, with black treated as
    transparent; movement shifts ``rect.x`` while keeping the paddle on
    screen.
    """

    def __init__(self, color, width, height):
        # Initialise the pygame sprite machinery first.
        super().__init__()

        # Build the paddle's surface and make black pixels transparent.
        surface = pygame.Surface([width, height])
        surface.fill(BLACK)
        surface.set_colorkey(BLACK)
        pygame.draw.rect(surface, color, [0, 0, width, height])
        self.image = surface

        # rect tracks the paddle's on-screen position and size.
        self.rect = self.image.get_rect()
        self._w = width
        self._h = height

    @property
    def position(self):
        """Current (x, y) of the paddle's top-left corner."""
        return self.rect.x, self.rect.y

    @property
    def width(self):
        """Paddle width in pixels, as given at construction."""
        return self._w

    def moveLeft(self, pixels):
        """Shift left by ``pixels``, clamped at the left edge (x == 0)."""
        self.rect.x = max(self.rect.x - pixels, 0)

    def moveRight(self, pixels):
        """Shift right by ``pixels``, clamped at x == 700.

        NOTE(review): 700 looks like screen_width - paddle_width for an
        800-pixel window and 100-pixel paddle -- confirm.
        """
        self.rect.x = min(self.rect.x + pixels, 700)
"xfriko@gmail.com"
] | xfriko@gmail.com |
a00fe24421f074387147598994b6b7f2758f044f | da8b01aae6f76ac21a971b15537e5f042ef62dca | /venv/Lib/site-packages/skyfield/positionlib.py | 31a7887cb990935ab6193f5901fa4cce57e05c43 | [] | no_license | zxm256/ScheduleBackend | b225ffc2375be42ca1f997551107aed04359b2c7 | d9e64da11d1b282c5cbd43d572e2e96ae194608b | refs/heads/master | 2023-02-27T07:42:43.004547 | 2021-01-24T13:42:15 | 2021-01-24T13:47:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 32,979 | py | # -*- coding: utf-8 -*-
"""Classes representing different kinds of astronomical position."""
from numpy import array, einsum, full, reshape, nan, nan_to_num
from . import framelib
from .constants import ANGVEL, AU_M, C, ERAD, DAY_S, RAD2DEG, tau
from .data.spice import inertial_frames
from .descriptorlib import reify
from .earthlib import compute_limb_angle
from .functions import (
_T, _to_array, angle_between, from_spherical,
length_of, mxm, mxv, rot_z, to_spherical,
)
from .geometry import intersect_line_and_sphere
from .relativity import add_aberration, add_deflection
from .timelib import Time
from .units import Angle, Distance, Velocity, _interpret_angle
# SPICE rotation matrix for the J2000 ecliptic inertial frame.
_ECLIPJ2000 = inertial_frames['ECLIPJ2000']
_GIGAPARSEC_AU = 206264806247096.38  # 1e9 * 360 * 3600 / tau
def build_position(position_au, velocity_au_per_d=None, t=None,
                   center=None, target=None):
    """Return a position object of the class appropriate to ``center``.

    The Solar System barycenter (0) produces a `Barycentric` position,
    the geocenter (399) a `Geocentric` one, and a center offering a
    ``rotation_at()`` method — and therefore deserving an ``altaz()``
    method — a `Geometric` position; anything else gets a plain `ICRF`.
    """
    if center == 0:
        position_class = Barycentric
    elif center == 399:
        position_class = Geocentric
    else:
        has_orientation = hasattr(center, 'rotation_at')
        position_class = Geometric if has_orientation else ICRF
    return position_class(position_au, velocity_au_per_d, t, center, target)
def position_of_radec(ra_hours, dec_degrees, distance_au=_GIGAPARSEC_AU,
                      epoch=None, t=None, center=None, target=None):
    """Build a position object from a right ascension and declination.

    When no explicit ``distance_au`` is given, the vector returned is a
    gigaparsec long — far enough away that the position shows the same
    right ascension and declination from any viewing position in the
    Solar System, to within a few hundredths of a microarcsecond.

    When an ``epoch`` is supplied, the input coordinates are understood
    to be in the dynamical system of that particular date; otherwise
    they are interpreted as ICRS (the modern replacement for J2000).
    """
    declination_rad = _to_array(dec_degrees) / 360.0 * tau
    right_ascension_rad = _to_array(ra_hours) / 24.0 * tau
    xyz_au = from_spherical(distance_au, declination_rad, right_ascension_rad)
    if epoch is not None:
        # Rotate from the equinox-of-epoch system into the ICRS.
        xyz_au = mxv(epoch.MT, xyz_au)
    return build_position(xyz_au, None, t, center, target)
def position_from_radec(ra_hours, dec_degrees, distance=1.0, epoch=None,
                        t=None, center=None, target=None):
    """DEPRECATED version of ``position_of_radec()``.

    Problems:

    * The ``distance`` parameter specifies no unit, contrary to Skyfield
      best practices.  I have no idea what I was thinking.

    * The default ``distance`` is far too small, since most objects for
      which users specify an RA and declination are out on the celestial
      sphere.  The hope was that users would see the length 1.0 and
      think, “ah, yes, that’s obviously a fake placeholder value.”  But
      it’s more likely that users will not even check the distance, or
      maybe not even realize that a distance is involved.

    """
    # Thin backwards-compatibility shim; deliberately left untouched.
    return position_of_radec(ra_hours, dec_degrees, distance, epoch,
                             t, center, target)
class ICRF(object):
    """An (x,y,z) position and velocity oriented to the ICRF axes.

    The International Coordinate Reference Frame (ICRF) is a permanent
    reference frame that is the replacement for J2000.  Their axes agree
    to within 0.02 arcseconds.  It also supersedes older equinox-based
    systems like B1900 and B1950.

    Each instance of this class provides a ``.position`` vector and a
    ``.velocity`` vector that specify (x,y,z) coordinates along the axes
    of the ICRF.  A specific time ``.t`` might be specified or might be
    ``None``.

    """
    # Class-level defaults, overridden per-instance where relevant:
    center_barycentric = None   # Barycentric position of .center, when known
    _observer_gcrs_au = None    # observer's GCRS vector; see is_sunlit()
    _default_center = None
    _ephemeris = None  # cached so we can compute how light is deflected

    def __init__(self, position_au, velocity_au_per_d=None, t=None,
                 center=None, target=None):
        """Build a position from ICRF (x,y,z) vectors in au and au/day."""
        self.t = t
        self.position = Distance(position_au)
        # A missing velocity is stored as a same-shaped vector of NaNs.
        if velocity_au_per_d is None:
            velocity_au_per_d = full(self.position.au.shape, nan)
        self.velocity = Velocity(velocity_au_per_d)
        self.center = self._default_center if center is None else center
        self.target = target
        # A position measured from the Solar System barycenter serves as
        # its own barycentric reference.
        if center == 0:
            self.center_barycentric = self
    @classmethod
    def from_radec(cls, ra_hours, dec_degrees,
                   distance_au=_GIGAPARSEC_AU, epoch=None):
        """Build a position from a right ascension and declination.

        Like the module-level ``position_of_radec()``: without an
        explicit ``distance_au`` the vector is a gigaparsec long, and if
        an ``epoch`` is given the input coordinates are interpreted in
        the dynamical system of that date rather than the ICRS.
        """
        theta = _to_array(dec_degrees) / 360.0 * tau
        phi = _to_array(ra_hours) / 24.0 * tau
        position_au = from_spherical(distance_au, theta, phi)
        if epoch is not None:
            # Rotate from equinox-of-epoch coordinates into the ICRS.
            position_au = mxv(epoch.MT, position_au)
        return cls(position_au)
    @classmethod
    def from_time_and_frame_vectors(cls, t, frame, distance, velocity):
        """Constructor: build a position from two vectors in a reference frame.

        * ``t`` — The :class:`~skyfield.timelib.Time` of the position.
        * ``frame`` — A reference frame listed at `reference_frames`.
        * ``distance`` — A `Distance` x,y,z vector in the given frame.
        * ``velocity`` — A `Velocity` ẋ,ẏ,ż vector in the given frame.

        """
        r, v = distance.au, velocity.au_per_d
        # Frames that rotate over time expose _dRdt_times_RT_at(); its
        # contribution is removed from the velocity here (it is added
        # back, symmetrically, in frame_xyz_and_velocity()).
        at = getattr(frame, '_dRdt_times_RT_at', None)
        if at is not None:
            V = at(t)
            v = v - mxv(V, r)  # subtract instead of transposing
        # The transpose of the frame's rotation takes us back to the ICRF.
        RT = _T(frame.rotation_at(t))
        r = mxv(RT, r)
        v = mxv(RT, v)
        return cls(r, v, t)  # TODO: args for center and target?
def __repr__(self):
name = self.__class__.__name__
center = self.center
if name == 'Barycentric' and center == 0:
suffix = ' BCRS'
elif name == 'Apparent' and center == 399:
suffix = ' GCRS'
elif name != 'ICRF':
suffix = ' ICRS'
else:
suffix = ''
center = self.center
target = self.target
center_name = getattr(center, 'center_name', None)
if center_name is None:
center_name = str(center)
target_name = getattr(target, 'target_name', None)
if target_name is None:
target_name = str(target)
return '<{0}{1} position{2}{3}{4}{5}>'.format(
name,
suffix,
'' if (self.velocity is None) else ' and velocity',
'' if self.t is None else ' at date t',
'' if self.center is None else ' center={0}'.format(center_name),
'' if self.target is None else ' target={0}'.format(target_name),
)
    def __sub__(self, body):
        """Subtract two ICRF vectors to produce a third."""
        # TODO: set center and target of result
        p = self.position.au - body.position.au
        if self.velocity is None or body.velocity is None:
            v = None
        else:
            v = self.velocity.au_per_d - body.velocity.au_per_d
        # The result is a plain ICRF vector: the difference of two
        # vectors sharing an origin is independent of that origin.
        return ICRF(p, v, self.t)
    def __getitem__(self, i):
        """Return the position at index ``i`` of an array of positions."""
        # Vectors are laid out as (3, n): select column i of each xyz
        # array along with element i of the time.
        return type(self)(
            self.position.au[:,i],
            self.velocity.au_per_d[:,i],
            self.t[i],
            self.center,
            self.target,
        )
    def __neg__(self):
        """Return this vector reversed, pointing the opposite direction."""
        # Note the swapped final arguments: the constructor expects
        # (..., center, target), so passing (self.target, self.center)
        # makes the reversed vector run from target back to center.
        return type(self)(
            -self.position.au,
            -self.velocity.au_per_d,
            self.t,
            self.target,
            self.center,
        )
def distance(self):
"""Compute the distance from the origin to this position.
The return value is a :class:`~skyfield.units.Distance` that
prints itself out in astronomical units (au) but that also
offers attributes ``au``, ``km``, and ``m`` if you want to
access its magnitude as a number.
>>> v = ICRF([1, 1, 0])
>>> print(v.distance())
1.41421 au
"""
return Distance(length_of(self.position.au))
def speed(self):
"""Compute the magnitude of the velocity vector.
>>> v = ICRF([0, 0, 0], [1, 2, 3])
>>> print(v.speed())
3.74166 au/day
"""
return Velocity(length_of(self.velocity.au_per_d))
    @reify
    def light_time(self):
        """Length of this vector in days of light travel time."""
        # meters / (meters per second) gives seconds; DAY_S converts to days.
        return self.distance().m / C * DAY_S
    def radec(self, epoch=None):
        r"""Compute equatorial (RA, declination, distance)

        When called without a parameter, this returns standard ICRF
        right ascension and declination:

        >>> from skyfield.api import load
        >>> ts = load.timescale()
        >>> t = ts.utc(2020, 5, 13, 10, 32)
        >>> eph = load('de421.bsp')
        >>> astrometric = eph['earth'].at(t).observe(eph['sun'])
        >>> ra, dec, distance = astrometric.radec()
        >>> print(ra, dec, sep='\n')
        03h 21m 47.67s
        +18deg 28' 55.3"

        If you instead want the coordinates referenced to the dynamical
        system defined by the Earth's true equator and equinox, provide
        a specific epoch time.

        >>> ra, dec, distance = astrometric.apparent().radec(epoch='date')
        >>> print(ra, dec, sep='\n')
        03h 22m 54.73s
        +18deg 33' 04.5"

        To get J2000.0 coordinates, simply pass ``ts.J2000``.
        """
        position_au = self.position.au
        if epoch is not None:
            # Accept a Time, a float TT Julian date, or the string
            # 'date' (meaning this position's own time .t).
            if isinstance(epoch, Time):
                pass
            elif isinstance(epoch, float):
                epoch = Time(None, tt=epoch)
            elif epoch == 'date':
                epoch = self.t
            else:
                raise ValueError('the epoch= must be a Time object,'
                                 ' a floating point Terrestrial Time (TT),'
                                 ' or the string "date" for epoch-of-date')
            # Rotate into the dynamical system of the chosen epoch.
            position_au = mxv(epoch.M, position_au)
        r_au, dec, ra = to_spherical(position_au)
        return (Angle(radians=ra, preference='hours'),
                Angle(radians=dec, signed=True),
                Distance(r_au))
    def separation_from(self, another_icrf):
        """Return the angle between this position and another.

        >>> from skyfield.api import load
        >>> ts = load.timescale()
        >>> t = ts.utc(2020, 4, 18)
        >>> eph = load('de421.bsp')
        >>> sun, venus, earth = eph['sun'], eph['venus'], eph['earth']
        >>> e = earth.at(t)
        >>> s = e.observe(sun)
        >>> v = e.observe(venus)
        >>> print(s.separation_from(v))
        43deg 23' 23.1"

        You can also compute separations across an array of positions.

        >>> t = ts.utc(2020, 4, [18, 19, 20])
        >>> e = earth.at(t)
        >>> print(e.observe(sun).separation_from(e.observe(venus)))
        3 values from 43deg 23' 23.1" to 42deg 49' 46.6"
        """
        u = self.position.au
        v = another_icrf.position.au

        # Allow an array of positions to be compared with a single other
        # position: pad the lower-rank vector with trailing length-1
        # axes so that NumPy broadcasting lines the arrays up.
        difference = len(u.shape) - len(v.shape)
        if difference:
            if difference > 0:
                v = reshape(v, v.shape + (1,) * difference)
            else:
                u = reshape(u, u.shape + (1,) * -difference)

        return Angle(radians=angle_between(u, v))
    # TODO: build a reference frame for the following two methods.

    def cirs_xyz(self, epoch):
        """Compute cartesian CIRS coordinates at a given epoch (x,y,z).

        Calculate coordinates in the Celestial Intermediate Reference System
        (CIRS), a dynamical coordinate system referenced to the Celestial
        Intermediate Origin (CIO).  As this is a dynamical system it must be
        calculated at a specific epoch.
        """
        # Accept a Time, a float TT Julian date, or the string 'date'
        # (meaning this position's own time .t).
        if isinstance(epoch, Time):
            pass
        elif isinstance(epoch, float):
            epoch = Time(None, tt=epoch)
        elif epoch == 'date':
            epoch = self.t
        else:
            raise ValueError('the epoch= must be a Time object,'
                             ' a floating point Terrestrial Time (TT),'
                             ' or the string "date" for epoch-of-date')

        # The time's C matrix rotates ICRF coordinates into the CIRS.
        vector = mxv(epoch.C, self.position.au)
        return Distance(vector)
def cirs_radec(self, epoch):
"""Get spherical CIRS coordinates at a given epoch (ra, dec, distance).
Calculate coordinates in the Celestial Intermediate Reference System
(CIRS), a dynamical coordinate system referenced to the Celestial
Intermediate Origin (CIO). As this is a dynamical system it must be
calculated at a specific epoch.
"""
r_au, dec, ra = to_spherical(self.cirs_xyz(epoch).au)
return (Angle(radians=ra, preference='hours'),
Angle(radians=dec, signed=True),
Distance(r_au))
    # Deprecated methods, that have been replaced by `framelib.py` plus
    # the "frame" methods in the next section.

    def ecliptic_xyz(self, epoch=None):
        """Deprecated; use ``frame_xyz()`` with an ecliptic frame instead."""
        if epoch is None:
            return self.frame_xyz(framelib.ecliptic_J2000_frame)
        return _Fake(self, epoch).frame_xyz(framelib.ecliptic_frame)

    def ecliptic_velocity(self):
        """Deprecated; use ``frame_xyz_and_velocity()`` instead."""
        return self.frame_xyz_and_velocity(framelib.ecliptic_J2000_frame)[1]

    def ecliptic_latlon(self, epoch=None):
        """Deprecated; use ``frame_latlon()`` with an ecliptic frame instead."""
        if epoch is None:
            return self.frame_latlon(framelib.ecliptic_J2000_frame)
        return _Fake(self, epoch).frame_latlon(framelib.ecliptic_frame)

    def galactic_xyz(self):
        """Deprecated; use ``frame_xyz(galactic_frame)`` instead."""
        return self.frame_xyz(framelib.galactic_frame)

    def galactic_latlon(self):
        """Deprecated; use ``frame_latlon(galactic_frame)`` instead."""
        return self.frame_latlon(framelib.galactic_frame)

    ecliptic_position = ecliptic_xyz  # old alias
    galactic_position = galactic_xyz  # old alias
# New methods for converting to and from `framelib.py` reference frames.
def frame_xyz(self, frame):
"""Return this position as an (x,y,z) vector in a reference frame.
Returns a :class:`~skyfield.units.Distance` object giving the
(x,y,z) of this position in the given ``frame``. See
`reference_frames`.
"""
return Distance(mxv(frame.rotation_at(self.t), self.position.au))
    def frame_xyz_and_velocity(self, frame):
        """Return (x,y,z) position and velocity vectors in a reference frame.

        Returns two vectors in the given coordinate ``frame``: a
        :class:`~skyfield.units.Distance` providing an (x,y,z) position
        and a :class:`~skyfield.units.Velocity` giving (xdot,ydot,zdot)
        velocity.  See `reference_frames`.
        """
        R = frame.rotation_at(self.t)
        r, v = self.position.au, self.velocity.au_per_d
        r = mxv(R, r)
        v = mxv(R, v)
        # Frames that rotate over time expose _dRdt_times_RT_at(); add
        # the velocity contributed by the frame's own rotation.
        at = getattr(frame, '_dRdt_times_RT_at', None)
        if at is not None:
            V = at(self.t)
            v += mxv(V, r)
        return Distance(r), Velocity(v)
def frame_latlon(self, frame):
"""Return longitude, latitude, and distance in the given frame.
Returns a 3-element tuple giving the latitude and longitude as
:class:`~skyfield.units.Angle` objects and the range to the
target as a :class:`~skyfield.units.Distance`. See
`reference_frames`.
"""
vector = mxv(frame.rotation_at(self.t), self.position.au)
d, lat, lon = to_spherical(vector)
return (Angle(radians=lat, signed=True),
Angle(radians=lon),
Distance(d))
    def to_skycoord(self, unit=None):
        """Convert this distance to an AstroPy ``SkyCoord`` object."""
        # Imported lazily so AstroPy stays an optional dependency.
        from astropy.coordinates import SkyCoord
        from astropy.units import au
        x, y, z = self.position.au
        # NOTE(review): the `unit` parameter is accepted but unused —
        # coordinates are always expressed in au.
        return SkyCoord(representation_type='cartesian', x=x, y=y, z=z, unit=au)
    def _to_spice_frame(self, name):
        """Return (ra, dec, distance) in the named SPICE inertial frame."""
        vector = self.position.au
        vector = inertial_frames[name].dot(vector)
        d, dec, ra = to_spherical(vector)
        return (Angle(radians=ra, preference='hours', signed=True),
                Angle(radians=dec),
                Distance(au=d))
    def is_sunlit(self, ephemeris):
        """Return whether a position in Earth orbit is in sunlight.

        Returns ``True`` or ``False``, or an array of such values, to
        indicate whether this position is in sunlight or is blocked by
        the Earth’s shadow.  It should work with positions produced
        either by calling ``at()`` on a satellite object, or by calling
        ``at()`` on the relative position ``sat - topos`` of a satellite
        with respect to an Earth observer’s position.  See
        :ref:`satellite-is-sunlit`.
        """
        # Vector from this position to the geocenter, in meters.
        if self.center == 399:
            earth_m = - self.position.m
        else:
            gcrs_position = self._observer_gcrs_au
            if gcrs_position is None:
                raise ValueError('cannot tell whether this position is sunlit')
            earth_m = - self.position.m - gcrs_position * AU_M

        sun_m = (ephemeris['sun'] - ephemeris['earth']).at(self.t).position.m
        # Intersect the line toward the Sun with the Earth sphere; a
        # far intersection <= 0 apparently means the Earth does not lie
        # between us and the Sun.  NaN (no intersection at all) maps to
        # 0 via nan_to_num and so also counts as sunlit.
        near, far = intersect_line_and_sphere(sun_m + earth_m, earth_m, ERAD)
        return nan_to_num(far) <= 0
    def is_behind_earth(self):
        """Return whether the Earth blocks the view of this object.

        For a position centered on an Earth-orbiting satellite, return
        whether the target is in eclipse behind the disc of the Earth.
        See :ref:`is-behind-earth`.
        """
        observer_gcrs_au = self._observer_gcrs_au
        if observer_gcrs_au is None:
            raise ValueError('can only compute Earth occultation for'
                             ' positions observed from an Earth satellite')
        # Vector from the observer to the geocenter, in meters.
        earth_m = - observer_gcrs_au * AU_M
        vector_m = self.position.m
        # Blocked when the line toward the target intersects the Earth
        # sphere ahead of the observer (far > 0); NaN (no intersection)
        # becomes 0 via nan_to_num and counts as unblocked.
        near, far = intersect_line_and_sphere(vector_m, earth_m, ERAD)
        return nan_to_num(far) > 0
    @reify
    def _altaz_rotation(self):
        # Return and cache (with @reify) the orientation of this
        # observer, in case a single observer.at() position is used in
        # several subsequent .observe().apparent().altaz() calls.
        rotation_at = getattr(self.target, 'rotation_at', None)
        if rotation_at is None:
            # Only a target that knows its own orientation (one that
            # offers rotation_at()) can supply an alt-az rotation.
            raise ValueError(_altaz_message)
        return rotation_at(self.t)
    def from_altaz(self, alt=None, az=None, alt_degrees=None, az_degrees=None,
                   distance=Distance(au=0.1)):
        """Generate an Apparent position from an altitude and azimuth.

        The altitude and azimuth can each be provided as an `Angle`
        object, or else as a number of degrees provided as either a
        float or a tuple of degrees, arcminutes, and arcseconds::

            alt=Angle(...), az=Angle(...)
            alt_degrees=23.2289, az_degrees=142.1161
            alt_degrees=(23, 13, 44.1), az_degrees=(142, 6, 58.1)

        The distance should be a :class:`~skyfield.units.Distance`
        object, if provided; otherwise a default of 0.1 au is used.
        """
        # TODO: should this method live on another class?
        rotation_at = getattr(self.target, 'rotation_at', None)
        if rotation_at is None:
            raise ValueError(_altaz_message)
        R = rotation_at(self.t)
        alt = _interpret_angle('alt', alt, alt_degrees)
        az = _interpret_angle('az', az, az_degrees)
        r = distance.au
        p = from_spherical(r, alt, az)
        # Multiply by the transpose of R (the 'ji...' subscript swaps
        # the first two indices), rotating alt-az back into the ICRF.
        p = einsum('ji...,j...->i...', R, p)
        return Apparent(p)
# Backwards-compatible alias for the original name of the class.  Not an
# important enough change to warrant a deprecation error for users, so:
ICRS = ICRF
class Geometric(ICRF):
    """An (x,y,z) vector between two instantaneous positions.

    A geometric position is the difference between the Solar System
    positions of two bodies at exactly the same instant.  It is *not*
    corrected for the fact that, in real physics, it will take time for
    light to travel from one position to the other.

    Both the ``.position`` and ``.velocity`` are (x,y,z) vectors
    oriented along the axes of the International Celestial Reference
    System (ICRS), the modern replacement for J2000 coordinates.

    """
    def altaz(self, temperature_C=None, pressure_mbar='standard'):
        """Compute (alt, az, distance) relative to the observer's horizon

        The altitude returned is an :class:`~skyfield.units.Angle`
        measured in degrees above the horizon, while the azimuth
        :class:`~skyfield.units.Angle` measures east along the horizon
        from geographic north (so 0 degrees means north, 90 is east,
        180 is south, and 270 is west).

        By default, Skyfield does not adjust the altitude for
        atmospheric refraction.  If you want Skyfield to estimate how
        high the atmosphere might lift the body's image, give the
        argument ``temperature_C`` either the temperature in degrees
        centigrade, or the string ``'standard'`` (in which case 10°C is
        used).

        When calculating refraction, Skyfield uses the observer’s
        elevation above sea level to estimate the atmospheric pressure.
        If you want to override that value, simply provide a number
        through the ``pressure_mbar`` parameter.
        """
        # Shared implementation with Apparent.altaz(); see the
        # module-level _to_altaz() helper.
        return _to_altaz(self, temperature_C, pressure_mbar)
class Barycentric(ICRF):
    """An (x,y,z) position measured from the Solar System barycenter.

    Skyfield generates a `Barycentric` position measured from the
    gravitational center of the Solar System whenever you ask a body for
    its location at a particular time:

    >>> t = ts.utc(2003, 8, 29)
    >>> mars.at(t)
    <Barycentric BCRS position and velocity at date t center=0 target=499>

    This class’s ``.position`` and ``.velocity`` are (x,y,z) vectors in
    the Barycentric Celestial Reference System (BCRS), the modern
    replacement for J2000 coordinates measured from the Solar System
    Barycenter.

    """
    # Positions default to being measured from the barycenter (0).
    _default_center = 0

    def observe(self, body):
        """Compute the `Astrometric` position of a body from this location.

        To compute the body's astrometric position, it is first asked
        for its position at the time `t` of this position itself.  The
        distance to the body is then divided by the speed of light to
        find how long it takes its light to arrive.  Finally, the light
        travel time is subtracted from `t` and the body is asked for a
        series of increasingly exact positions to learn where it was
        when it emitted the light that is now reaching this position.

        >>> earth.at(t).observe(mars)
        <Astrometric ICRS position and velocity at date t center=399 target=499>
        """
        p, v, t, light_time = body._observe_from_bcrs(self)
        astrometric = Astrometric(p, v, t, self.target, body.target)
        # Carry the ephemeris along so the light-deflection step of
        # apparent() can find the deflecting bodies later.
        astrometric._ephemeris = self._ephemeris
        astrometric.center_barycentric = self
        astrometric.light_time = light_time
        return astrometric
# TODO: pre-create a Barycentric object representing the SSB, and make
# it possible for it to observe() a planet.
class Astrometric(ICRF):
    """An astrometric (x,y,z) position relative to a particular observer.

    The astrometric position of a body is its position relative to an
    observer, adjusted for light-time delay.  It is the position of the
    body back when it emitted (or reflected) the light that is now
    reaching the observer's eye or telescope.  Astrometric positions are
    usually generated in Skyfield by calling the `Barycentric` method
    `observe()`, which performs the light-time correction.

    Both the ``.position`` and ``.velocity`` are ``[x y z]`` vectors
    oriented along the axes of the ICRF, the modern replacement for the
    J2000 reference frame.

    It is common to either call ``.radec()`` (with no argument) on an
    astrometric position to generate an *astrometric place* right
    ascension and declination with respect to the ICRF axes, or else to
    call ``.apparent()`` to generate an :class:`Apparent` position.

    """
    def apparent(self):
        """Compute an :class:`Apparent` position for this body.

        This applies two effects to the position that arise from
        relativity and shift slightly where the other body will appear
        in the sky: the deflection that the image will experience if its
        light passes close to large masses in the Solar System, and the
        aberration of light caused by the observer's own velocity.

        >>> earth.at(t).observe(mars).apparent()
        <Apparent GCRS position and velocity at date t center=399 target=499>

        These transforms convert the position from the BCRS reference
        frame of the Solar System barycenter and to the reference frame
        of the observer.  In the specific case of an Earth observer, the
        output reference frame is the GCRS.
        """
        t = self.t
        # Work on a copy: add_deflection()/add_aberration() modify the
        # target vector in place.
        target_au = self.position.au.copy()

        cb = self.center_barycentric
        bcrs_position = cb.position.au
        bcrs_velocity = cb.velocity.au_per_d
        observer_gcrs_au = cb._observer_gcrs_au

        # If a single observer position (3,) is observing an array of
        # targets (3,n), then deflection and aberration will complain
        # that "operands could not be broadcast together" unless we give
        # the observer another dimension too.
        if len(bcrs_position.shape) < len(target_au.shape):
            shape = bcrs_position.shape + (1,)
            bcrs_position = bcrs_position.reshape(shape)
            bcrs_velocity = bcrs_velocity.reshape(shape)
            if observer_gcrs_au is not None:
                observer_gcrs_au = observer_gcrs_au.reshape(shape)

        # The Earth's own gravity only deflects the image for observers
        # near the Earth whose line of sight passes close to its limb.
        if observer_gcrs_au is None:
            include_earth_deflection = array((False,))
        else:
            limb_angle, nadir_angle = compute_limb_angle(
                target_au, observer_gcrs_au)
            include_earth_deflection = nadir_angle >= 0.8

        add_deflection(target_au, bcrs_position,
                       self._ephemeris, t, include_earth_deflection)
        add_aberration(target_au, bcrs_velocity, self.light_time)

        apparent = Apparent(target_au, None, t, self.center, self.target)
        apparent.center_barycentric = self.center_barycentric
        apparent._observer_gcrs_au = observer_gcrs_au
        return apparent
class Apparent(ICRF):
    """An apparent ``[x y z]`` position relative to a particular observer.

    This class’s vectors provide the position and velocity of a body
    relative to an observer, adjusted to predict where the body’s image
    will really appear (hence "apparent") in the sky:

    * Light-time delay, as already present in an `Astrometric` position.

    * Deflection: gravity bends light, and thus the image of a distant
      object, as the light passes massive objects like Jupiter, Saturn,
      and the Sun.  For an observer on the Earth’s surface or in Earth
      orbit, the slight deflection by the gravity of the Earth itself is
      also included.

    * Aberration: incoming light arrives slanted because of the
      observer's motion through space.

    These positions are usually produced in Skyfield by calling the
    `apparent()` method of an `Astrometric` object.

    Both the ``.position`` and ``.velocity`` are ``[x y z]`` vectors
    oriented along the axes of the ICRF, the modern replacement for the
    J2000 reference frame.  If the observer is at the geocenter, they
    are more specifically GCRS coordinates.  Two common coordinates that
    this vector can generate are:

    * *Proper place:* call ``.radec()`` without arguments to compute
      right ascension and declination with respect to the fixed axes of
      the ICRF.

    * *Apparent place,* the most popular option: call ``.radec('date')``
      to generate right ascension and declination with respect to the
      equator and equinox of date.

    """
    def altaz(self, temperature_C=None, pressure_mbar='standard'):
        """Compute (alt, az, distance) relative to the observer's horizon

        The altitude returned is an :class:`~skyfield.units.Angle`
        measured in degrees above the horizon, while the azimuth
        :class:`~skyfield.units.Angle` measures east along the horizon
        from geographic north (so 0 degrees means north, 90 is east,
        180 is south, and 270 is west).

        By default, Skyfield does not adjust the altitude for
        atmospheric refraction.  If you want Skyfield to estimate how
        high the atmosphere might lift the body's image, give the
        argument ``temperature_C`` either the temperature in degrees
        centigrade, or the string ``'standard'`` (in which case 10°C is
        used).

        When calculating refraction, Skyfield uses the observer’s
        elevation above sea level to estimate the atmospheric pressure.
        If you want to override that value, simply provide a number
        through the ``pressure_mbar`` parameter.
        """
        # Shared implementation with Geometric.altaz(); see the
        # module-level _to_altaz() helper.
        return _to_altaz(self, temperature_C, pressure_mbar)
class Geocentric(ICRF):
    """An (x,y,z) position measured from the center of the Earth.

    A geocentric position is the difference between the position of the
    Earth at a given instant and the position of a target body at the
    same instant, without accounting for light-travel time or the effect
    of relativity on the light itself.

    Its ``.position`` and ``.velocity`` vectors have (x,y,z) axes that
    are those of the Geocentric Celestial Reference System (GCRS), an
    inertial system that is an update to J2000 and that does not rotate
    with the Earth itself.
    """
    _default_center = 399  # NAIF body code 399: the Earth

    def itrf_xyz(self):
        """Deprecated; instead, call ``.frame_xyz(itrs)``. \
See `reference_frames`."""
        return self.frame_xyz(framelib.itrs)

    def subpoint(self):
        """Deprecated; instead, call either ``iers2010.subpoint(pos)`` or \
``wgs84.subpoint(pos)``."""
        # Local import presumably avoids a circular module dependency.
        from .toposlib import iers2010
        return iers2010.subpoint(self)
def _to_altaz(position, temperature_C, pressure_mbar):
    """Compute (alt, az, distance) relative to the observer's horizon.

    Returns ``(alt, az, distance)``: two Angle objects and a Distance.
    Raises ValueError (with _altaz_message) when the observing center
    cannot supply an alt-az rotation or a refraction model.
    """
    cb = position.center_barycentric
    if cb is not None:
        R = cb._altaz_rotation
    else:
        # BUG FIX: getattr() without a default raises AttributeError for a
        # center lacking rotation_at(); pass None so the explicit
        # ValueError branch below fires instead.
        rotation_at = getattr(position.center, 'rotation_at', None)
        if rotation_at is not None:
            R = rotation_at(position.t)
        else:
            raise ValueError(_altaz_message)
    position_au = mxv(R, position.position.au)
    r_au, alt, az = to_spherical(position_au)
    if temperature_C is None:
        # No refraction requested: return the geometric altitude.
        alt = Angle(radians=alt)
    else:
        refract = getattr(position.center, 'refract', None)
        if refract is None:
            raise ValueError(_altaz_message)
        alt = position.center.refract(
            alt * RAD2DEG, temperature_C, pressure_mbar,
        )
    return alt, Angle(radians=az), Distance(r_au)
# Shared error text raised when the observer cannot supply the horizon
# orientation (or refraction model) needed for an alt-az computation.
_altaz_message = (
    'to compute an altazimuth position, you must observe from a'
    ' specific Earth location or from a position on another body'
    ' loaded from a set of planetary constants'
)
class _Fake(ICRF):  # support for deprecated frame rotation methods above
    """Minimal position stand-in: copies a position vector while letting
    the caller substitute the epoch used for frame rotations."""
    def __init__(self, original, epoch):
        self.position = original.position
        if isinstance(epoch, Time):
            t = epoch
        elif isinstance(epoch, float):
            t = Time(None, tt=epoch)
        elif epoch == 'date':
            t = original.t
        else:
            raise ValueError('the epoch= must be a Time object,'
                             ' a floating point Terrestrial Time (TT),'
                             ' or the string "date" for epoch-of-date')
        self.t = t
def ITRF_to_GCRS(t, rITRF):  # Deprecated; for compatibility with old versions.
    """Rotate an ITRF position vector into the GCRS at time ``t``."""
    itrs_rotation = framelib.itrs.rotation_at(t)
    return mxv(_T(itrs_rotation), rITRF)
def ITRF_to_GCRS2(t, rITRF, vITRF, _high_accuracy=False):
    """Rotate ITRF position and velocity vectors into the GCRS at ``t``.

    Returns ``(position, velocity)``; the velocity is first corrected for
    the Earth's rotation, then both vectors are rotated into the GCRS.
    """
    position = array(rITRF)
    velocity = array(vITRF)
    # TODO: This is expensive, and should be extensively trimmed to only
    # include the most important terms underlying GAST.  But it improves
    # the precision by something like 1e5 times when compared to using
    # the round number skyfield.constants.ANGVEL!
    #
    # See the test `test_velocity_in_ITRF_to_GCRS2()`.
    #
    if _high_accuracy:
        # Differentiate GAST numerically across a one-second interval.
        _one_second = 1.0 / DAY_S
        t_later = t.ts.tt_jd(t.whole, t.tt_fraction + _one_second)
        angvel = (t_later.gast - t.gast) / 24.0 * tau
    else:
        angvel = ANGVEL
    spin = rot_z(t.gast / 24.0 * tau)
    R = mxm(t.MT, spin)
    # FIX: use the module-level `array` (already used above) instead of
    # re-importing NumPy in the middle of the function.
    z = 0.0 * angvel  # zero(s) with the same shape as angvel
    V = array((
        (z, -DAY_S * angvel, z),
        (DAY_S * angvel, z, z),
        (z, z, z),
    ))
    velocity = velocity + mxv(V, position)
    position = mxv(R, position)
    velocity = mxv(R, velocity)
    return position, velocity
| [
"zhaopku09@gmail.com"
] | zhaopku09@gmail.com |
f3bea3265e891f75ffc9b6338b3c362e3a8ada3a | 2129d956c1117fb8386932e01d894a36245cd6bf | /find_most_followed.py | fa4d2db08e0aec1b34c61bd46c682de75872595d | [] | no_license | lwishing/url-shortener- | 78caeb12a4dbb51fef00fce3ff3e1c1840849ad1 | 9fbdbe9abae05f3c3a78410325c0b206cf6d44d1 | refs/heads/master | 2021-01-23T08:04:29.011630 | 2012-12-04T03:33:58 | 2012-12-04T03:33:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 935 | py | """
This script will find the most followed urls from our link shortener. The input file is the log file located at static/log.txt.
To run:
find_most_followed.py static/log.txt > most_followed_urls.txt
To sort:
Open most_followed_urls.txt in Excel, sort by second column, which is the count of redirects.
"""
from mrjob.job import MRJob
import fileinput
import flask
import csv
# The Flask app exists only so app.logger can be used for debugging
# (see the commented-out app.logger.debug calls below).
app = flask.Flask(__name__)
app.debug = True
def csv_readline(line):
"""Given a string CSV line, return a list of strings."""
for row in csv.reader([line]):
return row
class ActionCounter(MRJob):
    """MapReduce job: count log rows per value of CSV column 4 (the
    followed URL) in the shortener's log file."""

    def mapper(self, line_no, line):
        """Emit (column-4 value, 1) for every CSV log line."""
        row = csv_readline(line)
        yield row[4], 1

    def reducer(self, vroot, occurrences):
        """Sum the counts for one key, skipping the empty key."""
        count = sum(occurrences)
        if vroot != '':
            yield vroot, count
if __name__ == '__main__':
    # mrjob parses its own command-line arguments (input file, runner, ...).
    ActionCounter.run()
| [
"ryanfbaker@ischool.berkeley.edu"
] | ryanfbaker@ischool.berkeley.edu |
71ab2907a52b111828b911817be9533720d79b95 | f48690a65601d4c8196d5f61062e77fbbc1592dc | /CH-1/06 Automatic Natural Language Understanding.py | 3bc44b5165c6cdfa30416f2fe4a59701f252d193 | [] | no_license | youth-for-you/Natural-Language-Processing-with-Python | 19f16b3b217b37a727508daae41de3d15122cbb9 | 43d974d86727780a97498618ecac0bd15d6a0bfc | refs/heads/master | 2021-09-01T10:11:48.283178 | 2017-12-26T11:47:57 | 2017-12-26T11:47:57 | 115,415,543 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 504 | py | #自然语言的一些术语
"""
5.1 词义消歧:
对于那些一词多义的单词,想要消除歧义,就需要上下文,理解上下文的含义才能确定单词的意思。
5.2 指代消解:
eg:The thieves stole the paintings. They were subsequently sold.
指代消解指确定名词或代词指得是什么。角色标注,确定名词短语如何与动词短语相关联。
5.3 自动生成语言:
自动解决语言理解问题,如自动问答,机器翻译
""" | [
"[pf_li163@163.com]"
] | [pf_li163@163.com] |
78050cb98834dcfbf54474a64b713bf895b8194f | 5e13873543d9ab0e04a756dc081a227b02643bee | /LSTM_regression.py | 4d06d1bddcc17cc71ecf034441486c683f40e856 | [] | no_license | mrthlinh/Agile-User-Story-Point-Estimation | 5f95cb7d51a61d72c60b6732b173064426b7a132 | 0af824fd1594fa76f5566064ded8bcf2cfd57478 | refs/heads/master | 2020-04-06T21:21:34.260841 | 2018-12-05T20:56:33 | 2018-12-05T20:56:33 | 157,800,714 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,355 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 19 19:22:30 2018
@author: bking
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
# NOTE(review): eager execution is enabled globally here, yet
# serving_input_receiver_fn below uses tf.placeholder, which is not
# available in eager mode -- confirm this runs on the intended TF version.
tf.enable_eager_execution()
#from movieReviewData import movieReviewData
from prepareData import prepareData
import argparse
tf.logging.set_verbosity(tf.logging.INFO)
print(tf.__version__)
#################### Define some parameter here ###############################
parser = argparse.ArgumentParser()
parser.add_argument('--model_dir', default= './models/ckpt/', type=str, help='Dir to save a model and checkpoints')
parser.add_argument('--saved_dir', default='./models/pb/', type=str, help='Dir to save a model for TF serving')
parser.add_argument('--step_size', default=10, type=int, help='Step size')
parser.add_argument('--batch_size', default=100, type=int, help='Batch size')
parser.add_argument('--embedding_size', default=100, type=int, help='Embedding size')
parser.add_argument('--rnn_layers', default=2, type=int, help='RNN layer size')
parser.add_argument('--rnn_units', default=50, type=int, help='RNN layer size')
args = parser.parse_args()
print("================== Custom model in LSTM ===========================")
print(parser.print_help())
print("===================================================================")
############################### Load and Preprocesing data ###########################################
# Load data
data = prepareData()
# Preprocessing data
#data.preProcessing()
# Module-level globals consumed by LSTM_model_fn and
# serving_input_receiver_fn below.
vocab_size = data.vocab_size
embedding_size = args.embedding_size
sentence_size = data.sentence_size
# Prepare data
x_train = data.x_train
x_test = data.x_test
y_train = data.y_train
y_test = data.y_test
def LSTM_model_fn(features, labels, mode):
    """Model function for a tf.estimator regression Estimator.

    features: dict with key 'x' holding [batch, sentence_size] int token ids.
    labels: story-point regression targets, or None at predict time.
    mode: one of tf.estimator.ModeKeys.{TRAIN, EVAL, PREDICT}.
    Returns a tf.estimator.EstimatorSpec appropriate to `mode`.
    """
    # [batch_size x sentence_size x embedding_size]: learn a float vector
    # per vocabulary token, used as features by the recurrent layers.
    # BUG FIX: the initializer range was (-1.0, -1.0), which starts every
    # embedding weight at exactly -1.0; a uniform (-1.0, 1.0) is intended.
    inputs = tf.contrib.layers.embed_sequence(
        features['x'], vocab_size, embed_dim=embedding_size,
        initializer=tf.random_uniform_initializer(-1.0, 1.0))

    layers = args.rnn_layers
    rnn_size = args.rnn_units

    def lstm_cell():
        """One LSTM cell of `rnn_size` units wrapped in dropout."""
        lstm = tf.nn.rnn_cell.LSTMCell(rnn_size)
        # NOTE(review): output_keep_prob=0.2 keeps only 20% of activations;
        # a dropout *rate* of 0.2 would be output_keep_prob=0.8 -- confirm.
        lstm = tf.nn.rnn_cell.DropoutWrapper(lstm, output_keep_prob=0.2)
        return lstm

    deep_lstm_cell = tf.contrib.rnn.MultiRNNCell(
        [lstm_cell() for _ in range(layers)])

    # Unpadded length of each sentence (padding token id is assumed 0).
    sequence_length = tf.count_nonzero(features['x'], 1)

    # Run the stacked LSTM over the embedded tokens.
    _, final_states = tf.nn.dynamic_rnn(
        deep_lstm_cell, inputs, sequence_length=sequence_length,
        dtype=tf.float32)

    # Final hidden state of the top layer: [batch_size, rnn_units].
    outputs = final_states[-1].h

    # Two dense layers shrink the state to a single regression output.
    num_unit = [rnn_size / 2, 1]
    out_points = tf.layers.dense(inputs=outputs, units=num_unit[0])
    out_points = tf.layers.dense(inputs=out_points, units=num_unit[1])

    if labels is not None:
        labels = tf.reshape(labels, [-1, 1])

    predictions = {
        # Generate predictions (for PREDICT and EVAL mode)
        "point": out_points
    }

    # Prediction: return before touching labels (they are None here).
    if mode == tf.estimator.ModeKeys.PREDICT:
        return tf.estimator.EstimatorSpec(mode=mode,
                                          predictions=predictions["point"])

    # Compute loss => must come after the PREDICT early-return.
    loss = tf.losses.mean_squared_error(labels, out_points)

    # Configure the Training Op (for TRAIN mode)
    if mode == tf.estimator.ModeKeys.TRAIN:
        optimizer = tf.train.AdamOptimizer()
        train_op = optimizer.minimize(
            loss=loss,
            global_step=tf.train.get_global_step())
        return tf.estimator.EstimatorSpec(mode=mode, loss=loss,
                                          train_op=train_op)

    labels = tf.cast(labels, tf.float32)
    # Add evaluation metrics (for EVAL mode)
    eval_metric_ops = {
        "MSE": tf.metrics.mean_squared_error(
            labels=labels, predictions=predictions["point"]),
        "MeanAbsoluteError": tf.metrics.mean_absolute_error(
            labels=labels, predictions=predictions["point"])}
    return tf.estimator.EstimatorSpec(
        mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)
def serving_input_receiver_fn():
    """Build the ServingInputReceiver used when exporting for TF Serving.

    Accepts a flat int32 "sentence" tensor of flexible length and reshapes
    it into the [1, sentence_size] layout the model expects.

    Ref: https://www.tensorflow.org/versions/r1.7/api_docs/python/tf/estimator/export/ServingInputReceiver
    """
    receiver = {
        # The size of input sentence is flexible.
        "sentence": tf.placeholder(tf.int32, [None, ]),
    }
    feature_map = {
        "x": tf.reshape(receiver["sentence"], [1, sentence_size]),
    }
    return tf.estimator.export.ServingInputReceiver(
        receiver_tensors=receiver, features=feature_map)
def parser(x, y):
    """Pack a raw (tokens, label) pair into ({'x': tokens}, label).

    NOTE(review): this function shadows the module-level argparse
    ``parser`` variable (already consumed above) -- consider renaming.
    """
    return {"x": x}, y
def train_input_fn(x_train, y_train, batch_size):
    """Training input pipeline: shuffle, batch, wrap into feature dicts,
    and repeat indefinitely.  Returns the next-element tensors."""
    ds = tf.data.Dataset.from_tensor_slices((x_train, y_train))
    ds = ds.shuffle(1000).batch(batch_size).map(parser).repeat()
    return ds.make_one_shot_iterator().get_next()
def eval_input_fn(x_train, y_train, batch_size):
    """Evaluation input pipeline: batch and wrap into feature dicts.

    Deliberately no shuffling (and no repeat) during evaluation.
    """
    ds = tf.data.Dataset.from_tensor_slices((x_train, y_train))
    ds = ds.batch(batch_size).map(parser)
    return ds.make_one_shot_iterator().get_next()
def main(unused_argv):
    """Train the estimator, evaluate it, and export a SavedModel."""
    # Create the Estimator
    RNN_classifier = tf.estimator.Estimator(
        model_fn=LSTM_model_fn, model_dir= args.model_dir)
    # Set up logging for predictions
    # Log the values in the "Softmax" tensor with label "probabilities"
    # tensors_to_log = {"predicted_points": "predicted_points"}
    # logging_hook = tf.train.LoggingTensorHook(tensors=tensors_to_log, every_n_iter=50)
    # RNN_classifier.train(
    #     input_fn=lambda: train_input_fn(x_train,y_train,batch_size=args.batch_size),
    #     steps=args.step_size,
    #     hooks=[logging_hook])
    # Train for --step_size steps on batches of --batch_size.
    RNN_classifier.train(
        input_fn=lambda: train_input_fn(x_train,y_train,batch_size=args.batch_size),
        steps=args.step_size)
    # Report MSE / MAE on the held-out test split.
    eval_results = RNN_classifier.evaluate(
        input_fn = lambda: eval_input_fn(x_test,y_test,batch_size=args.batch_size))
    print(eval_results)
    # Save the model
    RNN_classifier.export_savedmodel(args.saved_dir, serving_input_receiver_fn=serving_input_receiver_fn)
if __name__ == "__main__":
tf.app.run()
| [
"mrthlinh@gmail.com"
] | mrthlinh@gmail.com |
d9b24565572ae2e7727fd092000ba9875a8c0be8 | d35ca5eb98533bff6da921195e91de3b3bdede57 | /detect_faces_video.py | f047b9092ddbe880ddc67ea9412f493d7a978f96 | [] | no_license | akshatjain15/Iot-Project | dc91c4ab2716a93755a9bcbe137016ecf5406094 | 0a514d43a2a9154578acfcdc8792f3922a970234 | refs/heads/master | 2020-06-02T14:52:30.891990 | 2019-07-16T09:53:52 | 2019-07-16T09:53:52 | 191,196,242 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,476 | py | # python detect_faces_video.py --prototxt deploy.prototxt.txt --model res10_300x300_ssd_iter_140000.caffemodel
# import the necessary packages
from imutils.video import VideoStream
import numpy as np
import argparse
import imutils
import time
import cv2
import random
import os
import urllib.request as ur
# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
# ap.add_argument("-p", "--prototxt", required=True,
# help="path to Caffe 'deploy' prototxt file")
# ap.add_argument("-m", "--model", required=True,
# help="path to Caffe pre-trained model")
ap.add_argument("-c", "--confidence", type=float, default=0.5,
	help="minimum probability to filter weak detections")
args = vars(ap.parse_args())
# load our serialized model from disk
print("[INFO] loading model...")
net = cv2.dnn.readNetFromCaffe('deploy.prototxt.txt', 'res10_300x300_ssd_iter_140000.caffemodel')
# initialize the video stream and allow the cammera sensor to warmup
print("[INFO] starting video stream...")
# vs = VideoStream(src=0).start()
time.sleep(2.0)
# Counters used in the capture loop below: `count` throttles how often a
# face crop is written; `i` and `y` bound the random filename number.
count = 0
i = 1
y=10000
# Ask for the subject's name and create a per-person training folder.
name = input("Enter your name: ")
newpath = os.path.join('/home/pi/Desktop/IOT FINAL/DATA/train', name)
print("New path"+ newpath)
if not os.path.exists(newpath):
    os.makedirs(newpath)
os.chdir(newpath)
# Read the IP-camera URL from disk.
# NOTE(review): this file handle is never closed; prefer `with open(...)`.
file = open('url_read.txt', 'r')
url = file.read()
# loop over the frames from the video stream
while True:
    # Fetch one frame from the IP camera.
    # NOTE(review): bare `except:` also swallows KeyboardInterrupt;
    # prefer `except Exception:` (or urllib.error.URLError).
    try:
        imgResp=ur.urlopen(url)
    except:
        print("ERROR \n Connection not established")
        break
    imgNp=np.array(bytearray(imgResp.read()),dtype=np.uint8)
    image = cv2.imdecode(imgNp,-1)
    # grab the frame from the threaded video stream and resize it
    # to have a maximum width of 400 pixels
    # frame = vs.read()
    frame = imutils.resize(image, width=400)
    # grab the frame dimensions and convert it to a blob
    (h, w) = frame.shape[:2]
    blob = cv2.dnn.blobFromImage(cv2.resize(frame, (300, 300)), 1.0,(300, 300), (104.0, 177.0, 123.0))
    # pass the blob through the network and obtain the detections and
    # predictions
    net.setInput(blob)
    detections = net.forward()
    # loop over the detections
    # NOTE(review): this `i` shadows the module-level counter initialised
    # above, so random.randint(i, y) below uses the detection index.
    for i in range(0, detections.shape[2]):
        # extract the confidence (i.e., probability) associated with the
        # prediction
        confidence = detections[0, 0, i, 2]
        # filter out weak detections by ensuring the `confidence` is
        # greater than the minimum confidence
        if confidence < args["confidence"]:
            continue
        print(frame)
        # compute the (x, y)-coordinates of the bounding box for the
        # object
        box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
        (startX, startY, endX, endY) = box.astype("int")
        # draw the bounding box of the face along with the associated
        # probability
        text = "{:.2f}%".format(confidence * 100)
        # NOTE(review): this overwrites the module-level `y` (initially
        # 10000) with a pixel coordinate, so random.randint(i, y) can even
        # raise ValueError when i > y -- confirm intended behaviour.
        y = startY - 10 if startY - 10 > 10 else startY + 10
        cv2.rectangle(frame, (startX, startY), (endX, endY),(0, 0, 255), 2)
        # Save every 50th processed detection as a training crop with a
        # random filename.
        if count%50 == 0 :
            image = frame[startY:endY,startX:endX]
            ran = str(random.randint(i,y))
            path = newpath +"/"+ ran+".png" # This directory will change as per Raspberry Pi
            print(path)
            i = i + 5
            y = y - 5
            cv2.imwrite(path,image)
            print("Image capture"+" "+str(i))
        count+=1
        cv2.putText(frame, text, (startX, y),
            cv2.FONT_HERSHEY_SIMPLEX, 0.45, (0, 0, 255), 2)
    # show the output frame
    cv2.imshow("Frame", frame)
    key = cv2.waitKey(1) & 0xFF
    # if the `q` key was pressed, break from the loop
    if key == ord("q"):
        break
# do a bit of cleanup
cv2.destroyAllWindows()
# vs.stop()
| [
"akshjain1512@gmail.com"
] | akshjain1512@gmail.com |
01de268c85690257309bbc1dc8619ec497183bda | cb7e7b48c18b1024e597f4d445307f130661e249 | /portafolio/core/migrations/0005_auto_20200805_1518.py | b79b4bd5705960978dbb785dea752d37513c5b2b | [
"Apache-2.0"
] | permissive | jhonfmg7/portafolioDjangoV2 | 2294c093070684c3f863d2433cea7d54eb88b87b | f8fe158b97a79c148b062ae0410ef2c2d5938b8f | refs/heads/main | 2022-12-25T20:44:47.500158 | 2020-10-08T14:58:25 | 2020-10-08T14:58:25 | 301,833,115 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 896 | py | # Generated by Django 3.0.5 on 2020-08-05 15:18
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    # Auto-generated by `manage.py makemigrations` (see header comment):
    # adds `created`/`updated` timestamps to the Career model and updates
    # its Meta options.  Applied migrations should not be hand-edited.

    dependencies = [
        ('core', '0004_career_contact'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='career',
            options={'ordering': ['-created'], 'verbose_name': 'Carrera', 'verbose_name_plural': 'Carrera'},
        ),
        migrations.AddField(
            model_name='career',
            name='created',
            field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now, verbose_name='Fecha de creación'),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='career',
            name='updated',
            field=models.DateTimeField(auto_now=True, verbose_name='Fecha de edición'),
        ),
    ]
| [
"noreply@github.com"
] | noreply@github.com |
4b650a3f95def49c325ad1b3bd210643a3f114fd | 1977e882650036b98058c47cf2eae4f33d131a18 | /service/chatbot.py | c6c61852bbd72ab83f0a59b9cc244d860ce22771 | [] | no_license | 4binas/aifo-client | 7412772722da738f7c256aa0368d5b05c4b3aa8f | 50881b4987adf1999c99cc482d13f5932dd20a3d | refs/heads/main | 2023-09-03T20:31:02.132679 | 2021-10-28T14:06:14 | 2021-10-28T14:06:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,154 | py | from google.cloud import dialogflow
class Chatbot:
    """Thin wrapper around a Dialogflow session.

    Holds the project/session/language identifiers and turns a user
    message into a (reply text, cards) pair via detect_intent.
    """

    def __init__(self, project_id, session_id, language_code, bot_name):
        self.project_id = project_id
        self.session_id = session_id
        self.language_code = language_code
        self.bot_name = bot_name

    def __detect_intent_texts__(self, text):
        """Send `text` to Dialogflow and return the raw DetectIntentResponse."""
        session_client = dialogflow.SessionsClient()
        session = session_client.session_path(self.project_id, self.session_id)
        text_input = dialogflow.TextInput(text=text, language_code=self.language_code)
        query_input = dialogflow.QueryInput(text=text_input)
        response = session_client.detect_intent(
            request={"session": session, "query_input": query_input}
        )
        return response

    def get_response(self, msg):
        """Return (reply_text, cards) for the user message `msg`.

        reply_text falls back to "Sorry, can you repeat that?" when the
        agent supplied no text; cards collects any card messages.
        """
        response = self.__detect_intent_texts__(msg)
        result = response.query_result
        cards = []
        if len(result.fulfillment_messages) > 1:
            for message in result.fulfillment_messages:
                if message.card:
                    cards.append(message.card)
        text = result.fulfillment_text
        # BUG FIX: the original indexed fulfillment_messages[0] without
        # checking emptiness, raising IndexError when the agent returned
        # no fulfillment messages and no text.
        if not text and result.fulfillment_messages:
            texts = result.fulfillment_messages[0].text.text
            if texts:
                text = texts[0]
        return (text or "Sorry, can you repeat that?"), cards
"abinas.kuganathan@ost.ch"
] | abinas.kuganathan@ost.ch |
a0c0f6c768be56f2ff8f41663a411ade0ac6bcf8 | 7e5f16f4fb1a6f38d4ff83f2734ac9601fddad1f | /covid/urls.py | c6806f21cc4257db0a363d271402168003a1e3a6 | [] | no_license | ashikpydev/Covid-19-Result-Checker | 82a40349d655076c756ee6064eef090c4421035c | 5ff9a086dd49b9a6867806b9ef1a42097d9824dc | refs/heads/main | 2023-03-30T21:40:08.262076 | 2021-04-08T13:34:34 | 2021-04-08T13:34:34 | 355,917,735 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 796 | py | """covid URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
    # Django admin interface.
    path('admin/', admin.site.urls),
    # Every remaining route is delegated to the covid_app URLconf.
    path('', include('covid_app.urls'))
]
| [
"ashiqurrahman0506@gmail.com"
] | ashiqurrahman0506@gmail.com |
c911bd1bd284b7ad16e57eafd5e3a161a64f11bb | 931d68232aa1bb16bba4a4a598020f74ca5dab80 | /wagtailnhsukfrontend/settings/models.py | 32727c5dd4b42f6be874a2c8a6defc04313ae8db | [] | no_license | degerli/wagtail-nhsuk-frontend | ee11c864d37643c54f498172d11ac37c2c4179fa | 7429b862eb72decaa9a10a0efb6f1310504f8204 | refs/heads/master | 2020-04-23T04:02:56.342831 | 2019-02-15T13:37:53 | 2019-02-15T13:37:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,102 | py | from django.db import models
from modelcluster.fields import ParentalKey
from modelcluster.models import ClusterableModel
from wagtail.admin.edit_handlers import (
FieldPanel,
InlinePanel,
MultiFieldPanel,
PageChooserPanel,
)
from wagtail.contrib.settings.models import BaseSetting, register_setting
from wagtail.core.models import Orderable
@register_setting
class HeaderSettings(ClusterableModel, BaseSetting):
    """Per-site header configuration editable in the Wagtail admin:
    service name/link, logo link and aria label, search visibility, and
    the inline navigation links (see NavigationLink)."""

    # Optional service name displayed in the header.
    service_name = models.CharField(max_length=255, blank=True)
    # Presumably switches to the long-form service-name rendering --
    # confirm against the header templates.
    service_long_name = models.BooleanField(default=False)
    # Page the service name links to.
    service_link = models.ForeignKey(
        'wagtailcore.Page',
        null=True,
        blank=True,
        on_delete=models.SET_NULL,
        related_name='service_link',
    )
    # Presumably selects the transactional header variant -- confirm.
    transactional = models.BooleanField(default=False)
    # Page the NHS logo links to.
    logo_link = models.ForeignKey(
        'wagtailcore.Page',
        null=True,
        blank=True,
        on_delete=models.SET_NULL,
        related_name='+',
    )
    logo_aria = models.CharField(
        max_length=255,
        null=True,
        blank=True,
        help_text="Aria label override for the NHS logo."
    )
    # Whether to render the header search box.
    show_search = models.BooleanField(default=False)

    # Wagtail admin edit-form layout.
    panels = [
        MultiFieldPanel([
            FieldPanel('service_name'),
            FieldPanel('service_long_name'),
            PageChooserPanel('service_link'),
            FieldPanel('transactional'),
        ], heading="Service"),
        MultiFieldPanel([
            PageChooserPanel('logo_link'),
            FieldPanel('logo_aria'),
        ], heading="Logo"),
        FieldPanel('show_search'),
        InlinePanel('navigation_links', heading="Navigation"),
    ]
class NavigationLink(Orderable):
    """One orderable header navigation link (label plus target page),
    attached to HeaderSettings via the `navigation_links` relation."""

    setting = ParentalKey(
        HeaderSettings,
        on_delete=models.CASCADE,
        related_name='navigation_links',
    )
    # Text shown for the link.
    label = models.CharField(max_length=255)
    # Target page; the link survives page deletion with page set to NULL.
    page = models.ForeignKey(
        'wagtailcore.Page',
        null=True,
        on_delete=models.SET_NULL,
        related_name='+',
    )

    panels = [
        FieldPanel('label'),
        PageChooserPanel('page'),
    ]
| [
"mike@mikemonteith.com"
] | mike@mikemonteith.com |
d9455b27c4d82c62c9830d5b109c470332bd951b | 57a7d6b777600d6d45c5a818f00d49731fefd5de | /portfolio/venv/bin/chardetect | 6788e4051b39066df361221958e5659e1402baee | [] | no_license | bharismendy/portfolio_project | fe95b945ec2d1198509121afc9ccad6c794022eb | 518b6ddb2fd487a338c6284776d2ab8060cd9f9b | refs/heads/master | 2021-11-19T23:05:31.297492 | 2019-04-23T07:05:44 | 2019-04-23T07:05:44 | 154,189,446 | 0 | 0 | null | 2021-09-08T00:49:32 | 2018-10-22T17:49:43 | Python | UTF-8 | Python | false | false | 270 | #!/home/bharismendy/PycharmProjects/portfolio/venv/bin/python3.6
# -*- coding: utf-8 -*-
import re
import sys
from chardet.cli.chardetect import main
if __name__ == '__main__':
    # setuptools console scripts on Windows are named "<name>-script.pyw"
    # or "<name>.exe"; strip that suffix so the CLI reports a clean
    # program name, then delegate to chardet's entry point.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"brice.harismendy@etud.univ-angers.fr"
] | brice.harismendy@etud.univ-angers.fr | |
6d8ce22c751efd861956be268dafc8c2f00f3fbd | c0acf82a18b8e90cd38afedb02e45e53425a067e | /pyecharts/custom/overlap.py | e2cdd57622347e115a2fe03fcdc86c1ef34f05fd | [
"MIT"
] | permissive | caideyang/pyecharts | 66b61d0400ea15b25ef7fb90f7305647343eea3a | c13f2fecece566359b2c881705bf96337c42ce40 | refs/heads/master | 2021-01-22T13:48:00.474761 | 2017-08-18T07:09:53 | 2017-08-18T07:09:53 | 100,685,801 | 1 | 0 | null | 2017-08-18T07:31:13 | 2017-08-18T07:31:13 | null | UTF-8 | Python | false | false | 1,609 | py | #!/usr/bin/env python
# coding=utf-8
class Overlap(object):
    """Draw several chart instances on one canvas by merging each later
    chart's legend entries and series into the first chart added."""

    def __init__(self):
        # Base chart; every subsequently added chart is merged into it.
        self._chart = None

    def add(self, chart):
        """Merge ``chart`` into the overlap (the first call sets the base).

        :param chart:
            chart instance
        """
        if self._chart is None:
            self._chart = chart
            return
        self.__custom(self.__get_series(chart))

    def __get_series(self, chart):
        """Return (legend names, series list) extracted from ``chart``.

        :param chart:
            chart instance
        """
        option = chart._option
        return option.get('legend')[0].get('data'), option.get('series')

    def __custom(self, series):
        """Append another chart's legend names and series to the base chart.

        :param series:
            (names, series) tuple as produced by __get_series
        """
        names, data = series
        self._chart._option.get('legend')[0].get('data').extend(names)
        self._chart._option.get('series').extend(data)

    def render(self, path="render.html"):
        """Render the merged chart to an HTML file at ``path``."""
        self._chart.render(path)

    def render_embed(self):
        """Return the embeddable HTML snippet of the merged chart."""
        return self._chart.render_embed()

    def show_config(self):
        """Pretty-print the merged chart's option dict."""
        import pprint
        return pprint.pprint(self._chart._option)

    @property
    def chart(self):
        """The underlying base chart instance (None before any add)."""
        return self._chart

    def _repr_html_(self):
        """Jupyter display hook delegating to the base chart."""
        return self._chart._repr_html_()
| [
"chenjiandongx@qq.com"
] | chenjiandongx@qq.com |
4bfdcf8367c14c6fefe61c847447c9e02b91a619 | 1a59289401721f1b2fc856e57661f1eb3bd07bfd | /classification/hyperparameter_tuner.py | b1a8d63d09bb50b0dc906a4b93082d614b14f18a | [] | no_license | babakaskari/KommuneProject | 33f84e738d98f46c1d6d31709af2df89ce69d78e | 11487abd21c263720752a1ef94cd492b9acbbea3 | refs/heads/master | 2023-03-05T07:31:41.974199 | 2021-02-14T09:21:15 | 2021-02-14T09:21:15 | 328,130,421 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,705 | py | import numpy as np
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import GridSearchCV
from tqdm import tqdm
def GridSearchCVFunc(kernel, x_train, y_train, param):
    """Run an exhaustive 3-fold grid search over `param` for `kernel`.

    Fits on (x_train, y_train) -- y_train is expected to be a pandas
    object (``.values.ravel()`` is used) -- then prints and returns the
    best hyperparameter dict found.
    """
    searcher = GridSearchCV(estimator=kernel, param_grid=param,
                            cv=3, n_jobs=-1, verbose=4)
    searcher.fit(x_train, y_train.values.ravel())
    print("Best hyperparameters : ", searcher.best_params_)
    return searcher.best_params_
def rfr_hyperparameter_tuner(model, x_train, y_train):
    """Grid-search the main random-forest regressor hyperparameters."""
    grid = {
        "n_estimators": np.arange(100, 1000, 100),
        "max_depth": [10, 20, 30, 40, 50, 60, 70, 80, 90, 100, None],
        "min_samples_split": [2, 4, 6, 8, 10],
        "min_samples_leaf": [1, 2, 4, 6, 8, 10],
        "max_features": ['auto', 'sqrt', 'log2', None],
        'max_samples': [10, 20, 30, 40, 50, 60, 70, 80, 90, 100, None],
        "bootstrap": [True, False],
    }
    return GridSearchCVFunc(model, x_train, y_train, grid)
def svm_hyperparameter_tuner(model, x_train, y_train):
    """Grid-search SVM/SVR kernel, degree, gamma, coef0 and epsilon."""
    grid = {
        "kernel": ["linear", "rbf", "poly"],
        "degree": [2, 3, 4, 5],
        "gamma": ["scale", "auto"],
        "coef0": [0.0, 0.5, 1.0, 1.5, 2.0],
        "epsilon": [0.05, 0.1, 0.2, 0.3, 0.5],
    }
    return GridSearchCVFunc(model, x_train, y_train, grid)
def knn_hyperparameter_tuner(model, x_train, y_train):
    """Grid-search the k-nearest-neighbours algorithm settings."""
    grid = {
        "algorithm": ["kd_tree", "auto", "ball_tree", "brute"],
        "leaf_size": np.arange(10, 30, 10),
        "n_neighbors": np.arange(2, 4, 5),
        "n_jobs": [-1],
    }
    return GridSearchCVFunc(model, x_train, y_train, grid)
def ridge_hyperparameter_tuner(model, x_train, y_train):
    """Grid-search ridge-regression regularisation and fitting options."""
    grid = {
        "alpha": [0.05, 0.1, 0.2, 0.3, 0.5],
        "fit_intercept": [True, False],
        "normalize": [True, False],
        "max_iter": [2000],
    }
    return GridSearchCVFunc(model, x_train, y_train, grid)
def xgb_hyperparameter_tuner(model, x_train, y_train):
    """Grid-search the main XGBoost booster hyperparameters."""
    grid = {
        "n_estimators": np.arange(10, 30, 10),
        'max_depth': np.arange(3, 15, 1),
        'learning_rate': [0.01, 0.03, 0.05, 0.1, 0.3, 0.4, 0.5, 0.6, 0.7],
        'booster': ["gbtree", "gblinear", "dart"],
        'eval_metric': ["map","error"],
        'subsample': [0.9, 1.0],
        'colsample_bytree': [0.9, 1.0],
        'n_jobs': [-1],
        'verbosity': [0],
    }
    return GridSearchCVFunc(model, x_train, y_train, grid)
def adaboost_hyperparameter_tuner(model, x_train, y_train):
    """Grid-search AdaBoost estimator count, learning rate and loss."""
    grid = {
        "n_estimators": np.arange(10, 30, 10),
        'learning_rate': [0.01, 0.03, 0.05, 0.1, 0.3, 0.4, 0.5, 0.6, 0.7],
        'loss' : ['linear', 'square', 'exponential'],
    }
    return GridSearchCVFunc(model, x_train, y_train, grid)
def et_hyperparameter_tuner(model, x_train, y_train):
    """Grid-search the main extra-trees regressor hyperparameters."""
    grid = {
        "n_estimators": np.arange(100, 1000, 100),
        "criterion": ['mse', 'mae'],
        "max_depth": [10, 20, 30, 40, 50, 60, 70, 80, 90, 100, None],
        "max_features": ['auto', 'sqrt'],
        "bootstrap": [True, False],
        "n_jobs": [-1],
    }
    return GridSearchCVFunc(model, x_train, y_train, grid)
"Siamak"
] | Siamak |
826f60594002015e659cc80aca283bfe601d0b98 | 0c958692bb3abf99ecbd03bd75a605b202d4da5a | /CRAB/MuNu/synch/2014ocbr24/synchThree.py | 0bc6084ab362fd99b029e74554cc6bfc9b96b5f1 | [] | no_license | tmrhombus/UWAnalysis | a9ed18a7ba8726522c8d98fbdc018c77d80c5cc5 | eb9e0794e1b847f36c660a55d3631176a39148e2 | refs/heads/master | 2021-01-23T20:46:41.578341 | 2017-05-01T08:26:57 | 2017-05-01T08:26:57 | 10,620,824 | 0 | 0 | null | 2014-10-21T11:21:16 | 2013-06-11T12:19:43 | Python | UTF-8 | Python | false | false | 4,427 | py | cut = 'C1_data_2014ocbr23_m12e10_smrGenNu_clnMu'
#cut = 'C3_tt_2014ocbr23_m12e10_smrGenNu_clnMu'
# Per-analyser event lists (one event number per line) for selection `cut`.
andreas_events = set([line.strip() for line in open('./comp/%s_and.txt'%(cut))])
jelenas_events = set([line.strip() for line in open('./comp/%s_jel.txt'%(cut))])
toms_events = set([line.strip() for line in open('./comp/%s_tom.txt'%(cut))])
# Events found by exactly one analyser.
just_andrea = []
just_jelena = []
just_tom = []
# Events one analyser has that another lacks (t=Tom, j=Jelena, a=Andrea).
t_noJ = []
t_noA = []
j_noT = []
j_noA = []
a_noT = []
a_noJ = []
# Pairwise and three-way overlaps.
t_j = []
a_t = []
j_a = []
t_j_a = []
#runover = set([line.strip() for line in open('./comp/badevents.txt')])
# Union of all disagreeing events, collected for re-running / inspection.
runover = set([])
for toms_event in toms_events:
tj = False
ta = False
for jelenas_event in jelenas_events:
if long(toms_event) == long(jelenas_event):
tj = True
break
for andreas_event in andreas_events:
if long(toms_event) == long(andreas_event):
ta = True
break
if tj == False and ta == False:
just_tom.append(toms_event)
runover.add(toms_event)
if tj == False:
t_noJ.append(toms_event)
runover.add(toms_event)
if ta == False:
t_noA.append(toms_event)
runover.add(toms_event)
if tj == True and ta == True: t_j_a.append(toms_event)
if tj == True: t_j.append(toms_event)
if ta == True: a_t.append(toms_event)
for andreas_event in andreas_events:
at = False
aj = False
for toms_event in toms_events:
if long(andreas_event) == long(toms_event):
at = True
break
for jelenas_event in jelenas_events:
if long(andreas_event) == long(jelenas_event):
aj = True
break
if at == False and aj == False:
just_andrea.append(andreas_event)
runover.add(andreas_event)
if at == False:
a_noT.append(andreas_event)
runover.add(andreas_event)
if aj == False:
a_noJ.append(andreas_event)
runover.add(andreas_event)
if aj == True: j_a.append(andreas_event)
for jelenas_event in jelenas_events:
ja = False
jt = False
for andreas_event in andreas_events:
if long(andreas_event) == long(jelenas_event):
ja = True
break
for toms_event in toms_events:
if long(toms_event) == long(jelenas_event):
jt = True
if ja == False and jt == False:
just_jelena.append(jelenas_event)
runover.add(jelenas_event)
if ja == False:
j_noA.append(jelenas_event)
runover.add(jelenas_event)
if jt == False:
j_noT.append(jelenas_event)
runover.add(jelenas_event)
print( "http://www.hep.wisc.edu/~tperry/wbb/synch/2014ocbr24/%s_comp.log"%(cut))
log = open('/afs/hep.wisc.edu/home/tperry/www/wbb/synch/2014ocbr24/%s_comp.log'%(cut),'w')
log.write("Andreas Events: %s\n"%len(andreas_events))
log.write("Jelenas Events: %s\n"%len(jelenas_events))
log.write("Toms Events: %s\n"%len(toms_events ))
log.write("All Three: %s\n\n"%len(t_j_a))
log.write(" Tom Has, Jelena Doesn't (%s)\n"%len(t_noJ))
for e in t_noJ: log.write(" "+e)
log.write("\n\n")
log.write(" Jelena Has, Tom Doesn't (%s)\n"%len(j_noT))
for e in j_noT: log.write(" "+e)
log.write("\n\n")
log.write("====================================================================\n\n")
log.write(" Tom Has, Andrea Doesn't (%s)\n"%len(t_noA))
for e in t_noA: log.write(" "+e)
log.write("\n\n")
log.write(" Andrea Has, Tom Doesn't (%s)\n"%len(a_noT))
for e in a_noT: log.write(" "+e)
log.write("\n\n")
log.write("====================================================================\n\n")
log.write(" Jelena Has, Andrea Doesn't (%s)\n"%len(j_noA))
for e in j_noA: log.write(" "+e)
log.write("\n\n")
log.write(" Andrea Has, Jelena Doesn't (%s)\n"%len(a_noJ))
for e in a_noJ: log.write(" "+e)
log.write("\n\n")
log.write("We All Have %s\n"%len(t_j_a))
for e in t_j_a: log.write(e+" ")
log.write("\n\n")
log.write("Tom Has %s\n"%len(toms_events))
for e in toms_events: log.write(e+" ")
log.write("\n\n")
log.write("Jelena Has %s\n"%len(jelenas_events))
for e in jelenas_events: log.write(e+" ")
log.write("\n\n")
log.write("Andreas Has %s\n"%len(andreas_events))
for e in andreas_events: log.write(e+" ")
log.write("\n\n")
log.write("Run Over\n")
log.write("eventsToProcess = cms.untracked.VEventRange(")
bades = []
for e in set(runover): bades.append("'1:%s'"%e)
badlist = ",".join(bades)
log.write("%s)"%(badlist))
log.write("\n\n")
log.write("eventsToProcess = cms.untracked.VEventRange(")
badet = []
for e in set(runover): badet.append("'1:%s-1:%s'"%(e,e))
badliss = ",".join(badet)
log.write("%s)"%(badliss))
log.write("\n\n")
#lob = open('./comp/badevents.txt','a')
#for e in set(runover): lob.write("%s\n"%(e))
| [
"tperry@cern.ch"
] | tperry@cern.ch |
52980438ee437a5977680307d4b13bd673f3b1a3 | 6d7a67be5c2aa1bcebdcfd5bec855c0172c8f01f | /convert_weight.py | 55566963e7439f9fb4e9649bdd289f5114337916 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause",
"Apache-2.0"
] | permissive | JWHennessey/stylegan2-pytorch-1 | 19184e1713b9bcfce6404fb6d19478f1dbcc56ec | 88852e3695d3ffd9281787690c3f8796dc1e225a | refs/heads/master | 2020-12-11T17:17:04.082956 | 2020-01-14T18:44:39 | 2020-01-14T18:44:39 | 233,909,977 | 0 | 0 | NOASSERTION | 2020-01-14T18:37:33 | 2020-01-14T18:37:32 | null | UTF-8 | Python | false | false | 6,849 | py | import argparse
import os
import sys
import pickle
import math
import torch
import numpy as np
from torchvision import utils
from model import Generator, Discriminator
def convert_modconv(vars, source_name, target_name, flip=False):
    """Translate one TF StyleGAN2 modulated-conv layer into PyTorch
    state-dict entries prefixed with *target_name*.

    Reads weight / mod_weight / mod_bias / noise_strength / bias from the
    TF variable collection *vars* (each entry is evaluated via
    ``.value().eval()``, so a TF session must be active).
    When *flip* is True the conv kernel is mirrored along its two spatial
    axes (dims 3 and 4 of the 5-D tensor).
    """
    weight = vars[source_name + '/weight'].value().eval()
    mod_weight = vars[source_name + '/mod_weight'].value().eval()
    mod_bias = vars[source_name + '/mod_bias'].value().eval()
    noise = vars[source_name + '/noise_strength'].value().eval()
    bias = vars[source_name + '/bias'].value().eval()
    dic = {
        # Kernel axes permuted (3, 2, 0, 1) and given a leading singleton axis
        # -> 5-D tensor. Assumes TF stores kernels as HWIO; TODO confirm.
        'conv.weight': np.expand_dims(weight.transpose((3, 2, 0, 1)), 0),
        'conv.modulation.weight': mod_weight.transpose((1, 0)),
        # The modulation bias is shifted by +1 on the PyTorch side.
        'conv.modulation.bias': mod_bias + 1,
        # Scalar noise strength wrapped into a 1-element array.
        'noise.weight': np.array([noise]),
        'activate.bias': bias,
    }
    dic_torch = {}
    for k, v in dic.items():
        dic_torch[target_name + '.' + k] = torch.from_numpy(v)
    if flip:
        dic_torch[target_name + '.conv.weight'] = torch.flip(
            dic_torch[target_name + '.conv.weight'], [3, 4]
        )
    return dic_torch
def convert_conv(vars, source_name, target_name, bias=True, start=0):
    """Translate a plain TF conv layer into PyTorch state-dict entries.

    The kernel goes to ``<target_name>.<start>.weight`` (axes permuted
    (3, 2, 0, 1)); when *bias* is True the bias goes to
    ``<target_name>.<start + 1>.bias``.
    """
    kernel = vars[source_name + '/weight'].value().eval().transpose((3, 2, 0, 1))
    converted = {f'{target_name}.{start}.weight': torch.from_numpy(kernel)}
    if bias:
        bias_values = vars[source_name + '/bias'].value().eval()
        converted[f'{target_name}.{start + 1}.bias'] = torch.from_numpy(bias_values)
    return converted
def convert_torgb(vars, source_name, target_name):
    """Translate a TF 'ToRGB' layer into PyTorch state-dict entries
    prefixed with *target_name*.

    Requires an active TF session (each variable is read via
    ``.value().eval()``).
    """
    weight = vars[source_name + '/weight'].value().eval()
    mod_weight = vars[source_name + '/mod_weight'].value().eval()
    mod_bias = vars[source_name + '/mod_bias'].value().eval()
    bias = vars[source_name + '/bias'].value().eval()
    dic = {
        # Kernel axes permuted (3, 2, 0, 1) plus a leading singleton axis.
        'conv.weight': np.expand_dims(weight.transpose((3, 2, 0, 1)), 0),
        'conv.modulation.weight': mod_weight.transpose((1, 0)),
        # Modulation bias shifted by +1 on the PyTorch side.
        'conv.modulation.bias': mod_bias + 1,
        # Reshaped so the 3-channel RGB bias broadcasts over (N, 3, H, W).
        'bias': bias.reshape((1, 3, 1, 1)),
    }
    dic_torch = {}
    for k, v in dic.items():
        dic_torch[target_name + '.' + k] = torch.from_numpy(v)
    return dic_torch
def convert_dense(vars, source_name, target_name):
    """Translate a TF dense (fully-connected) layer into PyTorch
    state-dict entries ``<target_name>.weight`` / ``<target_name>.bias``.

    The weight matrix is transposed to PyTorch's (out, in) layout.
    """
    out = {}
    out[target_name + '.weight'] = torch.from_numpy(
        vars[source_name + '/weight'].value().eval().transpose((1, 0))
    )
    out[target_name + '.bias'] = torch.from_numpy(
        vars[source_name + '/bias'].value().eval()
    )
    return out
def update(state_dict, new):
    """Overwrite entries of *state_dict* in place with the tensors in *new*.

    Raises KeyError if a key of *new* is absent from *state_dict*, and
    ValueError if the replacement tensor's shape differs from the current
    one.
    """
    for key, tensor in new.items():
        try:
            current = state_dict[key]
        except KeyError:
            raise KeyError(key + ' is not found') from None
        if tensor.shape != current.shape:
            raise ValueError(f'Shape mismatch: {tensor.shape} vs {current.shape}')
        state_dict[key] = tensor
def discriminator_fill_statedict(statedict, vars, size):
    """Fill a PyTorch Discriminator state dict from the TF variable
    collection *vars* for an output resolution of *size* x *size*.

    Walks the resolutions from *size* down to 8, converting each block's
    Conv0 / Conv1_down / Skip layers, then the final 4x4 conv and the two
    dense output layers. Returns the (mutated) *statedict*.
    """
    log_size = int(math.log(size, 2))
    update(statedict, convert_conv(vars, f'{size}x{size}/FromRGB', 'convs.0'))
    conv_i = 1
    # One residual block per resolution, highest resolution first.
    for i in range(log_size - 2, 0, -1):
        reso = 4 * 2 ** i
        update(statedict, convert_conv(vars, f'{reso}x{reso}/Conv0', f'convs.{conv_i}.conv1'))
        update(statedict, convert_conv(vars, f'{reso}x{reso}/Conv1_down', f'convs.{conv_i}.conv2', start=1))
        # Skip connection has no bias.
        update(statedict, convert_conv(vars, f'{reso}x{reso}/Skip', f'convs.{conv_i}.skip', start=1, bias=False))
        conv_i += 1
    update(statedict, convert_conv(vars, f'4x4/Conv', 'final_conv'))
    update(statedict, convert_dense(vars, f'4x4/Dense0', 'final_linear.0'))
    update(statedict, convert_dense(vars, f'Output', 'final_linear.1'))
    return statedict
def fill_statedict(state_dict, vars, size):
    """Fill a PyTorch Generator state dict from the TF variable collection
    *vars* for an output resolution of *size* x *size*.

    Converts the 8 mapping-network dense layers, the learned constant
    input, every ToRGB layer, and all modulated convolutions (upsampling
    Conv0_up kernels are flipped). Returns the (mutated) *state_dict*.
    """
    log_size = int(math.log(size, 2))
    # 8-layer mapping network: G_mapping/Dense{i} -> style.{i+1}.
    for i in range(8):
        update(state_dict, convert_dense(vars, f'G_mapping/Dense{i}', f'style.{i + 1}'))
    # Learned constant input of the synthesis network.
    update(
        state_dict,
        {
            'input.input': torch.from_numpy(
                vars['G_synthesis/4x4/Const/const'].value().eval()
            )
        },
    )
    update(state_dict, convert_torgb(vars, 'G_synthesis/4x4/ToRGB', 'to_rgb1'))
    # One ToRGB per resolution above 4x4.
    for i in range(log_size - 2):
        reso = 4 * 2 ** (i + 1)
        update(
            state_dict,
            convert_torgb(vars, f'G_synthesis/{reso}x{reso}/ToRGB', f'to_rgbs.{i}'),
        )
    update(state_dict, convert_modconv(vars, 'G_synthesis/4x4/Conv', 'conv1'))
    conv_i = 0
    # Two modulated convs per resolution: Conv0_up (flipped) and Conv1.
    for i in range(log_size - 2):
        reso = 4 * 2 ** (i + 1)
        update(
            state_dict,
            convert_modconv(
                vars,
                f'G_synthesis/{reso}x{reso}/Conv0_up',
                f'convs.{conv_i}',
                flip=True,
            ),
        )
        update(
            state_dict,
            convert_modconv(
                vars, f'G_synthesis/{reso}x{reso}/Conv1', f'convs.{conv_i + 1}'
            ),
        )
        conv_i += 2
    return state_dict
if __name__ == '__main__':
    # Convert an official StyleGAN2 TF pickle into a PyTorch checkpoint,
    # then render a sample grid with the converted weights.
    # NOTE(review): hard-codes CUDA; fails on CPU-only machines.
    device = 'cuda'
    parser = argparse.ArgumentParser()
    # --repo: path to the official StyleGAN2 repo (provides dnnlib.tflib).
    parser.add_argument('--repo', type=str, required=True)
    # --gen / --disc: also convert the training generator / discriminator.
    parser.add_argument('--gen', action='store_true')
    parser.add_argument('--disc', action='store_true')
    parser.add_argument('path', metavar='PATH')
    args = parser.parse_args()
    sys.path.append(args.repo)
    from dnnlib import tflib
    tflib.init_tf()
    # The TF pickle holds (generator, discriminator, g_ema).
    with open(args.path, 'rb') as f:
        generator, discriminator, g_ema = pickle.load(f)
    size = g_ema.output_shape[2]
    g = Generator(size, 512, 8)
    state_dict = g.state_dict()
    state_dict = fill_statedict(state_dict, g_ema.vars, size)
    g.load_state_dict(state_dict)
    # Truncation center used for sampling below.
    latent_avg = torch.from_numpy(g_ema.vars['dlatent_avg'].value().eval())
    ckpt = {'g_ema': state_dict, 'latent_avg': latent_avg}
    if args.gen:
        g_train = Generator(size, 512, 8)
        g_train_state = g_train.state_dict()
        g_train_state = fill_statedict(g_train_state, generator.vars, size)
        ckpt['g'] = g_train_state
    if args.disc:
        disc = Discriminator(size)
        d_state = disc.state_dict()
        d_state = discriminator_fill_statedict(d_state, discriminator.vars, size)
        ckpt['d'] = d_state
    # Checkpoint is named after the input pickle.
    name = os.path.splitext(os.path.basename(args.path))[0]
    torch.save(ckpt, name + '.pt')
    # Sample count scaled down for large resolutions (memory).
    batch_size = {256: 16, 512: 9, 1024: 4}
    n_sample = batch_size.get(size, 25)
    g = g.to(device)
    x = torch.randn(n_sample, 512).to(device)
    with torch.no_grad():
        img, _ = g([x], truncation=0.5, truncation_latent=latent_avg.to(device))
    utils.save_image(
        img, name + '.png', nrow=int(n_sample ** 0.5), normalize=True, range=(-1, 1)
    )
| [
"kim.seonghyeon@snu.ac.kr"
] | kim.seonghyeon@snu.ac.kr |
4dd92271a56fb87fccbb08a8546c583002f07258 | b5dfc657e395d12f1ea40a12a92daf423bcabcc7 | /pro1/pro1/wsgi.py | 9f0501719b5866ca4c2e5555814f95e7c4ab2736 | [] | no_license | ardy2811/webLearn | cc86d7f2c60d9009fa7bbef0625c14a51cf0b17f | 97aa18c61f023a544d613254520f7ee5b0a94c7c | refs/heads/main | 2023-05-25T07:27:57.332887 | 2021-06-04T23:44:31 | 2021-06-04T23:44:31 | 373,018,611 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 385 | py | """
WSGI config for pro1 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project's settings module before the app is created.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'pro1.settings')
# Module-level WSGI callable that servers (gunicorn, uWSGI, ...) look up.
application = get_wsgi_application()
| [
"zs@senduo.cn"
] | zs@senduo.cn |
254e45f5a29c2e097da736ed48419948ea826c21 | 377cb0a225a80752b837d0679f95f34d8aa49afe | /Python/list.py | 5511183cbf24fecd09d23334f352af5c28c193f2 | [
"MIT"
] | permissive | nuvish04/Basic-Programming | 568eed1ac4b7829639aeed3a599142b0e0ddae8f | a7ba600f71847974af4b84d1407613a045819edd | refs/heads/main | 2023-04-16T18:02:52.103757 | 2021-04-29T15:23:29 | 2021-04-29T15:23:29 | 362,538,285 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 249 | py | b=["India",4,5,"Hi",0.2]
# Demo of common list operations on the list b defined above; each step
# prints the list so the effect is visible.
print(b)
b[2]=10              # index assignment replaces the element in place
print(b)
b.append("Python")   # append adds to the end
print(b)
b.insert(2,"Hello")  # insert at index 2, shifting later elements right
print(b)
b.remove(0.2)        # remove deletes the first matching value
print(b)
print("Length=",len(b))
b.append(4)
print("Count =",b.count(4))  # count of occurrences of 4
b.reverse()          # in-place reversal
print(b)
b.pop(0)             # pop by index removes (and returns) that element
print(b)
"noreply@github.com"
] | noreply@github.com |
b57aa04bb1157d20423de65671bee218d8715f6d | 730b92e439dbb013950b8bbf417cfde1bb40f8b9 | /Python/Add-Binary.py | 8b8be13ae529418ef8672901ffeb760e078c1eb4 | [] | no_license | yuede/Lintcode | fdbca5984c2860c8b532b5f4d99bce400b0b26d0 | d40b7ca1c03af7005cc78b26b877a769ca0ab723 | refs/heads/master | 2021-01-13T04:14:32.754210 | 2015-08-22T13:15:54 | 2015-08-22T13:15:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 833 | py | class Solution:
# @param {string} a a number
# @param {string} b a number
# @return {string} the result
def addBinary(self, a, b):
# Write your code here
pa = len(a) - 1
pb = len(b) - 1
s = ""
d = 0
while pa >= 0 and pb >= 0:
cur = d + int(a[pa]) + int(b[pb])
pa -= 1
pb -= 1
s += str(cur % 2)
d = cur / 2
while pa >= 0:
cur = d + int(a[pa])
pa -= 1
s += str(cur % 2)
d = cur / 2
while pb >= 0:
cur = d + int(b[pb])
pb -= 1
s += str(cur % 2)
d = cur / 2
if d > 0:
s += str(d)
rs = ""
for i in range(len(s)):
rs += s[len(s) - 1 - i]
return rs | [
"jiangyi0425@gmail.com"
] | jiangyi0425@gmail.com |
f8d8a6731dc30588edc7fe302e1ec0607a2fd18f | 6e0a07de34b00ac7a9dbe8ffd569b8a30dc152d6 | /Django_/mysite/films/migrations/0001_initial.py | ba576504603e0663ceb2e658583080abd71a2503 | [] | no_license | Stianbot/INFO215 | 9fb63449eea736fe4ed2ac19cc31743efddbac79 | 98a3172d201f4a9770e96a55b009f95e9f5e925d | refs/heads/main | 2023-04-23T02:19:39.671710 | 2021-05-04T13:35:12 | 2021-05-04T13:35:12 | 331,590,861 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 584 | py | # Generated by Django 3.1.6 on 2021-02-02 16:09
from django.db import migrations, models
# NOTE: auto-generated Django migration -- schema changes should normally be
# made by adding a new migration rather than editing this one.
class Migration(migrations.Migration):
    """Initial migration: creates the Film table (title, length, year)."""

    # First migration of this app; no prior migration state exists.
    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Film',
            fields=[
                # Auto-increment integer primary key.
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=50)),
                ('length', models.IntegerField()),
                ('year', models.IntegerField()),
            ],
        ),
    ]
| [
"stian.botnevik@student.uib.no"
] | stian.botnevik@student.uib.no |
159b6bec53fe5f2e9e41e8b51080ef183804251d | f9abef69eec532e591a1503ec7c0b93912e02bcd | /NAMD/2_2nm/F19/1eV/run_step4.py | 745810c9adedfaeef4fd1781719b53eaaedcd6e7 | [] | no_license | MahsaJabbar/Project_NBRA-BL-NA-MD_SiNCs | a9c75a5614c0ce7e4dd9c3d446c98cd254ed627f | 42e8cf4c36f297028aa6b9db7bca6eb706ab4e71 | refs/heads/master | 2020-12-26T08:35:10.004518 | 2020-01-19T16:48:38 | 2020-01-19T16:48:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,132 | py | #***********************************************************
# * Copyright (C) 2019-2020 Brendan A. Smith and Alexey V. Akimov
# * This file is distributed under the terms of the
# * GNU General Public License as published by the
# * Free Software Foundation; either version 3 of the
# * License, or (at your option) any later version.
# * http://www.gnu.org/copyleft/gpl.txt
#***********************************************************/
import os
import sys
import time
import math
# Fisrt, we add the location of the library to test to the PYTHON path
if sys.platform=="cygwin":
from cyglibra_core import *
elif sys.platform=="linux" or sys.platform=="linux2":
from liblibra_core import *
from libra_py import *
import libra_py.workflows.nbra.step4 as step4
import libra_py.workflows.nbra.lz as lz
from libra_py import units
import libra_py.workflows.nbra.step4 as step4
import libra_py.workflows.nbra.lz as lz
import libra_py.workflows.nbra.decoherence_times as decoherence_times
from libra_py import data_conv
from libra_py import fit
from libra_py import influence_spectrum as infsp
# ---- user-editable configuration -------------------------------------------
params = {}
# Excess excitation energy (eV) used below to pick the initial state index.
excess_eV = 1
#excess_eV = 2
#excess_eV = 3
#case = 1 # 2.2 nm Si NC - H capped
case = 2 # 2.2 nm Si NC - F capped
if case == 1:
    # Three MD sub-trajectories of the H-capped 2.2 nm Si nanocrystal.
    trajs = range(3)
    params["data_set_paths"] = []
    for itraj in trajs:
        params["data_set_paths"].append( "/budgetdata/academic/alexeyak/brendan/Si_QD/H_capped/2_2nm/step2/traj"+str(itraj)+"/res/" )
    # Initial-state index corresponding to the chosen excess energy.
    if excess_eV == 1:
        params["istate"] = 35
    if excess_eV == 2:
        params["istate"] = 134
    if excess_eV == 3:
        params["istate"] = 276
elif case == 2:
    # Three MD sub-trajectories of the F-capped 2.2 nm Si nanocrystal.
    trajs = range(3)
    params["data_set_paths"] = []
    for itraj in trajs:
        params["data_set_paths"].append( "/budgetdata/academic/alexeyak/brendan/Si_QD/F_capped/2_2nm/step2/traj"+str(itraj)+"/res/" )
    if excess_eV == 1:
        params["istate"] = 26
    if excess_eV == 2:
        params["istate"] = 109
    if excess_eV == 3:
        params["istate"] = 238
params["nstates"] = 300
params["nfiles"] = 4000 # Ex) # of Hvib files to read for a given traj
# Filename pattern of the vibronic Hamiltonian files: hvib_<N>_re / hvib_<N>_im.
params["Hvib_re_prefix"] = "hvib_"; params["Hvib_re_suffix"] = "_re"
params["Hvib_im_prefix"] = "hvib_"; params["Hvib_im_suffix"] = "_im"
# General simulation parameters
params["T"] = 300.0 # Temperature, K
params["dt"] = 1.0*units.fs2au # Nuclear dynamics integration timestep, in a.u.
params["nsteps"] = params["nfiles"] # The length of the NA-MD trajectory
params["init_times"] = [0] # starting points for sub-trajectories
params["do_output"] = True # request to print the results into a file
params["do_return"] = False # request to not store the data in the operating memory
# For running NA-MD
start = time.time()
Hvib = step4.get_Hvib2(params) # get the Hvib for all data sets, Hvib is a lists of lists
init_time = params["init_times"][0]
end = time.time()
print("Time to read / assemble data = ", end - start)
# Compute average decoherence time over entire trajectory
tau, rates = decoherence_times.decoherence_times_ave(Hvib, [init_time], params["nfiles"]-init_time, 0)
avg_deco = tau/units.fs2au
avg_deco.show_matrix()
#====================== Another case =====================
# Looking on the "SE" populations - Markov chain approach
params["target_space"] = 1
params["gap_min_exception"] = 0
params["Boltz_opt"] = 0 # Option for the frustrated hops acceptance/rejection
params["Boltz_opt_BL"] = 1 # Option to incorporate the frustrated hops into BL probabilities
params["outfile"] = "_out_Markov_.txt" # output file
params["evolve_Markov"] = True # Rely on the Markov approach
params["evolve_TSH"] = False # don't care about TSH
params["ntraj"] = 1 # how many stochastic trajectories
start = time.time()
res = lz.run(Hvib, params)
end = time.time()
print("Time to run Markov = ", end - start)
| [
"bsmith24@buffalo.edu"
] | bsmith24@buffalo.edu |
53b5e3a77e56432166513d5908048beb567e31a6 | a59a3335ceb27b807e9a8eeb95932c72eac214ac | /apps/units/forms.py | 70c8fdd73cb67633e188acba24e8d89b1931819d | [
"Beerware"
] | permissive | egaillot/zython | ef2a413f90323ed9f3e6c131913548eb0e0c9142 | cf7ebcdb5265012d9e2b9c0652befe33b3bb6fe0 | refs/heads/main | 2023-03-24T02:36:33.281166 | 2022-07-05T08:42:44 | 2022-07-05T08:42:44 | 193,670,642 | 0 | 0 | NOASSERTION | 2022-09-24T12:58:40 | 2019-06-25T08:50:22 | Python | UTF-8 | Python | false | false | 1,985 | py | from django import forms
from units import settings as app_settings
from units.helpers import get_convert_to_default, get_full_unit_name, get_converted_value
class BaseUnitForm(object):
    """Form mixin that converts unit-bearing fields between the user's
    preferred units (read from the session, keyed by
    ``CONTEXT_PREFIX + group``) and the project's default storage units.

    Subclasses declare ``unit_fields``: a mapping from a unit group name to
    the list of form field names measured in that group.
    """
    unit_fields = {}
    '''
    unit_fields = {
        'volume': ['amount', 'batch_size'],
        'weight': ['total_grain']
    }
    '''

    def get_unit_fieldnames(self):
        """Return the flat list of every field name declared in unit_fields."""
        fieldnames = []
        if self.unit_fields:
            for group, fields in self.unit_fields.items():
                fieldnames.extend(fields)
        return fieldnames

    def __init__(self, request, *args, **kwargs):
        """Store the request and convert initial values into the user's units.

        Fix: the original also computed ``get_full_unit_name(group,
        user_unit)`` and ``app_settings.UNITS.get(group)`` here but never
        used either result; the dead computations are removed. The short
        unit name is what ends up in each field's ``help_text``.
        """
        self.request = request
        super(BaseUnitForm, self).__init__(*args, **kwargs)
        prefix = app_settings.CONTEXT_PREFIX
        for group, fields in self.unit_fields.items():
            # The user's preferred unit for this group, e.g. 'l' or 'gal'.
            user_unit = self.request.session.get('%s%s' % (prefix, group))
            for f in fields:
                self.fields[f].help_text = user_unit
                if self.initial.get(f):
                    # Display stored (default-unit) values in the user's unit.
                    val = self.initial[f]
                    self.initial[f] = get_converted_value(val, user_unit, group, raw_output=True)

    def clean(self):
        """Convert submitted values back into the default storage units."""
        datas = self.cleaned_data
        prefix = app_settings.CONTEXT_PREFIX
        for group, fields in self.unit_fields.items():
            user_unit = self.request.session.get('%s%s' % (prefix, group))
            for f in fields:
                value = datas.get(f)
                # Only convert when both the value and the unit are known.
                if None not in (value, user_unit):
                    datas[f] = get_convert_to_default(value, user_unit, group, raw_output=True)
        return datas

    def save(self, *args, **kwargs):
        """Delegate to the cooperating form class's save()."""
        return super(BaseUnitForm, self).save(*args, **kwargs)
class UnitModelForm(BaseUnitForm, forms.ModelForm):
    """ModelForm variant of the unit-converting form mixin."""
    pass
class UnitForm(BaseUnitForm, forms.Form):
    """Plain Form variant of the unit-converting form mixin."""
    pass
| [
"martyn.clement@gmail.com"
] | martyn.clement@gmail.com |
b807ffc86a07e7f973f259a78f26ce037459f756 | d6450042f7f3e861ff41e2facaa7bf047bbc1fdf | /regression/py/SubmitJobs.py | 2f39637f6175346bc0eb8ef9d73b4bac247ad2d8 | [] | no_license | xiaoqiangzhao/imx_proj | d7ef79929e0f71c22e0bc8685b5b6b155c7ae533 | 0d4d4aa8adc6103414f9fe8c00484d438bc24b1b | refs/heads/master | 2020-04-06T10:02:10.110692 | 2016-09-10T02:46:25 | 2016-09-10T02:46:25 | 55,764,383 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,222 | py | import os
import sys
import json
from termcolor import colored
# termcolor colour used when printing each test status on the console.
info_color = {'PASS':'green','FAILED':'red','COMPILE_ERROR':'yellow','NO_ARG':'blue','MAKE_ERROR_OR_NOT_RUN':'magenta','RUNNING':'cyan'}
# Regression root: RunShell/<bom>/ run scripts are generated under here.
BASE_DIR = '/proj/imx6sll/design/workarea/b51816/JAY_HOME/my_git/repository/imx_proj/regression'
class SubmitJobs:
    """Drive LSF (bsub) regression runs for one BOM/workarea.

    Loads a JSON map of vector name -> test list, generates per-vector run
    shells under BASE_DIR/RunShell/<bom>/, and classifies the resulting
    simulation logs into PASS / FAILED / COMPILE_ERROR / RUNNING /
    NO_ARG / MAKE_ERROR_OR_NOT_RUN buckets.
    """

    def __init__(self,json_file,block='soc_tb'):
        """Load the vector/test dictionary and prepare RunShell/<bom>/."""
        self.json_file = json_file
        self.project = os.getenv('PROJECT','')
        self.block = block
        # Fall back to this script's own directory when DESIGN_DIR is unset.
        self.workdir = os.getenv('DESIGN_DIR',os.path.dirname(os.path.abspath(sys.argv[0])))
        self.bom = os.path.basename(self.workdir)
        self.cwp = BASE_DIR
        with open(self.json_file,'r') as j:
            self.testdict = json.load(j)
        self.vectors = self.testdict.keys()
        # Create RunShell/ and RunShell/<bom>/ on first use.
        if not os.path.isdir(os.path.join(self.cwp,'RunShell')):
            os.chdir(self.cwp)
            os.system("mkdir RunShell")
        if not os.path.isdir(os.path.join(self.cwp,'RunShell',self.bom)):
            os.chdir(os.path.join(self.cwp,'RunShell'))
            os.system("mkdir {bom_dir}".format(bom_dir=self.bom))
        self.shell_dir = os.path.join(self.cwp,'RunShell',self.bom)
        print("#################################################")
        print("project : "+self.project)
        print("workarea : "+self.workdir)
        print("json file : "+self.json_file)

    def show_vectors(self):
        """Print every vector name from the JSON plan, in green."""
        print(colored("\n".join(self.vectors),'green'))

    def analysis_log(self,vector):
        """Classify each test of *vector* by scanning the tail of its log.

        Writes the classification to <vector>.json in the vector's logfiles
        directory and returns it as a dict of status -> test list.
        NOTE: this chdir()s into the log directory as a side effect.
        """
        # Marker strings searched for in the log tail.
        pass_text = 'Simulation completed Successfully'
        fail_text = 'Simulation completed with Errors'
        compile_error_text = 'An error occurred during parsing'
        block_size = 2048 ## get last 2048 bytes text from log file
        log_dir = os.path.join(self.workdir,'testbench/blocks/soc_tb/vectors/{vector}/logfiles'.format(vector=vector))
        os.chdir(log_dir)
        if not vector in self.testdict:
            print("No Such Vector {vector}".format(vector=vector))
            sys.exit()
        with_arg_test = []
        no_arg_test = []
        make_error_or_not_run_test = []
        pass_test = []
        fail_test = []
        compile_error_test = []
        running_test = []
        # Split tests into those with an .arg stimulus file and those without.
        test_checked = self.check_testlist(self.testdict[vector],vector)
        with_arg_test = test_checked['test_with_arg']
        no_arg_test = test_checked['test_no_arg']
        for test in with_arg_test:
            start_seek = 0
            test_logfile = test['pat']+'verilog_{group}.log'.format(group=test['groups'])
            # Missing log file -> the make step failed or the test never ran.
            if not os.path.isfile(test_logfile):
                test['Status'] = "MAKE_ERROR_OR_NOT_RUN"
                make_error_or_not_run_test.append(test)
                continue
            test['review'] = test_logfile
            with open(test_logfile,'r') as log:
                log.seek(0,2) ## get the size
                log_size = log.tell()
                if log_size > block_size:
                    start_seek = log_size - block_size
                # Only the last block_size bytes are scanned for the verdict.
                log.seek(start_seek,0)
                log_text = log.read()
                if log_text.count(pass_text):
                    test['Status'] = 'PASS'
                    pass_test.append(test)
                elif log_text.count(fail_text):
                    test['Status'] = 'FAILED'
                    fail_test.append(test)
                elif log_text.count(compile_error_text):
                    test['Status'] = 'COMPILE_ERROR'
                    compile_error_test.append(test)
                else:
                    # No marker found yet -> assume the job is still running.
                    test['Status'] = 'RUNNING'
                    running_test.append(test)
        result = {'PASS':pass_test,'FAILED':fail_test,'COMPILE_ERROR':compile_error_test,'RUNNING':running_test,'NO_ARG':no_arg_test,'MAKE_ERROR_OR_NOT_RUN':make_error_or_not_run_test}
        # Cache the classification next to the logs for show_results().
        with open(vector+".json",'w') as f:
            json.dump(result,f)
        return result

    def show_results(self,vector,status='all'):
        """Pretty-print the cached <vector>.json classification.

        With a specific *status*, print only that bucket; otherwise print
        every non-empty bucket, colour-coded via info_color.
        """
        log_dir = os.path.join(self.workdir,'testbench/blocks/soc_tb/vectors/{vector}/logfiles'.format(vector=vector))
        os.chdir(log_dir)
        json_file = vector+".json"
        if not os.path.isfile(json_file):
            print("JSON file for this vector not available, you may use analysis_log to generate one")
            return
        with open(json_file,'r') as f:
            results = json.load(f)
        if status in results:
            # Banner like ****PASS**** followed by the pattern names.
            print(colored('{:*^40s}'.format(status),info_color[status]))
            for test in results[status]:
                print(colored(" {pattern}".format(pattern=test['pat']),info_color[status]))
        else:
            for i in results:
                if results[i]:
                    # format_text = format()
                    print(colored('{:*^40s}'.format(i),info_color[i]))
                    for test in results[i]:
                        print(colored(" {pattern}".format(pattern=test['pat']),info_color[i]))

    def check_testlist(self,testlist,vector):
        """Split *testlist* by whether the test's .arg stimulus file exists.

        Tests without an .arg file get Status='NO_ARG'. Returns a dict
        with keys 'test_with_arg' and 'test_no_arg'.
        """
        arg_dir = os.path.join(self.workdir,'testbench/blocks/soc_tb/vectors/{vector}/stimulus/arg'.format(vector=vector))
        test_no_arg = []
        test_with_arg = []
        for test in testlist:
            arg_file = os.path.join(arg_dir,'{test}.arg'.format(test=test['pat']))
            if os.path.isfile(arg_file):
                test_with_arg.append(test)
            else:
                test['Status'] = 'NO_ARG'
                test_no_arg.append(test)
        return {'test_with_arg':test_with_arg,'test_no_arg':test_no_arg}

    def generate_sh(self,vector,priority=999): #assume 999 the lowest priority
        """Write runsim_<vector>.sh: one bsub per test, plus an optional
        compile job (when the shared .so is missing) that the simulation
        jobs depend on, and a final mail notification job that depends on
        all simulations. Tests with priority above *priority* are skipped.
        """
        # os.chdir(os.path.join(self.cwp,'RunShell',self.bom))
        if not vector in self.testdict:
            print("No Such Vector {vector}".format(vector=vector))
            sys.exit()
        self.vector = vector
        test_checked = self.check_testlist(self.testdict[self.vector],self.vector)
        self.testlist = test_checked['test_with_arg']
        test_no_arg = test_checked['test_no_arg']
        if test_no_arg:
            print("#################################################")
            print("Below Patterns Not Found:")
            for test in test_no_arg:
                print(" "+test['pat'])
            print("#################################################")
        self.group = self.testlist[0]['groups'] ## take the first to sample group info
        self.priority = priority
        # Compiled simulation library; if present, the compile job is skipped.
        self.so = 'testbench/blocks/soc_tb/tool_data/verilog/{lib_dir}/INCA_libs/worklib/cfg1/config/_sv_export.so'.format(lib_dir=self.group+'.linux26_64bit')
        self.shell_script = os.path.join(self.shell_dir,"runsim_"+vector+".sh")
        compile_job_name = 'compile_'+self.group
        compile_job = "bsub -q priority -J {job_name} soc verilog -irun -block {block} -bc {bc} -session {session} -keeptemps -bbox -no_simulate \n".format(job_name='compile_'+self.group , block=self.block, bc=self.group, session=self.group)
        with open(self.shell_script,'w') as f:
            f.write("#!/bin/sh\n")
            if os.path.isfile(os.path.join(self.workdir,self.so)):
                depend_str = ""
            else:
                # No compiled library yet: emit the compile job and make every
                # simulation job wait for it via -w "ended(...)".
                f.write(compile_job)
                depend_str = '-w "ended({compile_job_name})"'.format(compile_job_name = compile_job_name)
            simulate_finish_depend = ""
            depend_list = []
            for test in self.testlist:
                if test['priority'] > self.priority: ## skip the test with lower priority
                    continue
                job_name = "simulate_"+test['pat']
                depend_list.append("ended({job_name})".format(job_name=job_name))
                simulate_job = "bsub -q priority -J {job_name} {depend_str} soc verilog -irun -block {block} -session {session} -bc {bc} -vectors {vector} -test {test} -keeptemps -bbox -no_compile -no_save_db\n".format(job_name=job_name, block=self.block, bc=self.group, session=self.group, vector= self.vector, test= test['pat'], depend_str = depend_str)
                f.write(simulate_job)
            # Final job: mail a notification once every simulation has ended.
            simulate_finish_depend = '-w "{depend}"'.format(depend=" && ".join(depend_list))
            cmd = "echo '{vector} Finished' | mail -s Submit jay.zhao@nxp.com".format(vector=vector)
            analysis_output_job = "bsub -q priority {depend} {cmd}".format(depend=simulate_finish_depend , cmd = cmd)
            f.write(analysis_output_job)
        os.system("chmod 750 {shell_script}".format(shell_script=self.shell_script))
if __name__ == "__main__":
test = SubmitJobs('/home/b51816/my_git/repository/imx_proj/regression/py/iMX8QM_Security_Verification_Plan.json')
test.generate_sh('scu_sec_snvs')
test.generate_sh('scu_sec_romcp')
# results = test.analysis_log('scu_sec_snvs')
results = test.analysis_log('scu_sec_romcp')
test.show_results('scu_sec_romcp','PASS')
# for i in results:
# if results[i]:
# # print(colored("################################################",info_color[i]))
# print(colored(i,info_color[i]))
# for test in results[i]:
# print(" {name}".format(name=test['pat']))
| [
"b51816@lvd5754.ap.freescale.net"
] | b51816@lvd5754.ap.freescale.net |
6c12b65bc75d06218bc70fa9fbd8a4d1205ac0e8 | 55fe97ec40490683c996642462f4b6f9e790e99e | /energy_space_explore/__init__.py | 06d891cf0cee4085133e311e154c161efdf71257 | [] | no_license | spencer-loggia/ComplexAssemblyAnalysis | a2a562e28a47f3e40d647ec1e81d91c6ac2650c4 | e8bbe435f79f28bcaf713ae95d802a4bca322a1d | refs/heads/master | 2021-07-22T17:39:41.909408 | 2021-03-03T23:41:02 | 2021-03-03T23:41:02 | 244,244,309 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,275 | py | import os
import sys
from preprocess import preprocess
from network import Network
import pickle as pk
# Driver: reads a directory of monomer .pdb files and a tab-separated
# connectivity config, builds (or loads a cached) Network from the
# preprocessed poses, and prints the network plus its deterministic form.
#
# Usage: python __init__.py <subunit_dir> <config_file>
subunit_dir = sys.argv[1]
config_dir = sys.argv[2]

# subunit name -> list of connected subunit names, parsed from the config.
mol_struct = {}

try:
    subunits = os.listdir(subunit_dir)
except IOError:
    # Fixed: the original passed sys.stderr as a *positional* argument to
    # print(), which printed the stream object to stdout; use file= instead.
    print("Bad monomer directory", file=sys.stderr)
    exit()

try:
    mol_config = open(config_dir, 'r')
except IOError:
    print("Config file not found", file=sys.stderr)
    exit()

# Parse the config: each non-comment line is "<subunit>\t<a,b,c,...>".
# The with-block guarantees the handle is closed (it was leaked before).
with mol_config:
    for line in mol_config:
        if line[0] != '#':
            # load molecule structure to dict
            try:
                cols = line.split('\t')
                subunit = cols[0].replace(' ', '')
                # Every configured subunit must have a matching .pdb file.
                if subunit + '.pdb' in subunits:
                    mol_struct[cols[0]] = cols[1].replace(' ', '').replace('\n', '').split(',')
                else:
                    print("Bad config format", file=sys.stderr)
                    exit()
            except IndexError:
                print("Bad config format", file=sys.stderr)
                exit()

poses = preprocess(subunit_dir, mol_struct)

# Reuse a previously pickled network when available; otherwise build it and
# cache it for the next run. Both pickle handles are now closed properly.
try:
    with open('./obj/network.pkl', 'rb') as f:
        net = pk.load(f)
except IOError:
    net = Network(poses, mol_struct)
    net.build_network()
    with open('./obj/network.pkl', 'wb') as f:
        pk.dump(net, f)

print(net.network)
det_net = net.deterministic_network()
print(det_net)
| [
"spencer.loggia98@gmail.com"
] | spencer.loggia98@gmail.com |
003d9d838b7372a3284b30915aec63707830d821 | ef20884169d10ec9ac4d1d3b77ee35245d248294 | /practice/deep-learning-from-scratch-2/np_random_choice.py | 9360dbb41512575e33d9d1d800f8a11e55fdeec2 | [] | no_license | heaven324/Deeplearning | 64016671879cdf1742eff6f374cfb640cfc708ae | a7a8d590fa13f53348f83f8c808538affbc7b3e8 | refs/heads/master | 2023-05-05T08:54:27.888155 | 2021-05-22T08:25:47 | 2021-05-22T08:25:47 | 188,010,607 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 727 | py | import numpy as np
# Demo of np.random.choice; outputs vary from run to run.
print(np.random.choice(10)) # 5
print(np.random.choice(10)) # 9
# Sample a single word at random from words
words = ['you', 'say', 'goodbye', 'I', 'hello', '.']
print(np.random.choice(words))
# Sample 5 words at random (with replacement)
print(np.random.choice(words, size = 5))
# Sample 5 words at random (without replacement)
print(np.random.choice(words, size = 5, replace = False))
# Sample according to a probability distribution
p = [0.5, 0.1, 0.05, 0.2, 0.05, 0.1]
print(np.random.choice(words, p = p))
# Why raise to the 0.75 power: to slightly boost the probability
# of low-frequency words
p = [0.7, 0.29, 0.01]
new_p = np.power(p, 0.75)
print(new_p)
# Renormalize so the adjusted values sum to 1 again.
new_p /= np.sum(new_p)
print(new_p)
| [
"wjdtjdgh2005@gmail.com"
] | wjdtjdgh2005@gmail.com |
c3142e42f11df6cedf0e2a9bd20f63f0919bc2c3 | 2fb34b829a3c5d0b2872a36367f3358335e2720e | /button.py | bb61f6cd17da09ce23ec5c19b419ddb82b44e4ea | [] | no_license | ziqiiii/python_alien_invasion | 525a8c7c2471c0449882f2abb5770f09b30626b0 | a6d4c03e1a8cd812d2362c6ef42602bb1ee02847 | refs/heads/master | 2020-03-29T08:22:24.304618 | 2018-09-28T14:26:50 | 2018-09-28T14:26:50 | 149,707,385 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 842 | py | import pygame.font
class Button():
    """A simple filled rectangle with a centred text label, drawn with pygame.

    The game code reads ``rect`` for click hit-testing and calls
    ``draw_button()`` each frame.
    """

    def __init__(self, ai_setting, screen, msg):
        """Create the button centred on *screen* and pre-render *msg*.

        ``ai_setting`` is accepted for interface compatibility but unused.
        """
        self.screen = screen
        self.screen_rect = screen.get_rect()
        # Fixed geometry and palette.
        self.width, self.height = 200, 50
        self.button_color = (0, 255, 0)
        self.text_color = (255, 255, 255)
        self.font = pygame.font.SysFont(None, 48)
        # Build the button rect at the origin, then centre it on the screen.
        self.rect = pygame.Rect(0, 0, self.width, self.height)
        self.rect.center = self.screen_rect.center
        self.prep_msg(msg)

    def prep_msg(self, msg):
        """Render *msg* into an image and centre it inside the button."""
        rendered = self.font.render(msg, True, self.text_color, self.button_color)
        self.msg_image = rendered
        self.msg_image_rect = rendered.get_rect()
        self.msg_image_rect.center = self.rect.center

    def draw_button(self):
        """Fill the button rectangle, then blit the pre-rendered label."""
        self.screen.fill(self.button_color, self.rect)
        self.screen.blit(self.msg_image, self.msg_image_rect)
| [
"522243591@qq.com"
] | 522243591@qq.com |
34db41a2067da5cdc11588510dae607a0a3f2ed9 | a7742c5b97690d76c3872e2ad8f7300c5afa3856 | /googleinceptionnet.py | fc868bbd08db47f5562782e8266ad284c7e927c6 | [] | no_license | LeeCheer00/TF | bb98a62197848eab3a44278ed1aea5bdf3e5fe9c | eb4f41fd70e2177938124273fb41aed99f4a0d47 | refs/heads/master | 2020-03-19T15:37:00.162198 | 2018-07-10T12:57:25 | 2018-07-10T12:57:25 | 136,678,624 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 22,708 | py | # encoding=utf-8
import time
import tensorflow as tf
import math
from datetime import datetime
# Shorthand for the TF-Slim module used throughout this file.
slim = tf.contrib.slim

def trunc_normal(stddev):
    """Return a zero-mean truncated-normal weight initializer (PEP 8 E731: def, not lambda)."""
    return tf.truncated_normal_initializer(0.0, stddev)
# inception_v3_arg_scope
# L2正则的weight_decay = 0.00004
# stddev = 0.1
# batch_norm_var_collection 默认值为moving_vars
######
# batch normalizaton的参数字典
# 衰减系数decay为0.9997
# epsilon 为0.001
# updates_collection 为 tf.GrapahKeys.UPDATE_OPS
# variables_collections 和beta 和gamma 设置 None
# moving_mean和moving_variance均设置为前面的batch_norm_var_collection
######
######
# slim.arg_scope is a useful tool, give the vaules to parameters automatically
# slim.arg_scope 不需要重复设置参数
# slim.arg_scope 对卷积层生成函数slim.conv2d的几个参数赋予默认值
# trunc_normal(stddev)设置成权重初始化器
# slim.batch_norm, 标准化器设置为前面的batch_norm_params
# 返回scope
######
######
# slim.conv2d默认参数, 定义一个卷积层变得非常方便
# 一行代码定义一个conv
#####
def inception_v3_arg_scope(weight_decay=0.00004, stddev=0.1, batch_norm_var_collection='moving_vars'):
    """Return the default arg_scope for Inception V3 layers.

    Applies L2 weight regularization to conv and fully-connected layers,
    and gives every conv layer a truncated-normal initializer, ReLU
    activation and batch normalization with the parameters below.
    """
    batch_norm_params = {
        'decay': 0.9997,
        'epsilon': 0.001,
        'updates_collections': tf.GraphKeys.UPDATE_OPS,
        'variables_collections': {
            'beta': None,
            'gamma': None,
            'moving_mean': [batch_norm_var_collection],
            'moving_variance': [batch_norm_var_collection],
        },
    }
    weights_regularizer = slim.l2_regularizer(weight_decay)
    with slim.arg_scope([slim.conv2d, slim.fully_connected],
                        weights_regularizer=weights_regularizer):
        with slim.arg_scope([slim.conv2d],
                            weights_initializer=trunc_normal(stddev),
                            activation_fn=tf.nn.relu,
                            normalizer_fn=slim.batch_norm,
                            normalizer_params=batch_norm_params) as scope:
            return scope
def inception_v3_base(inputs, scope=None):
    """Build the Inception V3 convolutional trunk.

    Args:
        inputs: 4-D image tensor (the script below feeds 299x299x3 images).
        scope: optional variable-scope name (defaults to 'InceptionV3').

    Returns:
        Tuple ``(net, end_points)`` where ``net`` is the final feature map
        after Mixed_7c and ``end_points`` exposes 'Mixed_6e' for the
        auxiliary classifier in ``inception_v3``.

    Fixes vs. the previous revision (scope-name typos that diverged from the
    reference Inception V3 naming): 'Miexed_7b' -> 'Mixed_7b' and
    'Conv3d_0b_3x3' -> 'Conv2d_0b_3x3'.
    """
    end_points = {}
    with tf.variable_scope(scope, 'InceptionV3', [inputs]):
        # Stem: plain conv/pool layers, VALID padding by default.
        with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d], stride=1, padding='VALID'):
            net = slim.conv2d(inputs, 32, [3, 3], stride=2, scope='Conv2d_1a_3x3')
            net = slim.conv2d(net, 32, [3, 3], scope='Conv2d_2a_3x3')
            net = slim.conv2d(net, 64, [3, 3], padding='SAME',scope='Conv2d_2b_3x3')
            net = slim.max_pool2d(net, [3, 3], stride=2, scope='MaxPool_3a_3x3')
            net = slim.conv2d(net, 80, [1, 1], scope='Conv2d_3b_1x1')
            net = slim.conv2d(net, 192, [3, 3], scope='Conv2d_4a_3x3')
            net = slim.max_pool2d(net, [3, 3], stride=2, scope='MaxPool_5a_3x3')
        # Inception modules: SAME padding so branch outputs can be concatenated.
        with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d], stride=1, padding='SAME'):
            with tf.variable_scope('Mixed_5b'):
                with tf.variable_scope('Branch_0'):
                    branch_0 = slim.conv2d(net, 64, [1, 1], scope='Conv2d_0a_1x1')
                with tf.variable_scope('Branch_1'):
                    branch_1 = slim.conv2d(net, 48, [1, 1], scope='Conv2d_0a_1x1')
                    branch_1 = slim.conv2d(branch_1, 64, [5, 5], scope='Conv2d_0b_5x5')
                with tf.variable_scope('Branch_2'):
                    branch_2 = slim.conv2d(net, 64, [1, 1], scope='Conv2d_0a_1x1')
                    branch_2 = slim.conv2d(branch_2, 96, [3, 3], scope='Conv2d_0b_3x3')
                    branch_2 = slim.conv2d(branch_2, 96, [3, 3], scope='Conv2d_0c_3x3')
                with tf.variable_scope('Branch_3'):
                    branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
                    branch_3 = slim.conv2d(branch_3, 32, [1, 1], scope='Conv2d_0b_1x1')
                net = tf.concat([branch_0, branch_1, branch_2, branch_3], 3)
            with tf.variable_scope('Mixed_5c'):
                with tf.variable_scope('Branch_0'):
                    branch_0 = slim.conv2d(net, 64, [1, 1], scope='Conv2d_0a_1x1')
                with tf.variable_scope('Branch_1'):
                    branch_1 = slim.conv2d(net, 48, [1, 1], scope='Conv2d_0b_1x1')
                    branch_1 = slim.conv2d(branch_1, 64, [5, 5], scope='Conv_1_0c_5x5')
                with tf.variable_scope('Branch_2'):
                    branch_2 = slim.conv2d(net, 64, [1, 1], scope='Conv2d_0a_1x1')
                    branch_2 = slim.conv2d(branch_2, 96, [3, 3], scope='Conv2d_0b_3x3')
                    branch_2 = slim.conv2d(branch_2, 96, [3, 3], scope='Conv2d_0c_3x3')
                with tf.variable_scope('Branch_3'):
                    branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
                    branch_3 = slim.conv2d(branch_3, 64, [1, 1], scope='Conv2d_0b_1x1')
                net = tf.concat([branch_0, branch_1, branch_2, branch_3], 3)
            with tf.variable_scope('Mixed_5d'):
                with tf.variable_scope('Branch_0'):
                    branch_0 = slim.conv2d(net, 64, [1, 1], scope='Conv2d_0a_1x1')
                with tf.variable_scope('Branch_1'):
                    branch_1 = slim.conv2d(net, 48, [1, 1], scope='Conv2d_0a_1x1')
                    branch_1 = slim.conv2d(branch_1, 64, [5, 5], scope='Conv2d_0b_5x5')
                with tf.variable_scope('Branch_2'):
                    branch_2 = slim.conv2d(net, 64, [1, 1], scope='Conv2d_0a_1x1')
                    branch_2 = slim.conv2d(branch_2, 96, [3, 3], scope='Conv2d_0b_3x3')
                    branch_2 = slim.conv2d(branch_2, 96, [3, 3], scope='Conv2d_0c_3x3')
                with tf.variable_scope('Branch_3'):
                    branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
                    branch_3 = slim.conv2d(branch_3, 64, [1, 1], scope='Conv2d_0b_1x1')
                net = tf.concat([branch_0, branch_1, branch_2, branch_3], 3)
            with tf.variable_scope('Mixed_6a'):
                with tf.variable_scope('Branch_0'):
                    branch_0 = slim.conv2d(net, 384, [3, 3], stride=2, padding='VALID', scope='Conv2d_1a_1x1')
                with tf.variable_scope('Branch_1'):
                    branch_1 = slim.conv2d(net, 64, [1, 1], scope='Conv2d_0a_1x1')
                    branch_1 = slim.conv2d(branch_1, 96, [3, 3], scope='Conv2d_0b_3x3')
                    branch_1 = slim.conv2d(branch_1, 96, [3, 3], stride=2, padding='VALID', scope='Conv2d_1a_1x1')
                with tf.variable_scope('Branch_2'):
                    branch_2 = slim.max_pool2d(net, [3, 3], stride=2, padding='VALID', scope='MaxPool_1a_3x3')
                net = tf.concat([branch_0, branch_1, branch_2], 3)
            with tf.variable_scope('Mixed_6b'):
                with tf.variable_scope('Branch_0'):
                    branch_0 = slim.conv2d(net, 192, [1, 1], scope='Conv2d_0a_1x1')
                with tf.variable_scope('Branch_1'):
                    branch_1 = slim.conv2d(net, 128, [1, 1], scope='Conv2d_0a_1x1')
                    branch_1 = slim.conv2d(branch_1, 128, [1, 7], scope='Conv2d_0b_1x7')
                    branch_1 = slim.conv2d(branch_1, 192, [7, 1], scope='Conv2d_0c_7x1')
                with tf.variable_scope('Branch_2'):
                    branch_2 = slim.conv2d(net, 128, [1, 1], scope='Conv2d_0a_1x1')
                    branch_2 = slim.conv2d(branch_2, 128, [7, 1], scope='Conv2d_0b_7x1')
                    branch_2 = slim.conv2d(branch_2, 128, [1, 7], scope='Conv2d_0c_1x7')
                    branch_2 = slim.conv2d(branch_2, 128, [7, 1], scope='Conv2d_0d_7x1')
                    branch_2 = slim.conv2d(branch_2, 192, [1, 7], scope='Conv2d_0e_1x7')
                with tf.variable_scope('Branch_3'):
                    branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
                    branch_3 = slim.conv2d(branch_3, 192, [1, 1], scope='Conv2d_0b_1x1')
                net = tf.concat([branch_0, branch_1, branch_2, branch_3], 3)
            with tf.variable_scope('Mixed_6c'):
                with tf.variable_scope('Branch_0'):
                    branch_0 = slim.conv2d(net, 192, [1, 1], scope='Conv2d_0a_1x1')
                with tf.variable_scope('Branch_1'):
                    branch_1 = slim.conv2d(net, 160, [1, 1], scope='Conv2d_0a_1x1')
                    branch_1 = slim.conv2d(branch_1, 160, [1, 7], scope='Conv2d_0b_1x7')
                    branch_1 = slim.conv2d(branch_1, 192, [7, 1], scope='Conv2d_0c_7x1')
                with tf.variable_scope('Branch_2'):
                    branch_2 = slim.conv2d(net, 160, [1, 1], scope='Conv2d_0a_1x1')
                    branch_2 = slim.conv2d(branch_2, 160, [7, 1], scope='Conv2d_0b_7x1')
                    branch_2 = slim.conv2d(branch_2, 160, [1, 7], scope='Conv2d_0c_1x7')
                    branch_2 = slim.conv2d(branch_2, 160, [7, 1], scope='Conv2d_0d_7x1')
                    branch_2 = slim.conv2d(branch_2, 192, [1, 7], scope='Conv2d_0e_1x7')
                with tf.variable_scope('Branch_3'):
                    branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
                    branch_3 = slim.conv2d(branch_3, 192, [1, 1], scope='Conv2d_0b_1x1')
                net = tf.concat([branch_0, branch_1, branch_2, branch_3], 3)
            with tf.variable_scope('Mixed_6d'):
                with tf.variable_scope('Branch_0'):
                    branch_0 = slim.conv2d(net, 192, [1, 1], scope='Conv2d_0a_1x1')
                with tf.variable_scope('Branch_1'):
                    branch_1 = slim.conv2d(net, 160, [1, 1], scope='Conv2d_0a_1x1')
                    branch_1 = slim.conv2d(branch_1, 160, [1, 7], scope='Conv2d_0b_1x7')
                    branch_1 = slim.conv2d(branch_1, 192, [7, 1], scope='Conv2d_0c_7x1')
                with tf.variable_scope('Branch_2'):
                    branch_2 = slim.conv2d(net, 160, [1, 1], scope='Conv2d_0a_1x1')
                    branch_2 = slim.conv2d(branch_2, 160, [7, 1], scope='Conv2d_0b_7x1')
                    branch_2 = slim.conv2d(branch_2, 160, [1, 7], scope='Conv2d_0c_1x7')
                    branch_2 = slim.conv2d(branch_2, 160, [7, 1], scope='Conv2d_0d_7x1')
                    branch_2 = slim.conv2d(branch_2, 192, [1, 7], scope='Conv2d_0e_1x7')
                with tf.variable_scope('Branch_3'):
                    branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
                    branch_3 = slim.conv2d(branch_3, 192, [1, 1], scope='Conv2d_0b_1x1')
                net = tf.concat([branch_0, branch_1, branch_2, branch_3], 3)
            with tf.variable_scope('Mixed_6e'):
                with tf.variable_scope('Branch_0'):
                    branch_0 = slim.conv2d(net, 192, [1, 1], scope='Conv2d_0a_1x1')
                with tf.variable_scope('Branch_1'):
                    branch_1 = slim.conv2d(net, 192, [1, 1], scope='Conv2d_0a_1x1')
                    branch_1 = slim.conv2d(branch_1, 192, [1, 7], scope='Conv2d_0b_1x7')
                    branch_1 = slim.conv2d(branch_1, 192, [7, 1], scope='Conv2d_0c_7x1')
                with tf.variable_scope('Branch_2'):
                    branch_2 = slim.conv2d(net, 192, [1, 1], scope='Conv2d_0a_1x1')
                    branch_2 = slim.conv2d(branch_2, 192, [7, 1], scope='Conv2d_0b_7x1')
                    branch_2 = slim.conv2d(branch_2, 192, [1, 7], scope='Conv2d_0c_1x7')
                    branch_2 = slim.conv2d(branch_2, 192, [7, 1], scope='Conv2d_0d_7x1')
                    branch_2 = slim.conv2d(branch_2, 192, [1, 7], scope='Conv2d_0e_1x7')
                with tf.variable_scope('Branch_3'):
                    branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
                    branch_3 = slim.conv2d(branch_3, 192, [1, 1], scope='Conv2d_0b_1x1')
                net = tf.concat([branch_0, branch_1, branch_2, branch_3], 3)
            # Saved for the auxiliary classifier head in inception_v3().
            end_points['Mixed_6e'] = net
            with tf.variable_scope('Mixed_7a'):
                with tf.variable_scope('Branch_0'):
                    branch_0 = slim.conv2d(net, 192, [1, 1], scope='Conv2d_0a_1x1')
                    branch_0 = slim.conv2d(branch_0, 320, [3, 3], stride=2, padding='VALID', scope='Conv2d_1a_3x3')
                with tf.variable_scope('Branch_1'):
                    branch_1 = slim.conv2d(net, 192, [1, 1], scope='Conv2d_0a_1x1')
                    branch_1 = slim.conv2d(branch_1, 192, [1, 7], scope='Conv2d_0b_1x7')
                    branch_1 = slim.conv2d(branch_1, 192, [7, 1], scope='Conv2d_0c_7x1')
                    branch_1 = slim.conv2d(branch_1, 192, [3, 3], stride=2, padding='VALID', scope='Conv2d_1a_3x3')
                with tf.variable_scope('Branch_2'):
                    branch_2 = slim.max_pool2d(net, [3, 3], stride=2, padding='VALID', scope='MaxPool_1a_3x3')
                net = tf.concat([branch_0, branch_1, branch_2], 3)
            # Fixed typo: was 'Miexed_7b', which broke the Mixed_* naming scheme.
            with tf.variable_scope('Mixed_7b'):
                with tf.variable_scope('Branch_0'):
                    branch_0 = slim.conv2d(net, 320, [1, 1], scope='Conv2d_0a_1x1')
                with tf.variable_scope('Branch_1'):
                    branch_1 = slim.conv2d(net, 384, [1, 1], scope='Conv2d_0a_1x1')
                    branch_1 = tf.concat([slim.conv2d(branch_1, 384, [1, 3], scope='Conv2d_0b_1x3'), slim.conv2d(branch_1, 384, [3, 1], scope='Conv2d_0b_3x1')], 3)
                with tf.variable_scope('Branch_2'):
                    branch_2 = slim.conv2d(net, 448, [1, 1], scope='Conv2d_0a_1x1')
                    branch_2 = slim.conv2d(branch_2, 384, [3, 3], scope='Conv2d_0b_3x3')
                    branch_2 = tf.concat([slim.conv2d(branch_2, 384, [1, 3], scope='Conv2d_0c_1x3'), slim.conv2d(branch_2, 384, [3, 1], scope='Conv2d_0d_3x1')], 3)
                with tf.variable_scope('Branch_3'):
                    branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
                    branch_3 = slim.conv2d(branch_3, 192, [1, 1], scope='Conv2d_0b_1x1')
                net = tf.concat([branch_0, branch_1, branch_2, branch_3], 3)
            with tf.variable_scope('Mixed_7c'):
                with tf.variable_scope('Branch_0'):
                    branch_0 = slim.conv2d(net, 320, [1, 1], scope='Conv2d_0a_1x1')
                with tf.variable_scope('Branch_1'):
                    branch_1 = slim.conv2d(net, 384, [1, 1], scope='Conv2d_0a_1x1')
                    branch_1 = tf.concat([slim.conv2d(branch_1, 384, [1, 3], scope='Conv2d_0b_1x3'), slim.conv2d(branch_1, 384, [3 ,1], scope='Conv2d_0c_3x1')], 3)
                with tf.variable_scope('Branch_2'):
                    branch_2 = slim.conv2d(net, 448, [1, 1], scope='Conv2d_0a_1x1')
                    # Fixed typo: scope was 'Conv3d_0b_3x3' (this is a 2-D conv).
                    branch_2 = slim.conv2d(branch_2, 384, [3, 3], scope='Conv2d_0b_3x3')
                    branch_2 = tf.concat([slim.conv2d(branch_2, 384, [1, 3], scope='Conv2d_0c_1x3'), slim.conv2d(branch_2, 384, [3, 1], scope= 'Conv2d_0d_3x1')], 3)
                with tf.variable_scope('Branch_3'):
                    branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
                    branch_3 = slim.conv2d(branch_3, 192, [1, 1], scope='Conv2d_0b_1x1')
                net = tf.concat([branch_0, branch_1, branch_2, branch_3], 3)
        return net, end_points
def inception_v3(inputs, num_classes=1000, is_training=True, dropout_keep_prob=0.8, prediction_fn=slim.softmax,spatial_squeeze=True, reuse=None, scope='InceptionV3'):
    """Full Inception V3 classifier: trunk + auxiliary head + main logits.

    Builds the trunk with inception_v3_base, attaches an auxiliary
    classifier on top of 'Mixed_6e', then average-pools, applies dropout
    and a 1x1 conv to produce the main logits. Returns (logits, end_points);
    end_points also carries 'AuxLogits', 'PreLogits' and 'Predictions'.
    """
    with tf.variable_scope(scope, 'InceptionV3', [inputs, num_classes], reuse=reuse) as scope:
        # batch_norm/dropout switch between train and inference behavior here.
        with slim.arg_scope([slim.batch_norm, slim.dropout], is_training=is_training):
            net, end_points = inception_v3_base(inputs, scope=scope)
            with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d],stride=1, padding='SAME'):
                # Auxiliary classifier branch off Mixed_6e (used as a training regularizer).
                aux_logits = end_points['Mixed_6e']
                with tf.variable_scope('AuxLogits'):
                    aux_logits = slim.avg_pool2d(aux_logits, [5, 5], stride=3, padding='VALID', scope='AvgPool_1a_5x5')
                    aux_logits = slim.conv2d(aux_logits, 128, [1, 1], scope='Conv2d_1b_1x1')
                    aux_logits = slim.conv2d(aux_logits, 768, [5, 5], weights_initializer=trunc_normal(0.01),padding='VALID', scope='Conv2d_2a_5x5')
                    # No activation/normalization on the logits layer.
                    aux_logits = slim.conv2d(aux_logits, num_classes, [1, 1], activation_fn=None, normalizer_fn=None, weights_initializer=trunc_normal(0.001), scope='Conv2d_2b_1x1')
                    if spatial_squeeze:
                        aux_logits = tf.squeeze(aux_logits, [1, 2], name='SpatialSqueeze')
                    end_points['AuxLogits'] = aux_logits
            with tf.variable_scope('Logits'):
                # Global average pooling over the final 8x8 feature map.
                net = slim.avg_pool2d(net, [8, 8], padding='VALID', scope='AvgPool_1a_8x8')
                net = slim.dropout(net, keep_prob=dropout_keep_prob, scope='Dropout_1b')
                end_points['PreLogits'] = net
                logits = slim.conv2d(net, num_classes, [1, 1], activation_fn=None, normalizer_fn=None, scope='Conv2d_1c_1x1')
                if spatial_squeeze:
                    logits = tf.squeeze(logits, [1, 2], name='SpatialSqueeze')
            end_points['Logits'] = logits
            end_points['Predictions'] = prediction_fn(logits, scope='Predictions')
    return logits, end_points
def time_tensorflow_run(session, target, info_string, num_batches=100):
    """Benchmark `session.run(target)` and print per-batch timing statistics.

    Runs `num_steps_burn_in` warm-up iterations (excluded from the stats),
    then `num_batches` timed iterations, printing progress every 10 steps
    and finally the mean and standard deviation per batch.

    Args:
        session: object with a ``run(target)`` method (e.g. tf.Session).
        target: the op/tensor to execute.
        info_string: label printed with the summary line.
        num_batches: number of timed iterations. Previously this was read
            from a module-level global; it is now a parameter whose default
            (100) matches the value the script below sets, so existing
            3-argument calls behave identically.
    """
    num_steps_burn_in = 10
    total_duration = 0.0
    total_duration_squared = 0.0
    for i in range(num_batches + num_steps_burn_in):
        start_time = time.time()
        _ = session.run(target)
        duration = time.time() - start_time
        if i >= num_steps_burn_in:
            if not i % 10:
                print('%s : step %d, duration = %.3f' % (datetime.now(), i - num_steps_burn_in, duration))
            total_duration += duration
            total_duration_squared += duration * duration
    # Mean and standard deviation of the timed iterations.
    mn = total_duration / num_batches
    vr = total_duration_squared / num_batches - mn * mn
    # Clamp at 0: floating-point error can make vr slightly negative,
    # which would crash math.sqrt with a domain error.
    sd = math.sqrt(max(vr, 0.0))
    print ('%s: %s across %d steps, %.3f +/- %.3f sec / batch' % (datetime.now(), info_string, num_batches, mn, sd))
# Benchmark script: time a forward pass of Inception V3 on random input.
batch_size = 32
height, width= 299,299
# Random images stand in for real data; only throughput is measured.
inputs = tf.random_uniform((batch_size, height, width, 3))
with slim.arg_scope(inception_v3_arg_scope()):
    logits, end_points = inception_v3(inputs, is_training=False)
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)
num_batches=100
time_tensorflow_run(sess, logits, "Forward")
| [
"645513992@qq.com"
] | 645513992@qq.com |
778459e47142827e3629b6af6b3dbfc2ccc5d25e | ce990be34e8759efb96b890d9676da313fd2d9b4 | /tests/python/contrib/test_ethosu/cascader/test_plan.py | ddc40b49ac8a8de119af6b9b19d208ef745f4899 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense",
"Zlib",
"MIT",
"Apache-2.0",
"BSD-2-Clause"
] | permissive | tmoreau89/tvm | 291c0b1beb13503e18b1e45f135aaf334660b68d | 8136173a631bf6c7274d26285349225fcf6e495f | refs/heads/master | 2022-11-23T08:36:24.853648 | 2022-11-21T07:36:57 | 2022-11-21T07:36:57 | 119,757,672 | 5 | 1 | Apache-2.0 | 2019-03-22T23:06:53 | 2018-01-31T23:41:33 | Python | UTF-8 | Python | false | false | 7,708 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm.contrib.ethosu.cascader as cs
import pytest
def test_plan(DRAM, SRAM):
    """Check that cs.Plan stores every constructor argument unchanged.

    Builds a minimal one-part graph (tensor_1 -> part -> tensor_2), derives
    the input stripe config from the output one, and asserts the Plan
    exposes exactly the values it was constructed with.
    DRAM/SRAM are pytest fixtures providing memory regions.
    """
    subgraph = cs.TESubgraph([], None)
    part = cs.InlinePart(
        subgraph,
        [
            cs.Propagator(
                [[1, 0, 0], [0, 1, 0], [0, 0, 1]],
                [0, 0],
            ),
        ],
    )
    tensor_1 = cs.Tensor([10, 10], "uint8")
    tensor_2 = cs.Tensor([10, 10], "uint8")
    # Wire tensor_1 -> part -> tensor_2 in both directions.
    part.set_input(0, tensor_1)
    part.set_output(tensor_2)
    tensor_1.add_consumer(part)
    tensor_2.add_producer(part)
    output_stripe_config = cs.StripeConfig(
        shape=[5, 5],
        extent=[10, 10],
        strides=[5, 5],
        order=[1, 2],
        stripes=[2, 2],
        offset=[0, 0],
    )
    tensor_config_out = cs.TensorConfig(
        tensor=tensor_2,
        home_region=DRAM,
        state=cs.TensorConfigState.BOUNDARY,
        buffer_mode=cs.BufferMode.RECOMPUTE,
        stripe_configs=[output_stripe_config],
        copy_tensor=False,
    )
    # Propagate the output stripe config backwards through the part.
    input_stripe_config = part.calculate_input_stripe_configs(output_stripe_config)[0]
    tensor_config_in = cs.TensorConfig(
        tensor=tensor_1,
        home_region=DRAM,
        state=cs.TensorConfigState.INTERIOR,
        buffer_mode=cs.BufferMode.ROLLING,
        stripe_configs=[input_stripe_config],
        copy_tensor=False,
    )
    tensor_configs = {tensor_1: tensor_config_in, tensor_2: tensor_config_out}
    open_configs = frozenset([tensor_config_in])
    part_group = frozenset([part])
    interior_region = SRAM
    memory_usage = 100
    cycles = 20
    plan = cs.Plan(
        tensor_configs=tensor_configs,
        open_configs=open_configs,
        output_config=tensor_config_out,
        part_group=part_group,
        interior_region=interior_region,
        memory_usage=memory_usage,
        cycles=cycles,
    )
    # The Plan should simply reflect its constructor arguments.
    assert plan.tensor_configs == tensor_configs
    assert plan.open_configs == open_configs
    assert plan.output_config == tensor_config_out
    assert plan.part_group == part_group
    assert plan.interior_region == interior_region
    assert plan.memory_usage == memory_usage
    assert plan.cycles == cycles
def test_plan_merge(DRAM, SRAM):
    """Check Plan.merge on two chained plans.

    Builds tensor_1 -> part_1 -> tensor_2 -> part_2 -> tensor_5 (part_2 also
    consumes tensor_3 and tensor_4), creates one Plan per part that share the
    open config on tensor_2, merges them, and asserts the merged plan joins
    the tensor configs and part groups, closes the shared config, keeps the
    downstream output, and sums memory usage and cycles.
    """
    subgraph = cs.TESubgraph([], None)
    part_1 = cs.InlinePart(
        subgraph,
        [
            cs.Propagator(
                [[2, 0, 0], [0, 2, 0], [0, 0, 1]],
                [0, 0],
            ),
        ],
    )
    part_2 = cs.InlinePart(
        subgraph,
        [
            cs.Propagator(
                [[1, 0, 0], [0, 1, 0], [0, 0, 1]],
                [0, 0],
            ),
            cs.Propagator(
                [[0, 0, 6], [0, 0, 6], [0, 0, 1]],
                [0, 0],
            ),
            cs.Propagator(
                [[1, 0], [0, 1]],
                [0],
            ),
        ],
    )
    tensor_1 = cs.Tensor([20, 20], "uint8")
    tensor_2 = cs.Tensor([10, 10], "uint8")
    tensor_3 = cs.Tensor([6, 6], "uint8")
    tensor_4 = cs.Tensor([10], "uint8")
    tensor_5 = cs.Tensor([10, 10], "uint8")
    # part_1: tensor_1 -> tensor_2; part_2: (tensor_2, tensor_3, tensor_4) -> tensor_5.
    part_1.set_input(0, tensor_1)
    part_1.set_output(tensor_2)
    tensor_1.add_consumer(part_1)
    tensor_2.add_producer(part_1)
    part_2.set_input(0, tensor_2)
    part_2.set_input(1, tensor_3)
    part_2.set_input(2, tensor_4)
    part_2.set_output(tensor_5)
    tensor_2.add_consumer(part_2)
    tensor_3.add_consumer(part_2)
    tensor_4.add_consumer(part_2)
    tensor_5.add_producer(part_2)
    output_stripe_config = cs.StripeConfig(
        shape=[5, 5],
        extent=[10, 10],
        strides=[5, 5],
        order=[1, 2],
        stripes=[2, 2],
        offset=[0, 0],
    )
    tensor_config_5 = cs.TensorConfig(
        tensor=tensor_5,
        home_region=DRAM,
        state=cs.TensorConfigState.BOUNDARY,
        buffer_mode=cs.BufferMode.RECOMPUTE,
        stripe_configs=[output_stripe_config],
        copy_tensor=False,
    )
    # Propagate the output stripe config back to each of part_2's inputs.
    input_stripe_configs = part_2.calculate_input_stripe_configs(output_stripe_config)
    tensor_config_4 = cs.TensorConfig(
        tensor=tensor_4,
        home_region=DRAM,
        state=cs.TensorConfigState.BOUNDARY,
        buffer_mode=cs.BufferMode.RECOMPUTE,
        stripe_configs=[input_stripe_configs[2]],
        copy_tensor=False,
    )
    tensor_config_3 = cs.TensorConfig(
        tensor=tensor_3,
        home_region=SRAM,
        state=cs.TensorConfigState.INTERIOR,
        buffer_mode=cs.BufferMode.RECOMPUTE,
        stripe_configs=[input_stripe_configs[1]],
        copy_tensor=False,
    )
    # tensor_2 is the shared/open config between the two plans.
    tensor_config_2 = cs.TensorConfig(
        tensor=tensor_2,
        home_region=SRAM,
        state=cs.TensorConfigState.INTERIOR,
        buffer_mode=cs.BufferMode.ROLLING,
        stripe_configs=[input_stripe_configs[0]],
        copy_tensor=False,
    )
    input_stripe_config = part_1.calculate_input_stripe_configs(input_stripe_configs[0])[0]
    tensor_config_1 = cs.TensorConfig(
        tensor=tensor_1,
        home_region=DRAM,
        state=cs.TensorConfigState.BOUNDARY,
        buffer_mode=cs.BufferMode.ROLLING,
        stripe_configs=[input_stripe_config],
        copy_tensor=False,
    )
    # Plan 1 covers part_1 and leaves tensor_2 open.
    tensor_configs = {tensor_1: tensor_config_1, tensor_2: tensor_config_2}
    open_configs = frozenset([tensor_config_2])
    part_group = frozenset([part_1])
    interior_region = SRAM
    memory_usage = 100
    cycles = 20
    plan_1 = cs.Plan(
        tensor_configs=tensor_configs,
        open_configs=open_configs,
        output_config=tensor_config_2,
        part_group=part_group,
        interior_region=interior_region,
        memory_usage=memory_usage,
        cycles=cycles,
    )
    # Plan 2 covers part_2, with tensor_2 and tensor_3 open.
    tensor_configs = {
        tensor_2: tensor_config_2,
        tensor_3: tensor_config_3,
        tensor_4: tensor_config_4,
        tensor_5: tensor_config_5,
    }
    open_configs = frozenset([tensor_config_2, tensor_config_3])
    part_group = frozenset([part_2])
    interior_region = SRAM
    memory_usage = 200
    cycles = 30
    plan_2 = cs.Plan(
        tensor_configs=tensor_configs,
        open_configs=open_configs,
        output_config=tensor_config_5,
        part_group=part_group,
        interior_region=interior_region,
        memory_usage=memory_usage,
        cycles=cycles,
    )
    merged_plan = plan_1.merge(plan_2)
    # Merge should union configs/parts, close the shared tensor_2 config,
    # keep plan_2's output, and sum memory usage and cycles.
    assert merged_plan.tensor_configs == {
        tensor_1: tensor_config_1,
        tensor_2: tensor_config_2,
        tensor_3: tensor_config_3,
        tensor_4: tensor_config_4,
        tensor_5: tensor_config_5,
    }
    assert merged_plan.open_configs == frozenset([tensor_config_3])
    assert merged_plan.output_config == tensor_config_5
    assert merged_plan.part_group == frozenset([part_1, part_2])
    assert merged_plan.interior_region == interior_region
    assert merged_plan.memory_usage == plan_1.memory_usage + plan_2.memory_usage
    assert merged_plan.cycles == plan_1.cycles + plan_2.cycles
if __name__ == "__main__":
    # Allow running this test module directly (outside a pytest invocation).
    pytest.main([__file__])
| [
"noreply@github.com"
] | noreply@github.com |
825ff37b9030c959ca4c89367e9e5f31a97cf6a8 | 79c2d6c68281a297a2541f1fc9acb67a3e96be80 | /shop/views.py | 9c19653bbd6e76f92988faac5bde211ea1987772 | [] | no_license | abhi1540/Django-Shopping_cart | b021c1f78555b870261eb16c2d00ffdf08ce314a | b317073886feac78a861bab502179956a93c784e | refs/heads/master | 2020-03-18T16:46:14.005819 | 2018-05-26T18:13:30 | 2018-05-26T18:13:30 | 134,985,503 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,596 | py | from django.shortcuts import render, get_object_or_404, redirect
from django.http import HttpResponse
from .models import Product, Category
from django.core.paginator import Paginator, EmptyPage, InvalidPage
from django.contrib.auth.models import Group, User
from .forms import SignUpForm
from django.contrib.auth.forms import AuthenticationForm
from django.contrib.auth import login, authenticate, logout
# Create your views here.
def index(request):
    """Placeholder landing view; returns a static greeting response."""
    return HttpResponse("Helloooooooooooooooo")
def allProdCat(request, c_slug=None):
    """List available products, optionally filtered by category slug.

    Paginates 3 products per page via the ``?page=`` query parameter.
    A missing/invalid page number falls back to page 1; an out-of-range
    page shows the last page. 404s if ``c_slug`` names no category.
    """
    c_page = None
    if c_slug is not None:
        c_page = get_object_or_404(Category, slug=c_slug)
        products_list = Product.objects.filter(category=c_page, available=True)
    else:
        products_list = Product.objects.filter(available=True)
    '''Pagination code'''
    paginator = Paginator(products_list, 3)
    try:
        page = int(request.GET.get('page', '1'))
    except (TypeError, ValueError):
        # Was a bare `except:` — narrowed to the errors int() can raise so
        # unrelated failures are no longer silently swallowed.
        page = 1
    try:
        products = paginator.page(page)
    except (EmptyPage, InvalidPage):
        # Clamp out-of-range pages to the last available page.
        products = paginator.page(paginator.num_pages)
    return render(request, 'shop/category.html', {'category': c_page, 'products': products})
def prodCartDetail(request, c_slug, p_slug):
    """Render the detail page for a single product.

    The previous ``try/except Exception as e: raise e`` added nothing but a
    truncated traceback; the lookup now simply propagates its exception
    (Product.DoesNotExist) unchanged, as before.
    """
    product = Product.objects.get(category__slug=c_slug, slug=p_slug)
    return render(request, 'shop/product.html', {'product': product})
def signupView(request):
    """Register a new user and add them to the 'Customer' group.

    On a valid POST, saves the user, adds them to the Customer group and
    redirects to the sign-in page (post/redirect/get — previously the view
    fell through and re-rendered the bound form, so a refresh would
    resubmit the signup). Invalid POSTs re-render the form with errors.
    """
    if request.method == 'POST':
        form = SignUpForm(request.POST)
        if form.is_valid():
            form.save()
            username = form.cleaned_data.get('username')
            signup_user = User.objects.get(username=username)
            # Every self-registered account becomes a Customer.
            customer_group = Group.objects.get(name='Customer')
            customer_group.user_set.add(signup_user)
            return redirect('signin')
    else:
        form = SignUpForm()
    return render(request, 'accounts/signup.html', {'form': form})
def signinView(request):
    """Authenticate a user and redirect to the product list on success.

    On an invalid POST the bound form (with errors) is re-rendered; on a
    GET a fresh form is shown.
    """
    if request.method == 'POST':
        form = AuthenticationForm(data=request.POST)
        if form.is_valid():
            username = request.POST['username']
            password = request.POST['password']
            # NOTE(review): AuthenticationForm.is_valid() has already
            # authenticated these credentials, so this re-check should
            # always succeed and the redirect('signup') branch is
            # effectively unreachable — confirm intent.
            user = authenticate(username=username, password=password)
            if user is not None:
                login(request, user)
                return redirect('shop:allProdCat')
            else:
                return redirect('signup')
    else:
        form = AuthenticationForm()
    return render(request, 'accounts/signin.html', {'form': form})
def signoutView(request):
    """Log the current user out and send them to the sign-in page."""
    logout(request)
    return redirect('signin')
"abhisek1651990@gmail.com"
] | abhisek1651990@gmail.com |
2d1155d32a90b5db0505c5e7d1005e480fd33c00 | 6ac0aa077de3d234ad19eae7e65813d3b99a89af | /142_LinkedListCycleII/solution2.py | 911328dfca8a6988900503d40f8555d8d6afdbee | [] | no_license | changchingchen/leetcode | 2c85dd2c433dd3937be3f74a87534a8fccd80b38 | 481e873a0332c7939e3a24281bcea837b383af58 | refs/heads/master | 2023-01-12T02:08:44.339658 | 2020-10-25T14:33:44 | 2020-10-25T14:33:44 | 266,359,974 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 632 | py | # Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
    def detectCycle(self, head: ListNode) -> ListNode:
        """Return the node where the cycle begins, or None if the list is acyclic.

        Floyd's tortoise-and-hare: advance two pointers at speeds 1 and 2;
        if they meet, a pointer restarted from the head and the slow pointer
        (both stepping once) meet exactly at the cycle's entry node.
        O(N) time, O(1) extra space.
        """
        if not head:
            return None
        tortoise = hare = head
        # Phase 1: detect whether a cycle exists.
        while hare and hare.next:
            tortoise = tortoise.next
            hare = hare.next.next
            if tortoise == hare:
                break
        else:
            # hare ran off the end — no cycle.
            return None
        # Phase 2: the distance from head to the cycle entry equals the
        # distance from the meeting point to the entry (mod cycle length).
        probe = head
        while probe != tortoise:
            probe = probe.next
            tortoise = tortoise.next
        return probe
# Time: O(N)
# Space: O(1) | [
"tim.changching.chen@gmail.com"
] | tim.changching.chen@gmail.com |
a2e944d171d136342803a1d014bb44c7dce372df | 68ff38f2dd0188909737b395ac227ec0e205d024 | /kthapa2/MSAprioriImplementation.py | 88cd7c703c16da55aa29ac4054f8986a419e9810 | [] | no_license | kushagraThapar/Data-Mining | 7c3992c5efe820185e5197c8098168ae92e68cf9 | 658f3fdc8c4ea91d717cb36d5504d5e1fb803960 | refs/heads/master | 2021-01-18T15:40:23.412283 | 2017-01-29T08:49:04 | 2017-01-29T08:49:04 | 68,418,623 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,396 | py | from collections import OrderedDict
from itertools import combinations
from kthapa2 import CustomConstants
# Module-level state shared by the MS-Apriori routines below.
# item -> number of transactions containing the item (filled by init_pass).
item_support_count_map = {}
# item -> support fraction (count / len(transactions), filled by init_pass).
item_support_map = {}
# frozenset(itemset) -> transaction count (filled during candidate counting).
item_frequency_map = {}
# frozenset(itemset minus first item) -> transaction count (rule generation tail counts).
item_tailcount_map = {}
# item -> its minimum item support (MIS), insertion-ordered ascending by MIS.
minimum_item_support_map = OrderedDict()
# User constraints consulted in ms_apriori_algorithm.
cannot_be_together = set()
must_have = set()
def ms_apriori_algorithm(transactions, minimum_item_support_list, sdc_value):
    """Run MS-Apriori over `transactions` and return the frequent itemsets.

    Args:
        transactions: iterable of item lists.
        minimum_item_support_list: (item, MIS) pairs; sorted here by
            ascending MIS (ties broken by item id) into the global
            minimum_item_support_map.
        sdc_value: support-difference constraint between itemset members.

    Returns:
        List of levels [F1, F2, ...], each a list of itemsets that are
        frequent, contain a must-have item, and violate no
        cannot-be-together constraint.
    """
    minimum_item_support_list.sort(key=lambda tup: (float(tup[1]), int(tup[0])))
    for element in minimum_item_support_list:
        minimum_item_support_map[element[0]] = element[1]
    f_1 = []
    f_final = []
    # L: items ordered by MIS whose support clears the seeding threshold.
    l_list = init_pass(minimum_item_support_map, transactions)
    for element in l_list:
        if item_support_map[element] >= minimum_item_support_map[element]:
            if element in must_have:
                f_1.append(element)
            fill_item_map(frozenset([element]), transactions, item_tailcount_map)
    if len(f_1) == 0:
        return f_final
    f_k = f_1
    f_final.append(f_k)
    k = 2
    f_k_without_must_have = []
    while True:
        # Candidate generation: special-cased at level 2, general join after.
        if k == 2:
            c_k = level2_candidate_gen_function(l_list, sdc_value)
        else:
            c_k = ms_candidate_generation(f_k_without_must_have, sdc_value, k - 1)
        # Counting pass: record candidate frequencies and tail counts.
        for single_transaction in transactions:
            single_transaction_set = frozenset(single_transaction)
            for single_candidate in c_k:
                single_candidate = list(single_candidate)
                single_candidate_set = frozenset(single_candidate)
                if single_candidate_set.issubset(single_transaction):
                    fill_item_map(single_candidate_set, transactions, item_frequency_map)
                    single_candidate_without_first_element = frozenset(single_candidate[1:])
                    if len(single_candidate_without_first_element) > 0 and single_candidate_without_first_element.issubset(
                            single_transaction_set):
                        fill_item_map(single_candidate_without_first_element, transactions, item_tailcount_map)
        f_k = []
        f_k_without_must_have = []
        for candidate in c_k:
            candidate = list(candidate)
            candidate_set = frozenset(candidate)
            # Frequent iff support >= MIS of the candidate's lowest-MIS
            # (first) item, per MS-Apriori.
            if candidate_set in item_frequency_map and item_frequency_map[candidate_set] / len(
                    transactions) >= \
                    minimum_item_support_map[candidate[0]]:
                f_k_without_must_have.append(candidate)
                element_present = False
                for element in must_have:
                    if element in candidate:
                        element_present = True
                        break
                if element_present:
                    # Reject candidates containing a cannot-be-together pair.
                    are_together = False
                    for single_set in set(combinations(candidate_set, 2)):
                        if set(single_set).issubset(cannot_be_together):
                            are_together = True
                            break
                    if not are_together:
                        f_k.append(candidate)
        if len(f_k_without_must_have) == 0:
            return f_final
        f_final.append(f_k)
        k += 1
    return f_final
def init_pass(minimum_item_support_map_copy, transactions):
    """First pass of MS-Apriori: compute supports and build the L list.

    Iterates items in ascending-MIS order; the first item whose support
    meets its own MIS fixes the threshold (its MIS), and every later item
    whose support meets that threshold joins the returned L list. Also
    fills the global item_support_count_map and item_support_map as a
    side effect.
    """
    list_to_be_returned = []
    # MIS of the first item found with support >= its own MIS; None until seen.
    initiating_element_mis = None
    for element in minimum_item_support_map_copy:
        count = 0
        for single_transaction in transactions:
            if element in single_transaction:
                count += 1
        item_support_count_map[element] = count
        support = round(count / len(transactions), CustomConstants.ROUND_FLOAT_DIGITS)
        item_support_map[element] = support
        if initiating_element_mis is not None and support >= initiating_element_mis:
            list_to_be_returned.append(element)
        elif support >= minimum_item_support_map_copy[element]:
            # Seed item: its MIS becomes the threshold for the rest of L.
            initiating_element_mis = minimum_item_support_map_copy[element]
            list_to_be_returned.append(element)
    return list_to_be_returned
def level2_candidate_gen_function(l_list, sdc_value):
    """Generate size-2 candidates from the L list (MS-Apriori level2-candidate-gen).

    For each item l with sup(l) >= MIS(l), pairs it with every later item h
    such that sup(h) >= MIS(l) and |sup(h) - sup(l)| <= sdc_value, skipping
    pairs constrained by cannot_be_together. Since l_list is MIS-sorted,
    l is always the lowest-MIS member of the pair.

    Changes vs. the previous revision:
    - enumerate() replaces l_list.index(element), which was O(n) per item
      and wrong in the presence of duplicate items;
    - the pairing test is the textbook sup(h) >= MIS(l) rather than
      MIS(h) >= MIS(l) (trivially true under the MIS sort). Pairs pruned
      by this have sup(pair) <= sup(h) < MIS(l) and could never become
      frequent, so the final output is unchanged — only wasted counting
      work is avoided.
    """
    c_2 = []
    for index, element in enumerate(l_list):
        if item_support_map[element] < minimum_item_support_map[element]:
            continue
        for new_element in l_list[index + 1:]:
            if item_support_map[new_element] >= minimum_item_support_map[element] and abs(
                    item_support_map[new_element] - item_support_map[element]) <= sdc_value:
                # Check for the cannot be together condition
                if not {element, new_element}.issubset(cannot_be_together):
                    c_2.append([element, new_element])
    return c_2
def ms_candidate_generation(f_k_1, sdc_value, k):
    """General MS-Apriori candidate generation (level > 2).

    Joins pairs of (k)-itemsets from `f_k_1` that share their first k-1
    items, differ in the last item, keep ascending MIS order, and satisfy
    the support-difference constraint; then prunes any join whose relevant
    k-subsets are not all in `f_k_1`.
    """
    c_k_final = []
    f_k_1_set = set()
    for element in f_k_1:
        f_k_1_set.add(frozenset(element))
    f_k_1_set = frozenset(f_k_1_set)
    # Index of the most recently appended candidate; decremented on prune.
    # NOTE(review): this delete-by-index bookkeeping is fragile — it relies
    # on the pruned candidate always being the last one appended.
    current_index = -1
    for single_f_1 in f_k_1:
        for single_f_2 in f_k_1[f_k_1.index(single_f_1) + 1:]:
            # Join condition: same prefix, different last items, MIS order
            # preserved, and last-item supports within the SDC.
            if frozenset(single_f_1[:-1]) == frozenset(single_f_2[:-1]) and single_f_1[-1] != single_f_2[-1] and \
                    minimum_item_support_map[single_f_1[-1]] <= minimum_item_support_map[single_f_2[-1]] \
                    and abs(item_support_map[single_f_1[-1]] - item_support_map[single_f_2[-1]]) <= sdc_value:
                c_k_local = single_f_1[:]
                c_k_local.append(single_f_2[-1])
                current_index += 1
                c_k_final.append(c_k_local)
                first = c_k_local[0]
                second = c_k_local[1]
                # Do the pruning step
                for each_subset in set(combinations(c_k_local, k)):
                    if first in each_subset or minimum_item_support_map[first] == \
                            minimum_item_support_map[second]:
                        if frozenset(each_subset) not in f_k_1_set:
                            del c_k_final[current_index]
                            current_index -= 1
                            break
    return c_k_final
def fill_item_map(item, transactions, input_map):
    """Memoize the number of transactions containing `item` into `input_map`.

    No-op if `item` was already counted. If `item` occurs in no
    transaction, the map is left untouched (no zero entry is created),
    matching how callers test membership before dividing by the count.
    """
    if item in input_map:
        return
    occurrences = sum(1 for transaction in transactions if item.issubset(set(transaction)))
    if occurrences:
        input_map[item] = occurrences
| [
"kthapa2@uic.edu"
] | kthapa2@uic.edu |
f3c5610d6e768e63d846e7e2e59d3e56cbe94975 | 72d4184dc2d32ce9a5c72e4aa381342cc605545a | /src/migrations/versions/2940356ebeac_.py | c1fdd8a3d7111a7898fd2c7d9f55502b7c5d065f | [] | no_license | taller2fiuba/chotuve-app-server | 7acddf1e7cd5e7c9cfecae712625cd80304a283e | ac2d389196f3176bf8e3675d113a42a70259d512 | refs/heads/master | 2023-05-02T16:10:43.991179 | 2020-07-31T00:50:17 | 2020-07-31T00:50:17 | 257,125,755 | 0 | 0 | null | 2021-05-06T20:24:18 | 2020-04-19T23:44:44 | Python | UTF-8 | Python | false | false | 996 | py | """empty message
Revision ID: 2940356ebeac
Revises: 6030eadd9681
Create Date: 2020-06-10 00:29:15.278535
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '2940356ebeac'
down_revision = '6030eadd9681'
branch_labels = None
depends_on = None
def upgrade():
    """Apply this revision: create the ``comentario`` table.

    Rows hold one comment per (video, user) with a DB-side creation
    timestamp (``now()``).
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('comentario',
    sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
    sa.Column('video', sa.String(length=32), nullable=True),
    sa.Column('usuario', sa.Integer(), nullable=True),
    sa.Column('comentario', sa.String(length=5000), nullable=True),
    # Timestamp defaults to the database clock, not the application clock.
    sa.Column('fecha', sa.DateTime(timezone=True), server_default=sa.text('now()'), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    # ### end Alembic commands ###
def downgrade():
    """Revert this revision: drop the ``comentario`` table."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('comentario')
    # ### end Alembic commands ###
| [
"sportelliluciano@gmail.com"
] | sportelliluciano@gmail.com |
ab55393ddc0e46a0f229ce84b50466d0ac1cb266 | 65701888f7e09716b83ddbb965a50b7c62b0f287 | /blocks/google/common_block.py | fb2ba923f68bc8aedfef5cc46a894ff664e758b9 | [] | no_license | ColinKennedy/auto_docstring | 6a4a27c16434cb6d94db435226758a09627d9252 | dbca838630faf410a277069aedbecb82cfeedae9 | refs/heads/master | 2021-04-12T12:36:31.825008 | 2018-11-05T01:49:49 | 2018-11-05T01:49:49 | 89,107,892 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,343 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# TODO : Just had an idea. Why not change the gross "if X.is_valid(obj): return X(obj)
# into a single classmethod? That'd look way better and potentially be
# easier to loop over
#
'''The classes and functions needed to parse the types of all astroid nodes.
This module does most of the heavy-lifting for args return-types. It can
parse functions within functions, infer an object's type, and even recursively
traverse imported modules to get an object's type.
'''
# IMPORT STANDARD LIBRARIES
import abc
import os
# IMPORT THIRD-PARTY LIBRARIES
import six
# IMPORT LOCAL LIBRARIES
from ...config import environment
from ...parsing import visit
from ...core import check
from . import common_type
@six.add_metaclass(abc.ABCMeta)
class CommonBlock(object):
    '''An abstract class used to implement a Google-style block.

    Attributes:
        label (str): The block display text.
    '''

    label = 'Header label'

    @staticmethod
    @abc.abstractmethod
    def draw(info):
        '''Create the docstring lines to represent the given `info`.

        Args:
            info (dict[str]):
                The parsed AST node whose type needs to be found and then
                converted into a string.

        Returns:
            list[str]: The lines to create.
        '''
        return []

    @abc.abstractproperty
    def name(self):
        '''str: A unique name to use to identify this block-type.'''
        return '_unique_id'

    @classmethod
    def get_starting_lines(cls):
        '''list[str]: Get the label used for the top of this block.'''
        return ['{}:'.format(cls.label)]

    # Fixed: a second, earlier definition of get_spacing that just returned
    # None was dead code (shadowed by this definition) and has been removed.
    @staticmethod
    def get_spacing():
        '''int: Get the number of newlines to separate each docstring block.'''
        try:
            return int(os.getenv('AUTO_DOCSTRING_BLOCK_SPACING', '1'))
        except (TypeError, ValueError):
            # Fixed: a malformed (non-integer) environment value raises
            # ValueError, which the previous except clause did not catch.
            return 1

    @staticmethod
    def _expand_types(obj, include_type=False):
        r'''Wrap the given `obj` with a specific docstring-class wrapper.

        Args:
            obj (`astroid.NodeNG`):
                Some node to wrap.
            include_type (bool, optional):
                If True and `obj` is a container of some kind, for example
                a list of strs, then `obj` will be printed like "list[str]".
                If False, `obj` would be printed as just "str".
                This parameter is used mainly for keeping return-types
                from accidentally printing their container-type twice when
                the container is nested.
                Default is False.

        Returns:
            `SpecialType` or `ComprehensionContainerType` or `ContainerType` \
            or `IterableType` or `Type`:
                The wrapped type.
        '''
        if common_type.SpecialType.is_valid(obj):
            return common_type.SpecialType(obj)
        obj = visit.get_value(obj)
        # Order matters: the dict-comprehension check must run before the
        # generic comprehension check, and containers before iterables.
        if common_type.DictComprehensionContainerType.is_valid(obj):
            return common_type.DictComprehensionContainerType(obj)
        if common_type.ComprehensionContainerType.is_valid(obj):
            return common_type.ComprehensionContainerType(obj)
        if common_type.ContainerType.is_valid(obj):
            return common_type.ContainerType(obj)
        if check.is_itertype(obj):
            return common_type.IterableType(obj, include_type=include_type)
        return common_type.Type(obj)

    @staticmethod
    def _change_type_to_str(*objs):
        '''Create the full string of all return-types for the given `objs`.

        Duplicate type strings are dropped while preserving first-seen order.

        Args:
            *objs (list[:class:`auto_docstring.blocks.google.common_block.Type`]):
                The types to change into strings.

        Returns:
            str: The final set of return types for the given objects. This string
            will be added to the auto-generated docstrings, directly.
        '''
        items = []
        for item in [obj.as_str() for obj in objs]:
            if item not in items:
                items.append(item)
        return common_type.make_items_text(items)
@six.add_metaclass(abc.ABCMeta)
class MultiTypeBlock(CommonBlock):
    '''The base-class used to create "Returns" and "Yields" blocks.'''

    _info_key = '_some_key'
    name = 'multitype_base_block'

    @classmethod
    def _process_args(cls, info):
        '''Build the type string(s) for the node stored at ``cls._info_key``.

        As a side effect the computed indent is written into
        ``info['indent']`` for later use by `draw`.

        Returns:
            list[str]: A single combined type string, or an empty list when
            nothing is stored under ``cls._info_key``.
        '''
        expected_object = info.get(cls._info_key)
        if not expected_object:
            return []
        indent = ''
        # Check if I need this if-statement
        if info.get('lines'):
            indent = environment.get_default_indent()
        info['indent'] = indent
        obj_types = cls._expand_types(expected_object)
        type_info_as_str = cls._change_type_to_str(*obj_types)
        return [type_info_as_str]

    @classmethod
    def _build_indented_docstring_lines(cls, lines, indent='', multiline=False):
        '''list[str]: One formatted docstring line per entry in `lines`.'''
        return [cls._make_line(line, indent=indent, multiline=multiline)
                for line in lines]

    @classmethod
    def draw(cls, info):
        '''Create the docstring lines to represent the given `info`.

        Note:
            If no data is found for cls._info_key, this method will return
            an empty list.

        Args:
            info (dict[str, list[`astroid.NodeNG`]]):
                The parsed AST node whose type needs to be found and then
                converted into a string.

        Returns:
            list[str]: The lines to create.
        '''
        lines = cls._process_args(info)
        if not lines:
            return []
        starting_lines = []
        all_lines = info.get('lines', [])
        if all_lines:
            starting_lines = cls.get_starting_lines()
        # Fixed: compute the multiline flag once and reuse it; previously the
        # result was assigned to an unused local and is_multiline was called
        # a second time inside the call below.
        multiline = is_multiline(all_lines)
        docstring_lines = cls._build_indented_docstring_lines(
            lines,
            info.get('indent', ''),
            multiline=multiline,
        )
        return starting_lines + docstring_lines

    @staticmethod
    def _make_line(obj_type, indent, multiline=False):
        '''Create the docstring line for the given input.

        Args:
            indent (str):
                The amount of space to add to the docstring block.
            obj_type (str):
                The type of the object. Example: "tuple[str]", "bool".
            multiline (`obj`, optional):
                If True, get the user's preferred separator and place it between
                the return type and the return description.
                If False, force the separator to just be " " so that the return
                statement will stay on a single line.
                Default is False.

        Returns:
            str: The created docstring line.
        '''
        if obj_type:
            # This ":" is needed for parsing by auto_docstring
            obj_type = ':' + obj_type
        if not multiline:
            sep = ' '
        else:
            sep = environment.get_description_separator()
        return '{indent}{{{obj_type}!f}}:{sep}{{!f}}.'.format(
            indent=indent,
            obj_type=obj_type,
            sep=sep,
        )
def is_multiline(lines):
    '''bool: Whether `lines` holds more than a single docstring line.'''
    line_count = len(lines)
    return line_count > 1
| [
"colinvfx@gmail.com"
] | colinvfx@gmail.com |
97a5797d6b970d29dbea2c4c90e09131f13ca91c | e5efada3529d94875455c4230c8dabe27fb72a89 | /apps/search/migrations/0015_advancedsearchpage_simplesearchpage.py | 74a14dceeeef2ab60fb56655bb00ed68b2a72af6 | [] | no_license | alexmon1989/uma | d8c321fb0ec9b1a9039b1c83aeaaff774f657416 | 5dea579d634eeb1c8103c21157299b33ca5590f0 | refs/heads/master | 2023-08-03T04:31:13.598577 | 2023-07-22T18:17:13 | 2023-07-22T18:17:13 | 154,835,498 | 0 | 0 | null | 2023-03-02T11:20:54 | 2018-10-26T13:02:12 | Nunjucks | UTF-8 | Python | false | false | 1,712 | py | # Generated by Django 2.1.3 on 2019-10-10 13:38
import ckeditor_uploader.fields
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated (Django 2.1): add the two search-page CMS models.

    Each model stores a rich-text page description in Ukrainian and English.
    """

    dependencies = [
        ('search', '0014_auto_20190719_1155'),
    ]

    operations = [
        migrations.CreateModel(
            name='AdvancedSearchPage',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('description_uk', ckeditor_uploader.fields.RichTextUploadingField(verbose_name='Опис сторінки (укр.)')),
                ('description_en', ckeditor_uploader.fields.RichTextUploadingField(verbose_name='Опис сторінки (англ.)')),
            ],
            options={
                'verbose_name': 'Сторінка розширенного пошуку',
                'verbose_name_plural': 'Сторінка розширенного пошуку',
            },
        ),
        migrations.CreateModel(
            name='SimpleSearchPage',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('description_uk', ckeditor_uploader.fields.RichTextUploadingField(verbose_name='Опис сторінки (укр.)')),
                ('description_en', ckeditor_uploader.fields.RichTextUploadingField(verbose_name='Опис сторінки (англ.)')),
            ],
            options={
                'verbose_name': 'Сторінка простого пошуку',
                'verbose_name_plural': 'Сторінка простого пошуку',
            },
        ),
    ]
| [
"alex.mon1989@gmail.com"
] | alex.mon1989@gmail.com |
4b4d36b26e7d2fc8de7b4008abe4f6bf9efa866e | 036615d94d3cd048d6fd5895b398dd64e31e150b | /Support Vector Regression/Support Vector Regression.py | 78d2384623062416ed31c812c55e61f094d668f5 | [] | no_license | TaymoorAk/Machine-Learning | e5c7469ed98c31370d9ae31d01f45dc82b656fe3 | 938a152469d82bb1fbf9f19104395487606def61 | refs/heads/master | 2020-04-17T00:53:03.677399 | 2020-03-12T12:40:22 | 2020-03-12T12:40:22 | 166,066,165 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,212 | py | ' SUPPORT VECTOR REGRESSION '
# Importing libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

# Importing dataset: position level (column 1) vs salary (column 2)
dataset = pd.read_csv('Position_Salaries.csv')
X = dataset.iloc[:, 1:2].values
# BUG FIX: the original line ended with a dangling '.' (a SyntaxError);
# the intent is to extract the salary column as a 2-D array of values.
y = dataset.iloc[:, 2:3].values

# Apply feature scaling (SVR does not scale its inputs itself)
from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
sc_y = StandardScaler()
X = sc_X.fit_transform(X)
y = sc_y.fit_transform(y)

# Fitting the Support Vector Regressor with an RBF kernel
from sklearn.svm import SVR
regressor = SVR(kernel='rbf')
regressor.fit(X, y)

# Predicting the salary for level 6.5: scale the input, then map the
# prediction back to the original salary scale.
y_pred = sc_y.inverse_transform(regressor.predict(sc_X.transform(np.array([[6.5]]))))

# Plotting the fit at the training points
plt.scatter(X, y, color='red')
plt.plot(X, regressor.predict(X), color='blue')
plt.title('Truth or Bluff (Regression Model)')
plt.xlabel('Position level')
plt.ylabel('Salary')
plt.show()

# Better way for plotting: evaluate on a denser grid for a smoother curve
X_grid = np.arange(min(X), max(X), 0.1)
X_grid = X_grid.reshape((len(X_grid), 1))
plt.scatter(X, y, color='red')
plt.plot(X_grid, regressor.predict(X_grid), color='blue')
plt.title('Truth or Bluff (Regression Model)')
plt.xlabel('Position level')
plt.ylabel('Salary')
plt.show()
"noreply@github.com"
] | noreply@github.com |
5925d8c069b068625838d2d835608430dd10d97c | ce488e674dc95e9ac7d73e63ff3ec688dba5b443 | /practice/linkedlist.py | 67a7f5992d3bf8e065815e75787bc4f05a797fc0 | [] | no_license | groovycol/week8-skills-concepts | 1016357f0814ab147a2969a3169b6885e24a931f | bbbfef361d31bafe412d41178269c1e0f1bba919 | refs/heads/master | 2020-04-02T20:05:01.693548 | 2016-05-30T03:52:00 | 2016-05-30T03:52:00 | 59,912,475 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,280 | py | # Linked list with Node/LinkedList classes
class Node(object):
    """Node in a linked list."""

    def __init__(self, data):
        self.data = data
        self.next = None

    def __repr__(self):
        return "<Node %s>" % self.data


class LinkedList(object):
    """Linked List using head and tail."""

    def __init__(self):
        self.head = None
        self.tail = None

    def add_node(self, data):
        """Add node with data to end of list."""
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
        if self.tail is not None:
            self.tail.next = new_node
        self.tail = new_node

    def remove_node_by_index(self, index):
        """Remove node with given index.

        Fixed: the previous version raised AttributeError when `index` was
        past the end of the list, and left `self.tail` pointing at a removed
        node when the last node was deleted.
        """
        prev = None
        node = self.head
        i = 0
        while (node is not None) and (i < index):
            prev = node
            node = node.next
            i += 1
        if node is None:
            # Index is out of range; nothing to remove.
            return
        if prev is None:
            self.head = node.next
        else:
            prev.next = node.next
        if node is self.tail:
            # Keep the tail pointer accurate when the last node goes away.
            self.tail = prev

    def find_node(self, data):
        """Is a matching node in the list?"""
        current = self.head
        while current is not None:
            if current.data == data:
                return True
            current = current.next
        return False

    def print_list(self):
        """Print all items in the list::

            >>> ll = LinkedList()
            >>> ll.add_node('dog')
            >>> ll.add_node('cat')
            >>> ll.add_node('fish')
            >>> ll.print_list()
            dog
            cat
            fish
        """
        node = self.head
        while node:
            # print(x) with a single argument behaves identically under
            # Python 2 and 3; the old `print x` form broke Python 3 imports.
            print(node.data)
            node = node.next

    def get_node_by_index(self, idx):
        """Return a node with the given index (None when out of range)::

            >>> ll = LinkedList()
            >>> ll.add_node('dog')
            >>> ll.add_node('cat')
            >>> ll.add_node('fish')
            >>> ll.get_node_by_index(0)
            <Node dog>
            >>> ll.get_node_by_index(2)
            <Node fish>
        """
        node = self.head
        i = 0
        while node:
            if i == idx:
                return node
            node = node.next
            i += 1
        return None
| [
"groovycol@gmail.com"
] | groovycol@gmail.com |
75b3291d3896b4f1adca2395879097b999ebaa0c | eeafb1890e41219cf677af5f70612d7c19ddf50f | /number_pattern.py | cf037dceab672d3108f3b2a28579f86e5749da2a | [] | no_license | lradebe/simple_programming_problems | 0c4299308da62b667b90c6d2680ff9858e07a85c | ee1f1a8d612ca425bfe61c48146f559b6f56835b | refs/heads/main | 2023-09-02T18:49:19.625649 | 2021-11-16T19:37:25 | 2021-11-16T19:37:25 | 406,937,386 | 0 | 0 | null | 2021-10-12T22:03:45 | 2021-09-15T22:07:41 | Python | UTF-8 | Python | false | false | 183 | py | def number_pattern(number):
for num in range(number):
print(str(num) * len(range(num)))
print()
if __name__ == '__main__':
    # Script entry point: render the pattern for a fixed input of 4.
    number = 4
    number_pattern(number)
| [
"lwaziradebe100@gmail.com"
] | lwaziradebe100@gmail.com |
daa3d653f4c374205f2e794ea68a62b84f6ddc08 | c95f76dfe8aab9c22de487daa53bfb12d2d23fd5 | /budget/migrations/0009_auto_20200927_1029.py | 9b5eff6317ee7283ca13eecba97254bcb500b20b | [] | no_license | MartaJasek/family_budget | 122b8fcf5bfe234d17e0cc136de65f45d8bf3894 | 1f3bc44bb1ec730696f89b36e483397e833ddfc2 | refs/heads/master | 2022-12-22T20:56:30.749776 | 2020-09-27T13:48:43 | 2020-09-27T13:48:43 | 298,771,615 | 0 | 0 | null | 2020-09-26T10:19:51 | 2020-09-26T08:34:29 | Python | UTF-8 | Python | false | false | 660 | py | # Generated by Django 3.1 on 2020-09-27 08:29
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated (Django 3.1): repoint Transaction.owner and drop Owner."""

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('budget', '0008_auto_20200927_0941'),
    ]

    operations = [
        # Transaction.owner becomes a nullable FK to the (swappable) auth
        # user model; deleting a user cascades to their transactions.
        migrations.AlterField(
            model_name='transaction',
            name='owner',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
        # The standalone Owner model is removed in the same revision —
        # presumably replaced by the user FK above.
        migrations.DeleteModel(
            name='Owner',
        ),
    ]
| [
"marta.jasek@yahoo.com"
] | marta.jasek@yahoo.com |
73a7467e699b9b1bcb92b3230e63358c48fc8268 | fd0ef72e7fa2693ae576389773e794e0c717c7c3 | /feature_extractor/fir_filter.py | 63573dd756f4fbbe3bcd1e9d38bb220550a0230f | [] | no_license | zhangsen1802/spoken_language_classification | 06a1f6d03e6d7ce27f6108064a1e89ea2e6b4950 | b5807d7eadaa978aa5a8d893aa71e1e96b601cd9 | refs/heads/master | 2022-02-27T13:15:25.665758 | 2019-10-15T11:30:05 | 2019-10-15T11:30:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 495 | py | from scipy.signal import kaiserord, lfilter, firwin, freqz
class FIRFilter:
    """Kaiser-window FIR low-pass filter over a stored sampled signal."""

    def __init__(self, signal, rate):
        """Keep the raw samples and their sampling rate (in Hz)."""
        self.signal = signal
        self.rate = rate

    def f_filter(self, ripple=60, cutoff=5):
        """Low-pass filter the stored signal and return the result.

        Args:
            ripple: desired stop-band attenuation in dB.
            cutoff: cutoff frequency in Hz.
        """
        nyquist = self.rate / 2
        # Transition width of 5 Hz, normalised to the Nyquist frequency.
        transition_width = 5.0 / nyquist
        taps_count, kaiser_beta = kaiserord(ripple, transition_width)
        taps = firwin(taps_count, cutoff / nyquist, transition_width,
                      window=('kaiser', kaiser_beta))
        return lfilter(taps, 1, self.signal)
| [
"linhndt@viettel.com.vn"
] | linhndt@viettel.com.vn |
7659459672a025e4b61ead51cea0f2b290a37634 | 796d2e8c1b801b745e628876b20f03399b343b39 | /learning/admin.py | 4505b532c666c0cb54ee8ef0d215b472516390d7 | [] | no_license | educatecomtr/stocks | 25b675996350d8496bf163ffef2d539ebcb9c6ec | 16fa674a694981ce77be4e915440883f3d08e65e | refs/heads/master | 2021-06-25T22:18:29.239692 | 2020-12-26T13:17:14 | 2020-12-26T13:17:14 | 180,432,643 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 100 | py | from django.contrib import admin
from learning.models import Product
# Make the Product model manageable through the Django admin interface.
admin.site.register(Product)
| [
"info@internet.com.tr"
] | info@internet.com.tr |
622d6ebfa27243050d46b43e17fbdae195a07cdd | d4328041e05f72824e001cc300dcd48a6a0b63df | /decisionmaker/urls.py | 65f601bed8bb21cc441e2ffcd27ce20a597c69ec | [] | no_license | rkasale28/Decision-Maker | 4db06caed68ac052e2de54c81b05445de6ae2595 | 21867ff463a15c3d8438b1fbb75392b90f08d4b5 | refs/heads/master | 2022-09-15T19:23:07.122106 | 2020-05-26T06:59:59 | 2020-05-26T06:59:59 | 260,201,411 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 853 | py | """decisionmaker URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path,include
urlpatterns = [
    # Built-in Django admin interface.
    path('admin/', admin.site.urls),
    # Routes delegated to the 'dip' app's URLconf.
    path('dip/',include('dip.urls')),
    # Routes delegated to the 'preferences' app's URLconf.
    path('preference/',include('preferences.urls'))
]
| [
"rohitkasale28@gmail.com"
] | rohitkasale28@gmail.com |
422b432cd65d1b069db07a9ef0dd5bd2f9a6b092 | 8f3985f7e51f3e9874fc3795253d396109095f45 | /tests/test_compose.py | 813f5483c0b910091aa7cb8841e8c66f50ee0eed | [
"Apache-2.0"
] | permissive | H-Pastry/PSFDataset | 18c5b738ff6c281814dce0eb622f2361997cb5c0 | 6826d7c9c21be4a98ed9a0176806b0a1cbf8cedc | refs/heads/master | 2022-06-13T18:13:49.873214 | 2020-05-07T16:20:24 | 2020-05-07T16:20:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,142 | py | # -----------------------------------------------------------
# Test composing transformations.
#
# (C) 2020 Kevin Schlegel, Oxford, United Kingdom
# Released under Apache License, Version 2.0
# email kevinschlegel@cantab.net
# -----------------------------------------------------------
import numpy as np
from ..transforms import Compose
from ..transforms.spatial import Crop, Normalize
class TestCompose:
    def test_Compose(self):
        """Composing Crop then Normalize applies both and merges descriptors."""
        crop_tf = Crop()
        norm_tf = Normalize(2)
        composed = Compose([crop_tf, norm_tf])
        # Applying the composition must crop first, then normalise.
        data = np.array([[[1, 2], [2, 4]]])
        np.testing.assert_array_equal(composed(data),
                                      np.array([[[-1, -1], [0, 1]]]))
        # The combined description must contain every entry of each stage.
        desc = composed.get_desc()
        for stage in (crop_tf, norm_tf):
            for key, val in stage.get_desc().items():
                assert key in desc
                assert desc[key] == val
        assert "compose" in desc
        assert desc["compose"] == "(s)Crop->(s)Normalize"
| [
"kevinschlegel42@gmail.com"
] | kevinschlegel42@gmail.com |
eca04642b2ba94dc8c12489625c83dd3baee1cd1 | ed52713b051cf13009e829eb1ac3db033caa61bf | /learning_templates/basic_app/views.py | 8723f27914374252f748e89a1e16be3e977a1d62 | [] | no_license | zsigil/django-deployment-example | cdbfa4dfb0b283b7a30e323483145d1a93c585fc | a56b9a50b755469a1a0b9ff2ba6c61c61e026242 | refs/heads/master | 2020-03-06T22:18:00.794478 | 2018-03-28T07:36:28 | 2018-03-28T07:36:28 | 127,099,803 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,247 | py | from django.shortcuts import render
from basic_app.forms import UserForm, UserInfoForm
from django.contrib.auth import authenticate, login, logout
from django.http import HttpResponse, HttpResponseRedirect
from django.contrib.auth.decorators import login_required
from django.urls import reverse
# Create your views here.
def index(request):
    # Renders the site's index template.
    return render(request, 'basic_app/index.html')
def other(request):
    # Renders the 'other' demo template.
    return render(request, 'basic_app/other.html')
def relative(request):
    # Renders the 'relative' demo template.
    return render(request, 'basic_app/relative.html')
def register(request):
    """Handle user sign-up: create the auth user plus its profile record.

    On GET renders empty forms; on POST validates both forms, saves the
    user with a hashed password, then saves the linked profile.
    """
    registered = False
    if request.method == 'POST':
        user_form = UserForm(data=request.POST)
        profile_form = UserInfoForm(data=request.POST)
        # BUG FIX: is_valid must be *called*.  The original referenced the
        # bound methods (`form.is_valid`), which are always truthy, so
        # validation was silently skipped.
        if user_form.is_valid() and profile_form.is_valid():
            user = user_form.save()
            # Hash the raw password before persisting the user.
            user.set_password(user.password)
            user.save()
            # Delay the profile save until the user FK can be attached.
            profile = profile_form.save(commit=False)
            profile.user = user
            if 'profile_pic' in request.FILES:
                profile.profile_pic = request.FILES['profile_pic']
            profile.save()
            registered = True
        else:
            print(user_form.errors, profile_form.errors)
    else:
        user_form = UserForm()
        profile_form = UserInfoForm()
    return render(request, 'basic_app/registration.html',
                  {'user_form': user_form,
                   'profile_form': profile_form,
                   'registered': registered})
@login_required
def user_logout(request):
    # Only authenticated users may log out; afterwards return to the index.
    logout(request)
    return HttpResponseRedirect(reverse('index'))
def user_login(request):
    """Authenticate a user from posted credentials; render the form on GET."""
    if request.method == "POST":
        username = request.POST.get('username')
        password = request.POST.get('password')
        user = authenticate(username=username, password=password)
        if user:
            if user.is_active:
                login(request, user)
                return HttpResponseRedirect(reverse('index'))
            else:
                # Credentials are valid but the account was deactivated.
                return HttpResponse('account not active')
        else:
            return HttpResponse('authentication failed')
    else:
        return render(request, 'basic_app/login.html', {})
| [
"chickpox@freemail.hu"
] | chickpox@freemail.hu |
782a5e2a11fe39696a75f0f5a033a5af024cc786 | f8ffac4fa0dbe27316fa443a16df8a3f1f5cff05 | /Python/Counting_Valleys.py | db3c7a3eda8be589ae74a986fadb83c8e44b2c00 | [] | no_license | ankitniranjan/HackerrankSolutions | e27073f9837787a8af7a0157d95612028c07c974 | e110c72d3b137cf4c5cef6e91f58a17452c54c08 | refs/heads/master | 2023-03-16T19:06:17.805307 | 2021-03-09T16:28:39 | 2021-03-09T16:28:39 | 292,994,949 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 528 | py | import math
import os
import random
import re
import sys
# Complete the countingValleys function below.
def countingValleys(n, s):
    """Count the valleys walked through in the first n steps of path s.

    Each step is 'U' (up) or anything else (down).  A valley is a stretch
    below sea level; it is counted the moment an up-step returns the hiker
    to altitude zero.
    """
    altitude = 0
    valleys = 0
    for position in range(n):
        going_up = (s[position] == 'U')
        altitude += 1 if going_up else -1
        if going_up and altitude == 0:
            valleys += 1
    return valleys
if __name__ == '__main__':
    # HackerRank harness: read n and the step string from stdin and write
    # the valley count to the file named by OUTPUT_PATH.
    # Fixed: use a context manager so the output file is closed even when
    # reading/parsing the input raises.
    with open(os.environ['OUTPUT_PATH'], 'w') as fptr:
        n = int(input())
        s = input()
        result = countingValleys(n, s)
        fptr.write(str(result) + '\n')
| [
"noreply@github.com"
] | noreply@github.com |
b7530b96cf67aaf73f9dd13b5488f34cdaa51b55 | 1cc498b1ad793506896891bde83d7e9280d33c13 | /devel/lib/python2.7/dist-packages/nodes_pkg/__init__.py | cb3ee6aa491ffc70a061700110e01b90caeb204e | [] | no_license | EduPonz/RobotProgramming | cddc2447ade83d0d591fe0f21cb0870a3364d6b6 | b0c950f98902f7fa44f21eacfa450ee2a19fe1fa | refs/heads/master | 2021-01-12T06:50:38.777523 | 2017-01-10T12:39:02 | 2017-01-10T12:39:02 | 76,843,920 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 105 | py | /home/eduponz/RobotProgramming/devel/.private/nodes_pkg/lib/python2.7/dist-packages/nodes_pkg/__init__.py | [
"e.ponzs@gmail.com"
] | e.ponzs@gmail.com |
c4a8f635188831c0fd5ddfbb48da5189fa50438c | b263ad54ce23cff978b06ad18e32593e0b38240e | /SnapperCreate/rbCreate/drivers/manual_steering.py | f21a5c278040f8a5312026bb0e940bb2eb95f20c | [] | no_license | carlh/Create2AndSnapperArm | c695f285cc2699f2abd24aefb4e50d6f0deef6e6 | 668361b38c92de5c81fb9ed39386d5c1385df7ba | refs/heads/master | 2020-12-24T18:42:16.016998 | 2016-04-24T12:02:50 | 2016-04-24T12:02:50 | 56,465,471 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,049 | py | """
The MIT License (MIT)
Copyright (c) 2016 Carl Hinkle <cghinkle427@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of
the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from time import sleep
import curses
import atexit
from rbCreate import SpecialRadii
class ManualSteering:
    """Curses-based teleoperation: WASD keys drive the robot's two wheels.

    NOTE(review): this module is Python 2 (it uses `print` statements).
    """

    _MAX_SPEED = 500
    _MAX_RADUIS = 2000  # NOTE(review): typo for _MAX_RADIUS; unused in this class — confirm before renaming.
    _turn_radius = SpecialRadii.straight()
    _turn_radius_delta = 5
    _current_direction_key = -1
    _current_turn_key = -1

    def __init__(self, create):
        # `create` is the robot driver object steering commands are sent to.
        self.create = create
        self._left_speed = 0  # The current speed of the left wheel [-500, 500]
        self._right_speed = 0  # The current speed of the right wheel [-500, 500]
        self._speed_delta = 100  # The amount to change the speed each time a keypress is detected
        self._turn_speed = 20  # Gradual turn speed
        # Put the terminal into non-blocking, unbuffered keypad mode.
        self.stdscr = curses.initscr()
        self.stdscr.nodelay(1)
        self.stdscr.keypad(1)
        curses.cbreak()
        curses.noecho()
        # Restore the terminal even if the program exits unexpectedly.
        atexit.register(self.cleanup, self.stdscr)

    def run(self):
        """Main teleop loop: poll keypresses and translate them into wheel speeds.

        Keys: w/s accelerate forward/backward, a/d skew the wheel speeds to
        turn, x stops the motors, q leaves the loop.
        """
        print "Beginning Manual steering routine"
        self.create.set_safe_mode()
        self.stdscr.addstr(0, 0, "Enter W, A, S, D: ")
        while True:
            c = self.stdscr.getch()
            if c == curses.ERR:  # No key is pressed
                continue
            # Echo the pressed key right after the prompt.
            self.stdscr.addch(0, 18, c)
            self.stdscr.refresh()
            display_string = ""
            if c == ord('q'):
                # Leave the loop; terminal cleanup happens via atexit.
                self.stdscr.addstr(0, 18, "Quitting...")
                self.stdscr.clrtoeol()
                self.stdscr.refresh()
                break
            elif c == ord('x'):
                # Full stop: zero both wheel speeds and halt motion.
                self.stdscr.addstr(0, 18, "Stopping...")
                self.stdscr.clrtoeol()
                self.stdscr.refresh()
                self.create.drive_straight_forward(0)
                self._left_speed = 0
                self._right_speed = 0
                self.create.stop_motion()
                self.stdscr.addstr(0, 18, "Stopped...")
                self.stdscr.clrtoeol()
                self.stdscr.refresh()
            elif c == ord('w'):
                # Accelerate both wheels, each capped at +_MAX_SPEED.
                if self._left_speed < self._MAX_SPEED:
                    self._left_speed += self._speed_delta
                if self._right_speed < self._MAX_SPEED:
                    self._right_speed += self._speed_delta
                if self._left_speed == 0 and self._right_speed == 0:
                    self.create.stop_motion()
                else:
                    self.create.drive_direct(right_speed=self._right_speed, left_speed=self._left_speed)
                display_string = "Left wheel moving at {0} mm/s. Right wheel moving at {1} mm/s".format(self._left_speed, self._right_speed)
                self.stdscr.addstr(0, 18, display_string)
                self.stdscr.clrtoeol()
                self.stdscr.refresh()
            elif c == ord('a'):
                # Gradual left turn: speed up the right wheel, slow the left.
                # NOTE(review): only the right wheel's upper bound is checked
                # here; the left wheel can drift past -_MAX_SPEED — confirm.
                if self._right_speed < self._MAX_SPEED:
                    self._right_speed += int(self._turn_speed / 2)
                    self._left_speed -= int(self._turn_speed / 2)
                self.create.drive_direct(right_speed=self._right_speed, left_speed=self._left_speed)
                display_string = "Left wheel moving at {0} mm/s. Right wheel moving at {1} mm/s".format(self._left_speed, self._right_speed)
                self.stdscr.addstr(0, 18, display_string)
                self.stdscr.clrtoeol()
                self.stdscr.refresh()
            elif c == ord('s'):
                # Decelerate / reverse both wheels, capped at -_MAX_SPEED.
                if self._left_speed > -self._MAX_SPEED:
                    self._left_speed -= self._speed_delta
                if self._right_speed > -self._MAX_SPEED:
                    self._right_speed -= self._speed_delta
                if self._right_speed == 0 and self._left_speed == 0:
                    self.create.stop_motion()
                else:
                    self.create.drive_direct(right_speed=self._right_speed, left_speed=self._left_speed)
                display_string = "Left wheel moving at {0} mm/s. Right wheel moving at {1} mm/s".format(self._left_speed, self._right_speed)
                self.stdscr.addstr(0, 18, display_string)
                self.stdscr.clrtoeol()
                self.stdscr.refresh()
            elif c == ord('d'):
                # Gradual right turn: slow the right wheel, speed up the left.
                # NOTE(review): guard checks the *left* wheel's upper bound
                # only, asymmetric with the 'a' branch — confirm intent.
                if self._left_speed < self._MAX_SPEED:
                    self._right_speed -= int(self._turn_speed / 2)
                    self._left_speed += int(self._turn_speed / 2)
                self.create.drive_direct(right_speed=self._right_speed, left_speed=self._left_speed)
                display_string = "Left wheel moving at {0} mm/s. Right wheel moving at {1} mm/s".format(self._left_speed, self._right_speed)
                self.stdscr.addstr(0, 18, display_string)
                self.stdscr.clrtoeol()
                self.stdscr.refresh()
        # Give the robot a moment to settle before the routine ends.
        sleep(3)
        print "End Manual steering routine"

    def cleanup(self, stdscr):
        """Restore normal terminal modes (registered with atexit)."""
        curses.nocbreak()
        curses.echo()
        stdscr.keypad(0)
        curses.endwin()
| [
"cghinkle427@gmail.com"
] | cghinkle427@gmail.com |
e15c5f1983b8fc63e4ec6da464da368d065b54d4 | 818e64f8bc6c2543a27d686c8ab8d7cb660640ec | /web/webapp/bot/forms.py | 5185e2b17573b73843947d98c0a25eb000c153d7 | [
"Unlicense"
] | permissive | technicalmini/InstagramBot | 02c73e7a1b966e41f3efb18defbdd84e0313fa38 | f7f042ee919a02e41de74bf0f91ce8ccc9ade798 | refs/heads/main | 2023-03-04T11:18:20.545614 | 2021-02-13T19:57:12 | 2021-02-13T19:57:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,793 | py | from flask_wtf import FlaskForm
from wtforms import StringField, SelectField, IntegerField, SubmitField, PasswordField
from wtforms.validators import DataRequired, EqualTo
from wtforms import ValidationError
from wtforms_alchemy import QuerySelectField
from flask_login import current_user
from webapp.models import Abonnenten,Abonniert,Source,Statistiken,Targets_done,Targets_raw
from webapp import db
from sqlalchemy import func
class TargetsLaden(FlaskForm):
    '''Form for loading targets: Instagram credentials plus the profile URL to scrape.'''
    # Bot account credentials; the defaults are placeholder text the user replaces.
    username = StringField("Instagram-Username", default="USERNAME", validators=[DataRequired()])
    password = PasswordField("Instagram-Passwort", default="PASSWORD", validators=[DataRequired()])
    # Profile URL whose data is loaded as targets.
    zielurl = StringField('Zielurl (Form: TargetsLaden)', validators=[DataRequired()])
    submit = SubmitField('Los gehts (Form: TargetsLaden)')
# def target_choice_query():
# return Source.query.group_by(Source.id).all()
class StartWorkflow(FlaskForm):
    '''Form for starting the bot workflow with the Instagram account credentials.'''
    # Bot account credentials; the defaults are placeholder text the user replaces.
    username = StringField("Instagram-Username", default="USERNAME", validators=[DataRequired()])
    password = PasswordField("Instagram-Passwort", default="PASSWORD", validators=[DataRequired()])
    # Disabled fields kept for reference: target-source selection and runtime.
    #target = QuerySelectField(query_factory=target_choice_query, allow_blank=False, get_label="source_url", validators=[DataRequired()])
    #laufzeit = IntegerField(label='Laufzeit (in Stunden)', default="4", validators=[DataRequired()])
    submit2 = SubmitField('Los gehts (Form: StartWorkflow)')
class NewBlacklistEntry(FlaskForm):
    '''Form for adding a single Instagram URL to the blacklist.'''
    url = StringField("Instagram-URL", validators=[DataRequired()])
    submit = SubmitField('Eintragen')
| [
"tobias@Lingnas-MacBook-Pro.local"
] | tobias@Lingnas-MacBook-Pro.local |
bcedf778fb4f57e2789e5c692524d1abfefe846f | af4f7d5522e362c207ddc5a06872923fdb38765a | /weixin/weimsg.py | a63216c03bd5db97005518e7d1ac668ad4e605f7 | [
"CC0-1.0",
"LicenseRef-scancode-public-domain"
] | permissive | jixuduxing/django-ex | 288b823f7f0282675fc3f206ffd1d0a847f741fc | fab0a44045206ccb0038d372c70867d5dedbc0a8 | refs/heads/master | 2021-01-21T19:27:49.899007 | 2017-07-03T02:44:34 | 2017-07-03T02:44:34 | 92,139,106 | 0 | 0 | null | 2017-05-23T06:47:54 | 2017-05-23T06:47:54 | null | UTF-8 | Python | false | false | 3,026 | py | import hashlib
import os
import re
# basic info
re_msg_type = re.compile(r"<MsgType><!\[CDATA\[(.*?)\]\]></MsgType>")
re_msg_tuid = re.compile(r"<ToUserName><!\[CDATA\[(.*?)\]\]></ToUserName>")
re_msg_fuid = re.compile(r"<FromUserName><!\[CDATA\[(.*?)\]\]></FromUserName>")
re_msg_ctime = re.compile(r"<CreateTime>(.*?)</CreateTime>")
re_msg_id = re.compile(r"<MsgId>(.*?)</MsgId>")
re_media_id = re.compile(r"<MediaId><!\[CDATA\[(.*?)\]\]></MediaId>")
# text msg
re_text_content = re.compile(r"<Content><!\[CDATA\[(.*?)\]\]></Content>")
# img msg
re_img_url = re.compile(r"<PicUrl><!\[CDATA\[(.*?)\]\]></PicUrl>")
re_img_id = re.compile(r"")
# location msg
re_locx = re.compile(r"<Location_X>(.*?)</Location_X>")
re_locy = re.compile(r"<Location_Y>(.*?)</Location_Y>")
re_scale = re.compile(r"<Scale>(.*?)</Scale>")
re_label = re.compile(r"<Label><!\[CDATA\[(.*?)\]\]></Label>")
# link msg
re_title = re.compile(r"<Title><!\[CDATA\[(.*?)\]\]></Title>")
re_description = re.compile(r"<Description><!\[CDATA\[(.*?)\]\]></Description>")
re_url = re.compile(r"<Url><!\[CDATA\[(.*?)\]\]></Url>")
# event msg
re_event = re.compile(r"<Event><!\[CDATA\[(.*?)\]\]></Event>")
re_eventkey = re.compile(r"<EventKey><!\[CDATA\[(.*?)\]\]></EventKey>")


class WeiMsg(object):
    """Parsed representation of an incoming WeChat push message (raw XML)."""

    def get_info(self, regx, msg):
        """Return the first capture of `regx` found in `msg`, or '' when absent."""
        found = re.findall(regx, msg)
        return found[0] if found else ''

    def get_text_msg(self, msg):
        """Extract the fields specific to a text message."""
        self.content = self.get_info(re_text_content, msg)

    def get_img_msg(self, msg):
        """Extract the fields specific to an image message."""
        self.pic_url = self.get_info(re_img_url, msg)
        self.media_id = self.get_info(re_media_id, msg)

    def get_location_msg(self, msg):
        """Extract the fields specific to a location message."""
        self.location_x = self.get_info(re_locx, msg)
        self.location_y = self.get_info(re_locy, msg)
        self.scale = self.get_info(re_scale, msg)
        self.label = self.get_info(re_label, msg)

    def get_link_msg(self, msg):
        """Extract the fields specific to a link message."""
        self.title = self.get_info(re_title, msg)
        self.description = self.get_info(re_description, msg)
        self.url = self.get_info(re_url, msg)

    def get_event_msg(self, msg):
        """Extract the fields specific to an event push."""
        self.event = self.get_info(re_event, msg)
        self.event_key = self.get_info(re_eventkey, msg)

    def __init__(self, msg):
        """Generate a message object from the UTF-8 encoded XML payload."""
        msg = msg.decode('utf-8')
        # Fields common to every message type.
        self.to_user_name = self.get_info(re_msg_tuid, msg)
        self.from_user_name = self.get_info(re_msg_fuid, msg)
        self.create_time = self.get_info(re_msg_ctime, msg)
        self.msg_type = self.get_info(re_msg_type, msg)
        self.msg_id = self.get_info(re_msg_id, msg)
        # Dispatch on the message type for the type-specific fields.
        parsers = {
            'text': self.get_text_msg,
            'image': self.get_img_msg,
            'location': self.get_location_msg,
            'link': self.get_link_msg,
            'event': self.get_event_msg,
        }
        parser = parsers.get(self.msg_type)
        if parser is not None:
            parser(msg)
| [
"jixuduxing@gmail.com"
] | jixuduxing@gmail.com |
9b5f678ee01f74948e3abe78205622ca733d1def | f6d96e9505103428402ea9772fdd0b48c4dff7e9 | /tests/test_models/test_place.py | 4bd8e6e2e665353886e8de7c111a98acd68c7add | [] | no_license | KarenCampo777/AirBnB_clone | 8271a2a7f75c01ea875b9232a939f1f58f484705 | 95051e3c7c05837b89966caae55bb54eef81c95f | refs/heads/master | 2023-03-14T03:41:18.367359 | 2021-02-24T22:32:17 | 2021-02-24T22:32:17 | 276,201,869 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,527 | py | #!/usr/bin/python3
"""
Test module for place module
"""
from models.place import Place
import models
import unittest
import os
import datetime
class TestPlace(unittest.TestCase):
""" Testing an Place instance """
def setUp(self):
"""
Setting up the test instance
"""
self.my_base1 = Place()
self.my_base2 = Place()
def Tearown(self):
"""
Closing the test instance
"""
del self.my_base1
del self.my_base2
def test_create(self):
"""
Testing creation of an Place instance
"""
self.assertIsInstance(self.my_base1, Place)
def test_permissions(self):
"""
Testing file permissions to be executable
"""
self.assertTrue(os.access("models/place.py", os.X_OK))
def test_id(self):
"""
Testing if attribute id is as unique as a string type
"""
self.assertIsInstance(self.my_base1.id, str)
self.assertNotEqual(self.my_base1.id, self.my_base2.id)
def test_dates(self):
"""
Testing created_at and updated_at of instances
"""
self.assertIsInstance(self.my_base1.created_at, datetime.datetime)
self.assertIsInstance(self.my_base1.updated_at, datetime.datetime)
prev_date = self.my_base1.updated_at
self.my_base1.save()
self.assertNotEqual(prev_date, self.my_base1.updated_at)
def test_str_format(self):
"""
Testing the function __str__ to have the correct format
[<class name>] (<self.id>) <self.__dict__>
"""
o = self.my_base1
msg1 = o.__str__()
msg2 = "[{}] ({}) {}".format(o.__class__.__name__, o.id, o.__dict__)
self.assertEqual(msg1, msg2)
def test_to_dict(self):
"""
Testing to_dict function to return correct format
"""
ins = self.my_base1
obj = ins.to_dict()
self.assertIsInstance(obj, dict)
self.assertTrue('__class__' in obj)
self.assertEqual(obj['__class__'], 'Place')
self.assertIsInstance(obj['created_at'], str)
self.assertIsInstance(obj['updated_at'], str)
self.assertEqual(obj['created_at'], ins.created_at.isoformat())
self.assertEqual(obj['updated_at'], ins.updated_at.isoformat())
def test_docstring(self):
"""
Testing documentation on place
"""
self.assertIsNotNone(models.place.__doc__)
self.assertIsNotNone(Place.__doc__)
| [
"andresbaymon@gmail.com"
] | andresbaymon@gmail.com |
d84e36d8cd9c5b11daff2b46fd9a882e9ea69bdb | 00cfe2b2c1205d005ce11a17f5cddfb9a29ebcec | /rev_str.py | 866f11246fc22344e322d0fcabb3c0d5f317785e | [] | no_license | roopkumard19/Python | 7c31238ac720ab1b224e78c3c9e29b57975719a7 | 80aef1594cdb9656e11449ec5920108ab1be2e4d | refs/heads/master | 2021-01-19T20:18:33.515637 | 2017-05-23T02:14:10 | 2017-05-23T02:14:10 | 83,746,768 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 384 | py | def reverse(string):
if len(string) <= 1:
return string
else:
return reverse(string[1:]) + string[0]
def reverse_word(text):
text = reverse(text)
text = text.split(" ")
result = []
for item in text:
temp = reverse(item)
result.append(temp)
return result
if __name__ == '__main__':
s = raw_input("Enter the string to be reversed: ")
s = reverse_word(s)
print s
| [
"noreply@github.com"
] | noreply@github.com |
ac98559499064faeb61e844e8d542b2ee2952c71 | 73104abd9324736e7b038df3cb7f84fa2a8f6f42 | /toolseo/wsgi.py | f6e953bc8edc2ef7370f0d5aa0bd3906b1f70893 | [] | no_license | spsp01/tool | b32c107ed499a44301d3b7d27d42a63fbbe280e1 | df6cd3d4caf9ca60058c95a55b81104ba410a1cc | refs/heads/master | 2020-03-18T22:05:43.971295 | 2018-12-10T13:35:49 | 2018-12-10T13:35:49 | 135,324,413 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 391 | py | """
WSGI config for toolseo project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "toolseo.settings")
application = get_wsgi_application()
| [
"spsp01@o2.pl"
] | spsp01@o2.pl |
71c473a4ce644be35e7385d43b5dfc76a15ba0fa | 7699dd3267f62ea1089c7b356639924d738e8033 | /CNN.py | c3e9f4dd60be1255eec6ee339f381d8b33b0dcf9 | [] | no_license | aswinsourav/CIFAR-10-Classification-Using-Numpy | a5f06446cc4c32b5f76edfe6aa60a11a8009e6ec | fa8ab4772cf6750e819a75a90d8c8d366b18f9b0 | refs/heads/master | 2021-05-16T17:44:59.098027 | 2017-09-11T01:26:54 | 2017-09-11T01:26:54 | 103,072,276 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,749 | py | class CNN:
def __init__(self,input_image,filter_size,stride,filter_depth,channels,pad,dropout_p):
self.input_image=input_image
self.filter_size=filter_size
self.stride=stride
self.filter_depth=filter_depth
self.channels=channels
self.pad=pad
self.dropout_p=dropout_p
def weight_init(self):
fan_in=self.filter_size*self.filter_size*self.channels
self.cnn_weights=np.random.randn(self.filter_depth,self.channels,self.filter_size,self.filter_size)/np.sqrt(fan_in/2)
self.bias=np.zeros(self.filter_depth)
#print(self.cnn_weights.shape)
def convolve(self,input_image,dropout=True):
output_shape=int(((input_image.shape[2]-self.filter_size)/self.stride)+1)
#print(output_shape)
self.cnn_output=np.zeros((input_image.shape[0],self.filter_depth,output_shape,output_shape))
#print(cnn_output.shape)
i=j=k=r=c=0
while(k<output_shape):
j=c=0
while(j<output_shape):
self.cnn_output[:,:,r,c]=np.einsum('icjk,fcjk->if',input_image[:,:,k:k+self.filter_size,j:j+self.filter_size],self.cnn_weights)+self.bias
j+=self.stride
c+=1
k+=self.stride
r+=1
npad=((0,0),(0,0),(self.pad,self.pad),(self.pad,self.pad))
self.cnn_output=np.pad(self.cnn_output, pad_width=npad, mode='constant', constant_values=0)
self.cnn_output_relu=relu_op(self.cnn_output)
if(dropout):
dr=(np.random.rand(*self.cnn_output_relu.shape)<self.dropout_p)/self.dropout_p
else:
dr=1
return self.cnn_output_relu*dr | [
"noreply@github.com"
] | noreply@github.com |
33bada0a6ebc9c86ad48aa12cb5fff42acd3588a | 3b43cf4cfc666798ebe85ed1db8858034b13d45c | /tests/universal_functions_tests/power_tests/normal.py | dab58c185239f89bab51ee55c80dbe61e5d4326a | [
"Apache-2.0"
] | permissive | Pandinosaurus/legate.numpy | 5428b80a0a53ab882cd74b5dbf5fd86c7ee82199 | 896f4fd9b32db445da6cdabf7b78d523fca96936 | refs/heads/master | 2023-06-27T04:33:52.982601 | 2021-07-01T21:39:52 | 2021-07-01T21:39:52 | 358,820,941 | 0 | 0 | Apache-2.0 | 2021-08-01T02:57:57 | 2021-04-17T08:06:05 | C++ | UTF-8 | Python | false | false | 1,000 | py | # Copyright 2021 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import legate.numpy as lg
def test():
bases_np = np.random.randn(4, 5)
# avoid fractional exponents
exponents_np = np.random.randint(10, size=(4, 5)).astype(np.float64)
bases = lg.array(bases_np)
exponents = lg.array(exponents_np)
assert lg.allclose(
lg.power(bases, exponents), np.power(bases_np, exponents_np)
)
if __name__ == "__main__":
test()
| [
"wonchanl@nvidia.com"
] | wonchanl@nvidia.com |
de7cbb78c9d77b6a8d1cf0544c40cf06f333cee2 | 1c2d329adcd6683c93125e4309bae2a60a007c9c | /lab1.py | 1a1672eea43477ac063a872033cbf10f3d157c14 | [] | no_license | Heldiggris/CG_Labs | 30e7708e2df5b911172ecdf7caa4e136b1f8f504 | bb75de40183a69dcf678300ac2e6a9d3af5ffc51 | refs/heads/master | 2020-03-29T17:49:47.720588 | 2018-11-05T22:57:02 | 2018-11-05T22:57:02 | 150,181,830 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,185 | py | # Построение графика функции
# Потенко Максим М8О-307Б
# Функция r = a * (1 - cos(phi))
import cv2
import numpy as np
import math
from tkinter import *
from PIL import ImageTk, Image
import os
# Отбеливание изображения
def white_img(img):
h, w, d = img.shape
for i in range(h):
for j in range(w):
img[i, j] = 255
# Рисование координатной оси, при максимальном отклонении max_value
def coordinate_axes(img, max_value):
if(max_value < 1):
max_value = 1
h, w, d = img.shape
font = cv2.FONT_HERSHEY_SIMPLEX
wb_line = max(w // 130, 1)
wl_line = max(w // 250, 1)
w_text = w / 2000 / max(math.log10(max_value) * 0.35 , 1)
# Горизонтальная линия
cv2.line(img,(int(w * 0.1) ,h // 2),(int(w * 0.9), h // 2),(0,0,0), wb_line)
# Вертикальная линия
cv2.line(img,(w // 2, int(h * 0.1)),(w // 2, int(h * 0.9)),(0,0,0),wb_line)
# Стрелочка на оси Ox
cv2.line(img,(int(w * 0.9), h // 2),(int(w * 0.87), int(h / 2.05) ),(0,0,0), wb_line)
cv2.line(img,(int(w * 0.9), h // 2),(int(w * 0.87), int(h / 1.95) ),(0,0,0), wb_line)
# Стрелочка на оси Oy
cv2.line(img, (w // 2, int(h * 0.1)),(int(w / 2.05), int(h * 0.13)), (0,0,0), wl_line)
cv2.line(img, (w // 2, int(h * 0.1)),(int(w / 1.95), int(h * 0.13)), (0,0,0), wl_line)
# Начало координат
cv2.putText(img,"0",(int(w / 1.97), int(h / 1.92)), font, w_text,(100,100,100),1,cv2.LINE_AA)
# Разметка по горизонтальной оси
for i in range(10):
cv2.line(img,(w // 2 - i * w // 25, int(h / 2.03)),(w // 2 - i * w // 25, int(h / 1.97)),(0,0,0), wl_line)
cv2.line(img,(w // 2 + i * w // 25, int(h / 2.03)),(w // 2 + i * w // 25, int(h / 1.97)),(0,0,0), wl_line)
if (i != 0):
if(max_value < 50):
s1 = str(round(i * max_value / 10, 1))
s2 = str(round(-i * max_value / 10, 1))
else:
s1 = str(int(i * max_value // 10))
s2 = str(int(-i * max_value // 10))
cv2.putText(img,s1,(w // 2 + i * w // 25, int(h / 1.92)), font, w_text,(100,100,100),1,cv2.LINE_AA)
cv2.putText(img,s2,(w // 2 - i * w // 25, int(h / 1.92)), font, w_text,(100,100,100),1,cv2.LINE_AA)
# Разметка по вертикальной оси
for i in range(10):
cv2.line(img, (int(w / 2.03), h // 2 - i * h // 25),(int(w / 1.97), h // 2 - i * h // 25),(0,0,0), wl_line)
cv2.line(img, (int(w / 2.03), h // 2 + i * h // 25),(int(w / 1.97), h // 2 + i * h // 25),(0,0,0), wl_line)
if (i != 0):
if(max_value < 50):
s1 = str(round(i * max_value / 10, 1))
s2 = str(round(-i * max_value / 10, 1))
else:
s1 = str(int(i * max_value // 10))
s2 = str(int(-i * max_value // 10))
cv2.putText(img,s1,(int(w / 1.97), h // 2 + i * h // 25), font, w_text,(100,100,100),1,cv2.LINE_AA)
cv2.putText(img,s2,(int(w / 1.95), h // 2 - i * h // 25), font, w_text,(100,100,100),1,cv2.LINE_AA)
# Реакция на кнопку - построение графика
def button_ev():
global img
st = text_box.get()
if(len(st) > 0):
# Проверяем что введено число
try:
a = float(st)
except:
return
# Пустое черное изображение
image = np.zeros((height,width,3), np.uint8)
# Отбеливаем
white_img(image)
points = []
max_val = -math.inf
# Вычисляем все точки и находим максимальное отклонение
for i in range(-314159, 314159 + 1, 10):
r = a * (1 - math.cos(i / 100000))
# print(r)
y = math.sin(i / 100000) * r
x = math.cos(i / 100000) * r
if(abs(x) > max_val):
max_val = abs(x)
if(abs(y) > max_val):
max_val = abs(y)
points.append((x, y))
# Координатная ось
coordinate_axes(image, max_val)
# Рисуем точки
for i in points:
x = i[0]
y = i[1]
if(max_val < 50):
s = round(max_val / 10, 1)
else:
s = max_val // 10
if(s != 0):
cv2.circle(image,((width // 2) + int(x / s * (width // 25)), height // 2 + int(y / s * (width // 25))), 5, (0,0,255), -1)
else:
cv2.circle(image, ((width // 2), (height // 2)), 5,(0,0,255), -1)
# Трансляция изображения из OpenCV в PIL
image = cv2.cvtColor(image,cv2.COLOR_BGR2RGB)
img = Image.fromarray(image)
img = ImageTk.PhotoImage(img)
# Меняем изображение
label.configure(image=img)
# Отклик на движение ползунка
def scroll_event(event):
canvas.configure(scrollregion=canvas.bbox("all"), width=root.winfo_width(),height=root.winfo_height())
if __name__ == "__main__":
# Размер окна
height = 900
width = 900
img = ""
# Создаем окно и даём имя
root=Tk()
root.title("График")
# Даем минимальный и максимальный размер окон, присваиваем стандартный размер
root.minsize(600,600)
root.maxsize(1800,1000)
root.wm_geometry("%dx%d+%d+%d" % (width, height, 0, 0))
# Поле для изображения
canvas=Canvas(root)
canvas.place(x=0,y=0)
frame=Frame(canvas)
# Создаем ползунки
myscrollbar=Scrollbar(root,orient="vertical",command=canvas.yview)
myscrollbar2=Scrollbar(root,orient="horizontal",command=canvas.xview)
canvas.configure(yscrollcommand=myscrollbar.set, xscrollcommand=myscrollbar2.set, width=root.winfo_width(),height=root.winfo_height())
myscrollbar.pack(side="right",fill="y")
myscrollbar2.pack(side="bottom",fill="x")
canvas.create_window((0,0),window=frame,anchor='nw')
# Создаём кнопки
but = Button(root, text="График", background="#555", foreground="#ccc", padx="20", pady="8", font="16", command=button_ev)
but.place(x=0,y=0)
root.update()
tex = Text(root, state=DISABLED)
# Текстовая подпись
tex.config(state=NORMAL, font="16", height=3, width=36)
tex.insert(END, 'График:r = a * (1 - sin(phi))\nВведите параметр a')
# tex.pack()
tex.place(x = 0,y= but.winfo_height())
# Поле для ввода текста
text_box = Entry(root, bd =8,width=20, font="16", background="#555", foreground="#ccc")
text_box.place(x = but.winfo_width(), y=0)
# Ожидание события
root.bind("<Configure>",scroll_event)
# Отображаем изображение
label = Label(frame, width=width, height=height)
label.pack()
# Запускаем окно
root.mainloop()
| [
"potenkog@gmail.com"
] | potenkog@gmail.com |
dc9d726c1e74ed904273b707f36a0cf9e3e067a3 | 237799f505290e899ad710667de2c097b40baa93 | /viper/input/inputTester.py | c9968cd5cdd0a9079db4110c8478d75e83221149 | [] | no_license | GaryZ700/viperEngine | 3b7312cb3089cfa6727298fefcaaff6f66346366 | 18fe9e3909da496cc561939c1ca6f2e8270ec23e | refs/heads/master | 2020-04-04T23:16:58.731007 | 2018-11-19T02:03:44 | 2018-11-19T02:03:44 | 156,353,110 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 364 | py | import msvcrt
import time
from input import input
i = input()
def test():
print("DOwn")
def test2():
print("Up")
i.addKeyBinding("UP", "whileKeyDown", [test])
i.addKeyBinding("UP", "whileKeyDown", [test2])
while(True):
#print(msvcrt.kbhit())
#if(msvcrt.kbhit()):
# msvcrt.getch()
time.sleep(0.1)
i.update()
| [
"root@DESKTOP-7TOTAEN.localdomain"
] | root@DESKTOP-7TOTAEN.localdomain |
7b9478ad3395af89ea80e87c973e3abef34f94b0 | 5c9b5338659f1b5c55a87d9aab65aa1a2b6be9f1 | /article.py | eaaaa37e322e166feb82d13a33f63b4e9c69f7ad | [] | no_license | weirongsie/hulabear_downloader | 2d3b3d00815355a2319be9c386a766d31edaa11e | b97dc9cf45b65fe62e3f43d7e41185244fe54e9a | refs/heads/master | 2021-07-11T23:29:02.241653 | 2018-01-24T16:37:00 | 2018-01-24T16:37:00 | 96,695,413 | 8 | 1 | null | 2018-01-24T15:12:15 | 2017-07-09T16:04:09 | Python | UTF-8 | Python | false | false | 3,079 | py | # -*- coding: utf8 -*-
import re
import ConfigParser
class Article:
def __init__(self, index, raw_content):
self.index = index
self.title = ""
self.author = ""
self.publication_date = None
self.ip = None
self.content = ""
self.responses = []
self.raw_content = raw_content
self._formatter = Formatter()
def build(self):
self.content = self._formatter.normalize(self.raw_content)
self.title = str(self.index) + self._formatter.escape_article_title(self._formatter.parse_article_title(self.content))
class Formatter:
def __init__(self):
self._current_line = 0
self._config = ConfigParser.ConfigParser()
self._config.read("config.ini")
self._page_splitter = self._config.get("data", "page_splitter")
self._title_encode = self._config.get("encode", "file_name")
def normalize(self, data):
return self.reformat(self.decolor(data))
def decolor(self, data):
data = re.sub(r"\[([0-9]*;*)*m|\[m", "", data)
return data
def reformat(self, data):
self._current_line = 4
data = data.replace("[;H\x1B[2J", "") # article header
data = re.sub(r"(\n)*\x1B\[[0-9]*;1H(?P<content>.*)\x1B\[K", self._extract_content, data) # control code
data = re.sub(r"\x1B\[K", "", data)
data = re.sub(r"\[(?P<line>[0-9]+);1H", self._line_no_to_breakers, data) # mapping line number to new lines
data = re.sub(r"<<hulabear_page_splitter>>([\s\x1B])*", "<<hulabear_page_splitter>>\n ", data)
data = re.sub(r"\n[^\n]*<<hulabear_page_splitter>>", "\n "+self._page_splitter, data) # page footer
match = re.split(r"Origin:.*<hulabear.twbbs.org>", data) # split into article and comments
if match and len(match) == 2:
data = match[0] + u"Origin: 呼啦貝爾 <hulabear.twbbs.org>".encode("Big5") + \
re.sub(r"([\s|\x1B]*\n)", "\n", match[1])
return data
def parse_article_title(self, article):
article = re.sub(r"\x1B", " ", article)
match = re.search(ur"\xbc\xd0\xc3D(?P<title>.*)\n", article) # \xbc\xd0\xc3D = big5 encoding for 標題
if match:
return match.group("title").rstrip()
else:
return ""
def escape_article_title(self, title):
return (re.sub(r"[\\/:\*\?\"<>\|]", "_", title)
.decode("big5", "strict").encode(self._title_encode, "strict"))
def _extract_content(self, match):
if match:
return match.group("content")
def _line_no_to_breakers(self, match):
if match:
if int(match.group("line")) > self._current_line: # still in the same page
breaks = int(match.group("line")) - self._current_line
else: # a new page
breaks = int(match.group("line"))
self._current_line = int(match.group("line"))
return "\r\n"*breaks + " " | [
"test@gg"
] | test@gg |
ada10adc0bef6aee3f66cc6505c04af63ade6437 | ca2818572d17285210792694ba1f07c99e11d9ad | /setup.py | 209a4bd93203208084c183cf32cece8f76ddf3bd | [
"Apache-2.0"
] | permissive | tomzhang/codesnap | cc335e8a63af70ed0121b222eb4fc2e35841b0b0 | 04e11176888243052c46a6a04a1ba63a8f80d684 | refs/heads/master | 2022-11-29T16:23:05.625385 | 2020-08-09T07:11:58 | 2020-08-09T07:11:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,134 | py | import setuptools
from distutils.core import Extension
with open("README.md") as f:
long_description = f.read()
setuptools.setup(
name="codesnap",
version="0.0.4",
author="Tian Gao",
author_email="gaogaotiantian@hotmail.com",
description="A profiling tool that can visualize python code in flame graph",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/gaogaotiantian/codesnap",
packages=setuptools.find_packages("src"),
package_dir={"":"src"},
package_data={
"codesnap": [
"html/*.js",
"html/*.css",
"html/*.html"
]
},
ext_modules=[
Extension(
"codesnap.snaptrace",
sources = [
"src/codesnap/modules/snaptrace.c",
]
)
],
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
"Topic :: Software Development :: Quality Assurance",
],
python_requires=">=3.5",
)
| [
"gaogaotiantian@hotmail.com"
] | gaogaotiantian@hotmail.com |
4c236c1bace8030ea42ef74bcaaa38063e9f808c | 67acdf1918786bc59c324af58d16a4ce0b6bdef7 | /main.py | 43f3d2e671d37602c2f7a7a7cdf61c6eb65ecfc5 | [] | no_license | krigelmanj/web-caesar | d66b1ed9c0553bbbc9c42e42166d802ce486ea85 | d3949867594462b553f92f51b393ffe45081216a | refs/heads/master | 2020-05-23T07:49:38.339103 | 2017-01-30T18:31:09 | 2017-01-30T18:31:09 | 80,450,408 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,763 | py | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import webapp2
import ceasar
import helpers
import initials
import vigenere
import cgi
def build_page(textarea_content):
rot_label = "<label>Rotate by:</label>"
rotation_input = "<input type= 'number' name= 'rotation' />"
message_label = "<label> Type a message:</label>"
textarea = "<textarea name = 'message'>" + textarea_content + "</textarea>"
submit = "<input type='submit'/>"
form = ("<form method='post'>" +
rot_label + rotation_input + "<br>" +
message_label + textarea + "<br>" +
submit + "</form>")
header = "<h2>Web Ceasar</h2>"
return header + form
class MainHandler(webapp2.RequestHandler):
def get(self):
content = build_page("")
self.response.write(content)
def post(self):
message = self.request.get("message")
rotation = int(self.request.get("rotation"))
encrypted_message = ceasar.encrypt(message, rotation)
escaped_message = cgi.escape(encrypted_message)
content = build_page(encrypted_message)
self.response.write(content)
app = webapp2.WSGIApplication([
('/', MainHandler)
], debug=True)
| [
"eng250@nyu.edu"
] | eng250@nyu.edu |
7e7837310b3043f2473a0f6f8cd0295e29153617 | 4d30f96c1a67d866eb5b8a35896a48c7edb58dac | /serdnio_zaawansowany/sekcja_2/funkcje.py | 98bfef6b4e9cd85d8398eca295c6aec38d86fc3a | [] | no_license | michaszo18/python- | d729a3b4dbc35937c45711b28d876f4ea9e3e4ca | 4d854423f3c97bf2d726c8337610e81f05e575a7 | refs/heads/master | 2021-06-18T01:50:13.453347 | 2021-02-15T20:04:21 | 2021-02-15T20:04:21 | 171,266,679 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,397 | py | def double(x):
return 2 * x
def root(x):
return x**2
def negative(x):
return -x
def div2(x):
return x/2
number = 8
sequence = [double, root, div2, negative]
tmp_return_value = number
for step in sequence:
tmp_return_value = step(tmp_return_value)
print(tmp_return_value)
print("#" * 60)
def bake(what):
return f"Baking {what}"
def cook(actvity, obj):
return actvity(obj)
print(cook(bake, 'pizza'))
print("#" * 60)
def generate_values(fun1, list_of_numbers):
result_list = list()
for i in list_of_numbers:
result_list.append(fun1(i))
return result_list
print(generate_values(div2, [4,6,7,8,9,123]))
print("#" * 60)
def create_function(kind):
source = f'''
def f(*args):
result = 0
for a in args:
result {kind}= a
return result
'''
exec(source, globals())
return f
f_add = create_function('+')
print(f_add(1,2,3,4,5))
f_mul = create_function('*')
print(f_mul(1,2,3,4,5))
print("#" * 60)
def create_fun_2(kind):
source = f"""
def f(date_1, date_2):
sub = date_1 - date_2
return sub * {kind}
"""
exec(source, globals())
return f
diff_in_days = create_fun_2('1')
diff_in_hours = create_fun_2('24')
diff_in_min = create_fun_2('3600')
print(diff_in_days(20-11-2020, 12-11-2020))
print(diff_in_hours(20-11-2020, 12-11-2020))
print(diff_in_min(20-11-2020, 12-11-2020)) | [
"michaszo18@gmail.com"
] | michaszo18@gmail.com |
8079705487272d96bf238f6579ecca2dadb6d815 | bbb9a19e6e57c925ca2f32e11db06178dcd503a1 | /blog-v1/blogsite/blog/views.py | 702b13845cd1be57b38d6fcff6aa163626698aec | [] | no_license | dhanushka-gayashan/django-projects | ebb2e9d6c9c5081b2aa31ac714c42c8f8a6feb34 | 75699a6185cd20b9297683ef5b504e2b4cc3d2e3 | refs/heads/master | 2022-06-29T02:30:11.252484 | 2020-05-12T05:30:59 | 2020-05-12T05:30:59 | 256,104,640 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,011 | py | from django.shortcuts import render, get_object_or_404, redirect
from django.utils import timezone
from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import LoginRequiredMixin
from django.urls import reverse_lazy
from django.views.generic import (TemplateView, ListView, DetailView,
CreateView, UpdateView, DeleteView)
from blog.models import Post, Comment
from blog.forms import PostForm, CommentForm
# Create your views here.
class AboutView(TemplateView):
template_name = 'about.html'
class PostListView(ListView):
model = Post
def get_queryset(self):
# '-published_date': Order by ASC
return Post.objects.filter(published_date__lte=timezone.now()).order_by('-published_date')
class PostDetailView(DetailView):
model = Post
class PostCreateView(LoginRequiredMixin, CreateView):
# Login require to make a post
login_url = '/login/'
redirect_field_name = 'blog/post_detail.html'
form_class = PostForm
model = Post
class PostUpdateView(LoginRequiredMixin, UpdateView):
# Login require to edit a post
login_url = '/login/'
redirect_field_name = 'blog/post_detail.html'
form_class = PostForm
model = Post
class PostDeleteView(LoginRequiredMixin, DeleteView):
model = Post
# reverse_lazy: only redirect if delete operation is success
# post_list name coming from definitions at URLs.py
success_url = reverse_lazy('post_list')
class PostDraftListView(LoginRequiredMixin, ListView):
# Login require to get list of drafted posts
login_url = '/login/'
redirect_field_name = 'blog/post_list.html'
model = Post
def get_queryset(self):
return Post.objects.filter(published_date__isnull=True).order_by('created_date')
########################################################################################
########################################################################################
@login_required
def post_publish(request, pk):
post = get_object_or_404(Post, pk=pk)
post.publish()
return redirect('post_detail', pk=pk)
@login_required
def add_comment_to_post(request, pk):
post = get_object_or_404(Post, pk=pk)
if request.method == 'POST':
form = CommentForm(request.POST)
if form.is_valid():
comment = form.save(commit=False)
comment.post = post
comment.save()
return redirect('post_detail', pk=post.pk)
else:
form = CommentForm()
return render(request, 'blog/comment_form.html', {'form': form})
@login_required
def comment_approve(request, pk):
comment = get_object_or_404(Comment, pk=pk)
comment.approve()
return redirect('post_detail', pk=comment.post.pk)
@login_required
def comment_remove(request, pk):
comment = get_object_or_404(Comment, pk=pk)
post_pk = comment.post.pk
comment.delete()
return redirect('post_detail', pk=post_pk)
| [
"dhanukdg.soft@gmail.com"
] | dhanukdg.soft@gmail.com |
7682d015ae7bf1c66760f66ccd69828722c28cbf | d3ea9bc41e61a441ab4e239ad8b3fefd4a586e4c | /populace_learning.py | 0f9b759f9fc2e20155b00ff1f7c7216e26757102 | [
"MIT"
] | permissive | RPGLite/analysis | 6363a4e293d331a2164d61c5a8ec058f1ecee578 | 13f683beb26d77c6f7ae7de54808b0cb5acb9eee | refs/heads/master | 2023-09-03T20:16:42.343990 | 2020-12-09T12:38:59 | 2020-12-09T12:38:59 | 253,753,820 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,183 | py | # William
# Generate line graphs for comparison of the average cost of moves comparing player experience and days since release.
from helper_fns import *
import numpy as np
import matplotlib.pyplot as plt
import math, pymongo
from bson import objectid
#from scipy.interpolate import make_interp_spline, BSpline
# Some tunable constants
critical_section_delta = 0.1
# End
s1 = process_lookup("beta")
s2 = process_lookup("tango-2-3")
set_config("beta")
def get_cost_list(g, p, lookup):
"""
Return a list of costs per move of all critical moves made by the player
"""
costs = [] # return list
# Ignore dodgy games
if g["_id"] == objectid.ObjectId("5e98b4658a225cfc82573fd1") or g["_id"] == objectid.ObjectId("5eaaee2c684de5692fc01ef6") or g["_id"] == objectid.ObjectId("5ec108ef29108c1ba22cb375"):
return costs # just leave.
state = get_initial_state(g) # store game state, initialised to game start.
pos = g["usernames"].index(p) + 1 # pos 1 = p is player 1.
pair = g["p1c1"][0] + g["p1c2"][0] if pos==1 else g["p2c1"][0] + g["p2c2"][0]
if chars.index(pair[0]) > chars.index(pair[1]):
pair = pair[1]+pair[0] # correct ordering
for m in g["Moves"]:
if m[1] == str(pos): # if it is the player's turn
if pos == 1:
act, max_poss = cost(state, pair, m, lookup, classify_mistake=True) # actual P() and maximum possible P()
if check_actions_available(state, pair, critical_section_delta, lookup):
costs += [(max_poss - act) / max_poss]
# print(state, pair, max_poss, count_actions_available(state, pair, lookup))
else:
act, max_poss = cost(flip_state(state), pair, m, lookup, classify_mistake=True) # actual P() and maximum possible P()
if check_actions_available(flip_state(state), pair, critical_section_delta, lookup):
costs += [(max_poss - act) / max_poss]
# print(state, pair, max_poss, count_actions_available(flip_state(state), pair, lookup))
do_action(m, state)
return costs
fig, ((ax0, ax1), (ax2, ax3)) = plt.subplots(ncols=2, nrows=2, figsize=(16,12), sharey=True)
def plot_section(min_games, axis):
games = []
for i in range(min_games):
games += [[]]
users = []
for p in db.players.find({"Username":{"$exists":True}}):
if p["Username"] in ["probablytom", "cptKav"]: # Do not process the devs, they should know better.
continue
if db.completed_games.count_documents({"usernames":p["Username"], "winner":{"$exists":True}}) < min_games:
continue # Didn't play enough games
#print("parsing user {0}".format(p["Username"]), end=" ")
users += [p["Username"]]
count = 0
for g in db.completed_games.find({"usernames":p["Username"], "winner":{"$exists":True}}):
if g["_id"] in [objectid.ObjectId("5e98b4658a225cfc82573fd1"), objectid.ObjectId("5eaaee2c684de5692fc01ef6"), objectid.ObjectId("5ec108ef29108c1ba22cb375")]:
continue
if "balance_code" in g.keys() and what_config() == 1:
set_config("tango-2-3")
if "balance_code" not in g.keys() and what_config() == 2:
set_config("beta")
## Play each game, count the costs of the user, add them to the appropriate position.
costs = get_cost_list(g, p["Username"], s1 if "balance_code" not in g.keys() else s2)
if len(costs) == 0:
continue
try :
games[count] += [sum(i>0.25 for i in costs) / len(costs)]
except IndexError:
print("oops", count)
count += 1
if count >= min_games:
break
#print("count reached {0}".format(count))
for i in range(len(games)):
remove_these = []
for j in range(len(games[i])):
if math.isnan(games[i][j]):
remove_these = [j] + remove_these
for e in remove_these:
games[i].remove(games[i][e])
games[i] = np.average(games[i])
axis.plot(games, 'o')
x = np.array(range(min_games))
y = np.array(games)
m, n, b = np.polyfit(x, y, 2)
axis.plot(x, m*x*x+n*x+b, '-')
axis.set_title("minimum games: {0}, players: {1}".format(min_games, len(users)))
# x_labels = [0]
# for i in range(min_games):
# if i % 10 == 0:
# x_labels += [i]
# axis.set_xticks(np.arange(min_games / len(x_labels)))
# axis.set_xticklabels(x_labels)
if axis == ax0 or axis == ax2:
axis.set_ylabel("Mistakes per move (cost>0.25)")
axis.set_xlabel("Games")
print(min_games, np.average(games))
# xnew = np.linspace(0, min_games, min_games*3)
# spl = make_interp_spline(min_games, games, k=3) # type: BSpline
# power_smooth = spl(xnew)
# ax.plot(xnew, power_smooth, '-')
# Render the mistake-rate curve for several minimum-game thresholds,
# one subplot per threshold.
plot_section(25, ax0)
plot_section(50, ax1)
plot_section(100, ax2)
plot_section(200, ax3)
#plot_section(400, ax4)
plt.tight_layout()
plt.show()
| [
"bkav93@gmail.com"
] | bkav93@gmail.com |
c4f32be09e672db78a8a8c41b2a248a37a4287d8 | af41c70b2a279d20a6f5c8aa6ea56d41894f9366 | /caravaggio_rest_api/apps.py | a5aa7ddb7ed39ddbaacf0b223e4489f0483a4074 | [
"MIT"
] | permissive | xalperte/django-caravaggio-rest-api | 7c0cd2706520b1d82d9a12d52be526c7f2fb3aa5 | 36fcdc6b77982fc7fd2462f2c8997911f14047c4 | refs/heads/master | 2020-06-20T20:18:59.357529 | 2019-03-11T16:39:41 | 2019-03-11T16:39:41 | 197,235,376 | 0 | 0 | MIT | 2020-02-29T02:06:57 | 2019-07-16T17:05:58 | Python | UTF-8 | Python | false | false | 376 | py | # -*- coding: utf-8 -*
# Copyright (c) 2018-2019 PreSeries Tech, SL
# All rights reserved.
from django.apps import AppConfig
class CaravaggioRESTAPIConfig(AppConfig):
    """Django AppConfig for the caravaggio_rest_api application."""
    name = 'caravaggio_rest_api'
    verbose_name = "Django Caravaggio REST API"

    def ready(self):
        # App-startup hook; currently a no-op (system checks left disabled below).
        pass
        # Add System checks
        # from .checks import pagination_system_check  # NOQA
| [
"javier.alperte@gmail.com"
] | javier.alperte@gmail.com |
8a4209560e01a9bb2625b02445afa69dcf3b28fc | e7ff2f9e21a94f2956b8c79f268dc6d45b41237b | /Frontend/node_modules/watchpack-chokidar2/node_modules/fsevents/build/config.gypi | b5962c025c83982c05fecf7c1819e71e4893c18a | [
"MIT"
] | permissive | vipul-07/MERN-Project | fcb4af686557b99b802404e8622905781e89bbc3 | c0bdd3b5dfc73b2657b8563d069360e11466714a | refs/heads/master | 2023-02-14T15:42:38.653627 | 2021-01-10T05:35:02 | 2021-01-10T05:35:02 | 317,460,195 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,709 | gypi | # Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"asan": 0,
"build_v8_with_gn": "false",
"coverage": "false",
"dcheck_always_on": 0,
"debug_nghttp2": "false",
"debug_node": "false",
"enable_lto": "false",
"enable_pgo_generate": "false",
"enable_pgo_use": "false",
"error_on_warn": "false",
"force_dynamic_crt": 0,
"host_arch": "x64",
"icu_data_in": "../../deps/icu-tmp/icudt67l.dat",
"icu_endianness": "l",
"icu_gyp_path": "tools/icu/icu-generic.gyp",
"icu_path": "deps/icu-small",
"icu_small": "false",
"icu_ver_major": "67",
"is_debug": 0,
"llvm_version": "11.0",
"napi_build_version": "6",
"node_byteorder": "little",
"node_debug_lib": "false",
"node_enable_d8": "false",
"node_install_npm": "true",
"node_module_version": 83,
"node_no_browser_globals": "false",
"node_prefix": "/usr/local",
"node_release_urlbase": "https://nodejs.org/download/release/",
"node_shared": "false",
"node_shared_brotli": "false",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_nghttp2": "false",
"node_shared_openssl": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_target_type": "executable",
"node_use_bundled_v8": "true",
"node_use_dtrace": "true",
"node_use_etw": "false",
"node_use_node_code_cache": "true",
"node_use_node_snapshot": "true",
"node_use_openssl": "true",
"node_use_v8_platform": "true",
"node_with_ltcg": "false",
"node_without_node_options": "false",
"openssl_fips": "",
"openssl_is_fips": "false",
"ossfuzz": "false",
"shlib_suffix": "83.dylib",
"target_arch": "x64",
"v8_enable_31bit_smis_on_64bit_arch": 0,
"v8_enable_gdbjit": 0,
"v8_enable_i18n_support": 1,
"v8_enable_inspector": 1,
"v8_enable_lite_mode": 0,
"v8_enable_object_print": 1,
"v8_enable_pointer_compression": 0,
"v8_no_strict_aliasing": 1,
"v8_optimized_debug": 1,
"v8_promise_internal_field_count": 1,
"v8_random_seed": 0,
"v8_trace_maps": 0,
"v8_use_siphash": 1,
"want_separate_host_toolset": 0,
"xcode_version": "11.0",
"nodedir": "/Users/apple/Library/Caches/node-gyp/14.10.1",
"standalone_static_library": 1,
"dry_run": "",
"legacy_bundling": "",
"save_dev": "",
"browser": "",
"commit_hooks": "true",
"only": "",
"viewer": "man",
"also": "",
"rollback": "true",
"sign_git_commit": "",
"audit": "true",
"usage": "",
"globalignorefile": "/usr/local/etc/npmignore",
"init_author_url": "",
"maxsockets": "50",
"shell": "/bin/zsh",
"metrics_registry": "https://registry.npmjs.org/",
"parseable": "",
"shrinkwrap": "true",
"init_license": "ISC",
"timing": "",
"if_present": "",
"cache_max": "Infinity",
"init_author_email": "",
"sign_git_tag": "",
"cert": "",
"git_tag_version": "true",
"local_address": "",
"long": "",
"preid": "",
"fetch_retries": "2",
"registry": "https://registry.npmjs.org/",
"key": "",
"message": "%s",
"versions": "",
"globalconfig": "/usr/local/etc/npmrc",
"always_auth": "",
"logs_max": "10",
"prefer_online": "",
"cache_lock_retries": "10",
"global_style": "",
"update_notifier": "true",
"audit_level": "low",
"heading": "npm",
"fetch_retry_mintimeout": "10000",
"offline": "",
"read_only": "",
"searchlimit": "20",
"access": "",
"json": "",
"allow_same_version": "",
"description": "true",
"engine_strict": "",
"https_proxy": "",
"init_module": "/Users/apple/.npm-init.js",
"userconfig": "/Users/apple/.npmrc",
"cidr": "",
"node_version": "14.10.1",
"user": "",
"auth_type": "legacy",
"editor": "vi",
"ignore_prepublish": "",
"save": "true",
"script_shell": "",
"tag": "latest",
"before": "",
"global": "",
"progress": "true",
"ham_it_up": "",
"optional": "true",
"searchstaleness": "900",
"bin_links": "true",
"force": "",
"save_prod": "",
"searchopts": "",
"depth": "Infinity",
"node_gyp": "/usr/local/lib/node_modules/npm/node_modules/node-gyp/bin/node-gyp.js",
"rebuild_bundle": "true",
"sso_poll_frequency": "500",
"unicode": "true",
"fetch_retry_maxtimeout": "60000",
"ca": "",
"save_prefix": "^",
"scripts_prepend_node_path": "warn-only",
"sso_type": "oauth",
"strict_ssl": "true",
"tag_version_prefix": "v",
"dev": "",
"fetch_retry_factor": "10",
"group": "20",
"save_exact": "",
"cache_lock_stale": "60000",
"prefer_offline": "",
"version": "",
"cache_min": "10",
"otp": "",
"cache": "/Users/apple/.npm",
"searchexclude": "",
"color": "true",
"package_lock": "true",
"fund": "true",
"package_lock_only": "",
"save_optional": "",
"user_agent": "npm/6.14.8 node/v14.10.1 darwin x64",
"ignore_scripts": "",
"cache_lock_wait": "10000",
"production": "",
"save_bundle": "",
"send_metrics": "",
"init_version": "1.0.0",
"node_options": "",
"umask": "0022",
"scope": "",
"git": "git",
"init_author_name": "",
"onload_script": "",
"tmp": "/var/folders/0w/px0kn_6s561dhjplhgbypnj80000gn/T",
"unsafe_perm": "true",
"format_package_lock": "true",
"link": "",
"prefix": "/usr/local"
}
}
| [
"apple@Apples-MacBook-Pro.local"
] | apple@Apples-MacBook-Pro.local |
8c6d81cd4db4951e024e27145336d78bfff492b4 | d583393a14c84b3e0dbf4a63a481beddb5f5a5fe | /scripts/start_guild.py | 64dc5a24aeed3a88c208b914ad4d72350b854a56 | [] | no_license | daxingyou/qingguoqingchen-erlang- | 33917edb1d0b8a2311d81c485f271f3bbedc952a | 5daa9c16847033c872e1c544eeb2d902359e4d97 | refs/heads/master | 2021-12-04T10:01:47.321589 | 2015-03-09T08:34:05 | 2015-03-09T08:34:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 278 | py | #!/usr/bin/env python
import os
import sys
# Default to localhost; an optional first CLI argument overrides the host.
host = '127.0.0.1'
if len(sys.argv) > 1:
    host = sys.argv[1]
# Build the start command, pointing every service role at the same host.
cmdline = './start.py --dbcenter $host$ --linecenter $host$ --guild $host$ --gmcenter $host$'
cmdline = cmdline.replace('$host$', host)
print cmdline  # Python 2 print statement: echo the command before running it
os.system(cmdline)
| [
"yunfei_lei@126.com"
] | yunfei_lei@126.com |
f32cc09e9b5e4191dae2fb825a128f8ca6aa38c6 | 2e2a02ec8323982975ace3d249b22a42d8b97a1f | /skipper.py | 11171dc6ca97629b3d735b09f2921f679e80ed68 | [] | no_license | datagovua/os-budget-ukraine | 4e8c6d0373aead42890349befbd69bf8e8fef0a1 | 3a45f89c3872c9b9b45fb1206da445989b37b335 | refs/heads/master | 2021-01-13T02:49:03.608617 | 2016-12-22T20:59:14 | 2016-12-23T01:14:22 | 77,156,721 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 385 | py | import logging
from datapackage_pipelines.wrapper import ingest, spew
_, datapackage, resource_iterator = ingest()
def intTryParse(value):
try:
int(value)
return True
except ValueError:
return False
def process(res):
    """Yield only the rows whose '1.0' field parses as an integer."""
    yield from (row for row in res if intTryParse(row['1.0']))
# Stream every resource through the integer-row filter and re-emit the package.
spew(datapackage, (process(res) for res in resource_iterator))
| [
"vanuan@gmail.com"
] | vanuan@gmail.com |
075d12cc2468772d65f234d0d7a4d9c0f0e457e7 | 0a7c900b7fb2ca321852ae475c2ed2caac030477 | /main.py | 559cf0e231904ad36b3b0d2312b025871e48499b | [] | no_license | fabiofucci98/PathFinding | dd1d7ec78aa951bcaa692a5909b3cad2425482c0 | 591a74280f092fd3bf122155cc7bc9c106535f24 | refs/heads/main | 2023-03-24T05:56:28.968213 | 2021-03-22T14:06:08 | 2021-03-22T14:06:08 | 350,362,132 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,398 | py | from tkinter import *
from tkinter.ttk import *
from graph import Graph
import search
import search_pruning
import heuristic
class App(Tk):
def __init__(self, parent, *args, **kwargs):
super().__init__(parent, *args, **kwargs)
# size of square side and canvas side
self.square_side = 25
self.size = 700
# creates the interface
self.title('Path finding')
self.geometry('700x800')
self.resizable(False, False)
self.canvas = Canvas(self, height=self.size, width=self.size)
self.canvas.bind('<Button-1>', self.change_color)
self.canvas.bind('<B1-Motion>', self.change_color)
button_start = Button(self, text='Select start node')
button_start.bind('<Button-1>', self.select_start)
button_end = Button(self, text='Select end node')
button_end.bind('<Button-1>', self.select_end)
button_walls = Button(self, text='Select wall node')
button_walls.bind('<Button-1>', self.select_walls)
button_search = Button(self, text='Perform search')
button_search.bind('<Button-1>', self.perform_search)
button_reset = Button(self, text='reset')
button_reset.bind('<Button-1>', self.reset)
button_water = Button(self, text='Select water node')
button_water.bind('<Button-1>', self.select_water)
algos = ['naive',
'breadth first',
'depth first',
'iterative deepening',
'uniform cost',
'A star']
self.algos_combo_box = Combobox(self, values=algos)
self.algos_combo_box.current(1)
self.canvas.place(x=0, y=0)
offset = 10
button_start.place(x=offset, y=710)
button_water.place(x=offset, y=760)
offset += button_start.winfo_reqwidth()+20
button_end.place(x=offset, y=710)
self.algos_combo_box.place(x=offset, y=763)
offset += button_end.winfo_reqwidth() + 20
button_walls.place(x=offset, y=710)
offset += button_walls.winfo_reqwidth() + 20
button_search.place(x=offset, y=710)
offset += button_search.winfo_reqwidth() + 20
button_reset.place(x=offset, y=710)
# 0 Wall 1 Start 2 End 3 Water
self.mode = 0
self.color_node_mode_dict = [
('black', 'wall', 0), ('red', 'start', 1), ('green', 'end', 2), ('blue', 'water', 3), ('white', 'node', None)]
# will contain the ids of the squares on the canvas
self.matrix = []
# used to check if a tile can change color
self.last_x = None
self.last_y = None
self.can_change_color = True
# fills the canvas
for i in range(self.size // self.square_side):
x = self.square_side*i
row = []
for j in range(self.size // self.square_side):
y = self.square_side*j
row.append(self.canvas.create_rectangle(
x, y, x+self.square_side, y+self.square_side, fill='white'))
self.matrix.append(row)
# clears the grey trail used to show the path
def clear_from_route(self):
for row in self.matrix:
tmp_row = []
for elem in row:
color = self.canvas.itemcget(elem, 'fill')
if color == 'grey':
self.canvas.itemconfig(elem, fill='white')
# Performs the search
def perform_search(self, event):
self.clear_from_route()
alg = self.get_alg()
matrix = []
for row in self.matrix:
tmp_row = []
for elem in row:
color = self.canvas.itemcget(elem, 'fill')
value = self.get_node_type(color)
tmp_row.append(value)
matrix.append(tmp_row)
g = Graph(matrix)
route = alg(
g, [g.start_node], lambda n: n == g.end_node) if alg != search_pruning.A_star else alg(
g, [g.start_node], lambda n: n == g.end_node, heuristic.distance)
for node in route[1:-1]:
i, j = node.value.split('_')[1:]
i, j = int(i), int(j)
self.canvas.itemconfig(self.matrix[i][j], fill='grey')
# These function get called when a button is pressed
def select_start(self, event):
self.mode = 1
def select_end(self, event):
self.mode = 2
def select_walls(self, event):
self.mode = 0
def select_water(self, event):
self.mode = 3
def reset(self, event):
for row in self.matrix:
for elem in row:
self.canvas.itemconfig(elem, fill='white')
def change_color(self, event):
x = event.x
y = event.y
x = (x-(x % self.square_side))//self.square_side
y = (y-(y % self.square_side))//self.square_side
if (self.last_x == x and self.last_y == y) or not self.can_change_color:
return
self.last_x = x
self.last_y = y
self.can_change_color = False
self.after(60, self.can_change_color_again)
try:
square = self.matrix[x][y]
except IndexError:
return
color = self.get_color()
if self.canvas.itemcget(square, 'fill') == 'white':
self.canvas.itemconfig(square, fill=color)
else:
self.canvas.itemconfig(square, fill='white')
def can_change_color_again(self):
self.can_change_color = True
# getters
def get_color(self):
for tuple in self.color_node_mode_dict:
if tuple[2] == self.mode:
return tuple[0]
def get_node_type(self, color):
for tuple in self.color_node_mode_dict:
if tuple[0] == color:
return tuple[1]
def get_alg(self):
value = self.algos_combo_box.get()
alg_dict = {'naive': search_pruning.naive_search,
'breadth first': search_pruning.breadth_first_search,
'depth first': search_pruning.depth_first_search,
'iterative deepening': search_pruning.it_iterative_deepening,
'uniform cost': search_pruning.uniform_cost_search,
'A star': search_pruning.A_star}
return alg_dict[value]
if __name__ == '__main__':
app = App(None)
app.mainloop()
| [
"noreply@github.com"
] | noreply@github.com |
b86cb72958a9d1bc11a459aaf2d9c0061872b3f9 | da1f46a29a8736d964af372a4c6aa7f205ff67da | /01_Vulture_vs_Zealot/Perfect_Performance_test/vz_PPO_test.py | 60e827aaf48015a0ba63c886d220fb5037fe699a | [] | no_license | betastarcraft/Problem_1 | 9eb783410633a3f981a73dabd20382dae1ed5128 | 401682a6f02067333ab004ca906c0edf6927dff2 | refs/heads/master | 2020-08-30T06:26:50.293707 | 2020-05-21T10:56:57 | 2020-05-21T10:56:57 | 218,290,014 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,387 | py | from saida_gym.starcraft.vultureVsZealot import VultureVsZealot
## gym 환경 import VultureVsZealot
from collections import deque
import numpy as np
import random
import os
import math
import pickle
import time
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.distributions import Categorical ## 분포 관련
from tensorboardX import SummaryWriter
class Actor(nn.Module):
    """Policy network mapping a state vector to categorical action probabilities.

    Architecture: state_size -> 128 -> 512 -> 128 -> action_size, tanh on the
    hidden layers and a softmax over the action head.
    """

    def __init__(self, state_size, action_size):
        super(Actor, self).__init__()
        # Three hidden layers followed by the action-logit head.
        self.fc1 = nn.Linear(state_size, 128)
        self.fc2 = nn.Linear(128, 512)
        self.fc3 = nn.Linear(512, 128)
        self.fc4 = nn.Linear(128, action_size)

    def forward(self, x, soft_dim):
        """Return softmax action probabilities; `soft_dim` is the softmax axis."""
        hidden = torch.tanh(self.fc1(x))
        hidden = torch.tanh(self.fc2(hidden))
        hidden = torch.tanh(self.fc3(hidden))
        logits = self.fc4(hidden)
        # The network estimates a probability for each discrete action.
        return F.softmax(logits, dim=soft_dim)
def scale_velocity(v):
    """Normalize a speed by the vulture's top speed (6.4 px/frame)."""
    top_speed = 6.4
    return v / top_speed
def scale_coordinate(pos):
    """Scale a relative map coordinate into roughly [-1, 1], clamped at +/-320 px."""
    if pos > 320:
        return 1
    if pos < -320:
        return -1
    # Quantize to 16-px tiles, then divide by the 20-tile clamp radius.
    return int(pos / 16) / 20
def scale_angle(angle):
    """Map an angle in [0, 2*pi) onto [-1, 1), centered at pi."""
    centered = angle - math.pi
    return centered / math.pi
def scale_cooldown(cooldown):
    """Scale weapon-cooldown frames to a small positive range; +1 shifts -1 to 0."""
    shifted = cooldown + 1
    return shifted / 15
def scale_vul_hp(hp):
    """Normalize vulture hit points by its 80 HP maximum."""
    max_hp = 80
    return hp / max_hp
def scale_zeal_hp(hp):
    """Normalize zealot hit points (HP + shield) by its 160-point maximum."""
    max_hp = 160
    return hp / max_hp
def scale_bool(boolean):
    """Encode a truthy flag as integer 1 and a falsy one as 0."""
    return int(bool(boolean))
def rearrange_State(observation, state_size, env):
    """Flatten an env observation into a fixed-length numeric feature vector.

    Layout (38 features total for state_size=38): 9 own-unit features plus
    one obstacle distance per `pos_info` ray, then 11 enemy features.
    Missing units are zero-padded so the length stays constant.
    Returns a deque of floats/ints capped at `state_size`.
    """
    state_arr = deque(maxlen=state_size)
    my_x = 0
    my_y = 0
    if observation.my_unit:
        # 9 scalar features per own unit, then the terrain rays.
        for idx, me in enumerate(observation.my_unit): ## 9
            my_x = me.pos_x
            my_y = me.pos_y
            # Heading of the velocity vector, normalized to [-1, 1].
            state_arr.append(math.atan2(me.velocity_y, me.velocity_x) / math.pi)
            state_arr.append(scale_velocity(math.sqrt((me.velocity_x) ** 2 + (me.velocity_y) ** 2)))
            state_arr.append(scale_cooldown(me.cooldown))
            state_arr.append(scale_vul_hp(me.hp))
            state_arr.append(scale_angle(me.angle))
            state_arr.append(scale_bool(me.accelerating))
            state_arr.append(scale_bool(me.braking))
            state_arr.append(scale_bool(me.attacking))
            state_arr.append(scale_bool(me.is_attack_frame))
            # One normalized obstacle distance per terrain ray.
            # assumes len(pos_info) == state_size - 9 - 11 — TODO confirm
            for i, terrain in enumerate(me.pos_info): ##12
                state_arr.append(terrain.nearest_obstacle_dist / 320)
    else:
        # No own unit: zero-pad everything except the 11 enemy slots.
        for _ in range(state_size - 11):
            state_arr.append(0)
    if observation.en_unit:
        # 11 scalar features per enemy unit, relative to our position.
        for idx, enemy in enumerate(observation.en_unit): ## 11
            state_arr.append(math.atan2(enemy.pos_y - my_y, enemy.pos_x - my_x) / math.pi)
            state_arr.append(scale_coordinate(math.sqrt((enemy.pos_x - my_x) ** 2 + (enemy.pos_y - my_y) ** 2)))
            state_arr.append(math.atan2(enemy.velocity_y, enemy.velocity_x) / math.pi)
            state_arr.append(scale_velocity(math.sqrt((enemy.velocity_x) ** 2 + (enemy.velocity_y) ** 2)))
            state_arr.append(scale_cooldown(enemy.cooldown))
            # Zealot effective health = HP plus shields.
            state_arr.append(scale_zeal_hp(enemy.hp + enemy.shield))
            state_arr.append(scale_angle(enemy.angle))
            state_arr.append(scale_bool(enemy.accelerating))
            state_arr.append(scale_bool(enemy.braking))
            state_arr.append(scale_bool(enemy.attacking))
            state_arr.append(scale_bool(enemy.is_attack_frame))
    else:
        # No enemy visible: zero-pad the 11 enemy slots.
        for _ in range(11):
            state_arr.append(0)
    return state_arr
def reward_reshape(state, next_state, reward, done):
    """Reshape the raw environment reward into a denser learning signal.

    `state`/`next_state` are feature vectors from rearrange_State: index 3 is
    the vulture's scaled HP and index -6 the zealot's scaled HP+shield.
    """
    KILL_REWARD = 10
    DEAD_REWARD = -10
    DAMAGED_REWARD = -4
    HIT_REWARD = 2
    if done:
        if reward <= 0:
            # Episode ended with the vulture dying.
            return DEAD_REWARD
        # Zealot killed; bonus for a no-damage ("perfect") clear.
        shaped = KILL_REWARD
        if next_state[3] == 1.0 and next_state[-6] == 0:
            shaped += 5
        return shaped
    # Mid-episode shaping: penalize taking damage, reward landing hits.
    # Both can apply in the same step, hence two independent checks.
    if state[3] - next_state[3] > 0:
        reward += DAMAGED_REWARD
    if state[-6] - next_state[-6] > 0:
        reward += HIT_REWARD
    return reward
def main():
    """Evaluate a saved PPO actor on VultureVsZealot for 1000 episodes.

    Loads pretrained weights, runs the greedy-ish (sampled) policy, and
    prints the fraction of perfect clears (full vulture HP, zealot dead).
    """
    load = True
    episode = 0 ## 21710: 91.1%, 21610: 91%, 21600: 90% — of the tested checkpoints, 3 scored >= 90%
    ## Depending on the environment's initial conditions, perfect-clear rate ranged 86%-91%.
    env = VultureVsZealot(version=0, frames_per_step=12, action_type=0, move_angle=20, move_dist=3, verbose=0, no_gui=False
    ,auto_kill=False)
    print_interval = 10  # NOTE(review): unused here; leftover from the training script
    learning_rate=0.00003
    torch.manual_seed(500)
    state_size = 38
    action_size= 19
    actor = Actor(state_size, action_size)
    if load: ## point this at your model directory + file name; checkpoints were distinguished by episode number
        actor.load_state_dict(torch.load(os.path.join('C:/SAIDA_RL/python/saida_agent_example/vultureZealot/save_ppo3_clear/','clear_ppo_actor_'+str(episode)+'.pkl')))
    # NOTE(review): optimizer is never stepped during evaluation — leftover from training.
    actor_optimizer = optim.Adam(actor.parameters(), lr=learning_rate)
    episode = 0
    clear_cnt=0
    for n_iter in range(1000):
        step = 0  # NOTE(review): never incremented; the final print always shows 0
        state = env.reset()
        state = rearrange_State(state, state_size, env)
        episode+=1
        temp_score = 0.0
        while True:
            # Sample an action from the policy's categorical distribution.
            prob_each_actions = actor(torch.Tensor(state), soft_dim=0)
            distribution = Categorical(prob_each_actions)
            action = distribution.sample().item()
            next_state, reward, done, info = env.step([action])
            next_state = rearrange_State(next_state, state_size, env)
            reward = reward_reshape(state, next_state, reward, done)
            mask = 0 if done else 1  # NOTE(review): unused during evaluation
            state = next_state
            temp_score += reward
            # Perfect clear: vulture at full HP and zealot at zero HP.
            if next_state[3] == 1.0 and next_state[-6] == 0:
                clear_cnt+=1
                print("clear: ",next_state[3],next_state[-6],"clear_score: ",temp_score, "clear_cnt: ", clear_cnt," / ", n_iter+1)
            if done:
                print("step: ", step, "per_episode_score: ",temp_score)
                break
    print("clear count: ",clear_cnt," percent: ",(clear_cnt/n_iter))
    env.close()
# Run the evaluation loop when executed as a script.
if __name__ == '__main__':
    main()
| [
"noreply@github.com"
] | noreply@github.com |
7b22a98ec9edab84239d505a37031161cc9b6fdc | 7dfccf846e545e178c67964998daa828d8c864e9 | /userpanel/views.py | 2f6963efee67d33e1a6105cd4ab24c52e4830755 | [] | no_license | akashpoddarr/yashvardhanretail | bca1ce1f64f8d5727c7506459088696fe52d907d | 4cc64e6c8517b4fdf94f7d2824bd00d2ed8ba840 | refs/heads/master | 2023-04-18T00:11:13.517871 | 2021-04-29T08:30:49 | 2021-04-29T08:30:49 | 349,639,109 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,041 | py | from django.conf import settings
from django.shortcuts import render, redirect, get_object_or_404
from django.contrib import messages
from django.http import JsonResponse, HttpResponse
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import LoginRequiredMixin
from .models import Profile
from .forms import ProfileModelForm
from django.views.generic import DetailView
from django.core.mail import send_mail, EmailMessage
from django.template.loader import render_to_string
# Create your views here.
# userpanel page function
def userpanel(request):
    """Render the static user-panel page."""
    return render(request, 'userpanel.html')
# signup function
def handlesignup(request):
    """Create a user account from a signup POST and send a welcome email.

    On password mismatch, redirects home with an error message; on success,
    creates the user, emails a registration confirmation, and redirects home.
    Non-POST requests get a plain "404 not found" response.
    """
    if request.method == 'POST':
        fname = request.POST['fname']
        lname = request.POST['lname']
        username = request.POST['username']
        pass1 = request.POST['pass1']
        pass2 = request.POST['pass2']
        # check for any error
        if pass1 != pass2:
            messages.error(request, 'your passwords do not match')
            return redirect('/')
        # create the user
        # NOTE(review): no duplicate-username check here — create_user will
        # raise on an existing username; confirm intended handling.
        myuser = User.objects.create_user(username=username, password=pass1)
        myuser.first_name = fname
        myuser.last_name = lname
        myuser.save()
        # Send the registration confirmation email.
        # NOTE(review): the recipient is myuser.username — presumably users
        # sign up with an email address as username; verify against the form.
        template = render_to_string('email_template.html')
        email = EmailMessage(
            'Thank you for registering at YashvardhanRetail.com !!',
            template,
            settings.EMAIL_HOST_USER,
            [myuser.username],
        )
        email.fail_silently = False
        email.send()
        messages.success(request, 'your account has been created successfully')
        return redirect('/')
    else:
        return HttpResponse("404 not found")
# login function
def handlelogin(request):
    """Authenticate a login POST, storing user id/email in the session.

    Valid credentials log the user in and redirect home with a success
    message; invalid ones redirect home with an error. Non-POST requests
    get a plain "404 not found" response.
    """
    if request.method == 'POST':
        # get the post parameters
        loginusername = request.POST['loginusername']
        loginpassword = request.POST['loginpassword']
        user = authenticate(username=loginusername, password=loginpassword)
        if user is not None:
            # Stash identifying details in the session for later views.
            request.session['user_id'] = user.id
            request.session['email'] = user.email
            # session code ends
            login(request, user)
            messages.success(request, 'Successfully Logged In!!!')
            return redirect('/')
        else:
            messages.error(
                request, 'You Have Entered Invalid Credentials, Please Try Again!!!')
            return redirect('/')
    return HttpResponse("404 not found")
# logout function
def handlelogout(request):
    """Log the current user out and redirect to the home page."""
    logout(request)
    messages.info(request, 'Logged out')
    return redirect('/')
# # my profile page function
# @login_required
# def myprofile(request):
# profile = Profile.objects.get(user=request.user)
# form = ProfileModelForm(request.POST or None, request.Files or None, instance=profile)
# confirm = False
# if request.method == 'POST':
# if form.is_valid():
# form.save()
# confirm = True
# context = {
# 'profile': profile,
# 'form': form,
# 'confirm': confirm,
# }
# return render(request,'myprofile.html', context)
@login_required
def myprofile(request):
    """Render the profile page for the logged-in user.

    @login_required already guarantees an authenticated user, so the previous
    redundant is_authenticated check — whose false branch fell through to an
    implicit None return, a Django error — is removed.
    """
    context = {
        'user': request.user,
    }
    return render(request, 'myprofile.html', context)
@login_required
def myprofilesettings(request):
    """Display and process the profile-edit form for the logged-in user.

    GET renders the bound form; POST saves it when valid and sets `confirm`
    so the template can show a confirmation.
    """
    profile = Profile.objects.get(user=request.user)
    # Bind the form to the existing profile (and uploaded files, if any).
    form = ProfileModelForm(request.POST or None, request.FILES or None, instance=profile)
    confirm = False
    if request.method == 'POST':
        if form.is_valid():
            form.save()
            confirm = True
    context = {
        'profile': profile,
        'form': form,
        'confirm': confirm,
    }
    return render(request, 'myprofilesettings.html', context)
| [
"poddar.akyk184@gmail.com"
] | poddar.akyk184@gmail.com |
3918f61e23c0b6d7ff36122f0d2b5f1149268f15 | ddecd36b9d2b198f7abd7018d9e47418481f1123 | /pay.py | 676a13244fff0d70b5188b78cf90bf84bb59d046 | [] | no_license | ghartong/pythonClass | 6ab694067c41418c6dbc062188522428f2298876 | 44a48c183c4b92709d87582df631e776299cb8f6 | refs/heads/master | 2020-06-28T00:53:16.593876 | 2019-08-01T18:15:10 | 2019-08-01T18:15:10 | 200,098,872 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 319 | py | hrInput = input("Enter hours:")
rInput = input("Enter rate:")
errors = False
try:
hours = float(hrInput)
except:
print("Hours must be numeric")
errors = True
try:
rate = float(rInput)
except:
print("Rate must be numeric")
errors = True
if errors != True:
pay = hours * rate
print(pay) | [
"Glenn.Hartong@signetjewelers.com"
] | Glenn.Hartong@signetjewelers.com |
bef0ed13f2de69be65966c0dbd62c900617726fa | b9cb653a048ff4a3a50f670af653776e90e98e39 | /App.py | 9c2bc9a33a53371d3d1df6bf7158419d27cff9d4 | [] | no_license | constantiux/creditsuissecit | 36988ff03467e799a72562def6196ea537ffe1c7 | a6b743b5b7baed3af52964f670a5f1c3887544f2 | refs/heads/master | 2022-12-19T03:23:35.076967 | 2020-09-29T10:18:16 | 2020-09-29T10:18:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,265 | py | import os
import glob
import logging
import socket
from flask import Flask, request, render_template, jsonify,send_from_directory
from codeitsuisse import app
@app.route('/', methods=['GET'])
def default_route():
    """Render the index page listing every problem HTML file, sorted by name."""
    root = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'codeitsuisse', 'views', 'problems')
    paths = glob.glob(os.path.join(root, '*.html'))
    # os.path.basename is separator-aware; splitting on '/' breaks on
    # Windows, where glob returns backslash-separated paths.
    pages = sorted(os.path.basename(path) for path in paths)
    return render_template('index.html', base='/codeitsuisse/views/problems/', pages=pages)
@app.route('/codeitsuisse/views/problems/<path:path>', methods=['GET'])
def problemlist(path):
    """Serve a single problem page straight from the problems directory."""
    root = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'codeitsuisse', 'views', 'problems')
    return send_from_directory(root, path)
# Configure the root logger with a timestamped stream handler.
logger = logging.getLogger()
handler = logging.StreamHandler()
formatter = logging.Formatter(
    '%(asctime)s %(name)-12s %(levelname)-8s %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.INFO)
if __name__ == "__main__":
    logging.info("Starting application ...")
    # Discover a free ephemeral port by binding to port 0.
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.bind(('localhost', 0))
    port = sock.getsockname()[1]
    sock.close()
    # NOTE(review): `port` is discovered but never used — app.run is pinned
    # to 5000; looks like leftover code, confirm intent before changing.
    app.run(port=5000)
| [
"18105757+grrlic@users.noreply.github.com"
] | 18105757+grrlic@users.noreply.github.com |
28d10354490b85281a08488e292224bd29f749ea | 4c588959f0415e671b301972da2250b3304e1d73 | /medea/tests/test_mapper.py | 5960fac13e62fc0f3c13d38970fb9a591652d2d8 | [
"MIT"
] | permissive | kevinbeaty/medea | f7c4c5b083a9010307320a09bbc111e7df86e0c8 | c200be0e781299d0f9064f5cb44b90d5c01dcfb3 | refs/heads/master | 2023-09-03T20:39:55.584986 | 2014-01-04T12:20:38 | 2014-01-04T12:20:38 | 14,655,913 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,834 | py | """
MedeaMapper test
"""
from . import Person
from .. import MedeaMapper
def test_object_args_to_json():
    """Positional mapper args select which attributes appear in to_json output."""
    bob = Person('Bob', 'Hope', '123 Main', '123', '1903-05-29')
    mapper = MedeaMapper('first_name', 'last_name')
    bob_json = {
        'first_name': 'Bob',
        'last_name': 'Hope'}
    assert mapper.to_json(bob) == bob_json
    # A mapper listing every attribute serializes the full object.
    mapper = MedeaMapper('first_name', 'last_name',
                         'address', 'phone_number', 'dob')
    bob_json = {
        'first_name': 'Bob',
        'last_name': 'Hope',
        'address': '123 Main',
        'phone_number': '123',
        'dob': '1903-05-29'}
    assert mapper.to_json(bob) == bob_json
def test_object_args_from_json():
    """from_json with positional args updates only the mapped attributes."""
    bob = Person('Bob', 'Hope', '123 Main', '123', '1903-05-29')
    assert bob.first_name == 'Bob'
    assert bob.last_name == 'Hope'
    assert bob.address == '123 Main'
    assert bob.phone_number == '123'
    assert bob.dob == '1903-05-29'
    fred = Person('Fred', 'Rodgers', '234 Rock', '456', '1928-03-20')
    assert fred.first_name == 'Fred'
    assert fred.last_name == 'Rodgers'
    assert fred.address == '234 Rock'
    assert fred.phone_number == '456'
    assert fred.dob == '1928-03-20'
    bob_json = {
        'first_name': 'Bob',
        'last_name': 'Hope'}
    bob_json_full = {
        'first_name': 'Bob',
        'last_name': 'Hope',
        'address': '123 Main',
        'phone_number': '123',
        'dob': '1903-05-29'}
    fred_json = {
        'first_name': 'Fred',
        'last_name': 'Rodgers'}
    fred_json_full = {
        'first_name': 'Fred',
        'last_name': 'Rodgers',
        'address': '234 Rock',
        'phone_number': '456',
        'dob': '1928-03-20'}
    mapper = MedeaMapper('first_name', 'last_name')
    mapper_full = MedeaMapper('first_name', 'last_name',
                              'address', 'phone_number', 'dob')
    assert mapper.to_json(bob) == bob_json
    assert mapper.to_json(fred) == fred_json
    assert mapper_full.to_json(bob) == bob_json_full
    assert mapper_full.to_json(fred) == fred_json_full
    # Override Bob's name from Fred's JSON.
    mapper.from_json(fred_json, bob)
    # The name-only mapper serializes just the names.
    assert mapper.to_json(bob) == fred_json
    assert mapper_full.to_json(bob) != bob_json_full
    # Non-mapped attributes are untouched.
    assert bob.first_name == 'Fred'
    assert bob.last_name == 'Rodgers'
    assert bob.address == '123 Main'
    assert bob.phone_number == '123'
    assert bob.dob == '1903-05-29'
    # Revert back to Bob's name.
    mapper.from_json(bob_json, bob)
    assert mapper.to_json(bob) == bob_json
    assert mapper_full.to_json(bob) == bob_json_full
    # Map Fred onto Bob using only the name keys, via the full mapper.
    mapper_full.from_json(fred_json, bob)
    assert mapper.to_json(bob) == fred_json
    assert mapper_full.to_json(bob) != bob_json_full
    assert bob.first_name == 'Fred'
    assert bob.last_name == 'Rodgers'
    assert bob.address == '123 Main'
    assert bob.phone_number == '123'
    assert bob.dob == '1903-05-29'
    # Revert back to Bob's name.
    mapper.from_json(bob_json, bob)
    assert mapper.to_json(bob) == bob_json
    assert mapper_full.to_json(bob) == bob_json_full
    # Map all of Fred onto Bob.
    mapper_full.from_json(fred_json_full, bob)
    assert mapper_full.to_json(bob) == fred_json_full
    # Map Bob back onto Bob.
    mapper_full.from_json(bob_json_full, bob)
    assert mapper_full.to_json(bob) == bob_json_full
def test_object_kwargs_to_json():
    """Keyword mapper args rename attributes in the serialized JSON keys."""
    bob = Person('Bob', 'Hope', '123 Main', '123', '1903-05-29')
    mapper = MedeaMapper(first_name='firstName', last_name='lastName')
    bob_json = {
        'firstName': 'Bob',
        'lastName': 'Hope'}
    assert mapper.to_json(bob) == bob_json
    # Positional args keep the attribute name; kwargs rename their keys.
    mapper = MedeaMapper('address', 'dob',
                         first_name='firstName', last_name='lastName')
    bob_json = {
        'firstName': 'Bob',
        'lastName': 'Hope',
        'address': '123 Main',
        'dob': '1903-05-29'}
    assert mapper.to_json(bob) == bob_json
def test_object_kwargs_from_json():
    """from_json with renamed (kwarg) keys updates only the mapped attributes."""
    bob = Person('Bob', 'Hope', '123 Main', '123', '1903-05-29')
    assert bob.first_name == 'Bob'
    assert bob.last_name == 'Hope'
    assert bob.address == '123 Main'
    assert bob.phone_number == '123'
    assert bob.dob == '1903-05-29'
    fred = Person('Fred', 'Rodgers', '234 Rock', '456', '1928-03-20')
    assert fred.first_name == 'Fred'
    assert fred.last_name == 'Rodgers'
    assert fred.address == '234 Rock'
    assert fred.phone_number == '456'
    assert fred.dob == '1928-03-20'
    bob_json = {
        'firstName': 'Bob',
        'lastName': 'Hope'}
    bob_json_full = {
        'firstName': 'Bob',
        'lastName': 'Hope',
        'address': '123 Main',
        'phoneNumber': '123',
        'DOB': '1903-05-29'}
    fred_json = {
        'firstName': 'Fred',
        'lastName': 'Rodgers'}
    fred_json_full = {
        'firstName': 'Fred',
        'lastName': 'Rodgers',
        'address': '234 Rock',
        'phoneNumber': '456',
        'DOB': '1928-03-20'}
    mapper = MedeaMapper(first_name='firstName', last_name='lastName')
    mapper_full = MedeaMapper('address',
                              first_name='firstName', last_name='lastName',
                              phone_number='phoneNumber', dob='DOB')
    assert mapper.to_json(bob) == bob_json
    assert mapper.to_json(fred) == fred_json
    assert mapper_full.to_json(bob) == bob_json_full
    assert mapper_full.to_json(fred) == fred_json_full
    # Override Bob's name from Fred's JSON.
    mapper.from_json(fred_json, bob)
    # The name-only mapper serializes just the names.
    assert mapper.to_json(bob) == fred_json
    assert mapper_full.to_json(bob) != bob_json_full
    assert bob.first_name == 'Fred'
    assert bob.last_name == 'Rodgers'
    assert bob.address == '123 Main'
    assert bob.phone_number == '123'
    assert bob.dob == '1903-05-29'
    # Revert back to Bob's name.
    mapper.from_json(bob_json, bob)
    assert mapper.to_json(bob) == bob_json
    assert mapper_full.to_json(bob) == bob_json_full
    # Map Fred onto Bob using only the name keys, via the full mapper.
    mapper_full.from_json(fred_json, bob)
    assert mapper.to_json(bob) == fred_json
    assert mapper_full.to_json(bob) != bob_json_full
    assert bob.first_name == 'Fred'
    assert bob.last_name == 'Rodgers'
    assert bob.address == '123 Main'
    assert bob.phone_number == '123'
    assert bob.dob == '1903-05-29'
    # Revert back to Bob's name.
    mapper.from_json(bob_json, bob)
    assert mapper.to_json(bob) == bob_json
    assert mapper_full.to_json(bob) == bob_json_full
    # Map all of Fred onto Bob.
    mapper_full.from_json(fred_json_full, bob)
    assert mapper_full.to_json(bob) == fred_json_full
    # Map Bob back onto Bob.
    mapper_full.from_json(bob_json_full, bob)
    assert mapper_full.to_json(bob) == bob_json_full
| [
"kevin@simplectic.com"
] | kevin@simplectic.com |
62bb60a7627578d846612bcd7722a06c13cd08ca | d4ea3d36e61fa1a3b30fe1e2a8daaad517c612d6 | /ExtractDocByID.py | c28e9ae6d0eb30f64a3275c49c647c369f5229f0 | [] | no_license | mateuss-ntnu/TopicModeling | 39a36b9c99c2d9ffeb6067df4dd5b92fea02554c | b8caf90e24fe1021d55312d08717e9260662cf93 | refs/heads/master | 2021-01-10T07:20:10.767335 | 2016-02-17T21:58:44 | 2016-02-17T21:58:44 | 49,872,598 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 348 | py | from gensim import corpora, models, similarities
pathDictionary = '/Volumes/My Passport/gensim-wiki-ensimple-20160111/dictionary.dict'
pathCorpus = '/Volumes/My Passport/gensim-wiki-ensimple-20160111/corpus.mm'
pathIndex = '/Volumes/My Passport/gensim-wiki-ensimple-20160111/index.index'
corpus = corpora.MmCorpus(pathCorpus)
print corpus[20545] | [
"mateusz.siniarski@gmail.com"
] | mateusz.siniarski@gmail.com |
d1668c6c2f07163c5dfc0e6337ad4655521fb14a | e6f1bb6ee51e83b8efdaec38ec553388fce78103 | /updateBaseImage.py | e65360e9caf8220fc6d8952c6683a03a9f33009e | [
"Apache-2.0"
] | permissive | Azure-App-Service/php-template | aa1afe19e97dc147c42a909fc04a600804bda368 | 7d7706f45df8d2d1fc3239cc697319d05f2d9b08 | refs/heads/dev | 2020-05-19T17:08:37.868562 | 2020-02-25T21:18:45 | 2020-02-25T21:18:45 | 185,127,929 | 2 | 3 | Apache-2.0 | 2020-01-17T22:28:00 | 2019-05-06T05:17:13 | Python | UTF-8 | Python | false | false | 1,018 | py | import requests
import argparse
import json
import time
import threading
import sys
def getConfig(config):
    """Read a JSON config file and return its parsed content.

    Args:
        config: Path to the JSON file (a blessed-image build-request list).

    Returns:
        The deserialized JSON value.
    """
    # A context manager guarantees the handle is closed even if json.loads
    # raises; the original closed it manually and leaked on a parse error.
    with open(config, "r") as f:
        # strict=False tolerates control characters inside JSON strings.
        return json.loads(f.read(), strict=False)
def writeConfig(content, file):
    """Serialize `content` to `file` as pretty-printed, key-sorted JSON.

    Args:
        content: JSON-serializable value (the updated build requests).
        file: Destination path; created or truncated.
    """
    # Context manager closes the handle even if serialization fails
    # (the original manual close leaked on error).  Mode "w+" kept from
    # the original, though read access is never used.
    with open(file, "w+") as f:
        f.write(json.dumps(content, indent=4, sort_keys=True))
# CLI entry point: rewrite every blessed-image config so each build request's
# baseImageName points at the Oryx base image tagged with the new timestamp.
parser = argparse.ArgumentParser()
parser.add_argument('--newOryxTimestamp', "-t", help='new oryx timestamp EG: 20190628.2', required=True)
args = parser.parse_args()
newOryxTimestamp = args.newOryxTimestamp
# Config files are expected in the current working directory.
configs = ["blessedImageConfig-dev.json",
           "blessedImageConfig-master.json",
           "blessedImageConfig-save.json",
           "blessedImageConfig-temp.json"]
for config in configs:
    buildRequests = getConfig(config)
    for br in buildRequests:
        # e.g. mcr.microsoft.com/oryx/node:10-20190628.2
        newBaseImageName = "mcr.microsoft.com/oryx/" + br["stack"] + ":" + br["version"] + "-" + newOryxTimestamp
        br.update( { "baseImageName": newBaseImageName })
    writeConfig(buildRequests, config)
| [
"31744877+patricklee2@users.noreply.github.com"
] | 31744877+patricklee2@users.noreply.github.com |
69fc7bd70b34d6f4379525241c0417e7f0d0b035 | 5ef1f32696a5ef5124aedf11a16e75c06d8ae997 | /user/migrations/0004_auto_20201118_2028.py | 999ffe5a69e5c1d2ce9b11f0a1d1dcabafbf7001 | [] | no_license | amusesla/14-1st-HOT-backend | f1e2863570cae34a5b877f62b930892ce08de99d | 1680ae9c8365f9af4caf24ee42b59d249cd27730 | refs/heads/main | 2023-02-17T06:33:20.703349 | 2021-01-17T10:40:00 | 2021-01-17T10:40:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 927 | py | # Generated by Django 3.1.3 on 2020-11-18 11:28
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: drops the Cart/Order/Status models and reworks User.follow."""
    dependencies = [
        ('user', '0003_auto_20201118_1924'),
    ]
    operations = [
        # Remove Cart's foreign keys first so the models below can be deleted.
        migrations.RemoveField(
            model_name='cart',
            name='order',
        ),
        migrations.RemoveField(
            model_name='cart',
            name='product_detail',
        ),
        migrations.RemoveField(
            model_name='order',
            name='user',
        ),
        migrations.DeleteModel(
            name='Status',
        ),
        # Self-referential M2M now routed through the explicit Follow model.
        migrations.AlterField(
            model_name='user',
            name='follow',
            field=models.ManyToManyField(through='user.Follow', to='user.User'),
        ),
        migrations.DeleteModel(
            name='Cart',
        ),
        migrations.DeleteModel(
            name='Order',
        ),
    ]
| [
"amusesla@gmail.com"
] | amusesla@gmail.com |
8c60e0c394d4f8ed99349dee8cf83e79c82a2825 | 66ab57a8a9f03ac155c1d25b904c5ddf766e6c4c | /src/fetch.py | 0d71d3180a4674e861f947f6d97c3dc1e2dc6b8e | [] | no_license | rahullabs/slideshare-downloader | b2716a9074d397b0fdbe3bf43fd749f8a53c6d24 | 20086567c62f9085958343b57fe7a3d3d34d0483 | refs/heads/main | 2023-07-14T04:03:09.714024 | 2021-08-27T07:11:02 | 2021-08-27T07:11:02 | 399,691,693 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,116 | py | import requests
from bs4 import BeautifulSoup
import src.helpers as hp
import shutil
from PIL import Image
from tqdm import tqdm
import os
class Fetch():
    """Scrape a SlideShare presentation page, download each slide image,
    and assemble the images into a single PDF.

    `arguments` layout (positional, as used below):
      [0] output root directory, [1] keep-images flag, [2] batch-file flag,
      [-1] either a single presentation URL or a path to a text file of URLs.
    """
    def __init__(self, arguments):
        super(Fetch, self).__init__()
        self.args = arguments
        # Ensure the output root exists (hp.imageDirectory creates it).
        hp.imageDirectory(self.args[0])
    def directory_check(self, slide_name):
        """Create per-presentation output folders: <root>/<name>/{pdf,jpg}."""
        self.slide_folder_name = slide_name.split('/')[-1]
        self.slide_folder = self.args[0]+"/"+ self.slide_folder_name
        hp.imageDirectory(self.slide_folder)
        self.pdf_dir = self.slide_folder+'/pdf'
        self.img_dir = self.slide_folder+'/jpg'
        hp.imageDirectory(self.pdf_dir)
        hp.imageDirectory(self.img_dir)
    def individual_link_slides(self, url):
        """Fetch one presentation page, collect slide image URLs, download
        them, then build the PDF.
        """
        page = requests.get(url)
        soup = BeautifulSoup(page.content, 'html.parser')
        slides = soup.find_all("div", {"id":"slide_container"})
        img_link = []
        # Prefer the lazy-load 'data-original' URL when 'src' is empty.
        # NOTE(review): img['src'] / img['data-original'] raise KeyError if the
        # attribute is missing entirely — assumes both attrs always present.
        for img in slides[0].find_all('img', alt=True):
            if not img['src']:
                if not img['data-original']:
                    pass
                if img['data-original']:
                    img_link.append(img['data-original'])
            elif not img['data-original']:
                if not img['src']:
                    pass
                if img['src']:
                    img_link.append(img['src'])
        self.dld_img = []
        for total_slides in tqdm(range(len(img_link))):
            # Strip the query string; split('?') then take element [0].
            # NOTE(review): the slice bound is odd but element [0] is what's used.
            src_link = img_link[total_slides].split('?')[0:img_link[total_slides].find('?')-1]
            self.download_slide_images(src_link[0], self.img_dir)
        self.savePdf(self.slide_folder_name+".pdf")
    def savePdf(self, filename):
        """Merge downloaded slide images into one PDF; optionally delete the
        JPGs afterwards (when args[1] is falsy).
        """
        im_list = []
        for imgs in self.dld_img:
            im = Image.open(imgs)
            im_list.append(im)
        pdf_file = self.pdf_dir + "/" + filename
        # First image anchors the PDF; the rest are appended as pages.
        # NOTE(review): IndexError if no image downloaded successfully.
        im_list[0].save(pdf_file, "PDF", resolution=100.0, save_all=True, append_images=im_list[1:])
        if not self.args[1]:
            for imgs in self.dld_img:
                os.remove(imgs)
    def download_slide_images(self, image_url, output_dir):
        """Stream one image to disk and record its path in self.dld_img."""
        filename = image_url.split('/')[-1]
        r = requests.get(image_url, stream = True)
        output = output_dir+ "/" + filename
        if r.status_code == 200:
            # decode_content lets raw stream gunzip transparently.
            r.raw.decode_content = True
            self.dld_img.append(output)
            with open(output,'wb') as f:
                shutil.copyfileobj(r.raw, f)
    def text_handler(self):
        """Read the batch file of presentation URLs into self.tot_slide_link."""
        file = open(self.args[-1], 'r+')
        self.tot_slide_link = file.readlines()
        file.close()
    def check_type_input(self):
        """Dispatch: args[2] truthy means batch file of URLs, else single URL."""
        if self.args[2]:
            self.text_handler()
            for link_no in range(len(self.tot_slide_link)):
                self.directory_check(self.tot_slide_link[link_no].strip('\n'))
                self.individual_link_slides(self.tot_slide_link[link_no].strip('\n'))
        elif not self.args[2]:
            self.directory_check(self.args[-1])
            self.individual_link_slides(self.args[-1])
    def start(self):
        """Public entry point."""
        self.check_type_input()
| [
"rahul.maharjan.12114@ncit.edu.np"
] | rahul.maharjan.12114@ncit.edu.np |
b0258289543572c3d2fd2b3d83991eb4e2d9f4dc | 24fe1f54fee3a3df952ca26cce839cc18124357a | /servicegraph/lib/python2.7/site-packages/acimodel-4.0_3d-py2.7.egg/cobra/modelimpl/orchs/svcsencap.py | 83b1a538fc7c72069845b02465a56b59e320b8da | [] | no_license | aperiyed/servicegraph-cloudcenter | 4b8dc9e776f6814cf07fe966fbd4a3481d0f45ff | 9eb7975f2f6835e1c0528563a771526896306392 | refs/heads/master | 2023-05-10T17:27:18.022381 | 2020-01-20T09:18:28 | 2020-01-20T09:18:28 | 235,065,676 | 0 | 0 | null | 2023-05-01T21:19:14 | 2020-01-20T09:36:37 | Python | UTF-8 | Python | false | false | 8,108 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2019 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class SvcsEncap(Mo):
    """
    Auto-generated Cisco ACI managed-object class (label: "IP Pool Resource
    Instance"); RN format "encap-%(name)s".  Do not edit by hand — see the
    generator notice at the top of this file.
    """
    meta = ClassMeta("cobra.model.orchs.SvcsEncap")

    meta.moClassName = "orchsSvcsEncap"
    meta.rnFormat = "encap-%(name)s"
    meta.category = MoCategory.REGULAR
    meta.label = "IP Pool Resource Instance"
    meta.writeAccessMask = 0x2001
    meta.readAccessMask = 0x2001
    meta.isDomainable = False
    meta.isReadOnly = False
    meta.isConfigurable = True
    meta.isDeletable = True
    meta.isContextRoot = False

    # Allowed child MO classes and their RN prefixes.
    meta.childClasses.add("cobra.model.tag.Tag")
    meta.childClasses.add("cobra.model.fault.Counts")
    meta.childClasses.add("cobra.model.health.Inst")
    meta.childClasses.add("cobra.model.aaa.RbacAnnotation")
    meta.childClasses.add("cobra.model.orchs.RsSvcsEncapToSvcAlloc")
    meta.childClasses.add("cobra.model.tag.Annotation")

    meta.childNamesAndRnPrefix.append(("cobra.model.orchs.RsSvcsEncapToSvcAlloc", "rssvcsEncapToSvcAlloc-"))
    meta.childNamesAndRnPrefix.append(("cobra.model.tag.Annotation", "annotationKey-"))
    meta.childNamesAndRnPrefix.append(("cobra.model.aaa.RbacAnnotation", "rbacDom-"))
    meta.childNamesAndRnPrefix.append(("cobra.model.tag.Tag", "tagKey-"))
    meta.childNamesAndRnPrefix.append(("cobra.model.fault.Counts", "fltCnts"))
    meta.childNamesAndRnPrefix.append(("cobra.model.health.Inst", "health"))

    # Class hierarchy placement.
    meta.parentClasses.add("cobra.model.orchs.Config")

    meta.superClasses.add("cobra.model.orchs.Entity")
    meta.superClasses.add("cobra.model.orchs.Element")
    meta.superClasses.add("cobra.model.naming.NamedObject")
    meta.superClasses.add("cobra.model.naming.NamedIdentifiedObject")

    meta.rnPrefixes = [
        ('encap-', True),
    ]

    # Property metadata (one PropMeta per MO attribute).
    prop = PropMeta("str", "annotation", "annotation", 38579, PropCategory.REGULAR)
    prop.label = "Annotation. Suggested format orchestrator:value"
    prop.isConfig = True
    prop.isAdmin = True
    prop.range = [(0, 128)]
    prop.regex = ['[a-zA-Z0-9_.:-]+']
    meta.props.add("annotation", prop)

    prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop._addConstant("deleteAll", "deleteall", 16384)
    prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
    prop._addConstant("ignore", "ignore", 4096)
    meta.props.add("childAction", prop)

    prop = PropMeta("str", "descr", "descr", 28290, PropCategory.REGULAR)
    prop.label = "Description"
    prop.isConfig = True
    prop.isAdmin = True
    prop.range = [(0, 128)]
    prop.regex = ['[a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+']
    meta.props.add("descr", prop)

    prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
    prop.label = "None"
    prop.isDn = True
    prop.isImplicit = True
    prop.isAdmin = True
    prop.isCreateOnly = True
    meta.props.add("dn", prop)

    prop = PropMeta("str", "encap", "encap", 28246, PropCategory.REGULAR)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    meta.props.add("encap", prop)

    prop = PropMeta("str", "encapNsName", "encapNsName", 28248, PropCategory.REGULAR)
    prop.label = "None"
    prop.isConfig = True
    prop.isAdmin = True
    prop.range = [(0, 512)]
    meta.props.add("encapNsName", prop)

    prop = PropMeta("str", "extMngdBy", "extMngdBy", 40718, PropCategory.REGULAR)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = 0
    prop.defaultValueStr = "undefined"
    prop._addConstant("msc", "msc", 1)
    prop._addConstant("undefined", "undefined", 0)
    meta.props.add("extMngdBy", prop)

    prop = PropMeta("str", "guid", "guid", 28255, PropCategory.REGULAR)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.regex = ['[[a-z0-9]{8}-[a-z0-9]{4}-[a-z0-9]{4}-[a-z0-9]{4}-[a-z0-9]{12}]{0,1}']
    meta.props.add("guid", prop)

    prop = PropMeta("str", "id", "id", 28253, PropCategory.REGULAR)
    prop.label = "Id"
    prop.isImplicit = True
    prop.isAdmin = True
    meta.props.add("id", prop)

    prop = PropMeta("str", "lcOwn", "lcOwn", 9, PropCategory.REGULAR)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = 0
    prop.defaultValueStr = "local"
    prop._addConstant("implicit", "implicit", 4)
    prop._addConstant("local", "local", 0)
    prop._addConstant("policy", "policy", 1)
    prop._addConstant("replica", "replica", 2)
    prop._addConstant("resolveOnBehalf", "resolvedonbehalf", 3)
    meta.props.add("lcOwn", prop)

    prop = PropMeta("str", "legLoc", "legLoc", 28245, PropCategory.REGULAR)
    prop.label = "None"
    prop.isConfig = True
    prop.isAdmin = True
    prop.range = [(0, 512)]
    meta.props.add("legLoc", prop)

    prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = 0
    prop.defaultValueStr = "never"
    prop._addConstant("never", "never", 0)
    meta.props.add("modTs", prop)

    prop = PropMeta("str", "monPolDn", "monPolDn", 28679, PropCategory.REGULAR)
    prop.label = "Monitoring policy attached to this observable object"
    prop.isImplicit = True
    prop.isAdmin = True
    meta.props.add("monPolDn", prop)

    prop = PropMeta("str", "name", "name", 28294, PropCategory.REGULAR)
    prop.label = "Name"
    prop.isConfig = True
    prop.isAdmin = True
    prop.isCreateOnly = True
    prop.isNaming = True
    prop.range = [(1, 128)]
    meta.props.add("name", prop)

    prop = PropMeta("str", "nameAlias", "nameAlias", 28417, PropCategory.REGULAR)
    prop.label = "Name alias"
    prop.isConfig = True
    prop.isAdmin = True
    prop.range = [(0, 63)]
    prop.regex = ['[a-zA-Z0-9_.-]+']
    meta.props.add("nameAlias", prop)

    prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
    prop.label = "None"
    prop.isRn = True
    prop.isImplicit = True
    prop.isAdmin = True
    prop.isCreateOnly = True
    meta.props.add("rn", prop)

    prop = PropMeta("str", "rtrId", "rtrId", 28247, PropCategory.REGULAR)
    prop.label = "None"
    prop.isConfig = True
    prop.isAdmin = True
    meta.props.add("rtrId", prop)

    prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop._addConstant("created", "created", 2)
    prop._addConstant("deleted", "deleted", 8)
    prop._addConstant("modified", "modified", 4)
    meta.props.add("status", prop)

    prop = PropMeta("str", "uid", "uid", 8, PropCategory.REGULAR)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    meta.props.add("uid", prop)

    # "name" is the naming property used to build the RN.
    meta.namingProps.append(getattr(meta.props, "name"))

    # Deployment Meta
    meta.deploymentQuery = True
    meta.deploymentType = "Ancestor"
    meta.deploymentQueryPaths.append(DeploymentPathMeta("CtxToNwIf", "Private Network to Interface", "cobra.model.nw.If"))

    def __init__(self, parentMoOrDn, name, markDirty=True, **creationProps):
        namingVals = [name]
        Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
| [
"rrishike@cisco.com"
] | rrishike@cisco.com |
3157fe6fdd447bb57057687c3cf1dc6b2b15380c | 7eadcb17f2555a80b1d7a065a9f9cefbe3c127e7 | /frappe/translate.py | d3d629a6fbd8419a6d004f9b68b66dfb0ab778d2 | [
"MIT"
] | permissive | sivaranjanipalanivel/frappesample | a97f7c636d5f5869e3410a57bc3ac82d32884d0e | e37ff70ac92c16d1fb0bce5eb11dad62c9ff7564 | refs/heads/master | 2020-07-24T17:00:35.139985 | 2019-09-12T07:21:50 | 2019-09-12T07:21:50 | 207,989,569 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23,483 | py | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals, print_function
from six import iteritems, text_type, string_types
"""
frappe.translate
~~~~~~~~~~~~~~~~
Translation tools for frappe
"""
import frappe, os, re, codecs, json
from frappe.model.utils import render_include, InvalidIncludePath
from frappe.utils import strip
from jinja2 import TemplateError
import itertools, operator
def guess_language(lang_list=None):
	"""Guess the request language from HTTP Accept-Language headers.

	:param lang_list: Optional list of enabled language codes; defaults to
		all installed languages.
	:returns: The first matching code, or `frappe.local.lang` as fallback.
	"""
	lang_codes = frappe.request.accept_languages.values()
	if not lang_codes:
		return frappe.local.lang
	guess = None
	if not lang_list:
		lang_list = get_all_languages() or []
	for l in lang_codes:
		code = l.strip()
		if not isinstance(code, text_type):
			code = text_type(code, 'utf-8')
		if code in lang_list or code == "en":
			guess = code
			break
		# check if parent language (pt) is setup, if variant (pt-BR)
		if "-" in code:
			code = code.split("-")[0]
			if code in lang_list:
				guess = code
				break
	return guess or frappe.local.lang
def get_user_lang(user=None):
	"""Return the session user's language (cached); falls back to the site
	default, then `frappe.local.lang`, then 'en'."""
	if not user:
		user = frappe.session.user
	# via cache
	lang = frappe.cache().hget("lang", user)
	if not lang:
		# if defined in user profile
		lang = frappe.db.get_value("User", user, "language")
		if not lang:
			lang = frappe.db.get_default("lang")
		if not lang:
			lang = frappe.local.lang or 'en'
		frappe.cache().hset("lang", user, lang)
	return lang
def get_lang_code(lang):
	"""Map a full language name to its code; pass codes through unchanged."""
	return frappe.db.get_value('Language', {'language_name': lang}) or lang
def set_default_language(lang):
	"""Set Global default language"""
	frappe.db.set_default("lang", lang)
	frappe.local.lang = lang
def get_all_languages():
	"""Returns all language codes ar, ch etc"""
	def _get():
		# May run before a DB connection exists (cache miss on boot).
		if not frappe.db:
			frappe.connect()
		return frappe.db.sql_list('select name from tabLanguage')
	return frappe.cache().get_value('languages', _get)
def get_lang_dict():
	"""Returns all languages in dict format, full name is the key e.g. `{"english":"en"}`"""
	return dict(frappe.db.sql('select language_name, name from tabLanguage'))
def get_dict(fortype, name=None):
	"""Returns translation dict for a type of object.

	:param fortype: must be one of `doctype`, `page`, `report`, `include`, `jsfile`, `boot`
	:param name: name of the document for which assets are to be returned.
	"""
	fortype = fortype.lower()
	cache = frappe.cache()
	asset_key = fortype + ":" + (name or "-")
	translation_assets = cache.hget("translation_assets", frappe.local.lang, shared=True) or {}
	if not asset_key in translation_assets:
		if fortype=="doctype":
			messages = get_messages_from_doctype(name)
		elif fortype=="page":
			messages = get_messages_from_page(name)
		elif fortype=="report":
			messages = get_messages_from_report(name)
		elif fortype=="include":
			messages = get_messages_from_include_files()
		elif fortype=="jsfile":
			messages = get_messages_from_file(name)
		elif fortype=="boot":
			# boot bundles everything shown on first load: includes plus
			# names of print formats, doctypes, roles and modules.
			messages = get_messages_from_include_files()
			messages += frappe.db.sql("select 'Print Format:', name from `tabPrint Format`")
			messages += frappe.db.sql("select 'DocType:', name from tabDocType")
			messages += frappe.db.sql("select 'Role:', name from tabRole")
			messages += frappe.db.sql("select 'Module:', name from `tabModule Def`")
			messages += frappe.db.sql("select 'Module:', label from `tabDesktop Icon` where standard=1 or owner=%s",
				frappe.session.user)
		message_dict = make_dict_from_messages(messages)
		message_dict.update(get_dict_from_hooks(fortype, name))
		# remove untranslated
		message_dict = {k:v for k, v in iteritems(message_dict) if k!=v}
		translation_assets[asset_key] = message_dict
		cache.hset("translation_assets", frappe.local.lang, translation_assets, shared=True)
	return translation_assets[asset_key]
def get_dict_from_hooks(fortype, name):
	"""Collect extra translations from apps' `get_translated_dict` hooks
	registered for this (fortype, name) pair."""
	translated_dict = {}
	hooks = frappe.get_hooks("get_translated_dict")
	for (hook_fortype, fortype_name) in hooks:
		if hook_fortype == fortype and fortype_name == name:
			for method in hooks[(hook_fortype, fortype_name)]:
				translated_dict.update(frappe.get_attr(method)())
	return translated_dict
def add_lang_dict(code):
	"""Extracts messages and returns Javascript code snippet to be appended at the end
	of the given script

	:param code: Javascript code snippet to which translations needs to be appended."""
	messages = extract_messages_from_code(code)
	messages = [message for pos, message in messages]
	code += "\n\n$.extend(frappe._messages, %s)" % json.dumps(make_dict_from_messages(messages))
	return code
def make_dict_from_messages(messages, full_dict=None):
	"""Returns translated messages as a dict in Language specified in `frappe.local.lang`

	:param messages: List of (position, message) tuples of untranslated messages.
	:param full_dict: Optional translation map; defaults to the full dict
		of `frappe.local.lang`.
	"""
	if full_dict is None:
		# `is None` (not `== None`): identity is the correct singleton check
		# and avoids invoking __eq__ on dict-like arguments.
		full_dict = get_full_dict(frappe.local.lang)

	# Keep only messages that actually have a translation.
	return {m[1]: full_dict[m[1]] for m in messages if m[1] in full_dict}
def get_lang_js(fortype, name):
	"""Returns code snippet to be appended at the end of a JS script.

	:param fortype: Type of object, e.g. `DocType`
	:param name: Document name
	"""
	return "\n\n$.extend(frappe._messages, %s)" % json.dumps(get_dict(fortype, name))
def get_full_dict(lang):
	"""Load and return the entire translations dictionary for a language from :meth:`frappe.cache`

	:param lang: Language Code, e.g. `hi`
	"""
	if not lang:
		return {}
	# found in local, return!
	if getattr(frappe.local, 'lang_full_dict', None) and frappe.local.lang_full_dict.get(lang, None):
		return frappe.local.lang_full_dict
	frappe.local.lang_full_dict = load_lang(lang)
	try:
		# get user specific translation data
		user_translations = get_user_translations(lang)
	except Exception:
		# best-effort: user overrides are optional (e.g. table missing)
		user_translations = None
	if user_translations:
		frappe.local.lang_full_dict.update(user_translations)
	return frappe.local.lang_full_dict
def load_lang(lang, apps=None):
	"""Combine all translations from `.csv` files in all `apps`.
	For derivative languages (es-GT), take translations from the
	base language (es) and then update translations from the child (es-GT)"""
	if lang=='en':
		return {}
	out = frappe.cache().hget("lang_full_dict", lang, shared=True)
	if not out:
		out = {}
		for app in (apps or frappe.get_all_apps(True)):
			path = os.path.join(frappe.get_pymodule_path(app), "translations", lang + ".csv")
			out.update(get_translation_dict_from_file(path, lang, app) or {})
		if '-' in lang:
			# child (es-GT) entries override the parent (es) base set
			parent = lang.split('-')[0]
			parent_out = load_lang(parent)
			parent_out.update(out)
			out = parent_out
		frappe.cache().hset("lang_full_dict", lang, out, shared=True)
	return out or {}
def get_translation_dict_from_file(path, lang, app):
	"""load translation dict from given path"""
	cleaned = {}
	if os.path.exists(path):
		csv_content = read_csv_file(path)
		for item in csv_content:
			if len(item)==3:
				# with file and line numbers
				cleaned[item[1]] = strip(item[2])
			elif len(item)==2:
				cleaned[item[0]] = strip(item[1])
			elif item:
				# non-empty row that is neither 2 nor 3 columns is corrupt
				raise Exception("Bad translation in '{app}' for language '{lang}': {values}".format(
					app=app, lang=lang, values=repr(item).encode("utf-8")
				))
	return cleaned
def get_user_translations(lang):
	"""Return user-defined overrides from the Translation doctype (cached)."""
	out = frappe.cache().hget('lang_user_translations', lang)
	if out is None:
		out = {}
		for fields in frappe.get_all('Translation',
			fields= ["source_name", "target_name"], filters={'language': lang}):
			out.update({fields.source_name: fields.target_name})
		frappe.cache().hset('lang_user_translations', lang, out)
	return out
def clear_cache():
	"""Clear all translation assets from :meth:`frappe.cache`"""
	cache = frappe.cache()
	cache.delete_key("langinfo")
	# clear translations saved in boot cache
	cache.delete_key("bootinfo")
	cache.delete_key("lang_full_dict", shared=True)
	cache.delete_key("translation_assets", shared=True)
	cache.delete_key("lang_user_translations")
def get_messages_for_app(app):
	"""Returns all messages (list of (position, message) tuples) for a specified `app`"""
	messages = []
	modules = ", ".join(['"{}"'.format(m.title().replace("_", " ")) \
		for m in frappe.local.app_modules[app]])
	# doctypes
	if modules:
		for name in frappe.db.sql_list("""select name from tabDocType
			where module in ({})""".format(modules)):
			messages.extend(get_messages_from_doctype(name))
	# pages
	for name, title in frappe.db.sql("""select name, title from tabPage
		where module in ({})""".format(modules)):
		messages.append((None, title or name))
		messages.extend(get_messages_from_page(name))
	# reports
	for name in frappe.db.sql_list("""select tabReport.name from tabDocType, tabReport
		where tabReport.ref_doctype = tabDocType.name
		and tabDocType.module in ({})""".format(modules)):
		messages.append((None, name))
		messages.extend(get_messages_from_report(name))
	# sanity check: every entry must be a (position, message) tuple
	for i in messages:
		if not isinstance(i, tuple):
			raise Exception
	# workflow based on app.hooks.fixtures
	messages.extend(get_messages_from_workflow(app_name=app))
	# custom fields based on app.hooks.fixtures
	messages.extend(get_messages_from_custom_fields(app_name=app))
	# app_include_files
	messages.extend(get_all_messages_from_js_files(app))
	# server_messages
	messages.extend(get_server_messages(app))
	return deduplicate_messages(messages)
def get_messages_from_doctype(name):
	"""Extract all translatable messages for a doctype. Includes labels, Python code,
	Javascript code, html templates"""
	messages = []
	meta = frappe.get_meta(name)
	messages = [meta.name, meta.module]
	if meta.description:
		messages.append(meta.description)
	# translations of field labels, description and options
	for d in meta.get("fields"):
		messages.extend([d.label, d.description])
		if d.fieldtype=='Select' and d.options:
			options = d.options.split('\n')
			# icon lists are not human-facing text
			if not "icon" in options[0]:
				messages.extend(options)
	# translations of roles
	for d in meta.get("permissions"):
		if d.role:
			messages.append(d.role)
	messages = [message for message in messages if message]
	messages = [('DocType: ' + name, message) for message in messages if is_translatable(message)]
	# extract from js, py files
	doctype_file_path = frappe.get_module_path(meta.module, "doctype", meta.name, meta.name)
	messages.extend(get_messages_from_file(doctype_file_path + ".js"))
	messages.extend(get_messages_from_file(doctype_file_path + "_list.js"))
	messages.extend(get_messages_from_file(doctype_file_path + "_list.html"))
	messages.extend(get_messages_from_file(doctype_file_path + "_calendar.js"))
	# workflow based on doctype
	messages.extend(get_messages_from_workflow(doctype=name))
	return messages
def get_messages_from_workflow(doctype=None, app_name=None):
	"""Return translatable strings (state names, state messages, action
	labels) from Workflow documents.

	:param doctype: Limit to workflows attached to this DocType.
	:param app_name: Scan this app's `fixtures` hook for Workflow fixtures.
	Exactly one of the two must be provided.
	"""
	assert doctype or app_name, 'doctype or app_name should be provided'

	# translations for Workflows
	workflows = []
	if doctype:
		workflows = frappe.get_all('Workflow', filters={'document_type': doctype})
	else:
		fixtures = frappe.get_hooks('fixtures', app_name=app_name) or []
		for fixture in fixtures:
			# BUGFIX: the literal was misspelled 'Worflow', so a plain-string
			# "Workflow" fixture entry never matched and its translations were
			# silently skipped.
			if isinstance(fixture, string_types) and fixture == 'Workflow':
				workflows = frappe.get_all('Workflow')
				break
			elif isinstance(fixture, dict) and fixture.get('dt', fixture.get('doctype')) == 'Workflow':
				workflows.extend(frappe.get_all('Workflow', filters=fixture.get('filters')))

	messages = []
	for w in workflows:
		states = frappe.db.sql(
			'select distinct state from `tabWorkflow Document State` where parent=%s',
			(w['name'],), as_dict=True)
		messages.extend([('Workflow: ' + w['name'], state['state']) for state in states if is_translatable(state['state'])])
		states = frappe.db.sql(
			'select distinct message from `tabWorkflow Document State` where parent=%s and message is not null',
			(w['name'],), as_dict=True)
		messages.extend([("Workflow: " + w['name'], state['message'])
			for state in states if is_translatable(state['message'])])
		actions = frappe.db.sql(
			'select distinct action from `tabWorkflow Transition` where parent=%s',
			(w['name'],), as_dict=True)
		messages.extend([("Workflow: " + w['name'], action['action']) \
			for action in actions if is_translatable(action['action'])])
	return messages
def get_messages_from_custom_fields(app_name):
	"""Return translatable strings (labels, descriptions, Select options)
	from Custom Field fixtures declared in the app's `fixtures` hook.

	:param app_name: App whose hooks are scanned.
	"""
	fixtures = frappe.get_hooks('fixtures', app_name=app_name) or []
	custom_fields = []
	for fixture in fixtures:
		if isinstance(fixture, string_types) and fixture == 'Custom Field':
			custom_fields = frappe.get_all('Custom Field', fields=['name','label', 'description', 'fieldtype', 'options'])
			break
		elif isinstance(fixture, dict) and fixture.get('dt', fixture.get('doctype')) == 'Custom Field':
			custom_fields.extend(frappe.get_all('Custom Field', filters=fixture.get('filters'),
				fields=['name','label', 'description', 'fieldtype', 'options']))

	messages = []
	for cf in custom_fields:
		for prop in ('label', 'description'):
			if not cf.get(prop) or not is_translatable(cf[prop]):
				continue
			messages.append(('Custom Field - {}: {}'.format(prop, cf['name']), cf[prop]))
		# BUGFIX: the fieldtype is 'Select' in Frappe (cf. the identical check
		# in get_messages_from_doctype); the original compared against
		# 'Selection', so Select options were never collected.
		if cf['fieldtype'] == 'Select' and cf.get('options'):
			for option in cf['options'].split('\n'):
				if option and 'icon' not in option and is_translatable(option):
					messages.append(('Custom Field - Description: ' + cf['name'], option))
	return messages
def get_messages_from_page(name):
	"""Returns all translatable strings from a :class:`frappe.core.doctype.Page`"""
	return _get_messages_from_page_or_report("Page", name)
def get_messages_from_report(name):
	"""Returns all translatable strings from a :class:`frappe.core.doctype.Report`"""
	report = frappe.get_doc("Report", name)
	messages = _get_messages_from_page_or_report("Report", name,
		frappe.db.get_value("DocType", report.ref_doctype, "module"))
	# TODO position here!
	if report.query:
		# pull column labels out of query reports ("Label:Fieldtype" columns)
		messages.extend([(None, message) for message in re.findall('"([^:,^"]*):', report.query) if is_translatable(message)])
	messages.append((None,report.report_name))
	return messages
def _get_messages_from_page_or_report(doctype, name, module=None):
	"""Shared helper: scan the page/report's module folder for .py/.js/.html
	files and extract messages from each."""
	if not module:
		module = frappe.db.get_value(doctype, name, "module")
	doc_path = frappe.get_module_path(module, doctype, name)
	messages = get_messages_from_file(os.path.join(doc_path, frappe.scrub(name) +".py"))
	if os.path.exists(doc_path):
		for filename in os.listdir(doc_path):
			if filename.endswith(".js") or filename.endswith(".html"):
				messages += get_messages_from_file(os.path.join(doc_path, filename))
	return messages
def get_server_messages(app):
	"""Extracts all translatable strings (tagged with :func:`frappe._`) from Python modules
	inside an app"""
	messages = []
	for basepath, folders, files in os.walk(frappe.get_pymodule_path(app)):
		# prune in place so os.walk skips these subtrees
		for dontwalk in (".git", "public", "locale"):
			if dontwalk in folders: folders.remove(dontwalk)
		for f in files:
			f = frappe.as_unicode(f)
			if f.endswith(".py") or f.endswith(".html") or f.endswith(".js"):
				messages.extend(get_messages_from_file(os.path.join(basepath, f)))
	return messages
def get_messages_from_include_files(app_name=None):
	"""Returns messages from js files included at time of boot like desk.min.js for desk and web"""
	messages = []
	for file in (frappe.get_hooks("app_include_js", app_name=app_name) or []) + (frappe.get_hooks("web_include_js", app_name=app_name) or []):
		messages.extend(get_messages_from_file(os.path.join(frappe.local.sites_path, file)))
	return messages
def get_all_messages_from_js_files(app_name=None):
	"""Extracts all translatable strings from app `.js` files"""
	messages = []
	for app in ([app_name] if app_name else frappe.get_installed_apps()):
		if os.path.exists(frappe.get_app_path(app, "public")):
			for basepath, folders, files in os.walk(frappe.get_app_path(app, "public")):
				# vendored libraries are not our strings
				if "frappe/public/js/lib" in basepath:
					continue
				for fname in files:
					if fname.endswith(".js") or fname.endswith(".html"):
						messages.extend(get_messages_from_file(os.path.join(basepath, fname)))
	return messages
def get_messages_from_file(path):
	"""Returns a list of translatable strings from a code file

	:param path: path of the code file
	"""
	apps_path = get_bench_dir()
	if os.path.exists(path):
		with open(path, 'r') as sourcefile:
			# position entries look like "relative/path +lineno"
			return [(os.path.relpath(" +".join([path, str(pos)]), apps_path),
				message) for pos, message in extract_messages_from_code(sourcefile.read(), path.endswith(".py"))]
	else:
		# print "Translate: {0} missing".format(os.path.abspath(path))
		return []
def extract_messages_from_code(code, is_py=False):
	"""Extracts translatable strings from a code file

	:param code: code from which translatable files are to be extracted
	:param is_py: include messages in triple quotes e.g. `_('''message''')`"""
	try:
		code = render_include(code)
	except (TemplateError, ImportError, InvalidIncludePath):
		# Exception will occur when it encounters John Resig's microtemplating code
		pass
	messages = []
	# match _("...") and _('...') call sites
	messages += [(m.start(), m.groups()[0]) for m in re.compile('_\("([^"]*)"').finditer(code)]
	messages += [(m.start(), m.groups()[0]) for m in re.compile("_\('([^']*)'").finditer(code)]
	if is_py:
		messages += [(m.start(), m.groups()[0]) for m in re.compile('_\("{3}([^"]*)"{3}.*\)').finditer(code)]
	messages = [(pos, message) for pos, message in messages if is_translatable(message)]
	return pos_to_line_no(messages, code)
def is_translatable(m):
	"""Return True if `m` looks like a human-readable, translatable string.

	Filters out icon classes ("fa fa-..."), CSS sizes ("...px"),
	client-side expressions ("eval:...") and strings with no latin letter.
	"""
	# Return the boolean expression directly instead of the
	# `if cond: return True / return False` anti-pattern.
	return bool(
		re.search("[a-zA-Z]", m)
		and not m.startswith("fa fa-")
		and not m.endswith("px")
		and not m.startswith("eval:")
	)
def pos_to_line_no(messages, code):
	"""Convert (character_offset, message) pairs into (line_number, message)
	pairs, with line numbers starting at 1.

	:param messages: Iterable of (offset, message) tuples.
	:param code: The source text the offsets refer to.
	"""
	newline_positions = [m.start() for m in re.compile('\\n').finditer(code)]
	total_newlines = len(newline_positions)
	result = []
	current_line = 1
	consumed = 0
	# Walk messages in offset order, advancing past each newline that lies
	# before the current message's offset.
	for offset, message in sorted(messages, key=lambda item: item[0]):
		while consumed < total_newlines and offset > newline_positions[consumed]:
			consumed += 1
			current_line += 1
		result.append((current_line, message))
	return result
def read_csv_file(path):
"""Read CSV file and return as list of list
:param path: File path"""
from csv import reader
with codecs.open(path, 'r', 'utf-8') as msgfile:
data = msgfile.read()
# for japanese! #wtf
data = data.replace(chr(28), "").replace(chr(29), "")
data = reader([r.encode('utf-8') for r in data.splitlines()])
newdata = [[text_type(val, 'utf-8') for val in row] for row in data]
return newdata
def write_csv_file(path, app_messages, lang_dict):
"""Write translation CSV file.
:param path: File path, usually `[app]/translations`.
:param app_messages: Translatable strings for this app.
:param lang_dict: Full translated dict.
"""
app_messages.sort(key = lambda x: x[1])
from csv import writer
with open(path, 'wb') as msgfile:
w = writer(msgfile, lineterminator='\n')
for p, m in app_messages:
t = lang_dict.get(m, '')
# strip whitespaces
t = re.sub('{\s?([0-9]+)\s?}', "{\g<1>}", t)
w.writerow([p.encode('utf-8') if p else '', m.encode('utf-8'), t.encode('utf-8')])
def get_untranslated(lang, untranslated_file, get_all=False):
    """Returns all untranslated strings for a language and writes in a file
    :param lang: Language code.
    :param untranslated_file: Output file path.
    :param get_all: Return all strings, translated or not."""
    clear_cache()
    apps = frappe.get_all_apps(True)
    messages = []
    untranslated = []
    # Collect every translatable string from every installed app.
    for app in apps:
        messages.extend(get_messages_for_app(app))
    messages = deduplicate_messages(messages)
    def escape_newlines(s):
        # Encode the three newline flavours as runs of pipes so that one
        # message always occupies one physical line in the output file.
        # update_translations() performs the inverse mapping.
        return (s.replace("\\\n", "|||||")
                .replace("\\n", "||||")
                .replace("\n", "|||"))
    if get_all:
        # Dump every message, translated or not.
        print(str(len(messages)) + " messages")
        with open(untranslated_file, "w") as f:
            for m in messages:
                # replace \n with ||| so that internal linebreaks don't get split
                f.write((escape_newlines(m[1]) + os.linesep).encode("utf-8"))
    else:
        # Only dump messages missing from the language's full dict.
        full_dict = get_full_dict(lang)
        for m in messages:
            if not full_dict.get(m[1]):
                untranslated.append(m[1])
        if untranslated:
            print(str(len(untranslated)) + " missing translations of " + str(len(messages)))
            with open(untranslated_file, "w") as f:
                for m in untranslated:
                    # replace \n with ||| so that internal linebreaks don't get split
                    f.write((escape_newlines(m) + os.linesep).encode("utf-8"))
        else:
            print("all translated!")
def update_translations(lang, untranslated_file, translated_file):
    """Update translations from a source and target file for a given language.
    :param lang: Language code (e.g. `en`).
    :param untranslated_file: File path with the messages in English.
    :param translated_file: File path with messages in language to be updated."""
    clear_cache()
    full_dict = get_full_dict(lang)
    def restore_newlines(s):
        # Inverse of escape_newlines() in get_untranslated().  The spaced
        # variants ("| | |") handle translation tools that insert spaces
        # between the pipe characters.  Longest runs must be replaced
        # first, so the order of these replacements matters.
        return (s.replace("|||||", "\\\n")
                .replace("| | | | |", "\\\n")
                .replace("||||", "\\n")
                .replace("| | | |", "\\n")
                .replace("|||", "\n")
                .replace("| | |", "\n"))
    translation_dict = {}
    # Pair source lines with translated lines positionally; empty lines
    # are kept so the two files stay aligned.
    for key, value in zip(frappe.get_file_items(untranslated_file, ignore_empty_lines=False),
        frappe.get_file_items(translated_file, ignore_empty_lines=False)):
        # undo hack in get_untranslated
        translation_dict[restore_newlines(key)] = restore_newlines(value)
    full_dict.update(translation_dict)
    # Persist the merged dict back into every app's translation CSV.
    for app in frappe.get_all_apps(True):
        write_translations_file(app, lang, full_dict)
def import_translations(lang, path):
    """Import translations from file in standard format"""
    clear_cache()
    merged = get_full_dict(lang)
    imported = get_translation_dict_from_file(path, lang, 'import')
    merged.update(imported)
    # Rewrite every installed app's translation file with the merged dict.
    for installed_app in frappe.get_all_apps(True):
        write_translations_file(installed_app, lang, merged)
def rebuild_all_translation_files():
    """Rebuild all translation files: `[app]/translations/[lang].csv`."""
    for language_code in get_all_languages():
        for installed_app in frappe.get_all_apps():
            write_translations_file(installed_app, language_code)
def write_translations_file(app, lang, full_dict=None, app_messages=None):
    """Write a translation file for a given language.

    :param app: `app` for which translations are to be written.
    :param lang: Language code.
    :param full_dict: Full translated language dict (optional).
    :param app_messages: Source strings (optional).
    """
    messages = app_messages or get_messages_for_app(app)
    if not messages:
        # Nothing translatable in this app -- don't create an empty file.
        return
    translations_folder = frappe.get_pymodule_path(app, "translations")
    frappe.create_folder(translations_folder)
    csv_path = os.path.join(translations_folder, lang + ".csv")
    write_csv_file(csv_path, messages, full_dict or get_full_dict(lang))
def send_translations(translation_dict):
    """Append translated dict in `frappe.local.response`"""
    # setdefault creates the bucket on first use, then merge into it.
    frappe.local.response.setdefault("__messages", {}).update(translation_dict)
def deduplicate_messages(messages):
    """Drop duplicate messages, keeping one entry per source text.

    *messages* are (position, text) tuples; the result is sorted by text
    and contains the first tuple of each group of equal texts.
    """
    by_text = operator.itemgetter(1)
    unique = []
    for _, group in itertools.groupby(sorted(messages, key=by_text), by_text):
        unique.append(next(group))
    return unique
def get_bench_dir():
    """Return the bench root: four directory levels above the frappe package."""
    return os.path.join(frappe.__file__, *([".."] * 4))
def rename_language(old_name, new_name):
    """Repoint System Settings and all users from *old_name* to *new_name*.

    Does nothing unless a Language record called *new_name* already exists.
    """
    if not frappe.db.exists('Language', new_name):
        return
    system_language = frappe.db.get_single_value("System Settings", "language")
    if system_language == old_name:
        frappe.db.set_value("System Settings", "System Settings", "language", new_name)
    frappe.db.sql("""update `tabUser` set language=%(new_name)s where language=%(old_name)s""",
        { "old_name": old_name, "new_name": new_name })
| [
"you@example.com"
] | you@example.com |
153121106ebc24e5c336526de3225d92751f09bb | 2efa07bd7d8864950fb2f377386d74a2ee992d3a | /project.py | c044ceba15827ad76acdba4c58a966ae8a6a9a4b | [] | no_license | Sbk3824/RestaurantWebsite | 9e318c30dd65fe46fe8adbc3aa4ccde0a4924f74 | a7930a6d487700d2d6fa54201f87eda48d16528f | refs/heads/master | 2020-03-20T19:19:48.387025 | 2018-07-06T01:52:47 | 2018-07-06T01:52:47 | 137,632,752 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,252 | py | from flask import Flask, render_template, request, redirect, jsonify, url_for, flash
from sqlalchemy import create_engine, asc
from sqlalchemy.orm import sessionmaker
from database_setup import Base, Restaurant, MenuItem, User
from flask import session as login_session
import random
import string
from oauth2client.client import flow_from_clientsecrets
from oauth2client.client import FlowExchangeError
import httplib2
import json
from flask import make_response
import requests
app = Flask(__name__)
# Google OAuth client id, read from the downloaded client-secrets file.
CLIENT_ID = json.loads(
    open('client_secrets.json', 'r').read())['web']['client_id']
APPLICATION_NAME = "Restaurant Menu Application"
# Connect to Database and create database session
engine = create_engine('sqlite:///restaurantmenuwithusers.db')
Base.metadata.bind = engine
# NOTE(review): a single module-level session is shared by all requests;
# fine for this demo app, not thread-safe for production use.
DBSession = sessionmaker(bind=engine)
session = DBSession()
# Create anti-forgery state token
@app.route('/login')
def showLogin():
    """Render the login page with a fresh 32-char anti-forgery state token."""
    alphabet = string.ascii_uppercase + string.digits
    state = ''.join(random.choice(alphabet) for _ in xrange(32))
    login_session['state'] = state
    return render_template('login.html', STATE=state)
@app.route('/fbconnect', methods=['POST'])
def fbconnect():
    """Log a user in via Facebook OAuth and populate login_session."""
    # Reject requests whose state token doesn't match the one we issued
    # (anti-forgery / CSRF protection).
    if request.args.get('state') != login_session['state']:
        response = make_response(json.dumps('Invalid state parameter.'), 401)
        response.headers['Content-Type'] = 'application/json'
        return response
    access_token = request.data
    print "access token received %s " % access_token
    # Exchange the short-lived client token for a long-lived server token.
    app_id = json.loads(open('fb_client_secrets.json', 'r').read())[
        'web']['app_id']
    app_secret = json.loads(
        open('fb_client_secrets.json', 'r').read())['web']['app_secret']
    url = 'https://graph.facebook.com/oauth/access_token?grant_type=fb_exchange_token&client_id=%s&client_secret=%s&fb_exchange_token=%s' % (
        app_id, app_secret, access_token)
    h = httplib2.Http()
    result = h.request(url, 'GET')[1]
    # Use token to get user info from API
    userinfo_url = "https://graph.facebook.com/v2.8/me"
    '''
    Due to the formatting for the result from the server token exchange we have to
    split the token first on commas and select the first index which gives us the key : value
    for the server access token then we split it on colons to pull out the actual token value
    and replace the remaining quotes with nothing so that it can be used directly in the graph
    api calls
    '''
    token = result.split(',')[0].split(':')[1].replace('"', '')
    url = 'https://graph.facebook.com/v2.8/me?access_token=%s&fields=name,id,email' % token
    h = httplib2.Http()
    result = h.request(url, 'GET')[1]
    # print "url sent for API access:%s"% url
    # print "API JSON result: %s" % result
    data = json.loads(result)
    login_session['provider'] = 'facebook'
    login_session['username'] = data["name"]
    login_session['email'] = data["email"]
    login_session['facebook_id'] = data["id"]
    # The token must be stored in the login_session in order to properly logout
    login_session['access_token'] = token
    # Get user picture
    url = 'https://graph.facebook.com/v2.8/me/picture?access_token=%s&redirect=0&height=200&width=200' % token
    h = httplib2.Http()
    result = h.request(url, 'GET')[1]
    data = json.loads(result)
    login_session['picture'] = data["data"]["url"]
    # see if user exists
    user_id = getUserID(login_session['email'])
    if not user_id:
        user_id = createUser(login_session)
    login_session['user_id'] = user_id
    # Build the small HTML welcome snippet shown after login.
    output = ''
    output += '<h1>Welcome, '
    output += login_session['username']
    output += '!</h1>'
    output += '<img src="'
    output += login_session['picture']
    output += ' " style = "width: 300px; height: 300px;border-radius: 150px;-webkit-border-radius: 150px;-moz-border-radius: 150px;"> '
    flash("Now logged in as %s" % login_session['username'])
    return output
@app.route('/fbdisconnect')
def fbdisconnect():
    """Revoke the Facebook session for the currently logged-in user."""
    # The access token must be included for the revoke call to succeed.
    revoke_url = 'https://graph.facebook.com/%s/permissions?access_token=%s' % (
        login_session['facebook_id'], login_session['access_token'])
    httplib2.Http().request(revoke_url, 'DELETE')
    return "you have been logged out"
@app.route('/gconnect', methods=['POST'])
def gconnect():
    """Log a user in via Google OAuth and populate login_session."""
    # Validate state token
    if request.args.get('state') != login_session['state']:
        response = make_response(json.dumps('Invalid state parameter.'), 401)
        response.headers['Content-Type'] = 'application/json'
        return response
    # Obtain authorization code
    code = request.data
    try:
        # Upgrade the authorization code into a credentials object
        oauth_flow = flow_from_clientsecrets('client_secrets.json', scope='')
        oauth_flow.redirect_uri = 'postmessage'
        credentials = oauth_flow.step2_exchange(code)
    except FlowExchangeError:
        response = make_response(
            json.dumps('Failed to upgrade the authorization code.'), 401)
        response.headers['Content-Type'] = 'application/json'
        return response
    # Check that the access token is valid.
    access_token = credentials.access_token
    url = ('https://www.googleapis.com/oauth2/v1/tokeninfo?access_token=%s'
           % access_token)
    h = httplib2.Http()
    result = json.loads(h.request(url, 'GET')[1])
    # If there was an error in the access token info, abort.
    if result.get('error') is not None:
        response = make_response(json.dumps(result.get('error')), 500)
        response.headers['Content-Type'] = 'application/json'
        return response
    # Verify that the access token is used for the intended user.
    gplus_id = credentials.id_token['sub']
    if result['user_id'] != gplus_id:
        response = make_response(
            json.dumps("Token's user ID doesn't match given user ID."), 401)
        response.headers['Content-Type'] = 'application/json'
        return response
    # Verify that the access token is valid for this app.
    if result['issued_to'] != CLIENT_ID:
        response = make_response(
            json.dumps("Token's client ID does not match app's."), 401)
        print "Token's client ID does not match app's."
        response.headers['Content-Type'] = 'application/json'
        return response
    # Short-circuit if this user is already logged in.
    stored_access_token = login_session.get('access_token')
    stored_gplus_id = login_session.get('gplus_id')
    if stored_access_token is not None and gplus_id == stored_gplus_id:
        response = make_response(json.dumps('Current user is already connected.'),
                                 200)
        response.headers['Content-Type'] = 'application/json'
        return response
    # Store the access token in the session for later use.
    login_session['access_token'] = credentials.access_token
    login_session['gplus_id'] = gplus_id
    # Get user info
    userinfo_url = "https://www.googleapis.com/oauth2/v1/userinfo"
    params = {'access_token': credentials.access_token, 'alt': 'json'}
    answer = requests.get(userinfo_url, params=params)
    data = answer.json()
    login_session['username'] = data['name']
    login_session['picture'] = data['picture']
    login_session['email'] = data['email']
    # ADD PROVIDER TO LOGIN SESSION
    login_session['provider'] = 'google'
    # see if user exists, if it doesn't make a new one
    user_id = getUserID(data["email"])
    if not user_id:
        user_id = createUser(login_session)
    login_session['user_id'] = user_id
    # Build the small HTML welcome snippet shown after login.
    output = ''
    output += '<h1>Welcome, '
    output += login_session['username']
    output += '!</h1>'
    output += '<img src="'
    output += login_session['picture']
    output += ' " style = "width: 300px; height: 300px;border-radius: 150px;-webkit-border-radius: 150px;-moz-border-radius: 150px;"> '
    flash("you are now logged in as %s" % login_session['username'])
    print "done!"
    return output
# User Helper Functions
def createUser(login_session):
    """Create a User row from the login session and return its new id."""
    new_user = User(name=login_session['username'],
                    email=login_session['email'],
                    picture=login_session['picture'])
    session.add(new_user)
    session.commit()
    created = session.query(User).filter_by(email=login_session['email']).one()
    return created.id
def getUserInfo(user_id):
    """Return the User row with the given primary key."""
    return session.query(User).filter_by(id=user_id).one()
def getUserID(email):
    """Return the id of the User with *email*, or None if no such user.

    BUG FIX: this previously used a bare ``except:``, which also swallowed
    KeyboardInterrupt/SystemExit and hid real database errors.  Catching
    Exception keeps the "no such user -> None" contract without masking
    interpreter-level signals.
    """
    try:
        user = session.query(User).filter_by(email=email).one()
        return user.id
    except Exception:
        return None
# DISCONNECT - Revoke a current user's token and reset their login_session
@app.route('/gdisconnect')
def gdisconnect():
    """Revoke the current user's Google access token.

    Returns a JSON response: 401 when nobody is connected, 200 on a
    successful revoke, 400 when Google refuses the revoke.
    """
    # Only disconnect a connected user.
    access_token = login_session.get('access_token')
    if access_token is None:
        response = make_response(
            json.dumps('Current user not connected.'), 401)
        response.headers['Content-Type'] = 'application/json'
        return response
    url = 'https://accounts.google.com/o/oauth2/revoke?token=%s' % access_token
    h = httplib2.Http()
    result = h.request(url, 'GET')[0]
    if result['status'] == '200':
        response = make_response(json.dumps('Successfully disconnected.'), 200)
        response.headers['Content-Type'] = 'application/json'
        return response
    else:
        # BUG FIX: the 400 used to be passed to json.dumps() instead of
        # make_response(), so failed revokes were returned with status 200.
        response = make_response(
            json.dumps('Failed to revoke token for given user.'), 400)
        response.headers['Content-Type'] = 'application/json'
        return response
# JSON APIs to view Restaurant Information
@app.route('/restaurant/<int:restaurant_id>/menu/JSON')
def restaurantMenuJSON(restaurant_id):
    """Return every menu item of one restaurant as JSON."""
    # .one() doubles as an existence check: it raises when the id is unknown.
    restaurant = session.query(Restaurant).filter_by(id=restaurant_id).one()
    menu_items = session.query(MenuItem).filter_by(
        restaurant_id=restaurant_id).all()
    return jsonify(MenuItems=[item.serialize for item in menu_items])
@app.route('/restaurant/<int:restaurant_id>/menu/<int:menu_id>/JSON')
def menuItemJSON(restaurant_id, menu_id):
    """Return a single menu item as JSON."""
    item = session.query(MenuItem).filter_by(id=menu_id).one()
    return jsonify(Menu_Item=item.serialize)
@app.route('/restaurant/JSON')
def restaurantsJSON():
    """Return all restaurants as JSON."""
    all_restaurants = session.query(Restaurant).all()
    return jsonify(restaurants=[entry.serialize for entry in all_restaurants])
# Show all restaurants
@app.route('/')
@app.route('/restaurant/')
def showRestaurants():
    """List all restaurants; anonymous visitors get the read-only view."""
    restaurants = session.query(Restaurant).order_by(asc(Restaurant.name))
    if 'username' in login_session:
        template = 'restaurants.html'
    else:
        template = 'publicrestaurants.html'
    return render_template(template, restaurants=restaurants)
# Create a new restaurant
@app.route('/restaurant/new/', methods=['GET', 'POST'])
def newRestaurant():
    """Create a restaurant owned by the logged-in user (login required)."""
    if 'username' not in login_session:
        return redirect('/login')
    if request.method != 'POST':
        return render_template('newRestaurant.html')
    created = Restaurant(name=request.form['name'],
                         user_id=login_session['user_id'])
    session.add(created)
    flash('New Restaurant %s Successfully Created' % created.name)
    session.commit()
    return redirect(url_for('showRestaurants'))
# Edit a restaurant
@app.route('/restaurant/<int:restaurant_id>/edit/', methods=['GET', 'POST'])
def editRestaurant(restaurant_id):
    """Rename a restaurant; only its owner may do so."""
    editedRestaurant = session.query(
        Restaurant).filter_by(id=restaurant_id).one()
    if 'username' not in login_session:
        return redirect('/login')
    if editedRestaurant.user_id != login_session['user_id']:
        return "<script>function myFunction() {alert('You are not authorized to edit this restaurant. Please create your own restaurant in order to edit.');}</script><body onload='myFunction()'>"
    if request.method == 'POST':
        if request.form['name']:
            editedRestaurant.name = request.form['name']
            # BUG FIX: the rename was never persisted -- unlike every other
            # mutating view in this module, this one did not add/commit.
            session.add(editedRestaurant)
            session.commit()
            flash('Restaurant Successfully Edited %s' % editedRestaurant.name)
        return redirect(url_for('showRestaurants'))
    else:
        return render_template('editRestaurant.html', restaurant=editedRestaurant)
# Delete a restaurant
@app.route('/restaurant/<int:restaurant_id>/delete/', methods=['GET', 'POST'])
def deleteRestaurant(restaurant_id):
    """Delete a restaurant; only its owner may do so."""
    doomed = session.query(
        Restaurant).filter_by(id=restaurant_id).one()
    if 'username' not in login_session:
        return redirect('/login')
    if doomed.user_id != login_session['user_id']:
        return "<script>function myFunction() {alert('You are not authorized to delete this restaurant. Please create your own restaurant in order to delete.');}</script><body onload='myFunction()'>"
    if request.method != 'POST':
        return render_template('deleteRestaurant.html', restaurant=doomed)
    session.delete(doomed)
    flash('%s Successfully Deleted' % doomed.name)
    session.commit()
    return redirect(url_for('showRestaurants', restaurant_id=restaurant_id))
# Show a restaurant menu
@app.route('/restaurant/<int:restaurant_id>/')
@app.route('/restaurant/<int:restaurant_id>/menu/')
def showMenu(restaurant_id):
    """Show a restaurant's menu; the owner gets the editable view."""
    restaurant = session.query(Restaurant).filter_by(id=restaurant_id).one()
    creator = getUserInfo(restaurant.user_id)
    items = session.query(MenuItem).filter_by(
        restaurant_id=restaurant_id).all()
    is_owner = 'username' in login_session and creator.id == login_session['user_id']
    template = 'menu.html' if is_owner else 'publicmenu.html'
    return render_template(template, items=items, restaurant=restaurant, creator=creator)
# Create a new menu item
@app.route('/restaurant/<int:restaurant_id>/menu/new/', methods=['GET', 'POST'])
def newMenuItem(restaurant_id):
    """Add a menu item to a restaurant; restricted to the owner."""
    if 'username' not in login_session:
        return redirect('/login')
    restaurant = session.query(Restaurant).filter_by(id=restaurant_id).one()
    if login_session['user_id'] != restaurant.user_id:
        return "<script>function myFunction() {alert('You are not authorized to add menu items to this restaurant. Please create your own restaurant in order to add items.');}</script><body onload='myFunction()'>"
    if request.method != 'POST':
        return render_template('newmenuitem.html', restaurant_id=restaurant_id)
    form = request.form
    created = MenuItem(name=form['name'],
                       description=form['description'],
                       price=form['price'],
                       course=form['course'],
                       restaurant_id=restaurant_id,
                       user_id=restaurant.user_id)
    session.add(created)
    session.commit()
    flash('New Menu %s Item Successfully Created' % (created.name))
    return redirect(url_for('showMenu', restaurant_id=restaurant_id))
# Edit a menu item
@app.route('/restaurant/<int:restaurant_id>/menu/<int:menu_id>/edit', methods=['GET', 'POST'])
def editMenuItem(restaurant_id, menu_id):
    """Edit a menu item; restricted to the restaurant's owner.

    Only non-empty form fields overwrite the existing values.
    """
    if 'username' not in login_session:
        return redirect('/login')
    editedItem = session.query(MenuItem).filter_by(id=menu_id).one()
    restaurant = session.query(Restaurant).filter_by(id=restaurant_id).one()
    if login_session['user_id'] != restaurant.user_id:
        return "<script>function myFunction() {alert('You are not authorized to edit menu items to this restaurant. Please create your own restaurant in order to edit items.');}</script><body onload='myFunction()'>"
    if request.method != 'POST':
        return render_template('editmenuitem.html', restaurant_id=restaurant_id, menu_id=menu_id, item=editedItem)
    for field in ('name', 'description', 'price', 'course'):
        if request.form[field]:
            setattr(editedItem, field, request.form[field])
    session.add(editedItem)
    session.commit()
    flash('Menu Item Successfully Edited')
    return redirect(url_for('showMenu', restaurant_id=restaurant_id))
# Delete a menu item
@app.route('/restaurant/<int:restaurant_id>/menu/<int:menu_id>/delete', methods=['GET', 'POST'])
def deleteMenuItem(restaurant_id, menu_id):
    """Delete a menu item; restricted to the restaurant's owner."""
    if 'username' not in login_session:
        return redirect('/login')
    restaurant = session.query(Restaurant).filter_by(id=restaurant_id).one()
    itemToDelete = session.query(MenuItem).filter_by(id=menu_id).one()
    if login_session['user_id'] != restaurant.user_id:
        return "<script>function myFunction() {alert('You are not authorized to delete menu items to this restaurant. Please create your own restaurant in order to delete items.');}</script><body onload='myFunction()'>"
    if request.method != 'POST':
        return render_template('deleteMenuItem.html', item=itemToDelete)
    session.delete(itemToDelete)
    session.commit()
    flash('Menu Item Successfully Deleted')
    return redirect(url_for('showMenu', restaurant_id=restaurant_id))
# Disconnect based on provider
@app.route('/disconnect')
def disconnect():
    """Log out from whichever OAuth provider the session used."""
    if 'provider' not in login_session:
        flash("You were not logged in")
        return redirect(url_for('showRestaurants'))
    provider = login_session['provider']
    if provider == 'google':
        gdisconnect()
        del login_session['gplus_id']
        del login_session['access_token']
    if provider == 'facebook':
        fbdisconnect()
        del login_session['facebook_id']
    # Common session keys are cleared regardless of provider.
    del login_session['username']
    del login_session['email']
    del login_session['picture']
    del login_session['user_id']
    del login_session['provider']
    flash("You have successfully been logged out.")
    return redirect(url_for('showRestaurants'))
if __name__ == '__main__':
    # NOTE(review): the secret key is hard-coded and debug mode is on;
    # both should come from configuration outside local development.
    app.secret_key = 'super_secret_key'
    app.debug = True
    app.run(host='0.0.0.0', port=5555)
"shantu24@gmail.com"
] | shantu24@gmail.com |
a242b4b5f7d0a846496f0be692655f3219904e71 | cbe5c182c2783f867ac05a075d42e95963d789cb | /guizero.py | 9068b27bb0a28f701117377f7c33cbcc0346877a | [] | no_license | Amoory001/codes | 6fd4251156b0383519b6853b2e154947e96ae793 | 494e74a6aa775671eb36bbf16a6ddfee46889427 | refs/heads/main | 2023-02-28T13:17:47.764735 | 2021-02-10T11:43:54 | 2021-02-10T11:43:54 | 315,178,681 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 237 | py | from guizero import App, Text, PushButton
# Button callback: replaces the label's text when the button is clicked.
def say_hello():
    text.value ="3D"
app = App(title="Hello world")
text = Text(app, text="Welcome to the Hello world app!")
# NOTE(review): the button is created without a text/label argument --
# confirm whether an unlabeled button is intended.
button = PushButton(app, command=say_hello)
# Enters the GUI event loop; blocks until the window is closed.
app.display()
| [
"noreply@github.com"
] | noreply@github.com |
80a7585e86a4e8633b65ccb5495c63da103934b7 | 8bbeb7b5721a9dbf40caa47a96e6961ceabb0128 | /python3/216.Combination Sum III(组合总和 III).py | 4c2a30d0cd8dad2c5c465ba3a4dfdb989f691e11 | [
"MIT"
] | permissive | lishulongVI/leetcode | bb5b75642f69dfaec0c2ee3e06369c715125b1ba | 6731e128be0fd3c0bdfe885c1a409ac54b929597 | refs/heads/master | 2020-03-23T22:17:40.335970 | 2018-07-23T14:46:06 | 2018-07-23T14:46:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,388 | py | """
<div>
<p>Find all possible combinations of <i><b>k</b></i> numbers that add up to a number <i><b>n</b></i>, given that only numbers from 1 to 9 can be used and each combination should be a unique set of numbers.</p>
<p><strong>Note:</strong></p>
<ul>
<li>All numbers will be positive integers.</li>
<li>The solution set must not contain duplicate combinations.</li>
</ul>
<p><strong>Example 1:</strong></p>
<pre>
<strong>Input:</strong> <i><b>k</b></i> = 3, <i><b>n</b></i> = 7
<strong>Output:</strong> [[1,2,4]]
</pre>
<p><strong>Example 2:</strong></p>
<pre>
<strong>Input:</strong> <i><b>k</b></i> = 3, <i><b>n</b></i> = 9
<strong>Output:</strong> [[1,2,6], [1,3,5], [2,3,4]]
</pre>
</div>
<p>找出所有相加之和为 <em><strong>n</strong> </em>的 <strong><em>k </em></strong>个数的组合<strong><em>。</em></strong>组合中只允许含有 1 - 9 的正整数,并且每种组合中不存在重复的数字。</p>
<p><strong>说明:</strong></p>
<ul>
<li>所有数字都是正整数。</li>
<li>解集不能包含重复的组合。 </li>
</ul>
<p><strong>示例 1:</strong></p>
<pre><strong>输入:</strong> <em><strong>k</strong></em> = 3, <em><strong>n</strong></em> = 7
<strong>输出:</strong> [[1,2,4]]
</pre>
<p><strong>示例 2:</strong></p>
<pre><strong>输入:</strong> <em><strong>k</strong></em> = 3, <em><strong>n</strong></em> = 9
<strong>输出:</strong> [[1,2,6], [1,3,5], [2,3,4]]
</pre>
<p>找出所有相加之和为 <em><strong>n</strong> </em>的 <strong><em>k </em></strong>个数的组合<strong><em>。</em></strong>组合中只允许含有 1 - 9 的正整数,并且每种组合中不存在重复的数字。</p>
<p><strong>说明:</strong></p>
<ul>
<li>所有数字都是正整数。</li>
<li>解集不能包含重复的组合。 </li>
</ul>
<p><strong>示例 1:</strong></p>
<pre><strong>输入:</strong> <em><strong>k</strong></em> = 3, <em><strong>n</strong></em> = 7
<strong>输出:</strong> [[1,2,4]]
</pre>
<p><strong>示例 2:</strong></p>
<pre><strong>输入:</strong> <em><strong>k</strong></em> = 3, <em><strong>n</strong></em> = 9
<strong>输出:</strong> [[1,2,6], [1,3,5], [2,3,4]]
</pre>
"""
class Solution:
    def combinationSum3(self, k, n):
        """Return all unique combinations of k distinct digits 1-9 summing to n.

        :type k: int
        :type n: int
        :rtype: List[List[int]]

        The method body was previously empty (it returned None); this
        implements it with standard backtracking.  Digits are tried in
        increasing order, so each combination is emitted in ascending
        order and no duplicates are produced.
        """
        results = []

        def backtrack(start, remaining, path):
            # A combination is complete once it holds k digits.
            if len(path) == k:
                if remaining == 0:
                    results.append(list(path))
                return
            for digit in range(start, 10):
                if digit > remaining:
                    break  # digits only grow from here; no solution possible
                path.append(digit)
                backtrack(digit + 1, remaining - digit, path)
                path.pop()

        backtrack(1, n, [])
        return results
| [
"lishulong@wecash.net"
] | lishulong@wecash.net |
503332bad41e324b9c15cbcf42f5ea665e02af44 | 2f64d8a5337e6e78cc0899030a09562f15d99c51 | /Even Numbers.py | a88398d961f538a42831423bba37e50a0c6757ed | [] | no_license | LolaSun/Tasks | a91d0651777714b83964e14d9ce7134682572d4f | 1a4700dad7066ac58d4d4389e7311e387c8e5d44 | refs/heads/main | 2023-01-23T23:47:33.590642 | 2020-11-23T18:18:50 | 2020-11-23T18:18:50 | 315,275,560 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 686 | py | """
Even Numbers
Given a list of numbers, you want to take out all of the odd ones and leave just the even ones.
Task:
Evaluate each number in your list to see if it is even or odd. Then, output a new list that only contains the even numbers
from your original list.
Input Format:
A string that includes all of the integer values in your list separated by spaces.
Output Format:
A string that includes all of the even integer values from your first list separated by spaces.
Sample Input:
8 10 19 25 5 16 12
Sample Output:
8 10 16 12
"""
# Read the space-separated integers from stdin as strings.
dig=input().split(" ")
dig_ev=[]
# Keep only the even values (compare on the parsed int, keep the string).
for i in dig:
    if int(i) % 2==0:
        dig_ev.append(i)
# Emit the surviving values space-separated, preserving input order.
print(" ".join(dig_ev))
| [
"noreply@github.com"
] | noreply@github.com |
d6e4df85a3e6861e2639d6df9092c22691c0cf1d | 4337aab7603fc100fc2c598d17d3ef3935236f5b | /lib2to3/pgen2/driver.py | 460639649a1d479f64913ce89cae5fe7f3ce7580 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"Python-2.0"
] | permissive | gerrat/pythoscope | a2997008db0e3902b562f716a4c7b5084b5ba678 | 814167fc31f1983f80d606bd79e43adada5a1bef | refs/heads/master | 2021-01-01T16:29:17.560213 | 2017-07-20T15:07:05 | 2017-07-20T15:07:05 | 97,845,842 | 0 | 0 | null | 2017-07-20T14:36:06 | 2017-07-20T14:36:06 | null | UTF-8 | Python | false | false | 4,802 | py | # Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
# Modifications:
# Copyright 2006 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Parser driver.
This provides a high-level interface to parse a file into a syntax tree.
"""
__author__ = "Guido van Rossum <guido@python.org>"
__all__ = ["Driver", "load_grammar"]
# Python imports
import os
import logging
import sys
# Pgen imports
import grammar, parse, token, tokenize, pgen
class Driver(object):
    """High-level parser driver.

    Feeds a token stream (from a file, stream or string) into the pgen2
    parser and returns the resulting syntax tree.
    """
    def __init__(self, grammar, convert=None, logger=None):
        # grammar: a pgen2 Grammar instance; convert: optional node
        # conversion callback passed through to parse.Parser.
        self.grammar = grammar
        if logger is None:
            logger = logging.getLogger()
        self.logger = logger
        self.convert = convert
    def parse_tokens(self, tokens, debug=False):
        """Parse a series of tokens and return the syntax tree."""
        # XXX Move the prefix computation into a wrapper around tokenize.
        p = parse.Parser(self.grammar, self.convert)
        p.setup()
        lineno = 1
        column = 0
        type = value = start = end = line_text = None
        prefix = ""
        for quintuple in tokens:
            type, value, start, end, line_text = quintuple
            if start != (lineno, column):
                # The tokenizer skipped some text (whitespace); capture it
                # as the prefix attached to the next significant token.
                assert (lineno, column) <= start, ((lineno, column), start)
                s_lineno, s_column = start
                if lineno < s_lineno:
                    prefix += "\n" * (s_lineno - lineno)
                    lineno = s_lineno
                    column = 0
                if column < s_column:
                    prefix += line_text[column:s_column]
                    column = s_column
            if type in (tokenize.COMMENT, tokenize.NL):
                # Comments and non-logical newlines also become prefix text.
                prefix += value
                lineno, column = end
                if value.endswith("\n"):
                    lineno += 1
                    column = 0
                continue
            if type == token.OP:
                # Map the generic OP token to its specific grammar symbol.
                type = grammar.opmap[value]
            if debug:
                self.logger.debug("%s %r (prefix=%r)",
                                  token.tok_name[type], value, prefix)
            if p.addtoken(type, value, (prefix, start)):
                # Parser signalled acceptance of the start symbol.
                if debug:
                    self.logger.debug("Stop.")
                break
            prefix = ""
            lineno, column = end
            if value.endswith("\n"):
                lineno += 1
                column = 0
        else:
            # We never broke out -- EOF is too soon (how can this happen???)
            raise parse.ParseError("incomplete input",
                                   type, value, (prefix, start))
        return p.rootnode
    def parse_stream_raw(self, stream, debug=False):
        """Parse a stream and return the syntax tree."""
        tokens = tokenize.generate_tokens(stream.readline)
        return self.parse_tokens(tokens, debug)
    def parse_stream(self, stream, debug=False):
        """Parse a stream and return the syntax tree."""
        return self.parse_stream_raw(stream, debug)
    def parse_file(self, filename, debug=False):
        """Parse a file and return the syntax tree."""
        stream = open(filename)
        try:
            return self.parse_stream(stream, debug)
        finally:
            stream.close()
    def parse_string(self, text, debug=False):
        """Parse a string and return the syntax tree."""
        # Python 2: `.next` is the generator's next-method, used as a
        # readline-compatible callable.
        tokens = tokenize.generate_tokens(generate_lines(text).next)
        return self.parse_tokens(tokens, debug)
def generate_lines(text):
    """Generator that behaves like readline without using StringIO.

    Yields each line of *text* (keeping line endings), then yields empty
    strings forever, mimicking readline() at EOF.
    """
    for current_line in text.splitlines(True):
        yield current_line
    while True:
        yield ""
def load_grammar(gt="Grammar.txt", gp=None,
                 save=True, force=False, logger=None):
    """Load the grammar (maybe from a pickle)."""
    if logger is None:
        logger = logging.getLogger()
    if gp is None:
        # Derive the pickle filename from the grammar text file, embedding
        # the interpreter version so pickles aren't shared across versions.
        head, tail = os.path.splitext(gt)
        if tail == ".txt":
            tail = ""
        gp = head + tail + ".".join(map(str, sys.version_info)) + ".pickle"
    if force or not _newer(gp, gt):
        # Pickle missing or stale: regenerate the tables from the text file.
        logger.info("Generating grammar tables from %s", gt)
        g = pgen.generate_grammar(gt)
        if save:
            logger.info("Writing grammar tables to %s", gp)
            try:
                g.dump(gp)
            except IOError, e:
                # Best effort: a read-only install can still run without
                # caching the pickle.  (Python 2 except syntax.)
                logger.info("Writing failed:"+str(e))
    else:
        g = grammar.Grammar()
        g.load(gp)
    return g
def _newer(a, b):
"""Inquire whether file a was written since file b."""
if not os.path.exists(a):
return False
if not os.path.exists(b):
return True
return os.path.getmtime(a) >= os.path.getmtime(b)
| [
"michal@trivas.pl"
] | michal@trivas.pl |
45d708d8fae94a586b546fab161acc015f0f9afc | 43cfd0055565b5c43b18de2c13a49a9e85068a69 | /pwauth.py | 1de2ff691cb53c7811ff3f81c9afb44f8bb25af1 | [] | no_license | mydor/Giganews-Homework1 | 15fd6ecce048905a12cec213d692a72c0906223a | 2de51754b810a251665488ac7f83a6f4fd767b9e | refs/heads/master | 2021-01-22T06:37:27.481218 | 2014-02-12T15:00:13 | 2014-02-12T15:00:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,945 | py | #!/usr/bin/python
import crypt
import sys
# Toy in-memory "database" of users and domains.
# NOTE(review): each user carries both a clear-text 'password' and its
# crypt() hash 'hpw'; only 'hpw' is consulted during authentication, so
# the clear-text copies look like test fixtures -- confirm and remove.
db = {
    'users': [
        {
            'id': 1,
            'user': 'user1',
            'domain_id': 1,
            'password': 'testpw1',
            'hpw': 'a9cXlAGYp1YTg'
        },
        {
            'id': 2,
            'user': 'user2',
            'domain_id': 1,
            'password': 'testpw2',
            'hpw': 'E.cTWtAAuY4dg'
        }
    ],
    'domains': [
        {
            'id': 1,
            'domain': 'domain1.com'
        },
        {
            'id': 2,
            'domain': 'domain2.com'
        }
    ]
}
def usage():
    """Print the usage line to stderr and exit with status 0."""
    sys.stderr.write('%s <username> <password>\n' % sys.argv[0])
    sys.exit(0)
def authUser(username, pw):
    """Authenticate a ``user@domain`` name against *pw*.

    Returns None on success, or an error-message string on failure.
    """
    separator = username.find('@')
    if separator < 1:
        return 'Invalid user/password1 for ' + username
    local_part = username[:separator]
    domain_part = username[separator + 1:]
    account = getUser(local_part, domain_part)
    if account is None:
        return 'Invalid user/password2 for ' + username
    if checkPassword(account, pw) != 1:
        return 'Invalid user/password3 for ' + username
    print('Authentication successful for %s' % (username))
    print('User: %s\nDomain: %s\nUsername: %s' % (account['user'], account['domain'], username))
    return None
def checkPassword(user, password):
    """Return True when *password* crypt()s to the user's stored hash."""
    stored_hash = user['hpw']
    # crypt() uses the stored hash as its own salt.
    return crypt.crypt(password, stored_hash) == stored_hash
def isValidDomain(domain):
    """Return the domain record matching *domain*, or None if unknown."""
    for record in db['domains']:
        if record.get('domain', None) == domain:
            return record
    return None
def getUser(user, domain):
    """Return a merged user+domain record, or None if either is unknown.

    On key collisions (notably 'id') the user's fields win.
    """
    domain_rec = isValidDomain(domain)
    if domain_rec is None:
        return None
    user_rec = None
    for candidate in db['users']:
        if candidate.get('user', None) == user:
            user_rec = candidate
            break
    if user_rec is None:
        return None
    # Python 2 dict merge: later items (user fields) override earlier ones.
    return dict(
        domain_rec.items() + user_rec.items()
    )
# Entry point: expects <username> <password> on the command line.
# Exits 1 on authentication failure, 0 on success.
if len(sys.argv) < 3:
    usage()
error = authUser( sys.argv[1], sys.argv[2] )
if error is not None:
    sys.stderr.write( error + '\n' )
    sys.exit(1)
sys.exit(0)
| [
"msmith@arch-mage.com"
] | msmith@arch-mage.com |
3cf8ee2f2d96a7b435f8fe8b59b914bc534868dc | beb17bb8e02ccf039af005adfbccafc1b14215b0 | /LIBRARY/rpi_US_2back.py | 74c42e4a58e6a1bed62bc9c3c5a29c975a3050e0 | [] | no_license | Owluska/RPi_car1.0_soft | 010ff669bc29226404b6dcbefc8d90fe10d7c79b | b4e685a3f6f13f1d61ec9462c07268c47225514d | refs/heads/main | 2023-04-18T14:18:41.419829 | 2021-05-08T10:30:06 | 2021-05-08T10:30:06 | 318,752,520 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,554 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 19 10:40:35 2021
@author: root
"""
from multiprocessing import Process, Manager
from LIBRARY.rpi_US import US
# GPIO pin assignments for the two ultrasonic sensors.
# NOTE(review): presumably BCM pin numbering — confirm against LIBRARY.rpi_US.US.
US1_TRIG = 22
US1_ECHO = 17
US2_TRIG = 24
US2_ECHO = 23
# Instantiate and initialise both sensors at import time.
# NOTE(review): 'bl'/'br' presumably mean back-left/back-right — verify.
bl = US(trig = US1_TRIG, echo = US1_ECHO)
br = US(trig = US2_TRIG, echo = US2_ECHO)
bl.setup_US_ports()
br.setup_US_ports()
class US_multi():
    """Poll several ultrasonic sensors concurrently, one process per sensor.

    Latest readings are published into a multiprocessing.Manager dict,
    keyed by the sensor's label.
    """

    def __init__(self, USs=None, labels=None):
        """Create the poller.

        USs    -- list of US sensor objects (default: the two module-level
                  back sensors).  The original code used mutable default
                  arguments, which also instantiated two extra US objects
                  (touching GPIO) at class-definition time; defaults are now
                  built per call.
        labels -- matching list of label strings for the shared dict.
        """
        if USs is None:
            USs = [US(trig=US1_TRIG, echo=US1_ECHO), US(trig=US2_TRIG, echo=US2_ECHO)]
        if labels is None:
            labels = ['back', 'front']
        self.USs = USs
        self.USs_labels = labels
        self.US_pools = []              # Process objects, filled by US_start()
        # Shared across processes: label -> most recent distance reading.
        self.USs_out = Manager().dict({l: 0.0 for l in self.USs_labels})
        self.toSleep = 0.2              # polling interval; currently unused

    def US_pooling(self, US, label):
        """Read one sensor forever, publishing into the shared dict.

        (The parameter name 'US' shadows the sensor class; kept for
        interface compatibility with existing callers.)
        """
        while True:
            try:
                self.USs_out[label] = US.get_distance()
            except Exception:
                # A failed read (e.g. GPIO torn down) ends this poller.
                return

    def US_start(self):
        """Spawn one non-daemon polling process per sensor."""
        for us, label in zip(self.USs, self.USs_labels):
            self.US_pools.append(Process(target=self.US_pooling, args=(us, label)))
        for p in self.US_pools:
            p.daemon = False
            p.start()

    def USs_stop(self):
        """Terminate and reap every polling process."""
        for p in self.US_pools:
            print("stopping {}".format(p))
            p.terminate()
            p.join()  # reap the terminated process to avoid zombies
| [
"kagirina@gmail.com"
] | kagirina@gmail.com |
e2b3644d891b5d9f9cb8f98e5d2c1087997a4353 | 51e8665776011d86e07e91f883fb4a55f010a2e4 | /reports/views.py | 3acdf4184a9f61eda9043a4d1e9d8f14d81ce784 | [] | no_license | 61070055/Webpro-week3 | 93e85ec1f52e2a52e957f32be4d3f3afbaf6a472 | 50b2f60e8d298dc384bbfa06c420bdb230f80b00 | refs/heads/master | 2020-12-23T14:07:26.579274 | 2020-01-30T10:05:52 | 2020-01-30T10:05:52 | 237,175,863 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 481 | py | from django.http import HttpResponse
from django.shortcuts import render
from django.template.context_processors import request
# Create your views here.
def dashboard(request):
    """Render a plain-text placeholder for the dashboard page."""
    body = 'This is Dashboard'
    return HttpResponse(body)
def find(request):
    """Render a plain-text placeholder for the search/export page."""
    body = 'หน้าจอค้นหา และ export ข้อมูลการเข้าห้องเรียน ทั้งในเทอมปัจจุบัน และ ย้อนหลัง'
    return HttpResponse(body)
| [
"61070055@kmitl.ac.th"
] | 61070055@kmitl.ac.th |
cf7a63d20fda488bf419e211b7e6c556c81d199b | 0d38d7155220615b128e5045c3c5d71bacda8230 | /modules/experiment/texfig.py | 5a7467510a54d51ab8442f2b81559106f6652a92 | [
"MIT"
] | permissive | avogel88/compare-VAE-GAE | da7e23c3e3bead625df2c22e496f5b90aca3e649 | 35f5e5b007d130419c33e5751402ff89fa953465 | refs/heads/main | 2023-08-05T05:09:01.017701 | 2023-07-21T06:45:24 | 2023-07-21T06:45:24 | 313,011,101 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,058 | py | """
Utility to generate PGF vector files from Python's Matplotlib plots to use in LaTeX documents.
Read more at https://github.com/knly/texfig
"""
import matplotlib as mpl
from math import sqrt
def pgf():
    """Configure Matplotlib to render figures with the PGF/pdflatex backend.

    Sets serif LaTeX fonts, disables rc-fonts, and sizes figures to a
    golden-ratio rectangle at the default LaTeX text width.
    """
    default_width = 5.78853  # in inches
    default_ratio = (sqrt(5.0) - 1.0) / 2.0  # golden mean
    mpl.use('pgf')
    mpl.rcParams.update({
        "text.usetex": True,
        "pgf.texsystem": "pdflatex",
        "pgf.rcfonts": False,
        "font.family": "serif",
        "font.serif": [],
        "font.sans-serif": [],
        "font.monospace": [],
        "figure.figsize": [default_width, default_width * default_ratio],
        # Matplotlib >= 3.3 requires "pgf.preamble" to be a single string;
        # the original list-of-lines form raises a validation error there.
        "pgf.preamble": "\n".join([
            # put LaTeX preamble declarations here
            r"\usepackage[utf8x]{inputenc}",
            r"\usepackage[T1]{fontenc}",
            # macros defined here will be available in plots, e.g.:
            r"\newcommand{\vect}[1]{#1}",
            # You can use dummy implementations, since your LaTeX document
            # will render these properly, anyway.
        ]),
    })
| [
"andreas.vogel88@mail.de"
] | andreas.vogel88@mail.de |
3c5320d357242e588c1913155e9894ac80e4c591 | 9339d28e15b80f63e5360070292b4c7e4d1982a1 | /one_classify/SFA/utility/envi_read.py | 4f186f27d66cdb100e92a45dbe01d39b754f4c3c | [] | no_license | Fanrongbo/change_detection_compare_method | 7bcb65f85a146eaa9da579f6b29a763959dd1176 | ee8249b55f21f4dc4c5a753956e9bf1a989e0eb5 | refs/heads/main | 2023-06-26T09:52:56.904551 | 2021-07-29T11:36:21 | 2021-07-29T11:36:21 | 390,595,751 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,153 | py |
###读取envi遥感数据
import gdal
from skimage import io
import scipy.io as sio
class ENVI_read:
    """Reader for ENVI/GeoTIFF raster files via GDAL."""

    def __init__(self, in_file):
        """Open *in_file* (TIFF or ENVI) and cache its raster metadata."""
        self.in_file = in_file
        dataset = gdal.Open(self.in_file)
        self.XSize = dataset.RasterXSize                 # pixels along X
        self.YSize = dataset.RasterYSize                 # pixels along Y
        self.GeoTransform = dataset.GetGeoTransform()    # affine geo-transform
        self.ProjectionInfo = dataset.GetProjection()    # projection (WKT)
        self.im_bands = dataset.RasterCount              # number of bands

    def get_data(self, band):
        """Return band number *band* (1-based, GDAL convention) as a 2-D array."""
        dataset = gdal.Open(self.in_file)
        raster_band = dataset.GetRasterBand(band)  # renamed: no longer shadows the argument
        data = raster_band.ReadAsArray()
        return data

    def get_lon_lat(self):
        """Return per-pixel (lon, lat) arrays derived from the geo-transform."""
        # Fix: numpy was used here as 'np' but never imported in this module;
        # import locally so the method is self-contained.
        import numpy as np
        gtf = self.GeoTransform
        x_range = range(0, self.XSize)
        y_range = range(0, self.YSize)
        x, y = np.meshgrid(x_range, y_range)
        lon = gtf[0] + x * gtf[1] + y * gtf[2]
        lat = gtf[3] + x * gtf[4] + y * gtf[5]
        return lon, lat
class standard_read:
    # Reference notes on OpenCV imread flags (translated):
    # IMREAD_UNCHANGED = -1  # no conversion; e.g. a 16-bit image stays 16-bit.
    # IMREAD_GRAYSCALE = 0   # convert to grayscale; 16-bit input becomes 8-bit CV_8UC1.
    # IMREAD_COLOR = 1       # convert to 3-channel RGB; depth reduced to 8-bit.
    # IMREAD_ANYDEPTH = 2    # keep bit depth; convert to grayscale.
    # IMREAD_ANYCOLOR = 4    # keep channels if <= 3, else take first three; 8-bit depth.
    def __init__(self,in_file):
        self.in_file = in_file # Tiff or ENVI file path
    def mat_read(self):
        # NOTE(review): this method is broken as written — 'meanstd_file',
        # 'Recovery' and 'np' are undefined names (NameError at runtime),
        # 'test_mean'/'test_std'/'mdict' are computed but never used, and the
        # savemat call is commented out mid-expression.  Intent appears to be:
        # load mean/std from a .mat file and save/export a 'Recovery' array.
        matfile = sio.loadmat(meanstd_file)
        test_mean = np.array(matfile['mean_test'])
        test_std = np.array(matfile['std_test'])
        # Save predictions to a matfile to open later in matlab
        mdict = {"Recovery": Recovery}
        # sio.savemat(savename, mdict
        io.imsave('Recovery.tif', np.float32(Recovery))
| [
"noreply@github.com"
] | noreply@github.com |
fe92c22ed71c0c1f4658405f96084f2d8a16c64e | aeb76bc35ffcb1b62e0a9d354d0c3e659eeb1e8a | /crm/forms.py | 3d62ea97a5983eb1c8a614140bc275296c3a883f | [] | no_license | vk-airline/airline-crm | 9eb22221c6376fcf0c2a477fe1dd61769f73b63b | 9c05da81421dc9fafbde525c96d863d820278133 | refs/heads/main | 2023-05-02T13:42:51.770473 | 2021-05-22T16:56:23 | 2021-05-22T16:56:23 | 346,685,294 | 0 | 0 | null | 2021-04-22T09:25:58 | 2021-03-11T11:52:59 | Python | UTF-8 | Python | false | false | 3,002 | py | from django.forms import ModelForm
from crm.models import FlightPlan, Flight
from django import forms
from multiselectfield import MultiSelectFormField
class FlightPlanForm(ModelForm):
    """ModelForm exposing every FlightPlan field with Bootstrap-styled widgets.

    Placeholders document the expected input format for each text field.
    """
    class Meta:
        model = FlightPlan
        fields = "__all__"
        # NOTE(review): 'passanger_capacity' is misspelled but must match the
        # model field name — fix in the model (with a migration) first.
        widgets = {
            'planning_departure_time': forms.TimeInput(attrs={
                'class': 'form-control',
                'placeholder': 'HH:MM:SS',
                'required': True,
            }),
            'planning_arrival_time': forms.TimeInput(attrs={
                'class': 'form-control',
                'placeholder': 'HH:MM:SS',
                'required': True,
            }),
            'start_date': forms.DateInput(attrs={
                'class': 'form-control',
                'placeholder': 'YYYY-MM-DD',
                'required': True,
            }),
            'end_date': forms.DateInput(attrs={
                'class': 'form-control',
                'placeholder': 'YYYY-MM-DD',
                'required': True,
            }),
            'flight_code': forms.TextInput(attrs={
                'class': 'form-control',
                'placeholder': '**-***',
                'required': True,
            }),
            'passanger_capacity': forms.NumberInput(attrs={
                'class': 'form-control',
                'required': True,
            }),
            'status': forms.Select(attrs={'class': 'form-control'}),
            'days_of_week': forms.CheckboxSelectMultiple(attrs={}),
            "description": forms.Textarea(attrs={'class': 'form-control', 'rows': 5, 'readonly': ''}),
            'source': forms.Select(attrs={'class': 'form-control'}),
            'destination': forms.Select(attrs={'class': 'form-control'}),
        }
class FlightForm(ModelForm):
    """ModelForm exposing every Flight field with Bootstrap-styled widgets.

    Planned datetimes are required; actual datetimes are optional (filled in
    after the flight happens, hence no 'required' attribute).
    """
    class Meta:
        model = Flight
        fields = "__all__"
        widgets = {
            'planning_departure_datetime': forms.DateTimeInput(attrs={
                'class': 'form-control',
                'placeholder': 'YYYY-MM-DD HH:MM:SS',
                'required': True,
            }),
            'planning_arrival_datetime': forms.DateTimeInput(attrs={
                'class': 'form-control',
                'placeholder': 'YYYY-MM-DD HH:MM:SS',
                'required': True,
            }),
            'actual_departure_datetime': forms.DateTimeInput(attrs={
                'class': 'form-control',
                'placeholder': 'YYYY-MM-DD HH:MM:SS',
            }),
            'actual_arrival_datetime': forms.DateTimeInput(attrs={
                'class': 'form-control',
                'placeholder': 'YYYY-MM-DD HH:MM:SS',
            }),
            'flight_plan': forms.Select(attrs={'class': 'form-control'}),
            'aircraft': forms.Select(attrs={'class': 'form-control'}),
            'actual_destination': forms.Select(attrs={'class': 'form-control'}),
            'employees': forms.SelectMultiple(attrs={'class': 'form-control', 'size': '10'}),
        }
| [
"issokov@yandex.ru"
] | issokov@yandex.ru |
070518cdd5ef09d6e867133c27cc64804dca8c70 | 619229cf49134e1859d746d69f00b76b7b583f16 | /leetcode/python/maxConsecutiveOnes.py | 4bb291fd0408584a4b6e6097f0adbf416ca82b22 | [] | no_license | rene150390/training | 427ce161cfd559876378a44ca3d17ae8877a44fb | 2e7b9fb1db2be8ccbcbbd31fa6c40108f2a447e4 | refs/heads/master | 2022-12-25T20:31:27.459971 | 2020-09-29T05:03:03 | 2020-09-29T05:03:03 | 298,703,994 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 603 | py | class Solution(object):
def findMaxConsecutiveOnes(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
numberOnes = 0
consecutives = []
maxCon = 0
for id, x in enumerate(nums):
if x == 1:
numberOnes += 1
if id == len(nums) - 1:
consecutives.append(numberOnes)
elif x == 0:
consecutives.append(numberOnes)
numberOnes = 0
for y in consecutives:
if y > maxCon:
maxCon = y
return maxCon | [
"rene.sanabria@optimissa.com"
] | rene.sanabria@optimissa.com |
b0424b8ff6d9b046b3dd740b1e3d0037d998774c | afede9b03024605e60ec6f719c27f98e6b30d942 | /scripts/calculate_optical_flow.py | 1a6d5068542baad1874ee8c092feb1fa450ac2d1 | [] | no_license | aswinvk28/dynamic-skin-segmentation | c2afd12a76570f712b235573a4f7e1908c03fd68 | 21914a1e939687fd6ef5b1a2fce1be2dcd689d7b | refs/heads/main | 2021-02-27T19:06:38.868184 | 2021-01-26T12:13:10 | 2021-01-26T12:13:10 | 245,628,750 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 11,116 | py | from PIL import Image
import math
import matplotlib
import numpy as np
import os
import pathlib
import random
import scenenet_pb2 as sn
import sys
import scipy.misc
import argparse
import cv2
import imageio
import json
from time import time
import argparse
def parse_args():
    """Parse the command line: one required ``-im/--image`` string argument."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-im', '--image',
        type=str,
        required=True,
        default=False,
        help='image input',
    )
    parsed = parser.parse_args()
    return parsed
def normalize(v):
    """Return *v* scaled to unit Euclidean length."""
    magnitude = np.linalg.norm(v)
    return v / magnitude
def load_depth_map_in_m(file_name):
    # Load a depth image, resize to 320x240, and scale raw values to metres
    # (raw units assumed to be millimetres, hence * 0.001).
    # NOTE(review): .convert('L') quantises to 8 bits per pixel; depth renders
    # are commonly 16-bit, so this may destroy most of the depth precision —
    # confirm the input bit depth before trusting absolute distances.
    image = Image.open(file_name).convert('L').resize((320,240))
    pixel = np.asarray(image)
    return (pixel * 0.001)
def pixel_to_ray(pixel,vfov=45,hfov=60,pixel_width=320,pixel_height=240):
    """Map a pixel (x, y) to an un-normalised camera-frame ray with z = 1."""
    x, y = pixel
    half_h = math.tan(math.radians(hfov/2.0))
    half_v = math.tan(math.radians(vfov/2.0))
    # Shift by half a pixel to sample the pixel centre, then map to [-1, 1].
    x_vect = half_h * ((2.0 * ((x+0.5)/pixel_width)) - 1.0)
    y_vect = half_v * ((2.0 * ((y+0.5)/pixel_height)) - 1.0)
    return (x_vect, y_vect, 1.0)
def normalised_pixel_to_ray_array(width=320,height=240):
    """Build a (height, width, 3) array holding one unit view ray per pixel."""
    rays = np.zeros((height, width, 3))
    for row in range(height):
        for col in range(width):
            ray = pixel_to_ray((col, row), pixel_height=height, pixel_width=width)
            rays[row, col] = normalize(np.array(ray))
    return rays
def points_in_camera_coords(depth_map,pixel_to_ray_array):
    """Scale each pixel's ray by its depth, yielding homogeneous 3D points.

    Returns an (H, W, 4) array whose last component is always 1.
    """
    assert depth_map.shape[0] == pixel_to_ray_array.shape[0]
    assert depth_map.shape[1] == pixel_to_ray_array.shape[1]
    assert len(depth_map.shape) == 2
    assert pixel_to_ray_array.shape[2] == 3
    height, width = depth_map.shape
    camera_relative_xyz = np.ones((height, width, 4))
    # Broadcast the depth over the three ray components in one step.
    camera_relative_xyz[:, :, :3] = depth_map[:, :, np.newaxis] * pixel_to_ray_array
    return camera_relative_xyz
def flatten_points(points):
    """Collapse a point grid of shape (..., C) to a flat (N, C) array.

    Generalised from the original hard-coded 4 columns: the channel count
    is taken from the last axis, so (H, W, 3) grids also work.  Behaviour
    is unchanged for the (H, W, 4) homogeneous grids used elsewhere here.
    """
    return points.reshape(-1, points.shape[-1])
def reshape_points(height,width,points):
    """Inverse of flatten_points: fold an (N, C) array back to (height, width, C)."""
    channels = points.shape[1]
    return points.reshape(height, width, channels)
def transform_points(transform,points):
    """Apply a 4x4 homogeneous transform to every point of an (H, W, 4) grid."""
    assert points.shape[2] == 4
    height = points.shape[0]
    width = points.shape[1]
    flat = flatten_points(points)
    transformed = (transform.dot(flat.T)).T
    return reshape_points(height, width, transformed)
def world_to_camera_with_pose(view_pose):
    """Build the 4x4 world->camera transform from a camera/lookat pose."""
    lookat_pose = position_to_np_array(view_pose.lookat)
    camera_pose = position_to_np_array(view_pose.camera)
    up = np.array([0, 1, 0])
    rotation = np.eye(4)
    rotation[2, :3] = normalize(lookat_pose - camera_pose)                        # forward (z)
    rotation[0, :3] = normalize(np.cross(rotation[2, :3], up))                    # right (x)
    rotation[1, :3] = -normalize(np.cross(rotation[0, :3], rotation[2, :3]))      # down (y)
    translation = np.eye(4)
    translation[:3, 3] = -camera_pose
    return rotation.dot(translation)
def camera_to_world_with_pose(view_pose):
    """Inverse of world_to_camera_with_pose for the same pose."""
    world_to_camera = world_to_camera_with_pose(view_pose)
    return np.linalg.inv(world_to_camera)
def camera_point_to_uv_pixel_location(point,vfov=45,hfov=60,pixel_width=320,pixel_height=240):
    """Project a camera-frame 3D point onto (u, v) pixel coordinates."""
    projected = point / point[2]  # perspective divide onto the z = 1 plane
    half_h = math.tan(math.radians(hfov/2.0))
    half_v = math.tan(math.radians(vfov/2.0))
    u = (pixel_width/2.0) * ((projected[0] / half_h) + 1)
    v = (pixel_height/2.0) * ((projected[1] / half_v) + 1)
    return (u, v)
def position_to_np_array(position):
    """Convert a position (dict with 'x'/'y'/'z', or protobuf message) to an array."""
    if type(position) == dict:
        return np.array([position['x'], position['y'], position['z']])
    return np.array([position.x, position.y, position.z])
def interpolate_poses(start_pose,end_pose,alpha):
    """Linearly interpolate two poses and return the result as an sn.Pose.

    start_pose / end_pose -- dict-style poses with 'camera', 'lookat' and
                             'timestamp' entries (as loaded from view.json).
    alpha                 -- interpolation coefficient in [0, 1]; 0 gives
                             start_pose, 1 gives end_pose.

    Raises ValueError for alpha outside [0, 1].  (The original used bare
    asserts, which are silently stripped under ``python -O``.)
    """
    if not (0.0 <= alpha <= 1.0):
        raise ValueError('alpha must be within [0, 1], got {}'.format(alpha))
    camera_pose = alpha * position_to_np_array(end_pose['camera'])
    camera_pose += (1.0 - alpha) * position_to_np_array(start_pose['camera'])
    lookat_pose = alpha * position_to_np_array(end_pose['lookat'])
    lookat_pose += (1.0 - alpha) * position_to_np_array(start_pose['lookat'])
    timestamp = alpha * end_pose['timestamp'] + (1.0 - alpha) * start_pose['timestamp']
    pose = sn.Pose()
    pose.camera.x = camera_pose[0]
    pose.camera.y = camera_pose[1]
    pose.camera.z = camera_pose[2]
    pose.lookat.x = lookat_pose[0]
    pose.lookat.y = lookat_pose[1]
    pose.lookat.z = lookat_pose[2]
    pose.timestamp = timestamp
    return pose
def world_point_to_uv_pixel_location_with_interpolated_camera(point,shutter_open,shutter_close,alpha):
    """Project a world-frame point through the camera interpolated at *alpha*."""
    pose = interpolate_poses(shutter_open, shutter_close, alpha)
    world_to_cam = world_to_camera_with_pose(pose)
    camera_point = world_to_cam.dot(np.array(point))
    return camera_point_to_uv_pixel_location(camera_point)
# Expects:
# an nx4 array of points of the form [[x,y,z,1],[x,y,z,1]...] in world coordinates
# a length three array [x,y,z] for camera start and end (of a shutter) and lookat start/end in world coordinates
# Returns:
# a nx2 array of the horizontal and vertical pixel location time derivatives (i.e. pixels per second in the horizontal and vertical)
# NOTE: the pixel coordinates are defined as (0,0) in the top left corner, to (320,240) in the bottom left
def optical_flow(points,shutter_open,shutter_close,alpha=0.5,shutter_time=(1.0/60),
        hfov=60,pixel_width=320,vfov=45,pixel_height=240):
    """Analytic optical flow (du/dt, dv/dt in pixels/second) for world points.

    points                    -- (n, 4) homogeneous world-coordinate points.
    shutter_open/close        -- dict poses (camera/lookat) at shutter ends.
    alpha                     -- where along the shutter to take the derivative.
    Returns an (n, 2) array of horizontal/vertical pixel velocities.
    """
    # Alpha is the linear interpolation coefficient, 0.5 takes the derivative in the midpoint
    # which is where the ground truth renders are taken. The photo render integrates via sampling
    # over the whole shutter open-close trajectory
    view_pose = interpolate_poses(shutter_open,shutter_close,alpha)
    wTc = world_to_camera_with_pose(view_pose)
    camera_pose = position_to_np_array(view_pose['camera'] if type(view_pose) == dict else view_pose.camera)
    lookat_pose = position_to_np_array(view_pose['lookat'] if type(view_pose) == dict else view_pose.lookat)
    # Get camera pixel scale constants
    uk = (pixel_width/2.0) * ((1.0/math.tan(math.radians(hfov/2.0))))
    vk = (pixel_height/2.0) * ((1.0/math.tan(math.radians(vfov/2.0))))
    # Get basis vectors (b1 = forward, b2 = right, b3 = down; matches the
    # rows of the world_to_camera rotation)
    ub1 = lookat_pose - camera_pose
    b1 = normalize(ub1)
    ub2 = np.cross(b1,np.array([0,1,0]))
    b2 = normalize(ub2)
    ub3 = np.cross(b2,b1)
    b3 = -normalize(ub3)
    # Get camera pose alpha derivative
    camera_end = position_to_np_array(shutter_close['camera'])
    camera_start = position_to_np_array(shutter_open['camera'])
    lookat_end = position_to_np_array(shutter_close['lookat'])
    lookat_start= position_to_np_array(shutter_open['lookat'])
    dc_dalpha = camera_end - camera_start
    # Get basis vector derivatives
    # dub1 means d unnormalised b1
    # (d/dub of normalize(ub) is the projection orthogonal to b over |ub|)
    db1_dub1 = (np.eye(3) - np.outer(b1,b1))/np.linalg.norm(ub1)
    dub1_dalpha = lookat_end - lookat_start - camera_end + camera_start
    db1_dalpha = db1_dub1.dot(dub1_dalpha)
    db2_dub2 = (np.eye(3) - np.outer(b2,b2))/np.linalg.norm(ub2)
    dub2_dalpha = np.array([-db1_dalpha[2],0,db1_dalpha[0]])
    db2_dalpha = db2_dub2.dot(dub2_dalpha)
    db3_dub3 = (np.eye(3) - np.outer(b3,b3))/np.linalg.norm(ub3)
    # Product-rule expansion of d/dalpha of cross(b2, b1)
    dub3_dalpha = np.array([
        -(db2_dalpha[2]*b1[1]+db1_dalpha[1]*b2[2]),
        -(db2_dalpha[0]*b1[2] + db1_dalpha[2]*b2[0])+(db2_dalpha[2]*b1[0]+db1_dalpha[0]*b2[2]),
        (db1_dalpha[1]*b2[0]+db2_dalpha[0]*b1[1])
    ])
    db3_dalpha = -db3_dub3.dot(dub3_dalpha)
    # derivative of the rotated translation offset
    dt3_dalpha = np.array([
        -db2_dalpha.dot(camera_pose)-dc_dalpha.dot(b2),
        -db3_dalpha.dot(camera_pose)-dc_dalpha.dot(b3),
        -db1_dalpha.dot(camera_pose)-dc_dalpha.dot(b1),
    ])
    # camera transform derivative
    # NOTE(review): row 3 of dT_dalpha is left uninitialised (np.empty); its
    # products land in dpoint_dalpha[3], which is never read below.
    dT_dalpha = np.empty((4,4))
    dT_dalpha[0,:3] = db2_dalpha
    dT_dalpha[1,:3] = db3_dalpha
    dT_dalpha[2,:3] = db1_dalpha
    dT_dalpha[:3,3] = dt3_dalpha
    # Calculate 3D point derivative alpha derivative
    dpoint_dalpha = dT_dalpha.dot(points.T)
    point_in_camera_coords = wTc.dot(np.array(points.T))
    # Calculate pixel location alpha derivative (quotient rule on p/z)
    du_dalpha = uk * (dpoint_dalpha[0] * point_in_camera_coords[2] - dpoint_dalpha[2] * point_in_camera_coords[0])
    dv_dalpha = vk * (dpoint_dalpha[1] * point_in_camera_coords[2] - dpoint_dalpha[2] * point_in_camera_coords[1])
    du_dalpha = du_dalpha/(point_in_camera_coords[2]*point_in_camera_coords[2])
    dv_dalpha = dv_dalpha/(point_in_camera_coords[2]*point_in_camera_coords[2])
    # Calculate pixel location time derivative
    du_dt = du_dalpha / shutter_time
    dv_dt = dv_dalpha / shutter_time
    return np.vstack((du_dt,dv_dt)).T
def flow_to_hsv_image(flow, magnitude_scale=1.0/100.0):
    """Convert an (H, W, 2) flow field into an (H, W, 3) HSV image.

    Hue encodes flow direction, value encodes the magnitude (scaled by
    *magnitude_scale* and clipped to 1.0); saturation is 1 for moving
    pixels.  Pixels with ~zero flow are rendered black.

    Generalised: the image size is taken from ``flow.shape`` instead of the
    original hard-coded 240x320, so any flow-field size works.
    """
    height, width = flow.shape[0], flow.shape[1]
    hsv = np.empty((height, width, 3))
    for row in range(height):
        for col in range(width):
            v = flow[row, col, :]
            magnitude = np.linalg.norm(v)
            if magnitude < 1e-8:
                # Effectively static pixel: black (H = S = V = 0).
                hsv[row, col, 0] = 0.0
                hsv[row, col, 1] = 0.0
                hsv[row, col, 2] = 0.0
            else:
                direction = v / magnitude
                theta = math.atan2(direction[1], direction[0])
                if theta <= 0:
                    theta += 2 * math.pi  # map (-pi, 0] to (pi, 2*pi]
                assert theta >= 0.0 and theta <= 2 * math.pi
                hsv[row, col, 0] = theta / (2 * math.pi)
                hsv[row, col, 1] = 1.0
                hsv[row, col, 2] = min(magnitude * magnitude_scale, 1.0)
    return hsv
def depth_path_from_view(filename):
    """Return the (relative) path of the depth image for this view."""
    # os.path.join('', x) returns x unchanged; kept for parity with the original.
    return os.path.join('', str(filename))
class AttrDict(dict):
    """Dict whose keys are also readable and writable as attributes.

    Fix: the original rebound ``AttrDict.__getattribute__`` to
    ``dict.__getitem__`` inside ``__init__``, which broke every method
    lookup after the first instantiation (``d.keys`` raised KeyError),
    and assigned ``__setattribute__`` — not a real dunder, so it had no
    effect.  Aliasing the instance ``__dict__`` to the mapping itself
    gives the intended behaviour: attribute access and item access share
    the same storage, while dict methods keep working.
    """

    def __init__(self, *args, **kwargs):
        super(AttrDict, self).__init__(*args, **kwargs)
        self.__dict__ = self
if __name__ == '__main__':
    args = parse_args()
    # NOTE(review): file handle is never closed — prefer `with open(...)`.
    view = json.load(open("view.json", "r"))
    t1 = time()
    # This stores for each image pixel, the cameras 3D ray vector
    depth_path = depth_path_from_view(args.image)
    optical_flow_path = 'output/'+args.image
    print('\nConverting depth image:{0} and camera pose to optical flow image:{1}'.format(depth_path,optical_flow_path), "\n")
    depth_map = load_depth_map_in_m(str(depth_path))
    print(depth_map.shape)
    cached_pixel_to_ray_array = normalised_pixel_to_ray_array(width=depth_map.shape[1],height=depth_map.shape[0])
    # Treat zero (invalid) depths as very far away so they do not project
    # to the camera origin.
    depth_map[depth_map == 0.0] = 1000.0
    # This is a 320x240x3 array, with each 'pixel' containing the 3D point in camera coords
    points_in_camera = points_in_camera_coords(depth_map,cached_pixel_to_ray_array)
    # Transform point from camera coordinates into world coordinates
    ground_truth_pose = interpolate_poses(view['shutter_open'],view['shutter_close'],0.5)
    camera_to_world_matrix = camera_to_world_with_pose(ground_truth_pose)
    points_in_world = transform_points(camera_to_world_matrix,points_in_camera)
    # Calculate optical flow
    points_in_world = flatten_points(points_in_world)
    optical_flow_derivatives = optical_flow(points_in_world,view['shutter_open'],view['shutter_close'])
    # NOTE(review): 240x320 is hard-coded here while the ray array above
    # adapts to depth_map.shape — confirm inputs are always 320x240.
    optical_flow_derivatives = reshape_points(240,320,optical_flow_derivatives)
    # Write out hsv optical flow image. We use the matplotlib hsv colour wheel
    hsv = flow_to_hsv_image(optical_flow_derivatives)
    rgb = matplotlib.colors.hsv_to_rgb(hsv)
    t2 = time()
    print("\nOptical Flow Time: ", t2- t1, "\n")
    imageio.imwrite(optical_flow_path,rgb)
| [
"aswinvk28@gmail.com"
] | aswinvk28@gmail.com |
5a64d79668de4d8bf784b385470a75c341fd60d2 | 5d135bce94f19c970949431774b0dcfe36b5c48f | /Easy/missing_number.py | d14b50f71392d9a78ee91cc481ed7e6c1b2597f0 | [] | no_license | kaswal/letecode_solution | 16e2232f23a9244662f83b6b8d26f89d99b4ee96 | 0ec4e29b8a87b0060943a749a331c9688f94c771 | refs/heads/master | 2022-06-18T06:36:05.160449 | 2020-05-12T18:46:06 | 2020-05-12T18:46:06 | 259,476,218 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 214 | py | def missing_number(arr):
def missing_number(arr):
    """Return the smallest value in [min(arr), max(arr)] absent from arr.

    Returns None when no value in that range is missing.  Improvements over
    the original: a set gives O(1) membership tests (was an O(n) scan per
    candidate, O(n^2) overall), the list is sorted once instead of twice,
    and the locals no longer shadow the builtins ``min`` and ``max``.
    Raises IndexError on an empty list, exactly as before.
    """
    ordered = sorted(arr)
    lo, hi = ordered[0], ordered[-1]
    present = set(arr)
    for candidate in range(lo, hi + 1):
        if candidate not in present:
            return candidate
    return None


# Demo: renamed from 'input', which shadowed the builtin of the same name.
values = [1, 1, 3, 4, 5]
print(missing_number(values))
| [
"er.kautilya21@gmail.com"
] | er.kautilya21@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.