blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2
values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313
values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107
values | src_encoding stringclasses 20
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 4 6.02M | extension stringclasses 78
values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
24b0969f347074dde7b34d9eb0ca632fc4327364 | 469cf0d322a3a0d14e3d3aa8a8975eba930c2485 | /experiments/utils.py | 4afdf94965a8a0a3548fb480638ffd7b34d4a520 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | DLwbm123/att_gconvs | 70394bd74eef043abeecd9a4feba9d57b6b2f010 | 872259cad49763fdcfa3e96e80b6b5c331adf084 | refs/heads/master | 2022-11-07T22:02:44.173434 | 2020-07-07T07:53:29 | 2020-07-07T07:53:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 202 | py | import numpy as np
def num_params(model):
    """Count (and print) the number of trainable parameters in *model*.

    Args:
        model: any object whose ``parameters()`` yields tensors exposing
            ``requires_grad`` and ``size()`` (e.g. a ``torch.nn.Module``).

    Returns:
        int: total element count over all trainable parameters.  The count
        was previously only printed; returning it as well is backward
        compatible and makes the helper reusable.
    """
    trainable = (p for p in model.parameters() if p.requires_grad)
    # np.prod(p.size()) == number of elements in that parameter tensor
    params = int(sum(np.prod(p.size()) for p in trainable))
    print(params)
    return params
| [
"dwromerog@unal.edu.co"
] | dwromerog@unal.edu.co |
295047c1a5bced9d866665ee801ef0a70d580617 | 635f5c838a0fd866ffcf727c56fb9b57bd130c53 | /ADSE13_125/merging/cc_anom_calculator.py | 874b530f5462a9b342ac15c669d675efc0d6fbe0 | [] | no_license | ExaFEL/exafel_project | 3e83aca036f5e3e94b1c6075aec9322c5a194b8d | 9c3a2769820e733704648d0d35c9e23279c8a50b | refs/heads/master | 2023-09-01T19:49:19.709792 | 2023-09-01T19:46:12 | 2023-09-01T19:46:12 | 91,735,247 | 0 | 5 | null | 2023-08-24T06:57:30 | 2017-05-18T20:32:28 | Jupyter Notebook | UTF-8 | Python | false | false | 1,187 | py | from __future__ import division, print_function, absolute_import
message = '''Pass in 2 half dataset mtz file and it should return CC_anom value'''
from iotbx import reflection_file_reader
from cctbx.array_family import flex
import sys
def run(mtz1, mtz2):
  """Compute and print CC_anom between two half-dataset MTZ files.

  :param mtz1: path to the first half-dataset MTZ file
  :param mtz2: path to the second half-dataset MTZ file
  """
  # read in mtz files
  reflection_file_1 = reflection_file_reader.any_reflection_file(mtz1)
  miller_arrays_1 = reflection_file_1.as_miller_arrays()
  reflection_file_2 = reflection_file_reader.any_reflection_file(mtz2)
  miller_arrays_2 = reflection_file_2.as_miller_arrays()
  # Anomalous differences of the first Miller array from each file
  ma_anom_diff_1 = miller_arrays_1[0].anomalous_differences()
  ma_anom_diff_2 = miller_arrays_2[0].anomalous_differences()
  ma_anom_diff_1.show_summary()
  ma_anom_diff_2.show_summary()
  # Restrict each array to the reflections present in the other
  ma_anom_diff_1_cs = ma_anom_diff_1.common_set(ma_anom_diff_2)
  ma_anom_diff_2_cs = ma_anom_diff_2.common_set(ma_anom_diff_1)
  # Make sure the 2 arrays have the same size
  cc_anom = flex.linear_correlation(ma_anom_diff_1_cs.data(), ma_anom_diff_2_cs.data()).coefficient()
  print ('Value of CC_anom for the dataset is = ',cc_anom)
  #from IPython import embed; embed(); exit()
if __name__ == '__main__':
  # Usage: cc_anom_calculator.py half1.mtz half2.mtz
  print (message)
  run(sys.argv[1], sys.argv[2])
| [
"asmit3@gmail.com"
] | asmit3@gmail.com |
b1f00c327467380c506ad0afdda9a0a52a98f0f6 | 9b0adb117f19c4a694c421207e0ac5065b46a1ce | /natuurpunt_cmis/__openerp__.py | fd2fbac21ef66daf61888ed7d6505f007a513168 | [] | no_license | smart-solution/natuurpunt-base | 50cd88d116c2d019cf7a0d77cf7167ac51055d12 | 8d44d422d1f777a2ec701a587e2f75e57a9f4059 | refs/heads/master | 2021-01-17T02:28:05.447739 | 2020-11-03T15:57:08 | 2020-11-03T15:57:08 | 39,185,966 | 0 | 0 | null | 2020-11-03T15:57:09 | 2015-07-16T08:35:14 | Python | UTF-8 | Python | false | false | 1,293 | py | # -*- coding: utf-8 -*-
##############################################################################
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
"name" : "natuurpunt_cmis",
"version" : "1.0",
"author" : "Natuurpunt (joeri.belis@natuurpunt.be)",
"website" : "www.natuurpunt.be",
"category" : "base",
"description": """
Custom cmis support for natuurpunt
""",
"depends" : ["natuurpunt_purchase",],
"data" : ["natuurpunt_cmis_view.xml",],
"init_xml" : [],
"update_xml" : [],
"active": False,
"installable": True
}
| [
"joeri.belis@natuurpunt.be"
] | joeri.belis@natuurpunt.be |
de1d031d0e3f4f8412e64401493eb255b3891c8f | 7946dfd98e8b2cc03b100cfd72c52095783ef6d0 | /pdf_scraper.py | 94570eadea822c5b4faa78507b5a570fe314a621 | [] | no_license | nourouhichi/Scrapme | 709c53dc950e6f654de9f6f9577a316a045144db | 9bd912afb0cdd0f25057331a51482344e7261a26 | refs/heads/main | 2023-07-27T04:49:44.098666 | 2021-09-01T19:36:32 | 2021-09-01T19:36:32 | 340,412,170 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,709 | py | from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
from pdfminer.converter import TextConverter
from pdfminer.layout import LAParams
from pdfminer.pdfpage import PDFPage
from cStringIO import StringIO
import codecs
import io
import sys
import os
import re
def scraper():
    """Interactively extract text from a user-supplied PDF with pdfminer.

    Prompts for a file path, prints the extracted text, optionally saves a
    whitespace-collapsed copy to scrape.txt, then offers follow-up
    analysis scripts.  Python 2 only (raw_input, print statement, file()).
    """
    # Ask the user which PDF to process
    filepath = raw_input("PDF file to scrape: ")
    pages=None
    # pages is always None here, so every page is processed
    if not pages:
        pagenums = set()
    else:
        pagenums = set(pages)
    # Standard pdfminer pipeline: resource manager -> converter -> interpreter
    output = StringIO()
    manager = PDFResourceManager()
    converter = TextConverter(manager, output, laparams=LAParams())
    interpreter = PDFPageInterpreter(manager, converter)
    infile = file(filepath, 'rb')
    for page in PDFPage.get_pages(infile, pagenums):
        interpreter.process_page(page)
    infile.close()
    converter.close()
    text = output.getvalue()
    # NOTE(review): missing call parentheses -- this references the close
    # method but never actually closes the buffer
    output.close
    print text
    save = raw_input("Save the text file as scrape.txt? Note: this will overwrite your existing scrape file if you haven't renamed it from 'scrape.txt'. Type 'y' for yes or 'n' for no.")
    if save == "y":
        text_file = open("scrape.txt", "w")
        # Collapse runs of whitespace before saving
        text = re.sub("\s\s+", " ", text)
        text_file.write("%s" % text)
        text_file.close()
        prompt = raw_input("Save successful! Type 'c' to continue, or 'q' to quit.")
        if prompt == "c":
            prompt2 = raw_input("Would you like to scrape another file? Type 'y' for yes or 'n' for no.")
            if prompt2 == "y":
                # Recurse to process another PDF
                scraper()
            if prompt2 == "n":
                prompt3 = raw_input("Would you like to... \n 1. Save a weighted word list to 'word_frequencies.csv' \n 2. Graph the text's parts of speech by type \n 3. Scrape dates from the text \n 4. Quit the program. \n Type 1, 2, 3, or 4.")
                if prompt3 == "1":
                    sep_words()
                if prompt3 == "2":
                    pos_graph()
                if prompt3 == "3":
                    date_scraper()
                if prompt3 == "4":
                    print("Bye!")
                    exit(0)
        if prompt == "q":
            print("Bye!")
            exit(0)
        else:
            print("Please enter a valid input.")
    if save == "n":
        print "Bye!"
        exit(0)
    else:
        print("Please enter a valid input.")
# Each helper hands control to a follow-up analysis script and exits.
def sep_words():
    os.system("python sepwords.py")
    exit(0)
def pos_graph():
    os.system("python pos_grapher.py")
    exit(0)
def date_scraper():
    os.system("python date_scraper.py")
    exit(0)
# Entry point: start the interactive scraping loop immediately.
scraper()
| [
"noreply@github.com"
] | noreply@github.com |
8f344cb73443082ee006077b541cde87ab3ec719 | 193e18687ac40bffaaf87462935960989ac8c3b3 | /app_modules.py | ce9c73cee119bfca1ef912ee4cf40d873a5bc646 | [] | no_license | wejustwantsleep/data-analysis | 3ff6509b8a07d1ea73aec23b243ddb33ba61ade2 | a02bd4759781dbd45bb155a638aa850873de0449 | refs/heads/master | 2023-05-04T13:57:43.319858 | 2021-05-30T06:42:10 | 2021-05-30T06:42:10 | 372,142,951 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 295 | py |
# GUI FILE
from ui_main import Ui_MainWindow
# 서브 GUI 파일
from ui_sub import Ui_SubWindow
# IMPORT QSS CUSTOM
from ui_styles import Style
# IMPORT FUNCTIONS
from ui_functions import *
# Sub GUI Function
from ui_subfunctions import *
## ==> APP FUNCTIONS
from app_functions import *
| [
"84782837+wejustwantsleep@users.noreply.github.com"
] | 84782837+wejustwantsleep@users.noreply.github.com |
446995ae6e5fdca25e665c1c1ccc611cc0762647 | 59c16cc6c8db9dfc0a72102b661e064cc79f2095 | /maximum.py | 07841b7125729e470bff8165c27472823cf93dde | [] | no_license | Ernest-Macharia/Data-Structures-in-Python | 2fa1b29b4fde4df5d91dda8e8d41813fe44265d9 | 13048b56a8037c1a6a335fb23220e241a9920af0 | refs/heads/master | 2022-07-31T01:37:46.295116 | 2020-05-18T16:34:15 | 2020-05-18T16:34:15 | 264,999,871 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 142 | py | #string
# Demonstrates max() on the three basic sequence types.
x = "bag"
print(max(x))  # largest character by code point: 'g'
# list
y = ["money", "post", "pricing"]
print(max(y))  # lexicographically largest string: 'pricing'
# tuple
z = ("Ernesto", "Kigo", "Wanjiru")
print(max(z))  # lexicographically largest string: 'Wanjiru'
"ernesthuku@gmail.com"
] | ernesthuku@gmail.com |
6ad326d685fd833c1472b2880dd46e33631efef2 | ccc97be67244855a847d682a525479d8d861005c | /profiles/migrations/0009_alter_coach_plan.py | ad34e8ababed7fd8c0531df2572e5253aff4b0d7 | [] | no_license | Code-Institute-Submissions/alychinque-my-coachee_SepResub | bfb20e5ced6eae8d631a6e3c69681ec55c66b981 | f138eaf4ff3a6fd953b98f022dbede26be57123c | refs/heads/master | 2023-08-28T19:12:13.545377 | 2021-10-04T10:50:16 | 2021-10-04T10:50:16 | 413,435,922 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 480 | py | # Generated by Django 3.2.4 on 2021-10-03 10:48
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: redefine the `plan` field of `coach`."""
    dependencies = [
        ('profiles', '0008_alter_coach_plan'),
    ]
    operations = [
        # Replace Coach.plan's choices with the current subscription tiers.
        migrations.AlterField(
            model_name='coach',
            name='plan',
            field=models.CharField(choices=[('Premium', 'Premium'), ('Free', 'Free'), ('Standard', 'Standard'), ('Basic', 'Basic')], max_length=10),
        ),
    ]
| [
"alychinque@gmail.com"
] | alychinque@gmail.com |
439573e544eeaa3fb9c9ce2a134bb59042694d57 | c1e93acb58f683ad40ccf0188192efc8426060cb | /flaskblog/main/routes.py | ebded2cd8681021efb2049bf20ac6b898f04d90d | [] | no_license | anuj-jaryal/flask-blog | 7203f6c3fddd63c364119cfedc0467b42f0c6ab8 | 018c10bc6d27e83da8209aa4aa74b36ee78e0072 | refs/heads/master | 2023-07-16T06:12:15.695027 | 2021-09-03T06:50:47 | 2021-09-03T06:50:47 | 402,673,674 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 241 | py | from flask import render_template, Blueprint
# Blueprint grouping the site's public pages.
main = Blueprint('main', __name__)
@main.route('/')
def home():
    """Render the landing page."""
    return render_template('home/index.html')
@main.route('/about')
def about():
    """Render the about page."""
    return render_template('home/about.html')
| [
"anuj@ourdesignz.in"
] | anuj@ourdesignz.in |
ce9a504baf33919b24dc53bdf46a87dc45cd164e | fa45fe7eaba7ef7c27ecf95db7c460ca189ce0d4 | /everydays/day002/flask_test/hm_07_helloflask.py | c5e844f1d49376ac75384f887e29197ae23fd8cb | [] | no_license | jake20001/Hello | be1a2bb5331f2ad4c1d8f30c6a9a530aff79e605 | 08217871bb17152eb09e68cd154937ebe5d59d2c | refs/heads/master | 2021-07-10T09:48:15.883716 | 2021-04-23T14:49:03 | 2021-04-23T14:49:03 | 56,282,358 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,237 | py | from datetime import timedelta
from flask import Flask, session,jsonify
app = Flask(__name__)
# The application secret key is used to sign the session cookie.
app.secret_key = 'test'
# Session lifetime -- the default is 31 days.
print(f'默认过期时间: {app.permanent_session_lifetime}')
# Assigning a timedelta object changes the session expiry time.
app.permanent_session_lifetime = timedelta(days=0,seconds=20)
print(f'测试过期时间: {app.permanent_session_lifetime}')
@app.route('/session')
def get_session():
    """Return the whole session content as JSON."""
    # session is a dict-like object
    print(session)
    return jsonify({key: value for key, value in session.items()})
@app.route('/session/set')
def set_session():
    """Write demo values into the session."""
    # session is dict-like: reading/assigning keys reads/writes the
    # session data
    session['username'] = 'zhangsan'
    session['age'] = 100
    return "set session"
@app.route('/session/delete')
def delete_session():
    """Remove the demo key from the session if present."""
    # Use `del` to remove a session key, but check membership first --
    # deleting a missing key would raise an exception
    if 'username' in session:
        del session['username']
    return "delete session"
if __name__ == '__main__':
    app.run(host='0.0.0.0', port=5000, debug=True)
"jianke.zhang@beantechs.com"
] | jianke.zhang@beantechs.com |
8cce70bd3c55dcc1923e46f3c5349ea9c70d3833 | e418d71e647079efc1cab228afb3dda13d9df14b | /news/urls.py | b35b24f5164c0b43b8ac164d608b0bfc77c31a7b | [] | no_license | Frankline-Kiplangat/moringa-tribune | 4e0fc08f2df945acdfed0d3e766e641f83b37e09 | 336cd728207d433959eb1ec9ff7839224e517a13 | refs/heads/master | 2022-11-29T22:00:37.592496 | 2020-07-28T12:59:17 | 2020-07-28T12:59:17 | 283,212,873 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 118 | py | from django.conf.urls import url
from . import views
# Route the app's root URL to the welcome view.
urlpatterns=[
    url('^$', views.welcome, name = 'welcome')
]
| [
"kipfrankline@gmail.com"
] | kipfrankline@gmail.com |
c9979f423a456cb880b77c2b8a584ec0c5691070 | b007d88e6726452ffa8fe80300614f311ae5b318 | /educative.io/coding_patterns/hash_maps/isomorphic_string.py | 3f2177702a7f146a99345b2c40f7a05c9cd83761 | [] | no_license | jinurajan/Datastructures | ec332b12b8395f42cb769e771da3642f25ba7e7f | 647fea5d2c8122468a1c018c6829b1c08717d86a | refs/heads/master | 2023-07-06T14:42:55.168795 | 2023-07-04T13:23:22 | 2023-07-04T13:23:22 | 76,943,162 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,731 | py | """
Given two strings, check whether two strings are isomorphic to each other or not. Two strings are isomorphic if a fixed mapping exists from the characters of one string to the characters of the other string. For example, if there are two instances of the character "a" in the first string, both these instances should be converted to another character (which could also remain the same character if "a" is mapped to itself) in the second string. This converted character should remain the same in both positions of the second string since there is a fixed mapping from the character "a" in the first string to the converted character in the second string.
"""
def is_isomorphic(string1, string2):
    """Return True iff a consistent one-to-one character mapping turns
    string1 into string2 (and the reverse mapping is consistent too)."""
    if len(string1) != len(string2):
        return False
    forward = {}   # string1 char -> string2 char
    backward = {}  # string2 char -> string1 char
    for a, b in zip(string1, string2):
        # setdefault records the pairing on first sight; a later
        # conflicting pairing breaks the isomorphism.
        if forward.setdefault(a, b) != b:
            return False
        if backward.setdefault(b, a) != a:
            return False
    return True
def is_isomorphic(string1, string2):
    """Return True iff the two strings are isomorphic: a fixed two-way
    character mapping converts one into the other."""
    if len(string1) != len(string2):
        return False
    seen_12 = {}  # mapping string1 -> string2
    seen_21 = {}  # mapping string2 -> string1
    for pos in range(len(string1)):
        a, b = string1[pos], string2[pos]
        # An earlier, different pairing in either direction is a conflict.
        if seen_12.get(a, b) != b or seen_21.get(b, a) != a:
            return False
        seen_12[a] = b
        seen_21[b] = a
    return True
| [
"jinu.p.r@gmail.com"
] | jinu.p.r@gmail.com |
8681e67c556768c6c046339c0b3ff64bb0807e64 | 63463bc0976c768406273edd04fbae483d34fbe2 | /setup.py | cc95fdb3f09530b302be185aed7dbed78b673031 | [] | no_license | drnextgis/sport_venue | b894e212d1899f44d0382b615bd21f2dce493996 | 43c097e4b91a7b26e6839a8c1b1be2e2765d05cf | refs/heads/master | 2021-01-01T03:44:59.222224 | 2016-05-31T04:21:20 | 2016-05-31T04:21:20 | 57,450,029 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,058 | py | import os
from setuptools import setup, find_packages
requires = [
'pyramid',
'pyramid_chameleon',
'pyramid_debugtoolbar',
'pyramid_tm',
'SQLAlchemy',
'transaction',
'zope.sqlalchemy',
'waitress',
'psycopg2',
'pyramid_sacrud',
'ps_alchemy',
]
setup(name='sport_venue',
version='0.0',
description='sport_venue',
long_description="",
classifiers=[
"Programming Language :: Python",
"Framework :: Pyramid",
"Topic :: Internet :: WWW/HTTP",
"Topic :: Internet :: WWW/HTTP :: WSGI :: Application",
],
author='',
author_email='',
url='',
keywords='web wsgi bfg pylons pyramid',
packages=find_packages(),
include_package_data=True,
zip_safe=False,
test_suite='sport_venue',
install_requires=requires,
entry_points="""\
[paste.app_factory]
main = sport_venue:main
[console_scripts]
initialize_sport_venue_db = sport_venue.scripts.initializedb:main
""",
)
| [
"rykovd@gmail.com"
] | rykovd@gmail.com |
a5c866848db0a2d103e4eccf93def3588d598874 | f20da8440bae10fe73900f787fc7781f23196325 | /downsample/downsample_dense.py | ad5654289ac0181edcac53448c9e825628577396 | [] | no_license | ramesh720/recipe_zs2017_track2_phoneme | 9c5cdb3066a84e5059153b1390802e700c66978e | f8bbd9b8e6ae4f542e52c2582eab1cf166923226 | refs/heads/master | 2020-04-29T11:07:47.406768 | 2018-01-13T13:03:46 | 2018-01-13T13:03:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,364 | py | #!/usr/bin/env python
"""
Perform dense downsampling over indicated segmentation intervals.
Author: Herman Kamper
Contact: kamperh@gmail.com
Date: 2015-2017
"""
from datetime import datetime
from os import path
import argparse
import cPickle as pickle
import numpy as np
import scipy.signal as signal
import sys
OUTPUT_DIR = "embeddings"
#-----------------------------------------------------------------------------#
# UTILITY FUNCTIONS #
#-----------------------------------------------------------------------------#
def check_argv():
    """Check the command line arguments.

    Positional arguments: lang, subset, landmarks, feature_type.
    Options: --n (samples per segment), --frame_dims, --n_landmarks_max.
    Prints the help text and exits when invoked without arguments.
    """
    parser = argparse.ArgumentParser(description=__doc__.strip().split("\n")[0], add_help=False)
    parser.add_argument("lang", type=str, choices=["english", "french", "mandarin", "LANG1", "LANG2"])
    parser.add_argument("subset", type=str, choices=["train"]) #, "test"])
    # parser.add_argument("landmarks", type=str, choices=["gtphone", "unsup_syl"], help="landmarks set")
    parser.add_argument("landmarks", type=str, choices=["unsup_syl"], help="landmarks set")
    parser.add_argument(
        # "feature_type", type=str, help="input feature type", choices=["mfcc", "cae.d_10", "cae.d_13"]
        "feature_type", type=str, help="input feature type", choices=["mfcc", "okko0"]
        )
    parser.add_argument("--n", type=int, help="number of samples (default: %(default)s)", default=10)
    parser.add_argument(
        "--frame_dims", type=int, default=None,
        help="only keep these number of dimensions"
        )
    parser.add_argument(
        "--n_landmarks_max", type=int,
        help="maximum number of landmarks to cross (default: %(default)s)", default=6
        )
    # No arguments at all: show usage instead of an argparse error
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()
def downsample_utterance(features, seglist, n):
    """Embed each (start, end) segment of `seglist` as one fixed-length row.

    Each inclusive segment of `features` (frames x dims) is resampled
    along the time axis to exactly `n` samples and flattened row-major,
    producing one embedding row per segment.
    """
    rows = [
        signal.resample(features[start:end + 1, :].T, n, axis=1).flatten("C")
        for start, end in seglist
    ]
    return np.asarray(rows)
#-----------------------------------------------------------------------------#
# MAIN FUNCTION #
#-----------------------------------------------------------------------------#
def main():
    """Load features and a segment list, densely downsample every
    utterance, and write the embeddings to a compressed .npz archive."""
    args = check_argv()
    # Pick the input feature archive for the chosen feature type
    if args.feature_type == "mfcc":
        input_npz_fn = path.join(
            "..", "features", "mfcc", args.lang + "_" + args.subset, "numpy", "mfcc.cmvn_dd.npz"
            )
    elif args.feature_type == "okko0":
        input_npz_fn = path.join(
            "..", "features", "okko0", args.lang + "_" + args.subset, "segments.npz"
            )
    else:
        assert False
    print("Reading: " + input_npz_fn)
    input_npz = np.load(input_npz_fn)
    # NOTE(review): indexing .keys()[0] only works under Python 2 (the
    # file imports cPickle, so it targets Python 2)
    d_frame = input_npz[input_npz.keys()[0]].shape[1]
    print("No. of utterances: " + str(len(input_npz.keys())))
    # Per-utterance segment lists produced by get_seglist
    seglist_pickle_fn = path.join(
        OUTPUT_DIR, args.lang + "_" + args.subset, "seglist." + args.landmarks
        + ".n_max_" + str(args.n_landmarks_max) + ".pkl"
        )
    print("Reading: " + seglist_pickle_fn)
    with open(seglist_pickle_fn, "rb") as f:
        seglist_dict = pickle.load(f)
    print("No. of utterances: " + str(len(seglist_dict)))
    print("Frame dimensionality: " + str(d_frame))
    if args.frame_dims is not None and args.frame_dims < d_frame:
        d_frame = args.frame_dims
        print("Reducing frame dimensionality: " + str(d_frame))
    print("No. of samples: " + str(args.n))
    print(datetime.now())
    print("Downsampling")
    downsample_dict = {}
    for i, utt in enumerate(input_npz.keys()):
        downsample_dict[utt] = downsample_utterance(
            input_npz[utt][:, :args.frame_dims], seglist_dict[utt], args.n
            )
    print(datetime.now())
    output_npz_fn = path.join(
        OUTPUT_DIR, args.lang + "_" + args.subset, "downsample_dense." + args.feature_type +
        ".n_" + str(args.n) + ".n_max_" + str(args.n_landmarks_max) + "." + args.landmarks + ".npz"
        )
    print("Writing: " + output_npz_fn)
    np.savez_compressed(output_npz_fn, **downsample_dict)
if __name__ == "__main__":
    main()
| [
"kamperh@gmail.com"
] | kamperh@gmail.com |
b95feeca262a9036432d30423ce62dd23cffdd32 | 415fcefe59c8d33bc3f8b0784d48a7509ea7d5da | /addanother_example/models.py | f005bc5f0ebd821cc308a8ef2e021933eecd6f68 | [] | no_license | asifpy/django-quickstart | 6f517699375015584a7d17f112b70b8eeff89762 | 0ff625915cf169d3fb2f9646d9838260629c1576 | refs/heads/master | 2021-01-11T11:19:22.446634 | 2017-05-04T05:28:55 | 2017-05-04T05:28:55 | 72,719,312 | 2 | 1 | null | 2017-05-04T05:28:56 | 2016-11-03T07:24:32 | Python | UTF-8 | Python | false | false | 795 | py | from django.db import models
class Team(models.Model):
    """A team that players can belong to (current, future, or previous)."""
    name = models.CharField(max_length=20)
    def __str__(self):
        return self.name
class Player(models.Model):
    """Player with three team relations, each demonstrating a different
    add-another widget configuration (see the help_text on each field)."""
    name = models.CharField(max_length=20)
    current_team = models.ForeignKey(
        "Team", related_name="current_players",
        help_text='This demonstrates the wrapper adding an "add" button only'
    )
    future_team = models.ForeignKey(
        "Team", related_name="future_players",
        help_text='This demonstrates the wrapper adding both an "add" and an "edit" button'
    )
    previous_teams = models.ManyToManyField(
        "Team", related_name="ancient_players",
        help_text="This demonstrates the wrapper on a ManyToMany field"
    )
    def __str__(self):
        return self.name
"saluasif@gmail.com"
] | saluasif@gmail.com |
dc4ba9522892d2b29251cd8eab33b73c5fffbcf8 | 2d2c10ffa7aa5ee35393371e7f8c13b4fab94446 | /projects/ai/mrc/haihua/mrc_guwen/loss.py | b36544f5e4359d2393243aba18e0a179e657b745 | [] | no_license | faker2081/pikachu2 | bec83750a5ff3c7b5a26662000517df0f608c1c1 | 4f06d47c7bf79eb4e5a22648e088b3296dad3b2d | refs/heads/main | 2023-09-02T00:28:41.723277 | 2021-11-17T11:15:44 | 2021-11-17T11:15:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,067 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# ==============================================================================
# \file loss.py
# \author chenghuige
# \date 2021-01-09 17:51:33.472128
# \Description
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import os
import tensorflow as tf
import melt as mt
from .config import *
def loss_fn(y_true, y_pred, x, model):
  """Binary cross-entropy loss from logits, reduced via melt's reduce_over.

  `x` and `model` are unused here but kept to match the loss signature
  expected by the surrounding framework.
  """
  pred = y_pred
  pred = tf.cast(pred, tf.float32)
  # Per-example losses (no reduction) so the reduction strategy is melt's
  loss_func = tf.keras.losses.BinaryCrossentropy(from_logits=True, reduction=tf.keras.losses.Reduction.NONE)
  loss = loss_func(y_true, pred)
  loss = mt.reduce_over(loss)
  return loss
def get_loss(model=None):
  """Return the loss callable to train with.

  Fix: the original called ``model.get_loss()`` unconditionally, so the
  documented default ``model=None`` crashed with AttributeError.  With no
  model we now fall back to the module-level ``loss_fn``.
  """
  if model is None:
    return loss_fn
  return model.get_loss()
| [
"chenghuige@gmail.com"
] | chenghuige@gmail.com |
be00130b2738c88fe5d3877bc9bc0607dd24e5a1 | 8220a7bf1189002b9e59259c971afab6e2b46948 | /bullet.py | 613ae21dcf8701c5595e7f379bf3fae74970b85a | [] | no_license | YowFung/CodeWarGame | 1003b964dbee89afe468cfe6c1fba91573aeed17 | a72b83cf5b62f44dbd5efa57372a6d9ded797cda | refs/heads/master | 2020-06-06T07:52:02.045936 | 2019-06-20T01:10:48 | 2019-06-20T01:10:48 | 192,683,032 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,954 | py | import pygame
# 定义常量
DIRECTION_UPWARD = 0 # 方向:向上发射
DIRECTION_DOWNWARD = 1 # 方向:向下发射
LAUNCHER_HERO = 0 # 发射者:主角
LAUNCHER_ENEMY = 1 # 发射者:敌机
class Bullet(object):
    """
    Bullet sprite: movement, drawing, and collision handling for shots
    fired by either the hero or an enemy plane.
    """
    def __init__(self, config, game, default_pos, launcher, direction, bullet_type=0):
        """
        Constructor: initialise the bullet.
        :param config: game configuration object
        :param game: game object
        :param default_pos: initial bullet position
        :param launcher: who fired the bullet (hero or enemy)
        :param direction: flight direction (up or down)
        :param bullet_type: bullet sprite variant
        """
        self.config = config
        self.game = game
        # Bullet state
        self.pos = default_pos  # current position
        self.launcher = launcher  # who fired it
        self.direction = direction  # flight direction
        self.alive = True  # still in play?
        self.acc = 5  # flight acceleration
        self.speed = 10  # current flight speed
        # Load the sprite image (hero bullets use one image, enemy
        # bullets are picked by bullet_type)
        img_url = config.IMG_BULLET_HERO if launcher == LAUNCHER_HERO else config.IMG_BULLET_ENEMIES[bullet_type]
        self.img = pygame.image.load(img_url)
        # Cache the image size
        self.width = self.img.get_rect()[2]
        self.height = self.img.get_rect()[3]
    def show(self):
        """
        Draw the bullet centred on its position.
        """
        x = self.pos[0] - self.width/2
        y = self.pos[1] - self.height/2
        self.game.screen.blit(self.img, (x, y))
    def fly(self):
        """
        Advance the bullet by one frame.
        """
        if self.alive == False:
            return
        # Update the flight speed
        self.speed += self.acc
        # Flying upwards
        if self.direction == DIRECTION_UPWARD:
            self.pos[1] -= self.speed
        # Flying downwards
        elif self.direction == DIRECTION_DOWNWARD:
            self.pos[1] += self.speed
        # Boundary and collision detection
        if self.detect():
            # Mark this bullet as spent
            self.alive = False
        else:
            self.show() # redraw at the new position
    def detect(self):
        """
        Collision and boundary detection; returns True when the bullet
        should be removed (left the screen or hit a target).
        """
        # Check for leaving the screen
        if self.direction == DIRECTION_UPWARD and self.pos[1] <= 0 - self.height/2:
            # Crossed the top edge
            return True
        elif self.direction == DIRECTION_DOWNWARD and self.pos[1] >= self.config.SCREEN_HEIGHT - self.height/2:
            # Crossed the bottom edge
            return True
        # Check for collisions: hero bullets test against all enemies,
        # enemy bullets test against the hero only
        enemies = self.game.enemies if self.launcher == LAUNCHER_HERO else [self.game.hero]
        for enemy in enemies:
            x_range = [enemy.pos[0]-enemy.width/2-self.width, enemy.pos[0]+enemy.width/2+self.width]
            y_range1 = [0-self.speed, enemy.pos[1]+enemy.height/2]
            y_range2 = [enemy.pos[1]-enemy.height/2, self.config.SCREEN_HEIGHT+self.speed]
            y_range = y_range1 if self.launcher == LAUNCHER_HERO else y_range2
            if x_range[0] < self.pos[0] < x_range[1] and y_range[0] < self.pos[1] < y_range[1]:
                # Collision happened
                if self.launcher == LAUNCHER_HERO:
                    # Blow up the target plane
                    enemy.boom()
                    # Score the kill
                    self.game.score += self.config.GET_SCORE
                    self.game.kill_count += 1
                else:
                    # The hero takes damage
                    enemy.bruise()
                return True
        # No collision and still on screen
        return False
| [
"yowfung@outlook.com"
] | yowfung@outlook.com |
620f04f48552c6843b61711ebe8a8794bf4ede01 | 5d386f40d141dbc9f536d1fc94322710ea0f0234 | /Practice Exams/Practice Exam 1/practice_exam_1.py | 5928c669cf8a499c58be55a16de13d8e8b385deb | [] | no_license | oescob16/CS2302-Data-Structures | e07da1313d14fa8708e765248d4ad27543c33e89 | c89b5e354887df0e998b295b0101a5fdf5ddf818 | refs/heads/master | 2022-07-15T03:19:26.629355 | 2020-05-19T23:21:54 | 2020-05-19T23:21:54 | 264,374,268 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,996 | py | import numpy as np
import matplotlib.pyplot as plt
import math
import singly_linked_list as sll
def set_drawing_parameters_and_show(ax):
    """Apply shared axes settings (equal aspect, axis and grid on) and show."""
    show_axis = 'on'
    # NOTE(review): plt.grid() receives the *string* 'True', which merely
    # happens to be truthy -- a real bool would be clearer.
    show_grid = 'True'
    ax.set_aspect(1.0)
    ax.axis(show_axis)
    plt.grid(show_grid)
    plt.show()
def nested_squares(ax,n,x0,y0,size):
    """Recursively draw squares of half-width `size` centred at (x0, y0):
    each level draws one square, then recurses right and left with half
    the size, for n levels in total."""
    if n > 0:
        # Closed square outline: 5 points, last repeats the first
        x = [x0-size,x0-size,x0+size,x0+size,x0-size]
        y = [y0-size,y0+size,y0+size,y0-size,y0-size]
        ax.plot(x,y,linewidth=2,color='b')
        # Two children shifted right/left by `size`, half the size
        nested_squares(ax,n-1,x0+size,y0,size/2)
        nested_squares(ax,n-1,x0-size,y0,size/2)
def list_n_to_0(n):
    """Return the list [n, n-1, ..., 1, 0].

    Rewritten iteratively: the recursive original hit Python's recursion
    limit for large n and recursed forever for negative n.
    """
    return list(range(n, -1, -1))
def sum_first_n(L, n):
    """Sum the data of the first n nodes of linked list L (or of the
    whole list when it has fewer than n nodes)."""
    total = 0
    taken = 0
    node = L.head
    # Walk at most n nodes from the head
    while node is not None and taken < n:
        total += node.data
        taken += 1
        node = node.next
    return total
def sum_until(L, i):
    """Sum node data from the head of L up to (but excluding) the first
    node whose data equals i; sums the whole list if i never appears."""
    running = 0
    node = L.head
    while node is not None:
        if node.data == i:
            # Stop before including the sentinel value
            return running
        running += node.data
        node = node.next
    return running
def next_to_last(L):
    """Return the data of the second-to-last node of L, or None when the
    list has fewer than two nodes.

    Fix: the original dereferenced ``t.next.next`` without checking
    ``t.next``, raising AttributeError on a single-node list.
    """
    t = L.head
    while t is not None and t.next is not None:
        if t.next.next is None:
            return t.data
        t = t.next
    return None
if __name__ == "__main__":
    plt.close("all") # Close all figures
    # Draw the nested-squares fractal at two recursion depths
    fig, ax = plt.subplots()
    nested_squares(ax,2,0,0,100)
    set_drawing_parameters_and_show(ax)
    fig2, ax2 = plt.subplots()
    nested_squares(ax2,5,0,0,100)
    set_drawing_parameters_and_show(ax2)
    print(list_n_to_0(0)) # [0]
    print(list_n_to_0(5)) # [5, 4, 3, 2, 1, 0]
    # Exercise the linked-list helpers on 3 -> 6 -> 1 -> 2 -> 5
    L= sll.List()
    L.extend([3,6,1,2,5])
    L.draw()
    print(sum_first_n(L,4)) # 12
    print(sum_first_n(L,10)) # 17
    print(sum_until(L,3)) # 0
    print(sum_until(L,1)) # 9
    print(sum_until(L,10)) # 17
    L1= sll.List()
    print(next_to_last(L1)) # None
    print(next_to_last(L)) # 2
"noreply@github.com"
] | noreply@github.com |
a511646d6604a9c524b484c4ff7546e7ca14116e | bc167f434158921bcf2c678155c5cdfec1c9b0c9 | /PI_code/simulator/behaviourGeneration/firstGenScripts_preyHunter/behav457.py | 245f22a2fc444ac2254832c1c88ff8828465938b | [] | no_license | s0217391/DifferentProjects | 6450efc89c64ecd21b86c705737e89e5c69433a6 | 7f4da153660817b6cbf72d2e823aa29c0c2f95a9 | refs/heads/master | 2021-01-17T02:58:46.219240 | 2015-05-26T22:45:46 | 2015-05-26T22:45:46 | 34,995,164 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,209 | py | #!/usr/bin/python
import sys
def compute(prey):
	"""Auto-generated (first-generation) hunter behaviour script.

	Takes the prey position ``prey = [x, y]`` and returns a two-element
	list.  The intermediate arithmetic is machine-generated, so the
	individual steps carry no human-readable meaning; only ``prey[1]``
	and the final ``temp5`` reach the return value -- the trailing
	``temp0``/``temp1``/``temp2``/``temp4`` assignments are dead stores.
	Every division/modulo is guarded by a ``!= 0`` check.
	"""
	temp0 = -1 * prey[1]
	if temp0 != 0:
		temp1 = temp0 / temp0
	else:
		temp1 = temp0
	temp0 = -1 * prey[1]
	temp1 = temp0 * prey[0]
	if temp0 != 0:
		temp2 = temp1 % temp0
	else:
		temp2 = temp0
	temp3 = temp1 + prey[1]
	if temp2 > temp0:
		if temp3 != 0:
			temp3 = temp3 % temp3
		else:
			temp3 = temp3
	else:
		if temp2 > temp0:
			if temp3 != 0:
				temp3 = prey[0] % temp3
			else:
				temp3 = temp3
		else:
			temp3 = temp3 * prey[0]
	if temp3 != 0:
		temp1 = temp1 / temp3
	else:
		temp1 = temp3
	if prey[1] > temp3:
		temp1 = temp2 * temp2
	else:
		temp1 = prey[1] + prey[1]
	if temp0 != 0:
		temp1 = prey[1] / temp0
	else:
		temp1 = temp0
	temp0 = prey[0] + temp0
	temp2 = prey[0] + temp3
	temp4 = -1 * prey[1]
	if temp3 != 0:
		temp0 = temp1 % temp3
	else:
		temp0 = temp3
	temp4 = prey[0] + temp2
	temp3 = prey[1] + temp3
	temp1 = max(prey[1], temp3)
	temp2 = temp2 + prey[1]
	if temp1 > prey[1]:
		if prey[0] > prey[0]:
			temp0 = -1 * temp1
		else:
			temp0 = temp1 + prey[0]
	else:
		if prey[1] != 0:
			temp0 = temp0 / prey[1]
		else:
			temp0 = prey[1]
	if temp3 != 0:
		temp5 = prey[1] / temp3
	else:
		temp5 = temp3
	return [prey[1], temp5]
| [
"i7674211@bournemouth.ac.uk"
] | i7674211@bournemouth.ac.uk |
6bea768e59c3e54463a70a4fb271ffb0ecef73fb | 67f7b3c56261a36c5d54cd895636f6778350012c | /singleton/test_singleton.py | 06cbf8b8c0e2fb3e981971299235ae0bae438d92 | [] | no_license | Va1da2/practical-python-design-patterns | 3b242314b3672d8d3295861ed72e60ac8824400b | db4baf1e600464da83472976c998dcabab9d2222 | refs/heads/master | 2020-03-28T02:20:21.728496 | 2018-09-05T18:12:43 | 2018-09-05T18:12:43 | 147,563,578 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 286 | py | "Test singleton example object."
from singleton_object import SingletonObject
obj1 = SingletonObject()
obj1.val = "Object value 1"
print("print obj1: ", obj1)
print("---")
obj2 = SingletonObject()
obj2.value = "Object value 2"
print("print obj1: ", obj1)
print("print obj2: ", obj2) | [
"vaidas.armonas@gmail.com"
] | vaidas.armonas@gmail.com |
2bb133762300e9265d25a7e23a5f22a3d5955cd3 | 1d28ff527b9dab5aad32f15a0581f194b1ad686c | /reverseInt.py | 075d9e661a5cd4533454faad95f4a0cffad4ea02 | [] | no_license | omegaman123/ProblemSolving | 605262fcd4947ced0ba534bdde2449bf7f8c6a76 | 210f10da327aca8a70ed6432ff5a027251d4e512 | refs/heads/main | 2022-12-29T05:24:16.422141 | 2020-10-05T01:19:43 | 2020-10-05T01:19:43 | 299,765,906 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 892 | py | # This is a sample Python script.
# Press Shift+F10 to execute it or replace it with your code.
# Press Double Shift to search everywhere for classes, files, tool windows, actions, and settings.
idx = 0
def print_hi(name):
# Use a breakpoint in the code line below to debug your script.
print(f'Hi, {name}') # Press Ctrl+F8 to toggle the breakpoint.
# Reverse the digits of an integer removing trailing 0s from the front, keeping the sign
def reverse(x):
reversed_num = ''
sign = 1
if int(x) < 0:
sign = -1
for s in reversed(x):
if s != '-' and int(s) > 0:
reversed_num += s
return sign * int(reversed_num)
# Press the green button in the gutter to run the script.
if __name__ == '__main__':
test = (input("enter a number to reverse"))
print(reverse(test))
# See PyCharm help at https://www.jetbrains.com/help/pycharm/
| [
"35475840+omegaman123@users.noreply.github.com"
] | 35475840+omegaman123@users.noreply.github.com |
e6650002cc5f0ae2c46011989e4a6b8137cbf6bb | e91a96b50b15a4146ba2ecce8f2e8b132936be75 | /practise_20180723/test_class.py | 294679d78eeb8d276486f5d580c5c43deed73175 | [] | no_license | Ngwind/PycharmProjects | a7f1a8cbd8019ab51581b648a3182016362836d5 | d21873f2c956cc718f59f90e86cde4ef0c5c47ee | refs/heads/master | 2020-03-24T02:41:15.166778 | 2018-12-13T16:45:04 | 2018-12-13T16:45:04 | 142,381,694 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,063 | py |
class Person(object):
sex = "m"
age = 18
__birthday = "2018-09-01"
def __init__(self, name: "名字") -> "Person的构造函数":
print("init is called")
self.name = name
self.food_list = []
# 类方法,第一个参数是self
def eat(self, food):
self.food_list.append(food)
print("I have eat :", self.food_list)
def get_birthday(self):
return self.__birthday
# 静态方法staticmethod,第一个参数不用使用self,定义时要用@staticmethod修饰
@staticmethod
def sing(song):
print(song)
# 类方法classmethod,第一个参数不用self而是使用cls,可以直接被类调用,使用是要用@classmethod修饰
@classmethod
def run(cls, distance):
print("我跑了", distance, "m")
# 属性方法property,把函数变成一个静态属性,调用时不需要(),一般用在不注重过程,只关注结果的情况
@property
def get_age(self):
return print(self.age)
# 其他的一些内置方法:__new__当新建实例时调用.__str__当输出实例时调用,__del__当销毁实例时调用
def __new__(cls, *args, **kwargs):
print("new is called")
return super(Person, cls).__new__(cls)
def __del__(self):
print("del is called")
def __str__(self):
print("str is called")
return self.name
#a = Person(input("输入名字")) # demo
#print(a.sex)
#
## 输出类变量性别
#print("使用get函数访问私有变量birthday:", a.get_birthday())
#print("使用类名前缀访问:", a._Person__birthday)
#
## 输出私有变量,要使用get函数获取,不能直接访问变量 。但是其实能够使用"_class"+私有变量名访问
#for i in range(3):
# a.eat(input("输入要吃的食物")) # 进食
#
## 对象的静态方法演示
#a.sing("生日歌")
#
## 类的类方法演示
#Person.run(100)
#
## 属性方法演示
#a.get_age
#
## 其他内置函数调用演示
#b = Person("xiao_hong")
#print(b)
#del b
#
| [
"137476684@qq.com"
] | 137476684@qq.com |
efaa1812e24c0b64449e7ed4819b46ca098e9ada | 5a1b4527f06262065a992c503cd58097c4deb21c | /register/views.py | c95e4237f3579eede5d3d540f316264c742fba05 | [] | no_license | psachin/aakash_remote_technician | 3b922f3c87f3c7cdfada888543018b5cbbdc53f8 | aa686af8510357ddc2a15b271d1f656356a02df1 | refs/heads/master | 2020-05-17T22:07:15.914014 | 2013-08-07T07:32:37 | 2013-08-07T07:32:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,895 | py | from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.views import logout_then_login
from django.shortcuts import render_to_response, HttpResponse, HttpResponseRedirect, get_object_or_404
from django.template import RequestContext
from django.contrib.auth.models import User, Group
from django.contrib.auth.decorators import login_required
from models import Profile, DeviceUser, Technician, Complaint
from forms import RegisterDeviceUser, LogComplaint, RegisterTechnician
from logged_in import get_all_logged_in_users
def index(request):
return HttpResponseRedirect('/login')
def register(request):
"""
User registration form
"""
# is user is already registered
'''
if request.user.is_authenticated():
return HttpResponseRedirect('/profiles/home')
'''
# user is submitting the form
if request.method == 'POST':
form = RegisterDeviceUser(request.POST)
#profile = request.user.get_profile()
if form.is_valid():
user = form.save()
if user:
# success="<html>sign_up_success</html>"
# return HttpResponse(success)
#messages.info(request, "Thanks for registering. You are now logged in.")
user = authenticate(username=request.POST['username'],
password=request.POST['password1'])
login(request, user)
return HttpResponseRedirect('/profiles/home')
else:
# user is NOT submitting the from, show him a blank form
form = RegisterDeviceUser()
context = {'form':form}
return render_to_response('sign_up.html',
context,
context_instance=RequestContext(request))
def register_technician(request):
"""
Technician registration form
"""
# is user is already registered
'''
if request.user.is_authenticated():
return HttpResponseRedirect('/profiles/home')
'''
# user is submitting the form
if request.method == 'POST':
form = RegisterTechnician(request.POST)
#profile = request.user.get_profile()
if form.is_valid():
user = form.save()
if user:
# success="<html>sign_up_success</html>"
# return HttpResponse(success)
#messages.info(request, "Thanks for registering. You are now logged in.")
user = authenticate(username=request.POST['username'],
password=request.POST['password1'])
login(request, user)
return HttpResponseRedirect('/profiles/home')
else:
# user is NOT submitting the from, show him a blank form
form = RegisterTechnician()
context = {'form':form}
return render_to_response('sign_up.html',
context,
context_instance=RequestContext(request))
#def list(request, pID):
def list(request):
users = User.objects.all()
# user_profiles = users.get(id=pID)
# user_profile = user_profiles.get_profile()
context = {'users':users,
# 'user_profile':user_profile,
}
return render_to_response('list.html',
context,
context_instance=RequestContext(request))
def login_view(request):
username = request.POST['username']
password = request.POST['password']
user = authenticate(username=username, password=password)
if user is not None:
if user.is_active:
login(request, user)
login_success="<html><h1 align=center>Welcome user</h1></html>"
return HttpResponse(login_success)
else:
login_disabled="<html>Your login is disabled</html>"
return HttpResponse(login_disabled)
else:
login_error="<html>Username or password mismatch</html>"
return HttpResponse(login_error)
@login_required
def user_home(request):
users = User.objects.all()
# logged_in_users = request.user
# if logged_in_users.is_authenticated():
# logged_in_user = logged_in_users
# get profile for user = models.OneToOneField(User)
'''
# print request.user
user_obj = users.get(username=request.user)
# print user_obj
# if DeviceUser.objects.exists():
user_profile = Profile.objects.get(user=user_obj)
# print user_profile
dev_obj = DeviceUser.objects.get(user=user_profile)
# print dev_obj
'''
user_group = request.user.groups.values_list('name',flat=True)
if not user_group:
# if user don't belong to any group(ex: admin)
print "list is empty"
obj = None
else:
if user_group[0] == "aakash_user":
# print "%s belongs to %s group" %(request.user,user_group[0])
# print request.user
user_obj = users.get(username=request.user)
# print user_obj
# if DeviceUser.objects.exists():
user_profile = Profile.objects.get(user=user_obj)
# print user_profile
obj = DeviceUser.objects.get(user=user_profile)
# print obj
elif user_group[0] == "technician":
# print "%s belongs to %s group" %(request.user,user_group[0])
# print request.user
user_obj = users.get(username=request.user)
# print user_obj
# if DeviceUser.objects.exists():
user_profile = Profile.objects.get(user=user_obj)
# print user_profile
obj = Technician.objects.get(user=user_profile)
# print obj
context = {'users':users,
'dev_obj':obj,
'user_group':user_group,
}
return render_to_response('registration/profile.html',
context,
context_instance=RequestContext(request))
#def complaint_form(request, username):
@login_required
def complaint_form(request):
"""
show complaint box after user logs in
"""
username = User.objects.get(username=request.user)
user_profile = Profile.objects.get(user=username)
deviceuser = DeviceUser.objects.get(user=user_profile)
complaint_count = deviceuser.complaint_set.count()
if request.method == 'POST':
#print "Username: %s" %(username)
form = LogComplaint(request.POST)
#profile = request.user.get_profile()
if form.is_valid():
user = form.save(request.user)
if user:
success="<html>Complaint registered</html>"
return HttpResponse(success)
else:
# print request.user
form = LogComplaint()
context = {
'form':form,
'count':complaint_count,
}
return render_to_response('complaint.html',
context,
context_instance=RequestContext(request))
@login_required
def handle_complaint(request):
#print request.user
technician = User.objects.filter(username=request.user)
#print technician.values('id')
technician_complaint = Complaint.objects.filter(technician_id=technician.values_list('id'))
#print technician_complaint
try:
aakash_users = Group.objects.get(name='aakash_user')
# print aakash_users.user_set.count()
all_users = aakash_users.user_set.all()
obj = []
for ur in all_users:
user_obj = User.objects.get(username=ur)
user_profile = Profile.objects.get(user=user_obj)
obj.append(DeviceUser.objects.get(user=user_profile))
# print "%s: %s" %(ur,obj.complaint_set.count())
except Group.DoesNotExist:
all_users = None
obj = []
context = {
'obj':obj,
'all_users':all_users,
'technician_complaint':technician_complaint,
}
return render_to_response('handle_complaints.html',
context,
context_instance=RequestContext(request))
@login_required
def user_complaints(request, username):
"""
list compaints of a user
"""
user_group = request.user.groups.values_list('name',flat=True)[0]
user_obj = User.objects.get(username=username)
user_profile = Profile.objects.get(user=user_obj)
deviceuser_obj = DeviceUser.objects.get(user=user_profile)
#technician_group = Group.objects.get(name='technician')
complaint_obj = Complaint.objects.all()
technician_obj = Technician.objects.all()
# initial setting: if no technician has registered
try:
technician_group = Group.objects.get(name='technician')
except:
technician_group = None
print "technician group dont exist"
#print obj.complaint_set.count()
# print username
# all_users = User.objects.all()
context = {
'username':username,
'obj':deviceuser_obj,
'technician_obj':technician_obj,
'user_group':user_group,
'technician_group':technician_group,
'complaint_obj':complaint_obj,
# 'all_users':all_users,
}
return render_to_response('user_complaints.html',
context,
context_instance=RequestContext(request))
def render_logged_in_user_list(request):
return render_to_response('logged_in.html',
{'users':get_all_logged_in_users,
},
context_instance=RequestContext(request))
def logout_view(request):
logout_then_login(request)
return HttpResponseRedirect('/')
def assign(request, user_id, complaint_id, technician_id):
"""
take un-assigned complaint from user
required parameters: user_id, complaint_id and technician_id
"""
'''
first = Complaint.objects.filter(user_id=1,id=1)
first.update(technician=2)
'''
print user_id
print complaint_id
print technician_id
user_complaint = Complaint.objects.filter(user_id=user_id,id=complaint_id)
user_complaint.update(technician=technician_id)
'''
user_obj = User.objects.get(username=username)
user_profile = Profile.objects.get(user=user_obj)
obj = DeviceUser.objects.get(user=user_profile)
'''
return HttpResponseRedirect('/profiles/home')
@login_required
def shell(request, user_name, complaint_id, technician_id):
"""
"""
print user_name
print complaint_id
print technician_id
context = {'user_name':user_name}
return render_to_response("shell.html", context)
def jq(request):
"""
jq tesing
"""
return render_to_response("jq.html")
| [
"iclcoolster@gmail.com"
] | iclcoolster@gmail.com |
5930e9a61671605bbfdc30d70204e806177bb540 | aa9d2b3e754954a2fdb852e5eca352cc5546774b | /Logistic Regression/l1_reg.py | b97eac70a048ae94f87718f67f9a5a9b60c1e477 | [] | no_license | yash-sinha/deeplearningudemy | 499ed12ad1156ea7bf2086e7d530d30c84ab3bce | 5b0af9dad4aa64992f42a300fb5975fccb7e62b8 | refs/heads/master | 2021-01-21T18:43:50.330552 | 2017-06-04T23:47:25 | 2017-06-04T23:47:25 | 92,077,589 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 728 | py | import numpy as np
import matplotlib.pyplot as plt
def sigmoid(z):
return 1/(1 + np.exp(-z))
N = 50
D = 50
X = (np.random.random((N,D))-0.5)*10 #Uniformly distributed btwn -5 and +5
true_w = np.array([1,0.5,-0.5]+ [0]*(D-3))
Y = np.round(sigmoid(X.dot(true_w) + np.random.randn(N)*0.5))
costs =[]
w = np.random.randn(D)/ np.sqrt(D)
learning_rate = 0.001
l1_penalty = 3.0
for t in range(5000):
Yhat = sigmoid(X.dot(w))
delta = Yhat -Y
w =w -learning_rate*(X.T.dot(delta) + l1_penalty*np.sign(w))
cost = -(Y*np.log(Yhat) + (1-Y)*np.log(1-Yhat)).mean() + l1_penalty*np.abs(w).mean()
costs.append(cost)
plt.plot(costs)
plt.show()
plt.plot(true_w, label = 'true w')
plt.plot(w, label='w map')
plt.legend()
plt.show() | [
"yashsinha08@gmail.com"
] | yashsinha08@gmail.com |
14e1a228d0680642f41d17ebeaa1552a75c5e0c5 | 1aa0ddb70fb893a6f958841b0a606cdcac954e18 | /settings/forms/batches.py | a1931328efe9b17262c46d238776331d3278fa66 | [] | no_license | shitalluitel/LibraryManagementSystem | 3042860a70096bf3821299fb10ca35958e680f62 | eecd909b272ad7e524a031c9142d22a356141fda | refs/heads/master | 2023-02-17T06:42:19.044516 | 2021-01-10T14:52:18 | 2021-01-10T14:52:18 | 166,533,846 | 2 | 1 | null | 2023-02-07T22:14:35 | 2019-01-19T10:22:41 | HTML | UTF-8 | Python | false | false | 1,003 | py | from django import forms
from django.forms import ModelMultipleChoiceField
from settings.models import Batch, CourseBatch, Course
class BatchForm(forms.ModelForm):
class Meta:
model = Batch
fields = ['name', 'code']
widgets = {
'name': forms.TextInput(attrs={'class': 'form-control'}),
'code': forms.TextInput(attrs={'class': 'form-control'}),
}
labels = {
'name': 'Batch Name',
'code': 'Batch Code',
}
class CourseBatchCreateForm(forms.Form):
course = forms.ModelMultipleChoiceField(
queryset=Course.objects.all(),
label="Choose courses for this batch."
)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
#
# self.fields['course'] = ModelMultipleChoiceField(queryset=Course.objects.all())
self.fields['course'].widget.attrs['class'] = 'form-control'
self.fields['course'].empty_label = "Choose a countries"
| [
"ctalluitel@gmail.com"
] | ctalluitel@gmail.com |
391b6c0bf76d9ad8fdb00474e8986f20c727cd88 | 15c00a40dca1e08c3acce5d782225ac27c34b3c4 | /data/data_utils.py | 7bfb14a50249edd289ffb1d10cfe522a8cbc0fc1 | [
"MIT"
] | permissive | kamisoel/DigiGait | 826eb9420bbb0513386946363995e6bda0831c7d | 80b6fceeb3b426b9a551358bf34e246103d6c421 | refs/heads/main | 2023-07-21T14:06:39.566768 | 2021-09-07T07:53:30 | 2021-09-07T07:53:30 | 368,515,967 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,991 | py | # Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import numpy as np
mpii_metadata = {
'layout_name': 'mpii',
'num_joints': 16,
'keypoints_symmetry': [
[3, 4, 5, 13, 14, 15],
[0, 1, 2, 10, 11, 12],
]
}
coco_metadata = {
'layout_name': 'coco',
'num_joints': 17,
'keypoints_symmetry': [
[1, 3, 5, 7, 9, 11, 13, 15],
[2, 4, 6, 8, 10, 12, 14, 16],
]
}
h36m_metadata = {
'layout_name': 'h36m',
'num_joints': 17,
'keypoints_symmetry': [
[4, 5, 6, 11, 12, 13],
[1, 2, 3, 14, 15, 16],
]
}
humaneva15_metadata = {
'layout_name': 'humaneva15',
'num_joints': 15,
'keypoints_symmetry': [
[2, 3, 4, 8, 9, 10],
[5, 6, 7, 11, 12, 13]
]
}
humaneva20_metadata = {
'layout_name': 'humaneva20',
'num_joints': 20,
'keypoints_symmetry': [
[3, 4, 5, 6, 11, 12, 13, 14],
[7, 8, 9, 10, 15, 16, 17, 18]
]
}
mediapipe_metadata = {
'layout_name': 'mediapipe',
'num_joints': 33,
'keypoints_symmetry': [
[1, 2, 3, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31],
[4, 5, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32]
]
}
def suggest_metadata(name):
names = []
for metadata in [mpii_metadata, coco_metadata, h36m_metadata, humaneva15_metadata, humaneva20_metadata, mediapipe_metadata]:
if metadata['layout_name'] in name:
return metadata
names.append(metadata['layout_name'])
raise KeyError('Cannot infer keypoint layout from name "{}". Tried {}.'.format(name, names))
def import_detectron_poses(path):
# Latin1 encoding because Detectron runs on Python 2.7
data = np.load(path, encoding='latin1')
kp = data['keypoints']
bb = data['boxes']
results = []
for i in range(len(bb)):
if len(bb[i][1]) == 0:
assert i > 0
# Use last pose in case of detection failure
results.append(results[-1])
continue
best_match = np.argmax(bb[i][1][:, 4])
keypoints = kp[i][1][best_match].T.copy()
results.append(keypoints)
results = np.array(results)
return results[:, :, 4:6] # Soft-argmax
#return results[:, :, [0, 1, 3]] # Argmax + score
def import_cpn_poses(path):
data = np.load(path)
kp = data['keypoints']
return kp[:, :, :2]
def import_sh_poses(path):
import h5py
with h5py.File(path) as hf:
positions = hf['poses'].value
return positions.astype('float32')
def suggest_pose_importer(name):
if 'detectron' in name:
return import_detectron_poses
if 'cpn' in name:
return import_cpn_poses
if 'sh' in name:
return import_sh_poses
raise KeyError('Cannot infer keypoint format from name "{}". Tried detectron, cpn, sh.'.format(name))
| [
"Leo Simak"
] | Leo Simak |
7c8e4675d0711026385f5328533e7c8eeb8fad4d | 56db1ccba3f8976b2df6d97c99e5aae7108149a1 | /spending/main/admin.py | 2c410651c1a51abbb5f05621793ae519229eae80 | [] | no_license | peterbe/django-spending | 4d60b7a77250fc58eb7a397e388fd22fe73576de | ab2ab1730fbdd999e5ef8d75575795fa3a48d2b9 | refs/heads/master | 2021-01-10T05:32:00.005607 | 2013-07-06T05:41:41 | 2013-07-06T05:41:41 | 8,384,613 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 593 | py | from django.contrib import admin
from spending.main.models import Household, Expense, Category
class HouseholdAdmin(admin.ModelAdmin):
list_display = ('name', 'no_users')
def no_users(self, obj):
return obj.users.all().count()
no_users.short_description = '# users'
class ExpenseAdmin(admin.ModelAdmin):
list_display = ('amount', 'date', 'user', 'category')
class CategoryAdmin(admin.ModelAdmin):
list_display = ('name',)
admin.site.register(Household, HouseholdAdmin)
admin.site.register(Expense, ExpenseAdmin)
admin.site.register(Category, CategoryAdmin)
| [
"mail@peterbe.com"
] | mail@peterbe.com |
11ef2cc4fb52774a2fb7d480df6720fc9c79afd9 | 9b20743ec6cd28d749a4323dcbadb1a0cffb281b | /02_Statistical_Methods_for_Machine_Learning/14/01_tolerance.py | 4a81741857f5d7f81dd597a2d99ba09c2f2bae3b | [] | no_license | jggrimesdc-zz/MachineLearningExercises | 6e1c7e1f95399e69bba95cdfe17c4f8d8c90d178 | ee265f1c6029c91daff172b3e7c1a96177646bc5 | refs/heads/master | 2023-03-07T19:30:26.691659 | 2021-02-19T08:00:49 | 2021-02-19T08:00:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,055 | py | # parametric tolerance interval
from numpy import mean
from numpy import sqrt
from numpy.random import randn
from numpy.random import seed
from scipy.stats import chi2
from scipy.stats import norm
# seed the random number generator
seed(1)
# generate dataset
data = 5 * randn(100) + 50
# specify degrees of freedom
n = len(data)
dof = n - 1
# specify data coverage
prop = 0.95
prop_inv = (1.0 - prop) / 2.0
gauss_critical = norm.ppf(prop_inv)
print('Gaussian critical value: %.3f (coverage=%d%%)' % (gauss_critical, prop * 100))
# specify confidence
prob = 0.99
prop_inv = 1.0 - prob
chi_critical = chi2.ppf(prop_inv, dof)
print('Chi-Squared critical value: %.3f (prob=%d%%, dof=%d)' % (chi_critical, prob * 100, dof))
# tolerance
interval = sqrt((dof * (1 + (1 / n)) * gauss_critical ** 2) / chi_critical)
print('Tolerance Interval: %.3f' % interval)
# summarize
data_mean = mean(data)
lower, upper = data_mean - interval, data_mean + interval
print('%.2f to %.2f covers %d%% of data with a confidence of %d%%' % (lower, upper, prop * 100, prob * 100))
| [
"jgrimes@jgrimes.tech"
] | jgrimes@jgrimes.tech |
85d95bdfbd59a153f246c62bca01d14bff2342be | 8382f4ec907950a8cfc618d3cceb97b0d00ab478 | /6kyu/encryptThis.py | 98249bc9ece7063bffc8fcf98db0cc716a54aaba | [] | no_license | naistangz/codewars_challenges | 80788f3869a4283c89ee2a05f19142b18ba4820c | 372bbb6f1668b378183a169206526b52315107a8 | refs/heads/master | 2023-04-14T11:52:31.412554 | 2021-04-25T09:39:03 | 2021-04-25T09:39:03 | 299,615,380 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 443 | py | def encrypt_this(text):
words = text.split(" ")
res = []
for i in words:
new = ""
temp = ""
for j in range(len(i)):
if j == 0:
new += str(ord(i[j]))
elif j == 1:
temp = i[j]
new += i[-1]
elif j == len(i) - 1:
new += temp
else:
new += i[j]
res.append(new)
return " ".join(list(filter(None, res))) | [
"a6anaistang@hotmail.co.uk"
] | a6anaistang@hotmail.co.uk |
a02e5e47a56fdcbea48cd9e05aa8c477e211e3d3 | cc6e92dc9e91d8c9339a7585d542661b31c38489 | /src/topic_model.py | df9dd6dedefd4d17d1055f1e655b3d4c83074220 | [] | no_license | faizakk/topic_models | a9ead6af914d55c73ba34afefa4d3ca499bca29f | be072d1b6e000679e82b468a1f2ed7397240b161 | refs/heads/main | 2023-09-02T16:17:46.307141 | 2021-11-01T03:16:59 | 2021-11-01T03:16:59 | 423,319,743 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,311 | py | from sklearn.decomposition import LatentDirichletAllocation
import numpy as np
class TopicModels:
"""
This class applies with topic modeling algorithm
methods:
topic_modelling: Applies topic modelling and does visualization
"""
@staticmethod
def topic_modelling(
doc_ready, vocab, tm_method, num_topic, num_top_words, topic_vis=True
):
# calculate_perplexity=False,
# calculate_coherence=True):
"""
Parametrs:
doc_ready: Cleaned and vectorized text
vocab: Vaculary ,
tm_method: the topic method to be applied,
num_topic: Number of topics ,
num_top_words: Number of top words to be displayed for each topic,
topic_vis: Variable if true will show the visualization
Returns:
Topic along with topic words
"""
if tm_method == "lda":
lda = LatentDirichletAllocation(n_components=num_topic, random_state=1)
id_topic = lda.fit_transform(doc_ready)
topic_words = {}
for topic, comp in enumerate(lda.components_):
word_ids = np.argsort(comp)[::-1][:num_top_words]
topic_words[topic] = [vocab[i] for i in word_ids]
return topic_words
| [
"faizakhankhattak@gmail.com"
] | faizakhankhattak@gmail.com |
36427016924bc734286ed9ff39b3812b2d38b21a | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2367/60699/251530.py | e61d0d3cb0da03640cd9f10d895c7a604b12880b | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 167 | py | res1=1
list1=[1]
for i in range(0,30):
res1=res1*10+1
list1.append(res1)
n=int(input())
for i in list1:
if i%n==0:
print(i)
break
print(-1) | [
"1069583789@qq.com"
] | 1069583789@qq.com |
8425cd0230586cba7d321dc4706f57f721a3c5d4 | b246bdb4ae3d845bbf8dee704b8936c32211c0f5 | /Figure_1/initial_subtyping/do_tsne.py | fe059a8bd9f38cb0c2f026356b59d034111066fc | [] | no_license | KnottLab/bladder-snSeq | abfd3d77a04250622e6a28d84878e5adcd335d00 | 2e087dc745046e30c2814ab3e4c295bfa34e6820 | refs/heads/master | 2023-04-07T13:36:44.794889 | 2021-12-08T15:37:45 | 2021-12-08T15:37:45 | 323,445,511 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,966 | py | #!/usr/bin/env python
import numpy as np
import argparse
from load_data import load_data
from MulticoreTSNE import MulticoreTSNE as TSNE
try:
import cuml
CUML_FLAG=True
except:
print('[DO_TSNE] WARNING failed to import cuML. GPU accelerated TSNE will not be available.')
CUML_FLAG=False
"""
Modules have two modes: standalone from command line and pipelined
Both modes accept a preprocessed AnnData object as input.
Standalone mode writes back a AnnData with new metadata
Pipelined mode returns the AnnData object with new metadata
UMAPs with /umap-learn/cuML GPU-accelerated UMAP implementation
https://umap-learn.readthedocs.io/en/latest/
https://github.com/lmcinnes/umap
"""
def do_tsne(adata, ARGS):
latent = adata.obsm[ARGS.latent_key]
if ARGS.gpu and CUML_FLAG:
print('[DO_TSNE] Using cuML GPU-accelerated TSNE')
umap_class = cuml.UMAP
if ARGS.metric != 'euclidean':
print('[DO_TSNE] cuML TSNE requres euclidean distance metric.')
emb = cuml.TSNE(
perplexity = ARGS.perplexity,
learning_rate = ARGS.learning_rate,
early_exaggeration = ARGS.early_exaggeration,
).fit_transform(latent)
else:
print('[DO_TSNE] Using MulticoreTSNE')
emb = TSNE( perplexity = ARGS.perplexity,
metric = ARGS.metric,
verbose = False, n_jobs=ARGS.n_jobs).fit_transform(latent)
print(f'[DO_TSNE] placing embedding {emb.shape} in key {ARGS.tsne_key}')
adata.obsm[ARGS.tsne_key] = emb
print(f'[DO_TSNE] recording tSNE args')
adata.uns['tSNE_args'] = ARGS.__dict__
return adata
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('dataset', type=str)
parser.add_argument('--latent_key', default='X_scVI_vanilla', type=str,
help = 'Key in adata.obsm to use as features for tsne.')
parser.add_argument('--tsne_key', default='X_scVI_tsne_vanilla', type=str,
help = 'Key in adata.obsm to save tsne embedding.')
parser.add_argument('--gpu', action='store_true',
help = 'Whether to use GPU-accelerated tsne via RapidsAI \
and the cuML library. ')
parser.add_argument('-j', '--n_jobs', default=12, type=int,
help = 'Number of jobs for MulticoreTSNE')
parser.add_argument('--perplexity', default=20, type=int)
parser.add_argument('--learning_rate', default=200., type=float)
parser.add_argument('--n_iter', default=1000, type=int)
parser.add_argument('--metric', default='euclidean', type=str)
parser.add_argument('--early_exaggeration', default=12, type=float)
parser.add_argument('--output_adata', default=None, type=str,
help = 'Path to save.')
ARGS = parser.parse_args()
adata = load_data(ARGS.dataset)
adata = do_tsne(adata, ARGS)
if ARGS.output_adata is not None:
print(f'[DO_TSNE] Writing to {ARGS.output_adata}')
adata.write(ARGS.output_adata)
| [
"ing.nathany@gmail.com"
] | ing.nathany@gmail.com |
15efdeea22135b88d20124f5c7de91f61585f160 | 09b8bf3aa9e70c4517f4dc277d980eba36c7fe46 | /SQUAD/unet/baseline/main.py | cd173de7da065a932a1540abc971a8a8e97548e1 | [] | no_license | lionsterben/NLP | e0c46771cdf9abc89e527e2f2d06a1e64d0d6f70 | c4bcaed813cfbb7ed852a9e6501ba914b81b57b5 | refs/heads/master | 2020-04-27T13:15:03.390430 | 2019-03-07T14:43:15 | 2019-03-07T14:43:15 | 152,017,194 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,130 | py | # Copyright 2018 Stanford University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This file contains the entrypoint to the rest of the code"""
# from __future__ import absolute_import
# from __future__ import division
import os
import io
import json
import sys
import logging
import tensorflow as tf
from baseline_model import Baseline
from data_util.glove_embedding import get_glove
# from official_eval_helper import get_json_data, generate_answers
# Log INFO-level messages and above to stderr for the whole process.
logging.basicConfig(level=logging.INFO)

MAIN_DIR = os.path.dirname(os.path.abspath(__file__)) # relative path of the main directory
DEFAULT_DATA_DIR = os.path.join(MAIN_DIR, "data") # relative path of data dir
EXPERIMENTS_DIR = os.path.join(MAIN_DIR, "experiments") # relative path of experiments dir

# Command-line flags (TF 1.x tf.app.flags). Each DEFINE_* call registers a
# flag with its default value and help text; values are read via FLAGS.<name>.

# High-level options
tf.app.flags.DEFINE_integer("gpu", 0, "Which GPU to use, if you have multiple.")
tf.app.flags.DEFINE_string("mode", "train", "Available modes: train / show_examples / official_eval")
tf.app.flags.DEFINE_string("experiment_name", "", "Unique name for your experiment. This will create a directory by this name in the experiments/ directory, which will hold all data related to this experiment")
tf.app.flags.DEFINE_integer("num_epochs", 0, "Number of epochs to train. 0 means train indefinitely")

# Hyperparameters
tf.app.flags.DEFINE_float("learning_rate", 0.001, "Learning rate.")
tf.app.flags.DEFINE_float("max_gradient_norm", 5.0, "Clip gradients to this norm.")
tf.app.flags.DEFINE_float("dropout", 0.15, "Fraction of units randomly dropped on non-recurrent connections.")
tf.app.flags.DEFINE_integer("batch_size", 100, "Batch size to use")
tf.app.flags.DEFINE_integer("hidden_size", 200, "Size of the hidden states")
tf.app.flags.DEFINE_integer("context_len", 600, "The maximum context length of your model")
tf.app.flags.DEFINE_integer("question_len", 30, "The maximum question length of your model")
tf.app.flags.DEFINE_integer("embedding_size", 100, "Size of the pretrained word vectors. This needs to be one of the available GloVe dimensions: 50/100/200/300")

# How often to print, save, eval
tf.app.flags.DEFINE_integer("print_every", 1, "How many iterations to do per print.")
tf.app.flags.DEFINE_integer("save_every", 500, "How many iterations to do per save.")
tf.app.flags.DEFINE_integer("eval_every", 500, "How many iterations to do per calculating loss/f1/em on dev set. Warning: this is fairly time-consuming so don't do it too often.")
tf.app.flags.DEFINE_integer("keep", 1, "How many checkpoints to keep. 0 indicates keep all (you shouldn't need to do keep all though - it's very storage intensive).")

# Reading and saving data
tf.app.flags.DEFINE_string("train_dir", "", "Training directory to save the model parameters and other info. Defaults to experiments/{experiment_name}")
tf.app.flags.DEFINE_string("glove_path", "", "Path to glove .txt file. Defaults to data/glove.6B.{embedding_size}d.txt")
tf.app.flags.DEFINE_string("data_dir", DEFAULT_DATA_DIR, "Where to find preprocessed SQuAD data for training. Defaults to data/")
tf.app.flags.DEFINE_string("ckpt_load_dir", "", "For official_eval mode, which directory to load the checkpoint fron. You need to specify this for official_eval mode.")
tf.app.flags.DEFINE_string("json_in_path", "", "For official_eval mode, path to JSON input file. You need to specify this for official_eval_mode.")
tf.app.flags.DEFINE_string("json_out_path", "predictions.json", "Output path for official_eval mode. Defaults to predictions.json")


FLAGS = tf.app.flags.FLAGS
# Restrict TensorFlow to the requested GPU. NOTE(review): reading FLAGS.gpu
# here triggers flag parsing at import time, before tf.app.run() — presumably
# intentional so the env var is set before any session is created; confirm.
os.environ["CUDA_VISIBLE_DEVICES"] = str(FLAGS.gpu)
def initialize_model(session, model, train_dir, expect_exists):
    """
    Restore model parameters from the latest checkpoint in train_dir,
    or initialize fresh parameters when no checkpoint is found.

    Inputs:
      session: TensorFlow session
      model: QAModel
      train_dir: path to directory where we'll look for checkpoint
      expect_exists: If True, throw an error if no checkpoint is found.
        If False, initialize fresh model if no checkpoint is found.
    """
    print("Looking for model at %s..." % train_dir)
    ckpt = tf.train.get_checkpoint_state(train_dir)
    # V2-format checkpoints store their data under "<prefix>.index" rather
    # than at the prefix path itself, so check both locations.
    v2_path = ckpt.model_checkpoint_path + ".index" if ckpt else ""
    checkpoint_found = bool(ckpt) and (
        tf.gfile.Exists(ckpt.model_checkpoint_path) or tf.gfile.Exists(v2_path))

    if checkpoint_found:
        # Restore saved weights and we're done.
        print("Reading model parameters from %s" % ckpt.model_checkpoint_path)
        model.saver.restore(session, ckpt.model_checkpoint_path)
        return

    if expect_exists:
        raise Exception("There is no saved checkpoint at %s" % train_dir)

    # No checkpoint and none required: start from freshly initialized weights.
    print("There is no saved checkpoint at %s. Creating model with fresh parameters." % train_dir)
    session.run(tf.global_variables_initializer())
    print('Num params: %d' % sum(v.get_shape().num_elements() for v in tf.trainable_variables()))
def main(unused_argv):
    """Entry point: loads GloVe vectors and preprocessed SQuAD data, builds
    the Baseline model, then dispatches on FLAGS.mode."""
    # Print an error message if you've entered flags incorrectly
    if len(unused_argv) != 1:
        raise Exception("There is a problem with how you entered flags: %s" % unused_argv)
    # Check for Python 2
    if sys.version_info[0] != 3:
        raise Exception("ERROR: You must use Python 3 but you are running Python %i" % sys.version_info[0])
    # Print out Tensorflow version
    # print("This code was developed and tested on TensorFlow 1.4.1. Your TensorFlow version: %s" % tf.__version__)
    # Define train_dir
    if not FLAGS.experiment_name and not FLAGS.train_dir and FLAGS.mode != "official_eval":
        raise Exception("You need to specify either --experiment_name or --train_dir")
    FLAGS.train_dir = FLAGS.train_dir or os.path.join(EXPERIMENTS_DIR, FLAGS.experiment_name)
    # Initialize bestmodel directory
    bestmodel_dir = os.path.join(FLAGS.train_dir, "best_checkpoint")
    # Define path for glove vecs
    FLAGS.glove_path = FLAGS.glove_path or os.path.join(DEFAULT_DATA_DIR, "glove.6B.{}d.txt".format(FLAGS.embedding_size))
    # Load embedding matrix and vocab mappings
    emb_matrix, word2id, id2word = get_glove(FLAGS.glove_path, FLAGS.embedding_size)
    # Get filepaths to train/dev datafiles for tokenized queries, contexts and answers
    train_context_path = os.path.join(FLAGS.data_dir, "train.context")
    train_qn_path = os.path.join(FLAGS.data_dir, "train.question")
    train_ans_path = os.path.join(FLAGS.data_dir, "train.span")
    train_impossible_path = os.path.join(FLAGS.data_dir, "train.impossible")
    dev_context_path = os.path.join(FLAGS.data_dir, "dev.context")
    dev_qn_path = os.path.join(FLAGS.data_dir, "dev.question")
    dev_ans_path = os.path.join(FLAGS.data_dir, "dev.span")
    dev_impossible_path = os.path.join(FLAGS.data_dir, "dev.impossible")
    # Initialize model
    baseline_model = Baseline(FLAGS, id2word, word2id, emb_matrix)
    # Some GPU settings
    config=tf.ConfigProto()
    config.gpu_options.allow_growth = True
    # Split by mode
    if FLAGS.mode == "train":
        # Setup train dir and logfile
        if not os.path.exists(FLAGS.train_dir):
            os.makedirs(FLAGS.train_dir)
        file_handler = logging.FileHandler(os.path.join(FLAGS.train_dir, "log.txt"))
        logging.getLogger().addHandler(file_handler)
        # Save a record of flags as a .json file in train_dir
        # with open(os.path.join(FLAGS.train_dir, "flags.json"), 'w') as fout:
        #     json.dump(FLAGS.__flags, fout)
        # Make bestmodel dir if necessary
        if not os.path.exists(bestmodel_dir):
            os.makedirs(bestmodel_dir)
        with tf.Session(config=config) as sess:
            # Load most recent model
            initialize_model(sess, baseline_model, FLAGS.train_dir, expect_exists=False)
            # Train
            baseline_model.train(sess, train_context_path, train_qn_path, train_ans_path, train_impossible_path, dev_qn_path, dev_context_path, dev_ans_path, dev_impossible_path)
    elif FLAGS.mode == "show_examples":
        with tf.Session(config=config) as sess:
            # Load best model
            initialize_model(sess, baseline_model, bestmodel_dir, expect_exists=True)
            # Show examples with F1/EM scores
            _, _ = baseline_model.check_f1_em(sess, dev_context_path, dev_qn_path, dev_ans_path, dev_impossible_path, "dev", num_samples=10, print_to_screen=True)
    # NOTE(review): the official_eval branch below is commented out and uses
    # Python-2 print statements — it predates the Python-3 port; confirm
    # before re-enabling.
    # elif FLAGS.mode == "official_eval":
    #     if FLAGS.json_in_path == "":
    #         raise Exception("For official_eval mode, you need to specify --json_in_path")
    #     if FLAGS.ckpt_load_dir == "":
    #         raise Exception("For official_eval mode, you need to specify --ckpt_load_dir")
    #     # Read the JSON data from file
    #     qn_uuid_data, context_token_data, qn_token_data = get_json_data(FLAGS.json_in_path)
    #     with tf.Session(config=config) as sess:
    #         # Load model from ckpt_load_dir
    #         initialize_model(sess, qa_model, FLAGS.ckpt_load_dir, expect_exists=True)
    #         # Get a predicted answer for each example in the data
    #         # Return a mapping answers_dict from uuid to answer
    #         answers_dict = generate_answers(sess, qa_model, word2id, qn_uuid_data, context_token_data, qn_token_data)
    #         # Write the uuid->answer mapping a to json file in root dir
    #         print "Writing predictions to %s..." % FLAGS.json_out_path
    #         with io.open(FLAGS.json_out_path, 'w', encoding='utf-8') as f:
    #             f.write(unicode(json.dumps(answers_dict, ensure_ascii=False)))
    #             print "Wrote predictions to %s" % FLAGS.json_out_path
    else:
        raise Exception("Unexpected value of FLAGS.mode: %s" % FLAGS.mode)
# Script entry point: tf.app.run() parses flags and then invokes main().
if __name__ == "__main__":
    tf.app.run()
| [
"1412465878@qq.com"
] | 1412465878@qq.com |
a1c8656936ae311af887db2840fb5a9f50e8de6c | e0cafa8fe6e249496584903f377695051e97009b | /customer.py | 867fc28d96a97e8a36052a9ce1bc0258d187f713 | [] | no_license | codelings1/Coursera_test | bdcd8a3fe55eda148e38064c7e5a98d6bada6e11 | 5822bd41b193feb2f4f99402699e81ea2db1afad | refs/heads/master | 2021-01-22T01:18:51.860102 | 2018-05-28T10:55:48 | 2018-05-28T10:55:48 | 102,216,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 188 | py | import MySQLdb
# NOTE(review): Python 2 script (`print results` statement syntax) — must be
# run under a Python 2 interpreter with MySQLdb installed.
# Connects without user/password — presumably relies on local MySQL defaults;
# verify credentials handling.
conn = MySQLdb.connect(host="127.0.0.1", port = 3306)
cursor = conn.cursor()
# List all databases on the server.
query = "SHOW DATABASES";
cursor.execute(query);
results = cursor.fetchall();
# NOTE(review): cursor and connection are never closed — resource leak if
# this grows beyond a one-shot script.
print results
| [
"noreply@github.com"
] | noreply@github.com |
eb7cf05d82bebc72536e45721c486ec49eb01fe3 | 9dfdd54b73c3b848547b25779c3281cfdf88f3c9 | /main.py | a27f5a555ff2ecb359d025f886ff9f097208a7b1 | [] | no_license | BrejeMihai/FLCD_Lab08 | 5b70fb37a17107474d82cd600485b1837b96b23b | fb0d835a5e074e53c1b7cb51478345d4c984df68 | refs/heads/main | 2023-01-30T15:26:10.759040 | 2020-12-09T09:10:49 | 2020-12-09T09:10:49 | 313,889,956 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,132 | py | import sys
from Grammar import Grammar, Lr0Parser
if __name__ == '__main__':
    # Build the grammar from the file given on the command line and
    # precompute the LR(0) canonical collection before entering the menu.
    gr = Grammar()
    gr.read_from_file(sys.argv[1])
    print(gr)
    parser = Lr0Parser(gr)
    parser.canonical_collection()
    _menu_string = "Choose an option:\n1.Show the grammar details.\n2.Show productions for nonterminal.\n3.Show parsing table.\n4.Parse word\n0.Exit"
    # NOTE(review): `end` is never set to True — the loop only exits via the
    # `break` in option 0 (or an uncaught exception, e.g. non-integer input).
    end = False
    while(end != True):
        print(_menu_string)
        # Raises ValueError on non-numeric input (uncaught).
        option = int(input(">>> "))
        if option == 1:
            print(gr)
        elif option == 2:
            non_terminal = input()
            print(gr.get_productions_for_nonterminal(non_terminal))
        elif option == 3:
            parser.show_parsing_table()
        elif option == 4:
            word = input("Enter sequence >>> ")
            lista = word.split(" ")
            # Only single-token sequences are accepted.
            if len(lista) > 1:
                raise Exception("Sequence not accepted")
            # Split the word into individual symbols for the parser.
            proper_form_word = [x for x in word]
            parser.actual_parsing(proper_form_word)
        elif option == 0:
            break
        else:
            print("Invalid option!")
| [
"brejemihaipaul@gmail.com"
] | brejemihaipaul@gmail.com |
f51289a9282a25552d7f5b7a10d64965f7d62f94 | f20de8f5bd2c4e1a7348ce9d7dd7e82ac5d61543 | /03-FileHandling/16.py | 4fa4ae6e02d4f8307d6b18a1c894f2c60fd9df3e | [] | no_license | ego6ua/pp1 | 18e6647c4f08498ba3e23c1e0a89ebe91618d230 | c39684d2610537b9b05de549cbac3ee8f7ca657b | refs/heads/master | 2020-08-07T08:35:35.193399 | 2020-01-20T13:38:48 | 2020-01-20T13:38:48 | 213,374,168 | 0 | 0 | null | 2019-10-07T12:12:40 | 2019-10-07T12:12:40 | null | UTF-8 | Python | false | false | 333 | py | import re
# Extract every temperature from the report, average them, then print and
# persist the summary line.
komunikat = 'wtorek - 23C, środa - 21C, czwartek 25C'
# Raw-string \d+ matches integers of any length and any count — the original
# '\d{2}' with exactly three hard-coded indices raised IndexError whenever
# the message contained fewer than three two-digit numbers.
cyfry = re.findall(r'\d+', komunikat)
temperatury = [int(c) for c in cyfry]
# Truncating with int() preserves the original output format.
y = sum(temperatury) / len(temperatury)
raport = f'srednia temperatura to: {int(y)}C'
print(raport)
# 'with' closes the file automatically; the original's bare `f.close`
# (missing parentheses) was a no-op and has been removed.
with open('Temperatura.txt', 'w') as f:
    f.write(raport)
| [
"ego6ua@users.noreply.github.com"
] | ego6ua@users.noreply.github.com |
70ba9905ee148002ae8203247549c0ac39a49b74 | 5b17b29a56b04590e702f3494a166cbde5563b44 | /view/loop_through_low_threshold_crossings.py | 70874c4037a810a679039fdfc9c4cc8514209e57 | [] | no_license | sdwfrost/wind_tunnel | 055f3a124c32f4274ab41238d5205bb63814fd58 | dec912066d9ed5ebeee10c77b6188419bccdb9fe | refs/heads/master | 2020-05-31T02:11:20.275982 | 2018-01-17T23:27:20 | 2018-01-17T23:27:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,253 | py | """
Loop through plots of plume crossings, where crossings are determined by arbitrarily low thresholds.
"""
from __future__ import print_function, division
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from db_api import models
from db_api.connect import session
ODOR_STATES = ('on', 'none', 'afterodor')
# Threshold candidates per insect; units differ between species.
THRESHOLDS = {'fruit_fly': (0.01, 0.1), 'mosquito': (401, 410)}
# Tolerance used when matching a stored threshold value in the database.
DTH = 0.0001
TIMEPOINTS_BEFORE_ENTRY = 50
TIMEPOINTS_AFTER_EXIT = 50
FACE_COLOR = 'white'
FIG_SIZE = (8, 10)
LW = 2

# Interactive mode so the figure refreshes inside the loop.
plt.ion()

expts = session.query(models.Experiment).all()

keep_going = True
# Counters cycling through experiments, odor states, and thresholds.
e_ctr = 0
o_ctr = 0
th_ctr = 0

fig, axs = plt.subplots(3, 1, facecolor=FACE_COLOR, figsize=FIG_SIZE,
                        tight_layout=True)
# Secondary y-axis on the bottom panel for the heading trace.
axs[2].twin = axs[2].twinx()

while keep_going:
    # get new crossing group
    expt = expts[e_ctr]
    odor_state = ODOR_STATES[o_ctr]
    th_val = THRESHOLDS[expt.insect][th_ctr]
    # Match the stored threshold within +/- DTH of the requested value.
    threshold = session.query(models.Threshold).\
        filter(models.Threshold.experiment == expt).\
        filter(models.Threshold.value.between(th_val - DTH, th_val + DTH)).first()
    crossing_group = session.query(models.CrossingGroup).\
        filter_by(experiment=expt, odor_state=odor_state, threshold=threshold).first()

    for crossing in crossing_group.crossings:
        # Time series spanning a window around the plume crossing:
        # TIMEPOINTS_BEFORE_ENTRY samples before entry through
        # TIMEPOINTS_AFTER_EXIT samples after exit.
        xs = crossing.timepoint_field(session, 'position_x',
                                      first=-TIMEPOINTS_BEFORE_ENTRY,
                                      last=TIMEPOINTS_AFTER_EXIT,
                                      first_rel_to='entry',
                                      last_rel_to='exit')
        ys = crossing.timepoint_field(session, 'position_y',
                                      first=-TIMEPOINTS_BEFORE_ENTRY,
                                      last=TIMEPOINTS_AFTER_EXIT,
                                      first_rel_to='entry',
                                      last_rel_to='exit')
        zs = crossing.timepoint_field(session, 'position_z',
                                      first=-TIMEPOINTS_BEFORE_ENTRY,
                                      last=TIMEPOINTS_AFTER_EXIT,
                                      first_rel_to='entry',
                                      last_rel_to='exit')
        odors = crossing.timepoint_field(session, 'odor',
                                         first=-TIMEPOINTS_BEFORE_ENTRY,
                                         last=TIMEPOINTS_AFTER_EXIT,
                                         first_rel_to='entry',
                                         last_rel_to='exit')
        headings = crossing.timepoint_field(session, 'heading_xyz',
                                            first=-TIMEPOINTS_BEFORE_ENTRY,
                                            last=TIMEPOINTS_AFTER_EXIT,
                                            first_rel_to='entry',
                                            last_rel_to='exit')
        timesteps = crossing.timepoint_field(session, 'timestep',
                                             first=-TIMEPOINTS_BEFORE_ENTRY,
                                             last=TIMEPOINTS_AFTER_EXIT,
                                             first_rel_to='entry',
                                             last_rel_to='exit')
        entry_timestep = crossing.timepoint_field(session, 'timestep',
                                                  first=0,
                                                  last=0,
                                                  first_rel_to='entry',
                                                  last_rel_to='exit')[0]
        # 1 where odor exceeds the threshold, 0 elsewhere (colors the scatter).
        colors = (odors > threshold.value).astype(int)

        # plot everything
        [ax.cla() for ax in list(axs) + [axs[2].twin]]
        axs[0].scatter(xs, ys, c=colors, lw=0, cmap=cm.hot, vmin=0, vmax=2)
        axs[1].scatter(xs, zs, c=colors, lw=0, cmap=cm.hot, vmin=0, vmax=2)
        axs[2].plot(timesteps, odors, c='r', lw=LW)
        axs[2].axhline(threshold.value, c='r', ls='--', lw=LW)
        axs[2].axvline(entry_timestep, c='r', ls='--', lw=LW)
        axs[2].twin.plot(timesteps, headings, c='b', lw=LW)

        axs[0].set_xlim(-0.3, 1)
        axs[0].set_ylim(-0.15, 0.15)
        axs[1].set_xlim(-0.3, 1)
        axs[1].set_ylim(-0.15, 0.15)
        axs[2].set_xlim(timesteps[0], timesteps[-1])
        axs[2].twin.set_ylim(0, 180)

        axs[0].set_ylabel('y (m)')
        axs[1].set_ylabel('z (m)')
        axs[1].set_xlabel('x (m)')
        axs[2].set_ylabel('odor', color='r')
        axs[2].twin.set_ylabel('heading (deg)', color='b')
        axs[0].set_title('{}_{}_th{}'.format(expt.id, odor_state, threshold.value))

        plt.draw()

        # NOTE(review): raw_input implies this script targets Python 2.
        command = raw_input('Command [e(xpt), o(dor_state), t(hreshold), q(uit)]?')

        if command in ['e', 'o', 't', 'q']:
            # Advance the chosen counter (wrapping) and abandon the rest of
            # the current crossing group; 'q' stops the outer loop entirely.
            if command == 'e':
                e_ctr += 1
                e_ctr %= len(expts)
            elif command == 'o':
                o_ctr += 1
                o_ctr %= len(ODOR_STATES)
            elif command == 't':
                th_ctr += 1
                th_ctr %= len(THRESHOLDS['fruit_fly'])
            elif command == 'q':
                keep_going = False
            break
        elif command == 'pdb':
            # Drop into the debugger for ad-hoc inspection.
            import pdb; pdb.set_trace()
"rpang.contact@gmail.com"
] | rpang.contact@gmail.com |
0dde655a5356b6dc06767f61325182aa266d924f | b41a713ded42967feba2ce9b2cb3d500cb11d1e6 | /naive_bayes.py | 537590623413be6f0674f050bf7446783663850f | [] | no_license | cihanerman/Machine_Learning_Ses | 43a09b26361381b6bfa918d0324a498a08941338 | e95fae17ecbddf396005cf36ada996d59c920c45 | refs/heads/master | 2020-04-16T10:26:01.127355 | 2019-02-16T22:13:33 | 2019-02-16T22:13:33 | 165,505,426 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,516 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Jan 26 22:23:04 2019
@author: cihanerman
"""
# import library
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import GaussianNB, MultinomialNB, ComplementNB
from sklearn.metrics import confusion_matrix
import warnings
warnings.filterwarnings('ignore')

# data loading
data = pd.read_csv('veriler.csv')
x = data.iloc[:, 1:4].values
y = data.iloc[:, 4:].values

x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.33, random_state=0)

sc = StandardScaler()
X_train = sc.fit_transform(x_train)
X_test = sc.transform(x_test)

# Flat 1-D label vectors: sklearn estimators expect shape (n_samples,), and
# the per-sample correctness comparisons below need elementwise alignment.
y_train_flat = y_train.ravel()
y_true = y_test.ravel()

gnb = GaussianNB().fit(X_train, y_train_flat)
predict_gnb = gnb.predict(X_test)
print('GaussianNB')
print(predict_gnb)
print(y_test)
# Fixed: compare each true label with its own prediction. The original
# `y_test[0] == predict_gnb` broadcast only the FIRST true label against the
# whole prediction vector.
print(y_true == predict_gnb)
cm = confusion_matrix(y_test, predict_gnb)
print(cm)

# MultinomialNB does not accept negative input, so it is fit on the raw
# (unscaled) features. Fixed: predict on raw x_test as well — the original
# predicted on the standardized X_test, mismatching the training
# representation (and standardized data can contain negatives).
mnb = MultinomialNB().fit(x_train, y_train_flat)
predict_mnb = mnb.predict(x_test)
print('MultinomialNB')
print(predict_mnb)
print(y_test)
print(y_true == predict_mnb)
cm = confusion_matrix(y_test, predict_mnb)
print(cm)

# The most successful naive Bayes algorithm for this data set.
# ComplementNB does not accept negative input either, so raw features are
# used for both fitting and prediction.
cnb = ComplementNB().fit(x_train, y_train_flat)
predict_cnb = cnb.predict(x_test)
print('ComplementNB')
print(predict_cnb)
print(y_test)
print(y_true == predict_cnb)
cm = confusion_matrix(y_test, predict_cnb)
print(cm)
"cihan.erman@outlook.com"
] | cihan.erman@outlook.com |
4fdc6b0d3c0d6e664d22960a9926a3b2127f2f29 | 753a70bc416e8dced2853f278b08ef60cdb3c768 | /models/research/deeplab/datasets/build_cityscapes_data.py | ce81baef20a460abaa634d3f1dcb6760a0858dec | [
"MIT",
"Apache-2.0"
] | permissive | finnickniu/tensorflow_object_detection_tflite | ef94158e5350613590641880cb3c1062f7dd0efb | a115d918f6894a69586174653172be0b5d1de952 | refs/heads/master | 2023-04-06T04:59:24.985923 | 2022-09-20T16:29:08 | 2022-09-20T16:29:08 | 230,891,552 | 60 | 19 | MIT | 2023-03-25T00:31:18 | 2019-12-30T09:58:41 | C++ | UTF-8 | Python | false | false | 6,244 | py | # Lint as: python2, python3
# Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Converts Cityscapes data to TFRecord file format with Example protos.
The Cityscapes dataset is expected to have the following directory structure:
+ cityscapes
- build_cityscapes_data.py (current working directiory).
- build_data.py
+ cityscapesscripts
+ annotation
+ evaluation
+ helpers
+ preparation
+ viewer
+ gtFine
+ train
+ val
+ test
+ leftImg8bit
+ train
+ val
+ test
+ tfrecord
This script converts data into sharded data files and save at tfrecord folder.
Note that before running this script, the users should (1) register the
Cityscapes dataset website at https://www.cityscapes-dataset.com to
download the dataset, and (2) run the script provided by Cityscapes
`preparation/createTrainIdLabelImgs.py` to generate the training groundtruth.
Also note that the tensorflow model will be trained with `TrainId' instead
of `EvalId' used on the evaluation server. Thus, the users need to convert
the predicted labels to `EvalId` for evaluation on the server. See the
vis.py for more details.
The Example proto contains the following fields:
image/encoded: encoded image content.
image/filename: image filename.
image/format: image file format.
image/height: image height.
image/width: image width.
image/channels: image channels.
image/segmentation/class/encoded: encoded semantic segmentation content.
image/segmentation/class/format: semantic segmentation file format.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import glob
import math
import os.path
import re
import sys
import build_data
from six.moves import range
import tensorflow as tf
FLAGS = tf.app.flags.FLAGS

tf.app.flags.DEFINE_string('cityscapes_root',
                           './cityscapes',
                           'Cityscapes dataset root folder.')

tf.app.flags.DEFINE_string(
    'output_dir',
    './tfrecord',
    'Path to save converted SSTable of TensorFlow examples.')


# Each dataset split is written as this many TFRecord shards.
_NUM_SHARDS = 10

# A map from data type to folder name that saves the data.
_FOLDERS_MAP = {
    'image': 'leftImg8bit',
    'label': 'gtFine',
}

# A map from data type to filename postfix.
_POSTFIX_MAP = {
    'image': '_leftImg8bit',
    'label': '_gtFine_labelTrainIds',
}

# A map from data type to data format.
_DATA_FORMAT_MAP = {
    'image': 'png',
    'label': 'png',
}

# Image file pattern (captures the basename shared by image and label files).
_IMAGE_FILENAME_RE = re.compile('(.+)' + _POSTFIX_MAP['image'])
def _get_files(data, dataset_split):
  """Gets files for the specified data type and dataset split.

  Args:
    data: String, desired data ('image' or 'label').
    dataset_split: String, dataset split ('train', 'val', 'test')

  Returns:
    A list of sorted file names or None when getting label for
      test set.
  """
  # The test split ships without public ground-truth labels.
  if data == 'label' and dataset_split == 'test':
    return None
  file_pattern = '*{}.{}'.format(_POSTFIX_MAP[data], _DATA_FORMAT_MAP[data])
  # <root>/<folder>/<split>/<city>/<pattern>
  glob_expr = os.path.join(FLAGS.cityscapes_root, _FOLDERS_MAP[data],
                           dataset_split, '*', file_pattern)
  return sorted(glob.glob(glob_expr))
def _convert_dataset(dataset_split):
  """Converts the specified dataset split to TFRecord format.

  Args:
    dataset_split: The dataset split (e.g., train, val).

  Raises:
    RuntimeError: If loaded image and label have different shape, or if the
      image file with specified postfix could not be found.
  """
  image_files = _get_files('image', dataset_split)
  label_files = _get_files('label', dataset_split)

  num_images = len(image_files)
  # Images are spread evenly across _NUM_SHARDS output files.
  num_per_shard = int(math.ceil(num_images / _NUM_SHARDS))

  image_reader = build_data.ImageReader('png', channels=3)
  label_reader = build_data.ImageReader('png', channels=1)

  for shard_id in range(_NUM_SHARDS):
    shard_filename = '%s-%05d-of-%05d.tfrecord' % (
        dataset_split, shard_id, _NUM_SHARDS)
    output_filename = os.path.join(FLAGS.output_dir, shard_filename)
    with tf.python_io.TFRecordWriter(output_filename) as tfrecord_writer:
      start_idx = shard_id * num_per_shard
      end_idx = min((shard_id + 1) * num_per_shard, num_images)
      for i in range(start_idx, end_idx):
        # Progress indicator on a single console line.
        sys.stdout.write('\r>> Converting image %d/%d shard %d' % (
            i + 1, num_images, shard_id))
        sys.stdout.flush()
        # Read the image.
        image_data = tf.gfile.FastGFile(image_files[i], 'rb').read()
        height, width = image_reader.read_image_dims(image_data)
        # Read the semantic segmentation annotation.
        seg_data = tf.gfile.FastGFile(label_files[i], 'rb').read()
        seg_height, seg_width = label_reader.read_image_dims(seg_data)
        # Image and its label mask must align pixel-for-pixel.
        if height != seg_height or width != seg_width:
          raise RuntimeError('Shape mismatched between image and label.')
        # Convert to tf example.
        re_match = _IMAGE_FILENAME_RE.search(image_files[i])
        if re_match is None:
          raise RuntimeError('Invalid image filename: ' + image_files[i])
        filename = os.path.basename(re_match.group(1))
        example = build_data.image_seg_to_tfexample(
            image_data, filename, height, width, seg_data)
        tfrecord_writer.write(example.SerializeToString())
  sys.stdout.write('\n')
  sys.stdout.flush()
def main(unused_argv):
  """Converts the supported Cityscapes splits to TFRecord format."""
  # Only 'train' and 'val' are supported for now; the test split has no
  # label annotations to convert.
  for split in ('train', 'val'):
    _convert_dataset(split)


if __name__ == '__main__':
  tf.app.run()
| [
"finn.niu@apptech.com.hk"
] | finn.niu@apptech.com.hk |
5f01b00fb146fec0d23c878194633081590499e0 | 59de7788673ade984b9c9fbc33664a7cbdba67d3 | /res/scripts/client/gui/scaleform/framework/entities/abstract/tooltipmgrmeta.py | a6dfedb52bce9035a795727e60fc365f096a4dbc | [] | no_license | webiumsk/WOT-0.9.15-CT | 3fa24ab37a6c91b7073034afb2f355efa5b7fe36 | fbd194fbaa6bdece51c7a68fc35bbb5257948341 | refs/heads/master | 2020-12-24T21:27:23.175774 | 2016-05-01T13:47:44 | 2016-05-01T13:47:44 | 57,600,180 | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 1,329 | py | # 2016.05.01 15:22:59 Střední Evropa (letní čas)
# Embedded file name: scripts/client/gui/Scaleform/framework/entities/abstract/ToolTipMgrMeta.py
from gui.Scaleform.framework.entities.BaseDAAPIModule import BaseDAAPIModule
class ToolTipMgrMeta(BaseDAAPIModule):
    """
    DO NOT MODIFY!
    Generated with yaml.
    __author__ = 'yaml_processor'
    @extends BaseDAAPIModule
    null
    """

    def onCreateComplexTooltip(self, tooltipId, stateType):
        """
        :param tooltipId:
        :param stateType:
        :return :
        """
        # Generated stub: a subclass is expected to override this DAAPI
        # callback; calling the stub reports the missing override.
        self._printOverrideError('onCreateComplexTooltip')

    def onCreateTypedTooltip(self, type, args, stateType):
        """
        :param type:
        :param args:
        :param stateType:
        :return :
        """
        # Generated stub: a subclass is expected to override this DAAPI
        # callback; calling the stub reports the missing override.
        self._printOverrideError('onCreateTypedTooltip')

    def as_showS(self, tooltipData, linkage):
        """
        :param tooltipData:
        :param linkage:
        :return :
        """
        # Forward to the Flash display object only once DAAPI is initialised;
        # implicitly returns None otherwise.
        if self._isDAAPIInited():
            return self.flashObject.as_show(tooltipData, linkage)
# okay decompyling c:\Users\PC\wotsources\files\originals\res\scripts\client\gui\scaleform\framework\entities\abstract\tooltipmgrmeta.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2016.05.01 15:22:59 Střední Evropa (letní čas)
| [
"info@webium.sk"
] | info@webium.sk |
2ede8dd1250d270818bc22577cf17fe657d46615 | f67765cc9a707f462ae3eda4ac0ae4b278ad54d2 | /main3.py | efa5471e814e5d129606dc093627529545527485 | [] | no_license | orlovrs/python_qat_22 | 3c25ac9a0f18d3914172e2a1738bbb4730b2a822 | 747ea578e0e655f18201c9ec90f15f969191eef3 | refs/heads/master | 2022-12-15T01:16:45.129299 | 2020-09-10T07:50:08 | 2020-09-10T07:50:08 | 294,340,692 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 712 | py | import pytest
# Задаем ids вручную
@pytest.mark.parametrize("x", [-1, 0, 1], ids=["negative", "zero", "positive"])
@pytest.mark.parametrize("y", [100, 1000], ids=["3 digit", "4 digit"])
def test_multiple_params(x, y):
print("x: {0}, y: {1}".format(x, y))
assert pytest.num == 1
# Генерируем ids автоматически с помощью функций
def ids_x(val):
return "x=({0})".format(str(val))
def ids_y(val):
return "y=({0})".format(str(val))
@pytest.mark.parametrize("x", [-1, 0, 1], ids=ids_x)
@pytest.mark.parametrize("x", [-1, 0, 1], ids=ids_x)
@pytest.mark.parametrize("y", [100, 1000], ids=ids_y)
def test_multiple_params_generated_ids(x, y):
    # The ids callables (ids_x / ids_y) label each generated test case.
    print("x: {0}, y: {1}".format(x, y))
    assert True
"helltester666@gmail.com"
] | helltester666@gmail.com |
0f149d04b001516f7e44891758e4f4b9fe1459e9 | 88d555a009f9075e59177fac70036892f397b439 | /bin/saluki_train_folds.py | b0855e380c8a5fecbcf452fe9356988f2c5c8f01 | [
"Apache-2.0"
] | permissive | calico/basenji | f9f406971d355dda81821dcf274696a7d27e332d | 615b9eec8a591783b16d959029ddad08edae853d | refs/heads/master | 2023-09-04T11:14:15.620786 | 2023-07-27T00:05:13 | 2023-07-27T00:05:13 | 96,346,574 | 326 | 143 | Apache-2.0 | 2023-08-16T00:36:32 | 2017-07-05T17:54:18 | Python | UTF-8 | Python | false | false | 13,420 | py | #!/usr/bin/env python
# Copyright 2019 Calico LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =========================================================================
from optparse import OptionParser, OptionGroup
import copy
import glob
import json
from natsort import natsorted
import os
import pdb
import pickle
import shutil
import subprocess
import sys
import numpy as np
import pandas as pd
import slurm
"""
saluki_train_folds.py
Train Saluki model replicates on cross folds using given parameters and data.
"""
################################################################################
# main
################################################################################
def main():
  """Launch SLURM jobs that train Saluki model replicates across
  cross-validation folds (one job per fold x cross round), then launch
  test jobs on the train split and on the held-out test split."""
  usage = 'usage: %prog [options] <params_file> <data1_dir> ...'
  parser = OptionParser(usage)

  # train
  train_options = OptionGroup(parser, 'saluki_train.py options')
  train_options.add_option('-o', dest='out_dir',
      default='train_out',
      help='Output directory for test statistics [Default: %default]')
  parser.add_option_group(train_options)

  # test
  test_options = OptionGroup(parser, 'saluki_test.py options')
  test_options.add_option('--shifts', dest='shifts',
      default='0', type='str',
      help='Ensemble prediction shifts [Default: %default]')
  parser.add_option_group(test_options)

  # multi
  rep_options = OptionGroup(parser, 'replication options')
  rep_options.add_option('-c', dest='crosses',
      default=1, type='int',
      help='Number of cross-fold rounds [Default:%default]')
  rep_options.add_option('-e', dest='conda_env',
      default='tf28',
      help='Anaconda environment [Default: %default]')
  rep_options.add_option('-f', dest='fold_subset',
      default=None, type='int',
      help='Run a subset of folds [Default:%default]')
  rep_options.add_option('--name', dest='name',
      default='fold', help='SLURM name prefix [Default: %default]')
  rep_options.add_option('-p', dest='processes',
      default=None, type='int',
      help='Number of processes, passed by multi script')
  rep_options.add_option('-q', dest='queue',
      default='geforce',
      help='SLURM queue on which to run the jobs [Default: %default]')
  rep_options.add_option('-r', dest='restart',
      default=False, action='store_true')
  rep_options.add_option('--test_off', dest='test_off',
      default=False, action='store_true')
  rep_options.add_option('--test_train_off', dest='test_train_off',
      default=False, action='store_true')
  parser.add_option_group(rep_options)

  (options, args) = parser.parse_args()

  if len(args) < 2:
    parser.error('Must provide parameters and data directory.')
  else:
    params_file = os.path.abspath(args[0])
    data_dirs = [os.path.abspath(arg) for arg in args[1:]]

  # read model parameters
  with open(params_file) as params_open:
    params = json.load(params_open)
  params_train = params['train']

  #######################################################
  # prep work

  if not options.restart and os.path.isdir(options.out_dir):
    print('Output directory %s exists. Please remove.' % options.out_dir)
    exit(1)
  if not os.path.isdir(options.out_dir):
    os.mkdir(options.out_dir)

  # read data parameters
  num_data = len(data_dirs)
  data_stats_file = '%s/statistics.json' % data_dirs[0]
  with open(data_stats_file) as data_stats_open:
    data_stats = json.load(data_stats_open)

  # count folds
  num_folds = len([dkey for dkey in data_stats if dkey.startswith('fold')])

  # subset folds
  if options.fold_subset is not None:
    num_folds = min(options.fold_subset, num_folds)

  # arrange data
  for ci in range(options.crosses):
    for fi in range(num_folds):
      # one replicate directory per (fold, cross) combination
      rep_dir = '%s/f%dc%d' % (options.out_dir, fi, ci)
      os.makedirs(rep_dir, exist_ok=True)

      # make data directories
      for di in range(num_data):
        rep_data_dir = '%s/data%d' % (rep_dir, di)
        if not os.path.isdir(rep_data_dir):
          make_rep_data(data_dirs[di], rep_data_dir, fi, ci)

  #######################################################
  # train

  jobs = []
  for ci in range(options.crosses):
    for fi in range(num_folds):
      rep_dir = '%s/f%dc%d' % (options.out_dir, fi, ci)

      # with --restart, an existing train/ directory means the replicate
      # already trained and its job is skipped
      if options.restart and os.path.isdir('%s/train'%rep_dir):
        print('%s found and skipped.' % rep_dir)
      else:
        # collect data directories
        rep_data_dirs = []
        for di in range(num_data):
          rep_data_dirs.append('%s/data%d' % (rep_dir, di))

        # train command
        cmd = '. /home/drk/anaconda3/etc/profile.d/conda.sh;'
        cmd += ' conda activate %s;' % options.conda_env
        cmd += ' echo $HOSTNAME;'
        cmd += ' saluki_train.py'
        cmd += ' %s' % options_string(options, train_options, rep_dir)
        cmd += ' %s %s' % (params_file, ' '.join(rep_data_dirs))

        name = '%s-train-f%dc%d' % (options.name, fi, ci)
        sbf = os.path.abspath('%s/train.sb' % rep_dir)
        outf = os.path.abspath('%s/train.out' % rep_dir)
        errf = os.path.abspath('%s/train.err' % rep_dir)

        j = slurm.Job(cmd, name,
            outf, errf, sbf,
            queue=options.queue,
            cpu=4,
            gpu=params_train.get('num_gpu',1),
            mem=30000, time='2-0:0:0')
        jobs.append(j)

  # block until all training jobs finish before launching tests
  slurm.multi_run(jobs, max_proc=options.processes, verbose=True,
                  launch_sleep=10, update_sleep=60)

  #######################################################
  # test train

  # test-train and test-best jobs share this list; the final multi_run at
  # the bottom launches both batches together
  jobs = []
  if not options.test_train_off:
    for ci in range(options.crosses):
      for fi in range(num_folds):
        it_dir = '%s/f%dc%d' % (options.out_dir, fi, ci)

        for di in range(num_data):
          # single-dataset runs omit the dataset index from paths
          if num_data == 1:
            out_dir = '%s/test_train' % it_dir
            model_file = '%s/train/model_best.h5' % it_dir
          else:
            out_dir = '%s/test%d_train' % (it_dir, di)
            model_file = '%s/train/model%d_best.h5' % (it_dir, di)

          # check if done
          acc_file = '%s/acc.txt' % out_dir
          if os.path.isfile(acc_file):
            print('%s already generated.' % acc_file)
          else:
            # basenji test
            basenji_cmd = '. /home/drk/anaconda3/etc/profile.d/conda.sh;'
            basenji_cmd += ' conda activate %s;' % options.conda_env
            basenji_cmd += ' saluki_test.py'
            basenji_cmd += ' --head %d' % di
            basenji_cmd += ' -o %s' % out_dir
            if options.shifts:
              basenji_cmd += ' --shifts %s' % options.shifts
            basenji_cmd += ' --split train'
            basenji_cmd += ' %s' % params_file
            basenji_cmd += ' %s' % model_file
            basenji_cmd += ' %s/data%d' % (it_dir, di)

            name = '%s-testtr-f%dc%d' % (options.name, fi, ci)
            basenji_job = slurm.Job(basenji_cmd,
                name=name,
                out_file='%s.out'%out_dir,
                err_file='%s.err'%out_dir,
                queue=options.queue,
                cpu=2, gpu=1,
                mem=23000,
                time='8:00:00')
            jobs.append(basenji_job)

  #######################################################
  # test best

  if not options.test_off:
    for ci in range(options.crosses):
      for fi in range(num_folds):
        it_dir = '%s/f%dc%d' % (options.out_dir, fi, ci)

        for di in range(num_data):
          if num_data == 1:
            out_dir = '%s/test' % it_dir
            model_file = '%s/train/model_best.h5' % it_dir
          else:
            out_dir = '%s/test%d' % (it_dir, di)
            model_file = '%s/train/model%d_best.h5' % (it_dir, di)

          # check if done
          acc_file = '%s/acc.txt' % out_dir
          if os.path.isfile(acc_file):
            print('%s already generated.' % acc_file)
          else:
            # basenji test
            basenji_cmd = '. /home/drk/anaconda3/etc/profile.d/conda.sh;'
            basenji_cmd += ' conda activate %s;' % options.conda_env
            basenji_cmd += ' saluki_test.py'
            basenji_cmd += ' --head %d' % di
            # TEMP
            basenji_cmd += ' --save'
            basenji_cmd += ' -o %s' % out_dir
            if options.shifts:
              basenji_cmd += ' --shifts %s' % options.shifts
            basenji_cmd += ' %s' % params_file
            basenji_cmd += ' %s' % model_file
            basenji_cmd += ' %s/data%d' % (it_dir, di)

            name = '%s-test-f%dc%d' % (options.name, fi, ci)
            basenji_job = slurm.Job(basenji_cmd,
                name=name,
                out_file='%s.out'%out_dir,
                err_file='%s.err'%out_dir,
                queue=options.queue,
                cpu=2, gpu=1,
                mem=23000,
                time='4:00:00')
            jobs.append(basenji_job)

  slurm.multi_run(jobs, max_proc=options.processes, verbose=True,
                  launch_sleep=10, update_sleep=60)
def make_rep_data(data_dir, rep_data_dir, fi, ci):
  """Assemble the data directory for one cross-validation replicate.

  Args:
    data_dir: source data directory holding statistics.json, genes.tsv,
      targets.txt and per-fold tfrecords.
    rep_data_dir: replicate directory to (re)create.
    fi: fold index used as the test fold.
    ci: cross index; valid fold is (fi+1+ci) % num_folds.

  Side effects only: wipes/rebuilds rep_data_dir with a rewritten
  statistics.json, a split-labeled genes.tsv, a copied targets.txt, and
  tfrecord symlinks renamed into train/valid/test shards.
  """
  # read data parameters
  with open('%s/statistics.json' % data_dir) as stats_open:
    data_stats = json.load(stats_open)

  # collect per-fold sequence counts, removing the fold keys as we go
  fold_seqs = []
  fold_i = 0
  while 'fold%d_seqs' % fold_i in data_stats:
    fold_seqs.append(data_stats['fold%d_seqs' % fold_i])
    del data_stats['fold%d_seqs' % fold_i]
    fold_i += 1
  num_folds = fold_i

  # split folds into train/valid/test
  test_fold = fi
  valid_fold = (fi + 1 + ci) % num_folds
  train_folds = [fold for fold in range(num_folds) if fold not in [valid_fold, test_fold]]

  # start from an empty replicate directory
  if os.path.isdir(rep_data_dir):
    shutil.rmtree(rep_data_dir)
  os.mkdir(rep_data_dir)

  # dump data stats with train/valid/test sequence counts
  data_stats['test_seqs'] = fold_seqs[test_fold]
  data_stats['valid_seqs'] = fold_seqs[valid_fold]
  data_stats['train_seqs'] = sum([fold_seqs[tf] for tf in train_folds])
  with open('%s/statistics.json' % rep_data_dir, 'w') as stats_open:
    json.dump(data_stats, stats_open, indent=4)

  # label each gene with its split in the genes table
  genes_df = pd.read_csv('%s/genes.tsv' % data_dir, sep='\t', index_col=0)
  gene_split = np.array(['train'] * genes_df.shape[0])
  gene_split[genes_df.Fold == test_fold] = 'test'
  gene_split[genes_df.Fold == valid_fold] = 'valid'
  genes_df['Split'] = gene_split
  genes_df.to_csv('%s/genes.tsv' % rep_data_dir, sep='\t')

  # copy targets
  shutil.copy('%s/targets.txt' % data_dir, '%s/targets.txt' % rep_data_dir)

  # sym link tfrecords under split-based sequential names
  rep_tfr_dir = '%s/tfrecords' % rep_data_dir
  os.mkdir(rep_tfr_dir)

  def link_fold_tfrs(folds, label):
    # symlink every shard of the given folds as <label>-<i>.tfr
    ti = 0
    for fold in folds:
      for tfr_path in natsorted(glob.glob('%s/tfrecords/fold%d-*.tfr' % (data_dir, fold))):
        os.symlink(os.path.abspath(tfr_path), '%s/%s-%d.tfr' % (rep_tfr_dir, label, ti))
        ti += 1

  link_fold_tfrs([test_fold], 'test')
  link_fold_tfrs([valid_fold], 'valid')
  link_fold_tfrs(train_folds, 'train')
def options_string(options, train_options, rep_dir):
  """Serialize parsed training options back into a command-line string.

  Args:
    options: parsed option values (optparse Values-like; attributes per dest).
    train_options: the parser whose option_list defines which flags to emit.
    rep_dir: replicate directory; rewrites out_dir/restore paths beneath it.

  Returns:
    A single string of ' --flag value' pieces (bools emit the bare flag
    when True and nothing when False; None values are skipped entirely).
  """
  pieces = []
  for opt in train_options.option_list:
    flag = opt.get_opt_string()
    value = options.__dict__[opt.dest]

    # quote values containing asterisks so the shell does not glob them
    if type(value) is str and '*' in value:
      value = '"%s"' % value

    # boolean flags carry no value; drop the flag entirely when False
    elif type(value) is bool:
      if not value:
        flag = ''
      value = ''

    # unset options are skipped
    elif value is None:
      flag = ''
      value = ''

    # redirect the output directory into this replicate
    elif opt.dest == 'out_dir':
      value = '%s/train' % rep_dir

    # point restore at the matching fold's saved model
    elif opt.dest == 'restore':
      fold_dir_mid = rep_dir.split('/')[-1]
      model_file = 'model_trunk.h5' if options.trunk else 'model_best.h5'
      value = '%s/%s/train/%s' % (value, fold_dir_mid, model_file)

    pieces.append(' %s %s' % (flag, value))

  return ''.join(pieces)
################################################################################
# __main__
################################################################################
if __name__ == '__main__':
    # Script entry point; main() is defined earlier in this file.
    main()
| [
"drk@calicolabs.com"
] | drk@calicolabs.com |
4e720f06218c7fbc7e640ea6ec4530d270ebec9b | 5e32cccdde88ed44dae414941cff2ac579a081f0 | /Surveillance.NWB.py | c6aef8864b4f7541e22294a5905dc0a6bc2e0180 | [
"MIT"
] | permissive | Kavyapriyakp/Surveillance | 85392fc86713176bb9be8c3a51e9d84d78537734 | c2a47e5d780ba01a2cf5d9ec7edab64215f0c1d2 | refs/heads/main | 2023-07-14T11:10:20.178598 | 2021-08-23T13:57:45 | 2021-08-23T13:57:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,357 | py | #_____________________HEADER FILES______________________
import tkinter
from tkinter import*
from tkinter import ttk
from tkinter import filedialog
from _cffi_backend import callback
from PIL import ImageTk, Image
import cv2
from cv2 import *
import numpy as np
import sys
import time
import argparse
import imutils
from pathlib import Path
from utils import *
import time
from skimage.restoration import wiener, richardson_lucy
from scipy.special import j1
#_____________________USER-DEFINED FUNCTIONS______________________
kernel_d = np.ones((3,3), np.uint8)   # dilation structuring element
kernel_e = np.ones((3,3), np.uint8)   # erosion structuring element
kernel_gauss = (3,3)                  # Gaussian blur window size
is_blur = False    # pre-blur frames before differencing
is_close = True    # apply morphological closing (dilate/erode) to the mask
is_draw_ct = False # also draw raw contours, not just bounding boxes
fac = 2            # integer factor (unused in the code shown here)
#___________________INITIALIZING THE GUI WINDOW______________________
# Fixed-size 1300x480 main window for the surveillance tools.
window=Tk()
window.configure(background="grey64");
window.title("Surveillance System")
window.resizable(0,0)
window.geometry('1300x480')
#_______________SETTING VARIABLES TO CHECK STATE OF BUTTON (CHECKED OR UNCHECKED)______________________
# Two sliders (5..25, default 15) controlling the dilation and erosion
# iteration counts read live by drawRectangle().
current_value1 = IntVar()
current_value2 = IntVar()

def get_current_value1():
    # Current dilation-iterations slider value as an int.
    return int('{}'.format(current_value1.get()))

def slider_changed1(event):
    # Mirror the dilation slider position into its value label.
    value_label1.configure(text=get_current_value1())

slider_label1 = Label(window,text='Dilation',font=("Times New Roman",12),fg="black",bg="grey64").place(x=832,y=52)
value_label1 = ttk.Label(window, text=get_current_value1())
slider1 = ttk.Scale(window, from_=5,to=25, orient='horizontal', command=slider_changed1, variable=current_value1)
slider1.set(15)
slider1.place(x=890,y=50)
value_label1.place(x=995,y=52)

def get_current_value2():
    # Current erosion-iterations slider value as an int.
    return int('{}'.format(current_value2.get()))

def slider_changed2(event2):
    # Mirror the erosion slider position into its value label.
    value_label2.configure(text=get_current_value2())

slider_label2 = Label(window,text='Erosion',font=("Times New Roman",12),fg="black",bg="grey64").place(x=832,y=82)
value_label2 = ttk.Label(window, text=get_current_value2())
slider2 = ttk.Scale(window, from_=5,to=25, orient='horizontal', command=slider_changed2, variable=current_value2)
slider2.set(15)
slider2.place(x=890,y=82)
value_label2.place(x=995,y=82)
#_____________________CREATING BUTTONS______________________
# NOTE(review): .place() returns None, so `title` holds None, not the Label.
title = Label(window, text = "Surveillance System",font=("Times New Roman",18, 'bold'),fg="black",bg="grey64").place(x=495, y=10)
# Label updated by browseFiles() with the selected file path.
label_file_explorer = Label(window, text = "", fg = "blue")
label_file_explorer.place(x=20,y=60)
#____________________ADDING FUNCTIONALITES_________________________
def browseFiles():
    """Ask the user for an input file and remember the choice.

    Updates the module-level ``source_file`` and the file label, then
    returns the chosen path ('' when the dialog is cancelled).
    """
    global source_file
    chosen = filedialog.askopenfilename(initialdir="/",
                                        title="Select a File",
                                        filetypes=[('All Files', '.*')],
                                        parent=window)
    source_file = chosen
    label_file_explorer.configure(text="File: " + chosen)
    return chosen
def objdetect():
    """Motion detection on a user-selected video.

    Maintains a running-average background model and shows the raw frame
    ('Input') plus motion bounding boxes (via drawRectangle) until the
    user presses 'q' or the stream ends.
    """
    source_file = browseFiles()
    capture = VideoCapture(source_file)

    # Build the initial background model from the first frame, once.
    # (Previously this sat inside an outer while(1), so pressing 'q' only
    # re-seeded the background and the release/destroy calls never ran.)
    ret_old, old_frame = capture.read()
    gray_oldframe = cvtColor(old_frame, COLOR_BGR2GRAY)
    if is_blur:
        gray_oldframe = GaussianBlur(gray_oldframe, kernel_gauss, 0)
    oldBlurMatrix = np.float32(gray_oldframe)
    accumulateWeighted(gray_oldframe, oldBlurMatrix, 0.003)

    while True:
        ret, frame = capture.read()
        if not ret:
            # End of stream: stop instead of crashing in cvtColor(None).
            break
        gray_frame = cvtColor(frame, COLOR_BGR2GRAY)
        if is_blur:
            newBlur_frame = GaussianBlur(gray_frame, kernel_gauss, 0)
        else:
            newBlur_frame = gray_frame
        newBlurMatrix = np.float32(newBlur_frame)
        # Motion mask = thresholded difference against the background.
        minusMatrix = absdiff(newBlurMatrix, oldBlurMatrix)
        ret, minus_frame = threshold(minusMatrix, 60, 255.0, THRESH_BINARY)
        # Slowly adapt the background toward the current frame.
        accumulateWeighted(newBlurMatrix, oldBlurMatrix, 0.02)
        imshow('Input', frame)
        drawRectangle(frame, minus_frame)
        if cv2.waitKey(20) & 0xFF == ord('q'):
            break

    capture.release()
    cv2.destroyAllWindows()
def drawRectangle(frame, minus_frame):
    """Box the motion regions found in *minus_frame* onto *frame*.

    minus_frame is the thresholded difference image from objdetect().
    The annotated frame is shown in the 'Object_Detection' window.
    """
    if(is_blur):
        minus_frame = GaussianBlur(minus_frame, kernel_gauss, 0)
    minus_Matrix = np.float32(minus_frame)
    if(is_close):
        # Morphological closing; iteration counts come live from the
        # two GUI sliders (dilation then erosion).
        for i in range(get_current_value1()):
            minus_Matrix = dilate(minus_Matrix, kernel_d)
        for i in range(get_current_value2()):
            minus_Matrix = erode(minus_Matrix, kernel_e)
    minus_Matrix = np.clip(minus_Matrix, 0, 255)
    minus_Matrix = np.array(minus_Matrix, np.uint8)
    contours, hierarchy = findContours(minus_Matrix.copy(), RETR_TREE, CHAIN_APPROX_SIMPLE)
    for c in contours:
        (x, y, w, h) = boundingRect(c)
        rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
        if( is_draw_ct ):
            # NOTE(review): draws ALL contours on every loop pass;
            # presumably intended to run once after the loop — confirm.
            drawContours(frame, contours, -1, (0, 255, 255), 2)
    imshow('Object_Detection', frame)
def deturbulence():
    """Atmospheric-turbulence mitigation on a user-selected video ROI.

    Per frame: blend the ROI into an iterative-average reference
    (stabilization), then Wiener-deconvolve it with an Airy-disk PSF.
    Shows raw vs enhanced ROI until 'q' is pressed.
    Relies on loadVideo/selectROI/MaxSharpnessFusedPatch from utils and
    wiener from skimage.restoration.
    """
    source_file = browseFiles()
    dataType = np.float32
    N_FirstReference = 10            # frames used to seed the reference
    L = 11
    patch_size = (L, L)              # (y,x) [pixels]. isoplanatic region
    patch_half_size = (int((patch_size[0] - 1) / 2), int((patch_size[1] - 1) / 2))
    patches_shift = 1                # when equal to one we get full overlap
    registration_interval = (15, 15) # (y,x). for each side: up/down/left/right
    R = 0.08                         # iterative-average blending constant
    m_lambda0 = 0.55 * 10 ** -6      # wavelength [m]
    m_aperture = 0.06                # aperture diameter [m]
    m_focal_length = 250 * 10 ** -3
    fno = m_focal_length / m_aperture
    readVideo = 1
    # 1: lucky-imaging fusion, 2: mean of first frames, 3: first frame
    ReferenceInitializationOpt = 2
    video_path = source_file
    ImagesSequence = loadVideo(video_path)
    ImagesSequence = np.array(ImagesSequence).astype(dataType)
    roi = selectROI(ImagesSequence[0], resize_factor=2)
    roi_plate_250 = (1092, 830, 564, 228)  # canned ROI for offline runs
    roi_test = (310, 279, 200, 128)
    if readVideo:
        ROI_coord = roi
    else:
        ROI_coord = roi_plate_250
    # Reorder to (row, col, height, width), trimmed to whole patches.
    ROI_coord = (ROI_coord[1], ROI_coord[0], patch_size[1] * int(ROI_coord[3] / patch_size[1]),
                 patch_size[0] * int(ROI_coord[2] / patch_size[0]))  # now roi[0] - rows!
    ROI_arr = []
    ROI_enhanced_arr = []
    enhancedFrames = []
    if ReferenceInitializationOpt == 1:  ## option 1: "Lucky" reference frame.
        # create Reference frame by using "lucky imaging" on first N frames
        FusedPatch = MaxSharpnessFusedPatch([frame[ROI_coord[0]:ROI_coord[0] + ROI_coord[2], ROI_coord[1]:ROI_coord[1] + ROI_coord[3]] \
                                             for frame in ImagesSequence[:N_FirstReference]], patch_half_size)
        ReferenceFrame = ImagesSequence[N_FirstReference]
        ReferenceFrame[ROI_coord[0] + patch_half_size[0]:ROI_coord[0] + ROI_coord[2] - patch_half_size[0],
                       ROI_coord[1] + patch_half_size[1]:ROI_coord[1] + ROI_coord[3] - patch_half_size[1]] = FusedPatch
        startRegistrationFrame = N_FirstReference
    elif ReferenceInitializationOpt == 2:  ## option 2: Mean of N_FirstReference frames.
        ReferenceFrame = np.mean(ImagesSequence[:N_FirstReference], axis=0)
        startRegistrationFrame = N_FirstReference
    elif ReferenceInitializationOpt == 3:  ## option 3: first frame
        ReferenceFrame = ImagesSequence[0]
        startRegistrationFrame = 1
    else:
        # NOTE(review): `assert Exception(...)` is always truthy and never
        # fails — presumably `raise Exception(...)` was intended; confirm.
        assert Exception("only values 1, 2 or 3 are acceptable")
    enhancedFrames.append(ReferenceFrame)
    i = 0
    for frame in ImagesSequence[startRegistrationFrame:]:
        t = time.time()
        enhancedFrame = np.copy(frame)
        ROI = frame[ROI_coord[0]:ROI_coord[0] + ROI_coord[2], ROI_coord[1]:ROI_coord[1] + ROI_coord[3]]
        ROI_arr.append(ROI*255.0/ROI.max())
        no_rows_Cropped_Frame, no_cols_Cropped_Frame = \
            (ROI_coord[2] + 2 * registration_interval[0], ROI_coord[3] + 2 * registration_interval[1])
        # Blend the current ROI into the running reference (stabilization).
        ReferenceFrame[ROI_coord[0]:ROI_coord[0] + ROI_coord[2], ROI_coord[1]:ROI_coord[1] + ROI_coord[3]] = \
            (1 - R) * ReferenceFrame[ROI_coord[0]:ROI_coord[0] + ROI_coord[2], ROI_coord[1]:ROI_coord[1] + ROI_coord[3]] + \
            R * frame[ROI_coord[0]: ROI_coord[0] + ROI_coord[2], ROI_coord[1]:ROI_coord[1] + ROI_coord[3]]
        ROI_registered = ReferenceFrame[ROI_coord[0]:ROI_coord[0] + ROI_coord[2], ROI_coord[1]:ROI_coord[1] + ROI_coord[3]]
        # Optics constants re-declared per frame (shadow the ones above).
        m_lambda0 = 0.55 * 10 ** -6
        m_aperture_diameter = 0.055
        m_focal_length = 250 * 10 ** -3
        fno = m_focal_length / m_aperture_diameter
        ROI_reg_norm = ROI_registered / 255
        k = (2 * np.pi) / m_lambda0  # wavenumber of light in vacuum
        Io = 1.0   # relative intensity
        L = 250    # distance of screen from aperture
        # Sample the aperture plane and build the Airy-disk PSF.
        X = np.arange(-m_aperture_diameter/2, m_aperture_diameter/2, m_aperture_diameter/70)
        Y = X
        XX, YY = np.meshgrid(X, Y)
        AiryDisk = np.zeros(XX.shape)
        q = np.sqrt((XX-np.mean(Y)) ** 2 + (YY-np.mean(Y)) ** 2)
        beta = k * m_aperture_diameter * q / 2 / L
        AiryDisk = Io * (2 * j1(beta) / beta) ** 2
        AiryDisk_normalized = AiryDisk/AiryDisk.max()
        # Deconvolve the stabilized ROI against the Airy-disk PSF.
        deblurredROI_wiener = wiener(ROI_reg_norm, psf=AiryDisk, balance=7)
        deblurredROI = deblurredROI_wiener
        deblurredROI = deblurredROI / deblurredROI.max() * 255.0
        enhancedFrame[ROI_coord[0]:ROI_coord[0] + ROI_coord[2], ROI_coord[1]:ROI_coord[1] + ROI_coord[3]] = np.abs(deblurredROI)
        ROI_enhanced_arr.append(deblurredROI)
        enhancedFrames.append(enhancedFrame)
        print('Frame analysis time: ', time.time() - t)
        cv2.imshow('Input', ROI_arr[i].astype(np.uint8))
        cv2.imshow('Output', ROI_enhanced_arr[i].astype(np.uint8))
        if cv2.waitKey(20) & 0xFF == ord('q'):
            break
        i += 1
    cv2.destroyAllWindows()
def endeturbulence():
    """Enhanced turbulence mitigation: deturbulence() plus optical-flow
    registration of the ROI against the running reference.

    Relies on loadVideo/selectROI/MaxSharpnessFusedPatch from utils, and
    optical_flow_tvl1/warp/wiener from scikit-image.
    """
    source_file = browseFiles()
    dataType = np.float32
    N_FirstReference = 10
    L = 11
    patch_size = (L, L)              # (y,x) [pixels], isoplanatic region
    patch_half_size = (int((patch_size[0] - 1) / 2), int((patch_size[1] - 1) / 2))
    patches_shift = 1
    registration_interval = (15, 15) # margin around the ROI for registration
    R = 0.08                         # iterative-average blending constant
    m_lambda0 = 0.55 * 10 ** -6
    m_aperture = 0.06
    m_focal_length = 250 * 10 ** -3
    fno = m_focal_length / m_aperture
    readVideo = 1
    # 1: lucky-imaging fusion, 2: mean of first frames, 3: first frame
    ReferenceInitializationOpt = 2
    video_path = source_file
    ImagesSequence = loadVideo(video_path)
    ImagesSequence = np.array(ImagesSequence).astype(dataType)
    roi = selectROI(ImagesSequence[0], resize_factor=2)
    roi_plate_250 = (1092, 830, 564, 228)
    roi_test = (310, 279, 200, 128)
    if readVideo:
        ROI_coord = roi
    else:
        ROI_coord = roi_plate_250
    # Reorder to (row, col, height, width), trimmed to whole patches.
    ROI_coord = (ROI_coord[1], ROI_coord[0], patch_size[1] * int(ROI_coord[3] / patch_size[1]),
                 patch_size[0] * int(ROI_coord[2] / patch_size[0]))  # now roi[0] - rows!
    ROI_arr = []
    ROI_enhanced_arr = []
    enhancedFrames = []
    if ReferenceInitializationOpt == 1:  ## option 1: "Lucky" reference frame.
        FusedPatch = MaxSharpnessFusedPatch([frame[ROI_coord[0]:ROI_coord[0] + ROI_coord[2], ROI_coord[1]:ROI_coord[1] + ROI_coord[3]] \
                                             for frame in ImagesSequence[:N_FirstReference]], patch_half_size)
        ReferenceFrame = ImagesSequence[N_FirstReference]
        ReferenceFrame[ROI_coord[0] + patch_half_size[0]:ROI_coord[0] + ROI_coord[2] - patch_half_size[0],
                       ROI_coord[1] + patch_half_size[1]:ROI_coord[1] + ROI_coord[3] - patch_half_size[1]] = FusedPatch
        startRegistrationFrame = N_FirstReference
    elif ReferenceInitializationOpt == 2:  ## option 2: Mean of N_FirstReference frames.
        ReferenceFrame = np.mean(ImagesSequence[:N_FirstReference], axis=0)
        startRegistrationFrame = N_FirstReference
    elif ReferenceInitializationOpt == 3:  ## option 3: first frame
        ReferenceFrame = ImagesSequence[0]
        startRegistrationFrame = 1
    else:
        # NOTE(review): `assert Exception(...)` never fails — presumably
        # `raise Exception(...)` was intended; confirm.
        assert Exception("only values 1, 2 or 3 are acceptable")
    #showImage(ReferenceFrame.astype(np.uint8))
    enhancedFrames.append(ReferenceFrame)
    i = 0
    for frame in ImagesSequence[startRegistrationFrame:]:
        t = time.time()
        enhancedFrame = np.copy(frame)
        ROI = frame[ROI_coord[0]:ROI_coord[0] + ROI_coord[2], ROI_coord[1]:ROI_coord[1] + ROI_coord[3]]
        ROI_arr.append(ROI*255.0/ROI.max())
        ## Image Registration via optical flow
        no_rows_Cropped_Frame, no_cols_Cropped_Frame = \
            (ROI_coord[2] + 2 * registration_interval[0], ROI_coord[3] + 2 * registration_interval[1])
        u, v = optical_flow_tvl1(
            ReferenceFrame[ROI_coord[0] - registration_interval[0]:ROI_coord[0] + ROI_coord[2] + registration_interval[0],
                           ROI_coord[1] - registration_interval[1]:ROI_coord[1] + ROI_coord[3] + registration_interval[1]],
            enhancedFrame[ROI_coord[0] - registration_interval[0]:ROI_coord[0] + ROI_coord[2] + registration_interval[0],
                          ROI_coord[1] - registration_interval[1]:ROI_coord[1] + ROI_coord[3] + registration_interval[1]],
            attachment=10, tightness=0.3, num_warp=3, num_iter=5, tol=4e-4, prefilter=False)
        row_coords, col_coords = np.meshgrid(np.arange(no_rows_Cropped_Frame), np.arange(no_cols_Cropped_Frame),
                                             indexing='ij')
        # NOTE(review): the warped result is not assigned anywhere, so this
        # registration step currently has no effect on the output — confirm.
        warp(enhancedFrame[ROI_coord[0] - registration_interval[0]:ROI_coord[0] + ROI_coord[2] + registration_interval[0],
                           ROI_coord[1] - registration_interval[1]:ROI_coord[1] + ROI_coord[3] + registration_interval[1]],
             np.array([row_coords + v, col_coords + u]), mode='nearest', preserve_range=True).astype(dataType)
        ## Iterative averaging ROI
        ReferenceFrame[ROI_coord[0]:ROI_coord[0] + ROI_coord[2], ROI_coord[1]:ROI_coord[1] + ROI_coord[3]] = \
            (1 - R) * ReferenceFrame[ROI_coord[0]:ROI_coord[0] + ROI_coord[2], ROI_coord[1]:ROI_coord[1] + ROI_coord[3]] + \
            R * frame[ROI_coord[0]: ROI_coord[0] + ROI_coord[2], ROI_coord[1]:ROI_coord[1] + ROI_coord[3]]
        ROI_registered = ReferenceFrame[ROI_coord[0]:ROI_coord[0] + ROI_coord[2], ROI_coord[1]:ROI_coord[1] + ROI_coord[3]]
        # Optics constants re-declared per frame (shadow the ones above).
        m_lambda0 = 0.55 * 10 ** -6
        m_aperture_diameter = 0.055
        m_focal_length = 250 * 10 ** -3
        fno = m_focal_length / m_aperture_diameter
        ROI_reg_norm = ROI_registered / 255
        k = (2 * np.pi) / m_lambda0  # wavenumber of light in vacuum
        Io = 1.0  # relative intensity
        L = 250   # distance of screen from aperture
        X = np.arange(-m_aperture_diameter/2, m_aperture_diameter/2, m_aperture_diameter/70)  # pupil coordinates
        Y = X
        XX, YY = np.meshgrid(X, Y)
        AiryDisk = np.zeros(XX.shape)
        q = np.sqrt((XX-np.mean(Y)) ** 2 + (YY-np.mean(Y)) ** 2)
        beta = k * m_aperture_diameter * q / 2 / L
        AiryDisk = Io * (2 * j1(beta) / beta) ** 2
        AiryDisk_normalized = AiryDisk/AiryDisk.max()
        # Wiener deconvolution of the stabilized ROI with the Airy-disk PSF.
        deblurredROI_wiener = wiener(ROI_reg_norm, psf=AiryDisk, balance=7)
        deblurredROI = deblurredROI_wiener
        deblurredROI = deblurredROI / deblurredROI.max() * 255.0
        enhancedFrame[ROI_coord[0]:ROI_coord[0] + ROI_coord[2], ROI_coord[1]:ROI_coord[1] + ROI_coord[3]] = np.abs(deblurredROI)
        ROI_enhanced_arr.append(deblurredROI)
        enhancedFrames.append(enhancedFrame)
        print('Frame analysis time: ', time.time() - t)
        cv2.imshow('Input', ROI_arr[i].astype(np.uint8))
        cv2.imshow('Output', ROI_enhanced_arr[i].astype(np.uint8))
        if cv2.waitKey(20) & 0xFF == ord('q'):
            break
        i += 1
    cv2.destroyAllWindows()
# Action buttons wiring each tool to its handler function.
# NOTE(review): .place() returns None, so C3/C4/C5/frame/foot hold None.
C3=Button(window,text = "Object Detection",font=("Times New Roman",12, 'bold'),command=objdetect).place(x=880,y=10)
C4=Button(window,text="Turbulence Mitigation",font=("Times New Roman",12, 'bold'),command=deturbulence).place(x=1090,y=10)
C5=Button(window,text="Enhanced - TM",font=("Times New Roman",12, 'bold'),command=endeturbulence).place(x=1090,y=60)
#___________________FOOTER OF THE GUI WINDOW______________________
frame=LabelFrame(window,width=1300, height=50,fg="black",bg="aqua").place(x=0,y=430)
foot=Label(frame,text = "Developed using Python 3.8",font=("Times New Roman",11),fg="black",bg="aqua").place(x=840,y=445)
# Enter the Tk event loop; blocks until the window is closed.
window.mainloop()
#____________________END OF PROGRAM______________________
| [
"noreply@github.com"
] | noreply@github.com |
a75b6ed4fd5b72efddd1dd24d8f759d4ff1628e7 | 64c47e207af042b31d820d78d8933ffb8efcd18d | /logchecker/ui.py | b83e9ef58642fa8f882aff05920cf8e0d4938bde | [] | no_license | N0SO/moqputils | 1518395db30a982b8f4e624e9a5fe284acafd475 | 293a2a4fcada1bbce1245fc4f398c35c135c20c6 | refs/heads/master | 2023-06-10T04:34:14.602425 | 2023-05-27T01:53:35 | 2023-05-27T01:53:35 | 149,353,354 | 0 | 0 | null | 2019-09-02T03:41:01 | 2018-09-18T21:18:17 | Python | UTF-8 | Python | false | false | 12,916 | py | #!/usr/bin/python
import gi, os, sys
gi.require_version("Gtk", "3.0")
from gi.repository import Gtk
from logchecker.filedialogs import *
from logchecker.runlogcheck import runLogCheck
from logchecker.__init__ import VERSION
class About1():
    """Modal 'About' dialog showing program, GTK, and Python versions."""

    def __init__(self):
        gtk_version = 'Gtk V%d.%d.%d' % (Gtk.MAJOR_VERSION,
                                         Gtk.MINOR_VERSION,
                                         Gtk.MICRO_VERSION)
        python_version = 'Python V%d.%d.%d' % (sys.version_info.major,
                                               sys.version_info.minor,
                                               sys.version_info.micro)
        runtime_info = "%s\n%s" % (gtk_version, python_version)
        dialog = Gtk.AboutDialog()
        dialog.set_program_name("Missouri QSO Party Log Checker")
        dialog.set_version('V%s' % (VERSION))
        dialog.set_authors(["Mike, N0SO"])
        dialog.set_copyright("(c) BEARS-STL 2021")
        dialog.set_comments("\n%s" % (runtime_info))
        dialog.set_website("http://n0so.net")
        # Run modally, then tear the dialog down.
        dialog.run()
        dialog.destroy()
class Handler():
    """Signal handler object wired into the GtkBuilder UI.

    Holds mutable UI state (selected file, option-switch positions, the
    most recently checked log dict) and implements one on_* callback per
    widget signal named in the .ui file.
    """

    def __init__(self):
        # Selected-file label text and the two status-line strings.
        self.fileButton_text = None
        self.status1_text = None
        self.status2_text = None
        # Positions of the option switches in the UI.
        self.sw_cabBonus = False
        self.sw_loadLogs = False
        self.sw_acceptErrors = False
        self.sw_replaceExisting = False
        # Most recently checked log (dict produced by runLogCheck), if any.
        self.log = None
        self.Showme = False
        self.Missouri = False
        self.logstatusCallback = None

    def set_logstatusCallback(self, callback):
        # Register an external callback for log-status updates.
        self.logstatusCallback = callback

    def on_win_show(self, args = None):
        print('on_win_show called...')

    """
    def childview(self, parent):
        try:
            childlist = parent.get_children()
        except:
            childlist = []
        print('%s has %d children...'%(parent.get_name(), len(childlist)))
        for child in childlist:
            self.childview(child)
    """

    def on_win_destroy(self, args):
        # Window close: stop the GTK main loop.
        Gtk.main_quit()

    def on_cabBonus_activate(self, widget):
        print('on_cabBonus_activate called...')

    def on_cabBonus_state_set(self, widget, state):
        # Track the Cabrillo-bonus switch position.
        print('on_cabBonus_state_set called...')
        print('widget = %s\nstate=%s\n'%(widget, state))
        self.sw_cabBonus = state

    def on_loadLog_state_set(self, widget, state):
        # Track the load-logs switch position.
        print('on_loadload_state_set called...')
        print('widget = %s\nstate=%s\n'%(widget, state))
        self.sw_loadLogs = state

    def on_loadLog_state_set_accept(self, widget, state):
        # Show/hide the Accept-Errors switch with the load-logs switch.
        if (self.sw_loadLogs):
            widget.set_visible(True)
        else:
            widget.set_visible(False)
        print('on_loadLog_enable_accept called - Setting Accept Errors sw %s'%(self.sw_loadLogs))

    def on_loadLog_state_set_acceptlbl(self, widget, state):
        # Show/hide the Accept-Errors label with the load-logs switch.
        if (self.sw_loadLogs):
            widget.set_visible(True)
        else:
            widget.set_visible(False)
        print('on_loadLog_enable_acceptlbl called - Setting Accept Errors sw %s'%(self.sw_loadLogs))

    def on_loadLog_state_set_replace(self, widget, state):
        # Show/hide the Replace-Log switch with the load-logs switch.
        if (self.sw_loadLogs):
            widget.set_visible(True)
        else:
            widget.set_visible(False)
        print('on_loadLog_enable_replace called - Setting replace log sw %s'%(self.sw_loadLogs))

    def on_loadLog_state_set_replacelbl(self, widget, state):
        # Show/hide the Replace-Log label with the load-logs switch.
        if (self.sw_loadLogs):
            widget.set_visible(True)
        else:
            widget.set_visible(False)
        print('on_loadLog_enable_replacelbl called - Setting replace log lable %s'%(self.sw_loadLogs))

    def on_acceptErrs_state_set(self, widget, state):
        # Track the Accept-Errors switch position.
        print('on_acceptErrs_state_set called...')
        print('widget = %s\nstate=%s\n'%(widget, state))
        self.sw_acceptErrors = state

    def on_replaceLog_state_set(self, widget, state):
        # Track the Replace-Existing switch position.
        print('on_replaceLog_state_set called...')
        print('widget = %s\nstate=%s\n'%(widget, state))
        self.sw_replaceExisting = state

    def on_New1_activate(self, args=None):
        # File->New: reset state, clear the text window and status widgets.
        print('on_New1_activate called')
        self.fileButton_text = None
        self.status1_text = None
        self.status2_text = None
        #self.sw_cabBonus = False
        #self.sw_loadLogs = False
        #self.sw_acceptErrors = False
        #self.sw_replaceExisting = False
        self.log = None
        self.Showme = False
        self.Missouri = False
        self.logstatusCallback = None
        texwin = self.get_descendant(args,'textWindow')
        tbuffer = texwin.get_buffer()
        tbuffer.delete(tbuffer.get_start_iter(), tbuffer.get_end_iter())
        filebutton = self.get_descendant(args,'fileButton')
        self.set_Button_label(filebutton)
        stat1 = self.get_descendant(args,'status1')
        self.set_status1(stat1)
        stat2 = self.get_descendant(args,'status2')
        self.set_status2(stat2)

    def clear_list_store(self, liststore):
        # Empty a Gtk.ListStore (no-op for any other widget type).
        if (type(liststore) is gi.overrides.Gtk.ListStore):
            liststore.clear()

    def on_Open1_activate(self, args=None):
        # File->Open: delegate to the file-button handler.
        print('on_Open1_activate called -')
        self.on_fileButton_clicked(args)
        liststore = self.get_descendant(args,'liststore1',0,True)

    def on_Open1_activate_item(self, args=None):
        print('on_Open1_activate_item called')

    def on_about1_activate(self, args=None):
        # Help->About: show the modal About dialog.
        about = About1()

    def on_Quit1_activate(self, widget=None):
        #print('on_Quit1_activate called')
        self.on_win_destroy(widget)

    def on_fileButton_clicked(self, widget):
        """Prompt for a log file (or a folder of logs) and check each one."""
        #print('File Button Clicked!')
        file1=my_file_open()
        file1.on_file_clicked(widget)
        #print('My File selected: %s'%(file1.fileName))
        if (file1.fileName):# != None:
            path = os.path.split(file1.fileName)
            #print(path)
            if os.path.isdir(file1.fileName):
                # A folder was chosen: process every file inside it.
                print('Folder of logs selected:\n%s'%(file1.fileName))
                fileList = os.listdir(file1.fileName)
                addpath = file1.fileName + '/'
                #print(fileList)
            else:
                fileList = [file1.fileName]
                addpath = ''
            for f in fileList:
                f = addpath+f
                #print(f)
                if (os.path.isfile(f)):
                    print ('processing %s...'%(f))
                    self.status1_text = f
                    """
                    Getlog, summarize and check for errors
                    """
                    check = runLogCheck(f,
                                        widget,
                                        self.sw_cabBonus,
                                        self.sw_loadLogs,
                                        self.sw_acceptErrors,
                                        self.sw_replaceExisting)
                    self.log = check.checkLog(f, self.sw_cabBonus)
                    if (self.log is not None):
                        self.status2_text = check.processAndDisplay(self.log)
                        self.Showme = self.log['BONUS']['SHOWME']
                        self.Missouri = self.log['BONUS']['MISSOURI']
                        print ('status2 = %s'%(self.status2_text))
                #else:
                #    print('%s is not a file!'%(f))
        else:
            # Dialog cancelled: clear the status strings.
            self.status1_text = None
            self.status2_text = None
        #print('on_fileButton1_clicked is complete.')

    def set_Button_label(self, button):
        # Show the selected file's basename, padded to a fixed width.
        if (self.status1_text != None):
            fileOnly = os.path.basename(self.status1_text)
        else:
            fileOnly = 'Select Input File (None)'
        while len(fileOnly) < 70 :
            fileOnly += ' '
        button.set_label(fileOnly)

    def set_status1(self, stat1):
        # Status line 1: selected file's basename (or placeholder).
        if (self.status1_text != None):
            fileOnly = os.path.basename(self.status1_text)
            stat1.set_text(fileOnly)
        else:
            stat1.set_text('status1')

    def set_status2(self, stat2):
        # Status line 2: log-check result summary (or placeholder).
        if (self.status2_text != None):
            print('set_status2 called! %s'%(self.status2_text))
            #fileOnly = os.path.basename(self.status2_text)
            stat2.set_text(self.status2_text)
        else:
            stat2.set_text('status2')

    def set_logstatus1(self, widget, log=None):
        # Append the log HEADER fields to the first summary list store.
        #print('set_logstatus1: \n%s'%(self.log['HEADER']))
        if (log or self.log):
            if (log == None):
                header=self.log['HEADER']
            else:
                header=log['HEADER']
            widget.append([header['CONTEST'],
                           header['CALLSIGN'],
                           header['CATEGORY-STATION'],
                           header['CATEGORY-OPERATOR'],
                           header['CATEGORY-POWER'],
                           header['CATEGORY-MODE'],
                           header['OPERATORS']])

    def set_logstatus2(self, widget, log=None):
        # Append QSO totals, score and bonuses to the second list store.
        if (log or self.log):
            if (log == None):
                qsosum=self.log['QSOSUM']
                bonus=self.log['BONUS']
            else:
                qsosum=log['QSOSUM']
                bonus=log['BONUS']
            widget.append(['%s'%(qsosum['QSOS']),
                           '%s'%(qsosum['CW']),
                           '%s'%(qsosum['PH']),
                           '%s'%(qsosum['DG']),
                           '%s'%(qsosum['VHF']),
                           '%s'%(qsosum['DUPES']),
                           '%s'%(self.log['SCORE']),
                           '%s'%(bonus['CABRILLO']),
                           '%s'%(bonus['W0MA']),
                           '%s'%(bonus['K0GQ']),
                           '%s'%(self.log['MULTS']),
                           '%s'%(len(self.log['ERRORS']))])

    def set_logstatus3(self, widget, log=None):
        # Append MOQP category details to the third list store.
        if (log or self.log):
            if (log == None):
                moqpcat=self.log['MOQPCAT']
            else:
                moqpcat=log['MOQPCAT']
            widget.append([moqpcat['MOQPCAT'],
                           moqpcat['LOCATION'],
                           moqpcat['VHF'],
                           moqpcat['DIGITAL'],
                           moqpcat['ROOKIE'],
                           '%s'%(self.Showme),
                           '%s'%(self.Missouri)])

    def get_descendant(self, widget, child_name, level=0, doPrint=False):
        """Depth-first search of the widget tree for the child whose
        GtkBuilder name equals *child_name*; returns it or None."""
        if widget is not None:
            buildableName = Gtk.Buildable.get_name(widget)
            if buildableName == None: buildableName = 'None'
            widgetName = widget.get_name()
            #print(buildableName, widgetName)
            if doPrint: print('+'*level + '>' + buildableName + ' :: ' + widgetName)
            #if doPrint: print("-"*level + Gtk.Buildable.get_name(widget) + " :: " + widget.get_name())
        else:
            if doPrint: print("-"*level + "None")
            return None
        # If it is what we are looking for
        if(Gtk.Buildable.get_name(widget) == child_name): # not widget.get_name() !
            return widget
        # If this widget has one child only, search that child
        if (hasattr(widget, 'get_child') and callable(getattr(widget, 'get_child')) and child_name != ""):
            child = widget.get_child()
            if child is not None:
                return self.get_descendant(child, child_name,level+1,doPrint)
        # It might have many children, so search them all
        elif (hasattr(widget, 'get_children') and callable(getattr(widget, 'get_children')) and child_name !=""):
            children = widget.get_children()
            found = None
            for child in children:
                if child is not None:
                    found = self.get_descendant(child, child_name,level+1,doPrint) # search the child
                    if found: return found

    def on_Save1_activate(self, widget):
        # File->Save: write a report of the current log via my_file_save.
        print ('on_Save1_activate')
        #print (type(widget))
        #print (dir(widget))
        my_report = my_file_save(self.log, self.status1_text)
        my_report.on_save_clicked(widget)

    def on_Open2_activate(self, widget):
        print ('on_Open2_activate')
        print (type(widget))
        print (dir(widget))
class gui_MOQPLogCheck():
    """Top-level launcher for the MOQP log-checker GTK application."""

    def __init__(self):
        ui_builder = Gtk.Builder()
        ui_builder.add_from_file("ui/logchecker.ui")
        ui_builder.connect_signals(Handler())
        self.appMain(ui_builder)

    def appMain(self, builder):
        """Show the main window and enter the GTK event loop."""
        main_window = builder.get_object("win")
        main_window.show_all()
        Gtk.main()
if __name__ == '__main__':
    # Launch the GTK application when run as a script.
    app = gui_MOQPLogCheck()
| [
"mheitmann@n0so.net"
] | mheitmann@n0so.net |
5c058bce3f30c1740eeeb245c0d9a7dad6cc4d8c | 8b9d7a83cd2d1824b0f42d2be8bd9887bffcd64a | /pytorch-CycleGAN-and-pix2pix/options/test_options.py | 03063e1a39716e88a0cb08f963beccbe4d2a04bb | [
"MIT",
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | diaosiji/CRUI | 2c0505ab6556c189872458b5162141a1299f8e84 | 9f5981904a7254bb0d67d01651b1dfb25c9783a1 | refs/heads/main | 2023-03-13T22:56:17.661160 | 2021-03-02T06:44:36 | 2021-03-02T06:44:36 | 338,830,544 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,258 | py | from .base_options import BaseOptions
class TestOptions(BaseOptions):
    """This class includes test options.

    It also includes shared options defined in BaseOptions.
    """

    def initialize(self, parser):
        """Add test-time options to *parser* and return it.

        Also forces test-mode defaults: model='test', load_size equal to
        crop_size (to avoid cropping), and marks this option set as
        non-training via self.isTrain.
        """
        parser = BaseOptions.initialize(self, parser)  # define shared options
        parser.add_argument('--ntest', type=int, default=float("inf"), help='# of test examples.')
        parser.add_argument('--results_dir', type=str, default='./results/', help='saves results here.')
        parser.add_argument('--aspect_ratio', type=float, default=1.0, help='aspect ratio of result images')
        parser.add_argument('--phase', type=str, default='test', help='train, val, test, etc')
        # Dropout and Batchnorm have different behavior during training and test.
        parser.add_argument('--eval', action='store_true', help='use eval mode during test time.')
        parser.add_argument('--num_test', type=int, default=301, help='how many test images to run')
        # rewrite default values for test mode
        parser.set_defaults(model='test')
        # To avoid cropping, the load_size should be the same as crop_size
        parser.set_defaults(load_size=parser.get_default('crop_size'))
        self.isTrain = False
        return parser
| [
"diaosiji@users.noreply.github.com"
] | diaosiji@users.noreply.github.com |
5b52ddbe0bd41cdee13f55f6361a380438db5ae4 | 6bba678da92ee6e462b9e65cebd9f04ad8de54d1 | /week2/Homework/Exercise_12.py | 5b343e0baf56b594cdc55c061993743e441299d0 | [] | no_license | albihasani94/PH526x | 42cd9f84ccbb6b82c9eba911221925cfe3fd8818 | f09da5cfa66e0b95b76031401b432551e4d16cf1 | refs/heads/master | 2021-08-16T04:58:12.472946 | 2017-11-19T00:57:39 | 2017-11-19T00:57:39 | 109,987,441 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 824 | py | """
This result is expected --- when guessing at random, it's better to go first. Let's see if Player 1 can improve
their strategy. create_board(), random_place(board, player), and evaluate(board) have been created from previous
exercises. Create a function play_strategic_game(), where Player 1 always starts with the middle square, and
otherwise both players place their markers randomly.
Call play_strategic_game once.
"""
from week2 import create_board
from week2 import evaluate
from week2 import random_place
def play_strategic_game():
board, winner = create_board(), 0
board[1, 1] = 1
while winner == 0:
for player in [2, 1]:
random_place(board, player)
winner = evaluate(board)
if winner != 0:
break
return winner
play_strategic_game()
| [
"albin.hasani@ikubinfo.al"
] | albin.hasani@ikubinfo.al |
e0cef2e9484c65cdeaf1980ebb7c8d939eeb49b2 | 738b4fd5d8ebb8c424947a6786bd41ba30df46d6 | /ibeatles/fitting/fitting_launcher.py | 4d66c124f6dace8967669f0f642adedd8f81d6c0 | [
"MIT"
] | permissive | indudhiman/bragg-edge | ba6e5c02e2bf2c2c5f87b626a4578238f7973e43 | 56af0a448534ef9cb5428879ba900e194dc05db2 | refs/heads/master | 2020-04-16T22:49:53.274903 | 2019-01-08T14:18:32 | 2019-01-08T14:18:32 | 165,985,908 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 28,018 | py | try:
import PyQt4
import PyQt4.QtGui as QtGui
import PyQt4.QtCore as QtCore
from PyQt4.QtGui import QMainWindow
from PyQt4.QtGui import QApplication
except:
import PyQt5
import PyQt5.QtGui as QtGui
import PyQt5.QtCore as QtCore
from PyQt5.QtWidgets import QMainWindow
from PyQt5.QtWidgets import QApplication
from pyqtgraph.dockarea import *
import pyqtgraph as pg
import numpy as np
from ibeatles.interfaces.ui_fittingWindow import Ui_MainWindow as UiMainWindow
from ibeatles.utilities.colors import pen_color
from ibeatles.utilities.array_utilities import find_nearest_index
from ibeatles.fitting.fitting_handler import FittingHandler
from ibeatles.fitting.value_table_handler import ValueTableHandler
from ibeatles.fitting.selected_bin_handler import SelectedBinsHandler
from ibeatles.table_dictionary.table_dictionary_handler import TableDictionaryHandler
from ibeatles.fitting.filling_table_handler import FillingTableHandler
from ibeatles.fitting.fitting_initialization_handler import FittingInitializationHandler
from ibeatles.fitting.create_fitting_story_launcher import CreateFittingStoryLauncher
class FittingLauncher(object):
def __init__(self, parent=None):
self.parent = parent
if self.parent.fitting_ui == None:
fitting_window = FittingWindow(parent=parent)
fitting_window.show()
self.parent.fitting_ui = fitting_window
o_fitting = FittingHandler(parent=self.parent)
o_fitting.display_image()
o_fitting.display_roi()
o_fitting.fill_table()
fitting_window.check_advanced_table_status()
else:
self.parent.fitting_ui.setFocus()
self.parent.fitting_ui.activateWindow()
class FittingWindow(QMainWindow):
data = []
there_is_a_roi = False
bragg_edge_active_button_status = True # to make sure active/lock button worked correctly
list_bins_selected_item = []
list_bins_locked_item = []
image_view = None
bragg_edge_plot = None
line_view = None
line_view_fitting = None #roi selected in binning window
all_bins_button = None
indi_bins_button = None
header_value_tables_match = {0: [0],
1: [1],
2: [2],
3: [3],
4: [4],
5: [5,6],
6: [7,8],
7: [9,10],
8: [11,12],
9: [13,14],
10: [15,16],
11: [17,18],
12: [19,20]}
para_cell_width = 110
header_table_columns_width = [30, 30, 50,50,100,
para_cell_width,
para_cell_width,
para_cell_width,
para_cell_width,
para_cell_width,
para_cell_width,
para_cell_width,
para_cell_width,
para_cell_width]
fitting_table_columns_width = [header_table_columns_width[0],
header_table_columns_width[1],
header_table_columns_width[2],
header_table_columns_width[3],
header_table_columns_width[4],
np.int(header_table_columns_width[5]/2),
np.int(header_table_columns_width[5]/2),
np.int(header_table_columns_width[6]/2),
np.int(header_table_columns_width[6]/2),
np.int(header_table_columns_width[7]/2),
np.int(header_table_columns_width[7]/2),
np.int(header_table_columns_width[8]/2),
np.int(header_table_columns_width[8]/2),
np.int(header_table_columns_width[9]/2),
np.int(header_table_columns_width[9]/2),
np.int(header_table_columns_width[10]/2),
np.int(header_table_columns_width[10]/2),
np.int(header_table_columns_width[11]/2),
np.int(header_table_columns_width[11]/2),
np.int(header_table_columns_width[12]/2),
np.int(header_table_columns_width[12]/2)]
# status of alpha and sigma initialization
sigma_alpha_initialized = False
initialization_table = {'d_spacing': np.NaN,
'alpha': np.NaN,
'sigma': np.NaN,
'a1': np.NaN,
'a2': np.NaN,
'a5': np.NaN,
'a6': np.NaN}
bragg_edge_data = {'x_axis': [],
'y_axis': []}
def __init__(self, parent=None):
self.parent = parent
QMainWindow.__init__(self, parent=parent)
self.ui = UiMainWindow()
self.ui.setupUi(self)
self.setWindowTitle("5. Fitting")
self.init_pyqtgraph()
self.init_labels()
self.init_widgets()
self.init_table_behavior()
self.check_status_widgets()
def re_fill_table(self):
o_fitting = FittingHandler(parent=self.parent)
o_fitting.fill_table()
def init_table_behavior(self):
for _column, _width in enumerate(self.header_table_columns_width):
self.ui.header_table.setColumnWidth(_column, _width)
for _column, _width in enumerate(self.fitting_table_columns_width):
self.ui.value_table.setColumnWidth(_column, _width)
self.hori_header_table = self.ui.header_table.horizontalHeader()
self.hori_value_table = self.ui.value_table.horizontalHeader()
self.hori_header_table.sectionResized.connect(self.resizing_header_table)
self.hori_value_table.sectionResized.connect(self.resizing_value_table)
self.hori_header_table.sectionClicked.connect(self.column_header_table_clicked)
self.hori_value_table.sectionClicked.connect(self.column_value_table_clicked)
def column_value_table_clicked(self, column):
'''
to make sure that if the val or err column is selected, or unselected, the other
column behave the same
'''
if column < 5:
return
_item0 = self.parent.fitting_ui.ui.value_table.item(0, column)
state_column_clicked = self.parent.fitting_ui.ui.value_table.isItemSelected(_item0)
if column % 2 == 0:
col1 = column-1
col2 = column
else:
col1 = column
col2 = column+1
nbr_row = self.parent.fitting_ui.ui.value_table.rowCount()
range_selected = QtGui.QTableWidgetSelectionRange(0, col1, nbr_row-1, col2)
self.parent.fitting_ui.ui.value_table.setRangeSelected(range_selected,
state_column_clicked)
def column_header_table_clicked(self, column):
_value_table_column = self.header_value_tables_match.get(column, -1)
nbr_row = self.parent.fitting_ui.ui.value_table.rowCount()
# if both col already selected, unselect them
col_already_selected = False
_item1 = self.parent.fitting_ui.ui.value_table.item(0, _value_table_column[0])
_item2 = self.parent.fitting_ui.ui.value_table.item(0, _value_table_column[-1])
if self.parent.fitting_ui.ui.value_table.isItemSelected(_item1) and \
self.parent.fitting_ui.ui.value_table.isItemSelected(_item2):
col_already_selected = True
if column in [2,3]:
selection = self.parent.fitting_ui.ui.value_table.selectedRanges()
col_already_selected = False
for _select in selection:
if column in [_select.leftColumn(), _select.rightColumn()]:
col_already_selected = True
break
from_col = _value_table_column[0]
to_col = _value_table_column[-1]
range_selected = QtGui.QTableWidgetSelectionRange(0, from_col,
nbr_row-1, to_col)
self.parent.fitting_ui.ui.value_table.setRangeSelected(range_selected,
not col_already_selected)
def resizing_header_table(self, index_column, old_size, new_size):
if index_column < 5:
self.ui.value_table.setColumnWidth(index_column, new_size)
else:
new_half_size = np.int(new_size/2)
index1 = (index_column - 5) * 2 + 5
index2 = index1+1
self.ui.value_table.setColumnWidth(index1, new_half_size)
self.ui.value_table.setColumnWidth(index2, new_half_size)
def resizing_value_table(self, index_column, old_size, new_size):
if index_column < 5:
self.ui.header_table.setColumnWidth(index_column, new_size)
else:
if (index_column % 2) == 1:
right_new_size = self.ui.value_table.columnWidth(index_column + 1)
index_header = np.int(index_column - 5) / 2 + 5
self.ui.header_table.setColumnWidth(index_header, new_size + right_new_size)
else:
left_new_size = self.ui.value_table.columnWidth(index_column - 1)
index_header = np.int(index_column - 6) / 2 + 5
self.ui.header_table.setColumnWidth(index_header, new_size + left_new_size)
def init_widgets(self):
'''
such as material h,k,l list according to material selected in normalized tab
'''
hkl_list = self.parent.selected_element_hkl_array
str_hkl_list = ["{},{},{}".format(_hkl[0], _hkl[1], _hkl[2]) for _hkl in hkl_list]
self.ui.hkl_list_ui.addItems(str_hkl_list)
def check_status_widgets(self):
if (len(self.parent.data_metadata['normalized']['data_live_selection']) > 0) and \
not (self.parent.binning_line_view['pos'] is None):
status = True
else:
status = False
self.ui.instructions_step1_button.setEnabled(status)
def init_labels(self):
self.ui.lambda_min_label.setText(u"\u03BB<sub>min</sub>")
self.ui.lambda_max_label.setText(u"\u03BB<sub>max</sub>")
self.ui.lambda_min_units.setText(u"\u212B")
self.ui.lambda_max_units.setText(u"\u212B")
self.ui.bragg_edge_units.setText(u"\u212B")
self.ui.material_groupBox.setTitle(self.parent.selected_element_name)
def init_pyqtgraph(self):
if (len(self.parent.data_metadata['normalized']['data_live_selection']) > 0) and \
not (self.parent.binning_line_view['pos'] is None):
status = True
else:
status = False
area = DockArea()
self.ui.area = area
area.setVisible(status)
d1 = Dock("Image Preview", size=(200, 300))
d2 = Dock("Bragg Edge", size=(200, 100))
area.addDock(d1, 'top')
area.addDock(d2, 'bottom')
preview_widget = pg.GraphicsLayoutWidget()
pg.setConfigOptions(antialias=True) # this improve display
vertical_layout = QtGui.QVBoxLayout()
preview_widget.setLayout(vertical_layout)
# image view (top plot)
image_view = pg.ImageView()
image_view.ui.roiBtn.hide()
image_view.ui.menuBtn.hide()
self.image_view = image_view
image_view.scene.sigMouseMoved.connect(self.mouse_moved_in_image_view)
top_widget = QtGui.QWidget()
vertical = QtGui.QVBoxLayout()
vertical.addWidget(image_view)
# bin transparency
transparency_layout = QtGui.QHBoxLayout()
spacer = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
transparency_layout.addItem(spacer)
label = QtGui.QLabel("Bin Transparency")
transparency_layout.addWidget(label)
slider = QtGui.QSlider(QtCore.Qt.Horizontal)
slider.setMaximum(100)
slider.setMinimum(0)
slider.setValue(50)
slider.valueChanged.connect(self.slider_changed)
self.slider = slider
transparency_layout.addWidget(slider)
bottom_widget = QtGui.QWidget()
bottom_widget.setLayout(transparency_layout)
top_widget.setLayout(vertical)
d1.addWidget(top_widget)
d1.addWidget(bottom_widget)
# bragg edge plot (bottom plot)
bragg_edge_plot = pg.PlotWidget(title='')
bragg_edge_plot.plot()
self.bragg_edge_plot = bragg_edge_plot
# plot all or individual bins
buttons_layout = QtGui.QHBoxLayout()
spacer = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
buttons_layout.addItem(spacer)
label = QtGui.QLabel("Plot")
label.setEnabled(False)
buttons_layout.addWidget(label)
# all bins button
active_button = QtGui.QRadioButton()
active_button.setText("Active Bins")
active_button.setChecked(True)
#active_button.setEnabled(False)
active_button.pressed.connect(self.active_button_pressed)
self.ui.active_bins_button = active_button
# indi bin button
buttons_layout.addWidget(active_button)
locked_button = QtGui.QRadioButton()
locked_button.setText("Locked Bins")
locked_button.setChecked(False)
#locked_button.setEnabled(False)
locked_button.pressed.connect(self.lock_button_pressed)
self.ui.locked_bins_button = locked_button
buttons_layout.addWidget(locked_button)
bottom_widget = QtGui.QWidget()
bottom_widget.setLayout(buttons_layout)
d2.addWidget(bragg_edge_plot)
d2.addWidget(bottom_widget)
vertical_layout.addWidget(area)
self.ui.widget.setLayout(vertical_layout)
def active_button_pressed(self):
self.bragg_edge_active_button_status = True
self.update_bragg_edge_plot()
def lock_button_pressed(self):
self.bragg_edge_active_button_status = False
self.update_bragg_edge_plot()
def mouse_moved_in_image_view(self):
self.image_view.setFocus(True)
def hkl_list_changed(self, hkl):
bragg_edges_array = self.parent.selected_element_bragg_edges_array
if bragg_edges_array:
if str(hkl) == '':
value = "N/A"
else:
hkl_array = self.parent.selected_element_hkl_array
str_hkl_list = ["{},{},{}".format(_hkl[0], _hkl[1], _hkl[2]) for _hkl in hkl_array]
hkl_bragg_edges = dict(zip(str_hkl_list, bragg_edges_array))
value = "{:04.3f}".format(hkl_bragg_edges[str(hkl)])
else:
value = "N/A"
self.ui.bragg_edge_calculated.setText(value)
def slider_changed(self):
o_fitting_handler = FittingHandler(parent=self.parent)
o_fitting_handler.display_roi()
def active_button_state_changed(self, status, row_clicked):
'''
status: 0: off
2: on
'''
QApplication.setOverrideCursor(QtCore.Qt.WaitCursor)
update_lock_flag = False
if self.parent.advanced_selection_ui:
self.parent.advanced_selection_ui.ui.selection_table.blockSignals(True)
if status == 0:
status = False
else:
status = True
# perform same status on all rows
_selection = self.ui.value_table.selectedRanges()
_this_column_is_selected = False
for _select in _selection:
if 3 in [_select.leftColumn(), _select.rightColumn()]:
_this_column_is_selected = True
break
table_dictionary = self.parent.table_dictionary
if _this_column_is_selected:
update_selection_flag = True #we change the state so we need to update the selection
for _index in table_dictionary:
table_dictionary[_index]['active'] = status
_widget_lock = self.ui.value_table.cellWidget(int(_index), 3)
_widget_lock.blockSignals(True)
_widget_lock.setChecked(status)
_widget_lock.blockSignals(False)
if status:
_widget = self.ui.value_table.cellWidget(int(_index), 2)
if _widget.isChecked(): # because we can not be active and locked at the same time
table_dictionary[_index]['lock'] = False
_widget.blockSignals(True)
_widget.setChecked(False)
_widget.blockSignals(False)
else:
table_dictionary[str(row_clicked)]['active'] = status
if status:
_widget = self.ui.value_table.cellWidget(row_clicked, 2)
if _widget.isChecked():
table_dictionary[str(row_clicked)]['lock'] = False
_widget.blockSignals(True)
_widget.setChecked(False)
_widget.blockSignals(False)
update_lock_flag = True
self.parent.table_dictionary = table_dictionary
# hide this row if status is False and user only wants to see locked items
o_filling_handler = FillingTableHandler(parent = self.parent)
if (status == False) and (o_filling_handler.get_row_to_show_state() == 'active'):
self.parent.fitting_ui.ui.value_table.hideRow(row_clicked)
o_bin_handler = SelectedBinsHandler(parent = self.parent)
o_bin_handler.update_bins_selected()
self.update_bragg_edge_plot()
o_bin_handler.update_bins_locked()
if self.parent.advanced_selection_ui:
self.parent.advanced_selection_ui.update_selection_table()
if update_lock_flag:
self.parent.advanced_selection_ui.update_lock_table()
self.parent.advanced_selection_ui.ui.selection_table.blockSignals(False)
QApplication.restoreOverrideCursor()
def lock_button_state_changed(self, status, row_clicked):
'''
status: 0: off
2: on
we also need to make sure that if the button is lock, it can not be activated !
'''
update_selection_flag = False
if self.parent.advanced_selection_ui:
self.parent.advanced_selection_ui.ui.lock_table.blockSignals(True)
if status == 0:
status = False
else:
status = True
# perform same status on all rows
_selection = self.ui.value_table.selectedRanges()
_this_column_is_selected = False
for _select in _selection:
if 2 in [_select.leftColumn(), _select.rightColumn()]:
_this_column_is_selected = True
break
table_dictionary = self.parent.table_dictionary
if _this_column_is_selected:
update_selection_flag = True #we change the state so we need to update the selection
for _index in table_dictionary:
table_dictionary[_index]['lock'] = status
_widget_lock = self.ui.value_table.cellWidget(int(_index), 2)
_widget_lock.blockSignals(True)
_widget_lock.setChecked(status)
_widget_lock.blockSignals(False)
if status:
_widget = self.ui.value_table.cellWidget(int(_index), 3)
if _widget.isChecked(): # because we can not be active and locked at the same time
table_dictionary[_index]['active'] = False
_widget.blockSignals(True)
_widget.setChecked(False)
_widget.blockSignals(False)
else:
table_dictionary[str(row_clicked)]['lock'] = status
if status:
_widget = self.ui.value_table.cellWidget(row_clicked, 3)
if _widget.isChecked(): # because we can not be active and locked at the same time
table_dictionary[str(row_clicked)]['active'] = False
_widget.blockSignals(True)
_widget.setChecked(False)
_widget.blockSignals(False)
update_selection_flag = True #we change the state so we need to update the selection
self.parent.table_dictionary = table_dictionary
# hide this row if status is False and user only wants to see locked items
o_filling_handler = FillingTableHandler(parent = self.parent)
if (status == False) and (o_filling_handler.get_row_to_show_state() == 'lock'):
self.parent.fitting_ui.ui.value_table.hideRow(row_clicked)
o_bin_handler = SelectedBinsHandler(parent = self.parent)
o_bin_handler.update_bins_locked()
self.update_bragg_edge_plot()
o_bin_handler.update_bins_selected()
if self.parent.advanced_selection_ui:
self.parent.advanced_selection_ui.update_lock_table()
if update_selection_flag:
self.parent.advanced_selection_ui.update_selection_table()
self.parent.advanced_selection_ui.ui.lock_table.blockSignals(False)
def value_table_right_click(self, position):
o_table_handler = ValueTableHandler(parent=self.parent)
o_table_handler.right_click(position=position)
def update_image_view_selection(self):
o_bin_handler = SelectedBinsHandler(parent = self.parent)
o_bin_handler.update_bins_selected()
def update_image_view_lock(self):
o_bin_handler = SelectedBinsHandler(parent = self.parent)
o_bin_handler.update_bins_locked()
def update_bragg_edge_plot(self, update_selection=True):
o_bin_handler = SelectedBinsHandler(parent = self.parent)
o_bin_handler.update_bragg_edge_plot()
if update_selection:
self.bragg_edge_linear_region_changing()
def selection_in_value_table_of_rows_cell_clicked(self, row, column):
# make sure the selection is right (val and err selected at the same time)
if column > 4:
_item0 = self.ui.value_table.item(0, column)
_is_selected = self.ui.value_table.isItemSelected(_item0)
if (column % 2) == 0:
left_column = column - 1
right_column = column
else:
left_column = column
right_column = column + 1
nbr_row = self.ui.value_table.rowCount()
_selection = QtGui.QTableWidgetSelectionRange(0, left_column,
nbr_row-1, right_column)
self.ui.value_table.setRangeSelected(_selection, _is_selected)
self.update_bragg_edge_plot()
def selection_in_value_table_changed(self):
self.selection_in_value_table_of_rows_cell_clicked(-1, -1)
def bragg_edge_linear_region_changing(self):
#current xaxis is
x_axis = self.parent.fitting_bragg_edge_x_axis
_lr = self.parent.fitting_lr
if _lr is None:
return
selection = list(_lr.getRegion())
left_index = find_nearest_index(array = x_axis, value=selection[0])
right_index = find_nearest_index(array = x_axis, value=selection[1])
# display lambda left and right
lambda_array = self.parent.data_metadata['time_spectra']['normalized_lambda'] * 1e10
_lambda_min = lambda_array[left_index]
_lambda_max = lambda_array[right_index]
self.ui.lambda_min_lineEdit.setText("{:4.2f}".format(_lambda_min))
self.ui.lambda_max_lineEdit.setText("{:4.2f}".format(_lambda_max))
def bragg_edge_linear_region_changed(self):
#current xaxis is
x_axis = self.parent.normalized_lambda_bragg_edge_x_axis
_lr = self.parent.fitting_lr
if _lr is None:
return
selection = list(_lr.getRegion())
left_index = find_nearest_index(array = x_axis, value=selection[0])
right_index = find_nearest_index(array = x_axis, value=selection[1])
list_selected = [left_index, right_index]
self.parent.fitting_bragg_edge_linear_selection = list_selected
def check_advanced_table_status(self):
button_status = self.ui.advanced_table_checkBox.isChecked()
self.advanced_table_clicked(button_status)
def advanced_table_clicked(self, status):
QApplication.setOverrideCursor(QtCore.Qt.WaitCursor)
o_table_handler = FillingTableHandler(parent=self.parent)
o_table_handler.set_mode(advanced_mode = status)
QApplication.restoreOverrideCursor()
def update_table(self):
o_filling_table = FillingTableHandler(parent = self.parent)
self.parent.fitting_ui.ui.value_table.blockSignals(True)
o_filling_table.fill_table()
self.parent.fitting_ui.ui.value_table.blockSignals(False)
def min_or_max_lambda_manually_changed(self):
min_lambda = float(str(self.ui.lambda_min_lineEdit.text()))
max_lambda = float(str(self.ui.lambda_max_lineEdit.text()))
lambda_array = self.parent.data_metadata['time_spectra']['normalized_lambda'] * 1e10
left_index = find_nearest_index(array=lambda_array, value=min_lambda)
right_index = find_nearest_index(array=lambda_array, value=max_lambda)
self.parent.fitting_bragg_edge_linear_selection = [left_index, right_index]
o_bin_handler = SelectedBinsHandler(parent = self.parent)
o_bin_handler.update_bragg_edge_plot()
def initialize_all_parameters_button_clicked(self):
o_initialization = FittingInitializationHandler(parent=self.parent)
o_initialization.make_all_active()
o_initialization.run()
def initialize_all_parameters_step2(self):
o_initialization = FittingInitializationHandler(parent=self.parent)
o_initialization.finished_up_initialization()
# activate or not step4 (yes if we were able to initialize correctly all variables)
self.ui.step4_groupBox.setEnabled(o_initialization.all_variables_initialized)
self.update_bragg_edge_plot()
def fit_table_active_cell_checked(self):
pass
def create_fitting_story_checked(self):
o_story = CreateFittingStoryLauncher(parent=self.parent)
def closeEvent(self, event=None):
if self.parent.advanced_selection_ui:
self.parent.advanced_selection_ui.close()
if self.parent.fitting_set_variables_ui:
self.parent.fitting_set_variables_ui.close()
self.parent.fitting_ui = None
| [
"bilheuxjm@ornl.gov"
] | bilheuxjm@ornl.gov |
5eddcec5b87e61fbd22e0488898f5e200ba1eeec | 54862efb410ab30283cab76be3a1b2947296d4c0 | /ibbie_19_字符串.py | b172806bc2335a4e152129b255c6357b345c947b | [] | no_license | Three-Y/MyPythonLearning | 1e32a465896fad2caf24fddf6c00d4e63e98d0eb | 84c66632502b846f580cbd19781f7ec487e8b8ab | refs/heads/master | 2023-01-22T08:52:21.073278 | 2020-12-05T10:08:51 | 2020-12-05T10:08:51 | 316,262,068 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,234 | py | """
字符串
可以用双引号或单引号定义字符串,一般使用双引号
str = "abc"
假如字符串中包含双引号,则使用单引号来定义
str = 'lalala"啦啦啦"'
可以帮字符串看成是一个列表,列表的每个元素都是一个字符
字符串的索引值从0开始
"""
"""定义字符串"""
str1 = "abc"
print(str1)
str2 = '"哈哈"'
print(str2)
"""遍历字符串"""
str3 = "abc123321cba"
for char in str3:
print(char)
"""字符串的长度"""
print(len(str3))
"""统计子字符串在字符串中出现的次数"""
print(str3.count("a"))
print(str3.count("z")) # 没有则返回0
"""从字符串中查找,并返回索引值"""
idx = str3.index("123")
print(idx) # 返回的是查找的字符串的第一个字符在字符串中的索引
# idx2 = str3.index("1234") # 若查找的小字符串不存在,报错:ValueError: substring not found
"""***********字符串常用方法***********"""
"""是否只有空白字符"""
empty_str = ""
space_str = " "
print(empty_str.isspace()) # False
print(space_str.isspace()) # True
"""是否只包含数字"""
num_str = "一百一十一"
print(num_str.isdecimal()) # 12.3(×) 123(√) ①(×) 一百一十一(×)
print(num_str.isdigit()) # 12.3(×) 123(√) ①(√) 一百一十一(×)
print(num_str.isnumeric()) # 12.3(×) 123(√) ①(√) 一百一十一(√)
"""查找与替换"""
hello_str = "hello world"
print(hello_str.startswith("HELLO")) # False 是否以某字符串开头
print(hello_str.endswith("world")) # True 是否以某字符串结尾
print(hello_str.find("llo")) # 2 查找字符串,返回索引值
print(hello_str.find("abc")) # -1 与index不同,找不到也不会报错,会返回-1
print(hello_str.replace("world", "python")) # hello python 替换指定内容,返回替换后的新字符串
print(hello_str) # hello world 但是旧字符串不会被改动
"""文本对齐"""
poem = ["题目",
"作者",
"我是第一行诗",
"我是第二行诗",
"我是第三行诗",
"我是第四行诗", ]
for poem_str in poem:
print("|%s|" % poem_str.center(10, "🍔")) # 居中
# print("|%s|" % poem_str.center(10,"**"))
# 填充的字符只能是一个字符,否则报错:TypeError: The fill character must be exactly one character long
for poem_str in poem:
print("|%s|" % poem_str.ljust(10, "🍟")) # 左对齐
for poem_str in poem:
print("|%s|" % poem_str.rjust(10, "🍖")) # 右对齐
"""去除空白字符"""
space_str = " ab| |cdef "
print(space_str.lstrip()) # 去除左边的空白字符
print(space_str.rstrip()) # 去除右边的空白字符
print(space_str.strip()) # 去除两边的空白字符
"""拆分和连接"""
split_str = "abc*efg*jjj* kkk*ooo"
sp1 = split_str.split()
sp2 = split_str.split("*")
print(sp1) # 分割字符,默认以空白字符分割,返回一个list
print(sp1) # 分割字符,按指定字符分割,返回一个list2
print("🚗".join(sp2)) # 连接字符串,用指定字符连接
"""
截取字符串
字符串有两种索引的方式
正序 从第一个字符开始往后:0,1,2,3...
倒序 从最后一个字符开始往前:-1,-2,-3...
语法:
string[开始索引:结束索引:步长]
开始索引的字符包含在要截取的字符串中
结束索引的字符不包含在要截取的字符串中
步长为整数,从左往右走,步长为负数,从右往左走
"""
str4 = "012345678"
print(str4[2:5]) # 234 截取索引2到4的字符
print(str4[3:]) # 345678 截取索引3到末尾
print(str4[:6]) # 012345 截取开头到索引5
print(str4[::2]) # 02468 从头到尾,每隔一个取一个字符
print(str4[1::2]) # 1357 从索引1开始,每隔一个取一个字符
print(str4[2:-1]) # 234567 从索引2开始,取到倒数第二个字符
print(str4[-2]) # 7 取倒数第二的字符
print(str4[-2:]) # 78 取最后两个字符
print(str4[-1::-1]) # 876543210 逆序,从最后一个字符开始,步长-1,即每取一个字符向前移动一格
print(str4[::-1]) # 876543210 逆序,步长-1,即每取一个字符向前移动一格
| [
"55918473+Three-Y@users.noreply.github.com"
] | 55918473+Three-Y@users.noreply.github.com |
5c92369765c6c39d996b71992115a3db5689d7fc | 615729d4d8647bb819f5eb3f126d31cd8e0d770d | /web2/settings.py | 512ff154561935656a3a903802b2ae78e6eea9a0 | [] | no_license | DucThanh1997/login | e8837ed34048e7d036f750f05874ada66c2910c0 | 7c0705883e338009621d356f3dbb4683c07f50fd | refs/heads/master | 2020-05-04T07:08:47.392252 | 2019-04-02T08:11:52 | 2019-04-02T08:11:52 | 179,021,502 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,435 | py | """
Django settings for web2 project.
Generated by 'django-admin startproject' using Django 2.1.5.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '7=owvstd3!enwg!1y@l4g1(h=2v-(c0y(wv!@si()h!&6l@fig'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'chung.apps.App1Config',
'user.apps.UserConfig',
'my_admin.apps.MyAdminConfig'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'web2.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'web2.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'open_id',
'USER': 'root',
'PASSWORD': 'thanh1997',
'HOST': '127.0.0.1',
'PORT': '3306',
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
AUTH_USER_MODEL = 'chung.MyUsers'
EMAIL_USE_TLS = True
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_HOST_USER = 'ducthanh191997@gmail.com'
EMAIL_HOST_PASSWORD = 'Thanh1997'
EMAIL_PORT = 587
| [
"ducthanh191997@gmail.com"
] | ducthanh191997@gmail.com |
fe19dafe65f6a5cc142d3a50c34fab4c64ecb935 | 5882d610d9b1243cd59361d949af7ee3335c07ba | /test/LSTM-sin/sin.py | e86be6531bfceaefeebcd729ea086979cb29a64f | [] | no_license | uranium410/MontezumaAI | 6eb62aad95fcc616bb659a2b77ce2a272221a15d | c3cd2e299d97727b3229fe7a94eb50abe11ee4bf | refs/heads/master | 2020-05-30T03:04:19.282973 | 2019-08-08T02:52:14 | 2019-08-08T02:52:14 | 189,506,274 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,874 | py | import torch
import torch.nn as nn
from torch.optim import SGD
import math
import numpy as np
import csv
class Predictor(nn.Module):
def __init__(self, inputDim, hiddenDim, outputDim):
super(Predictor, self).__init__()
self.rnn = nn.LSTM(input_size = inputDim,
hidden_size = hiddenDim,
batch_first = True)
self.output_layer = nn.Linear(hiddenDim, outputDim)
def forward(self, inputs, hidden0=None):
output, (hidden, self.cell) = self.rnn(inputs, hidden0)
output = self.output_layer(output[:, -1, :])
return output
def mkDataSet(data_size, data_length=200, freq=120., noise=0.1):
"""
params\n
data_size : データセットサイズ\n
data_length : 各データの時系列長\n
freq : 周波数\n
noise : ノイズの振幅\n
returns\n
train_x : トレーニングデータ(t=1,2,...,size-1の値)\n
train_t : トレーニングデータのラベル(t=sizeの値)\n
"""
train_x = []
train_t = []
for offset in range(data_size):
train_x.append([[math.sin(2 * math.pi * (offset + i) / freq) + np.random.normal(loc=0.0, scale=noise)] for i in range(data_length)])
train_t.append([math.sin(2 * math.pi * (offset + data_length) / freq)])
with open('sin.csv', 'w', newline='') as f:
writer = csv.writer(f)
for i in range(data_length):
writer.writerow([train_x[0][i][0]])
with open('sinEnd.csv', 'w', newline='') as f:
writer = csv.writer(f)
for i in range(data_length):
writer.writerow([train_x[data_length-1][i][0]])
return train_x, train_t
def mkRandomBatch(train_x, train_t, batch_size=10):
"""
train_x, train_tを受け取ってbatch_x, batch_tを返す。
"""
batch_x = []
batch_t = []
for _ in range(batch_size):
idx = np.random.randint(0, len(train_x) - 1)
batch_x.append(train_x[idx])
batch_t.append(train_t[idx])
return torch.tensor(batch_x), torch.tensor(batch_t)
def main():
training_size = 10000
test_size = 1000
epochs_num = 1000
hidden_size = 5
batch_size = 100
train_x, train_t = mkDataSet(training_size)
test_x, test_t = mkDataSet(test_size)
model = Predictor(1, hidden_size, 1)
criterion = nn.MSELoss()
optimizer = SGD(model.parameters(), lr=0.01)
accuracy = []
for epoch in range(epochs_num):
# training
running_loss = 0.0
training_accuracy = 0.0
celldat = []
for i in range(int(training_size / batch_size)):
optimizer.zero_grad()
data, label = mkRandomBatch(train_x, train_t, batch_size)
output = model(data)
cell = model.cell
cell = cell.detach().numpy()
celldat.append(cell)
loss = criterion(output, label)
loss.backward()
optimizer.step()
running_loss += loss.data
training_accuracy += np.sum(np.abs((output.data - label.data).numpy()) < 0.1)
if epoch == 1:
with open('valStart.csv', 'w', newline='') as f:
writer = csv.writer(f)
for i in range(len(celldat)):
writer.writerow([celldat[i][0,0,0],
celldat[i][0,0,1],
celldat[i][0,0,2],
celldat[i][0,0,3],
celldat[i][0,0,4]])
if epoch == 999:
with open('valEnd.csv', 'w', newline='') as f:
writer = csv.writer(f)
for i in range(len(celldat)):
writer.writerow([celldat[i][0,0,0],
celldat[i][0,0,1],
celldat[i][0,0,2],
celldat[i][0,0,3],
celldat[i][0,0,4]])
#test
test_accuracy = 0.0
for i in range(int(test_size / batch_size)):
offset = i * batch_size
data, label = torch.tensor(test_x[offset:offset+batch_size]), torch.tensor(test_t[offset:offset+batch_size])
output = model(data, None)
test_accuracy += np.sum(np.abs((output.data - label.data).numpy()) < 0.1)
training_accuracy /= training_size
test_accuracy /= test_size
print('%d loss: %.3f, training_accuracy: %.5f, test_accuracy: %.5f' % (
epoch + 1, running_loss, training_accuracy, test_accuracy))
accuracy.append(test_accuracy)
with open('accuracy.csv', 'w', newline='') as f:
writer = csv.writer(f)
for i in range(len(accuracy)):
writer.writerow([accuracy[i]])
if __name__ == '__main__':
main() | [
"36751459+uranium410@users.noreply.github.com"
] | 36751459+uranium410@users.noreply.github.com |
47e5b33bf2c46dffa3df76a2bf4134619041815a | b22588340d7925b614a735bbbde1b351ad657ffc | /athena/Reconstruction/RecExample/RecExCommon/share/ContainerRemapping.py | 00f5c31d568f606d42fba403b8779c3df62a656f | [] | no_license | rushioda/PIXELVALID_athena | 90befe12042c1249cbb3655dde1428bb9b9a42ce | 22df23187ef85e9c3120122c8375ea0e7d8ea440 | refs/heads/master | 2020-12-14T22:01:15.365949 | 2020-01-19T03:59:35 | 2020-01-19T03:59:35 | 234,836,993 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,782 | py | include.block ("RecExCommon/ContainerRemapping.py")
from AthenaCommon.AppMgr import ServiceMgr
# Instantiate the address remapping service:
if not hasattr( ServiceMgr, "AddressRemappingSvc" ):
ServiceMgr += CfgMgr.AddressRemappingSvc()
pass
if not hasattr( ServiceMgr, "ProxyProviderSvc" ):
ServiceMgr += CfgMgr.ProxyProviderSvc()
pass
ServiceMgr.ProxyProviderSvc.ProviderNames += [ "AddressRemappingSvc" ]
# Declare the name conversion rules:
ServiceMgr.AddressRemappingSvc.TypeKeyOverwriteMaps += [
"xAOD::ElectronContainer#ElectronCollection->"
"xAOD::ElectronContainer#Electrons",
"xAOD::ElectronAuxContainer#ElectronCollectionAux.->"
"xAOD::ElectronAuxContainer#ElectronsAux.",
"xAOD::ElectronContainer#FwdElectrons->"
"xAOD::ElectronContainer#ForwardElectrons",
"xAOD::ElectronAuxContainer#FwdElectronsAux.->"
"xAOD::ElectronAuxContainer#ForwardElectronsAux.",
"xAOD::PhotonContainer#PhotonCollection->"
"xAOD::PhotonContainer#Photons",
"xAOD::PhotonAuxContainer#PhotonCollectionAux.->"
"xAOD::PhotonAuxContainer#PhotonsAux.",
"xAOD::CaloClusterContainer#egClusterCollection->"
"xAOD::CaloClusterContainer#egammaClusters",
"xAOD::CaloClusterAuxContainer#egClusterCollectionAux.->"
"xAOD::CaloClusterAuxContainer#egammaClustersAux.",
"xAOD::CaloClusterContainer#LArClusterEMFrwd->"
"xAOD::CaloClusterContainer#ForwardElectronClusters",
"xAOD::CaloClusterAuxContainer#LArClusterEMFrwdAux.->"
"xAOD::CaloClusterAuxContainer#ForwardElectronClustersAux.",
"xAOD::TrackParticleContainer#InDetTrackParticlesForward->"
"xAOD::TrackParticleContainer#InDetForwardTrackParticles",
"xAOD::TrackParticleAuxContainer#InDetTrackParticlesForwardAux.->"
"xAOD::TrackParticleAuxContainer#InDetForwardTrackParticlesAux.",
"xAOD::TrackParticleContainer#InDetTrackParticlesLowBeta->"
"xAOD::TrackParticleContainer#InDetLowBetaTrackParticles",
"xAOD::TrackParticleAuxContainer#InDetTrackParticlesLowBetaAux.->"
"xAOD::TrackParticleAuxContainer#InDetLowBetaTrackParticlesAux.",
"xAOD::TauJetContainer#TauRecContainer->"
"xAOD::TauJetContainer#TauJets",
"xAOD::TauJetAuxContainer#TauRecContainerAux.->"
"xAOD::TauJetAuxContainer#TauJetsAux.",
"xAOD::CaloClusterContainer#TauPi0ClusterContainer->"
"xAOD::CaloClusterContainer#TauPi0Clusters",
"xAOD::CaloClusterAuxContainer#TauPi0ClusterContainerAux.->"
"xAOD::CaloClusterAuxContainer#TauPi0ClustersAux.",
"xAOD::VertexContainer#TauSecondaryVertexContainer->"
"xAOD::VertexContainer#TauSecondaryVertices",
"xAOD::VertexAuxContainer#TauSecondaryVertexContainerAux.->"
"xAOD::VertexAuxContainer#TauSecondaryVerticesAux.",
"xAOD::PFOContainer#TauShotPFOContainer->"
"xAOD::PFOContainer#TauShotParticleFlowObjects",
"xAOD::PFOAuxContainer#TauShotPFOContainerAux.->"
"xAOD::PFOAuxContainer#TauShotParticleFlowObjectsAux.",
"xAOD::PFOContainer#TauPi0ChargedPFOContainer->"
"xAOD::PFOContainer#TauChargedParticleFlowObjects",
"xAOD::PFOAuxContainer#TauPi0ChargedPFOContainerAux.->"
"xAOD::PFOAuxContainer#TauChargedParticleFlowObjectsAux.",
"xAOD::PFOContainer#TauPi0NeutralPFOContainer->"
"xAOD::PFOContainer#TauNeutralParticleFlowObjects",
"xAOD::PFOAuxContainer#TauPi0NeutralPFOContainerAux.->"
"xAOD::PFOAuxContainer#TauNeutralParticleFlowObjectsAux.",
"xAOD::PFOContainer#chargedJetETMissPFO_eflowRec->"
"xAOD::PFOContainer#JetETMissChargedParticleFlowObjects",
"xAOD::PFOAuxContainer#chargedJetETMissPFO_eflowRecAux.->"
"xAOD::PFOAuxContainer#JetETMissChargedParticleFlowObjectsAux.",
"xAOD::PFOContainer#neutralJetETMissPFO_eflowRec->"
"xAOD::PFOContainer#JetETMissNeutralParticleFlowObjects",
"xAOD::PFOAuxContainer#neutralJetETMissPFO_eflowRecAux.->"
"xAOD::PFOAuxContainer#JetETMissNeutralParticleFlowObjectsAux.",
"xAOD::CaloClusterContainer#CaloCalTopoCluster->"
"xAOD::CaloClusterContainer#CaloCalTopoClusters",
"xAOD::CaloClusterAuxContainer#CaloCalTopoClusterAux.->"
"xAOD::CaloClusterAuxContainer#CaloCalTopoClustersAux.",
"xAOD::TruthEventContainer#TruthEvent->"
"xAOD::TruthEventContainer#TruthEvents",
"xAOD::TruthEventAuxContainer#TruthEventAux.->"
"xAOD::TruthEventAuxContainer#TruthEventsAux.",
"xAOD::TruthParticleContainer#TruthParticle->"
"xAOD::TruthParticleContainer#TruthParticles",
"xAOD::TruthParticleAuxContainer#TruthParticleAux.->"
"xAOD::TruthParticleAuxContainer#TruthParticlesAux.",
"xAOD::TruthVertexContainer#TruthVertex->"
"xAOD::TruthVertexContainer#TruthVertices",
"xAOD::TruthVertexAuxContainer#TruthVertexAux.->"
"xAOD::TruthVertexAuxContainer#TruthVerticesAux."
]
| [
"rushioda@lxplus754.cern.ch"
] | rushioda@lxplus754.cern.ch |
1c493e70c97ea95ec9b98c42aebac1f4257b036f | 741b69f020b60603f5063715557507feae0b08fd | /backend/src/main.py | b0c8d6c86d37d39923de059428a55126491e99ec | [] | no_license | Materson/Warehouse-with-us | 56c4d62a74192fd8ac76b2277462208d41b125eb | bbac2ebf526f72cbad9d4655bf7e862d3e1db9c0 | refs/heads/master | 2023-06-02T23:32:42.580713 | 2021-06-16T12:27:06 | 2021-06-16T12:27:06 | 372,070,864 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 983 | py | from fastapi import FastAPI, Depends
from routers import user, product, auth, version, category
from utils.auth import get_current_active_user
from fastapi.middleware.cors import CORSMiddleware
app = FastAPI()
origins = [
"https://localhost",
"https://localhost:3002",
"https://192.168.43.54:3002",
"https://elf:3002",
"https://elf"
]
app.add_middleware(
CORSMiddleware,
allow_origins=origins,
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
@app.get("/")
async def read_root():
return {"Hello": "World"}
app.include_router(auth.router, prefix="/auth")
app.include_router(version.router, prefix="/version")
app.include_router(user.router, prefix="/user", dependencies=[Depends(get_current_active_user)])
app.include_router(category.router, prefix="/category", dependencies=[Depends(get_current_active_user)])
app.include_router(product.router, prefix="/product", dependencies=[Depends(get_current_active_user)])
| [
"zapomnialem63@wp.pl"
] | zapomnialem63@wp.pl |
70b13e09918671ec8f42febe6f91674c2a84f798 | d4f2e2e3552ab4b111f78cfbad0d30c144201093 | /2016-12-20/semaphore.py | 2c1f4492110d89b3a3f1daa84001456b57596e8d | [
"Apache-2.0"
] | permissive | dongweiming/mp | c1e9f6f2c1fd8adbd4d7b8ffc45c5cc288cdcd80 | 129c31c818e1f0c39c983aad1f2f1ad9fa7efb1c | refs/heads/master | 2023-04-29T07:56:27.198574 | 2022-10-30T04:20:09 | 2022-10-30T04:21:27 | 75,051,758 | 96 | 35 | Apache-2.0 | 2023-04-17T17:34:17 | 2016-11-29T06:44:53 | Python | UTF-8 | Python | false | false | 509 | py | import aiohttp
import asyncio
NUMBERS = range(12)
URL = 'http://httpbin.org/get?a={}'
sema = asyncio.Semaphore(3)
async def fetch_async(a):
async with aiohttp.request('GET', URL.format(a)) as r:
data = await r.json()
return data['args']['a']
async def print_result(a):
with (await sema):
r = await fetch_async(a)
print('fetch({}) = {}'.format(a, r))
loop = asyncio.get_event_loop()
f = asyncio.wait([print_result(num) for num in NUMBERS])
loop.run_until_complete(f)
| [
"ciici1234@hotmail.com"
] | ciici1234@hotmail.com |
3d2cdb0df6994ed18122a7d3e04ebebc15aee7da | e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f | /indices/nnuiy.py | d0fcd3f412294aeefbace25ce9b8a1fa5cf588aa | [] | no_license | psdh/WhatsintheVector | e8aabacc054a88b4cb25303548980af9a10c12a8 | a24168d068d9c69dc7a0fd13f606c080ae82e2a6 | refs/heads/master | 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 | Python | UTF-8 | Python | false | false | 44 | py | ii = [('MedwTAI2.py', 1), ('MereHHB.py', 1)] | [
"varunwachaspati@gmail.com"
] | varunwachaspati@gmail.com |
f1fb146c3e9cf353c726f32799c2e2ff4ae571ae | 5efdd9942b50acb711ba2dbc192fe58d5aa2b143 | /app.py | 6c2ef939995743edf51648e71bb74a43d3447dc8 | [
"Apache-2.0"
] | permissive | sveldurthik/housing_price | 27f95bf6ab7e84dbc4abe0f3222bbbb70aa602bf | c60acd5dd67122bc578911bfbe0ce591cade5373 | refs/heads/master | 2023-03-23T04:11:52.873148 | 2020-04-29T18:46:33 | 2020-04-29T18:46:33 | 260,011,518 | 0 | 0 | Apache-2.0 | 2021-03-20T03:46:30 | 2020-04-29T18:37:15 | HTML | UTF-8 | Python | false | false | 694 | py | import numpy as np
from flask import Flask, request, jsonify, render_template
import pickle
app = Flask(__name__)
model = pickle.load(open('model.pkl', 'rb'))
@app.route('/')
def home():
return render_template('index.html')
@app.route('/predict',methods=['POST'])
def predict():
'''
For rendering results on HTML GUI
'''
int_features = [int(x) for x in request.form.values()]
final_features = [np.array(int_features)]
prediction = model.predict(final_features)
output = round(prediction[0], 2)
return render_template('index.html', prediction_text='House Price Approximately Rupees {}'.format(output))
if __name__ == "__main__":
app.run(debug=True) | [
"noreply@github.com"
] | noreply@github.com |
b097b7a2e91b91ea67969ca245e6a9c69ad4bc7f | fbbe424559f64e9a94116a07eaaa555a01b0a7bb | /Selenium_Chromium/source/selenium/webdriver/edge/service.py | 9eac51171035f1d2bd648ca409aeee7b8c69b782 | [
"MIT"
] | permissive | ryfeus/lambda-packs | 6544adb4dec19b8e71d75c24d8ed789b785b0369 | cabf6e4f1970dc14302f87414f170de19944bac2 | refs/heads/master | 2022-12-07T16:18:52.475504 | 2022-11-29T13:35:35 | 2022-11-29T13:35:35 | 71,386,735 | 1,283 | 263 | MIT | 2022-11-26T05:02:14 | 2016-10-19T18:22:39 | Python | UTF-8 | Python | false | false | 2,161 | py | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from selenium.webdriver.common import service
class Service(service.Service):
def __init__(self, executable_path, port=0, verbose=False, log_path=None):
"""
Creates a new instance of the EdgeDriver service.
EdgeDriver provides an interface for Microsoft WebDriver to use
with Microsoft Edge.
:param executable_path: Path to the Microsoft WebDriver binary.
:param port: Run the remote service on a specified port.
Defaults to 0, which binds to a random open port of the
system's choosing.
:verbose: Whether to make the webdriver more verbose (passes the
--verbose option to the binary). Defaults to False.
:param log_path: Optional path for the webdriver binary to log to.
Defaults to None which disables logging.
"""
self.service_args = []
if verbose:
self.service_args.append("--verbose")
params = {
"executable": executable_path,
"port": port,
"start_error_message": "Please download from http://go.microsoft.com/fwlink/?LinkId=619687"
}
if log_path:
params["log_file"] = open(log_path, "a+")
service.Service.__init__(self, **params)
def command_line_args(self):
return ["--port=%d" % self.port] + self.service_args
| [
"ryfeus@gmail.com"
] | ryfeus@gmail.com |
b64589cd1421d2fafd88fb37d715b2aaaa0206a5 | 5296285725fb1a6cbf809a33859b08d935934114 | /huahua/server.py | 3e13c80d2106af71f6e6fcf0adc12ba1c9c77af0 | [] | no_license | tony84727/huahua | 5aaf30f95699c5c08c7a6e61f2058bd1374e5128 | f7eaaff09bca8c4c532511b15c6a201af5a9cf11 | refs/heads/main | 2023-07-14T14:09:15.913083 | 2021-09-01T05:41:17 | 2021-09-01T05:41:17 | 395,523,578 | 0 | 0 | null | 2021-09-01T05:41:18 | 2021-08-13T04:47:49 | Python | UTF-8 | Python | false | false | 1,496 | py | from typing import List
import sqlalchemy
from sqlalchemy.engine.base import Engine
from sqlalchemy.orm.session import Session
from sqlalchemy.sql.expression import select
from model import Server
class ServerList:
"""Server list interface"""
async def resolve(self, alias: str) -> Server:
"""Resolve server by alias"""
pass
async def add(self, server: Server):
pass
async def all(self) -> List[Server]:
pass
class AliasConflictException(Exception):
def __init__(self, *args: object) -> None:
super().__init__('Alias duplicated')
class DatabaseServerList(ServerList):
"""Server list presisted by a database"""
def __init__(self, engine: Engine):
self.engine = engine
async def resolve(self, alias: str) -> Server:
with Session(self.engine) as session:
return session.query(Server).filter_by(alias=alias).first()
async def add(self, server: Server):
try:
with Session(self.engine) as session:
session.add(server)
session.commit()
return
except sqlalchemy.exc.IntegrityError as err:
# assume this is a duplicated error because currently the only constraint is the UNIQUE index of the alias column
raise AliasConflictException() from err
async def all(self) -> List[Server]:
with Session(self.engine) as session:
return session.query(Server).limit(100).all()
| [
"noreply@github.com"
] | noreply@github.com |
6c1288b99b5652fc745dbe1c2daa5fa84a0b459f | cd91dd22b391968e077fd0a693813893543cdf1f | /src/opserver/consistent_schdlr.py | d4dee2598c34380e1ab5750983d488d72b64a7fd | [
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] | permissive | forsakening/controller | 6c89476901d78423721b4b444d54123630eaa300 | 408892746ea189aecae33ce3b5a8ac6c1200a8d8 | refs/heads/master | 2020-04-08T12:08:27.204902 | 2019-02-13T08:56:03 | 2019-02-13T08:56:03 | 159,334,442 | 0 | 1 | Apache-2.0 | 2019-01-31T01:44:29 | 2018-11-27T12:48:27 | C++ | UTF-8 | Python | false | false | 10,996 | py | #
# Copyright (c) 2015 Juniper Networks, Inc. All rights reserved.
#
from consistent_hash import ConsistentHash
import gevent
import os
import hashlib
import logging
from kazoo.client import KazooClient
from kazoo.client import KazooState
from kazoo.handlers.gevent import SequentialGeventHandler
from random import randint
import struct
import traceback
from pysandesh.connection_info import ConnectionState
from pysandesh.gen_py.process_info.ttypes import ConnectionStatus, \
ConnectionType
class ConsistentScheduler(object):
'''
LibPartitionHelper abstract out workers and work_items, and their
mapping to partitions. So application can only deal with the work
items it owns, without bothering about partition mapping.
This class also provides syncronization premitives to ensure apps
to clean up b4 giving up their partitions
'''
_MAX_WAIT_4_ALLOCATION = 6 + randint(0, 9)
def __init__(self, service_name=None, zookeeper='127.0.0.1:2181',
delete_hndlr=None, add_hndlr=None, bucketsize=47,
item2part_func=None, partitioner=None, logger=None,
cluster_id=''):
if logger:
self._logger = logger
else:
self._logger = logging.getLogger(__name__)
self._service_name = service_name or os.path.basename(sys.argv[0])
self._item2part_func = item2part_func or self._device2partition
self._zookeeper_srvr = zookeeper
self._zk = None
self._bucketsize = bucketsize
self._delete_hndlr = delete_hndlr
self._add_hndlr = add_hndlr
self._partitioner = partitioner or self._partitioner_func
self._partitions = {}
self._con_hash = None
self._last_log = ''
self._last_log_cnt = 0
self._partition_set = map(str, range(self._bucketsize))
self._cluster_id = cluster_id
if self._cluster_id:
self._zk_path = '/'+self._cluster_id + '/contrail_cs' + '/'+self._service_name
else:
self._zk_path = '/'.join(['/contrail_cs', self._service_name])
self._conn_state = None
self._sandesh_connection_info_update(status='INIT', message='')
while True:
self._logger.error("Consistent scheduler zk start")
self._zk = KazooClient(self._zookeeper_srvr,
handler=SequentialGeventHandler())
self._zk.add_listener(self._zk_lstnr)
try:
self._zk.start()
while self._conn_state != ConnectionStatus.UP:
gevent.sleep(1)
break
except Exception as e:
# Update connection info
self._sandesh_connection_info_update(status='DOWN',
message=str(e))
self._zk.remove_listener(self._zk_lstnr)
try:
self._zk.stop()
self._zk.close()
except Exception as ex:
template = "Exception {0} in Consistent scheduler zk stop/close. Args:\n{1!r}"
messag = template.format(type(ex).__name__, ex.args)
self._logger.error("%s : traceback %s for %s" % \
(messag, traceback.format_exc(), self._service_name))
finally:
self._zk = None
gevent.sleep(1)
self._pc = self._zk.SetPartitioner(path=self._zk_path,
set=self._partition_set,
partition_func=self._partitioner)
self._wait_allocation = 0
gevent.sleep(0)
def _sandesh_connection_info_update(self, status, message):
new_conn_state = getattr(ConnectionStatus, status)
ConnectionState.update(conn_type = ConnectionType.ZOOKEEPER,
name = 'Zookeeper', status = new_conn_state,
message = message,
server_addrs = self._zookeeper_srvr.split(','))
if ((self._conn_state and self._conn_state != ConnectionStatus.DOWN) and
new_conn_state == ConnectionStatus.DOWN):
msg = 'Connection to Zookeeper down: %s' %(message)
self._supress_log(msg)
if (self._conn_state and self._conn_state != new_conn_state and
new_conn_state == ConnectionStatus.UP):
msg = 'Connection to Zookeeper ESTABLISHED'
self._supress_log(msg)
self._conn_state = new_conn_state
# end _sandesh_connection_info_update
def _zk_lstnr(self, state):
self._logger.error("Consistent scheduler listen %s" % str(state))
if state == KazooState.CONNECTED:
# Update connection info
self._sandesh_connection_info_update(status='UP', message='')
elif state == KazooState.LOST:
self._logger.error("Consistent scheduler connection LOST")
# Lost the session with ZooKeeper Server
# Best of option we have is to exit the process and restart all
# over again
self._sandesh_connection_info_update(status='DOWN',
message='Connection to Zookeeper lost')
os._exit(2)
elif state == KazooState.SUSPENDED:
self._logger.error("Consistent scheduler connection SUSPENDED")
# Update connection info
self._sandesh_connection_info_update(status='INIT',
message = 'Connection to zookeeper lost. Retrying')
def schedule(self, items, lock_timeout=30):
gevent.sleep(0)
ret = False
if self._pc.failed:
self._logger.error('Lost or unable to acquire partition')
os._exit(2)
elif self._pc.release:
self._supress_log('Releasing...')
self._release()
elif self._pc.allocating:
self._supress_log('Waiting for allocation...')
self._pc.wait_for_acquire(lock_timeout)
if self._wait_allocation < self._MAX_WAIT_4_ALLOCATION:
self._wait_allocation += 1
else:
self._logger.error('Giving up after %d tries!' %
(self._wait_allocation))
os._exit(2)
elif self._pc.acquired:
self._supress_log('got work: ', list(self._pc))
ret = True
self._wait_allocation = 0
self._populate_work_items(items)
self._supress_log('work items: ',
self._items2name(self.work_items()),
'from the list',
self._items2name(items))
return ret
def members(self):
return list(self._con_hash.nodes)
def partitions(self):
return list(self._pc)
def work_items(self):
return sum(self._partitions.values(), [])
def finish(self):
self._inform_delete(self._partitions.keys())
self._pc.finish()
self._zk.remove_listener(self._zk_lstnr)
gevent.sleep(1)
try:
self._zk.stop()
except:
self._logger.error("Stopping kazooclient failed")
else:
self._logger.error("Stopping kazooclient successful")
try:
self._zk.close()
except:
self._logger.error("Closing kazooclient failed")
else:
self._logger.error("Closing kazooclient successful")
def _items2name(self, items):
return map(lambda x: x.name, items)
def _supress_log(self, *s):
slog = ' '.join(map(str, s))
dl = ''
if slog != self._last_log_cnt:
if self._last_log_cnt:
dl += ' ' * 4
dl += '.' * 8
dl += '[last print repeats %d times]' % self._last_log_cnt
self._last_log_cnt = 0
dl += slog
self._last_log = slog
self._logger.debug(dl)
else:
self._last_log_cnt += 1
def _consistent_hash(self, members):
if self._con_hash is None:
self._con_hash = ConsistentHash(members)
self._logger.error('members: %s' % (str(self._con_hash.nodes)))
cur, updtd = set(self._con_hash.nodes), set(members)
if cur != updtd:
newm = updtd - cur
rmvd = cur - updtd
if newm:
self._logger.error('new members: %s' % (str(newm)))
self._con_hash.add_nodes(list(newm))
if rmvd:
self._logger.error('members left: %s' % (str(rmvd)))
self._con_hash.del_nodes(list(rmvd))
return self._con_hash
def _consistent_hash_get_node(self, members, partition):
return self._consistent_hash(members).get_node(partition)
def _partitioner_func(self, identifier, members, _partitions):
partitions = [p for p in _partitions \
if self._consistent_hash_get_node(members, p) == identifier]
self._logger.error('partitions: %s' % (str(partitions)))
return partitions
def _release(self):
old = set(self._pc)
new = set(self._partitioner(self._pc._identifier,
list(self._pc._party),
self._partition_set))
rmvd = old - new
added = new - old
if rmvd:
self._inform_delete(list(rmvd))
if added:
self._inform_will_add(list(added))
self._pc.release_set()
def _list_items_in(self, partitions):
return sum([self._partitions[k] for k in partitions if k in \
self._partitions], [])
def _inform_will_add(self, partitions):
if callable(self._add_hndlr):
self._add_hndlr(self._list_items_in(partitions))
def _inform_delete(self, partitions):
if callable(self._delete_hndlr):
self._delete_hndlr(self._list_items_in(partitions))
def _populate_work_items(self, items):
self._refresh_work_items()
for i in items:
part = str(self._item2part_func(i.name))
if part in list(self._pc):
if part not in self._partitions:
self._partitions[part] = []
if i.name not in map(lambda x: x.name,
self._partitions[part]):
self._partitions[part].append(i)
self._logger.debug('@populate_work_items(%s): done!' % ' '.join(
map(lambda v: str(v[0]) + ':' + ','.join(map(
lambda x: x.name, v[1])), self._partitions.items())))
gevent.sleep(0)
def _device2partition(self, key):
return struct.unpack('Q', hashlib.md5(key).digest(
)[-8:])[0] % self._bucketsize
def _refresh_work_items(self):
for k in self._partitions:
self._partitions[k] = []
| [
"forsakening@sina.cn"
] | forsakening@sina.cn |
ac4d859daae441283b736274186b6e1067921933 | c739152c0952d944e4304fd64300ee4927f09a9a | /bible/invertedIndex/mainFunction/inputRSV.py | d91390cfe82ce4f66a9c6bc9c9cbe6fc676b8bac | [] | no_license | jesicag/InvertedIndex | 4c74e488900bfa9e8956006a2a488835dfd8cea4 | f33b4e74832caba9d15b71301b9d0020faef85f0 | refs/heads/master | 2020-05-25T05:39:40.378689 | 2019-05-22T14:22:47 | 2019-05-22T14:22:47 | 187,654,265 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,344 | py | import xml.etree.ElementTree as ET
from nltk import word_tokenize
import string
import re
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
import math
from collections import OrderedDict
from operator import itemgetter
# =============================================================================
# QUERY PREPROCESSING
# QUERY(S) TOKENIZATION
# =============================================================================
def tokenizeQuery(queries):
tokenQueri=[]
translator=str.maketrans('','',string.punctuation)
for i in range(len(queries)):
queries[i] = queries[i].translate(translator)
queries[i] = ''.join([i for i in queries[i] if not i.isdigit()])
queries[i] = re.sub(r'^https?:\/\/.*[\r\n]*','', queries[i], re.MULTILINE)
tokenQueri.append(word_tokenize(queries[i]))
return tokenQueri
# =============================================================================
# QUERY(S) CASE FOLDING
# =============================================================================
def caseFoldingQuery(tokenQueri):
for i in range(len(tokenQueri)):
tokenQueri[i] = [query.lower() for query in tokenQueri[i]]
return tokenQueri
# =============================================================================
# QUERY(S) STOPWORD
# =============================================================================
def stopwordRemovalQuery(tokenQueri):
global newQueri
newQueri=[]
for i in range(len(tokenQueri)):
filtered = [w for w in tokenQueri[i] if not w in stopwords.words('english')]
newQueri.append(filtered)
return newQueri
# =============================================================================
# QUERY(S) STEMMING
# =============================================================================
def stemmingQuery(newQueri):
stemmer = PorterStemmer()
global listQueri
global uniqQueri
listQueri=[]
uniqQueri =[]
for i in range (len(newQueri)):
temp=[]
for word in newQueri[i]:
if(word != stemmer.stem(word)):
word = stemmer.stem(word)
temp.append(word)
else:
temp.append(word)
#menghindari duplikasi kata
if(word not in uniqQueri):
uniqQueri.append(word)
listQueri.append(temp)
del temp
return uniqQueri
# =============================================================================
# BIBLE PREPROCESSING
# READ FILE XML
# =============================================================================
def readBible(pathFile):
tree = ET.parse(pathFile)
return tree
# =============================================================================
# GET NAME OF BIBLE'S BOOK
# =============================================================================
def bibleBookName(pathFile):
bibleBookName = []
bible = readBible(pathFile)
for node in bible.iter('div'):
biblename = (node.attrib['bookName'])
bibleBookName.append(biblename)
return bibleBookName
# =============================================================================
# GET NUMBER OF BIBLE'S VERSES
# =============================================================================
def bibleNoVers(pathFile):
global noVers
noVers = []
bible = readBible(pathFile)
for node in bible.iter('verse'):
versNo = (node.attrib['vname'])
noVers.append(versNo)
return noVers
# =============================================================================
# GET WORDS IN EVERY VERSE
# =============================================================================
def bibleVers(pathFile):
bibleVersWord = []
bible = readBible(pathFile)
for word in bible.iter('verse'):
bibleVersWord.append(word.text)
return bibleVersWord
# =============================================================================
# TOKENIZATION
# =============================================================================
def tokenization(allTeks):
translator = str.maketrans('','',string.punctuation)
tokenize = []
for i in range(len(allTeks)):
allTeks[i] = allTeks[i].translate(translator)
allTeks[i] = re.sub(r'^https?:\/\/.*', '', allTeks[i],re.MULTILINE)
tokenize.append(word_tokenize(allTeks[i]))
return tokenize
# =============================================================================
# CASE FOLDING
# =============================================================================
def caseFolding(tokenize):
global caseFold
caseFold=[]
for i in range(len(tokenize)):
for n in range(len(tokenize[i])):
tokenize[i][n] = tokenize[i][n].lower()
caseFold.append(tokenize[i])
return caseFold
# =============================================================================
# STOPWORD
# =============================================================================
def checkStopword(sentence, stop_words):
sentence = [w for w in sentence if not w in stop_words]
return sentence
def stopwordRemove(textList):
stop_words = set(stopwords.words('english'))
text = []
for i in range(len(textList)):
text.append(checkStopword(textList[i], stop_words))
return text
# =============================================================================
# STEMMING
# =============================================================================
def stemming(newText):
stemmer = PorterStemmer()
global listText
listText=[]
for i in range (len(newText)):
for n in range(len(newText[i])):
newText[i][n] = stemmer.stem(newText[i][n])
return newText
def uniqueWords(listText):
global uniqWords
uniqWords = []
for i in range (len(listText)):
for n in range(len(listText[i])):
if(listText[i][n] not in uniqWords):
uniqWords.append(listText[i][n])
return uniqWords
# =============================================================================
# INDEXING
# =============================================================================
def createIndex(newText, docno):
terms = uniqueWords(newText)
proximity = {}
for term in terms:
position = {}
for n in range(len(newText)):
if(term in newText[n]):
position[docno[n]] = []
for i in range(len(newText[n])):
if(term == newText[n][i]):
position[docno[n]].append(i)
proximity[term] = position
return proximity
# =============================================================================
# FIND QUERY IN INDEX TERMS
# =============================================================================
def queryInIndex(query, index):
result = []
for word in query:
if word in index:
result.append(word)
return result
# =============================================================================
# DF
# =============================================================================
def df(query, index):
docFreq = {}
for word in query:
if word in index.keys():
docFreq[word] = len(index[word])
return docFreq
# =============================================================================
# IDF
# =============================================================================
def idf(df, N):
inv = {}
for word in df:
inv[word] = math.log10(N / df[word])
return inv
# =============================================================================
# TF
# =============================================================================
def tf(query, index):
termFreq = {}
for word in query:
freq = {}
if word in index:
for i in index[word]:
freq[i] = len(index[word][i])
termFreq[word] = freq
return termFreq
# =============================================================================
# TF-IDF
# =============================================================================
def tfidf(tf, idf):
w = {}
for word in tf:
wtd = {}
for doc in tf[word]:
wtd[doc] = (1 + (math.log10(tf[word][doc]))) * idf[word]
w[word] = wtd
return w
# =============================================================================
# SCORING
# =============================================================================
def score(TFIDF):
res = {}
for i in TFIDF:
for j in TFIDF[i]:
res[j] = 0
for i in TFIDF:
for j in TFIDF[i]:
res[j] = res[j] + TFIDF[i][j]
sorted_dict = OrderedDict(sorted(res.items(), key=itemgetter(1), reverse=True)[:10])
return sorted_dict
# =============================================================================
# PROCESS FOR QUERY(S)
# =============================================================================
def processQuery(word):
global g,h,j,queryStem
queries = []
for i in word:
queries = word.split()
g = tokenizeQuery(queries)
h = caseFoldingQuery(g)
j = stopwordRemovalQuery(h)
queryStem = stemmingQuery(j)
return queryStem
fileRSV = ('invertedIndex/mainFunction/bible_xml/RSV.xml')
bibleRSV = readBible(fileRSV)
bookNameRSV = bibleBookName(fileRSV)
noVersRSV = bibleNoVers(fileRSV)
versesRSV = bibleVers(fileRSV)
tokenRSV = tokenization(versesRSV)
caseFoldRSV = caseFolding(tokenRSV)
stopwordDelRSV = stopwordRemove(caseFoldRSV)
stemRSV = stemming(stopwordDelRSV)
uniqTermsRSV = uniqueWords(stemRSV)
indexRSV = createIndex(stopwordDelRSV, noVersRSV)
# =============================================================================
# PROCESS FOR DOCUMENTS
# =============================================================================
def mainRSV(textRSV):
l = queryInIndex(processQuery(textRSV),indexRSV)
print(l)
N = len(noVersRSV)
docFrequency = df(l, indexRSV)
invDocFrequency = idf(docFrequency, N)
termFrequency = tf(l, indexRSV)
TFIDF = tfidf(termFrequency, invDocFrequency)
sc = score(TFIDF)
result = []
for i in range(len(sc)):
a = noVersRSV.index(list(sc.keys())[i])
x = list(sc.keys())[i]
y = list(sc.values())[i]
result.append((x, y, versesRSV[a]))
return result | [
"elisajesica@gmail.com"
] | elisajesica@gmail.com |
e2412b63db991bbfe6f09266360a6f8823075b18 | 8516bcb2cd4f40f5fbdff488605019d6c7ac3e01 | /accelerator_abstract/models/base_partner_judge_application_assignment.py | 985959c1db8306eaf8a433c0243f6c2304d9c193 | [
"MIT"
] | permissive | masschallenge/django-accelerator | 0b0782ceb3b010715fbe829f774246ca4a68af23 | 665351d06c2adc65c48642905c499b8226276d12 | refs/heads/development | 2022-11-22T18:40:08.410117 | 2022-11-10T15:11:38 | 2022-11-10T15:11:38 | 94,241,861 | 6 | 0 | MIT | 2022-11-10T15:11:40 | 2017-06-13T17:55:54 | Python | UTF-8 | Python | false | false | 899 | py | from django.conf import settings
import swapper
from django.db.models import (
CASCADE,
ForeignKey,
)
from .accelerator_model import AcceleratorModel
class BasePartnerJudgeApplicationAssignment(AcceleratorModel):
judge = ForeignKey(settings.AUTH_USER_MODEL,
on_delete=CASCADE)
application = ForeignKey(
to=swapper.get_model_name(
AcceleratorModel.Meta.app_label, "Application"),
on_delete=CASCADE)
judging_round = ForeignKey(
to=swapper.get_model_name(
AcceleratorModel.Meta.app_label, "JudgingRound"),
on_delete=CASCADE)
partner = ForeignKey(
to=swapper.get_model_name(
AcceleratorModel.Meta.app_label, "Partner"),
on_delete=CASCADE)
class Meta(AcceleratorModel.Meta):
db_table = 'accelerator_partnerjudgeapplicationassignment'
abstract = True
| [
"jon@kiparsky.net"
] | jon@kiparsky.net |
870a4e1fa223674735ef734050320b5883e5a7cc | 6bf872f1506428e7d2cd216f251959a31c4f3be0 | /TB_IPR/TUT.IMG.pde/python/nonlineardiffusion.py | 7fb4caa9d39b8702dd0cfd00b627dee35d532471 | [
"LicenseRef-scancode-public-domain",
"CC-BY-4.0"
] | permissive | yg42/iptutorials | 25e8c5f8e145085b3938237b6d216a4b7634a013 | a42f13d97ee8a43e3f2097fd3a9be599dfe2380b | refs/heads/master | 2022-11-05T11:29:34.932163 | 2022-10-14T12:43:41 | 2022-10-14T12:43:41 | 228,867,829 | 31 | 11 | null | null | null | null | UTF-8 | Python | false | false | 1,401 | py | # -*- coding: utf-8 -*-
"""
Created on Fri Feb 27 14:30:18 2015
@author: yann
"""
import numpy as np
from scipy import ndimage, misc
import matplotlib.pyplot as plt
import skimage
def c(I, alpha):
# diffusion coefficient
# I: image
# alpha: diffusion parameter
return np.exp(-(I/alpha)**2)
def nonlinearDiffusion(I, nbIter, alpha, dt):
# linear diffusion
# I: image
# nbIter: number of iterations
# dt: step time
hW = np.array([[1, -1, 0]])
hE = np.array([[0, -1, 1]])
hN = np.transpose(hW)
hS = np.transpose(hE)
Z = I
for i in range(nbIter):
#print "%d" % i
gW = ndimage.convolve(Z, hW, mode='constant')
gE = ndimage.convolve(Z, hE, mode='constant')
gN = ndimage.convolve(Z, hN, mode='constant')
gS = ndimage.convolve(Z, hS, mode='constant')
Z = Z + dt*(c(np.abs(gW), alpha)*gW + c(np.abs(gE), alpha)*gE
+ c(np.abs(gN), alpha)*gN + c(np.abs(gS), alpha)*gS)
return Z
alpha = 0.1
dt = .05
I = skimage.io.imread("cerveau.png")/255.
F = nonlinearDiffusion(I, 10, alpha, dt)
F2 = nonlinearDiffusion(I, 50, alpha, dt)
skimage.io.imsave("cerveau_nld_10.png", F)
skimage.io.imsave("cerveau_nld_50.png", F2)
plt.subplot(1, 3, 1)
plt.imshow(I, cmap=plt.cm.gray)
plt.subplot(1, 3, 2)
plt.imshow(F, cmap=plt.cm.gray)
plt.subplot(1, 3, 3)
plt.imshow(F2, cmap=plt.cm.gray)
| [
"gavet@emse.fr"
] | gavet@emse.fr |
0858da6354190a6e487b641799cd95e92128d8e9 | cb5d9a49406dcc8a48b6ac55042035ff6437bc05 | /slyd/slyd/server.py | 13f17a192cce9c31c433a908ef90cd7347c53f97 | [
"BSD-3-Clause"
] | permissive | randy-ran/portia | 45ae4e071f57da01bafcd8b248a1d8e643bfdfc8 | 6dd14b2c73d7db4e008efdfb322b853f72755132 | refs/heads/master | 2021-01-18T12:42:21.657190 | 2015-06-04T21:41:29 | 2015-06-04T21:41:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 976 | py | from twisted.python import log
from twisted.python.compat import intToBytes
from twisted.web import http
from twisted.web.server import Site as WebSite, Request as WebRequest
class Request(WebRequest):
def is_ajax(self):
req_with = self.requestHeaders.getRawHeaders('X-Requested-With', [])
return 'XMLHttpRequest' in req_with
def processingFailed(self, reason):
if self.is_ajax():
log.err(reason)
if self.site.displayTracebacks:
body = reason.getTraceback()
else:
body = b"Processing Failed"
self.setResponseCode(http.INTERNAL_SERVER_ERROR)
self.setHeader(b'content-type', b"text/plain")
self.setHeader(b'content-length', intToBytes(len(body)))
self.write(body)
self.finish()
return reason
return WebRequest.processingFailed(self, reason)
class Site(WebSite):
requestFactory = Request
| [
"ruairi@scrapinghub.com"
] | ruairi@scrapinghub.com |
7ea93492ed3ad89ba1eca2a21213ebb6285063b3 | 63ab7f86a43b7707d7f18cdac64d3d7f6041e44b | /Chapter13/countingMapReduce.py | d102a02b2e18ede970b6554745f3edbc011ef440 | [] | no_license | rms15/PDA_Book | 3189dc9c72bb6287632c49f77e7ad62b0f710c30 | 35f17eff37b28ea3cd42ef4d6533fdf951d51adf | refs/heads/master | 2021-01-11T05:19:21.949423 | 2016-10-26T14:50:45 | 2016-10-26T14:50:45 | 71,997,565 | 0 | 0 | null | 2016-10-26T11:43:21 | 2016-10-26T11:43:21 | null | UTF-8 | Python | false | false | 472 | py | from bson.code import Code
from pymongo import MongoClient
con = MongoClient()
db = con.Corpus
tweets = db.tweets
map = Code("function(){ emit(this.via, 1); }")
reduce = Code("""function(key, values) {
var res = 0;
values.forEach(function(v){ res += 1})
return {count: res};
}""")
result = tweets.map_reduce(map,reduce,"via_count", full_response=True)
print(result)
for doc in db.via_count.find():
print(doc)
| [
"hm_cuesta@yahoo.com.mx"
] | hm_cuesta@yahoo.com.mx |
c2d71a7d3e13c4f19a3368fea8040b5146362a1b | 392e2773c0af4293060debe5f7a3ac217eb13eb0 | /architectures/cloud-centric/nodes/cloud-node/code/classes/policy.py | 6e7d2e63d9369afa3bfc1fa2e5fb3b36d3762e5c | [
"MIT"
] | permissive | Davide-DD/distributed-machine-learning-architectures | 16366c1faa22f218ba1da409448427987914039a | 998d86368c4122ad9937b505405191b316afb060 | refs/heads/master | 2020-08-07T09:00:10.347339 | 2019-10-15T12:42:26 | 2019-10-15T12:42:26 | 213,382,447 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 243 | py | class Policy:
def __init__(self):
self.wrong_predictions = 0
def update_status(self):
self.wrong_predictions += 1
def is_respected(self):
if self.wrong_predictions < 25:
return True
self.wrong_predictions = 0
return False | [
"davidedidonato94@gmail.com"
] | davidedidonato94@gmail.com |
d29df58a54dda0c6b1cb8a80b91f79e0c52aea41 | 32e10056d02f36aab0195788db1fd46e36bb9d3d | /mysite2/mysite2/settings.py | c007e36e506ebde8d10f73956ebcf412cb305c89 | [] | no_license | dariodegiorgi/asmis_project | b18534f768b9b78f29686ddae44be13a8948c49a | a3414ae11bed6bd264755312cb2a0d9ba0eab4f3 | refs/heads/master | 2023-03-29T02:40:46.151702 | 2021-04-01T16:13:50 | 2021-04-01T16:13:50 | 353,737,843 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,092 | py | """
Django settings for mysite2 project.
Generated by 'django-admin startproject' using Django 2.2.12.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '%u^@3)o&^5i(a-3)^w@@br#j21zcr2)3$rq7*jaowbfc+p#1yx'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite2.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite2.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
| [
"dario.degiorgi2@gmail.com"
] | dario.degiorgi2@gmail.com |
cba4850a56bcc405d2f955ea94145e68d71dc137 | 44c7b7cb9b2fc778774f6d2853a31f7f3edd10be | /documentparser.py | 549dbfbea6425e4965e9f1b46ac8b7816cb4773a | [] | no_license | thedern/Python3 | e7f92c66aeed62ad1197419d5291dd916114efdd | aa1a8ebee049c8191f1908c631aa8511a6e7dac9 | refs/heads/master | 2020-03-15T01:17:53.966230 | 2018-09-12T13:24:24 | 2018-09-12T13:24:24 | 131,890,422 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 529 | py |
fhandle=open('romeo.txt')
#print(fhandle)
masterList = []
#read each line and split the text into a list object
for line in fhandle:
words = line.split()
#print(words)
#get each item in the list
for t in range(len(words)):
#print each item in the list
print(words[t])
#if the item is not already in masterList, add it
if words[t] in masterList : continue
else:
masterList.append(words[t])
print(masterList)
print(len(masterList))
| [
"noreply@github.com"
] | noreply@github.com |
fceb31cf00b53fc8ef9df8a4ff1f3b5730c0e402 | ad475b83c8824b1fcb19292866986f2e9b31eddf | /influxdb_client/domain/notification_endpoint_base_links.py | 5f93a4216d9962778ca5d3ab9efc97909851636b | [
"MIT"
] | permissive | reinhard-brandstaedter/influxdb-client-python | 9ec79d7d07d1a52cefa2f0622151c02cce9701f0 | cd3278c90d751a25c8693367a6495a7d07e5c0a5 | refs/heads/master | 2023-04-25T21:39:49.655550 | 2021-05-28T05:03:38 | 2021-05-28T05:03:38 | 371,001,951 | 0 | 0 | MIT | 2021-05-26T10:59:00 | 2021-05-26T10:59:00 | null | UTF-8 | Python | false | false | 5,250 | py | # coding: utf-8
"""
Influx API Service.
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
OpenAPI spec version: 0.1.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class NotificationEndpointBaseLinks(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'_self': 'str',
'labels': 'str',
'members': 'str',
'owners': 'str'
}
attribute_map = {
'_self': 'self',
'labels': 'labels',
'members': 'members',
'owners': 'owners'
}
def __init__(self, _self=None, labels=None, members=None, owners=None): # noqa: E501,D401,D403
"""NotificationEndpointBaseLinks - a model defined in OpenAPI.""" # noqa: E501
self.__self = None
self._labels = None
self._members = None
self._owners = None
self.discriminator = None
if _self is not None:
self._self = _self
if labels is not None:
self.labels = labels
if members is not None:
self.members = members
if owners is not None:
self.owners = owners
@property
def _self(self):
"""Get the _self of this NotificationEndpointBaseLinks.
URI of resource.
:return: The _self of this NotificationEndpointBaseLinks.
:rtype: str
""" # noqa: E501
return self.__self
@_self.setter
def _self(self, _self):
"""Set the _self of this NotificationEndpointBaseLinks.
URI of resource.
:param _self: The _self of this NotificationEndpointBaseLinks.
:type: str
""" # noqa: E501
self.__self = _self
@property
def labels(self):
"""Get the labels of this NotificationEndpointBaseLinks.
URI of resource.
:return: The labels of this NotificationEndpointBaseLinks.
:rtype: str
""" # noqa: E501
return self._labels
@labels.setter
def labels(self, labels):
"""Set the labels of this NotificationEndpointBaseLinks.
URI of resource.
:param labels: The labels of this NotificationEndpointBaseLinks.
:type: str
""" # noqa: E501
self._labels = labels
@property
def members(self):
"""Get the members of this NotificationEndpointBaseLinks.
URI of resource.
:return: The members of this NotificationEndpointBaseLinks.
:rtype: str
""" # noqa: E501
return self._members
@members.setter
def members(self, members):
"""Set the members of this NotificationEndpointBaseLinks.
URI of resource.
:param members: The members of this NotificationEndpointBaseLinks.
:type: str
""" # noqa: E501
self._members = members
@property
def owners(self):
"""Get the owners of this NotificationEndpointBaseLinks.
URI of resource.
:return: The owners of this NotificationEndpointBaseLinks.
:rtype: str
""" # noqa: E501
return self._owners
@owners.setter
def owners(self, owners):
"""Set the owners of this NotificationEndpointBaseLinks.
URI of resource.
:param owners: The owners of this NotificationEndpointBaseLinks.
:type: str
""" # noqa: E501
self._owners = owners
def to_dict(self):
"""Return the model properties as a dict."""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Return the string representation of the model."""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`."""
return self.to_str()
def __eq__(self, other):
"""Return true if both objects are equal."""
if not isinstance(other, NotificationEndpointBaseLinks):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Return true if both objects are not equal."""
return not self == other
| [
"noreply@github.com"
] | noreply@github.com |
fe0e6cff95e5d8a330eff9257815093428fb3c63 | 43ab33b2f50e47f5dbe322daa03c86a99e5ee77c | /test/test_od_mcomplex_type_definition_range_check.py | 1654fc81fa59cb3e96dcfdc2ece04a4d325049a1 | [] | no_license | Sage-Bionetworks/rcc-client | c770432de2d2950e00f7c7bd2bac22f3a81c2061 | 57c4a621aecd3a2f3f9faaa94f53b2727992a01a | refs/heads/main | 2023-02-23T05:55:39.279352 | 2021-01-21T02:06:08 | 2021-01-21T02:06:08 | 331,486,099 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,610 | py | # coding: utf-8
"""
nPhase REST Resource
REDCap REST API v.2 # noqa: E501
The version of the OpenAPI document: 2.0
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import rcc
from rcc.models.od_mcomplex_type_definition_range_check import ODMcomplexTypeDefinitionRangeCheck # noqa: E501
from rcc.rest import ApiException
class TestODMcomplexTypeDefinitionRangeCheck(unittest.TestCase):
"""ODMcomplexTypeDefinitionRangeCheck unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def make_instance(self, include_optional):
"""Test ODMcomplexTypeDefinitionRangeCheck
include_option is a boolean, when False only required
params are included, when True both required and
optional params are included """
# model = rcc.models.od_mcomplex_type_definition_range_check.ODMcomplexTypeDefinitionRangeCheck() # noqa: E501
if include_optional :
return ODMcomplexTypeDefinitionRangeCheck(
check_value = [
rcc.models.od_mcomplex_type_definition_check_value.ODMcomplexTypeDefinitionCheckValue(
value = '0', )
],
formal_expression = [
rcc.models.od_mcomplex_type_definition_formal_expression.ODMcomplexTypeDefinitionFormalExpression(
value = '0',
context = '0', )
],
measurement_unit_ref = rcc.models.od_mcomplex_type_definition_measurement_unit_ref.ODMcomplexTypeDefinitionMeasurementUnitRef(
measurement_unit_oid = '0', ),
error_message = rcc.models.od_mcomplex_type_definition_error_message.ODMcomplexTypeDefinitionErrorMessage(
translated_text = [
rcc.models.od_mcomplex_type_definition_translated_text.ODMcomplexTypeDefinitionTranslatedText(
value = '0',
lang = '0', )
], ),
comparator = 'LT',
soft_hard = 'SOFT'
)
else :
return ODMcomplexTypeDefinitionRangeCheck(
)
def testODMcomplexTypeDefinitionRangeCheck(self):
"""Test ODMcomplexTypeDefinitionRangeCheck"""
inst_req_only = self.make_instance(include_optional=False)
inst_req_and_optional = self.make_instance(include_optional=True)
if __name__ == '__main__':
unittest.main()
| [
"thomas.yu@sagebase.org"
] | thomas.yu@sagebase.org |
737c356e9ae7c86db618659b49bd60093191cca9 | 1d8e5a997f256abf7a046681c64554624db6fd8c | /advance_class/func1.py | 636a36aeab63a991cf1c4d2f23b7b03415c751ac | [] | no_license | luckywind/magic_box | 9d6f8eeef26c5431392a101a3dc54b03ef20df58 | 601b36c5008417f52f23af91449fbcce0d806f95 | refs/heads/master | 2021-01-01T05:24:48.181785 | 2016-04-22T01:30:50 | 2016-04-22T01:30:50 | 56,816,887 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 761 | py | #coding=utf-8
#1.定义一个方法 func,该func可以引入任意多的整型参数,结果返回其中最大与最小的值。
'''
2.定义一个方法func,该func可以引入任意多的字符串参数,结果返回(长度)最长的字符串。
3.定义一个方法get_doc(module),module参数为该脚本中导入或定义的模块对象,该函数返回module的帮助文档。
例 print get_doc(urllib),则会输出urllib这个模块的帮助文档。
4.定义一个方法get_text(f),f参数为任意一个文件的磁盘路径,该函数返回f文件的内容。
5.定义一个方法get_dir(folder),folder参数为任意一个文件夹,该函数返回folder文件夹的文件列表。提示(可以了解python的glob模块)
''' | [
"1318247907@qq.com"
] | 1318247907@qq.com |
ca2b27f57589812b825aa419c929e23c13f248ee | 93636267195ed83a74fce8796d9b83a668a3160f | /fly_scraper/fly.py | 97ad0d777263d2e288b967bde4808e1a4c9574ca | [] | no_license | AlexandrMalahov/Learning | 46e8ea8c4c4035db483e2d09a76791d2263a0f3a | 18260de924b547e69bed4bfabfcc9a33ddb480f8 | refs/heads/master | 2020-04-22T11:35:03.125779 | 2019-09-12T14:57:14 | 2019-09-12T14:57:14 | 170,345,818 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,966 | py | """Python 3.7. Parsing of the web site https://www.orbest.com/."""
# сделать с аргпарс и объеденить проверки ввода в одну функцию
import argparse
import datetime
import re
import requests
from lxml import html
def input_query_params():
parser = argparse.ArgumentParser()
parser.add_argument('-w', '--way', help='input way type("ONE_WAY" or "ROUND_TRIP")')
parser.add_argument('-d', '--dep_city', help='input departure city("LIS", "CUN", "PUJ")')
parser.add_argument('-a', '--arr_city', help='input arrival city("LIS", "CUN", "PUJ")')
parser.add_argument('-d_d', '--dep_date', help='input a departure date')
parser.add_argument('-r', '--ret_date', help='input a return date')
parser.add_argument('-n_a', '--num_adults', help='input a number of adults')
parser.add_argument('-n_c', '--num_child', help='input a number of children')
parser.add_argument('-n_i', '--num_infants', help='input a number of infants')
args = parser.parse_args()
params = (
args.way, args.dep_city,
args.arr_city, args.dep_date,
args.ret_date, args.num_adults,
args.num_child, args.num_infants
)
return params
def manual_input(count):
if count == 0:
query_params = input_query_params()
way = query_params[0]
dep_city = query_params[1]
arr_city = query_params[2]
dep_date = query_params[3]
ret_date = query_params[4]
adults = query_params[5]
children = query_params[6]
infants = query_params[7]
else:
way = input(
'Please, enter a way'
'("ONE_WAY" or "ROUND_TRIP"): '
)
dep_city = input('Please, enter IATA code of departure city: ')
arr_city = input('Please, enter IATA code of arrival city: ')
dep_date = input('Please, enter a departure date(dd/mm/yyyy): ')
ret_date = dep_date
if way.upper() == 'ROUND_TRIP':
ret_date = input('Please, enter a return date(dd/mm/yyyy): ')
adults = input(
'Please, enter a number of adults'
'(number must be more than 0 and less or equal 9): '
)
children = input(
'Please, enter a number of children'
'(number must be more or equal than 0 and less or equal number of adults): '
)
infants = input(
'Please, enter a number of infants'
'(number must be more or equal than 0 and less or equal number of adults): '
)
while True: # Checking and input a flight type.
if way is None:
way = input(
'Please, enter a way'
'("ONE_WAY" or "ROUND_TRIP"): '
).upper()
else:
way = way.upper()
if way == 'ONE_WAY' or way == 'ROUND_TRIP':
break
else:
print('Incorrect flight type. Please, enter a correct way.')
way = input(
'Please, enter a way'
'("ONE_WAY" or "ROUND_TRIP"): '
)
iata_code = ['CUN', 'LIS', 'PUJ']
# Checking and input IATA code of departure airport
while True:
if dep_city is None:
dep_city = input('Please, enter IATA code of departure city: ').upper()
else:
dep_city = dep_city.upper()
if dep_city in iata_code:
break
else:
print(
'Incorrect iata code. Please, '
'enter a correct iata code("{}" '
'or "{}" or "{}")'.format(
iata_code[0], iata_code[1], iata_code[2]
)
)
dep_city = input('Please, enter IATA code of departure city: ')
# Checking and input IATA code of arrival airport
while True:
if arr_city is None:
arr_city = input('Please, enter IATA code of arrival city: ').upper()
else:
arr_city = arr_city.upper()
if arr_city in iata_code:
break
else:
print(
'Incorrect iata code. Please, '
'enter a correct iata code("{}" '
'or "{}" or "{}")'.format(
iata_code[0], iata_code[1], iata_code[2]
)
)
arr_city = input('Please, enter IATA code of arrival city: ')
# Input and checking for correctness departure date
while True:
try:
if dep_date is None:
dep_date = input('Please, enter a departure date(dd/mm/yyyy): ')
dep_date = re.findall(r'(\d|\d{2}).(\d{2}).(\d{4})', dep_date)[0]
if datetime.date(int(dep_date[2]), int(dep_date[1]), int(dep_date[0])):
break
except (IndexError, TypeError, ValueError):
print(
'Incorrect date. Please, enter a '
'correct date in format: day/month/year'
)
dep_date = input('Please, enter a departure date(dd/mm/yyyy): ')
# Input and checking for correctness return date
while True:
if way == 'ONE_WAY':
ret_date = dep_date
break
elif way == 'ROUND_TRIP':
try:
if ret_date is None:
ret_date = input('Please, enter a return date(dd/mm/yyyy): ')
ret_date = re.findall(
r'(\d|\d{2}).(\d{2}).(\d{4})',
ret_date
)[0]
if datetime.date(int(ret_date[2]), int(ret_date[1]), int(ret_date[0])):
break
except (IndexError, TypeError, ValueError):
print(
'Incorrect date. Please, enter a '
'correct date in format: day/month/year'
)
ret_date = input('Please, enter a return date(dd/mm/yyyy): ')
# checking number of adults
while True:
if adults is None:
adults = input(
'Please, enter a number of adults'
'(number must be more than 0 and less or equal 9): '
)
try:
adults = int(adults)
if adults <= 0 or adults >= 9:
print('Number of adults must be more or equal 1 and less or equal 9.')
else:
break
except (ValueError, TypeError):
print('Number of adults must be integer.')
adults = input(
'Please, enter a number of adults'
'(number must be more than 0 and less or equal 9): '
)
# checking number of children
while True:
if children is None:
children = input(
'Please, enter a number of children'
'(number must be more or equal than 0 and less or equal number of adults): '
)
try:
children = int(children)
if children < 0 or children > adults:
print('Number of children must be more or equal 0 and less or equal number of adults')
else:
break
except (ValueError, TypeError):
print('Number of children must be integer number.')
children = input(
'Please, enter a number of children'
'(number must be more or equal than 0 and less or equal number of adults): '
)
# checking number of infants
while True:
if infants is None:
infants = input(
'Please, enter a number of infants'
'(number must be more or equal than 0 and less or equal number of adults): '
)
try:
infants = int(infants)
if infants < 0 or infants > adults:
print('Number of infants must be more or equal 0 and less or equal number of adults.')
else:
break
except (ValueError, TypeError):
print('Number of children must be integer number.')
infants = input(
'Please, enter a number of infants'
'(number must be more or equal than 0 and less or equal number of adults): '
)
return way, dep_city, arr_city, '/'.join(dep_date), '/'.join(ret_date), adults, children, infants
def connection(params_func):
"""Function gets params of searching and set connection with site."""
params = {
'buscadorVuelosEsb.tipoTransicion': 'S',
'buscadorVuelosEsb.routeType': params_func[0],
'buscadorVuelosEsb.origen': params_func[1],
'buscadorVuelosEsb.destino': params_func[2],
'buscadorVuelosEsb.fsalida': params_func[3],
'buscadorVuelosEsb.fregreso': params_func[4],
'buscadorVuelosEsb.numadultos': params_func[5],
'buscadorVuelosEsb.numninos': params_func[6],
'buscadorVuelosEsb.numbebes': params_func[7]
}
tree = html.fromstring(
requests.post(
'https://en.orbest.com/b2c'
'/pages/flight/disponibili'
'dadSubmit.html?', params
).content
)
return tree
def scrape(connect_func, params_func):
"""Gets data from site."""
params = params_func
tree = connect_func
flights = [[], []] # List of vars of flights
# (first lis for outbound flights, second list for return flight)
if params[0] == 'ONE_WAY':
data = tree.xpath(
'/html/body/div[@id="content"]'
'/div/div/form[@id="formularioValoracion"]'
'/div/div[@class="flexcols"]/section'
'/div[@id="tabs2"]/div/div/ol/li'
)
flights = [ # List of lists of flights
information.xpath(
'div[@class="vuelo-wrap'
' vuelo-wrap3"]//text()'
) for information in data
]
elif params[0] == 'ROUND_TRIP':
data = tree.xpath( # Getting data of outbound flights
'/html/body/div[@id="content"]'
'/div/div/form[@id="formularioValoracion"]'
'/div/div[@class="flexcols"]/section'
'/div[@id="tabs2"]/div/div/'
'div[@class="wrap-sel-custom combinado"]'
'/div[@class="grid-cols clearfix"]'
)
for details in data:
flight_first = ' '.join(
details.xpath(
'div[@class="col2 col-first"]'
'/div[@class="datos"]/div//text()'
)
) # Getting data of departure flights
fly_class = details.xpath(
'div[@class="col2 col-first"]'
'/div[@class="datos"]/div'
'/div[@class="clase"]/span//text()'
)
cities = re.findall(r'\b\w{3}\b', flight_first)
time = re.findall(r'\d{1,2}:\d{2}', flight_first)
price = re.findall(r'.\d+,\d{2}', flight_first)
time = [time[i:i+2] for i in range(0, len(time), 2)]
for i, class_type in enumerate(fly_class):
flights[0].append(
[cities[0],
cities[1],
class_type,
price[i],
time[i][0],
time[i][1]]
)
flight_last = ' '.join(details.xpath(
'div[@class="col2 col-last"]'
'/div[@class="datos"]/div//text()'
)) # Getting data of return flights
fly_class = details.xpath(
'div[@class="col2 col-last"]'
'/div[@class="datos"]/div'
'/div[@class="clase"]/span//text()'
)
cities = re.findall(r'\b\w{3}\b', flight_last)
time = re.findall(r'\d{1,2}:\d{2}', flight_last)
price = re.findall(r'.\d+,\d{2}', flight_last)
time = [time[i:i+2] for i in range(0, len(time), 2)]
for i, class_type in enumerate(fly_class):
flights[1].append([
cities[0],
cities[1],
class_type,
price[i],
time[i][0],
time[i][1]
])
return flights
def data_print(data_func, params_func):
"""Result printing."""
if data_func == list() or data_func == [[], []]:
print(
'There is not availability enough '
'for the selected flights. Please '
'select another date.'
)
else:
if params_func[0] == 'ONE_WAY':
for i, _ in enumerate(data_func):
print('Way:', data_func[i][3])
print('Departure time:', data_func[i][5])
print('Arrival time:', data_func[i][7])
print('Class:', data_func[i][9])
print('Price:', data_func[i][0])
print('\n')
elif params_func[0] == 'ROUND_TRIP':
print('Outbound flights', '\n')
for info in data_func[0]:
print('Way: {0}-{1}'.format(info[0], info[1]))
print('Departure time:', info[4])
print('Arrival time:', info[5])
print('Class:', info[2])
print('Price:', info[3])
print('\n')
print('Return flights', '\n')
for inform in data_func[1]:
print('Way: {0}-{1}'.format(inform[0], inform[1]))
print('Departure time:', inform[4])
print('Arrival time:', inform[5])
print('Class:', inform[2])
print('Price:', inform[3])
print('\n')
if __name__ == '__main__':
counter = 0
while True:
parameters = manual_input(counter)
connect = connection(parameters)
data_flights = scrape(connect, parameters)
data_print(data_flights, parameters)
counter += 1
if input('For quit enter "q"').upper() == 'Q':
break
| [
"knessdar2@gmail.com"
] | knessdar2@gmail.com |
07a83ac60630d1e7e99d2f2c893b69b5234db217 | 9950c7186f421d1bdcb52f6a6b8e3593272fc49f | /py/compare/functions/timepenalty.py | 9447147ff16e01d75048c29189d23e9b68556a88 | [] | no_license | music960633/DSnP_grading | b11016d00f1757d0f155991404ffb3102fd2203e | c0a8bd1244a3bff5b7152f660f4c77c937c514d8 | refs/heads/master | 2021-10-11T14:49:29.683556 | 2019-01-27T15:16:09 | 2019-01-27T15:16:09 | 116,963,124 | 8 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,161 | py | """Compare class for "timepenalty" compare method."""
import re
import dsnp_setting
from compare import stat
from compare.functions import base
_RE_TOTAL_TIME = re.compile(r'Total time used\s*:\s*(\d*\.?\d+)\s*seconds')
class TimePenaltyCmp(base.BaseCmp):
def cmpCmd(self, ref_out, stu_out,
min_val, max_val, min_penalty, max_penalty, relative=False):
"""Compare ref output and student output.
Calculates a time penalty depending on ref and student time. The mapping
from student time (stu_time) to penalty is:
If stu_time <= min_time, penalty = `min_penalty`
If stu_time >= max_time, penalty = `max_penalty`
If min_time < stu_time < max_time, penalty increases linearly
If `relative` is set to False, min_time, max_time are equivalent to
`min_val` and `max_val`. If `relative` is set to True, min_time, max_time
are `min_val`, `max_val` multiplied by ref time.
Args:
ref_out: Ref output in list of strings.
stu_out: Student output in list of strings.
min_penalty: Described above.
max_penalty: Descrived above.
min_ratio: Described above.
max_ratio: Descrived above.
relative: Use relative time compared to ref time.
Returns:
Tuple (0, 1, stat.ERROR) if student time is not found. Otherwise return
(0, penalty, stat.STAT_OK)
"""
if relative is True:
ref_result = _RE_TOTAL_TIME.search('\n'.join(ref_out))
assert ref_result is not None, 'Cannot get ref time.'
ref_time = float(ref_result.group(1))
min_time = ref_time * min_val
max_time = ref_time * max_val
else:
min_time = min_val
max_time = max_val
stu_result = _RE_TOTAL_TIME.search('\n'.join(stu_out))
if stu_result is None:
return (0, 1, stat.STAT_ERROR)
stu_time = float(stu_result.group(1))
if stu_time <= min_time:
penalty = min_penalty
elif stu_time >= max_time:
penalty = max_penalty
else:
slope = float(max_penalty - min_penalty) / float(max_time - min_time)
penalty = min_penalty + slope * (stu_time - min_time)
return (0, penalty, stat.STAT_OK)
| [
"music960633@gmail.com"
] | music960633@gmail.com |
af2c88cf2423b3f63cf4992337ae51d62aaa93a0 | f5603094ddcd49d5ffef11c8feadd4b66edbb112 | /tf/Graph.py | eafd63974ef8561bb3e8601d23b9408339ca9326 | [] | no_license | jesen8/study_english_with_DL | 2cc643bd5e0b67fd1c5be9cc8dead869c2f9e411 | 7ad6b531332b3a7ebdd7bc6107f843ab5f09ceb3 | refs/heads/master | 2020-04-05T03:45:49.071774 | 2019-05-06T02:42:46 | 2019-05-06T02:42:46 | 156,526,762 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,175 | py | # RIGHT
def my_func(pred, tensor):
with tf.control_dependencies([pred]):
# The matmul op is created in the context, so a control dependency
# will be added.
return tf.matmul(tensor, tensor)
# Worked examples transcribed from the `tf.Graph` API documentation.
# The original flush-left transcription did not parse; each snippet is
# wrapped in a function so this scratch file imports cleanly.


def device_examples(g):
    """`tf.Graph.device(device_name_or_function)` usage on graph `g`."""
    with g.device('/gpu:0'):
        # All operations constructed in this context will be placed
        # on GPU 0.
        with g.device(None):
            # All operations constructed in this context will have no
            # assigned device.
            pass


# Defines a function from `Operation` to device string.
def matmul_on_gpu(n):
    if n.type == "MatMul":
        return "/gpu:0"
    else:
        return "/cpu:0"


def device_function_example(g):
    """Placement driven by a device function instead of a device string."""
    with g.device(matmul_on_gpu):
        # All operations of type "MatMul" constructed in this context
        # will be placed on GPU 0; all other operations will be placed
        # on CPU 0.
        pass


def name_scope_examples():
    """`tf.Graph.name_scope` uniquification and nesting behavior."""
    with tf.Graph().as_default() as g:
        c = tf.constant(5.0, name="c")
        # Fixed from the transcription, which asserted on `c_1` before it
        # was defined.
        assert c.name == "c"
        c_1 = tf.constant(6.0, name="c")
        assert c_1.name == "c_1"
        # Creates a scope called "nested"
        with g.name_scope("nested") as scope:
            nested_c = tf.constant(10.0, name="c")
            assert nested_c.name == "nested/c"
            # Creates a nested scope called "inner".
            with g.name_scope("inner"):
                nested_inner_c = tf.constant(20.0, name="c")
                assert nested_inner_c.name == "nested/inner/c"
            # Creates a nested scope called "inner_1".
            with g.name_scope("inner"):
                nested_inner_1_c = tf.constant(30.0, name="c")
                assert nested_inner_1_c.name == "nested/inner_1/c"
            # Treats `scope` as an absolute name scope, and
            # switches to the "nested/" scope.
            with g.name_scope(scope):
                nested_d = tf.constant(40.0, name="d")
                assert nested_d.name == "nested/d"
                with g.name_scope(""):
                    e = tf.constant(50.0, name="e")
                    assert e.name == "e"


def gradient_override_example():
    """`tf.Graph.gradient_override_map` usage."""
    @tf.RegisterGradient("CustomSquare")
    def _custom_square_grad(op, inputs):
        # Gradient implementation elided in the original example.
        ...

    with tf.Graph().as_default() as g:
        c = tf.constant(5.0)
        s_1 = tf.square(c)  # Uses the default gradient for tf.square.
        with g.gradient_override_map({"Square": "CustomSquare"}):
            # Fixed from the transcription: `tf.square(s_2)` referenced the
            # result before it existed; per the docs this squares `c`.
            s_2 = tf.square(c)  # Uses _custom_square_grad for its gradient.
"360697611@qq.com"
] | 360697611@qq.com |
1581a1828244d936040e288b0a6211b5ae3ce290 | 4d83081d5bb6e20d4c79cbcc1eeb07310019c6d3 | /tensorflow_test.py | fe3d27da8ee35cd2584b7288ff7b0a74767cc348 | [] | no_license | hbshin00/VAILSP21 | 36760b74b5633b3eb2a134af1182748f6dc63c7a | 3ed0375959061b8d42efbbd4ab9e00bb83ae5df7 | refs/heads/main | 2023-05-12T17:06:35.071822 | 2021-06-04T19:25:15 | 2021-06-04T19:25:15 | 337,241,953 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,338 | py | import tensorflow as tf
x1 = tf.constant([1,2,3,4])
x2 = tf.constant([5,6,7,7])
result = tf.multiply(x1, x2)
print(result)
#why doesn't this work tho
#sess = tf.Session()
#print(sess.run(result))
#sess.close()
#Belgian Traffic Signs
import os
import skimage
import imageio
import numpy as np
def load_data(data_directory):
    """Load a labelled image set laid out as one sub-directory per class.

    Args:
        data_directory: Path to a directory whose sub-directories are named
            by integer class label and contain ``.ppm`` image files.

    Returns:
        Tuple ``(images, labels)``: a list of decoded images and the
        matching list of integer class labels, one entry per ``.ppm`` file.
    """
    # Bug fix: the original comprehension iterated `f` but filtered and
    # collected `d`, which raised NameError at call time.
    directories = [d for d in os.listdir(data_directory)
                   if os.path.isdir(os.path.join(data_directory, d))]
    labels = []
    images = []
    for d in directories:
        label_directory = os.path.join(data_directory, d)
        file_names = [os.path.join(label_directory, f)
                      for f in os.listdir(label_directory)
                      if f.endswith(".ppm")]
        for f in file_names:
            # `skimage.data.imread` is not available in current scikit-image;
            # use the already-imported imageio instead.
            images.append(imageio.imread(f))
            labels.append(int(d))
    return images, labels


# Backward-compatible alias: the function was originally (mis)named
# `load_date`, while the call site in this script uses `load_data`.
load_date = load_data
ROOT_PATH = "/Users/halib/Desktop/2021Spring/SureStart/BelgianTraffic"
# NOTE(review): os.path.join discards ROOT_PATH when the second argument is
# an absolute path, so ROOT_PATH is effectively unused here — the absolute
# paths below win. Consider passing relative "Training"/"Testing" instead.
train_data_directory = os.path.join(ROOT_PATH, "/Users/halib/Desktop/2021Spring/SureStart/BelgianTraffic/Training")
test_data_directory = os.path.join(ROOT_PATH, "/Users/halib/Desktop/2021Spring/SureStart/BelgianTraffic/Testing")
images, labels = load_data(train_data_directory)
import matplotlib.pyplot as plt
# Histogram of class labels; 62 bins — presumably one per Belgian traffic
# sign class in this dataset (verify against the dataset description).
plt.hist(labels, 62)
plt.show()
| [
"noreply@github.com"
] | noreply@github.com |
d273be6ad84e00816624c1c2db81aeb55764ad1f | 49536aafb22a77a6caf249c7fadef46d63d24dfe | /tensorflow/tensorflow/python/ops/metrics_impl.py | 07a43d2961aef78c6af194c06eb1cd62d641f352 | [
"Apache-2.0"
] | permissive | wangzhi01/deeplearning-1 | 4e5ad93f0d9ecd302b74352f80fe1fa6ae70bf0d | 46ab82253d956953b8aa98e97ceb6cd290e82288 | refs/heads/master | 2020-05-28T03:14:55.687567 | 2018-09-12T16:52:09 | 2018-09-12T16:52:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 144,255 | py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of tf.metrics module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import confusion_matrix
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import sets
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import weights_broadcast_ops
def _local_variable(initial_value, validate_shape=True, name=None):
  """Build a non-trainable variable in `GraphKeys.LOCAL_VARIABLES`.

  Args:
    initial_value: Initial value, forwarded to `variables.Variable.__init__`.
    validate_shape: Forwarded to `variables.Variable.__init__`.
    name: Forwarded to `variables.Variable.__init__`.
  Returns:
    The newly created variable.
  """
  # Metric state must not be trained and lives in the local collection only.
  local_collections = [ops.GraphKeys.LOCAL_VARIABLES]
  return variable_scope.variable(
      initial_value,
      trainable=False,
      collections=local_collections,
      validate_shape=validate_shape,
      name=name)
def _remove_squeezable_dimensions(predictions, labels, weights):
  """Internal version of `remove_squeezable_dimensions` which handles weights.
  Squeezes `predictions` and `labels` if their rank differs by 1.
  Squeezes `weights` if its rank is 1 more than the new rank of `predictions`
  This will use static shape if available. Otherwise, it will add graph
  operations, which could result in a performance hit.
  Args:
    predictions: Predicted values, a `Tensor` of arbitrary dimensions.
    labels: Optional label `Tensor` whose dimensions match `predictions`.
    weights: Optional weight `Tensor`. It will be squeezed if its rank is 1
      more than the new rank of `predictions`
  Returns:
    Tuple of `predictions`, `labels` and `weights`, possibly with the last
    dimension squeezed.
  """
  predictions = ops.convert_to_tensor(predictions)
  if labels is not None:
    # Align `labels`/`predictions` ranks first; resulting shapes must be
    # compatible or the assertion below raises.
    labels, predictions = confusion_matrix.remove_squeezable_dimensions(
        labels, predictions)
    predictions.get_shape().assert_is_compatible_with(labels.get_shape())
  if weights is None:
    return predictions, labels, None
  weights = ops.convert_to_tensor(weights)
  weights_shape = weights.get_shape()
  weights_rank = weights_shape.ndims
  if weights_rank == 0:
    # Scalar weights broadcast against any shape; nothing to adjust.
    return predictions, labels, weights
  predictions_shape = predictions.get_shape()
  predictions_rank = predictions_shape.ndims
  if (predictions_rank is not None) and (weights_rank is not None):
    # Use static rank.
    if weights_rank - predictions_rank == 1:
      weights = array_ops.squeeze(weights, [-1])
    elif predictions_rank - weights_rank == 1:
      weights = array_ops.expand_dims(weights, [-1])
  else:
    # Use dynamic rank: the rank difference is only known at run time, so
    # the adjustment is expressed with `cond` ops instead of Python branches.
    weights_rank_tensor = array_ops.rank(weights)
    rank_diff = weights_rank_tensor - array_ops.rank(predictions)
    def _maybe_expand_weights():
      # Expand when weights rank is one *less* than predictions rank.
      return control_flow_ops.cond(
          math_ops.equal(rank_diff, -1),
          lambda: array_ops.expand_dims(weights, [-1]),
          lambda: weights)
    # Don't attempt squeeze if it will fail based on static check.
    if ((weights_rank is not None) and
        (not weights_shape.dims[-1].is_compatible_with(1))):
      maybe_squeeze_weights = lambda: weights
    else:
      maybe_squeeze_weights = lambda: array_ops.squeeze(weights, [-1])
    def _maybe_adjust_weights():
      # Squeeze when weights rank is one *more* than predictions rank,
      # otherwise fall through to the possible expand above.
      return control_flow_ops.cond(
          math_ops.equal(rank_diff, 1),
          maybe_squeeze_weights,
          _maybe_expand_weights)
    # If weights are scalar, do nothing. Otherwise, try to add or remove a
    # dimension to match predictions.
    weights = control_flow_ops.cond(
        math_ops.equal(weights_rank_tensor, 0),
        lambda: weights, _maybe_adjust_weights)
  return predictions, labels, weights
def _maybe_expand_labels(labels, predictions):
  """If necessary, expand `labels` along last dimension to match `predictions`.
  Args:
    labels: `Tensor` or `SparseTensor` with shape
      [D1, ... DN, num_labels] or [D1, ... DN]. The latter implies
      num_labels=1, in which case the result is an expanded `labels` with shape
      [D1, ... DN, 1].
    predictions: `Tensor` with shape [D1, ... DN, num_classes].
  Returns:
    `labels` with the same rank as `predictions`.
  Raises:
    ValueError: if `labels` has invalid shape.
  """
  with ops.name_scope(None, 'expand_labels', (labels, predictions)) as scope:
    labels = sparse_tensor.convert_to_tensor_or_sparse_tensor(labels)
    # If sparse, expand sparse shape.
    if isinstance(labels, sparse_tensor.SparseTensor):
      # Sparse rank is only known dynamically (size of dense_shape), so the
      # reshape-vs-identity choice is made with a `cond` op.
      return control_flow_ops.cond(
          math_ops.equal(
              array_ops.rank(predictions),
              array_ops.size(labels.dense_shape) + 1),
          lambda: sparse_ops.sparse_reshape(  # pylint: disable=g-long-lambda
              labels,
              shape=array_ops.concat((labels.dense_shape, (1,)), 0),
              name=scope),
          lambda: labels)
    # Otherwise, try to use static shape.
    labels_rank = labels.get_shape().ndims
    if labels_rank is not None:
      predictions_rank = predictions.get_shape().ndims
      if predictions_rank is not None:
        if predictions_rank == labels_rank:
          return labels
        if predictions_rank == labels_rank + 1:
          return array_ops.expand_dims(labels, -1, name=scope)
        # Any rank difference other than 0 or 1 is a caller error.
        raise ValueError(
            'Unexpected labels shape %s for predictions shape %s.' % (
                labels.get_shape(), predictions.get_shape()))
    # Otherwise, use dynamic shape.
    return control_flow_ops.cond(
        math_ops.equal(array_ops.rank(predictions), array_ops.rank(labels) + 1),
        lambda: array_ops.expand_dims(labels, -1, name=scope),
        lambda: labels)
def _create_local(name, shape, collections=None, validate_shape=True,
                  dtype=dtypes.float32):
  """Create a zero-initialized local (non-trainable) variable.

  Args:
    name: The name of the new or existing variable.
    shape: Shape of the new or existing variable.
    collections: Collection names the variable is added to, in addition to
      `GraphKeys.LOCAL_VARIABLES`.
    validate_shape: Whether to validate the shape of the variable.
    dtype: Data type of the variable.
  Returns:
    The created variable.
  """
  # Metric state always goes into tf.GraphKeys.LOCAL_VARIABLES on top of any
  # caller-supplied collections.
  all_collections = list(collections) if collections else []
  all_collections.append(ops.GraphKeys.LOCAL_VARIABLES)
  zeros_initializer = lambda: array_ops.zeros(shape, dtype=dtype)
  return variable_scope.variable(
      zeros_initializer,
      name=name,
      trainable=False,
      collections=all_collections,
      validate_shape=validate_shape)
def _safe_div(numerator, denominator, name):
  """Divides two values, returning 0 if the denominator is <= 0.

  Args:
    numerator: A real `Tensor`.
    denominator: A real `Tensor`, with dtype matching `numerator`.
    name: Name for the returned op.
  Returns:
    0 if `denominator` <= 0, else `numerator` / `denominator`
  """
  denominator_is_positive = math_ops.greater(denominator, 0)
  quotient = math_ops.truediv(numerator, denominator)
  # Only the reported value is masked to 0; the division op itself is still
  # constructed unconditionally.
  return array_ops.where(
      denominator_is_positive,
      quotient,
      0,
      name=name)
def _safe_scalar_div(numerator, denominator, name):
  """Divides two scalar values, returning 0 when the denominator is 0.

  Args:
    numerator: A scalar `float64` `Tensor`.
    denominator: A scalar `float64` `Tensor`.
    name: Name for the returned op.
  Returns:
    0 if `denominator` == 0, else `numerator` / `denominator`
  """
  # Both operands must be scalars (or at most rank-1).
  numerator.get_shape().with_rank_at_most(1)
  denominator.get_shape().with_rank_at_most(1)

  def _zero():
    return array_ops.constant(0.0, dtype=dtypes.float64)

  def _divide():
    return math_ops.div(numerator, denominator)

  denominator_is_zero = math_ops.equal(
      array_ops.constant(0.0, dtype=dtypes.float64), denominator)
  return control_flow_ops.cond(
      denominator_is_zero,
      _zero,
      _divide,
      name=name)
def _streaming_confusion_matrix(labels, predictions, num_classes, weights=None):
  """Calculate a streaming confusion matrix.
  Calculates a confusion matrix. For estimation over a stream of data,
  the function creates an `update_op` operation.
  Args:
    labels: A `Tensor` of ground truth labels with shape [batch size] and of
      type `int32` or `int64`. The tensor will be flattened if its rank > 1.
    predictions: A `Tensor` of prediction results for semantic labels, whose
      shape is [batch size] and type `int32` or `int64`. The tensor will be
      flattened if its rank > 1.
    num_classes: The possible number of labels the prediction task can
      have. This value must be provided, since a confusion matrix of
      dimension = [num_classes, num_classes] will be allocated.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions must
      be either `1`, or the same as the corresponding `labels` dimension).
  Returns:
    total_cm: A `Tensor` representing the confusion matrix.
    update_op: An operation that increments the confusion matrix.
  """
  # Local variable to accumulate the predictions in the confusion matrix.
  # Accumulation is done in float64 so large counts / fractional weights
  # are represented accurately.
  total_cm = _create_local(
      'total_confusion_matrix',
      shape=[num_classes, num_classes],
      dtype=dtypes.float64)
  # Cast the type to int64 required by confusion_matrix_ops.
  predictions = math_ops.to_int64(predictions)
  labels = math_ops.to_int64(labels)
  num_classes = math_ops.to_int64(num_classes)
  # Flatten the input if its rank > 1.
  if predictions.get_shape().ndims > 1:
    predictions = array_ops.reshape(predictions, [-1])
  if labels.get_shape().ndims > 1:
    labels = array_ops.reshape(labels, [-1])
  if (weights is not None) and (weights.get_shape().ndims > 1):
    weights = array_ops.reshape(weights, [-1])
  # Accumulate the prediction to current confusion matrix.
  current_cm = confusion_matrix.confusion_matrix(
      labels, predictions, num_classes, weights=weights, dtype=dtypes.float64)
  update_op = state_ops.assign_add(total_cm, current_cm)
  return total_cm, update_op
def mean(values, weights=None, metrics_collections=None,
         updates_collections=None, name=None):
  """Computes the (weighted) mean of the given values.
  The `mean` function creates two local variables, `total` and `count`
  that are used to compute the average of `values`. This average is ultimately
  returned as `mean` which is an idempotent operation that simply divides
  `total` by `count`.
  For estimation of the metric over a stream of data, the function creates an
  `update_op` operation that updates these variables and returns the `mean`.
  `update_op` increments `total` with the reduced sum of the product of `values`
  and `weights`, and it increments `count` with the reduced sum of `weights`.
  If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
  Args:
    values: A `Tensor` of arbitrary dimensions.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `values`, and must be broadcastable to `values` (i.e., all dimensions must
      be either `1`, or the same as the corresponding `values` dimension).
    metrics_collections: An optional list of collections that `mean`
      should be added to.
    updates_collections: An optional list of collections that `update_op`
      should be added to.
    name: An optional variable_scope name.
  Returns:
    mean: A `Tensor` representing the current mean, the value of `total` divided
      by `count`.
    update_op: An operation that increments the `total` and `count` variables
      appropriately and whose value matches `mean_value`.
  Raises:
    ValueError: If `weights` is not `None` and its shape doesn't match `values`,
      or if either `metrics_collections` or `updates_collections` are not a list
      or tuple.
  """
  with variable_scope.variable_scope(name, 'mean', (values, weights)):
    values = math_ops.to_float(values)
    # Streaming state: running weighted sum and running weight total.
    total = _create_local('total', shape=[])
    count = _create_local('count', shape=[])
    if weights is None:
      # Unweighted: every element counts with weight 1.
      num_values = math_ops.to_float(array_ops.size(values))
    else:
      values, _, weights = _remove_squeezable_dimensions(
          predictions=values, labels=None, weights=weights)
      weights = weights_broadcast_ops.broadcast_weights(
          math_ops.to_float(weights), values)
      values = math_ops.multiply(values, weights)
      num_values = math_ops.reduce_sum(weights)
    update_total_op = state_ops.assign_add(total, math_ops.reduce_sum(values))
    # Ensure `values` (including any validation it depends on) is computed
    # before the count is incremented, so the two updates stay consistent.
    with ops.control_dependencies([values]):
      update_count_op = state_ops.assign_add(count, num_values)
    mean_t = _safe_div(total, count, 'value')
    update_op = _safe_div(update_total_op, update_count_op, 'update_op')
    if metrics_collections:
      ops.add_to_collections(metrics_collections, mean_t)
    if updates_collections:
      ops.add_to_collections(updates_collections, update_op)
    return mean_t, update_op
def accuracy(labels, predictions, weights=None, metrics_collections=None,
             updates_collections=None, name=None):
  """Calculates how often `predictions` matches `labels`.

  Two local variables, `total` and `count` (created by the underlying `mean`
  metric), accumulate the weighted number of correct predictions and the
  weighted number of predictions seen. The returned `accuracy` tensor is the
  idempotent quotient `total / count`, while `update_op` folds a new batch
  into both variables and evaluates to the updated accuracy.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask
  values.

  Args:
    labels: The ground truth values, a `Tensor` whose shape matches
      `predictions`.
    predictions: The predicted values, a `Tensor` of any shape.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions
      must be either `1`, or the same as the corresponding `labels`
      dimension).
    metrics_collections: An optional list of collections that `accuracy`
      should be added to.
    updates_collections: An optional list of collections that `update_op`
      should be added to.
    name: An optional variable_scope name.

  Returns:
    accuracy: A `Tensor` representing the accuracy, the value of `total`
      divided by `count`.
    update_op: An operation that increments the `total` and `count` variables
      appropriately and whose value matches `accuracy`.

  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, or if
      `weights` is not `None` and its shape doesn't match `predictions`, or
      if either `metrics_collections` or `updates_collections` are not a
      list or tuple.
  """
  predictions, labels, weights = _remove_squeezable_dimensions(
      predictions=predictions, labels=labels, weights=weights)
  predictions.get_shape().assert_is_compatible_with(labels.get_shape())
  # Compare in the label dtype so `equal` sees matching operand types.
  if predictions.dtype != labels.dtype:
    predictions = math_ops.cast(predictions, labels.dtype)
  matches = math_ops.equal(predictions, labels)
  is_correct = math_ops.to_float(matches)
  # Accuracy is the (weighted) mean of the 0/1 correctness indicator.
  return mean(is_correct, weights, metrics_collections,
              updates_collections, name or 'accuracy')
def _confusion_matrix_at_thresholds(
    labels, predictions, thresholds, weights=None, includes=None):
  """Computes true_positives, false_negatives, true_negatives, false_positives.
  This function creates up to four local variables, `true_positives`,
  `true_negatives`, `false_positives` and `false_negatives`.
  `true_positive[i]` is defined as the total weight of values in `predictions`
  above `thresholds[i]` whose corresponding entry in `labels` is `True`.
  `false_negatives[i]` is defined as the total weight of values in `predictions`
  at most `thresholds[i]` whose corresponding entry in `labels` is `True`.
  `true_negatives[i]` is defined as the total weight of values in `predictions`
  at most `thresholds[i]` whose corresponding entry in `labels` is `False`.
  `false_positives[i]` is defined as the total weight of values in `predictions`
  above `thresholds[i]` whose corresponding entry in `labels` is `False`.
  For estimation of these metrics over a stream of data, for each metric the
  function respectively creates an `update_op` operation that updates the
  variable and returns its value.
  If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
  Args:
    labels: A `Tensor` whose shape matches `predictions`. Will be cast to
      `bool`.
    predictions: A floating point `Tensor` of arbitrary shape and whose values
      are in the range `[0, 1]`.
    thresholds: A python list or tuple of float thresholds in `[0, 1]`.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions must
      be either `1`, or the same as the corresponding `labels` dimension).
    includes: Tuple of keys to return, from 'tp', 'fn', 'tn', fp'. If `None`,
      default to all four.
  Returns:
    values: Dict of variables of shape `[len(thresholds)]`. Keys are from
      `includes`.
    update_ops: Dict of operations that increments the `values`. Keys are from
      `includes`.
  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, or if
      `weights` is not `None` and its shape doesn't match `predictions`, or if
      `includes` contains invalid keys.
  """
  all_includes = ('tp', 'fn', 'tn', 'fp')
  if includes is None:
    includes = all_includes
  else:
    for include in includes:
      if include not in all_includes:
        raise ValueError('Invalid key: %s.' % include)
  # Runtime validation that predictions lie in [0, 1]; the thresholding
  # below is only meaningful under that assumption.
  with ops.control_dependencies([
      check_ops.assert_greater_equal(
          predictions,
          math_ops.cast(0.0, dtype=predictions.dtype),
          message='predictions must be in [0, 1]'),
      check_ops.assert_less_equal(
          predictions,
          math_ops.cast(1.0, dtype=predictions.dtype),
          message='predictions must be in [0, 1]')
  ]):
    predictions, labels, weights = _remove_squeezable_dimensions(
        predictions=math_ops.to_float(predictions),
        labels=math_ops.cast(labels, dtype=dtypes.bool),
        weights=weights)
  num_thresholds = len(thresholds)
  # Reshape predictions and labels. Everything below works on a
  # [num_thresholds, num_predictions] grid built via tiling.
  predictions_2d = array_ops.reshape(predictions, [-1, 1])
  labels_2d = array_ops.reshape(
      math_ops.cast(labels, dtype=dtypes.bool), [1, -1])
  # Use static shape if known.
  num_predictions = predictions_2d.get_shape().as_list()[0]
  # Otherwise use dynamic shape.
  if num_predictions is None:
    num_predictions = array_ops.shape(predictions_2d)[0]
  thresh_tiled = array_ops.tile(
      array_ops.expand_dims(array_ops.constant(thresholds), [1]),
      array_ops.stack([1, num_predictions]))
  # Tile the predictions after thresholding them across different thresholds.
  pred_is_pos = math_ops.greater(
      array_ops.tile(array_ops.transpose(predictions_2d), [num_thresholds, 1]),
      thresh_tiled)
  # Negations are only materialized when a consumer in `includes` needs them.
  if ('fn' in includes) or ('tn' in includes):
    pred_is_neg = math_ops.logical_not(pred_is_pos)
  # Tile labels by number of thresholds
  label_is_pos = array_ops.tile(labels_2d, [num_thresholds, 1])
  if ('fp' in includes) or ('tn' in includes):
    label_is_neg = math_ops.logical_not(label_is_pos)
  if weights is not None:
    weights = weights_broadcast_ops.broadcast_weights(
        math_ops.to_float(weights), predictions)
    weights_tiled = array_ops.tile(array_ops.reshape(
        weights, [1, -1]), [num_thresholds, 1])
    thresh_tiled.get_shape().assert_is_compatible_with(
        weights_tiled.get_shape())
  else:
    weights_tiled = None
  values = {}
  update_ops = {}
  # Each requested count gets its own accumulator variable of shape
  # [num_thresholds]; reduction axis 1 sums over the predictions.
  if 'tp' in includes:
    true_p = _create_local('true_positives', shape=[num_thresholds])
    is_true_positive = math_ops.to_float(
        math_ops.logical_and(label_is_pos, pred_is_pos))
    if weights_tiled is not None:
      is_true_positive *= weights_tiled
    update_ops['tp'] = state_ops.assign_add(
        true_p, math_ops.reduce_sum(is_true_positive, 1))
    values['tp'] = true_p
  if 'fn' in includes:
    false_n = _create_local('false_negatives', shape=[num_thresholds])
    is_false_negative = math_ops.to_float(
        math_ops.logical_and(label_is_pos, pred_is_neg))
    if weights_tiled is not None:
      is_false_negative *= weights_tiled
    update_ops['fn'] = state_ops.assign_add(
        false_n, math_ops.reduce_sum(is_false_negative, 1))
    values['fn'] = false_n
  if 'tn' in includes:
    true_n = _create_local('true_negatives', shape=[num_thresholds])
    is_true_negative = math_ops.to_float(
        math_ops.logical_and(label_is_neg, pred_is_neg))
    if weights_tiled is not None:
      is_true_negative *= weights_tiled
    update_ops['tn'] = state_ops.assign_add(
        true_n, math_ops.reduce_sum(is_true_negative, 1))
    values['tn'] = true_n
  if 'fp' in includes:
    false_p = _create_local('false_positives', shape=[num_thresholds])
    is_false_positive = math_ops.to_float(
        math_ops.logical_and(label_is_neg, pred_is_pos))
    if weights_tiled is not None:
      is_false_positive *= weights_tiled
    update_ops['fp'] = state_ops.assign_add(
        false_p, math_ops.reduce_sum(is_false_positive, 1))
    values['fp'] = false_p
  return values, update_ops
def auc(labels, predictions, weights=None, num_thresholds=200,
        metrics_collections=None, updates_collections=None,
        curve='ROC', name=None, summation_method='trapezoidal'):
  """Computes the approximate AUC via a Riemann sum.
  The `auc` function creates four local variables, `true_positives`,
  `true_negatives`, `false_positives` and `false_negatives` that are used to
  compute the AUC. To discretize the AUC curve, a linearly spaced set of
  thresholds is used to compute pairs of recall and precision values. The area
  under the ROC-curve is therefore computed using the height of the recall
  values by the false positive rate, while the area under the PR-curve is the
  computed using the height of the precision values by the recall.
  This value is ultimately returned as `auc`, an idempotent operation that
  computes the area under a discretized curve of precision versus recall values
  (computed using the aforementioned variables). The `num_thresholds` variable
  controls the degree of discretization with larger numbers of thresholds more
  closely approximating the true AUC. The quality of the approximation may vary
  dramatically depending on `num_thresholds`.
  For best results, `predictions` should be distributed approximately uniformly
  in the range [0, 1] and not peaked around 0 or 1. The quality of the AUC
  approximation may be poor if this is not the case. Setting `summation_method`
  to 'minoring' or 'majoring' can help quantify the error in the approximation
  by providing lower or upper bound estimate of the AUC.
  For estimation of the metric over a stream of data, the function creates an
  `update_op` operation that updates these variables and returns the `auc`.
  If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
  Args:
    labels: A `Tensor` whose shape matches `predictions`. Will be cast to
      `bool`.
    predictions: A floating point `Tensor` of arbitrary shape and whose values
      are in the range `[0, 1]`.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions must
      be either `1`, or the same as the corresponding `labels` dimension).
    num_thresholds: The number of thresholds to use when discretizing the roc
      curve.
    metrics_collections: An optional list of collections that `auc` should be
      added to.
    updates_collections: An optional list of collections that `update_op` should
      be added to.
    curve: Specifies the name of the curve to be computed, 'ROC' [default] or
      'PR' for the Precision-Recall-curve.
    name: An optional variable_scope name.
    summation_method: Specifies the Riemann summation method used, 'trapezoidal'
      [default] that applies the trapezoidal rule, 'minoring' that applies
      left summation for increasing intervals and right summation for decreasing
      intervals or 'majoring' that applies the opposite.
  Returns:
    auc: A scalar `Tensor` representing the current area-under-curve.
    update_op: An operation that increments the `true_positives`,
      `true_negatives`, `false_positives` and `false_negatives` variables
      appropriately and whose value matches `auc`.
  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, or if
      `weights` is not `None` and its shape doesn't match `predictions`, or if
      either `metrics_collections` or `updates_collections` are not a list or
      tuple.
  """
  with variable_scope.variable_scope(
      name, 'auc', (labels, predictions, weights)):
    if curve != 'ROC' and curve != 'PR':
      raise ValueError('curve must be either ROC or PR, %s unknown' %
                       (curve))
    kepsilon = 1e-7  # to account for floating point imprecisions
    # Interior thresholds are evenly spaced; the endpoints are nudged just
    # outside [0, 1] so boundary predictions are classified deterministically.
    thresholds = [(i + 1) * 1.0 / (num_thresholds - 1)
                  for i in range(num_thresholds-2)]
    thresholds = [0.0 - kepsilon] + thresholds + [1.0 + kepsilon]
    values, update_ops = _confusion_matrix_at_thresholds(
        labels, predictions, thresholds, weights)
    # Add epsilons to avoid dividing by 0.
    epsilon = 1.0e-6
    def compute_auc(tp, fn, tn, fp, name):
      """Computes the roc-auc or pr-auc based on confusion counts."""
      rec = math_ops.div(tp + epsilon, tp + fn + epsilon)
      if curve == 'ROC':
        fp_rate = math_ops.div(fp, fp + tn + epsilon)
        x = fp_rate
        y = rec
      else:  # curve == 'PR'.
        prec = math_ops.div(tp + epsilon, tp + fp + epsilon)
        x = rec
        y = prec
      # x decreases with increasing threshold, so x[i] - x[i+1] yields
      # non-negative interval widths for the Riemann sum.
      if summation_method == 'trapezoidal':
        return math_ops.reduce_sum(
            math_ops.multiply(x[:num_thresholds - 1] - x[1:],
                              (y[:num_thresholds - 1] + y[1:]) / 2.),
            name=name)
      elif summation_method == 'minoring':
        return math_ops.reduce_sum(
            math_ops.multiply(x[:num_thresholds - 1] - x[1:],
                              math_ops.minimum(y[:num_thresholds - 1], y[1:])),
            name=name)
      elif summation_method == 'majoring':
        return math_ops.reduce_sum(
            math_ops.multiply(x[:num_thresholds - 1] - x[1:],
                              math_ops.maximum(y[:num_thresholds - 1], y[1:])),
            name=name)
      else:
        raise ValueError('Invalid summation_method: %s' % summation_method)
    # sum up the areas of all the trapeziums
    auc_value = compute_auc(
        values['tp'], values['fn'], values['tn'], values['fp'], 'value')
    update_op = compute_auc(
        update_ops['tp'], update_ops['fn'], update_ops['tn'], update_ops['fp'],
        'update_op')
    if metrics_collections:
      ops.add_to_collections(metrics_collections, auc_value)
    if updates_collections:
      ops.add_to_collections(updates_collections, update_op)
    return auc_value, update_op
def mean_absolute_error(labels, predictions, weights=None,
                        metrics_collections=None,
                        updates_collections=None,
                        name=None):
  """Computes the mean absolute error between the labels and predictions.

  Internally this relies on the `mean` metric, which keeps two local
  variables, `total` and `count`: `total` accumulates the weighted sum of
  |predictions - labels| and `count` the weighted number of elements seen.
  The returned `mean_absolute_error` tensor is the idempotent quotient
  `total / count`; `update_op` folds a new batch into both variables.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask
  values.

  Args:
    labels: A `Tensor` of the same shape as `predictions`.
    predictions: A `Tensor` of arbitrary shape.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions
      must be either `1`, or the same as the corresponding `labels`
      dimension).
    metrics_collections: An optional list of collections that
      `mean_absolute_error` should be added to.
    updates_collections: An optional list of collections that `update_op`
      should be added to.
    name: An optional variable_scope name.

  Returns:
    mean_absolute_error: A `Tensor` representing the current mean, the value
      of `total` divided by `count`.
    update_op: An operation that increments the `total` and `count` variables
      appropriately and whose value matches `mean_absolute_error`.

  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, or if
      `weights` is not `None` and its shape doesn't match `predictions`, or
      if either `metrics_collections` or `updates_collections` are not a
      list or tuple.
  """
  predictions, labels, weights = _remove_squeezable_dimensions(
      predictions=predictions, labels=labels, weights=weights)
  # Per-element absolute deviation; its weighted mean is the MAE.
  deviations = math_ops.abs(predictions - labels)
  return mean(deviations, weights, metrics_collections,
              updates_collections, name or 'mean_absolute_error')
def mean_cosine_distance(labels, predictions, dim, weights=None,
                         metrics_collections=None,
                         updates_collections=None,
                         name=None):
  """Computes the cosine distance between the labels and predictions.

  Two local variables, `total` and `count`, accumulate the (optionally
  weighted) cosine distance between `predictions` and `labels`; the reported
  value `mean_distance` is the idempotent ratio `total / count`. The returned
  `update_op` updates the accumulators and yields the current value, so the
  metric can be estimated over a stream of data.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask
  values.

  Args:
    labels: A `Tensor` of arbitrary shape.
    predictions: A `Tensor` of the same shape as `labels`.
    dim: The dimension along which the cosine distance is computed.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions
      must be either `1`, or the same as the corresponding `labels`
      dimension). Also, dimension `dim` must be `1`.
    metrics_collections: An optional list of collections that the metric
      value variable should be added to.
    updates_collections: An optional list of collections that the metric
      update ops should be added to.
    name: An optional variable_scope name.

  Returns:
    mean_distance: A `Tensor` representing the current mean, the value of
      `total` divided by `count`.
    update_op: An operation that increments the `total` and `count`
      variables appropriately.

  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, or if
      `weights` is not `None` and its shape doesn't match `predictions`, or
      if either `metrics_collections` or `updates_collections` are not a
      list or tuple.
  """
  predictions, labels, weights = _remove_squeezable_dimensions(
      predictions=predictions, labels=labels, weights=weights)
  # Per-example dot product along `dim`; for unit-normalized inputs this is
  # the cosine similarity.
  similarity = math_ops.multiply(predictions, labels)
  similarity = math_ops.reduce_sum(similarity,
                                   reduction_indices=[dim,],
                                   keep_dims=True)
  mean_distance, update_op = mean(similarity, weights,
                                  None,
                                  None,
                                  name or 'mean_cosine_distance')
  # Cosine distance = 1 - cosine similarity, applied to both the value and
  # the update tensor.
  mean_distance = math_ops.subtract(1.0, mean_distance)
  update_op = math_ops.subtract(1.0, update_op)

  if metrics_collections:
    ops.add_to_collections(metrics_collections, mean_distance)

  if updates_collections:
    ops.add_to_collections(updates_collections, update_op)

  return mean_distance, update_op
def mean_per_class_accuracy(labels,
                            predictions,
                            num_classes,
                            weights=None,
                            metrics_collections=None,
                            updates_collections=None,
                            name=None):
  """Calculates the mean of the per-class accuracies.

  Calculates the accuracy for each class, then takes the mean of that.

  For estimation of the metric over a stream of data, the function creates an
  `update_op` operation that updates these variables and returns the
  `mean_accuracy`.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask
  values.

  Args:
    labels: A `Tensor` of ground truth labels with shape [batch size] and of
      type `int32` or `int64`. The tensor will be flattened if its rank > 1.
    predictions: A `Tensor` of prediction results for semantic labels, whose
      shape is [batch size] and type `int32` or `int64`. The tensor will be
      flattened if its rank > 1.
    num_classes: The possible number of labels the prediction task can
      have. This value must be provided, since a confusion matrix of
      dimension = [num_classes, num_classes] will be allocated.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions
      must be either `1`, or the same as the corresponding `labels`
      dimension).
    metrics_collections: An optional list of collections that
      `mean_per_class_accuracy' should be added to.
    updates_collections: An optional list of collections `update_op` should
      be added to.
    name: An optional variable_scope name.

  Returns:
    mean_accuracy: A `Tensor` representing the mean per class accuracy.
    update_op: An operation that increments the confusion matrix.

  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, or if
      `weights` is not `None` and its shape doesn't match `predictions`, or
      if either `metrics_collections` or `updates_collections` are not a
      list or tuple.
  """
  with variable_scope.variable_scope(name, 'mean_accuracy',
                                     (predictions, labels, weights)):
    # Check if shape is compatible.
    predictions.get_shape().assert_is_compatible_with(labels.get_shape())

    # Accumulates a [num_classes, num_classes] confusion matrix across
    # batches; `update_op` folds in the current batch's (weighted) counts.
    total_cm, update_op = _streaming_confusion_matrix(
        labels, predictions, num_classes, weights=weights)

    def compute_mean_accuracy(name):
      """Compute the mean per class accuracy via the confusion matrix."""
      # Diagonal entries count correct predictions per class; each row sum is
      # that class's total count, so diag/row_sum is the per-class accuracy.
      per_row_sum = math_ops.to_float(math_ops.reduce_sum(total_cm, 1))
      cm_diag = math_ops.to_float(array_ops.diag_part(total_cm))
      denominator = per_row_sum

      # If the value of the denominator is 0, set it to 1 to avoid
      # zero division.
      denominator = array_ops.where(
          math_ops.greater(denominator, 0), denominator,
          array_ops.ones_like(denominator))
      # NOTE: classes never seen so far have diag 0 and (clamped) denominator
      # 1, so they contribute an accuracy of 0 to the mean.
      accuracies = math_ops.div(cm_diag, denominator)
      return math_ops.reduce_mean(accuracies, name=name)

    mean_accuracy_v = compute_mean_accuracy('mean_accuracy')

    if metrics_collections:
      ops.add_to_collections(metrics_collections, mean_accuracy_v)

    if updates_collections:
      ops.add_to_collections(updates_collections, update_op)

    return mean_accuracy_v, update_op
def mean_iou(labels,
             predictions,
             num_classes,
             weights=None,
             metrics_collections=None,
             updates_collections=None,
             name=None):
  """Calculate per-step mean Intersection-Over-Union (mIOU).

  Mean Intersection-Over-Union is a common evaluation metric for
  semantic image segmentation, which first computes the IOU for each
  semantic class and then computes the average over classes.
  IOU is defined as follows:
    IOU = true_positive / (true_positive + false_positive + false_negative).
  The predictions are accumulated in a confusion matrix, weighted by
  `weights`, and mIOU is then calculated from it.

  For estimation of the metric over a stream of data, the function creates an
  `update_op` operation that updates these variables and returns the
  `mean_iou`.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask
  values.

  Args:
    labels: A `Tensor` of ground truth labels with shape [batch size] and of
      type `int32` or `int64`. The tensor will be flattened if its rank > 1.
    predictions: A `Tensor` of prediction results for semantic labels, whose
      shape is [batch size] and type `int32` or `int64`. The tensor will be
      flattened if its rank > 1.
    num_classes: The possible number of labels the prediction task can
      have. This value must be provided, since a confusion matrix of
      dimension = [num_classes, num_classes] will be allocated.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions
      must be either `1`, or the same as the corresponding `labels`
      dimension).
    metrics_collections: An optional list of collections that `mean_iou`
      should be added to.
    updates_collections: An optional list of collections `update_op` should
      be added to.
    name: An optional variable_scope name.

  Returns:
    mean_iou: A `Tensor` representing the mean intersection-over-union.
    update_op: An operation that increments the confusion matrix.

  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, or if
      `weights` is not `None` and its shape doesn't match `predictions`, or
      if either `metrics_collections` or `updates_collections` are not a
      list or tuple.
  """
  with variable_scope.variable_scope(
      name, 'mean_iou', (predictions, labels, weights)):
    # Check if shape is compatible.
    predictions.get_shape().assert_is_compatible_with(labels.get_shape())

    # Accumulates a [num_classes, num_classes] confusion matrix across
    # batches; `update_op` folds in the current batch's (weighted) counts.
    total_cm, update_op = _streaming_confusion_matrix(labels, predictions,
                                                      num_classes, weights)

    def compute_mean_iou(name):
      """Compute the mean intersection-over-union via the confusion matrix."""
      # Per-class union = row marginal + column marginal - diagonal, i.e.
      # TP + FP + FN; the diagonal holds the per-class true positives.
      sum_over_row = math_ops.to_float(math_ops.reduce_sum(total_cm, 0))
      sum_over_col = math_ops.to_float(math_ops.reduce_sum(total_cm, 1))
      cm_diag = math_ops.to_float(array_ops.diag_part(total_cm))
      denominator = sum_over_row + sum_over_col - cm_diag

      # The mean is only computed over classes that appear in the
      # label or prediction tensor. If the denominator is 0, we need to
      # ignore the class.
      num_valid_entries = math_ops.reduce_sum(math_ops.cast(
          math_ops.not_equal(denominator, 0), dtype=dtypes.float32))

      # If the value of the denominator is 0, set it to 1 to avoid
      # zero division.
      denominator = array_ops.where(
          math_ops.greater(denominator, 0),
          denominator,
          array_ops.ones_like(denominator))

      iou = math_ops.div(cm_diag, denominator)

      # If the number of valid entries is 0 (no classes) we return 0.
      result = array_ops.where(
          math_ops.greater(num_valid_entries, 0),
          math_ops.reduce_sum(iou, name=name) / num_valid_entries,
          0)
      return result

    mean_iou_v = compute_mean_iou('mean_iou')

    if metrics_collections:
      ops.add_to_collections(metrics_collections, mean_iou_v)

    if updates_collections:
      ops.add_to_collections(updates_collections, update_op)

    return mean_iou_v, update_op
def mean_relative_error(labels, predictions, normalizer, weights=None,
                        metrics_collections=None,
                        updates_collections=None,
                        name=None):
  """Computes the mean relative error by normalizing with the given values.

  Two local variables, `total` and `count`, accumulate the (optionally
  weighted) relative absolute error; the reported `mean_relative_error` is
  the idempotent ratio `total / count`. The per-element relative error is
  `|labels - predictions| / normalizer` (zero wherever the normalizer is
  zero). The returned `update_op` updates the accumulators and yields the
  current value, so the metric can be estimated over a stream of data.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask
  values.

  Args:
    labels: A `Tensor` of the same shape as `predictions`.
    predictions: A `Tensor` of arbitrary shape.
    normalizer: A `Tensor` of the same shape as `predictions`.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions
      must be either `1`, or the same as the corresponding `labels`
      dimension).
    metrics_collections: An optional list of collections that
      `mean_relative_error` should be added to.
    updates_collections: An optional list of collections that `update_op`
      should be added to.
    name: An optional variable_scope name.

  Returns:
    mean_relative_error: A `Tensor` representing the current mean, the value
      of `total` divided by `count`.
    update_op: An operation that increments the `total` and `count`
      variables appropriately and whose value matches `mean_relative_error`.

  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, or if
      `weights` is not `None` and its shape doesn't match `predictions`, or
      if either `metrics_collections` or `updates_collections` are not a
      list or tuple.
  """
  predictions, labels, weights = _remove_squeezable_dimensions(
      predictions=predictions, labels=labels, weights=weights)

  predictions, normalizer = confusion_matrix.remove_squeezable_dimensions(
      predictions, normalizer)
  predictions.get_shape().assert_is_compatible_with(normalizer.get_shape())
  # Where the normalizer is zero the relative error is undefined; report it
  # as zero there instead of dividing by zero.
  rel_err = array_ops.where(
      math_ops.equal(normalizer, 0.0),
      array_ops.zeros_like(labels),
      math_ops.div(math_ops.abs(labels - predictions), normalizer))
  return mean(rel_err, weights, metrics_collections,
              updates_collections, name or 'mean_relative_error')
def mean_squared_error(labels, predictions, weights=None,
                       metrics_collections=None,
                       updates_collections=None,
                       name=None):
  """Computes the mean squared error between the labels and predictions.

  Two local variables, `total` and `count`, accumulate the (optionally
  weighted) element-wise squared difference between `predictions` and
  `labels`; the reported `mean_squared_error` is the idempotent ratio
  `total / count`. The returned `update_op` updates the accumulators and
  yields the current value, so the metric can be estimated over a stream of
  data.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask
  values.

  Args:
    labels: A `Tensor` of the same shape as `predictions`.
    predictions: A `Tensor` of arbitrary shape.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions
      must be either `1`, or the same as the corresponding `labels`
      dimension).
    metrics_collections: An optional list of collections that
      `mean_squared_error` should be added to.
    updates_collections: An optional list of collections that `update_op`
      should be added to.
    name: An optional variable_scope name.

  Returns:
    mean_squared_error: A `Tensor` representing the current mean, the value
      of `total` divided by `count`.
    update_op: An operation that increments the `total` and `count`
      variables appropriately and whose value matches `mean_squared_error`.

  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, or if
      `weights` is not `None` and its shape doesn't match `predictions`, or
      if either `metrics_collections` or `updates_collections` are not a
      list or tuple.
  """
  predictions, labels, weights = _remove_squeezable_dimensions(
      predictions=predictions, labels=labels, weights=weights)
  per_element_sq_err = math_ops.square(labels - predictions)
  return mean(per_element_sq_err, weights, metrics_collections,
              updates_collections, name or 'mean_squared_error')
def mean_tensor(values, weights=None, metrics_collections=None,
                updates_collections=None, name=None):
  """Computes the element-wise (weighted) mean of the given tensors.

  In contrast to the `mean` function which returns a scalar with the
  mean, this function returns an average tensor with the same shape as the
  input tensors.

  The `mean_tensor` function creates two local variables,
  `total_tensor` and `count_tensor` that are used to compute the average of
  `values`. This average is ultimately returned as `mean` which is an
  idempotent operation that simply divides `total` by `count`.

  For estimation of the metric over a stream of data, the function creates an
  `update_op` operation that updates these variables and returns the `mean`.
  `update_op` increments `total` with the reduced sum of the product of
  `values` and `weights`, and it increments `count` with the reduced sum of
  `weights`.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask
  values.

  Args:
    values: A `Tensor` of arbitrary dimensions.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `values`, and must be broadcastable to `values` (i.e., all dimensions
      must be either `1`, or the same as the corresponding `values`
      dimension).
    metrics_collections: An optional list of collections that `mean`
      should be added to.
    updates_collections: An optional list of collections that `update_op`
      should be added to.
    name: An optional variable_scope name.

  Returns:
    mean: A float `Tensor` representing the current mean, the value of
      `total` divided by `count`.
    update_op: An operation that increments the `total` and `count`
      variables appropriately and whose value matches `mean_value`.

  Raises:
    ValueError: If `weights` is not `None` and its shape doesn't match
      `values`, or if either `metrics_collections` or `updates_collections`
      are not a list or tuple.
  """
  with variable_scope.variable_scope(name, 'mean', (values, weights)):
    values = math_ops.to_float(values)
    # Accumulators have the same shape as `values`, so the running mean is
    # element-wise rather than a scalar.
    total = _create_local('total_tensor', shape=values.get_shape())
    count = _create_local('count_tensor', shape=values.get_shape())

    # Per-element effective count; scaled by the (broadcast) weights below.
    num_values = array_ops.ones_like(values)
    if weights is not None:
      values, _, weights = _remove_squeezable_dimensions(
          predictions=values, labels=None, weights=weights)
      weights = weights_broadcast_ops.broadcast_weights(
          math_ops.to_float(weights), values)
      values = math_ops.multiply(values, weights)
      num_values = math_ops.multiply(num_values, weights)

    update_total_op = state_ops.assign_add(total, values)
    # Ensure the (possibly weighted) `values` are computed before the count
    # accumulator is updated, keeping the two accumulators consistent.
    with ops.control_dependencies([values]):
      update_count_op = state_ops.assign_add(count, num_values)

    def compute_mean(total, count, name):
      # Clamp the divisor to >= 1 element-wise so never-updated entries
      # yield 0 instead of a division by zero.
      non_zero_count = math_ops.maximum(count,
                                        array_ops.ones_like(count),
                                        name=name)
      return math_ops.truediv(total, non_zero_count, name=name)

    mean_t = compute_mean(total, count, 'value')
    update_op = compute_mean(update_total_op, update_count_op, 'update_op')

    if metrics_collections:
      ops.add_to_collections(metrics_collections, mean_t)

    if updates_collections:
      ops.add_to_collections(updates_collections, update_op)

    return mean_t, update_op
def percentage_below(values, threshold, weights=None,
                     metrics_collections=None,
                     updates_collections=None,
                     name=None):
  """Computes the percentage of values less than the given threshold.

  Two local variables, `total` and `count`, accumulate the (optionally
  weighted) fraction of `values` that fall strictly below `threshold`; the
  reported `percentage` is the idempotent ratio `total / count`. The
  returned `update_op` updates the accumulators and yields the current
  value, so the metric can be estimated over a stream of data.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask
  values.

  Args:
    values: A numeric `Tensor` of arbitrary size.
    threshold: A scalar threshold.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `values`, and must be broadcastable to `values` (i.e., all dimensions
      must be either `1`, or the same as the corresponding `values`
      dimension).
    metrics_collections: An optional list of collections that the metric
      value variable should be added to.
    updates_collections: An optional list of collections that the metric
      update ops should be added to.
    name: An optional variable_scope name.

  Returns:
    percentage: A `Tensor` representing the current mean, the value of
      `total` divided by `count`.
    update_op: An operation that increments the `total` and `count`
      variables appropriately.

  Raises:
    ValueError: If `weights` is not `None` and its shape doesn't match
      `values`, or if either `metrics_collections` or `updates_collections`
      are not a list or tuple.
  """
  # 1.0 where the value is below the threshold, 0.0 elsewhere; its weighted
  # mean is exactly the fraction below the threshold.
  below_indicator = math_ops.to_float(math_ops.less(values, threshold))
  return mean(below_indicator,
              weights,
              metrics_collections,
              updates_collections,
              name or 'percentage_below_threshold')
def _count_condition(values, weights=None, metrics_collections=None,
                     updates_collections=None):
  """Sums the weights of cases where the given values are True.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask
  values.

  Args:
    values: A `bool` `Tensor` of arbitrary size.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `values`, and must be broadcastable to `values` (i.e., all dimensions
      must be either `1`, or the same as the corresponding `values`
      dimension).
    metrics_collections: An optional list of collections that the metric
      value variable should be added to.
    updates_collections: An optional list of collections that the metric
      update ops should be added to.

  Returns:
    value_tensor: A `Tensor` representing the current value of the metric.
    update_op: An operation that accumulates the error from a batch of data.

  Raises:
    ValueError: If `weights` is not `None` and its shape doesn't match
      `values`, or if either `metrics_collections` or `updates_collections`
      are not a list or tuple.
  """
  check_ops.assert_type(values, dtypes.bool)
  # Scalar local variable holding the running (weighted) count of True
  # entries across all processed batches.
  count = _create_local('count', shape=[])

  values = math_ops.to_float(values)
  if weights is not None:
    # The rank assertion must run before the weights are used, so the cast
    # and multiply are placed inside its control-dependency scope.
    with ops.control_dependencies((
        check_ops.assert_rank_in(weights, (0, array_ops.rank(values))),)):
      weights = math_ops.to_float(weights)
      values = math_ops.multiply(values, weights)

  value_tensor = array_ops.identity(count)
  update_op = state_ops.assign_add(count, math_ops.reduce_sum(values))

  if metrics_collections:
    ops.add_to_collections(metrics_collections, value_tensor)

  if updates_collections:
    ops.add_to_collections(updates_collections, update_op)

  return value_tensor, update_op
def false_negatives(labels, predictions, weights=None,
                    metrics_collections=None,
                    updates_collections=None,
                    name=None):
  """Computes the total number of false negatives.

  Maintains a running (optionally weighted) count of cases where the label
  is `True` but the prediction is `False`, streamed across batches via the
  returned `update_op`.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask
  values.

  Args:
    labels: The ground truth values, a `Tensor` whose dimensions must match
      `predictions`. Will be cast to `bool`.
    predictions: The predicted values, a `Tensor` of arbitrary dimensions.
      Will be cast to `bool`.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions
      must be either `1`, or the same as the corresponding `labels`
      dimension).
    metrics_collections: An optional list of collections that the metric
      value variable should be added to.
    updates_collections: An optional list of collections that the metric
      update ops should be added to.
    name: An optional variable_scope name.

  Returns:
    value_tensor: A `Tensor` representing the current value of the metric.
    update_op: An operation that accumulates the error from a batch of data.

  Raises:
    ValueError: If `weights` is not `None` and its shape doesn't match
      `values`, or if either `metrics_collections` or `updates_collections`
      are not a list or tuple.
  """
  with variable_scope.variable_scope(
      name, 'false_negatives', (predictions, labels, weights)):

    predictions, labels, weights = _remove_squeezable_dimensions(
        predictions=math_ops.cast(predictions, dtype=dtypes.bool),
        labels=math_ops.cast(labels, dtype=dtypes.bool),
        weights=weights)
    # A false negative is a positive label predicted as negative.
    fn_mask = math_ops.logical_and(math_ops.equal(labels, True),
                                   math_ops.equal(predictions, False))
    return _count_condition(fn_mask, weights, metrics_collections,
                            updates_collections)
def false_negatives_at_thresholds(labels, predictions, thresholds, weights=None,
                                  metrics_collections=None,
                                  updates_collections=None,
                                  name=None):
  """Computes false negatives at provided threshold values.

  Delegates to the shared confusion-matrix-at-thresholds machinery,
  requesting only the false-negative ('fn') counts.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask
  values.

  Args:
    labels: A `Tensor` whose shape matches `predictions`. Will be cast to
      `bool`.
    predictions: A floating point `Tensor` of arbitrary shape and whose
      values are in the range `[0, 1]`.
    thresholds: A python list or tuple of float thresholds in `[0, 1]`.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions
      must be either `1`, or the same as the corresponding `labels`
      dimension).
    metrics_collections: An optional list of collections that
      `false_negatives` should be added to.
    updates_collections: An optional list of collections that `update_op`
      should be added to.
    name: An optional variable_scope name.

  Returns:
    false_negatives: A float `Tensor` of shape `[len(thresholds)]`.
    update_op: An operation that updates the `false_negatives` variable and
      returns its current value.

  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, or if
      `weights` is not `None` and its shape doesn't match `predictions`, or
      if either `metrics_collections` or `updates_collections` are not a
      list or tuple.
  """
  with variable_scope.variable_scope(name, 'false_negatives',
                                     (predictions, labels, weights)):
    tensors, updates = _confusion_matrix_at_thresholds(
        labels, predictions, thresholds, weights=weights, includes=('fn',))

    fn_value = tensors['fn']
    fn_update = updates['fn']
    if metrics_collections:
      ops.add_to_collections(metrics_collections, fn_value)

    if updates_collections:
      ops.add_to_collections(updates_collections, fn_update)

    return fn_value, fn_update
def false_positives(labels, predictions, weights=None,
                    metrics_collections=None,
                    updates_collections=None,
                    name=None):
  """Sum the weights of false positives.

  Maintains a running (optionally weighted) count of cases where the label
  is `False` but the prediction is `True`, streamed across batches via the
  returned `update_op`.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask
  values.

  Args:
    labels: The ground truth values, a `Tensor` whose dimensions must match
      `predictions`. Will be cast to `bool`.
    predictions: The predicted values, a `Tensor` of arbitrary dimensions.
      Will be cast to `bool`.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions
      must be either `1`, or the same as the corresponding `labels`
      dimension).
    metrics_collections: An optional list of collections that the metric
      value variable should be added to.
    updates_collections: An optional list of collections that the metric
      update ops should be added to.
    name: An optional variable_scope name.

  Returns:
    value_tensor: A `Tensor` representing the current value of the metric.
    update_op: An operation that accumulates the error from a batch of data.

  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, or if
      `weights` is not `None` and its shape doesn't match `predictions`, or
      if either `metrics_collections` or `updates_collections` are not a
      list or tuple.
  """
  with variable_scope.variable_scope(
      name, 'false_positives', (predictions, labels, weights)):

    predictions, labels, weights = _remove_squeezable_dimensions(
        predictions=math_ops.cast(predictions, dtype=dtypes.bool),
        labels=math_ops.cast(labels, dtype=dtypes.bool),
        weights=weights)
    # A false positive is a negative label predicted as positive.
    fp_mask = math_ops.logical_and(math_ops.equal(labels, False),
                                   math_ops.equal(predictions, True))
    return _count_condition(fp_mask, weights, metrics_collections,
                            updates_collections)
def false_positives_at_thresholds(labels, predictions, thresholds, weights=None,
                                  metrics_collections=None,
                                  updates_collections=None,
                                  name=None):
  """Computes false positives at provided threshold values.

  Delegates to the shared confusion-matrix-at-thresholds machinery,
  requesting only the false-positive ('fp') counts.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask
  values.

  Args:
    labels: A `Tensor` whose shape matches `predictions`. Will be cast to
      `bool`.
    predictions: A floating point `Tensor` of arbitrary shape and whose
      values are in the range `[0, 1]`.
    thresholds: A python list or tuple of float thresholds in `[0, 1]`.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions
      must be either `1`, or the same as the corresponding `labels`
      dimension).
    metrics_collections: An optional list of collections that
      `false_positives` should be added to.
    updates_collections: An optional list of collections that `update_op`
      should be added to.
    name: An optional variable_scope name.

  Returns:
    false_positives: A float `Tensor` of shape `[len(thresholds)]`.
    update_op: An operation that updates the `false_positives` variable and
      returns its current value.

  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, or if
      `weights` is not `None` and its shape doesn't match `predictions`, or
      if either `metrics_collections` or `updates_collections` are not a
      list or tuple.
  """
  with variable_scope.variable_scope(name, 'false_positives',
                                     (predictions, labels, weights)):
    tensors, updates = _confusion_matrix_at_thresholds(
        labels, predictions, thresholds, weights=weights, includes=('fp',))

    fp_value = tensors['fp']
    fp_update = updates['fp']
    if metrics_collections:
      ops.add_to_collections(metrics_collections, fp_value)

    if updates_collections:
      ops.add_to_collections(updates_collections, fp_update)

    return fp_value, fp_update
def true_negatives_at_thresholds(labels, predictions, thresholds, weights=None,
                                 metrics_collections=None,
                                 updates_collections=None,
                                 name=None):
  """Computes true negatives at provided threshold values.

  Delegates to the shared confusion-matrix-at-thresholds machinery,
  requesting only the true-negative ('tn') counts.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask
  values.

  Args:
    labels: A `Tensor` whose shape matches `predictions`. Will be cast to
      `bool`.
    predictions: A floating point `Tensor` of arbitrary shape and whose
      values are in the range `[0, 1]`.
    thresholds: A python list or tuple of float thresholds in `[0, 1]`.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions
      must be either `1`, or the same as the corresponding `labels`
      dimension).
    metrics_collections: An optional list of collections that
      `true_negatives` should be added to.
    updates_collections: An optional list of collections that `update_op`
      should be added to.
    name: An optional variable_scope name.

  Returns:
    true_negatives: A float `Tensor` of shape `[len(thresholds)]`.
    update_op: An operation that updates the `true_negatives` variable and
      returns its current value.

  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, or if
      `weights` is not `None` and its shape doesn't match `predictions`, or
      if either `metrics_collections` or `updates_collections` are not a
      list or tuple.
  """
  with variable_scope.variable_scope(name, 'true_negatives',
                                     (predictions, labels, weights)):
    tensors, updates = _confusion_matrix_at_thresholds(
        labels, predictions, thresholds, weights=weights, includes=('tn',))

    tn_value = tensors['tn']
    tn_update = updates['tn']
    if metrics_collections:
      ops.add_to_collections(metrics_collections, tn_value)

    if updates_collections:
      ops.add_to_collections(updates_collections, tn_update)

    return tn_value, tn_update
def true_positives(labels, predictions, weights=None,
                   metrics_collections=None,
                   updates_collections=None,
                   name=None):
  """Sum the weights of true_positives.

  Maintains a running (optionally weighted) count of cases where both the
  label and the prediction are `True`, streamed across batches via the
  returned `update_op`.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask
  values.

  Args:
    labels: The ground truth values, a `Tensor` whose dimensions must match
      `predictions`. Will be cast to `bool`.
    predictions: The predicted values, a `Tensor` of arbitrary dimensions.
      Will be cast to `bool`.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions
      must be either `1`, or the same as the corresponding `labels`
      dimension).
    metrics_collections: An optional list of collections that the metric
      value variable should be added to.
    updates_collections: An optional list of collections that the metric
      update ops should be added to.
    name: An optional variable_scope name.

  Returns:
    value_tensor: A `Tensor` representing the current value of the metric.
    update_op: An operation that accumulates the error from a batch of data.

  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, or if
      `weights` is not `None` and its shape doesn't match `predictions`, or
      if either `metrics_collections` or `updates_collections` are not a
      list or tuple.
  """
  with variable_scope.variable_scope(
      name, 'true_positives', (predictions, labels, weights)):

    predictions, labels, weights = _remove_squeezable_dimensions(
        predictions=math_ops.cast(predictions, dtype=dtypes.bool),
        labels=math_ops.cast(labels, dtype=dtypes.bool),
        weights=weights)
    # A true positive is a positive label predicted as positive.
    tp_mask = math_ops.logical_and(math_ops.equal(labels, True),
                                   math_ops.equal(predictions, True))
    return _count_condition(tp_mask, weights, metrics_collections,
                            updates_collections)
def true_positives_at_thresholds(labels, predictions, thresholds, weights=None,
                                 metrics_collections=None,
                                 updates_collections=None,
                                 name=None):
  """Computes true positive counts at each of the provided thresholds.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask
  values.

  Args:
    labels: A `Tensor` whose shape matches `predictions`. Will be cast to
      `bool`.
    predictions: A floating point `Tensor` of arbitrary shape and whose
      values are in the range `[0, 1]`.
    thresholds: A python list or tuple of float thresholds in `[0, 1]`.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions
      must be either `1`, or the same as the corresponding `labels`
      dimension).
    metrics_collections: An optional list of collections that
      `true_positives` should be added to.
    updates_collections: An optional list of collections that `update_op`
      should be added to.
    name: An optional variable_scope name.

  Returns:
    true_positives: A float `Tensor` of shape `[len(thresholds)]`.
    update_op: An operation that updates the `true_positives` variable and
      returns its current value.

  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, or if
      `weights` is not `None` and its shape doesn't match `predictions`, or
      if either `metrics_collections` or `updates_collections` are not a
      list or tuple.
  """
  with variable_scope.variable_scope(name, 'true_positives',
                                     (predictions, labels, weights)):
    # Only the 'tp' entry of the confusion matrix is needed here.
    values, update_ops = _confusion_matrix_at_thresholds(
        labels, predictions, thresholds, weights=weights, includes=('tp',))

    tp_value = values['tp']
    tp_update = update_ops['tp']
    if metrics_collections:
      ops.add_to_collections(metrics_collections, tp_value)
    if updates_collections:
      ops.add_to_collections(updates_collections, tp_update)
    return tp_value, tp_update
def precision(labels, predictions, weights=None,
              metrics_collections=None, updates_collections=None,
              name=None):
  """Computes the precision of the predictions with respect to the labels.

  Two local variables, `true_positives` and `false_positives`, accumulate
  counts across batches. The returned `precision` is the idempotent ratio
  `true_positives / (true_positives + false_positives)`, defined as 0 when
  the denominator is 0.

  For estimation over a stream of data, `update_op` updates the underlying
  variables and returns the precision, weighting each prediction by the
  corresponding value in `weights`.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask
  values.

  Args:
    labels: The ground truth values, a `Tensor` whose dimensions must match
      `predictions`. Will be cast to `bool`.
    predictions: The predicted values, a `Tensor` of arbitrary dimensions.
      Will be cast to `bool`.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions
      must be either `1`, or the same as the corresponding `labels`
      dimension).
    metrics_collections: An optional list of collections that `precision`
      should be added to.
    updates_collections: An optional list of collections that `update_op`
      should be added to.
    name: An optional variable_scope name.

  Returns:
    precision: Scalar float `Tensor` with the value of `true_positives`
      divided by the sum of `true_positives` and `false_positives`.
    update_op: `Operation` that increments `true_positives` and
      `false_positives` variables appropriately and whose value matches
      `precision`.

  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, or if
      `weights` is not `None` and its shape doesn't match `predictions`, or
      if either `metrics_collections` or `updates_collections` are not a
      list or tuple.
  """
  with variable_scope.variable_scope(
      name, 'precision', (predictions, labels, weights)):

    predictions, labels, weights = _remove_squeezable_dimensions(
        predictions=math_ops.cast(predictions, dtype=dtypes.bool),
        labels=math_ops.cast(labels, dtype=dtypes.bool),
        weights=weights)

    tp_value, tp_update = true_positives(
        labels, predictions, weights, metrics_collections=None,
        updates_collections=None, name=None)
    fp_value, fp_update = false_positives(
        labels, predictions, weights, metrics_collections=None,
        updates_collections=None, name=None)

    def _safe_precision(tp, fp, result_name):
      # Return 0 rather than NaN when no positive predictions were seen.
      denominator = tp + fp
      return array_ops.where(
          math_ops.greater(denominator, 0),
          math_ops.div(tp, denominator),
          0,
          result_name)

    p = _safe_precision(tp_value, fp_value, 'value')
    update_op = _safe_precision(tp_update, fp_update, 'update_op')

    if metrics_collections:
      ops.add_to_collections(metrics_collections, p)
    if updates_collections:
      ops.add_to_collections(updates_collections, update_op)
    return p, update_op
def precision_at_thresholds(labels, predictions, thresholds,
                            weights=None,
                            metrics_collections=None,
                            updates_collections=None, name=None):
  """Computes precision values for different `thresholds` on `predictions`.

  Four local variables — `true_positives`, `true_negatives`,
  `false_positives` and `false_negatives` — are maintained per threshold.
  `precision[i]` is the total weight of values in `predictions` above
  `thresholds[i]` whose corresponding entry in `labels` is `True`, divided
  by the total weight of values in `predictions` above `thresholds[i]`
  (`true_positives[i] / (true_positives[i] + false_positives[i])`).

  For estimation over a stream of data, `update_op` updates these variables
  and returns the precision.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask
  values.

  Args:
    labels: The ground truth values, a `Tensor` whose dimensions must match
      `predictions`. Will be cast to `bool`.
    predictions: A floating point `Tensor` of arbitrary shape and whose
      values are in the range `[0, 1]`.
    thresholds: A python list or tuple of float thresholds in `[0, 1]`.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions
      must be either `1`, or the same as the corresponding `labels`
      dimension).
    metrics_collections: An optional list of collections that `auc` should
      be added to.
    updates_collections: An optional list of collections that `update_op`
      should be added to.
    name: An optional variable_scope name.

  Returns:
    precision: A float `Tensor` of shape `[len(thresholds)]`.
    update_op: An operation that increments the `true_positives`,
      `true_negatives`, `false_positives` and `false_negatives` variables
      that are used in the computation of `precision`.

  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, or if
      `weights` is not `None` and its shape doesn't match `predictions`, or
      if either `metrics_collections` or `updates_collections` are not a
      list or tuple.
  """
  with variable_scope.variable_scope(name, 'precision_at_thresholds',
                                     (predictions, labels, weights)):
    counts, count_updates = _confusion_matrix_at_thresholds(
        labels, predictions, thresholds, weights, includes=('tp', 'fp'))

    # A tiny constant keeps the division well-defined when tp + fp == 0.
    epsilon = 1e-7

    def _precision_ratio(tp, fp, suffix):
      return math_ops.div(tp, epsilon + tp + fp, name='precision_' + suffix)

    prec = _precision_ratio(counts['tp'], counts['fp'], 'value')
    update_op = _precision_ratio(
        count_updates['tp'], count_updates['fp'], 'update_op')

    if metrics_collections:
      ops.add_to_collections(metrics_collections, prec)
    if updates_collections:
      ops.add_to_collections(updates_collections, update_op)
    return prec, update_op
def recall(labels, predictions, weights=None,
           metrics_collections=None, updates_collections=None,
           name=None):
  """Computes the recall of the predictions with respect to the labels.

  Two local variables, `true_positives` and `false_negatives`, accumulate
  counts across batches. The returned `recall` is the idempotent ratio
  `true_positives / (true_positives + false_negatives)`, defined as 0 when
  the denominator is 0.

  For estimation over a stream of data, `update_op` updates the underlying
  variables and returns the recall, weighting each prediction by the
  corresponding value in `weights`.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask
  values.

  Args:
    labels: The ground truth values, a `Tensor` whose dimensions must match
      `predictions`. Will be cast to `bool`.
    predictions: The predicted values, a `Tensor` of arbitrary dimensions.
      Will be cast to `bool`.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions
      must be either `1`, or the same as the corresponding `labels`
      dimension).
    metrics_collections: An optional list of collections that `recall`
      should be added to.
    updates_collections: An optional list of collections that `update_op`
      should be added to.
    name: An optional variable_scope name.

  Returns:
    recall: Scalar float `Tensor` with the value of `true_positives` divided
      by the sum of `true_positives` and `false_negatives`.
    update_op: `Operation` that increments `true_positives` and
      `false_negatives` variables appropriately and whose value matches
      `recall`.

  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, or if
      `weights` is not `None` and its shape doesn't match `predictions`, or
      if either `metrics_collections` or `updates_collections` are not a
      list or tuple.
  """
  with variable_scope.variable_scope(
      name, 'recall', (predictions, labels, weights)):
    predictions, labels, weights = _remove_squeezable_dimensions(
        predictions=math_ops.cast(predictions, dtype=dtypes.bool),
        labels=math_ops.cast(labels, dtype=dtypes.bool),
        weights=weights)

    tp_value, tp_update = true_positives(
        labels, predictions, weights, metrics_collections=None,
        updates_collections=None, name=None)
    fn_value, fn_update = false_negatives(
        labels, predictions, weights, metrics_collections=None,
        updates_collections=None, name=None)

    def _safe_recall(tp, fn, result_name):
      # Return 0 rather than NaN when there are no positive labels.
      denominator = tp + fn
      return array_ops.where(
          math_ops.greater(denominator, 0),
          math_ops.div(tp, denominator),
          0,
          result_name)

    rec = _safe_recall(tp_value, fn_value, 'value')
    update_op = _safe_recall(tp_update, fn_update, 'update_op')

    if metrics_collections:
      ops.add_to_collections(metrics_collections, rec)
    if updates_collections:
      ops.add_to_collections(updates_collections, update_op)
    return rec, update_op
def _at_k_name(name, k=None, class_id=None):
if k is not None:
name = '%s_at_%d' % (name, k)
else:
name = '%s_at_k' % (name)
if class_id is not None:
name = '%s_class%d' % (name, class_id)
return name
def _select_class_id(ids, selected_id):
  """Filter all but `selected_id` out of `ids`.

  Args:
    ids: `int64` `Tensor` or `SparseTensor` of IDs.
    selected_id: Int id to select.

  Returns:
    `SparseTensor` of same dimensions as `ids`. This contains only the entries
    equal to `selected_id`.
  """
  ids = sparse_tensor.convert_to_tensor_or_sparse_tensor(ids)
  if isinstance(ids, sparse_tensor.SparseTensor):
    # Sparse input: keep only the entries whose value equals `selected_id`.
    return sparse_ops.sparse_retain(
        ids, math_ops.equal(ids.values, selected_id))

  # Dense input: implemented via set intersection with a tensor filled with
  # `selected_id`.
  # TODO(ptucker): Make this more efficient, maybe add a sparse version of
  # tf.equal and tf.reduce_any?

  # Shape of filled IDs is the same as `ids` with the last dim collapsed to 1.
  ids_shape = array_ops.shape(ids, out_type=dtypes.int64)
  ids_last_dim = array_ops.size(ids_shape) - 1
  filled_selected_id_shape = math_ops.reduced_shape(
      ids_shape, array_ops.reshape(ids_last_dim, [1]))

  # Intersect `ids` with the selected ID; entries not equal to `selected_id`
  # drop out of the intersection.
  filled_selected_id = array_ops.fill(
      filled_selected_id_shape, math_ops.to_int64(selected_id))
  result = sets.set_intersection(filled_selected_id, ids)
  # Re-wrap with the original dense shape so the result lines up with `ids`.
  return sparse_tensor.SparseTensor(
      indices=result.indices, values=result.values, dense_shape=ids_shape)
def _maybe_select_class_id(labels, predictions_idx, selected_id=None):
"""If class ID is specified, filter all other classes.
Args:
labels: `int64` `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
target classes for the associated prediction. Commonly, N=1 and `labels`
has shape [batch_size, num_labels]. [D1, ... DN] must match
`predictions_idx`.
predictions_idx: `int64` `Tensor` of class IDs, with shape [D1, ... DN, k]
where N >= 1. Commonly, N=1 and `predictions_idx` has shape
[batch size, k].
selected_id: Int id to select.
Returns:
Tuple of `labels` and `predictions_idx`, possibly with classes removed.
"""
if selected_id is None:
return labels, predictions_idx
return (_select_class_id(labels, selected_id),
_select_class_id(predictions_idx, selected_id))
def _sparse_true_positive_at_k(labels,
                               predictions_idx,
                               class_id=None,
                               weights=None,
                               name=None):
  """Calculates true positives for recall@k and precision@k.

  If `class_id` is specified, calculate binary true positives for `class_id`
  only. Otherwise, calculate metrics for `k` predicted vs `n` label classes,
  where `n` is the 2nd dimension of `labels_sparse`.

  Args:
    labels: `int64` `Tensor` or `SparseTensor` with shape
      [D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
      target classes for the associated prediction. Commonly, N=1 and
      `labels` has shape [batch_size, num_labels]. [D1, ... DN] must match
      `predictions_idx`.
    predictions_idx: 1-D or higher `int64` `Tensor` with last dimension `k`,
      top `k` predicted classes. For rank `n`, the first `n-1` dimensions
      must match `labels`.
    class_id: Class for which we want binary metrics.
    weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
      `labels`. If the latter, it must be broadcastable to `labels` (i.e.,
      all dimensions must be either `1`, or the same as the corresponding
      `labels` dimension).
    name: Name of operation.

  Returns:
    A [D1, ... DN] `Tensor` of true positive counts.
  """
  with ops.name_scope(
      name, 'true_positives', (predictions_idx, labels, weights)):
    labels, predictions_idx = _maybe_select_class_id(
        labels, predictions_idx, class_id)
    # A true positive is a predicted class that also appears in the labels.
    overlap = sets.set_intersection(predictions_idx, labels)
    tp = math_ops.to_double(sets.set_size(overlap))
    if weights is None:
      return tp
    with ops.control_dependencies((
        weights_broadcast_ops.assert_broadcastable(weights, tp),)):
      return math_ops.multiply(tp, math_ops.to_double(weights))
def _streaming_sparse_true_positive_at_k(labels,
                                         predictions_idx,
                                         k=None,
                                         class_id=None,
                                         weights=None,
                                         name=None):
  """Calculates weighted per step true positives for recall@k and precision@k.

  If `class_id` is specified, calculate binary true positives for `class_id`
  only. Otherwise, calculate metrics for `k` predicted vs `n` label classes,
  where `n` is the 2nd dimension of `labels`.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask
  values.

  Args:
    labels: `int64` `Tensor` or `SparseTensor` with shape
      [D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
      target classes for the associated prediction. Commonly, N=1 and
      `labels` has shape [batch_size, num_labels]. [D1, ... DN] must match
      `predictions_idx`.
    predictions_idx: 1-D or higher `int64` `Tensor` with last dimension `k`,
      top `k` predicted classes. For rank `n`, the first `n-1` dimensions
      must match `labels`.
    k: Integer, k for @k metric. This is only used for default op name.
    class_id: Class for which we want binary metrics.
    weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
      `labels`. If the latter, it must be broadcastable to `labels` (i.e.,
      all dimensions must be either `1`, or the same as the corresponding
      `labels` dimension).
    name: Name of new variable, and namespace for other dependent ops.

  Returns:
    A tuple of `Variable` and update `Operation`.

  Raises:
    ValueError: If `weights` is not `None` and has an incompatible shape.
  """
  with ops.name_scope(
      name, _at_k_name('true_positive', k, class_id=class_id),
      (predictions_idx, labels, weights)) as scope:
    per_example_tp = _sparse_true_positive_at_k(
        predictions_idx=predictions_idx, labels=labels, class_id=class_id,
        weights=weights)
    batch_total = math_ops.to_double(math_ops.reduce_sum(per_example_tp))

    # Accumulate batch totals into a scalar local variable across steps.
    accumulator = _local_variable(
        array_ops.zeros([], dtype=dtypes.float64), name=scope)
    return accumulator, state_ops.assign_add(
        accumulator, batch_total, name='update')
def _sparse_false_negative_at_k(labels,
                                predictions_idx,
                                class_id=None,
                                weights=None):
  """Calculates false negatives for recall@k.

  If `class_id` is specified, calculate binary true positives for `class_id`
  only. Otherwise, calculate metrics for `k` predicted vs `n` label classes,
  where `n` is the 2nd dimension of `labels_sparse`.

  Args:
    labels: `int64` `Tensor` or `SparseTensor` with shape
      [D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
      target classes for the associated prediction. Commonly, N=1 and
      `labels` has shape [batch_size, num_labels]. [D1, ... DN] must match
      `predictions_idx`.
    predictions_idx: 1-D or higher `int64` `Tensor` with last dimension `k`,
      top `k` predicted classes. For rank `n`, the first `n-1` dimensions
      must match `labels`.
    class_id: Class for which we want binary metrics.
    weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
      `labels`. If the latter, it must be broadcastable to `labels` (i.e.,
      all dimensions must be either `1`, or the same as the corresponding
      `labels` dimension).

  Returns:
    A [D1, ... DN] `Tensor` of false negative counts.
  """
  with ops.name_scope(
      None, 'false_negatives', (predictions_idx, labels, weights)):
    labels, predictions_idx = _maybe_select_class_id(labels,
                                                     predictions_idx,
                                                     class_id)
    # A false negative is a label class absent from the predictions:
    # aminusb=False computes labels \ predictions_idx.
    missed = sets.set_difference(predictions_idx, labels, aminusb=False)
    fn = math_ops.to_double(sets.set_size(missed))
    if weights is None:
      return fn
    with ops.control_dependencies((
        weights_broadcast_ops.assert_broadcastable(weights, fn),)):
      return math_ops.multiply(fn, math_ops.to_double(weights))
def _streaming_sparse_false_negative_at_k(labels,
                                          predictions_idx,
                                          k,
                                          class_id=None,
                                          weights=None,
                                          name=None):
  """Calculates weighted per step false negatives for recall@k.

  If `class_id` is specified, calculate binary true positives for `class_id`
  only. Otherwise, calculate metrics for `k` predicted vs `n` label classes,
  where `n` is the 2nd dimension of `labels`.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask
  values.

  Args:
    labels: `int64` `Tensor` or `SparseTensor` with shape
      [D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
      target classes for the associated prediction. Commonly, N=1 and
      `labels` has shape [batch_size, num_labels]. [D1, ... DN] must match
      `predictions_idx`.
    predictions_idx: 1-D or higher `int64` `Tensor` with last dimension `k`,
      top `k` predicted classes. For rank `n`, the first `n-1` dimensions
      must match `labels`.
    k: Integer, k for @k metric. This is only used for default op name.
    class_id: Class for which we want binary metrics.
    weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
      `labels`. If the latter, it must be broadcastable to `labels` (i.e.,
      all dimensions must be either `1`, or the same as the corresponding
      `labels` dimension).
    name: Name of new variable, and namespace for other dependent ops.

  Returns:
    A tuple of `Variable` and update `Operation`.

  Raises:
    ValueError: If `weights` is not `None` and has an incompatible shape.
  """
  with ops.name_scope(
      name, _at_k_name('false_negative', k, class_id=class_id),
      (predictions_idx, labels, weights)) as scope:
    per_example_fn = _sparse_false_negative_at_k(
        predictions_idx=predictions_idx, labels=labels, class_id=class_id,
        weights=weights)
    batch_total = math_ops.to_double(math_ops.reduce_sum(per_example_fn))

    # Accumulate batch totals into a scalar local variable across steps.
    accumulator = _local_variable(
        array_ops.zeros([], dtype=dtypes.float64), name=scope)
    return accumulator, state_ops.assign_add(
        accumulator, batch_total, name='update')
def recall_at_k(labels,
                predictions,
                k,
                class_id=None,
                weights=None,
                metrics_collections=None,
                updates_collections=None,
                name=None):
  """Computes recall@k of the predictions with respect to sparse labels.

  If `class_id` is specified, recall is computed only over batch entries
  whose labels contain `class_id`, as the fraction of those entries for
  which `class_id` appears in the top-k `predictions`. If `class_id` is not
  specified, recall is how often, on average, a class among an entry's
  labels appears in its top-k `predictions`.

  Two local variables, `true_positive_at_<k>` and `false_negative_at_<k>`,
  accumulate counts; the metric is
  `true_positive_at_<k> / (true_positive_at_<k> + false_negative_at_<k>)`.

  For estimation over a stream of data, `update_op` updates these variables
  and returns `recall_at_<k>`. Internally, a `top_k` op computes the top `k`
  `predictions`; set operations against `labels` yield the true positives
  and false negatives weighted by `weights`.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask
  values.

  Args:
    labels: `int64` `Tensor` or `SparseTensor` with shape
      [D1, ... DN, num_labels] or [D1, ... DN], where the latter implies
      num_labels=1. N >= 1 and num_labels is the number of target classes
      for the associated prediction. Commonly, N=1 and `labels` has shape
      [batch_size, num_labels]. [D1, ... DN] must match `predictions`.
      Values should be in range [0, num_classes), where num_classes is the
      last dimension of `predictions`. Values outside this range always
      count towards `false_negative_at_<k>`.
    predictions: Float `Tensor` with shape [D1, ... DN, num_classes] where
      N >= 1. Commonly, N=1 and predictions has shape
      [batch size, num_classes]. The final dimension contains the logit
      values for each class. [D1, ... DN] must match `labels`.
    k: Integer, k for @k metric.
    class_id: Integer class ID for which we want binary metrics. This should
      be in range [0, num_classes), where num_classes is the last dimension
      of `predictions`. If class_id is outside this range, the method
      returns NAN.
    weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
      `labels`. If the latter, it must be broadcastable to `labels` (i.e.,
      all dimensions must be either `1`, or the same as the corresponding
      `labels` dimension).
    metrics_collections: An optional list of collections that values should
      be added to.
    updates_collections: An optional list of collections that updates should
      be added to.
    name: Name of new update operation, and namespace for other dependent
      ops.

  Returns:
    recall: Scalar `float64` `Tensor` with the value of `true_positives`
      divided by the sum of `true_positives` and `false_negatives`.
    update_op: `Operation` that increments `true_positives` and
      `false_negatives` variables appropriately, and whose value matches
      `recall`.

  Raises:
    ValueError: If `weights` is not `None` and its shape doesn't match
      `predictions`, or if either `metrics_collections` or
      `updates_collections` are not a list or tuple.
  """
  with ops.name_scope(
      name, _at_k_name('recall', k, class_id=class_id),
      (predictions, labels, weights)) as scope:
    labels = _maybe_expand_labels(labels, predictions)

    # Reduce logits to the indices of the top-k classes, then delegate to
    # the index-based implementation.
    _, predicted_classes = nn.top_k(predictions, k)
    return _sparse_recall_at_top_k(
        labels=labels,
        predictions_idx=predicted_classes,
        k=k,
        class_id=class_id,
        weights=weights,
        metrics_collections=metrics_collections,
        updates_collections=updates_collections,
        name=scope)
def _sparse_recall_at_top_k(labels,
                            predictions_idx,
                            k=None,
                            class_id=None,
                            weights=None,
                            metrics_collections=None,
                            updates_collections=None,
                            name=None):
  """Computes recall@k of top-k predictions with respect to sparse labels.

  Differs from `recall_at_k` in that predictions must be in the form of top
  `k` class indices, whereas `recall_at_k` expects logits. Refer to
  `recall_at_k` for more details.

  Args:
    labels: `int64` `Tensor` or `SparseTensor` with shape
      [D1, ... DN, num_labels] or [D1, ... DN], where the latter implies
      num_labels=1. N >= 1 and num_labels is the number of target classes
      for the associated prediction. Commonly, N=1 and `labels` has shape
      [batch_size, num_labels]. [D1, ... DN] must match `predictions`.
      Values should be in range [0, num_classes), where num_classes is the
      last dimension of `predictions`. Values outside this range always
      count towards `false_negative_at_<k>`.
    predictions_idx: Integer `Tensor` with shape [D1, ... DN, k] where
      N >= 1. Commonly, N=1 and predictions has shape [batch size, k]. The
      final dimension contains the top `k` predicted class indices.
      [D1, ... DN] must match `labels`.
    k: Integer, k for @k metric.
    class_id: Integer class ID for which we want binary metrics. This should
      be in range [0, num_classes), where num_classes is the last dimension
      of `predictions`. If class_id is outside this range, the method
      returns NAN.
    weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
      `labels`. If the latter, it must be broadcastable to `labels` (i.e.,
      all dimensions must be either `1`, or the same as the corresponding
      `labels` dimension).
    metrics_collections: An optional list of collections that values should
      be added to.
    updates_collections: An optional list of collections that updates should
      be added to.
    name: Name of new update operation, and namespace for other dependent
      ops.

  Returns:
    recall: Scalar `float64` `Tensor` with the value of `true_positives`
      divided by the sum of `true_positives` and `false_negatives`.
    update_op: `Operation` that increments `true_positives` and
      `false_negatives` variables appropriately, and whose value matches
      `recall`.

  Raises:
    ValueError: If `weights` is not `None` and its shape doesn't match
      `predictions`, or if either `metrics_collections` or
      `updates_collections` are not a list or tuple.
  """
  with ops.name_scope(name,
                      _at_k_name('recall', k, class_id=class_id),
                      (predictions_idx, labels, weights)) as scope:
    predicted_classes = math_ops.to_int64(predictions_idx)

    tp_var, tp_update = _streaming_sparse_true_positive_at_k(
        predictions_idx=predicted_classes, labels=labels, k=k,
        class_id=class_id, weights=weights)
    fn_var, fn_update = _streaming_sparse_false_negative_at_k(
        predictions_idx=predicted_classes, labels=labels, k=k,
        class_id=class_id, weights=weights)

    # recall = tp / (tp + fn), for both the value and the update tensors.
    metric = math_ops.div(tp_var, math_ops.add(tp_var, fn_var), name=scope)
    update = math_ops.div(
        tp_update, math_ops.add(tp_update, fn_update), name='update')

    if metrics_collections:
      ops.add_to_collections(metrics_collections, metric)
    if updates_collections:
      ops.add_to_collections(updates_collections, update)
    return metric, update
def recall_at_thresholds(labels, predictions, thresholds,
                         weights=None, metrics_collections=None,
                         updates_collections=None, name=None):
  """Computes various recall values for different `thresholds` on `predictions`.

  Four local variables — `true_positives`, `true_negatives`,
  `false_positives` and `false_negatives` — are maintained per threshold.
  `recall[i]` is the total weight of values in `predictions` above
  `thresholds[i]` whose corresponding entry in `labels` is `True`, divided
  by the total weight of `True` values in `labels`
  (`true_positives[i] / (true_positives[i] + false_negatives[i])`).

  For estimation over a stream of data, `update_op` updates these variables
  and returns the recall.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask
  values.

  Args:
    labels: The ground truth values, a `Tensor` whose dimensions must match
      `predictions`. Will be cast to `bool`.
    predictions: A floating point `Tensor` of arbitrary shape and whose
      values are in the range `[0, 1]`.
    thresholds: A python list or tuple of float thresholds in `[0, 1]`.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions
      must be either `1`, or the same as the corresponding `labels`
      dimension).
    metrics_collections: An optional list of collections that `recall`
      should be added to.
    updates_collections: An optional list of collections that `update_op`
      should be added to.
    name: An optional variable_scope name.

  Returns:
    recall: A float `Tensor` of shape `[len(thresholds)]`.
    update_op: An operation that increments the `true_positives`,
      `true_negatives`, `false_positives` and `false_negatives` variables
      that are used in the computation of `recall`.

  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, or if
      `weights` is not `None` and its shape doesn't match `predictions`, or
      if either `metrics_collections` or `updates_collections` are not a
      list or tuple.
  """
  with variable_scope.variable_scope(name, 'recall_at_thresholds',
                                     (predictions, labels, weights)):
    counts, count_updates = _confusion_matrix_at_thresholds(
        labels, predictions, thresholds, weights, includes=('tp', 'fn'))

    # A tiny constant keeps the division well-defined when tp + fn == 0.
    epsilon = 1e-7

    def _recall_ratio(tp, fn, suffix):
      return math_ops.div(tp, epsilon + tp + fn, name='recall_' + suffix)

    rec = _recall_ratio(counts['tp'], counts['fn'], 'value')
    update_op = _recall_ratio(
        count_updates['tp'], count_updates['fn'], 'update_op')

    if metrics_collections:
      ops.add_to_collections(metrics_collections, rec)
    if updates_collections:
      ops.add_to_collections(updates_collections, update_op)
    return rec, update_op
def root_mean_squared_error(labels, predictions, weights=None,
                            metrics_collections=None,
                            updates_collections=None,
                            name=None):
  """Computes the root mean squared error between the labels and predictions.

  Two local variables, `total` and `count`, accumulate the weighted squared
  error. The returned `root_mean_squared_error` is the idempotent square
  root of `total / count`.

  For estimation over a stream of data, `update_op` increments `total` with
  the reduced sum of `weights` times the element-wise squared difference of
  `predictions` and `labels`, and increments `count` with the reduced sum of
  `weights`.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask
  values.

  Args:
    labels: A `Tensor` of the same shape as `predictions`.
    predictions: A `Tensor` of arbitrary shape.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions
      must be either `1`, or the same as the corresponding `labels`
      dimension).
    metrics_collections: An optional list of collections that
      `root_mean_squared_error` should be added to.
    updates_collections: An optional list of collections that `update_op`
      should be added to.
    name: An optional variable_scope name.

  Returns:
    root_mean_squared_error: A `Tensor` representing the current mean, the
      value of `total` divided by `count`.
    update_op: An operation that increments the `total` and `count`
      variables appropriately and whose value matches
      `root_mean_squared_error`.

  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, or if
      `weights` is not `None` and its shape doesn't match `predictions`, or
      if either `metrics_collections` or `updates_collections` are not a
      list or tuple.
  """
  predictions, labels, weights = _remove_squeezable_dimensions(
      predictions=predictions, labels=labels, weights=weights)

  # RMSE is the square root of the streaming MSE, applied to both the value
  # and the update tensors.
  mse_value, mse_update = mean_squared_error(
      labels, predictions, weights, None, None,
      name or 'root_mean_squared_error')
  rmse_value = math_ops.sqrt(mse_value)
  rmse_update = math_ops.sqrt(mse_update)

  if metrics_collections:
    ops.add_to_collections(metrics_collections, rmse_value)
  if updates_collections:
    ops.add_to_collections(updates_collections, rmse_update)
  return rmse_value, rmse_update
def sensitivity_at_specificity(
    labels, predictions, specificity, weights=None, num_thresholds=200,
    metrics_collections=None, updates_collections=None, name=None):
  """Computes the sensitivity at a given specificity.
  The `sensitivity_at_specificity` function creates four local
  variables, `true_positives`, `true_negatives`, `false_positives` and
  `false_negatives` that are used to compute the sensitivity at the given
  specificity value. The threshold for the given specificity value is computed
  and used to evaluate the corresponding sensitivity.
  For estimation of the metric over a stream of data, the function creates an
  `update_op` operation that updates these variables and returns the
  `sensitivity`. `update_op` increments the `true_positives`, `true_negatives`,
  `false_positives` and `false_negatives` counts with the weight of each case
  found in the `predictions` and `labels`.
  If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
  For additional information about specificity and sensitivity, see the
  following: https://en.wikipedia.org/wiki/Sensitivity_and_specificity
  Args:
    labels: The ground truth values, a `Tensor` whose dimensions must match
      `predictions`. Will be cast to `bool`.
    predictions: A floating point `Tensor` of arbitrary shape and whose values
      are in the range `[0, 1]`.
    specificity: A scalar value in range `[0, 1]`.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions must
      be either `1`, or the same as the corresponding `labels` dimension).
    num_thresholds: The number of thresholds to use for matching the given
      specificity.
    metrics_collections: An optional list of collections that `sensitivity`
      should be added to.
    updates_collections: An optional list of collections that `update_op` should
      be added to.
    name: An optional variable_scope name.
  Returns:
    sensitivity: A scalar `Tensor` representing the sensitivity at the given
      `specificity` value.
    update_op: An operation that increments the `true_positives`,
      `true_negatives`, `false_positives` and `false_negatives` variables
      appropriately and whose value matches `sensitivity`.
  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, if
      `weights` is not `None` and its shape doesn't match `predictions`, or if
      `specificity` is not between 0 and 1, or if either `metrics_collections`
      or `updates_collections` are not a list or tuple.
  """
  if specificity < 0 or specificity > 1:
    raise ValueError('`specificity` must be in the range [0, 1].')
  with variable_scope.variable_scope(name, 'sensitivity_at_specificity',
                                     (predictions, labels, weights)):
    kepsilon = 1e-7  # to account for floating point imprecisions
    # num_thresholds-2 evenly spaced interior thresholds, padded below 0.0 and
    # above 1.0 so predictions of exactly 0.0 / 1.0 fall strictly inside.
    thresholds = [(i + 1) * 1.0 / (num_thresholds - 1)
                  for i in range(num_thresholds-2)]
    thresholds = [0.0 - kepsilon] + thresholds + [1.0 + kepsilon]
    values, update_ops = _confusion_matrix_at_thresholds(
        labels, predictions, thresholds, weights)
    def compute_sensitivity_at_specificity(tp, tn, fp, fn, name):
      """Returns tp/(tp+fn) at the threshold closest to `specificity`."""
      # Per-threshold specificity = tn / (tn + fp); kepsilon avoids 0/0.
      specificities = math_ops.div(tn, tn + fp + kepsilon)
      # Closest threshold to the requested specificity; on ties, argmin
      # returns the first (lowest-threshold) index.
      tf_index = math_ops.argmin(math_ops.abs(specificities - specificity), 0)
      tf_index = math_ops.cast(tf_index, dtypes.int32)
      # Now, we have the implicit threshold, so compute the sensitivity:
      return math_ops.div(tp[tf_index],
                          tp[tf_index] + fn[tf_index] + kepsilon,
                          name)
    sensitivity = compute_sensitivity_at_specificity(
        values['tp'], values['tn'], values['fp'], values['fn'], 'value')
    update_op = compute_sensitivity_at_specificity(
        update_ops['tp'], update_ops['tn'], update_ops['fp'], update_ops['fn'],
        'update_op')
    if metrics_collections:
      ops.add_to_collections(metrics_collections, sensitivity)
    if updates_collections:
      ops.add_to_collections(updates_collections, update_op)
    return sensitivity, update_op
def _expand_and_tile(tensor, multiple, dim=0, name=None):
  """Slice `tensor` shape in 2, then tile along the sliced dimension.
  A new dimension is inserted in shape of `tensor` before `dim`, then values are
  tiled `multiple` times along the new dimension.
  Args:
    tensor: Input `Tensor` or `SparseTensor`.
    multiple: Integer, number of times to tile.
    dim: Integer, dimension along which to tile.
    name: Name of operation.
  Returns:
    `Tensor` result of expanding and tiling `tensor`.
  Raises:
    ValueError: if `multiple` is less than 1, or `dim` is not in
    `[-rank(tensor), rank(tensor)]`.
  """
  if multiple < 1:
    raise ValueError('Invalid multiple %s, must be > 0.' % multiple)
  with ops.name_scope(
      name, 'expand_and_tile', (tensor, multiple, dim)) as scope:
    # Sparse.
    tensor = sparse_tensor.convert_to_tensor_or_sparse_tensor(tensor)
    if isinstance(tensor, sparse_tensor.SparseTensor):
      # Normalize a negative `dim` into a non-negative axis index, computed
      # dynamically from the tensor's rank (size of dense_shape).
      if dim < 0:
        expand_dims = array_ops.reshape(
            array_ops.size(tensor.dense_shape) + dim, [1])
      else:
        expand_dims = [dim]
      # New shape = shape[:dim] + [1] + shape[dim:], i.e. a size-1 axis is
      # spliced in before `dim`.
      expanded_shape = array_ops.concat(
          (array_ops.slice(tensor.dense_shape, [0], expand_dims), [1],
           array_ops.slice(tensor.dense_shape, expand_dims, [-1])),
          0,
          name='expanded_shape')
      expanded = sparse_ops.sparse_reshape(
          tensor, shape=expanded_shape, name='expand')
      if multiple == 1:
        return expanded
      # Sparse tiling is expressed as concatenating `multiple` copies along
      # the newly inserted axis.
      return sparse_ops.sparse_concat(
          dim - 1 if dim < 0 else dim, [expanded] * multiple, name=scope)
    # Dense.
    expanded = array_ops.expand_dims(
        tensor, dim if (dim >= 0) else (dim - 1), name='expand')
    if multiple == 1:
      return expanded
    # Tile factor is 1 on every axis except the inserted one.
    ones = array_ops.ones_like(array_ops.shape(tensor))
    tile_multiples = array_ops.concat(
        (ones[:dim], (multiple,), ones[dim:]), 0, name='multiples')
    return array_ops.tile(expanded, tile_multiples, name=scope)
def _num_relevant(labels, k):
  """Computes number of relevant values for each row in labels.
  For labels with shape [D1, ... DN, num_labels], this is the minimum of
  `num_labels` and `k`.
  Args:
    labels: `int64` `Tensor` or `SparseTensor` with shape
      [D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
      target classes for the associated prediction. Commonly, N=1 and `labels`
      has shape [batch_size, num_labels].
    k: Integer, k for @k metric.
  Returns:
    Integer `Tensor` of shape [D1, ... DN], where each value is the number of
    relevant values for that row.
  Raises:
    ValueError: if inputs have invalid dtypes or values.
  """
  if k < 1:
    raise ValueError('Invalid k=%s.' % k)
  with ops.name_scope(None, 'num_relevant', (labels,)) as scope:
    labels = sparse_tensor.convert_to_tensor_or_sparse_tensor(labels)
    # Sparse labels: row lengths vary, so compute a per-row set size.
    if isinstance(labels, sparse_tensor.SparseTensor):
      return math_ops.minimum(sets.set_size(labels), k, name=scope)
    # Dense labels: every row has the same length, so a single scalar
    # min(num_labels, k) is tiled across the leading dimensions.
    shape = array_ops.shape(labels)
    relevant_count = math_ops.minimum(shape[-1], k)
    return array_ops.fill(shape[0:-1], relevant_count, name=scope)
def _sparse_average_precision_at_top_k(labels, predictions_idx):
  """Computes average precision@k of predictions with respect to sparse labels.
  From en.wikipedia.org/wiki/Information_retrieval#Average_precision, formula
  for each row is:
    AveP = sum_{i=1...k} P_{i} * rel_{i} / num_relevant_items
  A "row" is the elements in dimension [D1, ... DN] of `predictions_idx`,
  `labels`, and the result `Tensors`. In the common case, this is [batch_size].
  Each row of the results contains the average precision for that row.
  Args:
    labels: `int64` `Tensor` or `SparseTensor` with shape
      [D1, ... DN, num_labels] or [D1, ... DN], where the latter implies
      num_labels=1. N >= 1 and num_labels is the number of target classes for
      the associated prediction. Commonly, N=1 and `labels` has shape
      [batch_size, num_labels]. [D1, ... DN] must match `predictions_idx`.
      Values should be in range [0, num_classes).
    predictions_idx: Integer `Tensor` with shape [D1, ... DN, k] where N >= 1.
      Commonly, N=1 and `predictions_idx` has shape [batch size, k]. The final
      dimension must be set and contains the top `k` predicted class indices.
      [D1, ... DN] must match `labels`. Values should be in range
      [0, num_classes).
  Returns:
    `float64` `Tensor` of shape [D1, ... DN], where each value is the average
      precision for that row.
  Raises:
    ValueError: if the last dimension of predictions_idx is not set.
  """
  with ops.name_scope(
      None, 'average_precision', (predictions_idx, labels)) as scope:
    predictions_idx = math_ops.to_int64(predictions_idx, name='predictions_idx')
    if predictions_idx.get_shape().ndims == 0:
      raise ValueError('The rank of predictions_idx must be at least 1.')
    # k comes from the static shape, so it must be known at graph-build time.
    k = predictions_idx.get_shape().as_list()[-1]
    if k is None:
      raise ValueError('The last dimension of predictions_idx must be set.')
    labels = _maybe_expand_labels(labels, predictions_idx)
    # Expand dims to produce [D1, ... DN, k, 1] tensor. This gives us a separate
    # prediction for each k, so we can calculate separate true positive values
    # for each k.
    predictions_idx_per_k = array_ops.expand_dims(
        predictions_idx, -1, name='predictions_idx_per_k')
    # Replicate labels k times to produce [D1, ... DN, k, num_labels] tensor.
    labels_per_k = _expand_and_tile(
        labels, multiple=k, dim=-1, name='labels_per_k')
    # The following tensors are all of shape [D1, ... DN, k], containing values
    # per row, per k value.
    # `relevant_per_k` (int32) - Relevance indicator, 1 if the prediction at
    #     that k value is correct, 0 otherwise. This is the "rel_{i}" term from
    #     the formula above.
    # `tp_per_k` (int32) - True positive counts.
    # `retrieved_per_k` (int32) - Number of predicted values at each k. This is
    #     the precision denominator.
    # `precision_per_k` (float64) - Precision at each k. This is the "P_{i}"
    #     term from the formula above.
    # `relevant_precision_per_k` (float64) - Relevant precisions; i.e.,
    #     precisions at all k for which relevance indicator is true.
    relevant_per_k = _sparse_true_positive_at_k(
        labels_per_k, predictions_idx_per_k, name='relevant_per_k')
    # cumsum along the last axis turns per-position hits into running true
    # positive counts at each cutoff 1..k.
    tp_per_k = math_ops.cumsum(relevant_per_k, axis=-1, name='tp_per_k')
    retrieved_per_k = math_ops.cumsum(
        array_ops.ones_like(relevant_per_k), axis=-1, name='retrieved_per_k')
    precision_per_k = math_ops.div(
        math_ops.to_double(tp_per_k), math_ops.to_double(retrieved_per_k),
        name='precision_per_k')
    # Multiplying by the 0/1 relevance indicator zeroes out precisions at
    # non-relevant positions, implementing the P_{i} * rel_{i} term.
    relevant_precision_per_k = math_ops.multiply(
        precision_per_k, math_ops.to_double(relevant_per_k),
        name='relevant_precision_per_k')
    # Reduce along k dimension to get the sum, yielding a [D1, ... DN] tensor.
    precision_sum = math_ops.reduce_sum(
        relevant_precision_per_k, reduction_indices=(-1,), name='precision_sum')
    # Divide by number of relevant items to get average precision. These are
    # the "num_relevant_items" and "AveP" terms from the formula above.
    num_relevant_items = math_ops.to_double(_num_relevant(labels, k))
    return math_ops.div(precision_sum, num_relevant_items, name=scope)
def _streaming_sparse_average_precision_at_top_k(labels,
                                                 predictions_idx,
                                                 weights=None,
                                                 metrics_collections=None,
                                                 updates_collections=None,
                                                 name=None):
  """Computes average precision@k of predictions with respect to sparse labels.
  `sparse_average_precision_at_top_k` creates two local variables,
  `average_precision_at_<k>/total` and `average_precision_at_<k>/max`, that
  are used to compute the frequency. This frequency is ultimately returned as
  `average_precision_at_<k>`: an idempotent operation that simply divides
  `average_precision_at_<k>/total` by `average_precision_at_<k>/max`.
  For estimation of the metric over a stream of data, the function creates an
  `update_op` operation that updates these variables and returns the
  `precision_at_<k>`. Set operations applied to `top_k` and `labels` calculate
  the true positives and false positives weighted by `weights`. Then `update_op`
  increments `true_positive_at_<k>` and `false_positive_at_<k>` using these
  values.
  If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
  Args:
    labels: `int64` `Tensor` or `SparseTensor` with shape
      [D1, ... DN, num_labels] or [D1, ... DN], where the latter implies
      num_labels=1. N >= 1 and num_labels is the number of target classes for
      the associated prediction. Commonly, N=1 and `labels` has shape
      [batch_size, num_labels]. [D1, ... DN] must match `predictions_idx`.
      Values should be in range [0, num_classes).
    predictions_idx: Integer `Tensor` with shape [D1, ... DN, k] where N >= 1.
      Commonly, N=1 and `predictions_idx` has shape [batch size, k]. The final
      dimension contains the top `k` predicted class indices. [D1, ... DN] must
      match `labels`. Values should be in range [0, num_classes).
    weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
      `labels`. If the latter, it must be broadcastable to `labels` (i.e., all
      dimensions must be either `1`, or the same as the corresponding `labels`
      dimension).
    metrics_collections: An optional list of collections that values should
      be added to.
    updates_collections: An optional list of collections that updates should
      be added to.
    name: Name of new update operation, and namespace for other dependent ops.
  Returns:
    mean_average_precision: Scalar `float64` `Tensor` with the mean average
      precision values.
    update: `Operation` that increments variables appropriately, and whose
      value matches `metric`.
  """
  with ops.name_scope(name, 'average_precision_at_top_k',
                      (predictions_idx, labels, weights)) as scope:
    # Calculate per-example average precision, and apply weights.
    average_precision = _sparse_average_precision_at_top_k(
        predictions_idx=predictions_idx, labels=labels)
    if weights is not None:
      weights = weights_broadcast_ops.broadcast_weights(
          math_ops.to_double(weights), average_precision)
      average_precision = math_ops.multiply(average_precision, weights)
    # Create accumulation variables and update ops for max average precision and
    # total average precision.
    with ops.name_scope(None, 'max', (average_precision,)) as max_scope:
      # `max` is the max possible precision. Since max for any row is 1.0:
      # - For the unweighted case, this is just the number of rows.
      # - For the weighted case, it's the sum of the weights broadcast across
      #   `average_precision` rows.
      max_var = _local_variable(
          array_ops.zeros([], dtype=dtypes.float64), name=max_scope)
      if weights is None:
        batch_max = math_ops.to_double(
            array_ops.size(average_precision, name='batch_max'))
      else:
        batch_max = math_ops.reduce_sum(weights, name='batch_max')
      max_update = state_ops.assign_add(max_var, batch_max, name='update')
    with ops.name_scope(None, 'total', (average_precision,)) as total_scope:
      total_var = _local_variable(
          array_ops.zeros([], dtype=dtypes.float64), name=total_scope)
      batch_total = math_ops.reduce_sum(average_precision, name='batch_total')
      total_update = state_ops.assign_add(total_var, batch_total, name='update')
    # Divide total by max to get mean, for both vars and the update ops.
    # NOTE(review): `_safe_scalar_div` presumably guards the 0/0 case before
    # any batch has been processed — confirm against its definition.
    mean_average_precision = _safe_scalar_div(total_var, max_var, name='mean')
    update = _safe_scalar_div(total_update, max_update, name=scope)
    if metrics_collections:
      ops.add_to_collections(metrics_collections, mean_average_precision)
    if updates_collections:
      ops.add_to_collections(updates_collections, update)
    return mean_average_precision, update
def sparse_average_precision_at_k(labels,
                                  predictions,
                                  k,
                                  weights=None,
                                  metrics_collections=None,
                                  updates_collections=None,
                                  name=None):
  """Computes average precision@k of predictions with respect to sparse labels.
  Thin wrapper around `_streaming_sparse_average_precision_at_top_k`: selects
  the `k` highest-scoring class indices from the `predictions` logits and
  delegates the streaming average-precision computation (two local variables,
  `average_precision_at_<k>/total` and `average_precision_at_<k>/max`, plus
  an `update_op` that accumulates them over a stream of data) to that helper.
  If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
  Args:
    labels: `int64` `Tensor` or `SparseTensor` with shape
      [D1, ... DN, num_labels] or [D1, ... DN], where the latter implies
      num_labels=1. N >= 1 and num_labels is the number of target classes for
      the associated prediction. [D1, ... DN] must match `predictions`. Values
      should be in range [0, num_classes), where num_classes is the last
      dimension of `predictions`; values outside this range are ignored.
    predictions: Float `Tensor` with shape [D1, ... DN, num_classes] where
      N >= 1. The final dimension contains the logit values for each class.
      [D1, ... DN] must match `labels`.
    k: Integer, k for @k metric. Average precision is computed over the
      range `[1,k]`.
    weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
      `labels`. If the latter, it must be broadcastable to `labels`.
    metrics_collections: An optional list of collections that values should
      be added to.
    updates_collections: An optional list of collections that updates should
      be added to.
    name: Name of new update operation, and namespace for other dependent ops.
  Returns:
    mean_average_precision: Scalar `float64` `Tensor` with the mean average
      precision values.
    update: `Operation` that increments variables appropriately, and whose
      value matches `metric`.
  Raises:
    ValueError: if k is invalid.
  """
  if k < 1:
    raise ValueError('Invalid k=%s.' % k)
  default_name = _at_k_name('average_precision', k)
  with ops.name_scope(
      name, default_name, (predictions, labels, weights)) as scope:
    # [D1, ... DN, k] tensor of the k highest-scoring class indices.
    _, top_k_idx = nn.top_k(predictions, k)
    return _streaming_sparse_average_precision_at_top_k(
        labels=labels,
        predictions_idx=top_k_idx,
        weights=weights,
        metrics_collections=metrics_collections,
        updates_collections=updates_collections,
        name=scope)
def _sparse_false_positive_at_k(labels,
                                predictions_idx,
                                class_id=None,
                                weights=None):
  """Calculates false positives for precision@k.
  A false positive at k is a predicted class index that does not appear in
  the corresponding row of `labels`. If `class_id` is given, only binary
  counts for that class are produced; otherwise counts cover all `k`
  predicted vs `n` label classes, where `n` is the 2nd dimension of
  `labels_sparse`.
  Args:
    labels: `int64` `Tensor` or `SparseTensor` with shape
      [D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
      target classes for the associated prediction. Commonly, N=1 and `labels`
      has shape [batch_size, num_labels]. [D1, ... DN] must match
      `predictions_idx`.
    predictions_idx: 1-D or higher `int64` `Tensor` with last dimension `k`,
      top `k` predicted classes. For rank `n`, the first `n-1` dimensions must
      match `labels`.
    class_id: Class for which we want binary metrics.
    weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
      `labels`, broadcastable to `labels`.
  Returns:
    A [D1, ... DN] `Tensor` of false positive counts.
  """
  with ops.name_scope(
      None, 'false_positives', (predictions_idx, labels, weights)):
    labels, predictions_idx = _maybe_select_class_id(
        labels, predictions_idx, class_id)
    # Predictions absent from the labels are the false positives.
    wrong_predictions = sets.set_difference(
        predictions_idx, labels, aminusb=True)
    false_positives = math_ops.to_double(sets.set_size(wrong_predictions))
    if weights is None:
      return false_positives
    with ops.control_dependencies((
        weights_broadcast_ops.assert_broadcastable(
            weights, false_positives),)):
      return math_ops.multiply(
          false_positives, math_ops.to_double(weights))
def _streaming_sparse_false_positive_at_k(labels,
                                          predictions_idx,
                                          k=None,
                                          class_id=None,
                                          weights=None,
                                          name=None):
  """Calculates weighted per step false positives for precision@k.
  Accumulates the (optionally weighted) false positive count from each batch
  into a scalar local variable, so the total can stream across many batches.
  If `class_id` is specified, only binary counts for `class_id` are
  accumulated; otherwise counts cover `k` predicted vs `n` label classes,
  where `n` is the 2nd dimension of `labels`.
  If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
  Args:
    labels: `int64` `Tensor` or `SparseTensor` with shape
      [D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
      target classes for the associated prediction. Commonly, N=1 and `labels`
      has shape [batch_size, num_labels]. [D1, ... DN] must match
      `predictions_idx`.
    predictions_idx: 1-D or higher `int64` `Tensor` with last dimension `k`,
      top `k` predicted classes. For rank `n`, the first `n-1` dimensions must
      match `labels`.
    k: Integer, k for @k metric. This is only used for default op name.
    class_id: Class for which we want binary metrics.
    weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
      `labels`, broadcastable to `labels`.
    name: Name of new variable, and namespace for other dependent ops.
  Returns:
    A tuple of `Variable` and update `Operation`.
  Raises:
    ValueError: If `weights` is not `None` and has an incompatible shape.
  """
  default_name = _at_k_name('false_positive', k, class_id=class_id)
  with ops.name_scope(
      name, default_name, (predictions_idx, labels, weights)) as scope:
    batch_fp = _sparse_false_positive_at_k(
        predictions_idx=predictions_idx, labels=labels, class_id=class_id,
        weights=weights)
    batch_total = math_ops.to_double(math_ops.reduce_sum(batch_fp))
    # Scalar accumulator that persists across batches.
    total_var = _local_variable(
        array_ops.zeros([], dtype=dtypes.float64), name=scope)
    update_op = state_ops.assign_add(total_var, batch_total, name='update')
    return total_var, update_op
def precision_at_top_k(labels,
                       predictions_idx,
                       k=None,
                       class_id=None,
                       weights=None,
                       metrics_collections=None,
                       updates_collections=None,
                       name=None):
  """Computes precision@k of the predictions with respect to sparse labels.
  Differs from `sparse_precision_at_k` in that predictions must be in the form
  of top `k` class indices, whereas `sparse_precision_at_k` expects logits.
  Refer to `sparse_precision_at_k` for more details.
  Args:
    labels: `int64` `Tensor` or `SparseTensor` with shape
      [D1, ... DN, num_labels] or [D1, ... DN], where the latter implies
      num_labels=1. N >= 1 and num_labels is the number of target classes for
      the associated prediction. Commonly, N=1 and `labels` has shape
      [batch_size, num_labels]. [D1, ... DN] must match `predictions`. Values
      should be in range [0, num_classes), where num_classes is the last
      dimension of `predictions`. Values outside this range are ignored.
    predictions_idx: Integer `Tensor` with shape [D1, ... DN, k] where
      N >= 1. Commonly, N=1 and predictions has shape [batch size, k].
      The final dimension contains the top `k` predicted class indices.
      [D1, ... DN] must match `labels`.
    k: Integer, k for @k metric. Only used for the default op name.
    class_id: Integer class ID for which we want binary metrics. This should be
      in range [0, num_classes], where num_classes is the last dimension of
      `predictions`. If `class_id` is outside this range, the method returns
      NAN.
    weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
      `labels`. If the latter, it must be broadcastable to `labels` (i.e., all
      dimensions must be either `1`, or the same as the corresponding `labels`
      dimension).
    metrics_collections: An optional list of collections that values should
      be added to.
    updates_collections: An optional list of collections that updates should
      be added to.
    name: Name of new update operation, and namespace for other dependent ops.
  Returns:
    precision: Scalar `float64` `Tensor` with the value of `true_positives`
      divided by the sum of `true_positives` and `false_positives`.
    update_op: `Operation` that increments `true_positives` and
      `false_positives` variables appropriately, and whose value matches
      `precision`.
  Raises:
    ValueError: If `weights` is not `None` and its shape doesn't match
      `predictions`, or if either `metrics_collections` or `updates_collections`
      are not a list or tuple.
  """
  with ops.name_scope(name, _at_k_name('precision', k, class_id=class_id),
                      (predictions_idx, labels, weights)) as scope:
    # Promote [D1, ... DN]-shaped labels to [D1, ... DN, 1] so they align
    # with the trailing k dimension of `predictions_idx`.
    labels = _maybe_expand_labels(labels, predictions_idx)
    top_k_idx = math_ops.to_int64(predictions_idx)
    # Streaming tp/fp accumulators: `tp`/`fp` are the running totals,
    # `*_update` the ops that fold in the current batch.
    tp, tp_update = _streaming_sparse_true_positive_at_k(
        predictions_idx=top_k_idx, labels=labels, k=k, class_id=class_id,
        weights=weights)
    fp, fp_update = _streaming_sparse_false_positive_at_k(
        predictions_idx=top_k_idx, labels=labels, k=k, class_id=class_id,
        weights=weights)
    # NOTE(review): tp / (tp + fp) has no epsilon guard, so a zero
    # denominator (nothing predicted for `class_id` yet) presumably yields
    # NaN here — confirm this matches the documented NAN behavior above.
    metric = math_ops.div(tp, math_ops.add(tp, fp), name=scope)
    update = math_ops.div(
        tp_update, math_ops.add(tp_update, fp_update), name='update')
    if metrics_collections:
      ops.add_to_collections(metrics_collections, metric)
    if updates_collections:
      ops.add_to_collections(updates_collections, update)
    return metric, update
def sparse_precision_at_k(labels,
                          predictions,
                          k,
                          class_id=None,
                          weights=None,
                          metrics_collections=None,
                          updates_collections=None,
                          name=None):
  """Computes precision@k of the predictions with respect to sparse labels.
  Thin wrapper around `precision_at_top_k`: selects the `k` highest-scoring
  class indices from the `predictions` logits and delegates the streaming
  precision computation (local variables `true_positive_at_<k>` and
  `false_positive_at_<k>`, plus an `update_op` that accumulates them over a
  stream of data) to that function.
  If `class_id` is specified, precision considers only the entries in the
  batch for which `class_id` is in the top-k highest `predictions`, and
  computes the fraction of them for which `class_id` is indeed a correct
  label. Otherwise precision is how often, on average, a class among the
  top-k predicted classes of a batch entry appears in the entry's label.
  If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
  Args:
    labels: `int64` `Tensor` or `SparseTensor` with shape
      [D1, ... DN, num_labels] or [D1, ... DN], where the latter implies
      num_labels=1. N >= 1 and num_labels is the number of target classes for
      the associated prediction. [D1, ... DN] must match `predictions`. Values
      should be in range [0, num_classes), where num_classes is the last
      dimension of `predictions`; values outside this range are ignored.
    predictions: Float `Tensor` with shape [D1, ... DN, num_classes] where
      N >= 1. The final dimension contains the logit values for each class.
      [D1, ... DN] must match `labels`.
    k: Integer, k for @k metric.
    class_id: Integer class ID for which we want binary metrics. This should be
      in range [0, num_classes], where num_classes is the last dimension of
      `predictions`. If `class_id` is outside this range, the method returns
      NAN.
    weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
      `labels`. If the latter, it must be broadcastable to `labels`.
    metrics_collections: An optional list of collections that values should
      be added to.
    updates_collections: An optional list of collections that updates should
      be added to.
    name: Name of new update operation, and namespace for other dependent ops.
  Returns:
    precision: Scalar `float64` `Tensor` with the value of `true_positives`
      divided by the sum of `true_positives` and `false_positives`.
    update_op: `Operation` that increments `true_positives` and
      `false_positives` variables appropriately, and whose value matches
      `precision`.
  Raises:
    ValueError: If `weights` is not `None` and its shape doesn't match
      `predictions`, or if either `metrics_collections` or `updates_collections`
      are not a list or tuple.
  """
  default_name = _at_k_name('precision', k, class_id=class_id)
  with ops.name_scope(
      name, default_name, (predictions, labels, weights)) as scope:
    # [D1, ... DN, k] tensor of the k highest-scoring class indices.
    _, predicted_classes = nn.top_k(predictions, k)
    return precision_at_top_k(
        labels=labels,
        predictions_idx=predicted_classes,
        k=k,
        class_id=class_id,
        weights=weights,
        metrics_collections=metrics_collections,
        updates_collections=updates_collections,
        name=scope)
def specificity_at_sensitivity(
labels, predictions, sensitivity, weights=None, num_thresholds=200,
metrics_collections=None, updates_collections=None, name=None):
"""Computes the specificity at a given sensitivity.
The `specificity_at_sensitivity` function creates four local
variables, `true_positives`, `true_negatives`, `false_positives` and
`false_negatives` that are used to compute the specificity at the given
sensitivity value. The threshold for the given sensitivity value is computed
and used to evaluate the corresponding specificity.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`specificity`. `update_op` increments the `true_positives`, `true_negatives`,
`false_positives` and `false_negatives` counts with the weight of each case
found in the `predictions` and `labels`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
For additional information about specificity and sensitivity, see the
following: https://en.wikipedia.org/wiki/Sensitivity_and_specificity
Args:
labels: The ground truth values, a `Tensor` whose dimensions must match
`predictions`. Will be cast to `bool`.
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
sensitivity: A scalar value in range `[0, 1]`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
num_thresholds: The number of thresholds to use for matching the given
sensitivity.
metrics_collections: An optional list of collections that `specificity`
should be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
specificity: A scalar `Tensor` representing the specificity at the given
`specificity` value.
update_op: An operation that increments the `true_positives`,
`true_negatives`, `false_positives` and `false_negatives` variables
appropriately and whose value matches `specificity`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, if
`weights` is not `None` and its shape doesn't match `predictions`, or if
`sensitivity` is not between 0 and 1, or if either `metrics_collections`
or `updates_collections` are not a list or tuple.
"""
if sensitivity < 0 or sensitivity > 1:
raise ValueError('`sensitivity` must be in the range [0, 1].')
with variable_scope.variable_scope(name, 'specificity_at_sensitivity',
(predictions, labels, weights)):
kepsilon = 1e-7 # to account for floating point imprecisions
thresholds = [(i + 1) * 1.0 / (num_thresholds - 1)
for i in range(num_thresholds-2)]
thresholds = [0.0 - kepsilon] + thresholds + [1.0 - kepsilon]
values, update_ops = _confusion_matrix_at_thresholds(
labels, predictions, thresholds, weights)
    def compute_specificity_at_sensitivity(tp, tn, fp, fn, name):
      """Computes the specificity at the given sensitivity.

      Closes over `sensitivity` and `kepsilon` from the enclosing scope;
      each argument is a 1-D tensor indexed by threshold.

      Args:
        tp: True positives.
        tn: True negatives.
        fp: False positives.
        fn: False negatives.
        name: The name of the operation.
      Returns:
        The specificity using the aggregated values.
      """
      # Per-threshold sensitivity = tp / (tp + fn); kepsilon avoids 0/0.
      sensitivities = math_ops.div(tp, tp + fn + kepsilon)
      # We'll need to use this trick until tf.argmax allows us to specify
      # whether we should use the first or last index in case of ties.
      min_val = math_ops.reduce_min(math_ops.abs(sensitivities - sensitivity))
      indices_at_minval = math_ops.equal(
          math_ops.abs(sensitivities - sensitivity), min_val)
      indices_at_minval = math_ops.to_int64(indices_at_minval)
      # cumsum + argmax picks the LAST index whose sensitivity is closest
      # to the requested one (argmax alone would pick the first).
      indices_at_minval = math_ops.cumsum(indices_at_minval)
      tf_index = math_ops.argmax(indices_at_minval, 0)
      tf_index = math_ops.cast(tf_index, dtypes.int32)
      # Now, we have the implicit threshold, so compute the specificity:
      return math_ops.div(tn[tf_index],
                          tn[tf_index] + fp[tf_index] + kepsilon,
                          name)
specificity = compute_specificity_at_sensitivity(
values['tp'], values['tn'], values['fp'], values['fn'], 'value')
update_op = compute_specificity_at_sensitivity(
update_ops['tp'], update_ops['tn'], update_ops['fp'], update_ops['fn'],
'update_op')
if metrics_collections:
ops.add_to_collections(metrics_collections, specificity)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return specificity, update_op
| [
"hanshuobest@163.com"
] | hanshuobest@163.com |
116edd3d8348affb82704ee2149a77b01bba66cd | 47a9270b5c84d441a68f5b7486a4897844b89875 | /simulation.py | 514575dc5b24744766b20a119c5e5d7958e8a067 | [] | no_license | RCIITG/HIVE | 74a7b8b9a0f6b9c13015aaf7863c7177e18c63b7 | 027c168280842a2d410bf92f4ea2e1e7c2ed7bfb | refs/heads/master | 2020-04-05T01:35:09.071062 | 2017-07-30T17:11:50 | 2017-07-30T17:11:50 | 81,948,703 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 747 | py | import random
import time
import numpy as np
from graphics import *
import math
def draw_windows(w, h):
    """Create and return a graphics window for the swarm simulation.

    The coordinate system is remapped so (0, 0) is one corner and
    (w, h) the opposite one.
    """
    window = GraphWin('swarm', w, h)
    window.setCoords(0, 0, w, h)
    return window
class robot:
    """A simulated robot drawn as a circle with range-sensor rays."""

    max_range = 50  # sensor reach, in window units

    def create_bot(self, win, x, y):
        """Draw the robot body and its sensor rays in *win*.

        win -- the GraphWin to draw into
        x, y -- centre of the robot
        """
        self.shape = Circle(Point(x, y), 10)
        self.shape.draw(win)
        # Forward-facing sensor ray, straight "up" from the centre.
        self.forward = Line(Point(x, y), Point(x, y + self.max_range))
        self.forward.draw(win)
        # Side ray rotated 60 degrees from forward. math.sin/cos expect
        # radians, so convert (the original passed 60 directly, i.e.
        # 60 radians, not 60 degrees). Anchor the end point relative to
        # the robot centre instead of the window origin.
        angle = math.radians(60)
        new_x = x + self.max_range * math.sin(angle)
        new_y = y + self.max_range * math.cos(angle)
        # Bugfix: the original assigned to self.ping[i] with neither
        # self.ping nor i defined (NameError at runtime); store one ray.
        self.ping = Line(Point(x, y), Point(new_x, new_y))
        self.ping.draw(win)
#p = Polygon(Point(1,1), Point(5,3), Point(2,7))
#p.draw(win)
if __name__ == '__main__':
    # Demo entry point: open a 400x400 window and draw a single robot
    # near the lower-left corner; the following getMouse() call blocks
    # until the user clicks, then the script exits.
    win=draw_windows(400,400)
    bee=robot()
    bee.create_bot(win,20,20)
win.getMouse() | [
"afzalhmd14@gmail.com"
] | afzalhmd14@gmail.com |
33daf81383f07a60e1205b82c0c803721e8fd23e | 25985aeeee54373d26a164e4cc6a014770e3ebf3 | /windows/w3af/w3af/core/ui/gtkUi/.svn/text-base/exploittab.py.svn-base | 7ad6f040980ac9f8abcf7cb7387f8c80945f7141 | [] | no_license | sui84/tools | 4b750dae90940fbe3a226cba72dc071d8fb88b7c | 651cc08eb50199ce1044c684dbf714ea26df6432 | refs/heads/master | 2021-01-22T19:22:26.964580 | 2017-08-20T15:23:38 | 2017-08-20T15:23:38 | 100,774,276 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 29,792 | '''
exploittab.py
Copyright 2007 Andres Riancho
This file is part of w3af, w3af.sourceforge.net .
w3af is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation version 2 of the License.
w3af is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with w3af; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
'''
import gtk, gobject
from . import prompt, helpers, entries, confpanel
from core.ui.gtkUi.pluginEditor import pluginEditor
import core.data.kb.knowledgeBase as kb
from core.data.kb.vuln import vuln as vulnType
from core.controllers.w3afException import w3afException, w3afMustStopException
import operator
class Shells(gtk.TreeView):
    '''The list of shells produced from vulnerabilities.

    Polls the knowledge base every 500 ms and appends any shell object
    not already listed; activating (double-clicking) a row opens an
    interactive prompt on that shell.

    @author: Facundo Batista <facundobatista =at= taniquetil.com.ar>
    '''
    def __init__(self, w3af):
        self.w3af = w3af
        # create the ListStore, with the shell name and id
        self.liststore = gtk.ListStore(str, str)
        # maps str(id(shell)) -> shell object, mirroring the liststore rows
        self.listholder = {}
        # create the TreeView using liststore
        super(Shells,self).__init__(self.liststore)
        # create a TreeViewColumn for the text
        tvcolumn = gtk.TreeViewColumn('Shells')
        cell = gtk.CellRendererText()
        tvcolumn.pack_start(cell, True)
        tvcolumn.add_attribute(cell, 'text', 0)
        self.append_column(tvcolumn)
        self.connect('row-activated', self.useShell)
        # poll the KB for new shells twice per second
        gobject.timeout_add(500, self._update)
        self.show()
    def _update(self):
        '''Updates the list of shells.
        @return: True, to keep gobject.timeout_add calling it.
        '''
        shells = kb.kb.getAllShells()
        for shell in shells:
            shellid = str(id(shell))
            if shellid not in self.listholder:
                try:
                    self.liststore.append([str(shell), shellid])
                except w3afException, w3:
                    # NOTE(review): the warning dialog is destroyed without
                    # ever being run/shown, so the user never sees this msg
                    msg = _("An error ocurren while generating the shell object: ") + str(w3)
                    dlg = gtk.MessageDialog(None, gtk.DIALOG_MODAL, gtk.MESSAGE_WARNING, gtk.BUTTONS_OK, msg)
                    dlg.destroy()
                # I always perform this because I just want to be warned once
                self.listholder[shellid] = shell
        return True
    def useShell(self, treeview, path, view_column):
        '''Raises a prompt dialog to use the shell.'''
        shellid = self.liststore[path][1]
        shell = self.listholder[shellid]
        try:
            title = "Shell - " + shell.getRemoteSystem()
        except w3afException, w3:
            # NOTE(review): as in _update, this dialog is destroyed
            # without being run
            msg = _("Failed to get the remote system name from the shell object.\n")
            msg += _("Original exception: ") + str(w3)
            dlg = gtk.MessageDialog(None, gtk.DIALOG_MODAL, gtk.MESSAGE_WARNING, gtk.BUTTONS_OK, msg)
            dlg.destroy()
        else:
            # shell is usable: open an interactive prompt window on it
            promptText = shell.getRemoteUser()+'@'+shell.getRemoteSystemName()
            prompt.PromptDialog( title, promptText, shell.generic_user_input)
class ExploitAllDialog(gtk.Dialog):
    '''A modal dialog to select which attack plugins take part in a
    "multiple exploit" run, and whether to stop on the first success.

    After run() returns with RESPONSE_OK, the chosen plugin names are in
    self.activatedPlugins and the stop flag in self.stopOnFirst.

    @author: Facundo Batista <facundobatista =at= taniquetil.com.ar>
    '''
    def __init__(self, w3af):
        super(ExploitAllDialog,self).__init__("Multiple Exploit", None, gtk.DIALOG_MODAL,
                      (gtk.STOCK_CANCEL,gtk.RESPONSE_CANCEL,gtk.STOCK_EXECUTE,gtk.RESPONSE_OK))
        # columns: plugin name, active flag
        self.liststore = gtk.ListStore(str, gobject.TYPE_BOOLEAN)
        # just build the tree with the plugin names, all active by default
        for plugin in sorted(w3af.getPluginList("attack")):
            self.liststore.append([plugin, 1])
        # create the TreeView using liststore
        treeview = gtk.TreeView(self.liststore)
        self.vbox.pack_start(treeview)
        # create a TreeViewColumn for the text
        tvcolumn = gtk.TreeViewColumn(_('Exploits'))
        cell = gtk.CellRendererText()
        tvcolumn.pack_start(cell, True)
        tvcolumn.add_attribute(cell, 'text', 0)
        treeview.append_column(tvcolumn)
        # create a TreeViewColumn for the checkbox
        tvcolumn = gtk.TreeViewColumn(_('Active'))
        cell = gtk.CellRendererToggle()
        cell.set_property('activatable', True)
        cell.connect('toggled', self._toggle)
        tvcolumn.pack_start(cell, False)
        tvcolumn.add_attribute(cell, 'active', 1)
        treeview.append_column(tvcolumn)
        # stop on first
        self.but_sof = gtk.CheckButton(_("First successful"))
        # set_tooltip_text only exists on newer pygtk versions
        if hasattr(self.but_sof, "set_tooltip_text"):
            self.but_sof.set_tooltip_text(_("Stop on first successful exploit"))
        self.vbox.pack_start(self.but_sof)
        # the cancel button
        but = self.action_area.get_children()[1]
        but.connect("clicked", lambda x: self.destroy())
        # the ok button
        but = self.action_area.get_children()[0]
        but.connect("clicked", self._ok)
        self.connect("delete-event", lambda x,y: self.destroy())
        # results collected by _ok(); stay None when cancelled
        self.activatedPlugins = None
        self.stopOnFirst = None
        self.show_all()
    def _ok(self, w):
        '''Collects the information.'''
        self.activatedPlugins = [name for (name,act) in self.liststore if act]
        self.stopOnFirst = self.but_sof.get_active()
        self.destroy()
    def _toggle(self, cell, path):
        '''Toggles the plugin on/off.
        @param cell: the cell that generated the signal.
        @param path: the path that clicked the user.
        '''
        listrow = self.liststore[path]
        listrow[1] = not listrow[1]
class ExploitTree(gtk.TreeView):
    '''A list showing all the plugins of "attack" type.

    Acts as the drag SOURCE for the exploit -> vulnerability drag&drop;
    its companion VulnerabList must be assigned to self.vulnerabs by the
    container after construction.

    @param w3af: The main core class.
    @author: Facundo Batista <facundobatista =at= taniquetil.com.ar>
    '''
    def __init__(self, w3af):
        self.w3af = w3af
        # create the ListStore, with the plugin name twice (the first could
        # go bold, the second is the original name always)
        self.liststore = gtk.ListStore(str, str)
        # just build the tree with the plugin names
        for plugin in sorted(w3af.getPluginList("attack")):
            self.liststore.append([plugin, plugin])
        # we will not ask for the plugin instances until needed, we'll
        # keep them here:
        self.plugin_instances = {}
        # create the TreeView using liststore
        super(ExploitTree,self).__init__(self.liststore)
        # signals
        self.connect('button-release-event', self.popup_menu)
        self.connect('cursor-changed', self._changedSelection)
        # create a TreeViewColumn for the text
        tvcolumn = gtk.TreeViewColumn(_('Exploits'))
        cell = gtk.CellRendererText()
        tvcolumn.pack_start(cell, True)
        tvcolumn.add_attribute(cell, 'markup', 0)
        self.append_column(tvcolumn)
        # drag and drop setup, this is the SOURCE
        target = [("explot-activ", 0, 1)]
        self.enable_model_drag_source(gtk.gdk.BUTTON1_MASK, target, gtk.gdk.ACTION_COPY)
        #self.set_enable_tree_lines(True)
        self.show()
    def setFilter(self, vuln):
        '''Rebuilds the list, bolding plugins able to exploit *vuln*.'''
        new_liststore = gtk.ListStore(str, str)
        for pname in sorted(self.w3af.getPluginList("attack")):
            exploit = self.w3af.getPluginInstance(pname, "attack")
            thisvulns = getExploitableVulns(exploit)
            markedname = ("<b>%s</b>" % pname) if vuln in thisvulns else pname
            new_liststore.append([markedname, pname])
        self.set_model(new_liststore)
        self.liststore = new_liststore
    def _changedSelection(self, *w):
        '''Changed which exploit is selected.'''
        # NOTE(review): getSelectedExploit() can return None (no cursor);
        # the attribute access below would then raise AttributeError
        exploit = self.getSelectedExploit()
        self.vulnerabs.setFilter(exploit)
        # un-bold the rest
        for row in self.liststore:
            if row[1] != exploit.pname:
                row[0] = row[1]
    def getSelectedExploit(self):
        '''Returns the selected exploit.
        @return: The selected exploit.
        '''
        (path, column) = self.get_cursor()
        if path is None:
            return None
        # Get the information about the click
        plugin = self.getPluginInstance(path)
        return plugin
    def popup_menu( self, tv, event ):
        '''Shows a menu when you right click on a plugin.
        @param tv: the treeview.
        @parameter event: The GTK event
        '''
        # button 3 == right click
        if event.button != 3:
            return
        (path, column) = tv.get_cursor()
        # Is it over a plugin name ?
        if path is not None and len(path) == 1:
            # Get the information about the click
            plugin = self.getPluginInstance(path)
            pname = self.liststore[path][1]
            # Ok, now I show the popup menu !
            # Create the popup menu
            gm = gtk.Menu()
            # And the items
            e = gtk.MenuItem(_("Edit plugin..."))
            e.connect('activate', self._handleEditPluginEvent, pname, path)
            gm.append( e )
            e = gtk.MenuItem(_("Configure plugin..."))
            e.connect('activate', self._configureExploit, plugin, pname)
            gm.append( e )
            e = gtk.MenuItem(_("Exploit ALL vulns"))
            e.connect('activate', self._exploitAll, pname, False)
            gm.append( e )
            e = gtk.MenuItem(_("Exploit all until first successful"))
            e.connect('activate', self._exploitAll, pname, True)
            gm.append( e )
            gm.show_all()
            gm.popup( None, None, None, event.button, event.time)
    def _handleEditPluginEvent(self, widget, pluginName, path):
        '''
        I get here when the user right clicks on a plugin name, then he clicks on "Edit..."
        This method calls the plugin editor with the corresponding parameters.
        '''
        def f(t, n):
            self._finishedEditingPlugin(path, pluginName)
        pluginEditor("attack", pluginName, f)
    def _finishedEditingPlugin(self, path, pluginName):
        '''
        This is a callback that is called when the plugin editor finishes.
        Drops the cached instance so the edited code is reloaded.
        '''
        del self.plugin_instances[path]
        self.w3af.reloadModifiedPlugin('attack', pluginName)
    def _exploitAll(self, widget, pname, stoponfirst):
        '''Exploit all the vulns with this single plugin.'''
        effectivelyExploitAll(self.w3af, [pname], stoponfirst)
    def _configureExploit(self, widget, plugin, pname):
        '''Configure the exploit plugin.'''
        title = "Configure " + pname
        confpanel.ConfigDialog(title, self.w3af, plugin, showDesc=True)
    def getPluginInstance(self, path):
        '''Caches the plugin instance.
        @param path: where the user is in the plugin list
        @return The plugin
        '''
        try:
            return self.plugin_instances[path]
        except KeyError:
            pass
        # path can be a tuple of one or two values here
        pname = self.liststore[path][1]
        plugin = self.w3af.getPluginInstance(pname, "attack")
        # tag the instance so the GUI can refer back to its name/type
        plugin.pname = pname
        plugin.ptype = "attack"
        self.plugin_instances[path] = plugin
        return plugin
class VulnerabList(gtk.TreeView):
    '''A tree showing all the found vulnerabilities.

    Polls the knowledge base periodically, keeps the rows sorted by
    vulnerability name, and is the drag DESTINATION of the
    exploit -> vulnerability drag&drop.

    @param w3af: The w3af core.
    @param exploitlist: The widget that keeps the list of exploits
    @author: Facundo Batista <facundobatista =at= taniquetil.com.ar>
    '''
    def __init__(self, w3af, exploitlist):
        self.w3af = w3af
        self.exploitlist = exploitlist
        # simple empty List Store
        # columns: the string to show, the string to order, the key
        # for the plugin instance, and the icon
        self.liststore = gtk.ListStore(str, str, str, gtk.gdk.Pixbuf)
        gtk.TreeView.__init__(self, self.liststore)
        # the text & icon column
        tvcolumn = gtk.TreeViewColumn(_("Vulnerabilities"))
        cell = gtk.CellRendererPixbuf()
        tvcolumn.pack_start(cell, expand=False)
        tvcolumn.add_attribute(cell, "pixbuf", 3)
        cell = gtk.CellRendererText()
        tvcolumn.pack_start(cell, expand=True)
        tvcolumn.add_attribute(cell, "markup", 0)
        self.append_column(tvcolumn)
        # here we will hold the instances, the key will be stored in the store
        self.instances = {}
        self.listholder = set()
        # initial filters
        self.applicable = []
        # drag and drop setup, this is the DESTINATION
        target = [("explot-activ", 0, 1)]
        self.enable_model_drag_dest(target, gtk.gdk.ACTION_COPY)
        self.connect("drag-data-received", self._dragDropped)
        self.connect('cursor-changed', self._changedSelection)
        # get the knowledge base and go live
        self.fullkb = kb.kb.dump()
        gobject.timeout_add(500, self._updateList)
        self.lastcheck = False
        self.show()
    def _changedSelection(self, *w):
        '''Changed which vulnerability is selected.'''
        (path, column) = self.get_cursor()
        vuln = self.getInstance(path)
        self.exploitlist.setFilter(vuln)
        # un-bold the rest
        selected = vuln.getName()
        for row in self.liststore:
            if row[1] != selected:
                row[0] = row[1]
    def setFilter(self, exploit):
        '''Sets a new filter and update the list.
        @param exploit: bold only the vulns this exploit can attack.
        '''
        vulns = getExploitableVulns(exploit)
        if vulns is None:
            self.applicable = []
        else:
            self.applicable = vulns
        # rebuild the whole model from scratch so bolding is recomputed
        new_liststore = gtk.ListStore(str, str, str, gtk.gdk.Pixbuf)
        new_listholder = set()
        self._updateList(new_liststore, new_listholder)
        self.set_model(new_liststore)
        self.liststore = new_liststore
        self.listholder = new_listholder
    def _filterKB(self):
        '''Calculates the difference between the KB and the list.
        This way, only is added to the list those nodes that are new.
        @return: The filtered KB.
        '''
        # let's filter the real kb, to see what we should add
        filteredkb = []
        # iterate the first layer, plugin names
        for pluginname, plugvalues in self.fullkb.items():
            # iterate the second layer, variable names
            for variabname, variabobjects in plugvalues.items():
                # iterate the third layer, the variable objects
                if isinstance(variabobjects, list):
                    for obj in variabobjects:
                        if type(obj) == vulnType:
                            severity = obj.getSeverity()
                            filteredkb.append((obj, severity))
        return filteredkb
    def _getBestObjName(self, obj):
        '''
        @return: (markup name, plain name); the markup version is bolded
                 when the current exploit filter applies to obj.
        '''
        if hasattr(obj, "getName"):
            realname = obj.getName()
        else:
            realname = repr(obj)
        if obj in self.applicable:
            showname = "<b>%s</b>" % realname
        else:
            showname = "%s" % realname
        return showname, realname
    def _updateList(self, liststore=None, listholder=None):
        '''Updates the GUI with the KB.
        @return: True to keep being called by gobject.
        '''
        # if the core is not running, don't have anything to update;
        # lastcheck lets one final refresh happen after the scan stops
        if not self.w3af.isRunning():
            if self.lastcheck:
                return True
            else:
                self.lastcheck = True
        self.lastcheck = False
        # get the filtered knowledge base info
        filteredKB = self._filterKB()
        if liststore is None:
            liststore = self.liststore
            listholder = self.listholder
        new_ones = []
        for obj, severity in filteredKB:
            idinstance = str(id(obj))
            if idinstance in listholder:
                continue
            # it's new!
            (showname, realname) = self._getBestObjName(obj)
            newicon = helpers.KB_ICONS.get(("vuln", severity))
            if newicon is not None:
                newicon = newicon.get_pixbuf()
            new_ones.append(
                (idinstance, obj, showname, realname, newicon))
        if new_ones:
            self._addVulns(listholder, liststore, new_ones)
        return True
    def _addVulns(self, listholder, liststore, vulns):
        '''Adds an element to the liststore.
        @param listholder: the holder to check for instances
        @param liststore: the list itself
        @param vulns: what to add
        '''
        # order it by realname, in reverse to be able to do nice pops
        vulns.sort(key=operator.itemgetter(3), reverse=True)
        # add to listholder and instances
        for idinstance, obj, showname, realname, newicon in vulns:
            listholder.add(idinstance)
            self.instances[idinstance] = obj
        # add to the liststore, inserting into the right place to keep order
        storelen = len(liststore)
        ind = 0
        idinstance, obj, showname, realname, newicon = vulns.pop()
        while ind < storelen:
            prvshowname,prvrealname, vln,icn = liststore[ind]
            if realname <= prvrealname:
                liststore.insert(ind, (showname,realname,idinstance,newicon))
                storelen += 1
                try:
                    idinstance, obj, showname, realname, newicon = vulns.pop()
                except IndexError:
                    # nothing left to merge: done
                    break
            ind += 1
        else:
            # while ended without break: the current item and any others
            # left sort after everything already stored, append them
            liststore.append((showname,realname,idinstance,newicon))
            for idinstance, obj, showname, realname, newicon in vulns[::-1]:
                liststore.append((showname,realname,idinstance,newicon))
    def getInstance(self, path):
        '''Extracts the instance from the tree.
        @param path: where the user is in the tree
        @return The instance
        '''
        instanckey = self.liststore[path][2]
        instance = self.instances.get(instanckey)
        return instance
    def _dragDropped(self, tv, drag_context, x, y, selection_data, info, timestamp):
        '''Something was dropped (after a drag) on us.'''
        droppoint = tv.get_dest_row_at_pos(x, y)
        if droppoint is None:
            return True
        # collect info about source and dest
        (destpath, where) = droppoint
        sourcepath = self.exploitlist.get_cursor()[0]
        sourcerow = self.exploitlist.liststore[sourcepath]
        # it should select a destination row
        if where not in (gtk.TREE_VIEW_DROP_INTO_OR_AFTER, gtk.TREE_VIEW_DROP_INTO_OR_BEFORE):
            self.w3af.mainwin.sb(_("You must drop into a row, not in the middle of two"))
            return
        # get real objects
        exploit = self.exploitlist.getPluginInstance(sourcepath)
        dstvuln = self.getInstance(destpath)
        if dstvuln is None:
            self.w3af.mainwin.sb(_("You must select a vulnerability as destination"))
            return
        self._executeExploit(exploit, dstvuln)
        return
    def _executeExploit(self, expl, vuln):
        '''Exploits a vulnerability.
        This raises a text dialog that informs how the exploit
        is going until it finishes.
        This method is going to:
            a) Create the TextDialog
            b) spawn a thread to launch the exploit process
            c) spawn a thread to read from the output manager queue
        b and c both write messages to the TextDialog.
        @param expl: the exploit to use
        @param vuln: the vulnerability to exploit
        '''
        dlg = entries.TextDialog("Exploit!")
        # Start the generator that writes the messages from output manager
        console_task = helpers.write_console_messages(dlg)
        gobject.idle_add(console_task.next)
        # Start the generator that launches the exploit
        exploit_task = self._launch_exploit(dlg, expl, vuln)
        gobject.idle_add(exploit_task.next)
        return
    def _launch_exploit(self, dlg, expl, vuln):
        '''
        Launch the exploit and write messages to the TextDialog.

        A generator driven by gobject.idle_add: yielding True keeps it
        scheduled, yielding False removes it from the idle loop.

        @parameter dlg: The TextDialog.
        '''
        # get the info, and see if we can go for it
        dlg.addMessage("Checking suitability...\n")
        vuln_id_list = vuln.getId()
        yield True
        try:
            canexploit = expl.canExploit(vuln_id_list)
        except w3afException, e:
            dlg.addMessage(_("\nERROR: "))
            dlg.addMessage(str(e) + '\n')
            dlg.done() # set button to sensitive
            dlg.dialog_run() # wait for user response
            yield False
        if not canexploit:
            dlg.addMessage(_("Sorry, this attack plugin can not exploit this vulnerability\n"))
            dlg.done() # set button to sensitive
            dlg.dialog_run() # wait for user response
            yield False
        # ok, go for it!
        dlg.addMessage(_("Ok, exploiting...\n"))
        yield True
        try:
            expl.exploit()
            yield True # print the console messages to the dialog
        except w3afException, e:
            dlg.addMessage(str(e) + '\n')
        else:
            dlg.addMessage(_("Done\n"))
        yield True
        dlg.done() # set button to sensitive
        dlg.dialog_run() # wait for user response
        yield False
class Proxies(gtk.Label):
    '''Placeholder widget for the future "Proxies" panel.

    Just a centered label telling the user the feature is not done yet.
    @author: Facundo Batista <facundobatista =at= taniquetil.com.ar>
    '''
    def __init__(self):
        placeholder = "The 'Proxies' functionality\nwill be implemented\nin the future."
        super(Proxies, self).__init__(placeholder)
        self.set_justify(gtk.JUSTIFY_CENTER)
        self.show()
def getExploitableVulns(exploit):
    '''Returns the exploitable vulnerabilities.
    @param exploit: the exploit to search.
    @return: the list of vulns the plugin can exploit, or [] when the
             plugin does not implement getExploitableVulns.
    '''
    try:
        vulns = exploit.getExploitableVulns()
    except w3afException:
        # best effort: warn on the console and pretend there are none
        print "WARNING: The %r exploit has no getExploitableVulns method!" % exploit
        vulns = []
    return vulns
def effectivelyExploitAll(w3af, activatedPlugins, stopOnFirst):
    '''Exploit all the vulnerabilities.
    Just like in the 1-to-1 exploit, I'll create two generators that will perform the work
    in a "threaded" way.
    @param w3af: the core
    @param activatedPlugins: Which plugins are to be used.
    @param stopOnFirst: if the exploit should stop in the first exploited vuln.
    '''
    dlg = entries.TextDialog("Multiple Exploit!")
    # Start the generator that writes the messages from output manager
    # (.next is the Python 2 generator-advance method)
    console_task = helpers.write_console_messages(dlg)
    gobject.idle_add(console_task.next)
    # Start the generator that launches the exploit
    exploit_task = _launch_exploit_all(dlg, w3af, activatedPlugins, stopOnFirst)
    gobject.idle_add(exploit_task.next)
def _launch_exploit_all(dlg, w3af, activatedPlugins, stopOnFirst):
    '''
    A generator that will perform the exploitation of all the vulnerabilities.

    Driven by gobject.idle_add: yield True to stay scheduled and keep the
    GUI responsive, yield False to stop being called.

    @param dlg: The dialog where I'm going to write the messages
    @param w3af: the core
    @param activatedPlugins: Which plugins are to be used.
    @param stopOnFirst: if the exploit should stop in the first exploited vuln.
    '''
    for exploitname in activatedPlugins:
        dlg.addMessage(_("\nExploiting %r...\n") % exploitname)
        exploit = w3af.getPluginInstance(exploitname, "attack")
        vulns = getExploitableVulns(exploit)
        dlg.addMessage(_(" %d vulnerabilites to exploit\n") % len(vulns))
        yield True
        for vuln in vulns:
            # Let GTK handle events, I want a responsive GUI!
            yield True
            # check whether this plugin can exploit this vuln at all
            dlg.addMessage(("Checking suitability for vuln %r...\n") % vuln.getName())
            try:
                canexploit = exploit.canExploit(vuln.getId())
            except w3afException, e:
                dlg.addMessage(_("\nERROR: "))
                dlg.addMessage(str(e) + '\n')
                dlg.done()
                dlg.dialog_run()
                yield False
            except w3afMustStopException, wmse:
                dlg.addMessage(_("\nERROR: "))
                dlg.addMessage(str(wmse) + '\n')
                dlg.done()
                dlg.dialog_run()
                yield False
            if not canexploit:
                dlg.addMessage(_(" nop\n"))
                yield True
                continue
            dlg.addMessage(_(" ok\n"))
            # exploitable, go for it!
            dlg.addMessage(_("Exploiting...\n"))
            try:
                exploit.exploit()
            except w3afException, e:
                dlg.addMessage(str(e) + '\n')
                yield True
                continue
            except w3afMustStopException, wmse:
                dlg.addMessage(_("\nERROR:"))
                dlg.addMessage(str(wmse) + '\n')
                dlg.done()
                dlg.dialog_run()
                yield False
            # Let GTK handle events, I want a responsive GUI!
            yield True
            # it was succesful!
            if stopOnFirst:
                dlg.addMessage(_("Done\n"))
                dlg.done()
                dlg.dialog_run()
                yield False
    dlg.addMessage(_("Done\n"))
    dlg.done()
    dlg.dialog_run()
    yield False
class ExploitBody(entries.RememberingHPaned):
    '''Body of the exploit tab.

    Left pane: exploit list + vulnerability tree; right pane: shells and
    the (future) proxies panel.

    @param w3af: the Core instance.
    @author: Facundo Batista <facundobatista =at= taniquetil.com.ar>
    '''
    def __init__(self, w3af):
        super(ExploitBody,self).__init__(w3af, "pane-exploitbody")
        self.w3af = w3af
        self.panels = {}
        # This is the index to use in the message diverter
        #
        # The first window that is poped up, gets 0 and starts from there
        # that window consumes messages and increases this number.
        #
        # The next window will show messages starting from were the
        # other window left the pointer.
        #
        # All the message_index handling is done with:
        # - self.get_message_index()
        # - self.inc_message_index()
        #
        self._message_index = 0
        # publish the accessors through the KB so other modules can use them
        kb.kb.save('get_message_index', 'get_message_index', self.get_message_index)
        kb.kb.save('inc_message_index', 'inc_message_index', self.inc_message_index)
        # left & right
        exploitvuln = self._buildExplVuln()
        interac = self._buildInteraction()
        self.panels["exploitvuln"] = exploitvuln
        self.panels["interac"] = interac
        # pack it all and show
        self.pack1(exploitvuln)
        self.pack2(interac)
        self.panactiv = dict((x,True) for x in self.panels)
        self.show()
    def inc_message_index(self):
        # advance the shared console-message pointer
        self._message_index += 1
    def get_message_index(self):
        # current position of the shared console-message pointer
        return self._message_index
    def _buildExplVuln(self):
        '''The pane with the exploit list and vulnerabilities tree.'''
        pan = entries.RememberingHPaned(self.w3af, "pane-epxlvuln", 200)
        # left
        exploitlist = ExploitTree(self.w3af)
        scrollwin1 = gtk.ScrolledWindow()
        scrollwin1.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
        scrollwin1.add_with_viewport(exploitlist)
        scrollwin1.show()
        # rigth
        interac = VulnerabList(self.w3af, exploitlist)
        # wire the two widgets so each can re-filter the other
        exploitlist.vulnerabs = interac
        scrollwin2 = gtk.ScrolledWindow()
        scrollwin2.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
        scrollwin2.add_with_viewport(interac)
        scrollwin2.show()
        # pack it all and show
        pan.pack1(scrollwin1)
        pan.pack2(scrollwin2)
        pan.show()
        return pan
    def _buildInteraction(self):
        '''The pane with the shells and proxies list.'''
        pan = entries.RememberingVPaned(self.w3af, "pane-explinteraction")
        # left
        shells = Shells(self.w3af)
        scrollwin1 = gtk.ScrolledWindow()
        scrollwin1.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
        scrollwin1.add_with_viewport(shells)
        scrollwin1.show()
        # rigth
        proxies = Proxies()
        scrollwin2 = gtk.ScrolledWindow()
        scrollwin2.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
        scrollwin2.add_with_viewport(proxies)
        scrollwin2.show()
        # pack it all and show
        pan.pack1(scrollwin1)
        pan.pack2(scrollwin2)
        pan.show()
        return pan
    def togglePanels(self, panel, active):
        '''Turn on and off the panels.
        @param panel: The panel to turn on and off
        @param active: If it should be activated or deactivated
        '''
        widg = self.panels[panel]
        if active:
            widg.show()
        else:
            widg.hide()
        self.panactiv[panel] = active
    def exploitAll(self):
        '''Exploit all vulns with all plugins.'''
        ea = ExploitAllDialog(self.w3af)
        resp = ea.run()
        if resp != gtk.RESPONSE_OK:
            return
        effectivelyExploitAll(self.w3af, ea.activatedPlugins, ea.stopOnFirst)
        return
| [
"sui84@126.com"
] | sui84@126.com | |
7105f8ccff4a2e7c22be09b82854917a539b4377 | 29cb09a368d3ae3c913ac435d199028ea68963eb | /src/ndngitsync/repo.py | fa545472868d0408a515a1949e1ead1c13ce91a1 | [] | no_license | zjkmxy/GitSync | ba8ea6a918ca5617c8b49b924f5f782bdc6a37b7 | 77c525bc883554545d95eb619c64c11e7e496d1d | refs/heads/master | 2020-07-08T19:39:40.060194 | 2019-08-23T22:15:59 | 2019-08-23T22:15:59 | 203,758,756 | 0 | 0 | null | 2019-08-22T09:16:15 | 2019-08-22T09:16:13 | null | UTF-8 | Python | false | false | 8,894 | py | from pyndn import Face, Name, Data, Interest
from .sync import Sync
from .gitfetcher import GitFetcher, GitProducer, fetch_data_packet
from .storage import DBStorage, IStorage
import pickle
import asyncio
import struct
import logging
from .config import *
class BranchInfo:
    """Per-branch metadata record tracked by a Repo."""

    def __init__(self, branch_name):
        self.name = branch_name   # branch name, e.g. "master"
        self.custodian = ""       # custodian identifier (empty = unset)
        self.key = ""             # signing key identifier (empty = unset)
        self.timestamp = 0        # sync timestamp of the current head
        self.head = ""            # commit hash at the branch head
        self.head_data = b""      # cached wire-encoded head Data packet
class Repo:
def __init__(self, objects_db: IStorage, repo_name: str, face: Face):
self.repo_db = DBStorage(DATABASE_NAME, repo_name)
self.objects_db = objects_db
self.repo_prefix = Name(GIT_PREFIX).append(repo_name)
self.sync = Sync(face=face,
prefix=Name(self.repo_prefix).append("sync"),
on_update=self.on_sync_update)
self.producer = GitProducer(face=face,
prefix=Name(self.repo_prefix).append("objects"),
storage=objects_db)
self.face = face
self.branches = {}
self.load_refs()
face.registerPrefix(Name(self.repo_prefix).append("refs"),
self.on_refs_interest,
self.on_register_failed)
face.registerPrefix(Name(self.repo_prefix).append("ref-list"),
self.on_reflist_interest,
self.on_register_failed)
face.registerPrefix(Name(self.repo_prefix).append("branch-info"),
self.on_branchinfo_interest,
self.on_register_failed)
self.sync.run()
def on_sync_update(self, branch: str, timestamp: int):
event_loop = asyncio.get_event_loop()
event_loop.create_task(self.sync_update(branch, timestamp))
async def sync_update(self, branch: str, timestamp: int):
commit = ""
data = Data()
def update_db():
nonlocal commit
# Fix the database
if timestamp <= self.branches[branch].timestamp:
return
self.branches[branch].timestamp = timestamp
self.branches[branch].head = commit
self.branches[branch].head_data = data.wireEncode().toBytes()
self.repo_db.put(branch, pickle.dumps(self.branches[branch]))
self.branches[branch].head_data = b""
if branch in self.branches:
# Update existing branch
branch_info = self.branches[branch]
if branch_info.timestamp < timestamp:
interest = Interest(Name(self.repo_prefix).append("refs").append(branch).appendTimestamp(timestamp))
print("ON SYNC UPDATE", interest.name.toUri())
data = await fetch_data_packet(self.face, interest)
if isinstance(data, Data):
commit = data.content.toBytes().decode("utf-8")
else:
print("error: Couldn't fetch refs")
return
fetcher = self.fetch(commit)
await asyncio.wait_for(fetcher.wait_until_finish(), None)
update_db()
print("Update branch", branch, timestamp)
else:
# Fetch new branch
interest = Interest(Name(self.repo_prefix).append("branch-info").append(branch))
print("ON NEW BRANCH", interest.name.toUri())
data = await fetch_data_packet(self.face, interest)
if isinstance(data, Data):
branchinfo = pickle.loads(data.content.toBytes())
else:
print("error: Couldn't fetch branch-info")
return
self.branches[branch] = branchinfo
await self.sync_update(branch, timestamp)
def on_branchinfo_interest(self, _prefix, interest: Interest, face, _filter_id, _filter):
name = interest.name
print("ON BRANCH INFO INTEREST", name.toUri())
branch = name[-1].toEscapedString()
if branch not in self.branches:
return
data = Data(interest.name)
data.content = pickle.dumps(self.branches[branch])
data.metaInfo.freshnessPeriod = 1000
face.putData(data)
def on_refs_interest(self, _prefix, interest: Interest, face, _filter_id, _filter):
name = interest.name
print("ON REFS INTEREST", name.toUri())
if name[-1].isTimestamp:
timestamp = name[-1].toTimestamp()
name = name[:-1]
else:
timestamp = None
branch = name[-1].toEscapedString()
if branch not in self.branches:
return
if timestamp is not None and timestamp != self.branches[branch].timestamp:
if timestamp > self.branches[branch].timestamp:
self.on_sync_update(branch, timestamp)
return
data = Data()
raw_data = pickle.loads(self.repo_db.get(branch))
data.wireDecode(raw_data.head_data)
data.metaInfo.freshnessPeriod = 1000
face.putData(data)
def on_reflist_interest(self, _prefix, interest: Interest, face, _filter_id, _filter):
result = '\n'.join("{} refs/heads/{}".format(info.head, name)
for name, info in self.branches.items())
result = result + '\n'
print("On reflist -> return:", result)
data = Data(interest.name)
data.content = result.encode("utf-8")
data.metaInfo.freshnessPeriod = 1000
face.putData(data)
def on_register_failed(self, prefix):
logging.error("Prefix registration failed: %s", prefix)
def load_refs(self):
logging.info("Loading %s {", self.repo_prefix[-1])
for branch in self.repo_db.keys():
raw_data = self.repo_db.get(branch)
self.branches[branch] = pickle.loads(raw_data)
# Drop the data packet from memory
self.branches[branch].head_data = b""
logging.info(" branch: %s head: %s", self.branches[branch].name, self.branches[branch].head)
# Set Sync's initial state
self.sync.state = {name: info.timestamp for name, info in self.branches.items()}
logging.info("}")
def fetch(self, commit):
    # Start fetching a commit object from the repo's "objects" namespace;
    # returns the fetcher so callers can await its completion.
    fetcher = GitFetcher(self.face, Name(self.repo_prefix).append("objects"), self.objects_db)
    fetcher.fetch(commit, "commit")
    return fetcher
def create_branch(self, branch, custodian):
    """Register a new, empty branch; returns False if it already exists."""
    if branch in self.branches:
        return False
    info = BranchInfo(branch)
    info.custodian = custodian
    info.timestamp = 0
    info.head = "?"
    self.branches[branch] = info
    self.repo_db.put(branch, pickle.dumps(info))
    loop = asyncio.get_event_loop()
    loop.create_task(self.sync.publish_data(branch, 0))
    return True
async def push(self, branch, commit, timeout, face, name):
    """Handle a push of `commit` to `branch`, replying on `face` under `name`.

    The pushed commit is fetched first; if that finishes within `timeout`
    seconds the head is advanced synchronously and SUCCESS/FAILURE is sent,
    otherwise the checkout continues in the background and PENDING is sent.
    """
    # TODO Check if new head is legal
    fetcher = self.fetch(commit)
    result = False
    async def checkout():
        # Wait for the fetch, then advance the branch head, build the new
        # wire-encoded head Data packet, and persist the branch record.
        nonlocal fetcher, result
        await fetcher.wait_until_finish()
        if not fetcher.success:
            return
        # TODO W-A-W conflict
        timestamp = await self.sync.publish_data(branch)
        self.branches[branch].timestamp = timestamp
        self.branches[branch].head = commit
        # Fix the database
        head_data_name = Name(self.repo_prefix).append("refs")
        head_data_name = head_data_name.append(branch).appendTimestamp(timestamp)
        head_data = Data(head_data_name)
        head_data.content = commit.encode("utf-8")
        # TODO Sign data
        self.branches[branch].head_data = head_data.wireEncode().toBytes()
        self.repo_db.put(branch, pickle.dumps(self.branches[branch]))
        # The packet is persisted; keep only the lightweight info in memory.
        self.branches[branch].head_data = b""
        result = True
    event_loop = asyncio.get_event_loop()
    response = None
    if branch not in self.branches:
        response = PUSH_RESPONSE_FAILURE
    if response is None:
        try:
            await asyncio.wait_for(fetcher.wait_until_finish(), timeout)
        except asyncio.TimeoutError:
            # Fetch is slow: finish the checkout in the background and let
            # the client poll again later.
            event_loop.create_task(checkout())
            response = PUSH_RESPONSE_PENDING
    if response is None:
        await asyncio.wait_for(checkout(), None)
        if result:
            response = PUSH_RESPONSE_SUCCESS
        else:
            response = PUSH_RESPONSE_FAILURE
    logging.info("Push Result: %s", response)
    data = Data(name)
    data.content = struct.pack("i", response)
    data.metaInfo.freshnessPeriod = 1000
    face.putData(data)
| [
"bitmxy@gmail.com"
] | bitmxy@gmail.com |
6e98e14a02e0d28ecb66e1529a759262fee35440 | 613f6f7eaa5062b12fe8cd08c77d74b6f424aebd | /scripts/iterm2_change_theme.py | 6c8a1b4551dfee07cb3e36b535c223791c24d4cb | [
"Unlicense"
] | permissive | Furkanzmc/dotfiles | 1009cc257d70f0cb43c79a9f4666ac6e7d8f1a95 | 7840005baaef78d860127868e478f35c72d257fa | refs/heads/master | 2023-08-21T17:33:33.157314 | 2023-08-16T22:14:42 | 2023-08-16T22:14:51 | 158,622,114 | 11 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,202 | py | #!/usr/bin/env python3
# Python
from os.path import expanduser, exists
from subprocess import run
# iterm
import iterm2
def _find_python3():
    """Return the first known system python3 interpreter path.

    Raises:
        RuntimeError: if no known python3 location exists.
    """
    for candidate in ("/usr/bin/python3", "/usr/local/bin/python3"):
        if exists(candidate):
            return candidate
    raise RuntimeError("Cannot find python3 executable.")


async def main(connection):
    """Watch iTerm2's effective theme and propagate light/dark changes to
    the color preset, the dotfiles theme marker file, and Neovim."""
    async with iterm2.VariableMonitor(
        connection, iterm2.VariableScopes.APP, "effectiveTheme", None
    ) as mon:
        while True:
            # Block until theme changes
            theme = await mon.async_get()
            python_path = _find_python3()
            # Themes have space-delimited attributes, one of which will be
            # light or dark; everything that differs between the two modes
            # is derived from this single flag (no duplicated branches).
            mode = "dark" if "dark" in theme.split(" ") else "light"
            preset_name = "cosmic_latte_dark" if mode == "dark" else "cosmic_latte_light"
            preset = await iterm2.ColorPreset.async_get(connection, preset_name)
            # Record the active mode for other shell tooling.
            with open(
                expanduser("~/.dotfiles/pwsh/tmp_dirs/system_theme"), "w"
            ) as file_handle:
                file_handle.write(mode)
            # Tell running Neovim instances to switch background.
            run([
                python_path,
                expanduser("~/.dotfiles/scripts/nvim.py"),
                "--command",
                "set background={}".format(mode),
            ])
            # Update the list of all profiles and iterate over them.
            profiles = await iterm2.PartialProfile.async_query(connection)
            for partial in profiles:
                # Fetch the full profile and then set the color preset in it.
                profile = await partial.async_get_full_profile()
                await profile.async_set_color_preset(preset)
# Update the list of all profiles and iterate over them.
profiles = await iterm2.PartialProfile.async_query(connection)
for partial in profiles:
# Fetch the full profile and then set the color preset in it.
profile = await partial.async_get_full_profile()
await profile.async_set_color_preset(preset)
iterm2.run_forever(main)
| [
"furkanuzumcu@gmail.com"
] | furkanuzumcu@gmail.com |
347c80b9aa919c844abeade2e3e7f294c98a2abf | f2a81a8186c540e58d393bbbb2606ef51c26cd35 | /Calculator/User_calculator_1.py | 06207556cb3f39242c7764605b269137066aab90 | [] | no_license | Manoj431/Python | 584befef18e4fd1c02bd58025794902d2266468a | b8fc0220fb8a44232788223973f7e6df7c19836f | refs/heads/main | 2023-04-18T16:45:38.603406 | 2021-05-06T18:25:39 | 2021-05-06T18:25:39 | 364,981,712 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,258 | py | from tkinter import *
from tkinter.messagebox import *
import math as m
#defining the operations...
def backspace():
    """Delete the last character of the expression in the entry field."""
    # Slicing with [:-1] drops the trailing character and is safe on "".
    equation = txt_field.get()[:-1]
    txt_field.delete(0, END)
    txt_field.insert(0, equation)
def all_clear():
    # Wipe the whole expression from the entry field.
    txt_field.delete(0,END)
def on_click(flag):
    """Handle a click on any standard-pad button (digits, operators, =)."""
    b = flag.widget
    text = b["text"]
    print(text)
    if text == "x":
        # The multiply key is labelled "x" but the expression needs "*".
        txt_field.insert(END,"*")
        return
    if text == "=":
        try:
            equation = txt_field.get()
            # NOTE(review): eval() executes whatever was typed into the
            # entry field; acceptable for a local desktop toy, never for
            # untrusted input.
            result = eval(equation)
            txt_field.delete(0,END)
            txt_field.insert(0, result)
        except Exception as ex:
            # Bad expressions (syntax errors, division by zero, ...) are
            # surfaced in a message box instead of crashing the UI.
            showerror("Error..:", ex)
        return
    # Any other key simply appends its label to the expression.
    txt_field.insert(END, text)
# --- window, entry field and standard key pad -----------------------------
font = ("Lucida", 20,"bold")
#creating the box/window..
box = Tk()
box.title("Calculator")
box.geometry("500x430")
box.configure(background="Blue")
#Creating a menu bar
#heading label..
heading = Label(box, text = "Calculator", font =("Arial Rounded MT Bold", 24, "bold underline" ) )
heading.pack(side=TOP, pady=15) #adding the heading to our window..
#adding the text field...
txt_field = Entry(box, font = font, justify = CENTER)
txt_field.pack(side = TOP, padx = 15, fill = X)
#Frame class... This appears after we add buttons to it..
std_Frame = Frame(box)
std_Frame.pack(side=TOP, padx=15)
#adding buttons...
# Digit keys 9..1 laid out in a 3x3 grid, left-to-right, top-to-bottom.
temp = 9
for i in range(3):
    for j in range(3):
        btn = Button(std_Frame, text=str(temp), font=font, width=6, relief="ridge", activebackground="grey" )
        btn.grid(row=i, column=j, padx=3, pady=3)
        temp-=1
        btn.bind("<Button-1>", on_click)
btnZero = Button(std_Frame, text="0", font=font, width=6, relief="ridge", activebackground="grey" )
btnZero.grid(row=3, column=0, padx=3, pady=3)
btnDot = Button(std_Frame, text=".", font=font, width=6, relief="ridge", activebackground="grey" )
btnDot.grid(row=3, column=1, padx=3, pady=3)
btnEquals = Button(std_Frame, text="=", font=font, width=6, relief="ridge", activebackground="grey" )
btnEquals.grid(row=3, column=2, padx=3, pady=3)
btnMultiply = Button(std_Frame, text="x", font=font, width=6, relief="ridge", activebackground="grey" )
btnMultiply.grid(row=0, column=3, padx=3, pady=3)
btnPlus = Button(std_Frame, text="+", font=font, width=6, relief="ridge", activebackground="grey" )
btnPlus.grid(row=1, column=3, padx=3, pady=3)
btnMinus = Button(std_Frame, text="-", font=font, width=6, relief="ridge", activebackground="grey" )
btnMinus.grid(row=2, column=3, padx=3, pady=3)
btnDivision = Button(std_Frame, text="/", font=font, width=6, relief="ridge", activebackground="grey" )
btnDivision.grid(row=3, column=3, padx=3, pady=3)
btnDelete = Button(std_Frame, text="<<---", font=font, width=13, relief="ridge", activebackground="grey", command=backspace )
btnDelete.grid(row=4, column=0, columnspan=2, padx=3, pady=3)
btnClear = Button(std_Frame, text="C", font=font, width=13, relief="ridge", activebackground="grey", command=all_clear )
btnClear.grid(row=4, column=2, columnspan=2, padx=1, pady=1)
#binding the remaining buttons..
btnZero.bind("<Button-1>", on_click)
btnDot.bind("<Button-1>", on_click)
btnEquals.bind("<Button-1>", on_click)
btnMultiply.bind("<Button-1>", on_click)
btnPlus.bind("<Button-1>", on_click)
btnMinus.bind("<Button-1>", on_click)
btnDivision.bind("<Button-1>", on_click)
# --- scientific key pad (shown/hidden by sc_click) ------------------------
#Scientific calculator
scFrame = Frame(box)
sinBtn = Button(scFrame, text='sinθ', font=font, width=6, relief="ridge", activebackground="grey")
sinBtn.grid(row=0, column=0)
cosBtn = Button(scFrame, text='cosθ', font=font, width=6, relief="ridge", activebackground="grey")
cosBtn.grid(row=0, column=1)
tanBtn = Button(scFrame, text='tanθ', font=font, width=6, relief="ridge", activebackground="grey")
tanBtn.grid(row=0, column=2)
commaBtn = Button(scFrame, text=',', font=font, width=6, relief="ridge", activebackground="grey")
commaBtn.grid(row=0, column=3)
factBtn = Button(scFrame, text='x!', font=font, width=13, relief="ridge", activebackground="grey")
factBtn.grid(row=1, column=0, columnspan=2, padx=3, pady=3)
powerBtn = Button(scFrame, text='^', font=font, width=13, relief="ridge", activebackground="grey")
powerBtn.grid(row=1, column=2, columnspan=2, padx=1, pady=1)
# True while the standard (non-scientific) layout is active.
normalcalc = True
def calculate_sc(event):
    """Handle a click on a scientific-pad button.

    Reads the current entry text, applies the operation named by the
    clicked button, and replaces the entry contents with the result.
    Trigonometric functions take their argument in degrees; "^" expects
    "base,power" in the entry field.
    """
    btn = event.widget
    text = btn['text']
    ex = txt_field.get()
    answer = ''
    if text == ",":
        txt_field.insert(END,",")
        return
    if text == 'x!':
        print("cal factorial")
        # Factorial is only defined for non-negative integers.
        answer = str(m.factorial(int(ex)))
    elif text == 'sinθ':
        print("cal sin")
        # float() instead of int() so fractional degree values work too.
        answer = str(m.sin(m.radians(float(ex))))
    elif text == 'cosθ':
        answer = str(m.cos(m.radians(float(ex))))
    elif text == 'tanθ':
        answer = str(m.tan(m.radians(float(ex))))
    elif text == '^':
        print('pow')
        base, power = ex.split(',')
        print(base)
        print(power)
        # float() generalizes to non-integer bases/exponents; str() keeps
        # the inserted text consistent with the other branches.
        answer = str(m.pow(float(base), float(power)))
    txt_field.delete(0, END)
    txt_field.insert(0, answer)
def sc_click():
    """Toggle the scientific key pad on/off, resizing the window to fit."""
    global normalcalc
    if normalcalc:
        # Re-pack the standard frame after the sci frame so the sci pad
        # appears above the standard keys.
        std_Frame.pack_forget()
        # add sc frame
        scFrame.pack(side=TOP, pady=20)
        std_Frame.pack(side=TOP)
        box.geometry('500x600')
        normalcalc = False
    else:
        scFrame.pack_forget()
        box.geometry('500x430')
        normalcalc = True
# end functions
# binding sc buttons
sinBtn.bind("<Button-1>", calculate_sc)
cosBtn.bind("<Button-1>", calculate_sc)
tanBtn.bind("<Button-1>", calculate_sc)
commaBtn.bind("<Button-1>", calculate_sc)
factBtn.bind("<Button-1>", calculate_sc)
powerBtn.bind("<Button-1>", calculate_sc)
# Menu bar with a File menu; both entries just toggle the scientific pad.
fontMenu = ('Arial', 15)
menubar = Menu(box, font=fontMenu)
filemenu = Menu(menubar, font=fontMenu, tearoff=0)
menubar.add_cascade(label="File", menu=filemenu)
filemenu.add_command(label="Standard", command=sc_click)
filemenu.add_command(label="Scientific", command=sc_click)
box.config(menu=menubar)
# Enter the Tk event loop; blocks until the window is closed.
box.mainloop()
"noreply@github.com"
] | noreply@github.com |
463b10f7c87d9bc12379e0dc03342fcc89926b13 | e6dd07d6d6c3777fe5b47ffef0145380c95f68c4 | /sds_ml/test_tree_sds.py | 077f1f0510d054a2dcb73bd4bafd153122340bbf | [
"Apache-2.0"
] | permissive | AndrewOwenMartin/sds-ml | db18ddfb223dd2de0e426f30e468307d92e2d297 | c48a022b762e6e445608efdfcf30334a47934db3 | refs/heads/main | 2022-11-05T14:05:17.090871 | 2020-06-23T17:46:35 | 2020-06-23T17:52:56 | 273,550,188 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,777 | py | import collections, datetime, functools, itertools
import json, logging, pathlib, random, re
import unittest
from logging import DEBUG, INFO, WARNING, ERROR, FATAL
import sds_ml.tree_sds as tree_sds
import sds_ml.tree_search as tree_search
log = logging.getLogger(__name__)
class TestTreeSds(unittest.TestCase):
    """Exercises Hyp/Split construction, pruning and growing on a random tree."""

    def setUp(self):
        # Fresh RNG, a depth-5 branching-factor-10 tree, and a name->node
        # index rebuilt before every test.
        logging.basicConfig(level=logging.DEBUG)
        self.log = logging.getLogger(__file__)
        self.rng = random.Random()
        self.tree = tree_search.build_tree(depth=5, branch_count=10)
        self.name2node = {
            node.name: node
            for node
            in self.tree.bredth_first()
        }

    @classmethod
    def setUpClass(cls):
        pass

    @classmethod
    def tearDownClass(cls):
        pass

    def test_name2node(self):
        # Smoke test: log the first 50 name->node pairs of the index.
        log.log(
            INFO,
            "\n%s",
            "\n".join(
                itertools.islice(
                    (str(x) for x in self.name2node.items()),
                    50,
                )
            )
        )

    def test_build_hyp(self):
        # Empty hypothesis: no root split at all.
        hyp0 = tree_sds.Hyp(
            root_split=None,
        )
        log.info("hyp0: %s", hyp0)
        log.info("hyp0 leaf splits: %s", hyp0.count_leaf_splits())
        log.info("hyp0 splittable leaf nodes: %s", hyp0.count_splittable_leaf_nodes())
        # One flat split over the four top-level children.
        hyp_simple = tree_sds.Hyp(
            root_split=tree_sds.Split(
                members=[
                    self.name2node["r0"],
                    self.name2node["r1"],
                    self.name2node["r2"],
                    self.name2node["r3"],
                ]
            )
        )
        log.info("hyp simple: %s", hyp_simple)
        log.info("hyp simple leaf splits: %s", hyp_simple.count_leaf_splits())
        log.info("hyp simple splittable leaf nodes: %s", hyp_simple.count_splittable_leaf_nodes())
        # Irregularly nested hypothesis: splits at several depths.
        hyp1 = tree_sds.Hyp(
            root_split=tree_sds.Split(
                members=[
                    tree_sds.Split(
                        members=[
                            self.name2node["r00"],
                            tree_sds.Split(
                                members=[
                                    self.name2node["r010"],
                                    self.name2node["r011"],
                                    tree_sds.Split(
                                        members=[
                                            self.name2node["r0120"],
                                            self.name2node["r0121"],
                                            self.name2node["r0122"],
                                            self.name2node["r0123"],
                                        ]
                                    ),
                                    self.name2node["r013"],
                                ]
                            ),
                            self.name2node["r02"],
                            self.name2node["r03"],
                        ]
                    ),
                    self.name2node["r1"],
                    self.name2node["r2"],
                    tree_sds.Split(
                        members=[
                            self.name2node["r30"],
                            self.name2node["r31"],
                            self.name2node["r32"],
                            self.name2node["r33"],
                        ]
                    ),
                ]
            )
        )
        log.info("hyp1: %s", hyp1)
        log.info("hyp1 leaf splits: %s", hyp1.count_leaf_splits())
        log.info("hyp1 splittable leaf nodes: %s", hyp1.count_splittable_leaf_nodes())
        # Regular two-level hypothesis: every top-level child fully split.
        hyp2 = tree_sds.Hyp(
            root_split=tree_sds.Split(
                members=[
                    tree_sds.Split(
                        members=[
                            self.name2node["r00"],
                            self.name2node["r01"],
                            self.name2node["r02"],
                            self.name2node["r03"],
                        ]
                    ),
                    tree_sds.Split(
                        members=[
                            self.name2node["r10"],
                            self.name2node["r11"],
                            self.name2node["r12"],
                            self.name2node["r13"],
                        ]
                    ),
                    tree_sds.Split(
                        members=[
                            self.name2node["r20"],
                            self.name2node["r21"],
                            self.name2node["r22"],
                            self.name2node["r23"],
                        ]
                    ),
                    tree_sds.Split(
                        members=[
                            self.name2node["r30"],
                            self.name2node["r31"],
                            self.name2node["r32"],
                            self.name2node["r33"],
                        ]
                    ),
                ]
            )
        )
        log.info("hyp2: %s", hyp2)
        log.info("hyp2 leaf splits: %s", hyp2.count_leaf_splits())
        log.info("hyp2 splittable leaf nodes: %s", hyp2.count_splittable_leaf_nodes())
        # Randomly generated first split over the whole tree.
        root_hyp = tree_sds.Hyp.first_split(rng=self.rng, tree=self.tree, split_num=4)
        log.info("root hyp: %s", root_hyp)
        log.info("root hyp leaf splits: %s", root_hyp.count_leaf_splits())
        log.info("root hyp splittable leaf nodes: %s", root_hyp.count_splittable_leaf_nodes())
        # Prune each leaf split of hyp1 in turn (on a deep clone, so hyp1
        # itself stays intact).
        for leaf_num in range(hyp1.count_leaf_splits()):
            clone_split = hyp1.root_split.deep_clone()
            clone_split.prune_leaf(leaf_num=leaf_num)
            prune_hyp = tree_sds.Hyp(root_split=clone_split)
            log.info("prune_hyp%s: %s", leaf_num, prune_hyp)
            log.info("prune_hyp%s leaf splits: %s", leaf_num, prune_hyp.count_leaf_splits())
            log.info("prune_hyp%s splittable leaf nodes: %s", leaf_num, prune_hyp.count_splittable_leaf_nodes())
        # Grow each splittable leaf node of hyp1 in turn, again on clones.
        log.info("counting hyp1 splittable leaf nodes")
        for node_num in range(hyp1.count_splittable_leaf_nodes()):
            #log.info("hyp1 leaf node %s", node_num)
            hyp1_clone = hyp1.root_split.deep_clone()
            hyp1_clone.split_node(node_num=node_num, split_num=4, rng=self.rng)
            grow_hyp = tree_sds.Hyp(root_split=hyp1_clone)
            log.info("grow hyp1 node %s: %s", node_num, grow_hyp)
        #hyp2 = tree_sds.Hyp(splits=[])
        #hyp3 = hyp2.random_split(tree=self.tree, split_num=3, rng=self.rng)
        #log.info("%s", hyp3)
        #leaf_split_count = tree_sds.count_leaf_splits
| [
"a.martin@gold.ac.uk"
] | a.martin@gold.ac.uk |
9057b02d2ebad2cfc59b5649da5d1eeb5780b432 | 8d5f49fa1fda8ffc473e7f5a62786c77838a5820 | /website/drawquest/dbrouters.py | 23bbfb4be240253be8526040cf768de593b23d88 | [
"BSD-3-Clause"
] | permissive | MichaelBechHansen/drawquest-web | dfc6f5d9541860a5df23db678e82564a230bd42e | 8d8f9149b6efeb65202809a5f8916386f58a1b3b | refs/heads/master | 2021-01-14T10:30:10.861222 | 2015-11-10T03:13:42 | 2015-11-10T03:13:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,679 | py | from django.conf import settings
class DatabaseAppRouter(object):
    """
    A router to control all database operations on models for different
    databases.

    In case an app is not set in settings.DATABASE_APPS_MAPPING, the router
    will fallback to the `default` database.

    Settings example:

    DATABASE_APPS_MAPPING = {'app1': 'db1', 'app2': 'db2'}
    """

    def db_for_read(self, model, **hints):
        """ Point all read operations to the specific database. """
        return settings.DATABASE_APPS_MAPPING.get(model._meta.app_label)

    def db_for_write(self, model, **hints):
        """ Point all write operations to the specific database. """
        return settings.DATABASE_APPS_MAPPING.get(model._meta.app_label)

    def allow_relation(self, obj1, obj2, **hints):
        """ Allow any relation between apps that use the same database. """
        db_obj1 = settings.DATABASE_APPS_MAPPING.get(obj1._meta.app_label)
        db_obj2 = settings.DATABASE_APPS_MAPPING.get(obj2._meta.app_label)
        if db_obj1 and db_obj2:
            # Both apps explicitly mapped: only same-database relations.
            return db_obj1 == db_obj2
        # No opinion when either app is unmapped (other routers may decide).
        return None

    def allow_syncdb(self, db, model):
        """ Make sure that apps only appear in the related database. """
        if model._meta.app_label == 'south':
            # South's migration-history table may live in every database.
            return True
        elif db in settings.DATABASE_APPS_MAPPING.values():
            return settings.DATABASE_APPS_MAPPING.get(model._meta.app_label) == db
        elif model._meta.app_label in settings.DATABASE_APPS_MAPPING:
            # `in` replaces dict.has_key, which was removed in Python 3.
            return False
        elif db != 'default':
            return False
        return True
| [
"alex.ehlke@gmail.com"
] | alex.ehlke@gmail.com |
444afd65d83f521bbd49a2443f13fc3fbfceb654 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03078/s480863669.py | ac933d8bc820956754a8b02303270586b6a2aaa3 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 495 | py | # solution
x,y,z,k = map(int, input().split())
a = sorted(list(map(int, input().split())), reverse = True)
b = sorted(list(map(int, input().split())), reverse = True)
c = sorted(list(map(int, input().split())), reverse = True)
ans = []
for p in range(min(k,len(a))):
for q in range(min(k,len(b))):
for r in range(min(k,len(c))):
if((p+1)*(q+1)*(r+1) > k):
break
ans.append(a[p] + b[q] + c[r])
ans = sorted(ans, reverse = True)
for i in range(k):
print(ans[i]) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
d753dc57a8bfd4864a71762567e126d52cfbeeac | c5205dd59f574915766df1720d2e89a896d7e0f3 | /Pstage4_ModelCompression/code/src/modules/__init__.py | ce1754610b3380ff7f0786441024c8610bd75ccd | [
"MIT"
] | permissive | Hyerin-oh/Naver_AI_Boostcamp_Pstage | 8b965f42b7a631382c2b9ab523004b64ae84efd4 | b789e8eef8d37d614022a1c9b72b39656ab10446 | refs/heads/main | 2023-06-03T11:20:39.693955 | 2021-06-25T18:22:35 | 2021-06-25T18:22:35 | 360,912,985 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,728 | py | """PyTorch Module and ModuleGenerator."""
from src.modules.base_generator import GeneratorAbstract, ModuleGenerator
from src.modules.bottleneck import Bottleneck, BottleneckGenerator
from src.modules.conv import Conv, ConvGenerator, FixedConvGenerator
from src.modules.dwconv import DWConv, DWConvGenerator
from src.modules.flatten import FlattenGenerator
from src.modules.invertedresidualv3 import (
InvertedResidualv3,
InvertedResidualv3Generator,
)
from src.modules.invertedresidualv2 import (
InvertedResidualv2,
InvertedResidualv2Generator,
)
from src.modules.mbconv import (
MBConv,
MBConvGenerator,
)
from src.modules.fusedmbconv import (
FusedMBConv,
FusedMBConvGenerator,
)
from src.modules.shufflev2 import (
ShuffleNetV2,
ShuffleNetV2Generator,
)
from src.modules.mixedconv import (
MixNetBlock,
MixNetBlockGenerator
)
from src.modules.linear import Linear, LinearGenerator
from src.modules.poolings import (
AvgPoolGenerator,
GlobalAvgPool,
GlobalAvgPoolGenerator,
MaxPoolGenerator,
)
# Public API of the modules package; keep in sync with the imports above.
__all__ = [
    "ModuleGenerator",
    "GeneratorAbstract",
    "Bottleneck",
    "Conv",
    "DWConv",
    "Linear",
    "GlobalAvgPool",
    "InvertedResidualv2",
    "InvertedResidualv3",
    "BottleneckGenerator",
    "FixedConvGenerator",
    "ConvGenerator",
    "LinearGenerator",
    "DWConvGenerator",
    "FlattenGenerator",
    "MaxPoolGenerator",
    "AvgPoolGenerator",
    "GlobalAvgPoolGenerator",
    "InvertedResidualv2Generator",
    "InvertedResidualv3Generator",
    "MBConvGenerator",
    "MBConv",
    "FusedMBConv",
    "FusedMBConvGenerator",
    "ShuffleNetV2",
    "ShuffleNetV2Generator",
    "MixNetBlock",
    "MixNetBlockGenerator"
]
| [
"ohhyerin.oh@gmail.com"
] | ohhyerin.oh@gmail.com |
57a86c8cef0f9b2ce056be15b4b0bd185e7a4f54 | 3c9e203640dd7a48c217a2676133e6aba7308e8c | /code/driving_models.py | 93574468339cb126c13739c9d8400721ad1b27c5 | [] | no_license | SeUnv/DeepBillboard | c8cecb5bdc4cb468bfa2c108c835ab5d0a47236e | 9ce636027059703bbe99897fdcd8e0623f4ffdb0 | refs/heads/master | 2021-10-21T07:48:19.848773 | 2019-03-03T12:18:13 | 2019-03-03T12:18:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,097 | py | # usage: python driving_models.py 1 - train the dave-orig model
from __future__ import print_function
import sys
from keras.models import Sequential
from keras.layers import Convolution2D, Input, Dense, Flatten, Lambda, MaxPooling2D, Dropout , Activation, SpatialDropout2D, merge
from keras import models, optimizers, backend
from keras.layers import core, convolutional, pooling, TimeDistributed, LSTM
from sklearn import model_selection
from configs import bcolors
from data_utils import load_train_data, load_test_data
from keras.optimizers import SGD
from keras.regularizers import l2
from utils import *
from collections import deque
from keras.models import model_from_json
def Dave_v2(input_tensor=None, load_weights=True):
    """Build the NVIDIA Dave-2 architecture (Keras 1 API) on 66x200 RGB input.

    Args:
        input_tensor: unused; kept for signature parity with the other
            model builders in this file.
        load_weights: when True, load the pretrained weights from disk.
            (Previously this flag was ignored and weights were always
            loaded.)

    Returns:
        A compiled Keras Sequential model.
    """
    model = Sequential()
    model.add(Convolution2D(24, 5, 5, init = normal_init, subsample= (2, 2), name='conv1_1', input_shape=(66, 200, 3)))
    model.add(Activation('relu'))
    model.add(Convolution2D(36, 5, 5, init = normal_init, subsample= (2, 2), name='conv2_1'))
    model.add(Activation('relu'))
    model.add(Convolution2D(48, 5, 5, init = normal_init, subsample= (2, 2), name='conv3_1'))
    model.add(Activation('relu'))
    model.add(Convolution2D(64, 3, 3, init = normal_init, subsample= (1, 1), name='conv4_1'))
    model.add(Activation('relu'))
    model.add(Convolution2D(64, 3, 3, init = normal_init, subsample= (1, 1), name='conv4_2'))
    model.add(Activation('relu'))
    model.add(Flatten())
    model.add(Dense(1164, init = normal_init, name = "dense_0"))
    model.add(Activation('relu'))
    model.add(Dense(100, init = normal_init, name = "dense_1"))
    model.add(Activation('relu'))
    model.add(Dense(50, init = normal_init, name = "dense_2"))
    model.add(Activation('relu'))
    model.add(Dense(10, init = normal_init, name = "dense_3"))
    model.add(Activation('relu'))
    model.add(Dense(1, init = normal_init, name = "dense_4"))
    # Map the raw output to a steering angle in radians.
    model.add(Lambda(atan_layer, output_shape = atan_layer_shape, name = "prediction"))
    if load_weights:
        model.load_weights('./models/dave2/dave2.hdf5')
    model.compile(loss = 'mse', optimizer = 'Adam')
    return model
def Dave_orig(input_tensor=None, load_weights=False):  # original dave
    """Build the original Dave architecture on 100x100 RGB input.

    Optionally loads pretrained weights from ./Model1.h5 and returns the
    compiled Keras Model.
    """
    if input_tensor is None:
        input_tensor = Input(shape=(100, 100, 3))
    net = Convolution2D(24, (5, 5), padding='valid', activation='relu', strides=(2, 2), name='block1_conv1')(input_tensor)
    net = Convolution2D(36, (5, 5), padding='valid', activation='relu', strides=(2, 2), name='block1_conv2')(net)
    net = Convolution2D(48, (5, 5), padding='valid', activation='relu', strides=(2, 2), name='block1_conv3')(net)
    net = Convolution2D(64, (3, 3), padding='valid', activation='relu', strides=(1, 1), name='block1_conv4')(net)
    net = Convolution2D(64, (3, 3), padding='valid', activation='relu', strides=(1, 1), name='block1_conv5')(net)
    net = Flatten(name='flatten')(net)
    # Fully-connected head, widest to narrowest.
    for units, layer_name in ((1164, 'fc1'), (100, 'fc2'), (50, 'fc3'), (10, 'fc4')):
        net = Dense(units, activation='relu', name=layer_name)(net)
    net = Dense(1, name='before_prediction')(net)
    net = Lambda(atan_layer, output_shape=atan_layer_shape, name='prediction')(net)
    m = Model(input_tensor, net)
    if load_weights:
        m.load_weights('./Model1.h5')
    # compiling
    m.compile(loss='mse', optimizer='adadelta')
    print(bcolors.OKGREEN + 'Model compiled' + bcolors.ENDC)
    return m
def One_to_radius(x):
    # Multiply the tensor by pi — presumably mapping a unit-range steering
    # output onto radians; confirm the producing layer's output range.
    return tf.multiply(x,math.pi)
def Dave_v3(input_tensor = None,load_weights = False):
    """Smaller Dave variant on 32x128 RGB input (Sequential API).

    `input_tensor` is unused and kept only for signature parity with the
    other builders in this file.
    """
    model = models.Sequential()
    model.add(convolutional.Convolution2D(16, 3, 3, input_shape=(32, 128, 3), activation='relu'))
    model.add(pooling.MaxPooling2D(pool_size=(2, 2)))
    model.add(convolutional.Convolution2D(32, 3, 3, activation='relu'))
    model.add(pooling.MaxPooling2D(pool_size=(2, 2)))
    model.add(convolutional.Convolution2D(64, 3, 3, activation='relu'))
    model.add(pooling.MaxPooling2D(pool_size=(2, 2)))
    model.add(core.Flatten())
    model.add(core.Dense(500, activation='relu'))
    #model.add(core.Dropout(.5))
    model.add(core.Dense(100, activation='relu'))
    #model.add(core.Dropout(.25))
    model.add(core.Dense(20, activation='relu'))
    model.add(core.Dense(1))
    # Scale the single raw output into radians.
    model.add(Lambda(One_to_radius, output_shape = atan_layer_shape, name = "prediction"))
    if load_weights:
        model.load_weights('./models/dave3/dave3.h5')
    model.compile(optimizer=optimizers.Adam(lr=1e-04), loss='mean_squared_error')
    return model
def Dave_norminit(input_tensor=None, load_weights=False):  # original dave with normal initialization
    """Dave architecture with normal-initialized dense layers.

    Optionally loads pretrained weights from ./Model2.h5 and returns the
    compiled Keras Model.
    """
    if input_tensor is None:
        input_tensor = Input(shape=(100, 100, 3))
    # (filters, kernel, strides, name) for the convolutional stack.
    conv_specs = (
        (24, (5, 5), (2, 2), 'block1_conv1'),
        (36, (5, 5), (2, 2), 'block1_conv2'),
        (48, (5, 5), (2, 2), 'block1_conv3'),
        (64, (3, 3), (1, 1), 'block1_conv4'),
        (64, (3, 3), (1, 1), 'block1_conv5'),
    )
    net = input_tensor
    for filters, kernel, strides, layer_name in conv_specs:
        net = Convolution2D(filters, kernel, padding='valid', activation='relu',
                            strides=strides, name=layer_name)(net)
    net = Flatten(name='flatten')(net)
    for units, layer_name in ((1164, 'fc1'), (100, 'fc2'), (50, 'fc3'), (10, 'fc4')):
        net = Dense(units, kernel_initializer=normal_init, activation='relu', name=layer_name)(net)
    net = Dense(1, name='before_prediction')(net)
    net = Lambda(atan_layer, output_shape=atan_layer_shape, name='prediction')(net)
    m = Model(input_tensor, net)
    if load_weights:
        m.load_weights('./Model2.h5')
    # compiling
    m.compile(loss='mse', optimizer='adadelta')
    print(bcolors.OKGREEN + 'Model compiled' + bcolors.ENDC)
    return m
def Dave_dropout(input_tensor=None, load_weights=False): # simplified dave
    """Simplified Dave with max-pooling and dropout on 100x100 RGB input.

    Optionally loads pretrained weights from ./Model3.h5 and returns the
    compiled Keras Model.
    """
    if input_tensor is None:
        input_tensor = Input(shape=(100, 100, 3))
    x = Convolution2D(16, (3, 3), padding='valid', activation='relu', name='block1_conv1')(input_tensor)
    x = MaxPooling2D(pool_size=(2, 2), name='block1_pool1')(x)
    x = Convolution2D(32, (3, 3), padding='valid', activation='relu', name='block1_conv2')(x)
    x = MaxPooling2D(pool_size=(2, 2), name='block1_pool2')(x)
    x = Convolution2D(64, (3, 3), padding='valid', activation='relu', name='block1_conv3')(x)
    x = MaxPooling2D(pool_size=(2, 2), name='block1_pool3')(x)
    x = Flatten(name='flatten')(x)
    x = Dense(500, activation='relu', name='fc1')(x)
    x = Dropout(.5)(x)
    x = Dense(100, activation='relu', name='fc2')(x)
    x = Dropout(.25)(x)
    x = Dense(20, activation='relu', name='fc3')(x)
    x = Dense(1, name='before_prediction')(x)
    x = Lambda(atan_layer, output_shape=atan_layer_shape, name="prediction")(x)
    m = Model(input_tensor, x)
    if load_weights:
        m.load_weights('./Model3.h5')
    # compiling
    m.compile(loss='mse', optimizer='adadelta')
    print(bcolors.OKGREEN + 'Model compiled' + bcolors.ENDC)
    return m
def rmse(y_true, y_pred):
    '''Calculates RMSE (root mean squared error) as a Keras backend tensor,
    suitable for use as a training/evaluation metric.
    '''
    return K.sqrt(K.mean(K.square(y_pred - y_true)))
# def chauffeur(input_tensor=None, load_weights=True):
# '''
# Creates an LstmModel using a model in the input_model_config
# @param model_path - s3 uri to save the model
# @param input_shape - timestepped shape (timesteps, feature dims)
# @param batch_input_shape - (batch_size, feature dims)
# @param timesteps - timesteps inclusive of the current frame
# (10 - current frame + 9 previous frames)
# @param loss - loss function on the model
# @param W_l2 - W_l2 regularization param
# @param metrics - metrics to track - (rmse, mse...)
# @param scale - factor by which to scale the labels
# '''
# input_shape = (10, 120, 320, 3)
# W_l2 = 0.001
# loss = 'mean_squared_error'
# model = Sequential()
# model.add(TimeDistributed(Convolution2D(24, 5, 5,
# init= "he_normal",
# activation='relu',
# subsample=(5, 4),
# border_mode='valid'), input_shape=input_shape))
# model.add(TimeDistributed(Convolution2D(32, 5, 5,
# init= "he_normal",
# activation='relu',
# subsample=(3, 2),
# border_mode='valid')))
# model.add(TimeDistributed(Convolution2D(48, 3, 3,
# init= "he_normal",
# activation='relu',
# subsample=(1,2),
# border_mode='valid')))
# model.add(TimeDistributed(Convolution2D(64, 3, 3,
# init= "he_normal",
# activation='relu',
# border_mode='valid')))
# model.add(TimeDistributed(Convolution2D(128, 3, 3,
# init= "he_normal",
# activation='relu',
# subsample=(1,2),
# border_mode='valid')))
# model.add(TimeDistributed(Flatten()))
# model.add(LSTM(64, dropout_W=0.2, dropout_U=0.2, return_sequences=True))
# model.add(LSTM(64, dropout_W=0.2, dropout_U=0.2, return_sequences=True))
# model.add(LSTM(64, dropout_W=0.2, dropout_U=0.2))
# model.add(Dropout(0.2))
# model.add(Dense(
# units=256,
# init='he_normal',
# activation='relu',
# kernel_regularizer=l2(W_l2)))
# model.add(Dropout(0.2))
# model.add(Dense(
# units=1,
# init='he_normal',
# kernel_regularizer=l2(W_l2), name='before_prediction'))
# model.add(Lambda(atan_layer, output_shape = atan_layer_shape, name = "prediction"))
# model.compile(loss=loss, optimizer='adadelta', metrics=[rmse])
# # print("+++++++++++++++++++++++++++++++++++++++++++++")
# # if load_weights:
# # model.load_weights('./models/chauffeur/lstm.weights')
# # print("LOAD FINISH")
# # print("=============================================")
# return model
class ChauffeurModel(object):
    """CNN + LSTM steering model loaded from pre-trained Keras JSON/weights.

    The CNN doubles as a feature encoder (its final layer is popped off);
    the LSTM consumes a rolling window of `timesteps` encoded frames.
    """
    def __init__(self,
                 cnn_json_path='./models/chauffeur/cnn.json',
                 cnn_weights_path='./models/chauffeur/cnn.weights',
                 lstm_json_path='./models/chauffeur/lstm.json',
                 lstm_weights_path='./models/chauffeur/lstm.weights'):
        self.cnn = self.load_from_json(cnn_json_path, cnn_weights_path)
        self.encoder = self.load_encoder(cnn_json_path, cnn_weights_path)
        self.lstm = self.load_from_json(lstm_json_path, lstm_weights_path)
        # Raw model outputs are divided by this factor before being returned.
        self.scale = 16.
        self.timesteps = 100
        # NOTE(review): this buffer appears unused — both predictors build
        # their own timestep arrays; confirm before removing.
        self.timestepped_x = np.empty((1, self.timesteps, 8960))
    def load_encoder(self, cnn_json_path, cnn_weights_path):
        # Rebuild the CNN and drop its final layer so the remaining network
        # emits the feature vector that feeds the LSTM.
        model = self.load_from_json(cnn_json_path, cnn_weights_path)
        model.load_weights(cnn_weights_path)
        model.layers.pop()
        model.outputs = [model.layers[-1].output]
        model.layers[-1].outbound_nodes = []
        return model
    def load_from_json(self, json_path, weights_path):
        # Deserialize a Keras architecture from JSON and attach its weights.
        model = model_from_json(open(json_path, 'r').read())
        model.load_weights(weights_path)
        return model
    def make_cnn_only_predictor(self):
        # Stateless predictor: single-frame CNN steering estimate.
        def predict_fn(img):
            # Preprocess: resize to 320x240, BGR->YUV, keep the bottom half,
            # equalize the luma channel, then normalize to [-0.5, 0.5].
            img = cv2.resize(img, (320, 240))
            img = cv2.cvtColor(img, cv2.COLOR_BGR2YUV)
            img = img[120:240, :, :]
            img[:, :, 0] = cv2.equalizeHist(img[:, :, 0])
            img = ((img-(255.0/2))/255.0)
            return self.cnn.predict_on_batch(img.reshape((1, 120, 320, 3)))[0, 0] / self.scale
        return predict_fn
    def make_stateful_predictor(self):
        # Stateful predictor: keeps a sliding window of encoded frames in
        # `steps` (closure state) and runs the LSTM over the whole window.
        steps = deque()
        def predict_fn(img):
            # preprocess image to be YUV 320x120 and equalize Y histogram
            img = cv2.resize(img, (320, 240))
            img = cv2.cvtColor(img, cv2.COLOR_BGR2YUV)
            img = img[120:240, :, :]
            img[:, :, 0] = cv2.equalizeHist(img[:, :, 0])
            img = ((img-(255.0/2))/255.0)
            # apply feature extractor
            img = self.encoder.predict_on_batch(img.reshape((1, 120, 320, 3)))
            # initial fill of timesteps: pad the window with the first frame
            if not len(steps):
                for _ in range(self.timesteps):
                    steps.append(img)
            # put most recent features at end
            steps.popleft()
            steps.append(img)
            timestepped_x = np.empty((1, self.timesteps, img.shape[1]))
            for i, img in enumerate(steps):
                timestepped_x[0, i] = img
            return self.lstm.predict_on_batch(timestepped_x)[0, 0] / self.scale
        return predict_fn
def calc_rmse(yhat, label):
    """Root-mean-squared error between two equal-length sequences.

    Elements may be numbers or numeric strings (coerced via float()).

    Returns:
        The RMSE as a float; 0.0 for empty input (previously this raised
        ZeroDivisionError); or -1 (with a message printed) when the
        sequences differ in length — the error-code convention existing
        callers rely on.
    """
    if len(yhat) != len(label):
        print ("yhat and label have different lengths")
        return -1
    if not yhat:
        # Avoid dividing by zero on empty input.
        return 0.0
    mse = sum((float(steering) - float(predicted)) ** 2.
              for predicted, steering in zip(yhat, label))
    return (mse / len(yhat)) ** 0.5
if __name__ == '__main__':
    # train the model
    batch_size = 256
    nb_epoch = 10
    # model selection comes from the first CLI argument: '1', '2' or '3'
    model_name = sys.argv[1]
    if model_name == '1':
        model = Dave_orig()
        save_model_name = './Model1.h5'
    elif model_name == '2':
        # K.set_learning_phase(1)
        model = Dave_norminit()
        save_model_name = './Model2.h5'
    elif model_name == '3':
        # K.set_learning_phase(1)
        model = Dave_dropout()
        save_model_name = './Model3.h5'
    else:
        print(bcolors.FAIL + 'invalid model name, must one of 1, 2 or 3' + bcolors.ENDC)
        # NOTE(review): execution falls through here with `model` undefined,
        # so a NameError follows below — consider sys.exit(1) instead.
    # the data, shuffled and split between train and test sets
    train_generator, samples_per_epoch = load_train_data(batch_size=batch_size, shape=(100, 100))
    # training
    model.fit_generator(train_generator,
                        steps_per_epoch=math.ceil(samples_per_epoch * 1. / batch_size),
                        epochs=nb_epoch,
                        workers=8,
                        use_multiprocessing=True)
    print(bcolors.OKGREEN + 'Model trained' + bcolors.ENDC)
    # evaluation
    K.set_learning_phase(0)
    test_generator, samples_per_epoch = load_test_data(batch_size=batch_size, shape=(100, 100))
    model.evaluate_generator(test_generator,
                             steps=math.ceil(samples_per_epoch * 1. / batch_size))
    # save model
    model.save_weights(save_model_name)
| [
"husheng.zhou@utdallas.edu"
] | husheng.zhou@utdallas.edu |
e020a43aa9bf4c84b6d4bcd929e8033592c30d60 | f3381cef5984b4100e64d12a25ed6429a177bf32 | /2.py | e69f223a8b5748fdd070981ab9112d4077b51b92 | [] | no_license | wzy-hb/python | e8a61c0d249b04dd6827bbfdce5d6cf27428a5e7 | 3174f7fe3d547c066f6f2bb740fd4b577e0f457f | refs/heads/master | 2021-01-25T23:24:29.053774 | 2020-02-27T14:22:29 | 2020-02-27T14:22:29 | 243,225,165 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 70 | py | #! /usr/bin/env python3
n = 0
while n < 11:
print (n)
n += 1
| [
"19990349786@163.com"
] | 19990349786@163.com |
f742f0e72baff1300e7da34e28620c6c7e0731fc | df3639d68ffc636b6bac700cf7a62ee756dcb343 | /checklists/urls.py | 26cc51757bbad2cb811e54e3e4dcefc7a517a3f9 | [
"MIT"
] | permissive | wedk/todomvc-django-ujs | c3b4862ddb66bfaa9a5f7c2c1ef46ca387258751 | 3909f2448ee32412f0eebe1007d34fb66cbf7185 | refs/heads/master | 2021-01-10T02:14:18.254744 | 2016-01-08T21:51:38 | 2016-01-08T21:51:38 | 49,298,272 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 846 | py | """checklists URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
urlpatterns = [
url(r'^admin/', include(admin.site.urls)),
url(r'^', include('projects.urls', namespace='projects')),
] | [
"wedk@users.noreply.github.com"
] | wedk@users.noreply.github.com |
05f8acf899356527afbdb989c352a308f1fe1d95 | eb2fe5eaafad67c4f75154b3e30ffef245d93cfd | /SortingVisualizer.py | a31f5a541538c5cc2a32eb04e43e21628404e909 | [] | no_license | Nordicade/SortingVisualizer | 608a34a57763f59223b3225190736995344bd0f2 | eed67d336cd84b0dec6268f8e9ea87f82d3d91df | refs/heads/master | 2020-06-24T08:38:44.408888 | 2019-08-13T19:59:17 | 2019-08-13T19:59:17 | 198,917,266 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 41,045 | py | # Created by Nicholas Nord
# July 22, 2019
#
import pygame
import time
import math
import random
import heapq
import matplotlib.pyplot as plt
pygame.init()
# --- Window size and palette ---
display_width = 1200
display_height = 600
black = (0,0,0)
# NOTE(review): gray carries a 4th component (alpha=1, i.e. near-transparent) — confirm intended.
gray = (214,214,214, 1)
white = (255,255,255)
red = (255, 0 , 0)
blue = (0, 0, 255)
# x, y, width, height of the bar-chart drawing area
visualizer_dim = 50,125,1100,300
# --- Runtime state / tunables ---
sorting_algo = 1             # 1=insertion, 2=selection, 3=merge, 4=quick, 5=heap
visualizer_array_size = 20   # build_line_array produces array_size-1 bars
n_factor = 6
delay = .5                   # seconds slept between animation frames
trial_count = 3
pause_UI = False
original_array = []          # bars in their pre-shuffle order
current_array = []           # bars as currently displayed/sorted
# Arrays holding timestamps for sorting arrays. (each element increases array size by 10^n where 0>n>10)
best_times = []
avg_times = []
worst_times = []
# pygame display surface and wall-clock reference for timing
display = pygame.display.set_mode((display_width,display_height))
pygame.display.set_caption('Data Sorting Visualizer')
start_time = time.time()
class line_element:
    """A single bar in the visualizer; its height is `line_length`."""

    def __init__(self):
        # BUG FIX: the original `self.line_length: 0` was a bare annotation
        # statement that never assigned the attribute, so a fresh instance
        # had no line_length until a caller set one. Use a real assignment
        # so the attribute always exists with a sane default.
        self.line_length = 0
def build_line_array(array_size):
    """Create line_element bars with lengths 1 .. array_size-1, in order."""
    elements = []
    for length in range(1, array_size):
        bar = line_element()
        bar.line_length = length
        elements.append(bar)
    return elements
def convert_line_array(array):
    """Wrap each numeric value of `array` in a line_element of that length."""
    wrapped = []
    for value in array:
        bar = line_element()
        bar.line_length = value
        wrapped.append(bar)
    return wrapped
def insertion_sort(unsorted_array):
    """Sort `unsorted_array` in place (ascending) and return it."""
    for i in range(len(unsorted_array)):
        key = unsorted_array[i]
        j = i - 1
        # Shift every larger element one slot right, then drop the key in.
        while j >= 0 and unsorted_array[j] > key:
            unsorted_array[j + 1] = unsorted_array[j]
            j -= 1
        unsorted_array[j + 1] = key
    return unsorted_array
def demo_insertion_sort():
    """Animate insertion sort on the global current_array, redrawing each shift."""
    global current_array
    global visualizer_dim
    global delay
    for index in range(1, len(current_array)):
        # select first element to be index
        current_element = current_array[index]
        current_length = (current_array[index]).line_length
        checking_index = index - 1
        # compare the index with the element that came before index
        while checking_index >= 0 and (current_array[checking_index]).line_length > current_length:
            # redraw the intermediate state, pause, then clear the chart interior
            draw_element_array(visualizer_dim[0],visualizer_dim[1],visualizer_dim[2],visualizer_dim[3])
            draw_outline((visualizer_dim[0],visualizer_dim[1],visualizer_dim[2],visualizer_dim[3]))
            time.sleep(delay)
            display.fill(white, (visualizer_dim[0]+1,visualizer_dim[1]+1,visualizer_dim[2]-2,visualizer_dim[3]-2))
            # if a swap is needed, keep shifting and overwriting elements until correct spot is found
            current_array[checking_index + 1] = current_array[checking_index]
            checking_index = checking_index - 1
        current_array[checking_index + 1] = current_element
    # final redraw of the fully sorted array
    display.fill(white, (visualizer_dim[0]+1,visualizer_dim[1]+1,visualizer_dim[2]-2,visualizer_dim[3]-2))
    draw_element_array(visualizer_dim[0],visualizer_dim[1],visualizer_dim[2],visualizer_dim[3])
    draw_outline((visualizer_dim[0],visualizer_dim[1],visualizer_dim[2],visualizer_dim[3]))
def selection_sort(unsorted_array):
    """In-place ascending selection sort; returns None."""
    length = len(unsorted_array)
    for boundary in range(length):
        # Find the smallest element in the unsorted tail ...
        smallest = boundary
        for candidate in range(boundary + 1, length):
            if unsorted_array[candidate] < unsorted_array[smallest]:
                smallest = candidate
        # ... and swap it to the front of the tail.
        unsorted_array[boundary], unsorted_array[smallest] = (
            unsorted_array[smallest], unsorted_array[boundary])
def demo_selection_sort():
    """Animate selection sort on the global current_array, redrawing after each pass."""
    global current_array
    global visualizer_dim
    global delay
    for sorting_index in range(len(current_array)):
        min_index = sorting_index
        for unsorted_index in range(sorting_index+1, len(current_array)):
            if current_array[min_index].line_length > current_array[unsorted_index].line_length:
                # set as current_min and proceed looking throughout list
                min_index = unsorted_index
        # swap current index with current min
        temp = current_array[sorting_index]
        current_array[sorting_index] = current_array[min_index]
        current_array[min_index] = temp
        # draw changes to display and sleep for desired delay
        display.fill(white, (visualizer_dim[0]+1,visualizer_dim[1]+1,visualizer_dim[2]-2,visualizer_dim[3]-2))
        draw_element_array(visualizer_dim[0],visualizer_dim[1],visualizer_dim[2],visualizer_dim[3])
        draw_outline((visualizer_dim[0],visualizer_dim[1],visualizer_dim[2],visualizer_dim[3]))
        time.sleep(delay)
# BIG help from https://www.pythoncentral.io/quick-sort-implementation-guide/
def quick_sort(unsorted_array):
    """Sort `unsorted_array` in place using median-of-three quicksort."""
    # kick off the recursion over the full index range
    quick_sort_recursive(unsorted_array, 0, len(unsorted_array) - 1)
# This method is called on repeat to divide the array into smaller chunks, finding new partitions each time
def quick_sort_recursive(unsorted_array, left_index, right_index):
    """Quicksort unsorted_array[left_index..right_index] (inclusive bounds)."""
    if left_index < right_index:
        # partition, then recurse into both halves, excluding the pivot itself
        pivot_index = quick_sort_partition(unsorted_array, left_index, right_index)
        quick_sort_recursive(unsorted_array, left_index, pivot_index - 1)
        quick_sort_recursive(unsorted_array, pivot_index + 1, right_index)
# this method finds the true position of the pivot point and sets unordered
# smaller/larger elements on their respective sides
def quick_sort_partition(unsorted_array, left_index, right_index):
    """Partition unsorted_array[left_index..right_index] around a median-of-three pivot.

    Returns the pivot's final index; values <= pivot end up to its left and
    values >= pivot to its right.
    """
    # pick a pivot using median-of-three (also sorts first/middle/last in place)
    middle_index = (left_index + right_index) // 2
    pivot_index = median_of_three(unsorted_array, left_index, middle_index, right_index)
    pivot_value = unsorted_array[pivot_index]
    smaller = left_index + 1
    larger = right_index
    true_pivot_found = False
    # park the pivot at the front while partitioning
    swap(unsorted_array, pivot_index, left_index)
    while not true_pivot_found:
        # advance past elements already on the correct side of the pivot
        while smaller <= larger and unsorted_array[smaller] <= pivot_value:
            smaller = smaller + 1
        while smaller <= larger and unsorted_array[larger] >= pivot_value:
            larger = larger - 1
        if larger < smaller:
            true_pivot_found = True
        else:
            swap(unsorted_array, smaller, larger)
    # BUG FIX: the pivot was parked at left_index above, so it must be moved
    # into its final slot `larger`. The original swapped pivot_index/left_index
    # again (merely undoing the parking swap), which left the pivot misplaced
    # and produced invalid partitions, e.g. for [5, 1, 4, 2, 3]. The demo
    # version of this routine already performs the correct final swap.
    swap(unsorted_array, left_index, larger)
    return larger
# helper that exchanges two elements of `array` in place
def swap(array, left_index, right_index):
    """Swap array[left_index] and array[right_index] using tuple unpacking."""
    array[left_index], array[right_index] = array[right_index], array[left_index]
# Reads the first/center/last positioned elements within array, sorts the 3, and returns the median element
def median_of_three(unsorted_array, left_index, middle_index, right_index):
    """Sort the three sampled elements in place; afterwards the middle index
    holds their median, and that index is returned as the pivot."""
    if unsorted_array[right_index] < unsorted_array[left_index]:
        swap(unsorted_array, left_index, right_index)
    if unsorted_array[middle_index] < unsorted_array[left_index]:
        swap(unsorted_array, left_index, middle_index)
    if unsorted_array[right_index] < unsorted_array[middle_index]:
        swap(unsorted_array, middle_index, right_index)
    pivot = middle_index
    return pivot
def demo_quick_sort():
    """Animate quicksort on the global current_array: draw, recurse, draw final state."""
    global current_array
    global visualizer_dim
    global delay
    # draw the starting (shuffled) state
    display.fill(white, (visualizer_dim[0]+1,visualizer_dim[1]+1,visualizer_dim[2]-2,visualizer_dim[3]-2))
    draw_element_array(visualizer_dim[0],visualizer_dim[1],visualizer_dim[2],visualizer_dim[3])
    draw_outline((visualizer_dim[0],visualizer_dim[1],visualizer_dim[2],visualizer_dim[3]))
    time.sleep(delay)
    demo_quick_sort_recursive(current_array, 0, len(current_array) - 1)
    # draw the fully sorted state
    display.fill(white, (visualizer_dim[0]+1,visualizer_dim[1]+1,visualizer_dim[2]-2,visualizer_dim[3]-2))
    draw_element_array(visualizer_dim[0],visualizer_dim[1],visualizer_dim[2],visualizer_dim[3])
    draw_outline((visualizer_dim[0],visualizer_dim[1],visualizer_dim[2],visualizer_dim[3]))
    time.sleep(delay)
def demo_quick_sort_recursive(unsorted_array, left_index, right_index):
    """Recursively animate quicksort over unsorted_array[left_index..right_index]."""
    if left_index < right_index:
        # partition (animated inside), redraw the whole array, then recurse on both halves
        pivot_index = demo_quick_sort_partition(unsorted_array, left_index, right_index)
        display.fill(white, (visualizer_dim[0]+1,visualizer_dim[1]+1,visualizer_dim[2]-2,visualizer_dim[3]-2))
        draw_element_array(visualizer_dim[0],visualizer_dim[1],visualizer_dim[2],visualizer_dim[3])
        draw_outline((visualizer_dim[0],visualizer_dim[1],visualizer_dim[2],visualizer_dim[3]))
        time.sleep(delay)
        demo_quick_sort_recursive(unsorted_array, left_index, pivot_index - 1)
        demo_quick_sort_recursive(unsorted_array, pivot_index + 1, right_index)
# this method finds the true position of the pivot point and sets unordered smaller/larger elements on their respective sides
def demo_quick_sort_partition(unsorted_array, left_index, right_index):
    """Animated partition of line_element bars (compares .line_length).

    Highlights the pivot and swap positions after each change. Note this
    version performs the correct final swap of left_index with `larger`.
    """
    # pick a pivot point using median of 3, sorting first/middle/last in the process
    middle_index = (left_index + (right_index)) //2
    pivot_index = demo_median_of_three(unsorted_array, left_index, middle_index, right_index)
    pivot_value = unsorted_array[pivot_index].line_length
    smaller = left_index + 1
    larger = right_index
    true_pivot_found = False
    # park the pivot at the front of the range while partitioning
    swap(unsorted_array, pivot_index, left_index)
    display.fill(white, (visualizer_dim[0]+1,visualizer_dim[1]+1,visualizer_dim[2]-2,visualizer_dim[3]-2))
    draw_element_array_comparison(visualizer_dim[0],visualizer_dim[1],visualizer_dim[2],visualizer_dim[3], pivot_index, pivot_index)
    draw_outline((visualizer_dim[0],visualizer_dim[1],visualizer_dim[2],visualizer_dim[3]))
    time.sleep(delay)
    while not true_pivot_found:
        # advance pointers past bars already on the correct side of the pivot
        while smaller <= larger and unsorted_array[smaller].line_length <= pivot_value:
            smaller = smaller + 1
        while smaller <= larger and unsorted_array[larger].line_length >= pivot_value:
            larger = larger - 1
        if larger < smaller:
            true_pivot_found = True
        else:
            swap(unsorted_array, smaller, larger)
            # show the swap that just happened
            display.fill(white, (visualizer_dim[0]+1,visualizer_dim[1]+1,visualizer_dim[2]-2,visualizer_dim[3]-2))
            draw_element_array_comparison(visualizer_dim[0],visualizer_dim[1],visualizer_dim[2],visualizer_dim[3], left_index, pivot_index)
            draw_outline((visualizer_dim[0],visualizer_dim[1],visualizer_dim[2],visualizer_dim[3]))
            time.sleep(delay)
    # move the parked pivot into its final position `larger`
    swap(unsorted_array, left_index, larger)
    display.fill(white, (visualizer_dim[0]+1,visualizer_dim[1]+1,visualizer_dim[2]-2,visualizer_dim[3]-2))
    draw_element_array_comparison(visualizer_dim[0],visualizer_dim[1],visualizer_dim[2],visualizer_dim[3], pivot_index, pivot_index)
    draw_outline((visualizer_dim[0],visualizer_dim[1],visualizer_dim[2],visualizer_dim[3]))
    time.sleep(delay)
    return larger
# Reads the first/center/last positioned elements within array, sorts the 3, and returns the median element
def demo_median_of_three(unsorted_array, left_index, middle_index, right_index):
    """Median-of-three pivot selection for line_element bars (compares .line_length).

    Sorts the three sampled bars in place and returns the middle index, which
    then holds the median bar.
    """
    if unsorted_array[right_index].line_length < unsorted_array[left_index].line_length:
        swap(unsorted_array, left_index, right_index)
    if unsorted_array[middle_index].line_length < unsorted_array[left_index].line_length:
        swap(unsorted_array, left_index, middle_index)
    if unsorted_array[right_index].line_length < unsorted_array[middle_index].line_length:
        swap(unsorted_array, middle_index, right_index)
    # Removed the leftover debug print that spammed the console on every
    # partition during the animated demo.
    return middle_index
# help from https://www.geeksforgeeks.org/merge-sort/
def merge_sort(unsorted_array):
    """Recursively merge-sort `unsorted_array` in place (ascending)."""
    if len(unsorted_array) <= 1:
        # base case: nothing to merge
        return
    mid = len(unsorted_array) // 2
    # slice into halves, sort each half, then merge back into the original list
    left_half = unsorted_array[:mid]
    right_half = unsorted_array[mid:]
    merge_sort(left_half)
    merge_sort(right_half)
    li = ri = out = 0
    # take the smaller head element from either half until one is exhausted
    while li < len(left_half) and ri < len(right_half):
        if left_half[li] < right_half[ri]:
            unsorted_array[out] = left_half[li]
            li += 1
        else:
            unsorted_array[out] = right_half[ri]
            ri += 1
        out += 1
    # copy whichever half still has leftovers
    while li < len(left_half):
        unsorted_array[out] = left_half[li]
        li += 1
        out += 1
    while ri < len(right_half):
        unsorted_array[out] = right_half[ri]
        ri += 1
        out += 1
def demo_merge_sort():
    """Animate merge sort on the global current_array via the recursive helper."""
    global current_array
    global visualizer_dim
    global delay
    # NOTE(review): the globals above are only actually used inside the helper.
    demo_merge_sort_recursive(current_array)
def demo_merge_sort_recursive(unsorted_array):
    """Animated recursive merge sort over line_element bars.

    Splits, recurses, shows the two sub-arrays side by side in the chart,
    merges them back, and redraws the full array once a call covers the
    entire current_array.
    """
    global current_array
    global visualizer_dim
    global delay
    left_sub, right_sub = [], []
    # this creates base case for merge sort recursive call
    if len(unsorted_array) > 1:
        middle = (len(unsorted_array)) // 2
        # splitting unsorted array into subarrays is easy when using python's slicing!
        left_sub = unsorted_array[:middle]   # [start, middle)
        right_sub = unsorted_array[middle:]  # [middle, end]
        demo_merge_sort_recursive(left_sub)
        demo_merge_sort_recursive(right_sub)
        # this displays the contents of each sub array to the screen
        i = 0
        while(i < len(left_sub)):
            current_array[i] = left_sub[i]
            i = i + 1
        j = 0
        while(j < len(right_sub)):
            current_array[j + len(left_sub)] = right_sub[j]
            j = j + 1
        # NOTE(review): writing sub-arrays into the head of current_array here only
        # matches positions for the outermost calls — verify intended for inner calls.
        display.fill(white, (visualizer_dim[0]+1,visualizer_dim[1]+1,visualizer_dim[2]-2,visualizer_dim[3]-2))
        draw_element_array_comparison(visualizer_dim[0],visualizer_dim[1],visualizer_dim[2],visualizer_dim[3], 1, len(left_sub) + 1)
        draw_outline((visualizer_dim[0],visualizer_dim[1],visualizer_dim[2],visualizer_dim[3]))
        time.sleep(delay)
        # at this point, merging subarrays begins (adding smallest element from sub, into merge arr)
        left_sub_index, right_sub_index, merge_sub_index = 0,0,0
        while(left_sub_index < len(left_sub)) and (right_sub_index < len(right_sub)):
            if left_sub[left_sub_index].line_length < right_sub[right_sub_index].line_length:
                unsorted_array[merge_sub_index] = left_sub[left_sub_index]
                left_sub_index = left_sub_index + 1
            else:
                unsorted_array[merge_sub_index] = right_sub[right_sub_index]
                right_sub_index = right_sub_index + 1
            merge_sub_index = merge_sub_index + 1
        # while loop above exits when one array is emptied. These loops add any remaining elements
        while (left_sub_index < len(left_sub)):
            unsorted_array[merge_sub_index] = left_sub[left_sub_index]
            left_sub_index = left_sub_index + 1
            merge_sub_index = merge_sub_index + 1
        while (right_sub_index < len(right_sub)):
            unsorted_array[merge_sub_index] = right_sub[right_sub_index]
            right_sub_index = right_sub_index + 1
            merge_sub_index = merge_sub_index + 1
    # once a call spans the whole array, publish it and draw the merged result
    if(len(unsorted_array) == len(current_array)):
        current_array = unsorted_array
        display.fill(white, (visualizer_dim[0]+1,visualizer_dim[1]+1,visualizer_dim[2]-2,visualizer_dim[3]-2))
        draw_element_array(visualizer_dim[0],visualizer_dim[1],visualizer_dim[2],visualizer_dim[3])
        draw_element_array(visualizer_dim[0],visualizer_dim[1],visualizer_dim[2],visualizer_dim[3])
        draw_outline((visualizer_dim[0],visualizer_dim[1],visualizer_dim[2],visualizer_dim[3]))
        time.sleep(delay)
# based on the basic example in https://docs.python.org/3/library/heapq.html#basic-examples
def heap_sort(array):
    """Return a new list with the elements of `array` in ascending order.

    Pushes every element onto a binary heap and pops them back off in heap
    order (always the current minimum). The input list is not modified.
    (Removed an unused `length = len(array)` local from the original.)
    """
    heap = []
    for value in array:
        heapq.heappush(heap, value)
    # range() is evaluated before the first pop, so it sees the full heap size
    return [heapq.heappop(heap) for _ in range(len(heap))]
def demo_heap_sort():
    """Animate heap sort on the global current_array.

    Phase 1: heapify the bar lengths and show the heap's array layout.
    Phase 2: repeatedly pop the minimum, placing it at the front of the
    sorted prefix while the remaining heap fills the tail, redrawing each step.
    """
    global current_array
    # extract line_length values from current array
    line_length_array = []
    line_length_heap = []
    # place into its own array, and convert into heapq
    for n in range(len(current_array)):
        line_length_array.append(current_array[n].line_length)
    for value in list(line_length_array):
        # NOTE(review): leftover debug print — consider removing.
        print(value)
        heapq.heappush(line_length_heap, value)
    # temp mirrors the heap so the on-screen array keeps its full size
    temp = []
    for value in list(line_length_heap):
        temp.append(value)
    size_of_heap = len(temp)
    for index in range(size_of_heap , len(line_length_array)):
        temp.append(line_length_array[index])
    # temp now holds heap values plus any excess from the current lengths
    line_heap = convert_line_array(temp)
    # print heapified layout to display
    current_array = line_heap
    display.fill(white, (visualizer_dim[0]+1,visualizer_dim[1]+1,visualizer_dim[2]-2,visualizer_dim[3]-2))
    draw_element_array(visualizer_dim[0],visualizer_dim[1],visualizer_dim[2],visualizer_dim[3])
    draw_element_array(visualizer_dim[0],visualizer_dim[1],visualizer_dim[2],visualizer_dim[3])
    draw_outline((visualizer_dim[0],visualizer_dim[1],visualizer_dim[2],visualizer_dim[3]))
    time.sleep(delay)
    # pop the minimum each round: sorted prefix grows, heap remainder fills the tail
    for index in range(len(temp)):
        smallest = heapq.heappop(temp)
        line_length_array[index] = smallest
        i = 0
        for remainder in range(index + 1, len(line_length_array)):
            line_length_array[remainder] = temp[i]
            i = i + 1
        current_array = convert_line_array(line_length_array)
        display.fill(white, (visualizer_dim[0]+1,visualizer_dim[1]+1,visualizer_dim[2]-2,visualizer_dim[3]-2))
        draw_element_array(visualizer_dim[0],visualizer_dim[1],visualizer_dim[2],visualizer_dim[3])
        draw_outline((visualizer_dim[0],visualizer_dim[1],visualizer_dim[2],visualizer_dim[3]))
        time.sleep(delay)
def draw_element_array(x, y, width, height):
    """Draw every bar of current_array as an outlined rectangle inside the chart.

    (x, y, width, height) is the chart area; each bar's width is an equal
    share of the chart width and its height scales with line_length.
    """
    global current_array
    width_var = (width / len(current_array))        # horizontal slot per bar
    height_var = (height/ len(current_array))       # vertical units per length step
    adjustment_var = (width / (2 * len(current_array)))  # centers each bar in its slot
    index = 1
    for element in current_array:
        element_rect = pygame.Rect(x + (index * width_var), y + height, width / len(current_array), (height_var * element.line_length))
        # anchor the bar's bottom edge to the chart's bottom edge
        element_rect.midbottom = x + (index * width_var) - adjustment_var ,(y + height)
        pygame.draw.rect(display, black, element_rect, 3)
        # NOTE(review): updating the display inside the loop redraws per bar — intentional animation effect?
        pygame.display.update()
        index = index + 1
def draw_element_array_comparison(x, y, width, height, comparison_a, comparison_b):
    """Draw the bar chart highlighting two 1-based positions being compared.

    The bar at comparison_a is filled blue, the one at comparison_b red, and
    all other bars are drawn as black outlines.
    """
    global current_array
    width_var = (width / len(current_array))
    height_var = (height/ len(current_array))
    adjustment_var = (width / (2 * len(current_array)))
    index = 1
    for element in current_array:
        if index == comparison_a:
            # first compared bar: solid blue
            element_rect = pygame.Rect(x + (index * width_var), y + height, width / len(current_array), (height_var * element.line_length))
            element_rect.midbottom = x + (index * width_var) - adjustment_var ,(y + height)
            pygame.draw.rect(display, blue, element_rect, 0)
        elif index == comparison_b:
            # second compared bar: solid red
            element_rect = pygame.Rect(x + (index * width_var), y + height, width / len(current_array), (height_var * element.line_length))
            element_rect.midbottom = x + (index * width_var) - adjustment_var ,(y + height)
            pygame.draw.rect(display, red, element_rect, 0)
        else:
            # all other bars: black outline
            element_rect = pygame.Rect(x + (index * width_var), y + height, width / len(current_array), (height_var * element.line_length))
            element_rect.midbottom = x + (index * width_var) - adjustment_var ,(y + height)
            pygame.draw.rect(display, black, element_rect, 3)
        pygame.display.update()
        index = index + 1
def initial_build():
    """Create the initial bar array, keep an ordered copy, shuffle the live one, draw the chart frame."""
    global original_array
    global current_array
    global visualizer_dim
    global visualizer_array_size
    element_array = build_line_array(visualizer_array_size)
    # shallow copy preserves the sorted ordering in original_array even after
    # element_array (== current_array) is shuffled below
    original_array = element_array.copy()
    current_array = element_array
    random.shuffle(element_array)
    draw_outline(visualizer_dim)
def draw_outline(dimensions):
    """Draw a closed black rectangle border for the (x, y, width, height) tuple."""
    x, y, width, height = dimensions[0], dimensions[1], dimensions[2], dimensions[3]
    # four corners, clockwise from top-left; closed=True joins last to first
    rectangle = [(x,y) , (x + width, y), (x+width , y+height), (x , y+height)]
    pygame.draw.lines(display, black,True,rectangle, 3)
def text_objects(text, font):
    """Render `text` in black with `font`; return the surface and its rect."""
    textSurface = font.render(text, True, black)
    return textSurface, textSurface.get_rect()
def retrieve_best_case(array_size):
    """Return the best-case input for the currently selected algorithm.

    For every implemented algorithm (1=insertion, 2=selection, 3=merge,
    4=quick with median-of-three, 5=heap) the best case is an already
    sorted array [0, 1, ..., array_size-1]. Any other value of
    sorting_algo falls through and returns None, exactly like the
    original branch-per-algorithm version.
    """
    global sorting_algo
    if sorting_algo in (1, 2, 3, 4, 5):
        return list(range(array_size))
def retrieve_worst_case(array_size):
    """Return a worst-case input of `array_size` ints for the selected algorithm.

    Returns None for an unrecognized sorting_algo, like the original.
    """
    global sorting_algo
    worst_case_array = []
    if(sorting_algo == 1):
        # worst case for insertion sort is an inversely sorted array
        for value in range(array_size):
            worst_case_array.insert(0,value)
        return worst_case_array
    if(sorting_algo == 2):
        # worst case for selection sort matches best case comparisons but maximizes swaps: inversely sorted
        for value in range(array_size):
            worst_case_array.insert(0,value)
        return worst_case_array
    if(sorting_algo == 3):
        # worst case for merge sort is when each pair comparison leads to a swap
        for value in range(array_size):
            worst_case_array.append(value)
        worst_case_array = merge_scramble(worst_case_array)
        return worst_case_array
    if(sorting_algo == 4):
        # worst case target: make the median-of-three pivot land on the
        # second-to-smallest/largest elements
        for value in range(array_size):
            worst_case_array.append(value)
        left, right = 0, len(worst_case_array) - 1
        # BUG FIX: the original computed `right - left // 2`, which by operator
        # precedence is `right - (left // 2)` and points at (or near) the LAST
        # element, not the middle one the comment describes. Use the same
        # midpoint formula as quick_sort_partition.
        middle = (left + right) // 2
        while(right - left >= 4):
            # swap middle element with second-from-front of the shrinking window
            swap(worst_case_array, middle, left + 1)
            left = left + 2
            middle = (left + right) // 2
        return worst_case_array
    if(sorting_algo == 5):
        # worst case for heap sort is similar to best case, but with more swaps
        for value in range(array_size):
            worst_case_array.insert(0,value)
        return worst_case_array
# helper method that inversely builds worst case
# (help from https://stackoverflow.com/questions/24594112/when-will-the-worst-case-of-merge-sort-occur)
def merge_scramble(sorted_array):
    """Rearrange a sorted array (in place and returned) so merge sort hits its
    worst case: elements are de-interleaved into halves recursively so every
    merge comparison alternates sides.
    """
    if len(sorted_array) == 1:
        return sorted_array
    # two-element base: swap so the pair arrives out of order
    if len(sorted_array) == 2:
        swap(sorted_array, 0, 1)
        # NOTE(review): no return here, so the length-2 case continues into the
        # split/merge below — confirm intended.
    i, j = 0,0
    middle = (len(sorted_array) + 1) // 2
    left_sub, right_sub = [], []
    # slices only size the sub-lists; contents are overwritten below
    left_sub = sorted_array[:middle]
    right_sub = sorted_array[middle:]
    # even-indexed elements go left, odd-indexed go right (de-interleave)
    while(i < len(sorted_array)):
        left_sub[j] = sorted_array[i]
        i = i + 2
        j = j + 1
    i, j = 1,0
    while(i < len(sorted_array)):
        right_sub[j] = sorted_array[i]
        i = i + 2
        j = j + 1
    merge_scramble(left_sub)
    merge_scramble(right_sub)
    # concatenate the scrambled halves back into sorted_array
    merge(sorted_array, left_sub, right_sub)
    return sorted_array
def merge(sorted_array, left_sub, right_sub):
    """Overwrite sorted_array in place with left_sub followed by right_sub.

    Assumes len(sorted_array) >= len(left_sub) + len(right_sub); a shorter
    target raises IndexError, as in the original.
    """
    for offset, value in enumerate(left_sub):
        sorted_array[offset] = value
    base = len(left_sub)
    for offset, value in enumerate(right_sub):
        sorted_array[base + offset] = value
def retrieve_avg_case(array_size):
    """Return a uniformly shuffled permutation of 0 .. array_size-1 (average case)."""
    values = list(range(array_size))
    random.shuffle(values)
    return values
def sorting_switch(integer):
    """Run the demo animation selected by `integer` (1=insertion ... 5=heap).

    An unknown value raises KeyError, matching the original dict lookup.
    """
    demo = {
        1: demo_insertion_sort,
        2: demo_selection_sort,
        3: demo_merge_sort,
        4: demo_quick_sort,
        5: demo_heap_sort,
    }[integer]
    demo()
def draw_sort_buttons():
    """Draw the UI chrome: five algorithm buttons on top, three control
    buttons (play, restart, graph) along the bottom, plus their icons/labels."""
    # top row: five equal-width algorithm buttons
    b1 = pygame.Rect(0,0,(display_width /5) * 1, 100)
    b2 = pygame.Rect((display_width /5) * 1,0,(display_width /5) * 1, 100)
    b3 = pygame.Rect((display_width /5) * 2,0,(display_width /5) * 1, 100)
    b4 = pygame.Rect((display_width /5) * 3,0,(display_width /5) * 1, 100)
    b5 = pygame.Rect((display_width /5) * 4,0,(display_width /5) * 1, 100)
    # bottom row: play / restart / graph control buttons
    b6 = pygame.Rect((display_width /7) * 1,450,(display_width /7) * 1, 100)
    b7 = pygame.Rect((display_width /7) * 3,450,(display_width /7) * 1, 100)
    b8 = pygame.Rect((display_width /7) * 5,450,(display_width /7) * 1, 100)
    # each button: black frame with a gray inset
    pygame.draw.rect(display, black,b1)
    pygame.draw.rect(display, gray, (b1.x + 5, b1.y + 5, b1.width - 10, b1.height - 10))
    pygame.draw.rect(display, black,b2)
    pygame.draw.rect(display, gray, (b2.x + 5, b2.y + 5, b2.width - 10, b2.height - 10))
    pygame.draw.rect(display, black,b3)
    pygame.draw.rect(display, gray, (b3.x + 5, b3.y + 5, b3.width - 10, b3.height - 10))
    pygame.draw.rect(display, black,b4)
    pygame.draw.rect(display, gray, (b4.x + 5, b4.y + 5, b4.width - 10, b4.height - 10))
    pygame.draw.rect(display, black,b5)
    pygame.draw.rect(display, gray, (b5.x + 5, b5.y + 5, b5.width - 10, b5.height - 10))
    pygame.draw.rect(display, black,b6)
    pygame.draw.rect(display, gray, (b6.x + 5, b6.y + 5, b6.width - 10, b6.height - 10))
    pygame.draw.rect(display, black,b7)
    pygame.draw.rect(display, gray, (b7.x + 5, b7.y + 5, b7.width - 10, b7.height - 10))
    pygame.draw.rect(display, black,b8)
    pygame.draw.rect(display, gray, (b8.x + 5, b8.y + 5, b8.width - 10, b8.height - 10))
    # play triangle (b6) and circular restart arrow (b7)
    play_icon = [(225, 475), (225, 525), (280, 500), (225, 475)]
    restart_arrow = [(578,470),(598,460),(598,480),(578,470)]
    cover_up = [(598, 500),(598,460),(560, 480),(598,500)]
    pygame.draw.polygon(display, black, play_icon)
    pygame.draw.circle(display, black, (598, 500), 35, 5)
    # gray wedge breaks the circle so the arrowhead reads as a restart icon
    pygame.draw.polygon(display, gray, cover_up)
    pygame.draw.polygon(display, black, restart_arrow)
    # three rising bars: the graph button icon (b8)
    pygame.draw.rect(display, black, (887,520,30,20))
    pygame.draw.rect(display, black, (927,490,30,50))
    pygame.draw.rect(display, black, (967,460,30,80))
    # algorithm labels
    draw_text(b1.x + (b1.width / 4), b1.y+ (b1.height / 3), "Insertion")
    draw_text(b2.x + (b2.width / 4), b2.y+ (b2.height / 3), "Selection")
    draw_text(b3.x + (b3.width / 4), b3.y+ (b3.height / 3), "Merge")
    draw_text(b4.x + (b4.width / 4), b4.y+ (b4.height / 3), "Quick")
    draw_text(b5.x + (b5.width / 4), b5.y+ (b5.height / 3), "Heap")
def draw_text(x, y, text):
    """Render `text` in black at (x, y) using a size-35 Arial system font."""
    pygame.font.init()
    small_text = pygame.font.SysFont("arial.ttf", 35)
    text_surface = small_text.render(text, True, black)
    display.blit(text_surface, (x, y))
def record_sorting_time(unsorted_array, case):
global sorting_algo
global worst_times
global avg_times
global best_times
if(sorting_algo == 1):
sort_time_start = time.time() - start_time
insertion_sort(unsorted_array)
sort_time_end = time.time() - start_time
if(case == 0):
worst_times.append(sort_time_end - sort_time_start)
elif(case == 1):
avg_times.append(sort_time_end - sort_time_start)
else:
best_times.append(sort_time_end - sort_time_start)
if(sorting_algo == 2):
sort_time_start = time.time() - start_time
selection_sort(unsorted_array)
sort_time_end = time.time() - start_time
if(case == 0):
worst_times.append(sort_time_end - sort_time_start)
elif(case == 1):
avg_times.append(sort_time_end - sort_time_start)
else:
best_times.append(sort_time_end - sort_time_start)
if(sorting_algo == 3):
sort_time_start = time.time() - start_time
merge_sort(unsorted_array)
sort_time_end = time.time() - start_time
if(case == 0):
worst_times.append(sort_time_end - sort_time_start)
elif(case == 1):
avg_times.append(sort_time_end - sort_time_start)
else:
best_times.append(sort_time_end - sort_time_start)
if(sorting_algo == 4):
sort_time_start = time.time() - start_time
quick_sort(unsorted_array)
sort_time_end = time.time() - start_time
if(case == 0):
worst_times.append(sort_time_end - sort_time_start)
elif(case == 1):
avg_times.append(sort_time_end - sort_time_start)
else:
best_times.append(sort_time_end - sort_time_start)
if(sorting_algo == 5):
sort_time_start = time.time() - start_time
result = heap_sort(unsorted_array)
sort_time_end = time.time() - start_time
if(case == 0):
worst_times.append(sort_time_end - sort_time_start)
elif(case == 1):
avg_times.append(sort_time_end - sort_time_start)
else:
best_times.append(sort_time_end - sort_time_start)
def main_loop():
global pause_UI
global sorting_algo
global current_array
global n_factor
global trial_count
global visualizer_array_size
global worst_times
global avg_times
global best_times
exit_request = False
x = []
draw_sort_buttons()
draw_element_array(visualizer_dim[0],visualizer_dim[1],visualizer_dim[2],visualizer_dim[3])
while not exit_request:
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
quit()
if event.type == pygame.MOUSEBUTTONDOWN and pause_UI is False:
if pygame.mouse.get_pos()[1] <= 100:
if pygame.mouse.get_pos()[0] <= 240 and pygame.mouse.get_pos()[0] > 0:
sorting_algo = 1
draw_sort_buttons()
pygame.draw.rect(display, white, (5, 5, 230, 90))
draw_text(60, 33.3, "Insertion")
if pygame.mouse.get_pos()[0] <= 480 and pygame.mouse.get_pos()[0] > 240:
sorting_algo = 2
draw_sort_buttons()
pygame.draw.rect(display, white, (240 + 5, 5, 230, 90))
draw_text(300, 33.3, "Selection")
if pygame.mouse.get_pos()[0] <= 720 and pygame.mouse.get_pos()[0] > 480:
sorting_algo = 3
draw_sort_buttons()
pygame.draw.rect(display, white, (480+5, 5, 230, 90))
draw_text(540, 33.3, "Merge")
if pygame.mouse.get_pos()[0] <= 960 and pygame.mouse.get_pos()[0] > 720:
sorting_algo = 4
draw_sort_buttons()
pygame.draw.rect(display, white, (720+5, 5, 230, 90))
draw_text(780,33.3, "Quick")
if pygame.mouse.get_pos()[0] <= 1200 and pygame.mouse.get_pos()[0] > 960:
sorting_algo = 5
draw_sort_buttons()
pygame.draw.rect(display, white, (960 + 5, 5, 230, 90))
draw_text(1020, 33.3, "Heap")
if pygame.mouse.get_pos()[1] >= 450 and pygame.mouse.get_pos()[1] < 550:
if pygame.mouse.get_pos()[0] >= 171 and pygame.mouse.get_pos()[0] < 342:
print("example button for play")
pause_UI = True
sorting_switch(sorting_algo)
pause_UI = False
if pygame.mouse.get_pos()[0] >= 513 and pygame.mouse.get_pos()[0] < 684:
print("example button for restart")
new_array = build_line_array(visualizer_array_size)
random.shuffle(new_array)
current_array = new_array
display.fill(white, (visualizer_dim[0]+1,visualizer_dim[1]+1,visualizer_dim[2]-2,visualizer_dim[3]-2))
draw_element_array(visualizer_dim[0],visualizer_dim[1],visualizer_dim[2],visualizer_dim[3])
if pygame.mouse.get_pos()[0] >= 855 and pygame.mouse.get_pos()[0] < 1026:
print("example button for stats")
# find time cost for sorting arrays of increasing size (max size determined by n_factor)
for index in range(1, n_factor):
multiplier = int(math.pow(10, index))
#multiplier = int(500 * index)
x.append(multiplier)
# run test on the same sized array x number of times (where x = trial_count)
for trial in range(trial_count):
best_case = retrieve_best_case(multiplier)
worst_case = retrieve_worst_case(multiplier)
avg_case = retrieve_avg_case(multiplier) #int(1000 * index)
record_sorting_time(best_case, 2)
record_sorting_time(avg_case, 1)
record_sorting_time(worst_case, 0)
best_case = []
worst_case = []
avg_case = []
# remove all time trials for same sized array and replace with 1 average time
if len(best_times) == (index - 1) + trial_count:
sum_time = 0
for sum_count in range(trial_count):
removed_element = best_times.pop(-1)
sum_time = sum_time + removed_element
best_times.append(sum_time / trial_count)
sum_time = 0
for sum_count in range(trial_count):
removed_element = avg_times.pop(-1)
sum_time = sum_time + removed_element
avg_times.append(sum_time / trial_count)
sum_time = 0
for sum_count in range(trial_count):
removed_element = worst_times.pop(-1)
sum_time = sum_time + removed_element
worst_times.append(sum_time / trial_count)
print_val = len(best_times)
print(print_val)
print("best at " + str(index)+ " " + str(best_times[print_val - 1]))
print("avg at " + str(index)+ " " + str(avg_times[print_val - 1]))
print("worst at " + str(index)+ " " + str(worst_times[print_val - 1]))
print(len(x))
print(len(best_times))
plt.plot(x, best_times, label = "Best case")
plt.plot(x, avg_times, label = "Average case")
plt.plot(x, worst_times, label = "Worst case")
plt.xlabel("Elements within list")
plt.ylabel("Time elapsed in seconds")
plt.legend()
if(sorting_algo == 1):
plt.title("Complexity of Insertion Sort")
if(sorting_algo == 2):
plt.title("Complexity of Selection Sort")
if(sorting_algo == 3):
plt.title("Complexity of Merge Sort")
if(sorting_algo == 4):
plt.title("Complexity of Quick Sort")
if(sorting_algo == 5):
plt.title("Complexity of Heap Sort")
plt.show()
best_times = []
avg_times = []
worst_times = []
x = []
pygame.display.update()
print("--- %s seconds ---" % (time.time() - start_time))
display.fill(white)
initial_build()
main_loop()
pygame.quit()
quit()
| [
"46762778+Nordicade@users.noreply.github.com"
] | 46762778+Nordicade@users.noreply.github.com |
3a21f478e29c918e025b224c83bff5371026c2fe | 56d558d7ff259b66850c0ee6558c3121867f987f | /test01/test02_10_02/example_4.py | 8605bf09528d716d557ef4d9f72a7f65219c7983 | [] | no_license | kongjingchun/PyTest | 9f97e42d03206091bfe6e78f22a967ae242a37fc | ad88c51b5482e5b32c35bd7d4049636f11786d74 | refs/heads/master | 2023-05-25T22:18:05.914096 | 2021-06-05T14:27:04 | 2021-06-05T14:27:04 | 341,505,772 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 775 | py | # coding:utf-8
# @Create time: 2021/5/25 12:26 上午
# @Author: KongJingchun
# @remark: 异常处理、开启事务
import mysql.connector
confiig = {
"host": "49.233.5.13",
"port": "3306",
"user": "root",
"password": "Jingchun0302!",
"database": "demo",
"auth_plugin": 'mysql_native_password'
}
try:
con = mysql.connector.connect(**confiig)
con.start_transaction()
cursor = con.cursor()
sql = "insert into t_emp(empno,ename, job, mgr, hiredate, sal, comm, deptno) values (%s,%s,%s,%s,%s,%s,%s,%s);"
cursor.execute(sql, (666, "kjc", "admin", 7839, "2020-02-20", 666, 666, 10))
con.commit()
except Exception as e:
if "con" in dir():
con.rollback()
print(e)
finally:
if "con" in dir():
con.close()
| [
"jing20130808@vip.qq.com"
] | jing20130808@vip.qq.com |
4528cfbfc0052ddbceffbff345f8f7d6fa21f310 | 3b940d3180fb3ffc5070de8c8aaed26c79df2917 | /Code/augment_bird_images.py | 1ccb78f23f76f4916e6eec2ab93b4394293671a6 | [] | no_license | sumanbhagavathula/kaggle | c6b4d1c22c9b78b5551ad1825510d627fb9f518a | ee4f9ca5863dcf9d863671651955aec034597cec | refs/heads/master | 2021-06-16T09:13:37.368719 | 2017-06-05T05:38:40 | 2017-06-05T05:38:40 | 92,707,442 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 890 | py | from io import BytesIO
from matplotlib.pyplot import imshow
from IPython.display import display
import copy
import numpy as np
from PIL import Image, ImageOps # PIL is the Python Imaging Library
import requests
import imageaugmentationfunctions as imgaug
import utilityfunctions as util
imgtype = 'test'
#images_dir = '/home/ubuntu/src/tensorflow/tensorflow/models/image/imagenet/TUTORIAL_DIR/images/'
imagedir = r'C:\Users\sumabh\OneDrive\MLDS\UW-MSDS\DATA558\GitHub\kaggle\data'
if imgtype == 'train':
augmentedimagedir = r'C:\Users\sumabh\desktop\augimages'
if imgtype == 'test':
augmentedimagedir = r'C:\Users\sumabh\desktop\augimages'
imgfilelist = util.list_all_files(imagedir,imgtype)
for imgfile in imgfilelist:
img = imgaug.image_open(imgfile)
id_img = imgaug.identity_image(img)
imgaug.image_save(augmentedimagedir, img)
break
| [
"noreply@github.com"
] | noreply@github.com |
8d692a48c5cb69d40c2ae0bf1496337292cdc744 | 4544f5cc683d1647c3c57da798806cd7c79f22a4 | /datasets/image_dataset.py | 610bd4afbe5c558cca16595dad25c30873472b0b | [] | no_license | zhangyahui520/myDetection | 86ba44e33279abdb3ccd3c04360577e97aa55874 | ff434c05c5170d119fab63e589cf4cdfdce89660 | refs/heads/master | 2022-11-05T14:08:19.352038 | 2020-06-25T20:36:01 | 2020-06-25T20:36:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,735 | py | import os
import json
import random
from collections import defaultdict
import PIL.Image
import torch
import torchvision.transforms.functional as tvf
from torch.utils.data import Dataset, DataLoader
import utils.image_ops as imgUtils
import utils.augmentation as augUtils
import utils.mask_ops as maskUtils
from utils.structures import ImageObjects
class ImageDataset(Dataset):
"""
Dataset for training object detection CNNs.
Args:
dataset_cfg:
img_dir: str, imgs folder
ann_path: str, path to the annotation file
ann_bbox_format: str, e.g., 'x1y1wh' for COCO
global_cfg: global config
"""
def __init__(self, dataset_cfg: dict, global_cfg: dict):
self.img_dir = dataset_cfg['img_dir']
self.ann_bbox_format = dataset_cfg['ann_bbox_format']
self.img_size = global_cfg['train.initial_imgsize']
self.input_format = global_cfg['general.input_format']
self.aug_setting = global_cfg['train.data_augmentation']
self.input_divisibility = global_cfg['general.input_divisibility']
self.skip_crowd_ann = True
self.skip_crowd_img = False
self.skip_empty_img = True
self.HEM = global_cfg['train.hard_example_mining']
self.img_ids = []
self.imgId2info = dict()
self.imgId2anns = defaultdict(list)
self.catId2idx = dict()
self.catIdx2id = []
self.categories = []
self._load_json(dataset_cfg['ann_path'])
def _load_json(self, json_path):
'''load json file'''
print(f'Loading annotations {json_path} into memory...')
with open(json_path, 'r') as f:
json_data = json.load(f)
if self.ann_bbox_format in {'x1y1wh', 'cxcywh'}:
bb_param = 4
elif self.ann_bbox_format == 'cxcywhd':
bb_param = 5
else:
raise Exception('Bounding box format is not supported')
for img in json_data['images']:
self.imgId2info[img['id']] = img
self.categories = json_data['categories']
for idx, cat in enumerate(json_data['categories']):
self.catId2idx[cat['id']] = idx
self.catIdx2id = [cat['id'] for cat in self.categories]
for ann in json_data['annotations']:
# Parse bounding box annotation
assert len(ann['bbox']) == bb_param
if self.skip_crowd_ann and ann['iscrowd']:
continue
# category inddex
ann['cat_idx'] = self.catId2idx[ann['category_id']]
# segmentation mask
imgInfo = self.imgId2info[ann['image_id']]
imh, imw = imgInfo['height'], imgInfo['width']
if ann.get('segmentation', []) != []:
ann['rle'] = maskUtils.segm2rle(ann.pop('segmentation'), imh, imw)
self.imgId2anns[ann['image_id']].append(ann)
for img in json_data['images']:
img_id = img['id']
anns = self.imgId2anns[img_id]
if self.skip_crowd_img and any(ann['iscrowd'] for ann in anns):
# if there is crowd gt, skip this image
continue
if self.skip_empty_img and len(anns) == 0:
# if there is no object in this image, skip this image
continue
self.img_ids.append(img_id)
self._length = len(self.img_ids)
if self.HEM is None:
self.hem_state = {}
elif self.HEM == 'hardest':
raise NotImplementedError()
self.hem_state = {
'iter': -1,
'APs': torch.ones(self._length)
}
elif self.HEM == 'probability':
self.hem_state = {
'iter': -1,
'APs': torch.zeros(self._length),
'counts': torch.zeros(self._length, dtype=torch.long)
}
else:
raise NotImplementedError()
# breakpoint()
def get_hem_index(self):
assert self.hem_state is not None
self.hem_state['iter'] += 1
if self.HEM is None:
# _iter = self.hem_state['iter'] % self._length
# if _iter == 0:
# self.hem_state['order'] = torch.randperm(self._length)
# index = self.hem_state['order'][_iter].item()
raise Exception()
elif self.HEM == 'probability':
probs = -torch.log(self.hem_state['APs'] + 1e-8)
index = torch.multinomial(probs, num_samples=1)
else:
raise NotImplementedError()
self.hem_state['counts'][index] += 1
return index
def update_ap(self, img_idx, aps):
momentum = 0.8
prev = self.hem_state['APs'][img_idx]
self.hem_state['APs'][img_idx] = momentum*prev + (1-momentum)*aps
def __len__(self):
return len(self.img_ids)
def __getitem__(self, index):
"""
Get an image-label pair
"""
mosaic = self.aug_setting['mosaic'] if self.aug_setting is not None else None
if mosaic:
raise NotImplementedError()
pairs = []
_additional = [random.randint(0, len(self.img_ids)-1) for _ in range(3)]
indices = [index] + _additional
for idx in range(index):
img_label_pair = self._load_single_pil(idx, to_square=False)
pairs.append(img_label_pair)
img_label_pair = augUtils.mosaic(pairs, self.img_size)
elif self.HEM is not None:
# Hard example mining
index = self.get_hem_index()
img_label_pair = self._load_single_pil(index, to_square=True)
else:
# Normal sequential sampling
img_label_pair = self._load_single_pil(index, to_square=True)
img, labels, img_id, pad_info = img_label_pair
# Convert PIL.image to torch.Tensor with shape (3,h,w) if it's not
if isinstance(img, PIL.Image.Image):
img = tvf.to_tensor(img)
else:
assert isinstance(img, torch.FloatTensor)
assert isinstance(labels, ImageObjects)
# Noise augmentation
if self.aug_setting is not None:
# blur = [augUtils.random_avg_filter, augUtils.max_filter,
# augUtils.random_gaussian_filter]
# if torch.rand(1).item() > 0.7:
# blur_func = random.choice(blur)
# img = blur_func(img)
if torch.rand(1).item() > 0.7:
p = self.aug_setting.get('satpepper_noise_density', 0.02)
img = augUtils.add_saltpepper(img, max_p=p)
# Convert into desired input format, e.g., normalized
img = imgUtils.format_tensor_img(img, code=self.input_format)
# Remove annotations which are too small
label_areas = labels.bboxes[:,2] * labels.bboxes[:,3]
labels = labels[label_areas >= 32]
# sanity check before return
if (labels.bboxes[:,0:2] > self.img_size).any():
print('Warning: some x,y in ground truth are greater than image size')
print('image id:', img_id)
# if (labels.bboxes[:,2:4] > self.img_size).any():
# print('Warning: some w,h in ground truth are greater than image size')
# print('image path:', img_path)
if (labels.bboxes[:,0:4] < 0).any():
print('Warning: some bbox in ground truth are smaller than 0')
print('image id:', img_id)
labels.bboxes[:,0:4].clamp_(min=0)
assert img.dim() == 3 and img.shape[1] == img.shape[2]
pair = {
'image': img,
'labels': labels,
'index': index,
'image_id': img_id,
'pad_info': pad_info,
'anns': {
'images': [self.imgId2info[img_id]],
'annotations': self.imgId2anns[img_id],
'categories': self.categories
}
}
return pair
# def _load_concat_frames(self, index, to_square=True) -> tuple:
# raise NotImplementedError()
# # load the image
# img_id = self.img_ids[index]
# img_name = self.imgId2info[img_id]['file_name']
# img_path = os.path.join(self.img_dir, img_name)
# img = imgUtils.imread_pil(img_path)
# # get labels
# anns = self.imgId2anns[img_id]
# labels = self._ann2labels(anns, img.height, img.width, self.bb_format)
# assert labels.masks is not None
# # if dataset is not videos, try to generate previous frames
# bg_img_dir = self.img_dir + '_np' # background image path
# assert os.path.exists(bg_img_dir)
# bg_path = os.path.join(bg_img_dir, img_name)
# background = imgUtils.imread_pil(bg_path)
# # import numpy as np; import matplotlib.pyplot as plt;
# # plt.imshow(np.array(img)); plt.show()
# # plt.imshow(np.array(background)); plt.show()
# t_interval = 1 / self.aug_setting['simulation_fps']
# augUtils.random_place(img, labels, background, dt=t_interval)
# labels.masks
# debug = 1
# # augUtils.augment_PIL()
# # return (img, labels, img_id, pad_info)
def _load_single_pil(self, index, to_square=True) -> tuple:
'''
One image-label pair for the given index is picked up and pre-processed.
Args:
index: image index
to_square: if True, the image will be pad to square
Returns:
img: PIL.Image
labels:
img_id:
pad_info:
'''
# load the image
img_id = self.img_ids[index]
imgInfo = self.imgId2info[img_id]
img_name = imgInfo['file_name']
img_path = os.path.join(self.img_dir, img_name)
img = imgUtils.imread_pil(img_path)
assert imgInfo['height'] == img.height and imgInfo['width'] == img.width
# get annotations
anns = self.imgId2anns[img_id]
labels = self._ann2labels(anns, img.height, img.width, self.ann_bbox_format)
# augmentation
if self.aug_setting is not None:
img, labels = augUtils.augment_PIL([img], [labels], self.aug_setting)
img, labels = img[0], labels[0]
# pad to square
aug_flag = (self.aug_setting is not None)
if to_square:
img, labels, pad_info = imgUtils.rect_to_square(img, labels,
self.img_size, aug=aug_flag, resize_step=self.input_divisibility)
else:
pad_info = None
return (img, labels, img_id, pad_info)
@staticmethod
def _ann2labels(anns, img_h, img_w, ann_format):
# If the dataset is using (x1,y1,w,h), convert to (cx,cy,w,h)
if ann_format == 'x1y1wh':
bboxes = []
for ann in anns:
_b = ann['bbox']
_cxcywh = [_b[0]+_b[2]/2, _b[1]+_b[3]/2, _b[2], _b[3]]
bboxes.append(_cxcywh)
ann_format = 'cxcywh'
elif ann_format in {'cxcywh', 'cxcywhd'}:
bboxes = [a['bbox'] for a in anns]
else:
raise NotImplementedError()
cat_idxs = [a['cat_idx'] for a in anns]
if 'rle' not in anns[0]:
rles = None
else:
rles = [a['rle'] for a in anns]
labels = ImageObjects(
bboxes=torch.FloatTensor(bboxes),
cats=torch.LongTensor(cat_idxs),
masks=None if rles is None else maskUtils.rle2mask(rles),
bb_format=ann_format,
img_hw=(img_h, img_w)
)
return labels
@staticmethod
def collate_func(batch):
batch = {
'images': torch.stack([items['image'] for items in batch]),
'indices': torch.LongTensor([items['index'] for items in batch]),
'labels': [items['labels'] for items in batch],
'image_ids': [items['image_id'] for items in batch],
'pad_infos': [items['pad_info'] for items in batch],
'anns': [items['anns'] for items in batch]
}
return batch
def to_iterator(self, **kwargs):
self.iterator = iter(DataLoader(self, collate_fn=self.collate_func,
**kwargs))
self._iter_args = kwargs
def get_next(self):
assert hasattr(self, 'to_iterator'), 'Please call to_iterator() first'
try:
data = next(self.iterator)
except StopIteration:
self.to_iterator(**self._iter_args)
data = next(self.iterator)
return data | [
"duanzh@bu.edu"
] | duanzh@bu.edu |
862615a3240f70d7ad9c2529170a5eb9b3cfa420 | 671f266694a1f8e8d5692d5b08871126702d1591 | /1_Arrays_and_Strings/1-7-rotate-matrix/rotate-matrix.py | 5a48b130897a1646ea720e0fc69f5f16263f99d6 | [] | no_license | LuqiPan/Life-is-a-Struggle | 07836fe410c04f6e4cef01e5cd069e96f583027d | 330f864bc03d0ba47b917ff2afa3b5c29688df11 | refs/heads/master | 2021-01-18T18:30:52.758562 | 2017-03-31T20:16:23 | 2017-03-31T20:16:23 | 86,857,055 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 977 | py | def generate_matrix(n):
return [[x * n + y for y in range(n)] for x in range(n)]
def rotate(matrix):
dimension = len(matrix)
for layer in range((dimension + 1) / 2):
for j in range(layer, dimension - layer - 1):
temp = matrix[layer][j]
matrix[layer][j] = matrix[dimension - layer - 1 - j + layer][layer]
matrix[dimension - layer - 1 - j + layer][layer] = matrix[dimension - layer - 1][dimension - layer - 1 - j + layer]
matrix[dimension - layer - 1][dimension - layer - 1 - j + layer] = matrix[j][dimension - layer - 1]
matrix[j][dimension - layer - 1] = temp
return matrix
def helper(n):
for array in generate_matrix(n):
print ' '.join(['%2d' % i for i in array])
print '-----'
for array in rotate(
generate_matrix(n)
):
print ' '.join(['%2d' % i for i in array])
helper(5)
print '====='
helper(6)
print '====='
helper(1)
print '====='
helper(2)
| [
"luqi@yelp.com"
] | luqi@yelp.com |
b6ff23c0ecf8a41f7bdb4d157d1ac060eceb9977 | 61d8f5ac162d16f2a7785d00febf42c1621e4a1e | /PyCharm 2017.3.2/新建.py | 8e924159d0a48bf824fbea5433dae6247ee1d7e8 | [] | no_license | hezy10/myproject | c77d58c4d638c21b9356f5ead56c6d5e86b520d4 | c242773c3625b2e89f5da92acbcc044a8a2e12d8 | refs/heads/master | 2023-01-12T21:13:23.247800 | 2020-11-13T09:19:34 | 2020-11-13T09:19:34 | 312,503,852 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,185 | py | # 从键盘输入一个字符串,将小写字母全部转换成大写字母,然后输出到一个磁盘文件"test"中保存。
def file_save():
str_up = input('请输入内容:')
data = str_up.upper()
with open('aaa.txt','w+') as f:
f.write(data)
f.close()
file_save()
# f = open('test.txt','w+')
# f.write(data)
# f.close()
# 求0—7所能组成的奇数个数
def an_odd_number():
pass
# list_1 = []
# i = 0
# while i <= 7:
# if i % 2 !=0:
# list_1.append(i)
# else:
# continue
# i +=1
# print(len(list_1))
# list_new = [1 ,2 ,2 ,6 ,3 ,5 ,5 ,6 ,4 ,3 ,4]
# list_new1 = [1,2,3,4,5,6]
# print(set(list_new))
# dict_1 = {}
# for i in list_new:
# pass
# srt_1 = '123'
# print(srt_1.endswith('3'))
# inp = input('输入内容:')
# while (1):
# if inp.isalnum() or inp.isalpha():
# print('输入内容是:',inp)
#
#
#
# else:
# print('输入错误')
# break
# list_str = []
# str_s = 'hello python'
# for i in str_s:
#
# if i not in list_str:
# list_str.append(i)
# print(list_str)
# print(list_str)
from collections import Counter
# zip()
# enumerate() 枚举
# ord() | [
"179644835@qq.com"
] | 179644835@qq.com |
346fd4d1f34f826cceeac467e46dac23ffa4b675 | 696ddf845f8058d572d25a443afb70711770eaeb | /UberooBehaviour/features/steps/read_statistics.py | 463bb60e6aae38cbd077864a40c8c9d5793eb7ad | [] | no_license | NassimBounouas/SI5-soa_api_lab_team_d | 14a47b96cdf4ac6462cc2b98f468947c29bf007e | 655c8cc39dc871d54582426c70f9219a07d8a397 | refs/heads/master | 2020-03-29T17:15:36.487575 | 2018-11-11T17:44:25 | 2018-11-11T17:44:25 | 150,152,354 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 363 | py | from behave import *
from share.fetch_callback import fetch_callback
from share.query_callback import query_callback
# Note the final $ for regex
@given('a steed "{steed_name}"$')
def step_impl(context, steed_name):
pass
@when('computing kpi "{kpi}"')
def step_impl(context, kpi):
pass
@then('the kpi is returned')
def step_impl(context):
pass
| [
"nikita.rousseau@etu.unice.fr"
] | nikita.rousseau@etu.unice.fr |
d93d462eb124d9027c852c07d16658e2754aacc5 | 09510e2016c93bd60669a84ec2ff466ec443ecc3 | /IModel.py | 3f77007e65da08ca2175a84af7a8bed1df879ae4 | [
"MIT"
] | permissive | Bigg-T/learn_flask_py | 5b5d508a7a63fa25c85343c9c99e1c254d530c0f | a9c61e562cba88f6f83dbfbf4301c62a04a15f07 | refs/heads/master | 2021-01-16T00:28:52.037401 | 2017-08-14T02:36:18 | 2017-08-14T02:36:18 | 99,965,303 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,027 | py | from abc import ABC, abstractmethod
class IModel(ABC):
"""
Fetch all of the on the entries in the the database.
@params database_name the name of the the table
return tuple (nunber of entries, all entries in with the given table name)
"""
@abstractmethod
def fetchall(self, database_name):
pass
"""
@params username
return True if the given username in already existed in "user" table
"""
@abstractmethod
def user_existed(self, username):
pass
"""
Fetch an entry in with an specified id with the given table name.
@params database_name name of the table
@params id the given id
return the specified entry with the given table
"""
@abstractmethod
def art(self, database_name, id):
pass
"""
Register a user.
@params values all the require fields requred to register a users
return True if user was created, False otherwise
"""
@abstractmethod
def register_user(self, values):
pass
"""
Authenticate users before allows to login.
@params username the username
@params password_candidate the password user gave
return a tuble (Boolean, Boolean), first value if username existed, second is password correct
"""
@abstractmethod
def auth_login(self, username, password_candidate):
pass
"""
Add article into the table.
@params values all the field need for adding an article
return True if the article was added
"""
@abstractmethod
def add_art(self, values):
pass
"""
Edit an article/comments user wrote.
@params values the field needed to update the article
return True if updated in the database
"""
@abstractmethod
def update_art(self, values):
pass
"""
Delete post/comment user wrote.
@params id the unique id for the post user wanted to delete
return True if the post is deleted
"""
@abstractmethod
def delete_art(self, id):
pass
| [
"thienn503@gmail.com"
] | thienn503@gmail.com |
a69b56d6e1f220cfd671cf3e1d3815682fc394a0 | 3d5382d38ef618e56ec89c82ec3d2951b28fa364 | /py/Compiler/Exceptions.py | bc027d1e9b606bcebb0cf43dc98be4220c612505 | [
"Apache-2.0"
] | permissive | PierreBizouard/PoC | 26a5e414b630d65e9369d446906741f483f85dcb | 35f7f6cc3a8bf3ec1883f91cf3ec844ed6082379 | refs/heads/master | 2021-01-16T19:31:58.261996 | 2015-08-25T21:16:31 | 2015-08-25T21:16:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,704 | py | # EMACS settings: -*- tab-width: 2; indent-tabs-mode: t -*-
# vim: tabstop=2:shiftwidth=2:noexpandtab
# kate: tab-width 2; replace-tabs off; indent-width 2;
#
# ==============================================================================
# Authors: Patrick Lehmann
#
# Python Class: Base class for all PoC***Compilers
#
# Description:
# ------------------------------------
# TODO:
# -
# -
#
# License:
# ==============================================================================
# Copyright 2007-2015 Technische Universitaet Dresden - Germany
# Chair for VLSI-Design, Diagnostics and Architecture
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# entry point
if __name__ != "__main__":
# place library initialization code here
pass
else:
from sys import exit
print("=" * 80)
print("{: ^80s}".format("The PoC Library - Python Module Compiler.Exceptions"))
print("=" * 80)
print()
print("This is no executable file!")
exit(1)
from Base.Exceptions import *
class CompilerException(BaseException):
def __init__(self, message=""):
super().__init__(message)
self.message = message
| [
"Patrick.Lehmann@tu-dresden.de"
] | Patrick.Lehmann@tu-dresden.de |
b4809ca9aa90fa49b994c884c229b299da19fb4b | 279170fed05a3001744847c54ab549ea7ee3c904 | /horse_test.py | aeb6fa97edbc37a387a383a8f900c58245c4eeab | [] | no_license | Sarefx/Scraping-Data-From-the-Web | 0c903bbd763f409cd538b3108ffb1850cfc21540 | c62f31e4a600c62e5403997a556966dcafb3027e | refs/heads/master | 2022-10-21T14:25:01.282026 | 2020-06-10T21:17:47 | 2020-06-10T21:17:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 493 | py | from urllib.request import urlopen
from bs4 import BeautifulSoup
import unittest
class TestHorseLand(unittest.TestCase):
soup = None
def setUpClass():
url = 'https://treehouse-projects.github.io/horse-land/index.html'
TestHorseLand.soup = BeautifulSoup(urlopen(url), 'html.parser')
def test_header1(self):
header1 = TestHorseLand.soup.find('h1').get_text()
self.assertEqual('Horse Land', header1)
if __name__ and '__main__':
unittest.main()
| [
"nikitakoba93@gmail.com"
] | nikitakoba93@gmail.com |
a5981b2b8148ceb8e003a84135f5cd35e8ab89a2 | 1ca965019d8e6501eb13ac9d0526877e2aefbca8 | /useful/pickup_images/2_generator_feature/generateFeatures.py | 5f8247f4168d7400483f28054a030080db56a7a6 | [] | no_license | starpicker/something | bcdea511501db2256d3e47ee267c1aecc5ec7f1e | 16a59a4b98ac7eda9216543ef61f19f3c2e03c8e | refs/heads/master | 2021-06-01T11:13:06.887059 | 2021-04-13T02:47:51 | 2021-04-13T02:47:51 | 115,477,577 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,750 | py | import math
import numpy
import itertools
LANDMARK_NUMBER = 68
def facialRatio(points):
x1 = points[0];
y1 = points[1];
x2 = points[2];
y2 = points[3];
x3 = points[4];
y3 = points[5];
x4 = points[6];
y4 = points[7];
dist1 = math.sqrt((x1-x2)**2 + (y1-y2)**2)
dist2 = math.sqrt((x3-x4)**2 + (y3-y4)**2)
# ratio = 0
# if(dist2 != 0):
# ratio = dist1/dist2
ratio = dist1/dist2
return ratio
def generateFeatures(pointIndices1, pointIndices2, pointIndices3, pointIndices4, allLandmarkCoordinates):
size = allLandmarkCoordinates.shape
allFeatures = numpy.zeros((size[0], len(pointIndices1)))
for x in range(0, size[0]):
landmarkCoordinates = allLandmarkCoordinates[x, :]
ratios = [];
for i in range(0, len(pointIndices1)):
x1 = landmarkCoordinates[2*(pointIndices1[i]-1)]
y1 = landmarkCoordinates[2*pointIndices1[i] - 1]
x2 = landmarkCoordinates[2*(pointIndices2[i]-1)]
y2 = landmarkCoordinates[2*pointIndices2[i] - 1]
x3 = landmarkCoordinates[2*(pointIndices3[i]-1)]
y3 = landmarkCoordinates[2*pointIndices3[i] - 1]
x4 = landmarkCoordinates[2*(pointIndices4[i]-1)]
y4 = landmarkCoordinates[2*pointIndices4[i] - 1]
points = [x1, y1, x2, y2, x3, y3, x4, y4]
ratios.append(facialRatio(points))
allFeatures[x, :] = numpy.asarray(ratios)
return allFeatures
def generateAllFeatures(allLandmarkCoordinates):
# a = [18, 22, 23, 27, 37, 40, 43, 46, 28, 32, 34, 36, 5, 9, 13, 49, 55, 52, 58]
a = [18, 22, 23, 27, 7, 20, 19, 3, 28, 31, 12, 6, 5, 9, 13, 25, 16, 26, 10]
combinations = itertools.combinations(a, 4)
i = 0
pointIndices1 = [];
pointIndices2 = [];
pointIndices3 = [];
pointIndices4 = [];
for combination in combinations:
pointIndices1.append(combination[0])
pointIndices2.append(combination[1])
pointIndices3.append(combination[2])
pointIndices4.append(combination[3])
i = i+1
pointIndices1.append(combination[0])
pointIndices2.append(combination[2])
pointIndices3.append(combination[1])
pointIndices4.append(combination[3])
i = i+1
pointIndices1.append(combination[0])
pointIndices2.append(combination[3])
pointIndices3.append(combination[1])
pointIndices4.append(combination[2])
i = i+1
return generateFeatures(pointIndices1, pointIndices2, pointIndices3, pointIndices4, allLandmarkCoordinates)
landmarks = numpy.loadtxt('landmarks.txt', delimiter=',', usecols=range(LANDMARK_NUMBER))
featuresALL = generateAllFeatures(landmarks)
numpy.savetxt('features.txt', featuresALL, delimiter=',', fmt = '%.04f')
#pointIndices1 = [20, 20, 45, 45]
#pointIndices2 = [58, 9, 58, 58]
#pointIndices3 = [5, 7, 5, 32]
#pointIndices4 = [13, 13, 11, 36]
#features = generateFeatures(pointIndices1, pointIndices2, pointIndices3, pointIndices4, landmarks)
| [
"starpicker1@163.com"
] | starpicker1@163.com |
d97a830a2007d70319995bdb9790a1ed2e890cfc | c2004a41f01fa6e0a4d0dcd142bdd589251230a8 | /load_model_and_test.py | 0f80a2f401749baad508dd2c456af319e2c540b8 | [] | no_license | vivekltp120/JBM_image_detection_model | c9978fe6556bc7be7524b39510e5ff4cddc32b9c | 83c5f1d44cfade5bd93ecdee733f4542dbfecb88 | refs/heads/JBM_image_detection | 2022-12-14T04:12:13.113723 | 2019-01-13T05:06:00 | 2019-01-13T06:31:03 | 165,473,819 | 0 | 0 | null | 2022-12-08T01:02:48 | 2019-01-13T06:32:31 | Python | UTF-8 | Python | false | false | 1,261 | py | from keras.models import load_model
from keras_preprocessing.image import ImageDataGenerator,array_to_img, img_to_array, load_img
import glob
from matplotlib import pyplot as plt
import cv2
import numpy as np
import os
import sys
# Classify every image under the directory given on the command line with the
# trained binary defect-detection model and print one verdict per file.
# Removed: four unused dataset-path constants, an unused matplotlib figure,
# an unused enumerate counter, and a redundant second read of each image via
# load_img whose result was never used.
model = load_model('./detect_defect_v9.h5')
model.compile(loss='binary_crossentropy',
              optimizer='rmsprop',
              metrics=['accuracy'])
# Directory (or path prefix) holding the images to classify.
path_for_source = sys.argv[1]
# Input dimensions expected by the network.
img_width, img_height = 250, 250
for testimagepath in glob.glob(pathname=path_for_source + '*'):
    img = cv2.imread(testimagepath)
    img = cv2.resize(img, (img_width, img_height))
    # Add the batch dimension expected by predict_classes: (1, H, W, 3).
    img = np.reshape(img, [1, img_width, img_height, 3])
    class_of_image = model.predict_classes(img)
    print(os.path.split(testimagepath)[1], end=' ')
    # predict_classes yields 0 for the defect class, 1 for healthy.
    if class_of_image[0][0] == 0:
        print('- image is defected')
    else:
        print('- image is healthy')
| [
"vivekltp120@gmail.com"
] | vivekltp120@gmail.com |
ebd43be6f3a13daecb6f6bf526df0ca99451a7c8 | a890d538853c835f86e4b70173e23cb21b9f9ca5 | /math_library.py | 3d62a29fa2821baab816c43f2cdcd32e110c0a23 | [] | no_license | PSS1998/Math-Library | 4847d03f2dd2613d741581e369b5ab2faa1c056b | 3efdc7cf10de7da240c4d2c96b1819ed9dc4c642 | refs/heads/master | 2023-03-03T03:42:17.304967 | 2021-02-16T13:33:42 | 2021-02-16T13:33:42 | 290,165,832 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,847 | py | from abc import ABC, abstractmethod
import math
class Iexpression(ABC):
    """Abstract interface implemented by every expression-tree node."""

    @abstractmethod
    def derive(self):
        """Return the derivative of this node."""

    @abstractmethod
    def eval(self):
        """Return this node with a variable substituted by a value."""
class expression(Iexpression):
    """Generic expression node placeholder.

    Bug fix: the original bodies were ``self.derive()`` / ``self.eval()``,
    i.e. unconditional infinite self-recursion that crashed with
    RecursionError on any call.  Raise NotImplementedError instead so
    misuse fails fast with a clear message.
    """

    def derive(self):
        raise NotImplementedError("expression.derive must be overridden")

    def eval(self):
        raise NotImplementedError("expression.eval must be overridden")
class operator(Iexpression):
    """Binary expression node holding a left and a right sub-expression.

    derive/eval are placeholders overridden by the concrete operator
    subclasses.  NOTE(review): the subclasses take extra arguments
    (``var``, ``value``) that these placeholder signatures do not —
    consider aligning them.
    """

    def __init__(self, left_exp, right_exp):
        self.left_exp = left_exp
        self.right_exp = right_exp

    def derive(self):
        pass

    def eval(self):
        pass

    def check_number(self):
        """Return True when both subtrees consist purely of number nodes.

        The original spelled this as four duplicated elif branches over
        the (number|operator) x (number|operator) combinations; they
        collapse into one recursive predicate applied to each side.
        """
        def _is_constant(exp):
            # A subtree is constant if it is a literal number, or an
            # operator whose operands are themselves all constant.
            if isinstance(exp, number):
                return True
            return isinstance(exp, operator) and exp.check_number()
        return _is_constant(self.left_exp) and _is_constant(self.right_exp)
class add_operator(operator):
    """Expression node for the sum of two sub-expressions."""

    def derive(self, var):
        # Sum rule: (f + g)' = f' + g'
        lhs = self.left_exp.derive(var)
        rhs = self.right_exp.derive(var)
        return add_operator(lhs, rhs)

    def eval(self, var, value):
        # Substitute into both operands and rebuild the sum.
        return add_operator(self.left_exp.eval(var, value),
                            self.right_exp.eval(var, value))

    def print(self):
        # Numeric value of the node (variable nodes contribute 0).
        return self.left_exp.print() + self.right_exp.print()
class sub_operator(operator):
    """Expression node for the difference of two sub-expressions."""

    def derive(self, var):
        # Difference rule: (f - g)' = f' - g'
        return sub_operator(self.left_exp.derive(var),
                            self.right_exp.derive(var))

    def eval(self, var, value):
        lhs = self.left_exp.eval(var, value)
        rhs = self.right_exp.eval(var, value)
        return sub_operator(lhs, rhs)

    def print(self):
        # Numeric value of the node (variable nodes contribute 0).
        return self.left_exp.print() - self.right_exp.print()
class mult_operator(operator):
    """Expression node for the product of two sub-expressions."""

    def derive(self, var):
        # Product rule: (f * g)' = f' * g + g' * f
        first = mult_operator(self.left_exp.derive(var), self.right_exp)
        second = mult_operator(self.right_exp.derive(var), self.left_exp)
        return add_operator(first, second)

    def eval(self, var, value):
        lhs = self.left_exp.eval(var, value)
        rhs = self.right_exp.eval(var, value)
        # A literal-zero factor collapses the whole product to 0.
        lhs_zero = isinstance(lhs, number) and lhs.print() == 0
        rhs_zero = isinstance(rhs, number) and rhs.print() == 0
        if lhs_zero or rhs_zero:
            return number(0)
        return mult_operator(lhs, rhs)

    def print(self):
        if (isinstance(self.left_exp, number) and self.left_exp.print() == 0) or \
           (isinstance(self.right_exp, number) and self.right_exp.print() == 0):
            return 0
        elif self.left_exp == 0 or self.right_exp == 0:
            # NOTE(review): this compares the operand objects to the raw
            # int 0 — only reachable if a plain 0 (not a number node) was
            # stored as an operand; kept from the original.
            return 0
        else:
            return self.left_exp.print() * self.right_exp.print()
class div_operator(operator):
    """Expression node for the quotient of two sub-expressions."""

    def derive(self, var):
        # Quotient rule: (f / g)' = (f' * g - g' * f) / g^2
        numerator = sub_operator(
            mult_operator(self.left_exp.derive(var), self.right_exp),
            mult_operator(self.right_exp.derive(var), self.left_exp))
        denominator = mult_operator(self.right_exp, self.right_exp)
        return div_operator(numerator, denominator)

    def eval(self, var, value):
        return div_operator(self.left_exp.eval(var, value),
                            self.right_exp.eval(var, value))

    def print(self):
        # A literal-zero numerator short-circuits straight to 0.
        if isinstance(self.left_exp, number) and self.left_exp.print() == 0:
            return 0
        return self.left_exp.print() / self.right_exp.print()
class operator_factory():
    """Builds the operator node matching an arithmetic symbol."""

    def operation(self, operator, left_exp, right_exp):
        # Resolve the symbol to its node class first, then construct once;
        # unknown symbols yield None, matching the original fall-through.
        if operator == "+":
            node_cls = add_operator
        elif operator == "-":
            node_cls = sub_operator
        elif operator == "*":
            node_cls = mult_operator
        elif operator == "/":
            node_cls = div_operator
        else:
            return None
        return node_cls(left_exp, right_exp)
class variable(Iexpression):
    """Symbolic variable node, identified by its name."""

    def __init__(self, name):
        self.variable_name = name

    def derive(self, var):
        # d(x)/dx = 1; the derivative w.r.t. any other variable is 0.
        same = var.variable_name == self.variable_name
        return number(1) if same else number(0)

    def eval(self, var, value):
        # Substitute only when the names match; otherwise stay symbolic.
        if var.variable_name == self.variable_name:
            return value
        return self

    def print(self):
        # Variables contribute nothing to the numeric value of a tree.
        return 0
class number(Iexpression):
    """Constant numeric leaf node."""

    def __init__(self, value):
        self.value = value

    def derive(self, var):
        # The derivative of a constant is always 0.
        return number(0)

    def eval(self, var, value):
        # Constants are unaffected by substitution; return a fresh copy.
        return number(self.value)

    def print(self):
        return self.value
class function(Iexpression):
    """Base class for unary mathematical function nodes (sin, cos, ...)."""

    def derive(self):
        """Placeholder; concrete functions override with (self, var)."""

    def eval(self):
        """Placeholder; concrete functions override with (self, var, value)."""
class sin_function(function):
    """Expression node for sin(exp)."""

    def __init__(self, exp):
        self.exp = exp

    def derive(self, var):
        # Chain rule: (sin u)' = cos(u) * u'
        # NOTE(review): this object-inequality guard is from the original;
        # derive() builds fresh nodes, so it is almost always True.
        if self.exp != self.exp.derive(var):
            return mult_operator(cos_function(self.exp), self.exp.derive(var))
        else:
            # Bug fix: the original computed number(0) here but never
            # returned it, so this branch yielded None.
            return number(0)

    def eval(self, var, value):
        if isinstance(self.exp, number):
            # Bug fix: the original read the nonexistent attribute
            # self.var (AttributeError); the operand is self.exp.
            return number(math.sin(self.exp.print()))
        else:
            reduced = self.exp.eval(var, value)
            if isinstance(reduced, number):
                return number(math.sin(reduced.print()))
            elif isinstance(reduced, operator) and reduced.check_number():
                return number(math.sin(reduced.print()))
            else:
                # Operand is still symbolic: stay as sin of the reduced tree.
                return sin_function(reduced)
class cos_function(function):
    """Expression node for cos(exp)."""

    def __init__(self, exp):
        self.exp = exp

    def derive(self, var):
        # Chain rule: (cos u)' = -sin(u) * u'
        # NOTE(review): this object-inequality guard is from the original;
        # derive() builds fresh nodes, so it is almost always True.
        if self.exp != self.exp.derive(var):
            # Bug fix: the original returned sin(u) * u' without the minus
            # sign required by the derivative of cosine.
            inner = mult_operator(sin_function(self.exp), self.exp.derive(var))
            return mult_operator(number(-1), inner)
        else:
            # Bug fix: the original computed number(0) here but never
            # returned it, so this branch yielded None.
            return number(0)

    def eval(self, var, value):
        if isinstance(self.exp, number):
            # Bug fix: the original read the nonexistent attribute
            # self.var (AttributeError); the operand is self.exp.
            return number(math.cos(self.exp.print()))
        else:
            reduced = self.exp.eval(var, value)
            if isinstance(reduced, number):
                return number(math.cos(reduced.print()))
            elif isinstance(reduced, operator) and reduced.check_number():
                return number(math.cos(reduced.print()))
            else:
                # Operand is still symbolic: stay as cos of the reduced tree.
                return cos_function(reduced)
class function_factory():
    """Registry resolving function names to built-in or user definitions."""

    def __init__(self):
        # Built-in unary function node classes available out of the box.
        self.map_function = {"sin": sin_function, "cos": cos_function}
        # User-registered expressions, consulted before the built-ins.
        self.user_function = {}

    def get_function(self, function, input_variable=None):
        # User definitions shadow the built-in ones by name.
        if function in self.user_function:
            return self.user_function[function]
        return self.map_function[function](input_variable)

    def add_function(self, name, exp):
        self.user_function[name] = exp
| [
"p_sadri_s@yahoo.com"
] | p_sadri_s@yahoo.com |
851bb89a66ebd7d849b2460e58cb89a0a210f9c4 | 9b1bc7d4544a3a44fcf1ae548243d7a7d0b0617d | /fass.py | 6268da3e616ee3d6d81d7b1f176a8e885c83689a | [] | no_license | DakaRaka/GeorgeClooney | 5c5cc8ad59292b74838e8caee9f9421934262a93 | 0e193265bd337edbd48f07cd825ddb21aafe128d | refs/heads/master | 2020-09-20T14:50:26.829961 | 2020-01-22T23:16:43 | 2020-01-22T23:16:43 | 224,514,504 | 1 | 1 | null | 2019-11-27T20:58:31 | 2019-11-27T20:47:43 | Python | UTF-8 | Python | false | false | 879 | py | from fassword.utils import load_data
import argparse
from fassword.entries import init_data
from fassword.entries import add_entry
from fassword.entries import unlock_master
from fassword.entries import decrypt_entry
from fassword.entries import list_entrys
# Command-line interface: one positional entry name plus add/delete flags.
parser = argparse.ArgumentParser()
parser.add_argument('entry', type=str,
                    help="The desired password to decrypt")
parser.add_argument('-a', '--add', action="store_true", default=False,
                    help="Add entry to password store")
# NOTE(review): --delete is parsed here but main() never acts on it.
parser.add_argument('-d', '--delete', action="store_true", default=False,
                    help="Delete an entry")
args = parser.parse_args()
def main():
    """Entry point: initialise the store if empty, then add or decrypt."""
    if not load_data():
        # First run — no password data on disk yet.
        init_data()
    if args.add:
        unlock_master()
        add_entry(args.entry)
    else:
        decrypt_entry(args.entry)
# Run the CLI when the module is executed.
main()
| [
"DakaRaka@protonmail.com"
] | DakaRaka@protonmail.com |
e4a2a293357647b5d27f7fdba182e45a9f15b4de | e0994ddf4319aaf2e911d61c5ce57f2dd881ca20 | /faculty_non_teaching/admin.py | 692a8737a066b53f56970fe9657f664b05500a18 | [] | no_license | sarthak221995/ucoe | 98dee2de8b614da4e1a516cc4cbae47269c5f7f0 | 94530cde81ef060e94734cfaabce00d814de3b83 | refs/heads/master | 2021-01-10T04:05:04.102121 | 2016-08-27T12:18:23 | 2016-08-27T12:18:23 | 49,074,017 | 0 | 1 | null | 2016-01-08T07:19:07 | 2016-01-05T15:22:57 | HTML | UTF-8 | Python | false | false | 215 | py | from django.contrib import admin
from .models import Post
class PostAdmin(admin.ModelAdmin):
    # Django admin configuration for Post:
    # sidebar filters on the two timestamp fields...
    list_filter = ['created','modified']
    # ...and a three-column change list showing name plus timestamps.
    list_display = ('name','created','modified')
# Make Post editable in the admin site using the configuration above.
admin.site.register(Post,PostAdmin)
| [
"sarthak221995@gmail.com"
] | sarthak221995@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.