blob_id stringlengths 40 40 | language stringclasses 1
value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30
values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2
values | text stringlengths 12 5.47M | download_success bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
7b7a177cf282c46d5b961d40aebf90c765102933 | Python | Amonoff/PY4E | /10.1.py | UTF-8 | 961 | 3.734375 | 4 | [] | no_license | '''Revise a previous program as follows: Read and parse the
“From” lines and pull out the addresses from the line. Count the num-
ber of messages from each person using a dictionary.
After all the data has been read, print the person with the most commits
by creating a list of (count, email) tuples from the dictionary. Then
sort the list in reverse order and print out the person who has the most
commits.'''
fname = input('Enter file name:')  # renamed: `file` shadowed the handle below
dict_addresses = dict()
dict_list = list()
try:
    handle = open(fname)
except OSError:
    # BUG FIX: the original only printed here and fell through, after which
    # the for-loop iterated the file-NAME string character by character.
    print('File does not exist')
    exit()
for line in handle:
    words = line.split()
    # Only "From " lines carry a sender address as the second token.
    if not line.startswith('From '):
        continue
    dict_addresses[words[1]] = dict_addresses.get(words[1], 0) + 1
# Build (count, email) tuples so sorting ranks senders by message count.
for address, frequency in dict_addresses.items():
    dict_list.append((frequency, address))
dict_list.sort(reverse=True)
print(dict_list)
# After the reverse sort, max() is the first element: the top sender.
print(max(dict_list))
| true |
da030d4126561529ef5b6f2e82885ad9db8b5d28 | Python | hypothesis/lms | /lms/models/public_id.py | UTF-8 | 3,598 | 2.921875 | 3 | [
"BSD-2-Clause"
] | permissive | from base64 import urlsafe_b64encode
from dataclasses import dataclass
from typing import Optional
from uuid import uuid4
from lms.models.region import Region, Regions
class InvalidPublicId(Exception):
    """Indicate an error with the specified public id.

    Raised by ``PublicId.parse`` when the id string is malformed or does not
    match the expected app code, model code or region.
    """
@dataclass
class PublicId:
    """
    Get a globally unique value with prefixes like 'us.' or 'ca.'.

    This is useful if you only have the id, but don't know which region you
    should be looking for it in and is the only id suitable for sharing
    outside a single region LMS context.

    The string form is '<region>.<app>.<model>.<instance>' (see ``__str__``
    and ``parse``).
    """

    region: Region
    """Region this model is in."""

    model_code: str
    """Short identifier of the model type."""

    app_code: str = "lms"
    """Code representing the product this model is in."""

    # Typing fix: the default is None, so the annotation must be Optional;
    # __post_init__ fills in a generated id when none is supplied.
    instance_id: Optional[str] = None
    """Identifier for the specific model instance."""

    def __post_init__(self):
        # Generate an instance id when the caller did not provide one.
        if self.instance_id is None:
            self.instance_id = self.generate_instance_id()

    @classmethod
    def generate_instance_id(cls) -> str:
        """Get a new instance id."""

        # We don't use a standard UUID-4 format here as they are common in Tool
        # Consumer Instance GUIDs, and might be confused for them. These also
        # happen to be shorter and guaranteed URL safe.
        return urlsafe_b64encode(uuid4().bytes).decode("ascii").rstrip("=")

    @classmethod
    def parse(
        cls,
        public_id: str,
        expect_app_code: Optional[str] = "lms",
        expect_model_code: Optional[str] = None,
        expect_region: Optional[Region] = None,
    ) -> "PublicId":
        """
        Parse a public id string into a PublicId object.

        :param public_id: Public id to parse
        :param expect_app_code: Expect the specified app code
        :param expect_model_code: Expect the specified model code
        :param expect_region: Expect the specified region
        :raises InvalidPublicId: If the public id is malformed or any
            expectations are not met
        """
        parts = public_id.split(".")
        if not len(parts) == 4:
            raise InvalidPublicId(
                f"Malformed public id: '{public_id}'. Expected 4 dot separated parts."
            )

        region_code, app_code, model_code, instance_id = parts

        if expect_app_code and app_code != expect_app_code:
            raise InvalidPublicId(
                f"Expected app '{expect_app_code}', found '{app_code}'"
            )

        if expect_model_code and model_code != expect_model_code:
            raise InvalidPublicId(
                f"Expected model '{expect_model_code}', found '{model_code}'"
            )

        try:
            region = Regions.from_code(region_code)
        except ValueError as exc:
            # Re-raise with the original message but our own exception type.
            raise InvalidPublicId(exc.args[0]) from exc

        if expect_region and region != expect_region:
            raise InvalidPublicId(
                f"Expected region '{expect_region}', found '{region}'"
            )

        return cls(
            region=region,
            app_code=app_code,
            model_code=model_code,
            instance_id=instance_id,
        )

    def __str__(self):
        # Ensure we stringify to the public code naturally
        # We use '.' as the separator here because it's not in base64, but it
        # is URL safe. The other option is '~'.
        # See: https://www.ietf.org/rfc/rfc3986.txt (2.3)
        # > unreserved = ALPHA / DIGIT / "-" / "." / "_" / "~"
        return (
            f"{self.region.code}.{self.app_code}.{self.model_code}.{self.instance_id}"
        )
| true |
b258d5d9df5dcd1f9bcd688595d47ff55f436001 | Python | mr-sigma/coursera | /principles-of-computing/wk5/cookie_clicker.py | UTF-8 | 9,050 | 3.328125 | 3 | [] | no_license | """
Cookie Clicker Simulator
"""
import simpleplot
import math
# Used to increase the timeout, if necessary
import codeskulptor
codeskulptor.set_timeout(20)
import poc_clicker_provided as provided
# Constants
SIM_TIME = 1000
# 3999999999.0 * 4
class ClickerState:
    """
    Track the complete state of one Cookie Clicker game: spendable cookies,
    cookies-per-second rate, elapsed time, lifetime production and the
    purchase history.
    """

    def __init__(self):
        self._cookies = 0.0
        self._cps = 1.0
        self._total_cookies_produced = 0.0
        self._time = 0.0
        # History records: (time, item bought (None for the initial entry),
        # item cost, total cookies produced so far).
        self._history = [(0.0, None, 0.0, 0.0)]

    def __str__(self):
        """
        Return human readable state
        """
        return (f"Time: {self._time}"
                f"\nTotal Cookies Produced: {self._total_cookies_produced}"
                f"\nCPS: {self._cps}\n")

    def get_cookies(self):
        """Current spendable cookie balance, as a float."""
        return float(self._cookies)

    def get_cps(self):
        """Current cookies-per-second rate, as a float."""
        return float(self._cps)

    def get_time(self):
        """Elapsed game time, as a float."""
        return float(self._time)

    def get_history(self):
        """
        Return a copy of the purchase history: a list of
        (time, item, cost of item, total cookies) tuples. The copy keeps the
        internal list safe from outside mutation.
        """
        return list(self._history)

    def time_until(self, cookies):
        """
        Whole number of seconds (as a float) until the balance reaches
        ``cookies``; 0.0 when it is already there.
        """
        needed = math.ceil((cookies - self._cookies) / self._cps)
        return float(needed) if needed > 0.0 else 0.0

    def wait(self, time):
        """
        Advance the clock by ``time`` seconds, accruing cookies.
        Does nothing when time <= 0.0.
        """
        if time <= 0.0:
            return
        produced = self._cps * time
        self._time += time
        self._total_cookies_produced += produced
        self._cookies += produced

    def buy_item(self, item_name, cost, additional_cps):
        """
        Spend ``cost`` cookies on ``item_name``, gaining ``additional_cps``.
        Silently does nothing when the item cannot be afforded.
        """
        if cost > self._cookies:
            return
        self._cookies -= cost
        self._cps += additional_cps
        self._history.append((self._time, item_name, cost,
                              self._total_cookies_produced))
def simulate_clicker(build_info, duration, strategy):
    """
    Run one Cookie Clicker game for ``duration`` seconds, letting
    ``strategy`` choose every purchase, and return the final ClickerState.

    The strategy is called with (cookies, cps, history, time_left, clone of
    build_info) and returns an item name, or None when nothing should (or
    can) be bought.
    """
    state = ClickerState()
    while state.get_time() <= duration:
        remaining = duration - state.get_time()
        # Pass a clone so the strategy cannot mutate the real build info.
        choice = strategy(state.get_cookies(), state.get_cps(),
                          state.get_history(), remaining, build_info.clone())
        if choice is None:
            # Nothing left to buy (or no time left): run out the clock.
            state.wait(remaining)
            break
        cost = build_info.get_cost(choice)
        gain = build_info.get_cps(choice)
        needed = state.time_until(cost)
        if needed > remaining:
            # The chosen item cannot be afforded before time expires.
            state.wait(remaining)
            break
        # Wait until the item is affordable, buy it and record the purchase.
        state.wait(needed)
        state.buy_item(choice, cost, gain)
        build_info.update_item(choice)
    return state
def strategy_cursor_broken(cookies, cps, history, time_left, build_info):
    """
    Always pick Cursor!

    Deliberately simplistic reference strategy. It does check that a Cursor
    is affordable with the cookies attainable before the deadline, but
    simulate_clicker must still cope with strategies that do not; real
    strategies must return None when the item cannot be bought in time.
    """
    attainable = cookies + cps * time_left
    return None if build_info.get_cost("Cursor") > attainable else "Cursor"
def strategy_none(cookies, cps, history, time_left, build_info):
    """
    Never buy anything.

    A pointless strategy, kept as a debugging aid for simulate_clicker.
    """
    return None
def strategy_cheap(cookies, cps, history, time_left, build_info):
    """Always buy the cheapest item you can afford in the time left."""
    cheapest_cost = float("inf")
    cheapest_item = ""
    for name in build_info.build_items():
        price = build_info.get_cost(name)
        if price < cheapest_cost:
            cheapest_cost = price
            cheapest_item = name
    # Affordable only if cookies on hand plus future production cover it.
    if cookies + cps * time_left < cheapest_cost:
        return None
    return cheapest_item
def strategy_expensive(cookies, cps, history, time_left, build_info):
    """Always buy the most expensive item you can afford in the time left."""
    budget = cookies + cps * time_left
    priciest_cost = float("-inf")
    priciest_item = None
    for name in build_info.build_items():
        price = build_info.get_cost(name)
        # Track the highest-priced item that still fits within the budget.
        if priciest_cost < price <= budget:
            priciest_cost = price
            priciest_item = name
    return priciest_item
def strategy_best(cookies, cps, history, time_left, build_info):
    """
    Buy the affordable item with the best CPS gained per cookie spent.

    BUG FIX: the original never updated ``best_cps_per_cost`` inside the
    loop, so every affordable item with a positive ratio overwrote the pick
    and the *last* such item was returned instead of the most cost-effective.
    """
    budget = cookies + cps * time_left
    best_ratio = 0.0
    best_item = None
    for item in build_info.build_items():
        cost = build_info.get_cost(item)
        ratio = build_info.get_cps(item) / cost
        if ratio > best_ratio and cost <= budget:
            best_ratio = ratio
            best_item = item
    # Return the item with the maximum cps per cookie (None if unaffordable).
    return best_item
def run_strategy(strategy_name, time, strategy):
    """
    Run a simulation for the given time with one strategy.
    """
    state = simulate_clicker(provided.BuildInfo(), time, strategy)
    # print strategy_name, ":", state
    # Plot total cookies over time
    # Uncomment out the lines below to see a plot of total cookies vs. time
    # Be sure to allow popups, if you do want to see it
    history = state.get_history()
    # Reduce each (time, item, cost, total) record to (time, total cookies).
    history = [(item[0], item[3]) for item in history]
    # simpleplot.plot_lines(strategy_name, 1000, 400, 'Time',
    # 'Total Cookies', [history], True)
def run():
    """
    Run the simulator once with each strategy, in the original order.
    """
    all_strategies = (
        ("Cursor", strategy_cursor_broken),
        ("None", strategy_none),
        ("Cheap", strategy_cheap),
        ("Expensive", strategy_expensive),
        ("Best", strategy_best),
    )
    for label, strategy in all_strategies:
        run_strategy(label, SIM_TIME, strategy)
# run()
| true |
b56d85020e682399f133c2001377d8e9a90f686b | Python | nanshika/xlsx2csv | /xlsx2csv.py | UTF-8 | 3,532 | 2.890625 | 3 | [] | no_license | import pandas as pd
import os
import sys
import click
bad_param_txt = 'Set at least one .xlsx file for input file[s].(対象の .xlsx ファイルを【一つ以上】指定してください。)'
# Root click command group; the xlsx2csv command below is registered on it.
# (No docstring on purpose: click would surface it as CLI help text.)
@ click.group()
def cli():
    pass
@ cli.command()
@ click.argument('input_files', type=click.Path(exists=True),
                 nargs=-1)  # required; nargs=-1 accepts any number of paths
@ click.option('--output_merged_file_name', '-o',
               default='merged.csv', help='Output merged file name')  # optional
@click.option('--merged_is_not_requied', '-m', is_flag=True,
              default=False, help='Set if the merged output file is NOT required')
@click.option('--individuals_is_not_requied',
              '-i',
              is_flag=True,
              default=False,
              help='Set if the individual output file[s] is/are NOT required')
def xlsx2csv(
        input_files,
        output_merged_file_name,
        merged_is_not_requied,
        individuals_is_not_requied):
    """Convert every sheet of the given .xlsx workbook(s) to CSV.

    Unless suppressed with -i, writes one CSV per input workbook; unless
    suppressed with -m, also writes a merged CSV whose rows are prefixed
    with the source file name and sheet name.
    """
    headers = None  # pandas: treat the first row as data, not as a header
    # Require at least one .xlsx file among the inputs.
    if len(input_files) == 0 or not any(['.xlsx' in _ for _ in input_files]):
        raise click.BadParameter(bad_param_txt)
    # Output directory follows the first input file; fall back to the
    # current working directory when only a bare file name was given.
    output_dir = os.path.dirname(sys.argv[1])
    if len(output_dir) == 0:
        output_dir = os.getcwd()
    # Unless told otherwise, open the merged output file.
    f_out0 = None
    if not merged_is_not_requied:
        f_out0 = open('{}/{}'.format(output_dir, output_merged_file_name),
                      'w', encoding='utf_8_sig', newline="")
    # Open each input workbook and emit its sheets as CSV.
    for input_file in input_files:
        # Skip anything that is not an .xlsx file. (Hoisted out of the
        # per-sheet loop so non-.xlsx inputs are not parsed at all.)
        if '.xlsx' not in input_file:
            continue
        book = pd.ExcelFile(input_file)
        # BUG FIX: basename must be taken from the path string; the original
        # passed the pandas ExcelFile object, which raises TypeError.
        file_name = os.path.basename(input_file)
        # Unless told otherwise, open the per-workbook output file.
        f_out1 = None
        if not individuals_is_not_requied:
            f_out1 = open('{}/{}'.format(output_dir,
                                         file_name.replace('.xlsx', '.csv')),
                          'w', encoding='utf_8_sig', newline="")
        for sheet_name in book.sheet_names:
            df = pd.read_excel(
                input_file,
                sheet_name=sheet_name,
                header=headers)
            if f_out1 is not None:
                df.to_csv(f_out1, sep=',', index=False, header=False)
            # Prefix columns identifying the source, for the merged output.
            df.insert(0, 'file_name', file_name)
            df.insert(1, 'sheet_name', sheet_name)
            if f_out0 is not None:
                df.to_csv(f_out0, sep=',', index=False, header=False)
        # Close outputs so data is flushed (the original leaked the handles).
        if f_out1 is not None:
            f_out1.close()
    if f_out0 is not None:
        f_out0.close()
def main():
    """Entry point: dispatch to the click command."""
    xlsx2csv()
if __name__ == '__main__':
    main()
| true |
6f4d5c898abf8f27ff07bc0b816b22d2d48fd38c | Python | a-y-u-s-h/ST3 | /Scripts/shell.py | UTF-8 | 2,296 | 2.75 | 3 | [] | no_license | import os
import shutil
import glob
import yaml
import sys
def cd(path):
    """
    ======================================
    Change the current working directory
    to *path*; no-op when it is missing.
    ======================================
    """
    if not os.path.exists(f"{path}"):
        return
    os.chdir(f"{path}")
# <---------------------------->
def ls(path):
    """
    ======================================
    Return the names of the entries in
    directory *path*.
    ======================================
    """
    entries = os.listdir(path)
    return entries
# <---------------------------->
def mkdir(path):
    """
    ======================================
    Create directory *path* (including
    parents) unless it already exists.
    ======================================
    """
    if os.path.exists(f"{path}"):
        return
    os.makedirs(f"{path}")
# <---------------------------->
def touch(path):
    """
    ======================================
    Create an empty file at *path*
    unless something already exists there.
    ======================================
    """
    if os.path.exists(f"{path}"):
        return
    with open(f"{path}", "w") as handle:
        pass
# <---------------------------->
def readyaml(path):
    """
    ======================================
    Read a YAML file.
    ======================================
    """
    # FullLoader parses the full YAML language without constructing
    # arbitrary Python objects; the parsed document is returned as-is.
    with open(f"{path}", "r") as file:
        content = yaml.load(file, Loader = yaml.FullLoader)
    return content
# <---------------------------->
def rmdir(path):
    """
    ======================================
    Recursively delete the directory
    tree at *path*; no-op when absent.
    ======================================
    """
    if not os.path.exists(f"{path}"):
        return
    shutil.rmtree(f"{path}")
# <---------------------------->
def delete(path):
    """
    ======================================
    Remove the file at *path* if it
    exists.
    ======================================
    """
    if not os.path.exists(f"{path}"):
        return
    os.remove(f"{path}")
# <---------------------------->
def rmac(path):
    """
    ======================================
    Remove everything inside present
    working directory.
    ======================================
    """
    # NOTE(review): destructive — this deletes every file and directory in
    # the CURRENT working directory (os.listdir() with no argument); the
    # `path` parameter is never used. Confirm that is intentional.
    for f in os.listdir():
        if os.path.isfile(f"{f}"):
            os.remove(f)
        elif os.path.isdir(f"{f}"):
            shutil.rmtree(f"{f}")
def package(file):
    # Append the parent of the directory containing *file* to sys.path so
    # sibling packages one level above it become importable.
    sys.path.append(os.path.dirname(os.path.dirname(os.path.realpath(file))))
| true |
35963acc2c5a08892bce174b2d0ecbaa3277201c | Python | rahmancam/ML-Algorithms | /03. Test_LogisticRegression.py | UTF-8 | 672 | 2.9375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn import datasets
import matplotlib.pyplot as plt
from logistic_regression import LogisticRegression
bc = datasets.load_breast_cancer()
X, y = bc.data, bc.target
# 80/20 train/test split; fixed seed so results are reproducible.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
def accuracy(y_true, y_pred):
    """Fraction of predictions that match the ground-truth labels."""
    correct = np.sum(y_true == y_pred)
    return correct / len(y_true)
# lr is the learning rate of the locally implemented LogisticRegression
# (imported from logistic_regression above).
classifier = LogisticRegression(lr = 0.001)
classifier.fit(X_train, y_train)
predictions = classifier.predict(X_test)
print("LogisticRegression classification accuracy: ", accuracy(y_test, predictions))
| true |
4ab96354643d1b3819ebb85ad56fddd9c9c333b2 | Python | alexsotocx/algorithms | /online_judges/Topcoder/Python/SequenceOfNumbers.py | UTF-8 | 97 | 2.734375 | 3 | [] | no_license | class SequenceOfNumbers:
def rearrange(self, sequence):
return sorted(sequence, key= int)
| true |
f458e310b8828e0af523fc88ce53a7727e308392 | Python | DawidCiechowski/TCPEchoClient | /tcpClient.py | UTF-8 | 297 | 2.953125 | 3 | [] | no_license | import socket
HOST = ""  # Insert IP Address (empty string is a placeholder, not a valid target)
PORT = 65432  # Default port
# NOTE(review): sendall(b'') transmits zero bytes, so the recv() below may
# block until the server sends something or closes — fill in a real message
# before running.
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
    sock.connect((HOST, PORT))
    sock.sendall(b'')  # Enter the message you wish to echo.
    data = sock.recv(4096)
    print("Received: " + repr(data))
| true |
810d71aaa2a2ba9466b0194d9b558acaae1eb83c | Python | moonlimb/scheme_to_js_translator | /xml_to_js/operators.py | UTF-8 | 430 | 2.90625 | 3 | [] | no_license |
# Different block types seen in the XML input:
#   procedures_defreturn   --> function with a return statement
#   procedures_defnoreturn --> function with no return statement
#   e.g. math_arithmetic, variables_get
# block_type = {'procedures_defreturn': 'function', 'math_arithmetic':'', 'variables_get':''}
# Maps operator names from the XML to the equivalent JavaScript operator
# text (spaces included so values can be joined between operands directly).
basic = {'ADD': ' + ', 'MINUS': ' - ', 'MULTIPLY': ' * ', 'DIVIDE':' / ', 'POWER':' ** ',
         'EQ': ' == ', 'GT': ' > ', 'GTE': ' >= ', 'LT':' < ', 'LTE': ' <= '}
| true |
2776d7502e63b4c7e1b8171202d6d02c161c0d01 | Python | michalwilk123/internship-python-profil | /tests/test_handlers/test_sqlite_handler.py | UTF-8 | 3,003 | 2.71875 | 3 | [] | no_license | from my_logger import SQLLiteHandler, ProfilLogger, LogEntry, log_entry
import unittest
import os
import sqlite3
from datetime import datetime
SQL_FILENAME = "test.db"
class TestSQLLiteHandler(unittest.TestCase):
    """Tests for SQLLiteHandler: writing and reading back LogEntry records.

    Improvements over the original: the reference-log fixture, previously
    copy-pasted into two tests, is built by a shared helper, and the local
    variable that shadowed the imported ``log_entry`` name was renamed.
    """

    def setUp(self) -> None:
        # Start every test with no leftover database file.
        if os.path.exists(SQL_FILENAME):
            os.remove(SQL_FILENAME)
        return super().setUp()

    def tearDown(self) -> None:
        # Remove the database file the test created.
        if os.path.exists(SQL_FILENAME):
            os.remove(SQL_FILENAME)
        return super().tearDown()

    @staticmethod
    def _sample_logs():
        """Return a fresh list of reference log entries.

        Built anew on every call so tests cannot interfere with each other
        through shared LogEntry objects.
        """
        return [
            LogEntry(
                date=datetime(2021, 7, 6, 21, 53, 16, 837733),
                level="INFO",
                msg="this is json test file number 1",
            ),
            LogEntry(
                date=datetime(2021, 7, 6, 21, 53, 16, 837885),
                level="WARNING",
                msg="this is json test file number 2",
            ),
            LogEntry(
                date=datetime(2021, 1, 6, 22, 53, 16, 837885),
                level="ERROR",
                msg="this is test error log",
            ),
        ]

    def test_handler_basic_usage(self):
        # Logging at every level through ProfilLogger must not raise.
        plogger = ProfilLogger([SQLLiteHandler(SQL_FILENAME)])
        plogger.info("this is json test file")
        plogger.warning("this is json test file")
        plogger.error("this is json test file")

    def test_handler_bad_file(self):
        # A file that is not valid SQLite must surface DatabaseError.
        with open(SQL_FILENAME, "w") as sql_file:
            sql_file.write("this is a not valid sqllite file contents")
        plogger = ProfilLogger([SQLLiteHandler(SQL_FILENAME)])
        self.assertRaises(sqlite3.DatabaseError, plogger.info, "lorem ipsum")

    def test_handler_add(self):
        # Logs added through the handler come back unchanged and in order.
        handler = SQLLiteHandler(SQL_FILENAME)
        ref_list = self._sample_logs()
        for log in ref_list:
            handler.add_log(log)
        self.assertEqual(handler.get_base_form(), ref_list)

    def test_handler_add_to_existing(self):
        # A new handler instance must append to the existing database file.
        handler = SQLLiteHandler(SQL_FILENAME)
        ref_list = self._sample_logs()
        for log in ref_list:
            handler.add_log(log)
        # Drop the first handler so the database is released before reopen.
        del handler

        new_entry = LogEntry(
            date=datetime(2020, 3, 6, 22, 53, 16, 837885),
            level="WARNING",
            msg="this is newly added log",
        )
        handler = SQLLiteHandler(SQL_FILENAME)
        handler.add_log(new_entry)
        ref_list.append(new_entry)
        self.assertEqual(handler.get_base_form(), ref_list)
| true |
53ce29fa88399e5a336912df3a51462a18f76d53 | Python | haryadwi/idcamp-py | /branch/if.py | UTF-8 | 153 | 3.671875 | 4 | [] | no_license | var1 = 100
# var1 was set to 100 above; non-zero ints are truthy, so this branch runs.
if var1:
    print('1 - Got a true expression value')
    print(var1)
var2 = 0
# 0 is falsy, so the if-body is skipped and only the final print executes.
if var2:
    print('2 = Got a true expression value')
print(var2)
01b4eeac5627b7f8feec95705176df08c2d202e2 | Python | cpjk/Wubalubadubdub | /app/tests/wall.py | UTF-8 | 241 | 2.828125 | 3 | [] | no_license | WEIGHT = 1
# Scoring hook for this behaviour module. Returns a positive integer that
# will be multiplied by WEIGHT and the current score, or None to veto the
# direction entirely.
def run(data, direction):
    """Veto "south" by returning None; every other direction scores 1."""
    if direction == "south":
        return None
    return 1
| true |
e9f3530ac59d150079debfcb5a0d7566336ba3ea | Python | ddyson1/pcc | /pcc/pcc_scatter.py | UTF-8 | 543 | 3.34375 | 3 | [] | no_license | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 21 14:17:21 2019
@author: devindyson
"""
import matplotlib.pyplot as plt
x_values = list(range(1,1000))
# NOTE(review): these are squares (x**2), yet the title and y-label below
# say "Cube" — one of the two looks wrong; confirm the intent.
y_values = [x**2 for x in x_values]
# Colour each point by its y value (sequential green colormap), no outlines.
plt.scatter(x_values,y_values,c=y_values, cmap=plt.cm.Greens,edgecolor='none',s=10)
# set chart title and axis labels
plt.title("Cubed Numbers", fontsize=18)
plt.xlabel("Value", fontsize=14)
plt.ylabel("Cube of Values",fontsize=14)
# set size of tick labels
plt.tick_params(axis='both',which='major',labelsize=14)
plt.show()
| true |
250779998e3a77b26d5dafe94aa6a7b27685d608 | Python | League-Doctor/League-Doc-JR | /___PaintTests/PaintTests.py | UTF-8 | 1,311 | 3.34375 | 3 | [] | no_license | """
PaintTests
Simply a test app to test the drawing capabilities of the Canvas widget from tkinter
@author Joseph Miller
@version September 12, 2021
"""
from tkinter import *
import colorScheme as Cs
color_scheme = Cs.basic_color_scheme
def run(gui):
    """Build the demo UI: a drawing canvas plus a button starting the animation."""
    raf = gui.running_app_frame
    gui.set_scroll_region_size(gui.raf_width, gui.raf_height + 300)
    # raf_height = 720: (1920 X 1080 display)
    # raft_width = 1024: (1920 X 1080 display)
    drawing_canvas = Canvas(raf, width=gui.raf_width, height=gui.raf_height)
    drawing_canvas.pack()
    # Button colours come from the shared colour-scheme module imported above.
    draw_button = Button(raf, text='draw', fg=color_scheme['light_text'], bg=color_scheme['dark_text'],
                         command=lambda: rectangle_fall(gui, drawing_canvas), font=90)
    draw_button.pack()
def rectangle_fall(gui, drawing_canvas):
    """Draw a 200x200 square centred on the canvas and start its animation."""
    rect = drawing_canvas.create_rectangle(gui.raf_width / 2 + 100, gui.raf_height / 2 + 100,
                                           gui.raf_width / 2 - 100, gui.raf_height / 2 - 100,
                                           fill='black', width=3)
    print('drawn')
    # NOTE(review): despite the name, movement() moves the square UP (-y).
    movement(drawing_canvas, rect, 100)
    print('done')
def movement(drawing_canvas, rect, i):
    """Move *rect* up by 10 px, *i* times, one step every 100 ms."""
    if i > 0:
        drawing_canvas.move(rect, 0, -10)
        # BUG FIX: after() must be given a CALLABLE. The original passed
        # movement(...) — i.e. called it immediately — so the whole recursion
        # ran in one burst before any 100 ms delay was honoured.
        drawing_canvas.after(100, lambda: movement(drawing_canvas, rect, i - 1))
| true |
f07086b1ad05fec7c444189bd4ca812438269f79 | Python | gnitsua/VolumeCompensatedFunctionGenerator | /FrequencySweeper.py | UTF-8 | 1,567 | 3.0625 | 3 | [] | no_license | import csv
import time
from Agilent33220A import Agilent33220A
class FrequencySweeper():
    """
    Read a CSV schedule and drive a function generator through the listed
    frequency/amplitude points at the listed times.

    Input file should be of the form, where Time is in milliseconds:

    Time1,Frequency1,Voltage1
    Time2,Frequency2,Voltage2
    .
    .
    .
    TimeN,FrequencyN,VoltageN
    """

    def __init__(self, filename, scope=None):
        """Open the schedule *filename*; create an Agilent33220A unless *scope* is given."""
        try:
            self.file = csv.reader(open(filename, "r"), delimiter=',')
        except FileNotFoundError:
            # BUG FIX: the original printed and swallowed the error, leaving
            # the object half-built (self.file unset) — re-raise instead.
            print("file not found")
            raise
        if scope is None:
            # No scope object provided, so create one.
            self.scope = Agilent33220A()
        else:
            # BUG FIX: the original never stored a caller-supplied scope,
            # leaving self.scope unset and start() broken.
            self.scope = scope

    def start(self):
        """Run the sweep: step through the schedule, waiting out each row's time."""
        start = time.time()
        self.scope.write("OUTP ON")
        self.scope.write("FUNC SIN")
        for point in self.file:
            assert (len(point) == 3)
            try:
                current_time = int(point[0]) / 1000  # milliseconds -> seconds
                frequency = int(point[1])
                amplitude = float(point[2])
                # Busy-wait (1 ms granularity) until this row's timestamp.
                while (time.time() - start) < current_time:
                    time.sleep(0.001)
                self.scope.write("FREQ %i" % (frequency))
                # BUG FIX: amplitude is a float; "%i" truncated e.g. 1.5 V
                # to 1 V. "%g" keeps the fractional part.
                self.scope.write("VOLT %g" % (amplitude))
            except ValueError:
                raise AssertionError("file is invalid")
        self.scope.write("OUTP OFF")
| true |
d4b964ea34dbaa7bd2829cb8de6a1d6cf16fdf9d | Python | arunkumarpro1/twitter-sentiment-analysis | /spark.py | UTF-8 | 2,109 | 2.734375 | 3 | [] | no_license | from pyspark import SparkConf, SparkContext
from pyspark.streaming import StreamingContext
import ast
from pycorenlp import StanfordCoreNLP
import requests
import json
import time
url = 'http://localhost:9200/{}/tweets'
headers = {'Content-type': 'application/json'}
data = {
"tweet" : "John",
"sentiment" : "Doe",
"timestamp" : 1355563265,
"geo":{
"location" : "Illinois, USA",
"coordinates":{
"lat":36.518375,
"lon":-86.05828083
}
}
}
# Routine to get sentiment of a tweet using Stanford CoreNLP.
def get_sentiment(sentence):
    # Uses the module-level CoreNLP client `nlp`; 5 s annotation timeout.
    res = nlp.annotate(sentence,properties={'annotators': 'sentiment', 'outputFormat': 'json', 'timeout': 5000,})
    if not res["sentences"]:
        return "NA"
    else:
        # Sentiment label of the first sentence only.
        return res["sentences"][0]["sentiment"]
# Routine to send data to Elasticsearch.
def mapper(x):
    # NOTE(review): assumes records look like "tweet:location:index" and
    # that the tweet text contains no ':' — confirm upstream formatting.
    words = x.split( ":" )
    sentiment = get_sentiment(words[0])
    if not sentiment == "NA":
        # Mutates the shared module-level `data` template in place; when the
        # location is "NA", the geo fields keep the previous record's values.
        data['tweet'] = words[0]
        data['sentiment'] = sentiment
        data['timestamp'] = int(time.time())
        if not words[1] == "NA":
            place = words[1].split( ";" )
            data['location'] = place[0]
            data['geo']['coordinates']['lat'] = place[1]
            data['geo']['coordinates']['lon'] = place[2]
        response = requests.post(url.format(words[2]), data=json.dumps(data), headers=headers)
    return [words[0],sentiment,words[1]]
TCP_IP = 'localhost'
TCP_PORT = 9001
nlp = StanfordCoreNLP('http://localhost:9000')
# Pyspark
# create spark configuration
conf = SparkConf()
conf.setAppName('TwitterApp')
conf.setMaster('local[2]')
# create spark context with the above configuration
sc = SparkContext(conf=conf)
# create the Streaming Context from spark context with interval size 2 seconds
ssc = StreamingContext(sc, 4)
ssc.checkpoint("checkpoint_TwitterApp")
# read data from port 9001
dataStream = ssc.socketTextStream(TCP_IP, TCP_PORT)
words = dataStream.map(lambda x: mapper(x))
words.saveAsTextFiles("result", "txt")
ssc.start()
ssc.awaitTermination() | true |
ed84891cbaee860367379a1312d791e13421cabd | Python | ptweir/rotatorAnalysis | /make_movie.py | UTF-8 | 2,658 | 2.6875 | 3 | [] | no_license | # make_movie.py
# PTW 3/18/2011 based on old make_bigmovie2.py
# to make movie, run at command prompt:
# ffmpeg -b 8000000 -r 20 -i frame%05d.png movie.mpeg
# this will result in movie 10x actual speed
import flypod, sky_times
import motmot.FlyMovieFormat.FlyMovieFormat as FMF
import numpy as np
from os.path import join
import pylab
inDirName = '/media/weir05/data/rotator/white/diffuserPol/12trials/fly05'
outDirName = './movies/images'
def circle_fit(dataX, dataY):
    """fit a circle to data, returns x,y,radius

    arguments:
    dataX numpy array containing x data
    dataY numpy array containing y data (must be same size as dataX)

    example:
    cx, cy, r = circle_fit(x,y)
    """
    # Drop NaN samples (assumes NaNs occur at matching positions in X and Y).
    xs = dataX[~np.isnan(dataX)]
    ys = dataY[~np.isnan(dataY)]
    # Least squares for a*x + b*y + c = -(x^2 + y^2), the circle equation
    # with a = -2*cx, b = -2*cy, c = cx^2 + cy^2 - r^2.
    coeffs = np.ones((xs.size, 3))
    coeffs[:, 0] = xs
    coeffs[:, 1] = ys
    rhs = -xs ** 2 - ys ** 2
    solution = np.dot(np.linalg.pinv(coeffs), rhs)
    circCenterX = -0.5 * solution[0]
    circCenterY = -0.5 * solution[1]
    circR = ((solution[0] ** 2 + solution[1] ** 2) / 4 - solution[2]) ** 0.5
    return circCenterX, circCenterY, circR
def make_frames(inDirName,outDirName):
    """saves frames in movie with orientation superimposed
    arguments:
    inDirName directory with fly, sky, and .fmf files
    outDirName directory to save frames to
    example:
    make_frames('/media/weir05/data/rotator/white/diffuserPol/12trials/fly08','./frames/')
    """
    fly = flypod.analyze_directory(inDirName)
    sky = sky_times.analyze_directory(inDirName)
    FRAMESTEP = 65 #130
    circleCenterX, circleCenterY, circleRadius = circle_fit(fly['x'],fly['y']) #NOTE:should have been saving this info from start.
    fmf = FMF.FlyMovie(join(inDirName,fly['fileName']))
    nFrames = fmf.get_n_frames()
    # One timestamp slot per saved frame (every FRAMESTEP-th movie frame).
    timestamps = np.ma.masked_all(len(range(0,int(nFrames),FRAMESTEP)))
    fig = pylab.figure()
    ax = fig.add_subplot(111)
    for fn,frameNumber in enumerate(range(0,int(nFrames),FRAMESTEP)):
        frame,timestamps[fn] = fmf.get_frame(frameNumber)
        #pylab.imsave(join(outDirName,'frame')+("%05d" %(fn+1))+'.png',frame,format='png',cmap='gray')
        ax.imshow(frame,cmap='gray')
        # Line from the fly's current position to the fitted arena centre,
        # offset into full-frame coordinates by the ROI origin.
        lineX = [fly['x'][frameNumber]+fly['ROI'][2], circleCenterX+fly['ROI'][2]]
        lineY = [fly['y'][frameNumber]+fly['ROI'][1], circleCenterY+fly['ROI'][1]]
        ax.plot(lineX,lineY,'b-', linewidth=2)
        ax.set_axis_off()
        fig.savefig(join(outDirName,'frame')+("%05d" %(fn+1))+'.png')
        ax.cla()
    # Python 2 print statement; reports the effective saved-frame rate.
    print 'frame rate = ' + str(1/np.mean(np.diff(timestamps)))
make_frames(inDirName,outDirName)
| true |
ee43aea5daaea214dac67eccc7f7d8e3aee7d5d4 | Python | RaidenCJ/OpenCV_SVM_DEMO | /crop.py | UTF-8 | 463 | 2.515625 | 3 | [] | no_license | from PIL import Image
import os
import cv2
src = '.'
full_video_path = 'L2_M_55X7500F_XX_516.mp4'
cap = cv2.VideoCapture(full_video_path)
success = True
frame_count = 1
# NOTE: this is a Python 2 script (print statement; bytes(int) == str(int)).
while(success):
    filename = src + '/cropped/frameByframe/' + bytes(frame_count) + '.jpg'
    success, frame = cap.read()
    print 'Read a new frame: ', success
    # NOTE(review): when read() fails at end-of-video, `frame` is None and
    # the slice below raises before the loop condition is rechecked —
    # confirm whether an `if not success: break` guard is intended here.
    cropped = frame[200:450, 700:780]
    cv2.imwrite(filename, cropped)
    frame_count = frame_count + 1
cap.release()
d1668a17889e35e62a15c2e6bb67a8a48a6c6535 | Python | bmaheshnaidu/pythontraiing | /string123.py | UTF-8 | 2,361 | 3.875 | 4 | [] | no_license | '''
string are called as self implemented arrays in pyhton only
when we speak on arrays, we generally called as same set of data type items
arrays will work based on indexing , index will start from 0
'''
'''
mystr="india"
print(mystr[1])
# To reverse a string
print (mystr[::-1])
mystr=" my emplyee id 21"
revstr=mystr[::-1]
# croping a string
print (revstr[0:2])
# reverse the string
print (revstr[0:2][::-1])
mystr="SCHOOL"
i=0
while (i<= len(mystr)):
print (mystr[0:i])
i+=1;
for i in range(0,len(mystr)+1):
print(mystr[0:i])
uservalue=input("Enter a string to print : ")
for i in range(0,len(uservalue)+1):
print(uservalue[0:i])
uservalue=input("Enter a value : ")
for i in range (1,21):
print("{0} * {1} = {2}".format(uservalue,i,(int(uservalue)*i)))
'''
'''
Count: this method is used to get the count of a caharacter pr a word from the given source string
strVariable.count('character' or 'word')
mystr="india"
#print(mystr.count("i"))
for i in mystr:
print (i + " repeated for " + str(mystr.count(i)))
'''''
# str.split(delimiter) breaks a string on the given delimiter and returns a
# list of tokens. Syntax: string_or_variable.split("delimiter")
mystr="india has many hotels in india manufacturs"
arr=mystr.split(" ")
print (arr)
# Count how many of the resulting tokens equal "india".
count=0
for item in arr:
    if (item=="india"):
        count +=1
print (count)
# Case-conversion helpers: upper()/lower() convert the whole string.
mystr="India has many Hotels in india manufacture"
mystr=mystr.upper()
print (mystr)
mystr="India has many Hotels in india manufacture"
mystr=mystr.lower()
print (mystr)
# swapcase() flips the case of every letter.
mystr="India has many Hotels in india manufacture"
mystr=mystr.swapcase()
print (mystr)
# casefold() is an aggressive lower-casing meant for caseless comparison.
mystr="India has many Hotels in india manufacture"
mystr=mystr.casefold()
print (mystr)
# title() upper-cases the first character of every word.
mystr="India has many Hotels in india manufacture"
mystr=mystr.title()
print (mystr)
# capitalize() upper-cases only the first character of the string.
mystr="India has many Hotels in india manufacture"
mystr=mystr.capitalize()
print (mystr)
# Scan a sentence for an id-like token.
# Fix: the variable was named `str`, which shadowed the `str` builtin for the
# rest of the module; renamed to `text` (behavior is unchanged).
text = "my traction id is : 108956"
arr = text.split(" ")
for item in arr:
    # isdecimal() is True only for purely numeric tokens, so this prints the
    # first numeric id and stops.
    if item.isdecimal():
        print(item)
        break

# Same idea with an alphanumeric id. Note isalnum() is also True for plain
# words ('my', 'id', ...), so every token except ':' is printed here.
text = "my traction id is : 108N956"
arr = text.split(" ")
for item in arr:
    if item.isalnum():
        print(item)
| true |
2a2501689d6a60947677ea71d2468100bfffea49 | Python | scientificprogrammingUOS/lectures | /week12-Designing_Experiments_with_PsychoPy_and_Expyriment/psychopy/psychopy_5_jnd1.py | UTF-8 | 2,970 | 2.65625 | 3 | [] | no_license | assert '__file__' in locals() #to make sure to not run this inside Jupyter
# Orientation JND experiment: a 1-up/3-down staircase homes in on the
# orientation increment the observer can just discriminate from the reference.
from psychopy import core, visual, gui, data, event
from psychopy.tools.filetools import fromFile, toFile
import numpy, random

# Session info; refOrientation is the reference grating orientation in deg.
expInfo = {'observer':'jwp', 'refOrientation':0}

# Adaptive staircase controlling the orientation increment of the foil.
# NOTE(review): nTrials=1 ends the staircase almost immediately — confirm
# this is intentional (a demo run) and not a typo for a larger count.
staircase = data.StairHandler(startVal = 20.0,
    stepType = 'db', stepSizes=[8,4,4,2],
    nUp=1, nDown=3, # will home in on the 80% threshold
    nTrials=1)
# create window and stimuli
win = visual.Window([800,600],allowGUI=True, monitor='testMonitor', units='deg')
foil = visual.GratingStim(win, sf=1, size=4, mask='gauss', ori=expInfo['refOrientation'])
target = visual.GratingStim(win, sf=1, size=4, mask='gauss', ori=expInfo['refOrientation'])
fixation = visual.GratingStim(win, color=-1, colorSpace='rgb',tex=None, mask='circle', size=0.2)
# display instructions and wait
message1 = visual.TextStim(win, pos=[0,+3],text='Hit a key when ready.')
message2 = visual.TextStim(win, pos=[0,-3], text="Then press left or right to identify the %.1f deg probe." %expInfo['refOrientation'])
message1.draw()
message2.draw()
fixation.draw()
win.flip()
event.waitKeys()

# One iteration per staircase level; thisIncrement is the orientation delta.
for thisIncrement in staircase:
    targetSide= random.choice([-1,1]) # will be either +1(right) or -1(left)
    foil.setPos([-5*targetSide, 0])
    target.setPos([5*targetSide, 0]) # in other location
    # set orientation of probe
    foil.setOri(expInfo['refOrientation'] + thisIncrement)
    foil.draw()
    target.draw()
    fixation.draw()
    win.flip()
    core.wait(0.5) # wait 500ms; but use a loop of x frames for more accurate timing
    fixation.draw()
    win.flip()
    # get response: left/right identifies the reference-oriented target;
    # 1 = correct, -1 = incorrect; q/escape aborts.
    thisResp=None
    while thisResp==None:
        allKeys=event.waitKeys()
        for thisKey in allKeys:
            if thisKey=='left':
                if targetSide==-1: thisResp = 1 # correct
                else: thisResp = -1 # incorrect
            elif thisKey=='right':
                if targetSide== 1: thisResp = 1 # correct
                else: thisResp = -1 # incorrect
            elif thisKey in ['q', 'escape']:
                core.quit() # abort experiment
        event.clearEvents() # clear other (eg mouse) events - they clog the buffer
    # add the data to the staircase so it can calculate the next level
    staircase.addData(thisResp)
    core.wait(1)

# staircase has ended
# give some output to user in the command line in the output window
print('reversals:')
print(staircase.reversalIntensities)
approxThreshold = numpy.average(staircase.reversalIntensities[-6:])
print('mean of final 6 reversals = %.3f' % (approxThreshold))
# give some on-screen feedback
feedback1 = visual.TextStim(
    win, pos=[0,+3],
    text='mean of final 6 reversals = %.3f' % (approxThreshold))
feedback1.draw()
fixation.draw()
win.flip()
event.waitKeys() # wait for participant to respond
win.close()
core.quit()
| true |
89627e04391eb93c1e1408cc575e0ee44d4c3b1f | Python | Rosomack/AAR | /Assets.py | UTF-8 | 3,235 | 2.96875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Class definitions to abstract the common operations on 9-patches and normal
images.
"""
import os
from PIL import Image
import copy
import math
class Asset:
    """A drawable Android asset backed by a PIL image.

    Exposes the standard Android density buckets as class constants and
    knows how to rescale itself from its native density to a lower one.
    """

    # Android density buckets, in dots per inch.
    XHDPI = 320
    HDPI = 240
    MDPI = 160
    LDPI = 120

    def __init__(self, file_path, image_dpi):
        """Load the image at file_path and remember its native density."""
        self.img = Image.open(file_path)
        self.filename = os.path.split(file_path)[1]
        self.dpi = image_dpi

    def clone(self, other):
        """Return a copy of this asset that shares the underlying image.

        :param other: unused; kept only for backward compatibility with
            existing call sites.
        """
        new_asset = copy.deepcopy(self)
        # Share (rather than deep-copy) the potentially large PIL image.
        new_asset.img = self.img
        new_asset.filename = self.filename
        new_asset.dpi = self.dpi
        # Bug fix: the clone used to be built and then silently discarded
        # because the method never returned it.
        return new_asset

    def downscale(self, to_dpi):
        """Return a resized copy of the image scaled from self.dpi to to_dpi."""
        scale = float(to_dpi) / self.dpi
        new_size = (int(self.img.size[0] * scale), int(self.img.size[1] * scale))
        return self.img.resize(new_size, Image.ANTIALIAS)

    @staticmethod
    def open_asset(path, dpi):
        """Factory: return a NinePatchAsset for '*.9.*' files, else a plain Asset."""
        if Asset.check_ninepatch(path):
            return NinePatchAsset(path, dpi)
        return Asset(path, dpi)

    @staticmethod
    def check_ninepatch(path):
        """Return True when the file name carries a '.9' extension component
        (e.g. 'button.9.png'), marking it as a 9-patch image.
        """
        full_ext = ''
        path_tuple = os.path.splitext(path)
        # Accumulate every extension component: 'a.9.png' -> '.png' + '.9'.
        # (The loop condition already guarantees a non-empty extension, so the
        # old inner "break when empty" branch was dead code and is removed.)
        while len(path_tuple[1]):
            full_ext = full_ext + path_tuple[1]
            path_tuple = os.path.splitext(path_tuple[0])
        return '.9' in full_ext
class NinePatchAsset(Asset):
    """An Android 9-patch image: a 1px border of stretch/padding markers
    surrounds the drawable content, so scaling must treat the border and the
    interior separately.
    """
    def __init__(self, file_path, file_dpi):
        Asset.__init__(self, file_path, file_dpi)

    def downscale(self, to_dpi):
        """Return a new image scaled to to_dpi while preserving the 1px
        9-patch marker border: the interior is resized with antialiasing,
        then each border strip is resized along its own axis and pasted back.
        """
        scale = float(to_dpi) / float(self.dpi)
        old_size = self.img.size
        new_size = (int(math.floor(old_size[0] * scale)), int(math.floor(old_size[1] * scale)))
        # Interior rectangle (everything inside the 1px marker border).
        crop_src_rect = (1, 1, old_size[0] - 1, old_size[1] - 1)
        crop_dest_size = (new_size[0] - 2, new_size[1] - 2)
        new_image = Image.new(self.img.mode, new_size)
        #Resize and paste the center image
        stripped_image = self.img.crop(crop_src_rect)
        stripped_image.load()
        stripped_image = stripped_image.resize(crop_dest_size, Image.ANTIALIAS)
        new_image.paste(stripped_image, (1, 1, new_size[0] - 1, new_size[1] - 1))
        # Border strips are resized with the default (nearest) filter —
        # presumably to keep the black/transparent markers crisp; confirm.
        #Resize and paste the left border
        border = self.img.crop((0, 0, 1, old_size[1]))
        border.load()
        border = border.resize((1, new_size[1]))
        new_image.paste(border, (0, 0))
        #Resize and paste the top border
        border = self.img.crop((0, 0, old_size[0], 1))
        border.load()
        border = border.resize((new_size[0], 1))
        new_image.paste(border, (0, 0))
        #Resize and paste the right border
        border = self.img.crop((old_size[0] - 1, 0, old_size[0], old_size[1]))
        border.load()
        border = border.resize((1, new_size[1]))
        new_image.paste(border, (new_size[0] - 1, 0))
        #Resize and paste the bottom border
        border = self.img.crop((0, old_size[1] - 1, old_size[0], old_size[1]))
        border.load()
        border = border.resize((new_size[0], 1))
        new_image.paste(border, (0, new_size[1] - 1))
        return new_image
| true |
3c005bb7670ee321556da2f8b77e0ae20170a189 | Python | ravescovi/funcX | /funcx_sdk/funcx/tests/test_batch.py | UTF-8 | 1,218 | 2.609375 | 3 | [
"Apache-2.0"
] | permissive | import time
def fn_batch1(a, b, c=2, d=2):
    """Return the sum of the four arguments (batch-submission test helper)."""
    return sum((a, b, c, d))
def fn_batch2(a, b, c=2, d=2):
    """Return the product of the four arguments (batch-submission test helper)."""
    product = a * b
    product = product * c
    return product * d
def fn_batch3(a, b, c=2, d=2):
    """Return the weighted sum a + 2b + 3c + 4d (batch-submission test helper)."""
    weights = (1, 2, 3, 4)
    return sum(w * v for w, v in zip(weights, (a, b, c, d)))
def test_batch(fxc, endpoint):
    """Register the three fn_batch helpers with the funcX client, launch a
    batch of tasks against *endpoint*, and poll until all tasks finish or
    ten polls (~50 s) elapse.

    :param fxc: funcX client fixture (provides register/create/run/result APIs).
    :param endpoint: endpoint id the tasks are dispatched to.
    """
    funcs = [fn_batch1, fn_batch2, fn_batch3]
    func_ids = []
    for func in funcs:
        func_ids.append(fxc.register_function(func, description="test"))
    start = time.time()
    task_count = 5
    batch = fxc.create_batch()
    # task_count invocations per registered function, with varying args.
    for func_id in func_ids:
        for i in range(task_count):
            batch.add(
                i, i + 1, c=i + 2, d=i + 3, endpoint_id=endpoint, function_id=func_id
            )
    task_ids = fxc.batch_run(batch)
    delta = time.time() - start
    print(f"Time to launch {task_count * len(func_ids)} tasks: {delta:8.3f} s")
    print(f"Got {len(task_ids)} tasks_ids ")
    # Poll for completion: a task is done once its entry is present and no
    # longer marked "pending".
    for _i in range(10):
        x = fxc.get_batch_result(task_ids)
        complete_count = sum(
            [1 for t in task_ids if t in x and not x[t].get("pending", False)]
        )
        print(f"Batch status : {complete_count}/{len(task_ids)} complete")
        if complete_count == len(task_ids):
            print(x)
            break
        time.sleep(5)
| true |
abeaa79c40fea6fcd310a5ad8fa7492239deebc9 | Python | Aasthaengg/IBMdataset | /Python_codes/p02971/s254067559.py | UTF-8 | 207 | 2.953125 | 3 | [] | no_license | n=int(input())
# Read the n values, one per line.
a=[int(input())for i in range(n)]
a_max=max(a)
# Index of the first occurrence of the maximum.
index_max=a.index(a_max)
# For every position print the maximum of the OTHER n-1 elements: that is
# a_max everywhere except at the maximum itself, where it is the runner-up.
for i in range(n):
    if i==index_max:
        print(max(a[:index_max]+a[index_max+1:]))
    else:
        print(a_max)
a0657f4f9532acdd111237df8f777a41e9302a38 | Python | mindspore-ai/models | /research/cv/dlinknet/postprocess.py | UTF-8 | 3,110 | 2.640625 | 3 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-proprietary-license"
] | permissive | # Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import sys
import os
import numpy as np
import cv2
class IOUMetric:
    """Accumulate a confusion matrix over (prediction, ground-truth) pairs
    and report overall pixel accuracy, mean per-class accuracy, and the IoU
    of class 0.
    """
    def __init__(self, num_classes):
        self.num_classes = num_classes
        self.hist = np.zeros((num_classes, num_classes))

    def _fast_hist(self, label_pred, label_true):
        """Return the confusion matrix (rows: truth, cols: prediction) for
        one flattened prediction/label pair, ignoring out-of-range labels."""
        n = self.num_classes
        valid = (label_true >= 0) & (label_true < n)
        flat_index = n * label_true[valid].astype(int) + label_pred[valid]
        counts = np.bincount(flat_index, minlength=n ** 2)
        return counts.reshape(n, n)

    def evaluate(self, predictions, gts):
        """Fold every (prediction, ground truth) pair into the running
        histogram and return (accuracy, mean class accuracy, class-0 IoU)."""
        for pred, truth in zip(predictions, gts):
            assert len(pred.flatten()) == len(truth.flatten())
            self.hist += self._fast_hist(pred.flatten(), truth.flatten())
        diagonal = np.diag(self.hist)
        # IoU per class = TP / (TP + FP + FN); only class 0 is reported.
        union = self.hist.sum(axis=1) + self.hist.sum(axis=0) - diagonal
        _iou = (diagonal / union)[0]
        # Overall pixel accuracy and mean per-class recall.
        _acc = diagonal.sum() / self.hist.sum()
        _acc_cls = np.nanmean(diagonal / self.hist.sum(axis=1))
        return _acc, _acc_cls, _iou
def tranform_bin_to_png(_predictpath):
    """Convert every raw '.bin' prediction in _predictpath into a binary
    3-channel '<prefix>_mask.png' written alongside it.

    NOTE(review): the function name keeps the historical 'tranform' typo
    because callers use it; the 1024x1024 shape is hard-coded to match the
    inference output of this pipeline.
    """
    _pres = os.listdir(_predictpath)
    for file in _pres:
        if file[-4:] == '.bin':
            # float32 probabilities, thresholded at 0.5 into a 0/255 mask.
            _pre = np.fromfile(os.path.join(_predictpath, file), np.float32).reshape(1024, 1024)
            _pre[_pre > 0.5] = 255
            _pre[_pre <= 0.5] = 0
            # Replicate the mask into 3 channels for cv2.imwrite.
            _pre = np.concatenate([_pre[:, :, None], _pre[:, :, None], _pre[:, :, None]], axis=2)
            # 'name_suffix.bin' -> 'name_mask.png'
            file = file.split('.')[0]
            file = file.split('_')[0] + "_mask.png"
            cv2.imwrite(os.path.join(_predictpath, file), _pre.astype(np.uint8))
if __name__ == '__main__':
    # Usage: postprocess.py <prediction_dir> <label_dir>
    predictpath = sys.argv[1]
    # Turn the raw .bin network outputs into *_mask.png files first.
    tranform_bin_to_png(predictpath)
    label_path = sys.argv[2]
    pres = os.listdir(predictpath)
    labels = []
    predicts = []
    # Pair every generated mask with its same-named ground-truth label and
    # binarize both to {0, 1} before scoring.
    for im in pres:
        if im[-4:] == '.png':
            lab_path = os.path.join(label_path, im)
            pre_path = os.path.join(predictpath, im)
            label = cv2.imread(lab_path, 0)
            pre = cv2.imread(pre_path, 0)
            label[label > 0] = 1
            pre[pre > 0] = 1
            labels.append(label)
            predicts.append(pre)
    # Two-class evaluation: accuracy, mean class accuracy, class-0 IoU.
    el = IOUMetric(2)
    acc, acc_cls, iou = el.evaluate(predicts, labels)
    print('acc: ', acc)
    print('acc_cls: ', acc_cls)
    print('iou: ', iou)
| true |
00ff390652798e7e6d9cc382ba0195ae1477d353 | Python | giannpelle/roll-and-rake | /roll_and_rake/envs/roll_and_rake_v1.py | UTF-8 | 2,513 | 2.671875 | 3 | [] | no_license | import gym
import numpy as np
import random
from .model_v1.roll_and_rake_state import RollAndRakeState
from .model_v1.enums import GameMove, GamePhase, MoveType, RenderType
class RollAndRakeEnvV1(gym.Env):
    """Gym wrapper around RollAndRakeState: actions are discrete game moves
    and observations are the state's flattened binary/scalar encoding."""
    metadata = {'render.modes': ['human']}

    def __init__(self):
        super(RollAndRakeEnvV1, self).__init__()
        self.name = 'roll_and_rake'
        self.game_state = RollAndRakeState()
        self.action_space = gym.spaces.Discrete(MoveType.Pass.value)
        # Observation layout: per-die one-hot (7 slots each), the section
        # tick bits, then the scalar features and the legal-action mask.
        available_dice_space = len(self.game_state.available_dice) * 7
        binary_sections_space = sum(map(lambda x: len(x.tick_list), self.game_state.sections))
        elliott_space = 1
        green_value_space = 1
        game_phase_value_space = 4
        rerolls_value_space = 1
        dice_combination_choices_available_value_space = 1
        legal_actions_space = self.action_space.n
        self.observation_space = gym.spaces.Box(0, 1, (available_dice_space + binary_sections_space + elliott_space + green_value_space + game_phase_value_space + rerolls_value_space + dice_combination_choices_available_value_space + legal_actions_space, ))

    @property
    def observation(self):
        """Current state encoded as the flat observation vector."""
        return self.game_state.to_observation()

    @property
    def legal_actions(self):
        """Indices of the actions that are legal in the current state."""
        return np.array(self.game_state.get_legal_env_actions_indices())

    def score_game(self):
        """Current total score of the game in progress."""
        return self.game_state.get_current_score()

    def step(self, env_action_index):
        """Apply one action; reward is the score delta it produced.

        Returns the usual gym tuple (observation, reward, done, info).
        """
        starting_score = self.game_state.get_current_score()
        # print(f"Action taken: {GameMove(env_action_index)}")
        self.game_state.step(with_env_action_index=env_action_index)
        new_state = self.game_state.to_observation()
        reward = self.game_state.get_current_score() - starting_score
        done = self.game_state.is_done
        self.done = done
        return new_state, reward, done, {}

    def reset(self):
        """Start a new game and return the initial observation."""
        self.game_state.reset()
        self.done = False
        # print('\n\n---- NEW GAME ----')
        return self.observation

    def render(self, mode='human'):
        """Print the state ('human'), or just the score ('train'/'play').

        NOTE(review): self.done only exists after reset()/step(); rendering
        before either would raise AttributeError — confirm callers reset first.
        """
        if mode == "human":
            if self.done:
                print('\n\nGAME OVER\n\n')
            print(self.game_state)
        elif mode== "train":
            if self.done:
                print('\n\nGAME OVER\n\n')
            print(f"Current score: {self.game_state.get_current_score()}")
        elif mode=="play":
            print(f"Current score: {self.game_state.get_current_score()}")
| true |
e8e29a6981eafdac8c93eec2993b4223ab5e9c01 | Python | ronakzala/universal-schema-bloomberg | /universal_schema/src/learning/batchers.py | UTF-8 | 13,566 | 2.96875 | 3 | [] | no_license | """
Classes to stream int-mapped data from file in batches, pad and sort them (as needed)
and return batch dicts for the models.
"""
from __future__ import unicode_literals
from __future__ import print_function
import codecs
import copy
import random
from collections import defaultdict
import numpy as np
import torch
import data_utils as du
import le_settings as les
import pprint, sys
sys.stdout = codecs.getwriter('utf-8')(sys.stdout)
class GenericBatcher:
    """Base class for batchers that stream int-mapped examples from disk.

    Subclasses implement next_batch (yield model-ready batch dicts) and
    raw_batch_from_file (read one raw batch from an open file).
    """
    def __init__(self, num_examples, batch_size):
        """Record corpus/batch sizes and initialize the batch window.

        :param num_examples: the number of examples in total.
        :param batch_size: the number of examples to have in a batch.
        """
        # Batch sizes book-keeping.
        self.full_len = num_examples
        self.batch_size = batch_size
        # One batch suffices when everything fits; otherwise round up.
        if self.full_len <= self.batch_size:
            self.num_batches = 1
        else:
            self.num_batches = int(np.ceil(float(self.full_len) / self.batch_size))
        # Index window of the upcoming batch.
        self.batch_start = 0
        self.batch_end = self.batch_size

    def next_batch(self):
        """Yield the dict which the model knows how to make sense of.

        Subclass hook; must be overridden.
        """
        raise NotImplementedError

    @staticmethod
    def raw_batch_from_file(ex_file, num_batches, to_read_count):
        """Read the next raw batch of examples from the file.

        :param ex_file: File-like with a next() method.
        :param num_batches: int; number of batches to read.
        :param to_read_count: int; number of examples to read from the file.

        Subclass hook; must be overridden.
        """
        raise NotImplementedError
class RCBatcher(GenericBatcher):
    """
    A batcher to feed a model which inputs row-column as a single example.

    NOTE(review): this class uses Python 2 constructs (xrange, generator
    .next()) like the rest of the module; the example files opened in
    __init__ are never explicitly closed.
    """
    # Whether the padding on sequences should be ignored.
    # (If yes simply ignores the first and last elm, make sure data has
    # actually start-stop.)
    ignore_ss = False

    def __init__(self, ex_fnames, num_examples, batch_size):
        """
        Batcher class for the universal-schema-like models which need a positive
        and a negative example.
        :param ex_fnames: dict('pos_ex_fname': str, 'neg_ex_fname': str)
        :param num_examples: int.
        :param batch_size: int.
        """
        GenericBatcher.__init__(self, num_examples=num_examples,
                                batch_size=batch_size)
        pos_ex_fname = ex_fnames['pos_ex_fname']
        neg_ex_fname = ex_fnames.get('neg_ex_fname', None)
        # Check that a file with negative examples has been provided.
        # Train mode is implied by the presence of a negatives file.
        self.train_mode = True if neg_ex_fname!=None else False
        # Access the file with the positive and negative examples.
        self.pos_ex_file = codecs.open(pos_ex_fname, 'r', 'utf-8')
        if self.train_mode:
            self.neg_ex_file = codecs.open(neg_ex_fname, 'r', 'utf-8')

    def next_batch(self):
        """
        Yield the next batch. Based on whether its train_mode or not yield a
        different set of items.
        :return:
            batch_doc_ids: list; with the doc_ids corresponding to the
                examples in the batch.
            In train mode:
                batch_dict:
                    {'batch_cr': dict; of the form returned by pad_sort_data.
                     'batch_neg': dict; of the form returned by pad_sort_data.}
            else:
                batch_dict:
                    {'batch_cr': dict; of the form returned by pad_sort_data.}
        """
        for nb in xrange(self.num_batches):
            # Read the batch of int-mapped data from the file.
            # The final batch may be smaller than batch_size.
            if self.batch_end < self.full_len:
                cur_batch_size = self.batch_size
            else:
                cur_batch_size = self.full_len - self.batch_start
            batch_doc_ids, batch_row_raw, batch_col_raw = \
                RCBatcher.raw_batch_from_file(self.pos_ex_file, cur_batch_size).next()
            if self.train_mode:
                # Negatives are read in lockstep; their doc ids are ignored.
                _, _, batch_col_raw_neg = \
                    RCBatcher.raw_batch_from_file(self.neg_ex_file, cur_batch_size).next()
            self.batch_start = self.batch_end
            self.batch_end += self.batch_size
            # Process the batch for feeding into rnn models; sort the batch and
            # pad shorter examples to be as long as the longest one.
            if self.train_mode:
                batch_cr, batch_neg = RCBatcher.pad_sort_batch(raw_feed={
                    'im_row_raw': batch_row_raw,
                    'im_col_raw': batch_col_raw,
                    'im_col_raw_neg': batch_col_raw_neg
                }, ignore_ss=self.ignore_ss, sort_by_seqlen=False)
                batch_dict = {
                    'batch_cr': batch_cr,
                    'batch_neg': batch_neg
                }
            else:
                batch_cr = RCBatcher.pad_sort_batch(raw_feed={
                    'im_row_raw': batch_row_raw,
                    'im_col_raw': batch_col_raw
                }, ignore_ss=self.ignore_ss, sort_by_seqlen=False)
                batch_dict = {
                    'batch_cr': batch_cr
                }
            yield batch_doc_ids, batch_dict

    @staticmethod
    def raw_batch_from_file(ex_file, to_read_count):
        """
        Read the next batch from the file.
        TODO: Change this to be as per your raw file of int mapped rels
        and entity pairs.
        :param ex_file: File-like with a next() method.
        :param to_read_count: int; number of lines to read from the file.
        :return:
            read_ex_rows: list(list(int)); a list where each element is a list
                which is the int mapped example row.
            read_ex_cols: list(list(int)); a list where each element is a list
                which is the int mapped example col.
        """
        # Initial values.
        read_ex_count = 0
        read_ex_docids = []
        read_ex_rows = []
        read_ex_cols = []
        # Read content from file until the file content is exhausted.
        for ex in du.read_json(ex_file):
            # If it was possible to read a valid example.
            if ex:
                read_ex_docids.append(ex['doc_id'])
                read_ex_rows.append(ex['row'])
                read_ex_cols.append(ex['col'])
                read_ex_count += 1
            if read_ex_count == to_read_count:
                yield read_ex_docids, read_ex_rows, read_ex_cols
                # Once execution is back here empty the lists and reset counters.
                read_ex_count = 0
                read_ex_docids = []
                read_ex_rows = []
                read_ex_cols = []

    @staticmethod
    def pad_sort_batch(raw_feed, ignore_ss, sort_by_seqlen=True):
        """
        Pad the data and sort such that the sentences are sorted in descending order
        of sentence length. Jumble all the sentences in this sorting but also
        maintain a list which says which sentence came from which document of the
        same length as the total number of sentences with elements in
        [0, len(int_mapped_docs)]
        :param raw_feed: dict; a dict with the set of things you want to feed
            the model. Here the elements are:
            im_row_raw: list(list(int)); a list where each element is a list
                which is the int mapped example row.
            im_col_raw: list(list(int)); a list where each element is a list
                which is the int mapped example col.
            im_col_raw_neg: list(list(int)); a list where each element is a list
                which is the int mapped example col. But this is a random set of
                col negative examples not corresponding to the rows.
        :param ignore_ss: boolean; Whether the start-stop on sequences should be
            ignored or not.
        :param sort_by_seqlen: boolean; Optionally allow sorting to be turned off.
        :return:
            colrow_ex: (batch_col_row) dict of the form:
                {'col': Torch Tensor; the padded and sorted-by-length col elements.
                 'row': Torch Tensor; the padded and sorted-by-length row elements.
                 'col_lens': list(int); lengths of all sequences in 'col'.
                 'row_lens': list(int); lengths of all sequences in 'row'.
                 'sorted_colrefs': list(int); ints saying which seq in col came
                    from which document. ints in range [0, len(docs)]
                 'sorted_rowrefs': list(int); ints saying which seq in row came
                    from which document. ints in range [0, len(docs)]}
            colneg_ex: (batch_row_neg) dict of the form:
                {'col': Torch Tensor; the padded and sorted-by-length entities.
                 'col_lens': list(int); lengths of all sequences in 'col'.
                 'sorted_colrefs': list(int); ints saying which seq in row came
                    from which document. ints in range [0, len(docs)]}
        """
        # Unpack arguments.
        im_row_raw = raw_feed['im_row_raw']
        im_col_raw = raw_feed['im_col_raw']
        im_col_raw_neg = raw_feed.get('im_col_raw_neg', None)
        assert (len(im_col_raw) == len(im_row_raw))
        # If there is no data in the batch the model computes a zero loss. This never
        # happens in a purely rc model.
        if len(im_row_raw) == 0:
            if isinstance(im_col_raw_neg, list):
                return None, None
            else:
                return None
        col, col_lens, sorted_colrefs = RCBatcher.pad_sort_ex_seq(
            im_col_raw, ignore_ss=ignore_ss, sort_by_seqlen=sort_by_seqlen)
        row, row_lens, sorted_rowrefs = RCBatcher.pad_sort_ex_seq(
            im_row_raw, ignore_ss=ignore_ss, sort_by_seqlen=sort_by_seqlen)
        colrow_ex = {'col': col,
                     'row': row,
                     'col_lens': col_lens,
                     'row_lens': row_lens,
                     'sorted_colrefs': sorted_colrefs,
                     'sorted_rowrefs': sorted_rowrefs}
        if im_col_raw_neg:
            assert (len(im_col_raw) == len(im_row_raw) == len(im_col_raw_neg))
            col_neg, col_neg_lens, sorted_neg_colrefs = RCBatcher.pad_sort_ex_seq(
                im_col_raw_neg, ignore_ss=ignore_ss, sort_by_seqlen=sort_by_seqlen)
            colneg_ex = {'col': col_neg,
                         'col_lens': col_neg_lens,
                         'sorted_colrefs': sorted_neg_colrefs}
            return colrow_ex, colneg_ex
        return colrow_ex

    @staticmethod
    def pad_sort_ex_seq(im_ex_seq, ignore_ss, sort_by_seqlen=True, pad_int=0):
        """
        Pad and sort the passed list of sequences one corresponding to each
        example.
        :param im_ex_seq: list(list(int)); can be anything; each sublist can be
            a different length.
        :param ignore_ss: boolean; Whether the start-stop on sequences should be
            ignored or not.
        :param sort_by_seqlen: boolean; Optionally allow sorting to be turned off.
        :param pad_int: int; int value to use for padding.
        :return:
            ex_seq_padded: torch.Tensor(len(im_ex_seq), len_of_longest_seq)
            sorted_lengths: list(int); lengths of sequences in im_ex_seq. Sorted.
            sorted_ref: list(int); indices of im_ex_seq elements in sorted order.
        """
        doc_ref = range(len(im_ex_seq))
        max_seq_len = max([len(l) for l in im_ex_seq])
        # Get sorted indices.
        if sort_by_seqlen:
            sorted_indices = sorted(range(len(im_ex_seq)),
                                    key=lambda k: -len(im_ex_seq[k]))
        else:
            sorted_indices = range(len(im_ex_seq))
        # Decide the padded width; dropping start/stop shortens rows by 2.
        if ignore_ss:
            # If its the operation/row (single element long) then there's no need to
            # ignore start/stops.
            if len(im_ex_seq[sorted_indices[0]]) > 1:
                max_length = max_seq_len - 2
            else:
                max_length = max_seq_len
        else:
            max_length = max_seq_len
        # Make the padded sequence.
        ex_seq_padded = torch.LongTensor(len(im_ex_seq), max_length).zero_()
        if pad_int != 0:
            ex_seq_padded = ex_seq_padded + pad_int
        # Make the sentences into tensors sorted by length and place then into the
        # padded tensor.
        sorted_ref = []
        sorted_lengths = []
        for i, sent_i in enumerate(sorted_indices):
            seq = im_ex_seq[sent_i]
            if ignore_ss:
                if len(seq) > 1:
                    # Ignore the start and stops in the int mapped data.
                    seq = seq[1:-1]
                else:
                    seq = seq
            else:
                seq = seq
            tt = torch.LongTensor(seq)
            length = tt.size(0)
            ex_seq_padded[i, 0:length] = tt
            # Rearrange the doc refs.
            sorted_ref.append(doc_ref[sent_i])
            # Make this because packedpadded seq asks for it.
            sorted_lengths.append(length)
        return ex_seq_padded, sorted_lengths, sorted_ref
| true |
564d8723c8209d6b7d5f139c174acbe39be8253b | Python | ThanatosXPF/radar_image_propagation | /utility/notifier.py | UTF-8 | 3,944 | 2.515625 | 3 | [] | no_license | import smtplib
import socket
from email import encoders
from email.mime.base import MIMEBase
from email.mime.text import MIMEText
from email.mime.image import MIMEImage
from email.mime.multipart import MIMEMultipart
import time
from config import c
class Notifier(object):
    """Sends status e-mails (plain text, or HTML with an inline result image)
    from a training host so long-running jobs can report progress remotely."""
    def __init__(self):
        self.hostname = socket.gethostname()
        self.mail_host = 'smtp.163.com'
        # 163.com account user name.
        self.mail_user = 'thanatos_notifier'
        # Password (for some mailboxes this is an authorization code).
        # SECURITY NOTE(review): credentials are hard-coded; move them to an
        # environment variable or config file before sharing this code.
        self.mail_pass = '1qaz2wsx3edc'
        # Sender address.
        self.sender = 'thanatos_notifier@163.com'
        # Recipient addresses; must be wrapped in a list, which means several
        # addresses can be given for a group send.
        self.receivers = ['thanatosxie@163.com']
        self.smtpObj = smtplib.SMTP()

    def login(self):
        """Connect to the SMTP server and authenticate."""
        # Connect to the server.
        print("smtp connection")
        self.smtpObj.connect(self.mail_host, 25)
        # Log in to the server.
        print("smtp logging")
        self.smtpObj.login(self.mail_user, self.mail_pass)
        print("smtp config complete")

    def close(self):
        """Terminate the SMTP session."""
        self.smtpObj.quit()
        print("smtp connection closed")

    def send(self, content):
        """E-mail *content* as plain text, appending the config file path."""
        self.login()
        # Build the e-mail message.
        content += "\n" + "config file path: "+c.SAVE_PATH
        message = MIMEText(content, 'plain', 'utf-8')
        # Subject line.
        message['Subject'] = self.hostname
        # Sender info.
        message['From'] = self.sender
        # Recipient info.
        message['To'] = self.receivers[0]
        # Send the mail; SMTP failures are reported but not raised.
        try:
            self.smtpObj.sendmail(
                self.sender, self.receivers, message.as_string())
            print('success')
        except smtplib.SMTPException as e:
            print('error', e) # print the error
        self.close()

    def eval(self, step, img_path):
        """E-mail an evaluation report for iteration *step* with the image at
        *img_path* attached and embedded inline via the cid:0 reference."""
        self.login()
        # Build the multipart e-mail message.
        message = MIMEMultipart()
        # Subject line.
        message['Subject'] = self.hostname + f" in {step}"
        # Sender info.
        message['From'] = self.hostname + f"<{self.sender}>"
        # Recipient info.
        message['To'] = "thanatos" + f"<{self.receivers[0]}>"
        # HTML body referencing the attached image by its Content-ID.
        content = self.hostname + "<br>config file path: " + c.SAVE_PATH + f"<br>iter{step}<br>"
        content = '<html><body><p>' + content + '</p>' +'<p><img src="cid:0"></p>' +'</body></html>'
        txt = MIMEText(content, 'html', 'utf-8')
        message.attach(txt)
        # file = open('/Users/thanatos/Pictures/图片/杂乱/IMG_9782.JPG', 'rb')
        # img_data = file.read()
        # file.close()
        # img = MIMEImage(img_data)
        # img.add_header('Content-ID', 'dns_config')
        # message.attach(img)
        with open(img_path, 'rb') as f:
            # Set the attachment MIME type and file name; png here:
            mime = MIMEBase('image', 'png', filename='result.png')
            # Add the required headers:
            mime.add_header('Content-Disposition', 'attachment', filename='test.png')
            mime.add_header('Content-ID', '<0>')
            mime.add_header('X-Attachment-Id', '0')
            # Read in the attachment content:
            mime.set_payload(f.read())
            # Base64-encode it:
            encoders.encode_base64(mime)
            # Add it to the MIMEMultipart:
            message.attach(mime)
        # Send the mail; SMTP failures are reported but not raised.
        try:
            self.smtpObj.sendmail(
                self.sender, self.receivers, message.as_string())
            print('success')
        except smtplib.SMTPException as e:
            print('error', e) # print the error
        # NOTE(review): attaching a plain-text copy AFTER sendmail has no
        # effect on the mail already sent — presumably leftover code; confirm.
        message.attach(MIMEText(content, 'plain', 'utf-8'))
        self.close()
if __name__ == '__main__':
    # Manual smoke test: e-mail one evaluation image from a training run.
    no = Notifier()
    no.eval(14999, '/extend/gru_tf_data/0916_ensemble/Metric/Valid_14999/average_14999.jpg')
| true |
dc6f42cfdf889221cbfbbfdd377403b4980fa206 | Python | wererLinC/Python-Learning | /python基础/10_tuple.py | UTF-8 | 172 | 3.1875 | 3 | [] | no_license | # tuple
# Tuples are immutable sequences: indexing works, mutation does not.
dimensions = (200, 10)
print(dimensions[0])
# dimensions[0] = 100 tuple does not support assignment
# dimensions.append(10) tuple object has no attribute append
| true |
e0521cc0cc6be39fd8c45ebc7d327e2dbd8c8166 | Python | gregoriusdev/Infopub_Pandas_okkam76 | /part4/4.37_folium_map_tiles.py | UTF-8 | 427 | 2.84375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
# Load the library.
import folium

# Build two maps of Seoul with different tile styles and zoom levels.
seoul_map2 = folium.Map(location=[37.55,126.98], tiles='Stamen Terrain', 
                        zoom_start=12)
seoul_map3 = folium.Map(location=[37.55,126.98], tiles='Stamen Toner', 
                        zoom_start=15)

# Save each map as an HTML file.
seoul_map2.save('./seoul2.html')
seoul_map3.save('./seoul3.html')
06e0b4e177ead8500fffcc79e2e02d09f580a954 | Python | sbis04/yoga-instructor-oak-server | /check_cam.py | UTF-8 | 642 | 2.625 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python3
# author: Souvik Biswas
# For checking whether OAK-D is available

import cv2
import depthai as dai

print("Start")

# Create pipeline
pipeline = dai.Pipeline()

# Name of primary OAK-D cam
oakCamName = "CameraBoardSocket.RGB"

# Boolean to check the camera status
isAvailable = False

# Probe the device: constructing dai.Device raises when no OAK is attached,
# so the broad except is a deliberate "not available" fallback.
try:
    device = dai.Device(pipeline)
    cameras = device.getConnectedCameras()
    for camera in cameras:
        # The connected sockets stringify as e.g. "CameraBoardSocket.RGB".
        if str(camera) == oakCamName:
            isAvailable = True
            break
except Exception:
    pass
finally:
    # Report the result on stdout for the calling process to parse.
    if isAvailable:
        print("Success")
    else:
        print("Failed")
| true |
97868938d4c4d64e73b2b39dd1e50c722c3139df | Python | jclaggett/Project-Euler-Code | /problem1.py | UTF-8 | 315 | 3.5625 | 4 | [] | no_license | # Project Euler Problem 1
# Project Euler Problem 1: sum all multiples of f1 or f2 below the limit.
# Fixes: renamed `max` so the builtin is not shadowed; `sum_range` called the
# undefined `max_range` (should be `rng`); the final line called the undefined
# `sum_rng` (should be `sum_range`); prints modernized to functions.

# Givens
limit, f1, f2 = 1000, 3, 5

# Dictionary solution: dict keys give the union of the two multiple sets.
def rng(f, limit=limit):
    """Return the multiples of f below limit."""
    return range(f, limit, f)

print(sum(dict.fromkeys(list(rng(f1)) + list(rng(f2))).keys()))

# Addition/Subtraction (inclusion-exclusion) solution. f1*f2 is the lcm here
# only because f1 and f2 are coprime.
def sum_range(f):
    """Return the sum of the multiples of f below limit."""
    return sum(rng(f))

print(sum_range(f1) + sum_range(f2) - sum_range(f1 * f2))
| true |
03f11dcfcd8b0504f9e2335a3cc4dae1362781af | Python | 8thwlsci05/My_codes | /guass_hw4.py | UTF-8 | 485 | 3 | 3 | [] | no_license | '''
file: NA week hw.2
date: Mar.14.2022
phJuan
'''
import numpy as np
# Forward Gaussian elimination (no pivoting) on a fixed 4x4 system.
tolsiz = 4
coe = np.array([[1, 2, 2, 1],
                [2, -4, 1, -5],
                [2, 1, -2, -4],
                [-1, 2, 1, -2]], dtype=float)
res = np.array([17, 8, 10, 17], dtype=float)

# Zero out the entries below each pivot, updating the right-hand side with
# the same multiplier so the system stays equivalent.
for pivot in range(tolsiz):
    for row in range(pivot + 1, tolsiz):
        factor = coe[row, pivot] / coe[pivot, pivot]
        res[row] = res[row] - factor * res[pivot]
        coe[row, pivot:tolsiz] = coe[row, pivot:tolsiz] - factor * coe[pivot, pivot:tolsiz]
print(coe)
| true |
09ea42e2ae7239fe81266d12604df8f3ccab36b9 | Python | songdanlee/python_code_basic | /day18/飞秋通信.py | UTF-8 | 575 | 3.109375 | 3 | [] | no_license | """
#飞秋发送消息,遵守飞秋自己的协议
#1:默认版本
#123456发送时间,可以任意写
#张无忌:能说的小牛 名称跟简称
#32:发送消息
"""
import socket
import time
# Create the UDP socket.
udpSocket = socket.socket(socket.AF_INET,socket.SOCK_DGRAM)
# Send the FeiQ-protocol message five times, one second apart, and print
# each reply. The payload is GBK-encoded as FeiQ expects.
# NOTE(review): the reply is decoded as utf-8 while the request is gbk —
# confirm the peer actually replies in utf-8.
for i in range(5):
    udpSocket.sendto("1:123456:张靓颖:仙女一枝花:32:在吗,我的小可爱?".encode('gbk'), ('10.10.116.151', 2425))
    time.sleep(1)
    content = udpSocket.recvfrom(1024)
    print(content[0].decode("utf-8"),content[1])
# Close the socket.
udpSocket.close()
2d16df2e2fa78936ff4423aecd59183ac00617bf | Python | michal-shalev/ATM-Manager | /UserAuthentication.py | UTF-8 | 491 | 2.546875 | 3 | [] | no_license | from DataHandler import DataHandler
class UserAuthentication:
    """Validates user credentials against the stored account data."""

    def __init__(self):
        # Backing store for account records.
        self.data = DataHandler()

    def authentication(self, account_id, password):
        """Return True when the account exists and the password matches."""
        if not self.isAccountIDExists(account_id):
            return False
        stored_password = self.data.getUserDetail(account_id, "password")
        return stored_password == password

    def isAccountIDExists(self, account_id):
        """Return whether an account with this id is on record."""
        return self.data.isAccountIDExists(account_id)
c65a209cc97a845a6389dfa1a1a66917a7698d1c | Python | pkug/matasano | /Set4/28.py | UTF-8 | 347 | 2.90625 | 3 | [] | no_license | #!/usr/bin/env python3
"""."""
import os
from sha1 import SHA1 as sha1
key = bytes(os.urandom(16))
def auth(key, msg):
return sha1(key + msg.encode()).digest() + msg.encode()
def check(key, msg):
return sha1(key + msg[20:]).digest() == msg[:20]
m = "Secret message!"
a = auth(key, m)
print("AUTH:", a)
print("CHECK:", check(key, a))
| true |
76d7728afca1ff91d622b8b601af2ad107bbce10 | Python | sineundong/python_minecraft | /chapter8-functions/test12.py | UTF-8 | 200 | 3.1875 | 3 | [] | no_license | from itertools import permutations
# Read n and enumerate every permutation of 1..n.
n=int(input())
alist=[ i for i in range(1,n+1)]
print(alist)
# permutations(alist, n) yields all n! orderings as tuples.
perm=permutations(alist,n)
blist=[]
for i in perm:
    print(i)
    # Collect each permutation as a list as well.
    blist.append(list(i))
print(blist)
0c146c4fe14d272fd26e4ab06601e3f0c9e8d4e2 | Python | shangxiwu/services | /Module_09_TensorFlow基礎使用/corelab_9.3_sol.py | UTF-8 | 376 | 3.203125 | 3 | [] | no_license | #!/usr/bin/env python
# coding: utf-8
# # Using TensorFlow (TF1 graph/session API)
# In[1]:
import tensorflow as tf
# ## Use tf.subtract, tf.divide etc. to compute the answer of (10/2)-1
# In[2]:
x = tf.constant(10)
y = tf.constant(2)
# tf.divide yields float64, so the constant 1 must be cast before subtracting.
z = tf.subtract(tf.divide(x,y),tf.cast(tf.constant(1), tf.float64))
with tf.Session() as sess:
    output = sess.run(z)
    print(output)
# In[ ]:
| true |
ba7b68bc4dbd7d9c426a0a665a2fe6ce8dc74677 | Python | asadi8/meta | /REINFORCE_agent.py | UTF-8 | 5,263 | 2.53125 | 3 | [] | no_license | import tensorflow as tf
import numpy as np
import utils
import sys
class agent():
gradBuffer=[]
weights=[]
def __init__(self, params):
self.weights=[]
self.gradBuffer=[]
#These lines established the feed-forward part of the network. The agent takes a state and produces an action.
self.state_in= tf.placeholder(shape=[None,params['observation_size']],dtype=tf.float32)
hidden=self.state_in
inputSize=params['observation_size']
outputSize=params['policy_num_hidden_nodes']
for h in range(params['policy_num_hidden_layers']):
W = tf.Variable(utils.xavier_init([inputSize, outputSize]))
b=tf.Variable(tf.zeros(shape=[outputSize]))
self.weights.append(W)
self.weights.append(b)
hidden = utils.Leaky_ReLU(tf.matmul(hidden, W) + b, leak=0.3)
inputSize=outputSize
W = tf.Variable(utils.xavier_init([params['policy_num_hidden_nodes'],params['num_actions']]))
b = tf.Variable(tf.zeros(shape=[params['num_actions']]))
self.weights.append(W)
self.weights.append(b)
self.output = tf.nn.softmax(tf.matmul(hidden,W)+b)
self.return_holder = tf.placeholder(shape=[None],dtype=tf.float32)
self.action_holder = tf.placeholder(shape=[None],dtype=tf.int32)
self.indexes = tf.range(0, tf.shape(self.output)[0]) * tf.shape(self.output)[1] + self.action_holder
self.responsible_outputs = tf.gather(tf.reshape(self.output, [-1]), self.indexes)
self.loss = -tf.reduce_mean(tf.log(self.responsible_outputs)*self.return_holder)
self.gradient_holders = []
for idx,var in enumerate(self.weights):
placeholder = tf.placeholder(tf.float32,name=str(idx)+'_holder')
self.gradient_holders.append(placeholder)
self.gradients = tf.gradients(self.loss,self.weights)
optimizer = tf.train.GradientDescentOptimizer(learning_rate=params['policy_learning_rate'])
#optimizer = tf.train.AdamOptimizer(learning_rate=params['policy_learning_rate'])
self.update_batch = optimizer.apply_gradients(zip(self.gradient_holders,self.weights))
def action_selection(self,s,sess):
a_dist = sess.run(self.output,feed_dict={self.state_in:[s]})
a = np.random.choice(range(len(a_dist[0])),p=a_dist[0])
return a
def initialize_for_learning(self,params):
sess=params['tf_session']
self.gradBuffer = sess.run(self.weights)
for ix,grad in enumerate(self.gradBuffer):
self.gradBuffer[ix] = grad * 0
def update(self,params,batch_info,meta_learner):
#compute episode-based gradient estimator
#print(batch_info)
sess=params['tf_session']
for episode_number,episode_info in enumerate(batch_info):
returns=utils.rewardToReturn(episode_info['rewards'],params['discount_rate'])
feed_dict={self.return_holder:returns,
self.action_holder:episode_info['actions'],self.state_in:np.vstack(episode_info['states'])}
grads = sess.run(self.gradients, feed_dict=feed_dict)
for idx,grad in enumerate(grads):
self.gradBuffer[idx] += grad
#*****
flat=self.flatten_grad(self.gradBuffer)
#print(flat)
flat_meta=meta_learner.predict(flat,sess)
#print(sess.run(meta_learner.mu,feed_dict={meta_learner.state:flat}))
#print(flat_meta)
#print(flat_meta[0,0]/flat[0,0])
#print(flat_meta[0,1]/flat[0,1])
#print(flat.shape)
#sys.exit(1)
#use the meta learner here
#then pass the output of meta learner to the function bellow as first argument
self.augmented_grads_buffer=self.listicize_grad(flat_meta,self.gradBuffer)
#then update the policy using augmented grad!
#now update the policy
#*****
feed_dict = dict(zip(self.gradient_holders, self.augmented_grads_buffer))
_ = sess.run(self.update_batch, feed_dict=feed_dict)
#clear gradient holder
for ix,grad in enumerate(self.gradBuffer):
self.gradBuffer[ix] = grad * 0
#sys.exit(1)
return flat,flat_meta
def flatten_grad(self,grads):
f=None
for g in grads:
if f==None:
f=g.flatten()
else:
f=np.concatenate((f,g.flatten()))
return f.reshape(1,f.shape[0])
def listicize_grad(self,flat,original_grad):
out=[]
offset=0
for o in original_grad:
if len(o.shape)==2:
temp=flat[0,offset:(offset+o.size)]
out.append(temp.reshape((o.shape[0],o.shape[1])))
offset=offset+o.size
elif len(o.shape)==1:
temp=flat[0,offset:(offset+o.size)]
out.append(temp.reshape((o.shape[0],)))
offset=offset+o.size
else:
print("weights are not 1D or 2D ... exit")
sys.exit(1)
return out
def num_policy_parameters(self):
out=0
for w in self.weights:
out=out+int(np.prod(w.get_shape()))
return out
| true |
7320753d7cb59f989787eae06a41692991cfbe73 | Python | osceri/ltspice4-2-svg | /prim2svg.py | UTF-8 | 5,100 | 2.8125 | 3 | [] | no_license | #!/usr/bin/env python
# -*- coding: ascii -*-
import svgwrite
from config import *
def main():
# Create output file. Make sure that the size in this file is
# equal to the parameters in the other file. WILL fix
dwg = svgwrite.Drawing("test.svg", size=(svg_file_width, svg_file_height))
# The style of the schematic is derived from the css file
dwg.add_stylesheet('style.css', title="Circuit style")
# One may define seperate styles/classes for every primitive
# Therefore these groups are created
nodes = dwg.g(class_="line")
lines = dwg.g(class_="line")
rects = dwg.g(class_="line")
circles = dwg.g(class_="line")
arcs = dwg.g(class_="line")
texts = dwg.g(class_="text")
comments = dwg.g(class_="comment")
# The netlist file in the local directory is opened, and the primitives are added
# to their corresponding groups
with open("netlist.txt", "r", encoding='ISO-8859-1') as netlist:
for line in netlist:
if "node " == line[:5]:
_, x, y, rx, ry = line.split()
x, y, rx, ry = map(float, [x, y, rx, ry])
nodes.add(dwg.ellipse(center=(x, y), r=(rx, ry)))
if "line " == line[:5]:
_, x0, y0, x1, y1 = line.split()
x0, y0, x1, y1 = map(float, [x0, y0, x1, y1])
lines.add(dwg.line(start=(x0, y0), end=(x1, y1)))
if "rect " == line[:5]:
_, x0, y0, x1, y1 = line.split()
x0, y0, x1, y1 = map(float, [x0, y0, x1, y1])
lines.add(dwg.line(start=(x0, y0), end=(x1, y0)))
lines.add(dwg.line(start=(x0, y0), end=(x0, y1)))
lines.add(dwg.line(start=(x1, y1), end=(x1, y0)))
lines.add(dwg.line(start=(x1, y1), end=(x0, y1)))
if "circle " == line[:7]:
_, px, py, rx, ry = line.split()
px, py, rx, ry = map(float, [px, py, rx, ry])
circles.add(dwg.ellipse(center=(px, py), r=(rx, ry), style="fill:none"))
if "arc " == line[:4]:
_, x0, y0, x1, y1, rx, ry, angle, large_arc, angle_dir = line.split()
x0, y0, x1, y1, rx, ry, angle = map(float, [x0, y0, x1, y1, rx, ry, angle])
large_arc = (large_arc == "True")
path = svgwrite.path.Path(d=("M", x0, y0), stroke="black", fill="none")
path.push_arc(target=(x1, y1), r=(rx, ry), rotation=angle, large_arc=large_arc, angle_dir=angle_dir, absolute=True)
arcs.add(path)
# Text needs extra attention because anchoring, rotation and position is tricky
if "text " == line[:5]:
words = line.split()
_, x, y, align, size = words[:5]
text = " ".join(words[5:])
is_comment = ("!" == text[0])
text = text[1:]
x, y = map(float, [x, y])
size = round(10*float(size))
x_offset = 0
y_offset = 0
vertical = False
# The align-flag may have any number of direction/orientation-tokens preceding the
# Bottom and Top don't exist. They're just offset Center
align = align.replace('Bottom', 'uuCenter')
align = align.replace('Top', 'ddCenter')
# actual alignment.
while True:
token = align[0]
if token == "u":
y_offset -= size // 4
elif token == "d":
y_offset += size // 4
elif token == "l":
x_offset -= size // 4
elif token == "r":
x_offset += size // 4
elif token == "V":
vertical = True
else:
break
align = align[1:]
align = {"Center":"middle", "Left":"start", "Right":"end"}[align]
# This is added simply to make alignment slightly better
if vertical:
x_offset += size // 4
else:
y_offset += size // 4
x += x_offset
y += y_offset
if is_comment:
comments.add(dwg.text(text, insert=(x, y), style=f"text-anchor:{align};font-size:{size}px", transform=f"rotate({-90 if vertical else 0},{x},{y})"))
else:
texts.add(dwg.text(text, insert=(x, y), style=f"text-anchor:{align};font-size:{size}px", transform=f"rotate({-90 if vertical else 0},{x},{y})"))
# The primitive groups are added to the SVG
dwg.add(lines)
dwg.add(rects)
dwg.add(arcs)
if display_text:
dwg.add(texts)
if display_comments:
dwg.add(comments)
dwg.add(circles)
if display_nodes:
dwg.add(nodes)
# The SVG file is saved!
dwg.save()
main()
| true |
f419b301d74b856b2a7e7e69102783096f6c615e | Python | rajesh1994/lphw_challenges | /python_excercise/19_08_2018/ex29.py | UTF-8 | 529 | 3.765625 | 4 | [] | no_license | people = 20
cats = 30
dogs = 15
if people < cats:
print "Too many cats! The world is doomed!"
if people > cats:
print "Not many cats! The world is saved!"
if people < dogs:
print "The world is droold on!"
if people > dogs:
print "The world is dry!"
dogs += 5
if people <= dogs:
print "People are lesser than or equal to the dogs."
if people >= dogs:
print "People are grater than or equal to dogs."
if people == dogs:
print "Poeple are dogs."
if 1 + 5 == 6 and 10 - 10 == 0:
print "True"
| true |
d9464ef6891a5d0ba99e51b51e41ec7fcda68a9e | Python | Faye413/Interview-Box-Web-App | /src/api/interview.py | UTF-8 | 10,284 | 2.8125 | 3 | [] | no_license |
""" This module contains the API endponits related to creating and scheduling interviews. """
import datetime
import flask
import flask_restful
import peewee
import time
from flask import session
import data
import tools
import search
class CreateInterview(flask_restful.Resource):
"""
Create a new interview
id - the first person in the interview
target_id - the second person in the interview
timestamp - the number of seconds since the unix epoch that the interview will take place
"""
def post(self):
return handle_create_interview(create_interview_wrapper(flask.request.form))
def create_interview_wrapper(post_data):
"""
Create a new interview wrapper to convert post data to format above
target_username - the interviewer's username
timestamp - the unix timestamp of the interview start time
"""
updated_post_data = {
'id': None,
'target_id': None,
'time': None
}
if not 'id' in session:
return post_data
else:
updated_post_data['id'] = session['id']
updated_post_data['target_id'] = list(search.get_users_matching_fields({'username': post_data['target_username']}))[0]
updated_post_data['timestamp'] = post_data['timestamp']
return updated_post_data
class GetUpcomingInterviews(flask_restful.Resource):
"""
Get upcoming interviews for a user
id - the user to look for
"""
def get(self):
# return handle_get_upcoming_interviews(flask.request.args)
return handle_get_upcoming_interviews({'id': session['id']})
class GetPastInterviews(flask_restful.Resource):
"""
This endpoint retrieves all the past interviews for a user. It requires one parameter.
id - the database ID of the user.
"""
def get(self):
# return handle_get_past_interviews(flask.request.args)
return handle_get_past_interviews({'id': session['id']})
class AddFeedbackToInterview(flask_restful.Resource):
"""
Add feedback to an interview.
id - the database ID of the interview
feedback - a list of feedback strings to add to the interview
"""
def post(self):
return handle_add_feedback_to_interview(flask.request.form)
def handle_create_interview(post_data):
"""
Create a new interview.
id - the database ID of the scheduling user
target_id - the person with whom the interview will be scheduled
timestamp - the time at which the interview will take place
Note both users must be available at the provided timestamp.
"""
print post_data
response = {}
try:
user_id = get_id(post_data)
get_user_data_object_from_id(user_id)
target_id = get_target_id(post_data)
get_user_data_object_from_id(target_id)
timestamp = get_timestamp(post_data)
validate_user_is_available(user_id, timestamp)
validate_user_is_available(target_id, timestamp)
data.Interview.create(user1=user_id, user2=target_id, time=timestamp)
response['status'] = 'success'
except Exception as e:
response['status'] = 'failure'
response['message'] = e.message
return response
def handle_get_upcoming_interviews(get_data):
"""
Fetch all upcoming interviews for a given user from the database
id - the user ID for whom the interviews need to be fetched
"""
response = {}
try:
user_id = get_id(get_data)
get_user_data_object_from_id(user_id)
interviews = []
print 'the current user is', user_id
for interview in data.Interview.select().where(((data.Interview.user1 == user_id) | (data.Interview.user2 == user_id)) &
(data.Interview.time > datetime.datetime.now())):
interview_response = {}
interview_response['date'] = time.mktime(interview.time.timetuple())
interview_response['id'] = interview.id
if int(interview.user1.id) == int(user_id):
interview_response['person'] = interview.user2.id
interview_response['person_firstname'] = interview.user2.firstname
else:
interview_response['person'] = interview.user1.id
interview_response['person_firstname'] = interview.user1.firstname
interviews.append(interview_response)
response['interviews'] = interviews
response['status'] = 'success'
except Exception as e:
response['status'] = 'failure'
response['message'] = e.message
return response
def handle_get_past_interviews(get_data):
"""
Fetch all past interviews for a given user from the database
id - the user ID for whom the interviews need to be fetched
"""
response = {}
try:
user_id = get_id(get_data)
interviews = []
for interview in data.Interview.select().where(((data.Interview.user1 == user_id) | (data.Interview.user2 == user_id)) &
(data.Interview.time < datetime.datetime.now())):
interview_response = {}
interview_response['date'] = time.mktime(interview.time.timetuple())
interview_response['id'] = interview.id
if int(interview.user1.id) == int(user_id):
interview_response['person'] = interview.user2.id
interview_response['person_firstname'] = interview.user2.firstname
else:
interview_response['person'] = interview.user1.id
interview_response['person_firstname'] = interview.user1.firstname
interview_response['feedback'] = []
for feedback in data.InterviewFeedback.select().where(data.InterviewFeedback.interview_id == interview.id):
interview_response['feedback'].append(feedback.feedback)
interviews.append(interview_response)
response['interviews'] = interviews
response['status'] = 'success'
except Exception as e:
response['status'] = 'failure'
response['message'] = e.message
return response
def handle_add_feedback_to_interview(post_data):
"""
Add feedback to an interview.
interview_id - the database interview ID that the feedback will be added to
fedback - the string to store as feedback
"""
response = {}
try:
interview_id = get_id(post_data)
get_interview_data_object_from_id(interview_id)
feedback = get_feedback(post_data)
data.InterviewFeedback.create(interview_id=interview_id, feedback=feedback)
response['status'] = 'success'
except Exception as e:
response['status'] = 'failure'
response['message'] = e.message
return response
@tools.escape_html
def get_id(post_data):
""" Helper method to get the user_id from a JSON dictionary """
if 'id' not in post_data:
raise Exception('Missing required field: id')
try:
return int(post_data['id'])
except ValueError:
raise Exception('Invalid id specified')
@tools.escape_html
def get_target_id(post_data):
""" Helper method to get the target_id from a JSON dictionary """
if 'target_id' not in post_data:
raise Exception('Missing required field: target_id')
try:
return int(post_data['target_id'])
except ValueError:
raise Exception('Invalid target_id specified')
def get_user_data_object_from_id(user_id):
""" Helper method to get the user object from the database for a given user id """
try: user = data.InterviewboxUser.get(data.InterviewboxUser.id == user_id)
except peewee.DoesNotExist:
raise Exception('No such user')
return user
def get_interview_data_object_from_id(interview_id):
""" Helper method to get the interview object from the database for a given interview id """
try:
interview = data.Interview.get(data.Interview.id == interview_id)
except peewee.DoesNotExist:
raise Exception('No such interview')
return interview
@tools.escape_html
def get_feedback(post_data):
""" Helper method to get the feedback from a JSON dictionary """
if 'feedback' not in post_data:
raise Exception('Missing requried field: feedback')
feedback = post_data['feedback']
if len(feedback) < 1 or len(feedback) > 255:
raise Exception('Feedback must be between 1 and 255 characters')
return feedback
def get_timestamp(post_data):
""" Helper method to get the timestamp from a JSON dictionary """
if 'timestamp' not in post_data:
raise Exception('Missing required field: timestamp')
try:
timestamp = datetime.datetime.fromtimestamp(int(post_data['timestamp']))
except ValueError:
raise Exception('Invalid timestamp')
if datetime.datetime.now() > timestamp:
raise Exception('Date must be in the future')
return timestamp
def validate_user_is_available(target_id, timestamp):
""" Verify that a user is available at the provided timestamp """
pass
def datetime_to_weekly_availability(datetime_obj):
""" Convert a python time object to a numeric representation of the timeslot """
return (datetime_obj.isoweekday()-1) * 48 + int(datetime_obj.strftime('%H')) * 2 + int(datetime_obj.strftime('%M'))/30
def weekly_availability_to_datetime(weekly_avail, base_datetime_obj):
""" Convert a numeric representation of the timeslot to a timestamp """
hr = str(weekly_avail%48/2)
if len(hr) == 1:
hr = '0' + hr
mm = weekly_avail%48%2
if mm == 0:
mm = '00'
else:
mm = '30'
construct_string = (base_datetime_obj.strftime('%Y-%m-%d') + ' ' + hr + ':' + mm)
return datetime.datetime.strptime(construct_string, '%Y-%m-%d %H:%M')
def weekly_availability_to_timestamp(weekly_avail, base_datetime_obj):
""" Convert a numeric representation of the timeslot to a python time object """
return time.mktime(weekly_availability_to_datetime(weekly_avail, base_datetime_obj).timetuple())
def timestamp_to_weekly_availability(timestamp):
""" Convert a timestamp object to a numeric representation of the timeslot """
return datetime_to_weekly_availability(datetime.datetime.fromtimestamp(timestamp))
| true |
b55817704531184482509aaca257f29453d3d154 | Python | justintam5/Roboto | /Roboto/version_control/Webcam_test_v2.py | UTF-8 | 673 | 2.59375 | 3 | [] | no_license | #!/bin/usr/env python3
import numpy as np
import cv2
import time
import matplotlib.pyplot as plt
l_w = np.array([0, 90, 90])
h_w = np.array([10, 160, 160])
webcam = cv2.VideoCapture(0)
while True:
width = 320
height = 160
ret_val, data = webcam.read()
data = cv2.flip(data, 0)
data = cv2.flip(data, 1)
data = cv2.resize(data, (width, height))
hsv = cv2.cvtColor(data, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(hsv, l_w, h_w)
cv2.imshow('mask', mask)
cv2.imshow('other frame', data)
print(data[height/2, width/2])
print(mask[height/2, width/2])
if cv2.waitKey(1) == ord('q'):
break
time.sleep(1)
input('Press Enter: ')
cap.release()
cv2.destroyAllWindows()
| true |
1559b860e3d92699ce779622bd7989b7e5f3873a | Python | jjmonte20/pythonPlayground | /logicalOperators.py | UTF-8 | 493 | 3.09375 | 3 | [] | no_license | has_high_income = False
has_good_credit = False
has_criminal_record = False
if has_high_income and has_good_credit:
print("You are eligible for a loan")
elif has_high_income:
print("You need to have better credit to get this loan")
elif has_good_credit:
print("You need a higher income to get this loan")
elif has_criminal_record:
print("I need to ask about your criminal record")
else:
print("You are ineligible for this loan, you need a higher income and better credit") | true |
7b02ab8a9187f5cf257986685621986c00dfd453 | Python | Ra6666/energy-py-linear | /tests/test_battery_optimization.py | UTF-8 | 1,032 | 2.828125 | 3 | [] | no_license | import pytest
import unittest
import energypylinear
@pytest.mark.parametrize(
'prices, initial_charge, expected_dispatch',
[
([10, 10, 10], 0, [0, 0, 0]),
([20, 10, 10], 1, [-1, 0, 0]),
([10, 50, 10, 50, 10], 0, [4, -4, 4, -4, 0])
]
)
def test_battery_optimization(prices, initial_charge, expected_dispatch):
power = 4
capacity = 6
model = energypylinear.Battery(
power=power, capacity=capacity, efficiency=1.0
)
info = model.optimize(
prices=prices, initial_charge=initial_charge, timestep='1hr'
)
dispatch = [res['Net [MW]'] for res in info]
unittest.TestCase().assertCountEqual(dispatch, expected_dispatch)
def test_battery_optimization_against_forecast():
model = energypylinear.Battery(
power=4, capacity=6, efficiency=1.0
)
info = model.optimize(
prices=[10, 10, 10], forecasts=[10, 10, 10]
)
result = [res['Net [MW]'] for res in info]
unittest.TestCase().assertCountEqual(result, [0, 0, 0])
| true |
4cabaa61de88f8dcd37a444d660be6bdea7e4fd9 | Python | YiminHu/symbol_recognition | /operation.py | UTF-8 | 3,869 | 2.578125 | 3 | [] | no_license | import numpy as np
import cv2
import random
import os
if os.path.exists('data') == False:
os.mkdir('data')
OUTPUT_DIR = './data/'
backg = [127,169,211,240]
salt = [0.02,0.04,0.06]
gauss = [0.5,1.0,1.5]
def translate(image, x, y):
M = np.float32([[1,0,x],[0,1,y]])
shifted = cv2.warpAffine(image,M,(image.shape[1],image.shape[0]),flags=cv2.INTER_LINEAR,borderMode=cv2.BORDER_REPLICATE)
return shifted
def rotate(image, angle, center = None, scale=1.0):
(h,w) = image.shape[:2]
if center is None:
center = (w/2, h/2)
M = cv2.getRotationMatrix2D(center, angle, scale)
rotated = cv2.warpAffine(image, M, (w,h),flags=cv2.INTER_LINEAR,borderMode=cv2.BORDER_REPLICATE)
return rotated
def gaussianNoise(img,mean,val):
(h,w) = img.shape[:2]
for i in range(h):
for j in range(w):
img[i][j]=img[i][j]+random.gauss(mean,val)
return img
def saltpepperNoise(img,n):
m=int((img.shape[0]*img.shape[1])*n)
for a in range(m):
i=int(np.random.random()*img.shape[1])
j=int(np.random.random()*img.shape[0])
if img.ndim==2:
img[j,i]=255
elif img.ndim==3:
img[j,i,0]=255
img[j,i,1]=255
img[j,i,2]=255
for b in range(m):
i=int(np.random.random()*img.shape[1])
j=int(np.random.random()*img.shape[0])
if img.ndim==2:
img[j,i]=0
elif img.ndim==3:
img[j,i,0]=0
img[j,i,1]=0
img[j,i,2]=0
return img
def backgroundNoise(img,val):
(h,w) = img.shape
for i in range(h):
for j in range(w):
if img[i][j] > 127:
pixelval = random.gauss(val,2.5)
img[i][j] = pixelval
return img
def gen_from_seed(dirname):
index = 0
pathDir = os.listdir(dirname)
slist = dirname.split('/')
writedir = slist[len(slist)-1]
print(writedir)
for filename in pathDir:
if not filename.startswith('.'):
print(filename)
curimg = cv2.imread(dirname+'/'+filename,0)
h,w = curimg.shape
for trans in range(1):
origin1 = curimg.copy()
if not trans == 0:
x = random.randint(0,int(w*0.1))
y = random.randint(0,int(h*0.1))
origin1 = translate(origin1,x,y)
for rot in range(5):
origin2 = origin1.copy()
if not rot == 0:
angle = random.randint(-45,45)
origin2 = rotate(origin2,angle)
for bg in range(3):
origin3 = origin2.copy()
origin3 = backgroundNoise(origin3,backg[bg])
for noise in range(2):
origin4 = origin3.copy()
if noise < 3:
origin4 = saltpepperNoise(origin4,salt[noise])
else:
origin4 = gaussianNoise(origin4,0,gauss[noise-3])
for blur in range(2):
origin5 = origin4.copy()
if blur == 1:
origin5 = cv2.GaussianBlur(origin5,(5,5),1.5)
else:
origin5 = origin4.copy()
if os.path.exists(OUTPUT_DIR+writedir) == False:
os.mkdir(OUTPUT_DIR+writedir)
origin5 = cv2.resize(origin5,(28,28))
cv2.imwrite(OUTPUT_DIR+writedir+'/'+str(index)+'.jpg',origin5)
index = index + 1
| true |
33f485300d2ecd69d8e99daccc7fdce995a16e40 | Python | abhishek-basu-git/qikify | /qikify/views/charts.py | UTF-8 | 4,929 | 3.0625 | 3 | [
"MIT"
] | permissive | """Qikify charting helper functions.
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.font_manager
from matplotlib import cm
import scipy.stats as st
def synthetic_and_real(s_data, b_data, d1, d2, filename=None):
"""2D Scatterplot of synthetic & actual data.
"""
_, ax = plt.subplots(1)
ax.scatter(s_data[:, d1], s_data[:, d2], alpha=0.5, c='r')
ax.scatter(b_data[:, d1], b_data[:, d2], alpha=0.5, c='g')
if filename is None:
plt.show()
else:
plt.savefig(filename, dpi = 150, format='pdf')
plt.close()
def histogram(s_data, b_data, i, filename=None):
"""overlay two histograms.
"""
_, ax = plt.subplots(1)
ax.hist(s_data[:, i], 50, normed=True, alpha=0.5, color='r')
ax.hist(b_data[:, i], 50, normed=True, alpha=0.5, color='g')
ax.grid(True)
if filename is None:
plt.show()
else:
plt.savefig(filename, dpi = 150, format='pdf')
plt.close()
def yp_vs_y(yp, y, filename=None):
"""
This method plots y predicted vs. y actual on a 45-degree chart.
"""
miny = min(min(y), min(yp))
maxy = max(max(y), max(yp))
fig = plt.figure()
ax = fig.add_subplot(111, aspect='equal')
ax.scatter(yp, y)
ax.plot([miny, maxy], [miny, maxy])
ax.set_xlim((miny, maxy))
ax.set_ylim((miny, maxy))
ax.grid(True)
if filename is None:
plt.show()
else:
plt.savefig(filename, dpi = 150, format='pdf')
plt.close()
def qq(x, filename=None):
"""Quartile-Quartile plot, similar to Rstats qq() function."""
fig = plt.figure()
ax = fig.add_subplot(111)
osm, osr = st.probplot(x, fit=0, dist='norm') # compute
ax.plot(osm, osr, '.')
ax.grid(True)
if filename is None:
plt.show()
else:
plt.savefig(filename, dpi = 150, format='pdf')
plt.close()
def coef_path(coefs, filename=None):
"""
Plot the coefficient paths generated by elastic net / lasso.
"""
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_color_cycle(2 * ['b', 'r', 'g', 'c', 'k'])
plt.plot(coefs)
plt.xlabel('-Log(lambda)')
plt.ylabel('weights')
plt.title('Lasso and Elastic-Net Paths')
plt.axis('tight')
if filename is None:
plt.show()
else:
plt.savefig(filename, dpi = 150, format='pdf')
def pairs(data, filename=None):
"""Generates something similar to R pairs() plot.
"""
n_vars = data.shape[1]
fig = plt.figure()
for i in range(n_vars):
for j in range(n_vars):
ax = fig.add_subplot(n_vars, n_vars, i * n_vars + j + 1)
if i == j:
ax.hist(data[:, i])
else:
ax.scatter(data[:, i], data[:, j])
ax.autoscale(True)
if filename is None:
plt.show()
else:
plt.savefig(filename, dpi = 150, format='pdf')
plt.close()
def te_and_yl(error, error_syn, filename=None):
"""Plot test escapes and yield loss comparison chart, from ICCAD 2011 paper.
"""
[te_syn, yl_syn] = np.mean(error_syn, 0)
[te_actual, yl_actual] = np.mean(error, 0)
n_wafers = np.size(error, 0)
prop = matplotlib.font_manager.FontProperties(size=10)
fig = plt.figure()
# Test escapes plot
ax = fig.add_subplot(211)
ax.plot(error[:, 0], 'k-')
ax.plot([0, n_wafers], [te_syn, te_syn], 'g--')
ax.plot([0, n_wafers], [te_actual, te_actual], 'k-')
ax.grid(True)
ax.set_title('Test Escapes')
ax.legend((r"$T_E$", r"$\hat{T}_E$", r"$\bar{T}_E$"), \
'best', shadow=True, prop = prop)
# Yield loss plot
ax = fig.add_subplot(212)
ax.plot(error[:, 1], 'k-', [0, n_wafers], [yl_syn, yl_syn], 'g--', \
[0, n_wafers], [yl_actual, yl_actual], 'k-')
ax.grid(True)
ax.set_title('Yield Loss')
ax.legend((r"$Y_L$", r"$\hat{Y}_L$", r"$\bar{Y}_L$"), \
'best', shadow=True, prop = prop)
if filename is None:
plt.show()
else:
plt.savefig(filename, dpi = 150, format='pdf')
plt.close()
def wafer_map(x, y, val, filename=None):
"""Plots a heatmap of argument val over wafer coordinates.
"""
x = np.array(x, dtype=int)
y = np.array(y, dtype=int)
val = np.array(val, dtype=float)
xmax, ymax = max(x), max(y)
wafer = np.ones((xmax, ymax)) * np.nan
for i in range(len(x)):
xc, yc, C = x[i], y[i], val[i]
wafer[xc-1, yc-1] = C
fig = plt.figure()
ax1 = fig.add_subplot(111)
cax = ax1.imshow(wafer.T, cm.RdYlGn, \
vmin=val.min(), \
vmax=val.max(), \
interpolation='nearest')
fig.colorbar(cax)
if filename is None:
plt.show()
else:
plt.savefig(filename, dpi = 150, format='pdf')
| true |
eeb332b4afcc60d421343ccf958dcf22c799684b | Python | wzgdavid/qqqhhh | /restart_ml/upmoreup.py | UTF-8 | 1,523 | 3.375 | 3 | [] | no_license | '''
验证是不是趋势越明显的情况下,后面也更容易有趋势
before = 今天的收盘除以前第n天的收盘
after = 后第n天的收盘除以今天的收盘
看相关系数,如果相关系数高,说明有关系
before2 = 今天前n天所有收盘价的标准差
after2 = 今天后n天所有收盘价的标准差
'''
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from tool import *
plt.rcParams['font.sans-serif'] = ['SimHei'] # 正常显示中文
hy = 'rb' # rb ta m a ma c jd dy cs
df = pd.read_csv(r'..\data\{}.csv'.format(hy))
df = get_ma(df, 3)
#df = get_nhh(df, 3)
#df = get_atr(df, 50)
#df['st_h'] = df[['o', 'c']].apply(lambda x: x.max(),axis=1)
#df['st_l'] = df[['o', 'c']].apply(lambda x: x.min(),axis=1)
def foo():
n = 10
m = 10
#df['before'] = df.c / df.c.shift(n)
#df['after'] = df.c.shift(-1*m) / df.c
# 用ma
#df['before'] = df.ma / df.ma.shift(n)
#df['after'] = df.ma.shift(-1*m) / df.ma
df = df.dropna()
#df = df[(df.l - df.ma) > 0]
#df = df[(df.h - df.nhh) > 0]
print(df.shape[0])
print(df.head(90))
a = df.before.corr(df.after)
print(a)
#foo()
def foo2(df):
n = 20
m = 20
df['before'] = df.c.rolling(window=n, center=False).std()
#print(df.before)
df['after'] = df.c.shift(-1*m).rolling(window=m, center=False).std()
df = df.dropna()
#print(df.shape[0])
#print(df.head(90))
a = df.before.corr(df.after)
print(a)
foo2(df) | true |
35c1ede5797ce5db9d35278fcf1872e99c6c6bef | Python | POOLucasCardoso/Escalonadores | /Interativo/RoundRobinTestes.py | UTF-8 | 16,592 | 3.21875 | 3 | [] | no_license | import unittest
from Escalonadores import Escalonador,EscalonadorV2
from Processo import Processo
def ticks(vezes,escalonador):
for i in range(vezes):
escalonador.escalonar()
class EscalonadorIterativoTeste(unittest.TestCase):
def estado(self, escalonador, ativos, bloqueados, tick, quantun):
resultados = str(escalonador)
esperado = f'''Processos ativos: {ativos}
Processos bloqueados: {bloqueados}
Tick atual: {tick}
Quantun: {quantun}
'''
self.assertEqual(resultados, esperado)
def test1(self):
escalonador = Escalonador()
self.estado(escalonador, [], [], 0, 3)
def test2(self):
escalonador = Escalonador()
try:
ticks(1,escalonador)
self.estado(escalonador, [], [], 1, 3)
except Exception as e:
self.fail(str(e))
def test3(self):
escalonador = Escalonador()
escalonador.addProcesso(Processo('p1'))
self.estado(escalonador, ['Status p1: None'], [], 0, 3)
ticks(1,escalonador)
self.estado(escalonador, ['Status p1: r'], [], 1, 3)
ticks(1,escalonador)
self.estado(escalonador, ['Status p1: r'], [], 2, 3)
def test4(self):
escalonador = Escalonador()
escalonador.addProcesso(Processo('p1'))
ticks(3,escalonador)
self.estado(escalonador, ['Status p1: r'], [], 3, 3)
def test5(self):
escalonador = Escalonador()
escalonador.addProcesso(Processo('p1'))
escalonador.addProcesso(Processo('p2'))
ticks(3,escalonador)
self.estado(escalonador, ['Status p2: w', 'Status p1: r'], [], 3, 3)
ticks(3,escalonador)
self.estado(escalonador, ['Status p1: w', 'Status p2: r'], [], 6, 3)
def test6(self):
escalonador = Escalonador()
escalonador.addProcesso(Processo('p1'))
escalonador.addProcesso(Processo('p2'))
escalonador.addProcesso(Processo('p3'))
ticks(3,escalonador)
self.estado(escalonador, ['Status p2: w', 'Status p3: w', 'Status p1: r'], [], 3, 3)
ticks(3,escalonador)
self.estado(escalonador, ['Status p3: w', 'Status p1: w', 'Status p2: r'], [], 6, 3)
ticks(3,escalonador)
self.estado(escalonador, ['Status p1: w', 'Status p2: w', 'Status p3: r'], [], 9, 3)
def test7(self):
escalonador = Escalonador()
escalonador.addProcesso(Processo('p1'))
self.estado(escalonador, ['Status p1: None'], [], 0, 3)
ticks(3,escalonador)
escalonador.addProcesso(Processo('p2'))
self.estado(escalonador, ['Status p1: r', 'Status p2: None'], [], 3, 3)
ticks(3,escalonador)
self.estado(escalonador, ['Status p2: w', 'Status p1: r'], [], 6, 3)
ticks(3,escalonador)
self.estado(escalonador, ['Status p1: w', 'Status p2: r'], [], 9, 3)
self.assertEqual(escalonador.pesquisarProcesso('p2').getEstados(),'wwwrrr')
def test8(self):
escalonador = Escalonador()
escalonador.addProcesso(Processo('p1'))
escalonador.addProcesso(Processo('p2'))
ticks(1,escalonador)
escalonador.finalizarProcesso('p1')
ticks(1,escalonador)
self.estado(escalonador, ['Status p2: r'], [], 2, 3)
self.assertEqual(escalonador.pesquisarProcesso('p2').getEstados(),'wr')
def test9(self):
escalonador = Escalonador()
escalonador.addProcesso(Processo('p1'))
escalonador.addProcesso(Processo('p2'))
ticks(1,escalonador)
escalonador.finalizarProcesso('p2')
ticks(1,escalonador)
self.estado(escalonador, ['Status p1: r'], [], 2, 3)
ticks(1,escalonador)
self.assertEqual(escalonador.pesquisarProcesso('p1').getEstados(),'rrr')
def test10(self):
escalonador = Escalonador(5)
escalonador.addProcesso(Processo('p1'))
escalonador.addProcesso(Processo('p2'))
ticks(5,escalonador)
self.estado(escalonador, ['Status p2: w', 'Status p1: r'], [], 5, 5)
self.assertEqual('|p1|p1|p1|p1|p1|',escalonador.getHistorico())
ticks(5,escalonador)
self.estado(escalonador, ['Status p1: w', 'Status p2: r'], [], 10, 5)
self.assertEqual('|p1|p1|p1|p1|p1|p2|p2|p2|p2|p2|',escalonador.getHistorico())
def test11(self):
escalonador = Escalonador()
escalonador.addProcesso(Processo('p1'))
ticks(2,escalonador)
escalonador.finalizarProcesso('p1')
ticks(2,escalonador)
escalonador.addProcesso(Processo('p2'))
ticks(2,escalonador)
self.estado(escalonador, ['Status p2: r'], [], 6, 3)
self.assertEqual('|p1|p1|None|None|p2|p2|',escalonador.getHistorico())
def test12(self):
escalonador = Escalonador()
escalonador.addProcesso(Processo('p1'))
escalonador.addProcesso(Processo('p2'))
escalonador.addProcesso(Processo('p3'))
ticks(2,escalonador)
escalonador.bloquearProcesso()
ticks(4,escalonador)
self.estado(escalonador, ['Status p3: r', 'Status p2: w'], ['Status p1: b'], 6, 3)
self.assertEqual('|p1|p1|p2|p2|p2|p3|',escalonador.getHistorico())
def test13(self):
escalonador = Escalonador()
escalonador.addProcesso(Processo('p1'))
escalonador.addProcesso(Processo('p2'))
escalonador.addProcesso(Processo('p3'))
ticks(2,escalonador)
escalonador.bloquearProcesso()
ticks(2,escalonador)
escalonador.desbloquearProcesso('p1')
ticks(2,escalonador)
self.estado(escalonador, ['Status p3: r', 'Status p1: w', 'Status p2: w'], [], 6, 3)
self.assertEqual('|p1|p1|p2|p2|p2|p3|',escalonador.getHistorico())
def test14(self):
escalonador = Escalonador(1)
escalonador.addProcesso(Processo('p1'))
escalonador.addProcesso(Processo('p2'))
escalonador.addProcesso(Processo('p3'))
escalonador.bloquearProcesso()
escalonador.bloquearProcesso()
escalonador.bloquearProcesso()
ticks(1,escalonador)
escalonador.desbloquearProcesso('p2')
escalonador.desbloquearProcesso('p1')
escalonador.desbloquearProcesso('p3')
ticks(3,escalonador)
self.estado(escalonador, ['Status p2: w', 'Status p1: w', 'Status p3: r'], [], 4, 1)
self.assertEqual('|None|p2|p1|p3|',escalonador.getHistorico())
def test15(self):
escalonador = EscalonadorV2()
try:
escalonador.addProcesso(Processo('p1'))
self.fail("Exceção esperada")
except Exception as e:
self.estado(escalonador, [], [], 0, 3)
self.assertEqual('|',escalonador.getHistorico())
def test16(self):
escalonador = EscalonadorV2()
escalonador.addProcesso(Processo('p1',1))
self.estado(escalonador, ['Status p1: None'], [], 0, 3)
self.assertEqual('|',escalonador.getHistorico())
def test17(self):
escalonador = EscalonadorV2()
escalonador.addProcesso(Processo('p1',1))
escalonador.finalizarProcesso('p1')
self.estado(escalonador, [], [], 0, 3)
self.assertEqual('|',escalonador.getHistorico())
def test18(self):
escalonador = EscalonadorV2()
escalonador.addProcesso(Processo('p1',1))
escalonador.addProcesso(Processo('p2',1))
ticks(3,escalonador)
self.estado(escalonador, ['Status p2: w', 'Status p1: r'], [], 3, 3)
self.assertEqual('|p1|p1|p1|',escalonador.getHistorico())
ticks(3,escalonador)
self.estado(escalonador, ['Status p1: w', 'Status p2: r'], [], 6, 3)
self.assertEqual('|p1|p1|p1|p2|p2|p2|',escalonador.getHistorico())
def test19(self):
escalonador = EscalonadorV2()
escalonador.addProcesso(Processo('p1',1))
escalonador.addProcesso(Processo('p2',1))
escalonador.addProcesso(Processo('p3',1))
ticks(3,escalonador)
self.estado(escalonador, ['Status p2: w', 'Status p3: w', 'Status p1: r'], [], 3, 3)
ticks(3,escalonador)
self.estado(escalonador, ['Status p3: w', 'Status p1: w', 'Status p2: r'], [], 6, 3)
ticks(3,escalonador)
self.estado(escalonador, ['Status p1: w', 'Status p2: w', 'Status p3: r'], [], 9, 3)
def test20(self):
escalonador = EscalonadorV2()
escalonador.addProcesso(Processo('p1',1))
self.estado(escalonador, ['Status p1: None'], [], 0, 3)
ticks(3,escalonador)
escalonador.addProcesso(Processo('p2',1))
self.estado(escalonador, ['Status p1: r', 'Status p2: None'], [], 3, 3)
ticks(3,escalonador)
self.estado(escalonador, ['Status p2: w', 'Status p1: r'], [], 6, 3)
ticks(3,escalonador)
self.estado(escalonador, ['Status p1: w', 'Status p2: r'], [], 9, 3)
self.assertEqual(escalonador.pesquisarProcesso('p2').getEstados(),'wwwrrr')
def test21(self):
escalonador = EscalonadorV2()
escalonador.addProcesso(Processo('p1',1))
escalonador.addProcesso(Processo('p2',1))
ticks(1,escalonador)
escalonador.finalizarProcesso('p1')
ticks(1,escalonador)
self.estado(escalonador, ['Status p2: r'], [], 2, 3)
self.assertEqual(escalonador.pesquisarProcesso('p2').getEstados(),'wr')
def test22(self):
escalonador = EscalonadorV2()
escalonador.addProcesso(Processo('p1',1))
escalonador.addProcesso(Processo('p2',1))
ticks(1,escalonador)
escalonador.finalizarProcesso('p2')
ticks(1,escalonador)
self.estado(escalonador, ['Status p1: r'], [], 2, 3)
ticks(1,escalonador)
self.assertEqual(escalonador.pesquisarProcesso('p1').getEstados(),'rrr')
def test23(self):
escalonador = EscalonadorV2(5)
escalonador.addProcesso(Processo('p1',1))
escalonador.addProcesso(Processo('p2',1))
ticks(5,escalonador)
self.estado(escalonador, ['Status p2: w', 'Status p1: r'], [], 5, 5)
self.assertEqual('|p1|p1|p1|p1|p1|',escalonador.getHistorico())
ticks(5,escalonador)
self.estado(escalonador, ['Status p1: w', 'Status p2: r'], [], 10, 5)
self.assertEqual('|p1|p1|p1|p1|p1|p2|p2|p2|p2|p2|',escalonador.getHistorico())
def test24(self):
escalonador = EscalonadorV2()
escalonador.addProcesso(Processo('p1',1))
ticks(2,escalonador)
escalonador.finalizarProcesso('p1')
ticks(2,escalonador)
escalonador.addProcesso(Processo('p2',1))
ticks(2,escalonador)
self.estado(escalonador, ['Status p2: r'], [], 6, 3)
self.assertEqual('|p1|p1|None|None|p2|p2|',escalonador.getHistorico())
def test25(self):
escalonador = EscalonadorV2()
escalonador.addProcesso(Processo('p1',1))
escalonador.addProcesso(Processo('p2',1))
escalonador.addProcesso(Processo('p3',1))
ticks(2,escalonador)
escalonador.bloquearProcesso()
ticks(4,escalonador)
self.estado(escalonador, ['Status p3: r', 'Status p2: w'], ['Status p1: b'], 6, 3)
self.assertEqual('|p1|p1|p2|p2|p2|p3|',escalonador.getHistorico())
def test26(self):
escalonador = EscalonadorV2()
escalonador.addProcesso(Processo('p1',1))
escalonador.addProcesso(Processo('p2',1))
escalonador.addProcesso(Processo('p3',1))
ticks(2,escalonador)
escalonador.bloquearProcesso()
ticks(2,escalonador)
escalonador.desbloquearProcesso('p1')
ticks(2,escalonador)
self.estado(escalonador, ['Status p3: r', 'Status p1: w', 'Status p2: w'], [], 6, 3)
self.assertEqual('|p1|p1|p2|p2|p2|p3|',escalonador.getHistorico())
def test27(self):
escalonador = EscalonadorV2(1)
escalonador.addProcesso(Processo('p1',1))
escalonador.addProcesso(Processo('p2',1))
escalonador.addProcesso(Processo('p3',1))
escalonador.bloquearProcesso()
escalonador.bloquearProcesso()
escalonador.bloquearProcesso()
ticks(1,escalonador)
escalonador.desbloquearProcesso('p2')
escalonador.desbloquearProcesso('p1')
escalonador.desbloquearProcesso('p3')
ticks(3,escalonador)
self.estado(escalonador, ['Status p2: w', 'Status p1: w', 'Status p3: r'], [], 4, 1)
self.assertEqual('|None|p2|p1|p3|',escalonador.getHistorico())
def test28(self):
escalonador = EscalonadorV2()
escalonador.addProcesso(Processo('p1',1))
ticks(10,escalonador)
escalonador.addProcesso(Processo('p2',2))
ticks(3,escalonador)
self.estado(escalonador, ['Status p2: r', 'Status p1: w'], [], 13, 3)
def test29(self):
escalonador = EscalonadorV2()
escalonador.addProcesso(Processo('p1',2))
ticks(10,escalonador)
escalonador.addProcesso(Processo('p2',1))
ticks(3,escalonador)
escalonador.bloquearProcesso()
ticks(3,escalonador)
self.estado(escalonador, ['Status p2: r'], ['Status p1: b'], 16, 3)
def test30(self):
escalonador = EscalonadorV2()
escalonador.addProcesso(Processo('p1',2))
ticks(10,escalonador)
escalonador.addProcesso(Processo('p2',1))
ticks(3,escalonador)
escalonador.bloquearProcesso()
ticks(3,escalonador)
escalonador.desbloquearProcesso('p1')
ticks(1,escalonador)
self.estado(escalonador, ['Status p1: r', 'Status p2: w'], [], 17, 3)
def test31(self):
escalonador = EscalonadorV2()
escalonador.addProcesso(Processo('p1',1))
ticks(1,escalonador)
self.estado(escalonador, ['Status p1: r'], [], 1, 3)
escalonador.addProcesso(Processo('p2',2))
ticks(1,escalonador)
self.estado(escalonador, ['Status p2: r', 'Status p1: w'], [], 2, 3)
escalonador.finalizarProcesso('p2')
ticks(1,escalonador)
self.estado(escalonador, ['Status p1: r'], [], 3, 3)
def test33(self):
escalonador = Escalonador()
try:
escalonador.addProcesso(Processo('p1',3))
self.fail('Exceção esperada')
except Exception:
self.estado(escalonador, [], [], 0, 3)
self.assertEqual('|',escalonador.getHistorico())
def test32(self):
escalonador = EscalonadorV2()
escalonador.addProcesso(Processo('p1',2))
self.estado(escalonador, ['Status p1: None'], [], 0, 3)
ticks(2,escalonador)
self.estado(escalonador, ['Status p1: r'], [], 2, 3)
escalonador.addProcesso(Processo('p2',4))
ticks(1,escalonador)
escalonador.bloquearProcesso()
self.estado(escalonador, ['Status p1: w'], ['Status p2: r'], 3, 3)
ticks(2,escalonador)
escalonador.addProcesso(Processo('p3',1))
self.estado(escalonador, ['Status p1: r', 'Status p3: None'], ['Status p2: b'], 5, 3)
ticks(1,escalonador)
escalonador.bloquearProcesso()
self.estado(escalonador, ['Status p3: w'], ['Status p2: b', 'Status p1: r'], 6, 3)
ticks(1,escalonador)
escalonador.desbloquearProcesso('p2')
self.estado(escalonador, ['Status p2: b', 'Status p3: r'], ['Status p1: b'], 7, 3)
ticks(1,escalonador)
escalonador.desbloquearProcesso('p1')
escalonador.addProcesso(Processo('p4',4))
self.estado(escalonador, ['Status p2: r', 'Status p4: None', 'Status p1: b', 'Status p3: w'], [], 8, 3)
ticks(2,escalonador)
escalonador.addProcesso(Processo('p5',3))
escalonador.addProcesso(Processo('p6',3))
ticks(10,escalonador)
self.estado(escalonador, ['Status p2: r', 'Status p4: w', 'Status p5: w', 'Status p6: w', 'Status p1: w', 'Status p3: w'], [], 20, 3)
escalonador.bloquearProcesso()
ticks(4,escalonador)
self.estado(escalonador, ['Status p4: r', 'Status p5: w', 'Status p6: w', 'Status p1: w', 'Status p3: w'], ['Status p2: b'], 24, 3)
escalonador.desbloquearProcesso('p2')
ticks(1,escalonador)
self.estado(escalonador, ['Status p4: r', 'Status p2: w', 'Status p5: w', 'Status p6: w', 'Status p1: w', 'Status p3: w'], [], 25, 3)
ticks(5,escalonador)
self.estado(escalonador, ['Status p4: r', 'Status p2: w', 'Status p5: w', 'Status p6: w', 'Status p1: w', 'Status p3: w'], [], 30, 3)
escalonador.finalizarProcesso('p4')
ticks(1,escalonador)
self.estado(escalonador, ['Status p2: r', 'Status p5: w', 'Status p6: w', 'Status p1: w', 'Status p3: w'], [], 31, 3)
ticks(1,escalonador)
self.estado(escalonador, ['Status p2: r', 'Status p5: w', 'Status p6: w', 'Status p1: w', 'Status p3: w'], [], 32, 3)
escalonador.finalizarProcesso('p2')
ticks(1,escalonador)
self.estado(escalonador, ['Status p5: r', 'Status p6: w', 'Status p1: w', 'Status p3: w'], [], 33, 3)
ticks(3,escalonador)
self.estado(escalonador, ['Status p6: r', 'Status p5: w', 'Status p1: w', 'Status p3: w'], [], 36, 3)
ticks(3,escalonador)
self.estado(escalonador, ['Status p5: r', 'Status p6: w', 'Status p1: w', 'Status p3: w'], [], 39, 3)
escalonador.bloquearProcesso()
ticks(1,escalonador)
self.estado(escalonador, ['Status p6: r', 'Status p1: w', 'Status p3: w'], ['Status p5: b'], 40, 3)
ticks(1,escalonador)
self.estado(escalonador, ['Status p6: r', 'Status p1: w', 'Status p3: w'], ['Status p5: b'], 41, 3)
ticks(2,escalonador)
self.estado(escalonador, ['Status p6: r', 'Status p1: w', 'Status p3: w'], ['Status p5: b'], 43, 3)
ticks(1,escalonador)
escalonador.desbloquearProcesso('p5')
ticks(1,escalonador)
self.estado(escalonador, ['Status p5: w', 'Status p6: r', 'Status p1: w', 'Status p3: w'], [], 45, 3)
ticks(1,escalonador)
self.estado(escalonador, ['Status p5: r', 'Status p6: w', 'Status p1: w', 'Status p3: w'], [], 46, 3)
escalonador.finalizarProcesso('p6')
ticks(1,escalonador)
self.estado(escalonador, ['Status p5: r', 'Status p1: w', 'Status p3: w'], [], 47, 3)
########################################################
if __name__ == '__main__':
unittest.main()
| true |
f149d6fdc475ea8c2b924f64d5fb248e0a3c1b43 | Python | JIMMY-KSU/hoDMD-experiments | /py_files/evalbert.py | UTF-8 | 1,676 | 2.640625 | 3 | [
"BSD-2-Clause"
] | permissive | import argparse
from dataloader import picked_train_test_data_loader
from sklearn import preprocessing
from classifier import train_best
import numpy
from bert_serving.client import BertClient
bc = BertClient()
def train_test(pickled_train_path, pickled_test_path):
    """Encode both datasets with BERT sentence embeddings and train a classifier.

    Each data row is expected to look like (label, _, _, tokens): the class
    label at index 0 and a token list at index 3, which is joined into one
    sentence and embedded via the bert-serving client `bc`.

    Returns whatever classifier.train_best returns for the encoded data.
    """
    train, test = picked_train_test_data_loader(pickled_train_path, pickled_test_path)

    def vectorize_dataset(data, batch_size=20):
        """Return (X, Y): stacked BERT vectors and labels for `data` rows."""
        sentences = [" ".join(row[3]) for row in data]
        Y = [row[0] for row in data]
        X = []
        # Batch the encode() RPCs `batch_size` sentences at a time.  This is
        # the slice-based equivalent of the original `len(sentences) % 20`
        # counting logic, including the final partial batch.
        for start in range(0, len(sentences), batch_size):
            X.extend(bc.encode(sentences[start:start + batch_size]))
        return numpy.vstack(X), Y

    X_train, Y_train = vectorize_dataset(train)
    X_test, Y_test = vectorize_dataset(test)
    # numpy.vstack already returns ndarrays, so the original asarray
    # round-trips were redundant and have been dropped.
    le = preprocessing.LabelEncoder()
    le.fit(Y_train)
    Y_train = le.transform(Y_train)
    Y_test = le.transform(Y_test)
    print ("Length of vector: %s"%X_train.shape[1])
    return train_best(X_train, Y_train, X_test, Y_test)
if __name__ == "__main__":
    # CLI entry point: evaluate the embedding-based classifier on the two
    # pickled datasets given on the command line.
    # NOTE(review): the description says 'ELMo' but this script evaluates
    # BERT embeddings (see bert_serving client above) — confirm intent.
    parser = argparse.ArgumentParser(description='Evaluate ELMo based sentence embedding')
    parser.add_argument("pickled_training_data_path", help="pickled train path")
    parser.add_argument("pickled_test_data_path", help="pickled test path")
    args = parser.parse_args()
    pickled_training_data_path = args.pickled_training_data_path
    pickled_test_data_path = args.pickled_test_data_path
    results = train_test(pickled_training_data_path, pickled_test_data_path)
    # train_best's report appears to be multi-line; keep only the
    # second-to-last line as the printed summary.
    results = results.split("\n")[-2]
    print (results)
| true |
bdf963db0019a4a445d22afef9a40f67fed83b2c | Python | hej6853/Rowdy_Rooster_Project3 | /clustering_aid_prediction.py | UTF-8 | 3,285 | 2.734375 | 3 | [] | no_license | import streamlit as st
import pandas as pd
import matplotlib.pyplot as plt
import pickle
import numpy as np
from sklearn.ensemble import RandomForestClassifier
st.write("""
# Foreign Aid Needed Priority Level Prediction App
""")
st.write('---')
# Read the csv file into a pandas DataFrame
df_aid_raw = pd.read_csv('aid_clustering.csv')
aid = df_aid_raw.drop(columns=['k_labels'])
X = df_aid_raw[['child_mort', 'exports',
'health', 'imports', 'income',
'inflation', 'life_expec', 'total_fer', 'gdpp']]
# Sidebar
# Header of Specify Input Parameters
st.sidebar.header('User Input Features')
# Collects user input features into dataframe
uploaded_file = st.sidebar.file_uploader("Upload your input CSV file", type=["csv"])
if uploaded_file is not None:
input_df = pd.read_csv(uploaded_file)
else:
    def user_input_features():
        """Collect the nine country indicators from the Streamlit sidebar.

        Each numeric input is bounded by the training data's column
        min/max and defaults to the column mean; returns a one-row
        DataFrame of the entered values.
        """
        child_mort = st.sidebar.number_input('child_mort', float(X.child_mort.min()), float(X.child_mort.max()), float(X.child_mort.mean()))
        exports = st.sidebar.number_input('exports', float(X.exports.min()), float(X.exports.max()), float(X.exports.mean()))
        health = st.sidebar.number_input('health', float(X.health.min()), float(X.health.max()), float(X.health.mean()))
        imports = st.sidebar.number_input('imports', float(X.imports.min()), float(X.imports.max()), float(X.imports.mean()))
        income = st.sidebar.number_input('income', float(X.income.min()), float(X.income.max()), float(X.income.mean()))
        inflation = st.sidebar.number_input('inflation', float(X.inflation.min()), float(X.inflation.max()), float(X.inflation.mean()))
        life_expec = st.sidebar.number_input('life_expec', float(X.life_expec.min()), float(X.life_expec.max()), float(X.life_expec.mean()))
        total_fer = st.sidebar.number_input('total_fer', float(X.total_fer.min()), float(X.total_fer.max()), float(X.total_fer.mean()))
        gdpp = st.sidebar.number_input('gdpp', float(X.gdpp.min()), float(X.gdpp.max()), float(X.gdpp.mean()))
        data = {'child_mort': child_mort,
                'exports': exports,
                'health': health,
                'imports': imports,
                'income': income,
                'inflation': inflation,
                'life_expec': life_expec,
                'total_fer': total_fer,
                'gdpp': gdpp
                }
        features = pd.DataFrame(data, index=[0])
        return features
input_df = user_input_features()
df = pd.concat([input_df,aid],axis=0)
# Main Panel
# Displays the user input features
st.subheader('User Input features')
if uploaded_file is not None:
st.write(df)
else:
st.write('Awaiting CSV file to be uploaded. Currently using example input parameters (shown below).')
st.write(input_df)
# Reads in saved classification model
load_clf = pickle.load(open('aid_clf.pkl', 'rb'))
# Apply model to make predictions
prediction = load_clf.predict(input_df)
prediction_proba = load_clf.predict_proba(input_df)
st.subheader('Prediction')
aid_priority = np.array(['Aid needed priority-1','Aid needed priority-2','Aid needed priority-3','No Aid needed'])
st.write(aid_priority[prediction])
| true |
deac15b2bac6feaa3d93f8298db244af0f1054ac | Python | siddhiparkar151992/Online-Book-Store | /bookstore/src/searchengine/__init__.py | UTF-8 | 4,675 | 2.984375 | 3 | [] | no_license | from bookstore.src.dao.DataAccessor import DataAccessor
class SearchEngine(DataAccessor):
    """Fuzzy book-title search on top of MySQL full-text (boolean mode) queries.

    Results from every search pass are de-duplicated by ISBN via the
    `searchKeys` dict, which is reset at the start of each searchBooks call.

    NOTE(review): queries are assembled with str.format, so search terms and
    filter values are interpolated unescaped — vulnerable to SQL injection
    if fed untrusted input.  Parameterised queries would be safer.
    """

    def __init__(self):
        super(SearchEngine, self).__init__()
        # Boolean-mode operator characters (kept for reference/extension).
        self.exact = '+'
        self.notMatch = '-'
        self.less = '~'
        self.all = '*'
        # isbn -> True for every result already returned in this search.
        self.searchKeys = {}

    @staticmethod
    def _inClause(filterList):
        """Build an `and isbn in (...)` restriction for a non-empty filter.

        Bug fix: the original formatted `tuple(filterList)` directly, which
        for a single-element list yields "in ('x',)" — a trailing comma
        MySQL rejects.  Joining repr()s produces the same quoting without
        the trailing comma.
        """
        return "and isbn in ({})".format(", ".join(repr(i) for i in filterList))

    def matchExactWord(self, word, front=False, back=False, filterList=None):
        """Title search for `word`; returns only rows not yet seen this search.

        front=True -> substring match anywhere in the title (LIKE '%word%');
                      `back` is ignored in that case (as in the original).
        back=True  -> boolean-mode prefix match on the last word.
        filterList -> optional ISBN values to restrict the search to.
        """
        filterList = filterList or []  # avoid mutable default argument
        if front:
            query = ("select isbn, title, authors, publisher, DATE_FORMAT(yop,'%Y-%m-%d') as yop, "
                     "available_copies, price, format, keywords, subject,image_loc "
                     "from books where title like '%{}%'").format(word)
            if filterList:
                query = ' '.join([query, self._inClause(filterList)])
            exactmatch = super(SearchEngine, self).read(query=query)
            return self.storeSearchKeys(exactmatch, 'isbn')
        # '*' makes the last search term a prefix match in boolean mode.
        backSelector = '*' if back is True else ''
        query = (
            'select isbn, title, authors, publisher, DATE_FORMAT(yop,"%Y-%m-%d") as yop, '
            'available_copies, price, format, keywords, subject,image_loc '
            'from books where match(Title) against ("`{}`{}" in boolean mode)').format('+' + word.replace(' ', ' +'), backSelector)
        if filterList:
            query = ' '.join([query, self._inClause(filterList)])
        exactmatch = super(SearchEngine, self).read(query=query)
        return self.storeSearchKeys(exactmatch, 'isbn')

    def matchAll(self, word, filterList=None):
        """Prefix-match progressively shorter prefixes of `word` (down to 5 chars)."""
        filterList = filterList or []
        allMatch = []
        if len(word) > 3:
            for i in range(len(word), 4, -1):
                query = (
                    "select isbn, title, authors, publisher, DATE_FORMAT(yop,'%Y-%m-%d') as yop, "
                    "available_copies, price, format, keywords, subject,image_loc "
                    "from books where match(Title) against ('+{}*' in boolean mode)").format(word[:i])
                if filterList:
                    query = ' '.join([query, self._inClause(filterList)])
                result = super(SearchEngine, self).read(query=query)
                if result:
                    allMatch += result
        return self.storeSearchKeys(allMatch, 'isbn')

    def storeSearchKeys(self, searchList, key):
        """Return entries of searchList whose `key` was not seen yet; record them."""
        result = []
        for r in searchList:
            if r.get(key) not in self.searchKeys:
                result.append(r)
                self.searchKeys[r.get(key)] = True
        return result

    def searchSingleWord(self, word, filterList=None):
        """All matches for a single query word: exact/prefix first, then fuzzy."""
        filterList = filterList or []
        exactMatch = self.matchExactWord(word, back=True, filterList=filterList)
        allMatch = self.matchAll(word, filterList=filterList)
        return exactMatch + allMatch

    def recursiveSearch(self, wordList, filterList=None):
        """Exact-match shrinking substrings of the joined word list (>=5 chars)."""
        filterList = filterList or []
        result = []
        for i in range(len(wordList), 1, -1):
            phrase = ' '.join(wordList[:i])
            for j in range(len(phrase), 4, -1):
                word = phrase[:j]
                # Bug fix: the original used `is not ''` and `is` here, which
                # compare identity rather than equality.
                if word != '' and len(word.strip()) == len(word):
                    result += self.matchExactWord(word, back=True, filterList=filterList)
                if j == len(phrase) and len(phrase) >= 6:
                    # Run the substring pass once per phrase.  The original
                    # repeated these identical queries on every j iteration;
                    # the duplicates were discarded by storeSearchKeys anyway,
                    # so doing it only on the first j preserves the results
                    # (and their order) while avoiding redundant DB hits.
                    for k in range(len(phrase) - 5):
                        result += self.matchExactWord(phrase[k:], front=True, filterList=filterList)
        return result

    def searchMultiples(self, wordList, filterList=None):
        """Multi-word query: full phrase, shrinking phrases, then per-word fuzzy."""
        filterList = filterList or []
        allsearch = []
        exactMatch = self.matchExactWord(' '.join(wordList), filterList=filterList)
        if exactMatch:
            allsearch += exactMatch
        allWordsSearch = self.recursiveSearch(wordList, filterList=filterList)
        if allWordsSearch:
            allsearch += allWordsSearch
        for w in wordList:
            allsearch += self.matchAll(w, filterList=filterList)
        return allsearch

    def searchBooks(self, query, filterList=None):
        """Entry point: search book titles for `query`, optionally restricted to ISBNs."""
        filterList = filterList or []
        self.searchKeys = {}
        wordList = [w for w in query.split(' ') if w]
        if len(wordList) == 1:
            return self.searchSingleWord(wordList[0], filterList=filterList)
        return self.searchMultiples(wordList, filterList=filterList)
| true |
75c5ff2fcbce0046b73de1a7bc72c85a020dbe01 | Python | chrislazo22/OSSU | /Intro-to-Computer-Science/2-Intro-to-CS-Python/week_1/pset_1.py | UTF-8 | 110 | 3.421875 | 3 | [] | no_license | count = 0
# Count the vowels in the string s (s and the initial count are provided
# by the surrounding problem-set environment / preceding line).
for char in s:
    if char in 'aeiou':
        count += 1
# The original `else: count += 0` branch was a no-op and has been removed.
print(count)
| true |
9af82885d81413f3c1e03a7c782c0ec59fa9c94c | Python | anastasisbele/code_samples | /dl_1.py | UTF-8 | 26,760 | 2.921875 | 3 | [] | no_license |
# coding: utf-8
# # 1 b
# inputs get very big or small softmax might have a hard time computing exp
# # 2
# In[1]:
import numpy as np
import matplotlib.pyplot as plt
get_ipython().magic('matplotlib inline')
# In[2]:
def load_data(filename):
    """Load a whitespace-delimited file of `label feat1 feat2 ...` rows.

    Returns (features, labels): features is an (n_samples, n_features)
    float array, labels is the float array taken from the first column.
    """
    # np.loadtxt does the split/float-convert work the original loop did by
    # hand; ndmin=2 keeps the result 2-D even for a single-row file.
    data = np.loadtxt(filename, ndmin=2)
    return data[:, 1:], data[:, 0]
# In[919]:
#load the data in train and test arrays
X_train, y_train = load_data('iris-train.txt')
X_test, y_test = load_data('iris-test.txt')
#normalize Training data
X_train = 2*(X_train-0.5)
print(X_train.mean(axis=0))
print(X_train.max())
X_test = 2*(X_test-0.5)
# In[4]:
W = np.random.rand(X_train.shape[1],np.unique(y_train).size) # 2 x 3 matrix
#np.dot(np.array([[2,4],[2,2]]),np.array([2,1]))
W.shape
# In[980]:
def softmax(W, X):
    """Class probabilities for every row of X under weight matrix W.

    W : (n_features, n_classes) weight matrix.
    X : (n_samples, n_features) data matrix.
    Returns an (n_classes, n_samples) array whose columns each sum to 1.
    """
    logits = np.dot(W.transpose(), X.transpose())
    # Subtract the per-column max before exponentiating: softmax is
    # shift-invariant, so this prevents overflow without distorting the
    # result the way the original hard clip at +/-500 did.
    e_x = np.exp(logits - logits.max(axis=0))
    return np.divide(e_x, e_x.sum(0))
# In[1055]:
def trainSoftmax(X_train, X_test, y_train, y_test):
    """Train a softmax (multinomial logistic) classifier with minibatch SGD + momentum.

    Handles both label conventions used in this notebook — IRIS (1..3) and
    CIFAR-10 (0..9) — via the `1*(np.unique(y).size < 4)` offsets below.
    Side effect: plots per-epoch loss and mean per-class accuracy curves.
    Returns the learned weight matrix W of shape (n_features, n_classes).
    """
    #creation of target vectors (target array): one-hot encoding of the labels
    t_train = np.empty([y_train.size, np.unique(y_train).size])
    t_test = np.empty([y_test.size, np.unique(y_train).size]) # labels in y_train and y_test should be the same
    for k in range(t_train.shape[1]):
        t_train[:,k] = np.where(y_train==k+1*(np.unique(y_train).size < 4),1,0) # + 1 depends on the labels indexing: +1 for IRIS, 0 for CIPHAR
        t_test[:,k] = np.where(y_test==k+1*(np.unique(y_train).size < 4),1,0)
    # per-epoch history used for the plots at the end
    total_loss_train = []
    total_loss_test = []
    mean_train_accuracy = []
    mean_test_accuracy = []
    W = np.random.rand(X_train.shape[1],np.unique(y_train).size)*0.1 # weight initialization, 2 x 3 matrix
    DW = np.zeros([X_train.shape[1],np.unique(y_train).size]) # momentum
    # hyper-parameters (values used for the CIFAR runs noted inline)
    batch_size = 100
    l_r = 0.0001 # learning rate ciphar:0.0001
    a = 0.001 # decay parameter ciphar: 0.001
    m_r = 0.01 # momentum rate ciphar: 0.01
    for epoch in range(100):
        # minibatch creation: reshuffle the sample indices every epoch
        randomizer = np.arange(y_train.size)
        np.random.shuffle(randomizer)
        #initialize loss and class accuracy
        Loss_train = 0
        train_class_accuracy = []
#        print('start')
        #iterate over batches
        for batch_no in range(y_train.size//batch_size):
            batch = randomizer[(batch_no*batch_size):(batch_no+1)*batch_size] # batch selection
            P_train_b = softmax(W, X_train[batch,:]) # 3 x batch_size matrix
            # cross-entropy loss accumulated over the epoch
            Loss_train = Loss_train - np.multiply(t_train[batch,:].transpose(), np.log(P_train_b)).sum()
            y_train_pred = np.argmax(P_train_b, axis = 0) + 1*(np.unique(y_train).size < 4) # pick the class that maximizes the likelihood for every datapoint (+1 because of python indexing for IRIS data)
            # per-class recall averaged over the classes present in this batch
            train_class_accuracy.append(sum(list(map(lambda x: (y_train_pred[y_train[batch]==x]==x).sum()/(y_train[batch]==x).sum(), [k for k in range(1*(np.unique(y_train).size < 4),np.unique(y_train[batch]).size+1*(np.unique(y_train).size < 4))])))/np.unique(y_train[batch]).size)
            #gradient calculation WITH regularization (check end of next line)
            dLoss = a*W.transpose() + np.dot((P_train_b - t_train[batch,:].transpose()), X_train[batch,:]) # leads to a 3 x 2 matrix, each row being the loss gradient for this class WITH regularization
            #update momentum rule
            DW = m_r*DW + l_r*dLoss.transpose()
            W = W - DW
        # evaluate on the full test set once per epoch
        P_test = softmax(W, X_test) # 3 x 51 matrix
        Loss_test = -np.multiply(t_test.transpose(), np.log(P_test)).sum()
        y_test_pred = np.argmax(P_test, axis = 0) + 1*(np.unique(y_test).size < 4) # +1 for IRIS, 0 for CIPHAR
        test_class_accuracy = sum(list(map(lambda x: (y_test_pred[y_test==x]==x).sum()/(y_test==x).sum(), [k for k in range(1*(np.unique(y_test).size < 4),np.unique(y_test).size+1*(np.unique(y_test).size < 4))])))/np.unique(y_test).size
        total_loss_train.append(Loss_train)
        total_loss_test.append(Loss_test)
        mean_train_accuracy.append(np.mean(train_class_accuracy))
        mean_test_accuracy.append(test_class_accuracy)
    # side effect: plot the loss and accuracy curves for the whole run
    fig,ax = plt.subplots(1,2,figsize = (12,6))
    ax[0].plot(np.arange(epoch+1), total_loss_train, 'r-', np.arange(epoch+1), total_loss_test, 'b-')
    ax[0].set(title = 'Cross-Entropy Loss', xlabel = 'Epochs', ylabel = 'Loss')#,xlim = ax[0].get_xlim(), ylim = ax[0].get_xlim())
    ax[1].plot(np.arange(epoch+1), mean_train_accuracy, 'r-', np.arange(epoch+1), mean_test_accuracy, 'b-')
    ax[1].tick_params(reset = True)
    ax[1].set(title = 'mean per-class accuracy', xlabel = 'Epochs')
    return W
# terrible results for l_r = 0.1, loss bounces back and forth
# In[1043]:
#randomizer = np.arange(90)
#np.random.shuffle(randomizer)
#print(X_train[randomizer,:].shape, y_train[randomizer].shape)
W_iris = trainSoftmax(X_train, X_test, y_train, y_test)
#print(W_iris)
P = softmax(W_iris, X_test)
y_pred = np.argmax(P, axis = 0) + 1 # pick the class that maximizes the likelihood for every datapoint (+1 because of python indexing)
print(y_pred, y_test, sep='\n')
accuracy = (y_test==y_pred).sum()/y_test.size
accuracy
# In[1044]:
plt.savefig('iris softmax.png')
# ## 2 b
# In[1045]:
# DISPLAYING THE DECISION BOUNDARIES
h = .02 # step size in the mesh
# create a mesh to plot in
x_min, x_max = X_train[:, 0].min() - 1, X_train[:, 0].max() + 1
y_min, y_max = X_train[:, 1].min() - 1, X_train[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# # Plot the decision boundary. For that, we will assign a color to each
P_train = softmax(W_iris, np.c_[xx.ravel(), yy.ravel()])
Z = np.argmax(P_train, axis = 0) + 1
# # Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure(figsize=(7,6))
plt.contourf(xx, yy, Z, cmap='cool', alpha = 0.8)
plt.scatter(X_train[:,0],X_train[:,1],s=20, c = y_train, edgecolors='b', linewidths=0.5)
plt.savefig('decision_bound.png')
# In[229]:
#[ 1., 1., 2., 2., 2., 3., 1., 2., 1., 3.]
y_pred = np.array([3, 3, 1, 1, 1, 1, 3, 1, 3, 1])
y_train[[25, 22, 45, 54, 51, 72, 28, 47, 27, 80]]
#(y_pred[y_train[[ 4, 2, 53, 8, 7, 52, 88, 47, 17, 19]] == 1]==1).sum()/(y_train[[ 4, 2, 53, 8, 7, 52, 88, 47, 17, 19]] == 1).sum()#== np.array( [3, 3, 2, 2, 3, 2, 2, 3, 3, 2])
# # 3
# In[1051]:
def loadCIFAR10():
    """Load the CIFAR-10 python batches from ./cifar-10-batches-py.

    Returns (train_data, train_labels, test_data, test_labels), where the
    data arrays hold one flattened 3x32x32 image per row and the labels
    are integer arrays.
    """
    import pickle
    batches = []
    labels = []
    for file_no in range(1, 6):
        with open('cifar-10-batches-py/data_batch_{}'.format(file_no), 'rb') as fo:
            batch = pickle.load(fo, encoding='bytes')
        batches.append(batch[b'data'])
        labels.extend(batch[b'labels'])
    with open('cifar-10-batches-py/test_batch', 'rb') as fo:
        test_dict = pickle.load(fo, encoding='bytes')
    # Concatenate once at the end: the original re-ran np.concatenate after
    # every batch, copying the growing array each time.
    data = np.concatenate(batches)
    return (data, np.array(labels), test_dict[b'data'], np.array(test_dict[b'labels']))
# In[1052]:
train_feat, train_labels, test_feat, test_labels = loadCIFAR10()
#display first images
rows = 3
cols = 10
fig, axes = plt.subplots(rows, cols, figsize=(12,6))
for i in range(rows*cols):
row_index = i//cols
col_index = i%cols
ax = axes[row_index, col_index]
ax.imshow(train_feat[train_labels==col_index,:][row_index,:].reshape(3,32,32).transpose(1,2,0))
plt.tight_layout()
plt.savefig('ciphar_images.png')
# # 4
# In[1056]:
train_feat = np.divide(train_feat - train_feat.min(axis=0),train_feat.max(axis=0) - train_feat.min(axis=0))
train_feat = train_feat - train_feat.mean(axis=0)
test_feat = np.divide(test_feat - test_feat.min(axis=0),test_feat.max(axis=0) - test_feat.min(axis=0))
test_feat = test_feat - test_feat.mean(axis=0)
#print(train_feat[0,1:10], test_feat[0,1:10])
W_ciphar = trainSoftmax(train_feat, test_feat, train_labels, test_labels)
P = softmax(W_ciphar, test_feat)
y_pred = np.argmax(P, axis = 0) # pick the class that maximizes the likelihood for every datapoint (no +1, classes start from 0)
print(y_pred, test_labels, sep='\n')
accuracy = (test_labels==y_pred).sum()/test_labels.size
accuracy
# In[1060]:
# create the confusion matrix
CM = []
for label in range(np.unique(test_labels).size):
CM.append([(test_labels[y_pred == label]==k).sum() for k in range(np.unique(test_labels).size)]) # no + 1 for CIPHAR-10
CM = np.array(CM)
# normalize it based on actual class, i.e.
# x% of class 1 objects are classified as class 2
CM = CM/CM.sum(axis=0)
fig = plt.figure()
ax = fig.add_subplot(111)
cax = ax.matshow(CM, cmap = 'Greys')
fig.colorbar(cax)
# plt.figure(figsize=(3,3))
# cax = ax.matshow(CM, cmap = 'Greys')
# plt.colorbar(cax)
ax.set_title(' Normalized per actual class Confusion Matrix', y = 1.1)
ax.set_xlabel('Actual class'); ax.set_ylabel('predicted class')
plt.show()
plt.savefig('confusion_heatmap.png')
# # 5
# ## Part 1
# In[440]:
def loadMusicData(fname, addBias, split=463714):
    """Load the YearPredictionMSD CSV: one `year,feat1,...,feat90` row per song.

    addBias=True appends a constant-1 bias column to the features.
    `split` is the index of the first test row; 463714 is the dataset's
    official train/test boundary, exposed as a parameter so the loader
    also works on smaller files.

    Returns (trainYears, trainFeat, testYears, testFeat).
    """
    data = []
    with open(fname) as f:
        for line in f:
            row = line.strip().split(',')
            if addBias:
                row.append('1')  # constant bias feature
            data.append(row)
    data = np.array(data, 'float')
    return (data[:split, 0].astype(int), data[:split, 1:],
            data[split:, 0].astype(int), data[split:, 1:])
# In[441]:
def musicMSE(pred, gt):
    """Mean squared error between ground-truth years and rounded predictions."""
    return np.mean((gt - np.around(pred)) ** 2)
# In[723]:
trainYears, trainFeat, testYears, testFeat = loadMusicData('YearPredictionMSD/YearPredictionMSD.txt', addBias = True)
# In[458]:
testFeat.max(axis = 0)-testFeat.min(axis = 0)
# range (max - min) of values is small for the first 12 features but it gets significantly larger for the following 78 features. This is expected of course since the former represent the average timbre whereas the latter represent timber covariance, i.e. the order is closer to that of timbre squared.
# In[462]:
print('Training years range from {} to {}'.format(trainYears.min(), trainYears.max()))
print('Testing years range from {} to {}'.format(testYears.min(), testYears.max()))
# In[489]:
fig,ax = plt.subplots(1,2,figsize = (12,6))#, sharey = True, sharex = True)
fig.suptitle('Histograms of years for training and testing sets')
x,bins,p= ax[0].hist(testYears, range = (1921,2011 )) # with normed = True normalizes so that the area under the hist is 1
print(bins)
ax[0].set(title = 'Training Set', xlabel = 'Year', ylabel = 'Frequency')#,xlim = ax[0].get_xlim(), ylim = ax[0].get_xlim())
ax[1].hist(testYears, range = (1921, 2011))
ax[1].tick_params(reset = True)
ax[1].set(title = 'Test Set', xlabel = 'Year')
#savefig("figureC.png", bbox_inches='tight')
#plt.hist(trainYears)
# We see that train and test labels follow a similar distribution which is good for our prediction purposes. However, both sets are dominated by songs from the last 2 decades that account for more than 90% of the sets' observations. Especially as we go back in time we get fewer and fewer instances, which results to years before the 70's being seriously underepresented.
# In[508]:
years, counts = np.unique(trainYears, return_counts=True)
year = years[np.argmax(counts)] # year = 2007
mse_2007 = musicMSE(year, testYears)
print(mse_2007)
mse_1998 = musicMSE(1998, testYears)
print(mse_1998)
# ## Part 2
# In[651]:
def RidgeRegression(X_train, X_test, y_train, y_test, regularization):
    """Fit a linear model by minibatch gradient descent with L2 ('Ridge') or
    L1 ('Lasso', any other value) regularization.

    Plots per-epoch train/test MSE and regularized loss curves and returns
    (W, final_test_MSE). The boolean `L2` switches the penalty term:
    L2*a2*||W||^2 + (1-L2)*a1*||W||_1, so exactly one term is active.
    Relies on `musicMSE` defined earlier in the notebook — TODO confirm.
    """
    L2 = False
    if regularization == 'L2':
        L2 = True
    W = np.random.rand(X_train.shape[1]) # weight initialization, 91-vector
    batch_size = 100
    l_r = 0.00001 # learning rate
    a2 = 0.01 # L2 decay parameter
    a1 = 0.01 # L1 decay parameter
    # initial values of losses and MSEs, recorded before any training step
    # (index 0 of the history lists below)
    y_test_pred = np.dot(X_test,W)
    y_train_pred = np.dot(X_train,W)
    total_loss_train = [((y_train_pred-y_train)**2).sum() + L2*a2*((W**2).sum()) + (1-L2)*a1*np.linalg.norm(W, ord=1)]
    total_loss_test = [((y_test_pred-y_test)**2).sum() + L2*a2*((W**2).sum()) + (1-L2)*a1*np.linalg.norm(W, ord=1)]
    train_MSE = [musicMSE(y_train_pred, y_train)]
    test_MSE = [musicMSE(y_test_pred, y_test)]
    for epoch in range(20):
        # minibatch creation: reshuffle sample indices every epoch
        randomizer = np.arange(y_train.size)
        np.random.shuffle(randomizer)
        #initialize loss and class accuracy
        Loss_train = 0
        batch_MSE = []
        print('start', Loss_train)
        #iterate over batches
        for batch_no in range(y_train.size//batch_size):
            batch = randomizer[(batch_no*batch_size):(batch_no+1)*batch_size] # batch selection
            y_train_pred = np.dot(X_train[batch,:],W) # y_pred = W.t*X, batch size vector, will be used in all computations below
            # train square loss
            train_s_l = ((y_train_pred-y_train[batch])**2).sum() # sum of (y_pred-y)^2
            # regularized train loss (accumulated across batches of this epoch)
            Loss_train = Loss_train + train_s_l + L2*a2*((W**2).sum()) + (1-L2)*a1*np.linalg.norm(W, ord=1) # L2 regularization term in the end
            #gradient calculation WITH regularization (check end of next line)
            dLoss = L2*2*a2*W + (1-L2)*a1*np.sign(W) + np.dot(X_train[batch,:].transpose(),(y_train_pred-y_train[batch])) # 91-vector gradient of loss
            batch_MSE.append(train_s_l) #just the sum of squared errors here, average will be per epoch
            # update rule for weights
            W = W - l_r*dLoss
        # end-of-epoch evaluation on the full test set
        y_test_pred = np.dot(X_test,W)
        Loss_test = ((y_test_pred-y_test)**2).sum() + L2*a2*((W**2).sum()) + (1-L2)*a1*np.linalg.norm(W, ord=1)
        total_loss_train.append(Loss_train)
        total_loss_test.append(Loss_test)
        train_MSE.append(np.sum(batch_MSE)/y_train.size)
        test_MSE.append(musicMSE(y_test_pred, y_test)) # used the mse function from earlier for a change
    # Learning curves; x-axis has epoch+2 points because index 0 holds the
    # pre-training values.
    fig,ax = plt.subplots(1,2,figsize = (12,6))#, sharey = True, sharex = True)
    fig.suptitle('Train and Test Mean Square Errors')
    ax[0].plot(np.arange(epoch+2), train_MSE, 'r-')
    ax[0].set(title = 'Training MSE', xlabel = 'Epochs', ylabel = 'MSE')#,xlim = ax[0].get_xlim(), ylim = ax[0].get_xlim())
    ax[1].plot(np.arange(epoch+2), test_MSE, 'b-')
    ax[1].tick_params(reset = True)
    ax[1].set(title = 'Test MSE', xlabel = 'Epochs')
    fig,ax = plt.subplots(1,2,figsize = (12,6))#, sharey = True, sharex = True)
    fig.suptitle('Train and Test Loss')
    ax[0].plot(np.arange(epoch+2), total_loss_train, 'r-')
    ax[0].set(title = 'Training Loss', xlabel = 'Epochs', ylabel = 'Loss')#,xlim = ax[0].get_xlim(), ylim = ax[0].get_xlim())
    ax[1].plot(np.arange(epoch+2), total_loss_test, 'b-')
    ax[1].tick_params(reset = True)
    ax[1].set(title = 'Test Loss', xlabel = 'Epochs')
    plt.show()
    return W, test_MSE[-1]
# In[667]:
# Standardize the features (all columns but the last, presumably a bias
# column — TODO confirm) using TRAIN-set statistics only.
# BUG FIX: the statistics must be captured *before* the in-place
# normalization; previously `trainFeat_normed = trainFeat` aliased the array,
# so by the time the test set was scaled, trainFeat's mean/std were already
# ~0/~1 and the test features were left effectively unstandardized.
train_mean = trainFeat[:,:-1].mean(axis=0)
train_std = trainFeat[:,:-1].std(axis=0, ddof=1)
trainFeat_normed = trainFeat
trainFeat_normed[:,:-1] = np.divide((trainFeat[:,:-1] - train_mean), train_std)
testFeat_normed = testFeat
testFeat_normed[:,:-1] = (testFeat[:,:-1] - train_mean)/train_std
W_Ridge, MSE_Ridge = RidgeRegression(trainFeat_normed, testFeat_normed, trainYears, testYears, regularization = 'L2')
print(MSE_Ridge)
# The plots include the MSE and loss recorded before the first epoch so the
# sharp drop during epoch one is visible: the dataset is large, so many
# minibatch updates happen within a single epoch. (If that initial point
# dominates the graph and hides the later curve, it can be omitted instead.)
# In[612]:
# RIDGE solution with pseudoinverse --> W = (XtX+λI)^-1*Xt y
# Closed-form ridge regression for comparison with the gradient-descent fit.
# NOTE(review): `a` (the regularization strength λ) is not defined in this
# cell — presumably set earlier in the notebook; TODO confirm its value.
argument = np.dot(trainFeat_normed.transpose(),trainFeat_normed) + a*np.identity(trainFeat_normed.shape[1])
W_pseudo = np.dot(np.linalg.inv(argument), np.dot(trainFeat_normed.transpose(), trainYears))
pseudo_MSE = musicMSE(np.dot(testFeat_normed,W_pseudo), testYears)
print(pseudo_MSE)
# # Part 3
# In[668]:
# Lasso fit: same optimizer as Ridge above, but with the L1 penalty branch.
W_Lasso, MSE_Lasso = RidgeRegression(trainFeat_normed, testFeat_normed, trainYears, testYears, regularization = 'L1')
print(MSE_Lasso)
# In[675]:
#print(np.sort(np.unique(W_Ridge[:-1])), np.sort(np.unique(W_Lasso)))
# Compare the two weight distributions (bias entry excluded).
plt.hist((W_Ridge[:-1], W_Lasso[:-1]), bins = 20)
plt.title('Histogram of Ridge vs Lasso Weights')
plt.ylabel('absolute frequency')
plt.xlabel('feature weights')
plt.legend(('Ridge','Lasso'))
# # Part 4
# In[890]:
def exp_decay(l_r, epoch):
    """Step-decay schedule: halve the learning rate every two epochs."""
    decay_factor = 0.5
    steps = epoch // 2  # number of completed two-epoch intervals
    return l_r * decay_factor ** steps
# In[909]:
def PoissonRegression(X_train, X_test, y_train, y_test):
    """Fit Poisson regression (y_pred = exp(W·x)) by minibatch gradient
    descent on standardized targets.

    Plots per-epoch train/test MSE and negative-log-likelihood curves and
    returns (W, test_MSE_history). Reported MSEs are de-standardized by
    multiplying with the train-set variance. Relies on `musicMSE` and
    `exp_decay` defined earlier in the notebook.
    """
    # standardize train and test years based on train set to use them in our algorithm, otherwise they take large values and blow up computations
    train_st_dev = np.std(y_train, ddof=1)
    train_mean = np.mean(y_train)
    print(train_st_dev, train_mean)
    y_train = (y_train - train_mean)/train_st_dev
    y_test = (y_test - train_mean)/train_st_dev
    # initialize to random small weights
    W = np.random.rand(X_train.shape[1])*0.001 # weight initialization, 91-vector
    batch_size = 100
    l_r = 0.000001 # learning rate
    # initial values of losses and MSEs, recorded before any training
    # (loss = Poisson negative log-likelihood up to constants: exp(WtX) - y*WtX)
    y_train_pred = np.exp(np.dot(X_train,W))
    y_test_pred = np.exp(np.dot(X_test,W))
    total_loss_train = [(y_train_pred - np.multiply(np.dot(X_train,W),y_train)).sum()]
    total_loss_test = [(y_test_pred - np.multiply(np.dot(X_test,W),y_test)).sum()]
    train_MSE = [musicMSE(y_train_pred, y_train)*train_st_dev**2]
    test_MSE = [musicMSE(y_test_pred, y_test)*train_st_dev**2]
    for epoch in range(10):
        # minibatch creation
        randomizer = np.arange(y_train.size)
        np.random.shuffle(randomizer)
        #initialize loss and class accuracy
        Loss_train = 0
        batch_MSE = []
        l_r = exp_decay(l_r, epoch)  # step-decay the learning rate each epoch
        print('start')
        #iterate over batches
        for batch_no in range(y_train.size//batch_size):
            batch = randomizer[(batch_no*batch_size):(batch_no+1)*batch_size] # batch selection
            WtX = np.dot(X_train[batch,:],W) # W.t*X, batch size vector, will be used in all computations below
            y_train_pred = np.exp(np.clip(WtX,-100,100)) # y_pred = exp(W.t*X); clip avoids overflow in exp
            # train loss
            Loss_train = Loss_train + (y_train_pred - np.multiply(WtX,y_train[batch])).sum()
            #gradient calculation
            dLoss = np.dot(X_train[batch,:].transpose(),(y_train_pred - y_train[batch])) # 91-vector gradient of loss = sum((exp(WtX)-y)X)
            batch_MSE.append(((y_train_pred-y_train[batch])**2).sum()) #just the sum of squared errors here, average will be per epoch
            # update rule for weights
            W = W - l_r*dLoss
        # end-of-epoch evaluation on the full test set
        test_WtX = np.dot(X_test,W) #will be used couple of times below
        y_test_pred = np.exp(np.clip(test_WtX,-100,100))
        Loss_test = (y_test_pred - np.multiply(test_WtX,y_test)).sum()
        total_loss_train.append(Loss_train)
        total_loss_test.append(Loss_test)
        train_MSE.append((np.sum(batch_MSE)/y_train.size)*train_st_dev**2) #de-standardizing final MSE output
        test_MSE.append(musicMSE(y_test_pred, y_test)*train_st_dev**2) # used the mse function from earlier for a change
    # Learning curves; epoch+2 points because index 0 holds pre-training values.
    fig,ax = plt.subplots(1,2,figsize = (12,6))#, sharey = True, sharex = True)
    fig.suptitle('Train and Test Mean Square Errors')
    ax[0].plot(np.arange(epoch+2), train_MSE, 'r-')
    ax[0].set(title = 'Training MSE', xlabel = 'Epochs', ylabel = 'MSE')#,xlim = ax[0].get_xlim(), ylim = ax[0].get_xlim())
    ax[1].plot(np.arange(epoch+2), test_MSE, 'b-')
    ax[1].tick_params(reset = True)
    ax[1].set(title = 'Test MSE', xlabel = 'Epochs')
    fig,ax = plt.subplots(1,2,figsize = (12,6))#, sharey = True, sharex = True)
    fig.suptitle('Train and Test Loss')
    ax[0].plot(np.arange(epoch+2), total_loss_train, 'r-')
    ax[0].set(title = 'Training Loss', xlabel = 'Epochs', ylabel = 'Loss')#,xlim = ax[0].get_xlim(), ylim = ax[0].get_xlim())
    ax[1].plot(np.arange(epoch+2), total_loss_test, 'b-')
    ax[1].tick_params(reset = True)
    ax[1].set(title = 'Test Loss', xlabel = 'Epochs')
    plt.show()
    return W, test_MSE
# In[910]:
# print(trainYears_centered.min())
# trainYears_normed = (trainYears - np.mean(trainYears))/np.std(trainYears, ddof=1)
# testYears_normed = (testYears - np.mean(testYears))/np.std(testYears, ddof = 1)
# Raw years are passed: PoissonRegression standardizes the targets internally.
W_Poisson, MSE_Poisson = PoissonRegression(trainFeat_normed, testFeat_normed, trainYears, testYears)
print(MSE_Poisson)
# In[ ]:
# In[1063]:
def SongSoftmax(X_train, X_test, y_train, y_test):
    """Multiclass softmax regression over release years (classes 1922-2011),
    trained by minibatch gradient descent with L2 weight decay and momentum.

    Plots the total loss and the mean per-class accuracy per epoch and
    returns the learned weight matrix W. Relies on a `softmax(W, X)` helper
    defined earlier in the notebook — presumably returning a
    (num_classes x batch) probability matrix; TODO confirm.
    """
    # creation of target vectors (one-hot target arrays, one column per year)
    t_train = np.empty([y_train.size, np.unique(y_train).size])
    t_test = np.empty([y_test.size, np.unique(y_train).size]) # labels in y_train and y_test should be the same
    for k in range(t_train.shape[1]): #years from 1922 to 2011, only 1923 is missing from train set
        t_train[:,k] = np.where(y_train==k+1922,1,0)
        t_test[:,k] = np.where(y_test==k+1922,1,0)
    total_loss_train = []
    total_loss_test = []
    mean_train_accuracy = []
    mean_test_accuracy = []
    W = np.random.rand(X_train.shape[1],np.unique(y_train).size) # weight initialization
    DW = np.zeros([X_train.shape[1],np.unique(y_train).size]) # momentum accumulator
    batch_size = 100
    l_r = 0.00001 # learning rate
    a = 0.001 # L2 weight-decay parameter
    m_r = 0.01 # momentum rate
    for epoch in range(50):
        # minibatch creation: reshuffle sample indices every epoch
        randomizer = np.arange(y_train.size)
        np.random.shuffle(randomizer)
        # initialize per-epoch loss and per-batch class accuracies
        Loss_train = 0
        train_class_accuracy = []
        print('start')
        # iterate over batches
        for batch_no in range(y_train.size//batch_size):
            batch = randomizer[(batch_no*batch_size):(batch_no+1)*batch_size] # batch selection
            P_train_b = softmax(W, X_train[batch,:]) # class-probability matrix for this batch
            # cross-entropy loss accumulated over the epoch
            Loss_train = Loss_train - np.multiply(t_train[batch,:].transpose(), np.log(P_train_b)).sum()
            y_train_pred = np.argmax(P_train_b, axis = 0) + 1922 # pick the most likely class per datapoint
            # mean per-class accuracy on the batch
            # NOTE(review): assumes batch labels are consecutive years from 1922 — TODO confirm
            train_class_accuracy.append(sum(list(map(lambda x: (y_train_pred[y_train[batch]==x]==x).sum()/(y_train[batch]==x).sum(), [k for k in range(1922,np.unique(y_train[batch]).size+1922)])))/np.unique(y_train[batch]).size)
            # gradient calculation WITH L2 regularization (first term)
            dLoss = a*W.transpose() + np.dot((P_train_b - t_train[batch,:].transpose()), X_train[batch,:])
            # momentum update rule
            DW = m_r*DW + l_r*dLoss.transpose()
            W = W - DW
        # end-of-epoch evaluation on the full test set
        P_test = softmax(W, X_test)
        Loss_test = -np.multiply(t_test.transpose(), np.log(P_test)).sum()
        y_test_pred = np.argmax(P_test, axis = 0) + 1922
        # BUG FIX: the range bound was written '...size+1*922' (i.e. +922),
        # which made the range empty and forced the test accuracy to 0;
        # it must be +1922 to mirror the training-accuracy computation above.
        test_class_accuracy = sum(list(map(lambda x: (y_test_pred[y_test==x]==x).sum()/(y_test==x).sum(), [k for k in range(1922,np.unique(y_test).size+1922)])))/np.unique(y_test).size
        total_loss_train.append(Loss_train)
        total_loss_test.append(Loss_test)
        mean_train_accuracy.append(np.mean(train_class_accuracy))
        mean_test_accuracy.append(test_class_accuracy)
    # Learning curves: losses first, then mean per-class accuracies.
    plt.plot(np.arange(epoch+1), total_loss_train, 'r-', np.arange(epoch+1), total_loss_test, 'b-')
    plt.figure()
    plt.plot(np.arange(epoch+1), mean_train_accuracy, 'r-', np.arange(epoch+1), mean_test_accuracy, 'b-')
    plt.show()
    return W
# In[1064]:
# Train the softmax classifier and report raw prediction accuracy on the test set.
W_songs = SongSoftmax(trainFeat_normed, testFeat_normed, trainYears, testYears)
print(W_songs)
P = softmax(W_songs, testFeat_normed)
y_pred = np.argmax(P, axis = 0) + 1922 # pick the class that maximizes the likelihood for every datapoint (classes start at year 1922)
print(y_pred, testYears, sep='\n')
accuracy = (testYears==y_pred).sum()/testYears.size
accuracy  # bare expression: displays the value in a notebook cell
# In[ ]:
| true |
70584a1bff395cbab6b72c2fc9a18064de1a9a78 | Python | guillaumeevin/pynonstationarygev | /extreme_data/meteo_france_data/scm_models_data/case_studies/ribatet/day_for_the_maxima_v2.py | UTF-8 | 2,721 | 2.6875 | 3 | [] | no_license | import pandas as pd
import numpy as np
from extreme_data.meteo_france_data.scm_models_data.safran.safran import SafranSnowfall1Day
from extreme_data.meteo_france_data.scm_models_data.visualization.main_study_visualizer import \
SCM_STUDY_CLASS_TO_ABBREVIATION
from extreme_data.meteo_france_data.scm_models_data.altitudes_studies import AltitudesStudies
def generate_excel_with_annual_maxima(fast=True, maxima_dates=False):
    """Write one Excel sheet per altitude holding annual maxima data.

    :param fast: restrict the run to two altitudes instead of five.
    :param maxima_dates: export day-of-season indices of the maxima rather
        than the maxima themselves (appended to the workbook name).
    """
    altitudes = [900, 1200] if fast else [900, 1200, 1500, 1800, 2100]
    study_class = SafranSnowfall1Day
    study_name = 'annual maxima of ' + SCM_STUDY_CLASS_TO_ABBREVIATION[study_class]
    if maxima_dates:
        study_name += ' - number of days since 1st August, e.g. 1 represents the 2nd of August'
    writer = pd.ExcelWriter('{}.xlsx'.format(study_name), engine='xlsxwriter')
    studies = AltitudesStudies(study_class, altitudes)
    for altitude, study in studies.altitude_to_study.items():
        write_df_with_annual_maxima_v2(altitude, writer, study, maxima_dates)
    writer.save()
def write_df_with_annual_maxima_v2(altitude, writer, study, maxima_dates=False) -> pd.DataFrame:
    """Concatenate per-massif annual maxima dates onto the lat/lon table and
    write the result as one sheet named after the altitude.

    Only the ``maxima_dates=True`` variant is implemented so far.
    """
    df = study.df_latitude_longitude
    rows = []
    for massif_name in df.index:
        if not maxima_dates:
            raise NotImplementedError
        rows.append(study.massif_name_to_annual_maxima_angle[massif_name])
    values = pd.DataFrame(data=np.array(rows), index=df.index,
                          columns=study.ordered_years).astype(float)
    df = pd.concat([df, values], axis=1)
    print(df.head())
    df.to_excel(writer, sheet_name='altitude = {} m'.format(altitude))
def write_df_with_annual_maxima(massif_name, writer, altitude_studies, maxima_dates=False) -> pd.DataFrame:
    """Write one Excel sheet per massif: one column of annual maxima per
    altitude at which the massif exists. When `maxima_dates` is True the
    maxima values are replaced by the day index of each annual maximum.
    Returns the assembled DataFrame."""
    columns = []
    altitudes = []
    for altitude, study in altitude_studies.altitude_to_study.items():
        df_maxima = study.observations_annual_maxima.df_maxima_gev
        if massif_name in study.study_massif_names:
            altitudes.append(altitude)
            s = df_maxima.loc[massif_name]
            if maxima_dates:
                # Replace maxima values by the day index of each annual maximum.
                values = study.massif_name_to_annual_maxima_index[massif_name]
                s = pd.Series(index=s.index, data=values)
                # s.values = np.array(values)
            # Fit the data and add the parameters as the first columns
            columns.append(s)
    df = pd.concat(columns, axis=1)
    altitude_str = [str(a) + ' m' for a in altitudes]
    df.columns = altitude_str
    df.to_excel(writer, sheet_name=massif_name)
    return df
if __name__ == '__main__':
generate_excel_with_annual_maxima(fast=False, maxima_dates=True)
| true |
4d6add4523ea5963a2d6237dcf57e3b06855e423 | Python | mattjp/leetcode | /practice/medium/0109-Convert_Sorted_List_to_Binary_Tree.py | UTF-8 | 1,249 | 3.71875 | 4 | [] | no_license | # Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
    def sortedListToBST(self, head: ListNode) -> TreeNode:
        """Build a height-balanced BST from a sorted singly linked list.

        Copies the list values into an array, then recursively makes the
        middle element of each index range the subtree root. This replaces
        the original approach, which re-ran a full BST descent (bst_insert)
        for every element and sliced the list at each level (O(n log n)
        with extra copies), and which leaked state through self.root.
        Construction here is O(n) after the single O(n) copy.
        """
        vals = []
        while head:
            vals.append(head.val)
            head = head.next

        def build(lo: int, hi: int):
            # Subtree for vals[lo:hi+1]; an empty range yields None.
            if lo > hi:
                return None
            # Upper middle, matching the original's len(ls)//2 choice.
            mid = lo + (hi - lo + 1) // 2
            node = TreeNode(vals[mid])
            node.left = build(lo, mid - 1)
            node.right = build(mid + 1, hi)
            return node

        return build(0, len(vals) - 1)
| true |
d436c5f38334d52c390bc99826a2d794e7c5aab6 | Python | michaelfedell/canvas-card-generator | /app.py | UTF-8 | 2,412 | 2.921875 | 3 | [] | no_license | """[summary]."""
import random
from flask import Flask, render_template, make_response
from flask import redirect, request, jsonify, url_for
from canvas import STYLES, styles, randomize_colors, update_canvas_id, process
app = Flask(__name__)
@app.route('/', methods=['GET'])
def index():
    """Landing route; confirms the API is reachable."""
    payload = {'status': 'great job, you found the index'}
    return make_response(jsonify(payload), 200)
@app.route('/canvas', methods=['GET'])
def get_randomized_canvas():
    """Return a random style's code plus a color-randomized variant as JSON."""
    chosen = random.choice(STYLES)
    original = styles[chosen]['code']
    payload = jsonify({
        'msg': 'A random style was chosen',
        'style': chosen,
        'original_code': original,
        'randomized': randomize_colors(original)
    })
    return make_response(payload, 200)
@app.route('/canvas/og', methods=['GET'])
def get_random_canvas():
    """Return a random style's original (un-randomized) code as JSON."""
    chosen = random.choice(STYLES)
    body = {
        'msg': 'A random style was chosen',
        'style': chosen,
        'original_code': styles[chosen]['code']
    }
    return make_response(jsonify(body), 200)
@app.route('/canvas/render', methods=['GET'])
def render_randomized_canvas():
    """Render a randomly chosen canvas style as an HTML page."""
    chosen = random.choice(STYLES)
    processed = process(styles[chosen]['code'])
    return render_template(
        'layouts/canvas.html',
        title='Random Canvas',
        name=f'Randomized {chosen}'.title(),
        code=processed
    )
@app.route('/canvas/<style>', methods=['GET'])
def get_canvas(style: str):
    """Return the named style's code and a randomized variant, or a 400 error."""
    if style not in STYLES:
        # Unknown style: report the error with a 400 status.
        error = jsonify({
            'msg': f'Sorry, but {style} is not an available style'
        })
        return make_response(error, 400)
    original = styles[style]['code']
    payload = jsonify({
        'style': style,
        'original_code': original,
        'randomized': randomize_colors(original)
    })
    return make_response(payload, 200)
@app.route('/canvas/<style>/render', methods=['GET'])
def render_canvas(style: str):
    """Render the named canvas style as an HTML page, or return a 400 error."""
    if style not in STYLES:
        # Unknown style: report the error with a 400 status.
        error = jsonify({
            'msg': f'Sorry, but {style} is not an available style'
        })
        return make_response(error, 400)
    return render_template(
        'layouts/canvas.html',
        title='Random Canvas',
        name=f'Randomized {style}'.title(),
        code=process(styles[style]['code'])
    )
if __name__ == '__main__':
app.run()
| true |
fbfd4984d6f8fc9d6670e9620a147630fa7210bd | Python | jO-Osko/nadlogar | /nadlogar/problems/models/mnozice.py | UTF-8 | 6,609 | 3 | 3 | [
"MIT"
] | permissive | import random
import sympy
from django.db import models
from .meta import GeneratedDataIncorrect, Problem
class ElementiMnozice(Problem):
    """Problem: list the elements of a set given its set-builder rule.

    The generated set is { a*n + b : n in N, n <relation> stevilo } where the
    relation is one of divides (|), < or <=.
    """
    default_instruction = "Zapiši elemente množice $ \\mathcal{A} = \\{ @n; (n \\in \\mathbb{N}) \\land (n @pogoj @stevilo ) \\}$."
    default_solution = "$ \\mathcal{A} =@mnozica$"
    # Candidate relations and their LaTeX renderings.
    POGOJ = ["|", "<", "<="]
    POGOJ_LATEX = {"|": r"\mid", "<": r"\lt", "<=": r"\le"}
    # When True the set-builder term is a linear combination a*n + b;
    # otherwise it is just n (a=1, b=0).
    linearna_kombinacija = models.BooleanField(
        "linearna kombinacija",
        help_text="Ali naj naloga vsebuje linearno kombinacijo?",
        choices=[(True, "Da"), (False, "Ne")],
        default=True,
    )
    class Meta:
        verbose_name = "Množice / elementi iz predpisa"
    def generate(self):
        """Draw random parameters and return the template substitutions."""
        pogoj = random.choice(self.POGOJ)
        n = sympy.symbols("n")
        if not self.linearna_kombinacija:
            a = 1
            b = 0
        else:
            a = random.randint(1, 3)
            b = random.randint(-2, 2)
        if pogoj == "|":
            stevilo = random.randint(15, 45)
            ustrezni = sympy.divisors(stevilo)  # admissible n: divisors of stevilo
        elif pogoj == "<":
            stevilo = random.randint(5, 12)
            ustrezni = list(range(1, stevilo))
        elif pogoj == "<=":
            stevilo = random.randint(5, 8)
            ustrezni = list(range(1, stevilo + 1))
        # Keep only the positive elements of the generated set.
        mnozica = sympy.FiniteSet(*[a * x + b for x in ustrezni if a * x + b > 0])
        return {
            "n": sympy.latex(sympy.simplify(a * n + b)),
            "pogoj": self.POGOJ_LATEX[pogoj],
            "stevilo": sympy.latex(stevilo),
            "mnozica": sympy.latex(mnozica),
        }
class PotencnaMnozica(Problem):
    """Problem: write out the power set of a given set."""
    default_instruction = "Zapiši potenčno množico množice $ \\mathcal{A} =@mnozica$"
    default_solution = "$\\mathcal{P}( \\mathcal{A} ) =@potencna$"
    class Meta:
        verbose_name = "Množice / potenčna množica"
    def generate(self):
        """Pick a random 2- or 3-element base set and compute its power set."""
        velikost = random.randint(2, 3)
        # Candidate base sets (symbolic or numeric); only the first
        # `velikost` elements of the chosen one are used.
        mnozice = [
            [sympy.Symbol("a"), sympy.Symbol("b"), sympy.Symbol("c")],
            [1, 2, 3],
            [sympy.Symbol("x"), sympy.Symbol("y"), sympy.Symbol("z")],
            [sympy.Symbol("alpha"), sympy.Symbol("beta"), sympy.Symbol("gamma")],
            [sympy.Symbol("Pi"), sympy.Symbol("Phi"), sympy.Symbol("Xi")],
            [3, 6, 9],
            [3, 7, 42],
        ]
        mnozica = sympy.FiniteSet(*random.choice(mnozice)[:velikost])
        potencna = mnozica.powerset()
        return {"mnozica": sympy.latex(mnozica), "potencna": sympy.latex(potencna)}
class OperacijeMnozic(Problem):
    """Problem: write the union, intersection, difference and Cartesian
    product of two randomly generated sets."""
    default_instruction = "Dani sta množici $ \\mathcal{A} =@A$ in $ \\mathcal{B} =@B$.\r\n Zapiši množice $ \\mathcal{A} \\cup \\mathcal{B} $, $ \\mathcal{A} \\cap \\mathcal{B} $, $ \\mathcal{A} - \\mathcal{B} $ in $ \\mathcal{A} \\times \\mathcal{B} $."
    default_solution = "$ \\mathcal{A} \\cup \\mathcal{B} =@unija$, $ \\mathcal{A} \\cap \\mathcal{B} =@presek$, $ \\mathcal{A} - \\mathcal{B} =@brez$, $ \\mathcal{A} \\times \\mathcal{B} =@kartezicno$"
    class Meta:
        verbose_name = "Množice / operacije z množicami"
    @staticmethod
    def generiraj_mnozico(velikost, od, do):
        """Build a random set of the given size with elements from [od, do]."""
        izbor = [x for x in range(od, do + 1)]
        mnozica = sympy.FiniteSet(*random.sample(izbor, velikost))
        return mnozica
    def generate(self):
        """Draw two random sets and compute the four set operations."""
        A = self.generiraj_mnozico(random.randint(3, 4), 1, 6)
        B = self.generiraj_mnozico(random.randint(3, 4), 1, 6)
        unija = A.union(B)
        presek = A.intersection(B)
        brez = sympy.Complement(A, B)
        kartezicno = sympy.FiniteSet(*A * B)  # flatten the product into a set of pairs
        return {
            "A": sympy.latex(A),
            "B": sympy.latex(B),
            "unija": sympy.latex(unija),
            "presek": sympy.latex(presek),
            "brez": sympy.latex(brez),
            "kartezicno": sympy.latex(kartezicno),
        }
class IzpeljaneMnozice(Problem):
    """Problem: given a finite universal set, list the elements of derived
    subsets and of their complement, union and difference."""
    default_instruction = "Dana je univerzalna množica $ \\mathcal{U} =\\mathbb{N}_{ @velikost_univerzalne }$ in njene pomnožice $ \\mathcal{A} =\\{ @navodilo_A; k \\in \\mathbb{N} \\}$, $ \\mathcal{B} = \\{ @navodilo_B; k \\in \\mathbb{N} \\}$, $ \\mathcal{C} =@C$. Zapiši elemente množic $ \\mathcal{A} $, $ \\mathcal{B} $, $ \\mathcal{A} \\cup \\mathcal{B} $, $ \\mathcal{C} ^{\\mathsf{c} }$ in $ \\mathcal{B} - \\mathcal{A} $."
    default_solution = "$ \\mathcal{A} =@A$, $ \\mathcal{B} =@B$, $ \\mathcal{A} \\cup \\mathcal{B} =@A_unija_B$, $ \\mathcal{C} ^{\\mathsf{c} }=@C_komplement$, $ \\mathcal{B} - \\mathcal{A} =@B_brez_A$"
    class Meta:
        verbose_name = "Množice / operacije na izpeljanih množicah"
    def generate(self):
        """Draw arithmetic-progression subsets A = {a*k+b} and B = {c*k+d},
        a random 8-element C, then compute A∪B, C's complement and B−A
        within the universal set {1, ..., velikost_univerzalne}."""
        k = sympy.symbols("k")
        a = random.randint(2, 5)
        b = random.randint(-4, 4)
        c = random.randint(2, 5)
        d = random.randint(-4, 4)
        # Reject degenerate draws; the framework retries generation.
        if abs(b) == a or abs(d) == c:
            raise GeneratedDataIncorrect
        velikost_univerzalne = random.randint(12, 20)
        univerzalna = sympy.FiniteSet(*range(1, velikost_univerzalne + 1))
        navodilo_A = a * k + b
        navodilo_B = c * k + d
        # Keep only progression members that fall inside the universal set.
        mnozica_A = [
            a * x + b
            for x in range(1, velikost_univerzalne + 1)
            if 0 < a * x + b <= velikost_univerzalne
        ]
        mnozica_B = [
            c * x + d
            for x in range(1, velikost_univerzalne + 1)
            if 0 < c * x + d <= velikost_univerzalne
        ]
        A = sympy.FiniteSet(*mnozica_A)
        B = sympy.FiniteSet(*mnozica_B)
        C = sympy.FiniteSet(*random.sample(sorted(univerzalna), 8))
        A_unija_B = A.union(B)
        C_komplement = sympy.Complement(univerzalna, C)
        B_brez_A = sympy.Complement(B, A)
        return {
            "navodilo_A": sympy.latex(navodilo_A),
            "navodilo_B": sympy.latex(navodilo_B),
            "A": sympy.latex(A),
            "B": sympy.latex(B),
            "C": sympy.latex(C),
            "A_unija_B": sympy.latex(A_unija_B),
            "C_komplement": sympy.latex(C_komplement),
            "B_brez_A": sympy.latex(B_brez_A),
            "velikost_univerzalne": sympy.latex(velikost_univerzalne),
        }
| true |
4d386b8cfe3060623c9ddb522a0e1907bdbd41b2 | Python | glemieux/fates | /tools/BatchPatchParams.py | UTF-8 | 3,465 | 2.640625 | 3 | [
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause"
] | permissive | #!/usr/bin/env python
#### this script modifies the default FATES parameter file to generate
# a file used in testing E3SM
# Parser code was based off of modify_fates_paramfile.py
import os
import argparse
import code # For development: code.interact(local=dict(globals(), **locals()))
from scipy.io import netcdf
# ---------------------------------------------------------------------------------------
class param_type:
    """Pairs a parameter name with its whitespace-stripped value string."""
    def __init__(self, name, values_text):
        self.name = name
        # Drop all spaces so comma-separated values parse cleanly downstream.
        self.values = values_text.replace(" ", "")
# ---------------------------------------------------------------------------------------
def load_xml(xmlfile):
    """Parse the XML control file.

    Returns a tuple (base_cdl, new_cdl, pft_list, param_list) where
    param_list holds one param_type per child of the <parameters> element.
    """
    import xml.etree.ElementTree as et
    root = et.parse(xmlfile).getroot()
    print("\nOpenend: "+xmlfile)
    base_cdl = root.find('base_file').text
    new_cdl = root.find('new_file').text
    pftparams = root.find('pft_list').text.replace(" ","")
    paramlist = []
    for node in root.find('parameters'):
        print("parsing "+node.tag)
        paramlist.append(param_type(node.tag, node.text))
    return (base_cdl, new_cdl, pftparams, paramlist)
# Assemble the shell command that applies one parameter modification.
# ----------------------------------------------------------------------------------------
def parse_syscall_str(fnamein, fnameout, param_name, param_val):
    """Build, print, and return the modify_fates_paramfile.py invocation."""
    sys_call_str = (
        f"../tools/modify_fates_paramfile.py --fin {fnamein}"
        f" --fout {fnameout} --var {param_name} --silent "
        f" --val \" {param_val}\" --overwrite --all"
    )
    print(sys_call_str)
    return sys_call_str
def main():
    """Drive the batch parameter transfer: parse the XML control file,
    prune PFTs, apply each parameter override, sort the variables, and
    dump the result as CDL. Relies on the sibling scripts in ../tools and
    on ncgen/ncdump being available on the PATH."""
    # Parse arguments
    parser = argparse.ArgumentParser(description='Parse command line arguments to this script.')
    parser.add_argument('--f', dest='xmlfile', type=str, help="XML control file Required.", required=True)
    args = parser.parse_args()
    # Load the xml file, which contains the base cdl, the output cdl,
    # and the parameters to be modified
    [base_cdl,new_cdl,pftlist,paramlist] = load_xml(args.xmlfile)
    # Convert the base cdl file into a temp nc binary
    base_nc = os.popen('mktemp').read().rstrip('\n')
    gencmd = "ncgen -o "+base_nc+" "+base_cdl
    os.system(gencmd)
    # Generate a temp output file name
    new_nc = os.popen('mktemp').read().rstrip('\n')
    # Use FatesPFTIndexSwapper.py to prune out unwanted PFTs
    swapcmd="../tools/FatesPFTIndexSwapper.py --pft-indices="+pftlist+" --fin="+base_nc+" --fout="+new_nc #+" 1>/dev/null"
    os.system(swapcmd)
    # We open the new parameter file. We only use this
    # to do some dimension checking.
    # NOTE(review): fp_nc is never read before being closed below —
    # presumably a placeholder for future dimension checks.
    fp_nc = netcdf.netcdf_file(base_nc, 'r')
    # On subsequent parameters, overwrite the file
    for param in paramlist:
        change_str = parse_syscall_str(new_nc,new_nc,param.name,param.values)
        os.system(change_str)
    # Sort the new file
    newer_nc = os.popen('mktemp').read().rstrip('\n')
    os.system("../tools/ncvarsort.py --fin "+new_nc+" --fout "+newer_nc+" --overwrite")
    # Dump the new file to the cdl
    os.system("ncdump "+newer_nc+" > "+new_cdl)
    fp_nc.close()
    print("\nBatch parameter transfer complete\n")
# This is the actual call to main
if __name__ == "__main__":
main()
| true |
d898de5121f67cc8acd3b02d63d4becebc509d31 | Python | jfhuete/pycon-opencv-docker | /app/tinto_verano.py | UTF-8 | 3,624 | 3.171875 | 3 | [] | no_license | import cv2
import numpy as np
from time import sleep
def clean_mask(mask):
    """Remove salt-and-pepper noise: dilation followed by morphological closing."""
    structuring_element = np.ones((20, 20), np.uint8)
    dilated = cv2.dilate(mask, structuring_element, iterations=3)
    return cv2.morphologyEx(dilated, cv2.MORPH_CLOSE, structuring_element)
def detect(mask):
    """Find external contours in the mask; return the first one, or None."""
    # NOTE(review): the 3-value unpacking matches the OpenCV 3.x API —
    # OpenCV 4.x returns only (contours, hierarchy); confirm the cv2 version.
    i, contours, h = cv2.findContours(mask, cv2.RETR_EXTERNAL,
                                      cv2.CHAIN_APPROX_SIMPLE)
    return contours[0] if contours else None
def tinto_monitoring(frame, height):
    """Overlay a status label on the frame based on the detected fill height.

    The higher the detected bounding box, the more drink remains; the label
    degrades from OK through WARNING and ALARM to CRITICAL.
    """
    threshold_warning = 230
    threshold_alarm = 160
    threshold_critical = 90
    text_position = (230, 30)
    # Pick the label and BGR color for the current fill level.
    if height > threshold_warning:
        label, color = "OK", (0, 255, 0)
    elif height > threshold_alarm:
        label, color = "WARNING", (55, 175, 212)
    elif height > threshold_critical:
        label, color = "ALARM", (0, 0, 255)
    else:
        label, color = "CRITICAL", (72, 54, 176)
    cv2.putText(frame, label, text_position, cv2.FONT_HERSHEY_SIMPLEX, 1,
                color, thickness=2)
def main():
    """Play the demo video, segment the drink by HSV color inside a fixed
    ROI, track the height of its bounding box, and overlay a status label.
    Press 'q' to quit early."""
    FRAME_RATE = 25
    # Video capture
    cap = cv2.VideoCapture('My_tinto_de_verano.mp4')
    # Constants with WIDTH and HEIGHT of the video frames
    WIDTH = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
    HEIGHT = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
    # HSV color filter values (tuned empirically for the drink's red hue)
    # NOTE(review): np.uint8 truncates the fractional bounds (6.87 -> 6,
    # 107.1 -> 107).
    H_max = 6.87
    H_min = 0
    S_max = 255
    S_min = 107.1
    V_max = 255
    V_min = 0
    max_hsv_values = np.array([H_max, S_max, V_max], np.uint8)
    min_hsv_values = np.array([H_min, S_min, V_min], np.uint8)
    while(cap.isOpened()):
        ret, frame = cap.read()
        if frame is None:
            break
        # Select a rectangle as roi: the middle third of the frame (empirical)
        v_from = (int(WIDTH/3), 0)
        v_to = (int(WIDTH*2/3), int(HEIGHT))
        # Draw roi
        cv2.rectangle(frame, v_from, v_to, (255, 0, 0), 2)
        # Get roi frame (a view into `frame`, so drawing on it marks the frame)
        roi_frame = frame[0:int(HEIGHT), int(WIDTH/3):int(WIDTH*2/3)]
        # Transform the roi image to HSV color space
        frame_hsv = cv2.cvtColor(roi_frame, cv2.COLOR_BGR2HSV)
        # Get mask of the tinto de verano
        mask = cv2.inRange(frame_hsv, min_hsv_values, max_hsv_values)
        # Clean mask
        mask = clean_mask(mask)
        # Detect the drink's contour (None if nothing matched)
        contour = detect(mask)
        if contour is not None:
            # Obtain the coordinates of the bounding rectangle
            x, y, w, h = cv2.boundingRect(contour)
            cv2.rectangle(roi_frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
        else:
            h = 0
        # Update the status overlay from the detected fill height
        tinto_monitoring(frame, h)
        cv2.imshow('frame', frame)
        cv2.imshow('mask', mask)
        cv2.moveWindow("frame", 280, 60)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
        sleep(1/FRAME_RATE)
    # When the video finishes (or 'q' is pressed), release the capture
    cap.release()
    cv2.destroyAllWindows()
| true |
dbc55c79b658ad8c787a619834000965eb9feebc | Python | samineup0710/Final_geneses_assignments | /session3.py | UTF-8 | 2,086 | 4.5625 | 5 | [] | no_license | """functions for printinig prime numbers from 1 to 100"""
def prime():
    """Print the prime numbers from 1 to 100 and return them as strings.

    Uses trial division up to sqrt(j) with an early break instead of the
    original O(j) full divisor count per candidate, and drops the dead
    commented-out non-prime bookkeeping. Returns the list so callers can
    reuse it; printing behavior is unchanged.
    """
    primelist = []
    for j in range(2, 101):  # 1 is not prime, so start at 2
        is_prime = True
        for i in range(2, int(j ** 0.5) + 1):
            if j % i == 0:
                is_prime = False
                break  # a single divisor rules j out
        if is_prime:
            primelist.append(str(j))
    print("The list of prime numbers are:"+'\n', primelist)
    return primelist
"""---functions to check string if palindrome or not---"""
def palindrome(userinput):
"""printing the input from user"""
print("The user input is:", userinput)
"""rerversing the string"""
inpt = userinput[::-1]
print("The reversed string is:",inpt)
""" check if the reversed string matches the input string or not"""
if inpt!=userinput:
print("{}is not a palindrome string".format(str(userinput)))
else:
print("{} is a palindrome string".format(str(userinput)))
"""function to print to dictionary consisting of character and its repetitions."""
def intodict(inpt):
    """Print a dictionary mapping each character of *inpt* to its repetition count.

    Counts are stored as strings, preserving the original output format.
    """
    print("The input string of char is:", inpt)
    if len(inpt) == 0:
        # BUG FIX: this emptiness check previously lived *inside* the
        # `for ch in inpt` loop, so it could never fire for an empty
        # string (the loop body never runs); hoisted before the loop.
        print("invalid operations")
    dictval = {}
    for ch in inpt:
        # Count each character's repetitions and store as a string.
        dictval[ch] = str(inpt.count(ch))
    print("The given dictionary of char is:" + '\n', dictval)
"""--MAIN FUNCTION---"""
if __name__ == '__main__':
    # Demo driver: exercises each helper in turn.  The last two sections
    # read a line from stdin, so this only runs interactively.
    print("---------PRINTING PRIME NUMBERS ONLY FROM 1 TO 100----------")
    prime()
    print('\n')
    print("---------CHECKING WHETHER THE STRING IS PALINDROME OR NOT----------")
    inpt_str = input("Enter the strings:")
    palindrome(inpt_str)
    print('\n')
    print("---------PRINTING CHARACTERS FROM STRING AND ITS REPETITION INTO DICTIONARY---------")
    userinp = input("Enter the strings: ")
    intodict(userinp)
570a60e9b9c8c1f46c18cf67e1caa1badec3609e | Python | danielzhang1998/Quiz-management-system | /app/forms.py | UTF-8 | 4,236 | 2.6875 | 3 | [
"MIT"
] | permissive | # forms written by: Mingchuan Tian (22636589)
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, SubmitField, BooleanField, SelectField, TextField, validators
from wtforms.validators import DataRequired, Length
from wtforms.fields.html5 import EmailField
# teacher login form
class TeacherLoginForm(FlaskForm):
    """Sign-in form for teachers: email + password with a remember-me option."""
    email = EmailField('Email', [validators.DataRequired(), validators.Email()])
    password = PasswordField("Password", validators = [DataRequired()])
    remember_me = BooleanField("Remember Me")
    submit = SubmitField("Sign In")
# student login form
class StudentLoginForm(FlaskForm):
    """Sign-in form for students: email + password with a remember-me option."""
    email = EmailField('Email', [validators.DataRequired(), validators.Email()])
    password = PasswordField("Password", validators = [DataRequired()])
    remember_me = BooleanField("Remember Me")
    submit = SubmitField("Sign In")
# User registration form
class RegisterForm(FlaskForm):
    """Account registration; the `teacher` checkbox selects the teacher role."""
    name = StringField("Name", validators = [DataRequired()])
    email = EmailField('Email', [validators.DataRequired(), validators.Email()])
    password = PasswordField("Password", validators = [DataRequired()])
    teacher = BooleanField("Register as teacher")
    submit = SubmitField("Sign up")
# Quiz specification form (specify details of a quiz)
class QuizStartForm(FlaskForm):
    """Quiz specification: title, ID, question count (1-10) and time limit."""
    title = StringField("Quiz Title", validators = [DataRequired()])
    quiz_id = StringField("Give your quiz an ID", validators = [DataRequired()])
    question_num = SelectField("Number of questions", choices=[('1','1'),('2','2'),('3','3'),('4','4'),('5','5'),('6','6'),('7','7'),('8','8'),('9','9'),('10','10')])
    time_limit = StringField("Time Limit (minutes)", validators = [DataRequired()])
    submit = SubmitField("Continue")
# Quiz edit form (for teachers adding questions to the new quiz)
class QuizEditForm(FlaskForm):
    """Teachers add one question at a time to a new quiz."""
    question = StringField("Question: ", validators = [DataRequired()])
    submit = SubmitField("save question")
# Quiz answer form (for students adding answers to existing quiz)
class QuizAnswerForm(FlaskForm):
    """Students submit one answer at a time for an existing quiz."""
    answer = StringField("Answer: ", validators = [DataRequired()])
    submit = SubmitField("save answer")
# Quiz login form (for students retrieving quiz)
class QuizLoginForm(FlaskForm):
    """Students enter a quiz ID to retrieve and start a quiz."""
    QuizID = StringField("QuizID: ")
    submit = SubmitField("Start Quiz!")
# Quiz review form (for teacher retrieving quiz to be changed/modified)
class QuizReviewForm(FlaskForm):
    """Teachers enter a quiz ID to retrieve a quiz for editing."""
    QuizID = StringField("QuizID (this Quiz has to be in the database): ")
    submit = SubmitField("Start Editing Quiz!")
# Change quiz form (for teacher changing/modifying questions)
class changeQuestionForm(FlaskForm):
    """Teachers replace an existing question's text."""
    newQuestion = StringField("Change question to :")
    submit = SubmitField("Save question")
# Quiz mark form (for teachers retrieving the quiz to be marked)
class QuizMarkForm(FlaskForm):
    """Teachers enter a quiz ID to fetch a quiz for marking."""
    QuizID = StringField("Quiz ID:")
    submit = SubmitField("Find quiz to be marked!")
# Grading form (for teachers grading the quiz)
class GradingForm(FlaskForm):
    """Per-question grading: a mark plus an optional free-text comment."""
    mark = StringField("Mark:")
    comment = TextField("Comment:")
    submit = SubmitField("Save and Proceed to next question")
# Edit profile form (for users editing their profile)
class EditProfileForm(FlaskForm):
    """Profile editor: name, title, faculty, 10-digit phone and address."""
    name = StringField("Name: ", validators = [DataRequired()])
    title = SelectField("Title: ", choices=[('Mr. ', 'Mr.'),('Mrs. ', 'Mrs. '),('Miss. ', 'Miss '),('Dr. ', 'Dr. '),('Prof. ', 'Prof. ')], validators = [DataRequired()])
    faculty = SelectField("Faculty: ", choices=[('Arts, Business, Law and Education','Arts, Business, Law and Education'),('Health and Medical Sciences','Health and Medical Sciences'),('Engineering and Mathematical Sciences','Engineering and Mathematical Sciences'),('Science','Science')], validators = [DataRequired()])
    phone = StringField("Phone: ", validators=[DataRequired(), Length(min=10, max=10)])
    address = TextField("Address: ", validators=[DataRequired()] )
    submit = SubmitField("Save")
# Change avatar form (form users changing their avatars)
class ChangeAvatarForm(FlaskForm):
    """Avatar change form: the avatar is derived from the (new) email address."""
    email = EmailField('New Email', [validators.DataRequired(), validators.Email()])
    submit = SubmitField("Save")
# forms written by: Mingchuan Tian (22636589) | true |
641d95d613afeb3277f33689e712aa198f5f6fe4 | Python | sarma5233/pythonpract | /python/informations.py | UTF-8 | 630 | 3.703125 | 4 | [] | no_license | help("modules")#it'll show you what are modules(built-in) present in your python.
import math#importing mathematical module from it.
print(dir(math))#it'll show the functions present in the math module.
print(dir(str))#it'll show the functions present in the string module.
a = "srujan"
b = 12
print(dir())#it'll show the functions present in the a and b modules. o/p include a and b also
print(help(math.ceil))
import eval_inbuilt_function
print(help(eval_inbuilt_function))
print(help(math.cos))
print(help(str.isalpha))
print(help(str.maketrans))
import keyword
print(keyword.kwlist) #it'll show you the keywords list | true |
cedaeecad87d89700bcc8fcc97579c3793c42a9d | Python | NONO9527/gomk-b-search | /src/main/resources/python/testText.py | UTF-8 | 1,483 | 2.796875 | 3 | [] | no_license | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Python 2 script: compare two text inputs given on the command line.
import difflib
import sys
# Force the process-wide default encoding to UTF-8.  This is a Python 2-only
# trick: reload(sys) re-exposes setdefaultencoding(), which site.py removes.
defaultencoding = 'utf-8'
if sys.getdefaultencoding() != defaultencoding:
    reload(sys)
    sys.setdefaultencoding(defaultencoding)
# Both file names are required; print a usage message and exit otherwise.
try:
    textfile1 = sys.argv[1]
    textfile2 = sys.argv[2]
except Exception,e:
    print "Error:" +str(e)
    print "Usage: python sample3.py filename1 filename2"
    sys.exit()
def readfile(filename):
    """Return the lines of *filename* (opened in binary mode).

    On an I/O failure the error is printed and the process exits.
    """
    try:
        with open(filename, 'rb') as handle:
            return handle.read().splitlines()
    except IOError as error:
        print ('Read file Error:' +str(error))
        sys.exit()
# Similarity helper built on the difflib library.
def get_equal_rate_1(str1, str2):
    """Return a quick similarity ratio (0.0-1.0) between the two sequences."""
    matcher = difflib.SequenceMatcher(None, str1, str2)
    return matcher.quick_ratio()
def diffdo(t1,t2):
    """Print a similarity ratio and an HTML diff table for two text inputs.

    NOTE(review): t1/t2 are split with .splitlines(), i.e. they are treated
    as the *text to compare*, yet the module-level call below passes the
    command-line FILENAMES (textfile1/textfile2).  The commented-out
    readfile() lines suggest the file *contents* were meant to be compared --
    confirm intent before relying on the output.
    """
    #text1_lines = readfile(textfile1)
    #text2_lines = readfile(textfile2)
    #text1_lines = readfile(t1)
    #text2_lines = readfile(t2)
    text1_lines = t1.splitlines()
    text2_lines = t2.splitlines()
    #d = difflib.Differ()
    #d=difflib.Differ()
    #diff=d.compare(text1_lines,text2_lines)
    #print ('\n'.join(list(diff)))
    d = difflib.HtmlDiff(tabsize=4,wrapcolumn=150)
    #print (d.make_file(text1_lines,text2_lines))
    # Similarity of the two line lists (1.0 == identical).
    print(get_equal_rate_1(text1_lines, text2_lines))
    # Python 2 print statement: emits the HTML <table> fragment of the diff.
    print d.make_table(text1_lines,text2_lines)
    #return d.make_file(text1_lines,text2_lines)
    #return d.make_file(str_2,str_3)
diffdo(textfile1,textfile2)
fa7d9bee3fcf1e3342cafddf3a7ad729c2b87eab | Python | ming1in/gedcom-parser | /test/test_us10.py | UTF-8 | 1,171 | 2.703125 | 3 | [] | no_license | import unittest
import datetime
import sys
import os
sys.path.append(os.path.abspath('../src'))
import us10
class us10_test(unittest.TestCase):
    """Acceptance tests for us10.marriedTooYoung over several seed GEDCOM files."""

    def test1(self):
        # Baseline seed: no marriage before the age of 14.
        outcome = us10.marriedTooYoung("seeds/seed.ged")
        self.assertEqual(outcome, "All marriages at acceptable age!")

    def test2(self):
        outcome = us10.marriedTooYoung("seeds/test7.ged")
        self.assertEqual(outcome, "All marriages at acceptable age!")

    def test3(self):
        outcome = us10.marriedTooYoung("seeds/test18.ged")
        self.assertEqual(outcome, "All marriages at acceptable age!")

    def test4(self):
        # One wife (I7) married under 14.
        expected = "The following people have marriage before the age of 14: Husbands[] Wives: ['I7']"
        self.assertEqual(us10.marriedTooYoung("seeds/test19.ged"), expected)

    def test5(self):
        # Multiple offenders on both sides.
        expected = "The following people have marriage before the age of 14: Husbands['I4', 'I4'] Wives: ['I5', 'I7']"
        self.assertEqual(us10.marriedTooYoung("seeds/test20.ged"), expected)
if __name__ == '__main__':
    # Discover and run the test case above when executed directly.
    unittest.main()
| true |
class AnimalShelter(object):
    """FIFO animal shelter: animals are adopted in arrival order, either
    overall (dequeueAny) or per species (dequeueDog / dequeueCat).
    """

    def __init__(self):
        # Master arrival log plus one index queue per species.  `start`
        # counts how many leading entries of `animals` have been removed,
        # so the stored absolute indices can be translated to list offsets.
        self.animals = []
        self.dogIndex = []
        self.catIndex = []
        self.length = 0
        self.start = 0

    def enqueue(self, name, type):
        """Admit an animal; `type` is 1 for a dog, 0 for a cat."""
        self.animals.append((name, type))
        self.length += 1
        queue = self.dogIndex if type == 1 else self.catIndex
        queue.append(self.length - 1)

    def dequeueAny(self):
        """Adopt the longest-waiting animal of either species; return its name."""
        # Skip placeholders left behind by species-specific adoptions.
        while self.animals[0] is None:
            self.animals.pop(0)
            self.start += 1
        name, kind = self.animals.pop(0)
        self.start += 1
        (self.dogIndex if kind == 1 else self.catIndex).pop(0)
        return name

    def dequeueDog(self):
        """Adopt the longest-waiting dog; return its name."""
        slot = self.dogIndex.pop(0) - self.start
        name = self.animals[slot][0]
        # Leave a placeholder so later absolute indices stay valid.
        self.animals[slot] = None
        return name

    def dequeueCat(self):
        """Adopt the longest-waiting cat; return its name."""
        slot = self.catIndex.pop(0) - self.start
        name = self.animals[slot][0]
        self.animals[slot] = None
        return name
2a17041ebd6a987446ca1b4cd066f125d4f0b9f2 | Python | Aastha-520609/python_learning | /DNA sequence.py | UTF-8 | 346 | 3.578125 | 4 | [] | no_license |
string=input()
set1=set(string)
set2=set({'A','C','T','G'})
if (len(string)>0 and set1==set2):
a=string.count('A')
t=string.count('T')
g=string.count('G')
c=string.count('C')
print("A")
print(a)
print("T")
print(t)
print("C")
print(c)
print("G")
print(g)
else:
print("Invalid input")
| true |
def division(a, b):
    """Divide b by a (note the operand order) and round to 9 decimal places."""
    quotient = b / a
    return round(float(quotient), 9)
5c765c314153ac89e61f877572ad0652113c6822 | Python | rohith46/Python | /cleaning data .py | UTF-8 | 1,412 | 2.96875 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Cleaning walkthrough for the Melbourne housing dataset: drop sparse columns,
remove rows with too many missing fields, then impute what remains.

Created on Wed Feb 28 12:44:10 2018

@author: rohithbharatha
"""
import pandas as pd
import numpy as np
# NOTE(review): hard-coded absolute path -- this only runs on the author's machine.
df = pd.read_csv("/Users/rohithbharatha/Desktop/3_Getting_and_Cleaning_Data/melbourne.csv")
# Which rows contain at least one missing value?
print(df.isnull().any(axis=1))
# Percentage of missing values per column.
print(round(100*(df.isnull().sum())/len(df.index),2))
# Drop the sparsest columns entirely.
df= df.drop('BuildingArea',axis=1)
df= df.drop('YearBuilt',axis=1)
df= df.drop('CouncilArea',axis=1)
print(round(100*(df.isnull().sum())/len(df.index),2))
# Share of rows with more than 5 missing fields, then drop those rows.
print(100*len(df[df.isnull().sum(axis=1) > 5].index)/len(df.index))
df= df[df.isnull().sum(axis=1) <=5]
print(round(100*(df.isnull().sum())/len(df.index),2))
# Price and Landsize are essential -- drop rows where they are missing.
df=df[~np.isnan(df['Price'])]
print(round(100*(df.isnull().sum())/len(df.index),2))
df=df[~np.isnan(df['Landsize'])]
# The bare .describe()/.value_counts() calls below only display in a REPL/notebook.
df['Landsize'].describe()
df['Bathroom'].describe()
df.loc[:,['Lattitude', 'Longtitude']].describe()
# Impute missing coordinates with the column mean.
df.loc[np.isnan(df['Lattitude']),['Lattitude']] = df['Lattitude'].mean()
df.loc[np.isnan(df['Longtitude']),['Longtitude']]=df['Longtitude'].mean()
df.loc[:,['Bathroom','Car']].describe()
# Treat Car as categorical; fill missing entries with 2 (presumably the
# mode observed in value_counts() -- confirm against the data).
df['Car']= df['Car'].astype('category')
df['Car'].value_counts()
df.loc[pd.isnull(df['Car']),['Car']] = 2
print(round(100*(df.isnull().sum())/len(df.index),2))
# Same treatment for Bathroom, filled with 1.
df['Bathroom']=df['Bathroom'].astype('category')
df['Bathroom'].value_counts()
df.loc[pd.isnull(df['Bathroom']),['Bathroom']] = 1
print(round(100*(df.isnull().sum())/len(df.index),2))
668b5c5690f8aba20fda05a6776e2c1c22b5930f | Python | dcramer/ec2 | /ec2/helpers.py | UTF-8 | 5,024 | 3.109375 | 3 | [
"BSD-2-Clause"
] | permissive | """
ec2.helpers
~~~~~~~~~~~
:copyright: (c) 2012 by Matt Robenolt.
:license: BSD, see LICENSE for more details.
"""
import re
def make_compare(key, value, obj):
    "Map a key name to a specific comparison function"
    if '__' in key:
        # Split off the comparison suffix, e.g. "name__iexact" -> ("name", "iexact").
        field, op = key.rsplit('__', 1)
    else:
        # No "__" present: default to an "exact" comparison.
        field, op = key, 'exact'
    comparator = getattr(Compare, op, None)
    if comparator is None:
        raise AttributeError("No comparison '%s'" % op)
    return comparator(field, value, obj)
class Compare(object):
"Private class, namespacing comparison functions."
@staticmethod
def exact(key, value, obj):
try:
return getattr(obj, key) == value
except AttributeError, e:
# Fall back to checking tags
if hasattr(obj, 'tags'):
for tag in obj.tags:
if key == tag.lower():
return obj.tags[tag] == value
# There is no tag found either
raise e
@staticmethod
def iexact(key, value, obj):
value = value.lower()
try:
return getattr(obj, key).lower() == value
except AttributeError, e:
# Fall back to checking tags
if hasattr(obj, 'tags'):
for tag in obj.tags:
if key == tag.lower():
return obj.tags[tag].lower() == value
# There is no tag found either
raise e
@staticmethod
def like(key, value, obj):
if isinstance(value, basestring):
# If a string is passed in, we want to convert it to a pattern object
value = re.compile(value)
try:
return bool(value.match(getattr(obj, key)))
except AttributeError, e:
# Fall back to checking tags
if hasattr(obj, 'tags'):
for tag in obj.tags:
if key == tag.lower():
return bool(value.match(obj.tags[tag]))
# There is no tag found either
raise e
# Django alias
regex = like
@staticmethod
def ilike(key, value, obj):
return Compare.like(key, re.compile(value, re.I), obj)
# Django alias
iregex = ilike
@staticmethod
def contains(key, value, obj):
try:
return value in getattr(obj, key)
except AttributeError, e:
# Fall back to checking tags
if hasattr(obj, 'tags'):
for tag in obj.tags:
if key == tag.lower():
return value in obj.tags[tag]
# There is no tag found either
raise e
@staticmethod
def icontains(key, value, obj):
value = value.lower()
try:
return value in getattr(obj, key).lower()
except AttributeError, e:
# Fall back to checking tags
if hasattr(obj, 'tags'):
for tag in obj.tags:
if key == tag.lower():
return value in obj.tags[tag]
# There is no tag found either
raise e
@staticmethod
def startswith(key, value, obj):
try:
return getattr(obj, key).startswith(value)
except AttributeError, e:
# Fall back to checking tags
if hasattr(obj, 'tags'):
for tag in obj.tags:
if key == tag.lower():
return obj.tags[tag].startswith(value)
# There is no tag found either
raise e
@staticmethod
def istartswith(key, value, obj):
value = value.lower()
try:
return getattr(obj, key).startswith(value)
except AttributeError, e:
# Fall back to checking tags
if hasattr(obj, 'tags'):
for tag in obj.tags:
if key == tag.lower():
return obj.tags[tag].lower().startswith(value)
# There is no tag found either
raise e
@staticmethod
def endswith(key, value, obj):
try:
return getattr(obj, key).endswith(value)
except AttributeError, e:
# Fall back to checking tags
if hasattr(obj, 'tags'):
for tag in obj.tags:
if key == tag.lower():
return obj.tags[tag].endswith(value)
# There is no tag found either
raise e
@staticmethod
def iendswith(key, value, obj):
value = value.lower()
try:
return getattr(obj, key).endswith(value)
except AttributeError, e:
# Fall back to checking tags
if hasattr(obj, 'tags'):
for tag in obj.tags:
if key == tag.lower():
return obj.tags[tag].lower().endswith(value)
# There is no tag found either
raise e
| true |
a2d9c42146947f0981a2fd4dd58160f12412b23b | Python | tbnsok40/Algorithm | /2050. 알파벳을 숫자로 변환.py | UTF-8 | 417 | 3.9375 | 4 | [] | no_license |
# base 개념은 이러하다
# names = input()
# for idx,val in enumerate(names):
#
# print(idx+1,val,end=' ')
# for i in val:
# print(ord(i)-64,end=',')
# print()
# 문제를 풀어보자
# input을 받자
alphabet = (input())
for i in alphabet:
print(ord(i)-64, end=" ")
# ord() 메서드는 alphabet의 아스키코드 return
# 그에 맞춰서 64를 빼준다.
| true |
7f1c5e9876cf0fc59a61d6edbd9c4e22a4915e6e | Python | chivalry/exercism-python | /users/Vikdemen/python/raindrops/raindrops.py | UTF-8 | 249 | 3.5 | 4 | [] | no_license | def convert(number):
sound = ''
if number%3 == 0:
sound += 'Pling'
if number%5 == 0:
sound += 'Plang'
if number%7 == 0:
sound += 'Plong'
if sound:
return sound
else:
return str(number)
| true |
df9f8e386280b82c0a2b88dd3b39acaa1ee12590 | Python | luckydimdim/bobel-algo | /python/merge_sort/test.py | UTF-8 | 692 | 3.328125 | 3 | [] | no_license | import unittest
from index import merge, merge_sort
class TestMethods(unittest.TestCase):
def test_merge_exists(self):
self.assertTrue(merge)
def test_merge_sort_exists(self):
self.assertTrue(merge_sort)
def test_merge_concatenates_and_arranges_arrays(self):
self.assertEqual(merge([6,7,8,9,10], [1,2,3,4,5]), [1,2,3,4,5,6,7,8,9,10])
def test_merge_insites(self):
self.assertEqual(merge([1,2], [3,2]), [1,2,3,2])
def test_merge_sort_arranges_unsorted_array(self):
self.assertEqual(merge_sort([5,4,3,2,1]), [1,2,3,4,5])
def test_merge_sort_arranges_sorted_array(self):
self.assertEqual(merge_sort([1,2,3,4,5]), [1,2,3,4,5])
if __name__ == '__main__':
unittest.main() | true |
1c363dfcaba4aac0ef759a205766ff5e164ec78c | Python | FronTexas/Fron-Algo-practice | /code_fight_string_classifier.py | UTF-8 | 1,362 | 2.78125 | 3 | [] | no_license | def classifyStrings(s):
if len(s) < 3:
return 'good'
vowels = set(['a','i','u','e','o'])
vc = 0
cc = 0
# contains_questions_mark
cqm = False
for i,ch in enumerate(s):
if ch in vowels:
vc += 1
cc = 0
elif ch != '?':
cc += 1
vc = 0
else:
cqm = True
if vc == 2:
vc = 0
cc = 1
elif cc == 4:
cc = 0
vc = 1
else:
vc = 0
cc = 0
if vc == 3 or cc == 5:
return 'bad'
if cqm:
return 'mixed'
return 'good'
print classifyStrings('') == 'good'
print classifyStrings('aaa?') == 'bad'
print classifyStrings('a?a') == 'mixed'
print classifyStrings('abcdfae') == 'good'
print classifyStrings('?') == 'good'
print classifyStrings('???') == 'mixed'
print classifyStrings('?????') == 'mixed'
print classifyStrings('aa?bbb?a?bbb?aa') == 'bad'
print classifyStrings('aa?bbbb') == 'bad'
print classifyStrings('??????????????????????????????????????????????????????????????????????????????????????????????????????????????????????????????????????????????????????????????????????????????????????????????????????????????????????????????????????????????????????????????????????????????????????????????????????????????????????????????????????????') == 'mixed'
| true |
465023fb1372a1b9f74a6cac5708e5d7188fc5fd | Python | krishnausjs/learn_python_hardway | /ex20_functions_and_files.py | UTF-8 | 820 | 3.953125 | 4 | [] | no_license | from sys import argv
#Take input file
script, input_file = argv
#function to print input file contents
def print_all(f):
print f.read()
#function to rewind the file
def rewind(f):
f.seek(0)
#function to print a line
def print_a_line(line_count, f):
print line_count, f.readline()
#Open the input file
current_file = open(input_file)
#Print file contents first
print "First let's print the whole file:\n"
print_all(current_file)
#Rewind to beginning of the file
print "Now let's rewind, kind of like a tape."
rewind(current_file)
#Print line by line. Firs three lines
print "Let's print three lines:"
current_line = 1
print_a_line(current_line, current_file)
current_line = current_line + 1
print_a_line(current_line, current_file)
current_line = current_line + 1
print_a_line(current_line, current_file)
| true |
71e4f9fb3e9bc0f07f97d9959bb8f61fbf4c1fb2 | Python | LesyaLesya/python_qa_otus | /lesson4/conftest.py | UTF-8 | 1,953 | 2.625 | 3 | [] | no_license | import pytest
import requests
def pytest_addoption(parser):
parser.addoption(
"--url",
action="store",
default="https://jsonplaceholder.typicode.com/todos",
help="Enter url"
)
@pytest.fixture(scope="module")
def base_url(request):
return request.config.getoption("--url")
@pytest.fixture(scope="module")
def session():
return requests.Session()
@pytest.fixture(params=[1, 100, 200])
def fixture_id_positive(request):
return request.param
@pytest.fixture(params=[-1, 0, 201, "string", False, [1, 2, 3], None])
def fixture_id_negative(request):
return request.param
@pytest.fixture(params=[{"title": "A", "completed": True, "userId": 1},
{"title": "long_title_long_title_long_title_long_title_long", "completed": False, "userId": 10},
{"title": "Medium title", "completed": True, "userId": 10},
{"title": " Title with wihtspaces ", "completed": False, "userId": 11},
{"title": "Title with symbols *^[]/;< ", "completed": True, "userId": 5}])
def fixture_payload_positive(request):
return request.param
@pytest.fixture(params=[1, 5, 10])
def fixture_userid_positive(request):
return request.param
@pytest.fixture(params=[True, False])
def fixture_completed_positive(request):
return request.param
@pytest.fixture(params=["et praesentium aliquam est",
"fugiat aut voluptatibus corrupti deleniti velit iste odio",
"voluptates dignissimos sed doloribus animi quaerat aut",
"ipsam aperiam voluptates qui",
"delectus aut autem"])
def fixture_title_positive(request):
return request.param
@pytest.fixture(params=["lesson4/schemas/todos_schema.json",
"lesson4/schemas/todos_schema_file.json"])
def fixture_get_todos(request):
return request.param
| true |
dce2a94fb0ea75fa40eb112d0bb3937eb37b2d3c | Python | m358807551/Leetcode | /code/questions/241~250_/246.py | UTF-8 | 477 | 3.046875 | 3 | [] | no_license | """
https://leetcode-cn.com/problems/strobogrammatic-number/
"""
class Solution(object):
def isStrobogrammatic(self, num):
"""
:type num: str
:rtype: bool
"""
left, right = 0, len(num)-1
while left < right:
if num[left]+num[right] not in '00 11 88 69 96'.split():
return False
left += 1
right -= 1
return num[left] in '018' if left == right else True
print(
)
| true |
ed42cc4faa8c7bfbcde42e3d6d92dbeb4a994753 | Python | guallo/mybank | /core/models.py | UTF-8 | 2,259 | 2.546875 | 3 | [] | no_license | from django.db import models as db_models
from django.db.models.fields import related
from django.contrib.auth import models as auth_models
from . import constants
# Create your models here.
class Account(db_models.Model):
uuid = db_models.UUIDField()
user = db_models.ForeignKey(auth_models.User)
def __str__(self):
return str(self.uuid)
class CurrencyType(db_models.Model):
name = db_models.CharField(max_length=256, unique=True)
def __str__(self):
return self.name
# TODO: Validate that two saving boxes that belongs from the same account
# can not have the same currency_type.
class SavingBox(db_models.Model):
currency_type = related.ForeignKey(CurrencyType)
account = related.ForeignKey(Account)
@property
def balance(self):
moves = Move.objects.filter(saving_box=self)
balance = sum(map(lambda move:
move.amount if constants.MoveType[move.typee] == constants.MoveType.DEPOSIT else -move.amount,
moves))
return balance
def __str__(self):
return '{currency_type} saving box of {account}'.format(
currency_type=self.currency_type,
account=self.account,
)
class Cause(db_models.Model):
name = db_models.CharField(max_length=256)
description = db_models.CharField(max_length=256, blank=True)
def __str__(self):
return self.name
class Move(db_models.Model):
typee = db_models.CharField(max_length=256, choices=(
(constants.MoveType.DEPOSIT.name, 'DEPOSIT'),
(constants.MoveType.EXTRACTION.name, 'EXTRACTION'), ))
description = db_models.CharField(max_length=256, blank=True)
date = db_models.DateTimeField()
cause = db_models.ForeignKey(Cause)
saving_box = db_models.ForeignKey(SavingBox)
amount = db_models.FloatField()
def __str__(self):
return '{typee} of ${amount} {direction} {saving_box} due to {cause} at {date}'.format(
typee=self.typee,
amount=self.amount,
direction=constants.MoveType[self.typee] == constants.MoveType.DEPOSIT and 'in' or 'from',
saving_box=self.saving_box,
cause=self.cause,
date=self.date,
) | true |
20febc8c065c224b7686192ecccc607b74f91d59 | Python | runtangr/python_test | /test_judge_num.py | UTF-8 | 183 | 3.875 | 4 | [] | no_license | #/usr/bin/env python
# -*- coding:utf-8 -*-
num = int(input("请输入一个数:"))
if num <= 0:
print("这个数是负数")
elif num >=0:
print("这个数是正数")
| true |
c3d670e84fd850eb517b441c3a63cb1b8bbd84b0 | Python | ava9/CS6700 | /temp/uctAI.py | UTF-8 | 9,623 | 3.171875 | 3 | [] | no_license | import copy
import random
import math
from board import board
class uctAI:
uctTree = {} #empty dictionary where keys are length-42 strings of board and value is [board value from -1 to 1, # visits to this node]
currnode = ""
numsteps = 0
# check if legal move
def legal(self, board):
arr = [0, 0, 0, 0, 0, 0, 0]
for c in range(0, board.columns):
if (board.colFull(c + 1)):
arr[c] = False
else:
arr[c] = True
return arr
def checkfirstmoves(self, board, player):
b = board.getBoard()
ret = 0
if b[3][0] == player:
ret = ret + .2
if b[2][0] != ((-1)*player):
ret = ret + .05
if b[4][0] != ((-1)*player):
ret = ret + .05
return ret
# checks how many two-in-a-rows there are
def check2(self, board, player):
b = board.getBoard()
ret = 0
#vertical check
for c in range(len(b)):
for r in range(len(b[c]) - 2):
if (b[c][r] == player):
if (b[c][r+1] == player):
ret = ret + .5
else:
continue
else:
continue
#horizontal check
for c in range(len(b) - 2):
for r in range(len(b[c])):
if (b[c][r] == player):
if (b[c+1][r] == player):
ret = ret + .5
else:
continue
else:
continue
#left to right, bottom to top / diagonal check
for c in range(len(b) - 2):
for r in range(len(b) - 3):
if (b[c][r] == player):
if (b[c+1][r+1] == player):
ret = ret + .5
else:
continue
else:
continue
#left to right, top to bottom \ diagonal check
for c in range(len(b) - 1, 1, -1):
for r in range(len(b[c]) - 2):
if (b[c][r] == player):
if (b[c-1][r+1] == player):
ret = ret + .5
else:
continue
else:
continue
return ret
# checks how many three-in-a-rows there are, one board eval function
def check(self, board, player):
b = board.getBoard()
ret = 0
#vertical check
for c in range(len(b)):
for r in range(len(b[c]) - 3):
if (b[c][r] == player):
if (b[c][r+1] == player):
if (b[c][r+2] == player):
ret = ret + 1
else:
continue
else:
continue
else:
continue
#horizontal check
for c in range(len(b) - 3):
for r in range(len(b[c])):
if (b[c][r] == player):
if (b[c+1][r] == player):
if (b[c+2][r] == player):
ret = ret + 1
else:
continue
else:
continue
else:
continue
#left to right, bottom to top / diagonal check
for c in range(len(b) - 3):
for r in range(len(b) - 4):
if (b[c][r] == player):
if (b[c+1][r+1] == player):
if (b[c+2][r+2] == player):
ret = ret + 1
else:
continue
else:
continue
else:
continue
#left to right, top to bottom \ diagonal check
for c in range(len(b) - 1, 2, -1):
for r in range(len(b[c]) - 3):
if (b[c][r] == player):
if (b[c-1][r+1] == player):
if (b[c-2][r+2] == player):
ret = ret + 1
else:
continue
else:
continue
else:
continue
return ret
#####################################DONE WITH CHECK FUNCTIONS###########################
def writeTree(self):
with open("uctTree.txt", "wb") as tree_doc:
for key in self.uctTree:
tree_doc.write(key + " " + str(self.uctTree[key][0]) + "\n")
def uctMoves(self, Board, player, depth):
#try this, new tree each time
self.uctTree = {}
#
nodelist = []
nodelist.append(Board.tostring())
currboard = Board
Boardval = self.check(Board, player) + self.check2(Board, player) + self.checkfirstmoves(Board, player)
normBoardval = Boardval/float(100)
self.uctTree[Board.tostring()] = [normBoardval, 1]
replica = copy.deepcopy(Board)
iters = 0
while (iters < 220):
#if nodelist has odd length, look for upper bound, else look for lower bound
#print currboard.columns #DEBUG
lMoves = self.legal(currboard) #all legal moves
childlist = []
indexlist = []
for col in range(currboard.columns):
replica = copy.deepcopy(currboard)
if lMoves[col]: #meaning it's true
replica.move(player, col+1)
childlist.append(replica.tostring())
indexlist.append(col)
#BUG BUG BUG WHAT IF THERE ARE NO LEGAL MOVES OKAY FIXED
if len(childlist) == 0:
#then the board is full and it's a draw (val = 0)
#print "NO LEGAL MOVES"
self.backpropogate(nodelist, 0)
iters += 1
self.numsteps = self.numsteps + 1
currboard = Board #(the original)
nodelist = []
nodelist.append(Board.tostring())
continue
chosenchild = ""
chosenindex = 0
maxval = -100 #or is there a better value to put this as? min should be -1 (WHEN LOOKING FOR UPPER BOUND, simulate uctAI)
minval = 100 #(WHEN LOOKING FOR LOWER CONFIDENCE BOUND, simulate opponent)
unexplored = 0
for ch in childlist:
if not ch in self.uctTree: # TODO too deterministic, actually it's fine
#unexplored += 1
chosenchild = ch
chosenindex = indexlist[childlist.index(ch)]
break
else: #choose node with highest value if nodelist has odd number elements, else choose node with lowest value
if (len(nodelist)%2) == 1: #odd, so find highest value
if ( self.uctTree[ch][0] + math.sqrt(float(self.numsteps)/self.uctTree[ch][1]) ) > maxval:
maxval = self.uctTree[ch][0] + math.sqrt(float(self.numsteps)/self.uctTree[ch][1])
chosenchild = ch
chosenindex = indexlist[childlist.index(ch)]
else: #even, so find lowest value
#CHECK ON THIS BECAUE IT WAS JUST COPY PASTE
if ( self.uctTree[ch][0] + math.sqrt(float(self.numsteps)/self.uctTree[ch][1]) ) < minval:
minval = self.uctTree[ch][0] + math.sqrt(float(self.numsteps)/self.uctTree[ch][1])
chosenchild = ch
chosenindex = indexlist[childlist.index(ch)]
#if unexplored > 0:
# randindex = random.randint(0, unexplored-1)
# chosenchild = childlist[randindex]
# chosenindex = indexlist[randindex]
if chosenchild == "":
print "ERROR NO CHILD CHOSEN"
if not chosenchild in self.uctTree:
newboard = board(7,6)
newboard.copyBoard(Board.toboard(chosenchild))
normQval = 0
#check if board is win state or lose state, else use board eval function
if (newboard.winner(player) == player):
normQval = 1
elif (newboard.winner((-1)*player) == (-1)*player):
normQval = -1
else:
Qval = self.check(newboard, player) + self.check2(newboard, player) + self.checkfirstmoves(newboard, player)
normQval = Qval/float(100)
self.uctTree[chosenchild] = [normQval, 1]
# now update everything back up the tree using the nodelist
self.backpropogate(nodelist, normQval)
iters += 1
self.numsteps = self.numsteps + 1
currboard = Board #(the original)
nodelist = []
nodelist.append(Board.tostring())
else:
#take this new board and explore deeper
nodelist.append(chosenchild)
newboard = board(7,6)
newboard.copyBoard(Board.toboard(chosenchild))
currboard = newboard
#NEED TO DO THIS WHERE IT LOOKS AT ALL CHILDREN AND TAKES MAX
lMoves = self.legal(Board) #all legal moves
childlist = []
indexlist = []
for col in range(Board.columns):
replica = copy.deepcopy(Board) #TODO move this inside loop
if lMoves[col]: #meaning it's true
replica.move(player, col+1)
childlist.append(replica.tostring())
indexlist.append(col)
if len(childlist) == 0:
print "NO LEGAL MOVES 2"
chosenchild = ""
chosenindex = 0
maxval = -100 #or is there a better value to put this as? min should be -1 (WHEN LOOKING FOR UPPER BOUND, simulate uctAI)
for ch in childlist:
if ( self.uctTree[ch][0] + math.sqrt(float(self.numsteps)/self.uctTree[ch][1]) ) > maxval: #use UCB or Q(s)??
maxval = self.uctTree[ch][0] + math.sqrt(float(self.numsteps)/self.uctTree[ch][1])
chosenchild = ch
chosenindex = indexlist[childlist.index(ch)]
if chosenchild == "":
print "ERROR NO CHILD CHOSEN"
return chosenindex+1
def backpropogate(self, nodelist, val):
#for each node in nodelist, update with newest val
for n in nodelist:
oldQ = self.uctTree[n][0]
newQ = oldQ + (val-oldQ)/float(self.uctTree[n][1])
self.uctTree[n][0] = oldQ
self.uctTree[n][1] = self.uctTree[n][1] + 1
#get all possible (legal) moves
#if one hasn't been explored, do that (pick random or in order), random playout or baord eval, add to tree
#propagate value back up the tree (update all nodes traversed)
def chooseMove(self, board, opp, depth):
'''aMoves = self.allMoves(board, opp, depth)
maxScore = max(aMoves)
lMoves = self.legal(board)
arr = []
for c in range(len(lMoves)):
if ((lMoves[c] and maxScore) == aMoves[c]):
arr.append(c)
if (len(arr) > 1):
# randomly select one of better moves - need to change later
r = random.randint(0, len(arr) - 1)
ret = arr[r] + 1
else:
ret = arr[0] + 1'''
return self.uctMoves(board, opp, depth) | true |
c0134b106d042e5cd08c633e70d5d7f1c3f8e013 | Python | SinaKhalili/bmp-compressor | /main.py | UTF-8 | 2,646 | 3.421875 | 3 | [] | no_license | from tkinter import ttk
import bitmap
from tkinter import Tk, mainloop, Canvas, PhotoImage, filedialog
def rgb2hex(r, g, b):
    """Convert r, g, b colour components into a '#rrggbb' hex string."""
    return "#" + "".join("{:02x}".format(channel) for channel in (r, g, b))
class Root(Tk):
    """Main application window.

    Inherits Tk directly so widgets can be attached as plain attributes
    (self.button instead of self.root.button).
    """

    def __init__(self):
        super(Root, self).__init__()
        self.title("BMP compression analyzer")
        self.minsize(640, 400)
        # Next free grid row used when stacking decoded images.
        self.row = 3
        self.labelFrame = ttk.LabelFrame(self, text="Open File")
        self.labelFrame.grid(column=0, row=1, padx=20, pady=20)
        self.button()

    def get_row(self):
        """Reserve and return the next free grid row index."""
        self.row += 1
        return self.row

    def button(self):
        """Create the browse button (note: rebinds self.button to the widget)."""
        self.button = ttk.Button(
            self.labelFrame, text="Browse A File", command=self.file_dialog
        )
        self.button.grid(column=1, row=1)

    def file_dialog(self):
        """Ask the user to pick a file, show its path, and render it if chosen."""
        self.filename = filedialog.askopenfilename(
            initialdir="./", title="Select A File",
        )
        self.label = ttk.Label(self.labelFrame, text="")
        self.label.grid(column=1, row=2)
        self.label.configure(text=self.filename)
        if self.filename:
            self.get_bmp_info(self.filename)

    def get_bmp_info(self, filename):
        """Parse the BMP file and display its pixels on the next free row."""
        with open(filename, "rb") as bmp_file:
            bmp_data = bitmap.Image(bmp_file.read())
            self.show_image(
                bmp_data.getBitmapWidth(),
                bmp_data.getBitmapHeight(),
                bmp_data.getPixels(),
                self.get_row(),
                0,
            )

    def show_image(self, width, height, pixels, row, col):
        """Render a pixel matrix (rows of BGR triples) onto a new canvas."""
        self.canvas = Canvas(self, width=width, height=height)
        self.canvas.grid(column=col, row=row)
        photo = PhotoImage(width=width, height=height)
        self.canvas.create_image((width / 2, height / 2), image=photo, state="normal")
        # Keep a reference on the canvas so the image is not garbage-collected.
        self.canvas.image = photo
        for row_index, pixel_row in enumerate(pixels):
            for col_index, pixel in enumerate(pixel_row):
                blue, green, red = pixel
                # NOTE(review): "height - row_index" targets y == height for the
                # first row, one past the last valid pixel row — confirm intended.
                photo.put(rgb2hex(r=red, g=green, b=blue), (col_index, height - row_index))
if __name__ == "__main__":
    # Build the window and enter the Tk event loop.
    app = Root()
    app.mainloop()
| true |
7aaf2db6302cce28cad579a120ece15717f57c81 | Python | AlvaroMlgs/BachelorThesisCode | /RaspberryPi/sonarDriver.py | UTF-8 | 1,846 | 2.859375 | 3 | [] | no_license | from sonar import Sonar
from threads import thrd
import time
import threading
import matplotlib.pyplot as plt
import numpy as np
#sonar1=Sonar(3,4)
#sonar2=Sonar(14,15)
#sonar3=Sonar(17,18)
sonars=[Sonar(3,4),Sonar(14,15),Sonar(17,18)]
"""
plt.figure("distance")
#plt.figure("velocity")
plt.ion()
plt.show()
plt.hold(False)
t=np.arange(-len(sonars[0].timeArray),0,1)
dist=np.zeros((3,len(sonars[0].distanceBuffer)))
vel=np.zeros((3,len(sonars[0].velocityBuffer)))
"""
for c in range(5): # Pre-populate buffers
for s in range(3):
sonars[s].measureDistance()
sonars[s].computeVelocity()
while True:
for s in range(3):
sonars[s].measureDistance()
sonars[s].computeVelocity()
print "%.3fs> Sonar 0: %s [m] %s [m/s]" % (sonars[0].timeArray[0],sonars[0].distance, sonars[0].velocity)
print "%.3fs> Sonar 1: %s [m] %s [m/s]" % (sonars[1].timeArray[0],sonars[1].distance, sonars[1].velocity)
print "%.3fs> Sonar 2: %s [m] %s [m/s]" % (sonars[2].timeArray[0],sonars[2].distance, sonars[2].velocity)
print ""
"""
plt.figure("distance")
plt.plot(sonars[0].timeArray,sonars[0].distanceBuffer,sonars[1].timeArray,sonars[1].distanceBuffer,sonars[2].timeArray,sonars[2].distanceBuffer)
plt.axis([min(min(sonars[0].timeArray),min(sonars[1].timeArray),min(sonars[2].timeArray)),max(max(sonars[0].timeArray),max(sonars[1].timeArray),max(sonars[2].timeArray)),0,4])
plt.legend(["Sonar 0","Sonar 1","Sonar 2"])
plt.figure("velocity")
plt.plot(sonars[0].timeArray,sonars[0].velocityBuffer,sonars[1].timeArray,sonars[1].velocityBuffer,sonars[2].timeArray,sonars[2].velocityBuffer)
plt.axis([min(min(sonars[0].timeArray),min(sonars[1].timeArray),min(sonars[2].timeArray)),max(max(sonars[0].timeArray),max(sonars[1].timeArray),max(sonars[2].timeArray)),-1,1])
plt.legend(["Sonar 0","Sonar 1","Sonar 2"])
plt.pause(1e-6)
"""
| true |
23cfe514acfb2756cbba653536f03343c4e36c79 | Python | BarisSari/lda2vec | /sklearnn.py | UTF-8 | 1,968 | 2.9375 | 3 | [
"MIT"
] | permissive | from sklearn.decomposition import NMF, LatentDirichletAllocation as LDA, TruncatedSVD
from sklearn.feature_extraction.text import CountVectorizer as CV
from nltk.corpus import brown
# Number of latent topics to extract with every model.
n_topics = 10
# Build the corpus: one whitespace-joined document per Brown corpus file.
data = []
for file in brown.fileids():
    document = ' '.join(brown.words(file))
    data.append(document)
# Bag-of-words matrix: drop terms in <5 documents or >90% of documents,
# keep alphabetic tokens of length >= 3.
vectorizer = CV(min_df=5, max_df=0.9, stop_words='english',
                lowercase=True, token_pattern='[a-zA-Z\-]{3,}')
vectorized_data = vectorizer.fit_transform(data)
# print(vectorized_data[:5])
# Fit three topic models on the same document-term matrix.
# NOTE(review): newer scikit-learn versions use n_components instead of
# n_topics for LatentDirichletAllocation — confirm the pinned version.
lda_model = LDA(n_topics=n_topics, max_iter=10, learning_method='online')
lda_data = lda_model.fit_transform(vectorized_data)
# print(lda_data.shape)
nmf_model = NMF(n_components=n_topics)
nmf_data = nmf_model.fit_transform(vectorized_data)
lsi_model = TruncatedSVD(n_components=n_topics)
lsi_data = lsi_model.fit_transform(vectorized_data)
# Topic distribution of the first document under each model.
print(lda_data[0])
print(nmf_data[0])
print(lsi_data[0])
def print_topics(model, vectorizer, top_n=10):
    """Print the top_n highest-weighted terms (with weights) of each topic."""
    feature_names = vectorizer.get_feature_names()
    for topic_index, weights in enumerate(model.components_):
        print("Topic %d:" % (topic_index))
        top_terms = []
        # argsort()[:-top_n - 1:-1] walks the indices from largest weight down.
        for term_index in weights.argsort()[:-top_n - 1:-1]:
            top_terms.append((feature_names[term_index], weights[term_index]))
        print(top_terms)
# Dump the top terms of every topic for each of the three fitted models.
print("LDA Model:")
print_topics(lda_model, vectorizer)
print("=" * 20)
print("NMF Model:")
print_topics(nmf_model, vectorizer)
print("=" * 20)
print("LSI Model:")
print_topics(lsi_model, vectorizer)
print("=" * 20)
# Project a new piece of text into the fitted topic spaces.
text = "The economy is working better than ever"
x = lsi_model.transform(vectorizer.transform([text]))[0]
print(x)
x = lda_model.transform(vectorizer.transform([text]))[0]
print(x)
# NOTE(review): x is now the LDA vector but is later compared against
# nmf_data (NMF space) — mixing spaces looks unintended; confirm.
from sklearn.metrics.pairwise import euclidean_distances
def most_similar(x, Z, top_n=5):
    """Return the top_n (document_index, distance) pairs in Z closest to x."""
    distances = euclidean_distances(x.reshape(1, -1), Z)[0]
    ranked = sorted(enumerate(distances), key=lambda item: item[1])
    return ranked[:top_n]
# Find the stored document whose NMF topic vector is nearest to x and
# show the first 1000 characters of the best match.
similarities = most_similar(x, nmf_data)
document_id, similarity = similarities[0]
print(data[document_id][:1000])
| true |
c0421ecc70adb2ca7c75fe80b6b89f21ec4e5cbf | Python | liwei123a/APITestFrame | /lib/request_method.py | UTF-8 | 1,699 | 2.78125 | 3 | [] | no_license | #coding:utf-8
"""
封装requests库
"""
import requests
class ReqMethod(object):
    """Thin wrapper around the requests library for GET/POST calls.

    Cookies from every response are remembered and sent on the next call.
    """

    def __init__(self, url, cookies, header=None, params=None):
        # Target URL, request headers/parameters, and the cookie jar to send.
        self.url = url
        self.header = header
        self.params = params
        self.cookies = cookies

    def get_method(self):
        """Send a GET request and capture the response cookies."""
        res = requests.get(url=self.url, params=self.params, headers=self.header,
                           cookies=self.cookies, verify=False)
        self.cookies = res.cookies.get_dict()
        return res

    def post_method(self):
        """Send a POST request, encoding the body according to content-type."""
        content_type = self.header['content-type']
        if content_type == 'application/json':
            res = requests.post(url=self.url, json=self.params, headers=self.header,
                                cookies=self.cookies, verify=False)
        elif 'multipart/form-data' in content_type:
            # The multipart branch does not pass the custom headers; requests
            # generates its own multipart boundary header.
            res = requests.post(url=self.url, files=self.params,
                                cookies=self.cookies, verify=False)
        else:
            res = requests.post(url=self.url, data=self.params, headers=self.header,
                                cookies=self.cookies, verify=False)
        self.cookies = res.cookies.get_dict()
        return res

    def get_cookies(self):
        """Return the cookies captured from the most recent response."""
        return self.cookies

    def req_send(self, method):
        """Dispatch on the HTTP verb; returns None for unsupported verbs."""
        verb = method.upper()
        if verb == "GET":
            return self.get_method()
        if verb == "POST":
            return self.post_method()
| true |
b2b3ed2fd4c1e2479e0a6a39f90ef03a98c197b4 | Python | panditdandgule/Pythonpractice | /P16_SecreatMessage.py | UTF-8 | 1,688 | 3.71875 | 4 | [] | no_license | # X and Y are best friends and they love to chat with each other. But their recent concerns about the privacy
# of their messages has distant them. So they decided to encrypt their messages with a key, K, such that the
# character of their messages are now shifted K times towards right of their initial value. Their techniques
# only convert numbers and alphabets while leaving special characters as it is.
#
# Provided the value K you are required to encrypt the messages using their idea of encryption.
#
# INPUT FORMAT
#
# The first line of the input contains, T, the number of messages. The next line contains N, and K, no of
# characters in the message and key for encryption. The next line contains the message.
#
# OUTPUT FORMAT
#
# Output the encrypted messages on a new line for all the test cases.
#
# CONSTRAINS
#
# 1≤T≤100
# 1≤N≤106
# 0≤K≤106
#
# SAMPLE INPUT
# 2
# 12 4
# Hello-World!
# 16 50
# Aarambh@1800-hrs
#
# SAMPLE OUTPUT
# Lipps-Asvph!
# Yypykzf@1800-fpq
# Lookup alphabets for the cyclic shift cipher: lowercase, uppercase, digits.
myString = 'abcdefghijklmnopqrstuvwxyz'
myStringU = myString.upper()
nums = '0123456789'
def access_char(string, i):
    """Return the character at index i, wrapping around cyclically."""
    wrapped_index = i % len(string)
    return string[wrapped_index]
# For each of T test cases: read N and the key K, then shift every letter
# and digit of the message K positions to the right (wrapping around).
for _ in range(int(input())):
    n, k = map(int, input().split())
    message = input()
    encrypted = []
    for char in message:
        if char.islower() and char.isalpha():
            encrypted.append(access_char(myString, myString.find(char) + k))
        elif char.isupper() and char.isalpha():
            encrypted.append(access_char(myStringU, myStringU.find(char) + k))
        elif char.isnumeric():
            encrypted.append(access_char(nums, nums.find(str(char)) + k))
        else:
            # Special characters pass through unchanged.
            encrypted.append(char)
    print(''.join(encrypted))
f53a5b7505f7db0aff5c052e677610a3a2c9d27e | Python | 17764591637/jianzhi_offer | /LeetCode/131_partition.py | UTF-8 | 631 | 3.640625 | 4 | [] | no_license | '''
给定一个字符串 s,将 s 分割成一些子串,使每个子串都是回文串。
返回 s 所有可能的分割方案。
示例:
输入: "aab"
输出:
[
["aa","b"],
["a","a","b"]
]
'''
class Solution:
    def partition(self, s):
        """Return every split of s into palindromic substrings (DFS order)."""
        results = []

        def explore(remaining, path):
            # Entire string consumed: path is a complete palindromic partition.
            if not remaining:
                results.append(path)
                return
            for end in range(1, len(remaining) + 1):
                prefix = remaining[:end]
                if prefix == prefix[::-1]:
                    explore(remaining[end:], path + [prefix])

        explore(s, [])
        return results
# Demo: print all palindromic partitions of "aab".
s = Solution()
res = s.partition('aab')
print(res)
| true |
709bebcbbb52c0d62bd1460ffc533a380027e2ca | Python | egure/clickstream | /predictions/others/generate_art.py | UTF-8 | 2,368 | 3.078125 | 3 | [] | no_license | #Objective: take a basic image and style it
import numpy as np
from keras.applications import vgg16
from keras import backend as K
from keras.preprocessing.image import load_img, img_to_array
#import image_processing as img # import image_processing as img #helper class to convert image to an array
from PIL import Image as img
from IPython.display import Image #image display
#Step 1 - prepare dataset
# Load the content image and wrap it in a Keras backend variable.
base_image = img.open('./base_image.jpg')
base_image = np.asarray(base_image)
print base_image.ndim
base_image = K.variable(base_image) #transform image to tensor
# Load the style reference image the same way.
style_reference_image = img.open('./style_image.jpg')
style_reference_image = np.asarray(style_reference_image)
style_reference_image = K.variable(style_reference_image)
# NOTE(review): placeholder shape (254, 198, 3) must match the two loaded
# images for the concatenation below to be valid — confirm image sizes.
combination_image = K.placeholder((254,198,3)) #placeholder to initialize height and width
input_tensor = K.concatenate([base_image, style_reference_image, combination_image], axis=0) #combine the 3 images into a single Keras tensor
#Step 2 - create the model
model = vgg16.VGG16(input_tensor=input_tensor, weights='imagenet', include_top=False) #build our prebuilt model VGG16 with newly created tensor as input
# NOTE(review): VGG16 is a functional Model with no .add(), and Flatten is
# never imported — this line will fail as written; confirm intent.
model.add(Flatten(3))
#it already knows how to separate the image.
#It has a loss function, error value to minimize. Content loss and style loss.
#content loss and style loss function which we want to minimize.
#shape is given by content classification. Then we calculate the loss on the content.
#then, we calculate style loss, is also minimized.
#eucledian to calculate loss (rankings, recommendations, similarities)
#Step 3 - training or activation of the features of the image
#we add a step to measure correlation of the activations
#runs a gram matrix to measure which features tend to activate together
#once we have this we can define the style loss using the eucledian distance between the images.
#we compute only one layer for content
#we compute a weighted "style" loss with several layers of the image.
#runs a function to update our output image to minimize the lost.
# NOTE(review): "img" is PIL's Image module (see imports); it has no
# combination_loss/minimize_loss. The commented-out image_processing helper
# module was presumably intended here — confirm.
loss = img.combination_loss(model, combination_image)
#get the gradients of the generated image write the loss
grads = K.gradients(loss, combination_image)
#run optimization over pixels to minimize loss
combination_image = img.minimize_loss(grads, loss, combination_image)
#optimization technique to enhance paterns
# NOTE(review): image() is undefined; IPython.display imports Image (capital I).
image()
9823b05d0e7de436b2076c526bde803f4465bb19 | Python | gistable/gistable | /all-gists/5539628/snippet.py | UTF-8 | 3,499 | 3.390625 | 3 | [
"MIT"
] | permissive | # coding=UTF-8
import nltk
from nltk.corpus import brown
# This is a fast and simple noun phrase extractor (based on NLTK)
# Feel free to use it, just keep a link back to this post
# http://thetokenizer.com/2013/05/09/efficient-way-to-extract-the-main-topics-of-a-sentence/
# Create by Shlomi Babluki
# May, 2013
# This is our fast Part of Speech tagger
#############################################################################
# Training data: POS-tagged sentences from the Brown corpus "news" category.
brown_train = brown.tagged_sents(categories='news')
# Fallback tagger: the first matching regex wins; the final ".*" pattern
# tags anything unmatched as a noun (NN).
regexp_tagger = nltk.RegexpTagger(
    [(r'^-?[0-9]+(.[0-9]+)?$', 'CD'),
     (r'(-|:|;)$', ':'),
     (r'\'*$', 'MD'),
     (r'(The|the|A|a|An|an)$', 'AT'),
     (r'.*able$', 'JJ'),
     (r'^[A-Z].*$', 'NNP'),
     (r'.*ness$', 'NN'),
     (r'.*ly$', 'RB'),
     (r'.*s$', 'NNS'),
     (r'.*ing$', 'VBG'),
     (r'.*ed$', 'VBD'),
     (r'.*', 'NN')
    ])
# Backoff chain: bigram -> unigram -> regex patterns.
unigram_tagger = nltk.UnigramTagger(brown_train, backoff=regexp_tagger)
bigram_tagger = nltk.BigramTagger(brown_train, backoff=unigram_tagger)
#############################################################################
# This is our semi-CFG; Extend it according to your own needs
#############################################################################
# Semi-CFG merge rules: two adjacent tags "LEFT+RIGHT" collapse into the
# value tag ("NNI" marks an intermediate noun compound).
cfg = {}
cfg["NNP+NNP"] = "NNP"
cfg["NN+NN"] = "NNI"
cfg["NNI+NN"] = "NNI"
cfg["JJ+JJ"] = "JJ"
cfg["JJ+NN"] = "NNI"
#############################################################################
class NPExtractor(object):
    """Extract the main noun-phrase topics of one sentence."""

    def __init__(self, sentence):
        self.sentence = sentence

    def tokenize_sentence(self, sentence):
        """Split the sentence into individual word tokens."""
        return nltk.word_tokenize(sentence)

    def normalize_tags(self, tagged):
        """Collapse Brown-corpus tag variants ("NP-TL", "NNS", ...) onto base tags."""
        normalized = []
        for word, tag in tagged:
            if tag in ("NP-TL", "NP"):
                normalized.append((word, "NNP"))
            elif tag.endswith("-TL"):
                normalized.append((word, tag[:-3]))
            elif tag.endswith("S"):
                normalized.append((word, tag[:-1]))
            else:
                normalized.append((word, tag))
        return normalized

    def extract(self):
        """Tag the sentence, merge adjacent tags via cfg, keep NNP/NNI phrases."""
        tokens = self.tokenize_sentence(self.sentence)
        tags = self.normalize_tags(bigram_tagger.tag(tokens))
        merged_any = True
        while merged_any:
            merged_any = False
            for position in range(len(tags) - 1):
                left, right = tags[position], tags[position + 1]
                combined_tag = cfg.get("%s+%s" % (left[1], right[1]), '')
                if combined_tag:
                    # Replace the pair with one merged phrase and rescan.
                    tags[position:position + 2] = [
                        ("%s %s" % (left[0], right[0]), combined_tag)
                    ]
                    merged_any = True
                    break
        return [word for word, tag in tags if tag in ("NNP", "NNI")]
# Main method, just run "python np_extractor.py"
def main():
    # Demo (Python 2 print): extract and show the topics of a sample sentence.
    sentence = "Swayy is a beautiful new dashboard for discovering and curating online content."
    np_extractor = NPExtractor(sentence)
    result = np_extractor.extract()
    print "This sentence is about: %s" % ", ".join(result)
if __name__ == '__main__':
    main()
| true |
101406d964aceeff063d556d474c1be5c4e4242f | Python | JerryCatLeung/word2vec_insight | /exptable.py | UTF-8 | 1,324 | 3.265625 | 3 | [] | no_license | #!/usr/bin/python
# -*- coding: utf-8 -*-
'''
---------------------------------------
@brief expTable介绍
@author jungle
@date 2016/01/05
---------------------------------------
'''
import math
import matplotlib.pyplot as plt
EXP_TABLE_SIZE = 1000  # resolution of the precomputed sigmoid lookup table
MAX_EXP = 6            # table covers inputs in (-MAX_EXP, MAX_EXP)
expTable = [0 for i in range(0, EXP_TABLE_SIZE)]
print len(expTable)
#----------------------
# 最原始的logit函数.
#----------------------
def raw_logistic(x):
    """The plain logistic (sigmoid) function: exp(x) / (exp(x) + 1)."""
    exp_x = math.exp(x)
    return exp_x / (exp_x + 1)
# 初始化。
for i in range(0, EXP_TABLE_SIZE):
expTable[i] = math.exp((i * 1.0 / EXP_TABLE_SIZE * 2 - 1) * MAX_EXP)
expTable[i] = expTable[i] / (expTable[i] + 1)
print expTable
## 此处f 范围(-6,6)用f/1000.0来代替, 取12000个点.
m = [int((f/10.0 + MAX_EXP) * (EXP_TABLE_SIZE / MAX_EXP / 2)) for f in range(-60, 60)]
print m
result = [expTable[x] for x in m]
print result
#result2 = [raw_logistic(x) for x in range(-10, 10)]
fig = plt.figure()
ax = plt.subplot(1,1,1)
## 设置坐标范围
#plt.ylim(ymax=1.2)
#plt.ylim(ymin=0.5)
#
#ax.plot(result)
## 绘制原始的坐标点.
print raw_logistic(6)
print raw_logistic(-6)
ax.plot(result2)
#ax.plot(expTable)
#ax.plot(expTable, label="exp")
handles, labels = ax.get_legend_handles_labels()
ax.legend(handles[::-1], labels[::-1])
# python自动取整.
plt.show()
| true |
e9588583d396fb7f79c155519d8e8686cbbf4df6 | Python | gnool/datastructure-algorithms | /np-complete/hamiltonian-path/hamiltonian_path.py | UTF-8 | 3,936 | 3.828125 | 4 | [] | no_license | # python3
import itertools
class HamiltonianPath:
    """Reduce a Hamiltonian-path instance to CNF-SAT.

    Attributes:
        total_vertices   number of vertices
        total_edges      number of edges
        edges            list of [x, y] edges

    Boolean state (i, j) encodes "vertex i occupies position j of the path"
    (1 = yes, 0 = no).
    """

    def __init__(self):
        self.total_vertices = 0
        self.total_edges = 0
        self.edges = []

    def read_data(self):
        """Read the instance from stdin.

        Format:
            Line 0: n m
            Lines 1..m: x y     (1 <= x, y <= n)
        """
        self.total_vertices, self.total_edges = map(int, input().split())
        self.edges = [list(map(int, input().split())) for _ in range(self.total_edges)]

    def var(self, i, j):
        """Map state (vertex i, position j) onto a 1-based SAT variable number."""
        return (i - 1) * self.total_vertices + j

    def vert_occupy_exactly_one_pos(self, i, clauses):
        """Append clauses forcing vertex i into exactly one path position.

        Encoded as (a OR b OR ...) plus pairwise (-a OR -b) exclusions.
        """
        positions = [self.var(i, j) for j in range(1, self.total_vertices + 1)]
        # At least one position...
        clauses.append(positions)
        # ...and at most one.
        for first, second in itertools.combinations(positions, 2):
            clauses.append([-first, -second])

    def pos_occupy_by_exactly_one_vert(self, j, clauses):
        """Append clauses forcing position j to hold exactly one vertex."""
        occupants = [self.var(i, j) for i in range(1, self.total_vertices + 1)]
        clauses.append(occupants)
        for first, second in itertools.combinations(occupants, 2):
            clauses.append([-first, -second])

    def print_SAT(self):
        """Print the instance in SAT form: clause/variable counts, then clauses."""
        n = self.total_vertices
        clauses = []

        # Every vertex sits in exactly one position, and vice versa.
        for vertex in range(1, n + 1):
            self.vert_occupy_exactly_one_pos(vertex, clauses)
        for position in range(1, n + 1):
            self.pos_occupy_by_exactly_one_vert(position, clauses)

        # Adjacency list (index 0 unused).
        adj = [[] for _ in range(n + 1)]
        for x, y in self.edges:
            adj[x].append(y)
            adj[y].append(x)

        # Consecutive positions must be joined by an edge: if vertex i sits
        # at position j, no non-neighbour k may sit at position j+1.
        for i in range(1, n + 1):
            non_neighbours = set(range(1, n + 1)) - set(adj[i])
            non_neighbours.remove(i)
            for j in range(1, n):
                for k in non_neighbours:
                    clauses.append([-self.var(i, j), -self.var(k, j + 1)])

        print(len(clauses), n ** 2)
        for clause in clauses:
            clause.append(0)
            print(" ".join(map(str, clause)))
if __name__ == '__main__':
    # Read the instance from stdin and emit its CNF encoding.
    solver = HamiltonianPath()
    solver.read_data()
    solver.print_SAT()
| true |
ebd0e56c55c2b8644dc185d37cc3e0f458eef02d | Python | ShaikhMohammaddanish/file-operation-in-python | /Save_Alphabets_in_File.py | UTF-8 | 582 | 4.03125 | 4 | [] | no_license | '''
==================
File Operations
==================
Subject :- store the capital and small letters of the alphabet in a file using a loop;
a new file named alpha.txt should be created
'''
# => Code To Save Lower Case Alphabates in File Name alpha.txt
import string
# Write the lower- and upper-case alphabets, space-separated, to alpha.txt.
# A context manager guarantees the file is closed even if a write fails
# (the original open()/close() pair leaked the handle on error).
with open("alpha.txt", "w") as f:
    f.write("=> lower case alphabates are \n\n")
    for t in string.ascii_lowercase:
        f.write("{0} ".format(t))
    # Upper-case section follows after a blank line.
    f.write("\n\n=> Upper case alphabates are \n\n")
    for t in string.ascii_uppercase:
        f.write("{0} ".format(t))
| true |
def get_row(k):
    """Return the k-th (1-based) row of Pascal's triangle; [] when k <= 0."""
    row = []
    for length in range(1, k + 1):
        # Edges are 1; interior entries are the sum of the two parents.
        new_row = [1] * length
        for p in range(1, length - 1):
            new_row[p] = row[p - 1] + row[p]
        row = new_row
    return row
# Demo: row 10 of Pascal's triangle.
print(get_row(10))
| true |
a78df18243a830ea78a6e84fae6600676442ab43 | Python | MrHamdulay/csc3-capstone | /examples/data/Assignment_5/jnsjul007/question4.py | UTF-8 | 514 | 3.5625 | 4 | [] | no_license | import math
# Plot f(x) as ASCII art over a 21x21 grid: "o" marks the curve, "-"/"|"
# the axes, "+" the origin.
# NOTE: eval() of raw user input — only safe for trusted interactive use.
function = input("Enter a function f(x):\n")
for i in range(10, -11, -1):
    row_chars = []
    for x in range(-10, 11):
        # Re-evaluate the user's expression at the current x.
        graph = round(eval(function))
        if i == graph:
            row_chars.append("o")
        elif x == 0 and i == 0:
            row_chars.append("+")
        elif i == 0:
            row_chars.append("-")
        elif x == 0:
            row_chars.append("|")
        else:
            row_chars.append(" ")
    print("".join(row_chars))
| true |
09e1df2485049bf61767989702af03fa1cb47ecf | Python | Sorryony/RPg | /Menu.py | UTF-8 | 4,642 | 3.140625 | 3 | [] | no_license | import pygame
import Main
class Start(object):
    """The clickable "Start" button on the intro screen."""

    def __init__(self, x, y, width, height):
        self.x = x
        self.y = y
        self.width = width
        self.height = height
        self.visible = True
        # Clickable area: offset from (x, y), fixed 100x50 size.
        self.hitbox = (self.x + 15, self.y + 7, 100, 50)

    def draw(self, win):
        """Blit the button image and refresh its hitbox."""
        if not self.visible:
            return
        image = pygame.image.load("assets/Pics/Start.jpg").convert()
        scaled = pygame.transform.scale(image, (100, 50))
        win.blit(scaled, (450, 75))
        self.hitbox = (self.x + 15, self.y + 7, 100, 50)
class Exit(object):
    """The clickable "Exit" button on the intro screen."""

    def __init__(self, x, y, width, height):
        self.x = x
        self.y = y
        self.width = width
        self.height = height
        self.visible = True
        # Initialise the hitbox here as well (it was previously only set in
        # draw(), so reading it before the first frame raised AttributeError).
        self.hitbox = (self.x + 15, self.y + 7, 100, 50)

    def draw(self, win):
        """Blit the button image and refresh its hitbox.

        A dead "if pygame.MOUSEBUTTONUP: pass" statement was removed; it had
        no effect.
        """
        if self.visible:
            exit_img = pygame.image.load("assets/Pics/Exit.jpg").convert()
            scaled = pygame.transform.scale(exit_img, (100, 50))
            win.blit(scaled, (450, 200))
            self.hitbox = (self.x + 15, self.y + 7, 100, 50)
class Character_image_intro(object):
    """Animated running character shown on the intro screen."""

    def __init__(self, x, y, width, height):
        self.x = x
        self.y = y
        self.width = width
        self.height = height
        self.visible = True
        self.walkCount = 0
        # Six run-cycle frames; each frame is held for 3 ticks (see draw()).
        self.images = [
            pygame.image.load('assets/Pics/adventurer-run3-0%d.png' % frame)
            for frame in range(6)
        ]

    def draw(self, win_intro):
        """Advance the run animation one tick and blit the current frame."""
        # 6 frames * 3 ticks each = 18 ticks per full cycle.
        if self.walkCount + 1 >= 18:
            self.walkCount = 0
        if self.visible:
            win_intro.blit(self.images[self.walkCount // 3], (300, 217))
            self.walkCount += 1
def _cursor_over(hitbox, pos):
    """True when pos (x, y) lies strictly inside the (x, y, w, h) hitbox."""
    x, y, w, h = hitbox
    return x + w > pos[0] > x and y + h > pos[1] > y


def game_intro():
    """Run the intro-screen loop: scrolling background plus Start/Exit buttons.

    Refactor: the hitbox containment test, previously written out four times
    inline, is extracted into _cursor_over(), and the two identical
    MOUSEBUTTONDOWN branches are merged.
    """
    win_intro = pygame.display.set_mode((700, 288))
    # Two copies of the background scroll left and wrap around.
    intro_x1 = 0
    intro_x2 = Main.bg.get_width()
    while True:
        intro_x1 -= 2
        intro_x2 -= 2
        if intro_x1 < Main.bg.get_width() * -1:
            intro_x1 = Main.bg.get_width()
        if intro_x2 < Main.bg.get_width() * -1:
            intro_x2 = Main.bg.get_width()
        win_intro.blit(Main.bg, (intro_x1, 0))
        win_intro.blit(Main.bg, (intro_x2, 0))

        # "Pressed" button images, loaded and re-scaled each frame.
        start_press2 = pygame.transform.scale(
            pygame.image.load("assets/Pics/Start_press.jpg").convert(), (100, 50))
        exit_press2 = pygame.transform.scale(
            pygame.image.load("assets/Pics/Exit_press.jpg").convert(), (100, 50))

        # Buttons and the animated character.
        Main.start_intro.draw(win_intro)
        Main.exit_intro.draw(win_intro)
        Main.Cii_intro.draw(win_intro)

        for event in pygame.event.get():
            pos = pygame.mouse.get_pos()
            if event.type == pygame.QUIT:
                pygame.quit()
                quit()
            if event.type == pygame.MOUSEBUTTONDOWN:
                if _cursor_over(Main.start_intro.hitbox, pos):
                    Main.win.blit(start_press2, (450, 75))
                    Main.Chosen.health = 100
                    Main.game_loop()
                if _cursor_over(Main.exit_intro.hitbox, pos):
                    Main.win.blit(exit_press2, (450, 200))
                    quit()
            # Hover feedback: show the pressed image while the cursor is over
            # a button.
            if _cursor_over(Main.start_intro.hitbox, pos):
                Main.win.blit(start_press2, (450, 75))
            if _cursor_over(Main.exit_intro.hitbox, pos):
                Main.win.blit(exit_press2, (450, 200))

        Main.clock.tick(27)
        pygame.display.update()
e9fa1c0e618a2ff7cdd85b1778050c4ee312d214 | Python | jeffreyc-juniper-net/python-sdk | /laceworksdk/api/alert_channels.py | UTF-8 | 6,558 | 3 | 3 | [
"MIT"
] | permissive | # -*- coding: utf-8 -*-
"""
Lacework Alert Channels API wrapper.
"""
import logging
logger = logging.getLogger(__name__)
class AlertChannelsAPI(object):
    """Client for the Lacework Alert Channels API (v2 endpoints)."""

    def __init__(self, session):
        """Store the HttpSession used for every request.

        :param session: An instance of the HttpSession class
        """
        super(AlertChannelsAPI, self).__init__()
        self._session = session

    def create(self,
               name,
               type,
               enabled,
               data,
               org=False):
        """Create a new alert channel.

        :param name: alert channel name
        :param type: alert channel type
        :param enabled: whether the channel is enabled (coerced to 0/1)
        :param data: JSON object matching the schema for the given type
        :param org: perform the request at the Organization level
        :return: response json
        """
        logger.info("Creating alert channel in Lacework...")

        api_uri = "/api/v2/AlertChannels"
        payload = {
            "name": name,
            "type": type,
            "enabled": int(bool(enabled)),
            "data": data
        }

        return self._session.post(api_uri, org=org, data=payload).json()

    def get(self,
            guid=None,
            type=None,
            org=False):
        """Fetch alert channels: one by GUID, all of a type, or everything.

        :param guid: alert channel GUID
        :param type: alert channel type
        :param org: perform the request at the Organization level
        :return: response json
        """
        logger.info("Getting alert channel info from Lacework...")

        if guid:
            api_uri = f"/api/v2/AlertChannels/{guid}"
        elif type:
            api_uri = f"/api/v2/AlertChannels/{type}"
        else:
            api_uri = "/api/v2/AlertChannels"

        return self._session.get(api_uri, org=org).json()

    def get_by_type(self,
                    type,
                    org=False):
        """Fetch all alert channels of the given type."""
        return self.get(type=type, org=org)

    def get_by_guid(self,
                    guid,
                    org=False):
        """Fetch the alert channel with the given GUID."""
        return self.get(guid=guid, org=org)

    def search(self,
               query_data=None,
               org=False):
        """Search alert channels.

        :param query_data: dictionary of search parameters (filters, returns)
        :return: response json
        """
        logger.info("Searching alert channels from Lacework...")

        api_uri = "/api/v2/AlertChannels/search"
        return self._session.post(api_uri, data=query_data, org=org).json()

    def update(self,
               guid,
               name=None,
               type=None,
               enabled=None,
               data=None,
               org=False):
        """Patch an existing alert channel; only supplied fields are sent.

        :param guid: alert channel GUID
        :param name: alert channel name
        :param type: alert channel type
        :param enabled: whether the channel is enabled (coerced to 0/1)
        :param data: JSON object matching the schema for the given type
        :param org: perform the request at the Organization level
        :return: response json
        """
        logger.info("Updating alert channel in Lacework...")

        api_uri = f"/api/v2/AlertChannels/{guid}"

        payload = {}
        if name:
            payload["name"] = name
        if type:
            payload["type"] = type
        if enabled is not None:
            payload["enabled"] = int(bool(enabled))
        if data:
            payload["data"] = data

        return self._session.patch(api_uri, org=org, data=payload).json()

    def delete(self,
               guid,
               org=False):
        """Delete an alert channel.

        Returns the raw response on HTTP 204 (no body), otherwise its json.
        """
        logger.info("Deleting alert channel in Lacework...")

        api_uri = f"/api/v2/AlertChannels/{guid}"
        response = self._session.delete(api_uri, org=org)

        if response.status_code == 204:
            return response
        return response.json()

    def test(self,
             guid,
             org=False):
        """Trigger a test notification on an alert channel.

        Returns the raw response on HTTP 204 (no body), otherwise its json.
        """
        logger.info("Testing alert channel in Lacework...")

        api_uri = f"/api/v2/AlertChannels/{guid}/test"
        response = self._session.post(api_uri, org=org)

        if response.status_code == 204:
            return response
        return response.json()
| true |
507df2941e19f6f355fda87f9ac64a66918ed1f0 | Python | adreena/MyStudyCorner | /LeetCode/codes/graphs/542.py | UTF-8 | 874 | 2.828125 | 3 | [] | no_license | # tim eO(NM)
# space O(NM)
class Solution:
    def updateMatrix(self, matrix: "List[List[int]]") -> "List[List[int]]":
        """Replace every cell with its distance to the nearest 0 (in place).

        Multi-source BFS seeded from every 0 cell: O(n*m) time and space.
        Fixes: annotations quoted (List was never imported, so the original
        raised NameError at definition time), deque replaces the O(n)
        list.pop(0) queue, and a redundant nested while loop is removed.
        """
        if not matrix:
            return []
        from collections import deque  # O(1) pops from the left

        rows, cols = len(matrix), len(matrix[0])
        queue = deque()
        visited = set()
        # Seed the queue with every zero cell (distance already correct).
        for i in range(rows):
            for j in range(cols):
                if matrix[i][j] == 0:
                    visited.add((i, j))
                    queue.append((i, j))

        while queue:
            ci, cj = queue.popleft()
            for ni, nj in ((ci + 1, cj), (ci - 1, cj), (ci, cj - 1), (ci, cj + 1)):
                if 0 <= ni < rows and 0 <= nj < cols and (ni, nj) not in visited:
                    # First visit is via a shortest path in an unweighted grid.
                    matrix[ni][nj] = matrix[ci][cj] + 1
                    queue.append((ni, nj))
                    visited.add((ni, nj))
        return matrix
| true |
fb3650b20dea12038f7037dcea29769ef248d3e7 | Python | churximi/bookreview | /Functions/ConnectDB.py | UTF-8 | 938 | 3.015625 | 3 | [] | no_license | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
功能:连接数据库函数
返回:cur,conn
时间:2016年5月5日 14:12:24
"""
import pyodbc
import tkFileDialog
def main():
    """Prompt for an Access database file and open a connection to it.

    Returns a (cursor, connection) pair on success; implicitly returns None
    when the user cancels the file dialog.
    """
    print u"请选择数据库文件..." # pick the database file via a file-dialog window; returns its path
    dbfile_path = tkFileDialog.askopenfilename(title=u"选择文件") # path should preferably contain no Chinese characters
    if dbfile_path:
        conn = pyodbc.connect(r"Driver={Microsoft Access Driver (*.mdb, *.accdb)};DBQ=" + dbfile_path + ";Uid=;Pwd=;")
        print u"已建立数据库连接。"
        cur = conn.cursor()
        return cur, conn
    else:
        print u"★注意:本次操作没有选择文件!" # the dialog was closed without selecting a file
if __name__ == "__main__":
    # Intentionally a no-op: this module is meant to be imported for main().
    pass
"""
更新日志:
2016年5月5日 14:13:01,增加文件选择窗口,tkFileDialog.askopenfilename()
2016年5月5日 14:33:56,添加提示(未选择文件直接退出时)
"""
| true |
4e534e3d36ffb42546488c16dbc1e0a74d9b1e7a | Python | nsidc/qgreenland | /qgreenland/util/model_validators/layer_style.py | UTF-8 | 5,176 | 2.53125 | 3 | [
"MIT"
] | permissive | from xml.etree import ElementTree
import qgreenland.exceptions as exc
from qgreenland.util.layer_style import get_style_filepath
def validate_style_file_exists(style_name: str):
    """Verify that the named QML style file is present on disk."""
    # An empty/None style name is allowed and skips the filesystem check.
    if not style_name:
        return style_name

    style_filepath = get_style_filepath(style_name)
    if not style_filepath.is_file():
        raise exc.QgrInvalidConfigError(f"Style file does not exist: {style_filepath}")

    return style_name
def validate_style_file_only_contains_allowed_fonts(style_name: str):
"""Ensure only fonts that can be downloaded by QGIS are in our style files.
This ensures we don't re-trigger an old issue:
https://github.com/nsidc/qgreenland/issues/515
"""
# TODO: Is the full list of supported fonts available in PyQGIS' API? I think
# this is the complete list, but haven't found it in the Python API yet:
# https://github.com/qgis/QGIS/blob/a7b31c7db29328fc44966a854d22c452f58c77c1/src/core/textrenderer/qgsfontmanager.cpp#L203-L925
allowed_fonts = ["Open Sans"]
if style_name:
style_filepath = get_style_filepath(style_name)
tree = ElementTree.parse(style_filepath)
# Check every XML tag for a `fontFamily` attribute with an undesired value
for elem in tree.getroot().iter():
if font_family := elem.attrib.get("fontFamily", False):
if font_family not in allowed_fonts:
raise exc.QgrInvalidConfigError(
f"Style {style_filepath} contains disallowed font:"
f" '{font_family}'."
f" Only the following fonts are allowed: {allowed_fonts}."
)
return style_name
def validate_style_file_continuous_legend(style_name: str):
    """Ensure common errors in continuous legend configuration are avoided.

    * Ensures continuous legend is enabled for "linear" interpolated colorramps.
    * Ensures continuous legends are horizontal.
    * Ensures a "Suffix" is populated in the "Legend Settings" menu.
    """
    if not style_name:
        return style_name

    style_filepath = get_style_filepath(style_name)
    tree = ElementTree.parse(style_filepath)

    errors = []
    error_prefix = (
        f"Style '{style_name}' has a continuous legend that is incorrectly"
        " configured."
        f" In QGIS >=3.28, edit this style ({style_filepath}) in the layer symbology"
        ' menu and configure the continuous legend in the "Legend Settings" submenu to'
        " resolve the following:"
    )
    error_suffix = (
        "Please see our documentation:"
        " <https://qgreenland.readthedocs.io/en/latest/contributor/how-to"
        "/contribute-styles.html#continuous-colormap-considerations>"
    )

    colorrampshader = tree.find(".//colorrampshader")
    if colorrampshader is None:
        # This style is not using a colorramp, so there is no "Label unit suffix"
        # setting to be concerned with.
        return style_name

    if colorrampshader.attrib["colorRampType"] != "INTERPOLATED":
        # This colorramp should not have a continuous legend, so the "Label unit
        # suffix" will be used if specified.
        return style_name

    rampLegendSettings = colorrampshader.find(".//rampLegendSettings")
    # BUG FIX: compare against None explicitly. An ElementTree Element with no
    # *child* elements is falsy, and <rampLegendSettings> normally carries only
    # attributes, so the previous `if not rampLegendSettings:` treated a present
    # settings element as missing and raised spuriously.
    if rampLegendSettings is None:
        raise exc.QgrInvalidConfigError(
            f"{error_prefix}\n"
            " * Continuous (i.e. linearly interpolated) colorramps must be"
            " re-configured with a newer version of QGIS to support continuous legends."
            ' Please ensure that the unit of measurement is populated in the "Suffix"'
            ' field, and that the "Orientation" field is set to "Horizontal".\n'
            f"{error_suffix}"
        )

    if (orientation := rampLegendSettings.attrib["orientation"]) != "1":
        errors.append(
            f'"Orientation" must be horizontal ("1"). Is currently "{orientation}"'
        )

    if not (suffix := rampLegendSettings.attrib["suffix"]):
        # Populating " " is a workaround for the rare case a layer with a continuous
        # legend has no unit of measurement. Validating the Suffix matches the
        # "Layer unit suffix" may be a more thorough check, but it's much more
        # difficult because QGIS doesn't store "Layer unit suffix" in any particular
        # field. It's calculated on-the-fly in an unreliable way, depending on QGIS
        # version.
        errors.append(
            f'"Suffix" must contain a unit of measurement. Is currently "{suffix}".'
            " If this layer truly has no unit of measurement (e.g. NDVI or a count),"
            ' populate a space (" ") to silence this error'
        )

    if errors:
        # Assemble the bullet list outside the f-string (f-string expressions
        # could not contain backslashes before Python 3.12).
        bullet_list = "\n".join(f" * {err}" for err in errors)
        raise exc.QgrInvalidConfigError(
            f"{error_prefix}\n"
            f"{bullet_list}\n"
            f"{error_suffix}"
        )

    return style_name
| true |
ada479420918d66c2e79aef77beaed07bf36dece | Python | lixiongbiao/RecommendeAlgorithm | /Recommender.py | UTF-8 | 7,319 | 3.21875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Fri Sep 23 11:14:02 2016
@author: Troy
"""
from scipy.sparse import csr_matrix
from scipy.io import mmread
def load_items(filename):
    """Load the item/index mapping file (one tab-separated "index, name" pair per line).

    :param filename: path to the tab-separated index/item file
    :return: a pair of dicts — index (int) -> product name, and
        product name -> index (int)
    """
    items = dict()          # index (int) -> product name
    items_reverse = dict()  # product name -> index (int)
    # `with` guarantees the file is closed even if a malformed line raises;
    # iterating the file object avoids reading everything into memory at once.
    with open(filename) as fh:
        for line in fh:
            fields = line.strip().split('\t')
            # Keys are stored as ints so later lookups by index are type-consistent.
            items[int(fields[0])] = fields[1]
            items_reverse[fields[1]] = int(fields[0])
    return items, items_reverse
def load_visit_sparse_matrix(filename):
    """Read the historical user-visit matrix from a Matrix Market file as CSR."""
    # mmread may return a COO matrix; convert to CSR for fast row slicing.
    return csr_matrix(mmread(filename))
class Recommender(): # recommender class (note: mind underscore usage when naming types)
    def __init__(self,items_filename,visit_filename): # build the recommender from the item/index mapping file and the historical visit-record file (sparse matrix)
        self.items,self.items_reverse=load_items(items_filename) # dict of index -> product, and dict of product -> index
        self.visit_sparse_matrix=load_visit_sparse_matrix(visit_filename) # sparse matrix of historical visit behavior
    def similarity(self,visit_vector,reco_numble,Distance='Jaccard Distance',method='UBCF'): # compute similarity; input is one row vector: a user's sparse visit vector, or a product URL
        if Distance=='Jaccard Distance':
            if method=='UBCF':
                distance_list=[0 for i in range(self.visit_sparse_matrix.shape[0])] # NOTE: must pre-allocate — assigning by index below would fail on an empty list (only append would work)
                for i in range(self.visit_sparse_matrix.shape[0]): # Jaccard distance between the new user and each historical user: q/(q+r+p), where q+r+p = total products browsed by both users minus the pages browsed in common
                    distance_list[i]=(self.visit_sparse_matrix[i].dot(visit_vector.T).todense()[0,0])/(len(visit_vector.nonzero()[0])+len(self.visit_sparse_matrix[i].nonzero()[0])-self.visit_sparse_matrix[i].dot(visit_vector.T).todense()[0,0]) # the numerator uses the dot product of the two sparse vectors to get the count of pages browsed in common
                    # [0,0] selects the single element of the resulting 1x1 matrix product
                max_similarity=[]
                similarity_degree=[]
                for i in range(reco_numble): # collect the top-n most similar users
                    while max(distance_list)==1: # first drop users with similarity 1: their history is identical, so they add no recommendation value
                        distance_list[distance_list.index(1)]=0
                    max_similarity.append(distance_list.index(max(distance_list)))
                    similarity_degree.append(max(distance_list))
                    distance_list[distance_list.index(max(distance_list))]=0
                return max_similarity,similarity_degree
            if method=='PBCF':
                return
        # return value: indices of the top-n most similar users (or products)
    def recommender_new_user(self,user_visit, method='UBCF',reco_numble=2,Distance='Jaccard Distance'): # recommendation entry point: input is the new user's visited-product list, the method (user-based by default), and how many users/products to base recommendations on
        recommend_product_dict=dict()
        recommend_product_list=[]
        if method=='UBCF' and isinstance(user_visit,list): # only proceed when method is user-based and input is a visit-record list
            user_visit_dict=dict()
            row,col,data=[],[],[] # triplets for building the user's sparse visit matrix
            max_similarity_user=[] # list of most-similar users; its length is determined by reco_numble
            for item in user_visit:
                if isinstance(item,str):
                    if item not in self.items_reverse:
                        continue # skip products that are absent from the historical training dictionary
                    row.append(0);
                    col.append(int(self.items_reverse[item])) # set the column for the visited product's index to 1, marking a visit
                    data.append(1)
                    user_visit_dict[item]=1
                elif isinstance(item,int):
                    if item not in self.items:
                        continue # skip products that are absent from the historical training dictionary
                    row.append(0);
                    col.append(item) # set the column for the visited product's index to 1, marking a visit
                    data.append(1)
                    user_visit_dict[self.items[item]]=1
            user_sparse_visit=csr_matrix((data,(row,col)),shape=(1,len(self.items.keys()))) # build the 1-D sparse visit vector with dimension = total number of historical products (so similarity computation has matching dimensions)
            max_similarity_user,max_similarity_degree=self.similarity(user_sparse_visit,reco_numble,Distance,method) # get the indices of related users for the subsequent recommendation
            for i in range(reco_numble):
                trainning_user_dict=dict() # visit records of the historical user used as recommendation source
                for row,col in zip(self.visit_sparse_matrix[max_similarity_user[i]].nonzero()[0],self.visit_sparse_matrix[max_similarity_user[i]].nonzero()[1]): # iterate the nonzero column indices of this user's sparse vector (the row index is always 0; .nonzero() returns two arrays)
                    trainning_user_dict[str(self.items[col])]=1 # collect the visit records of the highly similar historical user
                for item in trainning_user_dict:
                    if item not in user_visit_dict:
                        recommend_product_dict[item]=1
            for item in recommend_product_dict:
                recommend_product_list.append(item)
            print ("Method=%s\nUser_similarity:" %(method))
            for i in max_similarity_degree:
                print(i,end=' ')
            print()
            for item,i in zip(recommend_product_list,range(5)): # i caps the output at 5 recommended products
                print(item,end=' ')
            return
        elif method=='PBCF' and isinstance(user_visit,str): # only proceed when method is product-based and input is a single product name
            pass
        else:
            print ("This method has not been developed, We will perfect the algorithm in the future!")
            return
if __name__=='__main__':
    # Demo run: build the recommender from the shipped data files and
    # recommend for a new user whose only visit is item index 154.
    reco=Recommender('items_numble.txt','visit_sparse_matrix.mtx')
    reco.recommender_new_user([154])
    # Example invocation with product URLs instead of indices:
    #reco.recommender_new_user(['http://www.ehaier.com/product/comment/9667.html','http://www.ehaier.com/product/9667.html?ebi=ref-se-1-1'],reco_numble=3)
| true |
3e109d462704d52e34a9141e7d4228fe135dcea6 | Python | Dayan-Zhanchi/Maze-generator | /constants/dimension_consts.py | UTF-8 | 752 | 3.421875 | 3 | [] | no_license | # lines for the grid
number_of_horizontal_lines = 20
number_of_vertical_lines = 20
# dimensions for the game screen
canvas_width = 600
canvas_height = 900
maze_width = 500
maze_height = 500
# the offsets needed to place the maze in the very middle of the game screen
margin_width = int((canvas_width - maze_width) / 2)
# the bigger the denominator number is for the height the closer the maze is to the upper screen
margin_height = int((canvas_height - maze_height) / 6)
start_x = margin_width
start_y = margin_height
# offset is needed to avoid creating white pixels around the very beginning and end of an erased line
offset = 1
grid_size_x = int(maze_width / number_of_vertical_lines)
grid_size_y = int(maze_height / number_of_horizontal_lines)
| true |
b5bfbf52adc6ec292249e83ed825f39a50c13a65 | Python | AveyBD/rubiks-cube-ai | /main.py | UTF-8 | 726 | 2.671875 | 3 | [] | no_license | import argparse
from train import Train
from run import Run
parser = argparse.ArgumentParser(description='Run the program')
parser.add_argument('mode', metavar='mode', type=str,
help="Specify 'train' or 'run' to run or train the model")
args = parser.parse_args()
if args.mode.upper() == "TRAIN":
# Feel free to change these
t = Train(
lr=0.0001,
seed=0,
check_freq=10000,
check_path="./model_checkpoint",
timesteps=1000000
)
t.main()
if args.mode.upper() == "RUN":
# Feel free to change these
r = Run(
m=2,
path="./model.pkl",
num_episodes=100
)
r.main()
else:
print("Please enter 'train' or 'mode'")
| true |