blob_id string | repo_name string | path string | length_bytes int64 | score float64 | int_score int64 | text string |
|---|---|---|---|---|---|---|
9ecf1df964796de0c75126df6741b822dc1dd8b6 | falecomlara/CursoEmVideo | /ex066.py | 371 | 3.671875 | 4 | # leia varios numeros inteiros
# so vai parar quando for 999
# no final mostre quantos foram digitados e qual a soma deles
# Read integers until the 999 sentinel, then report how many were entered
# and their sum/average.
soma = media = num = cont = 0
while num != 999:
    num = int(input('Digite um valor [999=parar] '))
    if num == 999:
        break  # the sentinel itself is not counted
    soma += num
    cont += 1
# BUG FIX: the average is the sum over the COUNT of values actually read,
# not "soma/2"; also guard against dividing by zero when nothing was read.
media = soma / cont if cont else 0
print (f'A soma dos {cont} valores é {soma} e a media é {media}') |
888b36e1b17ec28ede219f675d23431dae84f3f7 | cpkimber/cs107 | /fib.py | 306 | 3.515625 | 4 | #! /home/cam/anaconda3/bin/python3
"""
file: fib.py
Cameron Kimber
date: 2018-10-24
Class: CSE107
Assignment:
"""
def fib(x):
    """Return the x-th Fibonacci number (fib(0) == 0, fib(1) == 1).

    Iterative implementation: the original double recursion is O(2**x),
    which becomes unusably slow well before x == 40; this runs in O(x)
    with O(1) extra space and returns the same values.
    """
    a, b = 0, 1
    for _ in range(x):
        a, b = b, a + b
    return a
def main():
    """Demo entry point: print the 7th Fibonacci number."""
    print(fib(7))
if __name__ == "__main__":
main()
|
4ed99b6e3f40be6e9de9bd557f9be66079f09760 | lianxiaopeng/python | /python/hello8.py | 559 | 3.796875 | 4 | #装饰器作业
import functools
from collections import Iterable
def log(p1):
    """Decorator exercise supporting both usages.

    * ``@log("sss")`` -- p1 is the (iterable) argument, so the real
      decorator ``log_f1`` is returned; the wrapped call prints its args,
      invokes the original function and returns 1. ``functools.wraps``
      preserves the decorated function's ``__name__``.
    * ``@log`` -- p1 is the function itself (not iterable), so ``log_f3``
      replaces it directly and returns 2.
    """
    # `Iterable` must come from collections.abc: importing it from
    # `collections` has warned since 3.3 and is a hard error in 3.10+.
    from collections.abc import Iterable

    def log_f1(p1):  # the decorator proper: receives the target function
        @functools.wraps(p1)
        def log_f2(*args):
            print("log_f2")
            print(args)
            p1()
            return 1
        return log_f2

    def log_f3(*args):  # used when @log is applied without arguments
        print("log_f3")
        print(args)
        p1()
        return 2

    return log_f1 if isinstance(p1, Iterable) else log_f3
# Decorator-with-argument form: log("sss") runs first and returns the real
# decorator (log_f1), so f1 becomes the wrapped log_f2.
@log("sss")
def f1():
print("f1")
# Bare-decorator form: f2 itself is passed to log, which returns log_f3.
@log
def f2():
print("f1")
#f1 = log("sss")(f1)  # equivalent expansion of the decorator syntax above
f1("abc")
print(f1.__name__)  # "f1" -- preserved by functools.wraps
f2("cba")
print(f2.__name__)  # "log_f3" -- no wraps() on this path
|
ace3da0ea87e1fb7f4da3a2660c2f5048cc6bdb9 | hrshtt/conf-ai-repo | /old_dir/poc/csv_generator.py | 2,500 | 3.828125 | 4 | import csv # import csv library
import pandas
import time_util
def append_to_csv(name, fields):
    """Append `fields` as one row to output/<name>.

    newline='' is required when handing a file object to csv.writer;
    without it the writer emits a blank line between rows on Windows.
    """
    with open('output/' + name, 'a', newline='') as f:
        csv.writer(f).writerow(fields)
def dict_to_csv(name, csv_columns, dict_data):
    """Write dict_data (a list of dicts) to <name>.csv in the current dir.

    csv_columns supplies both the header row and the column order, e.g.
        csv_columns = ['No', 'Name', 'Country']
        dict_data   = [{'No': 1, 'Name': 'x', 'Country': 'India'}, ...]
    """
    csv_file = name + ".csv"
    try:
        # newline='' stops csv from inserting blank lines on Windows
        with open(csv_file, 'w', newline='') as csvfile:
            writer = csv.DictWriter(csvfile, fieldnames=csv_columns)
            writer.writeheader()
            writer.writerows(dict_data)  # one call handles every row
    except IOError:
        print("I/O error")
def list_to_csv(name, csv_columns, list_data):
    """Write list_data (a list of rows) to output/<name>.csv.

    csv_columns is the header row; each element of list_data is one row,
    e.g. [[1.2, 'abc', 3], [1.2, 'werew', 4], [1.4, 'qew', 2]].
    For a flat single-dimension list, wrap each item: writerow([item]).
    """
    csv_file = name + ".csv"
    try:
        # newline='' stops csv from inserting blank lines on Windows
        with open('output/' + csv_file, 'w', newline='') as csvfile:
            writer = csv.writer(csvfile)
            writer.writerow(csv_columns)
            writer.writerows(list_data)  # replaces per-row writerows([data])
    except IOError:
        print("I/O error")
# Using Pandas **WORKS**
#df = pandas.DataFrame(data={"location": list_data})
#df.to_csv("./" + csv_file, sep=',',index=False)
if __name__ == '__main__':
# Smoke test: append one row to output/test.csv
# (the output/ directory must already exist).
name = "test.csv"
fields=['first','second','third']
# This is the start of the program
append_to_csv( name, fields ) # execute main
|
37fa710f1be54fa7f3eaef529b59a0c77142bb87 | SysOverdrive/real-estate-web-scrapper | /helper_functions.py | 896 | 3.59375 | 4 | import csv
def insert_headers_to_csv(csv_path, csv_file_name, headers):
    """Append one row per dict in `headers` (its values) to csv_path + csv_file_name.

    The open() call is moved inside the try block so that a failure to
    open the file is actually caught by the IOError handler (the original
    opened it before the try and would crash there), and the csv.writer
    is created once instead of once per row.
    """
    try:
        with open(csv_path + csv_file_name, 'a', encoding="utf-8", newline='') as f:
            writer = csv.writer(f)
            for header in headers:
                writer.writerow(list(header.values()))
    except IOError:
        print("I/O error")
def clear_csv_file(csv_path):
    """Empty the file at csv_path without deleting it."""
    with open(csv_path, 'r+') as handle:
        handle.truncate(0)
def whitespace_remover(dataframe):
    """Strip leading/trailing whitespace from every string column, in place.

    Only object-dtype (string-like) columns have the .str accessor;
    numeric columns are left untouched.
    """
    for column in dataframe.columns:
        if dataframe[column].dtype == 'object':
            dataframe[column] = dataframe[column].str.strip()
|
ed18162ff3f645766fd91f0e63941a7fb58ff664 | dreamchild7/python-challenge-solutions | /AnuOyeboade/phase1/BASIC/DAY5/Q31.py | 355 | 3.921875 | 4 | """
Write a Python program to compute the greatest common divisor (GCD) of two positive integers.
"""
x = float(input("x = "))
y = float(input("y = "))
f = int(y/2)
def GCD(x, y):
    """Return the greatest common divisor of two positive numbers.

    Uses Euclid's algorithm (O(log min(x, y))) instead of the original
    downward scan, and no longer depends on the module-level global `f`,
    so the function works for any arguments, not only the pair read from
    stdin above. Also avoids the original's ZeroDivisionError when y is 0.
    """
    while y:
        x, y = y, x % y
    return x
print(GCD(x,y)) |
cb890e67626a9f252ffe2f06e552f4df754a0018 | ultrasuper/LearnPython | /playground/recursive_practice.py | 851 | 3.9375 | 4 | # -*- coding:utf-8 -*-
from random import randint
# # Calc the sum of the list
# l = [1,2,3,4]
# def recursive_sum(l:list) -> int:
# if len(l) == 0:
# return 0
# elif len(l) == 1:
# return l[0]
# else:
# return l[0] + recursive_sum(l[1:])
# result = recursive_sum(l)
# print(result)
# Calc the total number of how many numbers in the list
# def calc_numbers(l:list) -> int:
# if not l:
# return 0
# else:
# # return 1 + calc_numbers(l[:-1])
# return l.pop()/
# l = [randint(1,20) for i in range(10)]
# result = calc_numbers(l)
# print(result)
# # return the largest number of the list
# def find_big_num(l, max=0):
# try:
# new = l.pop()
# if new > max:
# max = new
# return find_big_num(l, max)
# except:
# print("The end")
# return max
# l = [1,2,3,4,-1,100]
# result = find_big_num(l)
# print(result)
|
e7f46d84d6b9dd5fa57f127f3979a9d52de64856 | anatulea/Educative_challenges | /Arrays/05_find_second_maximum.py | 1,506 | 3.984375 | 4 | '''
Given a list of size n, can you find the second maximum element in the list? Implement your solution in Python and see if your output matches the correct output!
'''
# Solution #1: Sort and index O(nlogn)
def find_second_maximum(lst):
    """Solution #1: sort a COPY and take the second-largest element, O(nlogn).

    sorted() replaces lst.sort() so the caller's list is no longer
    reordered as a hidden side effect. Returns None for lists shorter
    than 2 elements.
    """
    if len(lst) < 2:
        return None
    return sorted(lst)[-2]
print(find_second_maximum([9, 2, 3, 6]))
# Solution #2: Traversing the list twice #O(n)
def find_second_maximum(lst):
    """Solution #2: two passes, O(n).

    First pass finds the maximum; second pass finds the largest element
    different from it. Returns None (instead of leaking float('-inf'))
    when no second maximum exists, matching solutions #1 and #3 below.
    """
    first_max = float('-inf')
    second_max = float('-inf')
    # find first max
    for item in lst:
        if item > first_max:
            first_max = item
    # find max relative to first max
    for item in lst:
        if item != first_max and item > second_max:
            second_max = item
    return second_max if second_max != float('-inf') else None
print(find_second_maximum([9, 2, 3, 6]))
# Solution #3: Finding the Second Maximum in one Traversal #O(n)
def find_second_maximum(lst):
    """Solution #3: single traversal, O(n).

    Track the largest and second-largest distinct values seen so far;
    return None when the list is too short or every element is equal.
    """
    if len(lst) < 2:
        return
    largest = runner_up = float('-inf')
    for value in lst:
        if value > largest:
            # the previous leader becomes the runner-up
            largest, runner_up = value, largest
        elif runner_up < value != largest:
            runner_up = value
    return runner_up if runner_up != float('-inf') else None
print(find_second_maximum([9, 2, 3, 6]))
|
d86f2b4edbd30c2a96e14869cca83ddffd58bc8e | snowsaturday/SD_project | /main/number_game.py | 2,195 | 3.546875 | 4 | import random
number_pool = 20
# Диван
player_1_win_number = (1, 2, 3)
# клиент
player_2_win_number = (4, 5, 6, 7, 8, 9)
def player_1_pool_generator():
    """Build the couch player's pool of numbers.

    70% of the pool is biased: each value is forced to start with one of
    the winning first digits (player_1_win_number); the remaining 30% are
    fully random values in 1..998. Prints and returns the pool.
    """
    pool = []
    biased_count = int(number_pool) - int((number_pool / 100) * 30)
    for _ in range(biased_count):
        lead = random.choice(player_1_win_number)
        tail = random.choice(range(1, 99))
        pool.append(int('{}{}'.format(lead, tail)))
    random_count = int(number_pool) - int((number_pool / 100) * 70)
    for _ in range(random_count):
        pool.append(random.choice(range(1, 999)))
    print(pool)
    return pool
def player_2_pool_generator():
    """Build the client's pool: number_pool random values in 1..998.

    (The commented-out manual-entry variant from the original draft is
    omitted; values are generated automatically.) Prints and returns
    the pool.
    """
    pool = []
    for _ in range(int(number_pool)):
        pool.append(random.choice(range(1, 999)))
    print(pool)
    return pool
# Simulate 100 games and tally the outcomes for each side.
p1_wins = 0
p2_wins = 0
no_winner = 0
for game in range(0, 100):
p1_pool = player_1_pool_generator()
p2_pool = player_2_pool_generator()
array = []
# Pair the two pools off and keep the products; the FIRST DIGIT of each
# product decides who scores the point below.
for _ in range(0, number_pool):
x = p1_pool.pop()
y = p2_pool.pop()
z = x * y
array.append(z)
print(array)
p1_points = 0
p2_points = 0
for _ in array:
result = str(_)[0]
# print(result)
# Products starting with 1, 2 or 3 score for player 1 (the "couch");
# anything else scores for player 2 (the "client").
if result in '123':
p1_points += 1
else:
p2_points += 1
# Report this game's result (strings are Russian: draw / couch won /
# client won) and update the running totals.
if p1_points == p2_points:
print('Ничья!')
print('{}:{}'.format(p1_points, p2_points))
no_winner += 1
elif p1_points > p2_points:
print('ДИВАН победил!')
print('{}:{}'.format(p1_points, p2_points))
p1_wins += 1
elif p1_points < p2_points:
print('Клиент победил!')
print('{}:{}'.format(p2_points, p1_points))
p2_wins += 1
# Final tallies over all 100 games.
print('ДИВАН Победы: {}'.format(p1_wins))
print('Клиент Победы: {}'.format(p2_wins))
print('Ничьи: {}'.format(no_winner))
|
e29935a75875e56345e323fd6c38b0bcbdb6ff95 | logs10/Exercism-Exercises | /reverse-string/reverse_string.py | 344 | 4.25 | 4 | def reverse(input=''):
"""Reverse a given string, return empty string if empty string passed"""
sorted_list = []
if input:
for index, char in enumerate(input):
sorted_list.append(tuple([index, char]))
sorted_list.sort(reverse=True)
sorted2 = [char[1] for char in sorted_list]
return ''.join(sorted2)
else:
return '' |
02d6ae4fa5feb7264d831dc94d55f0dd72308eff | svvay/CodeWars | /work_directory/Simple_Pig_Latin.py | 569 | 4.0625 | 4 | # Move the first letter of each word to the end of it, then add "ay"
# to the end of the word. Leave punctuation marks untouched.
text = 'Aaa ! Hallo a A world ?'
def pig_it(text):
    """Pig-latinise each word: move its first letter to the end and append
    'ay'. Tokens that are not purely alphabetic (punctuation) pass through
    unchanged.
    """
    def convert(word):
        return word[1:] + word[0] + 'ay' if word.isalpha() else word

    return ' '.join(convert(word) for word in text.split())
print(pig_it(text))
# CODEWARS
# def pig_it(text):
# lst = text.split()
# return ' '.join( [word[1:] + word[:1] + 'ay' if word.isalpha() else word for word in lst]) |
cbe9ffa732efdce8ed52eb2a8dcdcb56d5600764 | Tima222/algorithms | /code.py | 415 | 3.796875 | 4 | # Задано натуральное число А (А?9999). Определить, что больше заданное число А или число, записанное этими же цифрами, но в обратном порядке.
# Read a natural number, reverse its digits via string slicing, and report
# which of the two is larger.
a = int(input())
a = str(a)
b = a[::-1]
if int(a) > int(b):
    print("Число А больше")
elif int(a) < int(b):
    print("Перевернутое число А больше")
else:
    # BUG FIX: for palindromic numbers the original claimed the reversed
    # number was larger; report equality explicitly instead.
    print("Числа равны")
|
c6aa37427c1b14a981799690a576b6df61a84943 | DishantNaik/Python_prac | /hackExe.py | 714 | 3.984375 | 4 | # -*- coding: utf-8 -*-
"""
Created on Sat May 9 22:52:48 2020
Problem description: Given the participants' score sheet for your University Sports Day, you are required to find the runner-up score.
You are given scores. Store them in a list and find the score of the runner-up.
Difficulty Level: Easy
@author: disha
"""
if __name__ == '__main__':
n = int(input())
arr = map(int, input().split())
arr1 = list(arr)
arr1.sort(reverse = True)
if(arr1[0] != arr1[1]):
print(arr1[1])
else:
length = len(arr1)
for i in range(length):
if(arr1[i] != arr1[i +1]):
tmp = arr1[i +1]
break
print(tmp) |
706872aad8e54dd03339b8e634834696d3a629d0 | dergaj79/python-learning-campus | /unit5_function/unit_5_hangman_5_5_1.py | 1,033 | 3.6875 | 4 | import pyfiglet
import sys
from pyfiglet import figlet_format
from pyfiglet import Figlet
custom_fig = Figlet(font='standard')
MAX_TRIES = 6
hangman_banner = pyfiglet.figlet_format("Hangman",font='standard')
print(hangman_banner,MAX_TRIES)
word = input ("Please enter a word: ")
print ("_ " * len(word))
print ()
letter_guessed = input("Guess a letter: ")
def is_valid_input(letter_guessed):
    """
    hangman exercise 5.5.1
    Return True only for a guess that is exactly one alphabetic character.

    BUG FIX: the original elif chain let the empty string fall through to
    the final `return True`; collapsing the logic into one expression
    covers every case, including ''.
    :param letter_guessed: the player's raw input string
    :rtype: bool
    """
    return len(letter_guessed) == 1 and letter_guessed.isalpha()
#print(letter_guessed.lower())
def main():
# Report whether the letter read at module scope is a valid guess.
print(is_valid_input(letter_guessed = letter_guessed))
if __name__ == "__main__":
main()
|
5a559c08cd64b4bdf746224a465de65fdcc9dacb | HatGuy68/practice | /GreatLearning/Python_ML/lesson-3.py | 826 | 3.6875 | 4 | import numpy as np
# Comparing Elements
# Finding common elements in arrays
print('\nFinding common elements in arrays')
n1 = np.array([10, 20, 30, 40, 50])
n2 = np.array([40, 50, 60, 70])
print('\n>>> np.intersect1d(n1,n2)')
print(np.intersect1d(n1,n2))  # sorted intersection -> [40 50]
print('-'*30)
# Finding uncommon elements
print('\nFinding uncommon elements ')
n3 = np.array([1, 2, 3, 4, 5])
n4 = np.array([4, 5, 6, 7])
# In first array
print('\nIn first array')
print('>>> np.setdiff1d(n3,n4)')
print(np.setdiff1d(n3,n4))  # elements of n3 not in n4 -> [1 2 3]
# In second array
print('\nIn second array')
print('>>> np.setdiff1d(n4,n3)')
print(np.setdiff1d(n4,n3))  # elements of n4 not in n3 -> [6 7]
print('-'*30)
# Comparing each element in the two arrays
print('\nComparing each element in the two arrays')
n5 = np.array([5, 6, 7, 8])
n6 = np.array([5, 8, 7, 6])
print('\n>>> n5 == n6')
print(n5 == n6)  # element-wise comparison -> boolean array
print('-'*30)
|
b1e759ed2a4f7bf882bc1003f957bd0ff5e0f147 | childe/leetcode | /continuous-subarray-sum/solution.py | 2,123 | 4.25 | 4 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
https://leetcode.com/problems/continuous-subarray-sum/description/
Given a list of non-negative numbers and a target integer k, write a function to check if the array has a continuous subarray of size at least 2 that sums up to the multiple of k, that is, sums up to n*k where n is also an integer.
Example 1:
Input: [23, 2, 4, 6, 7], k=6
Output: True
Explanation: Because [2, 4] is a continuous subarray of size 2 and sums up to 6.
Example 2:
Input: [23, 2, 6, 4, 7], k=6
Output: True
Explanation: Because [23, 2, 6, 4, 7] is an continuous subarray of size 5 and sums up to 42.
Note:
The length of the array won't exceed 10,000.
You may assume the sum of all the numbers is in the range of a signed 32-bit integer.
"""
class Solution(object):
def checkSubarraySum(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: bool
>>> s = Solution()
>>> s.checkSubarraySum([23, 2, 4, 6, 7], k=6)
True
>>> s.checkSubarraySum([23, 2, 6, 4, 7], k=6)
True
>>> s.checkSubarraySum([23, 2, 6, 4, 7], k=0)
False
>>> s.checkSubarraySum([0, 0], k=0)
True
>>> s.checkSubarraySum([0], k=0)
False
>>> s.checkSubarraySum([1,2,3], k=6)
True
>>> s.checkSubarraySum([0, 0], k=-1)
True
"""
if not nums:
return False
# Zero-sum special case: a subarray summing to 0 (= 0*k for any k)
# exists iff two ZEROS are adjacent; `flag` remembers whether the
# previous element was 0.
flag = False
for n in nums:
if n != 0:
flag = False
continue
if flag:
return True
flag = True
# Past this point no adjacent-zero pair exists, so k == 0 is hopeless.
if k == 0:
return False
# A sum is a multiple of k iff it is a multiple of -k; normalise.
if k < 0:
k = -k
# NOTE(review): this early-out assumes all nums are non-negative, as
# the problem statement guarantees.
if k > sum(nums):
return False
# r[i] = set of (subarray sum mod k) over subarrays ENDING at i with
# length >= 1. Seeing 0 while extending (length >= 2) means success.
r = [set() for n in nums]
r[0].add(nums[0] % k)
for i, n in enumerate(nums):
if i == 0:
continue
# extend every subarray that ended at i-1 by nums[i]
for e in r[i-1]:
r[i].add((n+e) % k)
if 0 in r[i]:
return True
# the length-1 subarray [n] is added AFTER the check so a single
# multiple of k does not count as a result
r[i].add(n % k)
return False
|
5dafd3dc47af284c5295ddebbe06ee8200ad8251 | nadiamarra/learning_python | /grades_average.py | 349 | 3.953125 | 4 | def calculate_average(assignment_grades):
'''(list of lists[str, num])->float
Return the average of grades in assignment_grades.
>>>calculate_average([['Assig1',80],['Assig2',90]])
85.0
'''
result=0
for item in assignment_grades:
result+=item[1]
return result/len(assignment_grades)
|
7859818a79fff62f392011d6ac619180cd54233e | jaldd/python | /jichu/file/encode.py | 233 | 3.5625 | 4 | # -*- coding:utf-8 -*-
# Author:Dan Li
import sys
# Demonstrate Python 3 str/bytes encoding round-trips.
print(sys.getdefaultencoding())  # normally 'utf-8'
s="你好"
s_gbk=s.encode("gbk")  # bytes in the GBK encoding
print(s)
print(s_gbk)
print(s.encode())  # default (UTF-8) bytes
# decode the GBK bytes back to str, then re-encode as UTF-8 bytes
print("utf8",s_gbk.decode("gbk").encode("utf-8"))
print(s_gbk.decode("gbk"))  # round-trip back to the original text
|
17fe566dcbffd4e98127f81962df564d85c40b08 | Ale-Natalia/Games_Python | /battleship/game.py | 3,842 | 3.953125 | 4 | from players import Player, Human, Computer
import random
class Game(object):
"""Battleship game coordinator over two boards.

Board 1 belongs to the human (player 1); board 2 to the computer.
"""
def __init__(self, gameBoard1, gameBoard2, player1, player2):
self._gameBoard1 = gameBoard1
self._gameBoard2 = gameBoard2
self._player1 = player1
self._player2 = player2
# NOTE(review): __init__ only defines _gameBoard1/_gameBoard2, so this
# property (and its setter) raises AttributeError on self._gameBoard --
# confirm which board it was meant to proxy.
@property
def Ships(self):
return self._gameBoard.Ships
@Ships.setter
def Ships(self, ships):
self._gameBoard.Ships = ships
def visualBoardForPlayer(self, player):
# board as seen by its owner (ships visible)
if player == 1:
return self._gameBoard1.visualBoardForPlayer()
return self._gameBoard2.visualBoardForPlayer()
def visualBoardForOpponent(self, player):
# board as seen by the opponent (ships hidden)
if player == 1:
return self._gameBoard1.visualBoardForOpponent()
return self._gameBoard2.visualBoardForOpponent()
def allShipsPlaced(self, player):
'''
function to determine whether the player placed two valid ships and can start the game
:return: True/False
'''
if player == 1:
return self._gameBoard1.allShipsPlaced()
else:
return self._gameBoard2.allShipsPlaced()
def computerPlaceShips(self):
'''
Randomly place one 3-cell ship (horizontal or vertical) on the
computer's board (board 2); retries recursively until placeShip
accepts the coordinates.
'''
orientation = random.choice(["horizontal", "vertical"])
if orientation == "horizontal":
row1 = random.choice(range(self._gameBoard1.Size))
row2 = row1
row3 = row1
# column1 drawn from the lower half so column3 stays on the board
column1 = random.choice(range(self._gameBoard1.Size//2))
column2 = column1 + 1
column3 = column2 + 1
elif orientation == "vertical":
column1 = random.choice(range(self._gameBoard1.Size))
column2 = column1
column3 = column1
row1 = random.choice(range(self._gameBoard1.Size // 2))
row2 = row1 + 1
row3 = row2 + 1
try:
self._gameBoard2.placeShip(row1, column1, row2, column2, row3, column3)
except Exception:
# invalid/overlapping placement: try again with fresh coordinates
self.computerPlaceShips()
def humanPlaceShips(self, row1, column1, row2, column2, row3, column3):
self._gameBoard1.placeShip(row1, column1, row2, column2, row3, column3)
def placeShips(self, player, row1, column1, row2, column2, row3, column3):
'''
function for placing the ship of the player at the given coordinates
:param player: 1 = human (uses the coordinates), otherwise computer
:param row1:
:param column1:
:param row2:
:param column2:
:param row3:
:param column3:
:return:
'''
if player == 1:
self.humanPlaceShips(row1, column1, row2, column2, row3, column3)
else:
self.computerPlaceShips()
# NOTE(review): computerPlaceShips() is invoked a second time here, so
# the computer may end up placing two ships per call -- verify whether
# this duplication is intended.
self.computerPlaceShips()
def humanAttack(self, rowCoordinate, columnCoordinate):
self._gameBoard2.attack(rowCoordinate, columnCoordinate)
def computerAttack(self):
# fire at a random cell; retry if that cell was already attacked
rowCoordinate = random.choice(range(self._gameBoard1.Size))
columnCoordinate = random.choice(range(self._gameBoard1.Size))
try:
self._gameBoard1.attack(rowCoordinate, columnCoordinate)
except ValueError:
self.computerAttack()
def attack(self, opponent, rowCoordinate, columnCoordinate):
# `opponent` is the side BEING attacked; coordinates are ignored for
# the computer's move, which picks its own target.
if opponent == 1:
self.computerAttack()
elif opponent == 2:
self.humanAttack(rowCoordinate, columnCoordinate)
def loser(self, player):
# True when the given player's board reports all ships sunk
if player == 1:
return self._gameBoard1.loser()
else:
return self._gameBoard2.loser()
def initializeGame(self):
self._gameBoard1.initializeBoard()
self._gameBoard2.initializeBoard() |
54e6a4bd7b0080ad2dbc2cfe6ae610cdd75877e2 | muneerqu/AlgDS | /Part1/03-Conditions/01-Syntax.py | 643 | 4.1875 | 4 | #
#
#
# Only the FIRST truthy branch of an if/elif chain runs; here that is the
# fourth test, so this prints 'elif False 3'.
if False:
print('if True')
elif False:
print('elif False 1')
elif False:
print('elif False 2')
elif True:
print('elif False 3')
elif False:
print('elif False 4')
elif False:
print('elif False 5')
elif False:
print('elif False 6')
else:
print('neither True or False')
print('#########################')
# Dispatch on a value with an elif ladder; with x = 3 this prints 'Three'.
x = 3
if x == 1: # works like switch in C++
print('one')
elif x == 2:
print('Two')
elif x == 3:
print('Three')
elif x == 4:
print('Four')
elif x == 5:
print('Five')
elif x == 6:
print('Six')
elif x == 7:
print('Seven')
else:
print('None of the list')
|
f886849532196b81f6ebde575210419ce866b905 | Parya1112009/mytest | /re_cisco.py | 98 | 3.6875 | 4 | import re
# Python 2 syntax (bare `print` statements below).
str = "abb"  # NOTE(review): shadows the builtin `str`
# r'(.*)?' greedily matches the whole string here; group() is "abb".
# There is only one capture group, so group(2) (commented out) would
# raise IndexError.
match = re.search(r'(.*)?',str)
print match.group()
#print match.group(2)
|
a31738e8dd336afde1656dfd827aa101561dffde | Xingyu-Zhao/algorithm020 | /Week7/Trie.py | 3,083 | 3.59375 | 4 | import collections
class Trie(object):
    """Prefix tree built from nested dicts; '#' marks the end of a word."""

    def __init__(self):
        self.root = {}
        self.end_of_word = "#"

    def insert(self, word):
        """Add `word` to the trie."""
        node = self.root
        for char in word:
            node = node.setdefault(char, {})
        node[self.end_of_word] = self.end_of_word

    def search(self, word):
        """Return True iff `word` was inserted as a complete word."""
        node = self.root
        for char in word:
            if char not in node:
                return False
            node = node[char]
        return self.end_of_word in node

    def startsWith(self, prefix):
        """Return True iff some inserted word starts with `prefix`.

        BUG FIX: the original tested `if char in prefix`, which is always
        true while iterating over `prefix` itself, so the method returned
        False for every non-empty prefix. The membership test must be
        against the current trie node.
        """
        node = self.root
        for char in prefix:
            if char not in node:
                return False
            node = node[char]
        return True
# 1. Naive approach: scan the board for each word -- O(N*m*m*4^k).
# 2. Trie approach: insert all words into one trie, then DFS the board;
#    every generated string is looked up in the trie and emitted only if
#    it is present as a complete word.
dx = [-1, 1, 0, 0]  # the four neighbour offsets (up, down, left, right)
dy = [0, 0, -1, 1]
END_OF_WORD = "#"
# NOTE(review): this free function is a broken draft of solution._dfs
# below -- `result` and `m` are undefined at module scope, `self.n` is
# used outside any class, and `x, y = i + dx[k] + dy[k]` cannot unpack a
# single int. The working version is the method inside `class solution`.
def dfs(board, i, j, cur_word, cur_dict):
cur_word += board[i][j]
cur_dict = cur_dict[board[i][j]]
if END_OF_WORD in cur_dict:
result.add(cur_word)
tmp, board[i][j] = board[i][j], "@"
for k in range(4):
x, y = i + dx[k] + dy[k]
if 0 <= x < m and 0 <= y < self.n\
and board[x][y] != '@' and board[x][y] in cur_dict:
dfs(board, x, y, cur_word, cur_dict)
board[i][j] = tmp
class solution:
def _dfs(self, board, i, j, cur_word, cur_dict):
### extend the current word/trie node; recursion stops when no
### neighbour matches a child of the current node
cur_word += board[i][j]
cur_dict = cur_dict[board[i][j]]
if END_OF_WORD in cur_dict:
self.result.add(cur_word)
###
### current-level work: mark this cell visited with '@'
tmp, board[i][j] = board[i][j], '@'
for k in range(4):
x, y = i + dx[k], j+ dy[k]
### drill down into the next level
if 0 <= x < self.m and 0 <= y < self.n \
and board[x][y] != '@' and board[x][y] in cur_dict:
self._dfs(board, x, y, cur_word, cur_dict)
## restore this level's state (backtrack)
board[i][j] = tmp
def findwords(self, board, words):
# Find every word from `words` that can be traced on `board` by
# moving between adjacent cells without reusing a cell.
if not board or not board[0] : return []
if not words: return []
self.result = set()
# build the trie and insert every word
root = collections.defaultdict()
for word in words:
node = root
for char in word:
node = node.setdefault(char, collections.defaultdict())
node[END_OF_WORD] = END_OF_WORD
self.m, self.n = len(board), len(board[0])
# start a DFS from every cell whose letter begins some word
for i in range(self.m):
for j in range(self.n):
if board[i][j] in root:
self._dfs(board, i, j, "", root)
return list(self.result)
words = ["oath", "pea", "eat", "rain"]
board = [['o', 'a', 'a', 'n'],
['e', 't', 'a', 'e'],
['i', 'h', 'k', 'r'],
['i', 'f', 'l', 'v']]
aa = solution()
result = aa.findwords(board = board, words= words)
print(result)
|
66a6efc546c34cdf9947380e5ca5c52b77949917 | lc-orozco/Leisure-Programming | /Programming—Python/todo.py | 1,865 | 3.890625 | 4 | class Todo():
def __init__(self):
# chores entered so far, in input order
self.data = []
def add(self, chore):
"""Append one chore to the list."""
self.data.append(chore)
def length_dec(self):
"""Return the decimal digits of len(data), least significant first
(e.g. 12 chores -> [2, 1]); returns [] when the list is empty."""
num = len(self.data)
pos_nums = []
while (num != 0):
pos_nums.append(num % 10)
num = num // 10
return pos_nums
def length(self):
"""Return the number of chores."""
return len(self.data)
def main():
    """Collect chores until the user types Stop, then display them.

    BUG FIX: the original tested `it == 0` to detect an empty list, but
    its iteration counter was already 1 by the time Stop was typed first,
    so the "no chores" message could never appear; testing the actual
    list length is what was intended. `exit()` calls are replaced with
    `return` so the function does not kill the interpreter.
    """
    todo = Todo()
    print("Got anything to do today? List it here (Type Stop now to exit or at any time to conclude the list): ")
    print("")
    while True:
        chores = input()
        if chores.replace(" ", "") == "":
            continue  # ignore blank entries
        if str.upper(chores) == 'STOP':
            break
        todo.add(chores)
    print("")
    if todo.length() == 0:
        print("There are no chores to complete today, be the rest of your day okay!")
        return
    if todo.length() >= 10:
        # length_dec() yields digits least-significant-first, so index 1
        # is the tens digit -- "There are N0+ chores ..."
        tens = todo.length_dec()[1]
        print("There are " + str(tens * 10) + "+ chores to display, want me to continue? (Reply Yes or No)")
        while True:
            cont = input()
            print("")
            if str.upper(cont) == "YES":
                break
            if str.upper(cont) == "NO":
                return
            print("Please type either Yes or No")
            print("")
    print("Here is the given list of chores to complete today:")
    for i in range(todo.length()):
        print(f'{i + 1}-{todo.data[i]}')
main() |
bb35031d8fd1b9f04ed66fd01f5dfb16db767629 | wwstory/leetcode | /easy/29.两数相除.py | 1,332 | 3.640625 | 4 | # 超时
# class Solution:
# def divide(self, dividend: int, divisor: int) -> int:
# count = 0
# symbol = (dividend < 0) ^ (divisor < 0)
# dividend = abs(dividend)
# divisor = abs(divisor)
# while dividend >= divisor:
# dividend -= divisor
# count += 1
# if symbol:
# count = -count
# return count
# 题解
class Solution:
def divide(self, dividend: int, divisor: int) -> int:
"""Integer-divide without *, / or % via bit-shift long division."""
# the result is negative iff exactly one operand is negative
symbol = (dividend < 0) ^ (divisor < 0)
count = 0
result = 0
dividend = abs(dividend)
divisor = abs(divisor)
# double the divisor until it exceeds the dividend ...
while dividend >= divisor:
count += 1
divisor <<= 1
# ... then walk the shifted divisors back down, emitting one
# quotient bit per step
while count > 0:
count -= 1
divisor >>= 1
result <<= 1
if dividend >= divisor:
result += 1
dividend -= divisor
if symbol:
result = -result
# clamp into the signed 32-bit range required by the problem
return result if -(1<<31) <= result <= (1<<31) - 1 else (1<<31) - 1
if __name__ == "__main__":
dividend_list = [10, 7, -7, -10, 0, 0, 100, -2147483648]
divisor_list = [3, -3, 3, -3, -1, 1, 3, -1]
for dividend, divisor in zip(dividend_list, divisor_list):
print(Solution().divide(dividend, divisor))
|
a77b1a6528d53df66b2ecf424a8bb666ed20a3ef | StephenwCrown/PayrollProject | /payroll.py | 839 | 4.125 | 4 | #Payroll#
#Name,Gross,Tax,Net#
#gross pay = hours worked x hourly pay rate#
#gross = total#
#tax = percentage taken off the gross#
#tax = static variable#
#name = user input#
#gross shaved off by tax end goal is net pay#
def payroll():
    """Interactive payroll: ask for hours worked, show gross and net pay.

    Gross = hours * hourly rate; net = gross minus a flat 20% tax.
    Pay is recomputed AFTER the hours are entered, so the summary always
    reflects the latest figure (the original computed it at the top of
    the loop, one iteration late, and carried an unused `pay` variable).
    """
    tax = 0.2          # flat tax rate taken off the gross
    hourly_pay = 10
    hours_worked = 0
    name = input('Enter Your name: ')
    menu = ""
    while menu != "q":
        menu = input(f"Welcome to the menu {name}: Press 1 to enter hours worked, Press q to quit")
        if menu == "1":
            hours_worked = int(input("Enter your hours worked"))
        grosspay = hourly_pay * hours_worked
        yourpay = grosspay - grosspay * tax  # net pay after tax
        print(f"Your name is: {name} and you worked {hours_worked} hours for a total gross of {grosspay} and net {yourpay}")
payroll() |
8a43e2919e3ef2faae147c667aac71a9f2195583 | wapj/justpython | /chapter7/6_lotto_generator.py | 399 | 4.03125 | 4 | # 로또 생성기 만들기
import random
def get_lotto():
    """Draw six distinct lotto numbers in 1..46, sorted ascending."""
    picked = set()
    while len(picked) < 6:
        picked.add(random.randrange(1, 47))
    return tuple(sorted(picked))
if __name__ == '__main__':
# a set de-duplicates identical tickets, so up to 5 unique lines print
lotto_set = set()
for x in range(5):
lotto_set.add(get_lotto())
for x in lotto_set:
print(x)
|
3dd392c1b9a0e6978cb3729d3d61a92bdd6c5b23 | knutab/INF1100 | /gaussian2.py | 1,137 | 3.5 | 4 | import math
def gauss(x, m=0, s=1):
    """Normal probability density at x with mean m and std-dev s."""
    z = float(x - m) / s
    return math.exp(-0.5 * z ** 2) / (math.sqrt(2 * math.pi) * s)
"""
Chooses to create the table for the interval x=-5 to x=5 where x is
increased by one for each itteration.
"""
# Tabulate the standard normal pdf for x = -5 .. 5 in unit steps.
# (Python 2 syntax: bare `print` statements below.)
list1 = [] #list created to hold the x values
list2 = [] #list created to hold the f(x) values
x = -5 #Starting value of x
dF = 1 #increment of x
while x <=5:
f_x = gauss(x)
list1.append(x)
list2.append(f_x)
x = x + dF
#Uses same method to print the table as for f2c_approx_table.py
table =[]
for e1, e2 in zip(list1, list2):
table.append([e1, e2])
print '----x-------f(x)------'
print ' '
for e1, e2 in table:
print '%5d %5.10f' %(e1, e2)
"""
Output when running program in windows console gaussian2.py
----x-------f(x)------
-5 0.0000014867
-4 0.0001338302
-3 0.0044318484
-2 0.0539909665
-1 0.2419707245
0 0.3989422804
1 0.2419707245
2 0.0539909665
3 0.0044318484
4 0.0001338302
5 0.0000014867
"""
|
69dc38355e8a9bb8fa9e9d8f58b125cef7c48e36 | aysegultopkara/Python---celcious-to-fahrenheit-converter | /Celcious to Fahrenheit converter.py | 135 | 3.671875 | 4 | celcious = int(input("Please enter celcious: "))
# Same conversion, emitted directly without the intermediate variable.
print(f"{celcious} celcious {(celcious * 9/5) + 32} Fahrenheit")
|
45c41e91dd0afa53a76fbe470b763a617d59825c | hankyeolk/PythonStudy | /자료구조/graph.py | 778 | 3.71875 | 4 | #find friends
fr_info = {
'summer': ['john', 'justin', 'mike'],
'john': ['summer', 'justin'],
'justin': ['john', 'summer', 'mike', 'may'],
'mike': ['summer', 'justin'],
'may': ['justin', 'kim'],
'kim': ['may'],
'tom': ['jerry'],
'jerry': ['tom']
}
def print_all_friends(g, start):
    """Breadth-first traversal from `start`, printing every reachable name
    exactly once (including `start` itself), one per line, in BFS order.
    """
    # deque gives O(1) popleft; the original's list.pop(0) is O(n) per pop
    from collections import deque
    queue = deque([start])   # people still to process
    seen = {start}           # already enqueued -- prevents duplicates
    while queue:
        person = queue.popleft()
        print(person)        # print here so each name appears once
        for friend in g[person]:
            if friend not in seen:
                queue.append(friend)
                seen.add(friend)
print_all_friends(fr_info, 'summer') |
d3be84b381b4a2961738ed00454f3475d953ecff | crossihuy/MyRepo | /better_calculator.py | 2,352 | 4.28125 | 4 | # tried to make +, -, *, / a variable both ways
# plus = "+"
# minus = "-"
# multiply = "*"
# divide = "/"
def main():
    """Five-number calculator: read five numbers and an operator, print
    the combined result, and repeat while the user answers 'yes'.

    Rewritten because the original placed all of the arithmetic inside an
    `except ZeroDivisionError:` handler (so it never executed), looped by
    calling an undefined `cal()`, and chained `.lower()` onto the None
    returned by `print()`.
    """
    def calc():
        # Read the operands and operator; re-prompt until the numbers parse.
        while True:
            try:
                print("***Please enter five numbers.***\n ")
                num_1 = float(input("What is your first number? "))
                f1 = input("Do you want to +(add), -(subtract), *(multiply), or /(divide) \n")
                num_2 = float(input("What is your second number?\n "))
                num_3 = float(input("What is your third number?\n "))
                num_4 = float(input("What is your fourth number?\n "))
                num_5 = float(input("What is your fith number?\n "))
            except ValueError:
                print("You did not enter a number. ")
                continue
            break
        nums = (num_1, num_2, num_3, num_4, num_5)
        if f1 == "+":
            print("{} + {} + {} + {} + {} = ".format(*nums))
            print(num_1 + num_2 + num_3 + num_4 + num_5)
        elif f1 == "-":
            print("{} - {} - {} - {} - {} = ".format(*nums))
            print(num_1 - num_2 - num_3 - num_4 - num_5)
        elif f1 == "*":
            print("{} * {} * {} * {} * {} = ".format(*nums))
            print(num_1 * num_2 * num_3 * num_4 * num_5)
        elif f1 == "/":
            if 0 in nums[1:]:
                # left-to-right division would hit a zero denominator
                print("Error: Can not divide by zero.\n ")
            else:
                print("{} / {} / {} / {} / {} = ".format(*nums))
                print(num_1 / num_2 / num_3 / num_4 / num_5)
        else:
            print("Enter a valid function, ")

    again = "yes"
    while again == "yes":
        calc()
        print("If you want to calculate other numbers type 'yes' \n ")
        again = input().lower()
|
06c9d074c709622b170cb41493734609f7f46a9a | tufts-ml-courses/comp137-dnn-20f-assignments | /assignment1/implementation.py | 1,478 | 4.25 | 4 | import tensorflow as tf
"""
This is a short tutorial of tensorflow. After this tutorial, you should know the following concepts:
1. constant,
2. operations
3. variables
4. gradient calculation
5. optimizer
"""
def regression_func(x, w, b):
    """
    The function of a linear regression model
    args:
        x: tf.Tensor with shape (n, d)
        w: tf.Variable with shape (d,)
        b: tf.Variable with shape ()
    return:
        y_hat: tf.Tensor with shape [n,]. y_hat = x * w + b (matrix multiplication)
    """
    # TODO: implement this function
    # consider these functions: `tf.matmul`, `tf.einsum`, `tf.squeeze`
    # NOTE(review): `y_hat` is undefined until the TODO above is done, so
    # calling this as-is raises NameError.
    return y_hat
def loss_func(y, y_hat):
    """
    The loss function for linear regression
    args:
        y: tf.Tensor with shape (n,)
        y_hat: tf.Tensor with shape (n,)
    return:
        loss: tf.Tensor with shape (). loss = (y - y_hat)^\top (y - y_hat)
    """
    # TODO: implement the function.
    # Consider these functions: `tf.square`, `tf.reduce_sum`
    # NOTE(review): `loss` is undefined until the TODO above is done, so
    # calling this as-is raises NameError.
    return loss
def train_lr(x, y, lamb):
    """
    Train a linear regression model.
    args:
        x: tf.Tensor with shape (n, d)
        y: tf.Tensor with shape (n, )
        lamb: tf.Tensor with shape ()
    """
    # TODO: implement the function.
    # initialize parameters w and b
    # set an optimizer
    # please check the documentation of tf.keras.optimizers.SGD
    # loop to optimize w and b
    # NOTE(review): `w` and `b` are undefined until the TODO above is
    # done, so calling this as-is raises NameError.
    return w, b
|
af89cffe4a2895db671b282b225e131aeb173296 | Ahed-bahri/Python | /upperReverse.py | 105 | 4 | 4 | # My solution
def uppercase_reverse(word):
    """Return `word` reversed and upper-cased, e.g. 'banana' -> 'ANANAB'.

    The original returned only word.upper(), dropping the "reverse" half
    that both the function name and the file name (upperReverse.py) call
    for.
    """
    return word.upper()[::-1]
print (uppercase_reverse("banana")) |
4fb9a093a4c21d5c3065a32514f84138e2fbe94d | timsleeper/python_bootcamp | /day00/ex01/exec.py | 207 | 3.640625 | 4 | import sys
# Join the CLI arguments, reverse the text, swap the case of every letter,
# and print the result only when there is something to show.
full_string = " ".join(sys.argv[1:])
# slice + str.swapcase replace the original per-character loop, which
# rebuilt the string on every step (quadratic time)
rev_string = full_string[::-1].swapcase()
if rev_string != "":
    print(rev_string)
|
8bc23f042af497717fadf582302a9c6301a28d0d | Sciencethebird/Python | /Plotting(Matplotlib)/Basic_Plot.py | 279 | 3.625 | 4 | import matplotlib.pyplot as plt
import numpy as np
# Demo data: a short straight segment and one full period of sine.
xs = [1, 2, 3]
ys = [3, 4, 5]
theta = np.arange(0, 2 * np.pi, 0.01)
sine = np.sin(theta)
plt.plot(xs, ys, label='line')
plt.plot(theta, sine, label='sin')
plt.xlabel('x')
plt.ylabel('y')
plt.title('Simple Graph')
plt.legend()  # legend
plt.show()
f15f4fd593d27bbcdb1d4fcb88ac5f46d4f80ab9 | Mrhairui/leetcode | /22_3.py | 586 | 3.765625 | 4 | # Definition for singly-linked list.
class ListNode:
    """A single node of a singly linked list."""
    def __init__(self, x):
        # Payload value; the successor link starts out detached.
        self.val = x
        self.next = None
class Solution:
    """LeetCode 24: swap every two adjacent nodes of a singly linked list."""

    def swapPairs(self, head: ListNode) -> ListNode:
        # Guard clause: zero or one node left, nothing to swap.
        if head is None or head.next is None:
            return head
        second = head.next
        # Recurse past the current pair, then splice the pair in reverse.
        head.next = self.swapPairs(second.next)
        second.next = head
        return second
# Demo: 1->2->3->4 becomes 2->1->4->3 after pairwise swapping.
solver = Solution()
head = ListNode(1)
tail = head
for value in (2, 3, 4):
    tail.next = ListNode(value)
    tail = tail.next
node = solver.swapPairs(head)
while node:
    print(node.val)
    node = node.next
81747e76b2f6863dd0f86fb097305309a01c106c | mitubaEX/one_day_one_ABC | /ABC/001-050/023/B/main.py | 597 | 3.515625 | 4 | # 'a' + str + 'b'
# 'c' + str + 'a'
# 'b' + str + 'b'
n = int(input())
ans = input()
count = 0
turn = 0
s = 'b'
while True:
if len(s) > n:
print(-1)
break
elif ans == 'b':
print(0)
break
else:
if turn == 0:
s = 'a' + s + 'c'
turn += 1
count += 1
elif turn == 1:
s = 'c' + s + 'a'
turn += 1
count += 1
elif turn == 2:
s = 'b' + s + 'b'
turn = 0
count += 1
if s == ans:
print(count)
break
|
38afe5cbb98e93e8bbd2464479b9452bb2187e06 | LeTailleCrayon/pronote-hooks | /script.py | 806 | 3.515625 | 4 | # coding=utf-8
print("Bienvenue sur PronoteTools ! Version 1.0.0.0")
print("--------------------------------------------")
print("1. Calculer ta moyenne")
select = input("Pour commencer que veux-tu faire ? Saisis le numéro : ")
if(select=="1"):
print("Bienvenue sur le calculateur de moyenne ! Il te suffit de copier-coller le fichier json, vas lire les instructions !")
releve = open('pronote-json.txt', 'r').read()
ms = releve.count("satisfaisante")
mts = releve.count("bonne")
mf = releve.count("fragile")
mi = releve.count("insuffisante")
total = ms+mts+mf+mi
mssur20 = ms*15
mtssur20 = mts*20
mfsur20 = mf*10
misur20 = mi*5
moyennebrut = mssur20 + mtssur20 + mfsur20 + misur20
moyenne = moyennebrut/total
print(moyenne)
|
fa8af2f92490a3e57743662a01062fff898eba59 | aj-michael/NLP | /NLTKExperiments/mapper.py | 674 | 3.671875 | 4 | #!/usr/bin/env python
import sys

from nltk.tokenize import wordpunct_tokenize


def read_input(file):
    """Yield the wordpunct tokens of each line of *file*."""
    for line in file:
        # split the line into tokens
        yield wordpunct_tokenize(line)


def main(separator='\t'):
    """Hadoop-streaming map step: emit "<token><sep>1" for every stdin token.

    The output of this step is the input for the reduce step (reducer.py);
    the trivial count of 1 per token is summed there.

    Bug fixes vs. the original: the two import statements had been merged
    into one invalid line, the inner `for token in tokens:` loop had been
    swallowed into a comment, the emitted variable was the undefined `word`,
    and the Python 2 print statement would not compile under Python 3.
    """
    data = read_input(sys.stdin)
    for tokens in data:
        # Tab-delimited record per token.
        for token in tokens:
            print('%s%s%d' % (token, separator, 1))


if __name__ == "__main__":
    main()
|
2709ef9ecc5996e74482df9901de606c33cb79e0 | rh01/gofiles | /lcode1-99/ex28/generateTrees.py | 976 | 3.75 | 4 | # Definition for a binary tree node.
class TreeNode(object):
    """Binary-tree node holding a value and two child links."""
    def __init__(self, x):
        self.val = x
        # Children start out absent.
        self.left = None
        self.right = None
class Solution(object):
    """LeetCode 95: enumerate all structurally-unique BSTs on 1..n."""

    def generateTrees(self, n):
        """
        :type n: int
        :rtype: List[TreeNode]
        """
        # No keys means no trees (not even the empty one, per the problem).
        if n == 0:
            return []
        return self.createBSTs(1, n)

    def createBSTs(self, start, end):
        """Build every BST whose key set is exactly start..end."""
        if start > end:
            # Exactly one "tree" here: the absent subtree.
            return [None]
        trees = []
        for root_key in range(start, end + 1):
            left_subtrees = self.createBSTs(start, root_key - 1)
            right_subtrees = self.createBSTs(root_key + 1, end)
            # Every left/right combination yields a distinct tree.
            for left in left_subtrees:
                for right in right_subtrees:
                    root = TreeNode(root_key)
                    root.left = left
                    root.right = right
                    trees.append(root)
        return trees
54ff5c40e922d1763d1b298206f8d285df1de720 | bainco/bainco.github.io | /course-files/lectures/lecture13/conditionals/01_leap_year.py | 235 | 3.578125 | 4 | def print_days_in_february(year):
print('...')
print('February 26')
print('February 27')
print('February 28')
if year % 4 == 0:
print('February 29')
print_days_in_february(2019)
print_days_in_february(2020) |
5d004f112d0115ca33f0bc373f2d973f0e9a9a50 | Bachatero/AKOB | /pyscripts/week6.py | 677 | 3.546875 | 4 | #str1 = "Hello"
#str2 = 'there'
#bob = str1 + str2
#print bob
#x = '40'
#y = int(x) + 2
#print y
#x = 'From marquard@uct.ac.za'
#print x[8]
#x = 'From marquard@uct.ac.za'
#print x[14:17]
#print len('banana')*7
#greet = 'Hello Bob'
#print greet.upper()
# Python 3 versions of the week-6 string exercises.  Bug fixes: print is a
# function now, the stray `true` (a NameError) is gone, the delimiter passed
# to find() was an empty string instead of a space, and the host slice used
# `line.find[...]` (a syntax error) instead of `line[...]`.
data = 'From stephen.marquard@uct.ac.za Sat Jan 5 09:14:16 2008'
pos = data.find('.')
print(data[pos:pos+3])

greet = 'Hello Bob'
dir(greet)
nstr = greet.replace("Bob", "Butthead")

greet = ' Hello Bob '
greet.lstrip()
greet.rstrip()
greet.strip()

line = "Please have a nice day"
line.startswith("P")  # -> True

line = "From bergeroso@bergerium.com 6.5.2099"
at_sign_posit = line.find("@")
# A space character delimits the end of the host name.
space_posit = line.find(' ', at_sign_posit)
host = line[at_sign_posit + 1:space_posit]
print(host)
|
4ad5ae44517d7c2fc98682a36a7b2b492b4f7694 | wangzhifengharrison/cs820-csp | /main.py | 4,404 | 3.515625 | 4 | # main.py
#
# AUTHOR
# ---------
# Jhonatan S. Oliveira
# oliveira@uregina.ca
# Department of Computer Science
# University of Regina
# Canada
#
#
# DESCRIPTION
# -----------
# This script is a utility for running all implemented search algorithms.
# After calling the script in a prompt command, the user can input constants and pick a search algorithm.
# For more details, please, see documentation in the README file.
from csp_generator import generate_csp
from csp_inference import backtrack, arc_consistency
from time import time
def main():
    """
    Description
    -----------
    Interactive driver for the CSP solver.

    Prompts for the RB-model constants (n, p, alpha, r), optionally runs
    arc-consistency first, generates a random CSP, solves it with the chosen
    backtracking variant, and reports the solution and the elapsed time.
    Loops until the user declines another round.

    (The previous docstring's example was copied from an unrelated 8-puzzle
    program and has been replaced.)
    """
    keep_running = True
    while keep_running:
        # Input constants
        print()
        print()
        print(">>> !!! Starting Assignment 3 Solution !!! <<<")
        print()
        print(">>> Constants:")
        n = int(input("--> Number of variables (n): "))
        p = float(input("--> Constraint Tightness (p): "))
        alpha = float(input("--> Constant alpha: "))
        r = float(input("--> Constant r: "))
        print()
        # Using AC or not
        print()
        print(">>> Do you wish to run Arc-Consistency before backtrack?")
        use_ac_str = input("--> (y/n): ")
        print()
        use_ac = False
        if (use_ac_str == "y") or (use_ac_str == "Y") or (use_ac_str == "yes") or (use_ac_str == "Yes") or (use_ac_str == "YES"):
            use_ac = True
        # Shows options
        print()
        print(">>> Choose the Search algorithm")
        print(">>> 1) Backtrack Search")
        print(">>> 2) Backtrack Search with Forward Checking")
        print(">>> 3) Backtrack Search with Maintaining Arc Consistency (MAC)")
        # Input search algorithm option
        option = input("--> ")
        print()
        # Generate CSP and run AC if needed
        variables, domains, constrains = generate_csp(n, p, alpha, r)
        ac_result = True
        if use_ac:
            ac_result = arc_consistency(variables, domains, constrains)
        # Print generated CSP
        print()
        print(">>> Generated CSP:")
        print(">>> Variables: " + ",".join(["X"+str(v) for v in variables]))
        print(">>> Domain: " + ",".join([str(v) for v in domains[0]]))
        print(">>> Constrains:")
        for (var1, var2) in constrains:
            print("("+str(var1)+","+str(var2)+"): " + " ".join([str(val1)+","+str(val2) for val1, val2 in constrains[(var1,var2)]]))
        print()
        # If AC can not reduce domain to zero or AC is not run.
        tic = time()
        if ac_result:
            # Run search algorithm
            result = None
            if option == "1":
                result = backtrack({}, variables, domains, constrains)
            elif option == "2":
                result = backtrack({}, variables, domains, constrains, inf_type="FC")
            elif option == "3":
                result = backtrack({}, variables, domains, constrains, inf_type="MAC")
            # Shows result from search algorithm
            if result:
                print(">>> Solution <<<")
                print(", ".join(["X"+str(v)+":"+str(result[v]) for v in result]))
                print()
                print()
            else:
                # NOTE(review): this message also appears when a valid option
                # simply found no solution — confirm the intended wording.
                print(">>> Not a valid choice.")
        # In case AC returns fail.
        else:
            print(">>> You are lucky! Just by running AC we can tell that the CSP has no solution.")
        tac = time()
        # Loop again if users wants to
        print(">>> Solution computed in " + str(tac-tic) + " (s)" )
        print(">>> Want to try again? (Y/N)")
        again = input("--> ")
        if again != "y" and again != "Y":
            keep_running = False
# Run main
if __name__ == "__main__":
    main()
|
e61374feb5c523c3d9e78a15792a7f5f7e67bc88 | hrsh25/Sentiment_Analysis | /main.py | 668 | 3.671875 | 4 | print("<------------------------------Political Tweet Analyser------------------------------>")
print("Have an idea of the political scenario of our country by checking out the sentiments\nof the latest 1000 tweets concerning the three major political parties \n(BJP,Congress,AAP)")
input_ = input("Press 1 for BJP\nPress 2 for Congress\nPress 3 for AAP\n")
# Dispatch table: each menu choice maps to the analysis script it runs.
scripts = {"1": "bjp_data.py", "2": "inc_data.py", "3": "aap_data.py"}
if input_ in scripts:
    chosen = scripts[input_]
    exec(compile(open(chosen).read(), chosen, 'exec'))
else:
    print("Invalid Input")
|
7894c996e9ddeb8cc376a9382a4d8e0175b0d1d4 | sushovanisalive/python_codes | /testing_file.py | 781 | 3.96875 | 4 | import timeit
# def mean_list(in_list):
# sum=0
# for entry in in_list:
# sum = sum + entry
# return (sum/len(in_list))
# print(mean_list([2,5,8,0,8,7]))
def read_file_text(filename):
    """Return the full contents of *filename* as a single string."""
    with open(filename, "r") as handle:
        return handle.read()
def reverse_string(string_text):
    """Return *string_text* with its characters in reverse order."""
    return "".join(reversed(string_text))
# print(reverse_string('I want this text backward'))
def main():
    """Reverse the contents of text_file.txt in place and report timing."""
    # timeit.default_timer() is a real clock; the original's timeit.timeit()
    # benchmarked an empty statement instead of reading the time.
    start = timeit.default_timer()
    print('starting...')
    filename = "text_file.txt"
    reversed_string = reverse_string(read_file_text(filename))
    # `with` guarantees the handle is flushed and closed; the original
    # leaked an open write handle.
    with open(filename, "w") as out_file:
        out_file.write(reversed_string)
    end = timeit.default_timer()
    print('time taken by program: ', end - start)


if __name__ == "__main__":
    main()
|
b3f2d1f00bf6c34be93e249e8692a06f02099605 | ccsreenidhin/Practice_Anand_Python_Problems | /Learning_python_AnandPython/Module/problem11.py | 330 | 4.40625 | 4 | #Problem 11: Write a python program zip.py to create a zip file. The program should take name of zip file as first argument and files to add as rest of the arguments.
import zipfile
def zipf(n, f, ft):
    """Create zip archive *n* containing the files *f* and *ft*.

    Uses a context manager so the archive is finalized and closed even if
    one of the writes raises (the original leaked the handle on error).
    """
    with zipfile.ZipFile(n, mode='w') as zi:
        zi.write(f)
        zi.write(ft)
zipf('zipped','test.txt','a.txt')
|
0d3c05f56d956e941999951b0f55212cdc6ee13a | 360skyeye/kael | /examples/micro_service/caculate_service/__init__.py | 391 | 3.65625 | 4 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@version:
@author:
@time: 2017/5/25 16:07
"""
def add(a, b):
    """Return a + b (the t/y identity pass-throughs are inlined here)."""
    return a + b
def minus(a, b):
    """Return a - b (the t/y identity pass-throughs are inlined here)."""
    return a - b
def multiply(a, b):
    """Return a * b (the t/y identity pass-throughs are inlined here)."""
    return a * b
def t(a):
    """Identity helper for the first operand."""
    return a
def y(b):
    """Identity helper for the second operand."""
    return b
class TMP(object):
    """Tiny stateful placeholder holding a single counter attribute."""
    def __init__(self):
        # Counter starts at zero.
        self.count = 0
if __name__ == '__main__':
    # Parenthesized print is valid on both Python 2 and 3 for one argument;
    # the original bare `print add(1, 2)` is a SyntaxError on Python 3.
    print(add(1, 2))
|
89ed5fd328bc714eefda095559a219f59258b044 | abarson/warm-up-project | /opponentV3.py | 1,279 | 3.609375 | 4 | import random
class Opponent():
    """Computer player for a Go Fish-style card game.

    Difficulty 0 asks for a random card from its hand; difficulties 1 and 2
    prefer the most recently drawn card.
    """

    def __init__(self, deck, difficulty, laidDown,):
        self.deck = deck
        self.difficulty = difficulty
        self.laidDown = laidDown
        self.books = 0
        self.recentCard = None
        self.lastAskedFor = None

    ##depending on difficulty, the Opponent might lie
    def checkDeck(self, user_input):
        """Report whether this opponent's deck holds *user_input*."""
        return self.deck.hasCard(user_input)

    ##depending on difficulty and recentCard, the opponent asks the user for a card
    def ask(self):
        """Choose a rank to ask the user for, based on difficulty."""
        if self.difficulty == 0:
            # Easy: pick any card currently in hand.
            chosen = self.deck[random.randint(0, len(self.deck) - 1)]
            return chosen.rank
        if self.difficulty in (1, 2):
            # Hard/devious: prefer the card most recently added to the hand.
            if self.recentCard is not None:
                return self.recentCard.rank
            # No recent card: scan upward from the last rank asked for.
            # NOTE(review): assumes lastAskedFor is an int here — TODO confirm.
            candidate = self.lastAskedFor + 1
            while not self.deck.hasCard(candidate):
                candidate += 1
            return candidate

    def setRecentCard(self, newrank):
        """Remember the card most recently added to the hand."""
        self.recentCard = newrank
02ef78ef290d4fa7815e6cdb7a2ffc321f3cab94 | dPacc/Web_Dev_Practice | /ContextManagers/cm.py | 796 | 3.65625 | 4 | from contextlib import contextmanager
# Context Manager Using Class
# class Open_File():
#
# def __init__(self, filename, mode):
# self.filename = filename
# self.mode = mode
#
# def __enter__(self):
# self.file = open(self.filename, self.mode)
# return self.file
#
# def __exit__(self, exc_type, exc_val, traceback):
# self.file.close()
#
# with Open_File('sample.txt', 'w') as f:
# f.write('Testing')
#
# print(f.closed)
# Context Manager Using Function
@contextmanager
def open_file(filename, mode):
    """Yield an open file handle and always close it afterwards.

    Bug fix: open() is called *before* the try block — if it raises, the
    original version hit an UnboundLocalError on `f` in the finally
    clause, masking the real I/O error.
    """
    f = open(filename, mode)
    try:
        yield f
    finally:
        f.close()
# Exercise the manager on a throwaway file; the handle is closed on exit.
with open_file('sample.txt', 'w') as handle:
    handle.write('Context Manager with Functions')
print(handle.closed)
# import sys
# print(sys.path)
|
c0604b70dec7d863602fe6ba930050c0d7c87379 | RodiMd/Python | /FeetToInches.py | 829 | 4.21875 | 4 | #Feet to Inches
#accept argument in feet and return value in inches
argInFeet = int(0)   # placeholder; main() reads the real value from input
foot_to_Inch = int (12)   # inches per foot, used by feet_to_inches()
feet = int(0)   # unused module-level placeholder
def main():
    """Prompt for a length in feet and print the equivalent in inches."""
    # input() returns a string; convert before doing arithmetic
    # (multiplying a string by an int repeats the string).
    argInFeet = int(input('Enter a value in feet '))
    print(feet_to_inches(argInFeet))
def feet_to_inches(feet):
    """Convert *feet* to inches via the module-level foot_to_Inch factor."""
    return feet * foot_to_Inch
main()
#Note: initially, multiplying the 12-inches-per-foot factor by an entry of
# 12 for argInFeet produced 12121212... — the result you get when you
# multiply a string by a number (string repetition). Until the input was
# converted with int() — i.e. argInFeet = int(argInFeet) — the program
# returned the wrong answer; with the conversion in place it now computes
# the correct result.
|
494e15685671bdcafad49af38559132535724204 | gabrysb/210CT | /Question14.py | 1,226 | 3.90625 | 4 | """ Implement BFS and DFS traversals for the above graph. Save the nodes traversed in sequence to a text file."""
def DFS(self, vertex):
    """Depth-first traversal from *vertex*; returns nodes in visit order.

    This is pre-order: a node is recorded when first popped from the stack.
    Bug fixes vs. the original sketch: list.append/pop instead of the
    non-existent list.push; `while stack` instead of the always-true
    `s != False`; direct adjacency lookup instead of scanning
    range(len(graph)-1), which also skipped the last vertex entirely.
    """
    stack = [vertex]
    seen = []
    while stack:
        u = stack.pop()
        if u not in seen:
            seen.append(u)
            # Push all neighbours of u for later exploration.
            for neighbour in self.graph[u]:
                stack.append(neighbour)
    return seen
def BFS(self, vertex):
    """Breadth-first traversal from *vertex*; returns nodes in visit order.

    Uses a plain list as a FIFO queue (pop(0) dequeues from the front).
    Bug fixes vs. the original sketch: lists have no enqueue/dequeue;
    `q != False` is always true; the adjacency scan skipped the last
    vertex; and the traversal never returned `seen`.
    """
    queue = [vertex]
    seen = []
    while queue:
        u = queue.pop(0)
        if u not in seen:
            seen.append(u)
            # Enqueue all neighbours of u in order.
            for neighbour in self.graph[u]:
                queue.append(neighbour)
    return seen
|
a900abeee21159c7c9c138711a9098738e6b88c7 | spoorthi198/Python_QA | /Linklist/linklistdemo.py | 2,124 | 4 | 4 | class Node:
    def __init__(self,data,next):
        # Payload plus the link to the successor node (None at the tail).
        self.data = data
        self.next = next
class LinkedList:
    """Singly linked list with positional insert/remove helpers."""

    def __init__(self):
        self.head = None

    def insert_at_beggining(self, data):
        """Prepend *data*.  (Name kept as-is for existing callers.)"""
        self.head = Node(data, self.head)

    def print_the_ele(self):
        """Print the list as ' a-->b-->', or a notice when empty."""
        if self.head is None:
            print("linked list is empty")
            return
        itr = self.head
        listr = ' '
        while itr:
            listr += str(itr.data) + '-->'
            itr = itr.next
        print(listr)

    def insert_at_end(self, data):
        """Append *data* after the current tail."""
        if self.head is None:
            self.head = Node(data, None)
            return
        itr = self.head
        while itr.next:
            itr = itr.next
        itr.next = Node(data, None)

    def insert_values(self, data_list):
        """Append every item of *data_list* in order.

        Bug fix: the original silently did nothing unless the list was
        empty; appending now works for empty and non-empty lists alike.
        """
        for data in data_list:
            self.insert_at_end(data)

    def get_length(self):
        """Return the number of nodes in the list."""
        count = 0
        itr = self.head
        while itr:
            itr = itr.next
            count += 1
        return count

    def remove_at(self, index):
        """Remove the node at *index*; raises on out-of-range indices."""
        if index < 0 or index >= self.get_length():
            print("not valid index")
            raise Exception("invalid index")
        if index == 0:
            self.head = self.head.next
            return
        count = 0
        itr = self.head
        while itr:
            if count == index - 1:
                # Unlink the node after itr.
                itr.next = itr.next.next
                break
            itr = itr.next
            count += 1

    def insert_at(self, index, data):
        """Insert *data* so that it sits at position *index*.

        NOTE: as in the original, index == length is rejected, so this
        cannot append at the very end — use insert_at_end for that.
        """
        if index < 0 or index >= self.get_length():
            print("invalid index")
            raise Exception("invalid index")
        if index == 0:
            self.insert_at_beggining(data)
            return
        count = 0
        itr = self.head
        while itr:
            if count == index - 1:
                itr.next = Node(data, itr.next)
                break  # done; the original kept walking harmlessly
            itr = itr.next
            count += 1
# Demo: build a list of names, drop index 2, and show what remains.
ll = LinkedList()
ll.insert_values(['raj','jnana','jayamm','puttamma'])
ll.remove_at(2)
ll.print_the_ele()
ll.get_length()  # return value unused
|
b2e2b96eca9dc9bd734f9625c1ee3747d966ecee | tClown11/Python-Student | /test/textday/days13(20180401)/第七章__装饰器.py | 1,427 | 3.734375 | 4 | #装饰器基础知识
'''@decorate
def target():
print('running target()')
'''
'''#把装饰器通常把函数替换成另一个函数
def deco(func):
def inner():
print('running inner()')
return inner # deco返回inner函数对象
@deco
def target(): # 使用deco装饰target
print('running target()')
print(target()) #调用被装饰的target其实会运行inner
print(target) # 审查target现在是inner的引用
'''
#####装饰器的两大特点是:1.能把被装饰的函数替换成其他函数
# 2.装饰器在加载模块时立即执行
registry = []  # functions decorated with @register, in definition order


def register(func):
    """Registration decorator: record *func* and return it unchanged.

    Bug fix: the original called register.append(func) — functions have no
    append method; the list being populated is the module-level `registry`.
    """
    print('running register(%s)'%func)  # show the function being decorated
    registry.append(func)
    return func
@register  # registered the moment the module is imported
def f1():
    print('running f1()')
@register  # registered the moment the module is imported
def f2():
    print('running f2()')
def f3():
    """Plain function — never decorated, so it is absent from registry."""
    print('running f3()')
def main():
    """Show the registry, then call each demo function once."""
    print('running main()')
    print('registry ->', registry)
    for fn in (f1, f2, f3):
        fn()


if __name__ == "__main__":
    # main() runs only when this module is executed as a script.
    main()
b12e2a4fc6e2ea3aec431617cf1204e52905e2a0 | LarynQi/LarynQi.github.io | /assets/fa20-csm/mentor09.py | 2,592 | 3.953125 | 4 | class Baller:
all_players = []
    def __init__(self, name, has_ball = False):
        """Create a player and add them to the class-wide roster."""
        self.name = name
        self.has_ball = has_ball
        Baller.all_players.append(self)
def pass_ball(self, other_player):
if self.has_ball:
self.has_ball = False
other_player.has_ball = True
return True
else:
return False
class BallHog(Baller):
    """A Baller who refuses to pass under any circumstances."""
    def pass_ball(self, other_player):
        # Hogs keep the ball no matter what.
        return False
>>> alina = Baller('Alina', True)
>>> kenny = BallHog('Kenny')
>>> len(Baller.all_players)
class TeamBaller(Baller):
    """
    >>> jamie = BallHog('Jamie')
    >>> cheerballer = TeamBaller('Ethan', has_ball=True)
    >>> cheerballer.pass_ball(jamie)
    Yay!
    True
    >>> cheerballer.pass_ball(jamie)
    I don't have the ball
    False
    """
    def pass_ball(self, other_player):
        # Worksheet blanks filled in: delegate to Baller.pass_ball, then
        # cheer on success or complain on failure, per the doctest above.
        if Baller.pass_ball(self, other_player):
            print("Yay!")
            return True
        print("I don't have the ball")
        return False
1 2 3 4 5 6 7 [8] 7 6 5 4 3 2 1 [0] 1 [2] 1 0 -1 -2 -3 [-4] -3 -2 -1 [0] -1 -2
>>> tracker1 = PingPongTracker()
>>> tracker2 = PingPongTracker()
>>> tracker1.next()
1
>>> tracker1.next()
2
>>> tracker2.next()
1
class PingPongTracker:
    """Generate the "ping-pong" sequence: counts up from 1, reversing
    direction at every index that is divisible by 8 or contains the
    digit 8.  Each tracker keeps independent state (worksheet blanks
    filled in to match the sequence shown above).
    """

    def __init__(self):
        self.index = 0       # how many times next() has been called
        self.value = 0       # last value returned
        self.direction = 1   # +1 counting up, -1 counting down

    def next(self):
        """Return the next element of the ping-pong sequence."""
        self.index += 1
        self.value += self.direction
        if self.index % 8 == 0 or '8' in str(self.index):
            self.direction = -self.direction
        return self.value
class Musician:
    """A performer; each performance raises their (instance) popularity."""

    popularity = 0

    def __init__(self, instrument):
        self.instrument = instrument

    def perform(self):
        print("a rousing " + self.instrument + " performance")
        # Reads the class attribute on first call, then shadows it with an
        # instance attribute.
        self.popularity += 2

    def __repr__(self):
        return self.instrument
class BandLeader(Musician):
    """A Musician who fronts a band of other musicians."""

    def __init__(self):
        # Deliberately does not call Musician.__init__, so a BandLeader
        # has no .instrument of its own.
        self.band = []

    def recruit(self, musician):
        self.band.append(musician)

    def perform(self, song):
        # Everyone in the band plays, then the class-wide popularity rises.
        for member in self.band:
            member.perform()
        Musician.popularity += 1
        print(song)

    def __str__(self):
        return "Here's the band!"

    def __repr__(self):
        # Space-separated reprs of the band members ("" for an empty band).
        return " ".join(str(member) for member in self.band)
# Worksheet setup: two musicians and an (instrument-less) band leader.
miles = Musician("trumpet")
goodman = Musician("clarinet")
ellington = BandLeader()
class Bird:
    """A bird with a call; instances start out able to fly."""

    def __init__(self, call):
        self.call = call
        self.can_fly = True

    def fly(self):
        # Guard clause: grounded birds get the sad message.
        if not self.can_fly:
            return "Ground control to Major Tom..."
        return "Don't stop me now!"

    def speak(self):
        print(self.call)
class Chicken(Bird):
    """A bird whose speak() also triggers the other bird's call."""
    def speak(self, other):
        # Own call first, then delegate to the companion.
        Bird.speak(self)
        other.speak()
class Penguin(Bird):
    """Flightless by class attribute — but note the inherited __init__
    sets self.can_fly = True on every instance, shadowing it."""

    can_fly = False

    def speak(self):
        # Local variable; self.call is ignored here.
        call = "Ice to meet you"
        print(call)
# Worksheet setup for the speak() question below.
andre = Chicken("cluck")
gunter = Penguin("noot")
>>> andre.speak(Bird("coo"))
|
3db5dd4287977329769878095a83e22b04db9984 | CosmicDisorder/web-caesar | /caesar.py | 497 | 3.9375 | 4 | from helpers import alphabet_position, rotate_character
import string
alphabet = string.ascii_lowercase
def encrypt(text, rot):
    """Rotate every character of *text* by *rot* positions."""
    return "".join(rotate_character(ch, rot) for ch in text)
def main():
    """CLI entry point: `python caesar.py n` then prompts for a message."""
    from sys import argv, exit
    # Guard against a missing argument as well as a non-numeric one;
    # the original crashed with IndexError when run with no arguments.
    if len(argv) == 2 and argv[1].isdigit():
        text = input("Type a message:\n")
        print(encrypt(text, int(argv[1])))
    else:
        print("usage: python caesar.py n")
        exit()

if __name__ == "__main__":
    main()
552bc60993c8bf52f8abd3e72e5bb26cefd702d5 | ohbarye/euler | /python/problem0001.py | 348 | 4.46875 | 4 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# If we list all the natural numbers below 10 that are multiples of 3 or 5, we get 3, 5, 6 and 9. The sum of these multiples is 23.
# Find the sum of all the multiples of 3 or 5 below 1000.
def is_multiple_of_3_or_5(n):
    """True when *n* is divisible by 3 or by 5."""
    return n % 3 == 0 or n % 5 == 0
print sum(filter(is_multiple,range(1,10)))
|
0cccfd233335d8aad637cf270c520418e594638d | emersonsemidio/python | /Desafio63.py | 352 | 3.90625 | 4 | print('-' * 10)
print('Sequência de Fibonacci: ')
a = int(input('Quantos termos você deseja mostrar? '))
# Bug fix: the original unconditionally printed the first two terms, so it
# showed too many values when fewer than 2 were requested.  This version
# emits exactly `a` terms.
atual, proximo = 0, 1
for _ in range(a):
    print(atual, end=' ')
    atual, proximo = proximo, atual + proximo
print('FIM')
|
0cdc081608207ee5de462e75153596d96fdd1eb6 | dr-dos-ok/Code_Jam_Webscraper | /solutions_python/Problem_41/205.py | 3,145 | 3.71875 | 4 | #! /usr/bin/env python
import pprint
import sys
if __name__ == "__main__":
    # Input: the first line is the number of test cases; each case is one
    # "number" written in an arbitrary digit alphabet.
    data = open(sys.argv[1])
    test_count = int(data.readline().strip())
    for count in range(0, test_count):
        number = data.readline().strip()
        #print number
        # Tally how often each non-zero digit occurs in the original number.
        numbers_used = {}
        for char in number:
            if char == "0": continue
            if char in numbers_used:
                numbers_used[char]["count"] += 1
            else:
                numbers_used[char] = {"count": 1,
                                      "order": 0}
        # The digit alphabet, sorted, with '0' forced to the front.
        ordered_numbers = ['0']
        ordered_numbers.extend(sorted(numbers_used.keys()))
        for counter, num in enumerate(ordered_numbers):
            if num == '0': continue
            numbers_used[num]["order"] = counter
        #pprint.pprint(numbers_used)
        # Repeatedly "add one" in the custom digit system until a candidate
        # uses every original digit exactly as often as the input did.
        valid = False
        while not valid:
            #add one
            carry = True
            number_count = {}
            next_number = ""
            for char in number[::-1]:
                #print "Running char: %s - %s" % (number, char)
                if carry:
                    order_index = 0
                    if char == '0':
                        order_index = 1
                    else:
                        order_index = numbers_used[char]["order"] + 1
                    if order_index >= len(ordered_numbers):
                        # Wrapped around the alphabet: keep carrying.
                        order_index = order_index % len(ordered_numbers)
                        carry = True
                    else:
                        carry = False
                    next_number = ordered_numbers[order_index] + next_number
                    if ordered_numbers[order_index] in number_count:
                        number_count[ordered_numbers[order_index]] += 1
                    else:
                        number_count[ordered_numbers[order_index]] = 1
                else:
                    next_number = char + next_number
                    if char in number_count:
                        number_count[char] += 1
                    else:
                        number_count[char] = 1
                #print "%s - %s, carry - %s" % (number, next_number, carry)
                #pprint.pprint(number_count)
            #print "Appending final carry"
            if carry:
                # Overflow past the most significant digit: prepend the
                # smallest non-zero digit.
                next_number = ordered_numbers[1] + next_number
                if ordered_numbers[1] in number_count:
                    number_count[ordered_numbers[1]] += 1
                else:
                    number_count[ordered_numbers[1]] = 1
            number = next_number
            # Candidate is valid only when every non-zero digit appears
            # exactly as often as in the original input.
            valid = True
            for char in ordered_numbers:
                if char == '0': continue
                if char in number_count:
                    #print "number test- %s : %s" % (number_count[char], numbers_used[char]["count"])
                    if number_count[char] != numbers_used[char]["count"]:
                        valid = False
                        break
                else:
                    valid = False
        print( "Case #%s: %s" % (count + 1, number ))
|
ab91ad4b6d3de344fb124a176f90f05baad0a11a | karthik4636/practice_problems | /arrays_and_strings/rotate_array.py | 489 | 3.6875 | 4 | #https://leetcode.com/problems/rotate-array/
class Solution(object):
    """LeetCode 189: rotate a list right by k positions, in place."""

    def rotate(self, nums, k):
        """
        :type nums: List[int]
        :type k: int
        :rtype: void Do not return anything, modify nums in-place instead.
        """
        if not nums:
            return
        k %= len(nums)
        split = len(nums) - k
        # Rebuild through a full-slice assignment so the mutation is
        # visible to the caller's list object.
        nums[:] = nums[split:] + nums[:split]
# Quick manual check: rotate a sample list right by three positions.
solver = Solution()
solver.rotate([1, 2, 3, 4, 5, 6, 7], 3)
2f5d44fca06c651bbe6d95fe0d54c68aa99fbcda | phoenix9373/Algorithm | /2020/SWEA_문제/순열_반복문.py | 172 | 4.0625 | 4 | for a in range(3):
for b in range(3):
if a == b:
continue
for c in range(3):
if a == c or c == b:
print(a, b, c) |
10403d2e4058a56fb5a37ea803ae7266d0ae2846 | juneharold/PH526x_UPFR | /hw2sol.py | 2,810 | 3.734375 | 4 | # Ex 1
import numpy as np
import random
def create_board():
    """Return an empty 3x3 tic-tac-toe board of integer zeros."""
    return np.zeros((3, 3), dtype=int)
board = create_board()
# Ex 2
def create_board():
    """Ex 2 re-definition: build and return a fresh 3x3 zero board."""
    grid = np.zeros((3, 3), dtype=int)
    return grid
board = create_board()
def place(board, player, position):
    """Mark *position* for *player* if the square is empty; return board."""
    if board[position] == 0:
        board[position] = player
    return board
place(board,1,(0,0))
# Ex 3
def possibilities(board):
    """List the (row, col) index pairs of all empty squares."""
    empties = np.where(board == 0)
    return list(zip(*empties))
possibilities(board)
# Ex 4
# write your code here!
def random_place(board, player):
    """Place *player* on one uniformly-random empty square, if any exists."""
    options = possibilities(board)
    if options:
        place(board, player, random.choice(options))
    return board
random_place(board, 2)
# Ex 5
# Ex 5: three random moves per player on a fresh board, then show it.
board = create_board()
for i in range(3):
    for player in [1, 2]:
        random_place(board, player)
print(board)
# Ex 6
def row_win(board, player):
    """True when *player* occupies an entire row of the board.

    (The original kept an unused `winner` flag; it has been dropped.)
    """
    # axis=1 collapses columns, testing each row for a full match.
    return bool(np.any(np.all(board == player, axis=1)))
row_win(board, 1)
# Ex 7
def col_win(board, player):
    """True when *player* occupies an entire column of the board."""
    # axis=0 collapses rows, testing each column for a full match.
    return bool(np.any(np.all(board == player, axis=0)))
col_win(board,1)
# Ex 8
def diag_win(board, player):
    """True when *player* occupies a full diagonal.

    Bug fix: the original only tested the main diagonal, so a win on the
    anti-diagonal was missed.  Both diagonals are checked now.
    """
    return bool(np.all(np.diag(board) == player)
                or np.all(np.diag(np.fliplr(board)) == player))
diag_win(board, 1)
# Ex 9
def evaluate(board):
    """Return the winning player, -1 for a full-board draw, or 0 otherwise."""
    winner = 0
    for player in [1, 2]:
        # Any complete row, column or diagonal wins for this player.
        if row_win(board, player) or col_win(board, player) or diag_win(board, player):
            winner = player
    if winner == 0 and np.all(board != 0):
        winner = -1
    return winner
evaluate(board)
# Ex 10
# write your code here!
def play_game():
    """Play random moves until someone wins or the board fills; return winner."""
    board = create_board()
    winner = 0
    while winner == 0:
        for player in [1, 2]:
            random_place(board, player)
            winner = evaluate(board)
            if winner != 0:
                break
    return winner
# Ex 11
# Ex 11: simulate 1000 fully-random games with a fixed seed; the counting
# prints are left commented out in a string, as in the notebook.
random.seed(1)
games = [play_game() for i in range(1000)]
"""print(games.count(1))
print(games.count(2))
print(games.count(0))
print(games.count(3))"""
# Ex 12
def play_strategic_game():
    """Like play_game, but player 1 pre-claims the centre square."""
    board, winner = create_board(), 0
    board[1, 1] = 1
    while winner == 0:
        # Player 2 moves first since player 1 already took the centre.
        for player in [2, 1]:
            random_place(board, player)
            winner = evaluate(board)
            if winner != 0:
                break
    return winner
play_strategic_game()
# Ex 13
# Ex 13: 1000 strategic games; print the tally of each outcome.
games = [play_strategic_game() for i in range(1000)]
games.count(1)  # value discarded
print(games.count(1))
print(games.count(2))
print(games.count(0))
print(games.count(3))
|
891650b4106eeab0af928beb32e8b6cfce61ab77 | chabberwock/PlaystoreDownloader | /playstore/util.py | 1,174 | 3.5 | 4 | #!/usr/bin/env python
# coding: utf-8
import itertools
import logging
import time
logger = logging.getLogger(__name__)


def retry(delays=(1, 2, 3, 5), exception=Exception):
    """
    Retry decorator.

    Retry the execution of the wrapped function/method in case of specific
    errors, for a specific number of times (specified by delays).

    :param delays: The delays (in seconds) between consecutive executions
                   of the wrapped function/method.
    :param exception: The exception to check (may be a tuple of exceptions
                      to check). By default, all the exceptions are checked.
    """
    def wrapper(function):
        def wrapped(*args, **kwargs):
            # A trailing None marks the final attempt (no delay follows it).
            for delay in itertools.chain(delays, [None]):
                try:
                    return function(*args, **kwargs)
                except exception as err:
                    if delay is None:
                        logger.error("{0} (no more retries)".format(err))
                        raise
                    logger.warning("{0} (retrying in {1}s)".format(err, delay))
                    time.sleep(delay)
        return wrapped
    return wrapper
|
cc2eca507366a487558dd2aec4566f75b037a1e2 | TimRock23/Algorithms | /greedy_algorithms/H.py | 140 | 3.640625 | 4 | def h(string):
word = string[-1]
return len(word)
if __name__ == '__main__':
    # Read one space-separated line and report the last word's length.
    string = input().split(' ')
    print(h(string))
|
884cb2e8c977a466621f6aec0cf2fb0d7ae0ab42 | Pizzabakerz/codingsuperstar | /python-codes/codes/10.conditions_example.py | 463 | 4.15625 | 4 | '''
if
else
else if => elif
SYNTAX:
if condition:
body of if
elif condition:
body of elif
elif condition_2:
        body of elif
.
.
.
elif condition_n:
        body of elif
.
else:
body of else
'''
a = 10
# a < 10 is false, so nothing is printed here.
if a< 10:
    print(True)
# a > 10 is false, so the else branch prints False.
if a>10:
    print(True)
else:
    print(False)
# a != 10 is false; the first elif (a == 10) matches and prints True.
if a != 10:
    print(False)
elif a == 10:
    print(True)
elif a >10:
    print(False)
else:
    print(False)
cf01ff1323204462e2fa67521743293db030d45f | elertan/HR-Minigames | /Minigames/Quinten/Game.py | 1,221 | 3.625 | 4 | from Minigame import Minigame
class QuintenGame(Minigame):
    """Skeleton minigame: every framework hook below must be overridden by
    a concrete implementation; calling any of them as-is raises
    NotImplementedError.
    """

    def __init__(self):
        # Registers with the framework.  NOTE(review): the arguments are
        # presumably (name, author, player count) — confirm against
        # Minigame.__init__.
        super(QuintenGame, self).__init__("QuintenGame", "Quinten", 4)

    # When a player starts this minigame
    def enter(self):
        raise NotImplementedError("You need to override the enter method on your minigame.")

    # When a player leaves this minigame
    def leave(self):
        raise NotImplementedError("You need to override the leave method on your minigame.")

    def handleEvents(self, events):
        raise NotImplementedError("You need to override the handleEvents method on your minigame.")

    # Gets called on every frame
    def update(self, dt):
        raise NotImplementedError("You need to override the update method on your minigame.")

    # Gets called on every frame
    def updatePreview(self, dt):
        raise NotImplementedError("You need to override the updatePreview method on your minigame.")

    # Draw the current game state
    def draw(self, surface):
        raise NotImplementedError("You need to override the draw method on your minigame.")

    def drawPreview(self, surface):
        raise NotImplementedError("You need to override the drawPreview method on your minigame.")
516fba080f10955ba00763bda4cdc4ca289d5530 | LuoJiaji/LeetCode-Demo | /Contest/weekly-contest-153/A.py | 898 | 3.921875 | 4 | class Solution(object):
def distanceBetweenBusStops(self, distance, start, destination):
"""
:type distance: List[int]
:type start: int
:type destination: int
:rtype: int
"""
l = sum(distance)
# print(l)
if start > destination:
start, destination = destination, start
c = sum(distance[start: destination])
rc = l - c
# print(c, rc)
return min(c, rc)
# Manual checks: expected shortest distances are 1, 3 and 4 respectively.
distance = [1,2,3,4]
start = 0
destination = 1
result = Solution().distanceBetweenBusStops(distance, start, destination)
print(result)
distance = [1,2,3,4]
start = 0
destination = 2
result = Solution().distanceBetweenBusStops(distance, start, destination)
print(result)
distance = [1,2,3,4]
start = 0
destination = 3
result = Solution().distanceBetweenBusStops(distance, start, destination)
print(result)
4ab3a2adbfb7c552bac33d4f8a6c094fc3fb1f04 | Josmi27/project-3-Destination-Travel-App | /tests/unit_tests.py | 2,633 | 3.546875 | 4 | import unittest
import chatbot
import models
import jokeapi
from chatbot import *
class ChatbotTests(unittest.TestCase):
    """Unit tests for Chatbot.response, one test per supported command.

    NOTE(review): `self` (a TestCase) is passed where Chatbot.response
    expects its own instance — this works only while response ignores it.
    """

    def test_help(self):
        response = chatbot.Chatbot.response(self, "!! help ")
        self.assertEqual(response, "Want me to say more? I respond to: !! about, !! say <something>, !! quotes, !! joke, !! island, !! tips, !! meditation and !! imagine")

    def test_about(self):
        response = chatbot.Chatbot.response(self, "!! about ")
        self.assertEqual(response, 'Welcome to the Relaxation Chatroom! Here you will be able to escape from the stresses of your life and relaaaaaaax :)')

    def test_say_something(self):
        response = chatbot.Chatbot.response(self, "!! say hello")
        self.assertEqual(response, "hello")

    def test_imagine(self):
        response = chatbot.Chatbot.response(self, "!! imagine ")
        # The reply is a random scene, so only membership is checked.
        for element in random.sample(scenes, 2):
            self.assertTrue(element in scenes)

    def test_meditation(self):
        response = chatbot.Chatbot.response(self, '!! meditation ')
        self.assertEqual(response, "For more help relaxing, I would recommend downlaoding the Calm application on your phone.")

    def test_tips(self):
        response = chatbot.Chatbot.response(self, "!! tips ")
        self.assertEqual(response, 'Want to know how to relax after a stressful day?: take a shower, prepare you favorite meal, do not think about any of your troubles!')

    def test_joke(self):
        response = chatbot.Chatbot.response(self, "!! joke ")
        self.assertEqual(response, jokeapi.rand_joke)

    def test_quotes(self):
        response = chatbot.Chatbot.response(self, "!! quotes ")
        chatbot_quotes=[" Difficult roads often lead to beautiful destinations", "I promise you nothing is as chaotic as it seems", "Act the way that you want to feel.", "Tension is who you think you should be. Relaxation is who you are."]
        # self.assertEqual(response, chatbot_quotes[random.randint(0,len(chatbot_quotes)-1)])
        # The quote is chosen at random, so only membership is checked.
        for element in random.sample(chatbot_quotes, 3):
            self.assertTrue(element in chatbot_quotes)

    def test_no_response(self):
        response = chatbot.Chatbot.response(self, " ")
        self.assertEqual(response, "I'm sorry, I don't understand what you're saying. Try '!!help for commands I understand.'")

    def test_island (self):
        response = chatbot.Chatbot.response(self, "!! island ")
        self.assertEqual(response,"For extra relaxation, considering visiting an island in the Caribbean, like the Bahamas!")


if __name__ == '__main__':
    unittest.main()
b64b002718fd0fc328c1c06e8c22649a36b0d1ec | saparia-data/data_structure | /geeksforgeeks/greedy/3_Job_Sequencing_Problem.py | 1,956 | 4.125 | 4 | '''
Given a set of N jobs where each job i has a deadline and profit associated to it.
Each job takes 1 unit of time to complete and only one job can be scheduled at a time.
We earn the profit if and only if the job is completed by its deadline. The task is to find the maximum profit and the number of jobs done.
Jobs will be given in the form (Job id, Deadline, Profit) associated to that Job.
Example 1:
Input:
N = 4
Jobs = (1,4,20)(2,1,10)(3,1,40)(4,1,30)
Output: 2 60
Explanation: 2 jobs can be done with
maximum profit of 60 (20+40).
Example 2:
Input:
N = 5
Jobs = (1,2,100)(2,1,19)(3,2,27)
(4,1,25)(5,1,15)
Output:2 127
Explanation: 2 jobs can be done with
maximum profit of 127 (100+27).
https://www.geeksforgeeks.org/job-sequencing-problem/
'''
def printJobScheduling(arr, t):
    """Greedy job sequencing: fill t unit-time slots for maximum profit.

    Each job is a triple (id, deadline, profit).  Jobs are considered in
    decreasing order of profit and each one is placed in the latest free
    slot that is not after its deadline, which maximises total profit.

    Prints the resulting schedule (kept for script output) and also
    returns it: element j is the id of the job run in time unit j, or
    '-1' if that slot stays empty.
    """
    # Most profitable jobs first (replaces the old hand-rolled bubble sort
    # that was left behind as commented-out dead code).
    arr = sorted(arr, key=lambda x: x[2], reverse=True)
    # result[j] is True once time slot j is taken.
    result = [False] * t
    # Job ids per slot ('-1' = empty).
    job = ['-1'] * t
    for i in range(len(arr)):
        # Latest slot this job could occupy: just before its deadline,
        # clamped to the number of available slots.
        mini = min(t - 1, arr[i][1] - 1)
        for j in range(mini, -1, -1):
            if not result[j]:  # free slot found
                result[j] = True
                job[j] = arr[i][0]
                break
    # print the sequence (the debug print of the sorted array was removed)
    print(job)
    return job
# Demo run: five jobs as (id, deadline, profit) scheduled into 3 time slots.
jobs = [
    ['a', 2, 100],
    ['b', 1, 50],
    ['c', 3, 30],
    ['d', 1, 20],
    ['e', 2, 10],
]
printJobScheduling(jobs, 3)
58e421fa960f046ecb339c3ae0d8371b3df104db | csany2020c/Demo | /b_csillagok.py | 2,057 | 3.921875 | 4 | from turtle import Turtle
from turtle import Screen
from random import Random
class TurtleOOP:
    """Draws a night scene with turtle graphics: a dark gradient sky,
    50 random yellow stars and a crescent-moon shape.

    NOTE(review): `turtle` and `screen` are class attributes, so they are
    shared by every instance of TurtleOOP.
    """
    turtle = Turtle()
    screen = Screen()
    def hold(self):
        """Draw the crescent shape (two arcs of different radius, filled)."""
        self.turtle.speed(200)
        self.turtle.penup()
        self.turtle.goto(-200, 20)
        self.turtle.left(200)
        self.turtle.pendown()
        self.turtle.begin_fill()
        # outer arc: 180 x (turn 1 deg right + step 1)
        for i in range(180):
            self.turtle.right(1)
            self.turtle.forward(1)
        self.turtle.left(160)
        # inner arc back, closing the crescent
        for i in range(200):
            self.turtle.left(1)
            self.turtle.forward(1)
        self.turtle.end_fill()
    def bg(self):
        """Paint the background as 40 horizontal bands fading from dark blue to black."""
        for k in range(40):
            self.turtle.goto(-self.screen.window_width()/2, self.screen.window_height()/2 - self.screen.window_height() / 40.0 * k)
            self.turtle.settiltangle(90)
            # blue component shrinks with k: gradient from (0,0,0.5) down
            self.turtle.color((0, 0, 0.5 - k / 80))
            self.turtle.fillcolor((0, 0, 0.5 - k / 80))
            self.turtle.begin_fill()
            # one filled rectangle: full width, 1/40 of the height
            for i in range(2):
                self.turtle.forward(self.screen.window_width())
                self.turtle.right(90)
                self.turtle.forward(self.screen.window_height() / 40)
                self.turtle.right(90)
            self.turtle.end_fill()
    def star(self, a: int):
        """Draw a filled five-pointed star with side length ``a``."""
        self.turtle.begin_fill()
        for i in range(5):
            self.turtle.forward(a)
            self.turtle.left(144)  # 144 deg turns trace a pentagram
        self.turtle.end_fill()
    def __init__(self):
        """Draw the whole scene, then enter the Tk main loop (blocks)."""
        self.turtle._delay(0)
        self.turtle.speed(0)
        r = Random()
        x = r.randint(2, 9)  # random value printed for reference; not used in the drawing
        print(x)
        self.bg()
        self.turtle.fillcolor((1, 1, 0))
        self.turtle.color((1, 1, 0))
        # scatter 50 stars of random size at random on-screen positions
        for i in range(50):
            self.turtle.penup()
            self.turtle.goto(r.randint(0, self.screen.window_width())-self.screen.window_width()/2, r.randint(0, self.screen.window_height())-self.screen.window_height()/2)
            self.turtle.pendown()
            self.star(r.randint(5, 50))
        self.hold()
        self.screen.mainloop()
t = TurtleOOP()
cd7a383d467abd38aceb4c57b6a5e4ccf0f4544a | podoynitsyn-va/GEEKBRAINS-Learning | /2.OOP_in_Python/Regular_Expressions/someone/Theme_01/main.py | 4,622 | 4.03125 | 4 | # Тема 1. Регулярные выражения
# ======================================================================================================================
# 1. Получите текст из файла.
# Примечание: Можете взять свой текст или воспользоваться готовым из материалов к уроку.
# Вспомните операции с чтением файлов и не забудьте сохранить текст в переменную по аналогии с видеоуроками.
import re
try:
with open('text.txt', 'r') as f:
text = f.read()
except FileNotFoundError:
print('Файл text.txt не найден в рабочей папке')
else:
print('-' * 45, 'Задание 1', '-' * 45)
print(text)
print()
# ======================================================================================================================
# 2. Разбейте полученные текст на предложения.
# Примечание: Напоминаем, что в русском языке предложения заканчиваются на . ! или ?.
text_1 = re.split('[.!?]\s', text)
print('-' * 45, 'Задание 2', '-' * 45)
print(text_1)
print()
# ======================================================================================================================
# 3. Найдите слова, состоящие из 4 букв и более. Выведите на экран 10 самых часто используемых слов.
# Пример вывода: [(“привет”, 3), (“люди”, 3), (“город”, 2)].
most_often_four_plus_letters = {}
text_2 = re.findall('\w{4,}', text)
for elem in text_2:
most_often_four_plus_letters[elem] = most_often_four_plus_letters.get(elem, 0) + 1
print('-' * 45, 'Задание 3', '-' * 45)
print(sorted(most_often_four_plus_letters.items(), key=lambda elem: (elem[1], elem[0]), reverse=True)[:10])
print()
# ======================================================================================================================
# 4. Отберите все ссылки.
# Примечание: Для поиска воспользуйтесь методом compile, в который вы вставите свой шаблон для поиска ссылок в тексте.
pattern = re.compile('(\d?[a-z]+.[^\s]+)\.\s')
print('-' * 45, 'Задание 4', '-' * 45)
text_3 = pattern.findall(text)
print(text_3)
print()
# ======================================================================================================================
# 5. Ссылки на страницы какого домена встречаются чаще всего?
# Напоминаем, что доменное имя — часть ссылки до первого символа «слеш». Например в ссылке вида
# travel.mail.ru/travel?id=5 доменным именем является travel.mail.ru.
# Подсчет частоты сделайте по аналогии с заданием 3, но верните только одну самую частую ссылку.
# тут не мог придумать шаблон, пришлось костылить
# добавляем к ссылкам из предыдущего задания "/" - чтобы все были со "/" и преобразуем к списку
# затем перебираем элементы списка и для каждого находим позицию первого вхождения "/"
# все что до этой позиции добавляем в словарь и считаем количество повторений
text_4 = '/ '.join(text_3)
text_4 = text_4.split()
most_often_domain = {}
for elem in text_4:
slice = re.search('/', elem).span()[0]
most_often_domain[elem[:slice]] = most_often_domain.get(elem[:slice], 0) + 1
print('-' * 45, 'Задание 5', '-' * 45)
print(sorted(most_often_domain.items(), key=lambda elem: (elem[1], elem[0]), reverse=True)[:1])
print()
# ======================================================================================================================
# 6. Замените все ссылки на текст «Ссылка отобразится после регистрации».
for elem in reversed(text_3):
text = re.sub(elem, 'Ссылка отобразится после регистрации', text)
print('-' * 45, 'Задание 6', '-' * 45)
print(text)
print()
|
df4814cee56f0c37722b9e998062b9fcd7be795c | RenegaDe1288/pythonProject | /lesson5/mission2.py | 254 | 3.84375 | 4 | temp = int(input('Введите температуру: '))
if 100 >= temp >= 0:
print('Оптимальная температура')
else:
print('Предупреждение: температура вне допустимых границ')
|
bcbd292bc2aaf8298926e2ae3f4a5b5e8ff28420 | seraphbotty/hello-world | /variable examples.py | 648 | 4.1875 | 4 | age = 15 #set the users age
age_in_month = age * 12 #computer the user age in months
age_in_days = age_in_month * 30 #computer the approximate age in days
student_name = 'Jim' #create a string for user name
print("Student", student_name, "is", age, "years old")
#print("Student %s is %d years old" % (student_name, age))
print("If expresed in month", student_name, "is", age_in_month, "months old")
#print("If expressed in month, %s is %d months old" % (student_name, age_in_month))
print("If expresed in days", student_name, "is", age_in_days, "days old")
#print("If expressed in days, %s is %d days old" % (student_name, age_in_days))
|
6027715b93c92550637f94a0771f06098d196a0a | jbmilgrom/practical-algorithms-and-data-structures | /dynamic_programming/maximum_sub_array/test.py | 892 | 3.875 | 4 | from dynamic_programming.maximum_sub_array.method import maximum_subarray
print('############################')
print('Testing maximum_sub_array')
print('############################')
max = maximum_subarray([-1, 3,2,6, -3, 4,5])
assert max == 17, "expected {}; received: {}".format(17, max)
max = maximum_subarray([-1, -3, -2, -6, -3, -4, -5])
assert max == -1, "expected {}; received: {}".format(-1, max)
max = maximum_subarray([-9, -3, -2, -6, -3, -4, -5])
assert max == -2, "expected {}; received: {}".format(-2, max)
max = maximum_subarray([-1, 3,2,6, -3, 4,5, 9, -1])
assert max == 26, "expected {}; received: {}".format(26, max)
max = maximum_subarray([-1, 3,2,6, -3, 4,5, 9, -1, 10, 10])
assert max == 45, "expected {}; received: {}".format(45, max)
max = maximum_subarray([-1, 3,2,6, -3, 4,5, 9, -1, 10, 2, 3, -9])
assert max == 40, "expected {}; received: {}".format(40, max) |
099ff128c5a78b62ab92c7076995baeb5240bdbf | yamadatarousan/python_training | /03.py | 665 | 3.5625 | 4 | # coding:utf-8
# 日本語出力したいときの呪文
import codecs, sys
sys.stdout = codecs.getwriter('cp932')(sys.stdout)
import string
# "Now I need a drink, alcoholic of course, after the heavy lectures involving quantum mechanics."
# という文を単語に分解し,各単語の(アルファベットの)文字数を先頭から出現順に並べたリストを作成せよ.
row_str = "Now I need a drink, alcoholic of course, after the heavy lectures involving quantum mechanics."
replace_str = row_str.translate(string.maketrans("", ""), ",.")
split_srt = replace_str.split(" ")
x = list()
for i in split_srt:
x.append(len(i))
print x
|
868d3a05dcc63290299fde3411369fa67f490252 | tmu-nlp/NLPtutorial2019 | /kiyuna/tutorial00/word_frequency.py | 2,435 | 3.921875 | 4 | '''
ファイルの中の単語の頻度を数えるプログラムを作成
'''
from collections import defaultdict, Counter
def count_word_freq(path, trans=str):
    """Count word occurrences in a text file.

    Args:
        path: path of the file to read.
        trans: normalisation applied to each word before counting,
            e.g. ``lambda w: w.lower()`` for case-insensitive counts.

    Returns:
        defaultdict(int) mapping word -> number of occurrences.
    """
    counts = defaultdict(int)
    with open(path) as src:
        # str.split() with no argument splits on any whitespace, so
        # splitting the whole file is equivalent to line-by-line splitting.
        for raw_word in src.read().split():
            counts[trans(raw_word)] += 1
    return counts
def word_frequency_cnter(path):
    """Return a Counter of the whitespace-separated words in the file at *path*."""
    counter = Counter()
    with open(path) as src:
        for line in src:
            counter.update(line.split())
    return counter
if __name__ == '__main__':
    '''
    「python word* test」-> テスト
    '''
    # Running "python word_frequency.py test" switches to self-test mode.
    import os
    import sys
    import subprocess
    from operator import itemgetter as get
    os.chdir(os.path.dirname(os.path.abspath(__file__))) # cd .
    def message(text):
        # Print in green (ANSI escape codes).
        print("\33[92m" + text + "\33[0m")
    is_test = sys.argv[1:] == ["test"]
    message("[*] count word frequency")
    if is_test:
        path = '../../test/00-input.txt'
    else:
        path = '../../data/wiki-en-train.word'
    cnt = count_word_freq(path)
    if is_test:
        # Dump "word<TAB>count" sorted by word, then diff against the answer.
        fn_out = '00-out.txt'
        with open(fn_out, 'w') as f:
            for key, value in sorted(cnt.items()):
                print(f'{key}\t{value}', file=f)
        message("[*] sh check.sh")
        # Compare with 'test/00-answer.txt'.
        subprocess.run(f'diff -s {fn_out} ../../test/00-answer.txt'.split())
        os.remove(fn_out)
    else:
        # Report vocabulary size and the 10 most frequent words.
        print("[+] 単語の異なり数:", len(cnt), "タイプ")
        print("[+] 数単語の頻度(上位 10 単語のみ)")
        for key, value in sorted(cnt.items(), key=get(1), reverse=True)[:10]:
            print(key, value)
    # Same statistics via collections.Counter (printed to stderr).
    message("[*] collections.Counter を使った場合")
    cnt = word_frequency_cnter(path)
    for key, value in cnt.most_common(10):
        print(key, value, file=sys.stderr)
    # Case-insensitive variant via the trans parameter.
    message("[*] trans=lambda x: x.lower() と指定した場合")
    cnt = count_word_freq(path, trans=lambda x: x.lower())
    for key, value in sorted(cnt.items(), key=get(1), reverse=True)[:10]:
        print(key, value)
    message("[+] Finished!")
c454efbc05cf6d8231e6807312be9d71ccd9cf58 | BruceHi/leetcode | /month12/findNumberIn2DArray.py | 1,823 | 3.84375 | 4 | # 剑指 offer 04. 二维数组中的查找
# 与 240. 搜索二维矩阵 II searchMatrix 一样
from typing import List
class Solution:
    def findNumberIn2DArray(self, matrix: List[List[int]], target: int) -> bool:
        """Search a matrix whose rows and columns are sorted ascending.

        Starts at the bottom-left corner: moving up decreases the value,
        moving right increases it, so each comparison discards a whole row
        or column -- O(m + n) time, O(1) space.  (The two commented-out
        duplicate implementations were removed as dead code.)

        Returns True if target is present, else False.
        """
        if not matrix:
            return False
        rows, cols = len(matrix), len(matrix[0])
        row, col = rows - 1, 0  # bottom-left corner
        while row >= 0 and col < cols:
            value = matrix[row][col]  # single lookup per step
            if value == target:
                return True
            if value < target:
                col += 1   # every value above in this column is even smaller
            else:
                row -= 1   # every value right in this row is even larger
        return False
# Quick demo of Solution.findNumberIn2DArray on the classic example matrix.
s = Solution()
matrix = [
    [1, 4, 7, 11, 15],
    [2, 5, 8, 12, 19],
    [3, 6, 9, 16, 22],
    [10, 13, 14, 17, 24],
    [18, 21, 23, 26, 30]
]
print(s.findNumberIn2DArray(matrix, target=5))   # True: 5 is present
print(s.findNumberIn2DArray(matrix, target=20))  # False: 20 is absent
print(s.findNumberIn2DArray([[]], target=20))    # False: matrix with an empty row
a77a2e519eecd0d3b044eeb23d8260c72a2d7219 | michael-yanov/hillel | /lesson_4/task_3.py | 535 | 3.984375 | 4 | '''
Пользователь вводит, отдельно, строку `s` и один символ `ch`. Необходимо выполнить поиск в строке `s` всех символов `ch`.
Для решения можно использовать только функцию `find`(rfind), операторы `if` и `for`(while).
'''
s = input('Enter the string: ')
ch = input('Enter symbol for searching: ')
l = 0
for i in range(len(s)):
if s[i] == ch:
l += 1
print('Found {0} symbols'.format(l)) |
0bc7afa837e73723ca846b077b279b6046421aef | puskarkarki/PythonBeginners | /Pyntive/challenge1.py | 539 | 4.125 | 4 | """ Question 1: Given a two integer numbers return their product and
if the product is greater than 1000, then return their sum"""
""" Solution 1"""
num1 = 20
num2 = 30
product = num1 * num2
sum = num1 + num2
if(product > 1000):
print(product)
else:
print(sum)
""" Another way to do this program """
def mul_or_sum(num1, num2):
product = num1 * num2
sum = num1 + num2
if(product > 1000):
return product
else:
return sum
print("\n")
result = mul_or_sum(num1, num2)
print("the result is", result)
|
a7c51df52dc968c896d8d8bc47e6ab2282a1bec7 | jilljenn/tryalgo | /tryalgo/knuth_morris_pratt.py | 2,395 | 3.5625 | 4 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""\
Find the length of maximal borders by Knuth-Morris-Pratt
jill-jênn vie, christoph dürr et louis abraham - 2014-2019
inspired from a practical lesson (TP) from Yves Lemaire
"""
# pylint: disable=undefined-variable, unused-argument
# snip{ maximum_border_length
def maximum_border_length(w):
    """Maximum string borders by Knuth-Morris-Pratt

    :param w: string
    :returns: table f such that f[i] is the longest border length of w[:i + 1]
    :complexity: linear
    """
    n = len(w)
    border = [0] * n   # border[0] stays 0: a 1-char string has no proper border
    length = 0         # length of the current longest border
    for i in range(1, n):
        # on a mismatch, fall back to the next-shorter border candidate
        while length > 0 and w[length] != w[i]:
            length = border[length - 1]
        if w[length] == w[i]:   # characters match: the border grows by one
            length += 1
        border[i] = length      # longest border of w[:i + 1]
    return border
# snip}
# snip{ knuth_morris_pratt
def knuth_morris_pratt(s, t):
    """Find a substring by Knuth-Morris-Pratt

    :param s: the haystack string
    :param t: the needle string
    :returns: index i such that s[i: i + len(t)] == t, or -1
    :complexity: O(len(s) + len(t))
    """
    separator = '\x00'  # must never occur in the real input (checked below)
    assert separator not in t and separator not in s
    needle_len = len(t)
    # A border of length len(t) inside t+sep+s marks a full occurrence of t in s.
    borders = maximum_border_length(t + separator + s)
    for position, border_len in enumerate(borders):
        if border_len == needle_len:
            # convert a position in t+sep+s back to a position in s
            return position - 2 * needle_len
    return -1
# snip}
# snip{ powerstring_by_border
def powerstring_by_border(u):
    """Power string by Knuth-Morris-Pratt

    :param u: string
    :returns: largest k such that there is a string y with u = y^k
    :complexity: O(len(u))
    """
    n = len(u)
    # candidate period: what remains after removing the longest border
    period = n - maximum_border_length(u)[-1]
    if n % period == 0:   # the shift-period divides n => u is a power
        return n // period
    return 1
# snip}
# snip{ powerstring_by_find
def powerstring_by_find(u):
    """Power string using the python find method

    :param u: string
    :returns: largest k such that there is a string y with u = y^k
    :complexity: O(len(u)^2), due to the naive implementation of string.find
    """
    doubled = u + u
    # the first non-trivial occurrence of u inside u+u is the smallest period
    period = doubled.find(u, 1)
    return len(u) // period
|
a7387c1ecff82382aa75b75b113e62f318c73d32 | MiguelCF06/holbertonschool-higher_level_programming | /0x07-python-test_driven_development/4-print_square.py | 556 | 4.25 | 4 | #!/usr/bin/python3
"""
Prints a square with "#"
"""
def print_square(size):
    """Print a size x size square made of the character '#'.

    Args:
        size: side length of the square (non-negative integer).

    Raises:
        TypeError: if size is not an integer (bool is rejected as well).
        ValueError: if size is negative.
    """
    # bool is a subclass of int, so it must be rejected explicitly; the
    # original's two separate isinstance branches are merged into one.
    if isinstance(size, bool) or not isinstance(size, int):
        raise TypeError("size must be an integer")
    if size < 0:
        raise ValueError("size must be >= 0")
    # One print per row instead of the original char-by-char inner loop
    # (same output, fewer calls); the redundant final isinstance check
    # was removed -- size is guaranteed to be an int here.
    for _ in range(size):
        print("#" * size)
|
7749d78f6d84763c2a3e0818df50f98de794822d | derick-droid/pirple_python | /forloops.py | 810 | 4.3125 | 4 |
# syntax for for loop
words = "hello world"
letters = []
for letter in words:
print(letter)
if letter == "o":
print("what a wonderfull letter")
letters.append(letter)
print(letters)
# looping through the list created
for element in letters:
print(element)
print()
# definig a list containing number
numbers = [1, 2, 3, 4, 5]
for number in numbers:
# print(number)
if number % 2 == 0:
print(f"{number} is even ")
else:
print(f"{number} is odd")
print()
# using range function in for loop
values = []
for num in range (100):
values.append(num)
print(num)
print(values)
# giving patterns in range values
for m in range (1, 10, 2):
print(m)
values.append(m)
print(values)
# using negative number
for n in range (-1, -12, -2):
print(n)
|
2eafa24a19dcc1628ac6f27169bc1fc880175bd3 | code-tamer/Library | /Business/KARIYER/PYTHON/Python_Temelleri/if-else-demo.py | 3,075 | 4.15625 | 4 | # 1- Kullanıcıdan isim, yaş ve eğitim bilgilerini isteyip ehliyet alabilme
# durumunu kontrol ediniz. Ehliyet alma durumu en az 18 veğitim durumu
# lise ya da üniversite olmalıdır.
# name =input('Ad: ')
# age = int(input('Yaş: '))
# edu = input('Eğitim Durumu: ')
# if (age > 18):
# if (edu == 'lise') or (edu == 'üniversite'):
# print(f'Sayın {name} ehliyet almaya uygundur.')
# else:
# print(f'Sayın {name} eğitim durumunuz ehliyet almaya uygun değildir')
# else:
# print(f'Sayın {name} yaşınız ehliyet almaya uygun değildir.')
# 2- Bir öğrencinin 2 yazılı bir sözlü notunu alıp hesaplanan ortalamaya göre
# not aralığına karşılık gelen not bilgisini yazdırınız.
# 0-24 => 0
# 25-44 => 1
# 45-54 => 2
# 55-69 => 3
# 70-84 => 4
# 85-100 => 5
# name = input('İsminiz: ')
# yazili1 = float(input('1. Yazılı: '))
# yazili2 = float(input('2. Yazılı: '))
# sozlu = float(input('Sözlü: '))
# ortalama = float((yazili1) + (yazili2) + (sozlu)) // 3
# if (ortalama >= 0) and (ortalama <= 24):
# print(f'Notunuz: 0 ve Ortalamanız: {ortalama}')
# elif (ortalama >= 25) and (ortalama <= 44):
# print(f'Notunuz: 1 ve Ortalamanız: {ortalama}')
# elif (ortalama >= 45) and (ortalama <= 54):
# print(f'Notunuz: 2 ve Ortalamanız: {ortalama}')
# elif (ortalama >= 55) and (ortalama <= 69):
# print(f'Notunuz: 3 ve Ortalamanız: {ortalama}')
# elif (ortalama >= 70) and (ortalama <= 84):
# print(f'Notunuz: 4 ve Ortalamanız: {ortalama}')
# elif (ortalama >= 85) and (ortalama <= 100):
# print(f'Notunuz: 5 ve Ortalamanız: {ortalama}')
# else:
# print('Yanlış bir değer girdiniz')
# 3- Trafiğe çıkış tarihi alınan aracın servis zamanını aşağıdaki bilgilere
# göre hesaplayınız.
# 1. Bakım => 1. yıl
# 2. Bakım => 2. yıl
# 3. Bakım => 3. yıl
# ** Süre hesabını alınan gün, ay, yıl bilgisine göre gün bazlı hesaplayınız.
# *** datetime modülünü kullanmamız gerekiyor.
# (simdi) - (2018/8/1)
# days = int(input('Aracınız Kaç Gündür Trafikte: '))
# if (days <= 365):
# print('Aracınızın 1. Bakım zamanı gelmiştir.')
# elif (days > 365 ) and (days < 365*2):
# print('Aracınızın 2. Bakım zamanı gelmiştir.')
# elif (days > 365*2 ) and (days < 365*3):
# print('Aracınızın 3. Bakım zamanı gelmiştir.')
# else:
# print('Yanlış Bilgi Girdiniz')
import datetime
# Read the date the car first entered traffic (format: year/month/day) and
# report which yearly service (1st/2nd/3rd) is due based on elapsed days.
tarih = input('Aracınız Hangi Tarihte Trafiğe Çıktı (2019/11/4: ')
tarih = tarih.split('/')
trafigeCikis = datetime.datetime(int(tarih[0]), int(tarih[1]), int(tarih[2]))
simdi = datetime.datetime.now()
fark = simdi - trafigeCikis
days = fark.days
# Bug fix: the original used strict bounds (days > 365 and days < 365*2),
# so exact multiples of 365 days fell through to the error branch.
if days <= 365:
    print('Aracınızın 1. Bakım zamanı gelmiştir.')
elif days <= 365 * 2:
    print('Aracınızın 2. Bakım zamanı gelmiştir.')
elif days <= 365 * 3:
    print('Aracınızın 3. Bakım zamanı gelmiştir.')
else:
    print('Yanlış Bilgi Girdiniz')
|
d0eeabdd01fcafc4d955d4f264d4378d6c710a40 | maggieyam/LeetCode | /surrounding_regions.py | 1,492 | 3.546875 | 4 | class Solution:
def solve(self, board: List[List[str]]) -> None:
"""
Do not return anything, modify board in-place instead.
"""
m = len(board)
n = len(board[0])
onEdge = set()
for row in range(m):
for col in range(n):
if board[row][col] == "O":
self.dfs(board, row, col, onEdge)
return board
def isEdges(self, row, col, m, n):
return row == 0 or row == m - 1 or col == 0 or col == n - 1
def isOutBounded(self, row, col, m, n):
return row < 0 or row >= m or col < 0 or col >= n
def dfs(self, board, row, col, onEdge):
m = len(board)
n = len(board[0])
if (row, col) not in onEdge:
if self.isEdges(row, col, m, n):
onEdge.add((row, col))
else:
board[row][col] = "X"
neighbors = [(0, 1), (0, -1), (1, 0), (-1, 0)]
for pos in neighbors:
offsetR, offsetC = pos
newRow = row + offsetR
newCol = col + offsetC
if self.isOutBounded(newRow, newCol, m, n):
continue
if board[newRow][newCol] == "O" and (newRow, newCol) not in onEdge:
if (row, col) in onEdge:
onEdge.add((newRow, newCol))
self.dfs(board, newRow, newCol, onEdge)
|
faaa5ade71425044bca736f167d2efd0b36da8d3 | vikki1107/ProjectPy | /academic/caesarCipher.py | 2,686 | 4.625 | 5 | #!/usr/bin/env python
"""
One of the first known examples of encryption was used by Julius Caesar. Caesar needed to provide written instructions to his generals, but he didn't want his enemies to learn his plans if the message slipped into their hands. As a result, he developed what later became known as the Caesar Cipher.
The idea behind this cipher is simple (and as a result, it provides no protection against modern code breaking techniques). Each letter in the original message is shifted by 3 (or n) places. As a result, A becomes D, B becomes E, etc. The last three letters in the alphabet are wrapped around to the beginning: X becomes A, Y becomes B, and Z becomes C. Non-letter characters are not modified by the cipher.
@author: vikki
"""
# Function to convert the message input by user to Ceaser's cipher
def convert_message(function, message, key):
# If the function is decrypt then make the key value as negative
if function[0] == 'd':
key = -key
new_message = ''
for l in message:
if l.isalpha():
n = ord(l)
n += key
if l.isupper():
if n > ord('Z'):
n -= 26
elif n < ord('A'):
n += 26
elif l.islower():
if n > ord('z'):
n -= 26
elif n < ord('a'):
n += 26
new_message += chr(n)
else:
new_message += l
return new_message
# Interactive driver (Python 2: raw_input and print statements).
# Set the key limit to 26 and read the requested mode from the user.
key_limit = 26
function_input = raw_input("Please enter the functionality you would wish to perform; encrypt or e; decrypt or d : ")
# Keep prompting until the user enters encrypt/e/decrypt/d.
# NOTE(review): the loop body assigns `function`, not `function_input`, so
# the loop condition never changes -- invalid first input loops forever.
while function_input.lower() not in 'encrypt e decrypt d'.split():
    print "\nPlease enter either encrypt or e; decrypt or d."
    function = raw_input("Please enter the functionality you would wish to perform; encrypt or e; decrypt or d : ")
# Normalise 'e'/'d' shorthands to the full mode name.
if function_input.lower() == 'e' or function_input.lower() == 'encrypt':
    function = 'encrypt'
elif function_input.lower() == 'd' or function_input.lower() == 'decrypt':
    function = 'decrypt'
message = raw_input("Please enter your message now to %s: " %function)
key = int(input('Enter the key number from 1 to 26: '))
# NOTE(review): `key < 1 and key > key_limit` can never be true (probably
# meant `or`), and the loop body never re-reads the key, so an out-of-range
# key is silently accepted.
while key < 1 and key > key_limit:
    print "Please enter the key within 1 to 26"
print '\nYour Caeser', str(function) + str('ed') +' text is:', convert_message(function, message, key)
|
7d49038a4f922cf5ad21960aab9432bcb184e61b | LambdaOvO/alien-invasion | /ship.py | 3,482 | 3.78125 | 4 | """飞船模块"""
import pygame
from pygame.sprite import Sprite
class Ship(Sprite):
    """The player's ship sprite: loading, drawing, movement and reset."""
    def __init__(self, ai_settings, screen):
        """Initialise the ship and place it at the bottom centre of the screen."""
        super().__init__()
        self.screen = screen
        self.ai_settings = ai_settings
        # Load the ship image and grab its bounding rect plus the screen's rect.
        self.image = pygame.image.load('images\\ship.bmp')
        self.rect = self.image.get_rect()
        self.screen_rect = screen.get_rect()
        # Start the ship at the bottom centre of the screen.
        self.rect.centerx = self.screen_rect.centerx
        self.rect.bottom = self.screen_rect.bottom
        # Remember the bottom-centre coordinates (constants) so the ship can
        # be put back there later by center_ship().
        self.down_center_centerx = self.rect.centerx
        self.down_center_centery = self.rect.centery
        # Movement flags -- the ship starts out stationary (all False).
        self.moving_right = False
        self.moving_left = False
        self.moving_up = False
        self.moving_down = False
        # Float copies of the centre coordinates.  rect.centerx / rect.centery
        # only hold integers, so fractional per-frame speeds would otherwise
        # be lost; these accumulators keep the fractional part across frames.
        self.centerx = float(self.rect.centerx)
        self.centery = float(self.rect.centery)
    def blitme(self):
        """Draw the ship at its current position."""
        self.screen.blit(self.image, self.rect)
    def update(self):
        # Move according to the flags, clamped so the ship stays on screen.
        if self.moving_right and (self.rect.right < self.screen_rect.right):
            self.centerx += self.ai_settings.ship_speed_factor
        if self.moving_left and (self.rect.left > self.screen_rect.left):
            self.centerx -= self.ai_settings.ship_speed_factor
        if self.moving_up and (self.rect.top > self.screen_rect.top):
            self.centery -= self.ai_settings.ship_speed_factor
        if self.moving_down and (self.rect.bottom < self.screen_rect.bottom):
            self.centery += self.ai_settings.ship_speed_factor
        # Copy the float accumulators back into the integer rect.  Because
        # self.centerx / self.centery retain their fractional part between
        # frames, a speed below 1 pixel/frame still moves the ship: the
        # fraction accumulates over several frames until it crosses the next
        # whole pixel.  Speeds above 1 work the same way, so using a float
        # speed factor is safe here.
        self.rect.centerx = self.centerx
        self.rect.centery = self.centery
    def center_ship(self):
        """Put the ship back at the bottom centre of the screen."""
        # Reset the float accumulators from the stored constants first, then
        # sync the rect -- resetting only the rect would leave the
        # accumulators stale and the ship would snap back next update().
        self.centerx = self.down_center_centerx
        self.centery = self.down_center_centery
        self.rect.centerx = self.centerx
        self.rect.centery = self.centery
|
c16782569c7ca14943a67e0dcc68a8ee356114ab | brfabri/ALGORITMOS-21.06.17- | /04-loja_tinta.py | 232 | 3.75 | 4 | area=float(input("Insira o tamanho em metros quadrados da área a ser pintada: "))
if area%54==0:
latas=area/54
else:
latas=(area/54)+1
total=latas*80
print("Quantidade de latas: %2.d. Total: R$%2.f"%(latas,total))
|
ee759d393595cfd9b2492054e2094f05c65c8b54 | parulkyadav/python-basic-programs | /fab2.py | 229 | 3.71875 | 4 | def fab(num):
a=0
b=1
print(a)
print(b)
for i in range(num-2):
t=a
a=b
b=t+a
print(b)
if __name__=="__main__":
num=int(input("Enter the number of series : "))
fab(num)
|
bdca9da765f2eef70833708f0995c3c2b0765280 | sunyy3/Using-Python-Plot | /python-plot.py | 5,601 | 4.1875 | 4 | # In the begining, import several useful modules in python
import matplotlib.pyplot as plt
import numpy as np
# Example 1: simple scatter plot
# For start, let's plot a simple line, and customize your figure and axis.
x = [0, 1, 2, 3, 4, 5, 6, 7]
y = [0, 1, 4, 9,16,25,36,49]
z = [0, 1, 2, 3, 4, 5, 6, 7]
# have a empty canvas
plt.clf()
# scatter and line format: circle point, solid line, red. You can define together or explicitly.
plt.plot(x,y,'ro-',label='quadratic')
plt.plot(x,z,label='linear',color='purple',marker='o',linestyle='-',linewidth=2.0)
plt.legend(loc='upper left')
################################################################################################################################################
# other option of marker, line
# marker: point '.' ',', circle 'o', triangle 'v' '<' '>' '^', square 's', diamond 'D' 'd', x 'x' 'X', star '*', hexagon 'h' 'H', pentagon 'p'
# linestyle or ls: 'solid' '-', 'dashed' '--', 'dashdot' '-.', 'dotted' ':'
# color: 'red', 'green', 'black', 'blue', 'yellow', 'purple'
################################################################################################################################################
# The text format in the figure, including size, font, style
plt.title('simple plot',style='italic',fontsize=12,fontweight='bold')
# adding text into figure, the two number give the coordinate of text
plt.text(2,15,'This is a comment!',style='oblique')
plt.annotate('annotate',xy=(1,1),xytext=(1,8),arrowprops=dict(color='black',shrink=0.05))
#################################################################
## more information: http://matplotlib.org/users/text_props.html
#################################################################
# customize the axis
plt.xlim(0,8)
plt.ylim(0,50)
plt.xlabel('x')
plt.ylabel('y')
plt.tick_params(direction='in') # make tick face inside
plt.grid(True) # show grid
plt.savefig('simplePlot.png') # save figure you just plot
plt.show() # interactive show your figure
# Example 2: how to write Greek letter and math equation in the figure using LaTex
plt.clf()
# In python, LaTex form text is started with '$' and also ended with '$'
# Greek letter in small and Capital form
plt.title('$\\theta \\Theta; \sigma \Sigma; \pi \Pi$')
# some basic mathematical operator
plt.xlabel('$\\times \div \\neq \leq \geq \equiv $')
# some calculus symbol: the last one is Angstrom
plt.ylabel('$\infty \partial \int \oint \sum \prod \AA$')
# subscript using '_' and superscript using '^', if subscript is a long equation, using {} to surround equation, e.g. {i=1}
plt.text(0.5 ,0.5, '$(a_1+a_2)^2 = a_1^2 + 2 a_1 a_2 + a_2^2 $', style='italic')
# fraction using '\frac{numerator}{denimunator}', similarly, long equation using {}, e.g. {n^2}
plt.text(0.5 ,0.3, '$ \sum_{i=1}^{n} \\frac{1}{n} = p $')
plt.show()
##################################################################################
## more information about LaTex can be found at: https://www.sharelatex.com/learn
##################################################################################
# Example 3: how to plot histogram from raw data
import random
Alist = []
for i in range(300):
Alist.append(random.randint(1,100))
# First, plot histogram using count number in every bin range
bins = np.arange(0,105,5) # define 20 bins from 0 to 100, every 5 as one bin, Note: right boundary doesn't include
plt.clf()
plt.hist(Alist,bins=bins,alpha=0.5,color='red',edgecolor='black')
# option: 'alpha' define transparency, 'color' define fill color, 'edgecolor' define outline color
plt.ylabel('count')
plt.show()
# Second, we can also plot normalized histgram using probability of every bin
# Note: the option: "normed=True" won't give us correct figure, here we need using "weights"
# Basically, here "weights" does: instead of counting every data point in "Alist" as 1, it count as 1/len(Alist), then we easily get the probability
weight_Alist = np.ones_like(Alist)/len(Alist)
bins = np.arange(0,105,5)
plt.clf()
plt.hist(Alist,bins=bins,alpha=0.5,color='red',edgecolor='black',weights=weight_Alist)
plt.ylabel('Probability')
plt.show()
# Third, if you want to know exactly value of each bins, we can using "numpy"
bins = np.arange(0,105,5)
histRes, bin_width = np.histogram(Alist,bins)
# NOTE: the length of bin_width is larger than the length of histRes by 1.
print(histRes,len(histRes))
print(bin_width,len(bin_width))
# Example 4: how to do linear regression
from scipy import stats
x = [ 1, 2, 3, 5, 6, 8]
y = [1.3,2.3,3.5,5.7,6.3,8.7]
slope, intercept, r_value, p_value, std_err = stats.linregress(x,y)
print(slope, intercept, r_value, std_err)
y_fit = [item*slope+intercept for item in x]
plt.clf()
plt.plot(x,y,'ro')
plt.plot(x,y_fit,'b-')
plt.text(2,7,' y = {0:6.4f} x + {1:6.4f} \n $R^2$ = {2:6.4f}'.format(slope, intercept, r_value*r_value))
plt.show()
# Example 5: how to draw figures with two y-axis, which share one x-axis
month = [i for i in range(1,13)]
price = [12, 8, 9, 4, 1, 3, 7, 7, 8, 13, 14, 15]
amount = [3, 5, 7, 10, 20, 15, 9, 9, 8, 4, 2, 1]
# here we have two y-axis system: ax1 and ax2
fig, ax1 =plt.subplots()
ax2 = ax1.twinx()
ax1.set_position([0.12,0.12,0.76,0.83])
ax2.set_position([0.12,0.12,0.76,0.83])
ax1.plot(month,price,'ro-')
ax2.plot(month,amount,'b>-')
ax1.set_xlabel('Month')
ax1.set_ylabel('Price')
ax2.set_ylabel('Amount')
fig.tight_layout()
plt.show()
|
9037c013e1d4e235d52e6c587bd38c436cb40985 | Carolyn95/Leetcoding-Journey | /shell_sort.py | 706 | 3.5 | 4 | # shell sort implementing in python
import pdb
def shell_sort(bef_list):
    """Sort *bef_list* in place with Shell sort and return it.

    Uses the classic gap sequence n/2, n/4, ..., 1: for each gap a gapped
    insertion sort is run, then the gap is halved.

    :param bef_list: list of mutually comparable items; mutated in place.
    :return: the same list object, sorted in ascending order.
    """
    n = len(bef_list)
    # Initial gap: half the list length.
    gap = n // 2
    while gap > 0:
        # Gapped insertion sort: each element from index `gap` onward is
        # shifted left in strides of `gap` until it is in place.
        for i in range(gap, n):
            temp = bef_list[i]
            j = i
            # j >= gap guarantees j - gap is a valid index; the original's
            # extra `j >= 0` test was redundant (gap > 0 here) and is dropped.
            while j >= gap and bef_list[j - gap] > temp:
                bef_list[j] = bef_list[j - gap]
                j -= gap
            bef_list[j] = temp
        # Halve the gap; the final pass (gap == 1) is a plain insertion
        # sort on an almost-sorted list.
        gap = gap // 2
    print(bef_list)  # kept for parity with the original's side effect
    return bef_list
if __name__ == "__main__":
    # Demo run: sort an unsorted sample list (result is printed by shell_sort).
    sample = [13, 14, 94, 33, 82, 25, 59, 94, 65, 23, 45, 27, 73, 25, 39, 10]
    shell_sort(sample)
|
e8c09c9f46546edc58a0dfd959a92ee68dde31ca | CardinisCode/learning-python | /Python_Code_Challenges/movie_collection/main.py | 647 | 3.609375 | 4 | class Solution:
def run(self, n, m, movies):
#
# Write your code below; return type and arguments should be according to the problem's requirements
movie_stack = list(range(n, 0, -1))
books_on_top_list = []
for movie in movies:
movie_index = movie_stack.index(movie) + 1
books_on_top = len(movie_stack[movie_index:len(movie_stack)])
movie_stack.remove(movie)
movie_stack.append(movie)
books_on_top_list.append(str(books_on_top))
return ",".join(books_on_top_list)
# Demo: stack of 3 movies, fetch sequence [3, 1, 1] -> prints "2,1,0".
solution = Solution()
print(solution.run(3, 3, [3, 1, 1]))
c714468db692122bfec0f57b553e2456c6ba0f9e | Zokhira/basics | /files_exceptions/exeptions.py | 942 | 3.921875 | 4 | # Exception Handling - handle situations
# Absolute path used by every file operation below.
# NOTE(review): hard-coded Windows path -- the script only works on this
# machine's layout; confirm before reuse.
filepath = "C:/dev/2020-fall/basics/data1/students.txt"
try:
    print('trying block started...')
    # Append mode creates the file if it is missing, so the expected
    # failure here is a bad directory component in the path.
    with open(filepath, 'a') as students:
        print("writing to the file..")
        students.write(f"\nSergey")
    with open(filepath, 'r') as students:
        lines = students.readlines()
        print(lines)
except FileNotFoundError as err:
    print(err)
    print('Enter correct file path. Check your file path.')
# print(5/0)
try:
    # NOTE(review): int() may also raise ValueError on non-numeric input;
    # only ZeroDivisionError is handled below.
    num = 5/int(input('enter the number to divide by: '))
except ZeroDivisionError as err:
    print("You can not divide by zero.")
else: # runs only when the try block raised no exception
    print('*********This is else block')
    print(f"Result of this division: {num}")
finally:
    print('I am a Finally block, I am always executed whatever happens with try except or else block.')
    print('Program is completed!!')
b3d2af93aa88d622e912d7bb4b5d060620192c23 | sanchezolivos-unprg/t06_sanchez | /sanchez/multiple18.py | 790 | 3.765625 | 4 | import os
# Show values (initial declarations; immediately overwritten below)
nombre=""
t_grado_fahrenheit=0.0
# INPUT: name and temperature are read from the command line
# NOTE(review): os.sys.argv works but `import sys; sys.argv` is the
# conventional spelling -- confirm before changing.
nombre=os.sys.argv[1]
t_grado_fahrenheit=float(os.sys.argv[2])
# OUTPUT: echo the inputs
print("################################")
print(" TEMPERATURA DE UNA PERSONA ")
print("################################")
print("NOMBRE:", nombre)
print("TEMPERATURA EN GRADO FAHRENHEIT:", t_grado_fahrenheit)
print("################################")
# Multiple (non-exclusive) conditions classifying the person's temperature.
# NOTE(review): the thresholds 37-40 look like degrees Celsius even though
# the variable is named "fahrenheit" -- confirm the intended unit.
# NOTE(review): the ranges leave gaps (37.0-37.8, exactly 38.0, and
# 39.0-40.0 print nothing) -- confirm whether that is intended.
if(t_grado_fahrenheit>40.0):
    print("peligro de muerte")
if(t_grado_fahrenheit>38.1 and t_grado_fahrenheit<39):
    print("tiene fiebre alta")
if(t_grado_fahrenheit>37.8 and t_grado_fahrenheit<38.0):
    print("tiene fiebre")
if(t_grado_fahrenheit<37.0):
    print("temperatura normal")
# end of conditional chain
|
af94d4585ae56941fe3baae5aea3f3b1382fc087 | mmaina48/ProjectManagement | /excrise2.py | 796 | 4.03125 | 4 | tasklist= [23,"jane",["leesso 23",560,{"cureency":"kes"}],987,(76,"john")]
# Determine the type of tasklist (defined above; a list of mixed content)
print(type(tasklist))
# Print "kes": tasklist[2] is a list whose third item is a dict; the key is
# spelled "cureency" in the data, so that exact spelling must be used here.
kesl=tasklist[2]
kesl1=kesl[2]
print(kesl1["cureency"])
# Print 560 (second element of the nested list at index 2)
print(tasklist[2][1])
# Use the built-in len() to find the length of tasklist
print(len(tasklist))
# "Change" 987 to 789 by converting it to a string and reversing it;
# the list itself is not modified.
print(str(tasklist[3]))
print(type(str(tasklist[3])))
revern=(str(tasklist[3])[::-1])
print(revern)
# Try to change the name "john" to "jane": the last element is a tuple
changename=tasklist[-1]
print("we cannot change an element in :",changename,"because is a :",type(changename))
# We cannot change an element of a tuple; assigning to it
# would raise a TypeError.
|
43fe21581ec0e6aaf7d89f7eb361dffb1ddbe5e1 | CompPhysics/ComputationalPhysics2 | /doc/LectureNotes/_build/jupyter_execute/boltzmannmachines.py | 72,230 | 3.59375 | 4 | #!/usr/bin/env python
# coding: utf-8
# # Boltzmann Machines
#
# Why use a generative model rather than the more well known discriminative deep neural networks (DNN)?
#
# * Discriminitave methods have several limitations: They are mainly supervised learning methods, thus requiring labeled data. And there are tasks they cannot accomplish, like drawing new examples from an unknown probability distribution.
#
# * A generative model can learn to represent and sample from a probability distribution. The core idea is to learn a parametric model of the probability distribution from which the training data was drawn. As an example
#
# a. A model for images could learn to draw new examples of cats and dogs, given a training dataset of images of cats and dogs.
#
# b. Generate a sample of an ordered or disordered Ising model phase, having been given samples of such phases.
#
# c. Model the trial function for Monte Carlo calculations
#
#
# 4. Both use gradient-descent based learning procedures for minimizing cost functions
#
# 5. Energy based models don't use backpropagation and automatic differentiation for computing gradients, instead turning to Markov Chain Monte Carlo methods.
#
# 6. DNNs often have several hidden layers. A restricted Boltzmann machine has only one hidden layer, however several RBMs can be stacked to make up Deep Belief Networks, of which they constitute the building blocks.
#
# History: The RBM was developed by amongst others Geoffrey Hinton, called by some the "Godfather of Deep Learning", working with the University of Toronto and Google.
#
#
#
# A BM is what we would call an undirected probabilistic graphical model
# with stochastic continuous or discrete units.
#
#
# It is interpreted as a stochastic recurrent neural network where the
# state of each unit(neurons/nodes) depends on the units it is connected
# to. The weights in the network represent thus the strength of the
# interaction between various units/nodes.
#
#
# It turns into a Hopfield network if we choose deterministic rather
# than stochastic units. In contrast to a Hopfield network, a BM is a
# so-called generative model. It allows us to generate new samples from
# the learned distribution.
#
#
#
# A standard BM network is divided into a set of observable and visible units $\hat{x}$ and a set of unknown hidden units/nodes $\hat{h}$.
#
#
#
# Additionally there can be bias nodes for the hidden and visible layers. These biases are normally set to $1$.
#
#
#
# BMs are stackable, meaning we can train a BM which serves as input to another BM. We can construct deep networks for learning complex PDFs. The layers can be trained one after another, a feature which makes them popular in deep learning.
#
#
#
# However, they are often hard to train. This leads to the introduction of so-called restricted BMs, or RBMS.
# Here we take away all lateral connections between nodes in the visible layer as well as connections between nodes in the hidden layer. The network is illustrated in the figure below.
#
# <!-- dom:FIGURE: [figures/RBM.png, width=800 frac=1.0] -->
# <!-- begin figure -->
# <img src="figures/RBM.png" width=800><p style="font-size: 0.9em"><i>Figure 1: </i></p><!-- end figure -->
#
#
#
#
#
# ## The network
#
# **The network layers**:
# 1. A function $\mathbf{x}$ that represents the visible layer, a vector of $M$ elements (nodes). This layer represents both what the RBM might be given as training input, and what we want it to be able to reconstruct. This might for example be the pixels of an image, the spin values of the Ising model, or coefficients representing speech.
#
# 2. The function $\mathbf{h}$ represents the hidden, or latent, layer. A vector of $N$ elements (nodes). Also called "feature detectors".
#
# The goal of the hidden layer is to increase the model's expressive power. We encode complex interactions between visible variables by introducing additional, hidden variables that interact with visible degrees of freedom in a simple manner, yet still reproduce the complex correlations between visible degrees in the data once marginalized over (integrated out).
#
# Examples of this trick being employed in physics:
# 1. The Hubbard-Stratonovich transformation
#
# 2. The introduction of ghost fields in gauge theory
#
# 3. Shadow wave functions in Quantum Monte Carlo simulations
#
# **The network parameters, to be optimized/learned**:
# 1. $\mathbf{a}$ represents the visible bias, a vector of same length as $\mathbf{x}$.
#
# 2. $\mathbf{b}$ represents the hidden bias, a vector of same length as $\mathbf{h}$.
#
# 3. $W$ represents the interaction weights, a matrix of size $M\times N$.
#
# ### Joint distribution
#
# The restricted Boltzmann machine is described by a Boltzmann distribution
# <!-- Equation labels as ordinary links -->
# <div id="_auto1"></div>
#
# $$
# \begin{equation}
# P_{rbm}(\mathbf{x},\mathbf{h}) = \frac{1}{Z} e^{-\frac{1}{T_0}E(\mathbf{x},\mathbf{h})},
# \label{_auto1} \tag{1}
# \end{equation}
# $$
# where $Z$ is the normalization constant or partition function, defined as
# <!-- Equation labels as ordinary links -->
# <div id="_auto2"></div>
#
# $$
# \begin{equation}
# Z = \int \int e^{-\frac{1}{T_0}E(\mathbf{x},\mathbf{h})} d\mathbf{x} d\mathbf{h}.
# \label{_auto2} \tag{2}
# \end{equation}
# $$
# It is common to ignore $T_0$ by setting it to one.
#
#
# ### Network Elements, the energy function
#
# The function $E(\mathbf{x},\mathbf{h})$ gives the **energy** of a
# configuration (pair of vectors) $(\mathbf{x}, \mathbf{h})$. The lower
# the energy of a configuration, the higher the probability of it. This
# function also depends on the parameters $\mathbf{a}$, $\mathbf{b}$ and
# $W$. Thus, when we adjust them during the learning procedure, we are
# adjusting the energy function to best fit our problem.
#
#
#
# ### Defining different types of RBMs
#
# There are different variants of RBMs, and the differences lie in the types of visible and hidden units we choose as well as in the implementation of the energy function $E(\mathbf{x},\mathbf{h})$. The connection between the nodes in the two layers is given by the weights $w_{ij}$.
#
# **Binary-Binary RBM:**
#
#
# RBMs were first developed using binary units in both the visible and hidden layer. The corresponding energy function is defined as follows:
# <!-- Equation labels as ordinary links -->
# <div id="_auto3"></div>
#
# $$
# \begin{equation}
# E(\mathbf{x}, \mathbf{h}) = - \sum_i^M x_i a_i- \sum_j^N b_j h_j - \sum_{i,j}^{M,N} x_i w_{ij} h_j,
# \label{_auto3} \tag{3}
# \end{equation}
# $$
# where the binary values taken on by the nodes are most commonly 0 and 1.
#
#
# **Gaussian-Binary RBM:**
#
#
# Another variant is the RBM where the visible units are Gaussian while the hidden units remain binary:
# <!-- Equation labels as ordinary links -->
# <div id="_auto4"></div>
#
# $$
# \begin{equation}
# E(\mathbf{x}, \mathbf{h}) = \sum_i^M \frac{(x_i - a_i)^2}{2\sigma_i^2} - \sum_j^N b_j h_j - \sum_{i,j}^{M,N} \frac{x_i w_{ij} h_j}{\sigma_i^2}.
# \label{_auto4} \tag{4}
# \end{equation}
# $$
# 1. RBMs are Useful when we model continuous data (i.e., we wish $\mathbf{x}$ to be continuous)
#
# 2. Requires a smaller learning rate, since there's no upper bound to the value a component might take in the reconstruction
#
# Other types of units include:
# 1. Softmax and multinomial units
#
# 2. Gaussian visible and hidden units
#
# 3. Binomial units
#
# 4. Rectified linear units
#
# ### Cost function
#
# When working with a training dataset, the most common training approach is maximizing the log-likelihood of the training data. The log likelihood characterizes the log-probability of generating the observed data using our generative model. Using this method our cost function is chosen as the negative log-likelihood. The learning then consists of trying to find parameters that maximize the probability of the dataset, and is known as Maximum Likelihood Estimation (MLE).
# Denoting the parameters as $\boldsymbol{\theta} = a_1,...,a_M,b_1,...,b_N,w_{11},...,w_{MN}$, the log-likelihood is given by
# <!-- Equation labels as ordinary links -->
# <div id="_auto5"></div>
#
# $$
# \begin{equation}
# \mathcal{L}(\{ \theta_i \}) = \langle \text{log} P_\theta(\boldsymbol{x}) \rangle_{data}
# \label{_auto5} \tag{5}
# \end{equation}
# $$
# <!-- Equation labels as ordinary links -->
# <div id="_auto6"></div>
#
# $$
# \begin{equation}
# = - \langle E(\boldsymbol{x}; \{ \theta_i\}) \rangle_{data} - \text{log} Z(\{ \theta_i\}),
# \label{_auto6} \tag{6}
# \end{equation}
# $$
# where we used that the normalization constant does not depend on the data, $\langle \text{log} Z(\{ \theta_i\}) \rangle = \text{log} Z(\{ \theta_i\})$
# Our cost function is the negative log-likelihood, $\mathcal{C}(\{ \theta_i \}) = - \mathcal{L}(\{ \theta_i \})$
#
# ### Optimization / Training
#
# The training procedure of choice often is Stochastic Gradient Descent (SGD). It consists of a series of iterations where we update the parameters according to the equation
# <!-- Equation labels as ordinary links -->
# <div id="_auto7"></div>
#
# $$
# \begin{equation}
# \boldsymbol{\theta}_{k+1} = \boldsymbol{\theta}_k - \eta \nabla \mathcal{C} (\boldsymbol{\theta}_k)
# \label{_auto7} \tag{7}
# \end{equation}
# $$
# at each $k$-th iteration. There are a range of variants of the algorithm which aim at making the learning rate $\eta$ more adaptive so the method might be more efficient while remaining stable.
#
# We now need the gradient of the cost function in order to minimize it. We find that
# <!-- Equation labels as ordinary links -->
# <div id="_auto8"></div>
#
# $$
# \begin{equation}
# \frac{\partial \mathcal{C}(\{ \theta_i\})}{\partial \theta_i}
# = \langle \frac{\partial E(\boldsymbol{x}; \theta_i)}{\partial \theta_i} \rangle_{data}
# + \frac{\partial \text{log} Z(\{ \theta_i\})}{\partial \theta_i}
# \label{_auto8} \tag{8}
# \end{equation}
# $$
# <!-- Equation labels as ordinary links -->
# <div id="_auto9"></div>
#
# $$
# \begin{equation}
# = \langle O_i(\boldsymbol{x}) \rangle_{data} - \langle O_i(\boldsymbol{x}) \rangle_{model},
# \label{_auto9} \tag{9}
# \end{equation}
# $$
# where in order to simplify notation we defined the "operator"
# <!-- Equation labels as ordinary links -->
# <div id="_auto10"></div>
#
# $$
# \begin{equation}
# O_i(\boldsymbol{x}) = \frac{\partial E(\boldsymbol{x}; \theta_i)}{\partial \theta_i},
# \label{_auto10} \tag{10}
# \end{equation}
# $$
# and used the statistical mechanics relationship between expectation values and the log-partition function:
# <!-- Equation labels as ordinary links -->
# <div id="_auto11"></div>
#
# $$
# \begin{equation}
# \langle O_i(\boldsymbol{x}) \rangle_{model} = \text{Tr} P_\theta(\boldsymbol{x})O_i(\boldsymbol{x}) = - \frac{\partial \text{log} Z(\{ \theta_i\})}{\partial \theta_i}.
# \label{_auto11} \tag{11}
# \end{equation}
# $$
# The data-dependent term in the gradient is known as the positive phase
# of the gradient, while the model-dependent term is known as the
# negative phase of the gradient. The aim of the training is to lower
# the energy of configurations that are near observed data points
# (increasing their probability), and raising the energy of
# configurations that are far from observed data points (decreasing
# their probability).
#
# The gradient of the negative log-likelihood cost function of a Binary-Binary RBM is then
# <!-- Equation labels as ordinary links -->
# <div id="_auto12"></div>
#
# $$
# \begin{equation}
# \frac{\partial \mathcal{C} (w_{ij}, a_i, b_j)}{\partial w_{ij}} = \langle x_i h_j \rangle_{data} - \langle x_i h_j \rangle_{model}
# \label{_auto12} \tag{12}
# \end{equation}
# $$
# <!-- Equation labels as ordinary links -->
# <div id="_auto13"></div>
#
# $$
# \begin{equation}
# \frac{\partial \mathcal{C} (w_{ij}, a_i, b_j)}{\partial a_{ij}} = \langle x_i \rangle_{data} - \langle x_i \rangle_{model}
# \label{_auto13} \tag{13}
# \end{equation}
# $$
# <!-- Equation labels as ordinary links -->
# <div id="_auto14"></div>
#
# $$
# \begin{equation}
# \frac{\partial \mathcal{C} (w_{ij}, a_i, b_j)}{\partial b_{ij}} = \langle h_i \rangle_{data} - \langle h_i \rangle_{model}.
# \label{_auto14} \tag{14}
# \end{equation}
# $$
# <!-- Equation labels as ordinary links -->
# <div id="_auto15"></div>
#
# $$
# \begin{equation}
# \label{_auto15} \tag{15}
# \end{equation}
# $$
# To get the expectation values with respect to the *data*, we set the visible units to each of the observed samples in the training data, then update the hidden units according to the conditional probability found before. We then average over all samples in the training data to calculate expectation values with respect to the data.
#
#
#
#
# ### Kullback-Leibler relative entropy
#
# When the goal of the training is to approximate a probability
# distribution, as it is in generative modeling, another relevant
# measure is the **Kullback-Leibler divergence**, also known as the
# relative entropy or Shannon entropy. It is a non-symmetric measure of the
# dissimilarity between two probability density functions $p$ and
# $q$. If $p$ is the unkown probability which we approximate with $q$,
# we can measure the difference by
# <!-- Equation labels as ordinary links -->
# <div id="_auto16"></div>
#
# $$
# \begin{equation}
# \text{KL}(p||q) = \int_{-\infty}^{\infty} p (\boldsymbol{x}) \log \frac{p(\boldsymbol{x})}{q(\boldsymbol{x})} d\boldsymbol{x}.
# \label{_auto16} \tag{16}
# \end{equation}
# $$
# Thus, the Kullback-Leibler divergence between the distribution of the
# training data $f(\boldsymbol{x})$ and the model distribution $p(\boldsymbol{x}|
# \boldsymbol{\theta})$ is
# <!-- Equation labels as ordinary links -->
# <div id="_auto17"></div>
#
# $$
# \begin{equation}
# \text{KL} (f(\boldsymbol{x})|| p(\boldsymbol{x}| \boldsymbol{\theta})) = \int_{-\infty}^{\infty}
# f (\boldsymbol{x}) \log \frac{f(\boldsymbol{x})}{p(\boldsymbol{x}| \boldsymbol{\theta})} d\boldsymbol{x}
# \label{_auto17} \tag{17}
# \end{equation}
# $$
# <!-- Equation labels as ordinary links -->
# <div id="_auto18"></div>
#
# $$
# \begin{equation}
# = \int_{-\infty}^{\infty} f(\boldsymbol{x}) \log f(\boldsymbol{x}) d\boldsymbol{x} - \int_{-\infty}^{\infty} f(\boldsymbol{x}) \log
# p(\boldsymbol{x}| \boldsymbol{\theta}) d\boldsymbol{x}
# \label{_auto18} \tag{18}
# \end{equation}
# $$
# <!-- Equation labels as ordinary links -->
# <div id="_auto19"></div>
#
# $$
# \begin{equation}
# %= \mathbb{E}_{f(\boldsymbol{x})} (\log f(\boldsymbol{x})) - \mathbb{E}_{f(\boldsymbol{x})} (\log p(\boldsymbol{x}| \boldsymbol{\theta}))
# = \langle \log f(\boldsymbol{x}) \rangle_{f(\boldsymbol{x})} - \langle \log p(\boldsymbol{x}| \boldsymbol{\theta}) \rangle_{f(\boldsymbol{x})}
# \label{_auto19} \tag{19}
# \end{equation}
# $$
# <!-- Equation labels as ordinary links -->
# <div id="_auto20"></div>
#
# $$
# \begin{equation}
# = \langle \log f(\boldsymbol{x}) \rangle_{data} + \langle E(\boldsymbol{x}) \rangle_{data} + \log Z
# \label{_auto20} \tag{20}
# \end{equation}
# $$
# <!-- Equation labels as ordinary links -->
# <div id="_auto21"></div>
#
# $$
# \begin{equation}
# = \langle \log f(\boldsymbol{x}) \rangle_{data} + \mathcal{C}_{LL} .
# \label{_auto21} \tag{21}
# \end{equation}
# $$
# The first term is constant with respect to $\boldsymbol{\theta}$ since $f(\boldsymbol{x})$ is independent of $\boldsymbol{\theta}$. Thus the Kullback-Leibler Divergence is minimal when the second term is minimal. The second term is the log-likelihood cost function, hence minimizing the Kullback-Leibler divergence is equivalent to maximizing the log-likelihood.
#
#
# To further understand generative models it is useful to study the
# gradient of the cost function which is needed in order to minimize it
# using methods like stochastic gradient descent.
#
# The partition function is the generating function of
# expectation values, in particular there are mathematical relationships
# between expectation values and the log-partition function. In this
# case we have
# <!-- Equation labels as ordinary links -->
# <div id="_auto22"></div>
#
# $$
# \begin{equation}
# \langle \frac{ \partial E(\boldsymbol{x}; \theta_i) } { \partial \theta_i} \rangle_{model}
# = \int p(\boldsymbol{x}| \boldsymbol{\theta}) \frac{ \partial E(\boldsymbol{x}; \theta_i) } { \partial \theta_i} d\boldsymbol{x}
# = -\frac{\partial \log Z(\theta_i)}{ \partial \theta_i} .
# \label{_auto22} \tag{22}
# \end{equation}
# $$
# Here $\langle \cdot \rangle_{model}$ is the expectation value over the model probability distribution $p(\boldsymbol{x}| \boldsymbol{\theta})$.
#
# ## Setting up for gradient descent calculations
#
# Using the previous relationship we can express the gradient of the cost function as
# <!-- Equation labels as ordinary links -->
# <div id="_auto23"></div>
#
# $$
# \begin{equation}
# \frac{\partial \mathcal{C}_{LL}}{\partial \theta_i}
# = \langle \frac{ \partial E(\boldsymbol{x}; \theta_i) } { \partial \theta_i} \rangle_{data} + \frac{\partial \log Z(\theta_i)}{ \partial \theta_i}
# \label{_auto23} \tag{23}
# \end{equation}
# $$
# <!-- Equation labels as ordinary links -->
# <div id="_auto24"></div>
#
# $$
# \begin{equation}
# = \langle \frac{ \partial E(\boldsymbol{x}; \theta_i) } { \partial \theta_i} \rangle_{data} - \langle \frac{ \partial E(\boldsymbol{x}; \theta_i) } { \partial \theta_i} \rangle_{model}
# \label{_auto24} \tag{24}
# \end{equation}
# $$
# <!-- Equation labels as ordinary links -->
# <div id="_auto25"></div>
#
# $$
# \begin{equation}
# %= \langle O_i(\boldsymbol{x}) \rangle_{data} - \langle O_i(\boldsymbol{x}) \rangle_{model}
# \label{_auto25} \tag{25}
# \end{equation}
# $$
# This expression shows that the gradient of the log-likelihood cost
# function is a **difference of moments**, with one calculated from
# the data and one calculated from the model. The data-dependent term is
# called the **positive phase** and the model-dependent term is
# called the **negative phase** of the gradient. We see now that
# minimizing the cost function results in lowering the energy of
# configurations $\boldsymbol{x}$ near points in the training data and
# increasing the energy of configurations not observed in the training
# data. That means we increase the model's probability of configurations
# similar to those in the training data.
#
#
# The gradient of the cost function also demonstrates why gradients of
# unsupervised, generative models must be computed differently from for
# those of for example FNNs. While the data-dependent expectation value
# is easily calculated based on the samples $\boldsymbol{x}_i$ in the training
# data, we must sample from the model in order to generate samples from
# which to calculate the model-dependent term. We sample from the model
# by using MCMC-based methods. We can not sample from the model directly
# because the partition function $Z$ is generally intractable.
#
# As in supervised machine learning problems, the goal is also here to
# perform well on **unseen** data, that is to have good
# generalization from the training data. The distribution $f(x)$ we
# approximate is not the **true** distribution we wish to estimate,
# it is limited to the training data. Hence, in unsupervised training as
# well it is important to prevent overfitting to the training data. Thus
# it is common to add regularizers to the cost function in the same
# manner as we discussed for say linear regression.
#
#
#
# ## RBMs for the quantum many body problem
#
# The idea of applying RBMs to quantum many body problems was presented by G. Carleo and M. Troyer, working with ETH Zurich and Microsoft Research.
#
# Some of their motivation included
#
# * The wave function $\Psi$ is a monolithic mathematical quantity that contains all the information on a quantum state, be it a single particle or a complex molecule. In principle, an exponential amount of information is needed to fully encode a generic many-body quantum state.
#
# * There are still interesting open problems, including fundamental questions ranging from the dynamical properties of high-dimensional systems to the exact ground-state properties of strongly interacting fermions.
#
# * The difficulty lies in finding a general strategy to reduce the exponential complexity of the full many-body wave function down to its most essential features. That is
#
# a. Dimensional reduction
#
# b. Feature extraction
#
#
# * Among the most successful techniques to attack these challenges, artificial neural networks play a prominent role.
#
# * Want to understand whether an artificial neural network may adapt to describe a quantum system.
#
# Carleo and Troyer applied the RBM to the quantum mechanical spin lattice systems of the Ising model and Heisenberg model, with encouraging results. Our goal is to test the method on systems of moving particles. For the spin lattice systems it was natural to use a binary-binary RBM, with the nodes taking values of 1 and -1. For moving particles, on the other hand, we want the visible nodes to be continuous, representing position coordinates. Thus, we start by choosing a Gaussian-binary RBM, where the visible nodes are continuous and hidden nodes take on values of 0 or 1. If eventually we would like the hidden nodes to be continuous as well the rectified linear units seem like the most relevant choice.
#
#
#
#
# ## Representing the wave function
#
# The wavefunction should be a probability amplitude depending on
# $\boldsymbol{x}$. The RBM model is given by the joint distribution of
# $\boldsymbol{x}$ and $\boldsymbol{h}$
# <!-- Equation labels as ordinary links -->
# <div id="_auto26"></div>
#
# $$
# \begin{equation}
# F_{rbm}(\mathbf{x},\mathbf{h}) = \frac{1}{Z} e^{-\frac{1}{T_0}E(\mathbf{x},\mathbf{h})}.
# \label{_auto26} \tag{26}
# \end{equation}
# $$
# To find the marginal distribution of $\boldsymbol{x}$ we set:
# <!-- Equation labels as ordinary links -->
# <div id="_auto27"></div>
#
# $$
# \begin{equation}
# F_{rbm}(\mathbf{x}) = \sum_\mathbf{h} F_{rbm}(\mathbf{x}, \mathbf{h})
# \label{_auto27} \tag{27}
# \end{equation}
# $$
# <!-- Equation labels as ordinary links -->
# <div id="_auto28"></div>
#
# $$
# \begin{equation}
# = \frac{1}{Z}\sum_\mathbf{h} e^{-E(\mathbf{x}, \mathbf{h})}.
# \label{_auto28} \tag{28}
# \end{equation}
# $$
# Now this is what we use to represent the wave function, calling it a neural-network quantum state (NQS)
# <!-- Equation labels as ordinary links -->
# <div id="_auto29"></div>
#
# $$
# \begin{equation}
# \Psi (\mathbf{X}) = F_{rbm}(\mathbf{x})
# \label{_auto29} \tag{29}
# \end{equation}
# $$
# <!-- Equation labels as ordinary links -->
# <div id="_auto30"></div>
#
# $$
# \begin{equation}
# = \frac{1}{Z}\sum_{\boldsymbol{h}} e^{-E(\mathbf{x}, \mathbf{h})}
# \label{_auto30} \tag{30}
# \end{equation}
# $$
# <!-- Equation labels as ordinary links -->
# <div id="_auto31"></div>
#
# $$
# \begin{equation}
# = \frac{1}{Z} \sum_{\{h_j\}} e^{-\sum_i^M \frac{(x_i - a_i)^2}{2\sigma^2} + \sum_j^N b_j h_j + \sum_\
# {i,j}^{M,N} \frac{x_i w_{ij} h_j}{\sigma^2}}
# \label{_auto31} \tag{31}
# \end{equation}
# $$
# <!-- Equation labels as ordinary links -->
# <div id="_auto32"></div>
#
# $$
# \begin{equation}
# = \frac{1}{Z} e^{-\sum_i^M \frac{(x_i - a_i)^2}{2\sigma^2}} \prod_j^N (1 + e^{b_j + \sum_i^M \frac{x\
# _i w_{ij}}{\sigma^2}}).
# \label{_auto32} \tag{32}
# \end{equation}
# $$
# <!-- Equation labels as ordinary links -->
# <div id="_auto33"></div>
#
# $$
# \begin{equation}
# \label{_auto33} \tag{33}
# \end{equation}
# $$
# ## Choose the cost function
#
# Now we don't necessarily have training data (unless we generate it by using some other method). However, what we do have is the variational principle which allows us to obtain the ground state wave function by minimizing the expectation value of the energy of a trial wavefunction (corresponding to the untrained NQS). Similarly to the traditional variational Monte Carlo method then, it is the local energy we wish to minimize. The gradient to use for the stochastic gradient descent procedure is
# <!-- Equation labels as ordinary links -->
# <div id="_auto34"></div>
#
# $$
# \begin{equation}
# C_i = \frac{\partial \langle E_L \rangle}{\partial \theta_i}
# = 2(\langle E_L \frac{1}{\Psi}\frac{\partial \Psi}{\partial \theta_i} \rangle - \langle E_L \rangle \langle \frac{1}{\Psi}\frac{\partial \Psi}{\partial \theta_i} \rangle ),
# \label{_auto34} \tag{34}
# \end{equation}
# $$
# where the local energy is given by
# <!-- Equation labels as ordinary links -->
# <div id="_auto35"></div>
#
# $$
# \begin{equation}
# E_L = \frac{1}{\Psi} \hat{\mathbf{H}} \Psi.
# \label{_auto35} \tag{35}
# \end{equation}
# $$
# ### Mathematical details
#
# Because we are restricted to potential functions which are positive it
# is convenient to express them as exponentials, so that
# <!-- Equation labels as ordinary links -->
# <div id="_auto36"></div>
#
# $$
# \begin{equation}
# \phi_C (\boldsymbol{x}_C) = e^{-E_C(\boldsymbol{x}_C)}
# \label{_auto36} \tag{36}
# \end{equation}
# $$
# where $E(\boldsymbol{x}_C)$ is called an *energy function*, and the
# exponential representation is the *Boltzmann distribution*. The
# joint distribution is defined as the product of potentials.
#
# The joint distribution of the random variables is then
# $$
# p(\boldsymbol{x}) = \frac{1}{Z} \prod_C \phi_C (\boldsymbol{x}_C) \nonumber
# $$
# $$
# = \frac{1}{Z} \prod_C e^{-E_C(\boldsymbol{x}_C)} \nonumber
# $$
# $$
# = \frac{1}{Z} e^{-\sum_C E_C(\boldsymbol{x}_C)} \nonumber
# $$
# <!-- Equation labels as ordinary links -->
# <div id="_auto37"></div>
#
# $$
# \begin{equation}
# = \frac{1}{Z} e^{-E(\boldsymbol{x})},
# \label{_auto37} \tag{37}
# \end{equation}
# $$
# where $E(\boldsymbol{x}) = \sum_C E_C(\boldsymbol{x}_C)$ is the total energy.
# <!-- Equation labels as ordinary links -->
# <div id="_auto38"></div>
#
# $$
# \begin{equation}
# p_{BM}(\boldsymbol{x}, \boldsymbol{h}) = \frac{1}{Z_{BM}} e^{-\frac{1}{T}E_{BM}(\boldsymbol{x}, \boldsymbol{h})} ,
# \label{_auto38} \tag{38}
# \end{equation}
# $$
# with the partition function
# <!-- Equation labels as ordinary links -->
# <div id="_auto39"></div>
#
# $$
# \begin{equation}
# Z_{BM} = \int \int e^{-\frac{1}{T} E_{BM}(\tilde{\boldsymbol{x}}, \tilde{\boldsymbol{h}})} d\tilde{\boldsymbol{x}} d\tilde{\boldsymbol{h}} .
# \label{_auto39} \tag{39}
# \end{equation}
# $$
# $T$ is a physics-inspired parameter named temperature and will be assumed to be 1 unless otherwise stated. The energy function of the Boltzmann machine determines the interactions between the nodes and is defined
# $$
# E_{BM}(\boldsymbol{x}, \boldsymbol{h}) = - \sum_{i, k}^{M, K} a_i^k \alpha_i^k (x_i)
# - \sum_{j, l}^{N, L} b_j^l \beta_j^l (h_j)
# - \sum_{i,j,k,l}^{M,N,K,L} \alpha_i^k (x_i) w_{ij}^{kl} \beta_j^l (h_j) \nonumber
# $$
# <!-- Equation labels as ordinary links -->
# <div id="_auto40"></div>
#
# $$
# \begin{equation}
# - \sum_{i, m=i+1, k}^{M, M, K} \alpha_i^k (x_i) v_{im}^k \alpha_m^k (x_m)
# - \sum_{j,n=j+1,l}^{N,N,L} \beta_j^l (h_j) u_{jn}^l \beta_n^l (h_n).
# \label{_auto40} \tag{40}
# \end{equation}
# $$
# Here $\alpha_i^k (x_i)$ and $\beta_j^l (h_j)$ are one-dimensional
# transfer functions or mappings from the given input value to the
# desired feature value. They can be arbitrary functions of the input
# variables and are independent of the parameterization (parameters
# referring to weight and biases), meaning they are not affected by
# training of the model. The indices $k$ and $l$ indicate that there can
# be multiple transfer functions per variable. Furthermore, $a_i^k$ and
# $b_j^l$ are the visible and hidden bias. $w_{ij}^{kl}$ are weights of
# the \textbf{inter-layer} connection terms which connect visible and
# hidden units. $ v_{im}^k$ and $u_{jn}^l$ are weights of the
# \textbf{intra-layer} connection terms which connect the visible units
# to each other and the hidden units to each other, respectively.
#
#
#
# We remove the intra-layer connections by setting $v_{im}$ and $u_{jn}$
# to zero. The expression for the energy of the RBM is then
# <!-- Equation labels as ordinary links -->
# <div id="_auto41"></div>
#
# $$
# \begin{equation}
# E_{RBM}(\boldsymbol{x}, \boldsymbol{h}) = - \sum_{i, k}^{M, K} a_i^k \alpha_i^k (x_i)
# - \sum_{j, l}^{N, L} b_j^l \beta_j^l (h_j)
# - \sum_{i,j,k,l}^{M,N,K,L} \alpha_i^k (x_i) w_{ij}^{kl} \beta_j^l (h_j).
# \label{_auto41} \tag{41}
# \end{equation}
# $$
# resulting in
# $$
# P_{RBM} (\boldsymbol{x}) = \int P_{RBM} (\boldsymbol{x}, \tilde{\boldsymbol{h}}) d \tilde{\boldsymbol{h}} \nonumber
# $$
# $$
# = \frac{1}{Z_{RBM}} \int e^{-E_{RBM} (\boldsymbol{x}, \tilde{\boldsymbol{h}}) } d\tilde{\boldsymbol{h}} \nonumber
# $$
# $$
# = \frac{1}{Z_{RBM}} \int e^{\sum_{i, k} a_i^k \alpha_i^k (x_i)
# + \sum_{j, l} b_j^l \beta_j^l (\tilde{h}_j)
# + \sum_{i,j,k,l} \alpha_i^k (x_i) w_{ij}^{kl} \beta_j^l (\tilde{h}_j)}
# d\tilde{\boldsymbol{h}} \nonumber
# $$
# $$
# = \frac{1}{Z_{RBM}} e^{\sum_{i, k} a_i^k \alpha_i^k (x_i)}
# \int \prod_j^N e^{\sum_l b_j^l \beta_j^l (\tilde{h}_j)
# + \sum_{i,k,l} \alpha_i^k (x_i) w_{ij}^{kl} \beta_j^l (\tilde{h}_j)} d\tilde{\boldsymbol{h}} \nonumber
# $$
# $$
# = \frac{1}{Z_{RBM}} e^{\sum_{i, k} a_i^k \alpha_i^k (x_i)}
# \biggl( \int e^{\sum_l b_1^l \beta_1^l (\tilde{h}_1) + \sum_{i,k,l} \alpha_i^k (x_i) w_{i1}^{kl} \beta_1^l (\tilde{h}_1)} d \tilde{h}_1 \nonumber
# $$
# $$
# \times \int e^{\sum_l b_2^l \beta_2^l (\tilde{h}_2) + \sum_{i,k,l} \alpha_i^k (x_i) w_{i2}^{kl} \beta_2^l (\tilde{h}_2)} d \tilde{h}_2 \nonumber
# $$
# $$
# \times ... \nonumber
# $$
# $$
# \times \int e^{\sum_l b_N^l \beta_N^l (\tilde{h}_N) + \sum_{i,k,l} \alpha_i^k (x_i) w_{iN}^{kl} \beta_N^l (\tilde{h}_N)} d \tilde{h}_N \biggr) \nonumber
# $$
# <!-- Equation labels as ordinary links -->
# <div id="_auto42"></div>
#
# $$
# \begin{equation}
# = \frac{1}{Z_{RBM}} e^{\sum_{i, k} a_i^k \alpha_i^k (x_i)}
# \prod_j^N \int e^{\sum_l b_j^l \beta_j^l (\tilde{h}_j) + \sum_{i,k,l} \alpha_i^k (x_i) w_{ij}^{kl} \beta_j^l (\tilde{h}_j)} d\tilde{h}_j
# \label{_auto42} \tag{42}
# \end{equation}
# $$
# Similarly
# $$
# P_{RBM} (\boldsymbol{h}) = \frac{1}{Z_{RBM}} \int e^{-E_{RBM} (\tilde{\boldsymbol{x}}, \boldsymbol{h})} d\tilde{\boldsymbol{x}} \nonumber
# $$
# <!-- Equation labels as ordinary links -->
# <div id="_auto43"></div>
#
# $$
# \begin{equation}
# = \frac{1}{Z_{RBM}} e^{\sum_{j, l} b_j^l \beta_j^l (h_j)}
# \prod_i^M \int e^{\sum_k a_i^k \alpha_i^k (\tilde{x}_i)
# + \sum_{j,k,l} \alpha_i^k (\tilde{x}_i) w_{ij}^{kl} \beta_j^l (h_j)} d\tilde{x}_i
# \label{_auto43} \tag{43}
# \end{equation}
# $$
# Using Bayes theorem
# $$
# P_{RBM} (\boldsymbol{h}|\boldsymbol{x}) = \frac{P_{RBM} (\boldsymbol{x}, \boldsymbol{h})}{P_{RBM} (\boldsymbol{x})} \nonumber
# $$
# $$
# = \frac{\frac{1}{Z_{RBM}} e^{\sum_{i, k} a_i^k \alpha_i^k (x_i)
# + \sum_{j, l} b_j^l \beta_j^l (h_j)
# + \sum_{i,j,k,l} \alpha_i^k (x_i) w_{ij}^{kl} \beta_j^l (h_j)}}
# {\frac{1}{Z_{RBM}} e^{\sum_{i, k} a_i^k \alpha_i^k (x_i)}
# \prod_j^N \int e^{\sum_l b_j^l \beta_j^l (\tilde{h}_j) + \sum_{i,k,l} \alpha_i^k (x_i) w_{ij}^{kl} \beta_j^l (\tilde{h}_j)} d\tilde{h}_j} \nonumber
# $$
# <!-- Equation labels as ordinary links -->
# <div id="_auto44"></div>
#
# $$
# \begin{equation}
# = \prod_j^N \frac{e^{\sum_l b_j^l \beta_j^l (h_j) + \sum_{i,k,l} \alpha_i^k (x_i) w_{ij}^{kl} \beta_j^l (h_j)} }
# {\int e^{\sum_l b_j^l \beta_j^l (\tilde{h}_j) + \sum_{i,k,l} \alpha_i^k (x_i) w_{ij}^{kl} \beta_j^l (\tilde{h}_j)} d\tilde{h}_j}
# \label{_auto44} \tag{44}
# \end{equation}
# $$
# Similarly
# $$
# P_{RBM} (\boldsymbol{x}|\boldsymbol{h}) = \frac{P_{RBM} (\boldsymbol{x}, \boldsymbol{h})}{P_{RBM} (\boldsymbol{h})} \nonumber
# $$
# <!-- Equation labels as ordinary links -->
# <div id="_auto45"></div>
#
# $$
# \begin{equation}
# = \prod_i^M \frac{e^{\sum_k a_i^k \alpha_i^k (x_i)
# + \sum_{j,k,l} \alpha_i^k (x_i) w_{ij}^{kl} \beta_j^l (h_j)}}
# {\int e^{\sum_k a_i^k \alpha_i^k (\tilde{x}_i)
# + \sum_{j,k,l} \alpha_i^k (\tilde{x}_i) w_{ij}^{kl} \beta_j^l (h_j)} d\tilde{x}_i}
# \label{_auto45} \tag{45}
# \end{equation}
# $$
# The original RBM had binary visible and hidden nodes. They were
# shown to be universal approximators of discrete distributions.
# It was also shown that adding hidden units yields
# strictly improved modelling power. The common choice of binary values
# are 0 and 1. However, in some physics applications, -1 and 1 might be
# a more natural choice. We will here use 0 and 1.
# <!-- Equation labels as ordinary links -->
# <div id="_auto46"></div>
#
# $$
# \begin{equation}
# E_{BB}(\boldsymbol{x}, \mathbf{h}) = - \sum_i^M x_i a_i- \sum_j^N b_j h_j - \sum_{i,j}^{M,N} x_i w_{ij} h_j.
# \label{_auto46} \tag{46}
# \end{equation}
# $$
# <!-- Equation labels as ordinary links -->
# <div id="_auto47"></div>
#
# $$
# \begin{equation}
# p_{BB}(\boldsymbol{x}, \boldsymbol{h}) = \frac{1}{Z_{BB}} e^{\sum_i^M a_i x_i + \sum_j^N b_j h_j + \sum_{ij}^{M,N} x_i w_{ij} h_j}
# \label{_auto47} \tag{47}
# \end{equation}
# $$
# <!-- Equation labels as ordinary links -->
# <div id="_auto48"></div>
#
# $$
# \begin{equation}
# = \frac{1}{Z_{BB}} e^{\boldsymbol{x}^T \boldsymbol{a} + \boldsymbol{b}^T \boldsymbol{h} + \boldsymbol{x}^T \boldsymbol{W} \boldsymbol{h}}
# \label{_auto48} \tag{48}
# \end{equation}
# $$
# with the partition function
# <!-- Equation labels as ordinary links -->
# <div id="_auto49"></div>
#
# $$
# \begin{equation}
# Z_{BB} = \sum_{\boldsymbol{x}, \boldsymbol{h}} e^{\boldsymbol{x}^T \boldsymbol{a} + \boldsymbol{b}^T \boldsymbol{h} + \boldsymbol{x}^T \boldsymbol{W} \boldsymbol{h}} .
# \label{_auto49} \tag{49}
# \end{equation}
# $$
# ### Marginal Probability Density Functions
#
# In order to find the probability of any configuration of the visible units we derive the marginal probability density function.
# <!-- Equation labels as ordinary links -->
# <div id="_auto50"></div>
#
# $$
# \begin{equation}
# p_{BB} (\boldsymbol{x}) = \sum_{\boldsymbol{h}} p_{BB} (\boldsymbol{x}, \boldsymbol{h})
# \label{_auto50} \tag{50}
# \end{equation}
# $$
# $$
# = \frac{1}{Z_{BB}} \sum_{\boldsymbol{h}} e^{\boldsymbol{x}^T \boldsymbol{a} + \boldsymbol{b}^T \boldsymbol{h} + \boldsymbol{x}^T \boldsymbol{W} \boldsymbol{h}} \nonumber
# $$
# $$
# = \frac{1}{Z_{BB}} e^{\boldsymbol{x}^T \boldsymbol{a}} \sum_{\boldsymbol{h}} e^{\sum_j^N (b_j + \boldsymbol{x}^T \boldsymbol{w}_{\ast j})h_j} \nonumber
# $$
# $$
# = \frac{1}{Z_{BB}} e^{\boldsymbol{x}^T \boldsymbol{a}} \sum_{\boldsymbol{h}} \prod_j^N e^{ (b_j + \boldsymbol{x}^T \boldsymbol{w}_{\ast j})h_j} \nonumber
# $$
# $$
# = \frac{1}{Z_{BB}} e^{\boldsymbol{x}^T \boldsymbol{a}} \bigg ( \sum_{h_1} e^{(b_1 + \boldsymbol{x}^T \boldsymbol{w}_{\ast 1})h_1}
# \times \sum_{h_2} e^{(b_2 + \boldsymbol{x}^T \boldsymbol{w}_{\ast 2})h_2} \times \nonumber
# $$
# $$
# ... \times \sum_{h_N} e^{(b_N + \boldsymbol{x}^T \boldsymbol{w}_{\ast N})h_N} \bigg ) \nonumber
# $$
# $$
# = \frac{1}{Z_{BB}} e^{\boldsymbol{x}^T \boldsymbol{a}} \prod_j^N \sum_{h_j} e^{(b_j + \boldsymbol{x}^T \boldsymbol{w}_{\ast j}) h_j} \nonumber
# $$
# <!-- Equation labels as ordinary links -->
# <div id="_auto51"></div>
#
# $$
# \begin{equation}
# = \frac{1}{Z_{BB}} e^{\boldsymbol{x}^T \boldsymbol{a}} \prod_j^N (1 + e^{b_j + \boldsymbol{x}^T \boldsymbol{w}_{\ast j}}) .
# \label{_auto51} \tag{51}
# \end{equation}
# $$
# A similar derivation yields the marginal probability of the hidden units
# <!-- Equation labels as ordinary links -->
# <div id="_auto52"></div>
#
# $$
# \begin{equation}
# p_{BB} (\boldsymbol{h}) = \frac{1}{Z_{BB}} e^{\boldsymbol{b}^T \boldsymbol{h}} \prod_i^M (1 + e^{a_i + \boldsymbol{w}_{i\ast}^T \boldsymbol{h}}) .
# \label{_auto52} \tag{52}
# \end{equation}
# $$
# ### Conditional Probability Density Functions
#
# We derive the probability of the hidden units given the visible units using Bayes' rule
# $$
# p_{BB} (\boldsymbol{h}|\boldsymbol{x}) = \frac{p_{BB} (\boldsymbol{x}, \boldsymbol{h})}{p_{BB} (\boldsymbol{x})} \nonumber
# $$
# $$
# = \frac{ \frac{1}{Z_{BB}} e^{\boldsymbol{x}^T \boldsymbol{a} + \boldsymbol{b}^T \boldsymbol{h} + \boldsymbol{x}^T \boldsymbol{W} \boldsymbol{h}} }
# {\frac{1}{Z_{BB}} e^{\boldsymbol{x}^T \boldsymbol{a}} \prod_j^N (1 + e^{b_j + \boldsymbol{x}^T \boldsymbol{w}_{\ast j}})} \nonumber
# $$
# $$
# = \frac{ e^{\boldsymbol{x}^T \boldsymbol{a}} e^{ \sum_j^N (b_j + \boldsymbol{x}^T \boldsymbol{w}_{\ast j} ) h_j} }
# { e^{\boldsymbol{x}^T \boldsymbol{a}} \prod_j^N (1 + e^{b_j + \boldsymbol{x}^T \boldsymbol{w}_{\ast j}})} \nonumber
# $$
# $$
# = \prod_j^N \frac{ e^{(b_j + \boldsymbol{x}^T \boldsymbol{w}_{\ast j} ) h_j} }
# {1 + e^{b_j + \boldsymbol{x}^T \boldsymbol{w}_{\ast j}}} \nonumber
# $$
# <!-- Equation labels as ordinary links -->
# <div id="_auto53"></div>
#
# $$
# \begin{equation}
# = \prod_j^N p_{BB} (h_j| \boldsymbol{x}) .
# \label{_auto53} \tag{53}
# \end{equation}
# $$
# From this we find the probability of a hidden unit being "on" or "off":
# <!-- Equation labels as ordinary links -->
# <div id="_auto54"></div>
#
# $$
# \begin{equation}
# p_{BB} (h_j=1 | \boldsymbol{x}) = \frac{ e^{(b_j + \boldsymbol{x}^T \boldsymbol{w}_{\ast j} ) h_j} }
# {1 + e^{b_j + \boldsymbol{x}^T \boldsymbol{w}_{\ast j}}}
# \label{_auto54} \tag{54}
# \end{equation}
# $$
# <!-- Equation labels as ordinary links -->
# <div id="_auto55"></div>
#
# $$
# \begin{equation}
# = \frac{ e^{(b_j + \boldsymbol{x}^T \boldsymbol{w}_{\ast j} )} }
# {1 + e^{b_j + \boldsymbol{x}^T \boldsymbol{w}_{\ast j}}}
# \label{_auto55} \tag{55}
# \end{equation}
# $$
# <!-- Equation labels as ordinary links -->
# <div id="_auto56"></div>
#
# $$
# \begin{equation}
# = \frac{ 1 }{1 + e^{-(b_j + \boldsymbol{x}^T \boldsymbol{w}_{\ast j})} } ,
# \label{_auto56} \tag{56}
# \end{equation}
# $$
# and
# <!-- Equation labels as ordinary links -->
# <div id="_auto57"></div>
#
# $$
# \begin{equation}
# p_{BB} (h_j=0 | \boldsymbol{x}) =\frac{ 1 }{1 + e^{b_j + \boldsymbol{x}^T \boldsymbol{w}_{\ast j}} } .
# \label{_auto57} \tag{57}
# \end{equation}
# $$
# Similarly we have that the conditional probability of the visible units given the hidden are
# <!-- Equation labels as ordinary links -->
# <div id="_auto58"></div>
#
# $$
# \begin{equation}
# p_{BB} (\boldsymbol{x}|\boldsymbol{h}) = \prod_i^M \frac{ e^{ (a_i + \boldsymbol{w}_{i\ast}^T \boldsymbol{h}) x_i} }{ 1 + e^{a_i + \boldsymbol{w}_{i\ast}^T \boldsymbol{h}} }
# \label{_auto58} \tag{58}
# \end{equation}
# $$
# <!-- Equation labels as ordinary links -->
# <div id="_auto59"></div>
#
# $$
# \begin{equation}
# = \prod_i^M p_{BB} (x_i | \boldsymbol{h}) .
# \label{_auto59} \tag{59}
# \end{equation}
# $$
# <!-- Equation labels as ordinary links -->
# <div id="_auto60"></div>
#
# $$
# \begin{equation}
# p_{BB} (x_i=1 | \boldsymbol{h}) = \frac{1}{1 + e^{-(a_i + \boldsymbol{w}_{i\ast}^T \boldsymbol{h} )}}
# \label{_auto60} \tag{60}
# \end{equation}
# $$
# <!-- Equation labels as ordinary links -->
# <div id="_auto61"></div>
#
# $$
# \begin{equation}
# p_{BB} (x_i=0 | \boldsymbol{h}) = \frac{1}{1 + e^{a_i + \boldsymbol{w}_{i\ast}^T \boldsymbol{h} }} .
# \label{_auto61} \tag{61}
# \end{equation}
# $$
# ### Gaussian-Binary Restricted Boltzmann Machines
#
# Inserting into the expression for $E_{RBM}(\boldsymbol{x},\boldsymbol{h})$ in the equation above results in the energy
# $$
# E_{GB}(\boldsymbol{x}, \boldsymbol{h}) = \sum_i^M \frac{(x_i - a_i)^2}{2\sigma_i^2}
# - \sum_j^N b_j h_j
# -\sum_{ij}^{M,N} \frac{x_i w_{ij} h_j}{\sigma_i^2} \nonumber
# $$
# <!-- Equation labels as ordinary links -->
# <div id="_auto62"></div>
#
# $$
# \begin{equation}
# = \vert\vert\frac{\boldsymbol{x} -\boldsymbol{a}}{2\boldsymbol{\sigma}}\vert\vert^2 - \boldsymbol{b}^T \boldsymbol{h}
# - (\frac{\boldsymbol{x}}{\boldsymbol{\sigma}^2})^T \boldsymbol{W}\boldsymbol{h} .
# \label{_auto62} \tag{62}
# \end{equation}
# $$
# ### Joint Probability Density Function
# $$
# p_{GB} (\boldsymbol{x}, \boldsymbol{h}) = \frac{1}{Z_{GB}} e^{-\vert\vert\frac{\boldsymbol{x} -\boldsymbol{a}}{2\boldsymbol{\sigma}}\vert\vert^2 + \boldsymbol{b}^T \boldsymbol{h}
# + (\frac{\boldsymbol{x}}{\boldsymbol{\sigma}^2})^T \boldsymbol{W}\boldsymbol{h}} \nonumber
# $$
# $$
# = \frac{1}{Z_{GB}} e^{- \sum_i^M \frac{(x_i - a_i)^2}{2\sigma_i^2}
# + \sum_j^N b_j h_j
# +\sum_{ij}^{M,N} \frac{x_i w_{ij} h_j}{\sigma_i^2}} \nonumber
# $$
# <!-- Equation labels as ordinary links -->
# <div id="_auto63"></div>
#
# $$
# \begin{equation}
# = \frac{1}{Z_{GB}} \prod_{ij}^{M,N} e^{-\frac{(x_i - a_i)^2}{2\sigma_i^2}
# + b_j h_j
# +\frac{x_i w_{ij} h_j}{\sigma_i^2}} ,
# \label{_auto63} \tag{63}
# \end{equation}
# $$
# with the partition function given by
# <!-- Equation labels as ordinary links -->
# <div id="_auto64"></div>
#
# $$
# \begin{equation}
# Z_{GB} = \int \sum_{\tilde{\boldsymbol{h}}}^{\tilde{\boldsymbol{H}}} e^{-\vert\vert\frac{\tilde{\boldsymbol{x}} -\boldsymbol{a}}{2\boldsymbol{\sigma}}\vert\vert^2 + \boldsymbol{b}^T \tilde{\boldsymbol{h}}
# + (\frac{\tilde{\boldsymbol{x}}}{\boldsymbol{\sigma}^2})^T \boldsymbol{W}\tilde{\boldsymbol{h}}} d\tilde{\boldsymbol{x}} .
# \label{_auto64} \tag{64}
# \end{equation}
# $$
# ### Marginal Probability Density Functions
#
# We proceed to find the marginal probability densitites of the
# Gaussian-binary RBM. We first marginalize over the binary hidden units
# to find $p_{GB} (\boldsymbol{x})$
# $$
# p_{GB} (\boldsymbol{x}) = \sum_{\tilde{\boldsymbol{h}}}^{\tilde{\boldsymbol{H}}} p_{GB} (\boldsymbol{x}, \tilde{\boldsymbol{h}}) \nonumber
# $$
# $$
# = \frac{1}{Z_{GB}} \sum_{\tilde{\boldsymbol{h}}}^{\tilde{\boldsymbol{H}}}
# e^{-\vert\vert\frac{\boldsymbol{x} -\boldsymbol{a}}{2\boldsymbol{\sigma}}\vert\vert^2 + \boldsymbol{b}^T \tilde{\boldsymbol{h}}
# + (\frac{\boldsymbol{x}}{\boldsymbol{\sigma}^2})^T \boldsymbol{W}\tilde{\boldsymbol{h}}} \nonumber
# $$
# <!-- Equation labels as ordinary links -->
# <div id="_auto65"></div>
#
# $$
# \begin{equation}
# = \frac{1}{Z_{GB}} e^{-\vert\vert\frac{\boldsymbol{x} -\boldsymbol{a}}{2\boldsymbol{\sigma}}\vert\vert^2}
# \prod_j^N (1 + e^{b_j + (\frac{\boldsymbol{x}}{\boldsymbol{\sigma}^2})^T \boldsymbol{w}_{\ast j}} ) .
# \label{_auto65} \tag{65}
# \end{equation}
# $$
# We next marginalize over the visible units. This is the first time we
# marginalize over continuous values. We rewrite the exponential factor
# dependent on $\boldsymbol{x}$ as a Gaussian function before we integrate in
# the last step.
# $$
# p_{GB} (\boldsymbol{h}) = \int p_{GB} (\tilde{\boldsymbol{x}}, \boldsymbol{h}) d\tilde{\boldsymbol{x}} \nonumber
# $$
# $$
# = \frac{1}{Z_{GB}} \int e^{-\vert\vert\frac{\tilde{\boldsymbol{x}} -\boldsymbol{a}}{2\boldsymbol{\sigma}}\vert\vert^2 + \boldsymbol{b}^T \boldsymbol{h}
# + (\frac{\tilde{\boldsymbol{x}}}{\boldsymbol{\sigma}^2})^T \boldsymbol{W}\boldsymbol{h}} d\tilde{\boldsymbol{x}} \nonumber
# $$
# $$
# = \frac{1}{Z_{GB}} e^{\boldsymbol{b}^T \boldsymbol{h} } \int \prod_i^M
# e^{- \frac{(\tilde{x}_i - a_i)^2}{2\sigma_i^2} + \frac{\tilde{x}_i \boldsymbol{w}_{i\ast}^T \boldsymbol{h}}{\sigma_i^2} } d\tilde{\boldsymbol{x}} \nonumber
# $$
# $$
# = \frac{1}{Z_{GB}} e^{\boldsymbol{b}^T \boldsymbol{h} }
# \biggl( \int e^{- \frac{(\tilde{x}_1 - a_1)^2}{2\sigma_1^2} + \frac{\tilde{x}_1 \boldsymbol{w}_{1\ast}^T \boldsymbol{h}}{\sigma_1^2} } d\tilde{x}_1 \nonumber
# $$
# $$
# \times \int e^{- \frac{(\tilde{x}_2 - a_2)^2}{2\sigma_2^2} + \frac{\tilde{x}_2 \boldsymbol{w}_{2\ast}^T \boldsymbol{h}}{\sigma_2^2} } d\tilde{x}_2 \nonumber
# $$
# $$
# \times ... \nonumber
# $$
# $$
# \times \int e^{- \frac{(\tilde{x}_M - a_M)^2}{2\sigma_M^2} + \frac{\tilde{x}_M \boldsymbol{w}_{M\ast}^T \boldsymbol{h}}{\sigma_M^2} } d\tilde{x}_M \biggr) \nonumber
# $$
# $$
# = \frac{1}{Z_{GB}} e^{\boldsymbol{b}^T \boldsymbol{h}} \prod_i^M
# \int e^{- \frac{(\tilde{x}_i - a_i)^2 - 2\tilde{x}_i \boldsymbol{w}_{i\ast}^T \boldsymbol{h}}{2\sigma_i^2} } d\tilde{x}_i \nonumber
# $$
# $$
# = \frac{1}{Z_{GB}} e^{\boldsymbol{b}^T \boldsymbol{h}} \prod_i^M
# \int e^{- \frac{\tilde{x}_i^2 - 2\tilde{x}_i(a_i + \boldsymbol{w}_{i\ast}^T \boldsymbol{h}) + a_i^2}{2\sigma_i^2} } d\tilde{x}_i \nonumber
# $$
# $$
# = \frac{1}{Z_{GB}} e^{\boldsymbol{b}^T \boldsymbol{h}} \prod_i^M
# \int e^{- \frac{\tilde{x}_i^2 - 2\tilde{x}_i(a_i + \boldsymbol{w}_{i\ast}^T \boldsymbol{h}) + (a_i + \boldsymbol{w}_{i\ast}^T \boldsymbol{h})^2 - (a_i + \boldsymbol{w}_{i\ast}^T \boldsymbol{h})^2 + a_i^2}{2\sigma_i^2} } d\tilde{x}_i \nonumber
# $$
# $$
# = \frac{1}{Z_{GB}} e^{\boldsymbol{b}^T \boldsymbol{h}} \prod_i^M
# \int e^{- \frac{(\tilde{x}_i - (a_i + \boldsymbol{w}_{i\ast}^T \boldsymbol{h}))^2 - a_i^2 -2a_i \boldsymbol{w}_{i\ast}^T \boldsymbol{h} - (\boldsymbol{w}_{i\ast}^T \boldsymbol{h})^2 + a_i^2}{2\sigma_i^2} } d\tilde{x}_i \nonumber
# $$
# $$
# = \frac{1}{Z_{GB}} e^{\boldsymbol{b}^T \boldsymbol{h}} \prod_i^M
# e^{\frac{2a_i \boldsymbol{w}_{i\ast}^T \boldsymbol{h} +(\boldsymbol{w}_{i\ast}^T \boldsymbol{h})^2 }{2\sigma_i^2}}
# \int e^{- \frac{(\tilde{x}_i - a_i - \boldsymbol{w}_{i\ast}^T \boldsymbol{h})^2}{2\sigma_i^2}}
# d\tilde{x}_i \nonumber
# $$
# <!-- Equation labels as ordinary links -->
# <div id="_auto66"></div>
#
# $$
# \begin{equation}
# = \frac{1}{Z_{GB}} e^{\boldsymbol{b}^T \boldsymbol{h}} \prod_i^M
# \sqrt{2\pi \sigma_i^2}
# e^{\frac{2a_i \boldsymbol{w}_{i\ast}^T \boldsymbol{h} +(\boldsymbol{w}_{i\ast}^T \boldsymbol{h})^2 }{2\sigma_i^2}} .
# \label{_auto66} \tag{66}
# \end{equation}
# $$
# ### Conditional Probability Density Functions
#
# We finish by deriving the conditional probabilities.
# $$
# p_{GB} (\boldsymbol{h}| \boldsymbol{x}) = \frac{p_{GB} (\boldsymbol{x}, \boldsymbol{h})}{p_{GB} (\boldsymbol{x})} \nonumber
# $$
# $$
# = \frac{\frac{1}{Z_{GB}} e^{-\vert\vert\frac{\boldsymbol{x} -\boldsymbol{a}}{2\boldsymbol{\sigma}}\vert\vert^2 + \boldsymbol{b}^T \boldsymbol{h}
# + (\frac{\boldsymbol{x}}{\boldsymbol{\sigma}^2})^T \boldsymbol{W}\boldsymbol{h}}}
# {\frac{1}{Z_{GB}} e^{-\vert\vert\frac{\boldsymbol{x} -\boldsymbol{a}}{2\boldsymbol{\sigma}}\vert\vert^2}
# \prod_j^N (1 + e^{b_j + (\frac{\boldsymbol{x}}{\boldsymbol{\sigma}^2})^T \boldsymbol{w}_{\ast j}} ) }
# \nonumber
# $$
# $$
# = \prod_j^N \frac{e^{(b_j + (\frac{\boldsymbol{x}}{\boldsymbol{\sigma}^2})^T \boldsymbol{w}_{\ast j})h_j } }
# {1 + e^{b_j + (\frac{\boldsymbol{x}}{\boldsymbol{\sigma}^2})^T \boldsymbol{w}_{\ast j}}} \nonumber
# $$
# <!-- Equation labels as ordinary links -->
# <div id="_auto67"></div>
#
# $$
# \begin{equation}
# = \prod_j^N p_{GB} (h_j|\boldsymbol{x}).
# \label{_auto67} \tag{67}
# \end{equation}
# $$
# The conditional probability of a binary hidden unit $h_j$ being on or off again takes the form of a sigmoid function
# $$
# p_{GB} (h_j =1 | \boldsymbol{x}) = \frac{e^{b_j + (\frac{\boldsymbol{x}}{\boldsymbol{\sigma}^2})^T \boldsymbol{w}_{\ast j} } }
# {1 + e^{b_j + (\frac{\boldsymbol{x}}{\boldsymbol{\sigma}^2})^T \boldsymbol{w}_{\ast j}}} \nonumber
# $$
# <!-- Equation labels as ordinary links -->
# <div id="_auto68"></div>
#
# $$
# \begin{equation}
# = \frac{1}{1 + e^{-b_j - (\frac{\boldsymbol{x}}{\boldsymbol{\sigma}^2})^T \boldsymbol{w}_{\ast j}}}
# \label{_auto68} \tag{68}
# \end{equation}
# $$
# <!-- Equation labels as ordinary links -->
# <div id="_auto69"></div>
#
# $$
# \begin{equation}
# p_{GB} (h_j =0 | \boldsymbol{x}) =
# \frac{1}{1 + e^{b_j +(\frac{\boldsymbol{x}}{\boldsymbol{\sigma}^2})^T \boldsymbol{w}_{\ast j}}} .
# \label{_auto69} \tag{69}
# \end{equation}
# $$
# The conditional probability of the continuous $\boldsymbol{x}$ now has another form, however.
# $$
# p_{GB} (\boldsymbol{x}|\boldsymbol{h})
# = \frac{p_{GB} (\boldsymbol{x}, \boldsymbol{h})}{p_{GB} (\boldsymbol{h})} \nonumber
# $$
# $$
# = \frac{\frac{1}{Z_{GB}} e^{-\vert\vert\frac{\boldsymbol{x} -\boldsymbol{a}}{2\boldsymbol{\sigma}}\vert\vert^2 + \boldsymbol{b}^T \boldsymbol{h}
# + (\frac{\boldsymbol{x}}{\boldsymbol{\sigma}^2})^T \boldsymbol{W}\boldsymbol{h}}}
# {\frac{1}{Z_{GB}} e^{\boldsymbol{b}^T \boldsymbol{h}} \prod_i^M
# \sqrt{2\pi \sigma_i^2}
# e^{\frac{2a_i \boldsymbol{w}_{i\ast}^T \boldsymbol{h} +(\boldsymbol{w}_{i\ast}^T \boldsymbol{h})^2 }{2\sigma_i^2}}}
# \nonumber
# $$
# $$
# = \prod_i^M \frac{1}{\sqrt{2\pi \sigma_i^2}}
# \frac{e^{- \frac{(x_i - a_i)^2}{2\sigma_i^2} + \frac{x_i \boldsymbol{w}_{i\ast}^T \boldsymbol{h}}{\sigma_i^2} }}
# {e^{\frac{2a_i \boldsymbol{w}_{i\ast}^T \boldsymbol{h} +(\boldsymbol{w}_{i\ast}^T \boldsymbol{h})^2 }{2\sigma_i^2}}}
# \nonumber
# $$
# $$
# = \prod_i^M \frac{1}{\sqrt{2\pi \sigma_i^2}}
# \frac{e^{-\frac{x_i^2 - 2a_i x_i + a_i^2 - 2x_i \boldsymbol{w}_{i\ast}^T\boldsymbol{h} }{2\sigma_i^2} } }
# {e^{\frac{2a_i \boldsymbol{w}_{i\ast}^T \boldsymbol{h} +(\boldsymbol{w}_{i\ast}^T \boldsymbol{h})^2 }{2\sigma_i^2}}}
# \nonumber
# $$
# $$
# = \prod_i^M \frac{1}{\sqrt{2\pi \sigma_i^2}}
# e^{- \frac{x_i^2 - 2a_i x_i + a_i^2 - 2x_i \boldsymbol{w}_{i\ast}^T\boldsymbol{h}
# + 2a_i \boldsymbol{w}_{i\ast}^T \boldsymbol{h} +(\boldsymbol{w}_{i\ast}^T \boldsymbol{h})^2}
# {2\sigma_i^2} }
# \nonumber
# $$
# $$
# = \prod_i^M \frac{1}{\sqrt{2\pi \sigma_i^2}}
# e^{ - \frac{(x_i - a_i - \boldsymbol{w}_{i\ast}^T \boldsymbol{h})^2}{2\sigma_i^2}} \nonumber
# $$
# <!-- Equation labels as ordinary links -->
# <div id="_auto70"></div>
#
# $$
# \begin{equation}
# = \prod_i^M \mathcal{N}
# (x_i | a_i + \boldsymbol{w}_{i\ast}^T \boldsymbol{h}, \sigma_i^2)
# \label{_auto70} \tag{70}
# \end{equation}
# $$
# <!-- Equation labels as ordinary links -->
# <div id="_auto71"></div>
#
# $$
# \begin{equation}
# \Rightarrow p_{GB} (x_i|\boldsymbol{h}) = \mathcal{N}
# (x_i | a_i + \boldsymbol{w}_{i\ast}^T \boldsymbol{h}, \sigma_i^2) .
# \label{_auto71} \tag{71}
# \end{equation}
# $$
# The form of these conditional probabilities explains the name
# "Gaussian" and the form of the Gaussian-binary energy function. We see
# that the conditional probability of $x_i$ given $\boldsymbol{h}$ is a normal
# distribution with mean $a_i + \boldsymbol{w}_{i\ast}^T \boldsymbol{h}$ and variance
# $\sigma_i^2$.
#
#
# ## Neural Quantum States
#
#
# The wavefunction should be a probability amplitude depending on $\boldsymbol{x}$. The RBM model is given by the joint distribution of $\boldsymbol{x}$ and $\boldsymbol{h}$
# <!-- Equation labels as ordinary links -->
# <div id="_auto72"></div>
#
# $$
# \begin{equation}
# F_{rbm}(\boldsymbol{x},\mathbf{h}) = \frac{1}{Z} e^{-\frac{1}{T_0}E(\boldsymbol{x},\mathbf{h})}
# \label{_auto72} \tag{72}
# \end{equation}
# $$
# To find the marginal distribution of $\boldsymbol{x}$ we set:
# <!-- Equation labels as ordinary links -->
# <div id="_auto73"></div>
#
# $$
# \begin{equation}
# F_{rbm}(\mathbf{x}) = \sum_\mathbf{h} F_{rbm}(\mathbf{x}, \mathbf{h})
# \label{_auto73} \tag{73}
# \end{equation}
# $$
# <!-- Equation labels as ordinary links -->
# <div id="_auto74"></div>
#
# $$
# \begin{equation}
# = \frac{1}{Z}\sum_\mathbf{h} e^{-E(\mathbf{x}, \mathbf{h})}
# \label{_auto74} \tag{74}
# \end{equation}
# $$
# Now this is what we use to represent the wave function, calling it a neural-network quantum state (NQS)
# <!-- Equation labels as ordinary links -->
# <div id="_auto75"></div>
#
# $$
# \begin{equation}
# \Psi (\mathbf{X}) = F_{rbm}(\mathbf{x})
# \label{_auto75} \tag{75}
# \end{equation}
# $$
# <!-- Equation labels as ordinary links -->
# <div id="_auto76"></div>
#
# $$
# \begin{equation}
# = \frac{1}{Z}\sum_{\boldsymbol{h}} e^{-E(\mathbf{x}, \mathbf{h})}
# \label{_auto76} \tag{76}
# \end{equation}
# $$
# <!-- Equation labels as ordinary links -->
# <div id="_auto77"></div>
#
# $$
# \begin{equation}
# = \frac{1}{Z} \sum_{\{h_j\}} e^{-\sum_i^M \frac{(x_i - a_i)^2}{2\sigma^2} + \sum_j^N b_j h_j + \sum_{i,j}^{M,N} \frac{x_i w_{ij} h_j}{\sigma^2}}
# \label{_auto77} \tag{77}
# \end{equation}
# $$
# <!-- Equation labels as ordinary links -->
# <div id="_auto78"></div>
#
# $$
# \begin{equation}
# = \frac{1}{Z} e^{-\sum_i^M \frac{(x_i - a_i)^2}{2\sigma^2}} \prod_j^N (1 + e^{b_j + \sum_i^M \frac{x_i w_{ij}}{\sigma^2}})
# \label{_auto78} \tag{78}
# \end{equation}
# $$
# <!-- Equation labels as ordinary links -->
# <div id="_auto79"></div>
#
# $$
# \begin{equation}
# \label{_auto79} \tag{79}
# \end{equation}
# $$
# The above wavefunction is the most general one because it allows for
# complex valued wavefunctions. However it fundamentally changes the
# probabilistic foundation of the RBM, because what is usually a
# probability in the RBM framework is now an amplitude. This means
# that a lot of the theoretical framework usually used to interpret the
# model, i.e. graphical models, conditional probabilities, and Markov
# random fields, breaks down. If we assume the wavefunction to be
# positive definite, however, we can use the RBM to represent the squared
# wavefunction, and thereby a probability. This also makes it possible
# to sample from the model using Gibbs sampling, because we can obtain
# the conditional probabilities.
# <!-- Equation labels as ordinary links -->
# <div id="_auto80"></div>
#
# $$
# \begin{equation}
# |\Psi (\mathbf{X})|^2 = F_{rbm}(\mathbf{X})
# \label{_auto80} \tag{80}
# \end{equation}
# $$
# <!-- Equation labels as ordinary links -->
# <div id="_auto81"></div>
#
# $$
# \begin{equation}
# \Rightarrow \Psi (\mathbf{X}) = \sqrt{F_{rbm}(\mathbf{X})}
# \label{_auto81} \tag{81}
# \end{equation}
# $$
# <!-- Equation labels as ordinary links -->
# <div id="_auto82"></div>
#
# $$
# \begin{equation}
# = \frac{1}{\sqrt{Z}}\sqrt{\sum_{\{h_j\}} e^{-E(\mathbf{X}, \mathbf{h})}}
# \label{_auto82} \tag{82}
# \end{equation}
# $$
# <!-- Equation labels as ordinary links -->
# <div id="_auto83"></div>
#
# $$
# \begin{equation}
# = \frac{1}{\sqrt{Z}} \sqrt{\sum_{\{h_j\}} e^{-\sum_i^M \frac{(X_i - a_i)^2}{2\sigma^2} + \sum_j^N b_j h_j + \sum_{i,j}^{M,N} \frac{X_i w_{ij} h_j}{\sigma^2}} }
# \label{_auto83} \tag{83}
# \end{equation}
# $$
# <!-- Equation labels as ordinary links -->
# <div id="_auto84"></div>
#
# $$
# \begin{equation}
# = \frac{1}{\sqrt{Z}} e^{-\sum_i^M \frac{(X_i - a_i)^2}{4\sigma^2}} \sqrt{\sum_{\{h_j\}} \prod_j^N e^{b_j h_j + \sum_i^M \frac{X_i w_{ij} h_j}{\sigma^2}}}
# \label{_auto84} \tag{84}
# \end{equation}
# $$
# <!-- Equation labels as ordinary links -->
# <div id="_auto85"></div>
#
# $$
# \begin{equation}
# = \frac{1}{\sqrt{Z}} e^{-\sum_i^M \frac{(X_i - a_i)^2}{4\sigma^2}} \sqrt{\prod_j^N \sum_{h_j} e^{b_j h_j + \sum_i^M \frac{X_i w_{ij} h_j}{\sigma^2}}}
# \label{_auto85} \tag{85}
# \end{equation}
# $$
# <!-- Equation labels as ordinary links -->
# <div id="_auto86"></div>
#
# $$
# \begin{equation}
# = \frac{1}{\sqrt{Z}} e^{-\sum_i^M \frac{(X_i - a_i)^2}{4\sigma^2}} \prod_j^N \sqrt{e^0 + e^{b_j + \sum_i^M \frac{X_i w_{ij}}{\sigma^2}}}
# \label{_auto86} \tag{86}
# \end{equation}
# $$
# <!-- Equation labels as ordinary links -->
# <div id="_auto87"></div>
#
# $$
# \begin{equation}
# = \frac{1}{\sqrt{Z}} e^{-\sum_i^M \frac{(X_i - a_i)^2}{4\sigma^2}} \prod_j^N \sqrt{1 + e^{b_j + \sum_i^M \frac{X_i w_{ij}}{\sigma^2}}}
# \label{_auto87} \tag{87}
# \end{equation}
# $$
# <!-- Equation labels as ordinary links -->
# <div id="_auto88"></div>
#
# $$
# \begin{equation}
# \label{_auto88} \tag{88}
# \end{equation}
# $$
# ### Cost function
#
# This is where we deviate from what is common in machine
# learning. Rather than defining a cost function based on some dataset,
# our cost function is the energy of the quantum mechanical system. From
# the variational principle we know that minimizing this energy should
# lead to the ground state wavefunction. As stated previously the local
# energy is given by
# <!-- Equation labels as ordinary links -->
# <div id="_auto89"></div>
#
# $$
# \begin{equation}
# E_L = \frac{1}{\Psi} \hat{\mathbf{H}} \Psi,
# \label{_auto89} \tag{89}
# \end{equation}
# $$
# and the gradient is
# <!-- Equation labels as ordinary links -->
# <div id="_auto90"></div>
#
# $$
# \begin{equation}
# G_i = \frac{\partial \langle E_L \rangle}{\partial \alpha_i}
# = 2(\langle E_L \frac{1}{\Psi}\frac{\partial \Psi}{\partial \alpha_i} \rangle - \langle E_L \rangle \langle \frac{1}{\Psi}\frac{\partial \Psi}{\partial \alpha_i} \rangle ),
# \label{_auto90} \tag{90}
# \end{equation}
# $$
# where $\alpha_i = a_1,...,a_M,b_1,...,b_N,w_{11},...,w_{MN}$.
#
#
# We use that $\frac{1}{\Psi}\frac{\partial \Psi}{\partial \alpha_i}
# = \frac{\partial \ln{\Psi}}{\partial \alpha_i}$,
# and find
# <!-- Equation labels as ordinary links -->
# <div id="_auto91"></div>
#
# $$
# \begin{equation}
# \ln{\Psi({\mathbf{X}})} = -\ln{Z} - \sum_m^M \frac{(X_m - a_m)^2}{2\sigma^2}
# + \sum_n^N \ln({1 + e^{b_n + \sum_i^M \frac{X_i w_{in}}{\sigma^2}})}.
# \label{_auto91} \tag{91}
# \end{equation}
# $$
# This gives
# <!-- Equation labels as ordinary links -->
# <div id="_auto92"></div>
#
# $$
# \begin{equation}
# \frac{\partial }{\partial a_m} \ln\Psi
# = \frac{1}{\sigma^2} (X_m - a_m)
# \label{_auto92} \tag{92}
# \end{equation}
# $$
# <!-- Equation labels as ordinary links -->
# <div id="_auto93"></div>
#
# $$
# \begin{equation}
# \frac{\partial }{\partial b_n} \ln\Psi
# =
# \frac{1}{e^{-b_n-\frac{1}{\sigma^2}\sum_i^M X_i w_{in}} + 1}
# \label{_auto93} \tag{93}
# \end{equation}
# $$
# <!-- Equation labels as ordinary links -->
# <div id="_auto94"></div>
#
# $$
# \begin{equation}
# \frac{\partial }{\partial w_{mn}} \ln\Psi
# = \frac{X_m}{\sigma^2(e^{-b_n-\frac{1}{\sigma^2}\sum_i^M X_i w_{in}} + 1)}.
# \label{_auto94} \tag{94}
# \end{equation}
# $$
# If $\Psi = \sqrt{F_{rbm}}$ we have
# <!-- Equation labels as ordinary links -->
# <div id="_auto95"></div>
#
# $$
# \begin{equation}
# \ln{\Psi({\mathbf{X}})} = -\frac{1}{2}\ln{Z} - \sum_m^M \frac{(X_m - a_m)^2}{4\sigma^2}
# + \frac{1}{2}\sum_n^N \ln({1 + e^{b_n + \sum_i^M \frac{X_i w_{in}}{\sigma^2}})},
# \label{_auto95} \tag{95}
# \end{equation}
# $$
# which results in
# <!-- Equation labels as ordinary links -->
# <div id="_auto96"></div>
#
# $$
# \begin{equation}
# \frac{\partial }{\partial a_m} \ln\Psi
# = \frac{1}{2\sigma^2} (X_m - a_m)
# \label{_auto96} \tag{96}
# \end{equation}
# $$
# <!-- Equation labels as ordinary links -->
# <div id="_auto97"></div>
#
# $$
# \begin{equation}
# \frac{\partial }{\partial b_n} \ln\Psi
# =
# \frac{1}{2(e^{-b_n-\frac{1}{\sigma^2}\sum_i^M X_i w_{in}} + 1)}
# \label{_auto97} \tag{97}
# \end{equation}
# $$
# <!-- Equation labels as ordinary links -->
# <div id="_auto98"></div>
#
# $$
# \begin{equation}
# \frac{\partial }{\partial w_{mn}} \ln\Psi
# = \frac{X_m}{2\sigma^2(e^{-b_n-\frac{1}{\sigma^2}\sum_i^M X_i w_{in}} + 1)}.
# \label{_auto98} \tag{98}
# \end{equation}
# $$
# Let us assume again that our Hamiltonian is
# <!-- Equation labels as ordinary links -->
# <div id="_auto99"></div>
#
# $$
# \begin{equation}
# \hat{\mathbf{H}} = \sum_p^P (-\frac{1}{2}\nabla_p^2 + \frac{1}{2}\omega^2 r_p^2 ) + \sum_{p<q} \frac{1}{r_{pq}},
# \label{_auto99} \tag{99}
# \end{equation}
# $$
# where the first summation term represents the standard harmonic
# oscillator part and the latter the repulsive interaction between two
# electrons. Natural units ($\hbar=c=e=m_e=1$) are used, and $P$ is the
# number of particles. This gives us the following expression for the
# local energy ($D$ being the number of dimensions)
# <!-- Equation labels as ordinary links -->
# <div id="_auto100"></div>
#
# $$
# \begin{equation}
# E_L = \frac{1}{\Psi} \mathbf{H} \Psi
# \label{_auto100} \tag{100}
# \end{equation}
# $$
# <!-- Equation labels as ordinary links -->
# <div id="_auto101"></div>
#
# $$
# \begin{equation}
# = \frac{1}{\Psi} (\sum_p^P (-\frac{1}{2}\nabla_p^2 + \frac{1}{2}\omega^2 r_p^2 ) + \sum_{p<q} \frac{1}{r_{pq}}) \Psi
# \label{_auto101} \tag{101}
# \end{equation}
# $$
# <!-- Equation labels as ordinary links -->
# <div id="_auto102"></div>
#
# $$
# \begin{equation}
# = -\frac{1}{2}\frac{1}{\Psi} \sum_p^P \nabla_p^2 \Psi
# + \frac{1}{2}\omega^2 \sum_p^P r_p^2 + \sum_{p<q} \frac{1}{r_{pq}}
# \label{_auto102} \tag{102}
# \end{equation}
# $$
# <!-- Equation labels as ordinary links -->
# <div id="_auto103"></div>
#
# $$
# \begin{equation}
# = -\frac{1}{2}\frac{1}{\Psi} \sum_p^P \sum_d^D \frac{\partial^2 \Psi}{\partial x_{pd}^2} + \frac{1}{2}\omega^2 \sum_p^P r_p^2 + \sum_{p<q} \frac{1}{r_{pq}}
# \label{_auto103} \tag{103}
# \end{equation}
# $$
# <!-- Equation labels as ordinary links -->
# <div id="_auto104"></div>
#
# $$
# \begin{equation}
# = \frac{1}{2} \sum_p^P \sum_d^D (-(\frac{\partial}{\partial x_{pd}} \ln\Psi)^2 -\frac{\partial^2}{\partial x_{pd}^2} \ln\Psi + \omega^2 x_{pd}^2) + \sum_{p<q} \frac{1}{r_{pq}}.
# \label{_auto104} \tag{104}
# \end{equation}
# $$
# <!-- Equation labels as ordinary links -->
# <div id="_auto105"></div>
#
# $$
# \begin{equation}
# \label{_auto105} \tag{105}
# \end{equation}
# $$
# Letting each visible node in the Boltzmann machine
# represent one coordinate of one particle, we obtain
# <!-- Equation labels as ordinary links -->
# <div id="_auto106"></div>
#
# $$
# \begin{equation}
# E_L =
# \frac{1}{2} \sum_m^M (-(\frac{\partial}{\partial x_m} \ln\Psi)^2 -\frac{\partial^2}{\partial x_m^2} \ln\Psi + \omega^2 x_m^2) + \sum_{p<q} \frac{1}{r_{pq}},
# \label{_auto106} \tag{106}
# \end{equation}
# $$
# where we have that
# <!-- Equation labels as ordinary links -->
# <div id="_auto107"></div>
#
# $$
# \begin{equation}
# \frac{\partial}{\partial x_m} \ln\Psi
# = - \frac{1}{\sigma^2}(x_m - a_m) + \frac{1}{\sigma^2} \sum_n^N \frac{w_{mn}}{e^{-b_n - \frac{1}{\sigma^2}\sum_i^M x_i w_{in}} + 1}
# \label{_auto107} \tag{107}
# \end{equation}
# $$
# <!-- Equation labels as ordinary links -->
# <div id="_auto108"></div>
#
# $$
# \begin{equation}
# \frac{\partial^2}{\partial x_m^2} \ln\Psi
# = - \frac{1}{\sigma^2} + \frac{1}{\sigma^4}\sum_n^N w_{mn}^2 \frac{e^{b_n + \frac{1}{\sigma^2}\sum_i^M x_i w_{in}}}{(e^{b_n + \frac{1}{\sigma^2}\sum_i^M x_i w_{in}} + 1)^2}.
# \label{_auto108} \tag{108}
# \end{equation}
# $$
# We now have all the expressions needed to calculate the gradient of
# the expected local energy with respect to the RBM parameters
# $\frac{\partial \langle E_L \rangle}{\partial \alpha_i}$.
#
# If we use $\Psi = \sqrt{F_{rbm}}$ we obtain
# <!-- Equation labels as ordinary links -->
# <div id="_auto109"></div>
#
# $$
# \begin{equation}
# \frac{\partial}{\partial x_m} \ln\Psi
# = - \frac{1}{2\sigma^2}(x_m - a_m) + \frac{1}{2\sigma^2} \sum_n^N
# \frac{w_{mn}}{e^{-b_n-\frac{1}{\sigma^2}\sum_i^M x_i w_{in}} + 1}
#
# \label{_auto109} \tag{109}
# \end{equation}
# $$
# <!-- Equation labels as ordinary links -->
# <div id="_auto110"></div>
#
# $$
# \begin{equation}
# \frac{\partial^2}{\partial x_m^2} \ln\Psi
# = - \frac{1}{2\sigma^2} + \frac{1}{2\sigma^4}\sum_n^N w_{mn}^2 \frac{e^{b_n + \frac{1}{\sigma^2}\sum_i^M x_i w_{in}}}{(e^{b_n + \frac{1}{\sigma^2}\sum_i^M x_i w_{in}} + 1)^2}.
# \label{_auto110} \tag{110}
# \end{equation}
# $$
# The difference between this equation and the previous one is that we multiply by a factor $1/2$.
#
#
#
#
#
# ## Python version for the two non-interacting particles
# In[1]:
get_ipython().run_line_magic('matplotlib', 'inline')
# 2-electron VMC code for 2dim quantum dot with importance sampling
# Using gaussian rng for new positions and Metropolis- Hastings
# Added restricted boltzmann machine method for dealing with the wavefunction
# RBM code based heavily off of:
# https://github.com/CompPhysics/ComputationalPhysics2/tree/gh-pages/doc/Programs/BoltzmannMachines/MLcpp/src/CppCode/ob
from math import exp, sqrt
from random import random, seed, normalvariate
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
import sys
# Trial wave function for the 2-electron quantum dot in two dims
def WaveFunction(r, a, b, w):
    """Evaluate the RBM trial wave function at configuration r.

    Psi(r) = exp(-sum_m (r_m - a_m)^2 / (2 sigma^2)) * prod_h (1 + e^{Q_h}),
    with Q computed by Qfac and sigma fixed to 1.
    """
    sigma = 1.0
    sig2 = sigma**2
    Q = Qfac(r, b, w)
    # Gaussian part: squared distance of every visible coordinate from its bias.
    dist2 = 0.0
    for p in range(NumberParticles):
        for d in range(Dimension):
            dist2 += (r[p, d] - a[p, d])**2
    gauss = np.exp(-dist2 / (2 * sig2))
    # Hidden-node product part.
    hidden = 1.0
    for h in range(NumberHidden):
        hidden *= (1.0 + np.exp(Q[h]))
    return gauss * hidden
# Local energy for the 2-electron quantum dot in two dims, using analytical local energy
def LocalEnergy(r, a, b, w):
    """Analytical local energy E_L = (H Psi)/Psi for the 2-dim quantum dot.

    One-body part is built from the first and second derivatives of ln(Psi);
    the two-body Coulomb repulsion 1/r_pq is added when `interaction` is set.
    """
    sigma = 1.0
    sig2 = sigma**2
    Q = Qfac(r, b, w)
    energy = 0.0
    # Kinetic + harmonic-oscillator contribution, coordinate by coordinate.
    for p in range(NumberParticles):
        for d in range(Dimension):
            hidden1 = 0.0
            hidden2 = 0.0
            for h in range(NumberHidden):
                hidden1 += w[p, d, h] / (1 + np.exp(-Q[h]))
                hidden2 += w[p, d, h]**2 * np.exp(Q[h]) / (1.0 + np.exp(Q[h]))**2
            grad_ln = -(r[p, d] - a[p, d]) / sig2 + hidden1 / sig2
            lap_ln = -1 / sig2 + hidden2 / sig2**2
            energy += 0.5 * (-grad_ln * grad_ln - lap_ln + r[p, d]**2)
    # Electron-electron repulsion, summed over distinct pairs.
    if interaction == True:
        for p1 in range(NumberParticles):
            for p2 in range(p1):
                rij2 = 0.0
                for d in range(Dimension):
                    rij2 += (r[p1, d] - r[p2, d])**2
                energy += 1 / sqrt(rij2)
    return energy
# Derivate of wave function ansatz as function of variational parameters
def DerivativeWFansatz(r, a, b, w):
    """Gradient of ln(Psi) with respect to the variational parameters.

    Returns a list [d lnPsi/d a, d lnPsi/d b, d lnPsi/d w] whose entries have
    the shapes of a, b and w.  Implements eqs. (92)-(94) of the notes above,
    with sigma = 1.
    """
    sigma = 1.0
    sig2 = sigma**2
    Q = Qfac(r, b, w)
    # Allocate result containers with the right shapes (values overwritten below).
    WfDer = [np.copy(a), np.copy(b), np.copy(w)]
    WfDer[0] = (r - a) / sig2                # eq. (92): d lnPsi / d a_m
    WfDer[1] = 1 / (1 + np.exp(-Q))          # eq. (93): sigmoid of Q_n
    for ih in range(NumberHidden):
        # eq. (94): d lnPsi / d w_{mn} = x_m / (sigma^2 (1 + e^{-Q_n})).
        # BUGFIX: the original used w[:,:,ih] in the numerator, but the
        # derivative derived in the notes has the visible coordinates r there.
        WfDer[2][:, :, ih] = r / (sig2 * (1 + np.exp(-Q[ih])))
    return WfDer
# Setting up the quantum force for the two-electron quantum dot, recall that it is a vector
def QuantumForce(r, a, b, w):
    """Quantum (drift) force F = 2 * grad(ln Psi) used by importance sampling.

    Returns an array of shape (NumberParticles, Dimension).
    """
    sigma = 1.0
    sig2 = sigma**2
    Q = Qfac(r, b, w)
    # Hidden-node contribution to grad(ln Psi), accumulated over hidden units.
    sum1 = np.zeros((NumberParticles, Dimension), np.double)
    for ih in range(NumberHidden):
        sum1 += w[:, :, ih] / (1 + np.exp(-Q[ih]))
    # (Dead pre-allocation of qforce removed; it was immediately overwritten.)
    return 2 * (-(r - a) / sig2 + sum1 / sig2)
def Qfac(r, b, w):
    """Hidden-node arguments Q_h = b_h + sum_{p,d} r[p,d] * w[p,d,h].

    The 1/sigma^2 factor from the notes is absorbed because sigma = 1 in this
    program.  The number of hidden nodes is taken from w.shape[2] instead of
    the module-level constant, which keeps the function self-contained while
    behaving identically for the existing callers (NumberHidden == w.shape[2]).
    """
    num_hidden = w.shape[2]
    temp = np.zeros(num_hidden, np.double)
    for ih in range(num_hidden):
        temp[ih] = (r * w[:, :, ih]).sum()
    # (Dead zero-initialization of Q removed; it was overwritten immediately.)
    return b + temp
# Computing the derivative of the energy and the energy
def EnergyMinimization(a, b, w):
    """One Monte Carlo estimate of <E_L> and its parameter gradient.

    Runs an importance-sampled Metropolis-Hastings walk (Fokker-Planck drift)
    and accumulates the local energy and the wave-function log-derivatives.

    Returns (energy, EnergyDer) where EnergyDer = [dE/da, dE/db, dE/dw]
    computed from 2(<E_L D> - <E_L><D>) with D the log-derivative.
    """
    NumberMCcycles = 10000
    # Parameters of the Fokker-Planck / Langevin importance sampling.
    D = 0.5
    TimeStep = 0.05
    # Walker positions and drift forces, old and trial.
    PositionOld = np.zeros((NumberParticles, Dimension), np.double)
    PositionNew = np.zeros((NumberParticles, Dimension), np.double)
    QuantumForceOld = np.zeros((NumberParticles, Dimension), np.double)
    QuantumForceNew = np.zeros((NumberParticles, Dimension), np.double)
    seed()  # re-seed the pseudo-random generator
    energy = 0.0
    # Accumulators shaped like (a, b, w), zero-initialized.
    # (Dead np.empty pre-assignments and the unused DeltaE = 0.0 removed.)
    EnergyDer = [np.zeros_like(a), np.zeros_like(b), np.zeros_like(w)]
    DeltaPsi = [np.zeros_like(a), np.zeros_like(b), np.zeros_like(w)]
    DerivativePsiE = [np.zeros_like(a), np.zeros_like(b), np.zeros_like(w)]
    # Initial positions drawn from a Gaussian with variance TimeStep.
    for i in range(NumberParticles):
        for j in range(Dimension):
            PositionOld[i, j] = normalvariate(0.0, 1.0) * sqrt(TimeStep)
    wfold = WaveFunction(PositionOld, a, b, w)
    QuantumForceOld = QuantumForce(PositionOld, a, b, w)
    # Loop over Monte Carlo cycles.
    for MCcycle in range(NumberMCcycles):
        # Trial move: one particle at a time, Gaussian step plus drift.
        for i in range(NumberParticles):
            for j in range(Dimension):
                PositionNew[i, j] = (PositionOld[i, j]
                                     + normalvariate(0.0, 1.0) * sqrt(TimeStep)
                                     + QuantumForceOld[i, j] * TimeStep * D)
            wfnew = WaveFunction(PositionNew, a, b, w)
            QuantumForceNew = QuantumForce(PositionNew, a, b, w)
            # Greens-function ratio entering the Metropolis-Hastings test.
            GreensFunction = 0.0
            for j in range(Dimension):
                GreensFunction += 0.5 * (QuantumForceOld[i, j] + QuantumForceNew[i, j]) * (
                    D * TimeStep * 0.5 * (QuantumForceOld[i, j] - QuantumForceNew[i, j])
                    - PositionNew[i, j] + PositionOld[i, j])
            GreensFunction = exp(GreensFunction)
            ProbabilityRatio = GreensFunction * wfnew**2 / wfold**2
            # Metropolis-Hastings accept/reject of the single-particle move.
            if random() <= ProbabilityRatio:
                for j in range(Dimension):
                    PositionOld[i, j] = PositionNew[i, j]
                    QuantumForceOld[i, j] = QuantumForceNew[i, j]
                wfold = wfnew
        # Sample local energy and log-derivatives at the (possibly moved) walker.
        DeltaE = LocalEnergy(PositionOld, a, b, w)
        DerPsi = DerivativeWFansatz(PositionOld, a, b, w)
        for k in range(3):
            DeltaPsi[k] += DerPsi[k]
            DerivativePsiE[k] += DerPsi[k] * DeltaE
        energy += DeltaE
    # Mean values over the Monte Carlo cycles, then the gradient formula.
    energy /= NumberMCcycles
    for k in range(3):
        DerivativePsiE[k] /= NumberMCcycles
        DeltaPsi[k] /= NumberMCcycles
        EnergyDer[k] = 2 * (DerivativePsiE[k] - DeltaPsi[k] * energy)
    return energy, EnergyDer
# Here starts the main program with variable declarations.
NumberParticles = 2
Dimension = 2
NumberHidden = 2
interaction = True  # include the two-body Coulomb repulsion in LocalEnergy

# Initial guess for the RBM parameters: small random values around zero.
a = np.random.normal(loc=0.0, scale=0.001, size=(NumberParticles, Dimension))
b = np.random.normal(loc=0.0, scale=0.001, size=(NumberHidden))
w = np.random.normal(loc=0.0, scale=0.001, size=(NumberParticles, Dimension, NumberHidden))

# Set up iteration using the stochastic gradient method.
# (Dead np.empty pre-assignment of EDerivative removed.)
Energy = 0
EDerivative = [np.copy(a), np.copy(b), np.copy(w)]
# Learning rate eta and max iterations; TODO: switch to an adaptive learning rate.
eta = 0.001
MaxIterations = 50
np.seterr(invalid='raise')
Energies = np.zeros(MaxIterations)
EnergyDerivatives1 = np.zeros(MaxIterations)
EnergyDerivatives2 = np.zeros(MaxIterations)

# Plain gradient descent on <E_L>; loop counter renamed from `iter`
# to avoid shadowing the builtin.
for iteration in range(MaxIterations):
    Energy, EDerivative = EnergyMinimization(a, b, w)
    a -= eta * EDerivative[0]
    b -= eta * EDerivative[1]
    w -= eta * EDerivative[2]
    Energies[iteration] = Energy
    print("Energy:", Energy)
    # EnergyDerivatives1[iteration] = EDerivative[0]
    # EnergyDerivatives2[iteration] = EDerivative[1]

# Nice printout with pandas.
import pandas as pd
from pandas import DataFrame
# 'max_columns' alone is ambiguous in newer pandas (matches several options);
# use the fully qualified option name.
pd.set_option('display.max_columns', 6)
data = {'Energy': Energies}
frame = pd.DataFrame(data)
print(frame)
# In[ ]:
|
2bc0fc20668dba0dcb8cb0d61402a202060f54d7 | bambrow/python-programming-notes | /io_basics/01_io.py | 251 | 3.671875 | 4 | #!/usr/bin/env python
# coding:utf-8
# Read example.txt line by line, printing each stripped line; bytes that
# cannot be decoded as UTF-8 are silently dropped (errors='ignore').
with open('example.txt', 'r', encoding='utf-8', errors='ignore') as f:
    for line in f:
        print(line.strip())

# Overwrite example2.txt with a short greeting.
with open('example2.txt', 'w', encoding='utf8') as f:
    f.write('Hello World!')
09cce81030257893e2b182f92f768d7e3086ab56 | rhofset/Fizz-Buzz | /FizzBuzz.py | 307 | 4.0625 | 4 | # 1 - 100, if multiple of 3 and 5 print FizzBuzz, if multiple of 5 print Buzz and if multiple of 3 print Fizz
def fizzbuzz(n):
    """Return the FizzBuzz word for integer n.

    'FizzBuzz' for multiples of both 3 and 5, 'Fizz' for multiples of 3,
    'Buzz' for multiples of 5, otherwise the number itself as a string.
    """
    if n % 3 == 0 and n % 5 == 0:
        return 'FizzBuzz'
    if n % 3 == 0:
        return 'Fizz'
    if n % 5 == 0:
        return 'Buzz'
    return str(n)

# Same printed output as the original loop, now via a testable helper.
for x in range(1, 101):
    print(fizzbuzz(x))
|
38b9b7e23f8da67f5f75fa57a0ecfcbb1abcf2ef | HankChou0811/123 | /123.py | 132 | 3.71875 | 4 | N = 10
# Interactive demo: read two values and echo them back.
height = input('請輸入身高:')  # prompt for height; input() always returns a str
weight = input('請輸入體重:')  # prompt for weight
print('therefore u say that', height, weight)
print(N)  # N is the constant defined at the top of the file
|
c962fddaae1c251f1e94512b49e80b7aa5408aad | mwoinoski/crs1906 | /examples/ch08_examples/subprocesses/encrypt.py | 2,163 | 3.5625 | 4 | """
encrypt.py - uses the subprocess module to encrypt multiple files in parallel.
"""
import getpass
import os
import sys
import subprocess
# The run_openssl function will be the target of the processes that we create
def run_openssl(file, pw):
"""
Use openssl to encrypt some data using AES (Advanced Encryption Standard),
the replacement for DES (Data Encryption Standard).
"""
try:
# Note how the input and output files are opened as
# `in_file` and `out_file`
with open(file, 'r') as in_file:
with open(file + '.aes', 'w') as out_file:
environ = os.environ.copy()
environ['secret'] = pw # store password in env variable
# Call subprocess.Popen() to launch a process running
# openssl to encrypt the input file.
cmd = ['openssl', 'enc', '-e', '-aes256', '-pass', 'env:secret']
proc = subprocess.Popen(cmd, env=environ,
stdin=in_file, stdout=out_file)
# We don't need to write to or flush the
# Popen instance's standard input because openssl is reading
# from a file instead of a pipe.
return proc
except Exception as e:
print('Problem encrypting', file, e)
raise
def main():
    """Encrypt every file named on the command line, all in parallel."""
    if len(sys.argv) == 1:
        print(f'Usage: {sys.argv[0]} file...')
        sys.exit(1)
    passphrase = getpass.getpass()  # prompt without echoing the input
    # Fan out: launch one openssl process per input file.
    procs = [run_openssl(name, passphrase) for name in sys.argv[1:]]
    # Fan in: wait for every child to finish.  The return values of
    # communicate() are not needed because the children talk to files.
    for proc in procs:
        proc.communicate()
    print('Done encrypting', ' '.join(sys.argv[1:]))
# Run only when executed as a script, not when imported.
if __name__ == '__main__':
    main()
|
22f949649423ee94c042f01482d5f7b80a4eefc5 | kaveriumadi/project | /list.py | 775 | 3.875 | 4 | l1 = [1,2,4,3,6,7,8,4,3]
# --- basic inspection of the list l1 defined above ---
print(l1)
l1[4]  # bare expression: evaluated and discarded (prints nothing in a script)
print(l1[4])  # element at index 4
print(len(l1))  # number of elements
# --- growing the list ---
l1.append(22)
print(l1)
l1.append(25)
print(l1)
l1.insert(2, 45)  # insert 45 at index 2, shifting later elements right
print(l1)
l2 = [4,6,2,8]
l1.extend(l2)  # append all elements of l2 to l1
print(l1)
l1[2]=20  # replace the element at index 2
print(l1)
# --- ordering ---
l1.sort()
print(l1)
l1.reverse()
print(l1)
l1.sort()
print(l1)
# --- removing elements ---
l1.pop()  # remove and discard the last element
print(l1)
l1.pop(1)  # remove and discard the element at index 1
print(l1)
l3 = ["a", "b", "c","d", "a", "b"] #removing the duplicate values
l3 = list(dict.fromkeys(l3))  # dict keys are unique and keep insertion order
print(l3)
text = "hello everyone"[::-1] #reverse the string
print(text)
#x = input("enter a first number:")
#y = input("enter the second number:")
#sum = int(x) + int(y) #adding two numbers
#print(sum)
print(l1)
def count_3s(values=None, target=3):
    """Count occurrences of *target* (default 3) in *values*.

    Falls back to the module-level list ``l1`` when *values* is None, so the
    original zero-argument call ``count_3s()`` behaves exactly as before.
    """
    data = l1 if values is None else values
    return sum(1 for item in data if item == target)
print(count_3s()) |
62a541185a25a9385a724326133f10e9fbce7d01 | freakraj/python_projects | /for_loop.py | 164 | 3.765625 | 4 | # i=1
# while i<=10:
# print(f"Hello word : {i}")
# i+=1
for i in range(0,11):
print(f"gautam word {i}")
print("\nTHIS YOUR SHOP ") |
0964edb8a9e9a9c07d02a6374f50b06777c9c3a8 | bunshue/vcs | /_4.python/__code/Python GUI 設計活用 tkinter之路/ch3/ch3_35.py | 404 | 3.640625 | 4 | # ch3_35.py
from tkinter import *

root = Tk()
root.title("ch3_35")  # window title

# One row per color: a groove-relief label with the color name on the left,
# and a ridge-relief label filled with that background color on the right.
Colors = ["red","orange","yellow","green","blue","purple"]
for r, color in enumerate(Colors):
    Label(root, text=color, relief="groove", width=20).grid(row=r, column=0)
    Label(root, bg=color, relief="ridge", width=20).grid(row=r, column=1)

root.mainloop()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.