blob_id string | repo_name string | path string | length_bytes int64 | score float64 | int_score int64 | text string | is_english bool |
|---|---|---|---|---|---|---|---|
a0bb7d1e40305b400b3f64447cfc2af80c3a47a1 | achang6/wallstory | /pytuts/monkey/calc_ke.py | 503 | 4.1875 | 4 | # calculate kinetic energy
# Kinetic energy calculator: KE = 1/2 * m * v^2.
print('This program calculates the kinetic energy of a moving object.')

# Read mass and velocity, converting the input strings to float in one step.
m = float(input('Enter the object\'s mass in kilograms: '))
v = float(input('Enter the object\'s velocity in m/s: '))

# Half the mass times the velocity squared.
ke = 0.5 * m * v * v
print('The object has ' + str(ke) + ' joules of energy.')
| true |
94c2f892a841e7e85e3b4b8668fa0c3c03aab561 | JagritiG/object_oriented_python | /15_polymorphism.py | 2,845 | 4.65625 | 5 | # Example of polymorphism
# Todo: Example of inbuilt polymorphic functions:
# len() is polymorphic: the same call works on any sized collection.
print(len("Python")) # len() returns length of a string
print(len([1, 2, 3, 4, 5])) # len() returns length of a list
# Todo: Example of user defined polymorphic function
def add(num1, num2, *args):
    """Return the sum of two or more numbers.

    :param num1: first required addend
    :param num2: second required addend
    :param args: any number of extra addends
    :return: the total of all arguments
    """
    # sum() accepts the extra-args tuple directly; the original wrapped it
    # in a redundant generator (sum(num for num in args)).
    return num1 + num2 + sum(args)
# Todo: Polymorphism with class methods
# Below code shows how python can use two different class types, in the same way.
# We create a for loop that iterates through a tuple of objects.
# Then call the methods assuming that these methods actually exist in both class.
class Honda:
    """Sample vehicle type for the duck-typing demo."""

    def __init__(self):
        # Fixed sample data; the demo only reads these back out.
        self.make, self.model, self.status = "Honda", "Accord", "Used"

    def get_make(self):
        """Return the manufacturer name."""
        return self.make

    def get_model(self):
        """Return the model name."""
        return self.model

    def get_status(self):
        """Return the new/used status."""
        return self.status
class Hyundai:
    """Second vehicle type exposing the same accessor interface as Honda."""

    def __init__(self):
        # Fixed sample data; the demo only reads these back out.
        self.make, self.model, self.status = "Hyundai", "Palisade", "New"

    def get_make(self):
        """Return the manufacturer name."""
        return self.make

    def get_model(self):
        """Return the model name."""
        return self.model

    def get_status(self):
        """Return the new/used status."""
        return self.status
# Todo: Polymorphism with Inheritance
# In Python, Polymorphism lets us define methods in the child class that have the same name as the methods in the parent class.
# In inheritance, the child class inherits the methods from the parent class.
# However, it is possible to modify a method in a child class that it has inherited from the parent class.
# This is particularly useful in cases where the method inherited from the parent class does not quite fit the child class.
# In such cases, we re-implement the method in the child class.
# This process of re-implementing a method in the child class is known as Method Overriding.
class Car:
    """Base class for the method-overriding demo: a new 2018 car."""

    def __init__(self):
        self.status, self.year = "New", "2018"

    def get_year(self):
        """Return the model year string."""
        return self.year

    def get_status(self):
        """Return the current status (overridden in subclasses)."""
        return self.status
class Honda1(Car):
    """Car subclass demonstrating method overriding."""

    def get_status(self):
        # Override: flip the inherited status to "Used" before reporting it.
        self.status = "Used"
        return self.status
class Hyundai1(Car):
    """Second Car subclass with the same get_status override."""

    def get_status(self):
        # Override: flip the inherited status to "Used" before reporting it.
        self.status = "Used"
        return self.status
# Todo: Polymorphism with a Function and objects:
if __name__ == "__main__":
    # User-defined polymorphic function: extra positional args are summed.
    print(add(2, 3, 5))

    # Duck typing: both classes expose the same accessor methods, so one
    # loop handles either type.
    car1 = Honda()
    car2 = Hyundai()
    for vehicle in (car1, car2):
        print(vehicle.get_make())
        print(vehicle.get_model())
        print(vehicle.get_status())

    # Method overriding: subclasses replace get_status inherited from Car.
    base_car = Car()
    sub_car1 = Honda1()
    sub_car2 = Hyundai1()
    print(base_car.get_status(), base_car.get_year())
    print(sub_car1.get_status(), base_car.get_year())
    print(sub_car2.get_status(), base_car.get_year())
| true |
f72d71630eaebcdcf010cbc8c3abd3d5a8f2bfba | David-Lisboa/Python-Basico---Oceean | /02OperacoesMatematica.py | 424 | 4.21875 | 4 | # Operações Matematicas
# Basic arithmetic operators and the resulting types.
soma = 1 + 2
subtracao = 3 - 1
multiplicacao = 3 * 2
divisao = 5 / 3
divisao_int = 5 // 3
# Each result is printed together with its type: +, -, * on ints give int;
# / always gives float; // gives the floored int quotient.
print(soma)
print(type(soma))
print(subtracao)
print(type(subtracao))
print(multiplicacao)
print(type(multiplicacao))
print(divisao)
print(type(divisao))
print(divisao_int)
print(type(divisao_int))
print(int(5 / 3)) # converts the division result to an integer (truncates)
# Variables are dynamically typed: rebinding can change the type freely.
variavel = 1
variavel = "amanda"
print(variavel) | false |
4ce07b2cd30f6087d42555e915e9369574f6dde0 | trishulg/Lectures | /Lec4/SavingsProgram.py | 548 | 4.25 | 4 | # Get information from the user ? Input
# Get information from the user ? Input
balance = float(input('How much do u want to save : '))
if balance <= 0:
    # Nothing to save: use dummy values so the division below is safe.
    print('Looks like you already have enough')
    balance = 0
    payment = 1
else:
    payment = float(input('How much will you save each period: '))
    # Fixed: the original re-prompted only once, so a second non-positive
    # entry still slipped through to the division.  Loop until positive.
    while payment <= 0:
        payment = float(input('enter a positive value: '))
# Calculation
num_remaining_payments = balance / payment
# Presentation to the user / Output
print(num_remaining_payments)
print('You must make ' + str(num_remaining_payments) + ' more payments')
| true |
866f2ddd60097928168b3a1510f042e5cd4b9880 | Latas2001/python-program | /if elif else.py | 311 | 4.1875 | 4 | n=input("Enter the no.")
# NOTE(review): the original block did not parse (for/elif interleaved,
# stray tokens, a trailing colon after print).  Rewritten as the intended
# odd/even "weird" classifier, keeping the original messages and ranges.
n = int(n)  # input() returns a string; convert once for the % / range tests
if n % 2 != 0:
    # Odd numbers are always weird.
    print("This is odd no.")
    print("Weird")
elif 2 <= n <= 5:
    print("not weird")
elif 6 <= n <= 20:
    print("weird")
elif n > 20:
    print("not weird")
else:
    print("good bye")
| false |
66e60bf7b0d5ef02153b9ba4bb0b99e405758a45 | Latas2001/python-program | /birthday reminder.py | 721 | 4.375 | 4 | dict={}
while True:
print("_______________Birthday App________________")
print("1.Show Birthday")
print("2.Add to Birthday List")
print("3.Exit")
choice = int(input("Enter the choice: "))
if choice==1:
if len(dict.keys())==0:
print("nothing to show....")
else:
name=input("Enter the name look for birthday....")
birthday=dict.get(name,"No data found")
print(birthday)
elif choice==2:
name=input("Enter your friend's name: ")
date=input("Enter the birthday: ")
dict[name]=date
print("Birthday added successfully")
elif choice==3:
break
else:
print("Choose a valid option")
| true |
74cf296823ec6ed1d27702f4e8e778b488badf00 | shreyasingh18/HSBC-2021-WFS1-DEMOS | /python-examples/if_demo.py | 264 | 4.21875 | 4 | x = int(input("Enter value for x: "))
y = int(input("Enter value for y: "))
if x > y:
print("if: x > y")
print("if: x = ", x, "y = ", y)
else:
print("else: inside the else condition")
print("else: x = ", x, "y = ", y)
print("outside if condition") | false |
d115b3c8fd28f43e7259473a048e960b1f65423d | shreyasingh18/HSBC-2021-WFS1-DEMOS | /python-examples/nested_list_demo.py | 361 | 4.25 | 4 | items = [[1, 4, 3], [5, 8, 9, 10]]
# `items` above is a nested list (a list whose elements are lists).
print(len(items))
# Length of the inner list at a particular index.
print(len(items[0]))
# Iterating the outer list yields the inner lists themselves.
for x in items:
    print(x)
print("--------------")
# Nested loops visit every scalar element.
for x in items:
    for y in x:
        print(y)
print(items)
# Deleting an element by index shifts the remaining ones left.
del items[0]
print(items) | true |
fc3331e287fc69733621fe8b34ad67cddc70e270 | anishmarathe007/Assignments | /bookManagement.py | 1,199 | 4.1875 | 4 | data = {}
def insertIntoBook(name, author):
    """Add a book (name -> author) to the global shelf `data`.

    Prints a success message, or a warning when the title already exists.
    """
    # Membership test on the dict itself; `name not in data.keys()` was a
    # redundant indirection.
    if name not in data:
        data[name] = author
        print("Book Successfully Inserted!")
    else:
        print("Book with the same name already exists!")
def search(name):
    """Look up `name` in the global shelf `data` and print the result."""
    # Membership test on the dict itself instead of `.keys()`.
    if name in data:
        print(name, "Present. Author name is : ", data[name])
    else:
        print(name, "not present in the shelf.")
def deleteBook(name):
    """Remove `name` from the global shelf `data`, reporting the outcome."""
    # Membership test on the dict itself instead of `.keys()`.
    if name in data:
        print(name, "Deleted!")
        data.pop(name)
    else:
        print(name, "Not Present")
def displayBooks():
    """Print a tab-separated table of every book in the global `data`."""
    print("Book Name\tAuthor Name")
    # .items() yields key and value together, avoiding a second lookup.
    for name, author in data.items():
        print(name, "\t", author)
# Interactive menu: loops until the user picks Exit (5).
while True:
    print("1.Enter New Book\n2.Search for a book\n3.Delete a book\n4.Display a book\n5.Exit")
    ch = int(input("Enter the choice.\n"))
    if ch == 1:
        # Arguments evaluate left-to-right, preserving the prompt order.
        insertIntoBook(input("Enter the name of the book.\n"),
                       input("Enter the name of author for book\n"))
    elif ch == 2:
        search(input("Enter the name of the book you want to search!\n"))
    elif ch == 3:
        deleteBook(input("Enter the name of the book you want to delete!\n"))
    elif ch == 4:
        displayBooks()
    elif ch == 5:
        exit(0)
    else:
        print("Wrong Choice!")
97a6b7d3bccc444a72db5cd4fa60b184aff9468a | shotokan/web_scraping_examples | /q1/d.py | 879 | 4.6875 | 5 | def _is_multiple_of_six(number):
"""
Function utility used to check if a number is multiple of six
:param number:
:return:
"""
return (number % 6) == 0
def _is_multiple_of_seven(number):
"""
Function utility used to check if a number is multiple of seven
:param number:
:return:
"""
return (number % 7) == 0
def multiples(start, end):
    """Generate FizzBuzz-style values for the numbers (start, end].

    Yields 'Docket Alarm' for multiples of both 6 and 7, 'Docket' for
    multiples of 7, 'Alarm' for multiples of 6, otherwise the number.

    Fixed: the original `while counter != end` loop never terminated when
    end < start; a range() makes the iteration bounds explicit.  The
    one-line predicate helpers are inlined.
    """
    for value in range(start + 1, end + 1):
        by_six = value % 6 == 0
        by_seven = value % 7 == 0
        if by_six and by_seven:
            yield 'Docket Alarm'
        elif by_seven:
            yield 'Docket'
        elif by_six:
            yield 'Alarm'
        else:
            yield value
if __name__ == '__main__':
    # Demo run: print the sequence for 1..1000.
    for value in multiples(0, 1000):
        print(value)
| true |
26be92cae05d7c34988a82611d73208c5092ebd5 | ktn-andrea/Scripts | /03/palindrom.py | 735 | 4.15625 | 4 | #!/usr/bin/env python3
def is_palindrome1(s):
    """Palindrome test via slice reversal."""
    reversed_s = s[::-1]
    return reversed_s == s
def is_palindrome2(s):
    """Iterative palindrome test comparing mirrored characters.

    Fixed: the original unconditionally indexed s[0] inside its loop, so
    an empty string raised IndexError; an empty string is a palindrome.
    """
    i = 0
    j = len(s) - 1
    while i < j:
        if s[i] != s[j]:
            return False
        i += 1
        j -= 1
    return True
def is_palindrome3(s):
    """Recursive palindrome test: strip matching outer characters."""
    if len(s) < 2:
        # Zero or one character is trivially a palindrome.
        return True
    if s[0] != s[-1]:
        return False
    return is_palindrome3(s[1:-1])
def main():
    # Prompt for a word ("Bemeneti szo" = input word) and report the
    # verdict of all three implementations in the same format.
    s = input("Bemeneti szo: ")
    for check in (is_palindrome1, is_palindrome2, is_palindrome3):
        print("{0}: {1}".format(s, check(s)))
#################################################################
if __name__ == "__main__":
    main()
| false |
5dcb94625f39b6b106041a51f63baef27ce59177 | jeremytedwards/data-structures | /src/data_structures/sort_insertion.py | 1,801 | 4.125 | 4 | # coding=utf-8
import bisect
import random
import timeit
def sort_insertion(origin_list):
    """Implement insertion sort.

    Consumes origin_list (it is left empty on return, matching the
    original behaviour) and returns a new list sorted ascending.
    """
    if len(origin_list) == 0:
        # Preserve original contract: return the same (empty) object.
        return origin_list
    sorted_list = []
    while origin_list:
        item = origin_list.pop(0)
        # insort_left places the item before the first value >= item —
        # exactly where the original O(n) linear scan inserted it, but
        # found with a binary search.
        bisect.insort_left(sorted_list, item)
    return sorted_list
def main():
    """Exercise sort_insertion on random, ordered, and reversed input."""

    def report(label, data):
        """Print the data set, its sorted result, and a timing for one sort."""
        # sort_insertion consumes its argument, so work on copies.
        print("\n{} Set:\n".format(label), data)
        print("{} Set Result:\n".format(label), sort_insertion(list(data)))
        # Fixed: the original passed the *string* "sort_insertion" to
        # timeit, which only timed evaluating the name, never calling it.
        # Time a real call; number=1 because the sort itself is O(n^2).
        print('\nSort time: {}\n'.format(
            timeit.timeit(lambda: sort_insertion(list(data)), number=1)))

    report("Random", [random.randint(1, 10000) for i in range(10000)])
    report("Ordered", [i + 1 for i in range(random.randint(1, 10000))])
    report("Reversed", list(range(random.randint(1, 10000)))[::-1])

if __name__ == '__main__':
    main()
| true |
d1fcc3ea09823f097bed3a97aaa43abd71a5ec9a | ayesha53/ayeshajawaid | /calculator.py | 435 | 4.25 | 4 | first_number=int(input("Enter number "))
second_number = int(input("Enter number "))
operator = input("Enter operator")
# The inputs are already ints; the original re-wrapped each one in a
# redundant int() at every use.
if operator == "+":
    print(first_number + second_number)
elif operator == "-":
    print(first_number - second_number)
elif operator == "*":
    print(first_number * second_number)
elif operator == "/":
    # Guard the only operation that can raise on valid int input.
    if second_number == 0:
        print("Cannot divide by zero")
    else:
        print(first_number / second_number)
else:
    print("You enterd wrong operator")
| false |
8d2680094b60b6fd59369f3031bb725c952acfde | ravindrasinghinbox/learn-python | /python-w3schools-v1/list.py | 1,427 | 4.1875 | 4 | # mylist = ["a", "c", "b"]
# print(mylist)
# print(mylist[0])
# print(mylist[-1])
# print(mylist[1:2])
# thislist = ["apple", "banana", "cherry", "orange", "kiwi", "melon", "mango"]
# print(thislist[-4:-1])
# # change list value
# mylist = ["a", "b", "c"]
# mylist[0] = "A"
# print(mylist)
# # loop list
# for x in mylist:
# print(x)
# # check item if exist
# if "A" in mylist:
# print("Yes, 'A' is in list")
# # list len
# print(len(mylist))
# # add items
# mylist.append("C")
# print(mylist)
# # add element at index with insert
# mylist.insert(1, "0")
# print(mylist)
# # remove element
# mylist.remove("0")
# print(mylist)
# # pop element
# print(mylist.pop())
# # del
# del mylist[0]
# print(mylist)
# # del entire element
# # del mylist
# # print(mylist)
# # clear
# mylist.clear()
# print(mylist)
# # copy
# mylist2 = mylist.copy()
# print(mylist)
# mylist3 = list(mylist)
# print(mylist3)
# # join two lists
# mylist = ["hello", "world"]
# mylist2 = ["here", "goes"]
# list3 = mylist + mylist2
# print(list3)
# mylist.extend(mylist2)
# print(mylist)
# # list constructor
# mylist = list(("ok", "goes", "here", "today"))
# print(mylist)
# print(mylist.sort())
# # list constructor
# mylist = list(["ok", "goes", "here", "today"])
# print(mylist)
# mylist.sort()
# mylist.reverse()
# mylist.remove("ok")
# print(mylist) | false |
778636921c820b819ca1f8a698183fa15fc290a1 | ravindrasinghinbox/learn-python | /python-w3schools-v1/if-else.py | 786 | 4.40625 | 4 | # if
# if
if 2 > 1:
    print("2 is greater than 1")

# elif
if 5 < 1:
    print("5 is less than 1")
elif 4 > 2:
    print("4 is greater than 2")

# else
if 5 < 1:
    print("5 is less than 1")
elif 4 < 2:
    print("4 is less than 2")
else:
    print("No input")

# short hand if
# Fixed: the original printed "4 is greater than 5" even though the
# condition it demonstrates is 4 < 5.
if 4 < 5: print("4 is less than 5")

# short hand if else (conditional expression)
print("true") if 1 < 2 else print("false")

# one line if else condition (chained conditional expressions)
print("true") if 2 < 1 else print("true2") if 1 < 2 else print("false")

# and
if 1 < 2 and 2 < 3: print("printing true here")

# or
if 1 < 2 or 5 < 3: print("printing true here with or")

# nested if
if 5 < 10:
    print("five is less than 10")
    if 5 < 8:
        print("5 is less than 8")
    else:
        print("greater than 8")

# pass: placeholder statement for an empty branch
if 1 < 2: pass
2683e9f5a7b57b2281df5d8b05b8245cc319660a | sudiptoshahin/pythonmachinelearningbasic | /inputs.py | 1,324 | 4.1875 | 4 | #!/usr/bin/env python
# coding: utf-8
# In[ ]:
"""
input() and raw_input() both function take a string as
an argument and displays it as promot in shell. it waits for
the user to hit enter
for raw_input(), input line is treated as string and becomes
the value returend by the function
nput treats the typed line as a Python
expression and infers a type.
input() takes only the type value of input
"""
# name1 = input('Enter your name: ')
# print("Your name: ", name1)
## it will show error in editor but in IDLE dont
# name2 = raw_input('Enter your name:~')
# print("Your name is" +name2);
## square the integer
x = 2
ans = 0
itersLeft = x
while (itersLeft != 0):
ans = ans + x
itersLeft = itersLeft - 1
print (str(x) + "*" + str(x) +"="+ str(ans))
# In[8]:
"""
Write a program that asks the user to input 10 integers, and
then prints the largest odd number that was entered. If no odd number was
entered, it should print a message to that effect.
"""
# NOTE(review): the original read a single int but then indexed it like a
# list, and compared against names that were never defined (OddNumber vs
# oddNumber).  Rewritten to do what the exercise states: read 10 integers
# and print the largest odd one, or a message when none was odd.
numbers = [int(input('Enter 10 integers: ')) for _ in range(10)]
largestOddNumber = None
for value in numbers:
    # Track the maximum among odd values only.
    if value % 2 != 0 and (largestOddNumber is None or value > largestOddNumber):
        largestOddNumber = value
if largestOddNumber is None:
    print("No odd number was entered")
else:
    print("Largest odd number is " + str(largestOddNumber))
# In[ ]:
# In[ ]:
| true |
c75d6af83883238345dd0330087cddb25e655803 | shahnaaz20/debugging_part_4 | /cipher2.py | 927 | 4.34375 | 4 | def encrypt(message):
    # Caesar shift +3: convert each character to its code point and add 3.
    ascii_message = [ord(char)+3 for char in message]
    # Map the shifted code points back to characters.
    encrypt_message = [ chr(char) for char in ascii_message]
    return(''.join(encrypt_message))
def decrypt(message):
    """Reverse the Caesar cipher: shift every character back by 3."""
    shifted = [chr(ord(char) - 3) for char in message]
    return ''.join(shifted)
# Menu loop: runs until a message is processed or the user opts out.
flag = True
while flag:
    choice = input("What do you want to do? 1. Encrypt a message 2. Decrypt a message Enter `e` or `d` respectively!")
    if choice == 'e':
        print(encrypt(input("Enter message to encrypt??")))
        break
    if choice == 'd':
        print(decrypt(input("Enter message to decrypt?")))
        break
    # Unrecognised choice: offer another attempt.
    play_again = input("Do you want to try agian or Do you want to exit? (y/n)")
    if play_again != 'y':
        break
75def06a03b2dd42b7af41318c36c5a53592247f | emojipeach/euler_problems_python | /0002.py | 664 | 4.28125 | 4 | print("""Each new term in the Fibonacci sequence is generated by adding the previous two terms. By starting with 1 and 2, the first 10 terms will be:""")
print("""1, 2, 3, 5, 8, 13, 21, 34, 55, 89, ...""")
print("""By considering the terms in the Fibonacci sequence whose values do not exceed four million, find the sum of the even-valued terms.""")
def gen_fibonacci(a):
    """Extend the global `fibonacci` list with terms not exceeding a.

    Fixed: the original appended each new term *before* checking the
    limit, so one out-of-range term always leaked into the list.  (For
    the 4,000,000 limit the leaked term was odd, so the final Euler
    answer was unaffected.)
    """
    while True:
        nxt = fibonacci[-2] + fibonacci[-1]
        if nxt > a:
            break
        fibonacci.append(nxt)
def select_evens(b):
    """Append the even members of b to the global even_fibs list."""
    even_fibs.extend(value for value in b if value % 2 == 0)
def solution(x):
    """Euler #2 driver: sum the even Fibonacci terms up to x and print it."""
    gen_fibonacci(x)          # fill the global sequence
    select_evens(fibonacci)   # pick out the even terms
    print(sum(even_fibs))
# Seed terms for the sequence and the accumulator for even terms.
fibonacci = [1,2]
even_fibs = []
solution(4000000) | true |
304bd6076896e403fd2973d669f1469b255220d4 | CrystalBRana/LabProjects2 | /q_n0_ 8.py | 299 | 4.375 | 4 | ''' Write a Python program which accepts the radius of a circle from the user and compute the area.
(area of circle =PI * r^2)'''
radius = float(input("Enter the radius of circle in centimeter:"))
# area = pi * r^2 (pi approximated as 3.14, as in the exercise statement)
area_of_circle = (3.14 * (radius**2))
# Fixed: the radius is read in centimeters, but the result claimed
# "square meter"; the reported unit now matches the input unit.
print(f"The area of circle is {area_of_circle} square centimeter")
2a0b4735c4db4ba0b831028b3a8a26c242a007f9 | CrystalBRana/LabProjects2 | /q.no.10.py | 402 | 4.1875 | 4 | # Write a python program to convert seconds to day, hour, minutes and seconds.
seconds = int(input('Insert second:'))
# divmod peels off each unit and carries the remainder to the next one.
days, remainder = divmod(seconds, 60 * 60 * 24)
hours, remainder = divmod(remainder, 60 * 60)
minutes, seconds_left = divmod(remainder, 60)
# Fixed: the original computed minutes but printed only days and hours,
# and never derived the leftover seconds the exercise asks for.
print(days, hours, minutes, seconds_left)
| true |
af6475761f6aef23786189e00e99f2862639583a | SRaja001/MIT_Open | /HW/Problem_set_1.py | 2,844 | 4.25 | 4 | #Problem 0
#dob = raw_input('Please Enter your dat of birth MM/DD/YY: \n**')
#user = raw_input('Please enter your last name: \n**')
#print user, dob
###Problem 1
##
##balance = float(raw_input("Please enter the balance on your credit card: "))
##interest_rate = float(raw_input("Please enter the annual interest rate as a decimal: "))
##monthly_pay_rate = float(raw_input("Please enter the monthly payment rate as a decimal: "))
##total_paid = 0.0
##
##for i in range(1,13):
## monthly_payment = monthly_pay_rate * balance
## interest_paid = interest_rate/12.0 * balance
## principle_paid = monthly_payment - interest_paid
## balance = balance - principle_paid
## total_paid = total_paid + monthly_payment
## print "Month", i
## print "Minimum monthly payment: $%.2f" % monthly_payment
## print "Principle paid: $%.2f" % principle_paid
## print "Remaining balance: $%.2f" % balance
##
##print "RESULT"
##print "Total amount paid: $%.2f" % total_paid
##print "Remaining balance: $%.2f" % balance
#Problem 2
##balance = float(raw_input("Please enter your outstanding balance: "))
##interest = float(raw_input("Please enter your annual interest rate as a decimal: "))
##monthly_interest = interest/12
##remaining_balance = balance
##monthly_payment = 0
##i = 0
##
##while remaining_balance > 0:
## i = i+1
## monthly_payment = monthly_payment + 10
## months = 0
## remaining_balance = balance
## while months < 12 and remaining_balance > 0:
## months = months + 1
## remaining_balance = remaining_balance * (1 + monthly_interest) - monthly_payment
##
## #year_payment = monthly_payment * 12
## #remaining_balance = (balance * (1 + interest)) - year_payment
##
##print "Monthly: ", monthly_payment
##print "Balance: ", remaining_balance
##print " months: ", months
##print "iterations: ", i
#Problem 3
# Python 2 code (raw_input, print statements).
# Problem 3: bisection search for the fixed monthly payment that clears
# the balance in exactly 12 months.
balance = float(raw_input("Please enter your outstanding balance: "))
interest = float(raw_input("Please enter your annual interest rate as a decimal: "))
monthly_interest = interest/12.0
# Search bounds: upper is the payment that would cover the fully
# compounded year; lower assumes no interest at all.
upper = (balance * (1 + (interest/12))**12)/12
lower = balance/12
monthly_payment = (upper + lower)/2
months = 0
remaining_balance = balance
j =0
# Bisect until the bounds are within half a cent of each other.
while upper - lower > 0.005:
    j = j + 1
    remaining_balance = balance
    monthly_payment = (upper + lower)/2
    print "Monthly Payment: ", monthly_payment
    # Simulate 12 months of compounding minus the candidate payment.
    for i in range(1,13):
        remaining_balance = remaining_balance * (1 + monthly_interest) - monthly_payment
    # Overshot (balance went negative) -> payment too big; else too small.
    if remaining_balance < 0:
        upper = monthly_payment
    else:
        lower = monthly_payment
## if months < 12:
## upper = monthly_payment
## if months > 12:
## lower = monthly_payment
print "Monthly: ", monthly_payment
print "Balance: ", remaining_balance
#print "Months: ", months
print "iterations: ", j
| true |
a11a6374068185e7cf39fca250b1e9f04b3f7f59 | jonathansilveira1987/EXERCICIOS_ESTRUTURA_DE_DECISAO | /exercicio20.py | 1,267 | 4.28125 | 4 | # 20. Faça um Programa para leitura de três notas parciais de um aluno. O programa deve calcular
# a média alcançada por aluno e apresentar:
# 1. A mensagem "Aprovado", se a média for maior ou igual a 7, com a respectiva média alcançada;
# 2. A mensagem "Reprovado", se a média for menor do que 7, com a respectiva média alcançada;
# 3. A mensagem "Aprovado com Distinção", se a média for igual a 10.
# Autor desconhecido
# Fonte: http://python-iniciantes.blogspot.com/2012/08/exercicios-36-estrutura-de-decisao.html
def entraNota(quantidade_nota):
    """Read `quantidade_nota` grades (0-10) from the user.

    :param quantidade_nota: how many grades to prompt for
    :return: list of the grades as floats
    :raises ValueError: when a grade outside [0, 10] is entered
    """
    notas = []
    # Fixed: the original named this counter `int`, shadowing the builtin.
    contador = 1
    for _ in range(quantidade_nota):
        nota = (float(input("Digite a {0}ª nota: ".format(contador))))
        if nota < 0 or nota > 10:
            raise ValueError('Erro na {0}ª nota. Digite uma nota entre 0 e 10.'.format(contador))
        notas.append(nota)
        contador += 1
    return notas
def mediaAluno(notas):
    """Compute the average of `notas` and print the pass/fail verdict.

    Fixed: an average of exactly 7 fell through `media > 7.0 and
    media < 10` and `media < 7.0` into the final else, so it was wrongly
    reported as "Aprovado com distinção".  Per the exercise: distinction
    only for media == 10; media >= 7 passes; otherwise fails.  Also
    removed a leftover debug print of the raw sum.
    """
    media = sum(notas) / len(notas)
    if media == 10:
        print('Aprovado com distinção: 10!')
    elif media >= 7.0:
        print('Aprovado com média: {0}'.format(media))
    else:
        print('Reprovado com média: {0}'.format(media))
notas = entraNota(3)
mediaAluno(notas) | false |
59f91514f7f4b3169801d00bf4a545fd1dddef91 | jonathansilveira1987/EXERCICIOS_ESTRUTURA_DE_DECISAO | /exercicio23.py | 592 | 4.25 | 4 | # 23. Faça um Programa que peça um número e informe se o número é inteiro ou decimal.
# Dica: utilize uma função de arredondamento.
# Desenvolvido por Jonathan Silveira - Instagram: @ jonathandev01
# Função de Arredondamento
# numero = float(input("Numero original: "))
# print("Arredondado :", round(numero))
# A value is an integer exactly when rounding leaves it unchanged.
numero = float(input("Digite um número: "))
if numero != round(numero):
    print("O número digitado é decimal")
else:
    print("O número digitado é inteiro")
a8e840fc577193db202ed5af696cb221e77db59a | kaidokariste/python | /01_LearnPythonHardWay/03_raw_terminal_input.py | 499 | 4.1875 | 4 | print("How old are you"),
age = input() # raw_input from python2 was renamed input in python3
# NOTE: the trailing comma after print(...) builds a 1-tuple in Python 3;
# it is a leftover from the Python 2 `print x,` idiom and is harmless here.
print("How tall are you"),
height = input()
print("How much do you weight"),
weight = input()
# input() always returns str, so the values are echoed back verbatim.
print("So you're {} old, {} tall and {} heavy.".format(age,height,weight))
# you can also pass the prompt text directly to input()
age = input("How old are you?")
height = input("How tall are you?")
weight = input("How much do you weight? ")
print("So, you're {} old, {} tall and {} heavy. ".format(age,height,weight)) | true |
3a863353a9d02e208139ed825835b60db3adee0d | iEdwinTorres/backend-baby-names-assessment | /babynames.py | 2,733 | 4.21875 | 4 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# BabyNames python coding exercise.
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
"""
Define the extract_names() function below and change main()
to call it.
For writing regex, it's nice to include a copy of the target
text for inspiration. Here's what the HTML looks like in the
baby.html files:
...
<h3 align="center">Popularity in 1990</h3>
....
<tr align="right"><td>1</td><td>Michael</td><td>Jessica</td>
<tr align="right"><td>2</td><td>Christopher</td><td>Ashley</td>
<tr align="right"><td>3</td><td>Matthew</td><td>Brittany</td>
...
Suggested milestones for incremental development:
- Extract all the text from the file and print it
- Find and extract the year and print it
- Extract the names and rank numbers and print them
- Get the names data into a dict and print it
- Build the [year, 'name rank', ... ] list and print it
- Fix main() to use the extracted_names list
"""
import sys
import re
import argparse
def extract_names(filename):
    """Parse a baby-names html file.

    :param filename: path to a baby.html-style file
    :return: list whose first element is the year string, followed by
             alphabetized 'name rank' strings.  When a name appears in
             both the boy and girl columns, the first rank seen is kept.
    """
    with open(filename) as f:
        text = f.read()
    names = []
    year_match = re.search(r'Popularity in (\d{4})', text)
    names.append(year_match.group(1))
    names_ranks = re.findall(r'<td>(\d+)</td><td>(\w+)</td><td>(\w+)</td>', text)
    names_to_rank = {}
    for rank, boy_name, girl_name in names_ranks:
        # Bug fix: the original tested `boy_name not in names_ranks` (the
        # tuple list), which is never true, so a duplicate name's later
        # (worse) rank silently overwrote the first one.
        if boy_name not in names_to_rank:
            names_to_rank[boy_name] = rank
        if girl_name not in names_to_rank:
            names_to_rank[girl_name] = rank
    for name, rank in sorted(names_to_rank.items()):
        names.append(f'{name} {rank}')
    return names
def create_parser():
    """Create a command line parser object with 2 argument definitions."""
    parser = argparse.ArgumentParser(
        description="Extracts and alphabetizes baby names from html.")
    # Optional flag: write results to a summary file instead of stdout.
    parser.add_argument('--summaryfile', help='creates a summary file',
                        action='store_true')
    # One or more html files to process.
    parser.add_argument('files', help='filename(s) to parse', nargs='+')
    return parser
def main(args):
    """Parse argv-style args and print (or summarize) each file's names."""
    parser = create_parser()
    ns = parser.parse_args(args)
    if not ns:
        parser.print_usage()
        sys.exit(1)
    file_list = ns.files
    create_summary = ns.summaryfile
    for filename in file_list:
        names = extract_names(filename)
        text = '\n'.join(names)
        if create_summary:
            # Bug fix: the output path was the literal "(unknown).summary",
            # so every input clobbered the same file; derive it from the
            # input filename instead.
            with open(f'{filename}.summary', 'w') as f:
                f.write(text)
        else:
            print(text)
if __name__ == '__main__':
    main(sys.argv[1:])
| true |
bb3aceaea7a1f8ef838d20a859ab58b9982c580a | flovera1/CrackingTheCodeInterview | /Python/Chapter1ArraysAndStrings/stringCompression.py | 816 | 4.34375 | 4 | '''
Implement a method to perform basic string compression using the counts of repeated characters.
For example, the string aabcccccaaa would become a2b1c5a3. If the "compressed" string would not
become smaller than the original string, your method should return the original string.
You can assume the string has only uppercase and lower case letters (a-z).
'''
def StrCompress(string):
    '''Run-length compress `string` (aabccc -> a2b1c3).

    Returns the original string unchanged when the compressed form would
    not be strictly shorter.
    '''
    pieces = []
    run = 1
    for pos, ch in enumerate(string):
        # Extend the run while the next character matches; otherwise
        # flush "<char><count>" and start a new run.
        if pos + 1 < len(string) and ch == string[pos + 1]:
            run += 1
        else:
            pieces.append(ch + str(run))
            run = 1
    compressed = ''.join(pieces)
    return string if len(string) <= len(compressed) else compressed
print(StrCompress('aabcccccaaa'))
079970bfa071570a912ba12d321c469e46a844ab | NedyalkoKr/Learning-Python | /Lesson 9 - Modularity/function_basics.py | 986 | 4.125 | 4 | # defining a new function
# x is the input to the function
# functions can accept one, or more or none parameters
# these parameters represent the initial data the function will use
# internaly
def square(x):
    """Return the square of x (x multiplied by itself)."""
    return x * x

print(square(5))
# we can choose to bind parameters to local variables
# but because our parameter(s) are used as local variables
# adding anoter name as alias to a parameter is not nessesery
def square_again(x):
    """Same as square(): a parameter is already a local name, so no
    aliasing of the argument is needed."""
    result = x * x
    return result

print(square_again(5))
# in alsmost all cases functions will return single output
# if we are not explicitly using the return keyword
# function will implicitly return None
def even_or_odd(n):
    """Print whether n is even or odd; implicitly returns None."""
    if n % 2 == 0:
        print("even")
    else:
        print("odd")

# A function without an explicit return value yields None.
output = even_or_odd(2)
print(output is None)
# its better to use return statement
def nth_root(radicand, n):
    """Return the n-th root of radicand via a fractional exponent."""
    exponent = 1 / n
    return radicand ** exponent

print(nth_root(16, 2))
43ce9efa69e5038b870c9bd863a26e2b4d9780fb | NedyalkoKr/Learning-Python | /Lesson 6 - Strings, Collections, and Iteration/lists.py | 922 | 4.59375 | 5 | # list is a ordered collection(sequence) of objects
# lists are mutable
# lists are iterable
# creating list object using list literal form
numbers = [1,2,3,4,5,6,7,8,9]
fruits = ["apple", "orange", "pear"]
# each list item position is mapped to an index used to retrieve the item
print(numbers[0])
print(fruits[0])
print(numbers[-1])
print(fruits[-1])
print(numbers[2])
# NOTE(review): fruits has only 3 items, so fruits[6] raises IndexError
# here and nothing below this line ever runs — likely meant fruits[2].
print(fruits[6])
# retrieving an item that doesn't exist raises IndexError
print(fruits[10])
# reassign list item (lists are mutable; note "9" is a string here)
numbers[-1] = "9"
print(numbers)
# list methods
prices = []
# append adds a single item at the end of the list
prices.append(1.99)
print(prices)
prices.append(2.99)
print(prices)
# the list() constructor builds a list from any iterable
print(list("hello, world!"))
# list() can be called on iterable data types only
# passing a non-iterable object (below) raises a TypeError
print(list(10)) | true |
4c38a7789b6010f9fcd1cc16758354234c4901f1 | NedyalkoKr/Learning-Python | /Lesson 7 - Scalar Types, Operators, and Control Flow/nesting_conditionals.py | 405 | 4.28125 | 4 | h = 42
# First version: nested conditionals.  Nesting is not a bad pattern, but
# in Python flat is preferred to nested for readability.
if h <= 50:
    if h >= 20:
        print("Between 20 and 50")
    else:
        print("Less than 20")
else:
    print("Greater than 50")

# Second version: the same logic flattened with elif.
if h > 50:
    print("Greater than 50")
elif h < 20:
    print("Less than 20")
else:
    print("Between 20 and 50")
4387486343afdac38e51dd0971f9ce9637a2dd0e | luiseduardogfranca/Luis-Franca | /LP1-P1/Initial Knowledge Test/array_intersection.py | 1,370 | 4.15625 | 4 | def min_value(array):
    # Linear scan for the smallest element.
    # NOTE(review): the local name `min` shadows the builtin of the same
    # name inside this function.
    min = array[0]
    for value in array:
        if value < min:
            min = value
    return min
# function to sort in ascending order
def sort_array(array):
    """Selection sort: repeatedly move the smallest element to a new list.

    Consumes `array` (left empty on return) and returns the sorted copy,
    matching the original contract.  Uses the builtin min() instead of
    the hand-rolled min_value() helper.
    """
    new_array = []
    while array:
        smallest = min(array)
        array.remove(smallest)  # removes the first occurrence, as before
        new_array.append(smallest)
    return new_array
# arrays for the numbers storage (20 values each)
first_array = [None] * 20
second_array = [None] * 20
array_intersection = []
# input numbers: first 20 go into first_array, the rest into second_array.
# NOTE(review): `len(second_array) - index` fills second_array at
# positions 0, -1, -2, ... (i.e. in reverse); order does not matter for
# an intersection, so the output is unaffected — but verify the intent.
for index in range(len(first_array) + len(second_array)):
    if index < len(first_array):
        first_array[index] = int(input(""))
    else:
        second_array[len(second_array) - index] = int(input(""))
# collect values present in both arrays
for value in first_array:
    value_okay = False
    for second_value in second_array:
        if value == second_value:
            value_okay = True
    # skip the value if it is already in the intersection (de-duplicate)
    if value_okay:
        for intersection in array_intersection:
            if value == intersection:
                value_okay = False
    if value_okay:
        array_intersection.append(value)
if len(array_intersection) > 0:
    # sort ascending, then print one value per line
    array_intersection = sort_array(array_intersection)
    for value in array_intersection:
        print(value)
elif len(array_intersection) == 0:
    print("VAZIO")
| true |
4560924bf5c9fd07101e6cd2663c6add33352fff | luiseduardogfranca/Luis-Franca | /LP1-P1/Conditionals and Repetition Structure/playing_with_arrays.py | 1,195 | 4.15625 | 4 | def inverse_order(array):
return [array[index] for index in range(len(array) - 1, -1, -1)]
def left_shift(array):
    """Rotate `array` one position to the left, returning a new list."""
    # Slicing: everything after the head, then the head at the end.
    return array[1:] + array[:1]
# i did it this way to learn a new way
def sort_by_decreasing(array):
    """Bubble sort `array` in place into descending order; returns it."""

    def _is_descending(values):
        # inlined ordered-check: every element >= its successor
        return all(values[k] >= values[k + 1] for k in range(len(values) - 1))

    while not _is_descending(array):
        for k in range(len(array) - 1):
            if array[k] < array[k + 1]:
                # plain tuple swap instead of difference arithmetic
                array[k], array[k + 1] = array[k + 1], array[k]
    return array
def array_is_ordered(array):
    """Return True when `array` is in non-increasing (decreasing) order.

    BUG FIX: the original returned inside the first loop iteration (its
    `index += 1` line was unreachable), so only the first adjacent pair
    was ever checked and e.g. [3, 1, 2] was reported as ordered.  Every
    adjacent pair is now inspected.
    """
    for index in range(len(array) - 1):
        if array[index] < array[index + 1]:
            return False
    return True
# Read the declared count (not validated against the actual input) and
# the space-separated numbers themselves.
amount_number = int(input())
numbers = [int(number) for number in (input().split(" "))]
print(*inverse_order(numbers))  # reversed copy; `numbers` is unchanged
print(*left_shift(numbers))  # left-rotated copy; `numbers` is unchanged
print(*sort_by_decreasing(numbers))  # sorts `numbers` in place
| true |
00752314ecbd8f9be2862896576e36e76eaa6305 | ATHULKNAIR/PythonPrograms | /PrintSum.py | 241 | 4.15625 | 4 | # Write a program that takes three numbers and prints their sum. Every number
# is given on a separate line.
# Read three integers, one per prompt, and print their sum.
first = int(input('Enter First Number'))
second = int(input('Enter Second Number'))
third = int(input('Enter Third Number'))
print(first + second + third)
6acd96fe92a6241537df088dd2df8dda02e7a6ac | rachelBurford/she_codes_python | /conditionals/dictionaries/Dictionaries_exercises/q1.py | 850 | 4.125 | 4 | prices = {
"Baby Spinach": 2.78,
"Hot Chocolate": 3.70,
"Crackers": 2.10,
"Bacon": 9.00,
"Carrots": 0.56,
"Oranges": 3.08
}
quantity = {
"Baby Spinach": 1,
"Hot Chocolate": 3,
"Crackers": 2,
"Bacon": 1,
"Carrots": 4,
"Oranges": 2
}
quantity2 = {
"Baby Spinach": 2,
"Hot Chocolate": 1,
"Crackers": 4,
"Bacon": 0,
"Carrots": 8,
"Oranges": 5
}
for item, quantity in quantity.items():
if quantity <= 1 :
print(item)
# print(f"{quantity} {item} @ ${prices[item]} = $ {(quantity * prices[item])}")
#for key , value in the list with the values
# for item in quantity.values():
# print(item)
# google Select rather than for or if
for item, quantity2 in quantity2.items():
print(f"{quantity2} {item} @ $ {prices[item]} = $ {(quantity2 * prices[item])}") | true |
dcc907106126ba5a4491e86d9dca2aa648a365ee | vsingh1998/Code_in_place | /Lectures/Lecture4/add2numbers.py | 652 | 4.25 | 4 | """
File: add2numbers.py
--------------------
This program asks for the user inputs of two numbers and prints their sum.
"""
def main():
    """Prompt for two integers and print their sum."""
    print("This is a program to calculate sum of two numbers.")
    # collect both operands, converting each reply to an integer
    first = int(input("Enter first number: "))
    second = int(input("Enter second number: "))
    total = first + second
    print("The sum is: " + str(total) + ".")


if __name__ == '__main__':
    main()
| true |
34f78ebc1dd9e983bb699b85fad673b75bb318e6 | asnewton/Stack | /QueueByStack.py | 570 | 4.1875 | 4 | """ Implement Queue using Stacks """
from Stack import NewStack
class Queue:
myStack1 = NewStack()
myStack2 = NewStack()
def enQueue(self, item):
Queue.myStack1.push(item)
def deQueue(self):
while Queue.myStack1.isempty() is not True:
Queue.myStack2.push(Queue.myStack1.pop())
print "deleted item :", Queue.myStack2.pop()
def printQ(self):
while Queue.myStack1.isempty() is not True:
print(Queue.myStack1.pop())
# Demo: enqueue 1, 2, 3 and dequeue once (prints "deleted item : 1").
myQ = Queue()
myQ.enQueue(1)
myQ.enQueue(2)
myQ.enQueue(3)
myQ.deQueue()
| true |
3bdaa7792f2fb8796ea50a7fa59d9204a79db3d0 | jakaprima/python-basic-minimalist | /tantangan/manipulasi_string_searching.py | 508 | 4.28125 | 4 | def LongestWord(sen):
    # Return the longest whitespace-separated word of `sen` after stripping
    # the punctuation characters listed below.
    # first we remove non alphanumeric characters from the string
    # using the translate function which deletes the specified characters
    # NOTE(review): str.translate(None, chars) is Python 2 only; Python 3
    # would need str.maketrans('', '', chars) instead.
    sen = sen.translate(None, "~!@#$%^&*()-_+={}[]:;'<>?/,.|`")
    # now we separate the string into a list of words
    arr = sen.split(" ")
    # the list max function will return the element in arr
    # with the longest length because we specify key=len
    return max(arr, key=len)

print LongestWord("the $$$longestasdfasdf# word is coderbyte")
| true |
a9ca0cdcc9546f3dc38ee1f5afe01071ae2d038f | V-Marco/miscellaneous | /tkinter_bootcamp/grid.py | 420 | 4.53125 | 5 | from tkinter import *
# Build a minimal Tk window demonstrating the grid geometry manager.
root = Tk()

# Three labels: two messages plus a blank spacer.
hello_label = Label(root, text="Hello, World!")
again_label = Label(root, text="Hi again!")
spacer_label = Label(root, text=" ")

# Grid positions are relative -- empty rows/columns collapse.
hello_label.grid(row=0, column=0)
again_label.grid(row=1, column=5)
spacer_label.grid(row=1, column=1)

# Hand control to the Tk event loop.
root.mainloop()
bc63aacc6a2e74a5b190023f2fe991816e9cd332 | ShivaniAsokumar/problems-py | /Find_Second_Max.py | 2,532 | 4.15625 | 4 | """
! PROMPT: Find the second largest element in a given list.
* Input: Array of numbers
* Output: Second largest number
? What happens if an illegal argument is given. => Raise ValueError
* Second largest number is smaller than the max but larger than all other values.
// Brute Force Solution
* Find the maximum using nested loops.
* Delete the maximum from the list
* Find the maximum again to get the second largest number.
* Time Complexity: O(n^2)
// Current Solution
* Find the maximum O(n)
* Remove the maximum from array O(n)
* Find the maximum O(n)
* Time complexity = O(3n)
def find_second_maximum(lst):
maximum = float('-inf')
for n in lst:
if n > maximum:
maximum = n
lst.remove(maximum)
maximum = float('-inf')
for n in lst:
if n > maximum:
maximum = n
return maximum
// Testing
arr = [9,2,3,6]
maximum = 9
arr = [2,3,6]
maximum = 6
"""
def find_second_maximum(lst):
    """Return the second-largest element of lst (duplicate maxima count).

    BUG FIX: the original called lst.remove(maximum) on the caller's
    list, mutating it as a side effect; the work is now done on a copy.
    For an empty list the copy's .remove() raises ValueError (maximum
    stays -inf), which is caught: a message is printed and None returned.
    """
    try:
        work = list(lst)  # copy so the caller's list is left untouched
        maximum = float('-inf')
        for n in work:
            if n > maximum:
                maximum = n
        work.remove(maximum)  # ValueError here when lst is empty
        second = float('-inf')
        for n in work:
            if n > second:
                second = n
        return second
    except ValueError:
        print('Input is invalid. Please enter the correct values')

print(find_second_maximum([4,2,1,5,4]))
# * Better solution with Two Traversals
def find_second_maximum2(lst):
try:
maximum = second_max = float('-inf')
for n in lst:
if n > maximum:
maximum = n
for n in lst:
if n > second_max and n != maximum:
second_max = n
return second_max
except ValueError:
print('Input is invalid. Please enter the correct values')
print(find_second_maximum2([4,2,1,5,4]))
# * One Traversal O(n)
def find_second_maximum3(lst):
try:
maximum = second_max = float('-inf')
for n in lst:
if n > maximum:
maximum = n
second_max = maximum
elif n != maximum and n > second_max:
second_max = n
return second_max
except ValueError:
print('Input is invalid. Please enter the correct values')
"""
// Lessons Learned
* There are two ways of solving this problem: one traversal or two traversals.
* Having just one traversal is less cluttered so even though the time complexity is the same, it is easier to read.
"""
| true |
38c04c9e3b6d113a3526adcda38fef3f12252623 | CamilliCerutti/Python-exercicios | /Curso em Video/ex005.py | 270 | 4.25 | 4 | # ANTECESSOR E SUCESSOR
# Faça um programa que leia um número Inteiro e mostre na tela o seu sucessor e seu antecessor.
# Read an integer and report its predecessor and successor.
numero = int(input('Digite um número: '))
antecessor = numero - 1
sucessor = numero + 1
print(f' O antecessor do número que você escolheu é: \n{antecessor} e o sucessor é {sucessor}')
d3f7181630d380ec71df0e9ba11851a48bd6eb1a | CamilliCerutti/Python-exercicios | /CURSO UDEMY/EXERCICIOS/EX003.PY | 442 | 4.125 | 4 | # Faça um programa que peça o primeiro nome do usuário. Se o nome tiver 4 letras ou menos escreva "Seu nome é curto"; se tiver entre 5 e 6 letras, escreva "Seu nome é normal"; maior que 6 escreva "Seu nome é muito grande"
# Classify the user's first name by length:
# short (<= 4 letters), normal (5-6), long (> 6).
nome = input('Digite seu nome: ')
tamanho = len(nome)
if tamanho > 6:
    print('Seu nome é muito grande')
elif tamanho >= 5:
    print('Seu nome é normal')
else:
    print('Seu nome é curto')
8d2b9e08b1470ce89c4eca976b3c664ef3889d87 | CamilliCerutti/Python-exercicios | /Curso em Video/ex075.py | 740 | 4.28125 | 4 | # ANALISE DE DADOS EM UMA TUPLA
# Desenvolva um programa que leia quatro valores pelo teclado e guarde-os em uma tupla. No final, mostre:
# A) Quantas vezes apareceu o valor 9.
# B) Em que posição foi digitado o primeiro valor 3.
# C) Quais foram os números pares.
# Read four values into a tuple, then report on them.
tupla = (int(input('Digite um valor: ')), int(input('Digite um valor: ')), int(input('Digite um valor: ')), int(input('Digite um valor: ')))
print(f'Os valores digitados foram {tupla}')
print(f'O valor 9 aparece {tupla.count(9)} vezes')
if 3 in tupla:
    print(f'O primeiro valor 3 foi digitado na{tupla.index(3)+1}ª posição')
else:
    print('O valor 3 não foi digitado em nenhuma posição')
# BUG FIX: the original tested `tupla % 2 == 0` (TypeError: % is not
# defined for tuples) and printed the whole tuple; test each element.
for c in tupla:
    if c % 2 == 0:
        print(c, end='')
| false |
5193fe9313b4172185dbf05d83b213253686697d | CamilliCerutti/Python-exercicios | /Curso em Video/ex028.py | 613 | 4.1875 | 4 | # JOGO DE ADVINHAÇÃO V1.0
# Escreva um programa que faça o computador “pensar” em um número inteiro entre 0 e 5 e peça para o usuário tentar descobrir qual foi o número escolhido pelo computador. O programa deverá escrever na tela se o usuário venceu ou perdeu.
from random import randint
from time import sleep

# Guessing game: ask for the guess first, then draw the secret number
# (same order as the original -- the guess never influences the draw).
palpite = int(input('Pensei em um número entre 0 e 5. Tente advinhar qual é este número: '))
segredo = randint(0,5)
print('PROCESSANDO...')
sleep(3)
print(f'O numero que eu pensei foi {segredo}')
if palpite == segredo:
    print('Voce acertou, parabens!')
else:
    print('Voce errou, tente novamente!')
| false |
009eb8f77f950e82f8a65bcb4f03ff6baf13395c | UlysseARNAUD-IPSSI/Module-python | /Chapitre 1 : Bases du langage Python/exercices/1.2 : Collections, boucles et dictionnaires/cityinfo.py | 1,421 | 4.3125 | 4 | #!/usr/bin/env python3
"""
Variables utilisées
"""
infoCity = {'Lyon': (513275, 47.87, 'Lyonnais'),
'Paris': (2206488, 105.40, 'Parisiens'),
'Brest': (139163, 49.51, 'Brestois'),
'Bordeaux': (249712, 49.36, 'Bordelais'),
}
cities = ['Lyon', 'Paris', 'Brest']
"""
Fonction principale
"""
def main():
    """Show the default cities once, then keep prompting the user."""
    for ville in cities:
        voirVille(ville)
    while True:
        choisirVille()
"""
Choix de la ville
"""
def choisirVille():
    """Prompt for a city name; END quits, DUMP lists every known city,
    anything else is looked up with voirVille()."""
    city = input('Saisissez une ville : (END pour quitter, DUMP pour voir les villes) ').strip()
    special = {
        'END': quitter,
        'DUMP': voir
    }
    action = special.get(city)
    # The original used get(city, False) and compared with `== False`;
    # an `is None` sentinel test is safer and clearer.
    if action is None:
        voirVille(city)
    else:
        action()
"""
Voir les informations sur une ville
"""
def voirVille(city):
    """Print population, surface and demonym for *city*, or a warning
    when the city is unknown."""
    info = infoCity.get(city)
    if info is None:
        print('Ville inconnue')
        return
    # Unpack the tuple we already fetched instead of a second dict lookup.
    population, surface, gentile = info
    # BUG FIX: the two implicitly-concatenated literals had no separator,
    # so the output ran the sentences together ("...de 47.87.Les habitants...").
    print("La ville de {} contient {} habitants pour une surface de {}. "
          "Les habitants de cette ville sont les {}."
          .format(city, population, surface, gentile))
"""
Quitter le programme
"""
def quitter():
    # Farewell message, then stop the interpreter via the site builtin quit().
    print('Au revoir !')
    quit()
"""
Voir les informations de toutes les villes
"""
def voir():
    """Display the details of every city known to infoCity."""
    for ville in infoCity:
        voirVille(ville)
main()
| false |
f91d47047a5b5bf2aace76fee32407c1d6c13818 | AnkitNigam1985/Data-Science-Projects | /Courses/DataFlair/pandas_pipe.py | 2,450 | 4.3125 | 4 | import numpy as np
import pandas as pd

# pipe() hands the whole Series/DataFrame to a callable, forwarding extra args.
# BUG FIX: a stray "=======" merge-conflict divider (a SyntaxError) followed
# by a complete duplicate of this script has been removed; one copy remains.

def adder(ele1, ele2):
    """Return ele1 + ele2 (invoked through .pipe(adder, 3))."""
    return ele1 + ele2

# Series
print("\nSeries:\n")
dataflair_s1 = pd.Series([11, 21, 31, 41, 51])
print("Original :\n", dataflair_s1)
print("After pipe():\n", dataflair_s1.pipe(adder, 3))
"""
Original :
0    11
1    21
2    31
3    41
4    51
dtype: int64
After pipe():
0    14
1    24
2    34
3    44
4    54
dtype: int64
"""

# DataFrame (random values, so the sample output below is one example run)
dataflair_df1 = pd.DataFrame(
    6 * np.random.randn(6, 3), columns=['c1', 'c2', 'c3'])
print("Original Dataframe :\n", dataflair_df1)
print("After pipe():\n", dataflair_df1.pipe(adder, 3))
"""
Original Dataframe :
          c1         c2         c3
0   7.846747  -2.022487  -4.943301
1  -0.857617  -3.749087   7.165374
2  12.145709  11.951062   4.020946
3   1.778519   5.065232 -12.122106
4  -4.958476  -7.021716  -4.242996
5   0.358903  -3.543973   4.067560
After pipe():
          c1         c2         c3
0  10.846747   0.977513  -1.943301
1   2.142383  -0.749087  10.165374
2  15.145709  14.951062   7.020946
3   4.778519   8.065232  -9.122106
4  -1.958476  -4.021716  -1.242996
"""
7d9fec9bce16a86e0da440ca9c06c3b29d4a5d7d | RaimundoJSoares/Programs-in-Python | /Maior_e_menorNumero.py | 443 | 4.125 | 4 | a = int(input( 'Digite um numero'))
b = int(input('Digite o segundo numero'))
c = int(input('Digite o terceiro numero'))
# The builtins min()/max() replace the manual comparison chains; the
# result is identical, including ties (ties all carry the same value).
menor = min(a, b, c)
print('O menor valor digitado foi {}'.format(menor))
maior = max(a, b, c)
print('O maior valor digitado foi {}'.format(maior))
| false |
20b94d8b7f160bb48e2371e38165ed44558046ff | namphung1998/Comp123_code | /Final/Files/Q5.py | 1,052 | 4.28125 | 4 | import turtle
# Your job in this question is to write a function named
# drawSquares. The drawSquares draws a series of squares
# each one next to the other. The drawn squares start with
# 10 pixels to a side, and get bigger by 10 until they
# reach the input max size, after which they get smaller
# until they reach 10 again. The function has two
# parameters, a trutle object (which will be used to draw
# the squares) and a maximum size object (which should be
# a multiple of 10). This function does not need to return
# anything. See the included example output for more
# information.
def drawSquares(turt, max):
    """Draw adjacent squares with sides 10, 20, ..., max, ..., 20, 10.

    turt -- the turtle object used for drawing
    max  -- the largest side length; should be a multiple of 10

    Cleanup vs. the original: the shrinking phase lived *inside* the
    growing loop behind `if sideLen == max`, and its loop variable
    shadowed the builtin `len`; both phases now share one helper.
    """
    def draw_one(side):
        # One axis-aligned square of the given side, then step right past it.
        for _ in range(4):
            turt.fd(side)
            turt.left(90)
        turt.fd(side)

    for side in range(10, max + 10, 10):   # growing phase: 10 .. max
        draw_one(side)
    for side in range(max - 10, 0, -10):   # shrinking phase: max-10 .. 10
        draw_one(side)
# Open a window, draw the square series up to side 60, then close on click.
sc = turtle.Screen()
turt = turtle.Turtle()
drawSquares(turt, 60)
sc.exitonclick()
90987e465b35469819d8c421424500fe06dbd2de | mitcheccles/tensortrade | /tensortrade/core/clock.py | 1,184 | 4.5625 | 5 | from datetime import datetime
class Clock(object):
"""A class to track the time for a process.
Attributes
----------
start : int
The time of start for the clock.
step : int
The time of the process the clock is at currently.
Methods
-------
now(format=None)
Gets the current time in the provided format.
increment()
Increments the clock by specified time increment.
reset()
Resets the clock.
"""
def __init__(self):
self.start = 0
self.step = self.start
def now(self, format: str = None) -> datetime:
"""Gets the current time in the provided format.
Parameters
----------
format : str or None, optional
The format to put the current time into.
Returns
-------
datetime
The current time.
"""
return datetime.now().strftime(format) if format else datetime.now()
def increment(self) -> None:
"""Increments the clock by specified time increment."""
self.step += 1
def reset(self) -> None:
"""Resets the clock."""
self.step = self.start
| true |
d55539e09d65135646d3a27fc82ecb7e0cc33a18 | BenjaminAage/TileTraveller | /tile_traveller_def.py | 2,707 | 4.46875 | 4 |
# https://github.com/BenjaminAage/TileTraveller/blob/master/tile_traveller_def.py
# 1. Which implementation was easier and why?
# - It was quite hard to implement program #1 (without functions), as you had to figure out
# all the factors to have the program up and running. However, the function program (#2) was
# not easier to be fully honest, but on the other hand it is much more readable and
# easiser to understand afterwards.
# 2. Which implementation is more readable and why?
# - As stated in answer number 1, the second program is easier to understand as you are
# dividing the overall problem into smaller sectors. Thereby making it more readable.
# 3. Which problems in the first implementations were you able to solve
# with the latter implementation?
# - To categories the print operations of the program, so when the user of the program
# has entered a valid direction, the program gives the next direction easily without
# writting the code again (def printed(route)).
# With that, you could make another function that defines what the user types in, and
# thereby return the direction he/she wants to go next.
# All that was left to do then, is to create the while-loop to run the program sufficiently,
# as well as to use the functions created within the loop.
#
#
def new_route(x, y):
if x == 1 and y == 1:
return "N"
elif x == 1 and y == 2:
return "NES"
elif x == 1 and y == 3:
return "ES"
elif x == 2 and y == 1:
return "N"
elif x == 2 and y == 2:
return "SW"
elif x == 2 and y == 3:
return "EW"
elif x == 3 and y == 1:
return ""
elif x == 3 and y == 2:
return "NS"
elif x == 3 and y == 3:
return "SW"
def routes(direction, route):
for i in route:
if i == direction:
return True
return False
def printed(route):
if route != "":
print("You can travel: ", end="")
for i in range(len(route)):
if i != 0:
print(" or ", end="")
if route[i] == "N":
print("(N)orth", end="")
elif route[i] == "E":
print("(E)ast", end="")
elif route[i] == "S":
print("(S)outh", end="")
elif route[i] == "W":
print("(W)est", end="")
print(".")
x = 1
y = 1
start = "N"
print("You can travel: (N)orth.")
while not (x == 3 and y == 1):
direction = input("Direction: ").upper()
if routes(direction, start):
if direction == "N":
y += 1
if direction == "E":
x += 1
if direction == "S":
y -= 1
if direction == "W":
x -= 1
start = new_route(x, y)
printed(start)
else:
print("Not a valid direction!")
print("Victory!")
| true |
f6875ce5510fbe4dce11a13281b5b0b1784a8899 | jeowsome/Python-Adventures | /Rock-Paper-Scissors/Find positions/main.py | 473 | 4.34375 | 4 | # put your python code here
numbers = input().split(' ') # read the input then create a new list for positions
to_find = input()
to_print = []
# when "iterating over the list of numbers", append all the found occurrences
for i in range(len(numbers)):
if numbers[i] == to_find:
to_print.append(str(i))
# Finally, join the list of indexes or print the message "not found".
if to_print:
print(' '.join(to_print))
else:
print("not found")
| true |
7ee8bd96405b9f6a0d8b200d4627e4f371511db3 | jgazal/DSA_Python-FAD | /Python_FAD/Capitulo3_LoopsCondicionais/Range.py | 423 | 4.15625 | 4 | print("Range")
print("-----")
# Imprimindo números pares entre 50 e 101
for i in range(50, 101, 2):
print(i)
print("\n")
for i in range(3, 6):
print (i)
print("\n")
for i in range(0, -20, -2):
print(i)
print("\n")
lista = ['Morango', 'Banana', 'Maça', 'Uva']
lista_tamanho = len(lista)
for i in range(0, lista_tamanho):
print(lista[i])
print("\n")
# Tudo em Python é um objeto
print(type(range(0,3))) | false |
d449cdfec0e9cde9c047ed06aa6cf30360c9e108 | mauricioTechDev/daily-code-wars | /python/running-out-of-space.py | 746 | 4.15625 | 4 | # Kevin is noticing his space run out!
# Write a function that removes the spaces from the values and
# returns an array showing the space decreasing. For example,
# running this function on the array ['i', 'have','no','space']
# would produce ['i','ihave','ihaveno','ihavenospace'].
# SOLUTION WITH NO MODULE
def spacey(array):
    """Return the running concatenations of array's strings as a list.

    e.g. ['i', 'have', 'no', 'space'] -> ['i', 'ihave', 'ihaveno', 'ihavenospace']

    Cleanup vs. the original: it built the prefixes with O(n^2) nested
    loops in a dict and returned dict.values() (a dict *view*, not a
    list).  A single pass with a running prefix returns a real list.
    """
    prefixes = []
    running = ''
    for word in array:
        running += word
        prefixes.append(running)
    return prefixes
# SOLUTION WITH itertools MODULE
from itertools import accumulate

def spacey(array):
    """Cumulative concatenations of array (accumulate defaults to +)."""
    return [prefix for prefix in accumulate(array)]
| true |
3a5e51c219a91504012fa87e2f3db7abacd392f7 | raysmith619/Introduction-To-Programming | /exercises/prroduct.py | 664 | 4.34375 | 4 | # product.py
"""
Write a function product(factor1, factor2, factor3) that returns the
product of the values factor1, factor2, factor3.
Test it on the following:
.5, .4, .3;
1, 2, 3;
-1, -1, -1;
"""
def product(factor1, factor2, factor3):
    """Return the product factor1 * factor2 * factor3."""
    return factor1 * factor2 * factor3
def testit(f1, f2, f3):
    """Exercise product() and print the call alongside its result."""
    print("testing prod(", f1, f2, f3, ") = ", product(f1, f2, f3))
# The three test cases requested in the module docstring.
testit(.5,.4,.3)
testit(1,2,3)
testit(-1,-1,-1)
| true |
9b25fa00573645aa4941e852ecbbc8499bee599a | raysmith619/Introduction-To-Programming | /exercises/functions/friends_family/simple_friends/list_friends.py | 498 | 4.15625 | 4 | #list_friends.py 23Sep2020 crs, Author
# Simple List Example
"""
Just list a list of friends names
"""
# Friends to display, one full name per entry.
my_friends = [
    "Ray Smith",
    "Phil Fontain",
    "Rich Parker",
]

# Show each friend's name on its own line.
for friend in my_friends:
    print(friend)
r'''
Output:
>>>
= RESTART: C:\Users\raysm\workspace\python
\IntroductionToProgramming\exercises\functions
\friends_family\simple_friends\list_friends.py
Ray Smith
Phil Fontain
Rich Parker
>>>
'''
| false |
f692537aac14503d8da3feecc163026fcc780f1c | raysmith619/Introduction-To-Programming | /exercises/turtle/turtle_onclick_rainbow.py | 837 | 4.4375 | 4 | # turtle_on_click_rainbow.py 27Nov2020 crs, from turtle_onclick
""" Adding color to turtle_onclick.py
Operation:
Repeat:
1. Position the mouse inside graphics screen
2. Click mouse (button one)
A line is draw to the mouse position
"""
from turtle import *
rainbow = ["red", "orange", "yellow", "green",
"blue", "indigo", "violet"]
color_index = len(rainbow) # Starting color
# screen object
wn = Screen() # Required for events(e.g. click)
# method to perform action
def fxn(x, y):
global color_index # Required if changing outside var
# Change to next rainbow color
# Wraps to first at end of list
color_index = (color_index + 1)%len(rainbow)
color(rainbow[color_index])
goto(x, y)
# onclick action: draw a line in the next rainbow color to each click point
wn.onclick(fxn)
wn.mainloop()
| true |
9cd3b65a6842ca1e873263202533ec990ff8f7ef | AxelSiliezar/ME021-Python | /Me021-Python/HW01/HW01_03.py | 305 | 4.21875 | 4 | import math
# Given: masses M and m and separation r, read from the user.
M = float(input("Enter value for M: "))
m = float(input("Enter value for m: "))
r = float(input("Enter value for r: "))

# G = universal gravitational constant, N*m^2/kg^2
G = 6.674*10**(-11)
# Newton's law of universal gravitation; grouping kept from the original
# so the floating-point result is bit-for-bit identical.
F = ((G)*(M*m/r**2))
print ("The value of F is: ",(F))
| false |
092092f0e0f92fa658a28b8a4ff6898a52868c1f | sashakrasnov/datacamp | /21-deep-learning-in-python/3-building-deep-learning-models-with-keras/03-fitting-the-model.py | 1,348 | 4.5 | 4 | '''
Fitting the model
You're at the most fun part. You'll now fit the model. Recall that the data to be used as predictive features is loaded in a NumPy matrix called predictors and the data to be predicted is stored in a NumPy matrix called target. Your model is pre-written and it has been compiled with the code from the previous exercise.
'''
import numpy as np
def relu(input):
    '''Rectified linear unit: negatives clamp to 0, positives pass through.'''
    # Equivalent to max(0, input).
    if input > 0:
        return input
    return 0
# Column 0 of the CSV is the wage target; the remaining columns are predictors.
data = np.loadtxt('../datasets/hourly_wages.csv', delimiter=',', skiprows=1)
predictors = data[:, 1:]
target = data[:, 0]
'''
INSTRUCTIONS
* Fit the model. Remember that the first argument is the predictive features (predictors), and the data to be predicted (target) is the second argument.
'''
# Import necessary modules
import keras
from keras.layers import Dense
from keras.models import Sequential

# Specify the model: the same 50-relu / 32-relu / 1-linear stack, built
# by handing the layer list straight to the Sequential constructor.
n_cols = predictors.shape[1]
model = Sequential([
    Dense(50, activation='relu', input_shape=(n_cols,)),
    Dense(32, activation='relu'),
    Dense(1),
])

# Compile the model
model.compile(optimizer='adam', loss='mean_squared_error')

# Fit the model
model.fit(predictors, target, epochs=10)
| true |
f544c60fe5e01e1b88821505f3293bce45f8261b | sashakrasnov/datacamp | /21-deep-learning-in-python/4-fine-tuning-keras-models/06-building-your-own-digit-recognition-model.py | 2,475 | 4.5625 | 5 | '''
Building your own digit recognition model
You've reached the final exercise of the course - you now know everything you need to build an accurate model to recognize handwritten digits!
We've already done the basic manipulation of the MNIST dataset shown in the video, so you have X and y loaded and ready to model with. Sequential and Dense from keras are also pre-imported.
To add an extra challenge, we've loaded only 2500 images, rather than 60000 which you will see in some published results. Deep learning models perform better with more data, however, they also take longer to train, especially when they start becoming more complex.
If you have a computer with a CUDA compatible GPU, you can take advantage of it to improve computation time. If you don't have a GPU, no problem! You can set up a deep learning environment in the cloud that can run your models on a GPU. Here is a blog post by Dan that explains how to do this - check it out after completing this exercise! It is a great next step as you continue your deep learning journey.
'''
import numpy as np
from keras.layers import Dense
from keras.models import Sequential
from keras.utils.np_utils import to_categorical
# MNIST rows: column 0 is the digit label; the remaining columns are the
# pixel values (784 of them, matching the model's input_shape below).
data = np.loadtxt('../datasets/mnist.csv', delimiter=',')
X = data[:, 1:]
y = to_categorical(data[:, 0])  # one-hot encode the 10 digit classes
'''
INSTRUCTIONS
* Create a Sequential object to start your model. Call this model.
* Add the first Dense hidden layer of 50 units to your model with 'relu' activation. For this data, the input_shape is (784,).
* Add a second Dense hidden layer with 50 units and a 'relu' activation function.
* Add the output layer. Your activation function should be 'softmax', and the number of nodes in this layer should be the same as the number of possible outputs in this case: 10.
* Compile model as you have done with previous models: Using 'adam' as the optimizer, 'categorical_crossentropy' for the loss, and metrics=['accuracy'].
* Fit the model using X and y using a validation_split of 0.3.
'''
# Create the model: model
model = Sequential()

# Add the first hidden layer; input_shape fixes the 784-pixel input
model.add(Dense(50, activation='relu', input_shape=(784,)))

# Add the second hidden layer.  Its input shape is inferred from the
# previous layer -- the original repeated input_shape here, which Keras
# does not use on non-first layers and which misleads readers.
model.add(Dense(50, activation='relu'))

# Add the output layer: one softmax unit per digit class (10 of them)
model.add(Dense(10, activation='softmax'))

# Compile the model
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])

# Fit the model, holding out 30% of the samples for validation
model.fit(X, y, validation_split=0.3, epochs=3)
| true |
5a936e185653a9333474874e60db2dd78565cb30 | sashakrasnov/datacamp | /22-network-analysis-in-python-1/1-introduction-to-networks/03-specifying-a-weight-on-edges.py | 1,641 | 4.3125 | 4 | '''
Specifying a weight on edges
Weights can be added to edges in a graph, typically indicating the "strength" of an edge. In NetworkX, the weight is indicated by the 'weight' key in the metadata dictionary.
Before attempting the exercise, use the IPython Shell to access the dictionary metadata of T and explore it, for instance by running the commands T.edge[1][10] and then T.edge[10][1]. Note how there's only one field, and now you're going to add another field, called 'weight'.
'''
from pickle import load
from networkx import Graph
# Reading Graph v1 pickle data
#with open('../datasets/ego-twitter.p', 'rb') as f:
#    T = load(f)
# Reading Graph v2 pickle data
# NOTE(review): pickle.load can execute arbitrary code from the file;
# acceptable only because this dataset ships with the course material.
with open('../datasets/ego-twitter.p2', 'rb') as f:
    nodes, edges = load(f)
T = Graph()  # rebuild the graph from the pickled node and edge lists
T.add_nodes_from(nodes)
T.add_edges_from(edges)
'''
INSTRUCTIONS
* Set the 'weight' attribute of the edge between node 1 and 10 of T to be equal to 2. Refer to the following template to set an attribute of an edge: network_name.edge[node1][node2]['attribute'] = value. Here, the 'attribute' is 'weight'.
* Set the weight of every edge involving node 293 to be equal to 1.1. To do this:
* Using a for loop, iterate over all the edges of T, including the metadata.
* If 293 is involved in the list of nodes [u, v]:
* Set the weight of the edge between u and v to be 1.1.
'''
# Set the weight of the edge
T[1][10]['weight'] = 2

# Walk every edge together with its attribute dict and re-weight the
# ones that touch node 293.
for node_a, node_b, attrs in T.edges(data=True):
    if node_a == 293 or node_b == 293:
        T[node_a][node_b]['weight'] = 1.1
e1b882e222d1cc893c2bcf8512372943370b71c2 | sashakrasnov/datacamp | /06-importing-data-in-python-2/3-diving-deep-into-the-twitter-api/03-load-and-explore-twitter-data.py | 1,212 | 4.53125 | 5 | '''
Load and explore your Twitter data
Now that you've got your Twitter data sitting locally in a text file, it's time to explore it! This is what you'll do in the next few interactive exercises. In this exercise, you'll read the Twitter data into a list: tweets_data.
Instructions
* Assign the filename 'tweets.txt' to the variable tweets_data_path.
* Initialize tweets_data as an empty list to store the tweets in.
* Within the for loop initiated by for line in tweets_file:, load each tweet into a variable, tweet, using json.loads(), then append tweet to tweets_data using the append() method.
* Hit submit and check out the keys of the first tweet dictionary printed to the shell.
'''
# Import package
import json

# String of path to file: tweets_data_path
tweets_data_path = '../datasets/tweets3.txt'

# Read in tweets and store in list: tweets_data.  A `with` block replaces
# the original open()/close() pair, so the file handle is released even
# if json.loads() raises on a malformed line.
with open(tweets_data_path, 'r') as tweets_file:
    tweets_data = [json.loads(line) for line in tweets_file]

# Print the keys of the first tweet dict
print(tweets_data[0].keys())
| true |
1575384d98d8fbab917cfe024e3686b910f42d54 | sashakrasnov/datacamp | /15-statistical-thinking-in-python-1/1-graphical-exploratory-data-analysis/05-computing-the-ecdf.py | 1,722 | 4.40625 | 4 | '''
Computing the ECDF
In this exercise, you will write a function that takes as input a 1D array of data and then returns the x and y values of the ECDF. You will use this function over and over again throughout this course and its sequel. ECDFs are among the most important plots in statistical analysis. You can write your own function, foo(x,y) according to the following skeleton:
| def foo(a,b):
| """State what function does here"""
| # Computation performed here
| return x, y
The function foo() above takes two arguments a and b and returns two values x and y. The function header def foo(a,b): contains the function signature foo(a,b), which consists of the function name, along with its parameters. For more on writing your own functions, see DataCamp's course Python Data Science Toolbox (Part 1) here!
'''
import numpy as np
'''
INSTRUCTIONS
* Define a function with the signature ecdf(data). Within the function definition,
* Compute the number of data points, n, using the len() function.
* The x-values are the sorted data. Use the np.sort() function to perform the sorting.
* The y data of the ECDF go from 1/n to 1 in equally spaced increments. You can construct this using np.arange(). Remember, however, that the end value in np.arange() is not inclusive. Therefore, np.arange() will need to go from 1 to n+1. Be sure to divide this by n.
* The function returns the values x and y.
'''
def ecdf(data):
    '''Compute ECDF for a one-dimensional array of measurements.'''
    sorted_vals = np.sort(data)                    # x-axis: data ascending
    count = len(sorted_vals)
    fractions = np.arange(1, count + 1) / count    # y-axis: 1/n ... 1
    return sorted_vals, fractions
20eebc469dec4f8c152a61f7ea5c04b53092e976 | sashakrasnov/datacamp | /24-data-types-for-data-science/4-handling-dates-and-times/03-pieces-of-time.py | 1,804 | 4.25 | 4 | '''
Pieces of Time
When working with datetime objects, you'll often want to group them by some component of the datetime such as the month, year, day, etc. Each of these are available as attributes on an instance of a datetime object.
You're going to work with the summary of the CTA's daily ridership. It contains the following columns, in order: service_date, day_type, bus, rail_boardings, and total_rides. The modules defaultdict and datetime have already been imported for you.
'''
import csv
from datetime import datetime
from collections import defaultdict
# Load the CTA daily summary CSV; each row becomes a tuple of its column
# values in file order: (service_date, day_type, bus, rail_boardings, total_rides).
with open('../datasets/cta_daily_summary_totals.csv', 'r') as csvfile:
    daily_summaries = [
        tuple(row.values())
        for row in csv.DictReader(csvfile)
    ]
'''
INSTRUCTIONS
* Create a defaultdict of an integer called monthly_total_rides.
* Loop over the list daily_summaries, which contains the columns mentioned above in the assignment text.
* Convert the service_date (1st element of daily_summary) to a datetime object called service_datetime. Use '%m/%d/%Y' as your format string.
* Use the month of the service_datetime as the dict key and add the total_rides (5th element of daily_summary) to the current amount for the month. Be sure to convert this into an integer.
* Print monthly_total_rides.
'''
# Accumulate total rides per calendar month across every service date.
# Missing months start at 0 thanks to the int default factory.
monthly_total_rides = defaultdict(int)
for summary in daily_summaries:
    # summary[0] is the service date as 'mm/dd/YYYY'; bucket by month (1-12).
    ride_month = datetime.strptime(summary[0], '%m/%d/%Y').month
    # summary[4] is total_rides, still a string from the CSV.
    monthly_total_rides[ride_month] += int(summary[4])
print(monthly_total_rides)
0f35c9a6a623c41d4590847e59962315672ce2b3 | sashakrasnov/datacamp | /29-statistical-simulation-in-python/2-probability-and-data-generation-process/07-driving-test.py | 2,037 | 4.65625 | 5 | '''
Driving test
Through the next exercises, we will learn how to build a data generating process (DGP) through progressively complex examples.
In this exercise, you will simulate a very simple DGP. Suppose that you are about to take a driving test tomorrow. Based on your own practice and based on data you have gathered, you know that the probability of you passing the test is 90% when it's sunny and only 30% when it's raining. Your local weather station forecasts that there's a 40% chance of rain tomorrow. Based on this information, you want to know what is the probability of you passing the driving test tomorrow.
This is a simple problem and can be solved analytically. Here, you will learn how to model a simple DGP and see how it can be used for simulation.
'''
import numpy as np
# Set random seed to get the same result or remove for different each time
np.random.seed(123)
# Build a 52-card deck as (suit, value) tuples: 4 suits x values 0-12.
cards, n_cards = ['Heart', 'Club', 'Spade', 'Diamond'], 13
deck = [(suit, value) for suit in cards for value in range(n_cards)]
'''
INSTRUCTIONS 1/2
* Write a function test_outcome() to simulate the weather.
* Set weather as 'rain' or 'sun' depending on the input argument p_rain (the probability of rain).
* Based on the weather, return 'pass' or 'fail'.
'''
# Simulation parameters: number of runs, collected results, P(rain),
# and P(pass) conditioned on the weather.
sims, outcomes, p_rain, p_pass = 1000, [], 0.40, {'sun': 0.9, 'rain': 0.3}

def test_outcome(p_rain):
    """Simulate one driving test: draw the weather, then draw pass/fail."""
    weather = np.random.choice(['rain', 'sun'], p=[p_rain, 1 - p_rain])
    # Pass probability depends on the weather just drawn.
    chance = p_pass[weather]
    return np.random.choice(['pass', 'fail'], p=[chance, 1 - chance])
'''
INSTRUCTIONS 2/2
* Compute the probability of passing the test. You'll have to count the number of times you pass the test, as collected in outcomes.
'''
# Run the simulation sims times, recording each pass/fail outcome.
outcomes.extend(test_outcome(p_rain) for _ in range(sims))
# Share of simulated tests that ended in a pass.
pass_rate = outcomes.count('pass') / sims
print('Probability of Passing the driving test = {}'.format(pass_rate))
206f83801de4bbda63c100e3026939cf9085e7f6 | sashakrasnov/datacamp | /26-manipulating-time-series-data-in-python/1-working-with-time-series-in-pandas/06-calculating-stock-price-changes.py | 1,590 | 4.25 | 4 | '''
Calculating stock price changes
You have learned in the video how to calculate returns using current and shifted prices as input. Now you'll practice a similar calculation to calculate absolute changes from current and shifted prices, and compare the result to the function .diff().
'''
import pandas as pd
yahoo = pd.read_csv('../datasets/stock_data/yahoo.csv', index_col='date', parse_dates=['date'])
'''
INSTRUCTIONS
We have already imported pandas as pd and matplotlib.pyplot as plt. We have also loaded Yahoo stock prices for the years 2013 to 2015, set the frequency to business daily, and assigned the result to yahoo.
* Create a new column called shifted_30 that contains the 'price' shifted by 30 business days into the future.
* Subtract 'shifted_30' from 'price', and assign the result to a new column, 'change_30'.
* Apply .diff(), setting periods to 30, and assign the result to a new column, 'diff_30'.
* Inspect the last five rows of yahoo to verify the calculation.
* Subtract diff_30 from change_30 using the .sub() method and print the .value_counts() of the result to show both columns are equal.
'''
# Created shifted_30 here
yahoo['shifted_30'] = yahoo.asfreq('B').price.shift(periods=30)
# Subtract shifted_30 from price
yahoo['change_30'] = yahoo.price - yahoo.shifted_30
# Get the 30-day price difference
yahoo['diff_30'] = yahoo.price.diff(periods=30)
# Inspect the last five rows of price
print(yahoo.tail(5))
# Show the value_counts of the difference between change_30 and diff_30
print(yahoo.change_30.sub(yahoo.diff_30).value_counts())
| true |
dbad60a48570c819043be5067ba14a0748a48fd3 | sashakrasnov/datacamp | /29-statistical-simulation-in-python/4-advanced-applications-of-simulation/01-modeling-corn-production.py | 1,494 | 4.3125 | 4 | '''
Modeling Corn Production
Suppose that you manage a small corn farm and are interested in optimizing your costs. In this exercise, we will model the production of corn.
For simplicity, let's assume that corn production depends on only two factors: rain, which you don't control, and cost, which you control. Rain is normally distributed with mean 50 and standard deviation 15. For now, let's fix cost at 5,000. Corn produced in any season is a Poisson random variable while the average corn production is governed by the equation:
100 × (cost)^0.1 × (rain)^0.2
Let's model this production function and simulate one outcome.
'''
import numpy as np
# Set random seed to get the same result or remove for different each time
np.random.seed(223)
'''
INSTRUCTIONS
* Initialize rain as a normal random variable with mean 50 and standard deviation 15.
* In the corn_produced() function, model mean_corn as 100 × cost^0.1 × rain^0.2.
* Model corn as a Poisson random variable with mean mean_corn.
* Simulate one outcome by storing the result of calling corn_produced() in corn_result and print your results.
'''
# Fixed cost; one season's rainfall drawn from N(mean=50, sd=15).
cost = 5000
rain = np.random.normal(50, 15)

def corn_produced(rain, cost):
    """Draw one season's corn yield: Poisson with mean 100 * cost^0.1 * rain^0.2."""
    expected_yield = 100 * (cost ** 0.1) * (rain ** 0.2)
    return np.random.poisson(expected_yield)
# Simulate one outcome with the sampled rain and fixed cost, then report it.
corn_result = corn_produced(rain, cost)
print('Simulated Corn Production = {}'.format(corn_result))
19fb72baf7e1c430977fc551d85e516d2e87dadc | sashakrasnov/datacamp | /09-manipulating-dataframes-with-pandas/4-grouping-data/03-computing-multiple-aggregates-of-multiple.columns.py | 1,806 | 4.25 | 4 | '''
Computing multiple aggregates of multiple columns
The .agg() method can be used with a tuple or list of aggregations as input. When applying multiple aggregations on multiple columns, the aggregated DataFrame has a multi-level column index.
In this exercise, you're going to group passengers on the Titanic by 'pclass' and aggregate the 'age' and 'fare' columns by the functions 'max' and 'median'. You'll then use multi-level selection to find the oldest passenger per class and the median fare price per class.
The DataFrame has been pre-loaded as titanic.
'''
import pandas as pd
titanic = pd.read_csv('../datasets/titanic.csv')
'''
INSTRUCTIONS
* Group titanic by 'pclass' and save the result as by_class.
* Select the 'age' and 'fare' columns from by_class and save the result as by_class_sub.
* Aggregate by_class_sub using 'max' and 'median'. You'll have to pass 'max' and 'median' in the form of a list to .agg().
* Use .loc[] to print all of the rows and the column specification ('age','max'). This has been done for you.
* Use .loc[] to print all of the rows and the column specification ('fare','median').
'''
# Group titanic by 'pclass': by_class
by_class = titanic.groupby('pclass')
# Restrict the grouped object to the two columns being aggregated
by_class_sub = by_class[['age','fare']]
# Aggregating with a list of functions produces a MultiIndex on the columns:
# level 0 is the column ('age'/'fare'), level 1 the statistic ('max'/'median')
aggregated = by_class_sub.agg(['max','median'])
# Select the ('age','max') column via a tuple key: oldest passenger per class
print(aggregated.loc[:, ('age','max')])
# Select the ('fare','median') column: median fare per class
print(aggregated.loc[:, ('fare','median')])
'''
> print(aggregated.loc[:, ('age','max')])
pclass
1 80.0
2 70.0
3 74.0
Name: (age, max), dtype: float64
> print(aggregated.loc[:, ('fare','median')])
pclass
1 60.0000
2 15.0458
3 8.0500
Name: (fare, median), dtype: float64
''' | true |
db126fa6dca8af406e3eb64cec30bb4e302f7ef4 | sashakrasnov/datacamp | /24-data-types-for-data-science/3-meet-the-collections-module/04-safely-appending-to-a-keys-value-list.py | 1,648 | 4.75 | 5 | '''
Safely appending to a key's value list
Often when working with dictionaries, you know the data type you want to have each key be; however, some data types such as lists have to be initialized on each key before you can append to that list.
A defaultdict allows you to define what each uninitialized key will contain. When establishing a defaultdict, you pass it the type you want it to be, such as a list, tuple, set, int, string, dictionary or any other valid type object.
'''
import csv
# Load the per-station CSV; keep only the three columns of interest as
# (date, stationname, rides) string tuples.
with open('../datasets/cta_daily_station_totals.csv', 'r') as csvfile:
    entries = [
        (row['date'], row['stationname'], row['rides'])
        for row in csv.DictReader(csvfile)
    ]
'''
INSTRUCTIONS
* Import defaultdict from collections.
* Create a defaultdict with a default type of list called ridership.
* Iterate over the list entries, unpacking it into the variables date, stop, and riders, exactly as you did in the previous exercise.
* Use stop as the key of the ridership dictionary and append riders to its value.
* Print the first 10 items of the ridership dictionary. You can use the .items() method for this. Remember, you have to convert ridership.items() to a list before slicing.
'''
# Import defaultdict
from collections import defaultdict

# Group ride counts by station; an unseen station starts with an empty list,
# so no key has to be initialized by hand before appending.
ridership = defaultdict(list)
for ride_date, station, rider_count in entries:
    ridership[station].append(rider_count)

# Show the first 10 (station, rides-list) pairs.
print(list(ridership.items())[:10])
0453443a2fd4382dd5faf56577be5ed76779b069 | sashakrasnov/datacamp | /08-pandas-foundations/1-data-ingestion-and-inspection/07-plotting-series-using-pandas.py | 1,935 | 4.84375 | 5 | '''
Plotting series using pandas
Data visualization is often a very effective first step in gaining a rough understanding of a data set to be analyzed. Pandas provides data visualization by both depending upon and interoperating with the matplotlib library. You will now explore some of the basic plotting mechanics with pandas as well as related matplotlib options. We have pre-loaded a pandas DataFrame df which contain the data you need. Your job is to use the DataFrame method df.plot() to visualize the data, and then explore the optional matplotlib input parameters that this .plot() method accepts.
The pandas .plot() method makes calls to matplotlib to construct the plots. This means that you can use the skills you've learned in previous visualization courses to customize the plot. In this exercise, you'll add a custom title and axis labels to the figure.
Before plotting, inspect the DataFrame in the IPython Shell using df.head(). Also, use type(df) and note that it is a single column DataFrame.
'''
import pandas as pd
import matplotlib.pyplot as plt
df = pd.read_csv('../datasets/weather_data_austin_2010.csv')
'''
INSTRUCTIONS
* Create the plot with the DataFrame method df.plot(). Specify a color of 'red'.
* Note: c and color are interchangeable as parameters here, but we ask you to be explicit and specify color.
* Use plt.title() to give the plot a title of 'Temperature in Austin'.
* Use plt.xlabel() to give the plot an x-axis label of 'Hours since midnight August 1, 2010'.
* Use plt.ylabel() to give the plot a y-axis label of 'Temperature (degrees F)'.
* Finally, display the plot using plt.show().
'''
# Plot the single-column DataFrame as a red line (index on the x-axis)
df.plot(color='red')
# Add a title
plt.title('Temperature in Austin')
# Specify the x-axis label
plt.xlabel('Hours since midnight August 1, 2010')
# Specify the y-axis label
plt.ylabel('Temperature (degrees F)')
# Render the figure window
plt.show()
| true |
a8550d5ba734efaadd56fd5332aa2666f8b2fefa | sashakrasnov/datacamp | /06-importing-data-in-python-2/2-interacting-with-apis-to-import-data-from-the-web/01-loading-and-exploring-a-json.py | 924 | 4.65625 | 5 | '''
Loading and exploring a JSON
Now that you know what a JSON is, you'll load one into your Python environment and explore it yourself. Here, you'll load the JSON 'a_movie.json' into the variable json_data, which will be a dictionary. You'll then explore the JSON contents by printing the key-value pairs of json_data to the shell.
Instructions
* Load the JSON 'a_movie.json' into the variable json_data within the context provided by the with statement. To do so, use the function json.load() within the context manager.
* Use a for loop to print all key-value pairs in the dictionary json_data. Recall that you can access a value in a dictionary using the syntax: dictionary[key].
'''
import json
# Read the movie JSON file into a dict: json_data
with open('../datasets/a_movie.json') as json_file:
    json_data = json.load(json_file)

# Echo every key alongside its value.
for key, value in json_data.items():
    print(key + ': ', value)
| true |
1f9b6dc325f192935d30604dae151f875e5ad35c | sashakrasnov/datacamp | /21-deep-learning-in-python/1-basics-of-deep-learning-and-neural-networks/02-the-rectified-linear-activation-function.py | 1,716 | 4.625 | 5 | '''
The Rectified Linear Activation Function
As Dan explained to you in the video, an "activation function" is a function applied at each node. It converts the node's input into some output.
The rectified linear activation function (called ReLU) has been shown to lead to very high-performance networks. This function takes a single number as an input, returning 0 if the input is negative, and the input if the input is positive.
Here are some examples:
relu(3) = 3
relu(-3) = 0
'''
import numpy as np
input_data = np.array([3, 5])
weights = {
'node_0': np.array([ 2, 4]),
'node_1': np.array([ 4, -5]),
'output': np.array([ 2, 7])
}
'''
INSTRUCTIONS
* Fill in the definition of the relu() function:
* Use the max() function to calculate the value for the output of relu().
* Apply the relu() function to node_0_input to calculate node_0_output.
* Apply the relu() function to node_1_input to calculate node_1_output.
'''
def relu(input):
    """Rectified linear activation: return the input if positive, else 0."""
    return input if input > 0 else 0
# Hidden-layer activations: weighted sum of the inputs pushed through relu.
node_0_output = relu((input_data * weights['node_0']).sum())
node_1_output = relu((input_data * weights['node_1']).sum())

# Collect the hidden layer into an array for the final weighted sum.
hidden_layer_outputs = np.array([node_0_output, node_1_output])

# Output layer is a plain weighted sum -- no activation applied here.
model_output = (hidden_layer_outputs * weights['output']).sum()
print(model_output)
8bb2983a52a9607911c0ea69f4a453fef39b45de | sashakrasnov/datacamp | /08-pandas-foundations/2-exploratory-data-analysis/09-separate-and-summarize.py | 1,569 | 4.125 | 4 | '''
Separate and summarize
Let's use population filtering to determine how the automobiles in the US differ from the global average and standard deviation. How the distribution of fuel efficiency (MPG) for the US differ from the global average and standard deviation?
In this exercise, you'll compute the means and standard deviations of all columns in the full automobile dataset. Next, you'll compute the same quantities for just the US population and subtract the global values from the US values.
All necessary modules have been imported and the DataFrame has been pre-loaded as df.
INSTRUCTIONS
* Compute the global mean and global standard deviations of df using the .mean() and .std() methods. Assign the results to global_mean and global_std.
* Filter the 'US' population from the 'origin' column and assign the result to us.
* Compute the US mean and US standard deviations of us using the .mean() and .std() methods. Assign the results to us_mean and us_std.
* Print the differences between us_mean and global_mean and us_std and global_std. This has already been done for you.
'''
import pandas as pd
df = pd.read_csv('../datasets/auto-mpg.csv')
# Column-wise mean and standard deviation over the whole dataset.
global_mean = df.mean()
global_std = df.std()

# Subset to US-built cars only.
us = df.loc[df['origin'] == 'US']

# Same statistics for the US subset.
us_mean = us.mean()
us_std = us.std()

# How the US fleet deviates from the global figures.
print(us_mean - global_mean)
print(us_std - global_std)
6b582bf7eb3e5e3fc4ae9912c68cd2be5dccac6d | sashakrasnov/datacamp | /14-interactive-data-visualization-with-bokeh/1-basic-plotting-with-bokeh/08-plotting-data-from-pandas-dataframes.py | 1,796 | 4.21875 | 4 | '''
Plotting data from Pandas DataFrames
You can create Bokeh plots from Pandas DataFrames by passing column selections to the glyph functions.
Bokeh can plot floating point numbers, integers, and datetime data types. In this example, you will read a CSV file containing information on 392 automobiles manufactured in the US, Europe and Asia from 1970 to 1982.
The CSV file is provided for you as 'auto.csv'.
Your job is to plot miles-per-gallon (mpg) vs horsepower (hp) by passing Pandas column selections into the p.circle() function. Additionally, each glyph will be colored according to values in the color column.
'''
from bokeh.io import output_file, show
'''
INSTRUCTIONS
* Import pandas as pd.
* Use the read_csv() function of pandas to read in 'auto.csv' and store it in the DataFrame df.
* Import figure from bokeh.plotting.
* Use the figure() function to create a figure p with the x-axis labeled 'HP' and the y-axis labeled 'MPG'.
* Plot mpg (on the y-axis) vs hp (on the x-axis) by color using p.circle(). Note that the x-axis should be specified before the y-axis inside p.circle(). You will need to use Pandas DataFrame indexing to pass in the columns. For example, to access the color column, you can use df['color'], and then pass it in as an argument to the color parameter of p.circle(). Also specify a size of 10.
'''
# Import pandas as pd
import pandas as pd
# Read in the CSV file: df
df = pd.read_csv('../datasets/auto-mpg.csv')
# Import figure from bokeh.plotting
from bokeh.plotting import figure
# Create the figure with labeled axes: p
p = figure(x_axis_label='HP', y_axis_label='MPG')
# Circle glyphs: hp on x, mpg on y, one CSS colorname per point from df['color']
p.circle(df['hp'], df['mpg'], color=df['color'], size=10)
# Write the plot to an HTML file and open it
output_file('auto-df.html')
show(p)
| true |
cf5ed84b81c429740decf635f089ce6cf2b1b1a4 | sashakrasnov/datacamp | /24-data-types-for-data-science/2-dictionaries--the-root-of-python/06-working-with-dictionaries-more-pythonically.py | 1,852 | 4.25 | 4 | '''
Popping and deleting from dictionaries
Often, you will want to remove keys and value from a dictionary. You can do so using the del Python instruction. It's important to remember that del will throw a KeyError if the key you are trying to delete does not exist. You can not use it with the .get() method to safely delete items; however, it can be used with try: catch:.
If you want to save that deleted data into another variable for further processing, the .pop() dictionary method will do just that. You can supply a default value for .pop() much like you did for .get() to safely deal with missing keys. It's also typical to use .pop() instead of del since it is a safe method.
Here, you'll remove 2011 and 2015 to save them for later, and then delete 2012 from the dictionary.
'''
# Nested dict: year -> {rank: name}, restricted to MALE rows after 2011.
baby_names = {}
with open('../datasets/baby_names.csv') as f:
    # Skipping header
    _ = f.readline()
    # Iterating over lines
    for row in f:
        year, sex, _, name, count, rank = row.strip().split(',')
        year = int(year)
        rank = int(rank)
        if sex == 'MALE' and year > 2011:
            # A row is only stored once its year's dict exists AND the year is
            # not 2012; the first row of each year merely creates the empty
            # dict (its name is dropped), and every 2012 row resets 2012 back
            # to {}, so 2012 ends empty.
            # NOTE(review): presumably deliberate, to mimic the exercise's
            # "delete 2012" step -- confirm against the exercise text above.
            if year in baby_names and year != 2012:
                baby_names[year][rank] = name
            else:
                baby_names[year] = {}
    # Rebuild each year's dict with its ranks in ascending order
    for y in baby_names:
        baby_names[y] = dict(sorted(baby_names[y].items()))
'''
INSTRUCTIONS
* Iterate over baby_names[2014], unpacking it into rank and name.
* Print each rank and name.
* Repeat the process for baby_names[2012].
'''
# Walk both year dictionaries and echo each (rank, name) pair; 2014 first,
# then 2012 (which the loading step above left empty, so it prints nothing).
for year in (2014, 2012):
    for position, given_name in baby_names[year].items():
        print(position, given_name)
| true |
4ea71b51359e8486ed5c5e65bea639653363814b | sashakrasnov/datacamp | /19-machine-learning-with-the-experts-school-budgets/2-creating-a-simple-first-model/06-combining-text-columns-for-tokenization.py | 2,114 | 4.125 | 4 | '''
Combining text columns for tokenization
In order to get a bag-of-words representation for all of the text data in our DataFrame, you must first convert the text data in each row of the DataFrame into a single string.
In the previous exercise, this wasn't necessary because you only looked at one column of data, so each row was already just a single string. CountVectorizer expects each row to just be a single string, so in order to use all of the text columns, you'll need a method to turn a list of strings into a single string.
In this exercise, you'll complete the function definition combine_text_columns(). When completed, this function will convert all training text data in your DataFrame to a single string per row that can be passed to the vectorizer object and made into a bag-of-words using the .fit_transform() method.
Note that the function uses NUMERIC_COLUMNS and LABELS to determine which columns to drop. These lists have been loaded into the workspace.
'''
import pandas as pd
LABELS = ['Function', 'Use', 'Sharing', 'Reporting', 'Student_Type', 'Position_Type', 'Object_Type', 'Pre_K', 'Operating_Status']
NUMERIC_COLUMNS = ['FTE', 'Total']
'''
INSTRUCTIONS
* Use the .drop() method on data_frame with to_drop and axis= as arguments to drop the non-text data. Save the result as text_data.
* Fill in missing values (inplace) in text_data with blanks (""), using the .fillna() method.
* Complete the .apply() method by writing a lambda function that uses the .join() method to join all the items in a row with a space in between.
'''
def combine_text_columns(data_frame, to_drop=NUMERIC_COLUMNS + LABELS):
    """Collapse every text column of data_frame into one space-joined string per row.

    Columns named in to_drop (numeric features and labels) are removed first,
    and missing values are replaced with empty strings before joining.
    """
    # Only drop columns that actually exist in this frame.
    droppable = set(to_drop) & set(data_frame.columns.tolist())
    text_data = data_frame.drop(droppable, axis=1)
    # NaNs would break the string join below.
    text_data = text_data.fillna("")
    # One space-separated string per row, in column order.
    return text_data.apply(lambda row: " ".join(row), axis=1)
1b100714dada7b59a0ebab1e83f8de2d942aead0 | sashakrasnov/datacamp | /28-machine-learning-for-time-series-data-in-python/3-predicting-time-series-data/01-introducing-the-dataset.py | 1,453 | 4.53125 | 5 | '''
Introducing the dataset
As mentioned in the video, you'll deal with stock market prices that fluctuate over time. In this exercise you've got historical prices from two tech companies (Ebay and Yahoo) in the DataFrame prices. You'll visualize the raw data for the two companies, then generate a scatter plot showing how the values for each company compare with one another. Finally, you'll add in a "time" dimension to your scatter plot so you can see how this relationship changes over time.
The data has been loaded into a DataFrame called prices.
'''
import pandas as pd
import matplotlib.pyplot as plt
prices = pd.read_csv('../datasets/ebay_yhoo.csv', index_col=0, parse_dates=True)
'''
INSTRUCTIONS 1/3
* Plot the data in prices. Pay attention to any irregularities you notice.
'''
# Line plot of both price series against the DatetimeIndex
prices.plot()
plt.show()
'''
INSTRUCTIONS 2/3
* Generate a scatter plot with the values of Ebay on the x-axis, and Yahoo on the y-axis. Look up the symbols for both companies from the column names of the DataFrame.
'''
# Scatterplot with one company per axis (column symbols: EBAY vs YHOO)
prices.plot.scatter(x='EBAY', y='YHOO')
plt.show()
'''
INSTRUCTIONS 3/3
* Finally, encode time as the color of each datapoint in order to visualize how the relationship between these two variables changes.
'''
# Same scatter, but color each point by its date (the index) via the
# viridis colormap; colorbar suppressed
prices.plot.scatter('EBAY', 'YHOO', c=prices.index, cmap=plt.cm.viridis, colorbar=False)
plt.show()
ff58963682d6b0c385af84e7cfd8e569ebb0f43c | sashakrasnov/datacamp | /21-deep-learning-in-python/2-optimizing-a-neural-network-with-backward-propagation/01-coding-how-weight-changes-affect-accuracy.py | 2,751 | 4.4375 | 4 | '''
Coding how weight changes affect accuracy
Now you'll get to change weights in a real network and see how they affect model accuracy!
Have a look at the following neural network: https://s3.amazonaws.com/assets.datacamp.com/production/course_3524/datasets/ch2ex4.png
Its weights have been pre-loaded as weights_0. Your task in this exercise is to update a single weight in weights_0 to create weights_1, which gives a perfect prediction (in which the predicted value is equal to target_actual: 3).
Use a pen and paper if necessary to experiment with different combinations. You'll use the predict_with_network() function, which takes an array of data as the first argument, and weights as the second argument.
'''
import numpy as np
def relu(input):
    """Rectified linear unit: clamp negative inputs to 0, pass positives through."""
    return input if input > 0 else 0
def predict_with_network(input_data_point, weights):
    """Forward pass through the 2-hidden-node network described by weights.

    relu is applied to each hidden node and (unlike the earlier exercise)
    to the final output as well.
    """
    hidden_layer_values = np.array([
        relu((input_data_point * weights[node]).sum())
        for node in ('node_0', 'node_1')
    ])
    return relu((hidden_layer_values * weights['output']).sum())
'''
INSTRUCTIONS
* Create a dictionary of weights called weights_1 where you have changed 1 weight from weights_0 (You only need to make 1 edit to weights_0 to generate the perfect prediction).
* Obtain predictions with the new weights using the predict_with_network() function with input_data and weights_1.
* Calculate the error for the new weights by subtracting target_actual from model_output_1.
* Hit 'Submit Answer' to see how the errors compare!
'''
# The data point you will make a prediction for
input_data = np.array([0, 3])
# Sample weights
weights_0 = {
    'node_0': [2, 1],
    'node_1': [1, 2],
    'output': [1, 1]
}
# The actual target value, used to calculate the error
target_actual = 3
# Make prediction using original weights
model_output_0 = predict_with_network(input_data, weights_0)
# Calculate error: error_0
error_0 = model_output_0 - target_actual
# With input [0, 3]: node_0 = relu(0*2 + 3*1) = 3 and node_1 = relu(0*1 + 3*2) = 6,
# so zeroing the second output weight yields relu(3*1 + 6*0) = 3 = target_actual.
weights_1 = {
    'node_0': [2, 1],
    'node_1': [1, 2],
    'output': [1, 0]
}
# Make prediction using new weights: model_output_1
model_output_1 = predict_with_network(input_data, weights_1)
# Calculate error: error_1 (should be 0 for a perfect prediction)
error_1 = model_output_1 - target_actual
# Print error_0 and error_1
print(error_0)
print(error_1)
| true |
c909f3767520223e6be10318e530cbc924ef2b76 | sashakrasnov/datacamp | /24-data-types-for-data-science/2-dictionaries--the-root-of-python/01-creating-and-looping-through-dictionaries.py | 1,832 | 4.90625 | 5 | '''
Creating and looping through dictionaries
You'll often encounter the need to loop over some array type data, like in Chapter 1, and provide it some structure so you can find the data you desire quickly.
You start that by creating an empty dictionary and assigning part of your array data as the key and the rest as the value.
Previously, you used sorted() to organize your data in a list. Dictionaries can also be sorted. By default, using sorted() on a dictionary will sort by the keys of the dictionary. You can also reverse the order by passing reverse=True as a keyword argument.
Finally, since sorted returns a list, you can use slice notation to select only part of the list. For example, [:10] will slice the first ten items off a list and return only those items.
'''
# Set of (name, count) string tuples for FEMALE rows from 2012.
female_baby_names_2012 = set()
with open('../datasets/baby_names.csv') as f:
    # Skipping header
    _ = f.readline()
    # Iterating over lines
    for row in f:
        # Columns: year, sex, (unused), name, count, rank -- rank is discarded here
        year, sex, _, name, count, _ = row.strip().split(',')
        if year == '2012' and sex == 'FEMALE':
            female_baby_names_2012.add((name, count))
'''
INSTRUCTIONS
* Create an empty dictionary called names.
* Loop over female_baby_names_2012, unpacking it into the variables name and rank.
* Inside the loop, add each name to the names dictionary using the rank as the key.
* Sort the names dictionary in descending order, select the first ten items. Print each item.
'''
# Create an empty dictionary: names
names = {}
# Loop over the girl names
# NOTE(review): the setup above stores (name, count) tuples, so the value
# unpacked into `rank` here is actually the count *string*, and the sort
# below is lexicographic on those strings rather than numeric by rank --
# confirm this matches the intended exercise behavior.
for name, rank in female_baby_names_2012:
    # Add each name to the names dictionary using rank as the key
    names[rank] = name
# Sort the keys in descending order and take the first 10
for rank in sorted(names, reverse=True)[:10]:
    # Print each item
    print(names[rank])
a31ee59404efdb8fa7481075543179c1fec6412b | sashakrasnov/datacamp | /08-pandas-foundations/1-data-ingestion-and-inspection/05-reading-a-flat-file.py | 1,610 | 4.4375 | 4 | '''
Reading a flat file
In previous exercises, we have preloaded the data for you using the pandas function read_csv(). Now, it's your turn! Your job is to read the World Bank population data you saw earlier into a DataFrame using read_csv(). The file has been downloaded as world_population.csv.
The next step is to reread the same file, but simultaneously rename the columns using the names keyword input parameter, set equal to a list of new column labels. You will also need to set header=0 to rename the column labels.
Finish up by inspecting the result with df.head() and df.info() in the IPython Shell.
pandas has already been imported and is available in the workspace as pd.
'''
import pandas as pd
'''
INSTRUCTIONS
* Use pd.read_csv() with the string 'world_population.csv' to read the CSV file into a DataFrame and assign it to df1.
* Create a list of new column labels - 'year', 'population' - and assign it to the variable new_labels.
* Reread the same file, again using pd.read_csv(), but this time, add the keyword arguments header=0 and names=new_labels. Assign the resulting DataFrame to df2.
* Print both the df1 and df2 DataFrames to see the change in column names. This has already been done for you.
'''
# Read in the file: df1
df1 = pd.read_csv('../datasets/world_population.csv')
# Create a list of the new column labels: new_labels
new_labels = ['year','population']
# Read in the file, specifying the header and names parameters: df2
df2 = pd.read_csv('../datasets/world_population.csv', header=0, names=new_labels)
# Print both the DataFrames
print(df1)
print(df2)
| true |
341a39673f57049a3f28d111b7b4e46428714cc8 | sashakrasnov/datacamp | /26-manipulating-time-series-data-in-python/1-working-with-time-series-in-pandas/04-set-and-change-time-series-frequency.py | 1,210 | 4.1875 | 4 | '''
Set and change time series frequency
In the video, you have seen how to assign a frequency to a DateTimeIndex, and then change this frequency.
Now, you'll use data on the daily carbon monoxide concentration in NYC, LA and Chicago from 2005-17.
You'll set the frequency to calendar daily and then resample to monthly frequency, and visualize both series to see how the different frequencies affect the data.
'''
import pandas as pd
import matplotlib.pyplot as plt
co = pd.read_csv('../datasets/air_quality_data/co_cities.csv', index_col=0, parse_dates=True)
'''
INSTRUCTIONS
We have already imported pandas as pd and matplotlib.pyplot as plt and we have already loaded the co_cities.csv file in a variable co.
* Inspect co using .info().
* Use .asfreq() to set the frequency to calendar daily.
* Show a plot of 'co' using subplots=True.
* Change the frequency to monthly using the alias 'M'.
* Show another plot of co using subplots=True.
'''
# Inspect data
print(co.info())

# Resample to calendar-daily frequency, then to month-end frequency,
# plotting each version with one subplot per city.
for freq in ('D', 'M'):
    co = co.asfreq(freq)
    co.plot(subplots=True)
    plt.show()
| true |
5a33d4bb2add5882a2a6637aecf90330ed47776d | sashakrasnov/datacamp | /14-interactive-data-visualization-with-bokeh/1-basic-plotting-with-bokeh/09-the-bokeh-columndatasource.py | 1,733 | 4.125 | 4 | '''
The Bokeh ColumnDataSource (continued)
You can create a ColumnDataSource object directly from a Pandas DataFrame by passing the DataFrame to the class initializer.
In this exercise, we have imported pandas as pd and read in a data set containing all Olympic medals awarded in the 100 meter sprint from 1896 to 2012. A color column has been added indicating the CSS colorname we wish to use in the plot for every data point.
Your job is to import the ColumnDataSource class, create a new ColumnDataSource object from the DataFrame df, and plot circle glyphs with 'Year' on the x-axis and 'Time' on the y-axis. Color each glyph by the color column.
The figure object p has already been created for you.
'''
import pandas as pd
from bokeh.plotting import figure
from bokeh.io import output_file, show
df = pd.read_csv('../datasets/sprint.csv')
p = figure(x_axis_label='Year', y_axis_label='Time')
'''
INSTRUCTIONS
* Import the ColumnDataSource class from bokeh.plotting.
* Use the ColumnDataSource() function to make a new ColumnDataSource object called source from the DataFrame df.
* Use p.circle() to plot circle glyphs of size=8 on the figure p with 'Year' on the x-axis and 'Time' on the y-axis. Be sure to also specify source=source and color='color' so that the ColumnDataSource object is used and each glyph is colored by the color column.
'''
# Import the ColumnDataSource class from bokeh.plotting
from bokeh.plotting import ColumnDataSource

# Create a ColumnDataSource: source
# Wrapping the DataFrame lets glyphs reference its columns by name.
source = ColumnDataSource(df)

# Add circle glyphs to the figure p
# Each row of df becomes one circle; the 'color' column supplies a
# per-row CSS color name, so every glyph is colored individually.
p.circle(x='Year', y='Time', color='color', size=8, source=source)

# Specify the name of the output file and show the result
output_file('sprint.html')
show(p)
| true |
f8cdaee2db28f409fc8bf13ea75d0abb9a509a99 | sashakrasnov/datacamp | /22-network-analysis-in-python-1/4-bringing-it-all-together/08-finding-important-collaborators.py | 1,909 | 4.125 | 4 | '''
Finding important collaborators
Almost there! You'll now look at important nodes once more. Here, you'll make use of the degree_centrality() and betweenness_centrality() functions in NetworkX to compute each of the respective centrality scores, and then use that information to find the "important nodes". In other words, your job in this exercise is to find the user(s) that have collaborated with the most number of users.
'''
import pickle
import networkx as nx

# Reading Graph v1 pickle data
#with open('../datasets/github_users.p', 'rb') as f:
#    G = pickle.load(f)

# Reading Graph v2 pickle data
# The v2 file stores a (nodes, edges) tuple rather than a pickled Graph,
# so the Graph object is rebuilt here from those two sequences.
with open('../datasets/github_users.p2', 'rb') as f:
    nodes, edges = pickle.load(f)

    G = nx.Graph()
    G.add_nodes_from(nodes)
    G.add_edges_from(edges)
'''
INSTRUCTIONS
* Compute the degree centralities of G. Store the result as deg_cent.
* Compute the maximum degree centrality. Since deg_cent is a dictionary, you'll have to use the .values() method to get a list of its values before computing the maximum degree centrality with max().
* Identify the most prolific collaborators using a list comprehension:
* Iterate over the degree centrality dictionary deg_cent that was computed earlier using its .items() method. What condition should be satisfied if you are seeking to find user(s) that have collaborated with the most number of users? Hint: It has do to with the maximum degree centrality.
* Hit 'Submit Answer' to see who the most prolific collaborator(s) is/are!
'''
# Compute the degree centralities of G: deg_cent
deg_cent = nx.degree_centrality(G)

# Compute the maximum degree centrality: max_dc
max_dc = max(deg_cent.values())

# Find the user(s) that have collaborated the most: prolific_collaborators
# BUG FIX: the comparison must be on each node's centrality `dc`, not on
# the whole `deg_cent` dict (a dict never equals a float, so the original
# comprehension always produced an empty list).
prolific_collaborators = [n for n, dc in deg_cent.items() if dc == max_dc]

# Print the most prolific collaborator(s)
print(prolific_collaborators)
21fce1ba844f4c7273ed86a18e21b7725bc92f5f | sashakrasnov/datacamp | /24-data-types-for-data-science/1-fundamental-data-types/06-determining-set-differences.py | 1,822 | 4.65625 | 5 | '''
Determining set differences
Another way of comparing sets is to use the difference() method. It returns all the items found in one set but not another. It's important to remember the set you call the method on will be the one from which the items are returned. Unlike tuples, you can add() items to a set. A set will only add items that do not exist in the set.
In this exercise, you'll explore what names were common in 2011, but are no longer common in 2014. The set baby_names_2014 has been pre-loaded into your workspace. As in the previous exercise, the names have been converted to title case to ensure a proper comparison.
'''
# Build the set of 2014 names, title-cased for comparison
baby_names_2014 = set()

with open('../datasets/baby_names.csv') as f:
    # Keep the raw rows around: later code reuses `records` for 2011
    records = [r.strip().split(',') for r in f]

    for row in records:
        if row[0] == '2014':
            # FIX: title-case the name to match the 2011 set, which is
            # built with .title() further down. Without this, difference()
            # compares mixed casings and reports spurious differences.
            baby_names_2014.add(row[3].title())
'''
INSTRUCTIONS
* Create an empty set called baby_names_2011. You can do this using set().
* Use a for loop to iterate over each row in records:
* If the first column of each row in records is "2011", add its fourth column to baby_names_2011. Remember that Python is 0-indexed!
* Find the difference between baby_names_2011 and baby_names_2014. Store the result as differences.
* Print the differences. This has been done for you, so hit 'Submit Answer' to see the result!
'''
# Collect the title-cased 2011 names in a single pass
baby_names_2011 = {row[3].title() for row in records if row[0] == '2011'}

# Names that were common in 2011 but absent in 2014
differences = baby_names_2011 - baby_names_2014

# Show the result
print(differences)
| true |
8b9d573be4b1eebe8a6d3ee66252f8c3442890a0 | sashakrasnov/datacamp | /29-statistical-simulation-in-python/2-probability-and-data-generation-process/03-game-of-thirteen.py | 1,668 | 4.40625 | 4 | '''
Game of thirteen
The French mathematician Pierre Rémond de Montmort, who was known for his work in combinatorics, proposed a simple game called the Game of Thirteen. You have a deck of 13 cards, each numbered from 1 through 13. Shuffle this deck and draw cards one by one. A coincidence is when the number on the card matches the order in which the card is drawn. For instance, if the 5th card you draw happens to be a 5, it's a coincidence. You win the game if you get through all the cards without any coincidences. Let's calculate the probability of winning at this game using simulation.
By completing this exercise, you will further strengthen your ability to cast abstract problems into the simulation framework for estimating probabilities.
'''
import numpy as np
# Set random seed to get the same result or remove for different each time
np.random.seed(111)
'''
INSTRUCTIONS
* For each drawing, draw all the cards in deck without replacement and assign to draw.
* Check if there are any coincidences in the draw and, if there are, increment the coincidences counter by 1.
* Calculate winning probability as the fraction of games without any coincidences.
'''
# Pre-set constant variables: the 13-card deck, number of games, counter
deck, sims, coincidences = np.arange(1, 14), 10000, 0

for _ in range(sims):
    # Shuffle the whole deck: draw every card without replacement
    draw = np.random.choice(a=deck, size=len(deck), replace=False)
    # A coincidence: the card's value equals its (1-based) draw position.
    # Compare against the loop-invariant `deck` directly instead of
    # rebuilding list(np.arange(1, 14)) on every iteration, and test the
    # boolean itself rather than `== True`.
    if (draw == deck).any():
        coincidences += 1

# Probability of winning = fraction of games with no coincidence at all
print('Probability of winning = {}'.format(1 - coincidences / sims))
| true |
d2f59c0803cbe2036e08d903216d7fb254a8fa1b | sashakrasnov/datacamp | /29-statistical-simulation-in-python/1-basics-of-randomness-and-simulation/05-simulating-the-dice-game.py | 1,556 | 4.46875 | 4 | '''
Simulating the dice game
We now know how to implement the first three steps of a simulation. Now let's consider the next step - repeated random sampling.
Simulating an outcome once doesn't tell us much about how often we can expect to see that outcome. In the case of the dice game from the previous exercise, it's great that we won once. But suppose we want to see how many times we can expect to win if we played this game multiple times, we need to repeat the random sampling process many times. Repeating the process of random sampling is helpful to understand and visualize inherent uncertainty and deciding next steps.
Following this exercise, you will be familiar with implementing the fourth step of running a simulation - sampling repeatedly and generating outcomes.
'''
import numpy as np
# Set random seed to get the same result or remove for different each time
np.random.seed(223)
'''
INSTRUCTIONS
* Set sims to 100 repetitions and initialize wins to 0.
* Write a for loop to repeat throwing the dice.
* Set outcomes to the outcome of throwing two dice.
* If the two dice show the same number, increment wins by 1.
'''
# Initialize model parameters & simulate dice throw
die, probabilities, num_dice = [1, 2, 3, 4, 5, 6], [1/6] * 6, 2

sims, wins = 100, 0
for _ in range(sims):
    outcomes = np.random.choice(die, size=num_dice, p=probabilities)
    # Both dice showing the same face counts as a win
    if outcomes[0] == outcomes[1]:
        wins += 1

print('In {} games, you win {} times'.format(sims, wins))
f8bf2d9477abd2cfef2629ad87f03ce171d34e26 | sashakrasnov/datacamp | /22-network-analysis-in-python-1/3-structures/01-identifying-triangle-relationships.py | 2,208 | 4.125 | 4 | '''
Identifying triangle relationships
Now that you've learned about cliques, it's time to try leveraging what you know to find structures in a network. Triangles are what you'll go for first. We may be interested in triangles because they're the simplest complex clique. Let's write a few functions; these exercises will bring you through the fundamental logic behind network algorithms.
In the Twitter network, each node has an 'occupation' label associated with it, in which the Twitter user's work occupation is divided into celebrity, politician and scientist. One potential application of triangle-finding algorithms is to find out whether users that have similar occupations are more likely to be in a clique with one another.
'''
import pickle
import networkx as nx

# Reading Graph v1 pickle data
#with open('../datasets/ego-twitter-subsampled.p', 'rb') as f:
#    T = pickle.load(f)

# Reading Graph v2 pickle data
# The v2 file stores a (nodes, edges) tuple; the Graph is rebuilt from it.
with open('../datasets/ego-twitter-subsampled.p2', 'rb') as f:
    nodes, edges = pickle.load(f)

    T = nx.Graph()
    T.add_nodes_from(nodes)
    T.add_edges_from(edges)

# Triangle detection below assumes an undirected graph
T = T.to_undirected()
'''
INSTRUCTIONS
* Import combinations from itertools.
* Write a function is_in_triangle() that has two parameters - G and n - and checks whether a given node is in a triangle relationship or not.
* combinations(iterable, n) returns combinations of size n from iterable. This will be useful here, as you want combinations of size 2 from G.neighbors(n).
* To check whether an edge exists between two nodes, use the .has_edge(node1, node2) method. If an edge exists, then the given node is in a triangle relationship, and you should return True.
'''
from itertools import combinations

# Define is_in_triangle()
def is_in_triangle(G, n):
    '''Return True if node `n` of graph `G` participates in a triangle.

    `n` is in a triangle exactly when some pair of its neighbors is
    directly connected by an edge; any() short-circuits on the first hit.
    '''
    return any(
        G.has_edge(n1, n2) for n1, n2 in combinations(G.neighbors(n), 2)
    )
| true |
fd12b8f0353d46fac24d7d2efd50ba1185a290a5 | sashakrasnov/datacamp | /07-cleaning-data-in-python/4-cleaning-data-for-analysis/06-custom-functions-to-clean-data.py | 2,461 | 4.21875 | 4 | '''
Custom functions to clean data
You'll now practice writing functions to clean data.
The tips dataset has been pre-loaded into a DataFrame called tips. It has a 'sex' column that contains the values 'Male' or 'Female'. Your job is to write a function that will recode 'Male' to 1, 'Female' to 0, and return np.nan for all entries of 'sex' that are neither 'Male' nor 'Female'.
Recoding variables like this is a common data cleaning task. Functions provide a mechanism for you to abstract away complex bits of code as well as reuse code. This makes your code more readable and less error prone.
As Dan showed you in the videos, you can use the .apply() method to apply a function across entire rows or columns of DataFrames. However, note that each column of a DataFrame is a pandas Series. Functions can also be applied across Series. Here, you will apply your function over the 'sex' column.
INSTRUCTIONS
* Define a function named recode_sex() that has one parameter: sex_value.
* If sex_value equals 'Male', return 1.
* Else, if sex_value equals 'Female', return 0.
* If sex_value does not equal 'Male' or 'Female', return np.nan. NumPy has been pre-imported for you.
* Apply your recode_sex() function over tips.sex using the .apply() method to create a new column: 'sex_recode'. Note that when passing in a function inside the .apply() method, you don't need to specify the parentheses after the function name.
* Hit 'Submit Answer' and take note of the new 'sex_recode' column in the tips DataFrame!
'''
import re

import numpy as np
import pandas as pd
tips = pd.read_csv('../datasets/tips.csv')
# Define recode_sex()
def recode_sex(sex_value):
    """Recode 'Male' to 1, 'Female' to 0, and anything else to NaN.

    Used with Series.apply to turn the categorical 'sex' column into a
    numeric one. Requires numpy (np) to be imported at the top of the
    file for the NaN sentinel — the original script omitted it, so any
    unexpected value raised NameError at runtime.
    """
    # Return 1 if sex_value is 'Male'
    if sex_value == 'Male':
        return 1
    # Return 0 if sex_value is 'Female'
    elif sex_value == 'Female':
        return 0
    # Any other (or missing) value maps to NaN
    else:
        return np.nan
# Build 'sex_recode' by mapping recode_sex over the 'sex' Series
tips['sex_recode'] = tips['sex'].map(recode_sex)

# Peek at the first five rows to verify the new column
print(tips.head())
'''
total_bill tip sex smoker day time size sex_recode
0 16.99 1.01 Female No Sun Dinner 2.0 0.0
1 10.34 1.66 Male No Sun NaN 3.0 1.0
2 21.01 3.50 Male No Sun Dinner 3.0 1.0
3 23.68 3.31 Male No Sun Dinner 2.0 1.0
4 24.59 3.61 Female No Sun Dinner 4.0 0.0
''' | true |
b754d2966c71ec0c22f1af9caa7bdf933f8c3616 | sashakrasnov/datacamp | /11-analyzing-police-activity-with-pandas/1-preparing-the-data-for-analysis/03-dropping-rows.py | 1,300 | 4.46875 | 4 | '''
Dropping rows
When you know that a specific column will be critical to your analysis, and only a small fraction of rows are missing a value in that column, it often makes sense to remove those rows from the dataset.
During this course, the driver_gender column will be critical to many of your analyses. Because only a small fraction of rows are missing driver_gender, we'll drop those rows from the dataset.
'''
import pandas as pd

# Load the first 100k Rhode Island traffic-stop records;
# low_memory=False reads the file in one pass to avoid mixed-dtype guesses
ri = pd.read_csv('../datasets/RI_cleaned.csv', nrows=100000, low_memory=False)

# Drop columns that carry no per-stop information for this analysis
# NOTE(review): presumably 'county_name' is all-null and 'state' is a
# constant 'RI' in this dataset — confirm against the source CSV.
ri.drop(['county_name', 'state'], axis='columns', inplace=True)
'''
INSTRUCTIONS
* Count the number of missing values in each column.
* Drop all rows that are missing driver_gender by passing the column name to the subset parameter of .dropna().
* Count the number of missing values in each column again, to verify that none of the remaining rows are missing driver_gender.
* Examine the DataFrame's .shape to see how many rows and columns remain.
'''
# Show how many values are missing in each column
print(ri.isnull().sum())

# Keep only the rows where 'driver_gender' is present
ri = ri.dropna(subset=['driver_gender'])

# Verify that no 'driver_gender' values remain missing
print(ri.isnull().sum())

# Inspect the remaining dimensions
print(ri.shape)
c6c9a3c3dc132e3cfb2cfc349f23350c9d163dba | sashakrasnov/datacamp | /04-python-data-science-toolbox-2/3-bringing-it-all-together!/07-writing-a-generator-to-load-data-in-chunks-3.py | 1,737 | 4.46875 | 4 | '''
Writing a generator to load data in chunks (3)
Great! You've just created a generator function that you can use to help you process large files.
Now let's use your generator function to process the World Bank dataset like you did previously.
You will process the file line by line, to create a dictionary of the counts of how many times each country appears in a column in the dataset. For this exercise, however, you won't process just 1000 rows of data, you'll process the entire dataset!
The generator function read_large_file() and the csv file 'world_dev_ind.csv' are preloaded and ready for your use.
Go for it!
'''
def read_large_file(file_object):
    '''A generator that yields one line of a file at a time, lazily.

    readline() returns the empty string only at end-of-file (a blank
    line still carries its newline), so iter()'s two-argument sentinel
    form stops in exactly the same place as an explicit break.
    '''
    yield from iter(file_object.readline, '')
'''
Instructions
* Bind the file 'world_dev_ind.csv' to file in the context manager with open().
* Complete the for loop so that it iterates over the generator from the call to read_large_file() to process all the rows of the file.
'''
# Initialize an empty dictionary: counts_dict
counts_dict = {}

# Open a connection to the file
# (renamed from `file`, which shadowed the builtin)
with open('../datasets/world_dev_ind.csv') as csv_file:

    # Iterate over the generator from read_large_file()
    for line in read_large_file(csv_file):
        row = line.split(',')
        first_col = row[0]
        # dict.get replaces the redundant `in counts_dict.keys()`
        # membership test and the double lookup of the two-branch form
        counts_dict[first_col] = counts_dict.get(first_col, 0) + 1

# Print the tally of how often each country appears
print(counts_dict)
| true |
5ca7c9d133c9dcf4a63deed0f3741827294f3203 | sashakrasnov/datacamp | /32-introduction-to-pyspark/2-manipulating-data/03-selecting.py | 1,895 | 4.5 | 4 | '''
The Spark variant of SQL's SELECT is the .select() method. This method takes multiple arguments - one for each column you want to select. These arguments can either be the column name as a string (one for each column) or a column object (using the df.colName syntax). When you pass a column object, you can perform operations like addition or subtraction on the column to change the data contained in it, much like inside .withColumn().
The difference between .select() and .withColumn() methods is that .select() returns only the columns you specify, while .withColumn() returns all the columns of the DataFrame in addition to the one you defined. It's often a good idea to drop columns you don't need at the beginning of an operation so that you're not dragging around extra data as you're wrangling. In this case, you would use .select() and not .withColumn().
Remember, a SparkSession called spark is already in your workspace, along with the Spark DataFrame flights.
'''
from pyspark.sql import SparkSession
spark = SparkSession.builder.getOrCreate()
flights = spark.table('flights')
'''
INSTRUCTIONS
* Select the columns tailnum, origin, and dest from flights by passing the column names as strings. Save this as selected1.
* Select the columns origin, dest, and carrier using the df.colName syntax and then filter the result using both of the filters already defined for you (filterA and filterB) to only keep flights from SEA to PDX. Save this as selected2.
'''
# Select the first set of columns
# Column names passed as plain strings
selected1 = flights.select('tailnum', 'origin', 'dest')

# Select the second set of columns
# Same operation expressed with df.colName Column objects
temp = flights.select(flights.origin, flights.dest, flights.carrier)

# Define first filter
# A boolean Column expression: True for flights departing Seattle
filterA = flights.origin == 'SEA'

# Define second filter
# True for flights arriving at Portland
filterB = flights.dest == 'PDX'

# Filter the data, first by filterA then by filterB
# Chaining .filter() twice ANDs the two conditions together
selected2 = temp.filter(filterA).filter(filterB)
6ed2d41343506384911094ba689c134531af95a4 | sashakrasnov/datacamp | /26-manipulating-time-series-data-in-python/4-putting-it-all-together-building-a-value-weighted-index/03-import-index-component-price-information.py | 1,973 | 4.125 | 4 | '''
Import index component price information
Now you'll use the stock symbols for the companies you selected in the last exercise to calculate returns for each company.
'''
import pandas as pd
import matplotlib.pyplot as plt

# Load the NYSE listings sheet; 'n/a' strings become NaN
listings = pd.read_excel('../datasets/stock_data/listings.xlsx', sheet_name='nyse', na_values='n/a')
listings.set_index('Stock Symbol', inplace=True)

# Drop rows with missing 'sector' data
listings.dropna(subset=['Sector'], inplace=True)

# Select companies with IPO Year before 2019
listings = listings[listings['IPO Year'] < 2019]

# Select stock symbols for largest company for each sector
# nlargest(1) per group yields a (Sector, Stock Symbol) MultiIndex;
# get_level_values extracts just the ticker symbols
tickers = listings.groupby(['Sector'])['Market Capitalization'].nlargest(1).index.get_level_values('Stock Symbol')
'''
INSTRUCTIONS
We have already imported pandas as pd and matplotlib.pyplot as plt for you. We have also made the variable tickers available to you, which contains the Stock Symbol for each index component as a list.
* Print tickers to verify the content matches your result from the last exercise.
* Use pd.read_csv() to import 'stock_prices.csv', parsing the 'Date' column and also setting the 'Date' column as index before assigning the result to stock_prices. Inspect the result using .info().
* Calculate the price return for the index components by dividing the last row of stock_prices by the first, subtracting 1 and multiplying by 100. Assign the result to price_return.
* Plot a horizontal bar chart of the sorted returns with the title Stock Price Returns.
'''
# Print tickers
print(tickers)

# Import prices and inspect result
stock_prices = pd.read_csv('../datasets/stock_data/stock_data.csv', index_col='Date', parse_dates=['Date'])
print(stock_prices.info())

# Percentage return from the first to the last observed price
price_return = (stock_prices.iloc[-1] / stock_prices.iloc[0] - 1) * 100

# Horizontal bar chart of the sorted returns
price_return.sort_values().plot(kind='barh', title='Stock Price Returns')
plt.show()
| true |
7056ea7898204358ce56b6d531df2f3fe587ffcb | sashakrasnov/datacamp | /10-merging-dataframes-with-pandas/2-concatenating-data/04-concatenating-pandas-dataframes-along-column-axis.py | 2,317 | 4.15625 | 4 | '''
Concatenating pandas DataFrames along column axis
The function pd.concat() can concatenate DataFrames horizontally as well as vertically (vertical is the default). To make the DataFrames stack horizontally, you have to specify the keyword argument axis=1 or axis='columns'.
In this exercise, you'll use weather data with maximum and mean daily temperatures sampled at different rates (quarterly versus monthly). You'll concatenate the rows of both and see that, where rows are missing in the coarser DataFrame, null values are inserted in the concatenated DataFrame. This corresponds to an outer join (which you will explore in more detail in later exercises).
The files 'quarterly_max_temp.csv' and 'monthly_mean_temp.csv' have been pre-loaded into the DataFrames weather_max and weather_mean respectively, and pandas has been imported as pd.
'''
import pandas as pd

# Quarterly maximum temperatures
weather_max = pd.DataFrame(
    {'Max TemperatureF': [68, 89, 91, 84]},
    index=['Jan', 'Apr', 'Jul', 'Oct'],
)

# Monthly mean temperatures
weather_mean = pd.DataFrame(
    {'Mean TemperatureF': [53.100000, 70.000000, 34.935484, 28.714286,
                           32.354839, 72.870968, 70.133333, 35.000000,
                           62.612903, 39.800000, 55.451613, 63.766667]},
    index=['Apr', 'Aug', 'Dec', 'Feb', 'Jan', 'Jul',
           'Jun', 'Mar', 'May', 'Nov', 'Oct', 'Sep'],
)

weather_max.index.name = 'Month'
weather_mean.index.name = 'Month'
'''
INSTRUCTIONS

* Create a new DataFrame called weather by concatenating the DataFrames weather_max and weather_mean horizontally.
* Pass the DataFrames to pd.concat() as a list and specify the keyword argument axis=1 to stack them horizontally.
* Print the new DataFrame weather.
'''
# Stack the two frames side by side; months missing from the coarser
# quarterly frame get NaN (an outer join on the Month index)
weather = pd.concat([weather_max, weather_mean], axis='columns')

# Print weather
print(weather)
'''
> weather
Max TemperatureF Mean TemperatureF
Apr 89.0 53.100000
Aug NaN 70.000000
Dec NaN 34.935484
Feb NaN 28.714286
Jan 68.0 32.354839
Jul 91.0 72.870968
Jun NaN 70.133333
Mar NaN 35.000000
May NaN 62.612903
Nov NaN 39.800000
Oct 84.0 55.451613
Sep NaN 63.766667
''' | true |
0a7757a9fa33be538833f52f832c9872ff36c672 | sashakrasnov/datacamp | /10-merging-dataframes-with-pandas/1-preparing-data/04-sorting-dataframe-with-the-index-and-columns.py | 2,593 | 4.96875 | 5 | '''
Sorting DataFrame with the Index & columns
It is often useful to rearrange the sequence of the rows of a DataFrame by sorting. You don't have to implement these yourself; the principal methods for doing this are .sort_index() and .sort_values().
In this exercise, you'll use these methods with a DataFrame of temperature values indexed by month names. You'll sort the rows alphabetically using the Index and numerically using a column. Notice, for this data, the original ordering is probably most useful and intuitive: the purpose here is for you to understand what the sorting methods do.
INSTRUCTIONS
* Read 'monthly_max_temp.csv' into a DataFrame called weather1 with 'Month' as the index.
* Sort the index of weather1 in alphabetical order using the .sort_index() method and store the result in weather2.
* Sort the index of weather1 in reverse alphabetical order by specifying the additional keyword argument ascending=False inside .sort_index().
* Use the .sort_values() method to sort weather1 in increasing numerical order according to the values of the column 'Max TemperatureF'.
'''
# Import pandas
import pandas as pd

# Load the monthly maximum temperatures, indexed by month: weather1
weather1 = pd.read_csv('../datasets/monthly_max_temp.csv', index_col='Month')
print(weather1.head())

# weather2: index sorted alphabetically
weather2 = weather1.sort_index()
print(weather2.head())

# weather3: index sorted in reverse alphabetical order
weather3 = weather1.sort_index(ascending=False)
print(weather3.head())

# weather4: rows ordered by increasing maximum temperature
weather4 = weather1.sort_values(by='Max TemperatureF')
print(weather4.head())
'''
> weather1.head()
Max TemperatureF
Month
Jan 68
Feb 60
Mar 68
Apr 84
May 88
> weather2.head()
Max TemperatureF
Month
Apr 84
Aug 86
Dec 68
Feb 60
Jan 68
> weather3.head()
Max TemperatureF
Month
Sep 90
Oct 84
Nov 72
May 88
Mar 68
> weather4.head()
Max TemperatureF
Month
Feb 60
Jan 68
Mar 68
Dec 68
Nov 72
''' | true |
32de5c51962ed107e39b8c67a51b85be88214d39 | sashakrasnov/datacamp | /27-visualizing-time-series-data-in-python/4-work-with-multiple-time-series/01-load-multiple-time-series.py | 1,332 | 4.125 | 4 | '''
Load multiple time series
Whether it is during personal projects or your day-to-day work as a Data Scientist, it is likely that you will encounter situations that require the analysis and visualization of multiple time series at the same time.
Provided that the data for each time series is stored in distinct columns of a file, the pandas library makes it easy to work with multiple time series. In the following exercises, you will work with a new time series dataset that contains the amount of different types of meat produced in the USA between 1944 and 2012.
'''
import pandas as pd
url_meat = '../datasets/ch4_meat.csv'
'''
INSTRUCTIONS
We've imported pandas using the pd alias.
* Read in the the csv file located at url_meat into a DataFrame called meat.
* Convert the date column in meat to the datetime type.
* Set the date column as the index of meat.
* Print the summary statistics of all the numeric columns in meat.
'''
# Read in meat DataFrame
meat = pd.read_csv(url_meat)

# Review the first five lines of the meat DataFrame
print(meat.head())

# Convert the date column to a datestamp type
meat['date'] = pd.to_datetime(meat['date'])

# Set the date column as the index of your DataFrame meat
meat.set_index('date', inplace=True)

# Print the summary statistics of the numeric columns
print(meat.describe())
0b8caa4f6f082d1f8c456b94dc1e87903a66f69e | sashakrasnov/datacamp | /24-data-types-for-data-science/2-dictionaries--the-root-of-python/04-adding-and-extending-dictionaries.py | 2,763 | 4.65625 | 5 | '''
Adding and extending dictionaries
If you have a dictionary and you want to add data to it, you can simply create a new key and assign the data you desire to it. It's important to remember that if it's a nested dictionary, then all the keys in the data path must exist, and each key in the path must be assigned individually.
You can also use the .update() method to update a dictionary with keys and values from another dictionary, tuples or keyword arguments.
Here, you'll combine several techniques used in prior exercises to setup your dictionary in a way that makes it easy to find the least popular baby name for each year.
Your job is to add data for the year 2011 to your dictionary by assignment, 2012 by update, and then find the least popular baby name for each year.
'''
# Build {year: {rank: name}} for male names from the raw CSV.
boy_names = {}

with open('../datasets/baby_names.csv') as f:
    # Skipping header
    _ = f.readline()

    # Iterating over lines
    for row in f:
        year, sex, _, name, count, rank = row.strip().split(',')

        year = int(year)
        rank = int(rank)

        if sex == 'MALE':
            # Empty dictionary for 2012
            # NOTE(review): the else branch fires for the FIRST male row of
            # every year (the year key doesn't exist yet), so that row's
            # name is silently dropped while the empty dict is created.
            # 2012 always takes the else branch, so it stays empty on
            # purpose — the exercise fills it in later via .update().
            if year in boy_names and year != 2012:
                boy_names[year][rank] = name
            else:
                boy_names[year] = {}

# Sorting dictionary year by year (ascending rank order within each year)
for y in boy_names:
    boy_names[y] = dict(sorted(boy_names[y].items()))

# Separating 2011 year from main dictionary (re-added in the exercise below)
names_2011 = boy_names.pop(2011)
'''
INSTRUCTIONS
* Assign the names_2011 dictionary as the value to the 2011 key of the boy_names dictionary.
* Update the 2012 key in the boy_names dictionary with the following data in a list of tuples: (1, 'Casey'), (2, 'Aiden').
* Loop over the boy_names dictionary.
* Inside the first for loop, use another for loop to loop over and sort the data for each year of boy_names by descending rank.
* Make sure you have a rank and print 'No Data Available' if not. This has been done for you.
* Safely print the year and least popular name or 'Not Available' if it is not found. Take advantage of the .get() method.
'''
# Assign the names_2011 dictionary as the value to the 2011 key of boy_names
boy_names[2011] = names_2011

# Update the 2012 key in the boy_names dictionary
# .update() accepts an iterable of (key, value) pairs
boy_names[2012].update([
    (1, 'Casey'),
    (2, 'Aiden')
])

# Loop over the boy_names dictionary
for year in boy_names:
    # Loop over and sort the data for each year by descending rank
    # [:1] keeps only the LARGEST rank number, i.e. the least popular name;
    # for a year with an empty dict this loop body never runs, so nothing
    # is printed for that year.
    for rank in sorted(boy_names[year], reverse=True)[:1]:
        # Check that you have a rank
        # NOTE(review): ranks here start at 1, so `not rank` never fires;
        # kept as a guard against a hypothetical rank of 0.
        if not rank:
            print(year, 'No Data Available')
        # Safely print the year and the least popular name or 'Not Available'
        print(year, boy_names[year].get(rank, 'Not Available'))
bbfd20e82c8d9bfe591e3345ed94c2e94702eb7e | sashakrasnov/datacamp | /12-introduction-to-databases-in-python/4-creating-and-manipulating-your-own-databases/06-updating-individual-records.py | 2,179 | 4.28125 | 4 | '''
Updating individual records
The update statement is very similar to an insert statement, except that it also typically uses a where clause to help us determine what data to update. You'll be using the FIPS state code using here, which is appropriated by the U.S. government to identify U.S. states and certain other associated areas. Recall that you can update all wages in the employees table as follows:
stmt = update(employees).values(wage=100.00)
For your convenience, the names of the tables and columns of interest in this exercise are: state_fact (Table), name (Column), and fips_state (Column).
'''
from sqlalchemy import create_engine, select, update, MetaData, Table
# Connect to the SQLite census database shipped with the course datasets.
engine = create_engine('sqlite:///../datasets/census.sqlite')
connection = engine.connect()
metadata = MetaData()
# Reflect the state_fact table definition from the live database
# (autoload/autoload_with pull the column metadata from the engine).
state_fact = Table('state_fact', metadata, autoload=True, autoload_with=engine)
'''
INSTRUCTIONS
* Build a statement to select all columns from the state_fact table where the name column is New York. Call it select_stmt.
* Print the results of executing the select_stmt and fetching all records.
* Build an update statement to change the fips_state column code to 36, save it as stmt.
* Use a where clause to filter for states with the name of 'New York' in the state_fact table.
* Execute stmt via the connection and save the output as results.
* Hit 'Submit Answer' to print the rowcount of the results and the results of executing select_stmt. This will verify the fips_state code is now 36.
'''
# Build a select statement: select_stmt
select_stmt = select([state_fact]).where(state_fact.columns.name == 'New York')

# Show the current New York record(s) before the update
print(connection.execute(select_stmt).fetchall())

# Build the update as one chained expression: restrict to rows whose name
# is New York, then set the fips_state code to 36
stmt = update(state_fact).where(state_fact.columns.name == 'New York').values(fips_state=36)

# Execute the statement: results
results = connection.execute(stmt)

# Report how many rows were changed
print(results.rowcount)

# Re-run the select to confirm the fips_state code is now 36
print(connection.execute(select_stmt).fetchall())
| true |
9d47888970d4d2bba4d9555352eb0a23c2e17a2d | sashakrasnov/datacamp | /18-linear-classifiers-in-python/3-logistic-regression/01-regularized-logistic-regression.py | 1,519 | 4.1875 | 4 | '''
Regularized logistic regression
In Chapter 1 you used logistic regression on the handwritten digits data set. Here, we'll explore the effect of L2 regularization. The handwritten digits dataset is already loaded, split, and stored in the variables X_train, y_train, X_valid, and y_valid. The variables train_errs and valid_errs are already initialized as empty lists.
'''
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
import matplotlib.pyplot as plt
# Load the handwritten digits data and split it into train/validation sets.
digits = datasets.load_digits()
X_train, X_valid, y_train, y_valid = train_test_split(digits.data, digits.target)
# Per-model error rates, filled in by the loop further down.
train_errs = []
valid_errs = []
# Inverse regularization strengths to try (small C = strong L2 penalty).
C_values = [0.001, 0.01, 0.1, 1, 10, 100, 1000]
'''
INSTRUCTIONS
* Loop over the different values of C_value, fitting a model each time. Save the error on the training set and the validation set for each model.
* Create a plot of the training and testing error as a function of the regularization parameter, C.
* Looking at the plot, what's the best value of C?
'''
# Fit one logistic-regression model per regularization strength and record
# the error rate (1 - accuracy) on both the training and validation sets.
# (Fixed: the original last line had dataset residue "| true |" fused onto
# it, which made the file a syntax error.)
for C_value in C_values:
    # Create LogisticRegression object and fit
    lr = LogisticRegression(C=C_value)
    lr.fit(X_train, y_train)

    # Evaluate error rates and append to lists
    train_errs.append(1.0 - lr.score(X_train, y_train))
    valid_errs.append(1.0 - lr.score(X_valid, y_valid))

# Plot training and validation error against C on a log-scaled x axis
plt.semilogx(C_values, train_errs, C_values, valid_errs)
plt.legend(('train', 'validation'))
plt.show()
4d1c8c000e06f5459daf9ce36ffc09a46bab346d | sashakrasnov/datacamp | /17-supervised-learning-with-scikit-learn/1-classification/02-k-nearest-neighbors-predict.py | 2,532 | 4.28125 | 4 | '''
k-Nearest Neighbors: Predict
Having fit a k-NN classifier, you can now use it to predict the label of a new data point. However, there is no unlabeled data available since all of it was used to fit the model! You can still use the .predict() method on the X that was used to fit the model, but it is not a good indicator of the model's ability to generalize to new, unseen data.
In the next video, Hugo will discuss a solution to this problem. For now, a random unlabeled data point has been generated and is available to you as X_new. You will use your classifier to predict the label for this new data point, as well as on the training data X that the model has already seen. Using .predict() on X_new will generate 1 prediction, while using it on X will generate 435 predictions: 1 for each sample.
The DataFrame has been pre-loaded as df. This time, you will create the feature array X and target variable array y yourself.
'''
import pandas as pd
import numpy as np
from random import random
# Load the congressional voting records: 'y' -> 1, 'n' -> 0, '?' -> NaN.
df = pd.read_csv('../datasets/house-votes-84.csv', header=None).replace({'y':int(1),'n':int(0), '?':np.nan})
# Impute each missing vote with the column's majority vote (1 if mean > 0.5).
fills = (df.mean() > 0.5).astype('int') # fill values for NaN
df.fillna(value=fills, inplace=True)
df.columns = ['party', 'infants', 'water', 'budget', 'physician', 'salvador', 'religious', 'satellite', 'aid', 'missile', 'immigration', 'synfuels', 'education', 'superfund', 'crime', 'duty_free_exports', 'eaa_rsa']
df.loc[:, 'infants':'eaa_rsa'] = df.loc[:, 'infants':'eaa_rsa'].astype('int')
# One random unlabeled observation with 16 binary features (matches the
# number of vote columns above).
#X_new = np.array([[random() for _ in range(16)]])
X_new = np.array([[int(random() > 0.5) for _ in range(16)]])
'''
INSTRUCTIONS
* Create arrays for the features and the target variable from df. As a reminder, the target variable is 'party'.
* Instantiate a KNeighborsClassifier with 6 neighbors.
* Fit the classifier to the data.
* Predict the labels of the training data, X.
* Predict the label of the new data point X_new.
'''
# Import KNeighborsClassifier from sklearn.neighbors
from sklearn.neighbors import KNeighborsClassifier

# Feature matrix: every column except the target; target vector: 'party'
X = df.drop('party', axis=1).values
y = df['party'].values

# Build a 6-nearest-neighbours classifier and train it on the full data set
knn = KNeighborsClassifier(n_neighbors=6)
knn.fit(X,y)

# Labels predicted for the data the model was trained on
y_pred = knn.predict(X)

# Label predicted for the unseen observation X_new
new_prediction = knn.predict(X_new)
print('Prediction: {}'.format(new_prediction))
| true |
b574a6df1c7c3fda951b2db54dee2bf19c76a8db | geohotweb/programing | /ecuaciones/leyendo_numeros.py | 279 | 4.34375 | 4 | #Este programa va leyendo numeros y mostrandolos por pantalla mientras los numeros que se introducen sean positivos.
# Echo every number the user types; a negative value ends the loop.
num = int(input('Introduce un numero: '))
while True:
    if num < 0:
        break
    print(num)
    num = int(input('Introduce otro numero: '))
print('Ha finalizado mi trabajo, adiós!.')
| false |
aa1b56ed11db7fd4ed50f0346226807a05b6db15 | geohotweb/programing | /ecuaciones/vocales_consonantes.py | 664 | 4.125 | 4 | #Este programa determina si el caracter introducido es vocal mayuscula o vocal minuscula y si es consonante mayuscula o minuscula tambien dice si hay un caraccter desconocido.
#! usr/bin/python
# Classify a single input character as an uppercase/lowercase vowel,
# an uppercase/lowercase consonant, or an unknown character.
# NOTE(review): 'in' does substring matching, so multi-character input such
# as 'ae' (or the empty string) can still match a category — confirm whether
# single-character input should be enforced.
letra_o_vocal = input('Introduce la primera vocal o consonante: ')
mayusculas1 = 'BCDFGHJKLMNPQRSTVWXYZ'
minusculas1 = 'bcdfghjklmnpqrstvwxyz'
vocal_mayus = 'AEIOU'
vocal_minus = 'aeiou'
# The four categories are mutually exclusive, so an if/elif chain suffices.
if letra_o_vocal in vocal_minus:
    print('Es una vocal minuscula.')
elif letra_o_vocal in vocal_mayus:
    print('Es una vocal mayuscula.')
elif letra_o_vocal in minusculas1:
    print('Es una consonante minuscula.')
elif letra_o_vocal in mayusculas1:
    print('Es una consonante mayuscula.')
else:
    # The file's header comment promises a message for unknown characters;
    # the original code never printed one. Digits, punctuation, etc. land here.
    print('Es un caracter desconocido.')
| false |
eda2584a11a3e4fb1a77f8082ce455df3b4c4713 | Xinyuan-wur/algorithms-in-bioinformatics | /clustering/assignment_kmeans_skeleton.py | 1,917 | 4.25 | 4 | #!/usr/bin/env python
"""
Author:
Student number:
Implementation of the k-means clustering algorithm
Hints:
- write a function to obtain Euclidean distance between two points.
- write a function to initialize centroids by randomly selecting points
from the initial set of points. You can use the random.sample() method
- write a function to find the closest centroid to each point and assign
the points to the clusters.
- write a function to calculate the centroids given the clusters
- write a function to implement k-means
- write a function to calculate WGSS given the clusters and the points
- write a function to calculate BGSS given the clusters and the points
"""
# import statements
import random
def csv_parser(lines):
    """Return list of point coordinates as [[x1,y1,z1,...],[x2,y2,z2,...]]

    lines: open file or list of lines. Expected format:
        The file has a single header line.
        Each line contains the coordinates for one data point, starting
        with a label. A data point can be specified in arbitrary dimensions.

    Output: List of lists with the coordinates of the points.

    This function does not capture the labels of the data points. In case
    they are needed later, this function should be adjusted.
    """
    data_points = []
    for line in lines:
        # Skip empty/whitespace-only lines so they don't become empty points.
        if not line.strip():
            continue
        items = line.strip().split(",")
        try:  # will fail on the header line in the file
            # list(...) forces the conversion immediately. In Python 3 a bare
            # map() is lazy, so the original code stored map objects instead
            # of lists and the header's ValueError never fired inside the try.
            data_points.append(list(map(float, items[1:])))  # first item is the label
        except ValueError:  # must be the header
            continue
    return data_points
if __name__ == "__main__":
    # Read the example data set and print the parsed coordinates. 'with'
    # guarantees the handle is closed (the original left the file open and
    # also shadowed the builtin name 'file').
    with open('2dtest.csv') as infile:
        lines1 = infile.readlines()
    print(csv_parser(lines1))

    # the code below should produce the results necessary to answer
    # the questions. In other words, if we run your code, we should see
    # the data that you used to answer the questions.
| true |
c162c550ba25f29822159f0c4fca5421e1dedd37 | 12reach/PlayWithPython | /primary/functions.py | 1,622 | 4.6875 | 5 | #!/usr/bin/python3
# functions and parameters are two important part of a program
# a function do the repetitive job so that we need not write same thing more and more
# functions do many things
# we will see it later in our detailed functions series
# let us define a function that pass two parameters and those parameters can be taken from the users
# we just want to make more interesting
def main():
    """Demonstrate calling for_loops with several explicit ranges."""
    print("This is main function.")
    ranges = ((2, 6), (3, 7), (5, 9), (0, 5))
    for position, (start, stop) in enumerate(ranges):
        # Print a separator before every call except the first.
        if position:
            print("--------")
        for_loops(start, stop)
def for_loops(a, b):
    """Print each integer from a (inclusive) up to b (exclusive)."""
    for current in range(a, b):
        print(current)


if __name__ == "__main__":
    main()
# and the output looks like
# This is main function.
# 2
# 3
# 4
# 5
# --------
# 3
# 4
# 5
# 6
# --------
# 5
# 6
# 7
# 8
# --------
# 0
# 1
# 2
# 3
# 4
# now we can have default values of two parameters so that if user does not pass any value the function runs
# first we need to change the above code
def main():
    """Demonstrate for_loops with explicit arguments and with its defaults."""
    print("This is main function.")
    # The empty tuple makes the final call use the default parameter values.
    argument_sets = ((2, 6), (3, 7), (5, 9), ())
    for position, args in enumerate(argument_sets):
        if position:
            print("--------")
        for_loops(*args)
def for_loops(a=0, b=10):
    """Print every integer in [a, b); with no arguments, prints 0 through 9."""
    count = a
    while count < b:
        print(count)
        count += 1


if __name__ == "__main__":
    main()
# in the last call we did not pass any value and see what happens
#####################
# This is main function.
# 2
# 3
# 4
# 5
# --------
# 3
# 4
# 5
# 6
# --------
# 5
# 6
# 7
# 8
# --------
# 0
# 1
# 2
# 3
# 4
# 5
# 6
# 7
# 8
# 9
#####################
# in the last section output has been changed by the default values | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.