blob_id stringlengths 40 40 | language stringclasses 1
value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30
values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2
values | text stringlengths 12 5.47M | download_success bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
f8ca474b86faf4dd73b3bc2797d8e5f3be1b0e08 | Python | Aasthaengg/IBMdataset | /Python_codes/p03290/s844351215.py | UTF-8 | 670 | 2.640625 | 3 | [] | no_license | from math import ceil
# Competitive-programming solution: d problem tiers, target score g.
# Try every bitmask of tiers to fully solve (earning the completion bonus),
# then, if still short of g, greedily top up from the highest-value
# remaining tier.  Answer = minimum number of problems solved.
# NOTE(review): the local name ``sum`` shadows the builtin within this script.
d,g = map(int,input().split())
pc = [list(map(int,input().split())) for i in range(d)]
ans = float("inf")
for bit in range(1<<d):
    sum = 0
    cnt = 0
    # Set of tiers NOT fully solved, so max() can pick the best leftover tier.
    # (translated from the original Japanese comment)
    nokori = set(range(1, d + 1))
    for i in range(d):
        if bit & (1 << i):
            # Full-solve tier i: 100*(i+1) points per problem plus bonus pc[i][1].
            sum += pc[i][0]*100*(i+1) + pc[i][1]
            cnt += pc[i][0]
            nokori.discard(i+1)
    if sum < g:
        # Top up from the most valuable remaining tier (no completion bonus).
        use = max(nokori)
        husoku = min(ceil((g-sum)/(use*100)),pc[use-1][0])
        cnt += husoku
        sum += husoku*use*100
    if sum >= g:
        ans = min(cnt,ans)
print(ans)
print(ans) | true |
f9a7f8aa781a96c448cf68d578e2bd8199f59343 | Python | ghbarreto/python | /python_crash_course/Chapter6/nesting_dictionaries.py | UTF-8 | 4,437 | 3.921875 | 4 | [] | no_license | print("==============================================")
# "Python Crash Course" chapter 6 exercises: nesting dictionaries in lists,
# lists in dictionaries, and dictionaries in dictionaries.
print("")
print("Exercise 6-7 People ")
person_1 = {'name': 'Thais', 'age': 32, 'relationship': 'Married'}
person_2 = {'name': 'Gabriel', 'age': 26, 'relationship': 'Married'}
persons = [person_1, person_2]
for props in persons:
    print(props)
print("==============================================")
print("")
print("Exercise 6-8 Pets ")
# pet_5 = {'name': 'Luke', 'age': '5', 'owner:': 'Thais', 'nationality': 'Brazil'}
# pet_6 = {'name': 'Sophia', 'age': '10', 'owner:': 'Gabriel', 'nationality': 'Canada'}
# pet_7 = {'name': 'Joia', 'age': '5', 'owner:': 'Suze', 'nationality': 'Spain'}
# pet_8 = {'name': 'Nessy', 'age': '12', 'owner:': 'Liss', 'nationality': 'Belgium'}
pet_2 = {'name': 'Branca', 'owner': 'Gabriel'}
pet_3 = {'name': 'Dhara', 'owner': 'Gabriel'}
pet_4 = {'name': 'Icaro', 'owner': 'Paulo'}
pet_description = {
    'Branca' : 'A really lovely and cute cat',
    'Dhara' : 'The best friend anyone can have',
    'Icaro' : 'Beautiful bird that brightens every morning'
}
pets = [pet_2, pet_3, pet_4]
# Print each pet's description by matching names against pet_description.
for pet in pets:
    for y, z in pet_description.items():
        if pet['name'] == y:
            print(f"{pet['name']} {z}")
print("==============================================")
print("")
print("Exercise 6-9 Favourite places ")
favourite_places = {
    'Gabriel': {
        'place': 'Vancouver',
        'country': 'Canada',
    },
    'Thais': {
        'place': 'Dublin',
        'country': 'Ireland',
    },
    'Rose': {
        'place': 'Lisbon',
        'country': 'Portugal',
    }
}
for y, key in favourite_places.items():
    print(f"{y}'s favourite place is {key['place']} that is located in {key['country']}")
print("==============================================")
print("")
print("Exercise 6-10 Favourite number ")
favourite_numbers = {
    'thais' : [6, 7, 8, 9],
    'gabriel' : [6, 4, 5, 10],
    'Rose': [7, 31, 4, 2],
    'Paulo': [20, 4, 9]
}
for k, y in favourite_numbers.items():
    print(f"{k}'s favourite numbers are:")
    for v in y:
        print(v)
print("==============================================")
print("")
print("Exercise 6-11 Cities")
cities = {
    'Vancouver': {
        'About': 'Vancouver is a major city in western Canada, located in the Lower Mainland region of British Columbia.',
        'Population': '2,581,000',
        'Country': 'Canada'
    },
    'Sao Paulo': {
        'About': 'The city is the capital of the surrounding state of São Paulo, the most populous and wealthiest state in Brazil.',
        'Population': '12,325,232',
        'Country': 'Brazil'
    },
    'Salvador': {
        'About': 'Founded by the Portuguese in 1549 as the first capital of Brazil, Salvador is one of the oldest colonial cities in the Americas.',
        'Population': '2.900.000',
        'Country': 'Brazil'
    }
}
for cityName, val in cities.items():
    print("")
    print(f"{cityName}")
    print(f"{val['About']}")
    print(f"{val['Population']}")
    print(f"{val['Country']}")
print("==============================================")
print("")
print("Exercise 6-12 Extensions")
# Same cities data, extended with a nested 'Pets' dictionary per city.
cities = {
    'Vancouver': {
        'About': 'Vancouver is a major city in western Canada, located in the Lower Mainland region of British Columbia.',
        'Population': '2,581,000',
        'Country': 'Canada',
        'Pets': {
            'Cats': '200k',
            'Dogs': '500k',
            'Birds': '25m'
        },
    },
    'Sao Paulo': {
        'About': 'The city is the capital of the surrounding state of São Paulo, the most populous and wealthiest state in Brazil.',
        'Population': '12,325,232',
        'Country': 'Brazil',
        'Pets': {
            'Cats': '25m',
            'Dogs': '100m',
            'Birds': '200m'
        },
    },
    'Salvador': {
        'About': 'Founded by the Portuguese in 1549 as the first capital of Brazil, Salvador is one of the oldest colonial cities in the Americas.',
        'Population': '2.900.000',
        'Country': 'Brazil',
        'Pets': {
            'Cats': '1m',
            'Dogs': '1.5m',
            'Birds': '50m',
        },
    }
}
for v, k in cities.items():
    print(f"{v}")
    print(f"- {k['About']}")
    print(f"- Population: {k['Population']}")
    print(f"- Location: {k['Country']}")
    print(f"- Pet Population: ")
    for vs, i in k['Pets'].items():
        print(f"  - {vs}: {i} ")
| true |
25b6b3c2a719b7ac8d209dd01d3cb028d8191482 | Python | changediyasunny/Challenges | /leetcode_2018/304_range_sum_query_immutable.py | UTF-8 | 1,445 | 4.09375 | 4 | [] | no_license | """
304. Range Sum Query 2D - Immutable
Given a 2D matrix matrix, find the sum of the elements inside the rectangle defined
by its upper left corner (row1, col1) and lower right corner (row2, col2).
Given matrix = [
[3, 0, 1, 4, 2],
[5, 6, 3, 2, 1],
[1, 2, 0, 1, 5],
[4, 1, 0, 1, 7],
[1, 0, 3, 0, 5]
]
sumRegion(2, 1, 4, 3) -> 8
sumRegion(1, 1, 2, 2) -> 11
sumRegion(1, 2, 2, 4) -> 12
Time complexity : O(1) time per query, O(mn) time pre-computation. The pre-computation in the
constructor takes O(mn) time. Each sumRegion query takes O(1) time.
Space complexity : O(mn). The algorithm uses O(mn) space to store the cumulative region sum.
"""
class NumMatrix:
    """Immutable 2D range-sum queries via a prefix-sum (summed-area) table.

    Precomputation is O(m*n) time and space; each sumRegion query is O(1).
    """

    def __init__(self, matrix):
        """
        :type matrix: List[List[int]]
        """
        # Guard against [] AND [[]] so the prefix table is only built for
        # a non-empty matrix.  (The original only checked the outer list
        # and also left a debug ``print(self.dp)`` behind.)
        if not matrix or not matrix[0]:
            return
        self.M = len(matrix)
        self.N = len(matrix[0])
        # dp[i][j] == sum of matrix[0..i-1][0..j-1]; the extra zero row and
        # column avoid boundary special cases during queries.
        self.dp = [[0] * (self.N + 1) for _ in range(self.M + 1)]
        for i in range(1, self.M + 1):
            for j in range(1, self.N + 1):
                self.dp[i][j] = (matrix[i - 1][j - 1] + self.dp[i - 1][j]
                                 + self.dp[i][j - 1] - self.dp[i - 1][j - 1])

    def sumRegion(self, row1, col1, row2, col2):
        """Return the sum of the rectangle (row1, col1)-(row2, col2), inclusive.

        :type row1: int
        :type col1: int
        :type row2: int
        :type col2: int
        :rtype: int
        """
        # Inclusion-exclusion over the four prefix sums.
        return (self.dp[row2 + 1][col2 + 1] - self.dp[row2 + 1][col1]
                - self.dp[row1][col2 + 1] + self.dp[row1][col1])
| true |
09998313bb2ffce539969ecc3bde6af05259a7fd | Python | Oscarydotel/cspp10 | /Unit1/ODotel_seconds.py | UTF-8 | 155 | 3.875 | 4 | [] | no_license | sec = input("Enter a number of seconds ")
# Convert the entered seconds into minutes and hours (60 s/min, 60 min/h).
mint = float(sec)/60
hours = mint/60
# Bug fix: the original message labelled the minutes value as "hours" and
# the hours value as "seconds".
print(str(sec)+" Seconds is "+str(mint)+" minutes, " + str(hours) + " hours")
8f64b4345c027151a453b5f9d5ac348c0974a2d9 | Python | ajaybrijju/learn-git | /hungry.py | UTF-8 | 320 | 3.890625 | 4 | [] | no_license | hungry = input("are you hungry?\n")
# Bug fix: the original used ``and``, which can never be true because a
# single string cannot equal both "yes" and "Y" -- the hungry branch was
# unreachable.  ``or`` implements the intended "yes OR Y" check.
if hungry == "yes" or hungry == "Y":
    print("Eat samoooosa")
    print("Eat pizza")
    print("Eat burger")
    print("Eat fries")
    print("added a new item")
else:
    print("Do your homework! dude!!!")
    print("drink water")
    print("drink soda")
    print("new line added")
| true |
4054e732d3cca0330617a02f35bdd2781654ad62 | Python | westurner/workhours | /workhours/reports/writers.py | UTF-8 | 6,497 | 2.65625 | 3 | [] | no_license | #!/usr/bin/env python
# encoding: utf-8
from __future__ import print_function
"""
Report writers for various formats
"""
import codecs
import csv
import functools
import logging
import StringIO

import workhours.models.json as json

# Module logger used by sort_by / write_iterable_to_output below.
log = logging.getLogger(__name__)
def itemgetter_default(args, default=None):
    """
    Return a callable object that fetches the given item(s) from its operand,
    or the specified default value.

    Similar to operator.itemgetter except the callable yields ``default``
    when an index does not exist, and fetches every item of the row when
    ``args`` is None.

    :param args: sequence of indices to fetch, or None for "all items"
    :param default: value substituted for out-of-range indices
    :returns: a generator function mapping a row to the selected items
    """
    def _itemgetter(row):
        # Bug fix: the original resolved the ``args is None`` case at
        # construction time against an undefined name ``line`` (NameError);
        # the row length is only known here, per call.
        columns = range(len(row)) if args is None else args
        for col in columns:
            try:
                yield row[col]
            except IndexError:
                yield default
    return _itemgetter
def get_list_from_str(str_, cast_callable=int):
    """Parse a comma-separated string into a list of ``cast_callable`` values.

    Returns None for None input, [] for an empty or whitespace-only string.
    """
    if str_ is None:
        return None
    if not str_ or not str_.strip():
        return []
    tokens = str_.split(',')
    return [cast_callable(token.strip()) for token in tokens]
# TODO FIXME
import operator
def sort_by(sortstr, iterable, reverse=False):
    # Sort ``iterable`` rows by the comma-separated column indices in
    # ``sortstr`` (e.g. "0,2"); missing columns compare as None.
    # NOTE(review): ``log`` is not defined in this module as shown -- this
    # call raises NameError at runtime unless a module logger exists.
    columns = get_list_from_str(sortstr)
    log.debug("columns: %r" % columns)
    #get_columns = operator.itemgetter(*columns)
    get_columns = itemgetter_default(columns, default=None)
    return sorted(iterable,
            key=get_columns,
            reverse=reverse)
class ResultWriter(object):
    """Base class for per-format report writers.

    Subclasses set ``filetype`` and override setup/header/write/footer.
    ``get_writer`` is the factory that maps a filetype name to a writer
    instance plus the appropriate write function.
    """
    # Maps filetype -> delimiter (for csv/tsv) or True (no delimiter concept).
    OUTPUT_FILETYPES = {
        'csv': ",",
        'json': True,
        'tsv': "\t",
        'html': True,
        "txt": True,
    }
    filetype = None
    def __init__(self, _output, *args, **kwargs):
        self._output = _output
        self._conf = kwargs
        self.setup(_output, *args, **kwargs)
    def setup(self, *args, **kwargs):
        pass
    def set_output(self, _output):
        # NOTE(review): raises a bare Exception when an output is already set.
        if _output and self._output is not None:
            raise Exception()
        else:
            self._output = _output
    def header(self, *args, **kwargs):
        pass
    def write(self, obj):
        print(obj, file=self._output)
    def write_numbered(self, obj):
        print(obj, file=self._output)
    def footer(self, *args, **kwargs):
        pass
    @classmethod
    def get_writer(cls, _output,
            filetype="csv",
            **kwargs):
        """Return (writer, write_function) for ``_output`` and ``filetype``.

        :param filetype: one of csv | tsv | json | html | txt
        :param _output: output file-like object
        :raises Exception: for an unknown filetype
        """
        output_filetype = filetype.strip().lower()
        if output_filetype not in ResultWriter.OUTPUT_FILETYPES:
            raise Exception()
        writer = None
        if output_filetype == "txt":
            writer = ResultWriter_txt(_output)
        elif output_filetype == "csv":
            writer = ResultWriter_csv(_output, **kwargs)
        elif output_filetype == "tsv":
            writer = ResultWriter_csv(_output, delimiter='\t', **kwargs)
        elif output_filetype == "json":
            writer = ResultWriter_json(_output)
        elif output_filetype == "html":
            writer = ResultWriter_html(_output, **kwargs)
        else:
            raise NotImplementedError()
        # Second element selects numbered vs plain output (py2-style
        # ``cond and a or b`` idiom).
        return (
            writer,
            (kwargs.get('number_lines')
                and writer.write_numbered or writer.write ))
class ResultWriter_txt(ResultWriter):
    """Plain-text writer; numbered output delegates to the result object's
    ``_numbered_str`` (tab-delimited)."""
    filetype = 'txt'
    def write_numbered(self, obj):
        self.write(obj._numbered_str(odelim='\t'))
class ResultWriter_csv(ResultWriter):
    """CSV/TSV writer built on the stdlib ``csv`` module."""
    filetype = 'csv'
    def setup(self, *args, **kwargs):
        # Delimiter defaults to the OUTPUT_FILETYPES entry for this filetype
        # (',' for csv); get_writer passes '\t' explicitly for tsv.
        self.delimiter=kwargs.get('delimiter',
            ResultWriter.OUTPUT_FILETYPES.get(self.filetype, ','))
        self._output_csv = csv.writer(self._output,
                quoting=csv.QUOTE_NONNUMERIC,
                delimiter=self.delimiter)
                #doublequote=True)
    def header(self, *args, **kwargs):
        # NOTE(review): ``PylineResult`` is not defined/imported in this
        # module as shown -- confirm it is available at runtime.
        attrs = kwargs.get('attrs', PylineResult._fields)
        self._output_csv.writerow(attrs)
    def write(self, obj):
        self._output_csv.writerow(obj.result)
    def write_numbered(self, obj):
        self._output_csv.writerow(tuple(obj._numbered()))
class ResultWriter_json(ResultWriter):
    """Writer that emits each result as an indented JSON object."""
    filetype = 'json'
    def write(self, obj):
        # Serialize first, then emit with a trailing comma + newline so
        # successive records form a JSON-like stream.
        serialized = json.dumps(obj._asdict(), indent=2)
        print(serialized, end=',\n', file=self._output)
    # Numbered output is identical to plain output for JSON records.
    write_numbered = write
class ResultWriter_html(ResultWriter):
    """HTML table writer.

    NOTE(review): titles and cell values are written without HTML escaping,
    so untrusted input could inject markup.  ``.iteritems()`` below is
    Python 2 only.
    """
    filetype = 'html'
    def header(self, *args, **kwargs):
        attrs = self._conf.get('attrs')
        title = self._conf.get('title')
        if title:
            self._output.write('<p>')
            self._output.write(title) # TODO
            self._output.write('</p>')
        self._output.write("<table>")
        if bool(attrs):
            self._output.write("<tr>")
            for col in attrs:
                self._output.write(u"<th>%s</th>" % col)
            self._output.write("</tr>")
    def _html_row(self, obj):
        # Yields HTML fragments for one result row; iterable cell values
        # become a run of <span> elements.
        yield '\n<tr>' # TODO: handle regular tuples
        for attr,col in obj._asdict().iteritems(): # TODO: zip(_fields, ...)
            yield "<td%s>" % (
                (attr is not None) and (' class="%s"' % attr) or '')
            if hasattr(col, '__iter__'):
                for value in col:
                    yield u'<span>%s</span>' % value
            else:
                yield u'%s' % (
                    col and hasattr(col, 'rstrip') and col.rstrip()
                    or str(col)) #TODO
            yield "</td>"
        yield "</tr>"
    def write(self, obj):
        return self._output.write( u''.join(self._html_row(obj,)) )
    def footer(self):
        self._output.write('</table>\n')
def write_iterable_to_output(
        iterable,
        _output,
        filetype="csv",
        number_lines=False,
        attrs=None,
        sortfunc=None,
        **kwargs):
    # Stream every result in ``iterable`` to ``_output`` using the writer
    # for ``filetype``; returns (writer, output_func).
    # NOTE(review): ``sortfunc`` is accepted but never used, and ``log`` is
    # not defined in this module as shown.  ``except Exception, e`` below is
    # Python 2 syntax, kept as-is (this module targets py2: see StringIO).
    (writer, output_func) = (
        ResultWriter.get_writer(
            _output,
            filetype=filetype,
            number_lines=number_lines,
            attrs=attrs,
            **kwargs))
    writer.header()
    for result in iterable:
        # Skip empty results rather than writing blank rows.
        if not result.result:
            continue # TODO
        try:
            output_func(result)
        except Exception, e:
            log.exception(e)
            continue # TODO
    writer.footer()
    return writer, output_func
def write_iterable_to_file(iterable, filename, *args, **kwargs):
    # Convenience wrapper: open ``filename`` as UTF-8 and delegate.
    # NOTE(review): ``codecs`` is not imported in this module as shown;
    # without ``import codecs`` this raises NameError at runtime.
    with codecs.open(filename,'w',encoding='utf8') as _output:
        return write_iterable_to_output(iterable, _output, *args, **kwargs)
| true |
99b939c77577ff195927b5c0c129595f6b9e0c92 | Python | monctagne9500/python | /data_fixture_sql.py | UTF-8 | 2,464 | 2.84375 | 3 | [] | no_license | #/usr/bin/python3
import mysql.connector
from faker import Faker
import random
def randomLength():
    """Return a uniform random integer in [0, 10] inclusive."""
    return random.randint(0, 10)
def generation(nb_generation,cpt):
    # Build one quoted, comma-separated SQL VALUES fragment of fake user
    # data via Faker: 'first','last','email','date','country','city','zip','phone'.
    # NOTE(review): ``nb_generation`` is unused, ``cpt`` is only incremented
    # locally, and generated values containing an apostrophe will break the
    # resulting SQL (no escaping).
    motif="'"
    cpt+= 1
    chaine_generated = ""
    generator = Faker()
    '''
    chaine_generated += motif
    chaine_generated+=str(cpt)
    chaine_generated += motif
    chaine_generated +=","+motif
    '''
    chaine_generated +="'"
    # Faker names may contain titles/middle names; only the first two
    # space-separated tokens are used.
    chaine_split=generator.name().split(" ")
    chaine_generated += chaine_split[0]
    chaine_generated += motif
    chaine_generated += "," + motif
    chaine_generated += chaine_split[1]
    chaine_generated += motif
    chaine_generated += "," + motif
    chaine_generated += generator.ascii_email()
    chaine_generated += motif
    chaine_generated += "," + motif
    chaine_generated+=generator.date(pattern="%Y/%m/%d", end_datetime=None)
    chaine_generated += motif
    chaine_generated += "," + motif
    chaine_generated+= generator.country()
    chaine_generated += motif
    chaine_generated += "," + motif
    chaine_generated+=generator.city()
    chaine_generated += motif
    chaine_generated += "," + motif
    chaine_generated+=generator.zipcode()
    chaine_generated += motif+","
    chaine_generated += motif
    chaine_generated += generator.phone_number()
    chaine_generated += motif
    print(chaine_generated)
    return chaine_generated
# Insert TAILLE_MAX_AL generated rows into the local MySQL ``test`` database.
# NOTE(review): SQL is assembled by string concatenation (quoting hazard --
# see generation()); credentials are hard-coded; the bare ``10`` below is a
# leftover no-op expression statement.
TAILLE_MAX_AL=int(input("entrer nombre de requete générée"))
chaine_insert=["INSERT INTO `utilisateur`(`nom`, `prenom`, `email`, `date_naissance`, `pays`, `ville`, `code_postal`,`telephoone`)VALUES (",")"]
10
#chaine=generation(TAILLE_MAX_AL,i)
# print("\n"+chaine)
conn = mysql.connector.connect(host="localhost",user="root",password="", database="test")
cursor = conn.cursor()
for i in range(TAILLE_MAX_AL):
    chaine= generation(TAILLE_MAX_AL,i)
    cursor.execute(chaine_insert[0]+chaine+chaine_insert[1])
    print(i," ", chaine)
conn.commit()
cursor.close()
conn.close()
'''
con=mysql
print("1")
#cursor=con.
print("2")
cursor.execute("INSERT INTO utilisateur(`id`, `nom`, `prenom`, `email`, `date_naissance`, `pays`, `ville`, `code_postal`) VALUES (1, 'Queen', 'Oliver', 'arrow@teamarrow.com', '1980/05/28', 'Na ilha', 'This City', '12345')")
print("3")
con.commit()
print("4")
cursor.close()
print("5")
con.close()
print("6")
for x in range(TAILLE_MAX_AL):
print(randomLength())
''' | true |
c696cfcf7c5b2a0e3911c9280749368e1256fbbd | Python | myNum/practiceprograms | /sleepin.py | UTF-8 | 376 | 4.09375 | 4 | [] | no_license |
# Python 2 script (raw_input / print statement): decide whether tomorrow is
# a sleep-in day by matching the entered weekday against weekend spellings.
day_of_week = raw_input("What day of the week is tomorrow? ").lower()
# NOTE(review): "s" matches both Saturday and Sunday abbreviations.
weekend = [
    "saturday",
    "sunday",
    "sat",
    "sun",
    "s"]
if day_of_week in weekend:
    sleepin = True
else:
    sleepin = False
if sleepin:
    print "Lucky you! It looks like you're sleeping in tomorrow!"
else:
    print "Sucks to be you! Looks like you're getting up early tomorrow!"
6f75e7b192a48dd5cc28f76f42d787f02ef5f6a6 | Python | OAbouHajar/projectEIH | /eih-raspberrypi-body-detect/post_data_to_api.py | UTF-8 | 2,089 | 2.796875 | 3 | [
"CC0-1.0",
"GPL-1.0-or-later",
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | from firebase import firebase
import requests
import argparse
import time
import os
import pyrebase
def get_stored_number_of_people_from_db(reg_id):
    # Fetch the whole Firebase ``locations`` node and return the occupant
    # count currently stored for building ``reg_id``.
    # NOTE(review): no timeout or HTTP status check -- a failed request
    # surfaces as a .json()/KeyError failure; confirm that is acceptable.
    r = requests.get('https://projecteih.firebaseio.com/locations.json')
    x= r.json()
    return x[reg_id]['numberOfPeopleINDetect']
def db_config():
    # Build and return a pyrebase app for the projecteih Firebase database.
    # NOTE(review): "apiKey" is a placeholder, and the service-account path
    # is resolved relative to the current working directory, not this file.
    path = os.getcwd()
    config = {
        "apiKey": "<>",
        "authDomain": "projecteih.firebaseio.com",
        "databaseURL": "https://projecteih.firebaseio.com",
        "storageBucket": "projecteih.appspot.com",
        "serviceAccount": "{}/cred/projecteih-firebase-adminsdk-dmd9b-dfbc30ba25.json".format(path )
    }
    firebasePy = pyrebase.initialize_app(config)
    return firebasePy
def update_with_the_new_number(id_to_reset, new_number):
    # Write the new occupant count back to Firebase for the given building.
    db = db_config().database()
    x= db.child("locations").child(id_to_reset).update({'numberOfPeopleINDetect': new_number})
    print('###### DATA STORED ON CLOUD #####')
    print('THE NEW NUMBER INSIDE BUILDING ' , new_number)
    return x
## Read environment configuration for the Firebase DB and this device.
url = os.environ['FIREBASE_DB_URL']
reg_id = os.environ['REG_BUILIDING_ID']
device_id = os.environ['DEVICE_ID']
## Parse the people count passed in by detect.py (either people leaving via
## -n/--numberOUT or entering via -n2/--numberIN).
ap = argparse.ArgumentParser()
ap.add_argument("-n", "--numberOUT", type=int,
    help="Current number of people OUT to send to the API")
ap.add_argument("-n2", "--numberIN", type=int,
    help="Current number of people IN to send to the API")
args = vars(ap.parse_args())
currentNumberOUT = args["numberOUT"]
currentNumberIN = args["numberIN"]
## Get the previously stored count from the DB.
preNumber = get_stored_number_of_people_from_db(reg_id)
if args["numberOUT"] is not None:
    ## Subtract the people who left from the stored count.
    ## NOTE(review): nothing clamps this at zero -- the count can go negative.
    newNumber = preNumber- currentNumberOUT
elif args["numberIN"] is not None:
    ## Add the people who entered to the stored count.
    newNumber = preNumber + currentNumberIN
## Push the new count back for this building.
## NOTE(review): if neither -n nor -n2 was supplied, ``newNumber`` is never
## assigned and the next line raises NameError.
update_with_the_new_number(reg_id, newNumber)
| true |
c4fe3faf52d213d122d2677a9eaff1696ee71431 | Python | delphinevendryes/coding-practice | /reverse_linked_list.py | UTF-8 | 1,610 | 4.21875 | 4 | [] | no_license | '''
Given pointer to the head node of a linked list, the task is to reverse the linked list.
'''
class Node():
    """Singly linked list node holding a value and a ``next`` pointer."""
    def __init__(self, val=None, next=None):
        self.val, self.next = val, next
class LinkedList():
    """Singly linked list built from Node objects.

    NOTE(review): constructing with no values leaves ``head`` as a single
    Node(None), so an "empty" list reports len() == 1 and repr() == "None".
    """
    def __init__(self, *args, **kwargs):
        values = kwargs.get('values', None)
        if values:
            # Build back-to-front so each new node points at the previous one.
            N = len(values)
            for i in range(N):
                j = N - i - 1
                if i == 0:
                    n = Node(values[j])
                else:
                    n = Node(values[j], n)
            self.head = n
        else: self.head = Node()
    def __len__(self):
        # Counts nodes, including the sentinel Node(None) of an "empty" list.
        h = self.head
        count = 0
        while h :
            h = h.next
            count += 1
        return count
    def __repr__(self):
        # "v1 -> v2 -> ... -> vn"
        h = self.head
        s = str()
        i = 0
        while h:
            if i != 0:
                s += ' -> '
            s += str(h.val)
            h = h.next
            i += 1
        return s
    def addAtBeginning(self, element):
        # O(1): the new node becomes the head.
        n = Node(element, self.head)
        self.head = n
    def addAtEnd(self, element):
        # O(n): walk to the tail and append.
        # NOTE(review): the ``head is None`` guard is unreachable as written,
        # since __init__ always assigns a Node to head.
        n = Node(element)
        if self.head is None:
            self.head = n
            return
        last = self.head
        while last.next:
            last = last.next
        last.next = n
def reverse(linked_list):
    # Return a NEW reversed LinkedList by walking the input once and pushing
    # each value onto the front of a fresh list; the input is not mutated.
    # Reads head.val immediately, so it relies on head always being a Node
    # (which LinkedList.__init__ guarantees).
    e = linked_list.head
    v = e.val
    rev = LinkedList(values=[v])
    while e.next:
        e = e.next
        v = e.val
        rev.addAtBeginning(v)
    return rev
# Demo: prints "7 -> 6 -> 5 -> 4 -> 3 -> 2 -> 1".
lkd = LinkedList(values=[1, 2, 3, 4, 5, 6, 7])
r = reverse(lkd)
print(r)
| true |
7cdfa86a5b33cf239ef9db6cb3a51e724cb70ffa | Python | ClashLuke/Meros | /PythonTests/Vectors/Generation/Consensus/MeritRemoval/Partial.py | UTF-8 | 2,518 | 2.546875 | 3 | [
"CC0-1.0",
"MIT"
] | permissive | #Types.
from typing import IO, Dict, Any
#Consensus classes.
from PythonTests.Classes.Consensus.Verification import SignedVerification
from PythonTests.Classes.Consensus.MeritRemoval import PartiallySignedMeritRemoval
from PythonTests.Classes.Consensus.Consensus import Consensus
#Blockchain classes.
from PythonTests.Classes.Merit.BlockHeader import BlockHeader
from PythonTests.Classes.Merit.BlockBody import BlockBody
from PythonTests.Classes.Merit.Block import Block
from PythonTests.Classes.Merit.Blockchain import Blockchain
#BLS lib.
import blspy
#Time standard function.
from time import time
#JSON standard lib.
import json
# Test-vector generator: loads the SameNonce MeritRemoval vectors, re-tags the
# removal as partial (nonce 1), mines two Blocks (one carrying a
# Verification, one the MeritRemoval), and writes Partial.json.
#Consensus.
consensus: Consensus = Consensus(
    bytes.fromhex("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"),
    bytes.fromhex("CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC"),
)
#Blockchain (developer-network genesis, 60s block time, starting difficulty).
blockchain: Blockchain = Blockchain(
    b"MEROS_DEVELOPER_NETWORK",
    60,
    int("FAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", 16)
)
#BLS Public Key (deterministic: derived from the all-zero seed).
pubKey: blspy.PublicKey = blspy.PrivateKey.from_seed(b'\0').get_public_key()
#Add a single Block to create Merit and load a MeritRemoval.
snFile: IO[Any] = open("PythonTests/Vectors/Consensus/MeritRemoval/SameNonce.json", "r")
vectors: Dict[str, Any] = json.loads(snFile.read())
blockchain.add(Block.fromJSON(vectors["blockchain"][0]))
consensus.add(SignedVerification.fromJSON(vectors["removal"]["elements"][0]))
removal: PartiallySignedMeritRemoval = PartiallySignedMeritRemoval.fromJSON(vectors["removal"])
removal.nonce = 1
consensus.add(removal)
snFile.close()
#Generate a Block with a verif and a Block with the removal.
for i in range(2):
    block: Block = Block(
        BlockHeader(
            i + 2,
            blockchain.last(),
            int(time()),
            consensus.getAggregate([(pubKey, i, i)])
        ),
        BlockBody([(pubKey, i, consensus.getMerkle(pubKey, i, i))])
    )
    #Mine it.
    block.mine(blockchain.difficulty())
    #Add it.
    blockchain.add(block)
    print("Generated Partial Block " + str(block.header.nonce) + ".")
# Bundle everything the test harness needs and write the vector file.
result: Dict[str, Any] = {
    "blockchain": blockchain.toJSON(),
    "data": vectors["data"],
    "removal": removal.toSignedJSON()
}
partialFile: IO[Any] = open("PythonTests/Vectors/Consensus/MeritRemoval/Partial.json", "w")
partialFile.write(json.dumps(result))
partialFile.close()
| true |
6c08625ce787dfc58522b86275806ca577b68923 | Python | carmelom/cookiecutter-xmds-gpe | /{{cookiecutter.project_name}}/src/movie.py | UTF-8 | 1,590 | 2.625 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Create: 06-2019 - Carmelo Mordini <carmelo> <carmelo.mordini@unitn.it>
"""Render the 1-D density |psi(x, t)| stored in an XMDS HDF5 output file as a
movie: via moviepy when it is installed, otherwise as an interactive
matplotlib animation.
"""
import numpy as np
import matplotlib.pyplot as plt
# moviepy is optional: when missing, fall back to interactive matplotlib
# playback (see make_movie below).
try:
    from moviepy.video.io.bindings import mplfig_to_npimage
    import moviepy.editor as mpy
    IMPORT_MOVIEPY = True
except ModuleNotFoundError:  # as e:
    # print(f"{e}\nFallback to matplotlib")
    IMPORT_MOVIEPY = False
def make_movie(h5filename, fps=20, output=None):
    """Animate |psi(x, t)| from the 'realtime/1' group of an XMDS HDF5 file.

    With moviepy available and ``output`` given, writes a video file;
    otherwise plays the frames interactively with matplotlib.
    """
    with h5py.File(h5filename, 'r') as f:
        # assumes datasets t, x, psiR, psiI under 'realtime/1' -- TODO confirm
        g = f['realtime/1']
        t = g['t'][:]
        x = g['x'][:]
        psiI = g['psiI'][:]
        psiR = g['psiR'][:]
    # Density = |psi| from the real/imaginary parts.
    n = np.hypot(psiR, psiI)
    margin = 0.05
    # Fixed y-limits with a 5% margin so the axes do not rescale per frame.
    ylim = n.min() - margin * n.ptp(), n.max() + margin * n.ptp()
    Nframes = len(t)
    duration = Nframes / fps
    fig, ax = plt.subplots()
    line, = ax.plot(x, n[0])
    # Dashed copy of the initial profile kept as a static reference curve.
    ax.plot(x, n[0], ls='--', alpha=0.6)
    ax.set_ylim(ylim)
    if not IMPORT_MOVIEPY or output is None:
        print("Using matplotlib")
        def show_frame(ix):
            line.set_ydata(ix]) if False else line.set_ydata(n[ix])
        for ix in range(len(t)):
            show_frame(ix)
            plt.pause(0.05)
        else:
            plt.show()
    else:
        def make_frame_mpl(_t):
            # Map the clip time _t back to a frame index.
            # NOTE(review): at _t == duration this yields ix == Nframes,
            # one past the last frame -- confirm moviepy never requests it.
            ix = int(_t / duration * Nframes)
            # print(ix)
            line.set_ydata(n[ix])  # <= Update the curve
            return mplfig_to_npimage(fig)  # RGB image of the figure
        animation = mpy.VideoClip(make_frame_mpl, duration=duration)
        animation.write_videofile(output, fps=fps)
        # animation.write_gif("movie.gif", fps=20)
| true |
947adca8e8b44a46688bf73e6494b5ab762829d3 | Python | Larry213021/Clustering-by-fast-search-and-find-of-density-peaks | /classification/CFSDP(以t為基準).py | UTF-8 | 8,697 | 2.9375 | 3 | [] | no_license | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2018/3/26 16:56
# @Author : jyl
# @File : CFSDP.py
import numpy as np
import matplotlib.pyplot as plt
import collections
from sklearn import metrics
from sklearn.metrics import davies_bouldin_score
import pandas as pd
# Example input: v = np.array([[7, 8], [5, 2], [2, 3], [6, 4]])
# Build the symmetric matrix of pairwise Euclidean distances between rows of v.
def caldistance(v):
    num_points = len(v)
    distance = np.zeros(shape=(num_points, num_points))
    # Fill only the upper triangle and mirror it; the diagonal stays 0.
    for i in range(num_points):
        for j in range(i + 1, num_points):
            d = np.sqrt(np.sum(np.power(v[i] - v[j], 2)))
            distance[i][j] = d
            distance[j][i] = d
    return distance
# Select the cutoff distance dc such that, on average, roughly t% of all
# points fall within dc of each point.  ``t`` is the integer part of the
# percentage: for 2%, pass 2.  (translated from the original Chinese comments)
def chose_dc(dis, t):
    # Collect the upper triangle of the distance matrix (each pair once).
    temp = []
    print('len dis',len(dis[0]))
    for i in range(len(dis[0])):
        for j in range(i + 1, len(dis[0])):
            temp.append(dis[i][j])
    print('dis shape',dis.shape)
    # Sort ascending, then pick the t-th percentile as dc.
    temp.sort()
    print('temp: ',temp)
    print('總距離個數',len(temp))
    dc = temp[int(len(temp) * t / 100)]
    print('dc: ',dc)
    return dc
# Discrete density: a point's density is how many entries of its distance row
# fall strictly below the cutoff dc (the zero self-distance counts itself).
def count_density(distance, dc):
    density = np.zeros(shape=len(distance))
    for idx, row in enumerate(distance):
        density[idx] = np.count_nonzero(row < dc)
    return density
# Continuous (Gaussian-kernel) density: sum of exp(-(d_ij/dc)^2) over the
# point's distance row.  The zero self-distance contributes 1 to every sum.
# ``node`` is the vector of distances d_ij from the current point.
# (translated from the original Chinese comments)
def continous_density(distance, dc):
    density = np.zeros(shape=len(distance))
    for index, node in enumerate(distance):
        density[index] = np.sum(np.exp(-(node / dc) ** 2))
        print(index,':',density[index])  # debug trace, left as-is
    return density
# For each point, find the minimum distance to any denser point (delta) and
# that nearest denser point's index (its "leader").  If the nearest denser
# point is farther than ``delta``, the density threshold ``theta`` is relaxed
# by ``d_theta`` and the search repeats.  (translated from Chinese comments)
# NOTE(review): delta=11 and d_theta=0.1 are magic constants tuned for a
# particular dataset -- confirm before reuse.
def node_detal(density, distance):
    detal_ls = np.zeros(shape=len(distance))
    closest_leader = np.zeros(shape=len(distance), dtype=np.int32)
    delta = 11
    d_theta = 0.1
    for index, node in enumerate(distance):
        theta = density[index]
        Stop = False
        while Stop != True:
            # Indices of points denser than the current threshold (1-D array).
            density_larger_than_node = np.squeeze(np.argwhere(density > theta))
            # Some denser point exists.
            if density_larger_than_node.size != 0:
                # Distances from this point to all denser points
                # (1-D array, or a scalar when only one exists).
                distance_between_larger_node = distance[index][density_larger_than_node]
                # print(index,distance_between_larger_node)
                detal_ls[index] = np.min(distance_between_larger_node)
                if detal_ls[index] >= delta:
                    theta = theta - d_theta
                else:
                    min_distance_index = np.squeeze(np.argwhere(distance_between_larger_node == detal_ls[index]))
                    # Several equally-near denser points: the original comment
                    # says "take the first", but the code picks one at RANDOM.
                    if min_distance_index.size >= 2:
                        min_distance_index = np.random.choice(min_distance_index)
                    if distance_between_larger_node.size > 1:
                        closest_leader[index] = density_larger_than_node[min_distance_index]
                    else:
                        closest_leader[index] = density_larger_than_node
                    Stop = True
            # Densest point overall: delta = max distance, it leads itself.
            else:
                detal_ls[index] = np.max(distance)
                closest_leader[index] = index
                Stop = True
    return detal_ls, closest_leader
# Decision-graph scores: multiply each point's min-max-normalised density by
# its normalised distance-to-denser-point; the largest products are the
# natural cluster-centre candidates.  Normalising both factors keeps one
# quantity from dominating when their magnitudes differ.  (The decision-graph
# plot was already commented out in the original and is omitted here.)
def show_nodes_for_chosing_mainly_leaders(density, detal_ls):
    den_lo, den_hi = np.min(density), np.max(density)
    det_lo, det_hi = np.min(detal_ls), np.max(detal_ls)
    normal_den = (density - den_lo) / (den_hi - den_lo)
    normal_det = (detal_ls - det_lo) / (det_hi - det_lo)
    gamma = normal_den * normal_det
    return gamma
# Assign every point its final cluster by chasing leader pointers until a
# chosen cluster centre is reached.  (translated from the Chinese comment)
# NOTE(review): ``closest_leader`` is path-compressed IN PLACE; the copy is
# only taken afterwards.  The per-point prints are a debug trace.
def clustering(closest_leader, chose_list):
    for i in range(len(closest_leader)):
        print('點:', i)
        while closest_leader[i] not in chose_list:
            j = closest_leader[i]
            print(j)
            closest_leader[i] = closest_leader[j]
            print('closest_leader[i]',closest_leader[i])
    new_class = closest_leader[:]
    return new_class # new_class[i] is point i's final cluster (its centre's index)
def show_result(new_class, norm_data, chose_list):
    # Scatter-plot the final clustering: one colour per cluster, centres
    # drawn as large black stars.  (translated from the Chinese comments)
    colors = [
        '#FF0000', '#FFA500', '#FFFF00', '#00FF00', '#228B22',
        '#0000FF', '#FF1493', '#EE82EE', '#000000', '#FFA500',
        '#00FF00', '#006400', '#00FFFF', '#0000FF', '#FFFACD',
        '#770077', '#008866', '#000088', '#9F88FF','#3A0088',
        '#660077', '#FF00FF','#0066FF', '#00FF00', '#7744FF',
        '#33FFDD', '#CC6600', '#886600', '#227700', '#008888',
        '#FFFF77', '#D1BBFF'
    ]
    # Map each distinct cluster label to a colour index.
    leader_color = {}
    main_leaders = dict(collections.Counter(new_class)).keys()
    for index, i in enumerate(main_leaders):
        leader_color[i] = index
    plt.figure(num=3, figsize=(15, 15))
    for node, class_ in enumerate(new_class):
        # Highlight each cluster centre.
        if node in chose_list:
            plt.scatter(x=norm_data[node, 0], y=norm_data[node, 1], marker='*', s=500, c='black',alpha=1)
        else:
            plt.scatter(x=norm_data[node, 0], y=norm_data[node, 1], c=colors[leader_color[class_]], s=100, marker='o',alpha=1)
    plt.title('The Result Of Cluster')
    plt.show()
# Side-by-side plots: the density/delta decision graph (left) and the raw
# dataset (right).  (translated from the Chinese comment)
def show_optionmal(den, det, v):
    plt.figure(num=1, figsize=(15, 9))
    ax1 = plt.subplot(121)
    for i in range(len(v)):
        plt.scatter(x=den[i], y=det[i], c='k', marker='o', s=15)
    plt.xlabel('Density')
    plt.ylabel('Detal')
    plt.title('Chose Leader')
    plt.sca(ax1)
    ax2 = plt.subplot(122)
    for j in range(len(v)):
        plt.scatter(x=v[j, 0], y=v[j, 1], marker='o', c='k', s=15)
    plt.xlabel('axis_x')
    plt.ylabel('axis_y')
    plt.title('Dataset')
    plt.sca(ax2)
    plt.show()
def main(input_x):
    # Run CFSDP for each cluster count in [a, b] and each dc-percentile t in
    # [59, 100), then print validity scores for every run.
    # NOTE(review): the local name ``list`` shadows the builtin, and
    # ``metrics.calinski_harabaz_score`` is the pre-0.23 sklearn spelling --
    # newer versions only have ``calinski_harabasz_score``.
    # a =int(input('input start clusters num'))
    # b =int(input('input final clusters num'))
    a=2
    b=2
    num_list = []
    list = []
    for leaders_num in range(a,(b+1)):
        for t in range(59,100):
            print('t=',t)
            norm_data = input_x
            distance = caldistance(norm_data)  # pairwise distance matrix
            dc = chose_dc(distance, t)  # cutoff distance for this t
            density = continous_density(distance, dc)  # per-point density
            detal_ls, closest_leader = node_detal(density, distance)  # per-point leader
            # show_optionmal(density, detal_ls, norm_data)  # decision-graph preview
            scores = show_nodes_for_chosing_mainly_leaders(density, detal_ls)  # gamma scores
            chose_list = np.argsort(-scores)[: leaders_num]  # top-gamma centres
            print(chose_list)
            new_class = clustering(closest_leader, chose_list)  # final assignment
            show_result(new_class, norm_data, chose_list)  # plot clusters
            num_list += [str(leaders_num)]
            calinski = metrics.calinski_harabaz_score(norm_data, new_class)
            davies = davies_bouldin_score(norm_data, new_class)
            silhouette = metrics.silhouette_score(norm_data, new_class, metric='euclidean')
            list.append([calinski, davies, silhouette])
    name = ['calinski_harabaz_score', 'davies_bouldin_score', 'silhouette_score']
    df = pd.DataFrame(list, index = num_list, columns = name)
    print(df)
if __name__ == '__main__':
    # NOTE(review): hardcoded absolute Windows path -- parameterize before reuse.
    data = r'C:\Users\Larrywu\Desktop\schoolHW\CFSDP\Clustering-Python-master2\annulus.txt'
    raw_data = np.loadtxt(data, delimiter=' ', usecols=[0, 1])
    main(raw_data)
| true |
0835331cad93d12167dd93bc13d426a9d398f911 | Python | huberthoegl/tsgrain | /etc/db/producer.py | UTF-8 | 1,147 | 2.671875 | 3 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | # tinydb.readthedocs.io/en/latest/getting-started.html
'''
cat db.json | jq
or
cat db.json | python -m json.tool
Pretty-to-compact conversion:
cat db.json.pretty | jq -c
'''
import os, time
from tinydb import TinyDB, Query
# Demo: rebuild dbtest.json from scratch and interactively insert schedule
# rows; the while-True loop runs until interrupted (Ctrl-C).
if os.path.exists('dbtest.json'):
    os.unlink('dbtest.json')
db = TinyDB('dbtest.json')
docid = db.insert({'type': 'startcnt', 'val': 0}) # insert document
docid = db.insert({'type': 'mandelay', 'val': 5})
for item in db:
    print("0>", item)
ablaeufe = db.table('Ablaufliste', cache_size=0) # disable cache
n = 0
duration = 20
# while n < 20:
while True:
    s = input("{}> ".format(n))  # NOTE(review): the entered value is unused
    doc_id = ablaeufe.insert({'start': '20-05-11T22:00',
                       'duration': duration,
                       'courts': '*******',
                       'days': '1'})
    # cycle: no, 12h, d, ...
    duration += 5
    n += 1
'''
r = db.all()
print("all>", r)
for i, item in enumerate(db):
print(i, "->", item)
print("%%>", ablaeufe.all())
for item in ablaeufe:
print("=>", item)
'''
# db.truncate() # alles loeschen
# print(db.all())
| true |
01ea8197ae27ab49db9095452861eae7f8367ebe | Python | alex440440/epi_python | /utils.py | UTF-8 | 367 | 3.078125 | 3 | [] | no_license |
#Uitls.py
import random
class Utils:
    """Small collection of list/array helper utilities."""

    @staticmethod
    def swap(array, i, j):
        """Swap ``array[i]`` and ``array[j]`` in place."""
        # Idiomatic tuple swap replaces the manual temp variable.
        array[i], array[j] = array[j], array[i]

    @staticmethod
    def clone(array):
        """Return a shallow copy of ``array`` (slice copy preserves the type)."""
        return array[0:len(array)]

    @staticmethod
    def random(max):
        """Return a uniform random int in [0, max] inclusive; 0 when max == 0.

        ``max`` shadows the builtin, but the name is kept for interface
        compatibility.  SystemRandom draws from os.urandom, as the original did.
        """
        if max == 0:
            return 0
        return random.SystemRandom().randint(0, max)
726ccb36cfd79047817a70535f3aa17d711c4c6b | Python | joaquinmenendez/Docker-BIO821 | /analysis.py | UTF-8 | 1,640 | 3.5 | 4 | [] | no_license | import pandas as pd
import sqlalchemy
import matplotlib.pyplot as plt
import seaborn as sbn
''' Derive a sql query that returns a data set that contains a column for country, a
column for season, and a column for average number of goals scored per game for each country-season combination. '''
engine = sqlalchemy.create_engine('sqlite:///data/database.sqlite')
conn = engine.connect()
df = conn.execute('''
SELECT name, season , AVG(home_team_goal + away_team_goal) AS Avg_all_goals
FROM Country
LEFT JOIN Match
ON Country.id = Match.country_id
GROUP BY country.id , season
; ''').fetchall()
df = pd.DataFrame(data= df, columns = ['Country','Season', 'Avg_goals_per_game'])
plt.figure(figsize=(20,7))
ax = sbn.lineplot(data = df, x='Season', y='Avg_goals_per_game', hue='Country')
ax.tick_params(labelsize = 14)
plt.title('Average number of goals scored per gamem for each country-season', size = 16)
plt.ylabel('Average number of goals', size = 14)
plt.xlabel('Season', size = 14)
ax.legend( bbox_to_anchor= (1,0.75))
plt.show()
########################################################################################################################
# Create a new feature called good_season that takes the value 1 if the average number
# scored per game in a given season is greater than 2.75.
df['good_season'] = 0
df.loc[df['Avg_goals_per_game'] > 2.75, 'good_season'] = 1
### b. Create a summary data frame that displays the number of “good seasons” for each country.
df.groupby(by = 'Country', axis = 0).aggregate('sum').drop(columns = 'Avg_goals_per_game')
| true |
2283b5449daecb88e1d4800961883ce3c68160b6 | Python | hvrlxy/covid19_project | /data/transformdata.py | UTF-8 | 772 | 3.09375 | 3 | [] | no_license | file = open("alldata.csv", 'r')
lines = file.readlines()
for line in lines:
data = line.split(',')
new_line = []
valid = True
for l in data:
if l == "":
valid = False
if valid:
if data[0] == "Single":
new_line.append(0)
else:
new_line.append(1)
if data[-7] == '0%':
new_line.append(0)
elif data[-7] == '1-29%':
new_line.append(1)
elif data[-7] == '30-59%':
new_line.append(2)
else:
new_line.append(3)
if data[-6] == '20-29':
new_line.append(1)
elif data[-6] == '30-49':
new_line.append(2)
elif data[-6] == '50+':
new_line.append(3)
else:
new_line.append(0)
new_line.append(int(data[-5]))
new_line.append(int(data[-4]))
new_line.append(int(data[-2]))
new_line.append(int(data[-1]))
print(new_line)
| true |
6b7f765d6525e677b1568ec7ce9166fb4fa88804 | Python | thumarrushik/Leetcode | /Pascal's Triangle II.py | UTF-8 | 383 | 2.9375 | 3 | [] | no_license | class Solution:
def getRow(self, rowIndex):
"""
:type rowIndex: int
:rtype: List[int]
"""
Result = [0] * (rowIndex + 1)
Result[0] = 1
for a1 in range(1, rowIndex + 1,1):
for b1 in range(a1, 0, -1):
Result[b1] += Result[b1-1]
return Result
| true |
930853507b3de0702a2dc3fc8bce956d2e282c02 | Python | alexanderstage/python | /ex20.py | UTF-8 | 1,655 | 3.875 | 4 | [] | no_license | # import argv from the sys module
from sys import argv
# unpack argv
script, input_file = argv
# function that accepts one parameter
def print_all(f):
# print the contents of the file passed to the function
print f.read()
# function that accepts one parameter
def rewind(f):
# find the start (line 0) of the file passed to the function
f.seek(0)
# function that accepts two parameters
def print_a_line(line_count, f):
# use the line count passed to the function to print the
# content of the line the script is currently on
print line_count, f.readline()
# open the file passed to the script and assign it to a variable
current_file = open(input_file)
# print a string that ends with a line break
print "First lets print the whole file.\n"
# run the print_all function, passing it the opened file
print_all(current_file)
# print a string
print "Now lets rewind, kind of like a tape."
# run the rewind function, passing it the opened file
rewind(current_file)
# print a string
print "Lets print three lines:"
# assign a value to the 'current_line' variable
current_line = 1
# run the print_a_line function passing it the current line number
# and the opened file
# Current line = 1
print_a_line(current_line, current_file)
# increase the line number by 1
current_line += current_line
# run the print_a_line function passing it the current line number
# and the opened file
# current line = 2
print_a_line(current_line, current_file)
# increase the line number by 1
current_line += 1
# run the print_a_line function passing it the current line number
# and the opened file
# current line = 3
print_a_line(current_line, current_file) | true |
994414a7e48412bd2f530d76f1f345ff6a456c69 | Python | cmlramirez/py0220 | /Fibonnaci/main.py | UTF-8 | 155 | 3.28125 | 3 | [] | no_license | from fibonacci import *
n=int(input("Por favor indique hasta qué número generar: "))
l=[1]
for x in range(2, n + 1) :
l.append(fibo(x))
print(l)
| true |
989b058c586924e749b9936f923f728d64565c51 | Python | shashikumar2691/Python-Assigment | /recurnestedlist.py | UTF-8 | 330 | 3.359375 | 3 | [] | no_license | single_lst = []
def nestedList(olst):
for ele in olst:
if type(ele) == list:
nestedList(ele)
else:
single_lst.append(ele)
return single_lst
def singlelst():
lst = [['q','i',9],[3,4],[3,4,'abc','xyz',[100,[101,102,[103]]]]]
data = nestedList(lst)
print(data)
singlelst() | true |
88ae8b4cb108e243e88bb98a9e5a7755176acb27 | Python | JaishreeJanu/News-Recommendation-on-Twitter | /finalUsersTweet.py | UTF-8 | 1,376 | 3.234375 | 3 | [] | no_license | import sys
import csv
#http://www.tweepy.org/
import tweepy
#Get your Twitter API credentials and enter them here
consumer_key = 'UtXVnKCvSe2CL6uHfKjTkwFIC'
consumer_secret = 'uRAHe9Fj0bp5FKUFWc95hTLYSCsgKfynshkET0fRPixL0dCXmA'
access_token = '925610881588875264-jSGe9srWUVrue8u9BOVJeiwQSDJZatR'
access_secret = '2sXaXcTh2ZE9PMUwDYluhhGvPFowVcpt209p3raWOO7l7'
#method to get a user's last 100 tweets
def get_tweets(username):
#http://tweepy.readthedocs.org/en/v3.1.0/getting_started.html#api
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_secret)
api = tweepy.API(auth)
#set count to however many tweets you want; twitter only allows 200 at once
number_of_tweets = 100
#get tweets
tweets = api.user_timeline(screen_name = username,count = number_of_tweets)
#create array of tweet information: username, tweet id, date/time, text
tweets_for_csv = [[username,tweet.id_str, tweet.created_at, tweet.text.encode("utf-8")] for tweet in tweets]
#write to a new csv file from the array of tweets
print "writing to {0}_tweets.csv".format(username)
with open("{0}_tweets.csv".format(username) , 'w+') as file:
writer = csv.writer(file, delimiter='|')
writer.writerows(tweets_for_csv)
#if we're running this as a script
if __name__ == '__main__':
#get tweets for username passed at command line
#alternative method: loop through multiple users
users = ['THexplains','timesofindia']
for user in users:
get_tweets(user)
| true |
5bf3141a21448c68996cb5c4f8fe06ac83827c59 | Python | JavierIH/M1R0 | /code/python/main.py | UTF-8 | 2,106 | 2.59375 | 3 | [] | no_license | import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
import smbus
import pygame
import time
from hardware.pca9865.pca9865 import ServoController
from control.kinematics.kinematics import Kinematics
pygame.init()
clock = pygame.time.Clock()
pygame.joystick.init()
joystick = pygame.joystick.Joystick(0)
joystick.init()
ik = Kinematics()
bus = smbus.SMBus(1)
control = ServoController(bus, 0x40)
control.addServo(0,0)
control.addServo(1,0)
position_x = 0
position_y = 0
gain = 10
speed = 1
limit_max_y = 250
limit_min_y = -250
limit_max_x = 400
limit_min_x = 150
control.move(0, 0)
control.move(1, 0)
position_x = (limit_max_x + limit_min_x)/2
position_y = (limit_max_y + limit_min_y)/2
while True:
for event in pygame.event.get():
pass
# axes = joystick.get_numaxes()
# for i in range(axes):
# axis = joystick.get_axis(i)
# buttons = joystick.get_numbuttons()
# for i in range(buttons):
# button = joystick.get_button(i)
# hats = joystick.get_numhats()
# for i in range(hats):
# hat = joystick.get_hat(i)
x = joystick.get_axis(0)
y = joystick.get_axis(1)
speed = (-joystick.get_axis(3)+1)/2
# print '\n'*100, x, '\n', y
if joystick.get_button(0):
if not ((position_x > limit_max_x and x > 0) or (position_x < limit_min_x and x < 0)):
position_x += x*speed*gain
if not ((position_y > limit_max_y and y > 0) or (position_y < limit_min_y and y < 0)):
position_y += y*speed*gain
if joystick.get_button(4):
position_x = (limit_max_x + limit_min_x)/2
position_y = (limit_max_y + limit_min_y)/2
print '\n'*100
print 'Posicion en X: ', position_x
print 'Posicion en Y: ', position_y
print 'Velocidad: ', speed
joints = ik.getTargetJoints([control.getPosition(0), control.getPosition(1)],[position_x, position_y])
print 'Angulo alpha: ', joints[0]
print 'Angulo beta: ', joints[1]
control.move(0, joints[0])
control.move(1, joints[1])
clock.tick(50)
pygame.quit()
| true |
0d193b8502481d1d27190ee825641a5d0e7dec00 | Python | badribnarayanan/dsa | /datastructs/stack.py | UTF-8 | 1,152 | 4.46875 | 4 | [
"MIT"
] | permissive | #implementing a stack and it's operations
import os
import time
class Stack(object):
"""Class to create a stack and functions for it's associated operations..
"""
def __init__(self):
self.items = []
self.head = None
def push(self, item):
#adds an item to the stack
self.items.append(item)
def pop(self):
#removes the top item from the stack and returns it
if not self.is_empty():
return self.items.pop()
else:
print("Empty stack. cannot pop items")
return None
def size(self):
#returns the size of the stack
return len(self.items)
def peek(self):
#returns the top item in the stack without removing the item
return self.items[len(self.items)-1]
def is_empty(self):
#checks if the stack is empty or not and returns the boolean value
return self.items == []
if __name__ == '__main__':
stack = Stack()
stack.push('fat fuck')
stack.push('maami')
stack.push(4)
print("Size of stack:", stack.size())
print("Last item in stack:", stack.peek())
| true |
31832de3e28492520426b69a9fa0c081d5000bd8 | Python | rociohdzhdz/WebFundamentals | /Python/python/OOP/test2classes.py | UTF-8 | 336 | 3.28125 | 3 | [] | no_license | class User:
def __init__(self, name, email):
self.name = name
self.email = email
self.account = BankAccount(int_rate=0.02, balance=0)
def example_method(self):
self.account.deposit(100) # we can call the BankAccount instance's methods
print(self.account.balance) # or access its attributes | true |
5f2cd9549c8ad603d33b95fe4139aec0dfef656d | Python | rajKarra69420/CryptoPals | /Cryptopals Set 4/set4_challenge25.py | UTF-8 | 1,233 | 2.890625 | 3 | [
"MIT"
] | permissive | import set3_challenge18 as ctr
import base64
import os
import itertools # https://stackoverflow.com/questions/2300756/get-the-nth-item-of-a-generator-in-python
key = os.urandom(16)
nonce = os.urandom(8)
def edit(ciphertext, key, nonce, offset, newtext):
return ciphertext[:offset] + bytes([x ^ y
for (x, y) in itertools.zip_longest(newtext,
itertools.islice(ctr.get_keystream(key, nonce),offset, offset + len(newtext)),
fillvalue=0)]) + ciphertext[offset + len(newtext):]
def api_call(ciphertext, newtext):
return bytes([x ^ y
for (x, y) in itertools.zip_longest(ciphertext,
edit(ciphertext, key, nonce, 0, newtext), fillvalue=0)])
if __name__ == "__main__":
f = open('set4_challenge25_encrypted.txt')
# test edit function
plaintext = base64.b64decode(f.read())
ciphertext = ctr.transform(plaintext, key, bytes(nonce))
edited_ciphertext = edit(ciphertext, key, nonce, 20, b'foobar')
edited_plaintext = ctr.transform(edited_ciphertext, key, nonce)
assert(b'foobar' in edited_plaintext)
# actual attack is here
recovered_plaintext = api_call(ciphertext, b'\x00' * len(ciphertext))
assert(recovered_plaintext == plaintext)
| true |
aee032a7d364d485ff99523069b7306878b1deea | Python | gzm55/hdfs-du | /pig/src/main/python/leaf.py | UTF-8 | 936 | 2.546875 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/python
# Copyright 2012 Twitter, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Mark paths with if they are leaf nodes or not.
import sys
fh = open(sys.argv[1])
prev = None
for line in fh:
line = line.strip()
if prev == None:
prev = line
continue
if line.split('\t')[0].startswith(prev.split('\t')[0]):
print "%s\t0" % prev
else:
print "%s\t1" % prev
prev = line
print "%s\t1" % prev
| true |
acea1c6d7d0dc264c45ef7a610a462388dfccafc | Python | Michael-JC/inverstment_analysis | /my/trader_center.py | UTF-8 | 4,956 | 2.5625 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env python
# coding:utf-8
"""
function: 该类用来加载所有的可连接的gateway,提供对内统一的下单接口
@author: zkang kai
@contact: 474918208@qq.com
"""
class trader_center(object):
"""
交易中心,策略下单或手动下单被调用,根据选择的参数进行路由下单
"""
def __init__(self, eventEngine):
"""Constructor"""
# 绑定事件引擎
self.eventEngine = eventEngine
self.eventEngine.start()
# 接口实例
self.gatewayDict = OrderedDict()
self.gatewayDetailList = []
#----------------------------------------------------------------------
def addGateway(self, gatewayModule):
"""添加底层接口"""
gatewayName = gatewayModule.gatewayName
# 创建接口实例
self.gatewayDict[gatewayName] = gatewayModule.gatewayClass(
self.eventEngine, gatewayName)
# 设置接口轮询
if gatewayModule.gatewayQryEnabled:
self.gatewayDict[gatewayName].setQryEnabled(
gatewayModule.gatewayQryEnabled)
# 保存接口详细信息
d = {
'gatewayName': gatewayModule.gatewayName,
'gatewayDisplayName': gatewayModule.gatewayDisplayName,
'gatewayType': gatewayModule.gatewayType
}
self.gatewayDetailList.append(d)
#----------------------------------------------------------------------
def getGateway(self, gatewayName):
"""获取接口"""
if gatewayName in self.gatewayDict:
return self.gatewayDict[gatewayName]
else:
# TODO: 增加对日志的处理
# self.writeLog(vt_text.GATEWAY_NOT_EXIST.format(gateway=gatewayName))
return None
#----------------------------------------------------------------------
def connect(self, gatewayName):
"""连接特定名称的接口"""
gateway = self.getGateway(gatewayName)
if gateway:
gateway.connect()
#----------------------------------------------------------------------
def subscribe(self, subscribeReq, gatewayName):
"""订阅特定接口的行情"""
gateway = self.getGateway(gatewayName)
if gateway:
gateway.subscribe(subscribeReq)
#----------------------------------------------------------------------
def sendOrder(self, orderReq, gatewayName):
"""对特定接口发单"""
# TODO 如果创建了风控引擎,且风控检查失败则不发单 #if self.rmEngine and not self.rmEngine.checkRisk(orderReq, gatewayName):
# return ''
gateway = self.getGateway(gatewayName)
if gateway:
vtOrderID = gateway.sendOrder(orderReq)
return vtOrderID
else:
return ''
#----------------------------------------------------------------------
def cancelOrder(self, cancelOrderReq, gatewayName):
"""对特定接口撤单"""
gateway = self.getGateway(gatewayName)
if gateway:
gateway.cancelOrder(cancelOrderReq)
#----------------------------------------------------------------------
def qryAccount(self, gatewayName):
"""查询特定接口的账户"""
gateway = self.getGateway(gatewayName)
if gateway:
gateway.qryAccount()
#----------------------------------------------------------------------
def qryPosition(self, gatewayName):
"""查询特定接口的持仓"""
gateway = self.getGateway(gatewayName)
if gateway:
gateway.qryPosition()
#----------------------------------------------------------------------
def exit(self):
"""退出程序前调用,保证正常退出"""
# 安全关闭所有接口
for gateway in self.gatewayDict.values():
gateway.close()
# 停止事件引擎
self.eventEngine.stop()
#----------------------------------------------------------------------
def writeLog(self, content):
"""快速发出日志事件"""
log = VtLogData()
log.logContent = content
log.gatewayName = 'MAIN_ENGINE'
event = Event(type_=EVENT_LOG)
event.dict_['data'] = log
self.eventEngine.put(event)
#----------------------------------------------------------------------
def getOrder(self, vtOrderID):
"""查询委托"""
return self.dataEngine.getOrder(vtOrderID)
#----------------------------------------------------------------------
def getAllWorkingOrders(self):
"""查询所有的活跃的委托(返回列表)"""
return self.dataEngine.getAllWorkingOrders()
#----------------------------------------------------------------------
def getAllGatewayDetails(self):
"""查询引擎中所有底层接口的信息"""
return self.gatewayDetailList
| true |
c278879d27a4f6e59675a8a0e53bfdd06efd314d | Python | Nishin-0141/kyopro_educational_90_python | /SimilarProblem/050_01.py | UTF-8 | 247 | 2.578125 | 3 | [] | no_license | n, m = map(int,input().split())
a = [int(input()) for _ in range(m)]
mod = 10 ** 9 + 7
dp = [1] * (n + 1)
for i in range(m):
dp[a[i]] = 0
for i in range(n - 1):
if dp[i + 2] != 0:
dp[i + 2] = dp[i + 1] + dp[i]
print(dp[n] % mod) | true |
5e36fdf5524ab4b49b2d82d7c3d3486deddc8e6d | Python | gireevash/giree | /0040.py | UTF-8 | 90 | 2.921875 | 3 | [] | no_license | #b
i=int(input())
j=1
k=1
v=0
while(x!=0):
j=l
k=v
v=j+l
print(v,end=' ')
i=i-1
| true |
c4ce4951233110d6c62220ec433323d1a51ade16 | Python | SONGSONG729/Apriori | /apriori.py | UTF-8 | 13,727 | 3.28125 | 3 | [] | no_license | from numpy import *
from time import sleep
from votesmart import votesmart
def loadDataSet():
'''
加载数据集
:return:
'''
return [[1, 3, 4], [2, 3, 5], [1, 2, 3, 5], [2, 5]]
def createC1(dataSet):
'''
创建集合 C1.即对 dataSet 进行去重,排序,放入 list 中,然后转换所有的元素为 frozenset
:param dataSet:
:return:
'''
C1 = []
for transaction in dataSet:
for item in transaction:
if not [item] in C1:
# 遍历所有的元素,如果不在 C1 出现过,就append
C1.append([item])
C1.sort()
# 对C1中每个项构建一个不变集合frozenset表示冻结的set集合,元素无改变;可以把它当字典的key来使用
return list(map(frozenset, C1))
def scanD(D, Ck, minSupport):
'''
计算候选数据集 CK 在数据集 D 中的支持度,并返回支持度大于最小支持度(minSupport)的数据
:param D:
:param Ck:
:param minSupport:
:return:
'''
# ssCnt 临时存放选数据集 Ck 的频率. 例如: a->10, b->5, c->8
ssCnt = {}
for tid in D:
for can in Ck:
# 测试是否can中的每一个元素都在tid中
if can.issubset(tid):
#if not ssCnt.has_key(can):
if not can in ssCnt:
ssCnt[can] = 1
else:
ssCnt[can] += 1
numItems = float(len(D)) # 数据集 D 的数量
retList = []
supportData = {}
for key in ssCnt:
# 支持度 = 候选项(key)出现的次数 / 所有数据集的数量
support = ssCnt[key] / numItems
if support >= minSupport:
# 在 retList 的首位插入元素,只存储支持度满足频繁项集的值
retList.insert(0, key)
# 存储所有的候选项(key)和对应的支持度(support)
supportData[key] = support
return retList, supportData
# 输入频繁项集列表 Lk 与返回的元素个数 k,然后输出所有可能的候选项集 Ck
def aprioriGen(Lk, k):
"""aprioriGen(输入频繁项集列表 Lk 与返回的元素个数 k,然后输出候选项集 Ck。
例如: 以 {0},{1},{2} 为输入且 k = 2 则输出 {0,1}, {0,2}, {1,2}. 以 {0,1},{0,2},{1,2} 为输入且 k = 3 则输出 {0,1,2}
仅需要计算一次,不需要将所有的结果计算出来,然后进行去重操作
这是一个更高效的算法)
Args:
Lk 频繁项集列表
k 返回的项集元素个数(若元素的前 k-2 相同,就进行合并)
Returns:
retList 元素两两合并的数据集
"""
retList = []
lenLk = len(Lk)
for i in range(lenLk):
for j in range(i + 1, lenLk): # 前k-2项相同时,将两个集合合并
L1 = list(Lk[i])[: k - 2]
L2 = list(Lk[j])[: k - 2]
L1.sort()
L2.sort()
# 第一次 L1,L2 为空,元素直接进行合并,返回元素两两合并的数据集
if L1 == L2:
# set union
# print 'union=', Lk[i] | Lk[j], Lk[i], Lk[j]
retList.append(Lk[i] | Lk[j]) # 集合合并操作
return retList
# 找出数据集 dataSet 中支持度 >= 最小支持度的候选项集以及它们的支持度。即我们的频繁项集。
def apriori(dataSet, minSupport=0.5):
'''
首先构建集合 C1,然后扫描数据集来判断这些只有一个元素的项集是否满足最小支持度的要求。
那么满足最小支持度要求的项集构成集合 L1。然后 L1 中的元素相互组合成 C2,C2 再进一步过滤变成 L2,
然后以此类推,知道 CN 的长度为 0 时结束,即可找出所有频繁项集的支持度。
:param dataSet: 原始数据集
:param minSupport: 支持度的阈值
:return:
L 频繁项集的全集
supportData 所有元素和支持度的全集
'''
# C1 即对 dataSet 进行去重,排序,放入 list 中,然后转换所有的元素为 frozenset
C1 = createC1(dataSet)
# 对每一行进行 set 转换,然后存放到集合中
D = list(map(set, dataSet))
# 计算候选数据集 C1 在数据集 D 中的支持度,并返回支持度大于 minSupport 的数据
L1, supportData = scanD(D, C1, minSupport)
# L 加了一层 list, L 一共 2 层 list
L = [L1]
k = 2
# 判断 L 的第 k-2 项的数据长度是否 > 0。
while (len(L[k - 2]) > 0):
Ck = aprioriGen(L[k - 2], k)
# 计算候选数据集 CK 在数据集 D 中的支持度,并返回支持度大于 minSupport 的数据
Lk, supK = scanD(D, Ck, minSupport)
# 保存所有候选项集的支持度,如果字典没有,就追加元素,如果有,就更新元素
supportData.update(supK)
if len(Lk) == 0:
break
# Lk 表示满足频繁子项的集合,L 元素在增加
L.append(Lk)
k += 1
return L, supportData
# 计算可信度(confidence)
def calcConf(freqSet, H, supportData, brl, minConf=0.7):
'''
对两个元素的频繁项,计算可信度,例如: {1,2}/{1} 或者 {1,2}/{2} 看是否满足条件
:param freqSet: 频繁项集中的元素,例如: frozenset([1, 3])
:param H: 频繁项集中的元素的集合,例如: [frozenset([1]), frozenset([3])]
:param supportData: 所有元素的支持度的字典
:param brl: 关联规则列表的空数组
:param minConf: 最小可信度
:return:
prunedH 记录 可信度大于阈值的集合
'''
# 记录可信度大于最小可信度(minConf)的集合
prunedH = []
for conseq in H:
'''
假设 freqSet = frozenset([1, 3]), H = [frozenset([1]), frozenset([3])],
那么现在需要求出frozenset([1]) -> frozenset([3])的可信度和frozenset([3]) -> frozenset([1])的可信度
'''
'''
支持度定义: a -> b = support(a | b) / support(a).
假设 freqSet = frozenset([1, 3]), conseq = [frozenset([1])],
那么 frozenset([1]) 至 frozenset([3]) 的可信度为 = support(a | b) / support(a)
= supportData[freqSet]/supportData[freqSet-conseq] =
supportData[frozenset([1, 3])] / supportData[frozenset([1])]
'''
conf = supportData[freqSet] / supportData[freqSet - conseq]
if conf >= minConf:
# 只要买了 freqSet-conseq 集合,一定会买 conseq 集合(freqSet-conseq 集合和 conseq集合 是全集)
print(freqSet - conseq, '-->', conseq, 'conf:', conf)
brl.append((freqSet - conseq, conseq, conf))
prunedH.append(conseq)
return prunedH
# 递归计算频繁项集的规则
def rulesFromConseq(freqSet, H, supportData, brl, minConf=0.7):
'''
:param freqSet: 频繁项集中的元素,例如: frozenset([2, 3, 5])
:param H: 频繁项集中的元素的集合,例如: [frozenset([2]), frozenset([3]), frozenset([5])]
:param supportData: 所有元素的支持度的字典
:param brl: 关联规则列表的数组
:param minConf: 最小可信度
:return:
'''
'''
# H[0] 是 freqSet 的元素组合的第一个元素,并且 H 中所有元素的长度都一样,长度由 aprioriGen(H, m+1) 这里的 m + 1 来控制
# 该函数递归时,H[0] 的长度从 1 开始增长 1 2 3 ...
# 假设 freqSet = frozenset([2, 3, 5]), H = [frozenset([2]), frozenset([3]), frozenset([5])]
# 那么 m = len(H[0]) 的递归的值依次为 1 2
# 在 m = 2 时, 跳出该递归。假设再递归一次,那么 H[0] = frozenset([2, 3, 5]),freqSet = frozenset([2, 3, 5]) ,
# 没必要再计算 freqSet 与 H[0] 的关联规则了。
'''
m = len(H[0])
if (len(freqSet) > (m + 1)):
'''
# print 'freqSet******************', len(freqSet), m + 1, freqSet, H, H[0]
# 生成 m+1 个长度的所有可能的 H 中的组合,假设 H = [frozenset([2]), frozenset([3]), frozenset([5])]
# 第一次递归调用时生成 [frozenset([2, 3]), frozenset([2, 5]), frozenset([3, 5])]
# 第二次 。。。没有第二次,递归条件判断时已经退出了
'''
Hmp1 = aprioriGen(H, m + 1)
# 返回可信度大于最小可信度的集合
Hmp1 = calcConf(freqSet, Hmp1, supportData, brl, minConf)
# 计算可信度后,还有数据大于最小可信度的话,那么继续递归调用,否则跳出递归
if (len(Hmp1) > 1):
rulesFromConseq(freqSet, Hmp1, supportData, brl, minConf)
# 主函数,调用前两个函数,生成关联规则
def generateRules(L, supportData, minConf=0.7):
'''
:param L: 频繁项集列表
:param supportData: 频繁项集支持度的字典
:param minConf: 最小置信度
:return:
bigRuleList 可信度规则列表(关于 (A->B+置信度) 3个字段的组合)
'''
bigRuleList = []
# 假设 L = [[frozenset([1]), frozenset([3]), frozenset([2]), frozenset([5])], [frozenset([1, 3]), frozenset([2, 5]), frozenset([2, 3]), frozenset([3, 5])], [frozenset([2, 3, 5])]]
for i in range(1, len(L)):
# 获取频繁项集中每个组合的所有元素
for freqSet in L[i]:
# 假设:freqSet= frozenset([1, 3]), H1=[frozenset([1]), frozenset([3])]
# 组合总的元素并遍历子元素,并转化为 frozenset 集合,再存放到 list 列表中
H1 = [frozenset([item]) for item in freqSet]
# 2 个的组合,走 else, 2 个以上的组合,走 if
if (i > 1):
rulesFromConseq(freqSet, H1, supportData, bigRuleList, minConf)
else:
calcConf(freqSet, H1, supportData, bigRuleList, minConf)
return bigRuleList
# 收集美国国会议案中actionId的函数
def getActionIds():
votesmart.apikey = 'a7fa40adec6f4a77178799fae4441030'
actionIdList = []
billTitleList = []
fr = open('recent20bills.txt')
for line in fr.readlines():
billNum = int(line.split('\t')[0])
try:
billDetail = votesmart.votes.getBill(billNum) # api call
for action in billDetail.actions:
if action.level == 'House' and (action.stage == 'Passage' or action.stage == 'Amendment Vote'):
actionId = int(action.actionId)
print('bill: %d has actionId: %d' % (billNum, actionId))
actionIdList.append(actionId)
billTitleList.append(line.strip().split('\t')[1])
except:
print("problem getting bill %d" % billNum)
sleep(1) # delay to be polite
return actionIdList, billTitleList
# 基于投票数据的事务列表填充函数
def getTransList(actionIdList, billTitleList):
itemMeaning = ['Republican', 'Democratic']
for billTitle in billTitleList: # 填充itemMeaning函数
itemMeaning.append('%s -- Nay' % billTitle)
itemMeaning.append('%s -- Yea' % billTitle)
transDict = {}
voteCount = 2
for actionId in actionIdList:
sleep(3)
print('getting votes for actionId: %d' % actionId)
try:
voteList = votesmart.votes.getBillActionVotes(actionId)
for vote in voteList:
if not transDict.has_key(vote.candidateName):
transDict[vote.candidateName] = []
if vote.officeParties == 'Democratic':
transDict[vote.candidateName].append(1)
elif vote.officeParties == 'Republican':
transDict[vote.candidateName].append(0)
if vote.action == 'Nay':
transDict[vote.candidateName].append(voteCount)
elif vote.action == 'Yea':
transDict[vote.candidateName].append(voteCount + 1)
except:
print("problem getting actionId: %d" % actionId)
voteCount += 2
return transDict, itemMeaning
def main():
# #程序清单11-1
# # 导入数据集
# dataSet = loadDataSet()
# print(dataSet)
# # 构建第一个选项集集合C1
# C1 = createC1(dataSet)
# print(C1)
# # 构建集合表示的数据集D
# D = list(map(set, dataSet))
# print(D)
# # 去掉不满足最小支持度的项集
# L1, suppData0 = scanD(D, C1, 0.5)
# print(L1)
# # 测试程序清单11-2
# dataSet = loadDataSet()
# L, suppData = apriori(dataSet)
# print('L:', L)
# print('L[0]:', L[0])
# print('L[1]:', L[1])
# print('L[2]:', L[2])
# # 测试程序清单11-3
# dataSet = loadDataSet()
# # 生成一个最小支持度为0.5的频繁项集的集合
# L, suppData = apriori(dataSet, minSupport=0.5)
# rules = generateRules(L, suppData, minConf=0.7)
# print(rules)
# 测试程序清单11-4
# actionIdList, billTitles = getActionIds()
# 测试程序清单11-5
actionIdList, billTitles = getActionIds()
# transDict, itemMeaning = getTransList(actionIdList[:2], billTitles[:2])
# print(transDict.keys()[6])
# for item in transDict[' ']:
# print(itemMeaning[item])
# # 毒蘑菇相似特征
# dataSet = [line.split() for line in open("mushroom.dat").readlines()]
# L, supportData = apriori(dataSet, minSupport=0.3)
# print('L[1]:')
# for item in L[1]:
# if item.intersection('2'):
# print(item)
# print('L[2]:')
# for item in L[2]:
# if item.intersection('2'):
# print(item)
#
if __name__ == "__main__":
main() | true |
c206d918af71a0bce045fabd3cde37585c8c06a3 | Python | andreguerra1/DataScience-Course | /src/RandomForest.py | UTF-8 | 4,764 | 2.6875 | 3 | [] | no_license | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
import sklearn.metrics as metrics
from sklearn.ensemble import RandomForestClassifier
import plot_functions as func
def simple_random_forest(trnX, tstX, trnY, tstY, n, d, f, labels):
rf = RandomForestClassifier(n_estimators=n, max_depth=d, max_features=f)
rf.fit(trnX, trnY)
prdY = rf.predict(tstX)
accuracy = metrics.accuracy_score(tstY, prdY)
cnf_matrix = metrics.confusion_matrix(tstY, prdY, labels)
tn, fp, fn, tp = metrics.confusion_matrix(tstY, prdY, labels).ravel()
specificity = tp/(tp+fn)
return accuracy, specificity, cnf_matrix
def simple_random_forest_CT(trnX, tstX, trnY, tstY, n, d, f, labels):
rf = RandomForestClassifier(n_estimators=n, max_depth=d, max_features=f)
rf.fit(trnX, trnY)
prdY = rf.predict(tstX)
accuracy = metrics.accuracy_score(tstY, prdY)
cnf_mtx = metrics.confusion_matrix(tstY, prdY, labels)
return accuracy, cnf_mtx
def random_forest_CT(trnX, tstX, trnY, tstY, labels, plot):
n_estimators = [5, 10, 25, 50, 75, 100, 110,130,150,170,185, 200, 250, 300, 350,400, 450, 500]
max_depths = [5, 10, 25, 30, 40, 50, 60, 70]
max_features = ['sqrt', 'log2']
max_accuracy = 0
plt.figure()
fig, axs = plt.subplots(1, 2, figsize=(10, 4), squeeze=False)
for k in range(len(max_features)):
f = max_features[k]
acc_values = {}
for d in max_depths:
accuracy_values = []
for n in n_estimators:
rf = RandomForestClassifier(n_estimators=n, max_depth=d, max_features=f)
rf.fit(trnX, trnY)
prdY = rf.predict(tstX)
# accuracy for max_features = f, max_depth = d, n_estimators = n
accuracy = metrics.accuracy_score(tstY, prdY)
accuracy_values.append(accuracy)
cnf_mtx = metrics.confusion_matrix(tstY, prdY, labels)
if accuracy > max_accuracy:
best_accuracy = [(f, d, n), accuracy, cnf_mtx]
max_accuracy = accuracy
acc_values[d] = accuracy_values
func.multiple_line_chart(axs[0, k], n_estimators, acc_values, 'Random Forests with %s features'%f, 'nr estimators',
'accuracy', percentage=True)
if plot:
plt.show()
return ["Random Forest", best_accuracy]
def random_forest(trnX, tstX, trnY, tstY, labels, plot):
n_estimators = [5, 10, 25, 50, 75, 100, 150, 200, 250, 300, 350, 400, 450, 500]
max_depths = [5, 10, 25, 40, 50, 60, 70]
max_features = ['sqrt', 'log2']
max_accuracy = 0
max_specificity = 0
plt.figure()
fig, axs = plt.subplots(1, 2, figsize=(10, 4), squeeze=False)
for k in range(len(max_features)):
f = max_features[k]
acc_values = {}
spec_values = {}
for d in max_depths:
accuracy_values = []
specificity_values = []
for n in n_estimators:
rf = RandomForestClassifier(n_estimators=n, max_depth=d, max_features=f)
rf.fit(trnX, trnY)
prdY = rf.predict(tstX)
# accuracy for max_features = f, max_depth = d, n_estimators = n
accuracy = metrics.accuracy_score(tstY, prdY)
accuracy_values.append(accuracy)
# sensitivity for max_features = f, max_depth = d, n_estimators = n
tn, fp, fn, tp = metrics.confusion_matrix(tstY, prdY, labels).ravel()
specificity = tp/(tp+fn)
specificity_values.append(specificity)
cnf_mtx = metrics.confusion_matrix(tstY, prdY, labels)
if accuracy > max_accuracy:
best_accuracy = [(f, d, n), accuracy, specificity, cnf_mtx]
max_accuracy = accuracy
if specificity > max_specificity:
best_specificity = [(f, d, n), accuracy, specificity, cnf_mtx]
max_specificity = specificity
acc_values[d] = accuracy_values
spec_values[d] = specificity_values
"""func.multiple_line_chart(axs[0, k], n_estimators, acc_values, 'Random Forests with %s features'%f, 'nr estimators',
'accuracy', percentage=True)"""
func.multiple_line_chart(axs[0, k], n_estimators, spec_values, 'Random Forests with %s features'%f, 'nr estimators',
'specificity', percentage=True)
if plot:
plt.show()
return ["Random Forest", best_accuracy, best_specificity]
| true |
70a39a34bc7db2399121033f014730bca36666c8 | Python | tonysulfaro/MI-250 | /Assingments/module03/week03/sulfaroa.module3.py | UTF-8 | 10,188 | 3.609375 | 4 | [
"MIT"
] | permissive | import sys
import requests
from csv import *
class Module3:
    """Module 3 Questions: HTML weather-page scraping and census CSV helpers.

    The weather helpers parse pages saved from https://www.ajc.com/weather/,
    which embed fields as '<div>label: value</div>' inside a hidden
    'todayData' block.  The CSV helpers expect census-state-populations.csv
    style files: a header line followed by 'name,population' rows.
    """

    @staticmethod
    def find_days_high(filename):
        """Return the day's high temperature (int) from a saved weather HTML file.

        Looks for the text 'hiTemp:' and parses the integer up to the degree
        sign, e.g. '<div>hiTemp: 82&#176;</div>' -> 82.
        """
        with open(filename, 'r') as file:
            data = file.read()
        start = data.find('hiTemp:')
        end = data.find('°', start)
        # int() tolerates the leading space left in the slice.
        return int(data[start + len('hiTemp:'):end])

    @staticmethod
    def find_sunset(filename):
        """Return the sunset time (e.g. '8:13 PM') from a saved weather HTML file."""
        with open(filename, 'r') as file:
            data = file.read()
        start = data.find('sunset:')
        end = data.find('</div>', start)
        return data[start + len('sunset: '):end]

    @staticmethod
    def save_url_to_file(url, filename):
        """Download *url* and write the response body to *filename*.

        Bug fix: this helper was called by find_highest_temp_for_zip but was
        never defined anywhere; it now lives on the class.
        """
        response = requests.get(url)
        with open(filename, 'w') as file:
            file.write(response.text)

    @staticmethod
    def find_highest_temp_for_zip(zipcode):
        """Fetch the ajc.com weather page for *zipcode* and return the day's high.

        Saves the page to a temp HTML file, then reuses find_days_high.
        """
        url = 'https://www.ajc.com/weather/' + str(zipcode) + '/'
        filename = 'low_temp.html'
        Module3.save_url_to_file(url, filename)
        # Static methods can be called on the class directly; no throwaway
        # instance is needed.
        return Module3.find_days_high(filename)

    @staticmethod
    def smallest_state(filename):
        """Return the state/district name with the smallest population estimate.

        Skips the CSV header.  Ties go to the last row seen (the original
        '<=' comparison is preserved).
        """
        with open(filename) as file:
            file.readline()  # skip the CSV header
            lowest = file.readline().split(',')
            for line in file:
                line = line.strip().split(',')
                if int(line[1]) <= int(lowest[1]):
                    lowest = line
        return lowest[0]

    @staticmethod
    def largest_state(filename):
        """Return the state/district name with the largest population estimate.

        Same file format as smallest_state; ties go to the last row seen.
        """
        with open(filename) as file:
            file.readline()  # skip the CSV header
            highest = file.readline().split(',')
            for line in file:
                line = line.strip().split(',')
                if int(line[1]) >= int(highest[1]):
                    highest = line
        return highest[0]

    @staticmethod
    def states_under_three_million(openfile, savefile):
        """Write a CSV (header included) of states with population below 3 million.

        Bug fix: the comparison was '<=', which wrongly included a state of
        exactly 3,000,000; 'below' means strictly less than.
        """
        with open(openfile, 'r') as open_file, open(savefile, 'w') as save_file:
            save_file.write(open_file.readline())  # copy the CSV header
            for line in open_file:
                fields = line.strip().split(',')
                if int(fields[1]) < 3000000:
                    save_file.write(fields[0] + ',' + fields[1] + '\n')

    @staticmethod
    def modify_text_file_lines(openfile, savefile, name):
        """Replace every 'REPLACE ME' in *openfile* with *name*, saving to *savefile*."""
        with open(openfile, 'r') as readfile:
            file_data = readfile.read()
        file_data = file_data.replace('REPLACE ME', name)
        with open(savefile, 'w') as outfile:
            outfile.write(file_data)
        return None

    @staticmethod
    def find_humidity_for_zip(zipcode):
        """Fetch the ajc.com weather page for *zipcode* and return the humidity.

        Returns the numeric value with the percent sign, e.g. '90%'.
        """
        url = 'https://www.ajc.com/weather/' + str(zipcode) + '/'
        page = requests.get(url).text
        start = page.find('humidity: ')
        end = page.find('%', start)
        return page[start + len('humidity: '):end] + '%'

    @staticmethod
    def get_state_population(filename, state):
        """Return the total population for *state* (case-insensitive match).

        Bug fix: the file handle was never closed; a context manager now
        guarantees closure.
        """
        total = 0
        with open(filename, 'r') as fp:
            fp.readline()  # skip the CSV header
            for line in fp:
                fields = line.strip().split(',')
                if state.lower() == fields[0].lower():
                    total += int(fields[1])
        return total

    @staticmethod
    def extract_url_domain(url):
        """Return the domain component of *url*, e.g. 'www.google.com' or 'd2l.msu.edu'.

        Bug fixes: a stray debug print() was removed, and URLs without a
        scheme ('www.google.com') no longer lose their leading characters
        (find('://') returning -1 used to make the slice start at index 2).
        """
        sep = url.find('://')
        start = 0 if sep == -1 else sep + 3
        end = url.find('/', start)
        if end == -1:
            return url[start:]
        return url[start:end]
| true |
9a522520043680e10bf71e1dd43387d972578e96 | Python | ari-frankel/Lakota | /lakota_driver.py | UTF-8 | 1,687 | 2.734375 | 3 | [] | no_license | import yaml, sys
import numpy as np
import linear_regression
import statistics
class lakota_driver:
    """Driver that loads Dakota tabular output and runs requested postprocessors.

    Supported postprocessors (selected by keys in the input yaml):
      - linear_regression: regression of outputs vs. inputs
      - statistics: means, variances, correlations, histograms
    """

    def __init__(self):
        # Maps each tabular column name to its numpy array of values.
        self.data = {}

    def load_data(self, dakota_tabular):
        """Read a Dakota tabular data file into self.data, keyed by column name.

        Uses numpy.genfromtxt with names=True so the tabular header row
        supplies the column names.
        """
        data = np.genfromtxt(dakota_tabular, names=True)
        for v in data.dtype.names:
            self.data[v] = data[v]

    def get_inputs(self, input_file):
        """Parse the yaml input file and return the parameter dictionary.

        Exits the process with an error message on stderr if the mandatory
        'dakota_tabular' option is missing.
        """
        with open(input_file, 'r') as f:
            # Bug fix: yaml.load() without an explicit Loader is unsafe and
            # deprecated; safe_load is the correct call for config files.
            params = yaml.safe_load(f)
        # TODO: add fuller validation of required options here.
        if "dakota_tabular" not in params:
            # Bug fix: sys.stderr is a file object, not a callable; calling
            # it raised TypeError instead of printing the error.
            sys.stderr.write(
                "no dakota_tabular option specified in input file, exiting with extreme prejudice\n")
            sys.exit()
        return params

    def execute(self, lakota_params):
        """Run each requested postprocessor; return 0 on success, 1 on failure."""
        # Load the tabular data referenced by the config.
        self.load_data(lakota_params['dakota_tabular'])
        try:
            if "linear_regression" in lakota_params:
                linear_regression.linear_regression(
                    lakota_params['linear_regression'], self.data)
            if "statistics" in lakota_params:
                statistics.statistics(lakota_params['statistics'], self.data)
            return 0
        except Exception:
            # Bug fix: the bare 'except:' also swallowed SystemExit and
            # KeyboardInterrupt; best-effort error signalling is preserved.
            return 1
| true |
121e9fdb14cec546d53b653beb9786c1e832cd2c | Python | jmarkow/dassort | /util.py | UTF-8 | 16,092 | 2.78125 | 3 | [] | no_license | import json
import yaml
import re
import logging
import os
import time
import hashlib
from itertools import cycle
# https://stackoverflow.com/questions/1131220/get-md5-hash-of-big-files-in-python
def md5_checksum(f, block_size=2**20):
    """Return the hex MD5 digest of the open binary file object *f*.

    Reads in *block_size* chunks so arbitrarily large files can be hashed
    without loading them fully into memory.
    """
    digest = hashlib.md5()
    for chunk in iter(lambda: f.read(block_size), b''):
        digest.update(chunk)
    return digest.hexdigest()
def find_key(key, var):
    """Yield every value stored under *key* anywhere in a nested dict/list.

    Walks dictionaries depth-first; lists are searched element by element.
    Non-mapping inputs yield nothing.  Useful for gobbling up fields from
    parsed json of unknown shape.

    Credit: https://stackoverflow.com/questions/9807634/\
    find-all-occurrences-of-a-key-in-nested-python-dictionaries-and-lists
    """
    if not hasattr(var, 'items'):
        return
    for name, value in var.items():
        if name == key:
            yield value
        if isinstance(value, dict):
            yield from find_key(key, value)
        elif isinstance(value, list):
            for element in value:
                yield from find_key(key, element)
def read_config(file, destination=None, user=None, host=None, cmd_host=None, copy_protocol=None):
    """Parse a dassort yaml config file into up to three config dictionaries.

    Args:
        file: path of the yaml file to read the configuration from
        destination: default destination directory (may be overridden by yaml)
        user: default remote user name
        host: default remote host
        cmd_host: default host used when issuing remote commands
        copy_protocol: default copy protocol (e.g. 'scp', 'cp', 'nocopy')

    Returns:
        (base_config, remote_config, router_config) tuple; each element is a
        dict when the corresponding section exists in the yaml, else None:
          - 'dassort' + 'remote' keys -> base and remote configs populated
          - 'dassort' only            -> base config only
          - 'router' only             -> router config only
    """
    with open(file) as config_yaml:
        base_yaml = yaml.safe_load(config_yaml)
    # with config loaded, make sure we have the keys that we need
    # These template dicts define the recognized keys; merge_dicts only
    # copies values whose key already exists in the template.
    base_config = {
        'keys': [],
        'map': [],
        'default': [],
        'required_files': [],
        'path': None,
        'destination': destination,
        'command': {
            'exts': [],
            'run': None
        }
    }
    router_config = {
        'key': [],
        'files': [],
        'filter': None,
        'invert': None,
        'lowercase': None,
        'exact': None
    }
    remote_config = {
        'user': user,
        'host': host,
        'copy_protocol': copy_protocol,
        'cmd_host': cmd_host,
    }
    if 'dassort' in base_yaml.keys() and 'remote' in base_yaml.keys():
        tree_yaml = base_yaml['dassort']
        map_json = tree_yaml['json']
        # Merge the nested 'json' section first, then the section itself.
        base_config = merge_dicts(base_config, map_json)
        base_config = merge_dicts(base_config, tree_yaml)
        remote_yaml = base_yaml['remote']
        remote_config = merge_dicts(remote_config, remote_yaml)
        router_config = None
    elif 'dassort' in base_yaml.keys():
        tree_yaml = base_yaml['dassort']
        map_json = tree_yaml['json']
        base_config = merge_dicts(base_config, map_json)
        base_config = merge_dicts(base_config, tree_yaml)
        remote_config = None
        router_config = None
    elif 'router' in base_yaml.keys():
        tree_yaml = base_yaml['router']
        router_config = merge_dicts(router_config, tree_yaml)
        # all router items should be iterables
        for k, v in router_config.items():
            if type(v) is not list:
                router_config[k] = [v]
        base_config = None
        remote_config = None
    else:
        # No recognized section: nothing to configure.
        base_config = None
        remote_config = None
        router_config = None
    # reformat base configuration
    # 'path' becomes a template string plus the substitution map used by
    # build_path; 'root' is seeded from the destination.
    if base_config is not None:
        base_config = {
            'keys': base_config['keys'],
            'map': base_config['map'],
            'default': base_config['default'],
            'required_files': base_config['required_files'],
            'value': [],
            'path': {
                'path_string': base_config['path'],
                're': {'root': base_config['destination']}
            },
            'command': base_config['command'],
        }
    return base_config, remote_config, router_config
def merge_dicts(dict1, dict2):
    """Overlay *dict2*'s values onto *dict1* for keys that *dict1* already has.

    Keys present only in *dict2* are ignored, so *dict1* acts as a template
    of recognized options.  *dict1* is updated in place and also returned.

    Args:
        dict1: source (template) dictionary, mutated in place
        dict2: dictionary supplying override values

    Returns:
        dict1, with overlapping keys replaced by *dict2*'s values
    """
    for key in dict1:
        if key in dict2:
            dict1[key] = dict2[key]
    return dict1
def build_path(key_dict, path_string):
    """Substitute each '${key}' placeholder in *path_string* with its value.

    Args:
        key_dict: dictionary where each key, value pair corresponds to a
            variable name and its (string) replacement
        path_string: template specifying how to build the target path

    Returns:
        path_string: the template with every known placeholder replaced

    For example, with path_string '${root}/${subject}' and key_dict
    {'root': 'cooldrive', 'subject': '15781'} the result is
    'cooldrive/15781'.

    Bug fix: the original used re.sub, which interprets the value as a
    regex replacement template — values containing backslashes (e.g.
    Windows paths) raised re.error ('bad escape').  Plain str.replace is
    literal and behaves identically for ordinary values.
    """
    for key, value in key_dict.items():
        path_string = path_string.replace('${' + key + '}', value)
    return path_string
def get_listing_manifest(proc):
    """Collect the files that ship together with a json sidecar.

    If *proc* is a directory, the manifest is every regular file inside it,
    ordered by increasing size so that big files come last; the json files
    are appended at the very end (a json may trigger other copies, so it
    must always be last).  If *proc* is a json file, the manifest is every
    sibling file sharing its base name, with the json appended last.

    Args:
        proc: file or directory to process

    Returns:
        (listing_manifest, json_file): the files to ship, and the json file
        associated with the manifest
    """
    if os.path.isdir(proc):
        # Sort the listing by size — we want big files in the back.
        # https://stackoverflow.com/questions/44214910/select-the-first-n-smallest-files-from-a-folder
        entries = sorted(os.listdir(proc),
                         key=lambda name: os.path.getsize(os.path.join(proc, name)))
        json_paths = [os.path.join(proc, name)
                      for name in entries
                      if name.endswith('.json')]
        json_file = json_paths[0]
        listing_manifest = [os.path.join(proc, name)
                            for name in entries
                            if os.path.isfile(os.path.join(proc, name))
                            and not name.endswith('.json')]
        listing_manifest.extend(json_paths)
    else:
        json_file = proc
        stem = os.path.splitext(os.path.basename(proc))[0]
        parent = os.path.dirname(proc)
        listing_manifest = [os.path.join(parent, name)
                            for name in os.listdir(parent)
                            if name.startswith(stem)
                            and not name.endswith('.json')]
        listing_manifest.append(json_file)
    return listing_manifest, json_file
def parse_router(router, dirs, files):
    """Score directories and files against the router's regex filters.

    Args:
        router: router config dict with 'filter', 'exact', 'key',
            'lowercase', 'invert' lists; lists shorter than 'filter' are
            cycled so a single value applies to every filter
        dirs: iterable where each element is the list of json file paths
            found in one directory
        files: iterable of individual json file paths

    Returns:
        router_status: one entry per directory (in order), then one per
        file; each entry is the index of the first filter that matched,
        or None if no filter matched
    """
    router_status = []
    # Pre-build the regex patterns; 'exact' filters must match whole words.
    patterns = []
    for pat, exact in zip(router['filter'], cycle(router['exact'])):
        if exact:
            patterns.append(r'\b{}\b'.format(pat))
        else:
            patterns.append(r'{}'.format(pat))
    # First score directories: a directory hits a filter if ANY of its
    # json files matches (or none of them, when inverted).
    for jsons in dirs:
        js_data = []
        for js in jsons:
            with open(js, 'r') as j:
                js_data.append(json.load(j))
        dir_status = []
        for pattern, key, lowercase, invert in zip(patterns,
                                                   cycle(router['key']),
                                                   cycle(router['lowercase']),
                                                   cycle(router['invert'])):
            if lowercase:
                hits = [re.search(pattern, j[key], re.IGNORECASE) is not None for j in js_data]
            else:
                hits = [re.search(pattern, j[key]) is not None for j in js_data]
            if invert:
                dir_status.append(not any(hits))
            else:
                dir_status.append(any(hits))
        try:
            router_status.append(dir_status.index(True))
        except ValueError:
            router_status.append(None)
    # Then score individual json files.
    for js in files:
        with open(js, 'r') as j:
            js_data = json.load(j)
        if js_data is None:
            continue
        file_status = []
        for pattern, key, lowercase, invert in zip(patterns,
                                                   cycle(router['key']),
                                                   cycle(router['lowercase']),
                                                   cycle(router['invert'])):
            # Bug fix: re.search returns a Match object, and list.index(True)
            # tests equality, so a raw Match never counted as a hit (only the
            # inverted branch, which produced a bool, could ever match).
            # Coerce to bool with 'is not None', matching the directory branch.
            if lowercase:
                hit = re.search(pattern, js_data[key], re.IGNORECASE) is not None
            else:
                hit = re.search(pattern, js_data[key]) is not None
            if invert:
                hit = not hit
            file_status.append(hit)
        try:
            router_status.append(file_status.index(True))
        except ValueError:
            router_status.append(None)
    return router_status
def proc_loop(listing, base_dict, dry_run, delete, remote_options):
    """Main processing loop: ship each listing entry to its destination.

    For every entry (a file or a directory with a json sidecar) it:
      1. builds the manifest and waits 30 s to ensure no file is still
         growing (sizes are compared before/after the sleep),
      2. verifies all required files are present,
      3. derives the destination path from the json metadata via build_path,
      4. copies the manifest using the configured protocol (scp/cp/nocopy),
      5. fires any extension-triggered remote commands.

    Args:
        listing: iterable of candidate files/directories
        base_dict: base configuration from read_config (its nested
            'path'/'re' map is mutated in place for each entry)
        dry_run: if True, only log what would be done
        delete: if True, delete after a successful copy
        remote_options: dict with 'user', 'host', 'cmd_host', 'copy_protocol'

    Returns:
        proc_count: number of files successfully copied
    """
    proc_count = 0
    for proc in listing:
        # use_dict aliases base_dict, so substitutions persist across entries.
        use_dict = base_dict
        logging.info('Processing ' + proc)
        sz = os.path.getsize(proc)  # NOTE(review): sz is unused
        # loop through manifest, make sure the files are not growing...
        listing_manifest, json_file = get_listing_manifest(proc=proc)
        # changed from <= 1 to < 1 to account for metadata.json getting orphaned...
        if len(listing_manifest) < 1:
            logging.info(
                'Manifest empty, continuing...(maybe files still copying?)')
            continue
        logging.info('Getting file sizes for manifest')
        listing_sz = {f: os.path.getsize(f) for f in listing_manifest}
        # Settle time: if anything is still being written, the second size
        # snapshot below will differ and the entry is skipped this round.
        time.sleep(30)
        listing_manifest, json_file = get_listing_manifest(proc=proc)
        logging.info('Checking file sizes again')
        listing_sz2 = {f: os.path.getsize(f) for f in listing_manifest}
        if listing_sz != listing_sz2:
            logging.info(
                'A file size changed or a new file was added, continuing...')
            continue
        missing_files = False
        if base_dict['required_files'] is not None and len(base_dict['required_files']) > 0:
            basenames = [os.path.basename(_) for _ in listing_manifest]
            for required_file in base_dict['required_files']:
                if required_file not in basenames:
                    logging.info('Could not find ' + required_file)
                    missing_files = True
        if missing_files:
            logging.info('File missing, continuing...')
            continue
        logging.info('Found json file ' + json_file)
        with open(json_file) as open_file:
            dict_json = json.load(open_file)
        # A 'destination' key in the json overrides the configured root.
        if 'destination' in dict_json:
            use_dict['path']['re']['root'] = dict_json['destination']
        # if it's a directory the manifest is the contents of the directory, if it's not the manifest
        # simply matches filenames
        logging.info('Manifest [' + ','.join(listing_manifest) + ']')
        generators = []
        # Reset each mapped variable to its default, then overwrite it with
        # the first matching value found anywhere in the json (find_key).
        for m, d in zip(use_dict['map'], use_dict['default']):
            use_dict['path']['re'][m] = d
        for k, v in zip(use_dict['keys'], cycle(use_dict['map'])):
            generators = find_key(k, dict_json)
            use_dict['path']['re'][v] = next(
                generators, use_dict['path']['re'][v])
        # sub folder is a special key to copy over the appropriate sub-folder
        if os.path.isdir(proc):
            use_dict['path']['re']['sub_folder'] = os.path.basename(
                os.path.normpath(proc)) + '/'
        else:
            use_dict['path']['re']['sub_folder'] = ''
        # build a path
        new_path = build_path(
            use_dict['path']['re'], use_dict['path']['path_string'])
        # check for command triggers
        logging.info('Sending manifest to ' + new_path)
        # aiight dawg, one trigger per manifest?
        for f in listing_manifest:
            # Build the mkdir and copy shell commands for the chosen protocol.
            if remote_options['copy_protocol'] == 'scp':
                # dir check
                local_copy = False
                dir_cmd = "ssh %s@%s 'mkdir -p \"%s\"'" % (
                    remote_options['user'], remote_options['host'], new_path)
                cp_cmd = "scp \"%s\" %s@%s:'\"%s\"'" % (
                    f, remote_options['user'], remote_options['host'], new_path)
            elif remote_options['copy_protocol'] == 'nocopy':
                local_copy = False
                dir_cmd = ''
                cp_cmd = ''
            elif remote_options['copy_protocol'] == 'rsync':
                local_copy = False
                raise NotImplementedError
            elif remote_options['copy_protocol'] == 'cp':
                local_copy = True
                dir_cmd = "mkdir -p \"%s\"" % (new_path)
                cp_cmd = "cp \"%s\" \"%s\"" % (f, new_path)
            else:
                raise NotImplementedError
            logging.info('Chk command: ' + dir_cmd)
            logging.info('Copy command: ' + cp_cmd)
            if not dry_run:
                status = os.system(dir_cmd)
                if status == 0:
                    logging.info(
                        'Directory creation/check succesful, copying...')
                    status = os.system(cp_cmd)
                    if local_copy:
                        # check md5
                        logging.info('Checking file integrity...')
                        with open(f, 'rb') as f_check:
                            md5_original = md5_checksum(f_check)
                        new_file = os.path.join(new_path, os.path.basename(f))
                        with open(new_file, 'rb') as f_check:
                            md5_copy = md5_checksum(f_check)
                        md5checksum = md5_original == md5_copy
                        logging.info('MD5checksum: ' + str(md5checksum))
                        # NOTE(review): 0 & True == 0, so a checksum mismatch
                        # after a successful cp does NOT mark the copy as
                        # failed here — confirm whether '|' was intended.
                        status = status & (not md5checksum)
                    if status == 0 and delete:
                        logging.info('Copy succeeded, deleting file')
                        proc_count += 1
                        # NOTE(review): if f is an absolute path, os.path.join
                        # discards new_path and this deletes the SOURCE file;
                        # for a relative f it would target the destination
                        # copy instead — verify which is intended.
                        os.remove(os.path.join(new_path, f))
                    elif status == 0:
                        logging.info('Copy SUCCESS, continuing')
                        proc_count += 1
                    else:
                        logging.info('Copy FAILED, continuing')
                        continue
            elif dry_run and delete:
                logging.info('Would delete: ' + os.path.join(new_path, f))
        # Trigger section: for each configured extension, run the paired
        # command against the first manifest file with that extension.
        issue_options = {
            'user': '',
            'host': '',
            'cmd_host': '',
            'path': ''
        }
        for ext, cmd in zip(use_dict['command']['exts'], cycle(use_dict['command']['run'])):
            triggers = [f for f in listing_manifest if f.endswith(ext)]
            if triggers and not dry_run and not delete:
                # Without delete, the same trigger would fire on every pass.
                raise NameError(
                    "Delete option must be turned on, otherwise triggers will repeat")
            elif triggers and remote_options['copy_protocol'] == 'nocopy':
                logging.info('nocopy, doing nothing')
            elif triggers and not dry_run:
                issue_options['path'] = os.path.join(
                    new_path, os.path.basename(triggers[0]))
                issue_options = merge_dicts(issue_options, remote_options)
                issue_cmd = build_path(issue_options, cmd)
                logging.info('Issuing command ' + issue_cmd)
                status = os.system(issue_cmd)
                if status == 0:
                    logging.info('Command SUCCESS')
                else:
                    logging.info('Command FAIL')
            elif triggers:
                issue_options['path'] = os.path.join(
                    new_path, os.path.basename(triggers[0]))
                issue_options = merge_dicts(issue_options, remote_options)
                issue_cmd = build_path(issue_options, cmd)
                logging.info('Would issue command ' + issue_cmd)
    return proc_count
| true |
8a879db001eca077ffccd9154baa295da0a604bd | Python | omennemo/pyp | /26417/05A-DictsSetsTuples.py | UTF-8 | 3,982 | 4.75 | 5 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Tue Apr 25 19:52:05 2017
@filename: 03A-ListsDictsSetsTupples
@author: cyruslentin
"""
# Containers
# Python includes several built-in container types: dictionaries, sets,
# and tuples.
# Dictionaries
# A dictionary stores (key, value) pairs, similar to a Map in Java or an
# object in Javascript.
# You can use it like this:
d = {'cat': 'cute', 'dog': 'furry'} # Create a new dictionary with some data
print(d['cat']) # Get an entry from a dictionary; prints "cute"
print('cat' in d) # Check if a dictionary has a given key; prints "True"
print('cute' in d)
d['fish'] = 'wet' # Set an entry in a dictionary
print(d['fish']) # Prints "wet"
#print(d['monkey']) # KeyError: 'monkey' not a key of d
print(d.get('monkey', 'N/A')) # Get an element with a default; prints "N/A"
print(d.get('fish', 'N/A')) # Get an element with a default; prints "wet"
del d['fish'] # Remove an element from a dictionary
print(d.get('fish', 'N/A')) # "fish" is no longer a key; prints "N/A"
# Loops:
# It is easy to iterate over the keys in a dictionary:
d = {'person': 2, 'cat': 4, 'spider': 8}
for animal in d:
legs = d[animal]
print('A %s has %d legs' % (animal, legs))
# Prints "A person has 2 legs", "A spider has 8 legs", "A cat has 4 legs"
# If you want access to keys and their corresponding values, use the
# iteritems method:
d = {'person': 2, 'cat': 4, 'spider': 8}
for animal, legs in d.items():
print('A %s has %d legs' % (animal, legs))
# Prints "A person has 2 legs", "A spider has 8 legs", "A cat has 4 legs"
# Dictionary comprehensions:
# These are similar to list comprehensions, but allow you to easily construct
# dictionaries.
nums = [0, 1, 2, 3, 4]
even_num_to_square = {x: x ** 2 for x in nums if x % 2 == 0}
print(even_num_to_square) # Prints "{0: 0, 2: 4, 4: 16}"
# Sets
# A set is an unordered collection of distinct elements. As a simple example,
# consider the following:
animals = {'cat', 'dog'}
print('cat' in animals) # Check if an element is in a set; prints "True"
print('fish' in animals) # prints "False"
animals.add('fish') # Add an element to a set
print('fish' in animals) # Prints "True"
print(len(animals)) # Number of elements in a set; prints "3"
animals.add('cat') # Adding an element that is already in the set does nothing
print(len(animals)) # Prints "3"
animals.remove('cat') # Remove an element from a set
print(len(animals)) # Prints "2"
# Loops:
# Iterating over a set has the same syntax as iterating over a list;
# however since sets are unordered, you cannot make assumptions about the
# order in which you visit the elements of the set:
animals = {'cat', 'dog', 'fish'}
for idx, animal in enumerate(animals):
#print('#%d: %s' % (idx + 1, animal))
print('#{}: {}'.format(idx + 1, animal))
# Prints "#1: fish", "#2: dog", "#3: cat"
# Set comprehensions:
# Like lists and dictionaries, we can easily construct sets using set
# comprehensions:
from math import sqrt
nums = {int(sqrt(x)) for x in range(30)}
print(nums) # Prints "set([0, 1, 2, 3, 4, 5])"
# Tuples
# A tuple is an (immutable) ordered list of values.
# A tuple is in many ways similar to a list; one of the most important
# differences is that tuples can be used as keys in dictionaries and
# as elements of sets, while lists cannot.
d = {(x, x + 1): x for x in range(10)} # Create a dictionary with tuple keys
print(d)
t = (5, 6) # Create a tuple
print(t)
print(type(t)) # Prints "<type 'tuple'>"
print(d[t]) # Prints "5"
print(d[(1, 2)]) # Prints "1"
| true |
fd29a2b3e5273777b9f3d7916a15e5651ceedc5f | Python | bucho666/WebSocketMud | /message.py | UTF-8 | 569 | 3.28125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
class Message(object):
    """An ordered list of (text, color) segments rendered as HTML.

    Spaces become '&nbsp;' and newlines become '<br>' so the text survives
    display in an HTML client.
    """

    def __init__(self, message, color='Silver'):
        self._messages = [(message, color)]

    def add(self, message, color='Silver'):
        """Append another colored segment; returns self for chaining."""
        self._messages.append((message, color))
        return self

    def __str__(self):
        parts = []
        for text, color in self._messages:
            parts.append('<font color=%s>%s</font>' % (color, text.replace(' ', '&nbsp;')))
        # Newlines are converted after joining so they apply everywhere.
        return ''.join(parts).replace('\n', '<br>')

    def __add__(self, other):
        combined = Message('')
        # Replace the placeholder contents with both operands' segments.
        combined._messages = self._messages + other._messages
        return combined
def Range(f, i=None, diff=None):
    """A list-returning reimplementation of the builtin range().

    Call forms mirror range(): Range(stop), Range(start, stop),
    Range(start, stop, diff).

    Bug fixes: a negative *diff* with start < stop used to loop forever
    (the counter decreased while the '< stop' test stayed true); an
    explicit stop of 0 was treated as absent because of an 'if i:'
    truthiness test.  Negative steps now count down like range(), and
    diff == 0 raises ValueError instead of silently stepping by 1.
    """
    if i is None:
        start, stop = 0, f
    else:
        start, stop = f, i
    step = 1 if diff is None else diff
    if step == 0:
        raise ValueError("diff must be non-zero")
    result = []
    n = start
    if step > 0:
        while n < stop:
            result.append(n)
            n += step
    else:
        while n > stop:
            result.append(n)
            n += step
    return result
# for i in Range(1,10,1):
# print(i)
| true |
5107cdbd8430116ae01fe4819f316b094da46b9e | Python | delonxd/calculate_tools | /calculater0507.py | UTF-8 | 12,487 | 2.65625 | 3 | [] | no_license | import pandas as pd
import numpy as np
import os
import time
from Data2Excel import *
# Collector for the per-month time series written to Excel at the end.
data2excel = SheetDataGroup(sheet_names=[])
# Get a timestamp string used to tag the output filename
localtime = time.localtime()
timestamp = time.strftime("%Y%m%d%H%M%S", localtime)
print(time.strftime("%Y-%m-%d %H:%M:%S", localtime))
# Read the scenario parameters from the input workbook and map NaN cells to None.
df_input = pd.read_excel('计算条件.xlsx')
df_input = df_input.where(df_input.notnull(), None)
# Number of scenarios = number of rows in the index column.
num_len = len(list(df_input['序号']))
# Accumulators for the per-scenario summary rows written out at the end.
excel_data = []
excel_data_first = []
excel_data_end = []
# When True, parameters come from the spreadsheet; when False, the
# hard-coded defaults in the loop's else-branch are used instead.
pd_read_flag = True
# pd_read_flag = False
# for temp in np.arange(0, 1.01, 0.1):
# Main simulation: for each scenario row, simulate the mortgage/投资 cash
# flow month by month over the loan period and record the results.
for temp in range(num_len):
    data2excel.add_new_row()
    if pd_read_flag:
        # Load this scenario's parameters from the input spreadsheet.
        # Rates are given as percentages and converted to fractions here.
        df_input_row = df_input.iloc[temp]
        valuation = df_input_row['资金(万元)']
        fixed_assets0 = df_input_row['房价(万元)']
        income_month = df_input_row['月净收入(万元)']
        fund_month = df_input_row['月公积金(万元)']
        expense_month = df_input_row['月消费(万元)']
        investment_rate = df_input_row['现金投资比例(%)'] / 100
        loan_rate_year = df_input_row['商业贷款利率(%)'] / 100
        loan_fund_rate_year = df_input_row['公积金贷款利率(%)'] / 100
        return_rate_year = df_input_row['现金年回报率(%)'] / 100
        estate_rate_year = df_input_row['房屋价格年增长率(%)'] / 100
        inflation_rate_year = df_input_row['年通货膨胀率(%)'] / 100
        income_rate_year = df_input_row['收入年增长率(%)'] / 100
        rate = df_input_row['贷款比率(%)'] / 100
        period_years = int(df_input_row['贷款年数(年)'])
        period_months = period_years * 12
    else:
        # Hard-coded defaults used when sweeping 'temp' directly as the
        # loan ratio (see the commented-out np.arange loop above).
        valuation = 530
        fixed_assets0 = 530
        income_month = 1
        fund_month = 0.5
        expense_month = 0
        investment_rate = 0.7
        loan_rate_year = 0.049 * 1.05
        loan_fund_rate_year = 0.0325
        return_rate_year = 0.04
        estate_rate_year = 0.01
        inflation_rate_year = 0.035
        income_rate_year = 0.03
        rate = temp
        period_years = 30
        period_months = period_years * 12
    # Split the total loan between a provident-fund loan (capped at k)
    # and a commercial loan for the remainder.
    fixed_assets = [fixed_assets0]
    loan = [fixed_assets[0]*rate]
    fund_loan = [0]
    k = 40  # provident-fund loan cap (万元)
    if loan[0] >= k:
        fund_loan[0] = k
        loan[0] = loan[0] - fund_loan[0]
    else:
        fund_loan[0] = loan[0]
        loan[0] = 0
    first_payment = fixed_assets[0] - loan[0] - fund_loan[0]
    cash = [valuation - first_payment]
    income = [income_month]
    fund = [fund_month]
    expense = [expense_month]
    length = period_months
    # Equal-installment (annuity) monthly payment for each loan component:
    # A = P * r * (1+r)^n / ((1+r)^n - 1)
    r = loan_rate_year/12
    a1 = loan[-1] * r * (1+r)**period_months/((1+r)**period_months - 1)
    r = loan_fund_rate_year/12
    a2 = fund_loan[-1] * r * (1+r)**period_months / ((1+r)**period_months - 1)
    payment = [a1 + a2]
    inflation = [1]
    # Month-by-month simulation over the loan period.
    for i in range(period_months):
        data2excel.add_data(sheet_name="现金(万元)", data1=cash[-1])
        data2excel.add_data(sheet_name="房价(万元)", data1=fixed_assets[-1])
        data2excel.add_data(sheet_name="净资产(万元)", data1=cash[-1] + fixed_assets[-1])
        data2excel.add_data(sheet_name="资产折算后(万元)", data1=((cash[-1] + fixed_assets[-1]) / inflation[-1]))
        data2excel.add_data(sheet_name="月净收入(万元)", data1=income[-1])
        data2excel.add_data(sheet_name="月消费(万元)", data1=expense[-1])
        data2excel.add_data(sheet_name="月公积金(万元)", data1=fund[-1])
        data2excel.add_data(sheet_name="通货膨胀率(%)", data1=inflation[-1])
        data2excel.add_data(sheet_name="剩余贷款(万元)", data1=loan[-1] + fund_loan[-1])
        data2excel.add_data(sheet_name="资产负债比率(%)", data1=(loan[-1] + fund_loan[-1])/(cash[-1] + fixed_assets[-1])*100)
        # House value grows at the estate rate (monthly compounding).
        rate_year = estate_rate_year
        rslt = fixed_assets[-1] * (1 + rate_year/12)
        fixed_assets.append(rslt)
        # Principal repaid in month i for each loan component; the
        # remaining balance shrinks by the principal portion B.
        r = loan_rate_year / 12
        B = loan[0] * r * (1+r)**i / ((1+r)**period_months - 1)
        loan.append(loan[-1] - B)
        r = loan_fund_rate_year / 12
        B = fund_loan[0] * r * (1+r)**i / ((1+r)**period_months - 1)
        fund_loan.append(fund_loan[-1] - B)
        # Cash balance: income minus expenses/payment, plus investment
        # return on the invested fraction, plus the fund contribution.
        rate_year = return_rate_year
        rslt = cash[-1] + income[-1] - expense[-1] - payment[-1] + cash[-1] * (rate_year/12) * investment_rate + fund[-1]
        cash.append(rslt)
        # Inflation, income and fund all compound monthly.
        rslt = inflation[-1] * (1 + inflation_rate_year/12)
        inflation.append(rslt)
        rslt = income[-1] * (1 + income_rate_year/12)
        income.append(rslt)
        rslt = fund[-1] * (1 + income_rate_year/12)
        fund.append(rslt)
    # Console summary of the scenario parameters, first month and last month.
    print('\n',
          '资金:', round(valuation, 2), '万元', '\n',
          '房价:', round(fixed_assets0, 2), '万元', '\n',
          '月净收入:', round(income_month, 2), '万元', '\n',
          '月消费:', round(expense_month, 2), '万元', '\n',
          '月公积金:', round(fund_month, 2), '万元', '\n',
          '现金投资比例:', round(investment_rate, 2), '\n',
          '商业贷款利率:', round(loan_rate_year * 100, 2), '%', '\n',
          '公积金贷款利率:', round(loan_fund_rate_year * 100, 2), '%', '\n',
          '现金年回报率:', round(return_rate_year * 100, 2), '%', '\n',
          '房屋价值年增长率:', round(estate_rate_year * 100, 2), '%', '\n',
          '年通货膨胀率:', round(inflation_rate_year * 100, 2), '%', '\n',
          '收入年增长率:', round(income_rate_year * 100, 2), '%', '\n',
          '贷款年数:', round(period_years, 2), '年', '\n',
          '贷款月数:', round(period_months, 2), '月', '\n',
          )
    print('首年: ',
          '净资产:', round(valuation, 2),
          '房价:', round(fixed_assets[0], 2),
          '首付:', round(first_payment, 2),
          '商业贷款:', round(loan[0], 2),
          '公积金贷款:', round(fund_loan[0], 2),
          '贷款比率:', round(rate*100, 2), '%',
          '剩余现金:', round(cash[0], 2),
          '资产折算后', round((cash[-1] + fixed_assets[-1]), 2),
          )
    print('末年: ',
          '净资产:', round(cash[-1] + fixed_assets[-1], 2),
          '现金:', round(cash[-1], 2),
          '固资:', round(fixed_assets[-1], 2),
          '月存款:', round(income[-1], 2),
          '通货膨胀:', round(inflation[-1], 2),
          '首月还款除公积金:', round((payment[0]-fund[0]), 2),
          '首月公积金:', round(fund[0], 2),
          '资产折算后', round(((cash[-1] + fixed_assets[-1])/inflation[-1]), 2),
          )
    print('')
    # Summary rows for the output workbook: scenario parameters ...
    data = dict()
    data['资金(万元)'] = round(valuation, 2)
    data['房价(万元)'] = round(fixed_assets0, 2)
    data['月净收入(万元)'] = round(income_month, 2)
    data['月消费(万元)'] = round(expense_month, 2)
    data['月公积金(万元)'] = round(fund_month, 2)
    data['现金投资比例(%)'] = round(investment_rate * 100, 2)
    data['商业贷款利率(%)'] = round(loan_rate_year * 100, 2)
    data['公积金贷款利率(%)'] = round(loan_fund_rate_year * 100, 2)
    data['现金年回报率(%)'] = round(return_rate_year * 100, 2)
    data['房屋价格年增长率(%)'] = round(estate_rate_year * 100, 2)
    data['年通货膨胀率(%)'] = round(inflation_rate_year * 100, 2)
    data['收入年增长率(%)'] = round(income_rate_year * 100, 2)
    data['贷款年数(年)'] = round(period_years, 2)
    data['贷款月数(月)'] = round(period_months, 2)
    data['贷款比率(%)'] = round(rate * 100, 2)
    # ... first-month snapshot ...
    data_first = dict()
    data_first['净资产(万元)'] = round(valuation, 2)
    data_first['房价(万元)'] = round(fixed_assets[0], 2)
    data_first['首付(万元)'] = round(first_payment, 2)
    data_first['商业贷款金额(万元)'] = round(loan[0], 2)
    data_first['公积金贷款金额(万元)'] = round(fund_loan[0], 2)
    data_first['贷款比率(%)'] = round(rate * 100, 2)
    data_first['剩余现金(万元)'] = round(cash[0], 2)
    data_first['资产折算后(万元)'] = round(valuation, 2)
    data_first['月净收入(万元)'] = round(income[0], 2)
    data_first['月消费(万元)'] = round(expense[0], 2)
    data_first['月公积金(万元)'] = round(fund[0], 2)
    data_first['月还款额(万元)'] = round(payment[0], 2)
    data_first['月还款额除公积金(万元)'] = round((payment[0]-fund[0]), 2)
    # ... and last-month snapshot.
    data_end = dict()
    data_end['净资产(万元)'] = round(cash[-1] + fixed_assets[-1], 2)
    data_end['房价(万元)'] = round(fixed_assets[-1], 2)
    data_end['资产折算后(万元)'] = round(((cash[-1] + fixed_assets[-1]) / inflation[-1]), 2)
    data_end['剩余现金(万元)'] = round(cash[-1], 2)
    data_end['月净收入(万元)'] = round(income[-1], 2)
    data_end['月消费(万元)'] = round(expense[-1], 2)
    data_end['月公积金(万元)'] = round(fund[-1], 2)
    data_end['贷款比率(%)'] = round(rate * 100, 2)
    data_end['通货膨胀率(%)'] = round(inflation[-1] * 100, 2)
    data_row = [data[key] for key in data.keys()]
    data_first_row = [data_first[key] for key in data_first.keys()]
    data_end_row = [data_end[key] for key in data_end.keys()]
    excel_data.append(data_row)
    excel_data_first.append(data_first_row)
    excel_data_end.append(data_end_row)
# print(fixed_assets[-1])
# print('')
#
# for temp in np.arange(1, 0.31, -0.1):
# fixed_assets = [fixed_assets0]
#
# # rate = 0.5
# rate = temp
# loan = [fixed_assets[0]*(1-rate)]
# cash = [valuation-fixed_assets[0]+loan[0]]
# print('资金:', round(valuation, 2),
# '现金:', round(cash[0], 2),
# '固资:', round(fixed_assets[0], 2),
# '贷款:', round(loan[0], 2),
# '比率:', round(rate*100, 2), '%')
# # print(valuation, cash[0], loan[0])
#
# income = [income_month]
# expense = [expense_month]
#
# period_years = 30
# period_months = period_years * 12
#
# length = period_months
#
# # r = loan_rate_year/12
# # a = loan[-1] * r * (1+r)**period_months/((1+r)**period_months - 1)
# payment = []
# inflation = [1]
#
# for i in range(period_months):
# loan_month = loan[-1]
# principal = loan[0] / period_months
#
# interest_rate_year = loan_rate_year
# interest_expense = loan_month * (interest_rate_year/12)
# loan.append(loan_month-principal)
#
# payment.append(interest_expense + principal)
#
# rate_year = estate_rate_year
# rslt = fixed_assets[-1] * (1 + rate_year/12)
# fixed_assets.append(rslt)
#
# rate_year = return_rate_year
# rslt = cash[-1] + income[-1] - expense[-1] - payment[-1] + cash[-1] * (rate_year/12) * investment_rate
# cash.append(rslt)
#
# rslt = inflation[-1] * (1 + inflation_rate_year/12)
# inflation.append(rslt)
#
# rslt = income[-1] * (1 + income_rate_year/12)
# income.append(rslt)
#
# print('资产:', round(cash[-1] + fixed_assets[-1], 2),
# '现金:', round(cash[-1], 2),
# '固资:', round(fixed_assets[-1], 2),
# '月存款:', round(income[-1], 2),
# '通货膨胀:', round(inflation[-1], 2),
# '资产折算后', round(((cash[-1] + fixed_assets[-1])/inflation[-1]), 2))
#
# print('')
# # print(fixed_assets[-1])
# Build the summary DataFrames.
# NOTE(review): data/data_first/data_end are loop variables leaked from the
# scenario loop above — this raises NameError if num_len == 0; verify input.
df_data = pd.DataFrame(excel_data, columns=data.keys())
df_data_first = pd.DataFrame(excel_data_first, columns=data_first.keys())
df_data_end = pd.DataFrame(excel_data_end, columns=data_end.keys())
pass
# Save results to a local Excel file, named with the run timestamp.
filename = '计算结果'
# filename = '仿真输出_拆电容'
# filepath = 'src/Output/'+ filename + timestamp + '.xlsx'
filepath = '' + filename + '_' + timestamp + '.xlsx'
with pd.ExcelWriter(filepath) as writer:
    df_input.to_excel(writer, sheet_name="参数输入", index=False)
    df_data.to_excel(writer, sheet_name="计算条件", index=False)
    df_data_first.to_excel(writer, sheet_name="首月净资产", index=False)
    df_data_end.to_excel(writer, sheet_name="末月净资产", index=False)
    # Monthly time-series sheets collected by data2excel during the loop.
    names = [
        "现金(万元)",
        "房价(万元)",
        "净资产(万元)",
        "资产折算后(万元)",
        "月净收入(万元)",
        "月消费(万元)",
        "月公积金(万元)",
        "通货膨胀率(%)",
        "剩余贷款(万元)",
        "资产负债比率(%)",
    ]
    data2excel.write2excel(sheet_names=names, writer=writer)
| true |
b4659ae738a510268394c4b050fabe16a95dbdef | Python | Rick-Sanchez-C/Speech-to-maths | /test.py | UTF-8 | 2,425 | 2.921875 | 3 | [] | no_license | from s2m.core.number_parser import NumberParser
from s2m.core.parser import Parser
from s2m.core.S2MParser import s2m_parser as parser
# Number-parser tests: reading French number words (cardinals, compounds,
# Belgian 'nonante', large scales, and decimal forms with 'virgule'/'point').
#parser = NumberParser()
assert parser(['un']) == 1
assert parser(['huit']) == 8
assert parser(['dix']) == 10
assert parser(['onze']) == 11
assert parser(['quatorze']) == 14
assert parser(['dix','sept']) == 17
assert parser(['vingt','et', 'un']) == 21
assert parser(['trente', 'quatre']) == 34
assert parser(['quarante', 'huit']) == 48
assert parser(['cinquante']) == 50
assert parser(['soixante','sept']) == 67
assert parser(['soixante','et', 'onze']) == 71
assert parser(['soixante','quatorze']) == 74
assert parser(['soixante','dix', 'huit']) == 78
assert parser(['quatre','vingt', 'huit']) == 88
assert parser(['quatre', 'vingt', 'douze']) == 92
assert parser(['quatre', 'vingt', 'dix', 'huit']) == 98
assert parser(['nonante', 'neuf']) == 99
assert parser(['cent']) == 100
# The parser accepts either a token list or a plain space-separated string.
assert parser('trois cent quatre') == 304
assert parser(['huit','cent']) == 800
assert parser('huit cent douze') == 812
assert parser(['mille']) == 1000
assert parser(['mille', 'deux']) == 1002
assert parser(['mille', 'deux', 'cent']) == 1200
assert parser(['mille', 'deux', 'cent', 'deux']) == 1202
assert parser(['deux', 'mille', 'deux', 'cent', 'deux']) == 2202
assert parser(['douze', 'mille', 'deux', 'cent', 'deux']) == 12202
assert parser('sept cent million') == 700000000
assert parser('onze milliard sept cent million deux cent mille trois cent quatre') == 11700200304
assert parser('trois cent quatorze trillion onze milliard sept cent million deux cent mille trois cent quatre') == 314000000011700200304
assert parser('trois virgule quarante deux quarante deux') == 3.4242
assert parser('trente trois point zero zero huit cent quatre') == 33.00804
assert parser('cent cinquante et un virgule quarante six') == 151.46
assert parser('deux cent soixante et un mille six cent quarante trois virgule huit million quatre cent quatre vingt trois mille cinq cent douze') == 261643.8483512
# Binary-operation parser tests: each spoken phrase must yield the expected
# LaTeX fragment among the parser's candidate outputs.
#parser = S2MParser()
assert "2 + 2" in parser("deux plus deux")
assert "2 * 2" in parser("deux fois deux")
assert "\\frac{2}{2}" in parser("deux sur deux")
assert "{2}^{2}" in parser("deux puissance deux")
assert "{2}^{2}" in parser("deux au carré")
assert "3 \\neq 2" in parser("trois différent de deux")
| true |
eec663dd5bb1fb3c20b489b355dd629dd82920a6 | Python | saurabh-pandey/AlgoAndDS | /myAttempts/design/tests/test_diff_merkle_tree.py | UTF-8 | 855 | 3 | 3 | [] | no_license | import pytest
from design.merkle_tree import MerkleTree
from design.diff_merkle_tree import diff
class TestDiffMerkleTree:
    """Smoke tests for diff() applied to pairs of Merkle trees.

    NOTE(review): diff()'s return value is never asserted, so these tests
    only verify that diff() runs without raising.  Confirm whether the
    differing leaf positions should be checked explicitly.
    """

    def test_example1(self):
        # Four leaves, differing at one interior position.
        data0 = ["a", "b", "c", "d"]
        tree0 = MerkleTree(data0)
        data1 = ["a", "b", "e", "d"]
        tree1 = MerkleTree(data1)
        diff(tree0, tree1)
        # Same size, but the difference is at the last leaf.
        data0 = ["a", "b", "c", "d"]
        tree0 = MerkleTree(data0)
        data1 = ["a", "b", "c", "e"]
        tree1 = MerkleTree(data1)
        diff(tree0, tree1)
    def test_example2(self):
        # Odd leaf count (non-power-of-two tree), difference at the end.
        data0 = ["a", "b", "c"]
        tree0 = MerkleTree(data0)
        data1 = ["a", "b", "d"]
        tree1 = MerkleTree(data1)
        diff(tree0, tree1)
        # Difference in the middle leaf.
        data0 = ["a", "b", "c"]
        tree0 = MerkleTree(data0)
        data1 = ["a", "c", "c"]
        tree1 = MerkleTree(data1)
        diff(tree0, tree1)
| true |
e85fbaabb54bc18c61b6187d191fb37ff1cb6a4c | Python | guikunchen/simple-gnn-examples | /node_classifcation_simple_gnn.py | UTF-8 | 3,696 | 2.75 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
@Time : 22/9/2021
@Author : Guikun Chen
"""
import os
import torch
import torch.nn.functional as F
import torch.nn as nn
from torch_geometric.datasets import Planetoid
import torch_geometric.nn as pyg_nn
# load dataset
def get_data(folder="/home/cgk/dataset", data_name="cora"):
    """Load a Planetoid citation dataset (downloads to `folder` if absent)."""
    dataset = Planetoid(root=folder, name=data_name)
    return dataset
# create the graph cnn model
class GraphCNN(nn.Module):
    """Two-layer GCN for node classification (log-softmax outputs)."""

    def __init__(self, in_c, hid_c, out_c):
        super(GraphCNN, self).__init__()
        # in_c: input feature size, hid_c: hidden width, out_c: #classes.
        self.conv1 = pyg_nn.GCNConv(in_channels=in_c, out_channels=hid_c)
        self.conv2 = pyg_nn.GCNConv(in_channels=hid_c, out_channels=out_c)
    def forward(self, data):
        # data.x data.edge_index
        x = data.x  # [N, C]
        edge_index = data.edge_index  # [2 ,E]
        hid = self.conv1(x=x, edge_index=edge_index)  # [N, D]
        hid = F.relu(hid)
        out = self.conv2(x=hid, edge_index=edge_index)  # [N, out_c]
        out = F.log_softmax(out, dim=1)  # [N, out_c]
        return out
class MyGCN(torch.nn.Module):
    """Deeper GNN: SGConv encoder, three APPNP propagation layers with
    dropout + leaky ReLU, and an SGConv classifier head."""

    def __init__(self, in_c, hid_c, out_c):
        super(MyGCN, self).__init__()
        self.conv0 = pyg_nn.SGConv(in_c, hid_c, K=2)
        # APPNP layers propagate features without changing their width.
        self.conv1 = pyg_nn.APPNP(K=2, alpha=0.2)
        self.conv2 = pyg_nn.APPNP(K=2, alpha=0.2)
        self.conv3 = pyg_nn.APPNP(K=2, alpha=0.2)
        self.conv4 = pyg_nn.SGConv(hid_c, out_c, K=2)
    def forward(self, data):
        x, edge_index = data.x, data.edge_index
        x = self.conv0(x, edge_index)
        x = F.dropout(x, p=0.2, training=self.training)
        x = F.leaky_relu(self.conv1(x, edge_index))
        x = F.dropout(x, p=0.2, training=self.training)
        x = F.leaky_relu(self.conv2(x, edge_index))
        x = F.dropout(x, p=0.2, training=self.training)
        x = F.leaky_relu(self.conv3(x, edge_index))
        # Heavier dropout before the classifier head.
        x = F.dropout(x, p=0.4, training=self.training)
        x = self.conv4(x, edge_index)
        return F.log_softmax(x, dim=1)
def main():
    """Train MyGCN on Cora with SGD, tracking the best test accuracy.

    NOTE(review): my_net.eval() is called inside the training loop and the
    model is never switched back to train() mode, so dropout is disabled
    from epoch 2 onward.  Also, the *test* mask is used every epoch to
    select the checkpoint -- that is test-set leakage; a validation mask
    should be used instead.
    """
    # os.environ["CUDA_VISIBLE_DEVICES"] = "2"
    cora_dataset = get_data()
    my_net = MyGCN(in_c=cora_dataset.num_features, hid_c=256, out_c=cora_dataset.num_classes)
    device = torch.device("cuda:2" if torch.cuda.is_available() else "cpu")
    my_net = my_net.to(device)
    data = cora_dataset[0].to(device)
    optimizer = torch.optim.SGD(my_net.parameters(), lr=1e-2, weight_decay=1e-3, momentum=0.95)
    # model train
    best_acc = .0
    my_net.train()
    for epoch in range(500):
        optimizer.zero_grad()
        output = my_net(data)
        # Negative log-likelihood on the training nodes only.
        loss = F.nll_loss(output[data.train_mask], data.y[data.train_mask])
        loss.backward()
        optimizer.step()
        print("Epoch", epoch + 1, "Loss", loss.item())
        ####### test
        my_net.eval()
        _, prediction = my_net(data).max(dim=1)
        target = data.y
        test_correct = prediction[data.test_mask].eq(target[data.test_mask]).sum().item()
        test_number = data.test_mask.sum().item()
        acc = test_correct / test_number
        print("Accuracy of Test Samples: {}".format(acc))
        if acc > best_acc:
            best_acc = acc
            torch.save(my_net.state_dict(), "best.ckpt")
    print(best_acc)
    # model test
    # my_net.eval()
    # _, prediction = my_net(data).max(dim=1)
    # target = data.y
    # test_correct = prediction[data.test_mask].eq(target[data.test_mask]).sum().item()
    # test_number = data.test_mask.sum().item()
    # acc = test_correct / test_number
    # print("Accuracy of Test Samples: {}".format(acc))
    # if acc >= 0.82:
    #     torch.save(my_net.state_dict(), "1.ckpt")
if __name__ == '__main__':
    main()
| true |
63383eebf32e250ed3e6c1a9c10959ffc7510537 | Python | ankursharma319/python_misc | /misc_labs/lab2.py | UTF-8 | 635 | 3.921875 | 4 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Mon Oct 19 10:46:25 2015
@author: Akhil
"""
def seconds2days(n):
    """Convert a duration in seconds (int or float) to days, as a float."""
    return n / float(24 * 60 * 60)
def box_surface(a, b, c):
    """Surface area of a cuboid (box) with edge lengths a, b and c."""
    face_areas = (a * b) + (a * c) + (b * c)
    return 2 * face_areas
def triangle_area(a, b, c):
    """Area of a triangle with side lengths a, b and c (Heron's formula)."""
    # semi-perimeter, then area = sqrt(s * (s-a) * (s-b) * (s-c))
    semi = (a + b + c) / 2
    product = semi * (semi - a) * (semi - b) * (semi - c)
    return product ** 0.5
| true |
300169dcf246c2abd8b69632bdcf53a0be58f3a4 | Python | acse-os920/acse-1-assessment-3-acse-os920-master | /acse_la/gauss.py | UTF-8 | 3,645 | 3.65625 | 4 | [] | no_license | import numpy as np
import copy
# from fractions import Fraction
__all__ = ['gauss', 'matmul', 'zeromat']
def gauss(a, b):
    """
    Given two matrices, `a` and `b`, with `a` square, the determinant
    of `a` and a matrix `x` such that a*x = b are returned.
    If `b` is the identity, then `x` is the inverse of `a`.

    Uses Gaussian elimination with partial pivoting; the inputs are
    deep-copied and left unmodified.

    Parameters
    ----------
    a : np.array or list of lists
        'n x n' array
    b : np. array or list of lists
        'm x n' array

    Examples
    --------
    >> a = [[2, 0, -1], [0, 5, 6], [0, -1, 1]]
    >> b = [[2], [1], [2]]
    >> det, x = gauss(a, b)
    >> det
    22.0
    >> x
    [[1.5], [-1.0], [1.0]]

    >> A = [[1, 0, -1], [-2, 3, 0], [1, -3, 2]]
    >> I = [[1, 0, 0], [0, 1, 0], [0, 0, 1]]
    >> Det, Ainv = gauss(A, I)
    >> Det
    3.0
    >> Ainv
    [[2.0, 1.0, 1.0],
    [1.3333333333333333, 1.0, 0.6666666666666666],
    [1.0, 1.0, 1.0]]

    Notes
    -----
    See https://en.wikipedia.org/wiki/Gaussian_elimination for further details.
    """
    a = copy.deepcopy(a)
    b = copy.deepcopy(b)
    n = len(a)
    p = len(b[0])
    # BUG FIX: the determinant accumulator used to be np.ones(1), which made
    # gauss() return a 1-element numpy array instead of the scalar float the
    # docstring shows.  A plain float restores the documented behaviour.
    det = 1.0
    # Forward elimination with partial pivoting.
    for i in range(n - 1):
        k = i
        for j in range(i + 1, n):
            if abs(a[j][i]) > abs(a[k][i]):
                k = j
        if k != i:
            # A row swap flips the determinant's sign.
            a[i], a[k] = a[k], a[i]
            b[i], b[k] = b[k], b[i]
            det = -det

        for j in range(i + 1, n):
            t = a[j][i]/a[i][i]
            for k in range(i + 1, n):
                a[j][k] -= t*a[i][k]
            for k in range(p):
                b[j][k] -= t*b[i][k]
    # Back substitution; the determinant is the product of the pivots.
    for i in range(n - 1, -1, -1):
        for j in range(i + 1, n):
            t = a[i][j]
            for k in range(p):
                b[i][k] -= t*b[j][k]
        t = 1/a[i][i]
        det *= a[i][i]
        for j in range(p):
            b[i][j] *= t
    return det, b
def matmul(a, b):
    """
    Multiply two matrices given as lists of lists.

    For an 'n x p' matrix `a` and a 'p1 x q' matrix `b`, the product
    exists only when p == p1; otherwise a ValueError is raised.  The
    result c = a*b is an 'n x q' matrix whose entry c[i][j] is the dot
    product of row i of `a` with column j of `b`.

    Parameters
    ----------
    a : np.array or list of lists
        'n x p' array
    b : np.array or list of lists
        'p1 x q' array

    Examples
    --------
    >> matmul([[1, 2, 3], [4, 5, 6]], [[10, 11], [20, 21], [30, 31]])
    [[140, 146], [320, 335]]
    """
    n, p = len(a), len(a[0])
    p1, q = len(b), len(b[0])
    if p != p1:
        raise ValueError("Incompatible dimensions")
    # Build the result directly row by row (inlines the zeromat helper).
    return [
        [sum(a[i][k]*b[k][j] for k in range(p)) for j in range(q)]
        for i in range(n)
    ]
def zeromat(p, q):
    """
    Create a p x q matrix of integer zeros, as a list of row lists.

    Parameters
    ----------
    p : integer
        number of rows
    q : integer
        number of columns

    Examples
    --------
    >> zeromat(3, 4)
    [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]
    """
    return [[0 for _ in range(q)] for _ in range(p)]
| true |
46cc74ac981a0ea73356bcb1b9ab26984b7d5432 | Python | sarkamedo/gauge | /pages/login_page.py | UTF-8 | 1,483 | 2.6875 | 3 | [] | no_license | # if you experience issues with importing local modules try commenting following two lines...
import sys
sys.path.append(".")
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait as WDW
from selenium.webdriver.support import expected_conditions as EC
from browser import Browser
from pages.base_page import BasePage
import time
class LoginPage(BasePage):
    """Page object for the login screen: locators, lazy element lookups
    with explicit waits, and high-level sign-in actions."""
    # ============================== Locators ==============================
    _email_field = (By.CSS_SELECTOR, "#email")
    _pwd_field = (By.CSS_SELECTOR, "#passwd")
    _sign_in_btn = (By.CSS_SELECTOR, "#SubmitLogin")
    # ============================== Elements ==============================
    # Each property waits up to 15 s for the element before returning it.
    @property
    def get_email_field(self):
        return WDW(self.driver, 15).until(EC.visibility_of_element_located((self._email_field)))
    @property
    def get_pwd_field(self):
        return WDW(self.driver, 15).until(EC.visibility_of_element_located((self._pwd_field)))
    @property
    def get_sign_in_btn(self):
        return WDW(self.driver, 15).until(EC.element_to_be_clickable((self._sign_in_btn)))
    # ============================== Methods ==============================
    def enter_email(self, email):
        self.get_email_field.send_keys(email)
    def enter_password(self, password):
        self.get_pwd_field.send_keys(password)
    def click_sign_in(self):
        self.get_sign_in_btn.click()
| true |
b03163a0ef8b8b1a4928d07409eb9dde95d44219 | Python | saad2999/learn-first | /pattern.py | UTF-8 | 105 | 3.484375 | 3 | [] | no_license | n=int(input("enter the limit"))
x=" "
# Print a right-aligned numeric pattern: row i is indented by (n+1-i)
# spaces and repeats "i " i times.  `n` is read from input() above.
for i in range(n+1):
    f=str(i)+x
    print((n+1-i)*x, i*f)
| true |
8cffc49cfef9e928cdaed5e0c947d53ba912a292 | Python | qcwthu/CrossFit | /tasks/quarel.py | UTF-8 | 1,357 | 2.75 | 3 | [] | no_license | import os
import datasets
import numpy as np
from fewshot_gym_dataset import FewshotGymDataset, FewshotGymTextToTextDataset
class QUAREL(FewshotGymTextToTextDataset):
    """Few-shot gym wrapper for the HuggingFace `quarel` dataset
    (two-choice questions with inline "(A) ... (B) ..." options)."""

    def __init__(self):
        self.hf_identifier = "quarel"
        self.task_type = "text to text"
        self.license = "unknown"

    def get_answer_string(self, datapoint):
        """Extract the text of the correct option from the question string.

        Option A is the span between "(A)" and "(B)"; option B is the tail
        after "(B)".  A trailing "or " connective is stripped.
        """
        answer_index = datapoint["answer_index"]
        st1 = datapoint["question"].find("(A)")
        st2 = datapoint["question"].find("(B)")
        if answer_index == 0:
            answer_string = datapoint["question"][st1+4: st2]
        else:
            answer_string = datapoint["question"][st2+4: ]
        if answer_string.endswith("or "):
            answer_string = answer_string[:-3]
        return answer_string

    def map_hf_dataset_to_list(self, hf_dataset, split_name):
        # Produce (input, target) pairs: full question -> correct option text.
        lines = []
        for datapoint in hf_dataset[split_name]:
            answer_string = self.get_answer_string(datapoint)
            lines.append((datapoint["question"], answer_string.strip()))
        return lines

    def load_dataset(self):
        return datasets.load_dataset("quarel")
def main():
    """Generate 32-shot splits for each of the five standard seeds."""
    dataset = QUAREL()
    for seed in [100, 13, 21, 42, 87]:
        # generate_k_shot_data presumably writes the splits under ../data/;
        # the returned train/dev/test values are not used here.
        train, dev, test = dataset.generate_k_shot_data(k=32, seed=seed, path="../data/")
if __name__ == "__main__":
main() | true |
b1bd07ca01789adaff74ffe17b8aa38a30838ac6 | Python | s1ko/LearnPython | /Intermediate/13__name__/assignments/shop/lib/db.py | UTF-8 | 1,122 | 2.53125 | 3 | [
"MIT"
] | permissive | # Date: 08/20/2018
# Author: Pure-L0G1C
# Description: A database of all the items in the store
db = {
1: {
'name': 'hygiene',
'items': {
1: { 'name': 'deodorant', 'price': 3.97 },
2: { 'name': 'after-shave', 'price': 7.61 },
3: { 'name': 'shampoo', 'price': 6.99 }
}
},
2: {
'name': 'food',
'items': {
1: { 'name': 'pizza', 'price': 9.89 },
2: { 'name': 'pizza & BBQ-wings', 'price': 35.12 },
3: { 'name': 'salad', 'price': 8.69 }
}
},
3: {
'name': 'beverage',
'items': {
1: { 'name': 'mtn-dew', 'price': 4.68 },
2: { 'name': 'coca-cola', 'price': 4.68 },
3: { 'name': 'pepsi', 'price': 4.68 }
}
},
4: {
'name': 'book',
'items': {
1: { 'name': 'the giver', 'price': 8.99 },
2: { 'name': 'crazy house', 'price': 9.62 },
3: { 'name': 'the president is missing', 'price': 18.00 }
}
},
5: {
'name': 'electronic',
'items': {
1: { 'name': 'macbook air', 'price': 1449.99 },
2: { 'name': 'UHDTV 40 inches', 'price': 329.99 },
3: { 'name': 'OfficeJet Pro 8720', 'price': 179.99 }
}
}
} | true |
def solution(x, n):
    """Return the first n multiples of x: [x, 2*x, ..., n*x]."""
    # A comprehension replaces the manual accumulator loop and the two
    # commented-out alternatives that duplicated it.
    return [x * (i + 1) for i in range(n)]
8a60be2047afca24143a3d949ea1e9e55ef77cac | Python | miriambenvil/curso-tdd-python | /katas/06-bank/src/acceptance_test.py | UTF-8 | 999 | 2.890625 | 3 | [] | no_license | import unittest
from mycalendar import Calendar
from unittest.mock import Mock
from account_service import AccountService
from console import Console
class AcceptanceTest(unittest.TestCase):
    """End-to-end statement-printing scenario against a mocked console
    and calendar."""

    def test_print_statement_containing_all_transactions(self):
        from unittest.mock import call
        console = Mock(spec=Console)
        # One date per transaction.  NOTE(review): this assumes
        # AccountService consumes the dates from this list -- confirm
        # Calendar.current_date's contract.
        attrs = {'current_date.return_value': ["01/04/2014",
                                               "02/04/2014",
                                               "10/04/2014"]}
        calendar = Mock(spec=Calendar, **attrs)
        account = AccountService(console, calendar)

        account.deposit(1000)
        account.withdraw(100)
        account.deposit(500)
        account.print_statement()

        # BUG FIX: assert_called_with only inspects the *most recent* call,
        # so the original chain of four assert_called_with checks could
        # never all pass.  assert_has_calls verifies the full ordered
        # sequence of print_line calls instead (header first, then the
        # transactions newest-to-oldest).
        console.print_line.assert_has_calls([
            call("DATE | AMOUNT | BALANCE"),
            call("10/04/2014 | 500 | 1400"),
            call("02/04/2014 | -100 | 900"),
            call("01/04/2014 | 1000 | 1000"),
        ])
| true |
72771116ee541d6c62f8719b520e19abbc96e28c | Python | yasirabd/kattis | /ptice.py | UTF-8 | 555 | 3.375 | 3 | [] | no_license | n = int(input())
ans = input()
# Each contestant answers multiple-choice questions by cycling through a
# fixed pattern of letters.
adrian_pattern = 'ABC'
bruno_pattern = 'BABC'
goran_pattern = 'CCAABB'
score_adrian = 0
score_bruno = 0
score_goran = 0
# Compare every answer in the key against each cyclic pattern.
for i in range(len(ans)):
    if ans[i] == adrian_pattern[i%3]: score_adrian += 1
    if ans[i] == bruno_pattern[i%4]: score_bruno += 1
    if ans[i] == goran_pattern[i%6]: score_goran += 1
best_score = max(score_adrian, score_bruno, score_goran)
print(best_score)
# Print all contestants tied for the best score, in fixed order.
if score_adrian == best_score: print('Adrian')
if score_bruno == best_score: print('Bruno')
if score_goran == best_score: print('Goran')
cd509e6037a030f3e8b45e0db66c5e9398eb7c56 | Python | frhrdr/deep_restoration | /deep_restoration/utils/feature_statistics.py | UTF-8 | 9,190 | 2.65625 | 3 | [] | no_license | import numpy as np
import tensorflow as tf
try:
import matplotlib
matplotlib.use('qt5agg', warn=False, force=True)
import matplotlib.pyplot as plt
except ImportError:
print('failed to load Matplotlib')
from matplotlib.colors import LogNorm
from utils.filehandling import get_feature_files
from sklearn.decomposition import PCA, FastICA
layer_list = ["conv1_1", "conv1_2", "pool1", "conv2_1", "conv2_2", "pool2",
"conv3_1", "conv3_2", "conv3_3", "pool3", "conv4_1", "conv4_2", "conv4_3", "pool4",
"conv5_1", "conv5_2", "conv5_3", "pool5", "fc6", "relu6",
"fc7", "relu7", "fc8", "prob"]
def gini_index(feature_map):
    """Gini sparsity index of a feature map (Hurley & Rickard).

    Flattens and sorts the activations ascending, then evaluates
    G = 1 - (2 / (||c||_1 * n)) * sum_{k=1..n} c_k * (n - k + 0.5),
    which is 0 for a perfectly uniform vector and approaches 1 for a
    maximally sparse one.

    NOTE(review): values are sorted raw while the norm uses absolute
    values; the formula assumes non-negative activations -- confirm for
    layers that can go negative.
    """
    feature_vec = np.sort(feature_map.flatten(), axis=0)
    norm = np.sum(np.abs(feature_vec))
    acc = 0.0
    n = feature_vec.shape[0]
    # BUG FIX: enumerate() is 0-based while the Gini formula's rank k is
    # 1-based; start=1 restores the intended (n - k + 0.5) weights.  The
    # old code effectively used (n - k + 1.5), e.g. returning -2 instead
    # of 0 for a single-element vector.
    for k, feat in enumerate(feature_vec, start=1):
        acc += feat * (n - k + 0.5)
    acc *= 2.0 / (norm * n)
    gini = 1.0 - acc
    return gini
def gini_index_hist(layer_name, subset_file='subset_cutoff_200_images.txt'):
    """Histogram the Gini index over all stored feature maps of a layer
    and save the plot to ./plots/gini_hist_<layer>.png."""
    files = get_feature_files(layer_name, subset_file)
    ginis = []
    for idx, file in enumerate(files):
        if idx % 10 == 0:
            print('processing file ' + str(idx) + ' / ' + str(len(files)))
        mat = np.load(file)
        ginis.append(gini_index(mat))
    # NOTE(review): `normed` was removed in modern matplotlib (use
    # `density`) -- this call targets an older matplotlib version.
    plt.hist(ginis, 50, normed=1, facecolor='green', alpha=0.75)
    plt.savefig('./plots/gini_hist_' + layer_name + '.png', format='png')
def gram_matrix(feature_maps):
    """Gram matrix of an (H, W, C) feature-map stack.

    Channels become rows and spatial positions columns; the global mean is
    subtracted before forming the [C, C] inner-product matrix.
    """
    assert len(feature_maps.shape) == 3
    num_maps = feature_maps.shape[-1]
    flat = np.reshape(np.transpose(feature_maps, (2, 0, 1)), (num_maps, -1))
    # BUG FIX: subtract out-of-place.  np.reshape can return a *view* of
    # the caller's array (e.g. for C == 1), in which case the previous
    # in-place `-=` silently modified the input feature maps.
    flat = flat - flat.mean()
    gram = np.dot(flat, flat.T)
    return gram
def avg_gram_matrix(layer_name, subset_file='subset_cutoff_200_images.txt'):
    """Average the Gram matrices of a layer's stored feature maps and save
    the matrix image to ./plots/avg_gram_<layer>.png."""
    files = get_feature_files(layer_name, subset_file)
    avg_gram = None
    for idx, file in enumerate(files):
        if idx % 10 == 0:
            print('processing file ' + str(idx) + ' / ' + str(len(files)))
        mat = np.load(file)
        # First file initializes the accumulator; later ones add to it.
        if avg_gram is None:
            avg_gram = gram_matrix(mat)
        else:
            avg_gram += gram_matrix(mat)
    avg_gram /= len(files)
    plt.matshow(avg_gram, interpolation='none')
    plt.savefig('./plots/avg_gram_' + layer_name + '.png', format='png')
def gatys_gram_loss(feat_maps_a, feat_maps_b):
    """Gatys-style style loss: squared Frobenius distance between the Gram
    matrices of two (H, W, C) feature-map stacks, normalized by
    4 * C^2 * (H*W)^2."""
    num_maps = feat_maps_a.shape[2]
    num_positions = feat_maps_a.shape[0] * feat_maps_a.shape[1]
    normalizer = 4 * num_maps * num_maps * num_positions * num_positions
    diff = gram_matrix(feat_maps_a) - gram_matrix(feat_maps_b)
    return np.sum(np.multiply(diff, diff)) / normalizer
def covariance_matrix(feature_map):
    """Covariance matrix over an (H, W, C) feature-map stack.

    NOTE(review): np.cov treats each *row* of its argument as a variable,
    so cov(feature_map.T) here yields an [H*W, H*W] pixel-covariance, not
    a [C, C] channel covariance.  The commented-out lines below suggest a
    channel covariance was also tried -- confirm which one is intended.
    """
    assert len(feature_map.shape) == 3
    num_maps = feature_map.shape[-1]
    feature_map = np.reshape(np.transpose(feature_map, (2, 0, 1)), (num_maps, -1))
    # feature_map -= feature_map.mean()
    # cov = np.dot(feature_map.T, feature_map)
    cov = np.cov(feature_map.T)
    return cov
def avg_covariance_matrix(layer_name, subset_file='subset_cutoff_200_images.txt'):
    """Average covariance_matrix() over a layer's stored feature maps and
    save the matrix image to ./plots/avg_cov_<layer>.png."""
    files = get_feature_files(layer_name, subset_file)
    avg_cov = None
    for idx, file in enumerate(files):
        if idx % 10 == 0:
            print('processing file ' + str(idx) + ' / ' + str(len(files)))
        mat = np.load(file)
        if avg_cov is None:
            avg_cov = covariance_matrix(mat)
        else:
            avg_cov += covariance_matrix(mat)
    avg_cov /= len(files)
    plt.matshow(avg_cov, interpolation='none')
    plt.savefig('./plots/avg_cov_' + layer_name + '.png', format='png', dpi=1500)
def inverse_covariance_matrix(feature_map):
    """Moore-Penrose pseudo-inverse of the feature map's covariance matrix."""
    return np.linalg.pinv(covariance_matrix(feature_map))
def inverse_avg_covariance_matrix(layer_name, subset_file='subset_cutoff_200_images.txt', log=False):
    """Pseudo-invert the averaged covariance matrix of a layer and save a
    linear- or log-scaled visualization under ./plots/."""
    files = get_feature_files(layer_name, subset_file)
    avg_cov = None
    for idx, file in enumerate(files):
        if idx % 10 == 0:
            print('processing file ' + str(idx) + ' / ' + str(len(files)))
        mat = np.load(file)
        if avg_cov is None:
            avg_cov = covariance_matrix(mat)
        else:
            avg_cov += covariance_matrix(mat)
    avg_cov /= len(files)
    inv_cov = np.linalg.pinv(avg_cov)
    # Clamp +inf entries to the largest finite value before plotting.
    inv_cov[inv_cov == np.inf] = inv_cov[inv_cov != np.inf].max()
    if log:
        inv_cov[inv_cov == -np.inf] = inv_cov[inv_cov != -np.inf].min()
        # NOTE(review): `+= inv_cov.min()` makes values *more* negative
        # when the minimum is negative; a shift to positive territory for
        # LogNorm would need `-= min` -- confirm intent.
        inv_cov += inv_cov.min() + 0.0000000001
        norm = LogNorm(vmin=inv_cov.min(), vmax=inv_cov.max())
        plt.matshow(inv_cov, norm=norm, interpolation='none')
        plt.savefig('./plots/inv_avg_cov_' + layer_name + '_log.png', format='png', dpi=1500)
    else:
        inv_cov[inv_cov == -np.inf] = inv_cov[inv_cov != -np.inf].min()
        plt.matshow(inv_cov, interpolation='none')
        plt.savefig('./plots/inv_avg_cov_' + layer_name + '_lin.png', format='png', dpi=1500)
def feat_map_vis(feature_map, max_n, highest_act):
    """Display up to max_n channels of an (H, W, C) feature map in a grid.

    When highest_act is True, channels are ordered by descending mean
    absolute activation before truncation.

    NOTE(review): `np.ceil(n // cols)` applies ceil to an already-floored
    integer division, so `rows * cols` can be smaller than n -- probably
    `np.ceil(n / cols)` was intended.
    """
    assert len(feature_map.shape) == 3
    num_maps = feature_map.shape[-1]
    feature_map = np.transpose(feature_map, (2, 0, 1))
    if highest_act:
        mat = np.reshape(feature_map, (num_maps, -1))
        means = np.mean(np.abs(mat), axis=1)
        # argsort ascending, reversed -> indices of strongest channels first.
        sort_ids = np.argsort(means)[::-1]
        mat = feature_map[sort_ids, :, :]
    else:
        mat = feature_map
    if mat.shape[0] > max_n:
        mat = mat[:max_n, :, :]
    n = mat.shape[0]
    cols = int(np.ceil(np.sqrt(n)))
    rows = int(np.ceil(n // cols))
    fig, ax_list = plt.subplots(ncols=cols, nrows=rows)
    ax_list = ax_list.flatten()
    for idx, ax in enumerate(ax_list):
        if idx >= n:
            ax.axis('off')
        else:
            ax.matshow(mat[idx, :, :], interpolation='none')
            ax.get_xaxis().set_visible(False)
            ax.get_yaxis().set_visible(False)
    plt.show()
def visualize_feature_map(layer_name, subset_file='subset_cutoff_200_images.txt',
                          image_index=0, max_n=25, highest_act=True):
    """Load the stored feature map of one image and display its channels."""
    file = get_feature_files(layer_name, subset_file)[image_index]
    feat_map = np.load(file)
    feat_map_vis(feat_map, max_n, highest_act)
def feat_map_pca(layer_name, subset_file='subset_cutoff_200_images.txt', map_index=0, n_plots=25):
    """Fit a PCA across images on a single channel (map_index) of a layer's
    feature maps and save a grid of the leading components."""
    files = get_feature_files(layer_name, subset_file)
    maps = []
    for idx, file in enumerate(files):
        mat = np.load(file)
        # Remember the spatial shape so components can be reshaped back.
        map_shape = mat[:, :, map_index].shape
        maps.append(mat[:, :, map_index].flatten())
    maps = np.stack(maps, axis=0)
    pca = PCA()
    pca.fit(maps)
    cols = int(np.ceil(np.sqrt(n_plots)))
    rows = int(np.ceil(n_plots // cols))
    fig, ax_list = plt.subplots(ncols=cols, nrows=rows)
    ax_list = ax_list.flatten()
    for idx, ax in enumerate(ax_list):
        if idx >= n_plots:
            ax.axis('off')
        else:
            ax.matshow(np.reshape(pca.components_[idx, :], map_shape), interpolation='none')
            ax.get_xaxis().set_visible(False)
            ax.get_yaxis().set_visible(False)
    plt.savefig('./plots/pca_' + layer_name + '_map_' + str(map_index) + '.png', format='png', dpi=1500)
    plt.close()
def feat_map_ica(layer_name, subset_file='subset_cutoff_200_images.txt', map_index=0, n_plots=25):
    """Fit FastICA across images on a single channel (map_index) of a
    layer's feature maps and save a grid of independent components."""
    files = get_feature_files(layer_name, subset_file)
    maps = []
    for idx, file in enumerate(files):
        mat = np.load(file)
        # Remember the spatial shape so components can be reshaped back.
        map_shape = mat[:, :, map_index].shape
        maps.append(mat[:, :, map_index].flatten())
    maps = np.stack(maps, axis=0)
    ica = FastICA()
    ica.fit(maps)
    cols = int(np.ceil(np.sqrt(n_plots)))
    rows = int(np.ceil(n_plots // cols))
    fig, ax_list = plt.subplots(ncols=cols, nrows=rows)
    ax_list = ax_list.flatten()
    for idx, ax in enumerate(ax_list):
        if idx >= n_plots:
            ax.axis('off')
        else:
            ax.matshow(np.reshape(ica.components_[idx, :], map_shape), interpolation='none')
            ax.get_xaxis().set_visible(False)
            ax.get_yaxis().set_visible(False)
    plt.savefig('./plots/ica_' + layer_name + '_map_' + str(map_index) + '.png', format='png', dpi=1500)
    plt.close()
def inv_avg_gram_matrix(layer_name, subset_file='subset_cutoff_200_images.txt'):
    """Pseudo-invert the average Gram matrix of a layer's feature maps and
    save the matrix image to ./plots/inv_avg_gram_<layer>.png."""
    files = get_feature_files(layer_name, subset_file)
    avg_gram = None
    for idx, file in enumerate(files):
        if idx % 10 == 0:
            print('processing file ' + str(idx) + ' / ' + str(len(files)))
        mat = np.load(file)
        if avg_gram is None:
            avg_gram = gram_matrix(mat)
        else:
            avg_gram += gram_matrix(mat)
    avg_gram /= len(files)
    inv_gram = np.linalg.pinv(avg_gram)
    plt.matshow(inv_gram, interpolation='none')
    plt.savefig('./plots/inv_avg_gram_' + layer_name + '.png', format='png', dpi=1500)
def gram_tensor(feature_map):
    """TensorFlow counterpart of gram_matrix(): mean-centered [C, C] Gram
    matrix of an (H, W, C) feature-map tensor.

    NOTE(review): `.value` on dimension objects is TF1-era API -- confirm
    the targeted TensorFlow version.
    """
    map_shape = [k.value for k in feature_map.get_shape()]
    assert len(map_shape) == 3
    num_maps = map_shape[-1]
    feature_map = tf.reshape(tf.transpose(feature_map, perm=(2, 0, 1)), shape=(num_maps, -1))
    feature_map -= tf.reduce_mean(feature_map)
    gram = tf.matmul(feature_map, feature_map, transpose_b=True)
    return gram
db5badc0b1c3d2f043354dfe83cf69899cc47064 | Python | Double-Wen/Python | /basic/basicmath.py | UTF-8 | 173 | 3.828125 | 4 | [] | no_license | print(2+2)
print(1/1) # the result is 1.0, and its type is float
x = int(input("Please input x:"))
y = int(input("Please input y:"))
# Both lines below print x mod y: by definition x % y == x - (x // y) * y.
print(x % y)
print(x - (x // y)*y)
| true |
f0f8875383feee28929e7c4f373c783434247a40 | Python | randy-earthlcd/ezLCD3xxPython | /examples/complexUI.py | UTF-8 | 3,421 | 2.5625 | 3 | [] | no_license | # Minimal ezLCD Python demo
#
import platform
import sys
import urllib2
import json
import time
import calendar
import datetime
sys.path.append("..\module")
from ezLCD3xx import *
def num(s):
    """Parse a numeric string: return an int when possible, else a float."""
    try:
        return int(s)
    # BUG FIX: the module never imports `exceptions`, so the previous
    # `except exceptions.ValueError` raised NameError whenever int()
    # failed; the builtin ValueError works on both Python 2 and 3.
    except ValueError:
        return float(s)
def ktof(t):
    """Convert Kelvin to Fahrenheit (uses 273 as the Celsius offset)."""
    celsius = t - 273
    return celsius * 1.8 + 32
def drawGrid():
    """Draw the chart background on the LCD: a black box plus a 10x24-cell
    grid in the region y=170..270, then restore the drawing state."""
    LCD.lineType(2)
    LCD.xy(0,30)
    LCD.color(BLACK)
    LCD.box(300,150,1)
    LCD.xy(0,0)
    LCD.color(151)
    # Horizontal grid lines every 10 px.
    for y in range(11):
        LCD.xy(0,(y*10)+170)
        LCD.line(479,(y*10)+170)
    # Vertical grid lines every 20 px, plus the right border.
    for x in range(24):
        LCD.xy(x*20,170)
        LCD.line(x*20,270)
    LCD.xy(479,170)
    LCD.line(479,270)
    LCD.lineType(0)
    LCD.color(WHITE)
def dt(u):
    """Convert a POSIX timestamp to a naive UTC datetime."""
    return datetime.datetime.utcfromtimestamp(u)
# Top-level demo loop (Python 2: print statements, urllib2).  Finds the
# ezLCD device, then fetches OpenWeatherMap data every 6 minutes and
# renders it on the display.
LCD = ezLCD(None)
comPort = LCD.findezLCD()
#check what OS we are on
#Windows
if platform.system() == 'Windows':
    for ez in range(0,len(comPort)):
        if comPort[ez][3] == 'Unit2':
            LCD = ezLCD(comPort[ez][0])
            break
#Mac
# NOTE(review): 'Dawrwin' is a typo for 'Darwin' -- this branch can
# never match on macOS.
elif platform.system() == 'Dawrwin':
    LCD = ezLCD('/dev/tty.usbsomething')
# Bail out if comport error
if LCD.openSerial()==False:
    print 'Error Opening Port'
    raise SystemExit
# Turn verbose off
LCD.verbose(OFF)
# Turn off button press info from ezLCD
LCD.wquiet(ON)
# CLear screen
LCD.cls()
# Set draw color to red
LCD.font('0')
#Pacific = USTimeZone(-8, "Pacific", "PST", "PDT")
#datetime.tzinfo=('PST')
datetime.timedelta(hours=-7)
#5358705
drawGrid()
while True:
    city = "huntingtonbeach"
    url = "http://openweathermap.org/data/2.5/weather?id=5358705" #5339840 #5358705"
    # NOTE(review): bare except leaves `weather` unset on the first failed
    # fetch, making json.loads below raise NameError.
    try :
        request = urllib2.Request(url)
        response = urllib2.urlopen(request)
        weather = response.read()
    except:
        print 'none'
    data = json.loads(weather)
    LCD.lineType(2)
    LCD.xy(0,0)
    LCD.color(BLACK)
    LCD.box(300,150,1)
    LCD.color(WHITE)
    print data
    icon = data['weather'][0]['icon']
    Time = dt( data['sys']['sunset'] )
    print Time
    pressure = data['main']['pressure']
    #windG = data['wind']['gust']
    windS = data['wind']['speed']
    windD = data['wind']['deg']
    LCD.picture(icon + '.gif',400,10)
    # API temperatures are Kelvin; convert to Fahrenheit for display.
    temp = ktof(num(data['main']['temp']))
    tempmin = ktof(num(data['main']['temp_min']))
    tempmax = ktof(num(data['main']['temp_max']))
    humidity =data['main']['humidity']
    sky = data['weather'][0]['description']
    LCD.printString('Weather For %s' % data['name'], 0, 0)
    LCD.printString('Temp %d' % temp,0, 25)
    LCD.printString('Temp Low %d' % tempmin,0, 50)
    LCD.printString('Temp High %d' % tempmax,0, 75)
    LCD.printString('Humidity %s' % humidity,0, 100)
    LCD.printString('Pressure %s hpa' % pressure,0, 125)
    LCD.printString('%s' % sky,170,25)
    #LCD.printString('Wind Gust %s' % windG,150,60)
    LCD.printString('Speed %s' % windS,170,100)
    # Map the wind bearing to a compass sector name.
    # BUG(review): range(348,11) is empty, so the North sector never
    # matches (needs wraparound handling); also, if the API returns a
    # float bearing, `x in range(...)` membership is False for every
    # sector and the raw number is printed instead.
    if windD in range(348,11):
        windD = 'N'
    if windD in range(11,33):
        windD = 'NNE'
    if windD in range(33,56):
        windD = 'NE'
    if windD in range(56,78):
        windD = 'ENE'
    if windD in range(78,101):
        windD = 'E'
    if windD in range(101,123):
        windD = 'ESE'
    if windD in range(123,146):
        windD = 'SE'
    if windD in range(146,168):
        windD = 'SSE'
    if windD in range(168,191):
        windD = 'S'
    if windD in range(191,213):
        windD = 'SSW'
    if windD in range(213,236):
        windD = 'SW'
    if windD in range(236,258):
        windD = 'WSW'
    if windD in range(258,281):
        windD = 'W'
    if windD in range(281,303):
        windD = 'WNW'
    if windD in range(303,326):
        windD = 'NW'
    if windD in range(326,348):
        windD = 'NNW'
    LCD.printString('Direction %s' % windD,170,125)
    time.sleep(360)
| true |
df145b9ce7145d8d755704bfe26ffc5d895ccb9d | Python | waylight3/boj | /py/2776.py | UTF-8 | 118 | 2.515625 | 3 | [] | no_license | k=input
# Code-golf membership queries (`k` is bound to input on the line above).
# Per test case: read the notebook size line (ignored), build a hash set
# from the notebook numbers, skip the query count, then print 1/0 for
# whether each queried number appears in the notebook.
for i in range(int(k())):
    a={};k()
    for d in k().split():a[d]=1
    k()
    for d in k().split():print(int(d in a))
e822daecf89f8e203f95339ecd170d78ce5a4a30 | Python | SWuchterl/ParticlePhysicsLabCourse | /23_Muon/efficiencies/all_efficiencies.py | UTF-8 | 524 | 2.8125 | 3 | [] | no_license | #!/usr/bin/python
import time
# Python 2 driver script: for each iteration folder, run the efficiency
# and Bayes analysis scripts for PMTs 1-4 via execfile().
for iteration in ['first', 'second', 'third', 'fourth']:
    time.sleep(1)
    for i in range(1, 5):
        filename = iteration + '_iteration/efficiency_PMT' + str(i) + '.py'
        filename2 = iteration + '_iteration/bayes_PMT' + str(i) + '.py'
        print '\n' + '************************************************************'
        print '\n' + 'executing ' + filename + '\n'
        execfile(filename)
        print '\n' + 'executing ' + filename2 + '\n'
        execfile(filename2)
| true |
7d204db7686fc9a4522af3868e346846fd374316 | Python | miss-grass/OpenSidewalks | /getCoord.py | UTF-8 | 562 | 2.609375 | 3 | [] | no_license | import numpy as np
import pandas as pd
def main():
    """Load transfer routes, keep rows with a non-zero walking distance,
    convert '|'-separated coordinates to ','-separated, and save them to
    coordinates.npy."""
    fields = ['exit_coord', 'transfer_coord', 'Walking_distance']
    df = pd.read_csv('API_Transfer_route.csv', skipinitialspace=True, usecols=fields, dtype=object)
    # dtype=object keeps the CSV values as strings, so compare against the
    # string '0' (the old `!= 0` comparison never filtered anything).
    res = df[df['Walking_distance'] != '0'].copy()
    # BUG FIX: iterrows() yields copies, so assigning into `row` never
    # modified `res`; use vectorized string replacement on the columns.
    res['exit_coord'] = res['exit_coord'].str.replace('|', ',', regex=False)
    res['transfer_coord'] = res['transfer_coord'].str.replace('|', ',', regex=False)
    res.pop('Walking_distance')
    print(res)
    np.save("coordinates.npy", res)
if __name__ == "__main__":
main()
| true |
7bd0616591803dcb68f8bfc4d0596fb693e1ec19 | Python | etraiger/wind-stats | /wind_stats/geometry.py | UTF-8 | 3,490 | 4.15625 | 4 | [
"MIT"
] | permissive | """Geometry operations module.
Basic affine 2D transformations.
"""
from math import cos, pi, sin
from typing import Tuple
import numpy as np
def affine_2d_transformation(
    coordinates: Tuple[np.ndarray, np.ndarray], matrix: np.ndarray
) -> Tuple[np.ndarray, np.ndarray]:
    """Apply a homogeneous 2D affine transform to point coordinates.

    The (x, y) arrays are lifted to homogeneous form and multiplied by
    the 3x3 matrix::

        [x']   | a  b  xoff |   [x]
        [y'] = | d  e  yoff | * [y]
        [1 ]   | 0  0  1    |   [1]

    Parameters
    ----------
    coordinates: tuple
        x, y coordinate arrays
    matrix:
        [3, 3] affine transformation matrix

    Returns
    -------
    transformed x, y coordinate arrays
    """
    if matrix.shape != (3, 3):  # pragma: no cover
        raise ValueError("2D transformation matrix must be of shape [3, 3]")
    xs, ys = coordinates
    homogeneous = np.array([xs, ys, np.ones(len(xs))])
    transformed = np.matmul(matrix, homogeneous)
    return transformed[0], transformed[1]
def rotate(
    coordinates,
    angle: float = 0,
    origin: Tuple[float, float] = (0, 0),
    use_radians=False,
) -> Tuple[np.ndarray, np.ndarray]:
    """Rotate coordinates around an origin point.

    Positive angles rotate counter-clockwise, negative ones clockwise.
    The angle is interpreted in degrees unless ``use_radians`` is True.

    The affine matrix applied is::

        | cos(θ)  -sin(θ)  xoff |
        | sin(θ)   cos(θ)  yoff |
        | 0        0       1    |

    with offsets derived from the origin point (x0, y0)::

        xoff = x0 - x0 * cos(θ) + y0 * sin(θ)
        yoff = y0 - x0 * sin(θ) - y0 * cos(θ)
    """
    theta = angle if use_radians else angle * pi / 180.0
    cos_t = cos(theta)
    sin_t = sin(theta)
    x0, y0 = origin
    x_shift = x0 - x0 * cos_t + y0 * sin_t
    y_shift = y0 - x0 * sin_t - y0 * cos_t
    rotation = np.array(
        [
            [cos_t, -sin_t, x_shift],
            [sin_t, cos_t, y_shift],
            [0, 0, 1],
        ]
    )
    return affine_2d_transformation(coordinates, rotation)
def translate(
coordinates: Tuple[np.ndarray, np.ndarray], xoff=0.0, yoff=0.0
) -> Tuple[np.ndarray, np.ndarray]:
r"""Return a translated geometry shifted by offsets along each dimension.
The general 2D affine transformation matrix for translation is:
| 1 0 xoff |
| 0 1 yoff |
| 0 0 1 |
"""
matrix = np.array(
[
[1, 0, xoff],
[0, 1, yoff],
[0, 0, 1],
]
)
return affine_2d_transformation(coordinates, matrix)
def azimuth_to_cartesian_angle(azimuth: float, radians: bool = False) -> float:
"""Convert cartographical azimuth angle to cartesian angle.
Parameters
----------
azimuth: float
cartographical azimuth angle 0 is North, 180 is South when using
degrees.
Returns
-------
angle: float
cartesian angle between [0...360°] or [0...2π] based on `radians`
parameter.
Examples
--------
>>> azimuth_to_regular_angle(90)
0
>>> azimuth_to_regular_angle(0)
90
>>> azimuth_to_regular_angle(270)
180
>>> azimuth_to_regular_angle(270, radians=True)
3.141592653589793
"""
cartesian_angle = (90 - azimuth) % 360
if radians:
cartesian_angle = np.deg2rad(cartesian_angle)
return cartesian_angle
| true |
4e8e23f848a7764828518fd96e73329176f14660 | Python | caleblogan/game-of-life-image | /game_of_life/image_processing.py | UTF-8 | 803 | 3.25 | 3 | [] | no_license | from PIL import Image
def convert_to_seed(img_path):
"""
Loads and image and creates a seed array suitable for being passed to GameGraph
:param img_path: the path of the image to load
:return: a 2 dimensional list used to seed GameGraph
"""
img = Image.open(img_path)
img = img.resize((28, 28))
img = img.convert('L')
converted_data = []
row = -1
for i, x in enumerate(img.getdata()):
if i % img.width == 0:
converted_data.append([])
row += 1
threshold = 220
converted_data[row].append((1 if x >= threshold else 0))
return converted_data
if __name__ == '__main__':
# convert_to_seed('test_img.png')
seed_data = convert_to_seed('C:/Users/cloga_000/Pictures/giants_morse_behind_enemy_lines.jpg')
| true |
56edc7abd6bc5450625012995e83fc3fb7eac5e3 | Python | z991/StockWaring | /trading.py | UTF-8 | 1,026 | 2.9375 | 3 | [] | no_license | import datetime
class Trading:
def is_start(self, config):
"""
判断是否为股票交易日(周一到周五,9:30-11:30 13:00-15:00)
:return:
"""
now = datetime.datetime.now()
hour = now.hour
min = now.minute
week = now.weekday()
# 是否运行代码
result = 1
# 优先从配置文件中获取,是否为交易日
is_start = config.manual_control()
if is_start == "ON":
return 1
if is_start == "OFF":
return 0
if week not in range(1, 6):
result = 0
if (hour == 9 or hour == 11) and min in range(0, 30):
result = 0
if hour in range(0, 9) or hour in range(15, 24) or hour == 0:
result = 0
if (week not in range(1, 6)) and ((hour == 9 or hour == 11) and min in (0, 31)):
result = 0
if hour in range(0, 9) or hour in range(15, 24) or hour == 0:
result = 0
return result
| true |
fa683d3ba944fba35386f4e6cf99d9dfc8f9edcc | Python | ligaoyong/pylearn | /collection/10.可变对象.py | UTF-8 | 764 | 4.28125 | 4 | [] | no_license | # # 可变对象
a = [1,2,3]
print('修改前:', a , id(a)) # id()函数可以得到一个对象的唯一标识
# # 通过索引修改列表
a[0] = 10
print('修改后:', a , id(a))
# # 为变量重新赋值
a = [4,5,6]
print('修改后:', a , id(a))
a = [1,2,3]
b = a
# b[0] = 10
b = [10,2,3]
# print("a",a,id(a))
# print("b",b,id(b))
# == != is is not
# == != 比较的是对象的值是否相等
# is is not 比较的是对象的id是否相等(比较两个对象是否是同一个对象)
a = [1,2,3]
b = [1,2,3]
print(a,b)
print(id(a),id(b))
print(a == b) # a和b的值相等,使用==会返回True
print(a is b) # a和b不是同一个对象,内存地址不同,使用is会返回False
print(a is not b)
| true |
6997a90b806b998f6c2f2deeea73a38b8d370d84 | Python | GaoPeiRu/python-vs-code | /7-4.py | UTF-8 | 349 | 3.125 | 3 | [] | no_license | x = input()
lst = list(map(int, x.split()))
i = 1
for i in range(1, len(lst)):
if lst[i] * lst[i-1] > 0:
print(str(lst[i - 1]), str(lst[i]))
break
elif i == len(lst)-1:
print("0")
#a = [int(s) for s in input().split()]
#for i in range(1, len(a)):
#if a[i - 1] * a[i] > 0:
#print(a[i - 1], a[i])
#break
#else:
#print(0) | true |
8100bfa26f9ccc2bea6531e26dc6e25ede1103f0 | Python | titos-carrasco/Scribbler2-Python | /scribbler2/robot/HS2Infrared.py | UTF-8 | 724 | 3.4375 | 3 | [
"MIT"
] | permissive | # -*- coding: utf-8 -*-
"""Clase de ayuda (helper) para retornar informacion del S2.
Es creada exclusivamente por metodos de la clase **Scribbler2** y retornadas
a su invocador. Sus atributos pueden ser accesados directamente.
"""
class HS2Infrared:
"""Valores de los sensores infrarojos del S2.
```
irLeft :int - Valor del sensor infrarojo izquierdo
irRight:int - Valor del sensor infrarojo derecho
```
"""
def __init__(self, irLeft:int, irRight:int)->None:
"""Constructor."""
self.irLeft = irLeft
self.irRight = irRight
def __str__(self):
"""Representacion modo texto de la clase."""
return "HS2Infrared(%d, %d)" % (self.irLeft, self.irRight)
| true |
c697b91eee5f1cd789d73906bce3070cc1ba1d91 | Python | Aasthaengg/IBMdataset | /Python_codes/p03457/s223877383.py | UTF-8 | 202 | 2.96875 | 3 | [] | no_license | N=int(input())
t,x,y=0,0,0
for i in range(N):
T,X,Y=map(int,input().split())
r=abs(X-x)+abs(Y-y)
t=T-t
if r%2!=t%2 or t<r:
print('No')
exit()
t=T;x=X;y=Y
print('Yes') | true |
bc8edf5c801378e420798f20dce113e75a220a15 | Python | GuangGuangLi-Artist/LearningPython | /面向对象/面向对象基本概念/LG_04_外界设置属性的问题.py | UTF-8 | 345 | 4.15625 | 4 | [] | no_license | # coding=utf-8
class Cat():
def eat(self):
#哪一个对象调用的方法,self就是哪一个对象的引用
print("%s爱吃鱼" %self.name)
def drink(self):
print("小猫爱喝水")
tom = Cat()
#可以使用.属性名 利用赋值语句就可以了
#tom.name = "Tom"
tom.drink()
tom.eat()
tom.name = "Tom" | true |
4804cb992c10c229c3b41b9d8dbff7df91da82b2 | Python | joshkeating/genius-mentions | /pull-lyrics-csv.py | UTF-8 | 2,973 | 2.96875 | 3 | [] | no_license | from bs4 import BeautifulSoup as bs
import requests
import csv
import re
import random
import time
def checkElementExistence(input):
if input is None:
return "NA"
else:
return input.get_text()
def processNRows(sourceFile, outputFile):
sourceFilePath = "./data/output/" + sourceFile
outputFilePath = "./data/output/" + outputFile
# open input file
csvfile = open(sourceFilePath, newline='')
reader = csv.reader(csvfile, delimiter=';', quotechar='|')
# open file to write updated results
output = open(outputFilePath, 'w', newline='')
writer = csv.writer(output, delimiter=';', quotechar='|', quoting=csv.QUOTE_MINIMAL)
# add the header to the file
writer.writerow(["song_id", "title", "title_with_featured", "primary_artist_id",
"primary_artist_name", "url", "album", "full_date", "date_month", "date_year", "lyrics"])
count = 0
# define the regex to replace metadata we dont care about
pattern = r"\[.*\]|\(x[0-9]\)|\n"
# skip the header
next(reader)
for currentRow in reader:
try:
# get data from file
songId = currentRow[0]
songTitle = currentRow[1]
titleWithFeat = currentRow[2]
primaryArtistId = currentRow[3]
artist = currentRow[4]
url = currentRow[5]
targetPage = requests.get(url)
html = bs(targetPage.text, "html.parser")
dateMonth = "NA"
dateYear = "NA"
# get raw text from a couple of elements on the page
lyricsStandard = checkElementExistence(html.find("div", class_="lyrics"))
fullDate = checkElementExistence(html.find("span", class_="metadata_unit-info metadata_unit-info--text_only"))
album = checkElementExistence(html.find("a", class_="song_album-info-title")).strip()
# process metadata
if fullDate != "NA":
splitDate = fullDate.split()
dateMonth = splitDate[0]
dateYear = splitDate[2]
# remove annotations and extra spaces
lyricsTemp = re.sub(pattern, " ", lyricsStandard)
lyrics = re.sub(' +',' ',lyricsTemp)
# write to file
writer.writerow([songId, songTitle, titleWithFeat, primaryArtistId, artist, url, album, fullDate, dateMonth, dateYear, lyrics])
count += 1
# print status and sleep for random intervals
if count % 20 == 0:
print(str(count) + " records processed")
waitTime = random.randint(4, 15)
print("Waiting " + str(waitTime) + " seconds...")
time.sleep(waitTime)
except IndexError:
print("Malformed html data")
except:
print("Something Broke!")
return
# run it
processNRows("ao6.csv", "full-ao6.csv")
| true |
e797107100bfec252dfe3e987d718350bda2d20a | Python | colinjroberts/sightreading-pygame | /chirp_maker.py | UTF-8 | 2,227 | 3.109375 | 3 | [
"MIT"
] | permissive | # note_maker.py
# Generates Note objects that contain all of the information needed to render
# notes on a staff including their y location on the staff (relatively to the
# top of the screen), the location of the correct image, the length value of
# the note, and the name.
# The images of quarter note are 15x45px
import pygame
# position of notes is relative to middle bar of staff
def make_chirps():
list_of_chirps = []
chirp_0_991 = pygame.mixer.Sound("snd/0-991.ogg")
chirp_0_992 = pygame.mixer.Sound("snd/0-992.ogg")
chirp_0_993 = pygame.mixer.Sound("snd/0-993.ogg")
chirp_0_994 = pygame.mixer.Sound("snd/0-994.ogg")
chirp_0_995 = pygame.mixer.Sound("snd/0-995.ogg")
chirp_0_996 = pygame.mixer.Sound("snd/0-996.ogg")
chirp_0_997 = pygame.mixer.Sound("snd/0-997.ogg")
chirp_0_998 = pygame.mixer.Sound("snd/0-998.ogg")
chirp_0_999 = pygame.mixer.Sound("snd/0-999.ogg")
chirp_1_000 = pygame.mixer.Sound("snd/1-000.ogg")
chirp_1_001 = pygame.mixer.Sound("snd/1-001.ogg")
chirp_1_002 = pygame.mixer.Sound("snd/1-002.ogg")
chirp_1_003 = pygame.mixer.Sound("snd/1-003.ogg")
chirp_1_004 = pygame.mixer.Sound("snd/1-004.ogg")
chirp_1_005 = pygame.mixer.Sound("snd/1-005.ogg")
chirp_1_006 = pygame.mixer.Sound("snd/1-006.ogg")
chirp_1_007 = pygame.mixer.Sound("snd/1-007.ogg")
chirp_1_008 = pygame.mixer.Sound("snd/1-008.ogg")
chirp_1_009 = pygame.mixer.Sound("snd/1-009.ogg")
list_of_chirps.append(chirp_0_991)
list_of_chirps.append(chirp_0_992)
list_of_chirps.append(chirp_0_993)
list_of_chirps.append(chirp_0_994)
list_of_chirps.append(chirp_0_995)
list_of_chirps.append(chirp_0_996)
list_of_chirps.append(chirp_0_997)
list_of_chirps.append(chirp_0_998)
list_of_chirps.append(chirp_0_999)
list_of_chirps.append(chirp_1_000)
list_of_chirps.append(chirp_1_001)
list_of_chirps.append(chirp_1_002)
list_of_chirps.append(chirp_1_003)
list_of_chirps.append(chirp_1_004)
list_of_chirps.append(chirp_1_005)
list_of_chirps.append(chirp_1_006)
list_of_chirps.append(chirp_1_007)
list_of_chirps.append(chirp_1_008)
list_of_chirps.append(chirp_1_009)
return list_of_chirps | true |
b0fe90b0eb8f9091de9b99f008ae515a1faf7e4a | Python | PhenixI/introduction-of-computer-udacity | /lesson6/Deep_Count .py | UTF-8 | 982 | 4.46875 | 4 | [] | no_license | # Deep Count
# The built-in len operator outputs the number of top-level elements in a List,
# but not the total number of elements. For this question, your goal is to count
# the total number of elements in a list, including all of the inner lists.
# Define a procedure, deep_count, that takes as input a list, and outputs the
# total number of elements in the list, including all elements in lists that it
# contains.
# For this procedure, you will need a way to test if a value is a list. We have
# provided a procedure, is_list(p) that does this:
def is_list(p):
return isinstance(p, list)
# It is not necessary to understand how is_list works. It returns True if the
# input is a List, and returns False otherwise.
def deep_count(p):
num = 0;
if not is_list(p):
return 0
else:
num += len(p)
for p0 in p:
num += deep_count(p0)
return num
def deep_count(p):
sum = 0
for e in p:
sum = sum + 1
if is_list(e):
sum = sum+deep_count(e)
return sum
| true |
9d8d88aa0bac7708eb1baf2a5c3b1b4be54fc5be | Python | Zigje9/Algorithm_study | /python/2166.py | UTF-8 | 304 | 2.953125 | 3 | [] | no_license | import sys
N = int(sys.stdin.readline())
pos = []
for _ in range(N):
pos.append(list(map(int, sys.stdin.readline().split())))
answer = 0
pos.append([pos[0][0], pos[0][1]])
A = 0
B = 0
for i in range(N):
A += pos[i][0]*pos[i+1][1]
B += pos[i+1][0]*pos[i][1]
print(round(abs(A-B)/2, 1))
| true |
8a8c44ad0f48b753bb2a64d05431651c111ab529 | Python | Stealthmate/atcoder | /arc/arc099/b.py | UTF-8 | 235 | 3.078125 | 3 | [] | no_license | k = int(input())
c = 1
j = 0
last_r = 1
for i in range(1, k + 1):
if c % 10 == 0:
c = 1
j += 1
n = int(str(c) + ("9" * j))
r = n / sum(int(x) for x in str(n))
if r < last_r:
print(n)
c += 1
| true |
90a690fe181a5e04371c61bb02eee15bd89ee28c | Python | LaurenceGA/programmingProjects | /python/Semester 1/projects/NumberGuesser.py | UTF-8 | 1,088 | 4.03125 | 4 | [] | no_license | __author__ = 'laurence'
import random
numsToGuess = 1000
maxNum = 100000
totalGuesses = 0
def guessnumber():
guesses = 0
randi = random.randint(0, maxNum)
print("Guessing numbers betweeen 0 and %d" % maxNum)
lowest = 0
highest = maxNum
guess = 0
while not guess == randi:
if (highest - lowest) == 1:
guess = int(lowest + (highest - lowest)/2) + 1
else:
guess = int(lowest + (highest - lowest)/2)
print("Guessing %d" % guess)
if guess > randi:
highest = guess
print("Lower")
elif guess < randi:
lowest = guess
print("Higher!")
else:
print("Correct! The number was %d" % randi)
guesses += 1
return guesses
for i in range(numsToGuess):
g = guessnumber()
totalGuesses += g
print("Took %d gusses" % g)
else:
print("\n\nAttempted to find a number between 0 and %d, %d times. %d guesses required on average. %d guesses made in total." % (maxNum, numsToGuess, totalGuesses/numsToGuess, totalGuesses)) | true |
ce942a42851bda493ed0a27aa0c9e1d041f7b9ed | Python | aditya-shukla-27/Basic-Python-Projects | /BMI_Game.py | UTF-8 | 574 | 4.03125 | 4 | [] | no_license | Weight = float(input("Please enter your weight in Kg \n"))
Height = float(input("Please enter your height in Cm \n"))
Height = Height/100
BMI = Weight/(Height**2)
print(f'So here is your BMI value {BMI}. Based on that ')
if BMI > 0:
if BMI < 16:
print("You are severely underweight")
elif BMI < 18.5:
print("You are underweight")
elif BMI <=25:
print("You are healthy")
elif BMI <= 30:
print("You are overweight")
else:
print("You are severely overweight")
else:
print("Enter valid details") | true |
9d128e793d5528f79c733c311ff3fece5f8c91d6 | Python | IamMarcIvanov/ImplementedAlgorithms | /minimum_spanning_tree.py | UTF-8 | 3,043 | 3.6875 | 4 | [] | no_license | import typing
import math
import heapq
def sorted_edge_list_from_adjacency_list(adj):
sorted_edge_list=[]
for source_ind in adj:
edge_dict = adj[source_ind]
# print(source_ind, edge_dict)
for target_ind in edge_dict:
sorted_edge_list.append(tuple([edge_dict[target_ind], source_ind, target_ind]))
sorted_edge_list.sort(key = lambda x:x[0])
return sorted_edge_list
def parent(vertex_ind, parent_list):
p = vertex_ind
while parent_list[p] != -1:
p = parent_list[p]
return p
def kruskal(adjacency_list: dict) -> typing.Tuple[float, list]:
# Weights should be positive (?)
# edges are considered undirected
n = len(adjacency_list) # Number of vertices
tree_edge_list=[]
mst_weight = 0
# Setting all parents to -1 (Disjoint Set Union)
parent_list = [-1 for i in range(n)]
# Get the sorted edge list given the adjacency list (edge = (weight, source, target))
sorted_edge_list = sorted_edge_list_from_adjacency_list(adjacency_list)
for edge in sorted_edge_list:
p1 = parent(edge[1], parent_list)
p2 = parent(edge[2], parent_list)
if p1 != p2:
# Union operation
parent_list[p2] = p1
tree_edge_list.append([edge[1], edge[2]])
mst_weight += edge[0]
return mst_weight, tree_edge_list
def prim(adjacency_list):
# This is very similar to Dijkstra's algorithm
# Making all edges undirected
for source_ind in adjacency_list:
for target_ind in adjacency_list[source_ind]:
if source_ind not in adjacency_list[target_ind]:
adjacency_list[target_ind][source_ind] = adjacency_list[source_ind][target_ind]
else:
adjacency_list[target_ind][source_ind] = min(adjacency_list[target_ind][source_ind], adjacency_list[source_ind][target_ind])
#
n = len(adjacency_list) # Number of vertices
Q = [(0, 0)]
min_weight_edge = {i: math.inf for i in range(n)}
min_weight_edge[0] = 0
parent = {i: -1 for i in range(n)}
visited = set()
mst_weight = 0
while len(Q) > 0:
v = heapq.heappop(Q)
vertex_ind = v[1]
if vertex_ind in visited:
continue
visited.add(vertex_ind)
mst_weight += min_weight_edge[vertex_ind]
# Updating min_edge_weight of all neighbour vertices of v
for target_ind in adjacency_list[vertex_ind]:
weight = adjacency_list[vertex_ind][target_ind]
if min_weight_edge[target_ind] > weight:
min_weight_edge[target_ind] = weight
parent[target_ind] = vertex_ind
heapq.heappush(Q, (min_weight_edge[target_ind], target_ind))
return mst_weight, [[i, parent[i]] for i in range(1,len(parent))]
# example (even giving directed adjacency list should be fine, algo won't consider it)
adj_list = {0: {1: 1, 2: 1, 3: 3}, 1: {2: 2}, 2: {3: 1}, 3: {0: 4}}
print(kruskal(adj_list))
print(prim(adj_list)) | true |
71a808c9f1b7e91006416f8c39515e33b91f14dc | Python | parna29/Nptel-PDSA | /Week1/gcd-euclid-3.py | UTF-8 | 300 | 3.21875 | 3 | [] | no_license | #Previous approach not feasible
#New approach: m = qn + r -> ad = q(bd) + r -> r also divisible by d
def gcd(m,n):
large = max(m,n)
small = min(m,n)
if large%small == 0:
return small
else:
return gcd(small, large%small)
print(gcd(12,36))
| true |
6d2fd85de116ffda1f5841841585860765ff428d | Python | jgwes/Python | /PythonScripts/stackExample.py | UTF-8 | 498 | 3.453125 | 3 | [] | no_license |
stack = [3,4,5]
print(stack)
stack.append(6)
stack.append(7)
print(stack)
print('Pop stack ', stack.pop())
print('Stack after pop ', stack)
print('Pop stack ', stack.pop())
print('Stack after pop ', stack)
print('Pop stack ', stack.pop())
print('Stack after pop ', stack)
print('Pop stack ', stack.pop())
print('Stack after pop ', stack)
# Empty stack
print('Pop stack ', stack.pop())
print('Stack after pop ', stack)
# IndexError
print('Pop stack ', stack.pop())
print('Stack after pop ', stack)
| true |
64b4aa2ad40d4321b1179193b2cfed298a3c50df | Python | PyLadiesTokyo/web_scraping | /201902/get_html_page.py | UTF-8 | 187 | 2.59375 | 3 | [] | no_license | import requests # ①
res = requests.get('https://pyladiestokyo.github.io/') # ②
print(res.content) # ③
with open('pyladies-top.html', 'wb') as fout:
fout.write(res.content)
| true |
b8723cf9cfdf7cbc8d4d9729419ecdbe7bad0bcc | Python | odudka/WumpusWorld | /WumpusWorld/tests/test_agent.py | UTF-8 | 3,643 | 2.734375 | 3 | [] | no_license | """ test_agent.py
~~~ Input Options ~~~
1. hjkl commands to move
2. Enter N to get a new random (non-modified) environment.
3. Enter M to get a new modified environment.
"""
from .. environment import getEnv
from . pretty import TitlePrint, printEnv, makePretty, bcolors
from .. stimuli import Stimuli
from .. agent import Agent
from .. import constants as C
from . sims import getTestEnv
import os
import time
class _GetchUnix:
def __init__(self):
import tty, sys
def __call__(self):
import sys, tty, termios
fd = sys.stdin.fileno()
old_settings = termios.tcgetattr(fd)
try:
tty.setraw(sys.stdin.fileno())
ch = sys.stdin.read(1)
finally:
termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
return ch
env = getEnv()
Env = makePretty(env)
stimArr = Stimuli(env).stimArr
A = Agent(stimArr)
oldIndex = (0, 0)
x, y = (0, 0)
T = '====== {0} ======'
fmt = """\
{3}\n{0}\n
{4}\n{1}\n
{5}\n{2}\n\n
"""
while(True):
TitlePrint('Probabilities')
PPs = [row[:] for row in A.KB.PBank.Probs]
WPs = [row[:] for row in A.KB.WBank.Probs]
DPs = []
for i in range(len(PPs)):
DPs.append([])
for j in range(len(PPs[0])):
DPs[i].append(round(PPs[i][j] + WPs[i][j], 3))
for XXs in [PPs, WPs, DPs]:
Indexes = [index for index in A.KB.visited | A.KB.options]
for i, row in enumerate(XXs):
for j, _ in enumerate(row):
if (i, j) not in Indexes:
XXs[i][j] = '-'
XXs[i] = str(XXs[i])
print(T.format('Death'))
print('\n'.join(DPs), end='\n\n')
TitlePrint('Environment')
printEnv(Env)
print('Position of Agent: ({0},{1})'.format(x, y), end='\n')
getch = _GetchUnix()
auto = False
if auto:
deathSen = "\nTHE AGENT HAS ENTERED A ROOM WITH A {0}!!! SHE IS DEAD!!!"
goldSen = "\nTHE AGENT HAS FOUND THE GOLD IN {0} MOVES!!!"
sentences = {C.Gold: goldSen.format(A.mCount),
C.Pit: deathSen.format("PIT")}
if A.dead or A.forfeit or A.foundG:
os.system('clear')
userInput = 'F'
else:
userInput = 'r'
time.sleep(0.5)
if A.foundG:
print(sentences[C.Gold])
time.sleep(2)
elif A.dead:
print(sentences[C.Pit])
time.sleep(2)
elif A.forfeit:
print("THE AGENT HAS FORFEITED THE GAME!!!")
time.sleep(1)
else:
userInput = getch()
os.system('clear')
oldIndex = (x, y)
if userInput in 'hjklr':
x, y = oldIndex
Env[x][y] = bcolors.GREEN + 'X' + bcolors.ENDC
if userInput == 'j':
A.down()
elif userInput == 'k':
A.up()
elif userInput == 'h':
A.left()
elif userInput == 'l':
A.right()
elif userInput == 'r':
A.act()
x, y = A.KB.location
Env[x][y] = bcolors.GREEN + bcolors.BOLD + 'A' + bcolors.ENDC
elif userInput in 'MNF0123456789':
if userInput == 'M':
env = getTestEnv()
Env = makePretty(env)
elif userInput == 'N':
env = getEnv()
Env = makePretty(env)
elif userInput == 'F':
env = getEnv(fair=True)
Env = makePretty(env)
else:
env = getTestEnv(userInput)
Env = makePretty(env)
x, y = (0, 0)
stimArr = Stimuli(env).stimArr
A = Agent(stimArr)
elif userInput == 'Q':
exit()
| true |
33d73c3e634b8eaf94cff22658dab4b939e58b77 | Python | brainysmurf/ssispowerschoolsyncer | /psmdlsyncer/files/ReadFiles.py | UTF-8 | 237 | 2.890625 | 3 | [] | no_license | """
Send it the path, it gives back the raw information line by line
"""
class SimpleReader:
def __init__(self, path):
self.path = path
def raw(self):
with open(self.path) as f:
return f.readlines()
| true |
eed3aa464c3208bc3b136f00d3b1fb66f6833226 | Python | piyushkulkarni9/Titanic-Machine-Learning-from-Disaster | /Titanic model py file.py | UTF-8 | 16,642 | 3.484375 | 3 | [] | no_license |
# coding: utf-8
# ### Titanic Machine Learning from Disaster
# In[ ]:
# Loading Modules
# In[1]:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
get_ipython().magic('matplotlib inline')
import seaborn as sns
sns.set() # setting seaborn default for plots
# In[ ]:
# Loading Datasets
# Loading train and test dataset
# In[2]:
train = pd.read_csv('train.csv')
test = pd.read_csv('test.csv')
# In[ ]:
# Looking into the training dataset
# In[3]:
train.head()
# In[4]:
train.shape
# In[5]:
# Describing training dataset
# In[6]:
train.describe()
# In[7]:
train.describe(include=['O'])
# In[8]:
train.info()
# In[9]:
train.isnull().sum()
# In[ ]:
# There are 177 rows with missing *Age*, 687 rows with missing *Cabin* and 2 rows with missing *Embarked* information.
# In[10]:
## Looking into the testing dataset
# In[11]:
test.shape
# In[12]:
test.head()
# In[13]:
test.info()
# In[14]:
#There are missing entries for *Age* in Test dataset as well.
# In[15]:
test.isnull().sum()
# In[ ]:
# There are 86 rows with missing *Age*, 327 rows with missing *Cabin* and 1 row with missing *Fare* information.
# In[ ]:
## Relationship between Features and Survival
# In[16]:
survived = train[train['Survived'] == 1]
not_survived = train[train['Survived'] == 0]
print ("Survived: %i (%.1f%%)"%(len(survived), float(len(survived))/len(train)*100.0))
print ("Not Survived: %i (%.1f%%)"%(len(not_survived), float(len(not_survived))/len(train)*100.0))
print ("Total: %i"%len(train))
# In[ ]:
### Pclass vs. Survival
# In[17]:
train.Pclass.value_counts()
# In[18]:
train.groupby('Pclass').Survived.value_counts()
# In[19]:
train[['Pclass', 'Survived']].groupby(['Pclass'], as_index=False).mean()
# In[20]:
#train.groupby('Pclass').Survived.mean().plot(kind='bar')
sns.barplot(x='Pclass', y='Survived', data=train)
# In[21]:
### Sex vs. Survival
# In[22]:
train.Sex.value_counts()
# In[23]:
train.groupby('Sex').Survived.value_counts()
# In[24]:
train[['Sex', 'Survived']].groupby(['Sex'], as_index=False).mean()
# In[25]:
#train.groupby('Sex').Survived.mean().plot(kind='bar')
sns.barplot(x='Sex', y='Survived', data=train)
# In[26]:
# Pclass & Sex vs. Survival
# In[27]:
tab = pd.crosstab(train['Pclass'], train['Sex'])
print (tab)
tab.div(tab.sum(1).astype(float), axis=0).plot(kind="bar", stacked=True)
plt.xlabel('Pclass')
plt.ylabel('Percentage')
# In[28]:
sns.factorplot('Sex', 'Survived', hue='Pclass', size=4, aspect=2, data=train)
# In[29]:
# From the above plot, it can be seen that:
# Women from 1st and 2nd Pclass have almost 100% survival chance.
# Men from 2nd and 3rd Pclass have only around 10% survival chance.
# In[30]:
# Pclass, Sex & Embarked vs. Survival
# In[31]:
sns.factorplot(x='Pclass', y='Survived', hue='Sex', col='Embarked', data=train)
# In[32]:
# From the above plot, it can be seen that:
# Almost all females from Pclass 1 and 2 survived.
# Females dying were mostly from 3rd Pclass.
# Males from Pclass 1 only have slightly higher survival chance than Pclass 2 and 3.
# In[ ]:
# Embarked vs. Survived
# In[33]:
train.Embarked.value_counts()
# In[34]:
train.groupby('Embarked').Survived.value_counts()
# In[35]:
train[['Embarked', 'Survived']].groupby(['Embarked'], as_index=False).mean()
# In[36]:
#train.groupby('Embarked').Survived.mean().plot(kind='bar')
sns.barplot(x='Embarked', y='Survived', data=train)
# In[37]:
#Parch vs. Survival
# In[38]:
train.Parch.value_counts()
# In[39]:
train.groupby('Parch').Survived.value_counts()
# In[40]:
train[['Parch', 'Survived']].groupby(['Parch'], as_index=False).mean()
# In[41]:
# ci=None disables the bootstrap confidence-interval bars on the plot.
sns.barplot(x='Parch', y='Survived', ci=None, data=train)
# In[42]:
# SibSp vs. Survival
# In[43]:
train.SibSp.value_counts()
# In[44]:
train.groupby('SibSp').Survived.value_counts()
# In[45]:
train[['SibSp', 'Survived']].groupby(['SibSp'], as_index=False).mean()
# In[46]:
sns.barplot(x='SibSp', y='Survived', ci=None, data=train)
# In[47]:
# Age vs. Survival
# In[48]:
# Three side-by-side violin plots of the age distribution, split by
# Survived, conditioned on Embarked / Pclass / Sex respectively.
fig = plt.figure(figsize=(15,5))
ax1 = fig.add_subplot(131)
ax2 = fig.add_subplot(132)
ax3 = fig.add_subplot(133)
sns.violinplot(x="Embarked", y="Age", hue="Survived", data=train, split=True, ax=ax1)
sns.violinplot(x="Pclass", y="Age", hue="Survived", data=train, split=True, ax=ax2)
sns.violinplot(x="Sex", y="Age", hue="Survived", data=train, split=True, ax=ax3)
# In[ ]:
# From Pclass violinplot, we can see that:
# 1st Pclass has very few children as compared to other two classes.
# 1st Plcass has more old people as compared to other two classes.
# Almost all children (between age 0 to 10) of 2nd Pclass survived.
# Most children of 3rd Pclass survived.
# Younger people of 1st Pclass survived as compared to its older people.
# From Sex violinplot, we can see that:
# Most male children (between age 0 to 14) survived.
# Females with age between 18 to 40 have better survival chance.
# In[50]:
# Age histograms (1-year bins, ages 0-80) of survivors (blue) vs.
# casualties (red): overall first, then separately for each sex.
# NOTE(review): sns.distplot is deprecated/removed in newer seaborn
# (replaced by histplot/displot) -- this assumes an older release.
total_survived = train[train['Survived']==1]
total_not_survived = train[train['Survived']==0]
male_survived = train[(train['Survived']==1) & (train['Sex']=="male")]
female_survived = train[(train['Survived']==1) & (train['Sex']=="female")]
male_not_survived = train[(train['Survived']==0) & (train['Sex']=="male")]
female_not_survived = train[(train['Survived']==0) & (train['Sex']=="female")]
plt.figure(figsize=[15,5])
plt.subplot(111)
sns.distplot(total_survived['Age'].dropna().values, bins=range(0, 81, 1), kde=False, color='blue')
sns.distplot(total_not_survived['Age'].dropna().values, bins=range(0, 81, 1), kde=False, color='red', axlabel='Age')
plt.figure(figsize=[15,5])
plt.subplot(121)
sns.distplot(female_survived['Age'].dropna().values, bins=range(0, 81, 1), kde=False, color='blue')
sns.distplot(female_not_survived['Age'].dropna().values, bins=range(0, 81, 1), kde=False, color='red', axlabel='Female Age')
plt.subplot(122)
sns.distplot(male_survived['Age'].dropna().values, bins=range(0, 81, 1), kde=False, color='blue')
sns.distplot(male_not_survived['Age'].dropna().values, bins=range(0, 81, 1), kde=False, color='red', axlabel='Male Age')
# In[ ]:
# From the above figures, we can see that:
# Combining both male and female, we can see that children with age between 0 to 5 have better chance of survival.
# Females with age between "18 to 40" and "50 and above" have higher chance of survival.
# Males with age between 0 to 14 have better chance of survival.
# In[ ]:
## Correlating Features
# In[ ]:
# Heatmap of Correlation between different features:
#Positive numbers = Positive correlation, i.e. increase in one feature will increase the other feature & vice-versa.
#Negative numbers = Negative correlation, i.e. increase in one feature will decrease the other feature & vice-versa.
# In our case, we focus on which features have strong positive or negative correlation with the *Survived* feature.
# In[51]:
# Pearson correlation matrix of the numeric columns, rendered as an
# annotated heatmap (vmax=.8 saturates the colour scale below 1.0).
corr=train.corr()#["Survived"]
plt.figure(figsize=(10, 10))
sns.heatmap(corr, vmax=.8, linewidths=0.01,
            square=True,annot=True,cmap='YlGnBu',linecolor="white")
plt.title('Correlation between features');
# In[ ]:
## Feature Extraction
# In[ ]:
# Name Feature
# Extract each passenger's honorific title (Mr, Mrs, Miss, ...) from the
# Name column; the title correlates with sex, age group and social status.
# In[52]:
train_test_data = [train, test] # combining train and test dataset
for dataset in train_test_data:
    # The title is the word immediately before a period, e.g. "Braund, Mr. Owen".
    # Use a raw string so '\.' is not an invalid escape sequence, and
    # expand=False so extract() returns a Series (instead of a one-column
    # DataFrame on modern pandas) for an unambiguous column assignment.
    dataset['Title'] = dataset.Name.str.extract(r' ([A-Za-z]+)\.', expand=False)
# In[53]:
train.head()
# In[54]:
# Passenger counts for each (Title, Sex) combination.
pd.crosstab(train['Title'], train['Sex'])
# In[55]:
# The number of passengers with each *Title* is shown above.
# Replacing some less common titles with the name "Other".
# In[56]:
for dataset in train_test_data:
    # Collapse rare titles into "Other" and normalise French variants.
    dataset['Title'] = dataset['Title'].replace(['Lady', 'Countess','Capt', 'Col', 'Don', 'Dr', 'Major', 'Rev', 'Sir', 'Jonkheer', 'Dona'], 'Other')
    dataset['Title'] = dataset['Title'].replace('Mlle', 'Miss')
    dataset['Title'] = dataset['Title'].replace('Ms', 'Miss')
    dataset['Title'] = dataset['Title'].replace('Mme', 'Mrs')
train[['Title', 'Survived']].groupby(['Title'], as_index=False).mean()
# In[57]:
# Converting the categorical *Title* values into numeric form.
# In[58]:
title_mapping = {"Mr": 1, "Miss": 2, "Mrs": 3, "Master": 4, "Other": 5}
for dataset in train_test_data:
    dataset['Title'] = dataset['Title'].map(title_mapping)
    dataset['Title'] = dataset['Title'].fillna(0)  # unmapped/missing titles become 0
# In[59]:
train.head()
# In[60]:
### Sex Feature
# Converting the categorical value of Sex into numeric.
# In[61]:
# Binary-encode sex in place on both datasets: female -> 1, male -> 0.
for dataset in train_test_data:
    dataset['Sex'] = dataset['Sex'].map( {'female': 1, 'male': 0} ).astype(int)
# In[62]:
train.head()
# In[ ]:
# Embarked Feature
# In[63]:
train.Embarked.unique()
# In[64]:
# Checking number of passengers for each Embarked category.
# In[65]:
train.Embarked.value_counts()
# In[ ]:
# Category "S" has maximum passengers. Replacing "nan" values with "S".
# In[66]:
# Impute the missing embarkation ports with the most frequent value, 'S'.
for dataset in train_test_data:
    dataset['Embarked'] = dataset['Embarked'].fillna('S')
# In[67]:
train.head()
# In[69]:
# Converting categorical value of Embarked into numeric.
# In[70]:
# Ordinal-encode the port of embarkation: S -> 0, C -> 1, Q -> 2.
for dataset in train_test_data:
    #print(dataset.Embarked.unique())
    dataset['Embarked'] = dataset['Embarked'].map( {'S': 0, 'C': 1, 'Q': 2} ).astype(int)
# In[71]:
train.head()
# In[ ]:
# Age Feature
# In[72]:
# We first fill the NULL values of *Age* with a random number between (mean_age - std_age) and (mean_age + std_age).
# We then create a new column named *AgeBand*. This categorizes age into 5 different age range.
# NOTE(review): np.random is not seeded, so the imputed ages (and anything
# downstream of them) differ between runs -- seed it for reproducibility.
# In[73]:
for dataset in train_test_data:
    age_avg = dataset['Age'].mean()
    age_std = dataset['Age'].std()
    age_null_count = dataset['Age'].isnull().sum()
    age_null_random_list = np.random.randint(age_avg - age_std, age_avg + age_std, size=age_null_count)
    # Single .loc assignment instead of chained indexing
    # (dataset['Age'][mask] = ...), which raises SettingWithCopyWarning and
    # is not guaranteed to write back to the frame on modern pandas.
    dataset.loc[dataset['Age'].isnull(), 'Age'] = age_null_random_list
    dataset['Age'] = dataset['Age'].astype(int)
train['AgeBand'] = pd.cut(train['Age'], 5)
print (train[['AgeBand', 'Survived']].groupby(['AgeBand'], as_index=False).mean())
# In[74]:
train.head()
# In[75]:
# Mapping Age according to AgeBand.
# In[76]:
for dataset in train_test_data:
    # Ordinal-encode the five equal-width bands: 0 = youngest ... 4 = oldest.
    dataset.loc[ dataset['Age'] <= 16, 'Age'] = 0
    dataset.loc[(dataset['Age'] > 16) & (dataset['Age'] <= 32), 'Age'] = 1
    dataset.loc[(dataset['Age'] > 32) & (dataset['Age'] <= 48), 'Age'] = 2
    dataset.loc[(dataset['Age'] > 48) & (dataset['Age'] <= 64), 'Age'] = 3
    dataset.loc[ dataset['Age'] > 64, 'Age'] = 4
# In[77]:
train.head()
# In[78]:
# Fare Feature
# In[79]:
# Replace missing *Fare* values with the median of Fare.
# In[80]:
# The median is computed on the training set only, so no information leaks
# from the test set into the imputation.
for dataset in train_test_data:
    dataset['Fare'] = dataset['Fare'].fillna(train['Fare'].median())
# In[81]:
# Creating FareBand.
# In[82]:
# qcut splits Fare into 4 quartile-based (equal-frequency) bands.
train['FareBand'] = pd.qcut(train['Fare'], 4)
print (train[['FareBand', 'Survived']].groupby(['FareBand'], as_index=False).mean())
# In[83]:
train.head()
# In[84]:
# Mapping Fare according to FareBand
# In[86]:
# The thresholds 7.91 / 14.454 / 31 are the quartile edges printed above.
for dataset in train_test_data:
    dataset.loc[ dataset['Fare'] <= 7.91, 'Fare'] = 0
    dataset.loc[(dataset['Fare'] > 7.91) & (dataset['Fare'] <= 14.454), 'Fare'] = 1
    dataset.loc[(dataset['Fare'] > 14.454) & (dataset['Fare'] <= 31), 'Fare'] = 2
    dataset.loc[ dataset['Fare'] > 31, 'Fare'] = 3
    dataset['Fare'] = dataset['Fare'].astype(int)
# In[87]:
train.head()
# In[88]:
# SibSp & Parch Feature
#Combining SibSp & Parch feature, we create a new feature named FamilySize.
# In[89]:
# FamilySize = siblings/spouses + parents/children + the passenger themself.
for dataset in train_test_data:
    dataset['FamilySize'] = dataset['SibSp'] + dataset['Parch'] + 1
print (train[['FamilySize', 'Survived']].groupby(['FamilySize'], as_index=False).mean())
# In[90]:
#Data shows that:
# Having FamilySize upto 4 (from 2 to 4) has better survival chance.
# FamilySize = 1, i.e. travelling alone has less survival chance.
# Large FamilySize (size of 5 and above) also have less survival chance.
# In[ ]:
# Let's create a new feature named IsAlone. This feature is used to check how is the survival chance while travelling alone as compared to travelling with family.
# In[91]:
# IsAlone is 1 for solo travellers (FamilySize == 1), otherwise 0.
for dataset in train_test_data:
    dataset['IsAlone'] = 0
    dataset.loc[dataset['FamilySize'] == 1, 'IsAlone'] = 1
print (train[['IsAlone', 'Survived']].groupby(['IsAlone'], as_index=False).mean())
# In[92]:
train.head(1)
# In[93]:
test.head(1)
# In[ ]:
# Feature Selection
# In[94]:
features_drop = ['Name', 'SibSp', 'Parch', 'Ticket', 'Cabin', 'FamilySize']
train = train.drop(features_drop, axis=1)
test = test.drop(features_drop, axis=1)
train = train.drop(['PassengerId', 'AgeBand', 'FareBand'], axis=1)
# In[95]:
train.head()
# In[96]:
test.head()
# In[ ]:
# Training classifier
# In[ ]:
# Classification & Accuracy
# In[ ]:
# Defining training and testing set
# In[98]:
X_train = train.drop('Survived', axis=1)
y_train = train['Survived']
X_test = test.drop("PassengerId", axis=1).copy()
X_train.shape, y_train.shape, X_test.shape
# In[99]:
# Importing Classifier Modules
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC, LinearSVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import Perceptron
from sklearn.linear_model import SGDClassifier
# In[ ]:
# Logistic Regression
# In[100]:
clf = LogisticRegression()
clf.fit(X_train, y_train)
y_pred_log_reg = clf.predict(X_test)
acc_log_reg = round( clf.score(X_train, y_train) * 100, 2)
print (str(acc_log_reg) + ' percent')
# In[ ]:
# k-Nearest Neighbors
# In[101]:
clf = KNeighborsClassifier(n_neighbors = 3)
clf.fit(X_train, y_train)
y_pred_knn = clf.predict(X_test)
acc_knn = round(clf.score(X_train, y_train) * 100, 2)
print (acc_knn)
# In[102]:
# Decision Tree
# In[103]:
clf = DecisionTreeClassifier()
clf.fit(X_train, y_train)
y_pred_decision_tree = clf.predict(X_test)
acc_decision_tree = round(clf.score(X_train, y_train) * 100, 2)
print (acc_decision_tree)
# In[104]:
# Random Forest
# In[108]:
clf = RandomForestClassifier(n_estimators=100)
clf.fit(X_train, y_train)
y_pred_random_forest = clf.predict(X_test)
acc_random_forest = round(clf.score(X_train, y_train) * 100, 2)
print (acc_random_forest)
# In[ ]:
# Confusion Matrix
# In[114]:
from sklearn.metrics import confusion_matrix
import itertools
clf = RandomForestClassifier(n_estimators=100)
clf.fit(X_train, y_train)
y_pred_random_forest_training_set = clf.predict(X_train)
acc_random_forest = round(clf.score(X_train, y_train) * 100, 2)
print ("Accuracy: %i %% \n"%acc_random_forest)
class_names = ['Survived', 'Not Survived']
# Compute confusion matrix
cnf_matrix = confusion_matrix(y_train, y_pred_random_forest_training_set)
np.set_printoptions(precision=2)
print ('Confusion Matrix in Numbers')
print (cnf_matrix)
print ('')
cnf_matrix_percent = cnf_matrix.astype('float') / cnf_matrix.sum(axis=1)[:, np.newaxis]
print ('Confusion Matrix in Percentage')
print (cnf_matrix_percent)
print ('')
true_class_names = ['True Survived', 'True Not Survived']
predicted_class_names = ['Predicted Survived', 'Predicted Not Survived']
df_cnf_matrix = pd.DataFrame(cnf_matrix,
index = true_class_names,
columns = predicted_class_names)
df_cnf_matrix_percent = pd.DataFrame(cnf_matrix_percent,
index = true_class_names,
columns = predicted_class_names)
plt.figure(figsize = (15,5))
plt.subplot(121)
sns.heatmap(df_cnf_matrix, annot=True, fmt='d', cmap='YlGnBu',linecolor="white")
plt.subplot(122)
sns.heatmap(df_cnf_matrix_percent, annot=True, cmap='YlGnBu',linecolor="white")
# In[ ]:
# Comparing Models
# In[113]:
models = pd.DataFrame({
'Model': ['Logistic Regression','KNN', 'Decision Tree', 'Random Forest'] ,
'Score': [acc_log_reg, acc_knn, acc_decision_tree, acc_random_forest]
})
models.sort_values(by='Score', ascending=False)
# In[ ]:
#From the above table, we can see that Decision Tree and Random Forest classfiers have the highest accuracy score.
#Among these two, we choose Random Forest classifier as it has the ability to limit overfitting as compared to Decision Trees.
| true |
f6bd7ecda6ce76a92c7cd2abdbff450387d59292 | Python | umd-huang-lab/Tensorial-Neural-Networks | /opt_einsum/helpers.py | UTF-8 | 13,575 | 2.8125 | 3 | [
"MIT"
] | permissive | """
Contains helper functions for opt_einsum testing scripts
"""
from collections import OrderedDict
import numpy as np
from .parser import get_symbol
__all__ = ["build_views", "compute_size_by_dict", "find_contraction", "flop_count"]
_valid_chars = "abcdefghijklmopqABC"
_sizes = np.array([2, 3, 4, 5, 4, 3, 2, 6, 5, 4, 3, 2, 5, 7, 4, 3, 2, 3, 4])
_default_dim_dict = {c: s for c, s in zip(_valid_chars, _sizes)}
def build_views(string, dimension_dict=None):
    """
    Build one random numpy array per input term of an einsum string.

    Parameters
    ----------
    string : str
        Einsum-style subscript string; only the part left of '->' is used.
    dimension_dict : dict, optional
        Mapping from index letter to axis size. Falls back to the
        module-level ``_default_dim_dict`` when omitted.

    Returns
    -------
    ret : list of np.ndarray
        One uniformly-random array per comma-separated input term.

    Examples
    --------
    >>> view = build_views('abbc', {'a': 2, 'b':3, 'c':5})
    >>> view[0].shape
    (2, 3, 3, 5)
    """
    sizes = _default_dim_dict if dimension_dict is None else dimension_dict
    input_spec = string.split('->')[0]
    return [np.random.rand(*(sizes[idx] for idx in term))
            for term in input_spec.split(',')]
def compute_size_by_dict(indices, idx_dict, conv_subscripts="",intprods={},intermediate="", new_conv=0):
    """
    Computes the product of the elements in indices based on the dictionary
    idx_dict.

    Parameters
    ----------
    indices : iterable
        Indices to base the product on.
    idx_dict : dictionary
        Dictionary of index sizes. For a convolution index the value is
        itself a dict mapping each convolving tensor to its true size,
        plus a "max" entry holding the padded (maximum) size.
    conv_subscripts : iterable
        Indices which will be convolved.
    intprods : dictionary
        Maps an intermediate's sorted index string to the list of input
        tensors that produced it. Read-only here, so the mutable default
        is harmless in this function.
    intermediate : str
        Sorted index string naming the intermediate/product being sized.
    new_conv : int/bool
        Whether a brand-new convolution is occurring in this product
        (the padded size of the convolved index is then squared).

    Returns
    -------
    ret : int
        The resulting product.

    Examples
    --------
    >>> compute_size_by_dict('abbc', {'a': 2, 'b':3, 'c':5})
    90
    """
    ret = 1
    for i in indices:
        if i in conv_subscripts:
            if len(indices)==1: #This means we are evaluating a conv idx in the output.
                ret *= idx_dict[i]["max"]
                continue
            # Tensors that can contribute a convolving leg for index i.
            poslist=[convdim for convdim in idx_dict[i]]
            intfactors=intprods[intermediate] #The tensors involved in this intermediate.
            convfactors=[factor for factor in intfactors if factor in poslist] #Check intermediates with
            #convolving leg
            if len(convfactors)==1:
                ret *=idx_dict[i][convfactors[0]] #Only one convolving leg, so use its true size.
            elif len(convfactors)>1: #Check if this is a brand new convolution.
                if new_conv:
                    # First time both convolving legs meet: padded size squared.
                    ret *=idx_dict[i]["max"]**2
                else:
                    ret *=idx_dict[i]["max"] #A padding has already occurred; use max.
        else:
            # Plain (non-convolution) index: its dict value is just the size.
            ret *= idx_dict[i]
    return ret
def find_contraction(positions, input_sets, output_set, intprods=None, conv_subscripts={}):
    """
    Finds the contraction and convolution for a given set of input and output sets.

    Parameters
    ----------
    positions : iterable
        Integer positions of terms used in the contraction.
    input_sets : list
        List of sets that represent the lhs side of the einsum subscript.
    output_set : set
        Set that represents the rhs side of the overall einsum subscript.
    intprods : dict, optional
        Maps an intermediate's sorted index string to the list of input
        tensors composing it. MUTATED: an entry for the new result is
        added. (Fixed: the default used to be a shared mutable ``{}``,
        which silently leaked entries across calls; it is now a fresh
        dict per call.)
    conv_subscripts : iterable
        The convolution subscripts (read-only).

    Returns
    -------
    new_result : set
        The indices of the resulting contraction.
    remaining : list
        List of sets that have not been contracted; the new set is
        appended to the end of this list.
    idx_removed : set
        Indices removed from the entire contraction.
    idx_contraction : set
        The indices used in the current contraction.
    new_conv : int
        1 when a convolution index appears in both contracted terms
        (a brand-new convolution), else 0.

    Examples
    --------
    # A simple dot product test case (intprods names the input tensors)
    >>> pos = (0, 1)
    >>> isets = [set('ab'), set('bc')]
    >>> oset = set('ac')
    >>> find_contraction(pos, isets, oset, {'ab': ['T0'], 'bc': ['T1']})
    ({'a', 'c'}, [{'a', 'c'}], {'b'}, {'a', 'b', 'c'}, 0)
    """
    if intprods is None:
        intprods = {}
    remaining = list(input_sets)
    new_conv = 0  # Flag: does this product start a brand-new convolution?
    # Lazy generator: pops the contracted terms out of `remaining` when the
    # union below consumes it.
    inputs = (remaining.pop(i) for i in sorted(positions, reverse=True))
    composition = []  # The tensors which make up this intermediate product.
    idx_contract = set.union(*inputs)
    for i in positions:
        # Canonical name of each contracted term: its sorted index string.
        intermediate = "".join(sorted("".join(set(input_sets[i]))))
        composition += intprods[intermediate]
    if conv_subscripts:
        for j in conv_subscripts:
            # A convolution index shared by both terms marks a new convolution.
            if j in (input_sets[positions[0]]) and j in (input_sets[positions[1]]):
                new_conv = 1
    idx_remain = output_set.union(*remaining)
    new_result = idx_remain & idx_contract
    new_result_str = "".join(sorted("".join(new_result)))
    intprods[new_result_str] = list(set(composition))
    idx_removed = (idx_contract - new_result)
    remaining.append(new_result)
    return new_result, remaining, idx_removed, idx_contract, new_conv
"""
def flop_count(idx_contraction, inner, num_terms, size_dictionary):
Computes the number of FLOPS in the contraction or convolution.
Parameters
----------
idx_contraction : iterable
The indices involved in the contraction
inner : bool
Does this contraction require an inner product?
num_terms : int
The number of terms in a contraction
size_dictionary : dict
The size of each of the indices in idx_contraction
Returns
-------
flop_count : int
The total number of FLOPS required for the contraction.
Examples
--------
>>> flop_count('abc', False, 1, {'a': 2, 'b':3, 'c':5})
90
>>> flop_count('abc', True, 2, {'a': 2, 'b':3, 'c':5})
270
overall_size = compute_size_by_dict(idx_contraction, size_dictionary)
op_factor = max(1, num_terms - 1)
if inner:
op_factor += 1
return overall_size * op_factor
"""
#tr: Fixing the flop_count.
def flop_count(idx_contraction, inner, num_terms, size_dictionary,conv_subscripts="",intprods={},intermediate="",input_sets=[],
        output_set=[], new_conv=0):
    """
    Computes the number of FLOPS in the contraction or convolution.

    Parameters
    ----------
    idx_contraction : iterable
        The indices involved in the contraction
    inner : bool
        Does this contraction require an inner product? (Currently unused:
        the op_factor increment for inner products is commented out below.)
    num_terms : int
        The number of terms in a contraction
    size_dictionary : dict
        The size of each of the indices in idx_contraction
    conv_subscripts: iterable
        The indices which will be convolved.
    intprods: dictionary
        A dictionary of which intermediate product each sequence of indices
        corresponds to. NOTE(review): mutable default; find_contraction
        mutates the dict passed in, so calls relying on the default share
        state across invocations -- confirm callers always pass their own.
    intermediate: str
        The sorted index string of this particular product/intermediate.
        When empty, the cost of a full left-to-right contraction of
        input_sets is computed instead.
    input_sets: iterable
        The list of all input sets. Only used if we need to compute a
        left-to-right cost.
    output_set: iterable
        The intended output.
    new_conv: bool
        Is a new convolution occurring?

    Returns
    -------
    flop_count : int
        The total number of FLOPS required for the contraction.

    Examples
    --------
    >>> flop_count('abc', False, 1, {'a': 2, 'b':3, 'c':5})
    90

    >>> flop_count('abc', True, 2, {'a': 2, 'b':3, 'c':5})
    270
    """
    #If the intermediate is empty, we need to calculate the full product from left to right.
    if intermediate=="":
        cost=1
        cost_list=[]
        len_inputs = len(input_sets) #Original length of inputs.
        #Complete the first left-to-right-product.
        contract_tuple = find_contraction((0,1), input_sets, output_set,intprods,conv_subscripts)
        out_inds, input_sets, idx_removed, idx_contract, new_sub_conv = contract_tuple
        # Canonical sorted-string name of the new intermediate.
        out_inds_format=sorted("".join(out_inds))
        out_inds_format="".join(out_inds_format)
        # Recurse with a non-empty intermediate to price this single product.
        cost = flop_count(idx_contract, idx_removed, 2, size_dictionary, conv_subscripts,intprods,out_inds_format, "","",new_sub_conv)
        cost_list.append(cost)
        for j in range(1,len_inputs):
            if len(input_sets)==1: break #Nothing left to contract.
            #Contract from left-to-right. The next intermediate product is pushed to the end of input_sets.
            contract_tuple = find_contraction((0,len(input_sets)-1), input_sets, output_set,intprods,conv_subscripts)
            out_inds, input_sets, idx_removed, idx_contract, new_sub_conv = contract_tuple
            out_inds_format=sorted("".join(out_inds))
            out_inds_format="".join(out_inds_format)
            cost = flop_count(idx_contract, idx_removed, 2, size_dictionary, conv_subscripts,intprods,
                    out_inds_format, "","",new_sub_conv)
            cost_list.append(cost)
        return sum(cost_list)
    # Single-product cost: size of the intermediate times (num_terms - 1) ops.
    overall_size = compute_size_by_dict(idx_contraction, size_dictionary, conv_subscripts, intprods,intermediate,new_conv)
    op_factor = max(1, num_terms - 1)
    #if inner:
    #    op_factor += 1
    return overall_size * op_factor
def rand_equation(n, reg, n_out=0, d_min=2, d_max=9, seed=None, global_dim=False, return_size_dict=False):
    """Generate a random contraction and shapes.

    Parameters
    ----------
    n : int
        Number of array arguments.
    reg : int
        'Regularity' of the contraction graph. This essentially determines how
        many indices each tensor shares with others on average.
    n_out : int, optional
        Number of output indices (i.e. the number of non-contracted indices).
        Defaults to 0, i.e., a contraction resulting in a scalar.
    d_min : int, optional
        Minimum dimension size.
    d_max : int, optional
        Maximum dimension size.
    seed: int, optional
        If not None, seed numpy's random generator with this.
    global_dim : bool, optional
        Add a global, 'broadcast', dimension to every operand.
    return_size_dict : bool, optional
        Return the mapping of indices to sizes.

    Returns
    -------
    eq : str
        The equation string.
    shapes : list[tuple[int]]
        The array shapes.
    size_dict : dict[str, int]
        The dict of index sizes, only returned if ``return_size_dict=True``.

    Examples
    --------
    >>> eq, shapes = rand_equation(n=10, reg=4, n_out=5, seed=42)
    >>> eq
    'oyeqn,tmaq,skpo,vg,hxui,n,fwxmr,hitplcj,kudlgfv,rywjsb->cebda'

    >>> shapes
    [(9, 5, 4, 5, 4),
     (4, 4, 8, 5),
     (9, 4, 6, 9),
     (6, 6),
     (6, 9, 7, 8),
     (4,),
     (9, 3, 9, 4, 9),
     (6, 8, 4, 6, 8, 6, 3),
     (4, 7, 8, 8, 6, 9, 6),
     (9, 5, 3, 3, 9, 5)]
    """
    if seed is not None:
        np.random.seed(seed)

    # total number of indices: each non-output index appears twice (a bond)
    num_inds = n * reg // 2 + n_out
    inputs = ["" for _ in range(n)]
    output = []

    size_dict = OrderedDict((get_symbol(i), np.random.randint(d_min, d_max + 1)) for i in range(num_inds))

    # generate a list of indices to place either once or twice
    def gen():
        for i, ix in enumerate(size_dict):
            # generate an outer index
            if i < n_out:
                output.append(ix)
                yield ix
            # generate a bond
            else:
                yield ix
                yield ix

    # add the indices randomly to the inputs
    for i, ix in enumerate(np.random.permutation(list(gen()))):
        # make sure all inputs have at least one index
        if i < n:
            inputs[i] += ix
        else:
            # don't add any traces on same op
            # NOTE(review): this rejection loop never terminates if every
            # input already contains ix -- presumably unreachable for sane
            # (n, reg) combinations; confirm.
            where = np.random.randint(0, n)
            while ix in inputs[where]:
                where = np.random.randint(0, n)
            inputs[where] += ix

    # possibly add the same global dim to every arg
    if global_dim:
        gdim = get_symbol(num_inds)
        size_dict[gdim] = np.random.randint(d_min, d_max + 1)
        for i in range(n):
            inputs[i] += gdim
        output += gdim

    # randomly transpose the output indices and form equation
    output = "".join(np.random.permutation(output))
    eq = "{}->{}".format(",".join(inputs), output)

    # make the shapes
    shapes = [tuple(size_dict[ix] for ix in op) for op in inputs]

    ret = (eq, shapes)
    if return_size_dict:
        ret += (size_dict, )
    return ret
| true |
e222fdd399c8d1d2c46510270148fce00f43e1a7 | Python | shaunharker/2016-12-15-Workshop | /source/pyCHomP/MorseMatchingHomotopy.py | UTF-8 | 2,663 | 3.0625 | 3 | [
"MIT"
] | permissive | ### MorseMatchingHomotopy.py
### MIT LICENSE 2016 Shaun Harker
from queue import *
import copy
from TopologicalSort import *
from Chain import *
def MorseMatchingHomotopy(M, cellcomplex):
    """
    Return a function to evaluate the discrete Morse homotopy
    associated with an acyclic partial matching.

    Parameters:
        M: the matching -- maps each cell to its partner; a fixed point
           (same dimension) is a critical "ace" cell.
        cellcomplex: iterable cell complex exposing boundary() and ring().

    Returns:
        (critical_cells, homotopy): the list of critical (ace) cells and
        the gamma homotopy, mapping a chain to a chain one dimension higher.
    """
    # Semantic sugar
    def bd(chain): return cellcomplex.boundary(chain)
    def dim(chain_or_cell): return chain_or_cell.dimension()
    # A cell is an ace (critical) if matched to itself-in-dimension,
    # a king if matched downward, a queen if matched upward.
    def isAce(cell): return dim(cell) == dim(M(cell))
    def isKing(cell): return dim(cell) > dim(M(cell))
    def isQueen(cell): return dim(cell) < dim(M(cell))
    # Compute critical cells from matching
    critical_cells = [ cell for cell in cellcomplex if isAce(cell) ]
    # Homotopy function definition
    def homotopy(chain):
        """
        Implement the discrete Morse homotopy gamma: given a chain, return
        the king chain whose boundary cancels all queens in it.
        """
        # We clone the input chain to prevent unexpected alterations
        work_chain = copy.deepcopy(chain)
        # We create a dictionary "priority" which gives the rank of queens.
        # Lower priority numbers will be processed first (i.e. priority ranking, not priority value)
        Queens = [Q for Q in cellcomplex if isQueen(Q)]
        def AdjacentQueens(Q): return [ q for q in bd(M(Q)) if isQueen(q) and q != Q ]
        priority = { Q : rank for (rank, Q) in enumerate(TopologicalSort(Queens, AdjacentQueens)) }
        # We arrange the priority queue for queens.
        # We use an auxiliary set "enqueued" to prevent the same queen from being
        # placed in the priority queue twice.
        # Ranks are unique, so queue entries (-rank, Q) never tie and the
        # cells themselves are never compared by PriorityQueue.
        work_queue = PriorityQueue()
        enqueued = set()
        def enqueue(list_of_queens):
            for Q in list_of_queens:
                if Q in enqueued: continue
                enqueued.add(Q)
                work_queue.put((-priority[Q], Q))
        # Initialize queue with the queens in the original chain
        enqueue([ Q for Q in work_chain if isQueen(Q) ])
        # Make a zero chain of correct dimension to store result in
        gamma_chain = Chain(dim(chain) + 1, cellcomplex.ring())
        # We iteratively process the maximal queen in "work_chain", each time
        # adding the appropriate multiple of the boundary of its mating king in
        # order to cancel it. Doing this can add new queens, which we enqueue.
        # A theorem prevents previously processed queens from being "new_queens"
        # We keep track of the king chain as we go.
        while not work_queue.empty():
            (rank, Q) = work_queue.get()
            a = work_chain[Q]
            if a == 0: continue
            K = M(Q)
            bd_K = bd(K)
            b = bd_K[Q]
            # Coefficient that cancels Q's contribution in the work chain.
            c = -a/b
            gamma_chain[K] += c
            work_chain += c * bd_K
            enqueue([ q for q in bd_K if isQueen(q) and q != Q ])
        return gamma_chain
    return (critical_cells, homotopy)
| true |
734e2c08aedcca9000bcf0a6b716f0a3b78cdae5 | Python | RegulusReggie/CS259 | /generator.py | UTF-8 | 2,227 | 2.984375 | 3 | [] | no_license | import numpy as np
import keras
from skimage.transform import resize
from skimage.io import imread
class DataGenerator(keras.utils.Sequence):
'Generates data for Keras'
def __init__(self, list_IDs, batch_size=32, dim=(128, 128), n_channels=3, shuffle=True):
'Initialization'
self.dim = dim
self.batch_size = batch_size
self.list_IDs = list_IDs
self.n_channels = n_channels
self.shuffle = shuffle
self.on_epoch_end()
def __len__(self):
'Denotes the number of batches per epoch'
return int(np.floor(len(self.list_IDs) / self.batch_size))
def __getitem__(self, index):
'Generate one batch of data'
idx_end = min(len(self.indexes), (index + 1) * self.batch_size)
# Generate indexes of the batch
indexes = self.indexes[index * self.batch_size : idx_end]
# Find list of IDs
list_IDs_temp = [self.list_IDs[k] for k in indexes]
# Generate data
X, y = self.__data_generation(list_IDs_temp)
return X, y
def on_epoch_end(self):
'Updates indexes after each epoch'
self.indexes = np.arange(len(self.list_IDs))
if self.shuffle == True:
np.random.shuffle(self.indexes)
def __data_generation(self, list_IDs_temp):
'Generates data containing batch_size samples' # X : (n_samples, *dim, n_channels)
# Initialization
X = np.empty((self.batch_size, *self.dim, self.n_channels))
y = np.empty((self.batch_size, *self.dim, 1))
# Generate data
for i, ID in enumerate(list_IDs_temp):
train_image = imread('/mnt/dfs/reggie/isic/train/image/' + ID + '.jpg')
train_image = resize(train_image, self.dim, order=1,
anti_aliasing=True) # bi-linear
# Store sample
X[i,] = train_image
train_mask = imread('/mnt/dfs/reggie/isic/train/mask/' + ID + '_segmentation.png')
# train_mask = np.true_divide(train_mask, 255)
train_mask = resize(train_mask, self.dim + (1,), order=0, anti_aliasing=True) # nearest neighbor
# Store class
y[i,] = train_mask
return X, y
| true |
5d517965916d22c40b0b531fc90cd29ec1deae20 | Python | seanpar203/event-bus | /tests/test_event_emit.py | UTF-8 | 569 | 3.328125 | 3 | [
"MIT"
] | permissive | """ Tests event being emitted and proper functions being called. """
from event_bus.bus import EventBus
# Constants
bus = EventBus()
GLOBAL_VAR = 'Init'
EVENT_NAME = 'completed'
@bus.on(event=EVENT_NAME)
def subscription():
    """Handler registered on the bus for EVENT_NAME ('completed').

    Flips the module-level GLOBAL_VAR so the test below can observe
    that the handler actually ran when the event was emitted.
    """
    global GLOBAL_VAR
    GLOBAL_VAR = 'Finished'
def test_event_emit():
    """ Tests that a function subscribed to an event executes on emit.

    Relies on module-level state: GLOBAL_VAR must still be 'Init', so this
    test is single-shot per interpreter session.
    """
    # Before Emit
    assert GLOBAL_VAR == 'Init'

    bus.emit(EVENT_NAME)

    # After Emit
    assert GLOBAL_VAR == 'Finished'
| true |
1076d492d2026df7cf910770f8bb5c992de5c047 | Python | mauriceLC92/LeetCode-solutions | /Linked-lists/2-Add-two-numbers.py | UTF-8 | 883 | 3.4375 | 3 | [] | no_license | from typing import List
class ListNode:
def __init__(self, val=0, next=None):
self.val = val
self.next = next
# l1 = [2,4,3], l2 = [5,6,4]
def addTwoNumbers(l1: ListNode, l2: ListNode) -> ListNode:
current_one = l1
current_two = l2
carry_over = 0
result = ListNode(0)
result_tail = result
while current_one or current_two or carry_over:
val1 = current_one.val if current_one else 0
val2 = current_two.val if current_two else 0
carry_over, output = divmod(val1 + val2 + carry_over, 10)
result_tail.next = ListNode(output)
result_tail = result_tail.next
current_one = current_one.next if current_one else None
current_two = current_two.next if current_two else None
return result.next
# Ad-hoc scaffolding: builds the two-node list 1 -> 5 (never printed or used).
starting_node = ListNode(1)
current = starting_node
current.next = ListNode(5)
96e9aefa1a2fa399680f751fe735707d3bc19f25 | Python | brainerdcruz/updews-pycodes | /Analysis/uptime.py | UTF-8 | 1,486 | 2.546875 | 3 | [] | no_license | import pandas as pd
from datetime import datetime
import querySenslopeDb as q
import numpy as np
def uptime(upts, df):
    """Groupby-apply helper: label one 30-minute timestamp 'up' or 'down'.

    upts is a single-row group with a 'ts' column; df holds internal-alert
    rows whose [timestamp, updateTS] window defines when they were active.
    The slot counts as 'up' when more than 25 alert rows cover it.
    """
    up_index = upts.index[0]
    # Alert rows whose validity window covers this timestamp.
    updf = df[(df.timestamp <= upts['ts'].values[0])&(df.updateTS >= upts['ts'].values[0])]
    # NOTE(review): 25 is a magic threshold (minimum expected reporting
    # rows?) -- confirm its meaning with the data owner.
    if len(updf) <= 25:
        upts.loc[upts.index == up_index, ['status']] = 'down'
    else:
        upts.loc[upts.index == up_index, ['status']] = 'up'
    return upts
def main(start, end):
    """Compute alert-system uptime over a window.

    Queries site_level_alert for 'internal' rows overlapping [start, end],
    marks each 30-minute slot up/down via uptime(), and returns
    (percent_up, per-slot dataframe, raw query dataframe).

    NOTE(review): rangeTS is hard-coded to 2017-01-01..2017-04-01 and does
    not use the start/end arguments -- looks like an oversight; confirm.
    """
    query = "SELECT * FROM %s.site_level_alert where source = 'internal' and alert not like '%s' and \
((timestamp <= '%s' and updateTS >= '%s') or (timestamp >= '%s' and timestamp <= '%s') \
or (updateTS >= '%s' and updateTS <= '%s'))" %(q.Namedb, 'ND%', start, end, start, end, start, end)
    df = q.GetDBDataFrame(query)
    # Half-hour grid over the (hard-coded) evaluation window.
    rangeTS = pd.date_range(start='2017-01-01', end = '2017-04-01', freq='30min')
    rangeTS = rangeTS[0:-1]
    pub_uptime = pd.DataFrame({'ts':rangeTS, 'status':['-']*len(rangeTS)})
    pub_uptimeTS = pub_uptime.groupby('ts')
    pub_uptime = pub_uptimeTS.apply(uptime, df=df)
    # Percentage of slots not marked 'down'.
    percent_up = 100 - (100. * len(pub_uptime[pub_uptime.status == 'down'])/len(pub_uptime))
    return percent_up, pub_uptime, df
if __name__ == '__main__':
    # Time the run and report uptime for Q1 2017 (Python 2 print syntax).
    start = datetime.now()
    percent_up, pub_uptime, df = main(start = '2017-01-01', end='2017-04-01')
    print '\n\n'
    print 'alert uptime = ' + str(np.round(percent_up, 2)) + '%'
    print '\n\n'
    print 'runtime =', str(datetime.now() - start)
8fdc5827c2c7e8d72e98b5aa086c707fc187b428 | Python | afnanenayet/DS-A | /graph_algorithms/dfs.py | UTF-8 | 1,489 | 4.15625 | 4 | [
"MIT"
] | permissive | # depth first search implemented in Python3
# note that graphs are represented by a dictionary - the key is the node
# and the value is a set of neighbors
class Graph:
    """Adjacency-set graph with DFS bookkeeping.

    ``nodes`` maps each node to the set of its neighbors; ``visited`` and
    ``parents`` start empty and are filled in by the search routines.
    """

    def __init__(self, node_dict):
        self.nodes = node_dict
        self.visited = set()
        self.parents = dict()
def dfs(graph, source, target):
    """Recursive depth-first search from ``source`` toward ``target``.

    Marks every reached node in ``graph.visited`` and records discovery
    edges in ``graph.parents`` (child -> parent) so the path can be
    rebuilt afterwards with ``path_helper``. Prints "Found target" when
    the target is reached; returns None.

    :type graph: Graph
    :type source: int
    :type target: int
    """
    graph.visited.add(source)
    if source == target:
        print("Found target")
        return
    for neighbor in graph.nodes[source]:
        if neighbor in graph.visited:
            continue
        graph.parents[neighbor] = source
        dfs(graph, neighbor, target)
def path_helper(parent_map, src, dest):
    """Reconstruct the src -> dest path from a child->parent map.

    Walks parent links backwards from ``dest`` until ``src`` is reached,
    then reverses once -- O(path length) instead of the quadratic cost of
    the previous repeated ``list.insert(0, ...)``.

    :type parent_map: dict (node -> its DFS parent)
    :param src: start node (must be an ancestor of dest in the map)
    :param dest: end node
    :rtype: list of nodes from src to dest inclusive
    :raises KeyError: if dest is not connected to src in parent_map
    """
    path = [dest]
    while path[-1] != src:
        path.append(parent_map[path[-1]])
    path.reverse()
    return path
# test: build the example graph, search 1 -> 6, and print the parent map
# plus the recovered path.
nodes = dict()
nodes[1] = set([2, 4])
nodes[2] = set([1, 3, 5])
nodes[4] = set([1])
nodes[5] = set([2, 6])
nodes[3] = set([2, 6])

graph = Graph(nodes)
parents = dict()  # NOTE(review): unused -- dfs records parents on the graph.
dfs(graph, 1, 6)
print(graph.parents)
print(path_helper(graph.parents, 1, 6))
| true |
7a02ef55a24fd1a829956e77d63d136f9ac9250c | Python | MrSpadala/Probability-riddles | /critical_hit.py | UTF-8 | 4,395 | 3.84375 | 4 | [] | no_license |
"""
This came up when reading this article https://probablydance.com/2019/08/28/a-new-algorithm-for-controlled-randomness/
found on hackernews. As he states:
<<<
The description of the problem is this: An enemy is supposed to drop a crucial item 50% of the time.
Players fight this enemy over and over again, and after fighting the enemy ten times the item still hasn’t dropped.
They think the game is broken. Or an attack is supposed to succeed 90% of the time but misses three times in a row.
Players will think the game is broken.
>>>
And again, the idea of his solution that it is also used in Warcraft 3:
<<<
The idea is that you keep on increasing the chance every time that something fails to happen.
Let’s say you have a 19% chance to get a critical hit. Then the first time you attack,
and you want to calculate if it was a critical hit or not, you use true randomness with a 5% chance of succeeding.
If you don’t land a critical hit, the next time you will get a 10% chance. If you fail again your chance goes up to 15%,
then 20%, 25% etc. If you keep on failing, at some point you will have a 100% chance of landing a critical attack.
But if you succeed at any point, your chance goes back down to 5%.
>>>
What I wanted to do is to find the probability distribution of hitting the target over the number of tries
and the mean probability to hit, gived the start probability and its increase at every step (at every missed hit)
I perform the following steps:
1. I start by finding the probability distribution to score a hit over the number of tries,
or, in other words, to analytically find the probability to score a hit at the n-th try, for all possible values of n.
2. Take the expected number of tries given the probability values calculated above
3. The average probability to score a hit is given by the inverse of the expected number of tries
"""
from pprint import pprint
import numpy as np
import random
random.seed(42)
try:
from matplotlib import pyplot as plt
except ImportError:
plt = None
# The first value of hit probability. Whenever a hit is scored, hit probability resets here
P_START = 0.05
# How much the hit probability is increased when missing a hit
P_INC = 0.05
def get_probabilities():
    """Return the joint distribution P, where P[i] = P(try = i+1 AND hit).

    The hit chance starts at P_START and grows by P_INC after every miss
    (capped at 1.0). Each entry is the chance of hitting on the current
    try times the chance of having missed all earlier tries; the list
    ends once that joint probability drops to zero (a capped miss factor
    of 1.0 zeroes every later term).
    """
    distribution = [P_START]
    tries_missed = 0
    while True:
        tries_missed += 1
        # Chance to hit now, after `tries_missed` failed attempts...
        joint = min(1.0, P_START + tries_missed * P_INC)
        # ...times the chance of having missed each earlier attempt.
        for k in range(tries_missed):
            joint *= 1.0 - min(1.0, P_START + k * P_INC)
        if joint <= 0:
            break
        distribution.append(joint)
    return distribution
def expected_val(P):
    """Return the expected value of a probability vector over tries.

    Entry P[i] is the probability of outcome i+1 (tries are counted
    from 1), so the mean is the sum of try_number * probability.
    """
    return sum(try_number * prob
               for try_number, prob in enumerate(P, start=1))
def get_hit_prob(verbose=True):
    """Compute the long-run per-try hit probability.

    Builds the joint distribution P(try=i AND hit), takes the expected
    number of tries to score a hit, and returns its reciprocal (the mean
    hit rate). When verbose, prints the distribution and expected value
    and, if matplotlib is available, plots the distribution.
    """
    P = get_probabilities()
    exp = expected_val(P)
    hit_prob = 1/exp
    if verbose:
        print("\n===============================")
        for i in range(len(P)):
            print(f"P(try={i+1} AND hit) = {P[i]:.8f}")
        print("===============================\n")
        if plt:
            plt.plot(range(len(P)), P)
            plt.scatter(range(len(P)), P)
            plt.show()
        else:
            print("[x] No plot, matplotlib missing\n")
        print(f">>> Expected number of tries to score a hit: {exp:.4f}")
        print(f">>> Mean probability to hit 1/{exp:.4f} = {hit_prob:.6f}\n")
    return hit_prob
def get_inc_prob(p_target):
    """Search for the start/increment probability whose long-run hit rate
    best matches p_target.

    Scans p in (0, p_target) in 0.001 steps, setting the module globals
    P_INC and P_START to each candidate and evaluating get_hit_prob().

    NOTE(review): `best` is only assigned when a candidate improves on the
    initial min_err of 1.0, so it could be referenced before assignment
    for a degenerate p_target; also, the globals are left set to the LAST
    candidate tried, not the best one -- confirm both are acceptable.
    """
    global P_INC, P_START
    min_err = 1.0
    step = 0.001
    for p in np.arange(step, p_target, step):
        P_INC = p
        P_START = p
        calc_p = get_hit_prob(verbose=False)
        if abs(calc_p-p_target) < min_err:
            min_err = abs(calc_p-p_target)
            best = p
            print(best)
    return best
def simulate():
    """Monte-Carlo check: play until 10**6 hits occur, raising the hit
    chance by P_INC after each miss and resetting to P_START after each
    hit, then print the empirical hit frequency for comparison with the
    analytic value."""
    # Simulate the game and print the evaluated probability,
    # to compare to the one found analytically
    hits, steps = 0, 0
    p = P_START
    while hits < 10**6:
        if random.random() >= p:
            p += P_INC
        else:
            hits += 1
            p = P_START
        steps += 1
    print(f"[*] Simulated hit probability: {hits/steps:.6f}")
if __name__ == '__main__':
    # Print the analytic hit probability, then verify it by simulation.
    # get_inc_prob is left disabled (slow parameter sweep).
    #get_inc_prob(0.1)
    get_hit_prob()
    simulate()
# - - trash not used - -
def P_comulative(p, step):
    """Cumulative probability of scoring at least one hit within ``step``
    tries, where the per-try probability ``p`` grows by P_INC after each
    miss (the p >= 1.0 base case caps the growth).

    Fixed: the recursive call used the undefined name ``P`` (a NameError
    whenever the recursive branch ran); it now recurses into this
    function itself.
    """
    if step <= 0:
        return 0.0
    if p >= 1.0:
        return 1.0
    # Hit now with probability p, otherwise recurse with a raised chance.
    return p + (1 - p) * P_comulative(p + P_INC, step - 1)
b241328384441fc7bbce527e3f82de2ae4ec0b9f | Python | heeejuunng2/2018-ggBigData | /day08-4실습.py | UTF-8 | 816 | 2.734375 | 3 | [] | no_license | import pandas as pd
from pandas import DataFrame
import numpy as np
import matplotlib.pyplot as plt
subwayCsv_path="subway.csv"
subwayCsv=pd.read_csv(subwayCsv_path, encoding='euc-kr')
# print(subwayCsv.head())
print('-'*50)
subwayCsv.rename(columns={subwayCsv.columns[2]:'하루평균'},inplace=True)
# print(subwayCsv.head())
subwayCsv.index.name='구분'
print(subwayCsv.head())
print("-"*100)
for i in range(0,5):
subwayCsv=subwayCsv.rename(index={i:str(i+1)+')'})
print(subwayCsv.head())
print("-"*100)
# print(olive_oil['id_area'][0].split('.')[0])
for i in range(0,5):
print(subwayCsv['하루평균'][i].split(',')[0])
print('-'*100)
#시각화
subwayCsv.plot.hist(bins=100,color='blue')
plt.xlabel("Mean of the day")
plt.ylabel("Rank")
plt.title("Ues Subway")
plt.legend()
plt.show()
| true |
36b10872d3ca9e690bb39605bfef67f1097c97f1 | Python | joaodobread/ifgoiano-sd-crawler | /src/crawler/crawler.py | UTF-8 | 2,341 | 2.84375 | 3 | [] | no_license | import os
from threading import Thread
from requests import get
from bs4 import BeautifulSoup
from .utils_page_metadata import UtilsPageMetadata
from src.referrer.referrer import Referrer
import uuid
class Crawler(Thread):
def __init__(self, deep_factor: int, actual_link: str, actual_domain: str):
super().__init__()
self.instance_id = str(uuid.uuid4())
self.deep_factor = deep_factor
self.actual_link = actual_link
self.page_links = []
self.parsed_document = None
self.actual_domain = actual_domain
self.refer = Referrer.get_instance()
def make_request(self) -> str:
"""
Raises:
Exception: When something get wrong with request will be raised an error
with the attributes
Returns:
str: When success will be return all content of returned from server as a string
"""
response = get(self.actual_link)
if not response.ok:
raise Exception({
"status": response.status_code,
"message": response.reason,
"at": self.actual_link
})
return response.text
def parse_html(self, text: str):
"""
Args:
text (str): The content of page in text to parse to BS4 object
Returns:
BeautifulSoup: the html string to BS4 object
"""
self.parsed_document = BeautifulSoup(text, 'html.parser')
return self.parsed_document
def extract_links(self):
ancher_tags = self.parsed_document.find_all('a')
self.page_links = [link.get('href') for link in ancher_tags if str(
link.get('href')).__contains__(self.actual_domain)]
return self.page_links
def go_deeper(self):
if self.deep_factor >= int(os.getenv('DEEP_FACTOR_LIMIT')):
return False
for link in self.page_links:
crawler = Crawler(self.deep_factor + 1, link, self.actual_domain)
crawler.setDaemon(True)
crawler.start()
crawler.join()
def run(self):
page_text = self.make_request()
self.parse_html(page_text)
links = self.extract_links()
for link in links:
self.refer.add_link(self.actual_link, link)
self.go_deeper()
| true |
2260c2cefb80380a0110fefeab4b03b401ef218d | Python | J-AugustoManzano/livro_Python | /ExerciciosAprendizagem/Cap09/c09ex11.py | UTF-8 | 695 | 4.0625 | 4 | [] | no_license | class TipoArea():
def area(self, x=0.0, y=0.0, z=0.0):
if (isinstance(x, float) and y == 0.0 and z == 0.0):
return x ** 2
elif (isinstance(x, float) and isinstance(y, float) and z == 0.0):
return x ** 2 * 3.14159 * y
elif (isinstance(x, float) and isinstance(y, float) and isinstance(z, float)):
return x * y * z
# Demonstrate the three overload-style branches of TipoArea.area().
calc = TipoArea()
quadrado = calc.area(5.0)
cubo = calc.area(5.0, 6.0, 7.0)
cilindro = calc.area(7.0, 3.0)
print("Área: Quadrado .. = {0:7.2f}".format(quadrado))
print("Área: Cubo ...... = {0:7.2f}".format(cubo))
print("Área: Cilindro .. = {0:7.2f}".format(cilindro))
enter = input("\nPressione <Enter> para encerrar... ")
| true |
80f2b85c070df301a31ee7c9bb6df30469b86d0c | Python | ImerG12/cli-games | /bnc/control-flow.py | UTF-8 | 327 | 3.390625 | 3 | [] | no_license |
#email_address = "imer.gramajo@gmail.com"
#
#res = email_address.split("@")[1];
#
#print(str(res))
#list= (25,50,75)
#def total(list):
#if sum(list) > 100:
#print (sum(list))
#else:
#print("not greater than 100")
#total(list)
list= (1,4,5,7,10,13)
only_odd= [ num for num in list if num % 2 ==1]
print(only_odd)
| true |
71479e8980a959544891ef50fc04ae292ee815aa | Python | curtislb/MovieAnalysis | /scraping/storyline_scraper.py | UTF-8 | 1,478 | 2.640625 | 3 | [] | no_license | from bs4 import BeautifulSoup
import urllib2
from time import time
import string
# a seemingly reasonable user agent (sent so IMDb does not reject the scraper)
USER_AGENT = 'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2272.118 Safari/537.36'
# input file: one IMDb title id per line
MOVIE_ID_LOC = 'movie_ids.list'
TITLE_URL = 'http://www.imdb.com/title/%s/'
# output is opened in append mode so an interrupted run can be resumed
OUTPUT_LOC = 'movie_storylines.list'
# read every movie id, stripping the trailing newline
movie_IDs = []
with open(MOVIE_ID_LOC, 'r') as f:
    for line in f:
        movie_IDs.append(line.rstrip())
start = time()
with open(OUTPUT_LOC, 'a') as f:
    for i, movie_ID in enumerate(movie_IDs):
        # hard-coded resume point: skip ids already scraped in a previous run
        if (i + 1) < 2452:
            continue
        print '(%d/%d) %s' % (i + 1, len(movie_IDs), movie_ID)
        # fetch the title page with the spoofed user agent
        title_URL = TITLE_URL % movie_ID
        request = urllib2.Request(title_URL)
        request.add_header('User-Agent', USER_AGENT)
        opener = urllib2.build_opener()
        soup = BeautifulSoup(opener.open(request))
        try:
            # the storyline paragraph lives in the div marked itemprop="description"
            description = soup.find('div', attrs = {'itemprop': 'description'})
            text = description.find('p').text.strip()
            # cut off the trailing "Written by <author>" credit
            # NOTE(review): if 'Written by' is absent, find() returns -1 and
            # text[:-1] silently drops the last character -- confirm intended.
            written_loc = string.find(text, 'Written by')
            cleaned_text = text[:written_loc].strip().replace('\n', ' ').encode('latin1', 'ignore')
        except Exception as e:
            # if any sort of exception, there's probably no summary, so ignore
            cleaned_text = ''
        # one pipe-separated record per movie: id|storyline
        f.write('%s|%s\n' % (movie_ID, cleaned_text))
print 'Time elapsed: %f s' % (time() - start)
| true |
a901da12239d2d321f2d900d3743cc3071bb83c3 | Python | erika-r/simple_hangman | /hangman.py | UTF-8 | 1,102 | 3.78125 | 4 | [] | no_license |
import random
# Load the word list (one word per line) and pick one at random.
with open("text.txt", "r") as f:
    words = f.readlines()
# Bug fix: random.randint's upper bound is INCLUSIVE, so the original
# randint(0, len(words)) could produce an index one past the end of the
# list and crash with an IndexError; randrange excludes the upper bound.
word_index = random.randrange(len(words))
word = words[word_index].strip()

# number of wrong guesses the player may make before losing
wrong_attempts = 6
print("The word is {} letters long.".format(len(word)))
# one '*' placeholder per letter, revealed as letters are guessed
guesses = ["*"] * len(word)
print("".join(guesses))

# keep asking while attempts remain and the word is not fully revealed
while wrong_attempts != 0 and "".join(guesses) != word:
    guess = input("Enter a letter:")
    if guess.casefold() not in word:
        wrong_attempts -= 1
        print("Wrong, guess again. Attempts left ({})".format(wrong_attempts))
        print("".join(guesses))
    elif guess.casefold() in word:
        print("Good guess ;) Attempts left ({})".format(wrong_attempts))
        # reveal every occurrence of the guessed letter
        for i in range(len(word)):
            if word[i] == guess.casefold():
                guesses[i] = word[i]
        print("".join(guesses))
        if "".join(guesses) == word:
            print("You win, the word was '{}'.".format(word))
            break

if wrong_attempts == 0:  # out of attempts (all body parts drawn)
    print("You lose, the word was '{}'.".format(word))
| true |
221fefdc960e51658a50c0c32595f9dc5b3e72d5 | Python | mberkanbicer/multiprocessor | /die.py | UTF-8 | 1,445 | 3.296875 | 3 | [] | no_license | #!/usr/bin/env python
# ------------------------------------------------------------------------------------------------------%
# Created by "Thieu" at 09:40, 27/10/2021 %
# %
# Email: nguyenthieu2102@gmail.com %
# Homepage: https://www.researchgate.net/profile/Nguyen_Thieu2 %
# Github: https://github.com/thieu1995 %
# ------------------------------------------------------------------------------------------------------%
import numpy as np
import concurrent.futures as parallel
import time
def fitness(solution):
    """Objective value of a candidate: the sum of its squared components."""
    squared = solution ** 2
    return np.sum(squared)
def create_solution():
    """Draw a random 5-dimensional position in [0, 1) and pair it with its fitness."""
    position = np.random.uniform(0, 1, 5)
    return [position, fitness(position)]
def create_population(size):
    """Build ``size`` random solutions in parallel worker processes.

    Results are gathered in completion order (whichever worker finishes
    first contributes first), exactly as the original implementation did.
    """
    with parallel.ProcessPoolExecutor() as pool:
        pending = [pool.submit(create_solution) for _ in range(size)]
        return [done.result() for done in parallel.as_completed(pending)]
if __name__ == "__main__":
    # Build a population of 10 random solutions in parallel and print each
    # solution's fitness (each agent is a [position, fitness] pair).
    pop = create_population(10)
    for agent in pop:
        print(agent[1])
| true |
a1b7ef0958d19c9c9e0b409bf24d7fa68c3afd45 | Python | DoktorE/python-openweathermap | /openweathermap/client.py | UTF-8 | 5,380 | 3.109375 | 3 | [] | no_license | import json
import requests
class Client(object):
    """Main class to perform and interact with the OpenWeatherMap API.

    :param appid: OpenWeatherMap API key
    :param units: unit system for returned values -- 'default' (Kelvin),
        'metric' or 'imperial'
    """

    # unit systems that are passed through to the API; 'default' means
    # "send no units parameter" (the API then answers in Kelvin)
    _VALID_UNITS = ('metric', 'imperial')

    def __init__(self, appid, units='default'):
        self.appid = appid
        self.units = units

    def _request(self, endpoint, method, data=None, **kwargs):
        """
        Handle GET and POST requests.

        :param endpoint: endpoint for the API (already containing a query string)
        :param method: method type for request ('get', anything else POSTs)
        :param data: POST data
        :param kwargs: extra arguments forwarded to ``requests``
        :return: decoded JSON response, or raw text when the body is not JSON
        """
        total_url = 'http://api.openweathermap.org/' + endpoint + '&appid=' + self.appid

        # Bug fix: the old check ("if not a == x or a == y or ...") had
        # inverted precedence, so 'imperial' and 'default' were reported as
        # invalid and only 'metric' was ever appended to the URL.
        if self.units in self._VALID_UNITS:
            total_url += "&units=" + self.units
        elif self.units != 'default':
            print("Invalid unit specified. Valid units:\n'imperial'\n'metric'")

        if method == 'get':
            r = requests.get(total_url, **kwargs)
        else:
            r = requests.post(total_url, data, **kwargs)
        r.raise_for_status()

        if len(r.text) == 0:
            return json.loads('{}')
        try:
            return json.loads(r.text)
        except ValueError:
            # body was not JSON (e.g. a plain-text error) -- return it verbatim
            return r.text

    def _get(self, endpoint, **kwargs):
        """
        Perform a GET request.

        :param endpoint: endpoint of the API
        :param kwargs: extra arguments
        :return: GET response
        """
        return self._request(endpoint, 'get', **kwargs)

    def _post(self, endpoint, data, **kwargs):
        """
        Perform a POST request.

        :param endpoint: endpoint of the API
        :param data: data to be sent with the request
        :param kwargs: extra arguments
        :return: POST response
        """
        # Bug fix: this used to call the non-existent ``self.request``.
        return self._request(endpoint, 'post', data, **kwargs)

    def getWeatherCity(self, city_name, country_code):
        """
        Get the weather based on the name of a city.

        :param city_name: name of city to pass into the request
        :param country_code: two letter code (ex: 'us') to pass into the request
        :return: weather from request
        """
        return self._get('data/2.5/weather?q=' + city_name + ',' + country_code)

    def getWeatherCityId(self, city_id):
        """
        Get the weather based on a city id (see OpenWeatherMap's own
        documentation for the list of city ids).

        :param city_id: city id of city
        :return: weather from request
        """
        return self._get('data/2.5/weather?id=' + str(city_id))

    def getWeatherZip(self, zip_code, country_code):
        """
        Get the weather based on a zip code.

        :param zip_code: zip code to pass into the request
        :param country_code: two letter code (ex: 'us'); may be '' to omit it
        :return: weather from request
        """
        if not country_code == '':
            country_code = ',' + country_code
        return self._get('data/2.5/weather?zip=' + str(zip_code) + country_code)

    def getWeatherCoord(self, lat, lon):
        """
        Get the weather based on geographic coordinates.

        :param lat: latitudinal coordinate
        :param lon: longitudinal coordinate
        :return: weather from request
        """
        return self._get('data/2.5/weather?lat=' + str(lat) + '&lon=' + str(lon))

    def getWeatherRec(self, bbox):
        """
        Get the weather for multiple cities in a rectangular area.

        :param bbox: array of 5 numbers describing the bounding box (lat of the
            top left point, lon of the top left point, lat of the bottom right
            point, lon of the bottom right point, map zoom)
        :return: weather of cities in the bounding box
        """
        bbox = ",".join(str(i) for i in bbox)
        return self._get('data/2.5/box/city?bbox=' + bbox + '&cluster=yes')

    def getWeatherCycle(self, lat, lon, cnt):
        """
        Get the weather from cities laid within a definite circle specified by
        its center point ('lat', 'lon') and the expected number of cities
        ('cnt') around this point.

        :param lat: latitude of the center point
        :param lon: longitude of the center point
        :param cnt: expected number of cities laid within circle
        :return: weather of cities in the circle
        """
        return self._get('data/2.5/find?lat=' + str(lat) + '&lon=' + str(lon) + '&cnt=' + str(cnt))

    def getCityGroup(self, city_ids):
        """
        Get the weather for multiple city IDs.

        :param city_ids: array of city ids (strings)
        :return: weather for each of the city IDs
        """
        # the stray debug ``print(city_ids)`` was removed
        return self._get('data/2.5/group?id=' + ",".join(city_ids))
class WeatherConverter(object):
    """Convert weather responses from OpenWeatherMap's native units
    (temperatures in Kelvin, wind speed in m/s)."""

    # physical conversion constants
    _KELVIN_OFFSET = 273.15   # subtract to get degrees Celsius
    _MPS_TO_KMH = 3.6         # m/s -> km/h
    _MPS_TO_MPH = 2.2369      # m/s -> mph

    def _parsed(self, data):
        """Round-trip *data* through JSON so we always work on a fresh,
        plain-dict copy and never mutate the caller's object."""
        return json.loads(json.dumps(data))

    def convertToMetric(self, data):
        """
        Convert a response to metric units (Celsius, km/h).

        :param data: decoded API response (Kelvin, m/s)
        :return: converted JSON-compatible dict
        """
        parsed_json = self._parsed(data)
        for key in ('temp', 'temp_min', 'temp_max'):
            # Bug fix: the Kelvin offset is 273.15, not 273.5.
            parsed_json['main'][key] -= self._KELVIN_OFFSET
        parsed_json['wind']['speed'] *= self._MPS_TO_KMH
        return parsed_json

    def convertToImperial(self, data):
        """
        Convert a response to imperial units (Fahrenheit, mph).

        :param data: decoded API response (Kelvin, m/s)
        :return: converted JSON-compatible dict
        """
        parsed_json = self._parsed(data)
        for key in ('temp', 'temp_min', 'temp_max'):
            # Bug fix: K -> F is K * 9/5 - 459.67; the old in-place
            # ``*= (9/5) - 459.67`` multiplied by a negative constant.
            parsed_json['main'][key] = parsed_json['main'][key] * 9 / 5 - 459.67
        parsed_json['wind']['speed'] *= self._MPS_TO_MPH
        return parsed_json
| true |
d8022355e55cb1829c13f375aaea8cb0b41e8b1f | Python | doublejy715/Problem-Solve | /BaekJoon_Online/String/No_10809.py | UTF-8 | 185 | 3.46875 | 3 | [] | no_license | string = input()
# One slot per lowercase letter 'a'..'z'; -1 means "not seen yet".
# (`string` is read from stdin on the preceding line of the file.)
alpa = [-1 for _ in range(26)]
for index in range(len(string)):
    # ord(c) - 97 maps 'a'->0 .. 'z'->25; keep only the FIRST occurrence
    if alpa[ord(string[index])-97] == -1:
        alpa[ord(string[index])-97]=index
# print all 26 first-occurrence indices separated by single spaces
print(*alpa,sep=' ')
3f4f9c3ba1dd671893ffe52013db0f9a278e8898 | Python | C-H-Simpson/ERA5-CRUTS-bias-check | /ERA5_vs_CRUTS.py | UTF-8 | 6,051 | 2.703125 | 3 | [] | no_license | # ---
# jupyter:
# jupytext:
# formats: ipynb,py:light
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.5.0
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Comparing ERA5 and CRU-TS
# Someone sent a plot suggesting that ERA5 has a $>2^oC$ average cold bias with respect to observations, in coastal cells in May and August.
# CRU-TS appeared to match observations much better (CRU-TS is gridded data that interpolated a set of observations).
#
# The data have already been aggregated by month, using "Agg_ERA5_and_CRUTS.py".
# Select grid-cells that:
# * are hot - defined as Tmax > 20 in CRU-TS
# * are coastal / not coastal
# Show:
# * Maps of the differences between the datasets.
# * Area averaged differences between ERA5 and CRU-TS, for the above subsets of the field
#
# ## Data loading and formatting
import xarray as xr
import scipy
import numpy as np
from pathlib import Path
import matplotlib as mpl
import matplotlib.pyplot as plt
import cartopy.crs as ccrs
import cartopy.feature as cfeature
from matplotlib.lines import Line2D
# Load my monthly averaged data - see 'Agg_ERA5_and_CRUTS.ipynb'
da_era5 = xr.open_dataset("data/era5_t2m_mavg.nc")
da_cruts_tmp = xr.open_dataset("data/cruts_tmp_mavg.nc")
da_cruts_tmx = xr.open_dataset("data/cruts_tmx_mavg.nc")

# get the ERA5 land-sea mask
era5_lsm = xr.open_dataset("/gws/nopw/j04/bas_climate/data/ecmwf/era5/invariant/era5_invariant_lsm.nc")
# make the lsm match the data
era5_lsm = era5_lsm.squeeze().drop("time").rename({"latitude": "lat", "longitude": "lon"})

# realign the data to be centered on 0 longitude
# (maps [0, 360) longitudes onto [-180, 180))
if da_era5.lon.min() >= 0:
    da_era5 = da_era5.assign_coords(lon=(((da_era5.lon + 180) % 360) - 180))
if era5_lsm.lon.min() >= 0:
    era5_lsm = era5_lsm.assign_coords(lon=(((era5_lsm.lon + 180) % 360) - 180))

# re-sort the axes
da_era5 = da_era5.sortby(['lon', 'lat', 'month'])
era5_lsm = era5_lsm.sortby(['lon', 'lat'])

# drop ocean
da_era5 = da_era5.where(era5_lsm["lsm"])

# convert ERA5 to Celsius from Kelvin
da_era5["t2m"] = da_era5["t2m"] - 273.15

# (notebook cell: display the CRU-TS tmax dataset)
da_cruts_tmx

# ## Selecting appropriate grid-cells
# I have selected coastal cells as cells adjacent to a change in the ERA5 land mask in either direction.

# quickly identify cells that are coastal, as cells where the land mask changes
da_coast = np.logical_or(era5_lsm["lsm"].differentiate("lat")!=0, era5_lsm["lsm"].differentiate("lon")!=0)
fig = plt.figure(figsize=(15,6))
ax = fig.add_subplot(1, 1, 1, projection=ccrs.PlateCarree())
da_coast.plot()

# show locations where CRU-TS tmax > 20
tmx_mask = (da_cruts_tmx["tmx"] > 20)
fig = plt.figure(figsize=(15,6))
ax = fig.add_subplot(1, 1, 1, projection=ccrs.PlateCarree())
# fraction of months for which each cell exceeds the 20 degC threshold
tmx_mask.mean("month").plot()

# ## Comparing the datasets

# take the difference between ERA5 and CRU-TS
da_diff = (da_era5["t2m"] - da_cruts_tmp["tmp"])

# +
# show the difference for each month
norm = mpl.colors.Normalize(vmin=-5, vmax=5)
fig = plt.figure(figsize=(12, 6*12))
for month in range(1,13):
    ax = fig.add_subplot(12, 1, month, projection=ccrs.PlateCarree())
    da_diff.sel(month=month).plot(norm=norm, cmap='bwr')
    ax.set_title(f"month={month}")
# -

# There are clearly some locations with large differences between the datasets.

# subsets of the difference field: hot cells, hot coastal cells, coastal cells
da_diff_gt20 = da_diff.where(da_cruts_tmx["tmx"]>20)
da_diff_gt20_coastal = da_diff_gt20.where(da_coast)
da_diff_coastal = da_diff.where(da_coast)

# +
# show the difference for each month, as a histogram
# show only places with tmax > 20 degC
# AND which are coastal
bins = np.linspace(-5,5,21)
fig = plt.figure(figsize=(8, 3*12))
for month in range(1,13):
    ax = fig.add_subplot(12, 1, month)
    da_diff.sel(month=month).plot.hist(
        bins=bins,
        label="All",
        histtype="step",
        ax=ax,
        color='r',
        density=True
    )
    da_diff_gt20.sel(month=month).plot.hist(
        bins=bins,
        label="tmx > 20 degC",
        histtype="step",
        ax=ax,
        color='b',
        density=True
    )
    da_diff_gt20_coastal.sel(month=month).plot.hist(
        bins=bins,
        label="tmx > 20 degC AND coastal",
        histtype="step",
        ax=ax,
        color='g',
        density=True
    )
    ax.set_title("")
    plt.text(0.1, 0.5, f"month={month}", transform=ax.transAxes)
    custom_lines = [Line2D([0], [0], color='r', lw=4),
                    Line2D([0], [0], color='g', lw=4),
                    Line2D([0], [0], color='b', lw=4)]
    ax.legend(custom_lines, ['All', 'tmx > 20 degC', 'tmx > 20 degC AND coastal'])
# -

# Let's take an area average.

# cos(latitude) weights approximate the varying grid-cell area
lat_area_weights = np.cos(np.deg2rad(da_diff.lat))

# +
# show the area averaged difference for each month, as a line graph
# show only places with tmax > 20 degC
# AND which are coastal
# NOTE(review): the loop body below does not depend on `month`, so the same
# four mean-bias lines are re-drawn 12 times -- the loop looks unnecessary;
# verify before removing.
fig, ax = plt.subplots(figsize=(10,8))
for month in range(1,13):
    da_diff.weighted(lat_area_weights).mean(["lat", "lon"]).plot(
        color='k'
    )
    da_diff_gt20.weighted(lat_area_weights).mean(["lat", "lon"]).plot(
        color='b'
    )
    da_diff_gt20_coastal.weighted(lat_area_weights).mean(["lat", "lon"]).plot(
        color='m'
    )
    da_diff_coastal.weighted(lat_area_weights).mean(["lat", "lon"]).plot(
        color='r'
    )
custom_lines = [Line2D([0], [0], color='k', lw=4),
                Line2D([0], [0], color='b', lw=4),
                Line2D([0], [0], color='m', lw=4),
                Line2D([0], [0], color='red', lw=4)
                ]
ax.legend(custom_lines, ['All', 'tmx > 20 degC', 'tmx > 20 degC AND coastal', 'coastal'])
plt.title("Area averaged biases in monthly mean temperature")
plt.ylabel("bias (degC)")
# -

# ## Conclusions
# * There are some large differences in the monthly mean temperature field between CRU-TS 4.04 and ERA5.
# * However, the area averaged differences are $<1^oC$ for all months.
# * This is still true when only hot locations (monthly max temperature $>20^oC$) are selected, and when only coastal locations are selected.
| true |
0284941a3a97e9abe2b14bf36c0b3a5c94671958 | Python | Hassiad/100-days-of-Code-Python | /27-Tkinter_Intro/Mile_to_Km.py | UTF-8 | 730 | 3.515625 | 4 | [] | no_license |
from tkinter import *
'''Window'''
window = Tk()
window.title("Mile to Km Converter")
window.config(padx=20,pady=20)
'''Miles'''
miles_input = Entry(width=10)
miles_input.grid(column=1,row=0)
miles_label = Label(text="Miles")
miles_label.grid(column=2,row=0)
'''km converter'''
is_equal_to = Label(text="is equal to")
is_equal_to.grid(column=0,row=1)
'''Km'''
km_output = Label(text="0")
km_output.grid(column=1,row=1)
km_label = Label(text="Km")
km_label.grid(column=2,row=1)
'''Calculate'''
def calculate_button():
miles = float(miles_input.get())
km = 1.60934 * miles
km_output["text"] = f"{km}"
calculate = Button(text="Calculate", command=calculate_button)
calculate.grid(column=1,row=2)
window.mainloop() | true |
f42d8d520431e9619a2a3861c5106cbc050b6655 | Python | chemrahul82/RNASeq-Analysis-Pipeline | /analysis/expression_correlation_general.py | UTF-8 | 8,076 | 2.609375 | 3 | [] | no_license | '''
This script reads the fpkm_tracking output files for genes and transcripts, sorts them by the names of
the genes or transcripts, and then compare correlations among pairs of runs from same sample (intra),
or across different runs from different samples (inter and longitudinal)
Author: Rahul K. Das
Date: 12/15/2015
'''
import os, sys
import csv
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
from scipy.stats import cumfreq
def transcript_express_compare():
    """Read Cufflinks ``*.fpkm_tracking`` files for every existing
    sample/time-point/run combination, sort expression values by gene or
    transcript id so runs are comparable, draw per-time-point pairwise
    scatter plots (transcripts in the upper triangle, genes in the lower),
    and write intra-/longitudinal/inter-sample Pearson correlations to
    three tab-separated text files."""
    samples = ['LV4','LV5','LV6']
    #samples = ['LV4','LV6']
    times = ['T0-1','T0-2','T1-80','T2-80','T1-LN2','T2-LN2']
    #times = ['T0-2','T1-80']
    runs = ['Run1','Run2','Run3','Run4','Run5']
    #runs = ['Run2','Run3']
    projDir = '/rawdata/projects/RNA-seq/Liver'
    scriptDir = '/rawdata/projects/RNA-seq/scripts'
    prefix = 'RNA_Barcode_None_001_rawlib'
    #list for storing existing time points for all samples
    existTimes = [[] for j in range(len(samples))]
    #list for storing total runs for different time points for all samples
    nruns_times = [[] for j in range(len(samples))]
    #list for storing existing Run ids for different time points for all samples
    existRuns = [[[] for i in range(len(times))] for j in range(len(samples))]
    #4D Master list for storing fpkm values for genes/isoforms for different runs at different time points for all samples
    fpkm_t_runs = [[[[] for i in range(len(runs))] for j in range(len(times))] for k in range(len(samples))]
    fpkm_g_runs = [[[[] for i in range(len(runs))] for j in range(len(times))] for k in range(len(samples))]
    nsamples = 0
    for isam in samples:
        nsamples += 1
        #counter for existing time point
        ntimes = 0
        #loop over existing time points
        for itim in times:
            if os.path.exists('%s/%s/%s' %(projDir, isam, itim)):
                #print 'working on %s and %s' %(isam, itim)
                existTimes[nsamples-1].append(itim)
                ntimes += 1
                #counter for total existing runs for a time point
                nruns = 0
                #loop over existing runs
                for irun in runs:
                    if os.path.exists('%s/%s/%s/%s/%s.isoforms.fpkm_tracking' %(projDir, isam, itim, irun,prefix)):
                        nruns += 1
                        existRuns[nsamples-1][ntimes-1].append(irun)
                        #lists for storing gene/isoform ids and fpkm values for runs
                        trans = []
                        genes = []
                        fpkm_t = []
                        fpkm_g = []
                        #for a run extract the transcript id and FPKM value
                        # (column 0 = tracking id, column 9 = FPKM in the
                        # Cufflinks fpkm_tracking format)
                        with open('%s/%s/%s/%s/%s.isoforms.fpkm_tracking' %(projDir, isam, itim, irun,prefix), 'rb') as f:
                            reader = csv.reader(f,delimiter = '\t')
                            next(reader)
                            for line in reader:
                                trans.append(line[0])
                                fpkm_t.append(line[9])
                        zipList1 = zip(trans, fpkm_t)
                        #sort the list by the transcript id, this is required for comparing FPKM values across different runs
                        sortedList1 = sorted(zipList1, key = lambda x: x[0])
                        for i in range(len(sortedList1)):
                            fpkm_t_runs[nsamples-1][ntimes-1][nruns-1].append(float(sortedList1[i][1]))
                        #for a run extract the gene id and FPKM value
                        with open('%s/%s/%s/%s/%s.genes.fpkm_tracking' %(projDir, isam, itim, irun,prefix), 'rb') as f:
                            reader = csv.reader(f,delimiter = '\t')
                            next(reader)
                            for line in reader:
                                genes.append(line[0])
                                fpkm_g.append(line[9])
                        zipList2 = zip(genes, fpkm_g)
                        #sort the list by the gene id, this is required for comparing FPKM values across different runs
                        sortedList2 = sorted(zipList2, key = lambda x: x[0])
                        for i in range(len(sortedList2)):
                            fpkm_g_runs[nsamples-1][ntimes-1][nruns-1].append(float(sortedList2[i][1]))
                #make intra-run-scatter correlation plots for a given time point of a sample
                fig = plt.figure()
                fig.text(0.5, 0.04, 'log2(FPKM+1)', ha='center', va='center', family='serif',size='medium', weight = 'bold')
                fig.text(0.06, 0.5, 'log2(FPKM+1)', ha='center', va='center', rotation='vertical', family='serif',size='medium', weight = 'bold')
                fig.suptitle('%s' %(isam+'-'+itim), family='serif',size='large', weight = 'bold')
                # nc is the 1-based subplot index in the nruns x nruns grid
                nc = 0
                for i in range(nruns):
                    for j in range(nruns):
                        nc += 1
                        #plot the pair correlation plots for the transcript expression in the upper triangle
                        if j > i:
                            plt.subplot(nruns, nruns, nc)
                            transCorr = np.corrcoef(fpkm_t_runs[nsamples-1][ntimes-1][i], fpkm_t_runs[nsamples-1][ntimes-1][j])[0][1]
                            title = str(i+1)+'-'+str(j+1)+' (r = %4.3f)' %(transCorr)
                            plt.title('%s' %title)
                            plt.plot(np.log2((np.array(fpkm_t_runs[nsamples-1][ntimes-1][i])) + 1), np.log2((np.array(fpkm_t_runs[nsamples-1][ntimes-1][j])) + 1),'bo', markersize = 4)
                            plt.subplots_adjust(hspace = .5)
                        #plot the pair correlation plots for the gene expression in the lower triangle
                        elif j < i:
                            plt.subplot(nruns, nruns, nc)
                            genesCorr = np.corrcoef(fpkm_g_runs[nsamples-1][ntimes-1][i], fpkm_g_runs[nsamples-1][ntimes-1][j])[0][1]
                            title = str(i+1)+'-'+str(j+1)+' (r = %4.3f)' %(genesCorr)
                            plt.title('%s' %title)
                            plt.plot(np.log2((np.array(fpkm_g_runs[nsamples-1][ntimes-1][i])) + 1), np.log2((np.array(fpkm_g_runs[nsamples-1][ntimes-1][j])) + 1),'ro', markersize = 4)
                            plt.subplots_adjust(hspace = .5)
                matplotlib.rcParams.update({'font.size': 6})
                matplotlib.rcParams.update({'font.family': 'serif'})
                plt.savefig("%s.png" %(isam+'-'+itim+'-'+'ExpressCorr'), dpi = 300)
                plt.close(fig)
                nruns_times[nsamples-1].append(nruns)
    #save intra, longitudinal, and inter correlation coefficients
    with open('expression_correlation_intra.txt', 'w') as fintra, open('expression_correlation_longitud.txt', 'w') as flong,\
            open('expression_correlation_inter.txt', 'w') as finter:
        for isample in range(len(samples)):
            for jsample in range(len(samples)):
                #Longitudinal & Intra
                if isample == jsample:
                    for itime in range(len(existTimes[isample])):
                        for jtime in range(len(existTimes[jsample])):
                            #store the longitudinal correlation
                            # (same sample, different time points)
                            if jtime > itime:
                                for irun in range(nruns_times[isample][itime]):
                                    for jrun in range(nruns_times[jsample][jtime]):
                                        long_t_r = np.corrcoef(fpkm_t_runs[isample][itime][irun], fpkm_t_runs[jsample][jtime][jrun])[0][1]
                                        long_g_r = np.corrcoef(fpkm_g_runs[isample][itime][irun], fpkm_g_runs[jsample][jtime][jrun])[0][1]
                                        flong.write('\t'.join(map(str,[samples[isample], existTimes[isample][itime], existRuns[isample][itime][irun],\
                                                samples[jsample], existTimes[jsample][jtime], existRuns[jsample][jtime][jrun], long_g_r, long_t_r]))+'\n')
                            #store the intra correlation coeff. across differenr runs for same time point
                            elif itime == jtime:
                                for irun in range(nruns_times[isample][itime]):
                                    for jrun in range(nruns_times[jsample][jtime]):
                                        if jrun > irun:
                                            intra_t_r = np.corrcoef(fpkm_t_runs[isample][itime][irun], fpkm_t_runs[jsample][jtime][jrun])[0][1]
                                            intra_g_r = np.corrcoef(fpkm_g_runs[isample][itime][irun], fpkm_g_runs[jsample][jtime][jrun])[0][1]
                                            fintra.write('\t'.join(map(str,[samples[isample], existTimes[isample][itime], existRuns[isample][itime][irun],\
                                                    samples[jsample], existTimes[jsample][jtime], existRuns[jsample][jtime][jrun], intra_g_r, intra_t_r]))+'\n')
                #Inter
                # (different samples; jsample > isample avoids duplicate pairs)
                elif jsample > isample:
                    for itime in range(len(existTimes[isample])):
                        for jtime in range(len(existTimes[jsample])):
                            for irun in range(nruns_times[isample][itime]):
                                for jrun in range(nruns_times[jsample][jtime]):
                                    inter_t_r = np.corrcoef(fpkm_t_runs[isample][itime][irun], fpkm_t_runs[jsample][jtime][jrun])[0][1]
                                    inter_g_r = np.corrcoef(fpkm_g_runs[isample][itime][irun], fpkm_g_runs[jsample][jtime][jrun])[0][1]
                                    finter.write('\t'.join(map(str,[samples[isample], existTimes[isample][itime], existRuns[isample][itime][irun],\
                                            samples[jsample], existTimes[jsample][jtime], existRuns[jsample][jtime][jrun], inter_g_r, inter_t_r]))+'\n')
if __name__ == '__main__':
    # run the full comparison when invoked as a script
    transcript_express_compare()
| true |
5371579a09c29b1c4003fac77532c60ef3be29e0 | Python | ESDLMapEditorESSIM/ESDLValidator | /esdlvalidator/validation/validator_esdl_validation_result.py | UTF-8 | 1,084 | 2.6875 | 3 | [
"MIT"
] | permissive | from esdlvalidator.validation.functions import utils
class EsdlValidationResults:
    """Result of a validation, returned to the user as JSON.

    Instance attributes:
    name / description -- taken from the validation definition (with fall-backs)
    checked            -- number of checks that were evaluated
    errors or warnings -- failed-check messages, chosen by the validation's
                          "type" field
    """

    def __init__(self, validation, checks):
        # fall-back strings are used when the validation definition omits a field
        msg = utils.get_attribute(validation, "message", "No message defined")
        results = self.__getResults(checks, msg)

        self.name = utils.get_attribute(validation, "name", "No name given")
        self.description = utils.get_attribute(validation, "description", "No description given")
        self.checked = len(checks)

        # NOTE(review): unlike the fields above, "type" is indexed directly
        # (no default), so a validation without a "type" key raises KeyError
        # here -- confirm the schema guarantees its presence.
        if validation["type"].lower() == "error":
            self.errors = results
        else:
            self.warnings = results

    def __getResults(self, checks, message):
        """Collect a message for every check whose result is not ok."""
        results = []
        for check in checks:
            if not check.result.ok:
                if isinstance(check.result.message, dict):
                    # structured message: pass it through unchanged
                    results.append(check.result.message)
                else:
                    results.append("{0}: {1}".format(message, check.result.message))

        return results
| true |