blob_id stringlengths 40 40 | language stringclasses 1
value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30
values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2
values | text stringlengths 12 5.47M | download_success bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
83384f69b9801218446203c5dc82ee92c8312e22 | Python | hima-del/Learn_Python | /13_class_and_first_class_functions/01.py | UTF-8 | 740 | 3.96875 | 4 | [] | no_license | class Dog:
species="canis familiaris"
    def __init__(self,name,age):
        """Create a dog with the given name and age."""
        self.name=name
        self.age=age
    def __str__(self):
        """Human-readable description used by print()."""
        return f"{self.name} is {self.age} years old"
    def speak(self,sound):
        """Return a sentence describing the dog making *sound*."""
        return f"{self.name} saying {sound}"
class Bulldog(Dog):
    # Inherits all behaviour from Dog unchanged.
    pass
jack=Bulldog("jack",9)
print(jack)
print(jack.speak("bow bow"))
#print(isinstance(jack,Dog))
class JackRussellTerrier(Dog):
    """A terrier that overrides speak() with a default sound of 'arf'."""

    def speak(self, sound="arf"):
        """Return a sentence describing the dog making *sound*."""
        return f"{self.name} says {sound}"
miles=JackRussellTerrier("miles",9)
print(miles)
print(miles.speak())
class NewDog(Dog):
    def speak(self,sound="aoww"):
        """Default the sound but reuse the parent's phrasing via super()."""
        return super().speak(sound)
jenn=NewDog("jenn",10)
print(jenn)
print(jenn.speak()) | true |
4e03313a9551fa82ce756802889528cfc53d7b4c | Python | SlowKing02/Spacy-Lemma | /Spacy_Lemma.py | UTF-8 | 1,206 | 2.703125 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Aug 26 09:40:19 2018
@author: slowking
"""
import pandas as pd
import spacy
nlp = spacy.load('en')
print(nlp.pipeline)
#Load Data
# BUG FIX: pd.read_csv() was called with no path, which raises TypeError.
# TODO: point these placeholders at the real input files.
TRAIN_INPUT_PATH = 'train.csv'
TEST_INPUT_PATH = 'test.csv'
Texts_train_load = pd.read_csv(TRAIN_INPUT_PATH)
Texts_train_load['spaced'] = Texts_train_load.text.apply(nlp)
Texts_test_load = pd.read_csv(TEST_INPUT_PATH)
Texts_test_load['spaced'] = Texts_test_load.text.apply(nlp)
def spacy_lemma_stop(data):
    """Return one lemmatised, stop-word-free string per row of data.spaced."""
    corpus = list()
    for doc in data.spaced:
        # Keep the lemma of every non-stop-word token.
        kept = [token.lemma_ for token in doc if token.is_stop == False]
        corpus.append(' '.join(kept))
    return corpus
# Replace the raw text column with the lemmatised text and persist both frames.
# BUG FIX: both to_csv(, index=False) calls were SyntaxErrors — the output
# path argument was missing entirely. TODO: set the real output paths.
TRAIN_OUTPUT_PATH = 'train_lemma.csv'
TEST_OUTPUT_PATH = 'test_lemma.csv'
Texts_train_load['lemma'] = spacy_lemma_stop(Texts_train_load)
Texts_test_load['lemma'] = spacy_lemma_stop(Texts_test_load)
Texts_train_load = Texts_train_load.drop(columns=['spaced', 'text'])
Texts_train_load = Texts_train_load.rename(index=str, columns={"lemma": "text"})
Texts_train_load.to_csv(TRAIN_OUTPUT_PATH, index=False)
Texts_test_load = Texts_test_load.drop(columns=['spaced','text'])
Texts_test_load = Texts_test_load.rename(index=str, columns={"lemma": "text"})
Texts_test_load.to_csv(TEST_OUTPUT_PATH, index=False)
| true |
6442270983001e67616bbbf75f03826a766cbeb2 | Python | Aasthaengg/IBMdataset | /Python_codes/p03804/s737474414.py | UTF-8 | 287 | 2.765625 | 3 | [] | no_license | import numpy as np
n, m = map(int, input().split())
grid = np.array([list(input()) for _ in range(n)])
pattern = np.array([list(input()) for _ in range(m)])
# Slide the m x m pattern over every position of the n x n grid.
found = any(
    (grid[i:i + m, j:j + m] == pattern).all()
    for i in range(n - m + 1)
    for j in range(n - m + 1)
)
print('Yes' if found else 'No')
| true |
d5fe427631af26324a16cfa8d2b7a2a1e4f0a3e1 | Python | archiewir/Financial-Product-Recommendation-System | /Data_sort.py | UTF-8 | 3,983 | 2.875 | 3 | [] | no_license | ### Sort Data based on complete and incomplete records ###
import pandas as pd
import csv
def checkList (list, input):
    """Return the index of *input* in *list*, or -1 when it is absent.

    BUG FIX: the original returned None implicitly when the item was
    found; returning the index keeps the callers' `== -1` checks working
    while making the successful result usable.
    (Parameter names shadow builtins but are kept for compatibility.)
    """
    try:
        return list.index(input)
    except ValueError:
        return -1
def months (file):
    """Collect the distinct month codes from column 0 of *file* and print
    them in sorted order."""
    m = []
    with open(file, 'r') as r:
        inp = csv.reader(r, delimiter=",", quotechar='|')
        field = next(inp)  # skip the header row
        for row in inp:
            if row[0] not in m:
                m.append(row[0])
    # BUG FIX: the original called sorted(m) and threw the result away,
    # so the printed list was never actually sorted.
    m.sort()
    print (m)
def custMonthly(file):
    """Print, for each known month code, how many records *file* contains
    and the difference from the previous month's count."""
    print (str(file))
    # Known month codes (ascending); every record's column 0 must be one of these.
    m = ['16463', '16494', '16522', '16553', '16583', '16614', '16644', '16675', '16706', '16736', '16767', '16797', '16828', '16859', '16888', '16919', '16949']
    count = []
    for i in range(len(m)):
        count.append(0)
    i=1
    with open(file, 'r') as r:
        inp = csv.reader(r, delimiter=",", quotechar='|')
        field = next(inp)  # skip the header row
        total = 0
        # Tally records per month code.
        for row in inp:
            count[m.index((row[0]))] +=1
        # Report counts and month-over-month differences.
        for x in range(len(count)):
            if x>0:
                print(str(i) + '. ' + str(m[x]) + ': ' + str(count[x]) + ', diff: ' + str(count[x] - count[x-1]))
            else:
                print(str(i) + '. ' + str(m[x]) + ': ' + str(count[x]))
            i+=1
        for i in count:
            total += i
        #print ('total number of records =' + str(total))
        print( '\n')
def sortData (filein):
    """Sort records by CusID then FetchDate and write them to complete1.csv."""
    df = pd.read_csv(filein)
    df = df.sort_values(['CusID','FetchDate'], ascending = [True, True])
    df.to_csv('complete1.csv',index=False, sep = ',', encoding = 'utf-8')
def completeData(file):
    """Split records (sorted by CusID) into complete.csv — customers with
    all 17 monthly rows — and incomplete.csv for everyone else."""
    with open(file, 'r') as r, open("complete.csv", 'w', newline='') as wr1, open("incomplete.csv", 'w', newline='') as wr2:
        inp = csv.reader(r, delimiter=",", quotechar='|')
        out1 = csv.writer(wr1, delimiter=",", quotechar='|')
        out2 = csv.writer(wr2, delimiter=",", quotechar='|')
        fn = next(inp)
        print (fn)
        out1.writerow(fn)
        out2.writerow(fn)

        def flush(group):
            # A customer is "complete" when all 17 monthly rows are present.
            target = out1 if len(group) == 17 else out2
            for record in group:
                target.writerow(record)

        test = []
        for row in inp:
            # Column 1 is the customer id; a change of id closes the group.
            if test and row[1] != test[-1][1]:
                flush(test)
                test = []
            test.append(row)
        # BUG FIX: the original only wrote a group when the *next* customer
        # appeared, so the final customer's rows were silently dropped.
        if test:
            flush(test)
def consecutive (test):
    """Return True when the rows of *test* cover consecutive month codes.

    Month codes run newest-to-oldest; the first row's code anchors where
    the run must start inside the master list.
    """
    month_codes = ['16949', '16919', '16888', '16859', '16828', '16797', '16767', '16736', '16706', '16675', '16644', '16614', '16583', '16553', '16522', '16494', '16463']
    anchor = month_codes.index(test[0][0])
    all_match = True
    for offset, row in enumerate(test):
        if month_codes[anchor + offset] != row[0]:
            all_match = False
    return all_match
def incompleteData (file):
    """Split incomplete customers into consecutive.csv / inconsecutive.csv
    depending on whether their month codes form an unbroken run."""
    with open(file, 'r') as r, open("consecutive.csv", 'w', newline='') as wr1, open("inconsecutive.csv", 'w',newline='') as wr2:
        inp = csv.reader(r, delimiter=",", quotechar='|')
        out1 = csv.writer(wr1, delimiter=",", quotechar='|')
        out2 = csv.writer(wr2, delimiter=",", quotechar='|')
        fn = next(inp)
        print(fn)
        out1.writerow(fn)
        out2.writerow(fn)

        def flush(group):
            # Route the whole customer group by whether its months run
            # consecutively.
            target = out1 if consecutive(group) == True else out2
            for record in group:
                target.writerow(record)

        group = []
        for row in inp:
            # Column 1 is the customer id; a change of id closes the group.
            if group and row[1] != group[-1][1]:
                flush(group)
                group = []
            group.append(row)
        # BUG FIX: the original never wrote the final customer's group,
        # silently dropping the last CusID from both output files.
        if group:
            flush(group)
sortData("complete.csv")
#completeData('sorted.csv')
#incompleteData('incomplete.csv')
| true |
6aea8bd9be4230cabd6915bf494cc872e8d777c8 | Python | MFurkan41/pythonAll | /Console/Çift Sayı mı, Tek Sayı mı/odd-even.py | UTF-8 | 361 | 3.609375 | 4 | [] | no_license | liste = [23,45,78,12,44,27,37,41,93,55,82,34,15]
def ciftmi(sayi):
    """Return True when *sayi* is even, False when odd.

    BUG FIX: the original returned None (neither True nor False) for
    non-integral remainders, which callers could not distinguish; a
    single boolean expression covers every case.
    """
    return sayi % 2 == 0
ciftler = []
tekler = []
# Partition the numbers by the ciftmi() verdict.
for sayi in liste:
    if ciftmi(sayi) == True:
        ciftler.append(sayi)
    elif ciftmi(sayi) == False:
        tekler.append(sayi)
print(ciftler) | true |
72d850447dd8f78c07b98edda588510e6d7301e3 | Python | princep4/Ludo-Dice | /random_dice.py | UTF-8 | 629 | 3.125 | 3 | [] | no_license | import random
# Three display rows for each possible roll.
FACES = {
    1: ["[ ]", "[ 0 ]", "[ ]"],
    2: ["[ ]", "[ 00 ]", "[ ]"],
    3: ["[0 ]", "[ 0 ]", "[ 0 ]"],
    4: ["[0 0]", "[ 0 ]", "[ 0 ]"],
    5: ["[0 0]", "[ 0 ]", "[0 0]"],
    6: ["[0 0]", "[0 0]", "[0 0]"],
}
ch = 'y'
while ch == 'y':
    r = random.randint(1, 6)
    for line in FACES[r]:
        print(line)
    # BUG FIX: the prompt asks for 'Y' but the loop only accepted a
    # lowercase 'y'; normalise the answer so both cases keep rolling.
    ch = input("Enter 'Y' if dice again").strip().lower()
| true |
cc93ff256d6c554db7b4fed6b84b36a1eefe7b8e | Python | hdznrrd/parseiq | /parseiq.py | UTF-8 | 7,041 | 2.6875 | 3 | [
"MIT"
] | permissive | # coding=utf-8
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
"""Usage: parseiq.py dump [-o OFFSET] [-f FRAMES] FILE
parseiq.py peaksearch [-b BLOCKSIZE] [-s SKIPFRAMES] [-f FRAMES] FILE
parseiq.py search [-t THRESHOLD] FILE PATTERN_FILE
Arguments:
FILE input file (WAV, IQ data))
PATTERN_FILE input file used as search pattern (WAV, IQ data)
Options:
-h --help show this help message and exit
-b BLOCKSIZE blocksize for FFT [default: 1024]
-s SKIPFRAMES number of frames to skip between exacting FFT blocks [default: 1]
-o OFFSET number of frames from the beginning of the file to skip [default: 0]
-f FRAMES limit search to at most this number of frames
-t THRESHOLD correlation threshold. between -1 and +1 [default: 0.5]
"""
# https://docs.python.org/2/library/wave.html
import wave
# http://stackoverflow.com/questions/3694918/how-to-extract-frequency-associated-with-fft-values-in-python
# https://docs.python.org/2/library/struct.html
import struct
import numpy as np
from multiprocessing import Process, Queue
# https://github.com/docopt/docopt
from docopt import docopt
import logging
def read_n_iq_frames(wav_file, n_frames=None, offset=None):
    """Read n_frames (or all remaining frames) starting at *offset* and
    return a numpy array of complex I/Q samples."""
    if n_frames is None:
        n_frames = wav_file.getnframes()
    if offset is not None:
        wav_file.setpos(offset)
    else:
        offset = 0
    # Clamp so we never read past the end of the file.
    n_frames = min(n_frames, wav_file.getnframes()-offset)
    # Samples are little-endian signed 16-bit, interleaved per channel.
    # NOTE(review): assumes two channels — I then Q — per frame; confirm
    # the recording format.
    data = np.array(struct.unpack(
        '<{n}h'.format(n=n_frames*wav_file.getnchannels()),
        wav_file.readframes(n_frames)))
    # De-interleave: even samples -> real (I), odd samples -> imaginary (Q).
    result = data[0:][::2] + 1j * data[1:][::2]
    return result
def correlate(first, second):
    """Return the Pearson correlation of (complex) arrays first and second.

    Only the overlapping prefix of the two arrays is used. With the
    corrected normalisation, a sequence's self-correlation is 1, so the
    documented [-1, +1] threshold semantics hold.
    """
    min_length = min(len(first), len(second))
    # BUG FIX: the original mixed population std-devs (ddof=0) with an
    # (n - 1) denominator, inflating every value by n / (n - 1) (e.g.
    # self-correlation of a length-3 input came out as 1.5). Use sample
    # std-devs (ddof=1) to match the (n - 1) factor.
    first_std = np.std(first[0:min_length], ddof=1)
    second_std = np.std(second[0:min_length], ddof=1)
    first_mean = np.mean(first[0:min_length])
    second_mean = np.mean(second[0:min_length])
    firstsecond_sum = np.sum(np.multiply(first[0:min_length], second[0:min_length].conjugate()))
    numerator = firstsecond_sum - min_length*first_mean*second_mean.conjugate()
    denominator = (min_length-1)*first_std*second_std
    corr = numerator/denominator
    return corr
#def do_fft(data,frate):
# w = np.fft.fft(data)
# freqs = np.fft.fftfreq(len(w))
#
# # Find the peak in the coefficients
# idx=np.argmax(np.abs(w)**2)
# freq=freqs[idx]
# freq_in_hertz=abs(freq*frate)
# return (freqs.min(), freqs.max(), freq_in_hertz
# , math.sqrt(np.sum(np.abs(w)**2))/len(w))
def output_dump(wav_file, n_frames=None, offset=None):
    """Dump the wave file's IQ samples as text, one complex number per line."""
    if not n_frames:
        n_frames = wav_file.getnframes()
    if not offset:
        offset = 0
    for sample in read_n_iq_frames(wav_file, n_frames, offset):
        print('{iq}'.format(iq=sample))
def worker(haystack, needle, work_queue, done_queue):
    """Worker process: correlate the needle at every offset of each task,
    posting [task, values] to done_queue until a 'STOP' sentinel arrives."""
    needle_length = len(needle)
    for task in iter(work_queue.get, 'STOP'):
        correlation_values = []
        for i in task:
            # BUG FIX: the slice end must move with i; the original used
            # haystack[i:needle_length], which shrinks to empty for i > 0.
            correlated = correlate(haystack[i:i + needle_length], needle)
            correlation_values.append(correlated)
        done_queue.put([task, correlation_values])
def correlation_index(haystack, needle):
    """Calculate correlation for all offsets of needle inside haystack."""
    workers = 5
    workload_size = 1000000
    work_queue = Queue()
    done_queue = Queue()
    processes = []

    length = 1+max(0, len(haystack)-len(needle))
    logging.info("generating tasks")
    for i in range(0, length, workload_size):
        # BUG FIX: the original used max(length, i + workload_size), which
        # made the first task span the whole range (and run past the data);
        # min caps each task at the true end.
        work_queue.put(range(i, min(length, i+workload_size)))
    logging.info("generated " + str(work_queue.qsize()) + " jobs")

    logging.info("setting up workers")
    for w in xrange(workers):
        process = Process(target=worker
                , args=(haystack, needle, work_queue, done_queue))
        process.start()
        processes.append(process)
        # BUG FIX: one 'STOP' sentinel per worker; the original queued a
        # single sentinel, leaving the other workers blocked in get() and
        # the joins below deadlocked.
        work_queue.put('STOP')

    logging.info("crunching...")
    for process in processes:
        process.join()
    done_queue.put('STOP')

    logging.info("consolidating...")
    correlation_values = []
    # BUG FIX: order results by each task's first offset; sorting the raw
    # [range, values] pairs compares range objects, which is unsupported.
    for result in sorted(iter(done_queue.get, 'STOP'),
                         key=lambda item: item[0][0]):
        correlation_values += result[1]
    logging.info("done")
    return correlation_values
def output_correlation_find(haystack, needle, peak_threshold, haystack_n=None, haystack_offset=None):
    """Correlate the needle at every offset of the haystack and log every
    position whose correlation exceeds peak_threshold."""
    if not haystack_n:
        haystack_n = haystack.getnframes()
    if not haystack_offset:
        haystack_offset = 0

    logging.info("loading pattern...")
    needle_iq = read_n_iq_frames(needle)
    logging.info("loading haystack...")
    hay_iq = read_n_iq_frames(haystack, haystack_n, haystack_offset)

    logging.info("correlating...")
    # BUG FIX: correlation_index returns a plain list; comparing a list
    # against a float and fancy-indexing it with an index array both fail,
    # so convert to a numpy array first.
    correlation_values = np.asarray(correlation_index(hay_iq, needle_iq))

    logging.info("peak extraction...")
    peak_idxs = np.where(correlation_values > peak_threshold)[0]
    logging.info("done")
    logging.info(peak_idxs + haystack_offset)
    logging.info(correlation_values[peak_idxs])
def main():
    """Entry point: parse the docopt arguments and dispatch to the
    requested sub-command (dump / search)."""
    arguments = docopt(__doc__)

    # Full-detail log goes to parseiq.log; the console only shows INFO+.
    logging.basicConfig(level=logging.DEBUG,
                        format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
                        datefmt='%m-%d %H:%M',
                        filename='parseiq.log',
                        filemode='w')
    # define a Handler which writes INFO messages or higher to the sys.stderr
    console = logging.StreamHandler()
    console.setLevel(logging.INFO)
    # set a format which is simpler for console use
    formatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s')
    # tell the handler to use this format
    console.setFormatter(formatter)
    # add the handler to the root logger
    logging.getLogger('').addHandler(console)

    # peaksearch options are parsed but the sub-command is not implemented yet.
    #block_size = int(arguments['-b'])
    #skip_frames = int(arguments['-s'])
    #offset_frames = int(arguments['-o'])
    #if arguments['peaksearch']:
    #    output_analysis(wav_file)
    if arguments['dump']:
        output_dump(wave.open(arguments['FILE'], 'r')
                , int(arguments['-f'])
                , int(arguments['-o']))
    if arguments['search']:
        output_correlation_find(wave.open(arguments['FILE'], 'r')
                , wave.open(arguments['PATTERN_FILE'], 'r')
                , float(arguments['-t']))
main()
| true |
d85255927a481effc8a4e22134f5e0a1818dce6e | Python | yj435545879/test | /section5.py | UTF-8 | 2,834 | 3.46875 | 3 | [] | no_license | import math
class Point:
    """A point in the 2-D plane."""

    def __init__(self, x, y):
        self.x = x
        self.y = y

    def distance(self, p2):
        """Euclidean distance from this point to *p2*."""
        dx = self.x - p2.x
        dy = self.y - p2.y
        return math.sqrt(dx * dx + dy * dy)
class Polygon:
    """A closed polygon defined by an ordered list of vertices.

    Vertices may be supplied as Point instances or (x, y) tuples; tuples
    are converted to Point.
    """

    def __init__(self, points=None):
        # BUG FIX: the original used a mutable default argument ([]),
        # which is shared between calls; use None instead.
        self.vertices = []
        if points is None:
            points = []
        for point in points:
            if isinstance(point, tuple):
                point = Point(*point)
            self.vertices.append(point)

    def add_point(self, point):
        """Append a vertex, converting tuples to Point.

        FIX: the original only converted tuples in __init__, so adding a
        tuple later crashed perimeter(); convert here too for consistency.
        """
        if isinstance(point, tuple):
            point = Point(*point)
        self.vertices.append(point)

    def perimeter(self):
        """Return the sum of edge lengths around the closed polygon."""
        # Robustness: a polygon with fewer than two vertices has no edges
        # (the original crashed with IndexError on an empty polygon).
        if len(self.vertices) < 2:
            return 0
        perimeter = 0
        points = self.vertices + [self.vertices[0]]
        for i in range(len(self.vertices)):
            perimeter += points[i].distance(points[i + 1])
        return perimeter
class Color:
def __init__(self,rgb_value,name):
self.rgb_value = rgb_value
self.__name = name
def __set_name(self,name):
if not name:
raise Exception("Invalid Name.")
self.__name = name
def __get_name(self):
return self.__name
name = property(__get_name,__set_name)
###案例学习
class Document:
    """A simple editable text buffer with a cursor."""
    def __init__(self):
        self.characters = []          # the buffer, one character per slot
        self.cursor = Cursor(self)    # current editing position
        self.filename = ''            # target path for save()
    def insert(self, character):
        """Insert *character* at the cursor and advance past it."""
        self.characters.insert(self.cursor.position, character)
        self.cursor.forward()
    def delete(self):
        """Delete the character under the cursor."""
        del self.characters[self.cursor.position]
    def save(self):
        """Write the buffer to self.filename.

        BUG FIX: the original opened — an attribute that is
        never set (AttributeError) — and leaked the handle on write
        errors; use self.filename with a context manager.
        """
        with open(self.filename, 'w') as f:
            f.write(''.join(self.characters))
    def forward(self):
        # BUG FIX: the original did `self.cursor += 1`, a TypeError since
        # Cursor does not support +=; delegate to the cursor instead.
        self.cursor.forward()
    def back(self):
        # BUG FIX: same as forward() — `self.cursor -= 1` was a TypeError.
        self.cursor.back()
    @property
    def string(self):
        """The buffer contents as a single string."""
        return "".join(self.characters)
class Cursor:
    """Tracks an editing position inside a Document's character list."""

    def __init__(self, document):
        self.document = document
        self.position = 0

    def forward(self):
        """Move one character to the right."""
        self.position += 1

    def back(self):
        """Move one character to the left."""
        self.position -= 1

    def home(self):
        """Move to the start of the current line (just past the previous
        newline, or to position 0)."""
        chars = self.document.characters
        while chars[self.position - 1] != '\n':
            self.position -= 1
            if self.position == 0:
                break  # reached the start of the document

    def end(self):
        """Move to the end of the current line (onto the next newline, or
        to the end of the document)."""
        chars = self.document.characters
        while self.position < len(chars) and chars[self.position] != '\n':
            self.position += 1
class Character:
    """A single styled character.

    str() renders the style flags as prefix markers: '*' bold, '/' italic,
    '_' underline, followed by the character itself.
    """

    def __init__(self, character, bold=False, italic=False, underline=False):
        assert len(character) == 1
        self.character = character
        self.bold = bold
        self.italic = italic
        self.underline = underline

    def __str__(self):
        markers = ''
        if self.bold:
            markers += '*'
        if self.italic:
            markers += '/'
        if self.underline:
            markers += '_'
        return markers + self.character
print('===============')
| true |
bb9892fc2d32f3008d12a02a2e6a00337ef2e298 | Python | LaurenDebruyn/aocdbc | /correct_programs/aoc2020/day_20_jurassic_jigsaw.py | UTF-8 | 8,082 | 3.328125 | 3 | [
"MIT"
] | permissive | import math
import re
import sys
from dataclasses import dataclass
from typing import (
Dict,
List,
Optional,
Set,
Tuple,
Final,
Sequence,
cast,
overload,
Union,
Iterator,
)
from icontract import require, ensure, DBC
VALID_SIDE_RE = re.compile(r"[.#]{10}") #: Express the edge of a tile
@require(lambda side: VALID_SIDE_RE.fullmatch(side))
@ensure(lambda result: re.fullmatch(r"[.#]{10}", result))
def reverse_side(side: str) -> str:
    """Return *side* reversed (flip the reading direction of an edge)."""
    return side[::-1]
class Tile(DBC):
    """Represent a tile of the puzzle.

    Sides are stored clockwise; the preconditions enforce that each pair
    of adjacent sides shares its corner character.
    """

    top: Final[str]  #: Top side
    right: Final[str]  #: Right side
    bottom: Final[str]  #: Bottom side
    left: Final[str]  #: Left side

    # fmt: off
    @require(
        lambda top, right, bottom, left:
        all(
            VALID_SIDE_RE.fullmatch(side)
            for side in (top, right, bottom, left)
        )
    )
    @require(lambda top, right: top[-1] == right[0])
    @require(lambda right, bottom: right[-1] == bottom[0])
    @require(lambda bottom, left: bottom[-1] == left[0])
    @require(lambda left, top: left[-1] == top[0])
    # fmt: on
    def __init__(self, top: str, right: str, bottom: str, left: str) -> None:
        """Initialize with the given values."""
        self.top = top
        self.right = right
        self.bottom = bottom
        self.left = left

    def rotate(self) -> "Tile":
        """Copy the tile and rotate it clock-wise."""
        return Tile(self.left, self.top, self.right, self.bottom)

    def flip_vertical(self) -> "Tile":
        """Copy the tile and flip it along the vertical axis."""
        return Tile(
            reverse_side(self.bottom),
            reverse_side(self.right),
            reverse_side(self.top),
            reverse_side(self.left),
        )

    def flip_horizontal(self) -> "Tile":
        """Copy the tile and flip it along the horizontal axis."""
        return Tile(
            reverse_side(self.top),
            reverse_side(self.left),
            reverse_side(self.bottom),
            reverse_side(self.right),
        )

    def __repr__(self) -> str:
        """Represent the tile as string for easier debugging."""
        return (
            f"top={self.top}, "
            f"right={self.right}, "
            f"bottom={self.bottom}, "
            f"left={self.left}"
        )

    def __eq__(self, other: object) -> bool:
        """Compare by sides when *other* is a Tile; defer otherwise."""
        if isinstance(other, Tile):
            return (
                self.top == other.top
                and self.right == other.right
                and self.bottom == other.bottom
                and self.left == other.left
            )
        # BUG FIX: the original ended with `return self == other`, which
        # re-enters this very method and recurses forever for any non-Tile
        # operand; NotImplemented lets Python fall back correctly.
        return NotImplemented

    def __hash__(self) -> int:
        # Hash over the same fields __eq__ compares, keeping the two
        # methods consistent.
        return hash((self.top, self.right, self.bottom, self.left))
def transform_tile(tile: Tile) -> Set[Tile]:
    """Produce every orientation of the tile: the original and both flips,
    each in all four rotations (duplicates collapse in the set)."""
    orientations: Set[Tile] = set()
    for base in (tile, tile.flip_vertical(), tile.flip_horizontal()):
        current = base
        orientations.add(current)
        for _ in range(3):
            current = current.rotate()
            orientations.add(current)
    return orientations
@dataclass
class Image(DBC):
    """Represent a (partially or fully) assembled puzzle of tiles."""
    width: int #: Total width of the image
    tiles: List[Tuple[int, Tile]] #: Assembled tiles, row-major order
    def pop(self) -> Tuple[int, Tile]:
        """Remove the last tile from the puzzle."""
        return self.tiles.pop()
    def attempt_add(self, tile_id: int, tile: Tile) -> bool:
        """
        Try to add the tile into the image.

        Tiles fill the image in row-major order; the candidate must match
        the tile to its left and the tile above it. Shared edges are
        stored in opposite reading directions, hence the reverse_side
        calls in the comparisons.

        :return: True if successful
        """
        tiles, width = self.tiles, self.width
        count = len(tiles)
        # First tile always fits.
        if count == 0:
            self.tiles.append((tile_id, tile))
            return True
        if count % width > 0:
            # align left with previous right:
            _, left_contents = tiles[-1]
            if tile.left != reverse_side(left_contents.right):
                return False
        if count >= width:
            # align top with bottom of tile above:
            _, above_contents = tiles[count - width]
            if tile.top != reverse_side(above_contents.bottom):
                return False
        self.tiles.append((tile_id, tile))
        return True
def place_remaining_tiles(image: Image, tiles: Dict[int, Set[Tile]]) -> bool:
    """
    Try to assemble the remaining tiles into the image.

    Depth-first backtracking: try each remaining tile, in every
    orientation, at the next open position; undo (pop + restore) when a
    branch cannot be completed. *tiles* is mutated during the search but
    restored on failed branches.

    :return: True if there are no more tiles left, or if the assembly was possible.
    """
    if not tiles:
        return True
    # list(...) snapshots the items because the dict shrinks mid-loop.
    for tile_id, variants in list(tiles.items()):
        for variant in variants:
            if image.attempt_add(tile_id, variant):
                del tiles[tile_id]
                if place_remaining_tiles(image, tiles):
                    return True
                # Undo this placement and put the tile back for siblings.
                image.pop()
                tiles[tile_id] = variants
    return False
@require(
    lambda tiles: int(math.sqrt(len(tiles))) ** 2 == len(tiles),
    "Number of tiles must be a perfect square",
)
def place_tiles(tiles: Dict[int, Set[Tile]]) -> Optional[Image]:
    """
    Assemble the tiles given as ID 🠒 tile transformations into an image.

    :return: Image, if possible; None if no puzzle could be assembled
    """
    side = int(math.sqrt(len(tiles)))
    canvas = Image(side, [])
    return canvas if place_remaining_tiles(canvas, tiles) else None
class ValidTileText(DBC):
    """Represent lines to conform to valid tile text.

    This is a validated "new type" over a sequence of strings: __new__
    checks the invariant and returns the original sequence re-typed; the
    method bodies below exist only so type checkers see a sequence-like
    interface and are never executed at runtime.
    """
    # fmt: off
    @require(
        lambda lines:
        len(lines) == 11
        and re.match(r"Tile (\d+)", lines[0]) is not None
        and all(VALID_SIDE_RE.fullmatch(line) for line in lines[1:]),
        error=ValueError,
        enabled=True
    )
    # fmt: on
    def __new__(cls, lines: Sequence[str]) -> "ValidTileText":
        """Ensure the properties on the ``lines``."""
        # Returns the *original* sequence, merely re-typed.
        return cast(ValidTileText, lines)
    # pylint: disable=function-redefined
    @overload
    def __getitem__(self, index: int) -> str:
        """Get the item at the given integer index."""
        pass
    @overload
    def __getitem__(self, index: slice) -> "ValidTileText":
        """Get the slice of the lines."""
        pass
    def __getitem__(self, index: Union[int, slice]) -> Union[str, "ValidTileText"]:
        """Get the line(s) at the given index."""
        raise NotImplementedError("Only for type annotations")
    def __len__(self) -> int:
        """Return the number of the lines."""
        raise NotImplementedError("Only for type annotations")
    def __iter__(self) -> Iterator[str]:
        """Iterate over the lines."""
        raise NotImplementedError("Only for type annotations")
def parse_tile(lines: ValidTileText) -> Tuple[int, Tile]:
    """Parse one tile section into its (ID number, Tile) pair.

    Sides are read clockwise, so bottom and left are reversed relative to
    the raw row/column order.
    """
    header_match = re.match(r"Tile (\d+)", lines[0])
    assert header_match
    tile_id = int(header_match.group(1))
    body = lines[1:]
    top = body[0]
    bottom = body[-1][::-1]
    right = "".join(row[-1] for row in body)
    left = "".join(row[0] for row in body)[::-1]
    return tile_id, Tile(top, right, bottom, left)
def parse_tiles(text: str) -> Dict[int, Set[Tile]]:
    """Parse the input ``text`` into ID number 🠒 possible tile transformations."""
    parsed: Dict[int, Set[Tile]] = {}
    # Tile sections are separated by blank lines.
    for section in text.split("\n\n"):
        lines = section.strip().splitlines()
        tile_id, tile = parse_tile(ValidTileText(lines))
        parsed[tile_id] = transform_tile(tile)
    return parsed
def main() -> None:
    """Execute the main routine."""
    tiles = parse_tiles(sys.stdin.read())
    image = place_tiles(tiles)
    assert image is not None
    # Part-one answer: multiply the IDs of the four corner tiles.
    ids = [tid for tid, _ in image.tiles]
    width = image.width
    print(ids)
    print(ids[0] * ids[width - 1] * ids[-width] * ids[-1])
if __name__ == "__main__":
main()
| true |
6fc435e2ca95df99098cdc667140244018498162 | Python | bomethis/project_euler | /1_project_Euler.py | UTF-8 | 401 | 4.3125 | 4 | [] | no_license | # If we list all the natural numbers below 10 that are multiples of
# 3 or 5, we get 3, 5, 6 and 9. The sum of these multiples is 23.
#
# Find the sum of all the multiples of 3 or 5 below 1000.
def sum_natural(limit=1000):
    """Return the sum of all multiples of 3 or 5 below *limit*.

    Generalized from the original hard-coded 1000; the default preserves
    the old behavior for existing callers.
    """
    return sum(i for i in range(limit) if i % 3 == 0 or i % 5 == 0)
print(sum_natural())
| true |
e78e92101dab848cfeaaac848278a5b33f7d8430 | Python | icefoxen/lang | /python/practice/BitsNPieces.py | UTF-8 | 2,191 | 4.21875 | 4 | [
"MIT"
] | permissive | # Example of using a lambda form.
def makeIncrementor( n ):
    """Return a closure that adds n to its argument.

    Demonstrates a lambda capturing a variable from the enclosing scope.
    """
    return lambda amount: amount + n
f = makeIncrementor( 50 )
f( 20 )
#####################################################################
# Documentation strings.
def func():
"""Do nothing, but document it.
Really, it doesn't do anything.
"""
pass
# >>> print func.__doc__
# Do nothing, but document it.
#
# Really, it doesn't do anything.
#####################################################################
# List methods
a = [66.6, 333, 333, 1, 1234.5]
print a.count( 333 ), a.count( 66.6 ), a.count( 'x' )
# Result is >>> 2 1 0
a.insert( 2, -1 )
a.append( 333 )
# a is now [66.6, -1, 333, 1, 1234.5, 333]
a.remove( 333 )
# only removes the first one
a.reverse()
a.sort()
#####################################################################
# Using a list as a simple stack.
stack = [3, 4, 5]
stack.append( 6 )
stack.append( 7 )
stack
stack.pop()
stack.pop()
stack.pop()
# You can also use it as a queue
stack.append( "tom" )
stack.append( "Dick" )
stack.append( "harry" )
stack
stack.pop(0)
stack.pop(0)
#####################################################################
# filter( function, sequence )
# returns a sequence containing the items for which function() is true.
def f(x):
    """True when x is divisible by neither 2 nor 3 (a filter() predicate)."""
    return x % 2 != 0 and x % 3 != 0
#####################################################################
# map( function, sequence ) is just like Lisp.
# It applys function() to each member of the sequence and returns it.
def cube(x):
    """Return x cubed (a map() example)."""
    return x ** 3
map( cube, range( 1, 15 ) )
# Is also useful for turning a pair of lists into a list of pairs.
a = [1, 2, 3, 4, 5]
b = ['a', 'b', 'c', 'd', 'e']
map( None, a, b )
# result: [(1, 'a'), (2, 'b'), (3, 'c'), (4, 'd'), (5, 'e')]
#####################################################################
# These things are rather... odd.
# It's sorta like mapping.
[x*5 for x in [1, 2, 3, 4, 5]]
# Result: [5, 10, 15, 20, 25]
[x/2.0 for x in [1, 2, 3, 4, 5] if x > 2]
# Result: [1.5, 2.0, 2.5] | true |
c06350ee3fee5b4febfa087998ccbdb8fef9c0d9 | Python | giripranay/PYTHON | /newmodule.py | UTF-8 | 77 | 3.1875 | 3 | [] | no_license |
def addition(a,b):
    """Print (not return) the sum of a and b."""
    print(a+b)
def multiplication(a,b):
    """Print (not return) the product of a and b."""
    print(a*b)
| true |
6a3a661b6c378a3d3b16c1fcbe97658941630348 | Python | TheShubham-K/Algorithmic_Toolbox | /week2_algorithmic_warmup/6_last_digit_of_the_sum_of_fibonacci_numbers/fibonacci_sum_last_digit.py | UTF-8 | 2,373 | 3.453125 | 3 | [] | no_license | # Uses python3
import sys
import random
import time
#import numpy as np
'''
def fibonacci_sum_naive(n):
if n <= 1:
return n
previous = 0
current = 1
sum = 1
for _ in range(n - 1):
previous, current = current, previous + current
sum += current
return sum % 10
def fib(n):
if n <= 1:
return n
pre = 0
cur = 1
for _ in range(n - 1):
pre, cur = cur, pre + cur
return cur%10
def fibonacci_sum_fast(n):
if n <= 1:
return n
previous = 0
current = 1
for _ in range(n + 1):
previous, current = current , (previous + current)% 10
if current > 0:
return current - 1
else:
return 9
def fibsum(n):
a = np.array([[1, 1], [1, 0]])
te = np.linalg.matrix_power(a, n+1)
fn = te[0][0]
if fn % 10 < 1:
return 9
else:
return fn % 10 -1
def fib(n):
if n == 0:
return (0, 1)
else:
a, b = fib(n // 2)
c = a * ( b * 2 -a)
d = a*a + b * b
if n % 2 == 0:
return (c, d)
else:
return(d, c+d)
'''
def get_fibonacci_huge_fast(n, m):
    """Return F(n) mod m.

    Walks the sequence mod m, and if the Pisano period (the pattern
    restarting at 0, 1) appears within the first n-1 steps, reduces n by
    the period and recomputes only the short remainder.
    """
    if n <= 1:
        return n
    prev, curr = 0, 1
    period = -1
    for step in range(n - 1):
        prev, curr = curr, (prev + curr) % m
        if curr == 1 and prev == 0:
            # The sequence mod m restarted, so the period is step + 1.
            period = step + 1
            break
    if period < 0:
        # No period found before reaching n: curr already holds F(n) mod m.
        return curr
    reduced = n % period
    if reduced <= 1:
        return reduced
    prev, curr = 0, 1
    for _ in range(reduced - 1):
        prev, curr = curr, (prev + curr) % m
    return curr
if __name__ == '__main__':
#n = 832564823476
'''
while True:
n = random.randint(0, 1000)
#n += 1
print (n)
r1 = fibonacci_sum_naive(n)
#r2 = fibonacci_sum_fast(n)
r2 = get_fibonacci_huge_fast(n+2, 10)
if r2 < 1:
r2 = 9
else:
r2 -= 1
if r1 == r2:
print ("ok")
else:
print(r1)
print(r2)
break
'''
input = sys.stdin.read()
n = int(input)
#t1 = time.time()
#re = fibonacci_sum_fast(n)
re = get_fibonacci_huge_fast(n+2, 10)
if re < 1:
re = 9
else: re -= 1
print(re)
| true |
32cab0421edd7144d0238571f7f05e09379225d4 | Python | waditya/HackerRank_Arrays | /05_Sparse_Arrays.py | UTF-8 | 1,720 | 3.53125 | 4 | [] | no_license | #!/bin/python3
import sys
# Read the number of strings, then the strings themselves.
arr_temp = input().strip().split(' ')
no_of_strings = int(arr_temp[0])
arr_strings = [input().strip().split(' ')[0] for _ in range(no_of_strings)]

# Read the number of query keywords, then the queries.
number_of_keywords = int(input().strip().split(' ')[0])
arr_search = [input().strip().split(' ')[0] for _ in range(number_of_keywords)]

# Apply the problem constraints, then answer each query.
if number_of_keywords <= 1000 and no_of_strings <= 1000:
    # Count all strings once with a Counter (O(n + q)) instead of the
    # original O(n * q) nested scan.
    # BUG FIX: the original's print sat inside the inner loop, emitting a
    # running count per string; exactly one count per query is printed now.
    from collections import Counter
    frequencies = Counter(arr_strings)
    for keyword in arr_search:
        print(frequencies[keyword])
| true |
2103cb4660abc81694cfc99e3d2f973e71867c9d | Python | limkeunhyeok/daily-coding | /programmers/Level_3/멀리 뛰기/solution.py | UTF-8 | 121 | 2.5625 | 3 | [] | no_license | def solution(n):
dp = [1, 1]
for i in range(1, n):
dp.append(dp[-1] + dp[-2])
return dp[-1] % 1234567 | true |
4b4492221c775868ee3ea2ca23a9180c2263e489 | Python | Tom-1113/python200817a | /score.py | UTF-8 | 743 | 3.6875 | 4 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Mon Aug 17 14:59:15 2020
@author: USER
"""
def _level(score):
    """Map a valid 0-100 score to its letter level string."""
    if score >= 90:
        return "level A"
    if score >= 80:
        return "level B"
    if score >= 70:
        return "level C"
    if score >= 60:
        return "level D"
    return "level E"

# The original pasted the prompt/grade logic twice verbatim; loop twice
# instead so both readings share a single implementation.
for _ in range(2):
    s = float(input("請輸入成績"))
    if 0 <= s <= 100:
        print(_level(s))
    else:
        print("輸入錯誤")
19e79239eab69da820d10db44823209081afb880 | Python | deetemples/gatech | /Functional_Annotation/create_abinitio_gff.py | UTF-8 | 5,624 | 2.5625 | 3 | [] | no_license | '''
Script for creating .gff files from ab-initio results
'''
#!/usr/bin/env python
import subprocess,os,sys
'''
Function reads in the CRISPR results
'''
def pilercr_merger(input_directory_path,pilercr_file):
    """Parse a PILER-CR report and write the predicted CRISPR arrays as
    GFF lines to ./tmp/<name>_crispr.gff.

    Contig headers (lines starting with '>') are paired, in order, with
    the rows of the report's "SUMMARY BY SIMILARITY" table.
    """
    #Creating input file path
    input_file=input_directory_path + pilercr_file
    #Creating output file path
    mod_pilercr_file_name=pilercr_file.replace("_crispr","_crispr.gff")
    output_file="./tmp/"+mod_pilercr_file_name
    dict1={} #Dictionary stores the crispr array details for each contig tested.
    #Parsing the CRISPR input file
    with open(input_file,"r") as inp:
        in_file=inp.readlines()
        line_count=0 #Counter for the lines in input file.
        count=0 #Counter for number of crispr arrays predicted.
        while(line_count<len(in_file)):
            if in_file[line_count].startswith(">")==True:
                in_file[line_count]=in_file[line_count].rstrip()
                header=in_file[line_count].replace(">","")
                count=count+1
                if count not in dict1.keys():
                    dict1[count]=header
            if in_file[line_count].startswith("SUMMARY BY SIMILARITY")==True:
                # The table body starts 6 lines below the section header;
                # data rows are spaced every 2 lines (NOTE(review): offsets
                # assume the standard PILER-CR report layout — confirm).
                line_count=line_count+6
                count1=0 #Counter to match the number of arrays found to the ones reported in the "Summary by similarity"
                while in_file[line_count].startswith("SUMMARY BY POSITION")!=True:
                    if count1<count:
                        count1=count1+1
                        crisp_array=in_file[line_count].split()
                        arr_num=int(crisp_array[0])
                        start_pos=crisp_array[2]
                        # End = start + array length + 1.
                        end_pos=int(start_pos)+int(crisp_array[3])+1
                        head=dict1[arr_num]
                        dict1[arr_num]=head+"\t"+start_pos+"\t"+str(end_pos)+"\t"+"Copies:"+crisp_array[4]+";Repeat_length:"+crisp_array[5]+";Spacer_Length:"+crisp_array[6]+";Repeat_Consensus:"+crisp_array[8]+"\n"
                    line_count=line_count+2
                    if in_file[line_count].startswith("SUMMARY BY POSITION")==True:
                        break
            line_count=line_count+1
    #Writing to the .gff output files (append mode: repeated runs accumulate)
    with open(output_file,"a+") as op:
        for keys in dict1.keys():
            line_split=dict1[keys].split("\t")
            op.write(line_split[0]+"\t"+"pilercr"+"\t"+"CRISPR array"+"\t"+line_split[1]+"\t"+line_split[2]+"\t"+"."+"\t"+"."+"\t"+"."+"\t"+line_split[3])
'''
This function merges the predicted Transmembrane proteins to the .faa and .gff files produced by the Gene Prediction group.
'''
def tmhmm_merger(input_directory_path,tmhmm_file):
#Creating input file path
input_file=input_directory_path + tmhmm_file
#Creating output file path for .gff file
mod_tmhmm_file_name=tmhmm_file.replace("tmhmm","tmhmm.gff")
output_file="./tmp/"+mod_tmhmm_file_name
dict_faa={}
#Parsing the TM protein input file
with open(input_file,"r") as inp:
for line in inp:
line=line.rstrip()
col=line.split("\t")
header=col[0]
pred_hel_split=col[4].split("=")
pred_hel=pred_hel_split[1]
top=col[5]
if int(pred_hel)!=0: #Rejecting the proteins with zero predicted alpha-helices
dict_faa[header]= "Transmembrane Protein: Predicted Helices="+pred_hel+", Topology:"+top
#Writing to the .gff output files
with open(output_file,"a+") as op:
for keys in dict_faa.keys():
name=keys.split(":")
node=name[0]
number=name[1].split("-")
start=int(number[0])-1
if start <0:
start=0
stop=number[1]
op.write(node+"\t"+"."+"\t"+"."+"\t"+str(start)+"\t"+stop+"\t"+"."+"\t"+"."+"\t"+"."+"\t"+dict_faa[keys]+"\n")
'''
This function merges the predicted Transmembrane proteins to the .faa and .gff files produced by the Gene Prediction group.
'''
def signalp_merger(input_directory_path,signalp_file):
#Creating input file path
input_file=input_directory_path + signalp_file
#Creating output file path for .gff file
mod_signalp_file_name=signalp_file.replace(".gff3","_signalp.gff")
output_file="./tmp/"+mod_signalp_file_name
signalp_dict={}
#Parsing the SignalP input file
with open(input_file,"r") as inp:
first=inp.readline() #Removing the first line
for line in inp:
col=line.split("\t")
name=col[0]
funct=col[2]
signalp_dict[name]=funct
#Writing to the .gff output files
with open(output_file,"a+") as op:
for keys in signalp_dict.keys():
name=keys.split(":")
node=name[0]
number=name[1].split("-")
start=int(number[0])-1
if start <0:
start=0
stop=number[1]
op.write(node+"\t"+"."+"\t"+"."+"\t"+str(start)+"\t"+stop+"\t"+"."+"\t"+"."+"\t"+"."+"\t"+signalp_dict[keys]+"\n")
def main():
inputpath_pilercr=sys.argv[1]
inputpath_tmhmm=sys.argv[2]
inputpath_signalp=sys.argv[3]
files_pilercr=os.listdir(inputpath_pilercr)
file_tmhmm=os.listdir(inputpath_tmhmm)
file_signalp=os.listdir(inputpath_signalp)
#Checking if pilercr input files exist in the directory
if len(files_pilercr) == 0:
print("No files present in the directory.")
for name in files_pilercr:
print("Writing file for "+name+"\n")
#Writing gff of PilerCr results.
pilercr=pilercr_merger(inputpath_pilercr,name)
#Checking if tmhmm input files exist in the directory
if len(files_tmhmm) == 0:
print("No files present in the directory.")
for name in files_tmhmm:
print("Writing file for "+name+"\n")
#Writing gff of tmhmm results
tmhmm=tmhmm_merger(inputpath_tmhmm,name)
#Checking if signalp input files exist in the directory
if len(files_signalp) == 0:
print("No files present in the directory.")
for name in files_signalp:
print("Writing file for "+name+"\n")
#Writing gff of signalp results
signalp=signalp_merger(inputpath_signalp,name)
if __name__ == "__main__":
main()
| true |
54bc10e5e75c940f5ef1a9c5fce4335dff47b322 | Python | margaretphillips/leetcode_stubs | /group_anagrams.py | UTF-8 | 776 | 3.828125 | 4 | [] | no_license | #given an array of string group the anagrams
#an anagram is formed by grouping the letters of a different word
#ie ...ate and tea have the same letters
def groupanagrams(arr):
n = arr
#sub = []
#sep =','
#dict = {}
for a in arr:
print('----------')
print(a)
for x in n:
if x != a:
for i in range(0,len(x)):
print(x[i])
print('-----')
#for a in arr:
# n.append(sep.join(sorted(a)).replace(',', '').replace(' ', ''))
#for x in n:
# if x not in dict:
# dict[x] = []
# else:
# dict[x].append(x)
#print(dict)
#return n
arr = ['eat', 'tea', 'tan', 'nat', 'ate']
result = groupanagrams(arr)
print(result) | true |
d195dee75c1f8c60ff692372420bf22430b92917 | Python | overtunned/DStar-Lite | /dstarNode.py | UTF-8 | 661 | 3 | 3 | [] | no_license | import numpy as np
class Node:
def __init__(self, key, v1, v2):
self.key = key
self.v1 = v1
self.v2 = v2
def __eq__(self, other):
return np.sum(np.abs(self.key - other.key)) == 0
def __ne__(self, other):
return self.key != other.key
def __lt__(self, other):
return (self.v1, self.v2) < (other.v1, other.v2)
def __le__(self, other):
return (self.v1, self.v2) <= (other.v1, other.v2)
def __gt__(self, other):
return (self.v1, self.v2) > (other.v1, other.v2)
def __ge__(self, other):
return (self.v1, self.v2) >= (other.v1, other.v2) | true |
d9829c4d3cc6d3ad3bff63616884b2f2db04ff86 | Python | peterhinch/micropython-radio | /radio-fast/rftest.py | UTF-8 | 1,677 | 2.609375 | 3 | [
"MIT"
] | permissive | # Tests for radio-fast module.
# Author: Peter Hinch
# Copyright Peter Hinch 2020 Released under the MIT license
# Requires uasyncio V3 and as_drivers directory (plus contents) from
# https://github.com/peterhinch/micropython-async/tree/master/v3
from time import ticks_ms, ticks_diff
import uasyncio as asyncio
import radio_fast as rf
from as_drivers.hd44780.alcd import LCD, PINLIST # Library supporting Hitachi LCD module
from config import FromMaster, ToMaster, testbox_config, v2_config # Configs for my hardware
st = '''
On master (with LCD) issue rftest.test()
On slave issue rftest.test(False)
'''
print(st)
async def slave():
# power control done in main.py
s = rf.Slave(v2_config) # Slave runs on V2 PCB (with SD card)
send_msg = ToMaster()
while True:
await asyncio.sleep(0)
result = s.exchange(send_msg) # Wait for master
if result is not None:
print(result.i0)
else:
print('Timeout')
send_msg.i0 += 1
async def run_master(lcd):
await asyncio.sleep(0)
m = rf.Master(testbox_config)
send_msg = FromMaster()
while True:
start = ticks_ms()
result = m.exchange(send_msg)
t = ticks_diff(ticks_ms(), start)
lcd[1] = 't = {}mS'.format(t)
if result is not None:
lcd[0] = str(result.i0)
else:
lcd[0] = 'Timeout'
await asyncio.sleep(1)
send_msg.i0 += 1
def test(master=True):
lcd = LCD(PINLIST, cols = 24)
try:
asyncio.run(run_master(lcd) if master else slave())
except KeyboardInterrupt:
print('Interrupted')
finally:
asyncio.new_event_loop()
| true |
bf9a9b002114061bf0ada5350ffed85c0692809e | Python | ziuLGAP/2021.1-IBMEC | /exercicio3_26.py | UTF-8 | 921 | 4.03125 | 4 | [] | no_license | """
Exercicio 3-26
Luiz Guilherme de Andrade Pires
Engenharia de Computação
Matrícula: 202102623758
Data: 28/04/2021
"""
def votos(qntd):
"""Computa os votos dos usuários e informa a quantidade de votos de cada candidato"""
cand1 = 0
cand2 = 0
cand3 = 0
for _ in range(qntd):
voto = input("Informe o Número do candidato no qual deseja votar(1 para o candidato 1,\
2 para o canditado 2 e 3 para o candidato 3, caso seja inserido qualquer outro valor, o\
voto não será computado): ")
if voto == "1":
cand1 += 1
elif voto == "2":
cand2 += 1
elif voto == "3":
cand3 += 1
print("O número de votos do candidato 1 foi :", cand1, "votos.")
print("O número de votos do candidato 2 foi :", cand2, "votos.")
print("O número de votos do candidato 3 foi :", cand3, "votos.")
return cand1, cand2, cand3
| true |
62f3b1e54e63ecfc6dcf4fca59ee5e5aac155da3 | Python | RJJxp/MyPythonScripts | /InClass/adjustment_homework/hw_08.py | UTF-8 | 1,770 | 3.046875 | 3 | [] | no_license | import numpy as np
if __name__ == '__main__':
# ********************************************************
# ****************** first sub-question ******************
# ********************************************************
n = 5
t = 3
V = np.mat([[7.9],
[-9.6],
[-5.4],
[-8.4],
[14.4]])
p = [2.4, 2.8, 4.6, 3.7, 5.2]
P = np.mat(np.diag(p))
mean_error = np.sqrt(V.T * P * V / (n - t))
print('mean error is: %f' %mean_error)
B = np.mat([[1, 0, 0],
[-1, 1, 0],
[0, -1, 1],
[0, 0, -1],
[-1, 0, 1]])
H = B * (B.T * P * B).I * B.T * P
print('H is:\n', H)
h55 = H[4, 4]
p5 = P[4, 4]
v5 = V[4, 0]
variable_1 = np.abs(v5) / (mean_error * np.sqrt((1 - h55) / p5))
print('variable_1 is %f' %variable_1)
variable_2 = v5 * v5 / (mean_error * mean_error * (1 - h55) * (n - t) / p5)
print('variable_2 is %f' %variable_2)
# ********************************************************
# ****************** second sub-question *****************
# ********************************************************
R = np.eye(5) - H
R_row_mean = []
for i in range(5):
R_row_mean.append(np.mean(R[:,i]))
r5_mean = R_row_mean[4]
v_mean = np.mean(V)
r5 = R[:, 4]
numerator = 0
for i in range(5):
numerator += (r5[i] - r5_mean) * (V[i] - v_mean)
print(numerator)
t1 = 0
t2 = 0
for i in range(5):
t1 += (r5[i] - r5_mean) * (r5[i] - r5_mean)
t2 += (V[i] - v_mean) * (V[i] - v_mean)
denominator = np.sqrt(t1 * t2)
print(denominator)
variable_3 = numerator / denominator
print(variable_3)
| true |
495325d328abd8ea455aec9e647d491a532e3c21 | Python | JonathanLPoch/Gooroo-Tutoring | /Intro to Programming/prices.py | UTF-8 | 1,209 | 4.125 | 4 | [] | no_license | prices = [] #list for valid prices
while(True): #Continually take in inputs
price = input("Enter a price, 0 to end: ")
if(price.isdigit() or (price[0] == "-" and price[1:].isdigit())): #if a number, or a NEGATIVE one
num = int(price) #cast to int
if(num > 0): #if positive
prices.append(num) #add it in
elif(num == 0): #We stop taking input
break
else:
print("Number must be positive!")
else:
print("That's not a number")
total = 0.0
for price in prices: #summing in a list
total += price
print("Total cost: ", total)
average = total/len(prices)
print("Average cost: ", average) #average
print("Highest price: ", max(prices)) #max
print("Lowest price: ", min(prices)) #min
lessThanAvg = 0
greaterThanAvg = 0
for price in prices: #notice how we can check for both things in one pass
if price >= average:
greaterThanAvg += 1
else:
lessThanAvg += 1
print("# of prices >= avg: ", greaterThanAvg)
print("# of prices < avg: ", lessThanAvg)
#if a > b:
##do something
#else: #a <= b
#
#if a >= b:
##do something
#else: #a < b | true |
25ad6da8db5df9c9b48a219b9c9f08ce6c93697f | Python | techrabbit58/LeetCode30DaysMay2020Challenge | /main/solutions/valid_perfect_square.py | UTF-8 | 1,971 | 4.34375 | 4 | [
"Unlicense"
] | permissive | """
Week 2, Day 2: Valid Perfect Square
Given a positive integer num, write a function which returns True if num is a perfect square else False.
Note:
Do not use any built-in library function such as sqrt.
Example 1:
Input: 16
Output: true
Example 2:
Input: 14
Output: false
"""
from functools import reduce
from time import perf_counter_ns
def isPerfectSquare(num: int) -> bool:
"""Okay. Solution is O(1)."""
r = int(num ** 0.5)
return r * r == num
def isPerfectSquare_v2(num: int) -> bool:
"""
This O(1) solution were contributed to LeetCode by another user.
Way faster than my first solution!
A good example why you should always: 'Know your standard API!'
But there is so much much python magic in it, that it almost feels like cheating.
"""
return (num ** 0.5).is_integer()
def isPerfectSquare_v3(num: int) -> bool:
"""
Solve with math. Because (x + 1)^2 = x^2 + 2*x + 1. With 2*x + 1 being an odd number.
This math based solution is O(n), and not O(1), so it is elegant, but slow.
"""
x = 1
while num > 0:
num -= x
x += 2
return num == 0
if __name__ == '__main__':
p = 4321 * 4321
q = 4321 * 4319
start = perf_counter_ns()
print(isPerfectSquare(16) is True)
print(isPerfectSquare(14) is False)
print(isPerfectSquare(p) is True)
print(isPerfectSquare(q) is False)
print('v1', perf_counter_ns() - start)
start = perf_counter_ns()
print(isPerfectSquare_v2(16) is True)
print(isPerfectSquare_v2(14) is False)
print(isPerfectSquare_v2(p) is True)
print(isPerfectSquare_v2(q) is False)
print('v2', perf_counter_ns() - start)
start = perf_counter_ns()
print(isPerfectSquare_v3(16) is True)
print(isPerfectSquare_v3(14) is False)
print(isPerfectSquare_v3(p) is True)
print(isPerfectSquare_v3(q) is False)
print('v3', perf_counter_ns() - start)
# last line of code
| true |
71b25a0bb28007079f3a9123c77f1fddd43a7723 | Python | Artemaleks/programmeerimine | /faktoriaal.py | UTF-8 | 120 | 3.3125 | 3 | [] | no_license | n = input("factorial: ")
n = int(n)
fac = 1
i = 0
while i < n:
i += 1
fac = fac * i
print ("V6rdub",fac)
| true |
0d986d0aa1c4565c3b6040b553b4df30f847fa43 | Python | alvinsunyixiao/IlliniRM | /realsense/realsense_mac_legacy/realsense_pointcloud_demo.py | UTF-8 | 3,323 | 2.71875 | 3 | [] | no_license | #!/usr/bin/env python3
import pyrealsense as pyrs
from pyrealsense.constants import rs_option
import time
import matplotlib.pyplot as plt
def point_cloud(depth, cx=320.0, cy=240.0, fx=463.889, fy=463.889):
"""Transform a depth image into a point cloud with one point for each
pixel in the image, using the camera transform for a camera
centred at cx, cy with field of view fx, fy.
depth is a 2-D ndarray with shape (rows, cols) containing
depths from 1 to 254 inclusive. The result is a 3-D array with
shape (rows, cols, 3). Pixels with invalid depth in the input have
NaN for the z-coordinate in the result.
My Changes:
* Author had divided depth by 256 to normalize, I hadn't done that so I removed it.
* Output coordinates are in units of 1m. There is a factor of 500 applied at image capture.
* Author had returned a 3 * 480 * 640 np array. I changed to 3 flat arrays
"""
rows, cols = depth.shape
print(fx, fy, cx, cy)
c, r = np.meshgrid(np.arange(cols), np.arange(rows), sparse=True)
valid = (depth >= 0) & (depth <= 255)
z = np.where(valid, depth, np.nan)
x = np.where(valid, z * (c - cx) / fx, 0)
y = np.where(valid, z * (r - cy) / fy, 0)
return x.flatten(), y.flatten(), z.flatten()
def convert_z16_to_rgb(frame):
'''Python implementation of librealsense make_depth_histogram()
See source code:
https://github.com/IntelRealSense/librealsense/blob/master/examples/example.hpp#L10-L33
'''
# calculate depth histogram
hist, edges = np.histogram(frame, bins=0x10000)
plt.figure(figsize=(16, 4))
plt.subplot(1, 2, 1)
plt.scatter(edges[:-1], hist, s=1)
plt.title('Depth histogram')
# calculate cumulative depth histogram
hist = np.cumsum(hist)
hist -= hist[0]
plt.subplot(1, 2, 2)
plt.scatter(edges[:-1], hist, s=1)
plt.title('Cumulative depth histogram')
plt.tight_layout()
rgb_frame = np.zeros(frame.shape[:2] + (3,), dtype=np.uint8)
zeros = frame==0
non_zeros = frame!=0
f = hist[frame[non_zeros]] * 255 / hist[0xFFFF]
rgb_frame[non_zeros, 0] = f
rgb_frame[non_zeros, 1] = 0
rgb_frame[non_zeros, 2] = 255 - f
rgb_frame[zeros, 0] = 0
rgb_frame[zeros, 1] = 5
rgb_frame[zeros, 2] = 20
return rgb_frame
#print available devices
with pyrs.Service() as serv:
for dev in serv.get_devices():
print(dev)
def main():
with pyrs.Service() as serv:
depth_fps = 90
depth_stream = pyrs.stream.DepthStream(fps=depth_fps)
with serv.Device(streams=(depth_stream,)) as dev:
dev.apply_ivcam_preset(0)
try: # set custom gain/exposure values to obtain good depth image
custom_options = [(rs_option.RS_OPTION_R200_LR_EXPOSURE, 30.0),
(rs_option.RS_OPTION_R200_LR_GAIN, 100.0)]
dev.set_device_options(*zip(*custom_options))
except pyrs.RealsenseError:
pass # options are not available on all devices
time.sleep(1) #wait for the device to initialize
while True:
dev.wait_for_frames()
frame = dev.depth
plt.imshow(frame)
plt.show()
plt.pause(0.01)
if __name__ == '__main__':
main()
| true |
2231836a55670032bfc472fd498a1adfb8359f70 | Python | ChandanBharadwaj/py-practice | /basic/range.py | UTF-8 | 558 | 3.125 | 3 | [] | no_license | '''
Created On : Tue Sep 04 2018
@author: Chandan Bharadwaj
'''
# 2.X
# for higher number range(10000000000) is not efficient. it uses more memory of ram and more time.
# to avoid this we have Xrange.
# Xrange use iterator instance internally and keeps only the current encountered item into the memory.
# 3.X
# for higher number range(10000000000) is not efficient. it uses more memory of ram and more time.
# to avoid this we have Xrange.
# Xrange use iterator instance internally and keeps only the current encountered item into the memory.
print( range(10))
| true |
3d3f7d37fa4eb94a0fabe9a06d3e10cef7f132d2 | Python | shankar7791/MI-10-DevOps | /Personel/Harshalm/Python/Practice/9March/Prog5.py | UTF-8 | 219 | 4 | 4 | [] | no_license | str = "The Movie is Amazing and Wonderful !"
print(str.endwith("and"))
print(str.count("i"))
print(str.capatilize())
print(str.find("is"))
print(str.title())
print(str.rindex("is"))
print(str.rpartition("and"))
| true |
1fa754fd84672a2d7ef0be8ef2fcbf88d6da1df7 | Python | sachinhegde04/DS-and-Algo-Internship | /problems/17th June/ugly numbers.py | UTF-8 | 410 | 3.65625 | 4 | [] | no_license | def uglynumber(n):
ugly=[0]*n
ugly[0]=1
i2=i3=i5=0
next2=2
next3=3
next5=5
for l in range(1, n):
ugly[l]=min(next2,next3,next5)
if ugly[l] == next2:
i2+=1
next2=ugly[i2]*2
if ugly[l] == next3:
i3+=1
next3=ugly[i3]*3
if ugly[l] == next5:
i5+=1
next5=ugly[i5]*5
return ugly[-1]
def main():
n=int(input())
print(uglynumber(n))
if __name__ == '__main__':
main()
| true |
b8ecd6f025b1a3b12273e2ade400cc8eb96c193a | Python | Reims796/List_v2 | /ft_odd_even_separator_lst.py | UTF-8 | 432 | 3.0625 | 3 | [] | no_license | def ft_len(a):
b = 0
for i in a:
a += 1
return a
def ft_odd_even_separator_lst(lst):
a = 0
for i in lst:
a = a + 1
b = a
i = 0
n = []
k = []
x = [[0], [0]]
for i in range(b):
if lst[i] % 2 == 0:
n.append(lst[i])
elif lst[i] % 2 != 0:
k.append(lst[i])
i += 1
x[0] = n
x[1] = k
return x
| true |
43ea28cbb228df149438afb23cafec1b3d64341f | Python | DYF-AI/opencv-x | /SeamlessCloning/normal_versus_mixed_clone.py | UTF-8 | 1,086 | 2.90625 | 3 | [
"Apache-2.0"
] | permissive | # -*- coding:utf-8 -*-
import cv2
import numpy as np
def seamless_mix_normal(image_src:str, image_dst:str):
# 1. 读取src和dst
img_src = cv2.imread(image_src)
img_dst= cv2.imread(image_dst)
# 创建mask
mask = 255 * np.ones(img_dst.shape, img_dst.dtype)
# 将src贴到dst的中心位置
width, height, channels = img_src.shape
center = (int(height/2), int(width/2))
# 对比normal和mixed两种方式
normal_clone = cv2.seamlessClone(img_dst, img_src, mask, center, cv2.NORMAL_CLONE)
mixed_clone = cv2.seamlessClone(img_dst, img_src, mask, center, cv2.MIXED_CLONE)
# 输出结果
# cv2.imwrite("images/opencv-normal-clone-example.jpg", normal_clone)
# cv2.imwrite("images/opencv-mixed-clone-example.jpg", mixed_clone)
cv2.imshow("normal", normal_clone)
cv2.imshow("mixed", mixed_clone)
cv2.waitKey(20000)
def demo():
image_src = "images/wood-texture.jpg"
image_dst = "images/iloveyouticket.jpg"
seamless_mix_normal(image_src, image_dst)
if __name__ == '__main__':
import fire
fire.Fire() | true |
e5cef50daa5226a6209f4b1d87e71ce48d950c01 | Python | oleglr/GB_Python_Algorithms | /lesson3/task5.py | UTF-8 | 976 | 4.3125 | 4 | [] | no_license | # 7. В одномерном массиве целых чисел определить два наименьших элемента. Они могут быть как равны между собой
# (оба являться минимальными), так и различаться.
import random
SIZE = 10
MIN_ITEM = 0
MAX_ITEM = 100
array = [random.randint(MIN_ITEM, MAX_ITEM) for _ in range(SIZE)]
print(f'Исходный массив: \n{array}')
if array[0] < array[1]:
min_1, min_2 = 0, 1
else:
min_1, min_2 = 1, 0
for i in range(2, len(array)):
if array[i] < array[min_1]:
spam = min_1
min_1 = i
if array[spam] < array[min_2]:
min_2 = spam
elif array[i] < array[min_2]:
min_2 = i
print(f'Первое минимальное значение: {array[min_1]} на {min_1} позиции')
print(f'Второе минимальное значение: {array[min_2]} на {min_2} позиции')
| true |
fb3e07f38e8c8b9f4540c723e8b791fafab28474 | Python | quintuskilbourn/Socket-Messenger-Py- | /messenger.py | UTF-8 | 3,244 | 3.515625 | 4 | [] | no_license | import socket
from threading import Thread
from Queue import Queue
q = Queue() #create queue to communicate between threads
#server code
def server():
q.get() #prevents printing and taking input from overlapping (reads 'take input la' from client thread)
ip = raw_input("Enter your IP: ") #server enters ip and port - must happen before client does the same
pnum = int(raw_input("Enter chosen port: "))
myServ = socket.socket(socket.AF_INET, socket.SOCK_STREAM) #create new sock of type SOCK_STREAM(TCP) to accept connection
myServ.bind((ip, pnum)) #binds socket to given server ip and host
myServ.listen(1) #listens for one connection request
myClient, address = myServ.accept() #returns new socket capable of sending and receiving messages
q.put('client connected') #OKAY for client thread to start
print("\nConnected to " + (ip) + " on port " + str(pnum))
while True: #server only receives
recv_msg = myClient.recv(2048) #server receives message
if recv_msg == "exit":
print('\n***Your friend has exited - type \'exit\' to end***\n')
break
print("-- "+(recv_msg))
myClient.close()
def client():
ip = raw_input("Enter your friend's IP: ") #must happen after server has entered socket address
pnum = int(raw_input("Enter chosen port: "))
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) #create new sock of type SOCK_STREAM(TCP)
sock.connect((ip, pnum)) #connects to socket at given address
print("Connected to " + (ip) + " on port " + str(pnum))
q.put('take input la') #allows server to run
while True:
reply = raw_input() #client only sends messages
sock.sendall(reply)
if reply == "exit":
break
sock.close()
cType = ' '
while cType != 'C' and cType != 'W' and cType != "exit": #while loop to take input
cType = raw_input("Exit (exit)\n(C)onnect\n(W)ait for connection\n")
if cType=='W':
q.put('LOL') #dummy q.put so server runs - this q.get is important is user chooses (C)onnect
server = Thread(target=server) #sets 'server' to server function
server.start() #starts server thread for receiving
q.get() #prevents client from being started too early and printing while user is inputting - from 'client connected'
c = Thread(target=client) #creates client thread for sending
c.start()
c.join()
server.join()
elif cType =='C':
c = Thread(target=client) #creates client thread for sending
c.start()
s = Thread(target=server) #starts server thread for receiving
s.start()
c.join()
s.join()
| true |
f238f9a65e8fe0cb09217e84d93727dd47842a35 | Python | stitchEm/stitchEm | /tests/pyvs/html_validator.py | UTF-8 | 848 | 3.265625 | 3 | [
"MIT",
"DOC"
] | permissive | from HTMLParser import HTMLParser
class HTMLValidator(HTMLParser):
"""
super simple html validator : check that each opening tag is closed
with respect to tag hierarchy
"""
def __init__(self):
HTMLParser.__init__(self)
def handle_starttag(self, tag, attrs):
self.tag_stack.append(tag)
def handle_endtag(self, tag):
try:
open_tag = self.tag_stack.pop()
assert open_tag == tag
except IndexError:
raise Exception(
"found an end tag but there was no more opened ones")
except AssertionError:
raise Exception(
"mismatch between opened tag {} and closing tag {}".format(
open_tag, tag))
def feed(self, data):
self.tag_stack = []
HTMLParser.feed(self, data)
| true |
fde1ef022ab3a379b3f4dafeb83f67b5189467e5 | Python | akevinblackwell/Raspberry-Pi-Class-2017 | /Cell1Test.py | UTF-8 | 442 | 3.28125 | 3 | [] | no_license | '''
ButtonPush() - a function to check the status of our TicTacToe buttons
and return a cell number if one is pushed.
ButtonPushSetupSetdown(True/False) - a function that configures the GPIO pins. Call it once with True to start.
Call again when done with False.
'''
import RPi.GPIO as GPIO
GPIO.setmode(GPIO.BOARD)
GPIO.setup(3, GPIO.IN)
while True: # while no button pushed yet,
print(GPIO.input(3))
| true |
40bfaab16cbc0f3224d869ddafcb4ffcf786df30 | Python | dr-dos-ok/Code_Jam_Webscraper | /solutions_python/Problem_96/476.py | UTF-8 | 1,527 | 3.140625 | 3 | [] | no_license |
def solve(case):
N, S, p, rest = case.split(" ", 3)
N = int(N)
S = int(S)
p = int(p)
t = [int(x) for x in rest.split(" ")]
#print N, S, p, t
possible_surprising = S
result = 0
for total in t:
if total <= 1:
# surprising not possible
max_score = total
if max_score >= p:
result += 1
elif total >= 29:
# surprising not possible
max_score = 10
if max_score >= p:
result += 1
elif total % 3 == 1:
# surprising max == !surprising max
max_score = (total / 3) + 1
if max_score >= p:
result += 1
else:
# surprising possible!
max_score = ((total + 1) / 3)
# not surprising case
if max_score >= p:
result += 1
# surprising case
elif possible_surprising and max_score + 1 >= p:
result += 1
possible_surprising -= 1
return result
def main():
input = open('B-large.in')
output = open('output.txt', 'w')
total_case_num = int(input.readline().strip())
for case_num in range(1, total_case_num + 1):
case = input.readline().strip()
result = solve(case)
output.write("Case #%s: %s\n" % (case_num, result))
if __name__ == '__main__':
#solve("2 1 1 8 0")
main() | true |
addf666200d59fbc19f52c1bc4ec73517fdc23df | Python | fengjiran/tensorflow_learning | /Inpainting/patch/celebahq/utils.py | UTF-8 | 13,296 | 2.59375 | 3 | [] | no_license | from __future__ import print_function
import yaml
import numpy as np
import tensorflow as tf
# from tensorflow.python.framework import ops
def check_image(image):
    """Validate that `image` is a 3-channel tensor of rank 3 or 4.

    Adds a runtime assertion on the channel dimension and pins the static
    channel size to 3 so downstream ops can unstack the colors.
    """
    channel_assert = tf.assert_equal(tf.shape(image)[-1], 3, message='image must have 3 color channels')
    with tf.control_dependencies([channel_assert]):
        image = tf.identity(image)

    ndims = image.get_shape().ndims
    if ndims != 3 and ndims != 4:
        raise ValueError('image must be either 3 or 4 dimentions')

    # Pin the last (channel) dimension to 3 in the static shape.
    static_shape = list(image.get_shape())
    static_shape[-1] = 3
    image.set_shape(static_shape)
    return image
def deprocess(image):
    """Map pixel values from the network range [-1, 1] back to [0, 1]."""
    shifted = image + 1.0
    return shifted / 2.0
def rgb2lab(srgb):
    """Convert an sRGB image tensor (values in [0, 1]) to CIE-Lab.

    Pipeline: undo sRGB gamma -> linear RGB -> XYZ (D65 white point) ->
    f(x) intermediate -> (L, a, b).

    Args:
        srgb: 3-D or 4-D float tensor with 3 color channels in [0, 1].

    Returns:
        Tensor of the same shape as `srgb` holding Lab values.
    """
    srgb = check_image(srgb)
    srgb_pixels = tf.reshape(srgb, [-1, 3])

    # Undo the sRGB gamma curve (piecewise linear / exponential).
    linear_mask = tf.cast(srgb_pixels <= 0.04045, dtype=tf.float32)
    exponential_mask = tf.cast(srgb_pixels > 0.04045, dtype=tf.float32)
    rgb_pixels = (srgb_pixels / 12.92 * linear_mask) + (((srgb_pixels + 0.055) / 1.055)**2.4) * exponential_mask

    # Linear RGB -> XYZ.
    rgb2xyz = tf.constant([
        [0.412453, 0.212671, 0.019334],
        [0.357580, 0.715160, 0.119193],
        [0.180423, 0.072169, 0.950227]
    ])
    xyz_pixels = tf.matmul(rgb_pixels, rgb2xyz)

    # Normalize by the D65 reference white point.
    xyz_normalized_pixels = tf.multiply(xyz_pixels, [1.0 / 0.950456, 1.0, 1.0 / 1.088754])

    epsilon = 6.0 / 29.0
    linear_mask = tf.cast(xyz_normalized_pixels <= (epsilon**3), dtype=tf.float32)
    exponential_mask = tf.cast(xyz_normalized_pixels > (epsilon**3), dtype=tf.float32)
    fxfyfz_pixels = (xyz_normalized_pixels / (3.0 * epsilon**2) + 4.0 / 29.0) * \
        linear_mask + (xyz_normalized_pixels ** (1.0 / 3.0)) * exponential_mask

    # BUG FIX: the original function stopped here and implicitly returned
    # None.  Complete the conversion: (fx, fy, fz) -> (L, a, b).
    fxfyfz_to_lab = tf.constant([
        #   L        a       b
        [0.0, 500.0, 0.0],       # fx
        [116.0, -500.0, 200.0],  # fy
        [0.0, 0.0, -200.0],      # fz
    ])
    lab_pixels = tf.matmul(fxfyfz_pixels, fxfyfz_to_lab) + tf.constant([-16.0, 0.0, 0.0])
    return tf.reshape(lab_pixels, tf.shape(srgb))
def spatial_discounting_mask(cfg):
    """Build a [1, hole_h, hole_w, 1] spatial-discount weight mask.

    When `cfg['discount_mask']` is set, each pixel is weighted by
    gamma ** (distance to the nearest hole border); otherwise the mask
    is all ones.
    """
    gamma = cfg['spatial_discount_gamma']
    hole_h = cfg['hole_height']
    hole_w = cfg['hole_width']
    shape = [1, hole_h, hole_w, 1]

    if cfg['discount_mask']:
        values = np.ones((hole_h, hole_w))
        for row in range(hole_h):
            for col in range(hole_w):
                # Decay by distance to the closest edge of the hole.
                values[row, col] = gamma**min(row, col, hole_h - row, hole_w - col)
        # Add leading batch and trailing channel axes.
        values = values[np.newaxis, :, :, np.newaxis]
    else:
        values = np.ones(shape)

    return tf.constant(values, dtype=tf.float32, shape=shape)
def random_bbox(cfg):
    """Sample one random hole bounding box per batch element.

    Returns a list of `batch_size` tuples (top, left, height, width),
    where top/left are int32 scalar tensors drawn uniformly so the hole
    fits inside the image.
    """
    img_h = cfg['img_height']
    img_w = cfg['img_width']
    hole_h = cfg['hole_height']
    hole_w = cfg['hole_width']

    boxes = []
    for _ in range(cfg['batch_size']):
        top = tf.random_uniform([], minval=0, maxval=img_h - hole_h, dtype=tf.int32)
        left = tf.random_uniform([], minval=0, maxval=img_w - hole_w, dtype=tf.int32)
        boxes.append((top, left, tf.constant(hole_h), tf.constant(hole_w)))
    return boxes
def bbox2mask(bbox, cfg):
    """Turn per-sample bounding boxes into a binary hole-mask tensor.

    Args:
        bbox: list of (top, left, height, width) tuples, one per sample.
        cfg: config dict providing 'img_height' and 'img_width'.

    Returns:
        tf.Tensor of shape [batch, H, W, 1]; 1 inside each hole, 0 outside.
    """
    img_h = cfg['img_height']
    img_w = cfg['img_width']

    masks = []
    for (top, left, h, w) in bbox:
        # Pad a block of ones out to the full image size.
        hole = tf.ones((h, w), dtype=tf.float32)
        padded = tf.pad(tensor=hole,
                        paddings=[[top, img_h - h - top],
                                  [left, img_w - w - left]])
        # Add batch and channel axes: (H, W) -> (1, H, W, 1).
        masks.append(tf.expand_dims(tf.expand_dims(padded, 0), -1))
    return tf.concat(masks, axis=0)
def local_patch(x, bbox):
    """Crop the per-sample local patch described by each bounding box.

    Args:
        x: batched input tensor (first dimension is batch size).
        bbox: list of (top, left, height, width) tuples, one per sample.

    Returns:
        tf.Tensor of stacked patches, shape [batch, h, w, C].
    """
    batch_size = x.get_shape().as_list()[0]
    assert batch_size == len(bbox)

    patches = []
    for idx, (top, left, h, w) in enumerate(bbox):
        cropped = tf.image.crop_to_bounding_box(x[idx], top, left, h, w)
        patches.append(tf.expand_dims(cropped, 0))
    return tf.concat(patches, axis=0)
def gan_wgan_loss(pos, neg):
    """WGAN losses from critic scores on real (`pos`) and fake (`neg`) data.

    Returns:
        (generator_loss, discriminator_loss) as scalar tensors.
    """
    gen_loss = -tf.reduce_mean(neg)
    disc_loss = tf.reduce_mean(neg - pos)
    return gen_loss, disc_loss
def random_interpolates(x, y, alpha=None):
    """Interpolate between paired samples `x` and `y` (for WGAN-GP).

    Args:
        x: tensor whose first dimension is batch_size.
        y: tensor of the same shape as `x`.
        alpha: optional [batch_size, 1] mixing weights; sampled uniformly
            in [0, 1) per sample when omitted.

    Returns:
        Tensor of the same shape as `x` with per-sample interpolations.
    """
    orig_shape = x.get_shape().as_list()
    flat_x = tf.reshape(x, [orig_shape[0], -1])
    flat_y = tf.reshape(y, [orig_shape[0], -1])
    if alpha is None:
        alpha = tf.random_uniform(shape=[orig_shape[0], 1])
    mixed = flat_x + alpha * (flat_y - flat_x)
    return tf.reshape(mixed, orig_shape)
def gradient_penalty(x, y, mask=None, norm=1.):
    """Two-sided gradient penalty: mean((|grad| - norm)^2) / norm^2.

    `mask` optionally restricts which elements of the gradient contribute
    to the per-sample slope (defaults to all ones).
    """
    grads = tf.gradients(y, x)[0]
    if mask is None:
        mask = tf.ones_like(grads)
    slopes = tf.sqrt(tf.reduce_mean(tf.square(grads) * mask, axis=[1, 2, 3]))
    penalty = tf.square(slopes - norm) / (norm**2)
    return tf.reduce_mean(penalty)
def lipschitz_penalty(x, y, mask=None, norm=1.):
    """One-sided penalty: only slopes exceeding `norm` are penalized.

    Same structure as `gradient_penalty` but clamps (slope - norm) at
    zero with relu, so gradients smaller than `norm` incur no cost.
    """
    grads = tf.gradients(y, x)[0]
    if mask is None:
        mask = tf.ones_like(grads)
    slopes = tf.sqrt(tf.reduce_mean(tf.square(grads) * mask, axis=[1, 2, 3]))
    excess = tf.nn.relu(slopes - norm)
    return tf.reduce_mean(tf.square(excess))
def images_summary(images, name, max_outs):
    """Add a TensorBoard image summary for `images`.

    **Note** that `images` is expected in [-1, 1]; it is rescaled to
    [0, 1] before being written.

    :param images: images tensor (in NHWC format), values in [-1, 1]
    :param name: name of the image summary
    :param max_outs: max_outputs for the image summary
    :return: None
    """
    rescaled = (images + 1) / 2.
    tf.summary.image(name, rescaled, max_outs)
def gradients_summary(y, x, norm=tf.abs, name='gradients_y_wrt_x'):
    """Log the mean `norm` of d(y)/d(x) as a scalar summary."""
    mean_grad = tf.reduce_mean(norm(tf.gradients(y, x)))
    tf.summary.scalar(name, mean_grad)
def instance_norm(x, name="instance_norm"):
with tf.variable_scope(name):
depth = x.get_shape()[3]
scale = tf.get_variable("scale", [depth],
initializer=tf.random_normal_initializer(1.0, 0.02, dtype=tf.float32))
offset = tf.get_variable("offset", [depth],
initializer=tf.constant_initializer(0.0))
mean, variance = tf.nn.moments(x, axes=[1, 2], keep_dims=True)
epsilon = 1e-5
inv = tf.rsqrt(variance + epsilon)
normalized = (x - mean) * inv
return scale * normalized + offset
# Default kernel initializer/regularizer shared by the conv/deconv helpers below.
# weight_init = tf.truncated_normal_initializer(mean=0.0, stddev=0.02)
weight_init = tf.contrib.layers.xavier_initializer_conv2d()
weight_regularizer = None
def atrous_conv(x, channels, kernel=3, dilation=1, use_bias=True, sn=True, name='conv_0'):
    """Dilated (atrous) 2-D convolution with 'SAME' padding.

    When `sn` is True the kernel is spectrally normalized and the op is
    built from raw tf.nn primitives; otherwise tf.layers.conv2d is used.
    """
    with tf.variable_scope(name):
        if sn:
            w = tf.get_variable("kernel", shape=[kernel, kernel, x.get_shape()[-1], channels],
                                initializer=weight_init,
                                regularizer=weight_regularizer)
            # Bias variable is created unconditionally (matches checkpoint
            # layout); it is only applied when use_bias is set.
            bias = tf.get_variable("bias", [channels], initializer=tf.constant_initializer(0.0))
            out = tf.nn.atrous_conv2d(value=x,
                                      filters=spectral_norm(w),
                                      rate=dilation,
                                      padding='SAME')
            if use_bias:
                out = tf.nn.bias_add(out, bias)
        else:
            out = tf.layers.conv2d(inputs=x, filters=channels,
                                   kernel_size=kernel, kernel_initializer=weight_init,
                                   kernel_regularizer=weight_regularizer,
                                   use_bias=use_bias, dilation_rate=dilation)
        return out
def conv(x, channels, kernel=4, stride=1, dilation=1,
         pad=0, pad_type='zero', use_bias=True, sn=True, name='conv_0'):
    """2-D convolution with explicit padding and optional spectral normalization.

    pad_type 'zero' pads with zeros and 'reflect' pads by reflection; any other
    value applies no explicit padding. With sn=True the kernel is built by hand
    and spectrally normalized; otherwise tf.layers.conv2d is used.
    """
    with tf.variable_scope(name):
        if pad_type in ('zero', 'reflect'):
            paddings = [[0, 0], [pad, pad], [pad, pad], [0, 0]]
            pad_mode = 'REFLECT' if pad_type == 'reflect' else 'CONSTANT'
            x = tf.pad(x, paddings, mode=pad_mode)
        if sn:
            kernel_var = tf.get_variable(
                "kernel", shape=[kernel, kernel, x.get_shape()[-1], channels],
                initializer=weight_init,
                regularizer=weight_regularizer)
            # Bias variable is created unconditionally (matches checkpoint layout).
            bias = tf.get_variable("bias", [channels], initializer=tf.constant_initializer(0.0))
            x = tf.nn.conv2d(input=x,
                             filter=spectral_norm(kernel_var),
                             strides=[1, stride, stride, 1],
                             dilations=[1, dilation, dilation, 1],
                             padding='VALID',
                             data_format='NHWC')
            if use_bias:
                x = tf.nn.bias_add(x, bias)
        else:
            x = tf.layers.conv2d(inputs=x, filters=channels,
                                 kernel_size=kernel, kernel_initializer=weight_init,
                                 kernel_regularizer=weight_regularizer,
                                 strides=stride, use_bias=use_bias)
        return x
def deconv(x, channels, kernel=4, stride=1, use_bias=True, sn=True, name='deconv_0'):
    """Transposed 2-D convolution (upsampling by `stride`), optionally spectrally normalized.

    Computes a static output shape from the input, so the input's batch and
    spatial dimensions must be fully defined at graph-construction time.
    """
    with tf.variable_scope(name):
        x_shape = x.get_shape().as_list()
        # NOTE(review): x_shape[0] is the static batch size — this breaks if the
        # batch dimension is None; confirm callers always use fixed batch shapes.
        output_shape = [x_shape[0], x_shape[1] * stride, x_shape[2] * stride, channels]
        if sn:
            # conv2d_transpose kernels are [k, k, out_channels, in_channels].
            w = tf.get_variable("kernel", shape=[kernel, kernel, channels, x.get_shape()[-1]],
                                initializer=weight_init,
                                regularizer=weight_regularizer)
            x = tf.nn.conv2d_transpose(x, filter=spectral_norm(w),
                                       output_shape=output_shape,
                                       strides=[1, stride, stride, 1],
                                       padding='SAME')
            if use_bias:
                bias = tf.get_variable("bias", [channels], initializer=tf.constant_initializer(0.0))
                x = tf.nn.bias_add(x, bias)
        else:
            x = tf.layers.conv2d_transpose(inputs=x, filters=channels,
                                           kernel_size=kernel,
                                           kernel_initializer=weight_init,
                                           kernel_regularizer=weight_regularizer,
                                           strides=stride,
                                           padding='SAME',
                                           use_bias=use_bias)
        return x
def resnet_block(x, out_channels, dilation=1, name='resnet_block'):
    """Dilated residual block: atrous conv -> IN -> ReLU -> conv -> IN, plus skip connection."""
    with tf.variable_scope(name):
        out = atrous_conv(x, out_channels, kernel=3, dilation=dilation, name='conv1')
        out = instance_norm(out, name='in1')
        out = tf.nn.relu(out)
        out = conv(out, out_channels, kernel=3, stride=1, dilation=1,
                   pad=1, pad_type='reflect', name='conv2')
        out = instance_norm(out, name='in2')
        return x + out
def spectral_norm(w, iteration=1):
    """Divide weight `w` by its largest singular value, estimated by power iteration.

    Keeps a persistent, non-trainable left singular vector `u` that is updated
    as a side effect each time the normalized weight is evaluated.
    """
    w_shape = w.shape.as_list()
    w_mat = tf.reshape(w, [-1, w_shape[-1]])
    u = tf.get_variable('u', [1, w_shape[-1]],
                        initializer=tf.random_normal_initializer(),
                        trainable=False)
    u_hat = u
    v_hat = None
    for _ in range(iteration):
        v_hat = tf.nn.l2_normalize(tf.matmul(u_hat, tf.transpose(w_mat)))
        u_hat = tf.nn.l2_normalize(tf.matmul(v_hat, w_mat))
    # Power-iteration vectors are treated as constants for the gradient.
    u_hat = tf.stop_gradient(u_hat)
    v_hat = tf.stop_gradient(v_hat)
    sigma = tf.matmul(tf.matmul(v_hat, w_mat), tf.transpose(u_hat))
    with tf.control_dependencies([u.assign(u_hat)]):
        w_normalized = w_mat / sigma
    return tf.reshape(w_normalized, w_shape)
if __name__ == '__main__':
    # Smoke test: build a random bbox/mask/patch pipeline and run it once.
    with open('config.yaml', 'r') as f:
        # yaml.load without an explicit Loader is deprecated and can execute
        # arbitrary object constructors; safe_load restricts the result to
        # plain YAML types, which is all a config file needs.
        cfg = yaml.safe_load(f)
    x = tf.random_uniform([cfg['batch_size'], 256, 256, 3])
    bbox = random_bbox(cfg)
    patches = local_patch(x, bbox)
    mask = bbox2mask(bbox, cfg)
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True  # don't reserve all GPU memory up front
    with tf.Session(config=config) as sess:
        bbox, mask, patches = sess.run([bbox, mask, patches])
        print(bbox)
        print(mask.shape)
        print(patches.shape)
| true |
c67e7296e776d7f7f6cfdb78730a651386252822 | Python | zidanlagaronda/Zidane | /Tugas_ProjeckPCD/percobaan1.py | UTF-8 | 239 | 2.609375 | 3 | [] | no_license | import cv2
import numpy as np
# Load the image from disk (BGR order, as OpenCV always returns).
img = cv2.imread("Dane.jpg")
# Convert to a single-channel grayscale copy.
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Show both versions side by side in their own windows.
cv2.imshow("gambar Dane original", img)
cv2.imshow("gambar Dane grayscale", gray)
# Block until any key is pressed, then tear the windows down.
cv2.waitKey(0)
cv2.destroyAllWindows()
| true |
28c5eb942814d60fed84f1de7b9c883174ba3c01 | Python | safia88/GhostPost | /ghost/models.py | UTF-8 | 666 | 2.71875 | 3 | [
"MIT"
] | permissive | """
Ghost: Boasts, Roasts
Boolean if boast or roast
Charfield for content of post
integer field for up and down votes
datetime field for submit time
"""
from django.db import models
class Post(models.Model):
    """An anonymous GhostPost entry: either a boast or a roast, with vote tallies."""
    # True for a boast, False for a roast.
    is_boast = models.BooleanField(default=True)
    # Body of the post (tweet-sized).
    content = models.CharField(max_length=280)
    up_votes = models.IntegerField(default=0)
    down_votes = models.IntegerField(default=0)
    # NOTE(review): total_votes is never derived from up/down votes here — it
    # stays 0 unless some view updates it; confirm whether it is still used.
    total_votes = models.IntegerField(default=0)
    # Set once when the row is created.
    submit_time = models.DateTimeField(auto_now_add=True, blank=True)
    # Short token letting the anonymous author manage their own post.
    secret_key = models.CharField(max_length=6)
    @property
    def count(self):
        # Net score, computed on the fly from the two counters.
        return self.up_votes - self.down_votes
| true |
ab0012ffafe259aa18c731c249cc9c9c9bbf178f | Python | jiadaizhao/LeetCode | /1001-1100/1018-Binary Prefix Divisible By 5/1018-Binary Prefix Divisible By 5.py | UTF-8 | 278 | 2.671875 | 3 | [
"MIT"
] | permissive | class Solution:
def prefixesDivBy5(self, A: List[int]) -> List[bool]:
curr = 0
result = [False] * len(A)
for i, a in enumerate(A):
curr = (curr * 2 + a) % 5
if curr == 0:
result[i] = True
return result
| true |
1df4b6285c15fc346a8fa13bb8b80163591c75ed | Python | StvnLm/100-days-of-python | /Day 1-3/Factorial.py | UTF-8 | 115 | 3.828125 | 4 | [] | no_license | def factorial(n):
    # NOTE(review): this is meant to print factorials, but the loop body
    # OVERWRITES `product` with x * n on every pass instead of accumulating
    # (e.g. product = product * (x + 1)), and range(n) starts at 0 — so the
    # output is 0, n, 2n, ..., (n-1)*n rather than 1!, 2!, ..., n!.
    product = 1
    for x in range(n):
        product = x * n  # BUG(review): should multiply into the running product
        print(product)
# Demo call: prints the (incorrect) sequence for n = 10.
factorial(10)
| true |
84bf95a45f6e6d96d998e1a1a2b0d75b5312db20 | Python | kauemenezes/Mestrado | /RNA/python-perceptron-one-layer/main.py | UTF-8 | 889 | 2.640625 | 3 | [] | no_license | import numpy as np
from perceptron import Perceptron
import datasets
# Load the dermatology dataset; the last column holds the (one-hot) class label.
dataset = datasets.get_dermatology_dataset()
hit_rates = []
no_of_attributes = dataset.shape[1] - 1
# Number of classes is the length of the label vector in the last column.
no_of_classes = len(dataset[0, no_of_attributes])
perceptron = Perceptron(no_of_classes, no_of_attributes, 'logistic')
# Run 20 independent realizations (fresh train/test split + training each time)
# and collect the hit rate of each one.
for j in range(0, 20):
    print("realization %d" % j)
    train_X, train_y, test_X, test_y = perceptron.train_test_split(dataset)
    perceptron.train(train_X, train_y)
    predictions = perceptron.predict(test_X)
    hit_rates.append(perceptron.evaluate(test_y, predictions))
    print(perceptron.confusion_matrix(test_y, predictions))
    # Perceptron.plot_decision_boundaries(train_X, train_y, test_X, test_y, j)
# Summary statistics over all realizations.
print('hit rates: {}'.format(hit_rates))
print('accuracy: {}'.format(np.mean(hit_rates)))
print('std: {}'.format(np.std(hit_rates)))
# Perceptron.show_plot_decision_boundaries()
| true |
e956d55a10ef8a9d0c043a3e8728f2bf2bc2c2b1 | Python | mj596/blazarpp | /scripts/calcElectronEnDiss_todo/calcElectronEnDiss.py | UTF-8 | 1,328 | 2.625 | 3 | [] | no_license | import numpy as np
import sys
import math
import matplotlib.pyplot as plt
def read_data(filename):
    """Read a two-column ASCII file ("x y" per line, space-separated).

    Args:
        filename: path to the data file.

    Returns:
        numpy array of shape (2, n): row 0 holds the x values, row 1 the y values.
    """
    x = []
    y = []
    # `with` guarantees the handle is closed even if a line fails to parse
    # (the original open()/close() pair leaked the handle on error).
    with open(filename, 'r') as file:
        for line in file:
            cols = line.split(" ")
            x.append(float(cols[0]))
            y.append(float(cols[1]))
    return np.array([x, y])
#def getEff( idin, idout ):
#
# ein = read_data( 'Injection_'+str(id) )
# eout = read_data( 'Ngamma_'+str(id) )
# ein[1] *= ein[0]*ein[0]
# eout[1] *= eout[0]*eout[0]
#
# def calcEff( xin, xout ):
# return np.trapz( xout[1] )/np.trapz( xin[1] )
## return np.trapz( xout[1], x=xout[0] )/np.trapz( xin[1], x=xin[0] )
#
# return calcEff( ein, eout )
#
#def plot_eff( ):
# import os
# r=[]
# eff=[]
# files = [f for f in os.listdir('.') if os.path.isfile(f)]
# for file in files:
# line = file.split("_")
# if line[0] == 'Ngamma':
# print line[1], getEff( line[1] )
# r.append( line[1] )
# eff.append( getEff( line[1] ) )
#
# plt.plot(r,eff,'*')
# plt.show()
#
#plot_eff()
#
##id=1
##ein = read_data( 'Injection_'+str(id) )
##eout = read_data( 'Ngamma_'+str(id) )
##ein[1] *= ein[0]*ein[0]
##eout[1] *= eout[0]*eout[0]
##plt.loglog(ein[0],ein[1])
##plt.loglog(eout[0],eout[1])
##plt.show()
| true |
63563d9f89e1f4f12352fa63d97c472374456fae | Python | gjacopo/test-huggin | /scripts/article_analytics.py | UTF-8 | 2,513 | 3.390625 | 3 | [] | no_license | #!/usr/bin/env python3
# note the line above is useful to launch the script from the command line, e.g. using
# python -m ...
"""
Document the script, for instance:
Parse in the KNIME output an article table from the main fields of an online article parsed through
the KNIME input.
"""
from six import string_types
from collections.abc import Sequence
import pandas as pd
try:
__name__
except NameError:
__name__ = "__main__"
try:
from newspaper import Article
except ImportError:
raise ImportError ('package newspaper not available')
DEF_URLS = ['http', 'https', 'ftp']
DEF_FIELDS = [ 'authors', 'publish_date', 'text','top_image', 'keywords', 'summary']
def art_to_table(url, fields=None):
    """Create an article table from the main fields of an online article parsed through its URL.

    >>> table = art_to_table(url, fields = None)

    Arguments
    ---------
    url : str
        URL of the online article; should start with any string from `DEF_URLS`.
    fields : list[str]
        fields of the article to parse in the output table; when `None`, set to the default
        list of fields: `DEF_FIELDS`.

    Returns
    -------
    table: pd.DataFrame
        A data frame with the `fields` of the article as columns.

    Raises
    ------
    TypeError if url/fields have the wrong type; IOError for an unknown URL
    scheme, a download/parse failure, or an unknown article field.
    """
    # --- input validation --------------------------------------------------
    if not isinstance(url, str):
        raise TypeError('wrong type for URL')
    # BUG fix: the original assert had unbalanced brackets and called
    # isinstance() with a single argument; the intent is "url starts with a
    # known scheme prefix".
    if not any(url.startswith(pre) for pre in DEF_URLS):
        raise IOError('URL type not recognised')
    if fields is not None and not isinstance(fields, Sequence):
        raise TypeError('wrong type for fields')
    if fields is None:
        fields = DEF_FIELDS
    # --- fetch and parse the article ---------------------------------------
    try:
        article = Article(url)
    except Exception:
        raise IOError('Error creating Article instance from URL')
    try:
        article.download()
        article.parse()
        article.nlp()  # keyword / summary extraction
    except Exception:
        raise IOError('Error processing article')
    # --- build the output table --------------------------------------------
    d = {}
    for f in fields:
        try:
            # BUG fix: original `d.update({f: [getattr(article, f)}])` had
            # mismatched brackets (SyntaxError).
            d[f] = [getattr(article, f)]
        except AttributeError:
            raise IOError('Unknown field %s of article' % f)
    return pd.DataFrame(d)
if __name__ == "__main__":
output_table_1 = art_to_table(input_table_1['text'][0])
| true |
a001a0e01478554a6e9f1375b7fb47c56d3c04db | Python | leoleezoom/omnipod_rf | /print_packet.py | UTF-8 | 1,634 | 2.859375 | 3 | [] | no_license | #!/usr/bin/env python2
import argparse
import binascii
import json
def main(options=None):
    """Parse an Omnipod RF packet given as a hex string and print its fields.

    Python 2 only (uses print statements and str.decode("hex")). The packet
    layout decoded here: 4-byte pod address, one byte holding the packet type
    (top 3 bits) and sequence number (low 5 bits), a second 4-byte address,
    then optionally an unknown byte, message type, body, and a trailing CRC.
    """
    parser = argparse.ArgumentParser(description='Print out structured version of packet (given as a hex string).')
    parser.add_argument('data', metavar='data', type=str, nargs='+',
                        help='data as a hex string')
    parser.add_argument('--json', action='store_true',
                        help='print as json (default: text line)')
    args = parser.parse_args()
    hex_str = args.data[0]
    pod_address_1 = hex_str[0:8]
    # Fifth byte: packet type in the top 3 bits, sequence in the low 5 bits.
    byte5 = ord(hex_str[8:10].decode("hex"))
    packet_type = byte5 >> 5
    sequence = byte5 & 0b11111
    pod_address_2 = hex_str[10:18]
    body = ""
    message_type = ""
    # Longer packets carry a message: unknown byte, message type, body, CRC.
    if len(hex_str) > 20:
        unknown = hex_str[18:20]
        message_type = ord(hex_str[20:22].decode("hex"))
        body = hex_str[22:-2]
    crc = ord(hex_str[-2:].decode("hex"))
    # NOTE(review): for short packets message_type stays "" — the %02x format
    # in the text branch below would then raise; confirm short packets are
    # only ever printed via --json.
    # attr style
    #print "addr1=%s addr2=%s" % (addr1, addr2)
    # compact style:
    if args.json:
        obj = {
            "pod_address_1": pod_address_1,
            "packet_type": packet_type,
            "sequence": sequence,
            "pod_address_2": pod_address_2,
            "message_type": message_type,
            "body": body,
            "crc": crc,
            "raw_packet": hex_str,
        }
        print json.dumps(obj, sort_keys=True,indent=4, separators=(',', ': '))
    else:
        print "ID1:%s PTYPE:%s SEQ:%d ID2:%s MTYPE:%02x BODY:%s CRC:%02x" % (pod_address_1, format(packet_type, '#05b')[2:], sequence, pod_address_2, message_type, body, crc)
if __name__ == '__main__':
main()
| true |
e8ae9edf6ab994b6d389c71470d3eedf040bcdcc | Python | haominhe/Undergraduate | /CIS210 Computer Science I/Projects/p1/jumbler.py | UTF-8 | 2,173 | 4.0625 | 4 | [
"MIT"
] | permissive | # Solve a jumble (anagram) by checking against each word in a dictionary
# Fall 2014 Project 1, Part 2
# Authors: Haomin He
# References: Consulted with tutor Sara and Rickie.
#
# Usage: python jumbler.py jumbleword wordlist.txt
#
import argparse
def jumbler(jumble, wordlist):
    """Print every word in wordlist that is an anagram of jumble.

    Args:
        jumble: the scrambled word, as a string
        wordlist: an iterable of words (a list, or an open text file)

    Returns: None
    Effects: prints each matching word on its own line, then a summary
        count ("N matches in M lines"), or "No matches" when nothing matched
    """
    target = sorted(jumble)  # canonical form shared by all anagrams
    match_count = 0
    line_count = 0
    for entry in wordlist:
        candidate = entry.strip()  # drop surrounding spaces / trailing newline
        if sorted(candidate) == target:
            print(candidate)
            match_count += 1
        line_count += 1
    if match_count:
        print("{} matches in {} lines".format(match_count, line_count))
    else:
        print('No matches')
    return None
def run_tests():
    """Exercise jumbler against a small fixed word list.

    Args: none
    Returns: nothing
    Effects: prints the expectation for each case followed by jumbler's output
    """
    words = ["alpha", "beta", "sister", "gamma", "resist", "theta"]
    print("Expecting match on alpha:")
    jumbler("phaal", words)
    print("Expecting matches on sister and resist:")
    jumbler("tiress", words)
    print("Expecting no matches:")
    jumbler("alxha", words)
def main():
    """Command-line entry point: parse the jumble and word-list arguments,
    then hand them to the solver."""
    parser = argparse.ArgumentParser(description="Solve a jumble (anagram)")
    parser.add_argument("jumble", type=str, help="Jumbled word (anagram)")
    parser.add_argument('wordlist', type=argparse.FileType('r'),
                        help="A text file containing dictionary words, one word per line.")
    args = parser.parse_args()
    jumbler(args.jumble, args.wordlist)
if __name__ == "__main__":
#run_tests()
main()
| true |
d7739653779c61c1bfba3f2465423bb8f351b8cd | Python | iam-smjamilsagar/Digital-Time | /main.py | UTF-8 | 957 | 3.0625 | 3 | [] | no_license | import speech_recognition as sr
import pyttsx3
import datetime
listener = sr.Recognizer()
alexa = pyttsx3.init()
voices = alexa.getProperty('voices')
alexa.setProperty('voice', voices[1].id)
def talk(text):
    """Speak the given text through the pyttsx3 engine (blocks until done)."""
    alexa.say(text)
    alexa.runAndWait()
def take_command():
    """Listen on the default microphone and return the recognized command.

    The recognized text is lower-cased and a 'alexa' wake word, if present,
    is stripped out. Returns an empty string when listening or recognition
    fails.
    """
    # BUG fix: `command` was only assigned inside the try block, so any
    # microphone/recognition error led to an UnboundLocalError on return.
    command = ''
    try:
        with sr.Microphone() as source:
            print('Your device is listening, Please speak...')
            voice = listener.listen(source)
            command = listener.recognize_google(voice)
            command = command.lower()
            if 'alexa' in command:
                command = command.replace('alexa', '')
    except Exception:
        # Best-effort: swallow recognition errors and fall through to the
        # (possibly empty) command instead of crashing the voice loop.
        pass
    return command
def run_alexa():
    """Handle one voice command: announce the current time, or ask again."""
    command = take_command()
    if 'time' not in command:
        print('Did not get it. Can you please tell it again')
        return
    time = datetime.datetime.now().strftime('%I:%M %p')
    print('Current time is: ' + time)
    talk('Current time is: ' + time)
run_alexa() | true |
043f4a35bcb80f35ebe70d56f860cfeefc35e047 | Python | ksuarz/hundred-days | /text/piglatin.py | UTF-8 | 815 | 4.03125 | 4 | [] | no_license | #!/usr/bin/env python
'''
Converts words to pig latin. This is a very naive implementation. All
non-alphanumeric, non-whitespace characters are treated as part of a word.
'''
import sys
if len(sys.argv) < 2:
    print 'Usage: piglatin.py [TEXT]'
else:
    # First, build up our vowels and consonants
    start, end = ord('a'), ord('z') + 1
    vowels = 'aeiou'
    consonants = [chr(i) for i in range(start, end) if chr(i) not in vowels]
    # Now, do some text manipulation
    # All arguments are joined into one lower-cased text and translated
    # word by word; words starting with a non-letter pass through unchanged.
    text = ' '.join(sys.argv[1:]).lower().strip()
    result = []
    for word in text.split():
        c = word[0]
        if c in consonants:
            # Consonant start: move it to the end with an '-ay' suffix.
            result.append(word[1:] + '-' + c + 'ay')
        elif c in vowels:
            # Vowel start: just append 'way'.
            result.append(word + 'way')
        else:
            # Not a letter (punctuation, digit): leave the word as-is.
            result.append(word)
    print ' '.join(result)
| true |
4cb0518d4f238ed95d8a5524573abdb39df6c6cd | Python | Ashw0rld/rpg-code | /main.py | UTF-8 | 7,874 | 3.15625 | 3 | [] | no_license | from classes.game import Person, bcolors
from classes.magic import Spell
from classes.inventory import Item
import random
# create black magic
fire = Spell("fire", 10, 100, "black")
thunder = Spell("thunder", 10, 100, "black")
blizzard = Spell("blizzard", 10, 100, "black")
meteor = Spell("meteor", 60, 200, "black")
quake = Spell("quake", 14, 140, "black")
# create white magic
cure = Spell("cure", 12, 120, "white")
cura = Spell("cura", 18, 180, "white")
#create some items
potion = Item("Potion", "potion", "Heals for 50 HP.", 50)
hipotion = Item("HI-Potion", "potion", "Heals for 100 HP.", 100)
superpotion = Item("Super Potion", "potion", "Heals for 500 HP.", 500)
elixer = Item("Elixer", "elixer", "Fully restores HP/MP of one party member.", 9999)
megaelixer = Item("Mega Elixer", "elixer", "Fully restores party's HP/MP.", 9999)
grenade = Item("Grenade", "attack", "Deals for 500 damage.", 500)
player_spells = [fire, thunder, blizzard, meteor, quake, cura, cure]
player_items = [{'item': potion, 'quantity': 15}, {'item': hipotion, 'quantity': 5}, {'item': superpotion, 'quantity': 5},
{'item': elixer, 'quantity': 5}, {'item': megaelixer, 'quantity': 2}, {'item': grenade, 'quantity': 5}]
# instantiate people
player1 = Person("Ash", 461, 65, 60, 34, player_spells, player_items)
player2 = Person("Kun", 460, 65, 60, 34, player_spells, player_items)
player3 = Person("Anv", 416, 65, 60, 34, player_spells, player_items)
players = [player1, player2, player3]
enemy1 = Person("Gul", 700, 65, 45, 23, player_spells, player_items)
enemy2 = Person("Pnw", 700, 65, 45, 23, player_spells, player_items)
enemies = [enemy1, enemy2]
running = True
print(bcolors.FAIL + bcolors.BOLD + 'AN ENEMY ATTACKS!!!' + bcolors.ENDC)
while running:
print('===================')
print(bcolors.BOLD + "NAME HP MP")
for player in players:
player.get_stats()
for enemy in enemies:
enemy.get_enemy_status()
for player in players:
player.choose_action()
choice = input('Choose action :- ')
print('You chose ' + player.get_action(int(choice)))
if int(choice) == 1:
dmg = player.gen_damage()
enemy = player.choose_target(enemies)
enemies[enemy].take_damage(dmg)
print('You attacked ' + enemies[enemy].name.replace(" ", "") + ' for ' + str(dmg) + ' point.')
if enemies[enemy].get_hp() == 0:
print(enemies[enemy].name.replace(" ", "") + " has died.")
del enemies[enemy]
elif int(choice) == 2:
player.choose_magic()
mag = int(input('Choose magic :- ')) - 1
if mag == -1:
continue
spell = player.magic[mag]
mag_dmg = spell.gen_dmg()
curr_mp = player.get_mp()
if curr_mp < spell.cost:
print(bcolors.FAIL + 'NOT ENOUGH MAGIC POINTS!' + bcolors.ENDC)
continue
player.red_mp(spell.cost)
if spell.type1 == "white":
player.heal(mag_dmg)
print(bcolors.OKBLUE + spell.name + ' heals with ' + str(mag_dmg) + ' HP. ' + bcolors.ENDC)
elif spell.type1 == "black":
enemy = player.choose_target(enemies)
enemies[enemy].take_damage(mag_dmg)
print(bcolors.OKBLUE + spell.name + ' deals with ' + str(mag_dmg) + ' points of damage.' + bcolors.ENDC)
if enemies[enemy].get_hp() == 0:
print(enemies[enemy].name.replace(" ", "") + " has died.")
del enemies[enemy]
elif int(choice) == 3:
player.choose_item()
item_choice = int(input("Choose Item :- ")) - 1
item = player.items[item_choice]
if item_choice == -1:
continue
if item['quantity'] == 0:
print("This item is finished!")
continue
if item['item'].type == "potion":
player.heal(item['item'].prop)
print(item['item'].name + " heals for " + str(item['item'].prop) + " HP.")
elif item['item'].type == "elixer":
player.hp = player.get_maxhp()
player.mp = player.get_maxmp()
print("Your HP is " + str(player.get_hp()) + " and MP is " + str(player.get_mp()) + ".")
elif item['item'].type == "attack":
enemy = player.choose_target(enemies)
enemies[enemy].take_damage(item['item'].prop)
print(bcolors.WARNING + item['item'].name + " damages " + enemies[enemy].name.replace(" ", "")
+ " with " + str(item['item'].prop) + " HP." + bcolors.ENDC)
if enemies[enemy].get_hp() == 0:
print(enemies[enemy].name.replace(" ", "") + " has died.")
del enemies[enemy]
item['quantity'] -= 1
if len(enemies) == 0:
print(bcolors.OKGREEN + 'YOU WIN!!!' + bcolors.ENDC)
running = False
break
elif len(players) == 0:
print(bcolors.FAIL + 'ENEMY HAS DEFEATED YOU!!!' + bcolors.ENDC)
running = False
break
for enemy in enemies:
enemy_choice = random.randrange(0, 3)
if enemy_choice == 0:
enm_dmg = enemy.gen_damage()
target = random.randrange(0, 3)
players[target].take_damage(enm_dmg)
print(enemy.name + ' attacked ' + players[target].name.replace(" ", "") + ' for ' + str(enm_dmg) + ' point.')
elif enemy_choice == 1:
spell, mag_dmg = enemy.choose_enemy_spell()
if spell.type1 == "white":
enemy.heal(mag_dmg)
print(bcolors.OKBLUE + spell.name + ' heals ' + enemy.name.replace(" ", "") + ' with ' + str(mag_dmg)
+ ' HP. ' + bcolors.ENDC)
elif spell.type1 == "black":
target = random.randrange(0, 3)
players[target].take_damage(mag_dmg)
print(enemy.name + " chose " + spell.name + " on " + players[target].name.replace(" ", "")
+ " for a damage of " + str(mag_dmg) + " HP.")
if players[target].get_hp() == 0:
print(players[target].name.replace(" ", "") + " has died.")
del players[target]
elif enemy_choice == 2:
item = enemy.choose_enemy_item()
if item['item'].type == "potion":
enemy.heal(item['item'].prop)
print(item['item'].name + " heals " + enemy.name.replace(" ", "") + " for " + str(item['item'].prop) +
" HP.")
elif item['item'].type == "elixer":
enemy.hp = enemy.get_maxhp()
enemy.mp = enemy.get_maxmp()
print(enemy.name.replace(" ", "") + "'s HP is " + str(enemy.get_hp()) + " and MP is " +
str(enemy.get_mp()) + ".")
elif item['item'].type == "attack":
target = random.randrange(0, 3)
players[target].take_damage(item['item'].prop)
print(enemy.name + " chose " + item['item'].name + " on " + players[target].name.replace(" ", "")
+ " for a damage of " + item['item'].prop + " HP.")
if players[target].get_hp() == 0:
print(players[target].name.replace(" ", "") + " has died.")
del players[target]
item['quantity'] -= 1
| true |
536d3aa44b373eeb6620bd540d66ef32ff258994 | Python | jose-carlos-code/CursoEmvideo-python | /exercícios/EX_CursoEmVideo/ex078.py | UTF-8 | 279 | 3.375 | 3 | [
"MIT"
] | permissive | pos = 0
# Collect five integers from the user, tracking the 1-based input position.
valores = list()
for v in range(1, 5+1):
    pos += 1
    valores.append(int(input(f'digite o valor na posição {pos}: ')))
# Report the full list, then its maximum and minimum.
print(f'\nvocê digitou os valores {valores}')
print(f'\no maior valor foi {max(valores)}')
print(f'\no menor valor digitado foi {min(valores)}')
| true |
18f20a9d2eb2bcb17b73b6be6f83f40eefa784e9 | Python | c0dir/Vk-Bots | /whoami.py | UTF-8 | 212 | 2.515625 | 3 | [] | no_license | import vk
if __name__ == '__main__':
    # Read the VK API access token from a local file (expected next to the script).
    with open('access_token.txt') as fp:
        access_token = fp.read()
    vkapi = vk.Api(access_token)
    # users.get() with no ids returns the current (token-owning) user.
    me, = vkapi.users.get()
    print('{first_name} {last_name}'.format(**me))
| true |
a8342d79e189f19ede2b02f65bd13fddb5aa3c07 | Python | dezed/mantid | /Testing/SystemTests/tests/analysis/PredictPeaksTest.py | UTF-8 | 4,531 | 2.8125 | 3 | [] | no_license | # pylint: disable=no-init,too-few-public-methods
import stresstesting
from mantid.simpleapi import *
from mantid.geometry import CrystalStructure
# The reference data for these tests were created with PredictPeaks in the state at Release 3.5,
# if PredictPeaks changes significantly, both reference data and test may need to be adjusted.
# The WISH test has a data mismatch which might be caused by the 'old' code having a bug (issue #14105).
# The difference is that peaks may have different d-values because they are assigned to a different detector.
# Instead of using the CheckWorkspacesMatch, only H, K and L are compared.
class PredictPeaksTestWISH(stresstesting.MantidStressTest):
    """Compare PredictPeaks output for WISH against stored reference HKLs.

    Only H, K, L are compared (not full workspaces) because, per the header
    comment, peaks may land on different detectors than in the reference.
    """
    def runTest(self):
        simulationWorkspace = CreateSimulationWorkspace(Instrument='WISH',
                                                        BinParams='0,1,2',
                                                        UnitX='TOF')
        SetUB(simulationWorkspace, a=5.5, b=6.5, c=8.1, u='12,1,1', v='0,4,9')
        peaks = PredictPeaks(simulationWorkspace,
                             WavelengthMin=0.5, WavelengthMax=6,
                             MinDSpacing=0.5, MaxDSpacing=10)
        reference = LoadNexus('predict_peaks_test_random_ub.nxs')
        hkls_predicted = self._get_hkls(peaks)
        hkls_reference = self._get_hkls(reference)
        lists_match, message = self._compare_hkl_lists(hkls_predicted, hkls_reference)
        self.assertEquals(lists_match, True, message)
    def _get_hkls(self, peaksWorkspace):
        # Extract (h, k, l) tuples from the peaks-workspace columns.
        h_list = peaksWorkspace.column('h')
        k_list = peaksWorkspace.column('k')
        l_list = peaksWorkspace.column('l')
        return [(x, y, z) for x, y, z in zip(h_list, k_list, l_list)]
    def _compare_hkl_lists(self, lhs, rhs):
        # Order-insensitive comparison: returns (True, None) on match,
        # otherwise (False, <description of the first difference>).
        if len(lhs) != len(rhs):
            return False, 'Lengths do not match: {} vs. {}'.format(len(lhs), len(rhs))
        lhs_sorted = sorted(lhs)
        rhs_sorted = sorted(rhs)
        for i in range(len(lhs)):
            if lhs_sorted[i] != rhs_sorted[i]:
                return False, 'Mismatch at position {}: {} vs. {}'.format(i, lhs_sorted[i], rhs_sorted[i])
        return True, None
class PredictPeaksTestTOPAZ(stresstesting.MantidStressTest):
    """Compare PredictPeaks output for TOPAZ against a stored reference workspace."""
    def runTest(self):
        simulationWorkspace = CreateSimulationWorkspace(Instrument='TOPAZ',
                                                        BinParams='0,1,2',
                                                        UnitX='TOF')
        SetUB(simulationWorkspace, a=5.5, b=6.5, c=8.1, u='12,1,1', v='0,4,9')
        peaks = PredictPeaks(simulationWorkspace,
                             WavelengthMin=0.5, WavelengthMax=6,
                             MinDSpacing=0.5, MaxDSpacing=10)
        reference = LoadNexus('predict_peaks_test_random_ub_topaz.nxs')
        # Full workspace comparison is safe here (no detector mismatch on TOPAZ).
        simulationWorkspaceMatch = CheckWorkspacesMatch(peaks, reference)
        self.assertEquals(simulationWorkspaceMatch, 'Success!')
class PredictPeaksCalculateStructureFactorsTest(stresstesting.MantidStressTest):
    """Check that CalculateStructureFactors populates peak intensities,
    and that they stay zero when the flag is off."""
    def runTest(self):
        simulationWorkspace = CreateSimulationWorkspace(Instrument='WISH',
                                                        BinParams='0,1,2',
                                                        UnitX='TOF')
        SetUB(simulationWorkspace, a=5.5, b=6.5, c=8.1, u='12,1,1', v='0,4,9')
        # Setting some random crystal structure. Correctness of structure factor calculations is ensured in the
        # test suite of StructureFactorCalculator and does not need to be tested here.
        simulationWorkspace.sample().setCrystalStructure(
            CrystalStructure('5.5 6.5 8.1', 'P m m m', 'Fe 0.121 0.234 0.899 1.0 0.01'))
        peaks = PredictPeaks(simulationWorkspace,
                             WavelengthMin=0.5, WavelengthMax=6,
                             MinDSpacing=0.5, MaxDSpacing=10,
                             CalculateStructureFactors=True)
        self.assertEquals(peaks.getNumberPeaks(), 540)
        # Every predicted peak must carry a strictly positive intensity.
        for i in range(540):
            peak = peaks.getPeak(i)
            self.assertLessThan(0.0, peak.getIntensity())
        peaks_no_sf = PredictPeaks(simulationWorkspace,
                                   WavelengthMin=0.5, WavelengthMax=6,
                                   MinDSpacing=0.5, MaxDSpacing=10,
                                   CalculateStructureFactors=False)
        # Without the flag, intensities must remain exactly zero.
        for i in range(540):
            peak = peaks_no_sf.getPeak(i)
            self.assertEquals(0.0, peak.getIntensity())
| true |
ed0ffaab0b616209ef973471cb412dcb2eaaa1e3 | Python | lvchy/ClientTools | /UABTools/bkdrhash.py | UTF-8 | 239 | 3.125 | 3 | [] | no_license | _seed = 131
def bkdrhash(str):
    """BKDR string hash: Horner-style accumulation with the module seed,
    masked down to a non-negative 31-bit value."""
    hashnum = 0
    for ch in str:
        hashnum = hashnum * _seed + ord(ch)
    return hashnum & 0x7FFFFFFF
if __name__ == "__main__":
print(bkdrhash('hello world')) | true |
c6386cbf35bbc5bf5a0f8af79cc37e40bf616be3 | Python | hilmarm/sisy_table | /main.py | UTF-8 | 745 | 2.84375 | 3 | [] | no_license | #!/usr/bin/env python
from table_solver import TableSolver
from draw_table import DrawTable
from create_timetable import CreateTimetable
from parse_programm import ParseProgramm as PP
import random
def main():
    """Parse the programme file, solve the table assignment as a MIP,
    and render a timetable for each of the seven days."""
    # A = [[0,2], [1,3], [9,10], [4,8], [6,10], [2,6]]
    my_input = 'input/program20180820'
    pp = PP(my_input)
    pp.run()
    # Collect each artist's time interval as the solver input.
    A = []
    for artist in pp.artists:
        A.append(artist.time)
    solver = TableSolver(A, 10)
    solver.SolveMIP()
    # drawer = DrawTable(pp, [x.solution_value() for x in solver.vars])
    # drawer.draw_table()
    drawer = CreateTimetable(pp, [x.solution_value() for x in solver.vars])
    for day in [0,1,2,3,4,5,6]:
        drawer.draw_day(day)
if __name__ == '__main__':
main()
| true |
834aa52d5b1df6854946c7e478296b108a1ccd6c | Python | DL-Metaphysics/DL-LJ | /贝叶斯个性化排序/贝叶斯.py | UTF-8 | 3,333 | 2.671875 | 3 | [] | no_license | import tensorflow as tf
import numpy
import os
import random
from collections import defaultdict
#tensorflow实现BPR
def load_data(data_path):
    """Load MovieLens u.data-style ratings (user \\t item \\t rating \\t timestamp per line).

    Returns (max_user_id, max_item_id, user_ratings) where user_ratings maps
    each user id to the set of item ids that user rated.
    """
    user_ratings = defaultdict(set)  # set: no duplicate items per user
    max_u_id = -1
    max_i_id = -1
    with open(data_path, 'r') as f:
        # BUG fix: the loop variable was `lines` while the body used `line`,
        # which raised NameError on the first iteration.
        for line in f.readlines():
            u, i, _, _ = line.split("\t")
            u = int(u)
            i = int(i)
            user_ratings[u].add(i)
            max_u_id = max(u, max_u_id)
            max_i_id = max(i, max_i_id)
    print("max_u_id:", max_u_id)
    print("max_i_id:", max_i_id)
    return max_u_id, max_i_id, user_ratings
data_path = os.path.join('D:\\tmp\\ml-100k', 'u.data')
user_count, item_count, user_ratings = load_data(data_path)#输出用户数和电影数,同时把每个用户看过的电影都保存在user_ratings中
#数据集 max_u_id = 943,max_i_id = 1682
#对每一个用户u,在user_rating中随机找到他评分过的一部电影i,保存在user_ratings_test中
def generate_test(user_ratings):
    """For every user, hold out one randomly chosen rated item for testing.

    Returns a dict mapping user id -> held-out item id.
    """
    user_test = dict()
    for u, i_list in user_ratings.items():
        # BUG fix: the original sampled from `user_ratings` (the dict of
        # users, i.e. a user id), but the held-out value must be one of the
        # user's own rated items. list() because random.sample no longer
        # accepts sets (removed in Python 3.11).
        user_test[u] = random.sample(list(i_list), 1)[0]
    return user_test
user_ratings_test = generate_test(user_ratings)#得到一个评分过的电影
#用tensorflow迭代用的若干批训练集,根据user_ratings找到若干训练用的三元组<u,i,j>
#用户u随机抽取,i从user——ratings中随机抽取,j从总的电影集中抽取,但(u,j)不在user_ratings中
#构造训练集三元组<u,i,j>
def generate_train_batch(user_ratings, user_rating_test, item_count, batch_size=512):
    """Sample a batch of BPR training triples <u, i, j>.

    u is a random user, i a random item u rated (excluding u's held-out test
    item), and j a random item u has NOT rated.

    Returns a numpy array of shape (batch_size, 3).
    """
    t = []
    for _ in range(batch_size):
        # list() wrappers: random.sample no longer accepts dict views / sets
        # (removed in Python 3.11).
        u = random.sample(list(user_ratings.keys()), 1)[0]
        i = random.sample(list(user_ratings[u]), 1)[0]
        # BUG fix: the body consulted the *global* user_ratings_test instead
        # of the user_rating_test parameter it was given.
        while i == user_rating_test[u]:
            i = random.sample(list(user_ratings[u]), 1)[0]
        j = random.randint(1, item_count)
        while j in user_ratings[u]:
            j = random.randint(1, item_count)
        t.append([u, i, j])
    return numpy.asarray(t)
#测试集三元组<u,i,j>
#i从user_ratings_test中随机抽取,j是u没有评分过的电影
def generate_test_batch(user_ratings, user_ratings_test, item_count, batch_size=512):
    """Yield, per user, the evaluation triples <u, i, j>.

    i is the user's held-out test item and j ranges over every item the user
    has not rated. Yields one numpy array of shape (n, 3) per user.
    (batch_size is unused; kept for signature compatibility with callers.)
    """
    for u in user_ratings.keys():
        t = []
        # BUG fix: the body referenced `user_rating_test`, a name that does
        # not exist (the parameter is `user_ratings_test`) -> NameError.
        i = user_ratings_test[u]
        for j in range(1, item_count + 1):
            if not (j in user_ratings[u]):
                t.append([u, i, j])
        yield numpy.asarray(t)
#tensorflow实现
def bpr_mf(user_count,item_count,hidden_dim):#hidden_dim为隐含维度k
u = tf.placeholder(tf.int32,[None])
i = tf.placeholder(tf.int32, [None])
j = tf.placeholder(tf.int32, [None])
with tf.device("/cpu:0"):#选择CPU
#建立变量op
user_emb_w = tf.get_variable("user_emb_w",[user_count + 1,hidden_dim],initializer = tf.random_normal_initializer(0,0.1))
item_emb_w = tf.get_variable("item_emb_w",[item_count + 1,hidden_dim],initializer = tf.random_normal_initializer(0,0.1))
u_emb = tf.nn.embedding_lookup(user_emb_w,u)
i_emb = tf.nn.embedding_lookup(item_emb_w,i)
j_emb = tf.nn.embedding_lookup(item_emb_w,j)
#MF predict : u_i > u_j
#multiply为点乘
x = tf.reduce_sum(tf.multiply(u_emb,(i_emb - j_emb)),1,keep_dims=True)
| true |
7ebd16415511337defe5c634de51c88e9c578dbf | Python | gabriel-piedade95/Biologia_de_Sistemas | /convergencia_redes.py | UTF-8 | 1,352 | 3.171875 | 3 | [] | no_license |
def _cal_T_estados(lista, est_ant):
if est_ant == lista[est_ant]:
return 0
if est_ant not in lista:
return 1
anteriores = []
for k in range(0, len(lista)):
if lista[k] == est_ant and k != est_ant:
anteriores.append(k)
n = 1
for i in range(0, len(anteriores)):
n += _cal_T_estados(lista, anteriores[i])
return n
def cal_T(lista):
T = [0] * len(lista)
for i in range(0, len(lista)):
T[i] = _cal_T_estados(lista, i)
return T
def _cal_L_estados(lista, est_ant):
prox_est = lista[est_ant]
if est_ant == prox_est:
return 0
if lista[prox_est] == prox_est:
return 1
return 1 + _cal_L_estados(lista, prox_est)
def cal_L(lista):
L = [0] * len(lista)
for i in range(0, len(lista)):
L[i] = _cal_L_estados(lista, i)
return L
def _caminho_atrator(lista, est_ant):
prox_est = lista[est_ant]
if est_ant == prox_est:
return
if lista[prox_est] == prox_est:
return [est_ant]
return [est_ant] + _caminho_atrator(lista, prox_est)
def cal_w(lista):
w = [0] * len(lista)
L = cal_L(lista)
T = cal_T(lista)
for i in range(0, len(lista)):
somatorio_w = 0
caminho = _caminho_atrator(lista, i)
if L[i] != 0 and caminho != None:
for estado in caminho:
somatorio_w += T[estado]
w[i] = somatorio_w/L[i]
return w
def cal_W_total(lista):
return sum(cal_w(lista))/len(lista)
| true |
002fcfbd7061a46d64d868b1a069dea7a055af97 | Python | vivekworks/learning-to-code | /4. Discovering Computer Science/Python/Chapter 4 - Growth And Decay/Exercises 2/exercise425.py | UTF-8 | 474 | 3.609375 | 4 | [] | no_license | """
Purpose : Plot investment amount
Author : Vivek T S
Date : 02/11/2018
"""
import matplotlib.pyplot as pyplot
def invest(investment, rate, years):
amount = investment
amountList = []
amountList.append(amount)
for month in range(years*10):
amount = amount+(amount*(rate/100))+50
print(amount)
amountList.append(amount)
pyplot.plot(range(0,(years*10)+1),amountList,color='pink',label='Investment')
pyplot.show()
def main():
    """Run the investment demo.

    invest() prints and plots as side effects and returns None; the
    original wrapped the call in print(), which emitted a stray "None".
    """
    invest(2000, 12, 12)
main()  # script entry point: run the demo on import/execution
| true |
5a4852f65ca2cb6c775f13d361323bebe625ea78 | Python | iramgee/PracticingPython | /assignment7_2.py | UTF-8 | 511 | 3.078125 | 3 | [] | no_license | # Use the file name mbox-short.txt as the file name
fname = raw_input("Enter file name: ")
fh = open(fname)
count = 0
n = 0
total = 0
for line in fh:
if not line.startswith("X-DSPAM-Confidence:") : continue
count = count + 1
pos = line.index(':')
loc = pos +1
dig = line[loc:]
num = float(dig)
for n in [num]:
total = total + n
linestrip = line.rstrip()
print "Average spam confidence:",total / count
# Average spam confidence: 0.750718518519
| true |
95ce5ef8a635ff867aaf72d242e115ed60f436e6 | Python | iamsjn/CodeKata | /hackerrank/sum-vs-xor.py | UTF-8 | 494 | 3.03125 | 3 | [] | no_license | #!/bin/python3
import math
import os
import random
import re
import sys
def get_binary(result, i):
    """Return the binary representation of non-negative integer ``i``.

    Digits are accumulated most-significant-first onto the ``result``
    prefix string; ``get_binary('', 0)`` returns ``'0'``.

    BUG FIX: the original printed each bit while ``i > 1`` (dropping the
    final bit) and returned None; it now builds and returns the digits.
    """
    if i <= 0:
        return result or '0'
    while i > 0:
        result = str(i % 2) + result
        i = i // 2
    return result
# def get_xor(n, i):
# return get_binary(n) ^ get_binary(i)
# Complete the sumXor function below.
# def sumXor(n):
# count = 0
# # for i in range(0, n):
# if get_binary(n + 0) == get_xor(n, 0):
# count += 1
# return count
if __name__ == '__main__':
    # Smoke-test the binary helper; sumXor is still commented out above.
    print(get_binary('', 4))
    # print(sumXor(5))
| true |
bfd8dcf8a5fe881827ff6bde58fa19b7754a83cd | Python | zhaotun/python-files | /movefile.py | UTF-8 | 1,008 | 2.734375 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding:utf8 -*-
import os
import shutil
# Copy every file found under source_path (recursively) into target_path.
source_path = os.path.abspath(r'F:\Face\DataSet\face_anti_spoofing\IR\IR_video\IR_Print_video2img')
target_path = os.path.abspath(r'F:\Face\DataSet\face_anti_spoofing\IR\IR_video\test')
if not os.path.exists(target_path):
    os.makedirs(target_path)
i=0
if os.path.exists(source_path):
    # os.walk yields, for each directory visited:
    #   root  - path of the directory currently being traversed
    #   dirs  - names of its immediate subdirectories
    #   files - names of its immediate files (subdirectories excluded)
    for root, dirs, files in os.walk(source_path):  # walk source_path and every subdirectory
        for file in files:  # every file name in the current directory
            src_file = os.path.join(root, file)
            # NOTE(review): shutil.copy *copies*; the message below says "moved".
            shutil.copy(src_file, target_path)
            print(src_file)
            i=i+1
print('%d files moved!'%i)
| true |
4f3a28d12f8fdbf22521b395379c933d14fffd69 | Python | Natorius6/ArcadeWork | /SNOWMAN.py | UTF-8 | 888 | 3.328125 | 3 | [] | no_license | import arcade
# Snowman drawing demo: one-shot render of a snowman using the arcade library.
# Size of the game window, in pixels.
WIDTH = 600
HEIGHT = 600
# Open the game window (title is "hello").
arcade.open_window(WIDTH, HEIGHT, "hello")
# Draw the background and start a render pass.
arcade.set_background_color(arcade.color.AIR_SUPERIORITY_BLUE)
arcade.start_render()
# Body of the snowman: three stacked white circles of decreasing radius.
arcade.draw_circle_filled(WIDTH/2, HEIGHT/3, 90, arcade.color.WHITE) #bottom circle
arcade.draw_circle_filled(WIDTH/2, HEIGHT/2, 60, arcade.color.WHITE) #middle circle
arcade.draw_circle_filled(WIDTH/2, (HEIGHT/3)*1.8, 40, arcade.color.WHITE) #top circle
# Eyes: two black points offset 15px either side of center, 10px above the mouth.
arcade.draw_point(WIDTH/2 + 15, (HEIGHT/3)*1.8 + 10, arcade.color.BLACK, 7) #right eye
arcade.draw_point(WIDTH/2 - 15, (HEIGHT/3)*1.8 + 10, arcade.color.BLACK, 7) #left eye
# Mouth: a horizontal black line across the top circle.
arcade.draw_line(WIDTH/2 - 15, (HEIGHT/3)*1.8, WIDTH/2 + 15, (HEIGHT/3)*1.8, (0, 0, 0), 5)
arcade.finish_render()
arcade.run()
| true |
3dfed51d8404683a3f4892ff66194fac4e140193 | Python | combateer3/PN532-python-lib | /card_timer.py | UTF-8 | 1,640 | 3.359375 | 3 | [] | no_license | from multiprocessing import Process, Event, Value
import time
# Blocks until the card has been absent from the reader for `sec` seconds.
def card_removal_wait(card_read_func, sec=1, check_interval=0.1):
    """Block until the card has been removed for ``sec`` seconds.

    card_read_func -- callable that blocks until the reader sees the card
    sec            -- seconds the card must stay removed before returning
    check_interval -- polling resolution of the timer, in seconds
    """
    card_removed = Event()
    # Two processes are spawned:
    #   - one accumulates a removal timer,
    #   - the other resets that timer every time the card is read again.
    removed_for = Value('f', 0) # keeps track of how long the card has been removed for
    lock_timer = Process(target=card_remove_timer, args=(card_removed, removed_for, sec, check_interval))
    lock_con = Process(target=check_card_active, args=(card_removed, removed_for, card_read_func))
    # start processes
    lock_timer.start()
    lock_con.start()
    card_removed.wait() # wait until the processes have determined that the card has been removed
    # clean up: both workers loop forever, so terminate them explicitly
    lock_timer.terminate()
    lock_con.terminate()
def card_remove_timer(card_removed, removed_for, time_limit, check_interval):
    """Accumulate removal time into ``removed_for`` and fire ``card_removed``
    once it reaches ``time_limit`` seconds without being reset."""
    while True:
        if removed_for.value >= time_limit:
            break
        time.sleep(check_interval)
        with removed_for.get_lock():
            removed_for.value += check_interval
    # Reached only after time_limit seconds elapsed with no reset.
    card_removed.set()
# Companion worker: while the card stays on the reader, keep zeroing the timer.
# `activity_func` is the function called to check whether the card is present.
def check_card_active(card_removed, removed_for, activity_func):
    """Reset the removal timer each time ``activity_func`` reads the card.

    ``activity_func`` is expected to block while no card is present, so the
    timer is only zeroed while the card is actually on the reader.  Loops
    until ``card_removed`` is set (in practice this process is terminated
    externally by card_removal_wait)."""
    while True:
        if card_removed.is_set():
            return
        activity_func()  # should block unless it has a card to read
        with removed_for.get_lock():
            removed_for.value = 0
| true |
e7522fa65806f5335526e13e44093d875cd15f7e | Python | siddhujz/wikianalytics | /python/wikicrawl-v4.py | UTF-8 | 15,475 | 2.578125 | 3 | [] | no_license | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 3 21:54:41 2017
@author: kaliis
"""
import re
import sys
import json
import string
import nltk
from imdbpie import Imdb
from nltk.stem.wordnet import WordNetLemmatizer
from nltk.tag import StanfordNERTagger
from nltk.tag.perceptron import PerceptronTagger
from nltk.corpus import stopwords
from nltk.tokenize import sent_tokenize
from nltk.tokenize import word_tokenize
# IMDb client setup.  NOTE(review): the first assignment is immediately
# overwritten by the anonymized instance below.
imdb = Imdb()
imdb = Imdb(anonymize=True) # to proxy requests
# POS tagger plus the Stanford 3-class NER model/jar locations on disk.
tagger = PerceptronTagger()
stanford_dir = '/home/kaliis/workdir/cloud/working/stanford-ner-2016-10-31/'
jarfile = stanford_dir + 'stanford-ner.jar'
modelfile = stanford_dir + 'classifiers/english.all.3class.distsim.crf.ser.gz'
stanfordNERTagger = StanfordNERTagger(model_filename=modelfile, path_to_jar=jarfile)
wordcolon = re.compile(r"\w*[:]")  # matches internal pages: Wikipedia:, File:, Portal:, ...
def ordinary(title):
    '''
    Convert a given title into plain ASCII.

    Underscores become spaces and %XX escapes are decoded (%2C -> ',').
    Returns None for titles that decode poorly (malformed or truncated
    escapes, non-printable decoded characters) or that name an internal
    Wiki page such as "Wikipedia:...".

    BUG FIX: the hex byte is now parsed with int(..., 16) instead of
    eval("0x" + ...), which evaluated attacker-controlled title text.
    '''
    title = title.strip().replace('_', ' ')
    try:
        while '%' in title:
            at = title.index('%')
            decoded = chr(int(title[at + 1:at + 3], 16))
            if decoded not in string.printable:
                return None
            title = title[:at] + decoded + title[at + 3:]
    except Exception:
        # Bad/truncated escape sequence: treat the title as untranslatable.
        return None
    if wordcolon.match(title):
        return None
    return title
def cleanWikiLink(proposed):
    '''
    Normalize a raw wiki link target.

    Keeps only the text before any '|' display separator and before any
    '#' section anchor.  Returns None for namespaced links (containing
    ':') and for links with non-ASCII/unprintable characters, which are
    considered too hard to follow.
    '''
    proposed = proposed.split('|')[0].split('#')[0]
    if ':' in proposed:
        return None
    if any(ch not in string.printable for ch in proposed):
        return None
    return proposed
REGEX_WIKILINK = '(?<=\[\[).*?(?=\]\])'
def get_wikilinks(line):
    '''Return every [[...]] link body found in ``line`` (wikitext syntax);
    display text after '|' is kept and cleaned up by the caller.'''
    return re.findall(REGEX_WIKILINK, line)
REGEX_WORDS_IN_BOLD = "'(?<='{3})[\w \s]*(?='{3})"
def get_bold_text(line):
'''returns a list of statements(word or group of words)
written in Bold(in the wikipedia dateset)'''
links = re.findall(REGEX_WORDS_IN_BOLD, line)
return links
REGEX_HEADING_LEVEL2 = '^\=\=[^=].*[^=]\=\=$'
REGEX_HEADING_LEVEL3 = '^\=\=\=[^=].*[^=]\=\=\=$'
REGEX_HEADING_LEVEL4 = '^\=\=\=\=[^=].*[^=]\=\=\=\=$'
REGEX_HEADING_LEVEL5 = '^\=\=\=\=\=[^=].*[^=]\=\=\=\=\=$'
REGEX_HEADING_ALL_LEVELS = '^\=\=.*\=\=$'
def get_headings(line, REGEX_HEADING_LEVEL):
    '''Return the wikitext heading spans in ``line`` matching the given
    level pattern (the whole line must be a ==...== heading).'''
    return re.findall(REGEX_HEADING_LEVEL, line)
REGEX_TEXT_IN_BRACES = '(?<=\{\{).*?(?=\}\})'
REGEX_TEXT_WITH_STARTING_BRACES = '(?<=\{\{).*?$'
REGEX_TEXT_WITH_ENDING_BRACES = '^.*?(?=\}\})'
REGEX_TEXT_ONLY_START = '^(\[\[)(?!File:)|^(\w*)'
REGEX_REF = '(?<=\<ref).*?(?=\</ref>)'
REGEX_REF_START = '(?<=\<ref).*?$'
REGEX_REF_END = '^.*?(?=\</ref>)'
def plain_text(line):
    '''Strip wikitext markup ({{...}} templates and <ref>...</ref> citations)
    from ``line``, then drop every character that is not a word character,
    whitespace, or simple sentence punctuation (. ! , ?).'''
    # Substitutions are applied strictly in this order: contents first,
    # then the emptied delimiters, then the dangling open/close halves of
    # each construct (for templates/refs split across physical lines).
    markup_passes = (
        (REGEX_TEXT_IN_BRACES, ''),              # text inside {{ }}
        ('\{\{\}\}', ''),                        # the now-empty braces
        (REGEX_TEXT_WITH_STARTING_BRACES, ''),   # text after an unclosed {{
        ('\{\{', ''),                            # leftover opening braces
        (REGEX_TEXT_WITH_ENDING_BRACES, ''),     # text before an unopened }}
        ('\}\}', ''),                            # leftover closing braces
        (REGEX_REF, ''),                         # text inside <ref ...></ref>
        ('<ref</ref>', ''),                      # the now-empty ref tags
        (REGEX_REF_START, ''),                   # text after an unclosed <ref
        ('<ref', ''),                            # leftover opening ref tag
        (REGEX_REF_END, ''),                     # text before an unopened </ref>
        ('</ref>', ''),                          # leftover closing ref tag
    )
    for pattern, replacement in markup_passes:
        line = re.sub(pattern, replacement, line)
    return re.sub('[^\w\s.!,?]', '', line)
def get_ner_tags(text):
    '''Return a list of (word, tag) pairs from the module-level Stanford
    NER (Named Entity Recognizer) tagger.  Tokenization is a naive
    whitespace split.'''
    return stanfordNERTagger.tag(text.split())
# --- Main extraction pass -------------------------------------------------
# Articles in the dump are delimited by sentinel lines: everything between
# a "start" sentinel (which also carries the title) and an "end" sentinel
# belongs to one article.
#Open the File
f = open("part0001")
lines = f.readlines()
start = "$$$===cs5630s17===$$$===Title===$$$"
end = "$$$===cs5630s17===$$$===cs5630s17===$$$"
#Create a WikiArticle List
wikiArticleList = list()
isNewArticle = True
for line in lines:
    if isNewArticle:
        if start in line.strip():
            isNewArticle = False
            '''Initialize an article'''
            wikiArticle = dict()
            '''Get and Set the title of the WikiArticle'''
            '''Initialize the wikilinks array'''
            '''Initialize the headings array'''
            # Title is the last whitespace-separated token of the sentinel line.
            parts = line.strip().split(" ")
            wikiArticle['title'] = ordinary(parts[-1])
            wikiArticle['wikilinks'] = list()
            wikiArticle['headings'] = list()
            wikiArticle['headings_level2'] = list()
            wikiArticle['headings_level3'] = list()
            wikiArticle['headings_level4'] = list()
            wikiArticle['headings_level5'] = list()
            wikiArticle['pure_text'] = ""
            wikiArticle['ner_tags'] = list()
            wikiArticle['raw_text'] = list()
            wikiArticle['text_in_bold'] = list()
            continue
    else:
        '''Get all the wikilinks present in the line'''
        wikilinks = get_wikilinks(line)
        if len(wikilinks) != 0:
            for wikilink in wikilinks:
                topic = cleanWikiLink(wikilink)
                if topic:
                    wikiArticle['wikilinks'].append(topic)
        '''Get all the headings present in the line'''
        headings = get_headings(line, REGEX_HEADING_ALL_LEVELS)
        headings_level2 = get_headings(line, REGEX_HEADING_LEVEL2)
        headings_level3 = get_headings(line, REGEX_HEADING_LEVEL3)
        headings_level4 = get_headings(line, REGEX_HEADING_LEVEL4)
        headings_level5 = get_headings(line, REGEX_HEADING_LEVEL5)
        if len(headings) != 0:
            for heading in headings:
                '''Remove all leading and trailing equal signs and append it to the headings list'''
                wikiArticle['headings'].append(heading.strip("="))
        if len(headings_level2) != 0:
            for heading in headings_level2:
                '''Remove all leading and trailing equal signs and append it to the headings_level2 list'''
                wikiArticle['headings_level2'].append(heading.strip("="))
        if len(headings_level3) != 0:
            for heading in headings_level3:
                '''Remove all leading and trailing equal signs and append it to the headings_level3 list'''
                wikiArticle['headings_level3'].append(heading.strip("="))
        if len(headings_level4) != 0:
            for heading in headings_level4:
                '''Remove all leading and trailing equal signs and append it to the headings_level4 list'''
                wikiArticle['headings_level4'].append(heading.strip("="))
        if len(headings_level5) != 0:
            for heading in headings_level5:
                '''Remove all leading and trailing equal signs and append it to the headings_level5 list'''
                wikiArticle['headings_level5'].append(heading.strip("="))
        '''Get the text that is in Bold'''
        bold_words = get_bold_text(line)
        if len(bold_words) != 0:
            for bold_sentence in bold_words:
                '''Remove all leading and trailing "'" signs and append it to the bold_text list'''
                #print("bold_sentence = ", bold_sentence)
                bold_sentence = bold_sentence.strip("'")
                wikiArticle['text_in_bold'].append(bold_sentence)
        # Only accumulate prose for lines starting with a word or a
        # non-File wikilink (REGEX_TEXT_ONLY_START skips markup lines).
        if re.match(REGEX_TEXT_ONLY_START, line).group(0) != '':
            wikiArticle['pure_text'] += plain_text(line) + " "
        if end not in line.strip():
            wikiArticle['raw_text'].append(line)
            continue
        if end in line.strip():
            isNewArticle = True
            if wikiArticle['pure_text'] != "":
                wikiArticle['ner_tags'] = get_ner_tags(wikiArticle['pure_text'])
            wikiArticleList.append(wikiArticle)
            # NOTE(review): this break stops after the FIRST article only;
            # remove it to parse the whole dump.
            break
wikiArticleCount = len(wikiArticleList)
#print "No. of wikiArticles = ", len(wikiArticleList)
print "No. of wikiArticles = ", wikiArticleCount
print "text_in_bold---------------------------------------------------------------------"
print wikiArticleList[0]['text_in_bold']
print "pure_text********************************************************************"
print wikiArticleList[0]['pure_text']
print "Stanford NER Tags********************************************************************"
#Remove for wikiArticle in wikiArticleList:
#Remove print "wikiArticle['title'] = ", wikiArticle['title']
#Remove wikilinksCount = 0
#Remove for wikiArticle in wikiArticleList:
#Remove wikilinksCount += len(wikiArticle['wikilinks'])
#Remove print "Total number of wikilinks = ", wikilinksCount
#Remove print "Average number of wikilinks per wikiArticle = ", wikilinksCount//wikiArticleCount
#Remove headingsCount = 0
#Remove headings_level2Count = 0
#Remove headings_level3Count = 0
#Remove headings_level4Count = 0
#Remove headings_level5Count = 0
#Remove for wikiArticle in wikiArticleList:
#Remove headingsCount += len(wikiArticle['headings'])
#Remove headings_level2Count += len(wikiArticle['headings_level2'])
#Remove headings_level3Count += len(wikiArticle['headings_level3'])
#Remove headings_level4Count += len(wikiArticle['headings_level4'])
#Remove headings_level5Count += len(wikiArticle['headings_level5'])
#Remove print "Total number of headings = ", headingsCount
#Remove print "Total number of level 2 headings = ", headings_level2Count
#Remove print "Total number of level 3 headings = ", headings_level3Count
#Remove print "Total number of level 4 headings = ", headings_level4Count
#Remove print "Total number of level 5 headings = ", headings_level5Count
#Remove print(wikiArticleList[0]['headings'])
''' Stop words usually refer to the most common words in a language,
there is no single universal list of stop words used.
by all natural language processing tools.
Reduces Dimensionality.
removes stop words '''
#def remove_stops(data_str):
# # expects a string
# stops = set(stopwords.words("english"))
# list_pos = 0
# cleaned_str = ''
# text = data_str.split()
# for word in text:
# if word not in stops:
# # rebuild cleaned_str
# if list_pos == 0:
# cleaned_str = word
# else:
# cleaned_str = cleaned_str + ' ' + word
# list_pos += 1
# return cleaned_str
def remove_stops(data_str):
    '''Drop English stop words from ``data_str``.

    Returns the surviving NLTK word tokens joined by single spaces;
    None or empty input yields the empty string.'''
    if data_str is None or data_str == '':
        return ''
    stopset = set(stopwords.words('english'))
    kept = [tok for tok in word_tokenize(data_str) if tok not in stopset]
    return ' '.join(kept)
def lemmatize(data_str):
    '''Lemmatize the whitespace-separated words of ``data_str`` (families of
    derivationally related words collapse to a common lemma).

    Each word is POS-tagged with the module-level perceptron tagger; tags
    containing "v" are lemmatized as verbs, everything else as nouns.
    Returns the lemmas joined by single spaces.'''
    lemmatizer = WordNetLemmatizer()
    lemmas = []
    for token, tag in tagger.tag(data_str.split()):
        part = 'v' if 'v' in tag.lower() else 'n'
        lemmas.append(lemmatizer.lemmatize(token, pos=part))
    return ' '.join(lemmas)
def tag_and_remove(data_str):
    '''Keep only nouns, adjectives and verbs from ``data_str``.

    Words are POS-tagged with the module-level tagger; survivors are
    returned space-separated with one leading and one trailing space
    (matching the historical output format).'''
    # Penn Treebank tags retained: nouns, adjectives, verbs.
    keep_tags = {'NN', 'NNP', 'NNPS', 'NNS',
                 'JJ', 'JJR', 'JJS',
                 'VB', 'VBD', 'VBG', 'VBN', 'VBP', 'VBZ'}
    out = ' '
    for token, tag in tagger.tag(data_str.split()):
        if tag in keep_tags:
            out += token + ' '
    return out
# Demo of the cleanup pipeline on the first parsed article:
# stop-word removal -> lemmatization -> POS filtering.
tempstr = "Hello, how are you doing? what are you upto? Sam organizes everything. My friend is not organized. Cars are usually found everywhere in the US"
#data_str = wikiArticleList[0]['pure_text']
a = wikiArticleList[0]['pure_text']
#a = tempstr
print("------------aaaa--------------")
print("wikiArticleList[0][pure_text] = ", a)
b = remove_stops(a)
print("------------bbbb--------------")
print("remove_stops(wikiArticleList[0]['pure_text']) = ", b)
c = lemmatize(b)
print("------------cccc--------------")
#print("lemmatize(remove_stops(wikiArticleList[0]['pure_text'])) = ", c)
d = tag_and_remove(c)
print("------------dddd--------------")
#print("tag_and_remove(lemmatize(remove_stops(wikiArticleList[0]['pure_text']))) = ", d)
# Load the AFINN sentiment lexicon: one "word<TAB>score" pair per line.
sentiment_dictionary = {}
for line in open("AFINN-111.txt"):
    word, score = line.split('\t')
    sentiment_dictionary[word] = int(score)
def sentiment_analysis(data_str):
    '''Score each sentence of ``data_str`` with the AFINN lexicon.

    Returns one [positive_total, negative_total] pair per sentence, where
    negative_total is a non-positive sum of the negative word scores.'''
    scores = []
    for sentence in sent_tokenize(data_str):
        word_scores = [sentiment_dictionary.get(w, 0) for w in word_tokenize(sentence)]
        positives = sum(s for s in word_scores if s > 0)
        negatives = sum(s for s in word_scores if s < 0)
        scores.append([positives, negatives])
    return scores
# Demo: per-sentence [positive, negative] sentiment scores.
result = sentiment_analysis("Srini is the most peaceful person. He is very lazy.")
print("-------------Sentiment Analysis-----------------")
for s in result: print(s)
#print("-------------ImdbPie-----------------")
#top250 = imdb.top_250()
#print(top250[0]['title'])
#file = open('top250.txt', 'w+')
#for movie in top250:
#    print("Movie = ", movie['title'])
#    file.write("\n" + movie['title'].encode('utf8'))
| true |
4e693ac100a6a3277fe3c15ee4bdd13e77103c9a | Python | jingyiZhang123/leetcode_practice | /array/581_shortest_unsorted_continuous_subarrary.py | UTF-8 | 1,830 | 3.859375 | 4 | [] | no_license | """
Given an integer array, you need to find one continuous subarray that if you only sort this subarray in ascending order, then the whole array will be sorted in ascending order, too.
You need to find the shortest such subarray and output its length.
Example 1:
Input: [2, 6, 4, 8, 10, 9, 15]
[4,-2, 4, 2, -1, 6]
Output: 5
Explanation: You need to sort [6, 4, 8, 10, 9] in ascending order to make the whole array sorted in ascending order.
Note:
Then length of the input array is in range [1, 10,000].
The input array may contain duplicates, so ascending order here means <=.
"""
class Solution(object):
    def findUnsortedSubarray(self, nums):
        """
        Length of the shortest contiguous subarray which, once sorted in
        ascending order, makes the whole array non-decreasing.

        Single pass with two mirrored scans: the running max from the left
        marks the last index violating it (right edge of the answer), and
        the running min from the right marks the last index violating it
        (left edge).

        :type nums: List[int]
        :rtype: int
        """
        if not nums:
            return 0
        size = len(nums)
        # Sentinels chosen so an already-sorted array yields length 0.
        left, right = -1, -2
        running_max = nums[0]
        running_min = nums[-1]
        for i in range(size):
            j = size - 1 - i  # mirrored index for the right-to-left min scan
            running_max = max(running_max, nums[i])
            running_min = min(running_min, nums[j])
            if nums[i] < running_max:
                right = i
            if nums[j] > running_min:
                left = j
        return right - left + 1
print(Solution().findUnsortedSubarray([1,3,2,3,3]))  # demo run; prints 2
# class test(object):
# """docstring for test"""
# def __init__(self, L):
# self.index = 0
# self.data_len = len(L)
# self.L = L
# def __iter__(self):
# while self.index < self.data_len:
# yield self.L[self.index]
# self.index += 1
# raise StopIteration
# def __str__(self):
# return str(self.L)
# a = test([1,2,3,4,5])
# for i in a:
# print(a)
# a.L.remove(i)
# a = test([1,2,3,4,5])
| true |
bd7e0f4ae0f8a1688b80c1ef02b8b3c9c203559d | Python | ShaneKilloran/ChessEngine | /MiniMax.py | UTF-8 | 2,766 | 3.96875 | 4 | [
"MIT"
] | permissive | ##########################
###### MINI-MAX ######
##########################
class MiniMax:
    """Minimax search over a pre-built game tree.

    Nodes are expected to expose ``children`` (list of nodes), ``value``
    (numeric utility) and ``move``.
    """

    # print utility value of root node (assuming it is max)
    # print names of all nodes visited during search
    def __init__(self, root):
        self.root = root                  # GameNode at the top of the tree
        self.successors = root.children   # convenience alias for root's children

    def minimax(self, node):
        """Return the move of the child of ``node`` with the highest value.

        Values are assumed to have been propagated up the tree already
        (see max_value/min_value).  Ties go to the later child.
        """
        successors = node.children
        best_move = None
        # BUG FIX: was -1, which wrongly rejected boards whose children all
        # have negative utilities; -inf accepts any numeric value.
        best_val = float('-inf')
        for elem in successors:
            print("Looking at ",elem.move, "with value: ", elem.value)
            if elem.value >= best_val:
                best_move = elem.move
                best_val = elem.value
        print("Best move is: ",best_move)
        return best_move

    def max_value(self, node):
        """Utility of ``node`` for the maximizing player (recursive minimax)."""
        if self.isTerminal(node):
            return self.getUtility(node)
        infinity = float('inf')
        max_value = -infinity
        for state in self.getSuccessors(node):
            max_value = max(max_value, self.min_value(state))
        return max_value

    def min_value(self, node):
        """Utility of ``node`` for the minimizing player (recursive minimax)."""
        if self.isTerminal(node):
            return self.getUtility(node)
        infinity = float('inf')
        min_value = infinity
        for state in self.getSuccessors(node):
            min_value = min(min_value, self.max_value(state))
        return min_value

    #                     #
    #   UTILITY METHODS   #
    #                     #

    def getSuccessors(self, node):
        """Successor states in a game tree are the child nodes."""
        assert node is not None
        return node.children

    def isTerminal(self, node):
        """True iff the node has no children (no successor states)."""
        assert node is not None
        return len(node.children) == 0

    def getUtility(self, node):
        """Utility value stored on the node."""
        assert node is not None
        return node.value
| true |
0abbba473e45469f2d48ccdfd94026840bac8877 | Python | zach-fried/Data-Analysis | /sentiments.py | UTF-8 | 599 | 2.640625 | 3 | [] | no_license | from mastodon import Mastodon
# Create the actual API instance.
# SECURITY NOTE(review): the access token is hardcoded in source control --
# rotate it and load it from an environment variable or config file instead.
mastodon = Mastodon(
    access_token = 'c6d72eee8edd1242f2aae49b78cbef3f23ba35f27775fdad90b9d9766ad5e73b',
    api_base_url = 'https://mastodon.social'
)
# First toot
# mastodon.toot('Tooting via python using #mastodonpy!')
# Testing the API search function.
query = mastodon.search("trump")
# Write query results to 'query.txt' file.
# NOTE(review): `query.statuses.id` looks wrong -- the loop further below
# uses query['statuses']; presumably this should iterate query['statuses']
# and write each status['id'].  Also `id` shadows the builtin.  Confirm
# against mastodon.py's return type before changing.
query_results = open('query.txt', 'w')
for id in query.statuses.id:
    query_results.write(id + "\n")
query_results.close()
# Dump each matching status to stdout with a running index.
i = 0
for status in query['statuses']:
    print(status)
    print(i)
    i += 1
df7718eb08f0bcf7619f8aca6f10e54f6fce0d03 | Python | swjuno/clustor | /face_detect_opencv_haar_movie.py | UTF-8 | 1,080 | 2.578125 | 3 | [] | no_license | import cv2
# Haar-cascade face detection over a video file, frame by frame.
face_cascade = cv2.CascadeClassifier('./data/haarcascade_frontalface_default.xml')
#mouth_cascade = cv2.CascadeClassifier('./data/haarcascade_mcs_mouth.xml')
scaler=0.4  # frames are downscaled to 40% before detection
cap =cv2.VideoCapture('./img/5.mp4')
while True:
    ret, img = cap.read()
    if not ret:
        break  # end of video (or read failure)
    img_resize = cv2.resize(img, (int(img.shape[1]*scaler), int(img.shape[0]*scaler)))
    # Cascade detection runs on the grayscale copy.
    img_gray=cv2.cvtColor(img_resize, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(img_gray,1.1,3)
    # Draw a blue rectangle around every detected face.
    for x, y, w, h in faces:
        cv2.rectangle(img_resize, (x, y), (x + w, y + h), (255, 0, 0), 2)
    '''
    face = src[y: y + h, x: x + w]
    face_gray = src_gray[y: y + h, x: x + w]
    eyes = eye_cascade.detectMultiScale(face_gray)
    for (ex, ey, ew, eh) in eyes:
        cv2.rectangle(face, (ex, ey), (ex + ew, ey + eh), (0, 255, 0), 2)
    '''
    cv2.imshow('img', img_resize)
    key= cv2.waitKey(30)  # ~30 ms per frame
    if key & 0xFF == 27:#esc key exits early
        break
cap.release()
cv2.destroyAllWindows()
| true |
a6ec3ff0eba1de06169ffc89f4b3f8b2b4c34f65 | Python | iceman67/DataAnalysis | /6-10wk-openweathermap/openweathermap_json_csv2.py | UTF-8 | 1,898 | 3.171875 | 3 | [] | no_license | import requests
import json
"""#### openweathermap 결과 CSV 저장"""
def search_city_extract(city):
    """Fetch the current weather for ``city`` from OpenWeatherMap.

    Returns [timestamp, temperature, humidity, pressure] on success, or an
    error string on a non-200 response.  Temperature is in Kelvin (no
    ``units`` parameter is requested).
    """
    API_KEY = 'a070fcd8fc2db8d5d1f140466a2012b4'  # initialize your key here
    # SECURITY NOTE(review): API key hardcoded; prefer an environment variable.
    # Call the API; the JSON body is decoded below.
    url = f'http://api.openweathermap.org/data/2.5/weather?q={city}&APPID={API_KEY}'
    response = requests.get(url)
    # Non-200: unknown city name, invalid API key, etc.
    if response.status_code != 200:
        # BUG FIX: requests.Response has no .get(); the error message
        # lives in the JSON body returned by the API.
        message = response.json().get('message', '')
        return f'Error getting temperature for {city.title()}. Error message = {message}'
    data = response.json()
    now = datetime.now()
    date_time = now.strftime("%m/%d/%Y %H:%M:%S")
    reading = data['main']
    # Same element order the CSV writer downstream depends on.
    return [date_time, reading['temp'], reading['humidity'], reading['pressure']]
import csv
import requests
from datetime import datetime
import time
# Ask how many readings to take and for which city, then append one CSV row
# per reading, sleeping `delay` seconds between requests.
# NOTE(review): if int() fails here, `count` is never bound and the loop
# below raises NameError; same for a failed `city` prompt (defaulted above).
try:
    count = int(input("# of service rqeusts:"))
except ValueError as e:
    print(e)
city='cheonan'
try:
    city = input("city:")
except ValueError as e:
    print(e)
delay = 600  # seconds between successive API calls (10 minutes)
with open("{}.csv".format(city), "w", newline='') as csv_file:
    fieldnames = ['date', 'temperature','humidity','pressure']
    writer = csv.DictWriter(csv_file, fieldnames=fieldnames)
    writer.writeheader()
    for i in range(count):
        # result is [date, temperature, humidity, pressure] (or an error string).
        result = search_city_extract(city)
        print (result)
        writer.writerow({'date': result[0], 'temperature': result[1], 'humidity':result[2], 'pressure':result[3] })
        time.sleep(delay)
| true |
a0154616c01fa107bff083e40b1e2cae89f75557 | Python | Wizmann/ACM-ICPC | /HackerRank/All Contests/ProjectEuler+/022.py | UTF-8 | 353 | 3.359375 | 3 | [
"LicenseRef-scancode-warranty-disclaimer"
def get_score(i, s):
    """Return the name score: position ``i`` (1-based) times the sum of the
    alphabetical values of the uppercase letters in ``s`` (A=1 ... Z=26)."""
    letter_total = sum(ord(ch) - ord('A') + 1 for ch in s)
    return i * letter_total
# Project Euler 22 / HackerRank variant (Python 2): sort the names, score
# each by alphabetical position, then answer the lookup queries.
n = int(raw_input())
d = {}
names = sorted([raw_input() for i in xrange(n)])
for i, name in enumerate(names):
    d[name] = get_score(i + 1, name)  # positions are 1-based after sorting
q = int(raw_input())
for i in xrange(q):
    name = raw_input()
    print d[name]
| true |
520b2b0d211da3d190c9b7b816505eefd069e40c | Python | RuidongZ/LeetCode | /code/240.py | UTF-8 | 1,237 | 4 | 4 | [] | no_license | # -*- Encoding:UTF-8 -*-
# 240. Search a 2D Matrix II
# Write an efficient algorithm that searches for a value in an m x n matrix.
# This matrix has the following properties:
#
# Integers in each row are sorted in ascending from left to right.
# Integers in each column are sorted in ascending from top to bottom.
# For example,
#
# Consider the following matrix:
#
# [
# [1, 4, 7, 11, 15],
# [2, 5, 8, 12, 19],
# [3, 6, 9, 16, 22],
# [10, 13, 14, 17, 24],
# [18, 21, 23, 26, 30]
# ]
# Given target = 5, return true.
#
# Given target = 20, return false.
class Solution(object):
    def searchMatrix(self, matrix, target):
        """
        Return True if ``target`` occurs in ``matrix``, where each row is
        sorted ascending left-to-right and each column top-to-bottom.

        Staircase search from the top-right corner: each comparison
        discards a whole column (current value too large) or a whole row
        (too small), giving O(m + n).  This also fixes the original's
        early ``return False`` inside the row-scanning loop, which bailed
        out after the first row it scanned.

        :type matrix: List[List[int]]
        :type target: int
        :rtype: bool
        """
        if not matrix or not matrix[0]:
            return False
        row, col = 0, len(matrix[0]) - 1
        while row < len(matrix) and col >= 0:
            value = matrix[row][col]
            if value == target:
                return True
            if value > target:
                col -= 1   # everything below in this column is even larger
            else:
                row += 1   # everything left in this row is even smaller
        return False
| true |
723be6f53e67aad81b83602e18b6820a51b5a4e0 | Python | allenai/allennlp | /tests/common/params_test.py | UTF-8 | 11,067 | 2.578125 | 3 | [
"Apache-2.0"
] | permissive | import json
import os
import re
from collections import OrderedDict
import pytest
from allennlp.common.checks import ConfigurationError
from allennlp.common.params import (
infer_and_cast,
Params,
remove_keys_from_params,
with_overrides,
)
from allennlp.common.testing import AllenNlpTestCase
class TestParams(AllenNlpTestCase):
    def test_load_from_file(self):
        # Loading the experiment fixture exposes its top-level keys, and
        # pop() hands back nested sections.
        filename = self.FIXTURES_ROOT / "simple_tagger" / "experiment.json"
        params = Params.from_file(filename)
        assert "dataset_reader" in params
        assert "trainer" in params
        model_params = params.pop("model")
        assert model_params.pop("type") == "simple_tagger"
    def test_replace_none(self):
        # The literal string "None" is converted to Python None at every
        # nesting depth (top level, inside lists, inside nested dicts).
        params = Params({"a": "None", "b": [1.0, "None", 2], "c": {"d": "None"}})
        assert params["a"] is None
        assert params["b"][1] is None
        assert params["c"]["d"] is None
def test_bad_unicode_environment_variables(self):
filename = self.FIXTURES_ROOT / "simple_tagger" / "experiment.json"
os.environ["BAD_ENVIRONMENT_VARIABLE"] = "\udce2"
Params.from_file(filename)
del os.environ["BAD_ENVIRONMENT_VARIABLE"]
    def test_with_overrides(self):
        # Dotted override keys address nested dict entries ("foo.bar") and
        # list positions ("bar.0"); dict values replace (not merge) the
        # original subtree; new top-level keys ("z") are simply added.
        original = {
            "foo": {"bar": {"baz": 3}, "x": 0},
            "bar": ["a", "b", "c"],
            "baz": {"bar": 2, "y": 3, "x": [0, 1, 2]},
        }
        overrides = {
            "foo.bar": {"z": 2},
            "bar.0": "d",
            "baz.bar": 1,
            "baz.x": [0, 0],
            "z": 2,
        }
        assert with_overrides(original, overrides) == {
            "foo": {"bar": {"z": 2}, "x": 0},
            "bar": ["d", "b", "c"],
            "baz": {"bar": 1, "y": 3, "x": [0, 0]},
            "z": 2,
        }
    def test_bad_overrides(self):
        # Out-of-range list index and dotting into a scalar both raise.
        with pytest.raises(ValueError, match="contains unused keys"):
            with_overrides({"foo": [0, 1, 2]}, {"foo.3": 4})
        with pytest.raises(ValueError, match="expected list or dict"):
            with_overrides({"foo": 3}, {"foo.x": 2})
    @pytest.mark.parametrize("input_type", [dict, str])
    def test_overrides(self, input_type):
        # Overrides are accepted both as a dict and as a JSON-encoded string.
        filename = self.FIXTURES_ROOT / "simple_tagger" / "experiment.json"
        overrides = {
            "train_data_path": "FOO",
            "model.type": "BAR",
            "model.text_field_embedder.token_embedders.tokens.type": "BAZ",
            "data_loader.batch_sampler.sorting_keys.0": "question",
        }
        params = Params.from_file(
            filename, overrides if input_type == dict else json.dumps(overrides)
        )
        assert "dataset_reader" in params
        assert "trainer" in params
        assert params["train_data_path"] == "FOO"
        assert params["data_loader"]["batch_sampler"]["sorting_keys"][0] == "question"
        model_params = params.pop("model")
        assert model_params.pop("type") == "BAR"
        assert model_params["text_field_embedder"]["token_embedders"]["tokens"]["type"] == "BAZ"
    def test_as_flat_dict(self):
        # Nested keys are flattened into dotted paths.
        params = Params({"a": 10, "b": {"c": 20, "d": "stuff"}}).as_flat_dict()
        assert params == {"a": 10, "b.c": 20, "b.d": "stuff"}
    def test_jsonnet_features(self):
        # jsonnet-specific features (self-references and object inheritance)
        # must survive the trip through Params.from_file.
        config_file = self.TEST_DIR / "config.jsonnet"
        with open(config_file, "w") as f:
            f.write(
                """{
              // This example is copied straight from the jsonnet docs
              person1: {
                  name: "Alice",
                  welcome: "Hello " + self.name + "!",
              },
              person2: self.person1 { name: "Bob" },
            }"""
            )
        params = Params.from_file(config_file)
        alice = params.pop("person1")
        bob = params.pop("person2")
        assert alice.as_dict() == {"name": "Alice", "welcome": "Hello Alice!"}
        assert bob.as_dict() == {"name": "Bob", "welcome": "Hello Bob!"}
        params.assert_empty("TestParams")
    def test_regexes_with_backslashes(self):
        # A single backslash is an invalid JSON escape, so parsing fails;
        # a doubled backslash survives as the regex a\.b.
        bad_regex = self.TEST_DIR / "bad_regex.jsonnet"
        good_regex = self.TEST_DIR / "good_regex.jsonnet"
        with open(bad_regex, "w") as f:
            f.write(r'{"myRegex": "a\.b"}')
        with open(good_regex, "w") as f:
            f.write(r'{"myRegex": "a\\.b"}')
        with pytest.raises(RuntimeError):
            Params.from_file(bad_regex)
        params = Params.from_file(good_regex)
        regex = params["myRegex"]
        assert re.match(regex, "a.b")
        assert not re.match(regex, "a-b")
        # Check roundtripping: re-serializing and re-parsing preserves the regex.
        good_regex2 = self.TEST_DIR / "good_regex2.jsonnet"
        with open(good_regex2, "w") as f:
            f.write(json.dumps(params.as_dict()))
        params2 = Params.from_file(good_regex2)
        assert params.as_dict() == params2.as_dict()
    def test_env_var_substitution(self):
        """jsonnet std.extVar pulls values from the process environment;
        a missing variable raises, a set one is substituted."""
        substitutor = self.TEST_DIR / "substitutor.jsonnet"
        key = "TEST_ENV_VAR_SUBSTITUTION"
        assert os.environ.get(key) is None
        with open(substitutor, "w") as f:
            f.write(f'{{"path": std.extVar("{key}")}}')
        # raises without environment variable set
        with pytest.raises(RuntimeError):
            Params.from_file(substitutor)
        os.environ[key] = "PERFECT"
        params = Params.from_file(substitutor)
        assert params["path"] == "PERFECT"
        del os.environ[key]
    @pytest.mark.xfail(
        not os.path.exists(AllenNlpTestCase.PROJECT_ROOT / "training_config"),
        reason="Training configs not installed with pip",
    )
    def test_known_configs(self):
        """Smoke test: every shipped training_config file must parse.

        Configs use std.extVar substitution, so each referenced environment
        variable is temporarily pointed at TEST_DIR, then cleaned up.
        """
        configs = os.listdir(self.PROJECT_ROOT / "training_config")
        # Our configs use environment variable substitution, and the _jsonnet parser
        # will fail if we don't pass it correct environment variables.
        forced_variables = [
            # constituency parser
            "PTB_TRAIN_PATH",
            "PTB_DEV_PATH",
            "PTB_TEST_PATH",
            # dependency parser
            "PTB_DEPENDENCIES_TRAIN",
            "PTB_DEPENDENCIES_VAL",
            # multilingual dependency parser
            "TRAIN_PATHNAME",
            "DEV_PATHNAME",
            "TEST_PATHNAME",
            # srl_elmo_5.5B
            "SRL_TRAIN_DATA_PATH",
            "SRL_VALIDATION_DATA_PATH",
            # coref
            "COREF_TRAIN_DATA_PATH",
            "COREF_DEV_DATA_PATH",
            "COREF_TEST_DATA_PATH",
            # ner
            "NER_TRAIN_DATA_PATH",
            "NER_TEST_A_PATH",
            "NER_TEST_B_PATH",
            # bidirectional lm
            "BIDIRECTIONAL_LM_TRAIN_PATH",
            "BIDIRECTIONAL_LM_VOCAB_PATH",
            "BIDIRECTIONAL_LM_ARCHIVE_PATH",
        ]
        # Only set variables that are not already defined, so a real env wins.
        for var in forced_variables:
            os.environ[var] = os.environ.get(var) or str(self.TEST_DIR)
        for config in configs:
            try:
                Params.from_file(self.PROJECT_ROOT / "training_config" / config)
            except Exception as e:
                raise AssertionError(f"unable to load params for {config}, because {e}")
        # Remove only the variables we injected ourselves.
        for var in forced_variables:
            if os.environ[var] == str(self.TEST_DIR):
                del os.environ[var]
    def test_as_ordered_dict(self):
        """Preference orders control key ordering at every nesting level;
        unlisted keys fall back to alphabetical order."""
        # keyD > keyC > keyE; keyDA > keyDB; Next all other keys alphabetically
        preference_orders = [["keyD", "keyC", "keyE"], ["keyDA", "keyDB"]]
        params = Params(
            {
                "keyC": "valC",
                "keyB": "valB",
                "keyA": "valA",
                "keyE": "valE",
                "keyD": {"keyDB": "valDB", "keyDA": "valDA"},
            }
        )
        ordered_params_dict = params.as_ordered_dict(preference_orders)
        expected_ordered_params_dict = OrderedDict(
            {
                "keyD": {"keyDA": "valDA", "keyDB": "valDB"},
                "keyC": "valC",
                "keyE": "valE",
                "keyA": "valA",
                "keyB": "valB",
            }
        )
        # json.dumps preserves insertion order, so this compares key order too.
        assert json.dumps(ordered_params_dict) == json.dumps(expected_ordered_params_dict)
    def test_to_file(self):
        """to_file writes valid JSON and honors preference orders when given."""
        # Test to_file works with or without preference orders
        params_dict = {"keyA": "valA", "keyB": "valB"}
        expected_ordered_params_dict = OrderedDict({"keyB": "valB", "keyA": "valA"})
        params = Params(params_dict)
        file_path = self.TEST_DIR / "config.jsonnet"
        # check with preference orders
        params.to_file(file_path, [["keyB", "keyA"]])
        with open(file_path, "r") as handle:
            ordered_params_dict = OrderedDict(json.load(handle))
        assert json.dumps(expected_ordered_params_dict) == json.dumps(ordered_params_dict)
        # check without preference orders doesn't give error
        params.to_file(file_path)
    def test_infer_and_cast(self):
        """infer_and_cast converts string leaves to int/float/bool recursively,
        leaves genuine strings alone, and rejects uncastable values."""
        lots_of_strings = {
            "a": ["10", "1.3", "true"],
            "b": {"x": 10, "y": "20.1", "z": "other things"},
            "c": "just a string",
        }
        casted = {
            "a": [10, 1.3, True],
            "b": {"x": 10, "y": 20.1, "z": "other things"},
            "c": "just a string",
        }
        assert infer_and_cast(lots_of_strings) == casted
        # A type object is not an inferable leaf value.
        contains_bad_data = {"x": 10, "y": int}
        with pytest.raises(ValueError, match="cannot infer type"):
            infer_and_cast(contains_bad_data)
        # Params only casts when explicitly asked to.
        params = Params(lots_of_strings)
        assert params.as_dict() == lots_of_strings
        assert params.as_dict(infer_type_and_cast=True) == casted
def test_pop_choice(self):
choices = ["my_model", "other_model"]
params = Params({"model": "my_model"})
assert params.pop_choice("model", choices) == "my_model"
params = Params({"model": "non_existent_model"})
with pytest.raises(ConfigurationError):
params.pop_choice("model", choices)
params = Params({"model": "module.submodule.ModelName"})
assert params.pop_choice("model", "choices") == "module.submodule.ModelName"
params = Params({"model": "module.submodule.ModelName"})
with pytest.raises(ConfigurationError):
params.pop_choice("model", choices, allow_class_names=False)
    def test_remove_keys_from_params(self):
        """remove_keys_from_params deletes matching keys at any nesting depth,
        including whole subtrees."""
        filename = self.FIXTURES_ROOT / "simple_tagger" / "experiment.json"
        params = Params.from_file(filename)
        assert params["data_loader"]["batch_sampler"]["type"] == "bucket"
        assert params["data_loader"]["batch_sampler"]["batch_size"] == 80
        remove_keys_from_params(params, keys=["batch_size"])
        assert "batch_size" not in params["data_loader"]["batch_sampler"]
        remove_keys_from_params(params, keys=["type", "batch_size"])
        assert "type" not in params["data_loader"]["batch_sampler"]
        remove_keys_from_params(params, keys=["data_loader"])
        assert "data_loader" not in params
| true |
91a59c05d6013a8b1adae55e9d2924d971a59c63 | Python | furkandv/Example | /inputalarakparolaoluşturma.py | UTF-8 | 321 | 3.859375 | 4 | [] | no_license | """
Generate a random password from user input (name + random digits). +++
"""
import random

# Prompt (Turkish): "enter your name".
isim = input("isminizi giriniz: ")

# Bug fix: the accumulator was initialised to " " (a single space), so every
# generated password started with a leading space.
parola = ""
# 5-8 characters drawn at random from the entered name...
for i in range(random.randint(5, 8)):
    parola += random.choice(isim)
# ...followed by 3-5 random digits.
for i in range(random.randint(3, 5)):
    parola += str(random.randint(0, 9))
# Prompt (Turkish): "your password".
print("Parolanız: ", parola)
9fd1e134c2d1bc99e219d40a1d6eaaef40975f50 | Python | Arjundoodle/Machine_learning | /numpy.py | UTF-8 | 1,775 | 3.0625 | 3 | [] | no_license | import numpy as np
#%%
# Demonstration script of basic NumPy features; each #%% marks a notebook cell.
#1D, 2D and 3D Array
arr = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
arr2 = np.array([[1, 2, 3], [4, 5, 6]])
arr3 = np.array([[[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 5, 6]]])
arrs=np.array(['apple', 'banana', 'cherry'])
print(arr,arr2,arr3)
#%%
# Element access by index.
print(arr[2],arr2[0][1])
#%%
# Slicing (1D, negative indices, 2D).
print(arr[1:5])
print(arr[-3:-1])
print(arr2[0:2, 2])
#%%
# dtypes: int array vs unicode string array.
print(arr.dtype)
print(arrs.dtype)
#%%
# Cast to 32-bit int with astype.
newarr = arr.astype('i')
print(newarr.dtype)
#%%
# copy() owns its data, so .base is None (unlike a view).
x = arr.copy()
print(x)
print(x.base)
#%%
print(arr.shape)
#%%
# reshape(-1) flattens to 1D.
arr = np.array([[1, 2, 3], [4, 5, 6]])
newarr = arr.reshape(-1)
print(newarr)
#%%
# Iterating arrays (nested loops for 3D).
arr = np.array([1, 2, 3])
for x in arr:
    print(x)
print("")
arr = np.array([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]])
for x in arr:
    for y in x:
        for z in y:
            print(z)
#%%
# ndenumerate yields (index-tuple, value) pairs.
arr = np.array([1, 2, 3])
for idx, x in np.ndenumerate(arr):
    print(idx, x)
#%%
# Joining arrays.
arr1 = np.array([1, 2, 3])
arr2 = np.array([4, 5, 6])
arr = np.concatenate((arr1, arr2))
print(arr)
#%%
# stack along a new axis.
arr = np.stack((arr1, arr2), axis=1)
print(arr)
#%%
# Splitting into equal parts.
arr = np.array([1, 2, 3, 4, 5, 6])
newarr = np.array_split(arr, 3)
print(newarr)
#%%
arr = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12], [13, 14, 15], [16, 17, 18]])
newarr = np.array_split(arr, 3)
print(newarr)
#%%
# where returns the indices of matching elements.
arr = np.array([1, 2, 3, 4, 5, 4, 4])
x = np.where(arr == 4)
print(x)
#%%
# Binary search for an insertion point.
arr = np.array([6, 7, 8, 9])
x = np.searchsorted(arr, 7)
print(x)
#%%
# Sorting (per-row for 2D).
arr = np.array(['banana', 'cherry', 'apple'])
print(np.sort(arr))
arr = np.array([[3, 2, 4], [5, 0, 1]])
print(np.sort(arr))
#%%
# Boolean-mask indexing.
arr = np.array([41, 42, 43, 44])
x = [True, False, True, False]
newarr = arr[x]
print(newarr)
#%%
# Filtering with a boolean condition array.
arr = np.array([41, 42, 43, 44])
filter_arr = arr > 42
newarr = arr[filter_arr]
print(filter_arr)
print(newarr)
#%%
| true |
5f986386f8faad4c81cf5e5681cfcea8da25dacd | Python | kalpanasingh/rat-tools | /ratzdab/test/test_trig.py | UTF-8 | 1,535 | 2.8125 | 3 | [] | no_license | '''unit tests for ratzdab conversion utilities: trig headers'''
import unittest
import ratzdab
from rat import ROOT
class TestTRIG(unittest.TestCase):
    # Round-trip test: RAT TRIGInfo -> ZDAB TriggerInfo -> RAT TRIGInfo.
    def test_trig(self):
        '''Test conversion of RAT::DS::TRIGInfo objects to and from ZDAB
        TriggerInfos.
        Exceptions:
        * runID is not set by ratzdab::unpack::trig
        '''
        trig = ROOT.RAT.DS.TRIGInfo()
        # Distinctive bit patterns so any field mix-up is visible.
        trig.trigMask = 0x10101011
        trig.pulserRate = 0x20202022
        trig.MTC_CSR = 0x30303033
        trig.lockoutWidth = 0x40404044
        trig.prescaleFreq = 0x50505055
        trig.eventID = 0x60606066
        trig.runID = 0x70707077
        for i in range(10):
            trig.trigTHold.push_back(11 * i)
            trig.trigZeroOffset.push_back(22 * i)
        zdab_trig = ratzdab.pack.trig(trig)
        trig_converted = ratzdab.unpack.trig(zdab_trig)
        # runID is deliberately not compared (see docstring above).
        self.assertTrue(trig.trigMask == trig_converted.trigMask)
        self.assertTrue(trig.pulserRate == trig_converted.pulserRate)
        self.assertTrue(trig.MTC_CSR == trig_converted.MTC_CSR)
        self.assertTrue(trig.lockoutWidth == trig_converted.lockoutWidth)
        self.assertTrue(trig.prescaleFreq == trig_converted.prescaleFreq)
        self.assertTrue(trig.eventID == trig_converted.eventID)
        for i in range(10):
            self.assertTrue(trig.trigTHold[i] == trig_converted.trigTHold[i])
            self.assertTrue(trig.trigZeroOffset[i] == trig_converted.trigZeroOffset[i])
# Run the unit tests when executed as a script.
if __name__ == '__main__':
    unittest.main()
| true |
dc3438898f76d8fa0789fb2717ff4db7d5ab7b2c | Python | Creoles/creole | /creole/cli/util.py | UTF-8 | 1,277 | 3.09375 | 3 | [] | no_license | # coding: utf-8
import os
from contextlib import contextmanager
from subprocess import check_output, check_call, CalledProcessError
@contextmanager
def cd(dir_path):
    """Context manager: temporarily change the working directory.

    Bug fix: the original did not restore the previous working directory if
    the managed block raised; the restore now runs in a ``finally`` clause.

    Args:
        dir_path: directory to chdir into for the duration of the block.
    """
    orig_dir = os.path.abspath('.')
    os.chdir(dir_path)
    try:
        yield
    finally:
        os.chdir(orig_dir)
@contextmanager
def cd_root():
    """Change working dir to root of current git repo.

    Bug fix: the original did not restore the previous working directory if
    the managed block raised; the restore now runs in a ``finally`` clause.
    """
    orig_dir = os.path.abspath('.')
    root = get_repo_root()
    os.chdir(root)
    try:
        yield
    finally:
        os.chdir(orig_dir)
def get_repo_root():
    """Return the top-level directory of the current git repository,
    as reported by ``git rev-parse --show-toplevel``."""
    toplevel = check_output(['git', 'rev-parse', '--show-toplevel'])
    return toplevel.strip()
def run(cmd, shell=False):
    """Execute *cmd* with :func:`subprocess.check_call` and return its exit
    code instead of raising :class:`subprocess.CalledProcessError` — the
    command's own output and exit status are relied on to surface failures.

    Args:
        cmd: command to execute, same format as :func:`subprocess.check_call`
        shell: run through the shell (see the stdlib docs for the security
            concern)

    Returns:
        The command's integer exit code (0 on success).
    """
    try:
        check_call(cmd, shell=shell)
    except CalledProcessError as exc:
        return exc.returncode
    return 0
1adc536cabbfa03f1fafb20906711efe7f91aa0f | Python | databill86/advanced-statistics | /statistics/src/3-14-times-magazine.py | UTF-8 | 1,139 | 2.609375 | 3 | [] | no_license | # Import
import os

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pymc3 as pm
from scipy.stats import norm
from tabulate import tabulate
import seaborn as sns

# Config
os.chdir("/home/jovyan/work")
# Bug fix: `os`, `np` and `tabulate` were used below without being imported.
# NOTE(review): the two IPython magics below are only valid inside a notebook
# and are a SyntaxError in a plain .py file, so they are kept as comments.
# %config InlineBackend.figure_format = 'retina'
# %matplotlib inline
plt.rcParams["figure.figsize"] = (12, 3)

# Preparation
data = pd.read_csv("./data/times_magazine.csv")
print(tabulate(data.head(), headers="keys", tablefmt="psql"))

# Modeling: change-point model — the Poisson rate switches from lam_1 to
# lam_2 at the (unknown) year tau.
N = len(data.Female)
lam_ = data.Female.mean()
with pm.Model() as model:
    lam_1 = pm.Exponential("lam_1", lam_)
    lam_2 = pm.Exponential("lam_2", lam_)
    tau = pm.DiscreteUniform("tau", lower=1923, upper=1923+N)
    idx = np.arange(1923, 1923+N)
    lam = pm.math.switch(tau > idx, lam_1, lam_2)
    female = pm.Poisson("female", lam, observed=data.Female)
    step = pm.Metropolis()
    trace = pm.sample(20000, tune=5000, step=step)

# Plot posterior distributions of the two rates and the switch year.
fig, ax = plt.subplots(nrows=1, ncols=2)
sns.distplot(trace["lam_1"], label="λ1", ax=ax[0])
sns.distplot(trace["lam_2"], label="λ2", ax=ax[0])
sns.countplot(trace["tau"], ax=ax[1])
plt.xticks(rotation=90)
plt.tight_layout()
plt.savefig("./results/3-14-times-magazine.png")
a7819d85ba2334bfb1ab674ab9ad348c0e74a1ef | Python | Sourish1997/ray-tracing | /materials/reflective_material.py | UTF-8 | 565 | 2.859375 | 3 | [] | no_license | from .material import Material
import numpy as np
class ReflectiveMaterial(Material):
    """Material with an ambient term plus a reflective contribution per light."""
    def __init__(self, amb, ref):
        # amb: ambient coefficient (forwarded to Material); ref: reflectivity.
        super().__init__(amb)
        self.ref = ref
    def get_color(self, point, normal, ray, lights):
        """Return an RGB color (length-3 ndarray) for `point`.

        Starts from a fixed ambient term, then adds each light's contribution
        when the light-direction/point ratio is uniform across components.
        NOTE(review): dividing the direction by `point` and testing all
        components equal looks like a collinearity check — confirm intent.
        """
        c_rgb = np.zeros(3)
        ambient = np.array([0.4, 0.4, 0.4])  # hard-coded ambient light color
        c_rgb += self.amb * ambient
        for i in range(len(lights)):
            a = lights[i].get_dir(point) / point
            if np.all(a == a[0]):
                c_rgb += (lights[i].col * lights[i].get_intensity(point) * self.ref)
        return c_rgb
| true |
779c99e1e8a2522397aed989080fd35023f26074 | Python | SimeonTsvetanov/Coding-Lessons | /SoftUni Lessons/Python Development/Python Advanced January 2020/Python Advanced/06. EXERCISE TUPLES AND SETS/08 - Multidimensional Lists - Exercise 2/05. Alice in Wonderland.py | UTF-8 | 4,319 | 3.609375 | 4 | [] | no_license | class Matrix:
    def __init__(self, rows: int, type_data: type, separator=None):
        # rows: number of input rows to read; type_data: per-cell cast;
        # separator: split token for input lines (None = per-character).
        self.rows = rows
        self.type_data = type_data
        self.separator = separator
        self.data = Matrix.creation(self)
    @property
    def sum_numbers(self):
        # Sum of all elements; only meaningful for int/float matrices.
        if (self.type_data == int) or (self.type_data == float):
            return sum([sum(r) for r in self.data])
        return "Elements aren't numbers"
    def next_positions(self, direction, current_row, current_col, check_if_valid=False):
        # Return [row, col] one step in `direction`; False if out of bounds
        # and check_if_valid is requested.
        delta = {"up": [-1, 0], "down": [+1, 0], "left": [0, -1], "right": [0, + 1]}[direction]
        next_row, next_col = [current_row + delta[0], current_col + delta[1]]
        if check_if_valid:
            if not self.check_if_element_index_is_valid(r=next_row, c=next_col):
                return False
        return [next_row, next_col]
    @property
    def flat_matrix(self):
        # All elements in row-major order.
        return [j for sub in self.data for j in sub]
    @property
    def primary_diagonal(self):
        return [self.data[i][i] for i in range(len(self.data))]
    @property
    def secondary_diagonal(self):
        return [self.data[i][len(self.data) - i - 1] for i in range(len(self.data))]
    def creation(self):
        # Read `rows` lines from stdin, casting each cell with type_data.
        if self.separator:
            return [[self.type_data(sym) for sym in input().split(self.separator)] for _ in range(self.rows)]
        else:
            return [[self.type_data(sym) for sym in input()] for _ in range(self.rows)]
    def find_coordinates_of_objects(self, element_to_search):
        """Return a list of (row, col) tuples for every cell equal to
        `element_to_search`."""
        found_coordinates = []
        for r in range(len(self.data)):
            for c in range(len(self.data[r])):
                if self.data[r][c] == element_to_search:
                    found_coordinates.append((r, c))
        return found_coordinates
    def swap_elements(self, x_1: int, y_1: int, x_2: int, y_2: int):
        # In-place swap of two cells.
        self.data[x_1][y_1], self.data[x_2][y_2] = self.data[x_2][y_2], self.data[x_1][y_1]
    def check_if_element_index_is_valid(self, r, c):
        # True when (r, c) lies inside the matrix bounds.
        if (0 <= r < len(self.data)) and (0 <= c < len(self.data[0])):
            return True
        else:
            return False
    def __repr__(self):
        # One space-joined line per row.
        output_string = ''
        for r in self.data:
            output_string += f"{' '.join((list(map(str, r))))}\n"
        return output_string
# Game loop: Alice walks a square grid, leaving "*" behind. She wins after
# collecting 10 cups of tea (digit cells), loses on the rabbit "R" or when a
# move would leave the board.
size = int(input())
matrix = Matrix(rows=size, type_data=str, separator=" ")
alice = matrix.find_coordinates_of_objects("A")[0]
total_count_teas = 0
position = alice
while True:
    matrix.data[position[0]][position[1]] = "*"
    command = input()
    next_position = matrix.next_positions(direction=command, current_row=position[0], current_col=position[1], check_if_valid=True)
    if next_position:
        # Still in range
        if matrix.data[next_position[0]][next_position[1]] == "." or matrix.data[next_position[0]][next_position[1]] == "*":
            # Just walk slowly
            matrix.data[position[0]][position[1]] = "*"
            matrix.data[next_position[0]][next_position[1]] = "*"
            position = [next_position[0], next_position[1]]
        elif matrix.data[next_position[0]][next_position[1]].isdigit():
            # Collect some Tea
            total_count_teas += int(matrix.data[next_position[0]][next_position[1]])
            matrix.data[next_position[0]][next_position[1]] = "*"
            position = [next_position[0], next_position[1]]
            if total_count_teas >= 10:
                print(f"She did it! She went to the party.")
                break
        elif matrix.data[next_position[0]][next_position[1]] == "R":
            # She is in trouble
            matrix.data[next_position[0]][next_position[1]] = "*"
            position = [next_position[0], next_position[1]]
            print(f"Alice didn't make it to the tea party.")
            break
    else:
        # Alice is out and can't be found!
        print(f"Alice didn't make it to the tea party.")
        break
print(matrix)
| true |
d4f07fb7e9c944c81377c35bb915c79248c83431 | Python | zhaoyuanjdf/LwModel2 | /models/model_base.py | UTF-8 | 1,328 | 2.625 | 3 | [] | no_license | # -*- coding:utf-8 -*-
from keras.models import Sequential
from keras.models import load_model
class ModelBase(object):
def __init__(self):
self.model = Sequential()
def fit(self, x, y, batch_size=32, epochs=10, verbose=1, callbacks=None,
validation_split=0., validation_data=None, shuffle=True,
class_weight=None, sample_weight=None, initial_epoch=0):
self.model.fit(x, y, batch_size, epochs, verbose, callbacks,
validation_split, validation_data, shuffle,
class_weight, sample_weight, initial_epoch)
def predict_on_batch(self, x):
return self.model.predict_on_batch(x)
def get_model_para(self):
return self.model.get_weights()
def train_on_batch(self, x, y):
return self.model.train_on_batch(x, y)
def predict(self, x):
return self.model.predict(x)
def save_model(self, model_path):
self.model.save(model_path)
def fit_generator(self, generator, steps_per_epoch, epochs=1, validation_data=None):
self.model.fit_generator(generator, steps_per_epoch, epochs=epochs, validation_data=validation_data,
workers=2, use_multiprocessing=True)
def load_model(self, model_path):
self.model = load_model(model_path)
| true |
60b9169d301d81293de5894b2d80d8a62eaea851 | Python | thales-mro/python-basic | /numpy/vectorized-computation/permute-axes-with-transpose-statement-high-dimension-arrays.py | UTF-8 | 198 | 3.34375 | 3 | [] | no_license | import numpy as np
# Demo: permuting axes of a 3D array with transpose((1, 0, 2)) swaps the
# first two axes, versus .T which reverses all axes.
X = np.arange(16).reshape((2, 2, 4))
print("Original X:")
print(X)
Y = X.transpose((1, 0, 2))
print("Rearrange with transpose:")
print(Y)
print("Default transpose:")
print(X.T)
84d44a1ea8a3bf79c09b178d6b40835a602925b8 | Python | VirenS13117/Reinforcement-Learning | /Ex6/src/Stochastic_Environment.py | UTF-8 | 3,658 | 3.15625 | 3 | [] | no_license | import numpy as np
class StochasticGrid:
    """9x6 stochastic grid-world: actions go in the intended direction with
    probability 0.8 and slip to each perpendicular direction with 0.1.
    States are (x, y) tuples; `blocks` are impassable cells."""
    def __init__(self, blocks):
        # Grid bounds are inclusive: x in [0, 8], y in [0, 5].
        self.min_x = 0
        self.max_x = 8
        self.min_y = 0
        self.max_y = 5
        self.start = (3,0)
        self.goal_state = (8,5)
        self.actions = ["left", "right", "up", "down"]
        self.curr_state = self.start
        self.blocks = blocks
        return
    def change_blocklist(self, new_blocks):
        # Replace the set of impassable cells.
        self.blocks = new_blocks
        return
    def is_goal(self, state):
        return state[0] == self.goal_state[0] and state[1] == self.goal_state[1]
    def reset(self):
        # Return the agent to the start state.
        self.curr_state = self.start
        return self.curr_state
    def get_action_name(self, action_id):
        # Map an integer action id (0-3) to its direction name; -1 if invalid.
        if action_id == 0:
            return "left"
        elif action_id == 1:
            return "right"
        elif action_id == 2:
            return "up"
        elif action_id == 3:
            return "down"
        else:
            print("invalid action id")
            return -1
    def isLegalState(self, curr_state):
        # Legal = inside bounds and not a blocked cell.
        return (curr_state not in self.blocks) and (self.min_x <= curr_state[0] <= self.max_x) and (self.min_y <= curr_state[1] <= self.max_y)
    def make_transition(self, state, action):
        # Deterministic transition (no slip) from an arbitrary state; reward 1
        # and done=True only at the goal.
        reward = 0
        done = False
        curr_state = self.make_move(state, self.get_action_name(action))
        if self.is_goal(curr_state):
            reward = 1
            done = True
        return curr_state, reward, done, {}
    def left_perpendicular(self, action):
        # Direction 90 degrees counter-clockwise from `action`.
        if action == "up":
            return "left"
        elif action == "left":
            return "down"
        elif action == "down":
            return "right"
        else:
            return "up"
    def right_perpendicular(self, action):
        # Direction 90 degrees clockwise from `action`.
        if action == "up":
            return "right"
        elif action == "left":
            return "up"
        elif action == "down":
            return "left"
        else:
            return "down"
    def get_deterministic_action(self, action):
        # Returns (random draw, [(intended, 0.8), (left, 0.1), (right, 0.1)]).
        num = np.random.random()
        action_left = self.left_perpendicular(action)
        action_right = self.right_perpendicular(action)
        return num,[(action,0.8), (action_left, 0.1), (action_right, 0.1)]
    def step(self, action):
        # Gym-like step. NOTE(review): `done` is never set True here (even at
        # the goal), and `epsilon` is drawn but unused — the agent always moves
        # to the intended direction's state; the slip outcomes are only
        # reported in `info`. Confirm this is the intended semantics.
        done = False
        epsilon, actions_list = self.get_deterministic_action(self.get_action_name(action))
        optimal_state = self.make_move(self.curr_state, actions_list[0][0])
        state_left = self.make_move(self.curr_state, actions_list[1][0])
        state_right = self.make_move(self.curr_state, actions_list[2][0])
        reward_optimal, reward_left, reward_right = 0, 0, 0
        if self.is_goal(optimal_state):
            reward_optimal = 1
        if self.is_goal(state_left):
            reward_left = 1
        if self.is_goal(state_right):
            reward_right = 1
        self.curr_state = optimal_state
        info = [(optimal_state, reward_optimal, 0.8), (state_left, reward_left, 0.1), (state_right, reward_right, 0.1)]
        return self.curr_state, reward_optimal, done, info
    def get_current_state(self):
        return self.curr_state
    def make_move(self, state, action):
        # Apply one step in `action`; illegal destinations leave the state
        # unchanged. y grows upward.
        dx, dy = 0, 0
        new_state = state
        if action == "up":
            dy += 1
        elif action == "down":
            dy += -1
        elif action == "left":
            dx += -1
        elif action == "right":
            dx += 1
        else:
            print("wrong action : ", action)
            return new_state
        new_state = (state[0]+dx, state[1]+dy)
        if self.isLegalState(new_state):
            return new_state
        return state
| true |
c452a7f4e4878e5e249c5e165fdde5d561e00418 | Python | dr-dos-ok/Code_Jam_Webscraper | /solutions_python/Problem_96/1179.py | UTF-8 | 609 | 2.6875 | 3 | [] | no_license | f = open("B-large.in")
# Google Code Jam "Dancing With the Googlers": for each case, count dancers
# who can reach at least p in some judge's score. A total t of 3 scores can
# reach p without surprise when t >= 3p-2, or with one of the S remaining
# "surprising" triplets when t >= 3p-4 (and 3p-4 > 0).
T = int(f.readline())
out = open("B-large.out", 'w')
for i in range(T):
    out.write("Case #" + str(i+1) + ": ")
    line = f.readline().strip().split()
    maxi = 0
    N = int(line[0])
    S = int(line[1])
    p = int(line[2])
    scores = [int(line[j]) for j in range(3, len(line))]
    for t in scores:
        if t >= p*3-2:
            maxi += 1
        else:
            if t >= p*3 - 4 and S > 0 and p*3 - 4 > 0:
                maxi += 1
                S -= 1
    out.write(str(maxi) + "\n")
out.close()
f.close()
| true |
faf4d9c8292b46cd09e483cbfbba00c3e16a8aa5 | Python | air01a/pentestingtools | /crypto/genereSalt.py | UTF-8 | 1,549 | 2.5625 | 3 | [] | no_license | import hashlib
import sys
# Candidate base word for the salt, taken from the command line.
sentence=sys.argv[1]
clear=""       # known plaintext component (fill in before running)
finalHash=""   # target SHA1 hex digest to match (fill in before running)
salt1=""       # first salt component (fill in before running)
combinaison=2**len(sentence)  # number of upper/lowercase variants of `sentence`
separator=['','*',':','=','|']  # candidate separators between hash components
def sha1(cleartext):
    # Hex SHA1 digest of `cleartext` (Python 2 str in this script).
    return hashlib.sha1(cleartext).hexdigest()
def permutation(tab):
    """Return every permutation of the items in `tab` as a list of lists.

    Replaces the hand-rolled recursive implementation with
    itertools.permutations, which yields the same lexicographic-by-position
    ordering (and the same ``[tab]`` result for 0- or 1-element input).
    """
    from itertools import permutations as _perms
    return [list(p) for p in _perms(tab)]
# Python 2 brute-force loop: try every upper/lowercase casing of `sentence`
# as salt2, every ordering of (salt1, salt2, clear), every separator, and
# several hash-combination layouts, looking for one whose SHA1 equals
# `finalHash`.
permutationList=permutation([0,1,2])
for i in xrange(1,combinaison):
    # Decode i into a bitmask over the characters of `sentence`.
    res=[]
    byte=i
    j=combinaison/2
    z=i
    while j>=1:
        if (byte-j)>=0:
            res.append(1)
            byte=byte-j
        else:
            res.append(0)
        j=j/2
    # Bit 1 -> uppercase that character, bit 0 -> keep as-is.
    salt2=""
    for j in xrange(0,len(res)):
        if res[j]==1:
            salt2=salt2+sentence[j].upper()
        else:
            salt2=salt2+sentence[j]
    sha1Tab=[salt1,salt2,clear]
    for index in permutationList:
        for sep in separator:
            # Candidate layouts: hash one part, a pair, all, or none.
            res=[]
            p1=sha1Tab[index[0]]
            p2=sha1Tab[index[1]]
            p3=sha1Tab[index[2]]
            res.append(sha1(p1)+sep+p2+sep+p3)
            res.append(p1+sep+sha1(p2)+sep+p3)
            res.append(p1+sep+p2+sep+sha1(p3))
            res.append(sha1(p1+sep+p2)+sep+p3)
            res.append(p1+sep+sha1(p2+sep+p3))
            res.append(sha1(p1)+sep+sha1(p2)+sep+sha1(p3))
            res.append(sha1(p1)+sep+p2+sep+sha1(p3))
            res.append(sha1(p1)+sep+sha1(p2)+sep+sha1(p3))
            res.append(p1+sep+p2+sep+p3)
            #print p1+sep+p2+sep+p3
            # Report every candidate whose outer SHA1 matches the target.
            for z in xrange(0,len(res)):
                if sha1(res[z])==finalHash:
                    print salt2
                    print z
                    print sep
                    print str(index[0])+' /' +str(index[1])+' /' + str(index[2])
| true |
74cc970b7a0454472846413c9c20aae8fee1c290 | Python | xin-tian1978/signpost | /test/edison_crypto_test/data/processing/integrate.py | UTF-8 | 680 | 3.21875 | 3 | [
"Apache-2.0",
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | #!/usr/bin/env python3
import argparse
import csv
# Numerically integrate the y column of a CSV over [start, end] using the
# trapezoidal rule, then scale the result.
parser = argparse.ArgumentParser()
parser.add_argument('file', help='csv file')
parser.add_argument('start', type=float, help='start x value')
parser.add_argument('end', type=float, help='end x value')
args = parser.parse_args()
s = 0
a = []
with open(args.file) as csvf:
    reader = csv.reader(csvf, delimiter=',')
    # Skip the 16-line instrument header before the data rows.
    for i in range(16):
        next(reader, None)
    for row in reader:
        x, y, m = row
        x = float(x)
        y = float(y)
        if x < args.start or x > args.end: continue
        a.append([x,y])
# Trapezoidal rule over consecutive (x, y) samples.
for i in range(1,len(a)):
    s += (a[i][0] - a[i-1][0]) * (a[i][1] + a[i-1][1])/2
# 3.3 appears to be a supply-voltage scale factor — TODO confirm units.
print(s*3.3)
| true |
c98c453cfb03efbaee6d2b147cb0d8b87288712d | Python | ceegin/Pocket-Passport | /model.py | UTF-8 | 2,525 | 2.84375 | 3 | [] | no_license | from flask_sqlalchemy import SQLAlchemy
db = SQLAlchemy()
##############################################################################
# Model definitions
class User(db.Model):
    """User login information."""
    __tablename__ = "users"
    # Auto-incrementing primary key.
    user_id = db.Column(db.Integer, autoincrement=True, primary_key=True)
    # Login email; nullable, as is the password.
    email = db.Column(db.String(64), nullable=True)
    first_name = db.Column(db.String(64), nullable=False)
    last_name = db.Column(db.String(64), nullable=False)
    # NOTE(review): stored as plain String(64) — presumably plaintext; confirm
    # whether hashing happens elsewhere.
    password = db.Column(db.String(64), nullable=True)
    def __repr__(self):
        """Provide helpful representation when printed."""
        return "<User user_id=%s email=%s>" % (self.user_id, self.email)
class SavedPhoto(db.Model):
    """User's saved photos."""
    __tablename__ = "saved_photos"
    saved_photos_id = db.Column(db.Integer, autoincrement=True, primary_key=True)
    # Owning user (foreign key into users.user_id).
    user_id = db.Column(db.Integer,
                        db.ForeignKey('users.user_id'))
    # External photo identifier and image source URL.
    photo_id = db.Column(db.String(200))
    img_src = db.Column(db.String(200), nullable=False)
    # Relationship back to User; exposes user.saved_photos ordered by PK.
    user = db.relationship("User",
                           backref=db.backref("saved_photos",
                                              order_by=saved_photos_id))
    def __repr__(self):
        """Provide helpful representation when printed."""
        return "<Photo photo_id=%s img_src=%s>" % (self.photo_id, self.img_src)
################################################################################
# Helper functions
def connect_to_db(app, db_uri=None):
    """Connect the database to Flask app.

    Args:
        app: the Flask application to bind.
        db_uri: optional database URI; defaults to the local
            pocketpassport PostgreSQL database.
    """
    # Configure to use our PostgreSQL database
    app.config['SQLALCHEMY_DATABASE_URI'] = db_uri or 'postgresql:///pocketpassport'
    app.config['SQLALCHEMY_ECHO'] = True  # log all emitted SQL
    app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
    db.app = app
    db.init_app(app)
def example_data():
    """Example data for testing: inserts two sample users and commits."""
    ron = User(first_name='Ron',
               last_name='Weasley',
               email='rweasley@gmail.com',
               password='magic')
    harry = User(first_name='Harry',
                 last_name='Potter',
                 email='hpotter@gmail.com',
                 password='gryff11')
    db.session.add_all([ron, harry])
    db.session.commit()
if __name__ == "__main__":
# As a convenience, if we run this module interactively, it will leave
# you in a state of being able to work with the database directly.
from server import app
connect_to_db(app)
print "Connected to DB."
| true |
80eee3b503e749bf074106681059edae8580977c | Python | eupston/Deepbeat-beatbox2midi | /utils/onset_offset.py | UTF-8 | 1,002 | 2.640625 | 3 | [
"MIT"
] | permissive | import librosa
import numpy as np
def onset_offset(sr, onset, onsetframes, silences):
    """Pair each detected onset with an offset, both in frames.

    An onset's offset is the start of the first silence lying strictly
    between it and the next onset; if there is none, the next onset itself.
    The final onset is paired with the start of the last silence.

    Args:
        sr: sample rate (Hz), used to convert silence times to frames.
        onset: unused (kept for interface compatibility).
        onsetframes: array of onset positions in frames (has .tolist()).
        silences: list of [start_ms, end_ms] silence intervals.

    Returns:
        List of [onset_frame, offset_frame] pairs.
    """
    frames_per_ms = sr / 1000
    # Silence intervals converted from milliseconds to frames.
    silence_frames = [
        [round(start * frames_per_ms), round(end * frames_per_ms)]
        for start, end in silences
    ]
    onsets = onsetframes.tolist()
    pairs = []
    for idx, current in enumerate(onsets):
        if current == onsets[-1]:
            # Last onset: close it at the start of the final silence.
            pairs.append([current, silence_frames[-1][0]])
            continue
        nxt = onsets[idx + 1]
        between = [s[0] for s in silence_frames if current < s[0] < nxt]
        pairs.append([current, between[0]] if between else [current, nxt])
    return pairs
| true |
627f156cf4ab8c18843a9f9bc19ccbd8182d5e8a | Python | hbcbh1999/pure-LDP | /pure_ldp/frequency_oracles/direct_encoding/de_client.py | UTF-8 | 1,492 | 2.734375 | 3 | [
"MIT"
] | permissive | from pure_ldp.core import FreqOracleClient
import math
import numpy as np
import random
class DEClient(FreqOracleClient):
    """Client side of the Direct Encoding (DE) local-DP frequency oracle:
    reports the true value w.p. p = e^eps / (e^eps + d - 1), otherwise a
    uniformly random *different* value."""
    def __init__(self, epsilon, d, index_mapper=None):
        # epsilon: privacy budget; d: domain size; index_mapper: maps raw
        # data items to indices in {0, ..., d-1}.
        super().__init__(epsilon, d, index_mapper)
        self.update_params(epsilon, d, index_mapper)
    def update_params(self, epsilon=None, d=None, index_mapper=None):
        """
        Used to update the client DE parameters.
        Args:
            epsilon: optional - privacy budget
            d: optional - domain size
            index_mapper: optional - function
        """
        super().update_params(epsilon, d, index_mapper)
        if epsilon is not None or d is not None: # If epsilon changes, update probs
            # p = e^eps / (e^eps + d - 1): probability of truthful reporting;
            # q = 1 / (e^eps + d - 1): probability of each specific lie.
            self.const = math.pow(math.e, self.epsilon) + self.d - 1
            self.p = (math.pow(math.e, self.epsilon)) / (self.const)
            self.q = 1/self.const
    def _perturb(self, data):
        # Keep the true index w.p. p; otherwise draw uniformly from the d-1
        # other indices (resampling a collision to d-1 keeps it uniform).
        if random.random() < self.p:
            return data
        else:
            perturbed_data = random.randint(0,self.d-2)
            if perturbed_data == data:
                return self.d-1
            else:
                return perturbed_data
    def privatise(self, data):
        """
        Privatises a user's data item using Direct Encoding (DE)
        Args:
            data: data item
        Returns: privatised data vector
        """
        index = self.index_mapper(data) # Maps data to the range {0,...,d-1}
        return self._perturb(index)
6094915dc5929832722d46d950b40962cfb0775b | Python | Ganesh-sunkara-1998/Python | /Important codes/satya questions/code6.py | UTF-8 | 269 | 3.703125 | 4 | [] | no_license | ''' Write a python function to get a string which is
n (non-negative integer) copies of a given string and return it...'''
def function(n, copies=1):
    """Return `copies` concatenated copies of the string `n`.

    The module docstring asks for n copies of a given string; the optional,
    backward-compatible `copies` parameter provides that, while the default
    of 1 preserves the original behaviour of returning the input unchanged.
    """
    return n * copies
def main():
    """Prompt the user for a string and pass it through `function`
    (the result is currently discarded)."""
    n = input("Enter your number:-")
    function(n)


if __name__ == "__main__":
    # Guard the call so importing this module does not block on input().
    main()
| true |
a3734f414293d80137e0cc24d788df70d9688de2 | Python | ruxtom/csc8112 | /consumers/graphing.py | UTF-8 | 657 | 2.8125 | 3 | [] | no_license | import plotly.graph_objects as go
from time import process_time
# Creates a graph with the given data in the server/public/external_html folder
def createGraph(title, xAxis, yAxis, yAxisTitle, outputFileName):
    """Render a bar chart with Plotly and write it as an HTML file.

    Args:
        title: chart title.
        xAxis: category labels (rooms) for the x axis.
        yAxis: bar heights.
        yAxisTitle: y-axis label.
        outputFileName: output file name (without the .html extension).
    """
    tStart = process_time()
    fig = go.Figure(data=go.Bar(x=xAxis, y=yAxis))
    fig.update_layout(title=title, xaxis_title="Room", yaxis_title=yAxisTitle)
    # Path is relative to the consumers/ working directory.
    fig.write_html("../server/public/external_html/" + str(outputFileName) +
                   ".html")
    # fig.write_html("./server/public/external_html/" + str(outputFileName) +
    # ".html")
    tStop = process_time()
    print("Total graphing time:", tStop-tStart)
| true |
ff973b1343a79269a6b9de3c27f417275b60ee7b | Python | 5l1v3r1/wargames | /cryptopals/set 4/challenge 29/run.py | UTF-8 | 2,328 | 2.71875 | 3 | [] | no_license | #!/usr/bin/env python
# The matasano crypto challenges - Set 4 Challenge 29 (http://cryptopals.com/sets/4/challenges/29/)
#
# Copyright (c) 2015 - Albert Puigsech Galicia (albert@puigsech.com)
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import sys
import random
import struct
# Cryptohelper from https://github.com/apuigsech/cryptohelper
from cryptohelper import *
# Random 16-byte secret key for the secret-prefix MAC (Python 2 str).
key = ''.join([chr(random.randint(0,255)) for i in range(16)])
def sha1_MAC(m, k):
    # Secret-prefix MAC: SHA1(key || message) — deliberately vulnerable to
    # length extension, which is the point of this challenge.
    return sha1(k + m)
def challenge_MAC_calc(m):
    # "Server" side: return the message together with its MAC.
    return m, sha1_MAC(m, key)
def challenge_MAC_check(m, hash):
    # "Server" side: recompute the secret-prefix MAC of `m` and compare it to
    # the supplied digest. NOTE: == is not a constant-time comparison, which
    # is acceptable for this exercise.
    return sha1_MAC(m, key) == hash
def guess_keylen():
    # TODO: Find way to get it.
    # Hard-coded to the known 16-byte key length for now.
    return 16
def tamper_data(data, hash, new_data):
    """Length-extension attack: forge a valid MAC for data || padding ||
    new_data without knowing the key, by resuming SHA1 from `hash`."""
    keylen = guess_keylen()
    # Reconstruct the message as the server's SHA1 saw it (with glue padding).
    m = message_pad(data, len(data)+keylen, "B") + new_data
    # Split the known digest into the five 32-bit SHA1 state words.
    s = struct.unpack(">IIIII", hash)
    new_data = message_pad(new_data, len(m)+keylen, "B")
    # Resume hashing the appended data from the recovered internal state.
    h = sha1(new_data, s, False)
    return m, h
# NOTE: Python 2 print statements — this script targets Python 2.
def main(argv):
    """Forge ';admin=true' onto a MAC'd profile string via SHA1 length
    extension and verify the forged MAC against the oracle."""
    message = "comment1=cooking%20MCs;userdata=foo;comment2=%20like%20a%20pound%20of%20bacon"
    new_message = ";admin=true"
    m_orig,h_orig = challenge_MAC_calc(message)
    m_tamper,h_tamper = tamper_data(m_orig, h_orig, new_message)
    if challenge_MAC_check(m_tamper, h_tamper) == True:
        print "WIN"
    else:
        print "LOSE"
if __name__ == "__main__":
    main(sys.argv)
| true |
e35c68ae2a727db7d6c38f856781b30b4a528fad | Python | Ruaman/PatternFlow | /algorithms/image/correction/main.py | UTF-8 | 704 | 2.609375 | 3 | [] | no_license | import matplotlib.pyplot as plt
from skimage import data
from PatternFlow.image.correction.correction import adjust_log
def main():
    """Demo log and inverse-log intensity correction on the moon sample image."""
    original = data.moon()
    corrected_log = adjust_log(original)
    corrected_inv_log = adjust_log(original, inv=True)

    # Lay the three images out side by side in one 10x5-inch figure.
    figure = plt.figure(figsize=(10, 5))

    figure.add_subplot(1, 3, 1)
    plt.title("origin")
    plt.imshow(original, cmap=plt.cm.gray)

    figure.add_subplot(1, 3, 2)
    plt.title("log correction")
    plt.imshow(corrected_log, cmap=plt.cm.gray)

    figure.add_subplot(1, 3, 3)
    plt.title("inverse log correction")
    plt.imshow(corrected_inv_log, cmap=plt.cm.gray)

    # plt.show()
    plt.savefig("correction_result.png")
# Run the demo when executed as a script.
if __name__ == '__main__':
    main()
| true |
bb7457407c49b8f9c9883d0b55018f95ea709fb1 | Python | krzkrusz/pageobjects | /page_object_example/tests/new_customer_page.py | UTF-8 | 3,050 | 2.625 | 3 | [] | no_license | from page_object_example.base_page import BasePage
class NewCustomerPage(BasePage):
    """Page object for the 'New Customer' registration form.

    NOTE(review): the find_element_by_* API was removed in Selenium 4;
    kept here because the project's Selenium version is unknown.
    """

    @property
    def name_text_field(self):
        return self.driver.find_element_by_name('name')

    @property
    def gender_male_radio_button(self):
        return self.driver.find_element_by_xpath('.//input[@type="radio" and @value="m"]')

    @property
    def gender_female_radio_button(self):
        return self.driver.find_element_by_xpath('.//input[@type="radio" and @value="f"]')

    @property
    def birth_text_field(self):
        return self.driver.find_element_by_xpath('.//input[@name="dob"]')

    @property
    def address_text_field(self):
        return self.driver.find_element_by_xpath('.//textarea[@name="addr"]')

    @property
    def city_text_field(self):
        return self.driver.find_element_by_xpath('.//input[@name="city"]')

    @property
    def state_text_field(self):
        return self.driver.find_element_by_xpath('.//input[@name="state"]')

    @property
    def pin_text_field(self):
        return self.driver.find_element_by_xpath('.//input[@name="pinno"]')

    @property
    def mobile_number_text_field(self):
        return self.driver.find_element_by_xpath('.//input[@name="telephoneno"]')

    @property
    def email_text_field(self):
        return self.driver.find_element_by_xpath('.//input[@name="emailid"]')

    @property
    def password_text_field(self):
        return self.driver.find_element_by_xpath('.//input[@name="password"]')

    @property
    def submit_button(self):
        return self.driver.find_element_by_xpath('.//input[@name="sub"]')

    @property
    def reset_button(self):
        return self.driver.find_element_by_xpath('.//input[@name="res"]')

    def open(self):
        """Navigate to the add-customer page via its link."""
        self.driver.find_element_by_xpath('.//a[@href="addcustomerpage.php"]').click()

    def fill_form(self, data, click_submit=True, click_reset=False):
        """Fill the form from the `data` dict and submit (or reset) it.

        Only keys present in `data` are typed; recognized keys:
        customer_name, gender ('male'/'female'), date_of_birth, address,
        city, state, pin, mobile_number, email, password.
        """
        # Idiom fix: `key in data` instead of `key in data.keys()`.
        if 'customer_name' in data:
            self.name_text_field.send_keys(data['customer_name'])
        if data.get('gender') == 'male':
            self.gender_male_radio_button.click()
        elif data.get('gender') == 'female':
            # BUG FIX: the female radio button existed but was never clicked.
            self.gender_female_radio_button.click()
        if 'date_of_birth' in data:
            self.birth_text_field.send_keys(data['date_of_birth'])
        if 'address' in data:
            self.address_text_field.send_keys(data['address'])
        if 'city' in data:
            self.city_text_field.send_keys(data['city'])
        if 'state' in data:
            self.state_text_field.send_keys(data['state'])
        if 'pin' in data:
            self.pin_text_field.send_keys(data['pin'])
        if 'mobile_number' in data:
            self.mobile_number_text_field.send_keys(data['mobile_number'])
        if 'email' in data:
            self.email_text_field.send_keys(data['email'])
        if 'password' in data:
            self.password_text_field.send_keys(data['password'])
        if click_submit:
            self.submit_button.click()
        elif click_reset:
            self.reset_button.click()
| true |
f175443cf6eb6211eaf1b086f53f8b113da85a4b | Python | xodhx4/webcam_image_recognizer | /util.py | UTF-8 | 262 | 3.25 | 3 | [] | no_license | """Util functions for this package
"""
import os
def makepath(path):
    """Create directory `path` if it does not already exist.

    BUG FIX: the original check-then-mkdir had a TOCTOU race (the directory
    could appear between os.path.exists and os.mkdir); use EAFP instead.
    Also fixed the docstring typo ("exit" -> "exist").

    Args:
        path (string): The directory path to create.
    """
    try:
        os.mkdir(path)
    except FileExistsError:
        return  # already present: nothing to do, and nothing printed
    print(f"Make folder : {path}")
| true |
3325e4ab645853a2d7d6de9f60103f638b15bafa | Python | naidenovaleksei/ml_cookbook | /tree_graphviz.py | UTF-8 | 1,241 | 2.96875 | 3 | [] | no_license | # Примеры визуализации дерева решений
def visualize_tree_graphviz(tree, features):
    """Render a fitted decision tree with graphviz (variant 1, preferred).

    Returns the graphviz.Source object so the caller can display it in a
    notebook or call .render() on it.
    """
    # sudo apt-get install graphviz
    # sudo pip install graphviz
    from graphviz import Source
    from sklearn.tree import export_graphviz
    # DOT-format description of the tree (a string).
    tree_data = export_graphviz(tree, feature_names=features, class_names=["renew", "churn"], label='all', proportion=True, precision=3)
    # BUG FIX: the Source object was created and immediately discarded, so
    # outside a notebook cell nothing was ever displayed.  Return it instead.
    return Source(tree_data)
def visualize_tree_matplotlib(tree, features):
    """Render a fitted decision tree with matplotlib (variant 2, worse)."""
    import matplotlib.pyplot as plt
    from sklearn.tree import plot_tree
    # BUG FIX: `ax` was referenced without ever being created (NameError).
    fig, ax = plt.subplots()
    # plot_tree takes almost the same parameters as export_graphviz.
    plot_tree(tree, feature_names=features, class_names=["renew", "churn"], label='all', ax=ax)
    plt.show()
if __name__ == "__main__":
    # Load a previously fitted tree bundle (dict with 'model' and 'features').
    import joblib
    model = joblib.load("tree.joblib")
    tree = model['model']
    features = model['features']
    visualize_tree_graphviz(tree, features)
    visualize_tree_matplotlib(tree, features)
| true |
aeb7ac2d840d856cfe90f563588e26a9e8e54b4a | Python | kwangilkimkenny/socialcalProject | /blog/templates/socialActCO2.py | UTF-8 | 848 | 3.84375 | 4 | [] | no_license | print("모든 것의 가치를 산정한다는 것은 어려운 일이다. \n하지만 모든 것은 가치가 있다. 공짜라고 진짜 가치가 없는 것은 아니다. \n공기가 없다면 생명체는 살 수 없다. \n맑은 공기를 마시시 위해서 우리가 지불해야하는 가치(비용)은 얼마가 될까. \n 이 계산기는 이러한 문제를 계산해보고 가진것들의 가치를 다시한번 생각해 보자.")
print()
x= input("activity 'the value of lowering heating by ? degree.': ")
inputA= float(x)
Valueofyear = inputA * 23 * 231
Valueofday = Valueofyear / 365
print()
print("It can reduce the temperature by one degree by 231 kilograms a year.")
print("CER(certified emission reduction) is $23 for 1 KG")
print()
print("value of day:$", round(Valueofday))
print("value of year:$", round(Valueofyear))
| true |
a3d29d23e6bbb70bc37757b1992e2a01ed3c7189 | Python | mortenjc/lang | /python/crypto/week3.py | UTF-8 | 436 | 2.875 | 3 | [] | no_license | #!/usr/bin/python
from Crypto.Hash import SHA256
def get_bytes_from_file(filename):
    """Return the full contents of `filename` as raw bytes.

    BUG FIX: the original left the file handle open (resource leak);
    a context manager guarantees it is closed.
    """
    with open(filename, "rb") as f:
        return f.read()
# Compute the block-chained SHA-256 of the video, back to front:
# hash the trailing partial block, then repeatedly hash (block || prev_hash).
# NOTE(review): this is Python 2 code -- `/` is integer division here and
# `print`/`str.encode('hex')` below are Python-2 only.  `file` also shadows
# the builtin; kept as-is.
file = get_bytes_from_file("week3.mp4")
BLKSZ =1024
blocks = len(file)/BLKSZ
chunk = file[blocks*BLKSZ:]
sha = SHA256.new(chunk).digest()
for i in range(blocks):
    # Walk backwards through the full 1 KiB blocks.
    offset = (blocks-i-1)*BLKSZ
    chunk = file[offset:offset+BLKSZ] + sha
    sha = SHA256.new(chunk).digest()
print sha.encode('hex')
| true |
efb9766e4dba3ec16cacdd629bdf8bab833c89ab | Python | JayeJuniper/Project-04 | /worklog_db.py | UTF-8 | 6,873 | 3.359375 | 3 | [] | no_license | from collections import OrderedDict
import datetime
import os
import re
from peewee import *
db = SqliteDatabase('worklog.db')
class Entry(Model):
    # One work-log record per row.
    employee = CharField(max_length=255, unique=False)   # who did the work
    task_name = CharField(max_length=255, unique=False)  # short task title
    duration = CharField(max_length=255, unique=False)   # minutes, stored as text
    notes = CharField(max_length=255, unique=False)      # free-form notes
    timestamp = DateTimeField(default=datetime.datetime.now)  # creation time

    class Meta:
        database = db
def initialize():
    """Create the database and the table if they don't exist."""
    db.connect()
    # safe=True: no error if the table already exists.
    db.create_tables([Entry], safe=True)
def main_loop():
    """Show the menu"""
    # Loop until the user enters 'q'; each handler's docstring is its label.
    choice = None
    while choice != 'q':
        clear()
        print("""Welcome to project 4: Worklog with a database.
        Select the following options or press 'q' to quit.""")
        for key, value in directory_main.items():
            print('{}) {}'.format(key, value.__doc__))
        choice = input('Action: ').lower().strip()
        if choice in directory_main:
            clear()
            directory_main[choice]()
def view_loop():
    """View an entry"""
    # Sub-menu: pick a search mode, run it, then page through the results.
    choice = None
    while choice != 'q':
        clear()
        print("View an entry:\nSelect the following options or press 'q' to g\
o back.")
        for key, value in directory_view.items():
            print('{}) {}'.format(key, value.__doc__))
        choice = input('Action: ').lower().strip()
        if choice in directory_view:
            clear()
            entries = directory_view[choice]()
            view_entry(entries)
def view_entry(entries):
    """print out entry"""
    # Show each matching entry one at a time; 'n' advances, 'd' deletes.
    for entry in entries:
        clear()
        print("Here are your selected logs:")
        print("""
        Date: {}
        Employee: {}
        Task: {}
        duration: {}
        Notes: {}
        """.format(entry.timestamp.strftime('%A %B %d, %Y %I:%Mp'),
                   entry.employee,
                   entry.task_name,
                   entry.duration,
                   entry.notes
                   ))
        print('n) next entry')
        print('d) delete entry')
        next_action = None
        while next_action is None:
            next_action = input('Action: ').lower().strip()
            if next_action == 'd':
                delete_entry(entry)
            elif next_action != 'n':
                # Anything other than 'n' or 'd' re-prompts.
                next_action = None
def add_entry():
    """Add entry"""
    # Prompt for each field in order, clearing the screen between prompts.
    field_getters = (get_employee_name, get_task_name,
                     get_time_spent, get_notes)
    values = []
    for getter in field_getters:
        print("Create an entry:")
        values.append(getter())
        clear()
    employee, task, minutes, extra_notes = values
    Entry.create(employee=employee, task_name=task,
                 duration=minutes, notes=extra_notes)
    print("Saved successfully!")
    input('Press ENTER to continue.')
def get_employee_name():
    """Prompt the employee for their name."""
    # Re-prompt until a non-empty name is entered.
    while True:
        name = input("Enter employee name: ")
        if name:
            return name
        print("\nYou must enter your name!\n")
def get_task_name():
    """Prompt the employee for the task name."""
    # Re-prompt until a non-empty task name is entered.
    while True:
        task = input("Enter a task name: ")
        if task:
            return task
        print("\nYou must enter a task name!\n")
def get_time_spent():
    """Prompt the employee for the time spent on their task.

    Returns the value as the string the user typed (validated as an int).
    """
    while True:
        minutes = input("Enter number of minutes spent working on the task: ")
        try:
            int(minutes)
        except ValueError:
            print("\nNot a valid time entry! Enter time as a whole integer.\n")
        else:
            return minutes
def get_notes():
    """Prompt employee to provide any additional notes."""
    # Notes are optional; an empty string is acceptable.
    return input("Notes for this task (ENTER if None): ")
def find_by_employee():
    """Find by employee"""
    # Build a de-duplicated, order-preserving list of employee names,
    # let the user pick one by index, then return the matching entries.
    entries = Entry.select().order_by(Entry.employee.desc())
    print("Find by employee:\nSelect an employee from the list below:")
    employees = []
    for entry in entries:
        if entry.employee not in employees:
            employees.append(entry.employee)
    for entry in employees:
        print("{}) {}".format(employees.index(entry), str(entry)))
    selection = test_input(len(employees))
    return entries.where(Entry.employee.contains(employees[selection]))
def find_by_date():
    """Find by date"""
    # List distinct timestamps (newest first), pick one, return its entries.
    entries = Entry.select().order_by(Entry.timestamp.desc())
    print("Find by date:\nSelect a date from the list below:")
    date = []
    for entry in entries:
        if entry.timestamp not in date:
            date.append(entry.timestamp)
    for entry in date:
        print("{}) {}".format(date.index(entry),
                              entry.strftime('%A %B %d, %Y %I:%Mp')))
    selection = test_input(len(date))
    return entries.where(Entry.timestamp.contains(date[selection]))
def find_by_time_spent():
    """Find by time spent"""
    # BUG FIX: the prompt was copy-pasted from find_by_date and talked about
    # dates; it now describes durations.  Locals renamed to match.
    entries = Entry.select().order_by(Entry.timestamp.desc())
    print("Find by time spent:\nSelect a duration from the list below:")
    durations = []
    for entry in entries:
        if entry.duration not in durations:
            durations.append(entry.duration)
    for duration in durations:
        print("{}) {}".format(durations.index(duration), duration))
    selection = test_input(len(durations))
    return entries.where(Entry.duration.contains(durations[selection]))
def find_by_search_term():
    """Find by search term"""
    # Substring match against employee, task name, or notes.
    search_query = input("Enter a term to search database:\n> ")
    entries = Entry.select().order_by(Entry.timestamp.desc())
    logs = entries.where(Entry.employee.contains(search_query)|
                         Entry.task_name.contains(search_query)|
                         Entry.notes.contains(search_query))
    return logs
def delete_entry(entry):
    """Delete entry"""
    # Require an explicit 'y' before destroying the row.
    answer = input("Are you sure? [yN] ").lower()
    if answer == 'y':
        entry.delete_instance()
        print('Entry deleted!')
        input('Press ENTER to continue.')
def test_input(length):
    """Prompt until the user enters an integer in [0, length) and return it."""
    while True:
        try:
            choice = int(input("> "))
        except ValueError:
            # Non-numeric input gets a message; out-of-range silently re-prompts,
            # matching the original behavior.
            print("Invalid selection. Please select a number.")
            continue
        if 0 <= choice < length:
            return choice
def clear():
    """Wipe the terminal screen on both Windows ('cls') and POSIX ('clear')."""
    command = 'cls' if os.name == 'nt' else 'clear'
    os.system(command)
# Menu dispatch tables: option key -> handler.  Each handler's docstring
# doubles as the label shown next to the option number.
directory_main = OrderedDict([
    ('1', add_entry),
    ('2', view_loop),
])

directory_view = OrderedDict([
    ('1', find_by_employee),
    ('2', find_by_date),
    ('3', find_by_time_spent),
    ('4', find_by_search_term)
])
# Script entry point: ensure the schema exists, then run the interactive menu.
if __name__ == '__main__':
    initialize()
    main_loop()
| true |
b71c19a80d43e2e8819d061e20ee341635ebf52e | Python | abnerrf/cursoPython3 | /3.pythonIntermediario/aula3/aula3.py | UTF-8 | 616 | 3.953125 | 4 | [] | no_license | '''
FUNÇÕES (DEF) EM PYTHON - *args **kwargs -
'''
def func(*args):
    # *args collects all positional arguments into a single tuple.
    print(args)

# Demo: the * operator unpacks the list into five separate arguments.
lista = [1,2,3,4,5]
print(*lista)
#######################################
def funcao(*args):
    # Iterate over the packed positional arguments one by one.
    for v in args:
        print(v)

funcao(1,2,3,4,5)
#####################################
def teste(*args, **kwargs):
    # args: tuple of positionals; kwargs: dict of keyword arguments.
    print(args)
    nome = kwargs.get('nome')
    print(nome)
    idade = kwargs.get('idade')
    # .get() returns None for a missing key, so guard before printing.
    # (Note: 'sobrenome' is accepted via **kwargs but never read.)
    if idade is not None:
        print(idade)
    else:
        print('Idade inexistente')

lista = [1,2,3,4,5]
lista2 = [10,20,30,40,50]
teste(*lista, *lista2, nome='Abner', sobrenome='Rodrigues', idade=27) | true |
e695246eebc9ee22426eddf2ba9233d33afdcfa8 | Python | RaulVS14/adventofcode2020 | /Day 16/day_16_functions.py | UTF-8 | 4,771 | 2.671875 | 3 | [] | no_license | import re
from helpers.helpers import read_file
def process_file(file):
    """Parse the puzzle input lines into a dict.

    Returns {'rules': {name: [[lo, hi], [lo, hi]], ...},
             'your ticket': [[...]], 'nearby tickets': [[...], ...]}
    with all numbers kept as strings.
    """
    row = 0
    data = {}
    data["rules"] = {}
    key_word = False
    while row < len(file):
        # Rule line, e.g. "seat: 1-3 or 5-7".
        row_match = re.match(r'(?P<key>^[a-z ]*): (?P<rule1>\d*\-\d*) or (?P<rule2>\d*\-\d*)', file[row])
        if row_match:
            data["rules"][row_match.group('key')] = [row_match.group('rule1').split("-"),
                                                     row_match.group('rule2').split("-")]
        elif not file[row]:
            # Blank separator line between sections.
            row += 1
            continue
        elif re.match(r'(?P<key>^[a-z ]*):', file[row]):
            # Section header ("your ticket:" / "nearby tickets:").
            match_row_key = re.match(r'(?P<key>^[a-z ]*):', file[row])
            key_word = match_row_key.group('key')
            data[key_word] = []
        else:
            # Comma-separated ticket line under the current section.
            data[key_word].append(file[row].split(","))
        row += 1
    return data
def check_field(field, rules):
    """Return True if `field` falls inside at least one range of any rule."""
    value = int(field)
    return any(
        int(low) <= value <= int(high)
        for rule_parts in rules.values()
        for low, high in rule_parts
    )
def find_in_valid_fields(rules_and_tickets):
    """Collect (as ints) every nearby-ticket field that matches no rule."""
    rules = rules_and_tickets["rules"]
    return [
        int(field)
        for ticket in rules_and_tickets["nearby tickets"]
        for field in ticket
        if not check_field(field, rules)
    ]
def get_sum_of_invalid_field_numbers(file_name):
    """Part 1 answer: sum of all field values that match no rule at all."""
    file = read_file(file_name)
    rules_and_tickets = process_file(file)
    list_of_invalid_fields = find_in_valid_fields(rules_and_tickets)
    return sum(list_of_invalid_fields)
def remove_invalid_tickets(rules_and_tickets):
    """Drop every nearby ticket containing a field that matches no rule.

    Mutates and returns `rules_and_tickets` with its "nearby tickets" list
    replaced by only the fully valid tickets.
    """
    rules = rules_and_tickets["rules"]
    kept = [
        ticket
        for ticket in rules_and_tickets["nearby tickets"]
        if all(check_field(field, rules) for field in ticket)
    ]
    rules_and_tickets["nearby tickets"] = kept
    return rules_and_tickets
def check_field_for_label(field, current_rule):
    """Return True if `field` lies inside either range of one rule."""
    value = int(field)
    return any(int(low) <= value <= int(high) for low, high in current_rule)
def find_field_labels(rules_and_tickets):
    """For each column index, list every rule name that all tickets satisfy there."""
    rules = rules_and_tickets["rules"]
    tickets = rules_and_tickets["nearby tickets"]
    field_labels = {}
    index = 0
    # There are as many columns as rules.
    while index < len(rules):
        rules_set = []
        for rule in rules:
            current_rule_name, current_rule = rule, rules[rule]
            result = True
            # A rule is a candidate only if every ticket's value at this
            # column fits one of its ranges.
            for ticket in tickets:
                result = result and check_field_for_label(ticket[index], current_rule)
            if result:
                rules_set.append(current_rule_name)
        field_labels[index] = rules_set
        index += 1
    return field_labels
def process_labels_dict(labels_dict):
    """Resolve candidate lists to exactly one label per index by elimination.

    Repeatedly fixes any index that has a single candidate and removes that
    label from every other index's candidate list.
    NOTE(review): loops forever if no index ever narrows to one candidate.
    """
    label_list_dict = {}
    while len(label_list_dict.keys()) != len(labels_dict.keys()):
        for i in labels_dict:
            if len(labels_dict[i]) == 1:
                current_label = labels_dict[i][0]
                # Keys are stringified so the result can be ordered later.
                label_list_dict[str(i)] = current_label
                labels_dict = remove_processed_labels_from_repeating_label_dict_lists(current_label, i, labels_dict)
    return label_list_dict
def remove_processed_labels_from_repeating_label_dict_lists(current_label, i, labels_dict):
    """Remove `current_label` from every index's candidate list except index `i`."""
    for index, candidates in labels_dict.items():
        if index != i and current_label in candidates:
            candidates.remove(current_label)
    return labels_dict
def multiplie_departed_field_numbers(labels_list, ticket):
    """Multiply together the ticket values whose label contains 'departure'."""
    product = 1
    for position, label in enumerate(labels_list):
        if "departure" in label:
            product *= int(ticket[position])
    return product
def get_multiplied_departure_field_numbers_from_your_ticket(file_name):
    """Part 2 answer: product of your ticket's 'departure *' field values."""
    labels_list, your_ticket = get_order_label_list(file_name)
    return multiplie_departed_field_numbers(labels_list, your_ticket)
def organize_labels(indexed_label_dict):
    """Convert {"0": label0, "1": label1, ...} into a list ordered by index."""
    return [indexed_label_dict[str(i)] for i in range(len(indexed_label_dict))]
def get_order_label_list(file_name):
    """Return (column labels in ticket order, your ticket's field values).

    Pipeline: parse input, discard invalid tickets, collect candidate labels
    per column, resolve them by elimination, and order them by column index.
    """
    file = read_file(file_name)
    rules_and_tickets = process_file(file)
    filtered_tickets = remove_invalid_tickets(rules_and_tickets)
    labels_dict = find_field_labels(filtered_tickets)
    indexed_label_dict = process_labels_dict(labels_dict)
    organized_labels_list = organize_labels(indexed_label_dict)
    return organized_labels_list, rules_and_tickets["your ticket"][0]
| true |
431633c33d2b112e2b11a681bae411e53431d5e8 | Python | anchitshrivastava/Instagram-Scrapping | /engagement_score.py | UTF-8 | 3,325 | 2.625 | 3 | [] | no_license | from instaloader import Instaloader, Profile
from instaloader.exceptions import QueryReturnedNotFoundException, LoginRequiredException,ProfileNotExistsException
import pandas as pd
# L = Instaloader()
# df = pd.read_csv("/Users/anchitshrivastava/Desktop/Tatras Data/Instagram Scrapping/Vietnam csv/Vietnam_combined_2_with_count.csv")
# users = df['Insta_Usernames']
# engagement_data={}
# user_count = 0
# count = 0
# for user in users:
# # if count == 10:
# # break
# # count += 1
# try:
# user = user.strip()
# user_count= user_count+1
# print(len(user))
# print("User:",user_count,':',user)
# profile = Profile.from_username(L.context, user)
# profile_url = "https://www.instagram.com/" + user + "/"
# print(profile_url)
# if not profile.is_private:
# ctr=0
# total_comments=0
# total_likes=0
# for post in profile.get_posts():
# # L.download_post(post, target=profile.username)
# total_likes = total_likes+post.likes
# total_comments = total_comments + post.comments
# ctr = ctr+1
# if ctr == 10:
# break
# engagement = ((total_comments+total_likes)/profile.followers)*10
# engagement_data[user] = engagement
# else:
# print("PROFILE IS PRIVATE OR HAVE LESS FOLLOWERS")
# print("========================")
#
# except (QueryReturnedNotFoundException, LoginRequiredException, ProfileNotExistsException):
# print("PROFILE NOT FOUND")
# print("========================")
#
# print(engagement_data)
def engagememt_data(user):
    """Compute an engagement score for one Instagram username.

    Score = (likes + comments over up to 10 most recent posts) / followers * 10.
    Returns None (implicitly) for private profiles and for lookup failures.
    NOTE(review): the misspelled name is kept because callers reference it;
    `engagement_data` and `user_count` below are vestigial locals (the dict
    is never read, and user_count is reset to 0 on every call so the
    "User: 1 : ..." print always shows 1) -- candidates for cleanup.
    """
    engagement_data = {}
    user_count = 0
    try:
        user = user.strip()
        user_count= user_count+1
        print(len(user))
        print("User:",user_count,':',user)
        # Network call via the module-level Instaloader instance `L`.
        profile = Profile.from_username(L.context, user)
        profile_url = "https://www.instagram.com/" + user + "/"
        print(profile_url)
        if not profile.is_private:
            ctr=0
            total_comments=0
            total_likes=0
            # Accumulate likes/comments over at most the 10 newest posts.
            for post in profile.get_posts():
                # L.download_post(post, target=profile.username)
                total_likes = total_likes+post.likes
                total_comments = total_comments + post.comments
                ctr = ctr+1
                if ctr == 10:
                    break
            engagement = ((total_comments+total_likes)/profile.followers)*10
            engagement_data[user] = engagement
            print(engagement)
            return engagement
        else:
            print("PROFILE IS PRIVATE OR HAVE LESS FOLLOWERS")
            print("========================")
            pass
    except (QueryReturnedNotFoundException, LoginRequiredException, ProfileNotExistsException):
        # Missing/blocked profiles fall through and return None.
        print("PROFILE NOT FOUND")
        print("========================")
if __name__ == '__main__':
    # Module-level Instaloader session used inside engagememt_data.
    L = Instaloader()
    df = pd.read_csv("/Users/anchitshrivastava/Desktop/Tatras Data/Instagram Scrapping/user_data_eng - KSA_1.csv")
    users = df['Insta_Usernames']
    # NOTE(review): these two are never used here (engagememt_data has its own
    # locals of the same names).
    engagement_data = {}
    user_count = 0
    # Score every username and persist the result.
    df['Engagement'] = users.apply(engagememt_data)
    df.to_csv("ksa Data with engagement1 1.csv")