blob_id stringlengths 40 40 | language stringclasses 1
value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30
values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2
values | text stringlengths 12 5.47M | download_success bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
309cbfe9d61299fda90361ea551ee655595191b5 | Python | Sefan90/AdventOfCode2020 | /Day18/test.py | UTF-8 | 2,993 | 3.28125 | 3 | [] | no_license | import copy
import re
def Part1():
    """AoC 2020 day 18 part 1 (scratch version).

    Reads "testinput.txt", strips spaces, folds every '+' into the sum of
    its two neighbours, keeps '*' markers, then multiplies the surviving
    numbers.  NOTE(review): assumes single-digit operands and no
    parentheses -- confirm against the real puzzle input format.
    """
    f = open("testinput.txt","r")
    # One list of single characters per input line, spaces removed.
    inputlist = [[i for i in r.replace(" ","").strip()] for r in f.readlines()]
    for row in inputlist:
        newlist = []
        for i in range(len(row)):
            #print(row[i])
            if row[i] == "+":
                # Replace a '+' by the sum of its immediate neighbours.
                newlist.append(str(int(row[i-1])+int(row[i+1])))
            elif row[i] == "*":
                newlist.append("*")
        print(newlist)
        # Multiply the remaining numeric tokens together.
        summa = 1
        for i in range(len(newlist)):
            if newlist[i].isnumeric():
                summa *= int(newlist[i])
        print(summa)
def rec2(row):
    """For each operator token in *row*, emit (as a string) the result of
    applying it to its immediate neighbours; non-operator tokens are dropped."""
    results = []
    for idx, token in enumerate(row):
        if token not in ("+", "*"):
            continue
        left, right = int(row[idx - 1]), int(row[idx + 1])
        results.append(str(left + right if token == "+" else left * right))
    return results
def rec(row):
    """Recursively flatten *row*: multi-character / nested items are expanded
    through rec(), '+' folds into the sum of its neighbours, '*' is kept
    as a marker, and plain single digits are dropped."""
    out = []
    for pos, item in enumerate(row):
        #print(item)
        if len(item) > 1:
            out.extend(rec(item))
        elif item == "+":
            out.append(str(int(row[pos - 1]) + int(row[pos + 1])))
        elif item == "*":
            out.append("*")
    return out
def splitstr(my_string):
    """Tokenise *my_string*: each character outside parentheses becomes its
    own token, and a parenthesised run becomes one token (its contents,
    spaces removed).

    NOTE(review): nested parentheses are not tracked -- the first ')'
    closes the group, exactly as in the original implementation.
    """
    tokens = []
    buffered = ''
    inside = False
    for ch in my_string:
        if ch == ' ':
            continue
        if ch == '(':
            inside = True
        elif ch == ')':
            inside = False
            tokens.append(buffered)
            buffered = ''
        elif inside:
            buffered += ch
        else:
            tokens.append(ch)
    return tokens
def par():
    """Parenthesis-aware pass: tokenise each line with splitstr(), reduce
    the groups with rec(), then multiply the surviving numbers."""
    f = open("testinput.txt","r")
    # Each row is a token list; parenthesised groups are single tokens.
    inputlist = [[i for i in splitstr(r.strip())] for r in f.readlines()]
    print(inputlist)
    for row in inputlist:
        lista = []
        for i in row:
            if len(i) > 1:
                # Multi-character token == a parenthesised group: recurse.
                lista += rec(i)
            else:
                lista.append(i)
        print(lista)
    for row in inputlist:
        newlist = rec(row)
        print(newlist)
        # Multiply the remaining numeric tokens together.
        summa = 1
        for i in range(len(newlist)):
            if newlist[i].isnumeric():
                summa *= int(newlist[i])
        print(summa)
def par2():
    """Work-in-progress second attempt at handling nested parentheses.

    NOTE(review): this looks unfinished -- characters seen while the
    parenthesis depth `para` is non-zero are silently dropped, and since
    "(" is never appended to `newlist` the `while True` loop always exits
    after a single pass.  Verify before relying on it.
    """
    f = open("testinput.txt","r")
    inputlist = [[i for i in r.replace(" ","").strip()] for r in f.readlines()]
    print(inputlist)
    newlist = []
    para = 0  # current parenthesis nesting depth
    for k,row in enumerate(inputlist):
        while True:
            lista = []
            for i in row:
                if i == "(":
                    para += 1
                elif i == ")":
                    para -= 1
                elif para == 0:
                    # Top-level character: copy it straight through.
                    if len(lista) == 0:
                        newlist.append(i)
                    else:
                        lista.append(i)
                        newlist.append([lista])
                        lista = []
            inputlist[k] = copy.deepcopy(newlist)
            if "(" not in newlist:
                break
    print(inputlist)
par() | true |
abf1abee126126329b11a1c653e298d410f52821 | Python | MonishPandey/Python | /wangoes.py | UTF-8 | 385 | 3.5625 | 4 | [] | no_license | c=0
# Prints a 5x5 grid whose border carries the numbers 1..16 running
# clockwise (top row 1-5, right column, bottom row, left column) with
# blanks in the middle.  Relies on `c` being initialised to 0 on the
# line above this block.
d=5   # offset used while printing the bottom row
b=16  # counter for the left column, counting down
for i in range(5):
    for j in range(5):
        if(i==0):
            # top row: 1..5 left to right
            c+=1
            print(c,end=" ")
        elif(j==4 ):
            # right column continues the count downward
            c+=1
            print(c,end=" ")
        elif(i==4):
            # bottom row: c+d, decreasing d as we go right
            print(c+d,end=" ")
            d-=1
        elif(j==0):
            # left column: 16 down to 14
            print(b,end=" ")
            b-=1
        else:
            print(" ",end="")
    print()
0a1d0104e59649d65cff88135b613f7d715ef4c5 | Python | hogitayden/nguyenhoanggiang-fundametal-c4e22 | /Session3/menu3.py | UTF-8 | 266 | 3.484375 | 3 | [] | no_license | items = ['Campucheer', 'Thailen','Turkey chicken']
# Show the menu on one line, then prompt until a valid serial number is given.
print(*items, sep = '| ')
while True:
    i = int(input("Serial num of dishes? "))
    # Bugfix: accept exactly serials 1..len(items).  The old check
    # `i < 3` let 0 and negative numbers index the list (via Python's
    # negative indexing) and wrongly rejected the last dish (serial 3).
    if 1 <= i <= len(items):
        print(items[i - 1])
        break
    else:
        print("Sorry, we dun hav dis")
        print(*items, sep = '\n')
a8f3d1063ff2c4e1e7b6ad24a033b013a1c73339 | Python | jmiths/PE | /python48.py | UTF-8 | 703 | 3.03125 | 3 | [] | no_license | #!/usr/bin/python3
# Project Euler 47: find the first of four consecutive integers that each
# have four distinct prime factors.
# Odd-only sieve: primes[i] represents the odd number 2*i + 1.
primes = [1]*3000
primes[0] = 0  # 1 is not prime
p = [2]
for index in range(len(primes)):
    if primes[index] == 0:
        continue
    else:
        # Mark odd multiples of (2*index + 1) as composite.
        for bad_val in range(index*(2+(2*index)),len(primes),2*index+1):
            primes[bad_val] = 0
for index in range(len(primes)):
    if primes[index] == 1:
        p.append(2*index+1)
#print(p)
arr = [i for i in range(2,1000000)]
temp = ''
for num in arr:
    # s collects the distinct prime factors of num.
    s = set()
    #if num in p:
    #    temp += '0'
    #    continue
    for prime in p:
        if num == 0 or prime > num:
            break
        while num % prime == 0:
            s.add(prime)
            num = int(num/prime)
    # temp becomes a digit string: one factor-count per integer.
    temp += (str(len(s)))
# First run of four consecutive "4"s; +2 converts index back to the integer.
print(temp.index("4444")+2)
| true |
1d6e9e88a47b28ee2c1f2d6db47fc0c08fd93136 | Python | cocotools/CoCoTools | /scripts/plot_pre_ort.py | UTF-8 | 731 | 2.578125 | 3 | [] | no_license | import pickle
import networkx as nx
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import cocotools as coco

# Python 2 script (uses dict.itervalues and integer division below).
# Loads a pickled PHT00 connectivity graph and draws it on a circle.
# `pickle` is imported at the top of the file.
with open('results/graphs/pht00.pck') as f:
    pht00 = pickle.load(f)
# Strip the "PHT00-" map prefix from node names.
names_dict = {}
for node in pht00:
    map, name = node.split('-', 1)
    if map == 'PHT00':
        names_dict[node] = name
pht00 = nx.relabel_nodes(pht00, names_dict)
circle = nx.circular_layout(pht00)
# Scale the unit-circle layout up so labels have room.
for p in circle.itervalues():
    p *= 80
f, ax = plt.subplots(figsize=(14,14))
# Half a spectrum of colours, repeated to cover all nodes.
colors = [cm.spectral(i * 10) for i in range(pht00.number_of_nodes() / 2)] * 2
coco.draw_network(pht00, circle, radius=3, ax=ax, node_alpha=0.20,
                  node_color=colors)
plt.show()
# NOTE(review): savefig after show() saves an empty figure with many
# backends -- consider saving before showing.
plt.savefig('results/figures/pht00_preORT.pdf')
| true |
9ea8dea265766ad597e73c4990b618acd464d69c | Python | partho-maple/coding-interview-gym | /leetcode.com/python/127_Word_Ladder.py | UTF-8 | 3,356 | 3.640625 | 4 | [
"MIT"
] | permissive | from collections import defaultdict
from collections import deque
class Solution(object):
    """BFS word-ladder solver using wildcard buckets (LeetCode 127)."""

    def ladderLength(self, beginWord, endWord, wordList):
        """
        :type beginWord: str
        :type endWord: str
        :type wordList: List[str]
        :rtype: int
        """
        if not beginWord or not endWord or not wordList or endWord not in wordList:
            return 0

        n = len(beginWord)
        # Bucket every word under each of its n wildcard patterns,
        # e.g. "hot" -> "*ot", "h*t", "ho*".
        buckets = defaultdict(list)
        for w in wordList:
            for pos in range(n):
                buckets[w[:pos] + "*" + w[pos + 1:]].append(w)

        frontier = deque([(beginWord, 1)])
        seen = {beginWord: True}
        while frontier:
            word, depth = frontier.popleft()
            for pos in range(n):
                pattern = word[:pos] + "*" + word[pos + 1:]
                for candidate in buckets[pattern]:
                    if candidate == endWord:
                        return depth + 1
                    if candidate not in seen:
                        seen[candidate] = True
                        frontier.append((candidate, depth + 1))
                # Empty the bucket so this pattern is never rescanned.
                buckets[pattern] = []
        return 0
# Using BFS. My own solution during Mock Test
import collections
class Solution(object):
    """BFS word-ladder solver that materialises an explicit adjacency list."""

    def ladderLength(self, beginWord, endWord, wordList):
        """
        :type beginWord: str
        :type endWord: str
        :type wordList: List[str]
        :rtype: int
        """
        if endWord not in wordList or not endWord or not beginWord or not wordList:
            return 0
        wordLen = len(beginWord)
        # Bugfix: work on a copy so the caller's wordList is not mutated
        # (the original did wordList.append(beginWord) in place).
        words = wordList + [beginWord]
        # Pattern buckets: wildcard pattern -> words matching it.
        letterToWordMap = defaultdict(list)
        for word in words:
            for i in range(wordLen):
                pattern = word[:i] + "*" + word[i + 1:]
                letterToWordMap[pattern].append(word)
        # Adjacency list: each word maps to all words sharing a pattern
        # (this includes the word itself; `visited` filters those out).
        graph = defaultdict(list)
        for word in words:
            for i in range(wordLen):
                pattern = word[:i] + "*" + word[i + 1:]
                graph[word].extend(letterToWordMap[pattern])
        queue = deque([(beginWord, 1)])
        visited = set()
        visited.add(beginWord)
        while queue:
            currentLevelSize = len(queue)
            while currentLevelSize > 0:
                currentWord, level = queue.popleft()
                currentLevelSize -= 1
                for neighbours in graph[currentWord]:
                    if neighbours == endWord:
                        return level + 1
                    else:
                        if neighbours not in visited:
                            visited.add(neighbours)
                            queue.append((neighbours, level + 1))
        return 0
# Smoke test for the solver above: expected shortest ladder length is 5
# (hit -> hot -> dot -> dog -> cog).
sol = Solution()
beginWord = "hit"
endWord = "cog"
wordList = ["hot","dot","dog","lot","log","cog"]
out = sol.ladderLength(beginWord, endWord, wordList)
print("Res: ", out)
c6b679b47e4ef4e9c61ce90af59703e1ca42672c | Python | green-fox-academy/Chiflado | /week-04/day-01/teacher_and_students.py | UTF-8 | 397 | 3.40625 | 3 | [] | no_license | class Student(object):
def learn(self):
return 'Asszem ertem...'
def question(self, teacher):
return teacher.answer()
class Teacher(object):
    """Counterpart of Student: can teach a student and answer questions."""

    def teach(self, student):
        """Delegate to the student's learn() and relay the result."""
        return student.learn()

    def answer(self):
        """Return the teacher's stock reply (Hungarian: "You still don't get it?!")."""
        return 'Meg mindig nem erted?!'
# Small demo: the student asks the teacher, the teacher teaches the student.
Moricka = Student()
Belaba = Teacher()
print(Moricka.question(Belaba))
print(Belaba.teach(Moricka))
af64075d4c227fb13b5127421683aa5b317022a0 | Python | pyccel/pyccel | /tests/external/scripts/mpi4py/np_point_to_point.py | UTF-8 | 636 | 2.640625 | 3 | [
"MIT"
] | permissive | # pylint: disable=missing-function-docstring, missing-module-docstring
from mpi4py import MPI
from numpy import zeros
from numpy import ones

comm = MPI.COMM_WORLD
rank = comm.Get_rank()

# passing MPI datatypes explicitly
if rank == 0:
    data = ones(5, 'int')
    comm.Send([data, MPI.INT], dest=1, tag=77)
elif rank == 1:
    data = zeros(5, 'int')
    comm.Recv([data, MPI.INT], source=0, tag=77)
    print(data)

# automatic MPI datatype discovery
if rank == 0:
    data_ = ones(5, 'double')
    # Bugfix: send the freshly allocated double buffer `data_`; the
    # original passed the stale integer `data` from the section above.
    comm.Send(data_, dest=1, tag=13)
elif rank == 1:
    data_ = zeros(5, 'double')
    comm.Recv(data_, source=0, tag=13)
    print(data_)
| true |
21b725449b157378f83ab0f6aca3e9e07f956ae3 | Python | diogosimao/snug-as-a-bug-in-a-couch | /apps/list_manager/tests/test_forms.py | UTF-8 | 693 | 2.5625 | 3 | [] | no_license | from django.test import TestCase
from apps.list_manager.forms import SearchForm, ChoicesForm
class SearchFormTest(TestCase):
    """Smoke test for SearchForm."""

    def test_init_form(self):
        # The unbound form must be constructible (and truthy).
        self.assertTrue(SearchForm())
class ChoicesFormTest(TestCase):
    """Validation tests for ChoicesForm."""

    def test_form_data_input(self):
        # Bound form with plain POST-style data only.
        form_data = {'name': 'movies_choices', 'value': ['1', '2']}
        form = ChoicesForm(data=form_data)
        self.assertTrue(form.is_valid())

    def test_form_choices_input(self):
        # Bound form constructed with an explicit choices list.
        choices = [('1', 'choice1'), ('2', 'str')]
        form_data = {'name': 'movies_choices', 'value': ['1', '2']}
        form = ChoicesForm(movies_choices_list=choices, data=form_data)
        self.assertTrue(form.is_valid())
| true |
296935fe203736400561ab9d9f43d0b1b9bc88b9 | Python | dimagi/commcare-hq | /corehq/util/workbook_json/excel.py | UTF-8 | 10,186 | 2.796875 | 3 | [
"BSD-3-Clause"
] | permissive | import io
from zipfile import BadZipfile
from tempfile import NamedTemporaryFile
import openpyxl
from openpyxl.utils.exceptions import InvalidFileException
from django.core.files.uploadedfile import UploadedFile
from django.utils.translation import gettext as _
class InvalidExcelFileException(Exception):
    """Raised when the uploaded file is not a readable .xlsx workbook."""
    pass
class JSONReaderError(Exception):
    """Raised when row data cannot be converted to JSON (bad columns)."""
    pass
class HeaderValueError(Exception):
    """Raised when a header cell is not a string."""
    pass
class StringTypeRequiredError(Exception):
    """Raised by enforce_string_type for values that are not str or int."""
    pass
class WorkbookJSONError(Exception):
    """User-facing wrapper for any workbook parsing failure."""
    pass
class IteratorJSONReader(object):
    """Turn an iterable of rows (first row = headers) into JSON-ish dicts.

    The header row uses a small mini-language:
      * "a: b"  -> nested dict  {"a": {"b": value}}
      * "a 1"   -> list         {"a": [value, ...]}
      * "a?"    -> boolean      value must be yes/true/no/false/blank

    >>> def normalize(it):
    ...     r = []
    ...     for row in IteratorJSONReader(it):
    ...         r.append(sorted(row.items()))
    ...     return r
    >>> normalize([])
    []
    >>> normalize([['A', 'B', 'C'], ['1', '2', '3']])
    [[('A', '1'), ('B', '2'), ('C', '3')]]
    >>> normalize([['A', 'data: key', 'user 1', 'user 2', 'is-ok?'],
    ...            ['1', '2', '3', '4', 'yes']])
    [[('A', '1'), ('data', {'key': '2'}), ('is-ok', True), ('user', ['3', '4'])]]
    """

    def __init__(self, rows):
        # you can only call __iter__ once
        self._rows = iter(rows)
        try:
            self.headers = list(next(self._rows))
        except StopIteration:
            self.headers = []
        self.fieldnames = self.get_fieldnames()

    def row_to_json(self, row):
        # Zip each cell with its header and let set_field_value interpret
        # the header mini-language.
        obj = {}
        for value, header in zip(row, self.headers):
            self.set_field_value(obj, header, value)
        return obj

    def __iter__(self):
        try:
            for row in self._rows:
                yield self.row_to_json(row)
        finally:
            # Drop the iterator so a second __iter__ fails loudly.
            del self._rows

    def get_fieldnames(self):
        # Dry-run the headers with empty values to derive top-level names.
        obj = {}
        for field, value in zip(self.headers, [''] * len(self.headers)):
            if not isinstance(field, str):
                raise HeaderValueError('Field %s is not a string.' % field)
            self.set_field_value(obj, field, value)
        return list(obj)

    @classmethod
    def set_field_value(cls, obj, field, value):
        """Interpret *field* (header mini-language) and store *value* in *obj*."""
        if isinstance(value, bytes):
            value = value.decode('utf-8')
        if isinstance(value, str):
            value = value.strip()
        # try dict: "parent: child" nests one level and recurses.
        try:
            field, subfield = field.split(':')
        except Exception:
            pass
        else:
            field = field.strip()

            if field not in obj:
                obj[field] = {}

            cls.set_field_value(obj[field], subfield, value)
            return

        # try list: "name N" appends to a list under "name".
        try:
            field, _ = field.split()
        except Exception:
            pass
        else:
            dud = {}
            cls.set_field_value(dud, field, value)
            (field, value), = list(dud.items())

            if field not in obj:
                obj[field] = []
            elif not isinstance(obj[field], list):
                obj[field] = [obj[field]]
            if value not in (None, ''):
                obj[field].append(value)
            return

        # else flat
        # try boolean: "name?" maps yes/true/no/false/blank to True/False.
        try:
            field, nothing = field.split('?')
            assert(nothing.strip() == '')
        except Exception:
            pass
        else:
            try:
                value = {
                    'yes': True,
                    'true': True,
                    'no': False,
                    'false': False,
                    '': False,
                    None: False,
                }[value.lower() if hasattr(value, 'lower') else value]
            except KeyError:
                raise JSONReaderError(
                    'Values for field %s must be "yes" or "no", not "%s"' % (
                        field, value)
                )

        # set for any flat type
        field = field.strip()
        if field in obj:
            raise JSONReaderError(
                'You have a repeat field: %s' % field
            )

        obj[field] = value
def get_workbook(file_or_filename):
    """Open *file_or_filename* as a WorkbookJSONReader, translating the
    low-level parsing exceptions into user-facing WorkbookJSONError
    messages.

    :raises WorkbookJSONError: for any recognised parsing failure.
    """
    try:
        return WorkbookJSONReader(file_or_filename)
    except InvalidExcelFileException as e:
        raise WorkbookJSONError(_(
            "Upload failed! "
            "Please make sure you are using a valid Excel 2007 or later (.xlsx) file. "
            "Error details: {}."
        ).format(e))
    except JSONReaderError as e:
        raise WorkbookJSONError(_(
            "Upload failed due to a problem with Excel columns. Error details: {}."
        ).format(e))
    # Bugfix: HeaderValueError used to be listed in the first except tuple
    # as well, which made this dedicated "data type error" message
    # unreachable.
    except HeaderValueError as e:
        raise WorkbookJSONError(_(
            "Upload encountered a data type error: {}."
        ).format(e))
    except AttributeError as e:
        raise WorkbookJSONError(_(
            "Error processing Excel file: {}."
        ).format(e))
def get_single_worksheet(file_or_filename, title=None):
    """Return one worksheet from the uploaded workbook.

    If *title* is given that sheet is looked up by name, otherwise the
    first sheet is used.  Raises WorkbookJSONError when the requested
    sheet (or any sheet at all) is missing.
    """
    workbook = get_workbook(file_or_filename)
    try:
        worksheet = workbook.get_worksheet(title=title)
    except WorksheetNotFound:
        # Typo fix in the user-facing message: "contian" -> "contain".
        raise WorkbookJSONError(_(
            "Could not find sheet '{title}'."
        ).format(title=title) if title else _("Uploaded file does not contain any sheets."))
    return worksheet
class WorksheetNotFound(Exception):
    """Raised when a worksheet cannot be located by title or index."""

    def __init__(self, title):
        super(WorksheetNotFound, self).__init__()
        self.title = title
class WorksheetJSONReader(IteratorJSONReader):
    """IteratorJSONReader over a single openpyxl worksheet."""

    def __init__(self, worksheet, title=None):
        width = 0
        self.title = title
        self.worksheet = worksheet
        try:
            header_row = next(self.worksheet.iter_rows())
        except StopIteration:
            header_row = []
        # Effective width = number of header cells before the first empty one.
        for cell in header_row:
            if cell.value is None:
                break
            else:
                width += 1
        self.worksheet.calculate_dimension(force=True)

        def iterator():
            def _convert_float(value):
                """
                excel doesn't distinguish between 1 and 1.0
                if it can be an integer assume it is
                """
                if isinstance(value, float) and int(value) == value:
                    return int(value)
                else:
                    # Specifically check for None so that we can allow a value of 0
                    return value if value is not None else ''
            for row in self.worksheet.iter_rows():
                cell_values = [
                    _convert_float(cell.value)
                    for cell in row[:width]
                ]
                # Stop at the first completely blank row.
                if not any(cell != '' for cell in cell_values):
                    break
                yield cell_values
        super(WorksheetJSONReader, self).__init__(iterator())
class WorkbookJSONReader(object):
    """Wraps an openpyxl workbook, exposing each sheet as a
    WorksheetJSONReader (by title and by position)."""

    def __init__(self, file_or_filename):
        # File-like uploads are spooled to a temporary .xlsx path because
        # openpyxl's read-only loader wants a real file on disk.
        check_types = (UploadedFile, io.RawIOBase, io.BufferedIOBase)
        if isinstance(file_or_filename, check_types):
            tmp = NamedTemporaryFile(mode='wb', suffix='.xlsx', delete=False)
            file_or_filename.seek(0)
            tmp.write(file_or_filename.read())
            file_or_filename.seek(0)
            tmp.close()
            file_or_filename = tmp.name
        try:
            self.wb = openpyxl.load_workbook(file_or_filename, read_only=True, data_only=True)
        except (BadZipfile, InvalidFileException, KeyError) as e:
            raise InvalidExcelFileException(str(e))
        self.worksheets_by_title = {}
        self.worksheets = []

        for worksheet in self.wb.worksheets:
            try:
                ws = WorksheetJSONReader(worksheet, title=worksheet.title)
            except IndexError:
                raise JSONReaderError('This Excel file has unrecognised formatting. Please try downloading '
                                      'the lookup table first, and then add data to it.')
            self.worksheets_by_title[worksheet.title] = ws
            self.worksheets.append(ws)

    def get_worksheet(self, title=None, index=None):
        """Return one worksheet, selected by *title* or *index* (not both);
        defaults to the first sheet.  Raises WorksheetNotFound if missing."""
        if title is not None and index is not None:
            raise TypeError("Can only get worksheet by title *or* index")
        if title:
            try:
                return self.worksheets_by_title[title]
            except KeyError:
                raise WorksheetNotFound(title=title)
        elif index is not None:
            # Bugfix: `elif index:` treated index=0 as "no index given" and
            # fell through to the default branch; testing against None lets
            # sheet 0 be addressed explicitly (same observable result for
            # index=0, but the intent is now correct and negative indexes
            # keep working).
            try:
                return self.worksheets[index]
            except IndexError:
                raise WorksheetNotFound(title=index)
        else:
            try:
                return self.worksheets[0]
            except IndexError:
                raise WorksheetNotFound(title=0)
def flatten_json_to_path(obj, path=()):
    """Yield (path, leaf) pairs for every scalar in the nested *obj*,
    where path is a tuple of dict keys and list indices."""
    if isinstance(obj, dict):
        children = obj.items()
    elif isinstance(obj, list):
        children = enumerate(obj)
    else:
        # Scalar leaf: emit it with the path accumulated so far.
        yield (path, obj)
        return
    for key, child in children:
        for pair in flatten_json_to_path(child, path + (key,)):
            yield pair
def format_header(path, value):
    """Render a flattened *path* tuple as a spreadsheet column header.

    String components join with ': ', integer components become 1-based
    ' N' suffixes, and a boolean value turns the header into a '?' column
    with a 'yes'/'no' value.
    """
    parts = [path[0]]
    for component in path[1:]:
        if isinstance(component, str):
            parts.append(f': {component}')
        elif isinstance(component, int):
            parts.append(f' {component + 1}')
    header = ''.join(parts)
    if isinstance(value, bool):
        header += '?'
        value = 'yes' if value else 'no'
    return header, value
def flatten_json(obj):
    """Yield (header, value) pairs for every leaf of the nested *obj*."""
    for leaf_path, leaf_value in flatten_json_to_path(obj):
        yield format_header(leaf_path, leaf_value)
def json_to_headers(obj):
    # All flattened headers, sorted human-style (so "user 2" < "user 10").
    return [key for key, value in sorted(flatten_json(obj), key=lambda t: alphanumeric_sort_key(t[0]))]
def alphanumeric_sort_key(key):
    """Natural-order sort key: digit runs compare numerically, the rest
    lexicographically.

    Thanks to http://stackoverflow.com/a/2669120/240553
    """
    import re
    chunks = re.split('([0-9]+)', key)
    return [int(chunk) if chunk.isdigit() else chunk for chunk in chunks]
def enforce_string_type(value):
    """Coerce *value* to str, accepting only str and int inputs.

    Anything else (e.g. Decimal) raises StringTypeRequiredError, because
    guessing how such types should be rendered as text could silently
    change the data.
    """
    accepted = ((str, lambda v: v), (int, str))
    for kind, coerce in accepted:
        if isinstance(value, kind):
            return coerce(value)
    raise StringTypeRequiredError()
| true |
d45651195898031ad3bf80f98d990708d5675a0d | Python | Hacker-GHope/Spider | /Spider_doubanGroupExplore/doubanGroupExplore.py | UTF-8 | 4,689 | 2.859375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
# @Time : 2018/10/23 10:01
# @Author : G.Hope
# @Email : 1638327522@qq.com
# @File : doubanGroupExplore.py
# @Software: PyCharm
import json
import requests
from lxml import etree
# Fetch a page
def get_one_page(url):
    """Fetch *url* with a desktop user-agent and return the decoded HTML,
    or None when the response status is not 200."""
    headers = {
        "User-Agent": "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; 360SE)"
    }
    response = requests.get(url, headers=headers)
    if response.status_code == 200:
        text = response.content.decode('utf-8')
        return text
    return None
# XPath parsing practice (scratch notes; experiments kept commented out)
def parse_with_xpath(html):
    """XPath practice ground: all experiments below are commented out; only
    the final query actually runs."""
    etree_html = etree.HTML(html)
    # print(etree_html)
    # Match all nodes: //*
    # result = etree_html.xpath('//*')
    # print(result)
    # print(len(result))
    # Match all <a> descendants; get text with text()
    # result = etree_html.xpath('//a/text()')
    # print(result)
    # Direct child nodes: /
    # result = etree_html.xpath('//div/p/text()')
    # print(result)
    # All descendant nodes: //
    # result = etree_html.xpath('//div[@class="channel-item"]//h3/a/text()')
    # print(result)
    # Parent node: ..
    # result = etree_html.xpath('//span[@class="pubtime"]/../span/a/text()')
    # print(result)
    # Attribute match: [@class="xxx"]
    # Text match: text(); all text with //text()
    # result = etree_html.xpath('//div[@class="article"]//text()')
    # print(result)
    # Attribute extraction: @href
    # result = etree_html.xpath('//div[@class="bd"]/h3/a/@href')
    # print(result)
    # Multi-valued attribute match: contains(@class, 'xx')
    # result = etree_html.xpath('//div[contains(@class, "grid-16-8")]//div[@class="likes"]/text()[1]')
    # print(result)
    # Multiple attributes: or, and, mod, //book | //cd, + - * div = != < > <= >=
    # result = etree_html.xpath('//span[@class="pubtime" and contains(text(), "09-07")]/text()')
    # print(result)
    # Positional selection: [1] [last()] [position() < 3] [last() - 2]
    # Node axes
    # //li/ancestor::* all ancestor nodes
    # //li/ancestor::div the div ancestor node
    # //li/attribute::* attribute axis: all attribute values of li
    # //li/child::a[@href="link1.html"] child axis: direct children
    # //li/descendant::span all span-type descendants
    # //li/following::* everything after the current node's closing tag
    # //li/following-sibling::* all following siblings of the current node
    # result = etree_html.xpath('//img/attribute::*')
    # print(result)
    result = etree_html.xpath(
        '//div[contains(@class, "channel-group-rec")]//div[@class="title"]/following::*[1]/text()')
    print(result)
# Parse the target listing page
def parse_item_xpath(html):
    """Extract discussion title / group name / summary / likes / pubtime
    from a Douban group-explore page and return a list of dicts.

    NOTE(review): the five XPath result lists are matched up by index, so
    they must all have the same length for a given page -- verify.
    """
    etree_html = etree.HTML(html)
    # Discussion titles
    result = etree_html.xpath('//div[@class="bd"]/h3/a/text()')
    # print(result)
    result_title = result
    # Group names
    result = etree_html.xpath('//div[@class="source"]/span/a/text()')
    # print(result)
    result_source = result
    # Discussion summaries
    result = etree_html.xpath('//div[@class="block"]/p/text()')
    # print(result)
    result_block = result
    # Current "likes" count
    result = etree_html.xpath('//div[@class="likes"]/text()[1]')
    # print(result)
    result_likes = result
    # Publication time
    result = etree_html.xpath('//span[@class="pubtime"]/text()')
    # print(result)
    result_pubtime = result
    # Image URLs (disabled: some discussions have no image, which would
    # misalign the indexed lists below)
    # result = etree_html.xpath('//div[@class="pic"]/div/img/@src')
    # print(result)
    # result_src = result
    items = []
    for i in range(len(result_block)):
        item = {}
        item['title'] = result_title[i]
        item['source'] = result_source[i]
        item['block'] = result_block[i]
        item['likes'] = result_likes[i]
        item['pubtime'] = result_pubtime[i]
        # Some discussions have no picture
        # item['src'] = result_src[i]
        items.append(item)
    return items
# Persist the scraped data locally
def write_json(items):
    """Append *items*, serialised as one JSON document, to the local dump
    file (UTF-8, non-ASCII characters kept readable)."""
    serialized = json.dumps(items, ensure_ascii=False, check_circular=True)
    filename = './豆瓣小组精选'
    with open(filename, "a", encoding='utf-8') as f:
        f.write(serialized)
# Main program
def main():
    """Scrape every page of the Douban group-explore listing and persist
    each page's items with write_json()."""
    # Bugfix: start at 0 so the first page is actually fetched -- with
    # range(1, 298) the `i == 0` special case below was unreachable and
    # the first 30 discussions were never scraped.
    for i in range(0, 298):
        page = str(i * 30)
        url = "https://www.douban.com/group/explore?start=" + page
        if i == 0:
            url = "https://www.douban.com/group/explore"
        html = get_one_page(url)
        # print(html)
        # parse_with_xpath(html)
        items = parse_item_xpath(html)
        write_json(items)
# Run the scraper only when executed as a script, not on import.
if __name__ == '__main__':
    main()
| true |
c2f77d0dcfeb2752f42f41321e333047ecd23033 | Python | se20z09/z09_homework1 | /test/test_addition.py | UTF-8 | 265 | 2.9375 | 3 | [
"MIT"
] | permissive | import unittest
from code.addition import add
from code.increment import inc
class TestAddition(unittest.TestCase):
    """Unit tests for code.addition.add."""

    def test_addition(self):
        self.assertEqual(add(2, 4), 6)

    def test_addition_neg(self):
        # 82 + (-42) is 40, so it must not equal 16.
        self.assertNotEqual(add(82, -42), 16)
| true |
0e3bcfd8097f9ca228b7b98a2c62cd80512bcd57 | Python | manoznp/LearnPython-Challenge | /DAY4/homework1.py | UTF-8 | 1,314 | 4.09375 | 4 | [] | no_license | # Write the function that calculate the discount amount and discount %, and profit of an item.
def DiscountAmount():
    # Discount = marked price minus selling price (module-level MP/SP).
    return MP - SP
def DiscountPercentage():
    # Discount expressed as a percentage of the marked price MP.
    return (DiscountAmount() / MP) * 100
def ProfitAmount():
    # Profit = selling price minus actual (cost) price (module-level SP/AP).
    return SP - AP
def ProfitPercentage():
    # Profit expressed as a percentage of the actual (cost) price AP.
    return (ProfitAmount() / AP) * 100
def switch():
    """Dispatch on the module-level `choice` and return the matching
    report string.

    The messages are built lazily so only the selected metric is
    computed -- the original dict evaluated all four formulas up front,
    which could raise ZeroDivisionError for a metric the user never
    asked about (e.g. MP == 0 while querying profit).
    """
    switcher = {
        1: lambda: "The discount is {}".format(DiscountAmount()),
        2: lambda: "The discount percentage is {}%".format(DiscountPercentage()),
        3: lambda: "The Profit is {}".format(ProfitAmount()),
        4: lambda: "The Profit percentage is {}%".format(ProfitPercentage())
    }
    return switcher.get(choice, lambda: "Invalid Queries!!")()
# Read the three prices, show the menu, then print the selected metric.
AP = int(input("Enter ActualPrice: "))
MP = int(input("Enter MarketPrice: "))
SP = int(input("Enter SellingPrice: "))
print("*************************************")
print("1. Calculate Discount Amount")
print("2. Calculate Discount Percentage")
print("3. Calculate Profit Amount")
print("4. Calculate Profit Percentage")
choice = int(input("what you like ? : "))
print(switch())
print("*************************************")
# print("The discount is {}".format(DiscountAmount()))
# print("The discount % is {}%".format(DiscountPercentage()))
# print("The Profit is {}".format(ProfitAmount()))
# print("The Profit % is {}%".format(ProfitPercentage()))
174a641f4738945f32bc597c4b25632a7a9acd63 | Python | IPalos/AirfoilGridPy | /Python/util.py | UTF-8 | 4,715 | 2.890625 | 3 | [] | no_license | """
Maneja las utilidades y funciones auxiliares del programa
"""
import sys
import csv
import subprocess as sp
import matplotlib.pyplot as plt
import math
import numpy as np
import math_deps as m
# Determine the directory to import files from (Python 2 script).
a=sp.Popen("pwd", stdout=sp.PIPE)
user = a.stdout.read()
# Strip the trailing newline from the `pwd` output.
rootPath=user[:-1]
def greeting(version):
    # Print the program banner and version (user-facing Spanish strings).
    print ("\n\n\nEste es Construct2D, edicion Python2\n\n\n")
    print ('Version: '+str(version)+"\n\n\n")
def read_cl(path):
    """Use the first command-line argument as the input file; otherwise
    prompt interactively for a directory and an airfoil file name
    (without extension).  Returns (directory, filename)."""
    filename=''
    cpath=path+'/sample_airfoils'
    while filename == '':
        if len(sys.argv) > 1 and sys.argv[1] != '':
            filename= str(sys.argv[1])
        else:
            # Interactive fallback (Python 2: raw_input, print statements).
            print "Directorio actual: "+ str(cpath)
            pathCorrect = raw_input("Desea cambiarlo?:(y/n)")
            if (pathCorrect=='y' or pathCorrect =='yes' or pathCorrect=='Y'):
                cpath =raw_input("Intgrese un nuevo directorio: ")
            # List the files available in the chosen directory.
            print "Archivos encontrados en el directorio actual: "
            sp.call(['ls', cpath, '-l'])
            filename= raw_input("Ingresa el nombre del archivo del perfil: \n(sin la extension) ")
    return cpath, filename
def createProfileInput(path,file):
    """Build the (x, y) point lists of an airfoil from its CSV input file.

    path -> string, directory containing the file
    file -> string, file name (first row holds the profile title)
    OUTPUT: (x, y, numberOfPoints, profileTitle)
    """
    with open(path+"/"+file , 'rb') as f:
        reader = csv.reader(f)
        # Python 2: map() is eager, so this is a plain list of rows.
        profileList = map(list, reader)
    profileTitle= profileList[0][0]
    profileList=profileList[1:]
    profileSize=len(profileList)
    # Convert every coordinate from string to float in place.
    for i in range(len(profileList)):
        for j in range(len(profileList[i])):
            profileList[i][j]=float(profileList[i][j])
    x=[]
    y=[]
    for i in profileList:
        x.append(i[0])
        y.append(i[1])
    print "============DATOS DEL ARCHIVO======================"
    print "Nombre del perfil: "+ profileTitle
    print "Numero de puntos: "+ str(profileSize)
    return (x,y, profileSize, profileTitle)
def transformAirfoil(AirfoilClass):
    """Normalise the airfoil to unit chord and move its leading edge to x=0.

    Bugfix: the original computed the transformed coordinates into local
    lists (xtemp/ytemp) and never stored them anywhere, so the function
    was a no-op; the result is now written back to AirfoilClass.x / .y.
    """
    x=AirfoilClass.x
    y=AirfoilClass.y
    xtemp=[]
    ytemp=[]
    # Shift so the leading edge (minimum x) sits at x = 0.
    dist=min(x)
    for i in x:
        xtemp.append(i-dist)
    # Scale both axes by the chord length of the shifted profile.
    ratio=(max(xtemp)-min(xtemp))
    for i in range(AirfoilClass.npoints):
        xtemp[i]=xtemp[i]/ratio
        ytemp.append(y[i]/ratio)
    AirfoilClass.x = xtemp
    AirfoilClass.y = ytemp
def PlotAirfoil(AirfoilClass):
    """Scatter-plot the airfoil points in a fixed viewport."""
    plt.scatter(AirfoilClass.x,AirfoilClass.y)
    plt.axis([-1.0,2.0,-1.5,1.5]) #define the plotting window with these bounds
    # plt.axis("equal") #plot both axes at the same scale
    plt.show()
    # NOTE(review): bare attribute access below has no effect, and
    # xlabel() after show() will not appear on the closed figure.
    plt.axis
    plt.xlabel(AirfoilClass.name)
def createFarfield(gridClass, radi, fdst):
    """
    Create the outer (farfield) boundary of the mesh: a circle of radius
    *radi* centred at (0.5, 0), written into the last j-column of the
    grid's x/y arrays.
    """
    imax = gridClass.imax
    jmax= gridClass.jmax
    srf1 = gridClass.surfbounds[0]
    srf2 = gridClass.surfbounds[1]
    tol = 1e-12
    errval=1000.0
    d0=0.0
    # Compute the mesh quality
    # check whether is_even is needed
    # while errval > tol:
    #     lguess = 2.0 * math.pi * radi + float(m.isEven(imax)) * d0
    #     d0 = 1.0/fdst*lguess/float(imax-1)
    #     lcirc = 2.0 * math.pi * radi + float(m.isEven(imax))*d0
    #     errval = math.sqrt((lguess-lcirc)**2)/lguess
    # d0=1.0/fdst*lcirc/float(imax-1)
    # Anchor points of the farfield circle on the outer j-column.
    gridClass.x[0,-1]=radi +0.5
    gridClass.y[0,-1]= 0.0
    gridClass.x[-1,-1] = gridClass.x[0,jmax-1]
    gridClass.y[-1,-1]=0.0
    ang=0.0
    ## DEBUG: to restore the previous (half-circle mirrored) behaviour,
    # uncomment this and the gridClass lines below
    # nouter = int(math.ceil(float(imax)/2))
    nouter = imax
    nfaux = nouter -1 + m.isEven(imax)
    # Walk the full circle with uniform angular spacing.
    for i in range(nouter):
        space = 2.0*math.pi*radi/float(imax-1)
        ang= ang + space/radi
        gridClass.x[i,-1]=radi*math.cos(ang)+0.5
        gridClass.y[i,-1]=radi*math.sin(ang)
        # gridClass.x[imax-i,-1] = gridClass.x[i-1,-1]
        # gridClass.y[imax-i,-1] = -gridClass.y[i-1,-1]
def writeSrfGrid(gridClass,file):
    """Write the grid to a plot3d (.p3d) file under <rootPath>/renders/.

    gridClass -> GridClass instance (needs imax, jmax and 2-D x/y arrays)
    file      -> base name of the output file, without extension
    Returns the path of the file that was written.
    """
    imax = gridClass.imax
    jmax = gridClass.jmax
    renderPath=rootPath+"/renders/"+file+".p3d"
    # Bugfix: use a context manager so the file is flushed and closed
    # even if a write fails; the original never called close().
    with open(renderPath,"w") as f:
        f.write(str(imax)+" "+str(jmax)+"\n")
        # Plot3d layout: all x values first, then all y values, j-major.
        for j in range(jmax):
            for i in range(imax):
                f.write(str(gridClass.x[i,j])+'\n')
        for j in range(jmax):
            for i in range(imax):
                f.write(str(gridClass.y[i,j])+'\n')
    return renderPath
| true |
7c74fbea1a7258ee7e4cfdb66bfd0dbe6cd69ca6 | Python | HiloGoes/GEO_grafita | /treat_reduce.py | UTF-8 | 1,509 | 2.78125 | 3 | [] | no_license |
#read READ CSV TO SCRR_TRAINING & SCRR_TEST
#treining = pd.read_csv('../training.csv') #examples
#test = pd.read_csv('../test.csv') #examples
import numpy as np
import pandas as pd
import verde as vd
def grafita_reduce(training_csv, test_csv, n_iterations=1, *,
                   different_spacing=False, spacing_range=None):
    """
    (get a better description)
    Grafita info: receive data and reply (n_iterations times) with
    differently filtered parameters.

    Parameters
    ----------
    training_csv : pandas.DataFrame
    test_csv : pandas.DataFrame
    n_iterations : int
    different_spacing : bool, keyword-only
    spacing_range : list of float, keyword-only (defaults to [500])

    Returns
    -------
    Not implemented yet (work in progress).
    """
    # Bugfix: the original signature (`*different_spacing=false`) was a
    # SyntaxError, `false` is not a Python name, and the mutable default
    # list is now created per call instead of being shared.
    import copy
    if spacing_range is None:
        spacing_range = [500]
    # CREATE THC/KC & KC/CTC derived features
    training_csv['THC/KC'] = training_csv['THC']/training_csv['KC']
    test_csv['KC/CTC'] = test_csv['KC']/test_csv['CTC']
    # getting rid of unnamed columns; do it before loading into this
    # function, and save to csv.
    training_csv.drop('Unnamed: 0', axis='columns', inplace=True)
    # Bugfix: copy.deepcopy() was called with no argument (TypeError) --
    # create a copy of the data to filter only the needed features.
    list_training = copy.deepcopy(training_csv)
    vd.BlockReduce(np.median, spacing=spacing_range)  # variable spacing behaviour (was spacing=500)
    # CONSTRUCTIVE THOUGHTS FOR THIS FUNCTION:
    # try to compare each training process before it's complete;
    # if possible, compute the remaining processes and return a range of
    # best results.  Probably best to create a new scope, or build a
    # dedicated class, to manage the functionality.
    pass
| true |
079847afbd44ac37faca0ce1f88f289555cb3635 | Python | marlacabral/Projects | /Projeto_02_JOKENPO/projeto02.py | UTF-8 | 3,286 | 3.890625 | 4 | [] | no_license | #O programa tem que:
# Permitir que eu decida quantas rodadas iremos fazer;
# Ler a minha escolha (Pedra, papel ou tesoura);
# Decidir de forma aleatória a decisão do computador;
# Mostrar quantas rodadas cada jogador ganhou;
# Determinar quem foi o grande campeão de acordo com a quantidade de vitórias de cada um (computador e jogador);
# Perguntar se o Jogador quer jogar novamente, se sim inicie volte a escolha de quantidade de rodadas, se não finalize o programa.
from random import randint #importei biblioteca para sorteio de numero aleatório
from time import sleep #importei biblioteca para espaço de tempo
print('Bem vindo ao JOKENPÔ! Eu te desafio a me ganhar...quem perder, lava a louça por uma semana :)')
sleep(1)
votacao = " "
itens = ('Pedra', 'Papel', 'Tesoura')
contador_de_jogadas = 0
a = 0
b = 0
while True:
jogadas = int(input('Insira o número de rodadas desejadas: '))
for c in range(jogadas):
contador_de_jogadas = contador_de_jogadas +1
eu = randint(0, 2)
print('''Suas opções são:
[ 0 ] PEDRA
[ 1 ] PAPEL
[ 2 ] TESOURA''')
voce = int(input('Qual é a sua jogada? '))
print('JO')
sleep(1)
print('KEN')
sleep(1)
print('PO!!!!')
sleep(1)
print('-=' * 11)
print('Eu joguei {}'.format(itens[eu]))
print('Você jogou {}'.format(itens[voce]))
print('-=' * 11)
sleep(1)
if eu == 0:
if voce == 0:
print('Ahh! Empatamos :(')
elif voce == 1:
print('Você venceu :)')
b += 1
elif voce == 2:
print('Uhuull, eu ganhei \o/')
a += 1
else:
print('JOGADA INVÁLIDA')
elif eu == 1:
if voce == 0:
print('Uhuull, eu ganhei \o/')
a += 1
elif voce == 1:
print('Ahh! Empatamos :(')
elif voce == 2:
print('Você venceu :)')
b += 1
else:
print('JOGADA INVÁLIDA')
elif eu == 2:
if voce == 0:
print('Você venceu :)')
b += 1
elif voce == 1:
print('Uhuull, eu ganhei \o/')
a += 1
elif voce == 2:
print('Ahh! Empatamos :(')
else:
print('JOGADA INVÁLIDA')
if c+1 == jogadas:
print(f'Eu ganhei {a} partidas e você ganhou {b} partidas.')
if a == b:
print('Como nós empatamos, não existe campeão, nós dois vamos precisar lavar a louça :(')
if a > b:
print('O troféu de campeão é meu!!!! A louça é sua por uma semana *-*')
if a < b:
print('O grande campeão é você!!! \o/\o/ . A louça sobrou pra mim :/')
votacao = str(input("Gostaria jogar novamente? [S/N]: ")).upper().strip()[0]
if votacao == 'N':
break
elif votacao == 'S':
contador_de_jogadas = 0
| true |
e8c14fc502ca92d3d5628f1ce4cf2e076e4b6249 | Python | nitcse2018/daa-djsamyak | /DAA Assignment 1/Divide and Conquer/Tower_of_Hanoi.py | UTF-8 | 446 | 3.78125 | 4 | [] | no_license | moveCounter=0
def TowerOfHanoi(n , from_rod, to_rod, aux_rod):
    """Print the move sequence for *n* disks from from_rod to to_rod,
    using aux_rod as the spare peg (classic recursion)."""
    if n == 1:
        # Base case: the smallest disk moves directly.
        print (f"Move disk 1 from rod {from_rod} to rod {to_rod}")
    else:
        # Park n-1 disks on the spare peg, move disk n, then unpark them.
        TowerOfHanoi(n-1, from_rod, aux_rod, to_rod)
        print (f"Move disk {n}, from rod {from_rod}, to rod {to_rod}")
        TowerOfHanoi(n-1, aux_rod, to_rod, from_rod)
# Ask for the disk count and play the game from rod A to rod C via B.
n=input("How many disks would you like to play with ? \n")
TowerOfHanoi(int(n), 'A', 'C', 'B')
9c7619456dcc70fb116f039351182ca7912ce0ce | Python | jbosboom/sigmars-gardener | /solver.py | UTF-8 | 2,736 | 2.84375 | 3 | [] | no_license | from typing import Dict, List, Tuple
import data
# Atom categories of the board: the four cardinal elements, and the metal
# transmutation chain in ascending order (lead first, gold last).
_cardinals = ['air', 'earth', 'fire', 'water']
_metals = ['lead', 'tin', 'iron', 'copper', 'silver', 'gold']
def solve(puzzle: Dict[int, str]):
    """Return a winning sequence of (atom, atom) removal moves for *puzzle*.

    *puzzle* maps board positions to atom names; moves come back in
    play order (the recursion produces them deepest-first, so we reverse).
    """
    # Gold "matches with itself" for the purposes of the moves list.
    # The current metal is the least metal still present on the board.
    present_metals = sorted(
        (m for m in puzzle.values() if m in _metals),
        key=_metals.index)
    moves: List[Tuple[int, int]] = _solve_recurse(
        puzzle.copy(), present_metals[0], set())
    moves.reverse()
    return moves
def _atoms_match(a, b):
    """Return True when atoms *a* and *b* can be removed as a pair."""
    # This could be turned into a lookup in a set of pairs.
    # A cardinal element pairs with an identical element or with salt;
    # check both orderings symmetrically.
    for elem, partner in ((a, b), (b, a)):
        if elem in _cardinals and (elem == partner or partner == 'salt'):
            return True
    # Two salts pair with each other.
    if a == 'salt' and b == 'salt':
        return True
    # Quicksilver pairs with any metal (in either position).
    if 'quicksilver' in (a, b) and (a in _metals or b in _metals):
        return True
    # Vitae and mors annihilate each other; everything else does not match.
    return {a, b} == {'vitae', 'mors'}
def _solve_recurse(puzzle, current_metal, fail_memo):
    # Depth-first backtracking search over removable atom pairs.
    # Returns the move list in *reverse* order (deepest move first), or
    # None when this position cannot be cleared. `puzzle` is mutated in
    # place and restored on backtrack; `fail_memo` caches dead positions.
    if not puzzle:
        return []
    fail_set = frozenset(puzzle.items())
    if fail_set in fail_memo:
        return None
    # An atom is "free" when it has three consecutive empty neighbors;
    # metals are only considered once they are the current metal.
    free_atoms = []
    for atom in puzzle:
        if puzzle[atom] not in _metals or puzzle[atom] == current_metal:
            # note that the dummy is always free
            neighbor_free = [n not in puzzle for n in data.neighbors[atom]]
            # allow wraparound
            neighbor_free.extend(neighbor_free[:2])
            for i in range(len(neighbor_free)):
                if neighbor_free[i:i+3] == [True, True, True]:
                    free_atoms.append(atom)
                    break
    for i, atom1 in enumerate(free_atoms):
        # Gold is removed on its own once it is the current metal; record
        # this as a (atom, atom) self-move.
        if current_metal == 'gold' and puzzle[atom1] == 'gold':
            del puzzle[atom1]
            moves = _solve_recurse(puzzle, None, fail_memo)
            if moves is not None:
                moves.append((atom1, atom1))
                return moves
            puzzle[atom1] = 'gold'
        for atom2 in free_atoms[i+1:]:
            if _atoms_match(puzzle[atom1], puzzle[atom2]):
                t1, t2 = puzzle[atom1], puzzle[atom2]
                # Removing the current metal unlocks the next one in the chain.
                next_metal = _metals[_metals.index(current_metal)+1] if t1 == current_metal or t2 == current_metal else current_metal
                del puzzle[atom1]
                del puzzle[atom2]
                moves = _solve_recurse(puzzle, next_metal, fail_memo)
                if moves is not None:
                    moves.append((atom1, atom2))
                    return moves
                # Backtrack: restore both atoms before trying the next pair.
                puzzle[atom1] = t1
                puzzle[atom2] = t2
    fail_memo.add(fail_set)
    return None
8be86ee443a674d496d86a7018719507a038162f | Python | imkanda/ventibot | /wish.py | UTF-8 | 8,674 | 2.734375 | 3 | [] | no_license | import json
import random
from datetime import date
class Wish:
    """Gacha ("wish") simulator backed by two JSON files:
    pullables.json (pull tables and emoji lookups) and wishes.json
    (per-user persistent state: pity counters, currency, inventory).
    """
    def __init__(self):
        with open("pullables.json", "r") as pullables_file:
            self.pullables = json.load(pullables_file)
        # Index in the character emoji list where 5* characters begin.
        self.FIVE_STAR_START_INDEX = 16
        # {rarity: [(character/wep, emoji)]}
        # rarities for wish functions
        self.rarity_pull = ['5*', '4*', '3*']
    def wish(self, userid, pulls: int = 10, simulation: bool = True):
        """Perform *pulls* wishes; returns (highest_rarity, list of
        [pullable_id, emoji]) or (False, False) if primogems ran out."""
        # if a pull is a simulation, it does not require a real userid or primogems and does not affect a players pity
        fourstar_pity = 1
        fivestar_pity = 1
        if not simulation:
            fourstar_pity, fivestar_pity = self.get_pity(userid)
            # 160 primogems per pull; abort if the user cannot afford them.
            if not self.primo_transaction(userid, 0-(pulls*160)):
                return False, False
        highest_rarity = None
        wish_pulls = []
        for pull in range(0, pulls):
            if fivestar_pity == 90:
                # Hard pity: a 5* is guaranteed on the 90th pull.
                rarity = '5*'
            elif fourstar_pity >= 10:
                # A 4* is guaranteed at least once every 10 pulls.
                rarity = '4*'
            else:
                # Soft pity: the 5* weight ramps up as fivestar_pity grows.
                if 76 <= fivestar_pity <= 89:
                    pity = 32
                elif 46 <= fivestar_pity <= 75:
                    pity = 1.2
                else:
                    pity = 0.6
                rarity = random.choices(self.rarity_pull, cum_weights=(pity, 5.1, 94.3))[0]
            if rarity == '5*':
                highest_rarity = "5*"
                fivestar_pity = 1
                fourstar_pity += 1
            elif rarity == '4*':
                if highest_rarity != "5*":
                    highest_rarity = "4*"
                fivestar_pity += 1
                fourstar_pity = 1
            else:
                if not highest_rarity:
                    highest_rarity = "3*"
                fivestar_pity += 1
                fourstar_pity += 1
            pulled = random.choice(self.pullables["rarities"][rarity])
            wish_pulls.append([pulled, self.get_emote_pullable(pulled)])
            if not simulation:
                # Persist after every pull so an interrupted batch keeps
                # partial state (one file rewrite per pull -- slow but safe).
                self.save_pity(userid, [fourstar_pity, fivestar_pity])
                self.add_item(userid, pulled)
                self.add_wishes(userid, 1)
        return highest_rarity, wish_pulls
    def get_emote_pullable(self, pullable):
        """Resolve a pullable id ('w'+name for weapons, 'c'+name for
        characters) to its emoji string; returns None when unknown."""
        emote = None
        fl_to_type = {"w": "weapons", "c": "characters"}
        if pullable[0] == 'w':
            for i in self.pullables["weapons"]:
                if i == pullable[1:]:
                    emote = self.pullables[fl_to_type[pullable[0]]][i]
        elif pullable[0] == 'c':
            # Characters are matched by position in character_index.
            charArr = self.pullables["character_index"]
            for i in range(len(charArr)):
                if charArr[i] == pullable[1:]:
                    emote = self.pullables["characters"][i]
        return emote
    def get_all_data(self) -> dict:
        """Load and return the full wishes.json user table."""
        with open("wishes.json", "r") as wishes_file:
            wishes_dict = json.load(wishes_file)
        return wishes_dict
    def get_user_data(self, userid: int) -> dict:
        """Return one user's record, or {} when the user is unknown."""
        wishes_dict = self.get_all_data()
        if str(userid) not in wishes_dict:
            return {}
        return wishes_dict[str(userid)]
    def add_item(self, userid: int, item: str):
        """Add a pulled item to the user's inventory; duplicate characters
        raise the constellation level instead of stacking."""
        units = self.get_units(userid)
        if item[0] == "w": # item is a weapon, add it to inven
            units.append(item)
        elif item[0] == "c": # item is a character, check if already owned+constellation level then add
            clevel = self.get_character_constellation(userid, item[1:], units)
            if clevel == -1:
                units.append(item[1:])
            elif clevel in range(0, 6):
                for i in range(len(units)):
                    if units[i].startswith(item[1:]): # assumes no duplicates of same unit since only detects first one
                        units[i] = f"{item[1:]} C{clevel + 1}"
                        break
        self.save_units(units=units, userid=userid) if False else self.save_units(userid, units)
    def get_character_constellation(self, userid: int, character: str, units: list):
        """Return the constellation level of *character* within *units*:
        -1 if not owned, 0 if owned with no 'C#' suffix, else the digit
        parsed from a trailing 'C<n>' marker."""
        clevel_int = -1
        for unit in units:
            if unit == character:
                return 0
            elif unit[0:len(character)] == character:
                # Entries look like "<name> C3"; read the trailing digit.
                constellation_level = unit[-2:]
                if constellation_level[0] == 'C':
                    try:
                        clevel_int = int(constellation_level[1])
                        return clevel_int
                    except ValueError:
                        pass
        return clevel_int
    def save_user_data(self, userid: int, data=None): # save user data or create new user if no data is provided
        if data is None:
            # Fresh-user template.
            data = {"four_pity": 0, "five_pity": 0, "wishes": 0, "primos": 0, "cooldown": "", "units": []}
        wishes_dict = self.get_all_data()
        wishes_dict[str(userid)] = data
        with open("wishes.json", "w") as wishes_file:
            json.dump(wishes_dict, wishes_file, indent=2)
    def get_units(self, userid: int) -> list:
        """Return the user's inventory list."""
        data = self.get_user_data(userid)
        return data["units"]
    def save_units(self, userid: int, units: list):
        """Overwrite the user's inventory list and persist it."""
        data = self.get_user_data(userid)
        data["units"] = units
        self.save_user_data(userid, data)
    def get_pity(self, userid: int) -> tuple[int, int]:
        """Return (four_star_pity, five_star_pity) counters."""
        data = self.get_user_data(userid)
        return data["four_pity"], data["five_pity"]
    def save_pity(self, userid: int, pity: list[int, int]): # userid, [4pity, 5pity]
        data = self.get_user_data(userid)
        data["four_pity"] = pity[0]
        data["five_pity"] = pity[1]
        self.save_user_data(userid, data)
    def primo_transaction(self, userid: int, num: int):
        """Add (num > 0) or spend (num < 0) primogems; returns the
        resulting balance, or False when the user cannot afford it."""
        user_data = self.get_user_data(userid)
        if num > 0: # add primos
            user_data["primos"] += num
            self.save_user_data(userid, user_data)
        elif num < 0: # use primos
            primos = user_data["primos"]
            if (primos + num) >= 0:
                user_data["primos"] = primos + num
                self.save_user_data(userid, user_data)
            else:
                return False
        return user_data["primos"]
    def get_cooldown(self, userid: int):
        """Return the user's stored daily-reward cooldown date string."""
        data = self.get_user_data(userid)
        return data['cooldown']
    def set_cooldown(self, userid: int, cooldown: str):
        """Persist a new daily-reward cooldown date string."""
        data = self.get_user_data(userid)
        data['cooldown'] = cooldown
        self.save_user_data(userid, data)
    def get_wishes(self, userid: int) -> int:
        """Return the user's lifetime wish count."""
        data = self.get_user_data(userid)
        return data['wishes']
    def add_wishes(self, userid: int, wishes_to_add: int):
        """Increment the user's lifetime wish counter."""
        data = self.get_user_data(userid)
        data['wishes'] += wishes_to_add
        self.save_user_data(userid, data)
    def check_user_exists(self, userid: int):
        """True when the user already has a record in wishes.json."""
        if str(userid) in self.get_all_data():
            return True
        return False
    # gives daily primos, creates new user if not in json file already
    def daily_primo(self, userid: int):
        daily_valid = False
        if not self.check_user_exists(userid):
            # First contact: create the record and grant a signup bonus.
            self.save_user_data(userid)
            self.primo_transaction(userid, 1832)
            daily_valid = True
        primo_add = 458
        current_date = str(date.today())
        if not daily_valid:
            cooldown_date = self.get_cooldown(userid)
            # ISO dates compare correctly as strings (lexicographic order).
            if str(current_date) > str(cooldown_date):
                daily_valid = True
        if daily_valid:
            self.set_cooldown(userid, current_date)
            return self.primo_transaction(userid, primo_add)
        else:
            return False
    # parses user's unit catalog
    def catalog_parse(self, userid):
        # grab user data and check if user/units exist
        userData = self.get_user_data(userid)
        if userData == {}:
            return 'NONE'
        unitData = userData["units"]
        # NOTE(review): unitData is a list, so `unitData == {}` is always
        # False; an empty inventory just falls through and returns [].
        # Probably meant `if not unitData:` -- and note the inconsistent
        # return types ('NONE' vs {} vs list).
        if unitData == {}:
            return {}
        # User exists and has units
        catalog_response = []
        rarities = self.pullables["rarities"]
        emoteList = self.pullables["characters"]
        for unit in unitData:
            five_stars = rarities[self.rarity_pull[0]]
            four_stars = rarities[self.rarity_pull[1]]
            # NOTE(review): `continue` here only skips the inner-loop
            # iteration; a unit matching entries in both lists is appended
            # more than once -- confirm whether that is intended.
            for i in range(len(four_stars)):
                if four_stars[i][1:] in unit:
                    catalog_response.append(f"{emoteList[i]} {unit}")
                    continue
            for i in range(len(five_stars)):
                if five_stars[i][1:] in unit:
                    catalog_response.append(f"{emoteList[self.FIVE_STAR_START_INDEX + i]} {unit}")
                    continue
        return catalog_response
| true |
ac99c26706f2747fbe47c8e869185da96f03ea41 | Python | nishanth-kumar0/annihilators_LVDS | /Read excel.py | UTF-8 | 279 | 2.71875 | 3 | [] | no_license | import pandas as pd
import numpy as np  # fixed: the original imported the non-existent module "numplate"

df = pd.read_excel('sample.xlsx')
print(df)
# Derived column: sum of the two measurements. (The original assigned to an
# undefined name `newData` and used typographic quotes, a SyntaxError.)
df["calculated_column"] = df["Height"] + df["Weight"]
# NOTE(review): the original condition was `np.f == a["Height"]`, which
# cannot run (`np.f` does not exist). Flagging rows with a missing Height is
# the closest runnable reading -- confirm the intended check.
for _, row in df.iterrows():
    if np.isnan(row["Height"]):
        print("Hence the stolen")
be41d8b9377024992e2a6dc7cd169f2d8fa6bd77 | Python | marcosvpinho/RKE_Analyzer | /divide.py | UTF-8 | 1,205 | 2.65625 | 3 | [
"MIT"
] | permissive | """
Embedded Python Blocks:
Each this file is saved, GRC will instantiate the first class it finds to get
ports and parameters of your block. The arguments to __init__ will be the
parameters. All of them are required to have default values!
"""
import numpy as np
from gnuradio import gr
class divide(gr.sync_block):
    """Level-control / squelch block.

    Buffers whose maximum is below `threshold` are consumed without
    producing output (squelch). Other buffers are normalized by their
    maximum and centered, mapping samples into [-0.5, 0.5].
    """
    def __init__(self, threshold=0.01):  # only default arguments here
        gr.sync_block.__init__(
            self,
            name='Level_Control/Squelch',
            in_sig=[np.float32],
            out_sig=[np.float32],
        )
        self.threshold = threshold
        # Maximum of the most recent buffer; exposed via retorna_maximo().
        self.maximum = 0.0
        self.flag = 0.0

    def work(self, input_items, output_items):
        in_stream = input_items[0][:]
        size = np.shape(in_stream)[0]
        self.maximum = np.maximum.reduce(in_stream)
        if self.maximum < self.threshold:
            # Below the squelch threshold: swallow the buffer, emit nothing.
            self.consume(0, size)
            return 0
        # Vectorized normalization (previously a per-sample Python loop):
        # divide by the buffer maximum, then shift into [-0.5, 0.5].
        output_items[0][:] = in_stream / self.maximum - 0.5
        return len(output_items[0])

    def retorna_maximo(self):
        """Return the maximum of the last processed buffer."""
        return self.maximum
| true |
23654cee9d4b2df3157cba71481ce8ac4546e2c1 | Python | 196884/Python | /PE/pe0010.py | UTF-8 | 468 | 3.390625 | 3 | [] | no_license | import array
def solve():
    """
    Return the sum of all primes below two million (Project Euler #10),
    computed with a sieve of Eratosthenes.
    """
    n = 2000000
    # bytearray sieve: 1 = "still assumed prime". One byte per entry
    # (vs. the original array('i')) and allows C-speed slice clearing.
    sieve = bytearray([1]) * n
    sieve[0] = sieve[1] = 0  # 0 and 1 are not prime
    result = 0
    for k in range(2, n):
        if sieve[k]:
            result += k
            # Cross off multiples starting at k*k; smaller multiples were
            # already removed by smaller primes.
            sieve[k * k::k] = bytearray(len(range(k * k, n, k)))
    return result
if __name__ == "__main__":
    result = solve()
    # Parenthesized print works under both Python 2 and Python 3; the
    # original bare `print` statement is a SyntaxError on Python 3.
    print("Result: %d" % result)
| true |
64a9cadf258902469929f0aa4d34208eac2ebd3f | Python | IDSDatasets/SF-SOINN-IDS | /SOINN/SF_SOINN.py | UTF-8 | 12,068 | 3.359375 | 3 | [] | no_license | import numpy as np
import igraph as ig
from numpy.linalg import norm
import math
import random
from collections import Counter
# label used for representing noise
NOISE_LABEL = 'noise'
class SF_SOINN(object):
    """
    Improved version of the Self-organizing incremental neural network called SOINN+.
    Parameters
    ----------
    x1 : array, shape = [n_features]
        First random initialization example.
    x2 : array, shape = [n_features]
        Second random initialization example.
    x3 : array, shape = [n_features]
        Third random initialization example.
    max_edge_age : int
        Maximum edge age after which an edge gets deleted.
    iter_lambda : int (default: 100)
        Every iter_lambda iterations the grouping process and the node deletion process start.
    pull_factor : int (default: 100)
        Pull factor for node merging.
    Attributes
    ----------
    t : int
        Iteration counter of network update. Increments every time an input signal is processed.
    pull_factor : int (default: 100)
        Pull factor for node merging.
    network : igraph
        The igraph graph representing the incremental neural network.
        Nodes have the following attributes:
            'w' : array, shape = [n_features]
                Weights of the node.
            'wt' : int
                Winning time of the node, i.e., how often that node was selected as the winner node.
            'st' : float
                Similarity threshold of the node.
            'it' : int
                Idle time of the node, i.e., counter of how many iterations the node was not selected as the winner.
            'u' : float
                Utility of a node.
            'c' : string
                Definitive class label of the node.
            'cl' : list
                List of class labels that are assigned to that node.
        Edges have the following attributes:
            'it' : int
                Edge's idle time.
            'wt' : int
                Number of times the edge was reset.
    """
    def __init__(self, x1, x2, x3, max_edge_age, iter_lambda=100, pull_factor=100):
        # t starts at 3 because the three seed samples count as processed.
        self.t = 3
        self.max_edge_age = max_edge_age
        self.iter_lambda = iter_lambda
        self.pull_factor = pull_factor
        # Lifetime counters of pruned edges/nodes (diagnostics only).
        self.n_del_edges = 0
        self.n_del_nodes = 0
        # generating the graph and adding 3 random training samples
        self.network = ig.Graph()
        self.network.add_vertices(3)
        self.network.vs[0]['w'] = x1
        self.network.vs[1]['w'] = x2
        self.network.vs[2]['w'] = x3
        self.network.vs[0]['wt'] = 1
        self.network.vs[1]['wt'] = 1
        self.network.vs[2]['wt'] = 1
        self.network.vs[0]['it'] = 1
        self.network.vs[1]['it'] = 1
        self.network.vs[2]['it'] = 1
        self.network.vs[0]['cl'] = [NOISE_LABEL]
        self.network.vs[1]['cl'] = [NOISE_LABEL]
        self.network.vs[2]['cl'] = [NOISE_LABEL]
        self.network.vs[0]['c'] = NOISE_LABEL
        self.network.vs[1]['c'] = NOISE_LABEL
        self.network.vs[2]['c'] = NOISE_LABEL
    def _distance(self, a, b):
        """
        Computes the fractional distance between two arrays a and b.
        Parameters
        ----------
        a : array, shape = [n_features]
            First array.
        b : array, shape = [n_features]
            Second array.
        Returns
        -------
        distance : float
            The fractional distance between the two input arrays.
        """
        # Fractional norm with f = 0.5: (sum |a-b|^0.5)^2.
        f = 0.5
        diff = abs(a - b) ** f
        # NOTE(review): local name `sum` shadows the builtin within this method.
        sum = np.sum(diff)
        distance = math.pow(sum, 1/f)
        return distance
    def _get_n1_n2(self, x):
        """
        Computes winner and second winner.
        Parameters
        ----------
        x : array, shape = [n_features]
            The input signal to be processed.
        Returns
        -------
        n1 : igraph vertex
            The winner node as an igraph vertex.
        n2 : igraph vertex
            The second winning node as an igraph vertex.
        """
        # determine winner and second winner
        distances = dict()
        for n in self.network.vs:
            distances[n] = self._distance(x, n['w'])
        n1 = min(distances, key=distances.get)
        del distances[n1]
        n2 = min(distances, key=distances.get)
        return n1, n2
    def _similarity_threshold(self, x, node):
        """
        Computes similarity thresholds of winner and second winner.
        If winner (or second winner) is a singleton node, then threshold will correpond to the distance to the closest node.
        If winner (or second winner) is not a singleton, then threshold will correspond to the distance to the furthest neighbor.
        Parameters
        ----------
        x : array, shape = [n_features]
            The input signal to be processed.
        node : igraph vertex
            The winner (or second winner) node as an igraph vertex.
        """
        distances = []
        # node is singleton, threshold will be the distance to closest node
        if node.degree() == 0:
            for n in self.network.vs:
                if n.index != node.index:
                    distances.append(self._distance(node['w'], n['w']))
            d = min(distances)
        # node has neighbors, threshold is the distance to furthest neighbor
        else:
            for n in node.neighbors():
                distances.append(self._distance(node['w'], n['w']))
            d = max(distances)
        # threshold depends on the number of winning times, the larger this value the larger the threshold becomes
        if d != 0:
            d += d * (1 - 1 / node['wt'])
            #d += d * (1 - 1 / self.t)
        node['st'] = d
    def _add_node(self, weights, y):
        """
        Add new node to the network.
        Parameters
        ----------
        weights : array, shape = [n_features]
            The weights of the new node to add.
        y : string
            The class (label) associated to the input signal.
        """
        # New nodes start unconnected, with fresh counters and label y.
        new_node = self.network.add_vertex()
        new_node['w'] = weights
        new_node['wt'] = 1
        new_node['it'] = 1
        new_node['cl'] = [y]
        new_node['c'] = y
    def _merge_nodes(self, n1, x):
        """
        Merge winner to the new input signal x so that the winner and its neighbors are adjusted towards the new case x.
        This shift is proportional to the wt of the winning node, the larger wt the less influence has x on the rest of the net.
        Parameters
        ----------
        n1 : igraph vertex
            The winning node.
        x : array, shape = [n_features]
            The weight array of the new node.
        """
        # weights update winner
        n1['w'] = n1['w'] + ((x - n1['w']) / n1['wt'])
        # weights update winner's neighbors
        for n in n1.neighbors():
            pulled_weight = self.pull_factor * n['wt']
            n['w'] = n['w'] + ((x - n['w']) / pulled_weight)
        # winner's idle time is reset
        n1['it'] = 1
    def _linking(self, n1, n2):
        """
        SOINN+ tries to link nodes that are likely to represent signal and not noise.
        Linking depends on the trusworthiness of a node.
        Parameters
        ----------
        n1 : igraph vertex
            The winner node.
        n2 : igraph vertex
            The second winner node.
        """
        # create edge between winner and second winner if it not exists
        n_edges = self.network.ecount()
        if n_edges == 0 or not self.network.are_connected(n1.index, n2.index):
            edge = self.network.add_edge(source=n1.index, target=n2.index)
            edge['it'] = 1
        else:
            # if edge between winner and second winner exists, then reset its idle time
            self.network.es[self.network.get_eid(n1.index, n2.index)]['it'] = 0
        # increment lifetime of all edges that are connected to winner
        for e in self.network.vs[n1.index].incident():
            e['it'] += 1
    def _edge_deletion(self):
        """
        Remove edges that exceed maximum age or connect different clusters.
        """
        # NOTE(review): edges are deleted while iterating self.network.es;
        # igraph reindexes edges on deletion, so some edges may be skipped
        # in this pass -- confirm this is acceptable.
        for e in self.network.es:
            source = self.network.vs[e.source]
            target = self.network.vs[e.target]
            if e['it'] > self.max_edge_age or source['c'] != target['c']:
                self.network.delete_edges(e.index)
                self.n_del_edges += 1
    def _nodes_deletion(self):
        """
        Nodes deletion algorithm.
        Nodes are deleted based on their un-utility. High values of unutility mean that the node was often not selected as winner.
        """
        max_u = 0.0
        us = []
        # computing utilities for all nodes
        for n in self.network.vs:
            n['u'] = n['wt'] / n['it']
            us.append(n['u'])
            if n['u'] > max_u:
                max_u = n['u']
        u_mean = np.mean(us)
        # nodes deletion
        # NOTE(review): despite the "prob_" names this rule is
        # deterministic (delete when survival probability < 0.5), and
        # vertices are removed while iterating the vertex sequence.
        for n in self.network.vs:
            if n.degree() == 0 and n['u'] < u_mean:
                prob_survival = n['u'] / max_u
                prob_deletion = 1 - prob_survival
                if prob_deletion > prob_survival:
                    self.network.delete_vertices(n.index)
                    self.n_del_nodes += 1
        # update idle times
        for n in self.network.vs:
            n['it'] += 1
    def _group(self):
        """
        Determine class labels by selecting the most frequently assigned class for each node.
        """
        # assigning the most frequent label as class label
        for n in self.network.vs:
            occurence_count = Counter(n['cl'])
            n['c'] = occurence_count.most_common(1)[0][0]
            n['cl'] = [n['c']]
    def input_signal(self, x, y=None, learning=True):
        """
        Fit the input signal x. If the input label is not set, then the new input is added with the noise label.
        Parameters
        ----------
        x : array, shape = [n_features]
            The input signal weight vector.
        y : string (default: None)
            The class (label) associated to the input signal.
        learning : boolean (default: True)
            Set to True if learning, False if just prediction. Requires y to be setted.
        Returns
        -------
        prediction : string
            The predicted label. None if prediction fails.
        confidence : float
            The propability that the result is a true positive or true negative.
        """
        n1, n2 = self._get_n1_n2(x)
        if learning:
            if y is None:
                y = NOISE_LABEL
            self.t += 1
            n_nodes = self.network.vcount()
            n_edges = self.network.ecount()
            prediction = n1['c']
            self._similarity_threshold(x, n1)
            self._similarity_threshold(x, n2)
            d1 = self._distance(x, n1['w'])
            d2 = self._distance(x, n2['w'])
            # Outside both similarity thresholds: x becomes a new node;
            # otherwise x is merged into the winner and linked to n2.
            if d1 >= n1['st'] or d2 >= n2['st']:
                self._add_node(x, y)
            else:
                n1['wt'] += 1
                # noise labels should not accumulate, for active learning
                if y != NOISE_LABEL:
                    n1['cl'].append(y)
                self._merge_nodes(n1, x)
                self._linking(n1, n2)
            # Periodic maintenance every iter_lambda processed signals.
            if self.t % self.iter_lambda == 0:
                if n_nodes > 3:
                    self._nodes_deletion()
                    self._group()
                if n_edges > 3:
                    self._edge_deletion()
        else:
            # make prediction, retrieve closest node and output result
            prediction = n1['c']
        # compute confidence of result
        confidence = 0
        if n1 in self.network.vs:
            self._similarity_threshold(x, n1)
            if n1['st'] != None and n1['st'] != 0:
                confidence = 1 - self._distance(
                    x, n1['w']) / n1['st']
        return prediction, confidence
| true |
cd6347a984186d214d57484a8d37134f1a3ed070 | Python | ludovicchabant/PieCrust2 | /tests/test_configuration.py | UTF-8 | 4,919 | 2.625 | 3 | [
"Apache-2.0"
] | permissive | import copy
import yaml
import pytest
from collections import OrderedDict
from piecrust.configuration import (
Configuration, ConfigurationLoader, merge_dicts,
MERGE_APPEND_LISTS, MERGE_PREPEND_LISTS, MERGE_OVERWRITE_VALUES)
@pytest.mark.parametrize('values, expected', [
    (None, {}),
    ({'foo': 'bar'}, {'foo': 'bar'})
])
def test_config_init(values, expected):
    # A Configuration built from `values` exposes exactly those values.
    cfg = Configuration(values)
    actual = cfg.getAll()
    assert actual == expected
def test_config_set_all():
    # setAll replaces the whole value set at once.
    values = {'foo': 'bar'}
    cfg = Configuration()
    cfg.setAll(values)
    assert cfg.getAll() == values
def test_config_get_and_set():
    # Constructor values are readable via get() and can be overwritten.
    cfg = Configuration({'foo': 'bar', 'answer': 42})
    assert cfg.get('answer') == 42
    assert cfg.get('foo') == 'bar'
    cfg.set('foo', 'something')
    assert cfg.get('foo') == 'something'
def test_config_get_and_set_nested():
    # Slash-separated paths address values inside nested tables.
    cfg = Configuration({
        'foo': [4, 2],
        'bar': {'child1': 'one', 'child2': 'two'}})
    assert cfg.get('foo') == [4, 2]
    assert cfg.get('bar/child1') == 'one'
    assert cfg.get('bar/child2') == 'two'
    cfg.set('bar/child1', 'other one')
    cfg.set('bar/child3', 'new one')
    assert cfg.get('bar/child1') == 'other one'
    assert cfg.get('bar/child3') == 'new one'
def test_config_get_missing():
    # Unknown keys yield None rather than raising.
    cfg = Configuration({'foo': 'bar'})
    missing = cfg.get('baz')
    assert missing is None
def test_config_has():
    # has() answers with real booleans for present and absent keys.
    cfg = Configuration({'foo': 'bar'})
    assert cfg.has('foo') is True
    assert cfg.has('baz') is False
def test_config_deep_set_non_existing():
    # Setting a deep path materializes all intermediate tables.
    cfg = Configuration({'foo': 'bar'})
    assert cfg.get('baz') is None
    cfg.set('baz/or/whatever', 'something')
    assert cfg.has('baz') is True
    assert cfg.has('baz/or') is True
    assert cfg.get('baz/or/whatever') == 'something'
def test_config_deep_set_existing():
    # Deep set inside an existing table keeps its other children intact.
    cfg = Configuration({'foo': 'bar', 'baz': {'wat': 'nothing'}})
    assert cfg.has('baz') is True
    assert cfg.get('baz/wat') == 'nothing'
    assert cfg.get('baz/or') is None
    cfg.set('baz/or/whatever', 'something')
    assert cfg.has('baz') is True
    assert cfg.has('baz/or') is True
    assert cfg.get('baz/or/whatever') == 'something'
@pytest.mark.parametrize('local, incoming, expected', [
    ({}, {}, {}),
    ({'foo': 'bar'}, {}, {'foo': 'bar'}),
    ({}, {'foo': 'bar'}, {'foo': 'bar'}),
    ({'foo': 'bar'}, {'foo': 'other'}, {'foo': 'other'}),
    ({'foo': [1, 2]}, {'foo': [3]}, {'foo': [3, 1, 2]}),
    ({'foo': [1, 2]}, {'foo': 'bar'}, {'foo': 'bar'}),
    ({'foo': {'bar': 1, 'baz': 2}}, {'foo': 'bar'}, {'foo': 'bar'}),
    ({'foo': {'bar': 1, 'baz': 2}}, {'foo': {'other': 3}},
     {'foo': {'bar': 1, 'baz': 2, 'other': 3}}),
    ({'foo': {'bar': 1, 'baz': 2}}, {'foo': {'baz': 10}},
     {'foo': {'bar': 1, 'baz': 10}})
    ])
def test_merge_dicts(local, incoming, expected):
    # merge_dicts mutates its first argument, so merge into a deep copy.
    merged = copy.deepcopy(local)
    merge_dicts(merged, incoming)
    assert merged == expected
def test_config_merge():
    # Merging overlays the incoming config onto the local one: new keys
    # are added, nested tables are merged and scalar collisions take the
    # incoming value.
    cfg = Configuration({
        'foo': [4, 2],
        'bar': {'child1': 'one', 'child2': 'two'}})
    overrides = Configuration({
        'baz': True,
        'blah': 'blah blah',
        'bar': {'child1': 'other one', 'child10': 'ten'}})
    cfg.merge(overrides)
    assert cfg.getAll() == {
        'foo': [4, 2],
        'baz': True,
        'blah': 'blah blah',
        'bar': {
            'child1': 'other one',
            'child2': 'two',
            'child10': 'ten'}}
@pytest.mark.parametrize('mode, expected', [
    (MERGE_APPEND_LISTS,
     {'foo': [4, 2, 1, 0], 'bar': 'something'}),
    (MERGE_PREPEND_LISTS,
     {'foo': [1, 0, 4, 2], 'bar': 'something'}),
    (MERGE_OVERWRITE_VALUES,
     {'foo': [4, 2], 'bar': 'other thing'})
    ])
def test_config_merge_with_mode(mode, expected):
    # The merge mode controls how lists combine and whether scalars are
    # overwritten by the incoming dict.
    cfg = Configuration({'foo': [4, 2], 'bar': 'something'})
    incoming = {'foo': [1, 0], 'bar': 'other thing'}
    cfg.merge(incoming, mode=mode)
    assert cfg.getAll() == expected
def test_ordered_loader():
    # ConfigurationLoader must produce mappings that preserve the key
    # order of the YAML document (plain yaml.load gives unordered dicts
    # on older Pythons).
    sample = """
one:
    two: fish
    red: fish
    blue: fish
two:
    a: yes
    b: no
    c: null
"""
    data = yaml.load(sample, Loader=ConfigurationLoader)
    assert type(data) is OrderedDict
    assert list(data['one'].keys()) == ['two', 'red', 'blue']
def test_load_time1():
    # The loader converts a HH:MM sexagesimal scalar into an integer
    # number of seconds past midnight.
    sample = """
time: 21:35
"""
    data = yaml.load(sample, Loader=ConfigurationLoader)
    assert type(data['time']) is int
    assert data['time'] == (21 * 60 * 60 + 35 * 60)
def test_load_time2():
    # Same as test_load_time1 but with an explicit seconds component.
    sample = """
time: 21:35:50
"""
    data = yaml.load(sample, Loader=ConfigurationLoader)
    assert type(data['time']) is int
    assert data['time'] == (21 * 60 * 60 + 35 * 60 + 50)
| true |
cb2c62f0f4819faa5b067847fb1b15bd74e53bc7 | Python | EzzeldinIsmail/Graphics | /pil fractal trees.py | UTF-8 | 1,126 | 3.265625 | 3 | [] | no_license | from PIL.Image import *
from PIL.ImageDraw import *
from PIL import *
from math import sin, cos, radians
# Canvas: a 2000x2000 grayscale ("L") image, white background.
height, width = 2000, 2000
im = new("L", (height, width), color=255)
d = Draw(im)
# Tuning constants; the trailing comments record previously tried values.
tree_length = 500 # 200
start_angle = 25 # 30
recursive_depth = 25 # 30
curviness = 1.1 # 1.1
# Counter of drawn branch segments (incremented inside rec_tree).
n = 0
# NOTE(review): `colours` is never used below -- leftover from a color version?
colours = ["Red", "Yellow", "Green", "Blue", "Purple"]
def rec_tree(x, y, length, angle):
    # Recursively draw one branch from (x, y) of the given length at
    # `angle` degrees from vertical, then spawn two shorter child
    # branches fanning left and right. Recursion stops once branches
    # shrink below 30 pixels.
    global n, d
    if length < 30:
        return
    n +=1
    # Branch endpoint; y decreases upward in image coordinates.
    x1 = x + int(length*sin(radians(angle)))
    y1 = y - int(length*cos(radians(angle)))
    # Line width tapers with branch length.
    d.line([(x, y),(x1, y1)], width=int(length/30), fill=0)
    print(angle, x, y, "->", x1, y1)
    # rec_tree(length-(length*(30/100)), angle-(angle*(30/100)), depth*60/100)
    # Children: length shrinks by recursive_depth percent; the angle is
    # widened by `curviness` and mirrored (+/-) for the two branches.
    rec_tree(x1, y1, length * (1 - (recursive_depth / 100)), abs(angle * curviness))
    rec_tree(x1, y1, length * (1 - (recursive_depth / 100)), -abs(angle * curviness))
# Trunk: a vertical 200px segment rising from the bottom center.
d.line([(width/2, height), (width/2, height-200)], width=int((tree_length+30)/30))
# Two mirrored crowns growing from the top of the trunk.
rec_tree(width/2, height-200, tree_length, start_angle)
rec_tree(width/2, height-200, tree_length, -start_angle)
im.show()
| true |
fa13af6249a22b0376d5501aa84e677c10a3a7f5 | Python | taoing/python_code | /2.3 process/07_copy_file.py | UTF-8 | 1,289 | 3.234375 | 3 | [] | no_license | #coding=utf-8
from multiprocessing import Manager, Pool
import time
import os
import random
# copy file
def copy_file(file_name, old_folder_name, new_folder_name, queue):
    """Copy one file from old_folder_name to new_folder_name (both
    relative to the current working directory) and report completion by
    putting the file name on *queue*.
    """
    src_path = os.path.join('.', old_folder_name, file_name)
    dst_path = os.path.join('.', new_folder_name, file_name)
    # `with` guarantees both handles are closed even if the copy fails
    # (the original leaked them on any exception).
    with open(src_path, 'rb') as read_file, open(dst_path, 'wb') as write_file:
        # Read the whole source and write it to the new file, i.e. copy it.
        write_file.write(read_file.read())
    queue.put(file_name)
def main():
    # Ask for the name of the folder to copy.
    old_folder_name = input('请输入copy文件夹:')
    # Destination folder name: source name plus a suffix.
    new_folder_name = old_folder_name + '_2'
    # Create the destination folder.
    os.mkdir(new_folder_name)
    # List the files inside the source folder.
    file_list = os.listdir('./' + old_folder_name)
    # Copy the files with a pool of worker processes.
    pool = Pool(5)
    queue = Manager().Queue()
    for file_name in file_list:
        pool.apply_async(copy_file, args = (file_name, old_folder_name, new_folder_name, queue))
    # Each worker puts its file name on the queue when done; draining the
    # queue lets us print a live progress percentage.
    num = 0
    file_num = len(file_list)
    while num < file_num:
        queue.get()
        num = num + 1
        copyrate = num/file_num
        print('\rCopy rate: %.2f %%...' % (copyrate*100), end = '')
    print('\r\nCopy end!')
# Run the interactive copy tool only when executed as a script (also
# required so multiprocessing workers can re-import this module safely).
if __name__ == '__main__':
    main()
bb9956bcc8053051c1911a484cd7ac603bbde03f | Python | yglj/learngit | /PythonPractice/hm_py/hm_oop/oop_Polymorphism.py | UTF-8 | 1,284 | 3.953125 | 4 | [] | no_license | # 多态 不同的子类对象调用父类方法,执行结果不同 ,以继承和重写为前提
class Dog:
def __init__(self, name):
self.name = name
class FlyDog(Dog):
pass
class LazyDog(Dog):
pass
class Person:
def __init__(self, name):
self.name = name
def liu_gou(self, dog): # 不用关心具体的狗的类型
print('%s and %s play patch game' % (self.name, dog.name))
f = FlyDog('废狗')
l = LazyDog('懒狗')
p = Person('小明')
p.liu_gou(f)
p.liu_gou(l)
# is用于判断是不是引用同一个对象
# ==用于判断引用变量值是否相等
a = [1, 2]
b = [1, 2, 3]
print(id(a), id(b))
a.append(3)
print(a == b)
print(a is b)
class Soldier:
def __init__(self, name):
self.name = name
self.gun = None
def fire(self):
if self.gun is None:
print('%s 还没有枪' % self.name)
else:
print('%s 冲啊、、、' % self.name)
print('[%s]....biu,biu,biu' % self.gun.name)
class Gun:
def __init__(self, name):
self.name = name
self.num = 0
def add_bullet(self, num):
self.num = num
print('装入%s颗子弹' % self.num)
gun = Gun('ak47')
xiao = Soldier('xiao')
xiao.gun = gun
xiao.fire()
| true |
270ce1c5ebca16fb8a1a81fec867392133bc9ca1 | Python | 00schen/gazebo_robomaster_gym | /src/prototype_sim.py | UTF-8 | 5,628 | 2.890625 | 3 | [] | no_license | """Port of the Chipmunk tank demo. Showcase a topdown tank driving towards the
mouse, and hitting obstacles on the way.
"""
import random
from math import *
import pygame
from pygame.locals import *
import pymunk
from pymunk.vec2d import Vec2d
import pymunk.pygame_util
from gym_env import *
# Scale factor from arena millimeters to screen pixels; the 8100x5100 mm
# RoboMaster field becomes an 810x510 pixel window.
RATIO = .10
SCREEN = (int(8100*RATIO), int(5100*RATIO))
class PymunkEnv(RobomasterEnv):
    # Pymunk-backed prototype of the Robomaster arena: static walls and
    # obstacles plus four tank bodies at the spawn corners.
    def __init__(self):
        super().__init__()
        space = pymunk.Space()
        space.iterations = 10
        space.sleep_time_threshold = 0.5
        static_body = space.static_body
        # Create segments around the edge of the screen.
        shape = pymunk.Segment(static_body, (1,1), (1,SCREEN[1]), 1.0)
        space.add(shape)
        shape.elasticity = 0
        shape.friction = 10
        shape = pymunk.Segment(static_body, (SCREEN[0],1), SCREEN, 1.0)
        space.add(shape)
        shape.elasticity = 0
        shape.friction = 10
        shape = pymunk.Segment(static_body, (1,1), (SCREEN[0],1), 1.0)
        space.add(shape)
        shape.elasticity = 0
        shape.friction = 10
        # NOTE(review): this fourth wall starts at (1, SCREEN[0]) -- the
        # screen *width* used as a y coordinate; (1, SCREEN[1]) looks
        # intended for the top edge. Confirm.
        shape = pymunk.Segment(static_body, (1,SCREEN[0]), SCREEN, 1.0)
        space.add(shape)
        shape.elasticity = 0
        shape.friction = 10
        # Static arena obstacles: self.segments (from RobomasterEnv) holds
        # quads of corner points in meters; groups of 4 form one polygon.
        for i in range(0,len(self.segments),4):
            vertices = [(int(s[0]*1000*RATIO), int(s[1]*1000*RATIO)) for s in self.segments[i:i+4]]
            shape = pymunk.Poly(static_body, vertices)
            shape.friction = 10
            shape.color = (0,255,255,255)
            space.add(shape)
        # One (body, control_body) pair per robot at the four spawn corners.
        self._tank_bodies = [generate_tank(space,(500,500)), generate_tank(space,(500,4600)), generate_tank(space,(7600,500)), generate_tank(space,(7600,4600))]
        self.space = space
def generate_tank(space, center):
    # Build one tank: a dynamic box body plus a kinematic "control body";
    # returns (tank_body, tank_control_body). `center` is in arena mm.
    # We joint the tank to the control body and control the tank indirectly by modifying the control body.
    tank_control_body = pymunk.Body(body_type=pymunk.Body.KINEMATIC)
    tank_control_body.position = 320, 240
    space.add(tank_control_body)
    tank_body = pymunk.Body()
    space.add(tank_body)
    # Chassis box, 550x420 mm scaled to pixels.
    shape = pymunk.Poly.create_box(tank_body, (550*RATIO, 420*RATIO), 0.0)
    shape.mass = 1
    shape.friction = 10
    space.add(shape)
    tank_body.position = int(center[0]*RATIO),int(center[1]*RATIO)
    for s in tank_body.shapes:
        s.color = (0,255,100,255)
    # Pivot joint drags the tank toward the control body's position.
    pivot = pymunk.PivotJoint(tank_control_body, tank_body, (0,0), (0,0))
    space.add(pivot)
    pivot.max_bias = 0 # disable joint correction
    pivot.max_force = 10000 # emulate linear friction
    # Gear joint aligns the tank's angle with the control body's angle.
    gear = pymunk.GearJoint(tank_control_body, tank_body, 0.0, 1.0)
    space.add(gear)
    gear.error_bias = 0 # attempt to fully correct the joint each step
    gear.max_bias = 1.2 # but limit it's angular correction rate
    gear.max_force = 50000 # emulate angular friction
    return (tank_body, tank_control_body)
def step(self, action1, action2):
super.step(action1, action2)
    def update(self, dt, surface):
        # Keyboard-driven debug update: read QWEASDZX keys as a steering
        # direction, drive the control body accordingly, and advance the
        # physics by dt. `surface` is currently unused.
        # NOTE(review): __init__ only sets self._tank_bodies (a list of
        # pairs); self.tank_body / self.tank_control_body are never
        # assigned, so this method would raise AttributeError as written.
        tank_body = self.tank_body
        tank_control_body = self.tank_control_body
        self._odom_info = [tuple(list(tank_body.position)+[0,tank_body.angle]) for _ in range(4)]
        self.update_robot_coords()
        space = self.space
        mouse_delta = Vec2d(0,0) # mouse_delta exact length does not matter
        pressed = pygame.key.get_pressed()
        # Eight-way direction from the key grid around WASD.
        if pressed[pygame.K_a]:
            mouse_delta = Vec2d(-1,0)
        if pressed[pygame.K_q]:
            mouse_delta = Vec2d(-1,1)
        if pressed[pygame.K_w]:
            mouse_delta = Vec2d(0,1)
        if pressed[pygame.K_e]:
            mouse_delta = Vec2d(1,1)
        if pressed[pygame.K_d]:
            mouse_delta = Vec2d(1,0)
        if pressed[pygame.K_x]:
            mouse_delta = Vec2d(1,-1)
        if pressed[pygame.K_s]:
            mouse_delta = Vec2d(0,-1)
        if pressed[pygame.K_z]:
            mouse_delta = Vec2d(-1,-1)
        if mouse_delta.get_length_sqrd() > 0:
            # Turn toward whichever tank axis (front or back) is closer to
            # the requested direction, then drive along it.
            if abs((tank_body.angle-mouse_delta.angle)%pi) < abs((tank_body.angle-(mouse_delta.angle+pi)%pi)):
                tank_control_body.angle = mouse_delta.angle
                active_rotation_vector = tank_body.rotation_vector
            else:
                tank_control_body.angle = (mouse_delta.angle+pi)%pi
                active_rotation_vector = tank_body.rotation_vector.cpvrotate(Vec2d(-1,0))
            # Drive forward or backward depending on the facing alignment.
            if mouse_delta.dot(active_rotation_vector) > 0.0:
                direction = 1.0
            else:
                direction = -1.0
            dv = Vec2d(30.0*direction, 0.0)
            tank_control_body.velocity = active_rotation_vector.cpvrotate(dv)
        else:
            # No input: hold the current heading and stop.
            tank_control_body.angle = tank_body.angle
            tank_control_body.velocity = 0,0
        space.step(dt)
# Script entry: build the environment, open the pygame window and run the
# render/update loop at 60 FPS until the window is closed or ESC pressed.
env = PymunkEnv()
pygame.init()
screen = pygame.display.set_mode(SCREEN)
clock = pygame.time.Clock()
draw_options = pymunk.pygame_util.DrawOptions(screen)
font = pygame.font.Font(None, 24)
text = "Use the mouse to drive the tank, it will follow the cursor."
text = font.render(text, 1, pygame.color.THECOLORS["white"])
while True:
    for event in pygame.event.get():
        if event.type == QUIT or pygame.key.get_pressed()[K_ESCAPE]:
            exit()
    screen.fill(pygame.color.THECOLORS["black"])
    # Debug-draw every pymunk shape, then the help text overlay.
    env.space.debug_draw(draw_options)
    screen.blit(text, (15,15))
    fps = 60.0
    env.update(1/fps, screen)
    pygame.display.flip()
    clock.tick(fps)
c7b588eb38ea82f5c0498b83f33359cf02a2cf0b | Python | rodcordeiro/estudosPython | /exercicios/idade.py | UTF-8 | 332 | 4.09375 | 4 | [] | no_license | # -*- coding: utf-8 -*-
#Lista de exercicios - Exercicio1
#Solicitando a idade e verificando se é maior ou menor de idade
nome = input("Qual seu nome? ")
idade = int(input("Qual sua idade? "))
if idade <18:
msg = "menor de idade"
else:
msg = "maior de idade"
print("Olá {}, você tem {} anos e é {}.".format(nome,idade,msg)) | true |
725f4379259d8bc9173aa89fae223ecf24713c9c | Python | Mousumi-Singha/data-pipeline-airflow | /airflow/airflow/plugins/operators/data_quality.py | UTF-8 | 1,747 | 2.796875 | 3 | [] | no_license | from airflow.hooks.postgres_hook import PostgresHook
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
class DataQualityOperator(BaseOperator):
"""
Runs data quality check by passing test SQL query and expected result
:param postgres_conn_id: Redshift connection ID
:param test_query: SQL query to run on Redshift data warehouse
"""
ui_color = '#89DA59'
@apply_defaults
def __init__(self,
postgres_conn_id='',
test_query=[],
*args, **kwargs):
super(DataQualityOperator, self).__init__(*args, **kwargs)
self.postgres_conn_id = postgres_conn_id
self.test_query = test_query
def execute(self, context):
self.log.info("Getting credentials")
postgres = PostgresHook(self.postgres_conn_id)
self.log.info("Running test")
for test in self.test_query:
result = int(postgres.get_first(sql=test['sql'])[0])
# check if equal
if test['op'] == 'eq':
if result != test['val']:
raise AssertionError(f"Check failed: {result} {test['op']} {test['val']}")
# check if not equal
elif stmt['op'] == 'ne':
if result == test['val']:
raise AssertionError(f"Check failed: {result} {test['op']} {test['val']}")
# check if greater than
elif test['op'] == 'gt':
if result <= test['val']:
raise AssertionError(f"Check failed: {result} {test['op']} {test['val']}")
self.log.info(f"Passed check: {result} {test['op']} {test['val']}")
| true |
701aa6b57e8209d9278655f2ba9f8b657a5f7a49 | Python | tsergien/Python-Django-piscine | /d05/d05/ex05/views.py | UTF-8 | 2,442 | 2.796875 | 3 | [] | no_license | from django.shortcuts import render
from ex05.models import Movies
from django.shortcuts import get_object_or_404
# Create your views here.
def init(request):
movies_list = []
try:
db = Movies()
result = 'OK'
except Exception as error:
result = error
return render(request, 'ex05/init.html', {'result': result})
def populate(request):
try:
Movies(episode_nb=1, title='The Phantom Menace', director='George Lucas',\
producer='Rick McCallum', release_date='1999-05-19').save()
Movies(episode_nb=2, title='Attack of the Clones', director='George Lucas',\
producer='Rick McCallum', release_date='2002-05-16').save()
Movies(episode_nb=3, title='Revenge of the Sith', director='George Lucas',\
producer='Rick McCallum', release_date='2005-05-19').save()
Movies(episode_nb=4, title='A New Hope', director='George Lucas',\
producer='Gary Kurtz, Rick McCallum', release_date='1977-05-25').save()
Movies(episode_nb=5, title='The Empire Strikes Back', director='Irvin Kershner',\
producer='Gary Kutz, Rick McCallum', release_date='1980-05-17').save()
Movies(episode_nb=6, title='Return of the Jedi', director='Richard Marquand',\
producer='Howard G. Kazanjian, George Lucas, Rick McCallum', release_date='1983-05-25').save()
Movies(episode_nb=7, title='The Force Awakens', director='J. J. Abrams',\
producer='Kathleen Kennedy, J. J. Abrams, Bryan Burk', release_date='2015-12-11').save()
result = 'OK'
except Exception as error:
print(f'\033[91m Error: {error} \033[91m')
result = str(error)
return render(request, 'ex05/populate.html', {'result': result})
def display(request):
movies_list = []
try:
for movie in Movies.objects.all():
movies_list.append([movie.episode_nb, movie.title, movie.producer, movie.director, movie.opening_crawl, movie.release_date])
except:
movies_list = []
return render(request, 'ex05/display.html', {'movies_list': movies_list, 'is_empty': len(movies_list) == 0 })
def remove(request):
movies_titles = []
try:
for movie in Movies.objects.all():
movies_titles.append(movie.title)
movie_to_delete = request.POST.get('movie_to_delete', "")
instance = get_object_or_404(Movies, title=movie_to_delete)
instance.delete()
except Exception as e:
print(e)
return render(request, 'ex05/remove.html', {'movies_titles': movies_titles}) | true |
2fb618ba92c5ede2d68c0974219dbf27ab33de13 | Python | MartinThoma/python-lernen | /scripts/Tag-2/taschenrechner.py | UTF-8 | 193 | 3.859375 | 4 | [] | no_license | erste_zahl = input("Wie lautet die erste Zahl? ")
erste_zahl = int(erste_zahl)
zweite_zahl = input("Wie lautet die zweite Zahl? ")
zweite_zahl = int(zweite_zahl)
print(erste_zahl * zweite_zahl) | true |
47fb815f781268ef2fb25107e6dc758afad48190 | Python | lee0c/advent-of-code | /day4/day4a.py | UTF-8 | 243 | 3.0625 | 3 | [] | no_license | # Advent of Code, Day 4, Part a
# Lee Cattarin
valid_passphrases = 0
for line in open("input.txt", "r"):
terms = line.split()
termset = set(terms)
if len(terms) == len(termset):
valid_passphrases += 1
print(valid_passphrases)
| true |
012a73a6e735438765b05e6a826bf56c7c493716 | Python | DreamMer2408/python-lagou | /extrct_info.py | UTF-8 | 1,781 | 2.5625 | 3 | [] | no_license | import plotly
import plotly.graph_objs as go
import pymongo
client=pymongo.MongoClient('localhost',27017)
job=client['job']
table=job['jobs']
plotly.tools.set_credentials_file(username='dreamMER',api_key='XHhQzF9YtGeHKxFFzXqd')
def bar_data():
cityList=[]
for i in table.find():
cityList.append(i['城市'])
city_index=list(set(cityList))
post_time=[]
for i in city_index:
post_time.append(cityList.count(i))
trace=go.Bar(
x=city_index,
y=post_time,
name='机器学习岗位'
)
data=[trace]
fig=go.Figure(data=data)
plotly.plotly.plot(fig,filename='机器学习城市分布')
def pie_data():
education_list=['本科','硕士','博士','不限']
under=0
master=0
doctor=0
other=0
for i in table.find():
if i['学位']==education_list[0]:
under+=1
elif i['学位']==education_list[1]:
master+=1
elif i['学位']==education_list[2]:
doctor+=1
else:
other+=1
values=[under,master,doctor,other]
trace=go.Pie(labels=education_list,values=values,
hoverinfo='label+percent',textinfo='value',
textfont=dict(size=20))
plotly.plotly.plot([trace],filename='学历要求')
def bar_data_jingyan():
cityList=[]
for i in table.find():
cityList.append(i['经验'])
city_index=list(set(cityList))
post_time=[]
for i in city_index:
post_time.append(cityList.count(i))
trace=go.Bar(
x=city_index,
y=post_time,
name='经验要求'
)
data=[trace]
fig=go.Figure(data=data)
plotly.plotly.plot(fig,filename='经验要求')
if __name__ == '__main__':
#bar_data()
#pie_data()
bar_data_jingyan()
| true |
0d5ccbd499c47c4f2772f0a675a8bc3d44f29478 | Python | PedroPrisxzma/IA-QLearning | /modules/state.py | UTF-8 | 1,098 | 3.171875 | 3 | [] | no_license | import random
from modules.action import Actions
class State:
def __init__(self, posX: int, posY: int, char: str, steps: int):
self.posX = posX
self.posY = posY
self.steps = steps
self.is_terminal = False if char == "." or char == "#" else True
self.char = char
self.qvalues = {
Actions.UP: 0.0,
Actions.RIGHT: 0.0,
Actions.DOWN: 0.0,
Actions.LEFT: 0.0,
}
def update_qvalue(self, learning_rate, reward, discount_factor, next_state, action):
max_next_state_qvalue = max(next_state.qvalues.values())
self.qvalues[action] = self.qvalues[action] + learning_rate * (
reward + discount_factor * max_next_state_qvalue - self.qvalues[action]
)
def __str__(self):
print("State:")
print(" posX: ", self.posX)
print(" posY: ", self.posY)
print(" steps: ", self.steps)
print(" terminal:", self.is_terminal)
print(" char: ", self.char)
print(" Qvalues: ", self.qvalues)
return "" | true |
6e2a258c217ea7f6e8f6fbb5140b8005038606ea | Python | vauxoo-dev/gist-vauxoo | /sql_injection.py | UTF-8 | 1,755 | 3.421875 | 3 | [] | no_license | def cr_execute(query, *parameters):
"""Get the database-dictionary-data from each field
to validate and parser the type of data"""
param_parsed = []
for param in parameters:
param_type_of_data = seudo_dict_of_data[param]
if not isinstance(param, param_type_of_data):
raise BaseException("I catched a SQL injection! Tontuelo")
if issubclass(param_type_of_data, list):
param = '(' + ','.join(map(str, param)) + ')'
param_parsed.append(param)
return query % tuple(param_parsed)
# Hard code the database-dictionary-data for example.
# This information is stored in database original cursor.
seudo_dict_of_data = {
10: int,
';update injection..;': tuple,
(1, 2, 3): tuple,
}
query = "SELECT amount, id FROM table WHERE amount=%d, id IN %s"
print "*" * 10, "Parameters in the correct way without sql injection"
parameters = (10, (1, 2, 3))
res = cr_execute(query, *parameters)
print "res: ", res
# Console Result:
# ********** Parameters in the correct way without sql injection
# res: SELECT amount, id FROM table WHERE amount=10, id IN (1, 2, 3)
print "*" * 10, "Parameters in the bad way with sql injection"
parameters = (10, ';update injection..;',)
res = cr_execute(query % parameters)
print "res: ", res
# Console Result:
# ********** Parameters in the bad way with sql injection
# res: SELECT amount, id FROM table WHERE amount=10, id IN ;update injection..;
print "*" * 10, "Parameters in the correct way with sql injection"
parameters = (10, ';update injection..;',)
res = cr_execute(query, *parameters)
print "res: ", res
# Console Result:
# ********** Parameters in the correct way with sql injection
# BaseException: I catched a SQL injection! Tontuelo
| true |
8f0546ae27b913468df6c448244ca2b55b86805b | Python | belaltaher8/Project-Euler | /problem2.py | UTF-8 | 372 | 3.4375 | 3 | [] | no_license | sum = 0
firstNum = 1
secondNum = 2
flag = True
listOfFib = []
listOfFib.append(firstNum)
while flag is True:
listOfFib.append(secondNum)
nextFib = firstNum + secondNum
firstNum = secondNum
secondNum = nextFib
if(secondNum > 4000000):
flag = False
for i in listOfFib:
if(i%2==0):
sum = sum + i
print(sum)
| true |
6f5972d82e3077277dc3a0a77eb7208c6dc89c8a | Python | zhangshaoyu1/Couplet-Based-on-Deep-Learning | /seq2seq.py | UTF-8 | 4,372 | 2.59375 | 3 | [
"MIT"
] | permissive | import tensorflow as tf
from tensorflow.contrib import rnn
from tensorflow.python.layers import core as layers_core
# seq2seq模型:序列到序列的转换
def getLayeredCell(layer_size, num_units, input_keep_prob,
output_keep_prob=1.0):
return rnn.MultiRNNCell([rnn.DropoutWrapper(rnn.BasicLSTMCell(num_units),
input_keep_prob, output_keep_prob) for i in range(layer_size)])
# 编码:将输入序列压缩成指定长度的向量,这个向量就可以看成是这个序列的语义
def bi_encoder(embed_input, in_seq_len, num_units, layer_size, input_keep_prob):
# encode input into a vector
bi_layer_size = int(layer_size / 2)
encode_cell_fw = getLayeredCell(bi_layer_size, num_units, input_keep_prob)
encode_cell_bw = getLayeredCell(bi_layer_size, num_units, input_keep_prob)
bi_encoder_output, bi_encoder_state = tf.nn.bidirectional_dynamic_rnn(
cell_fw=encode_cell_fw,
cell_bw=encode_cell_bw,
inputs=embed_input,
sequence_length=in_seq_len,
dtype=embed_input.dtype,
time_major=False)
encoder_output = tf.concat(bi_encoder_output, -1)
encoder_state = []
for layer_id in range(bi_layer_size):
encoder_state.append(bi_encoder_state[0][layer_id])
encoder_state.append(bi_encoder_state[1][layer_id])
encoder_state = tuple(encoder_state)
return encoder_output, encoder_state
# 解码:负责根据语义向量生成指定的序列
def attention_decoder_cell(encoder_output, in_seq_len, num_units, layer_size,
input_keep_prob):
attention_mechanim = tf.contrib.seq2seq.BahdanauAttention(num_units,
encoder_output, in_seq_len, normalize=True)
cell = getLayeredCell(layer_size, num_units, input_keep_prob)
cell = tf.contrib.seq2seq.AttentionWrapper(cell, attention_mechanim,
attention_layer_size=num_units)
return cell
# seq2seq训练
def seq2seq(in_seq, in_seq_len, target_seq, target_seq_len, vocab_size,
num_units, layers, dropout):
if target_seq != None:
input_keep_prob = 1 - dropout
else:
input_keep_prob = 1
projection_layer = layers_core.Dense(vocab_size, use_bias=False)
# 放入输入和目标序列
with tf.device('/cpu:0'):
embedding = tf.get_variable(
name='embedding',
shape=[vocab_size, num_units])
embed_input = tf.nn.embedding_lookup(embedding, in_seq, name='embed_input')
# 编码和解码
encoder_output, encoder_state = bi_encoder(embed_input, in_seq_len,
num_units, layers, input_keep_prob)
decoder_cell = attention_decoder_cell(encoder_output, in_seq_len, num_units,
layers, input_keep_prob)
batch_size = tf.shape(in_seq_len)[0]
init_state = decoder_cell.zero_state(batch_size, tf.float32).clone(
cell_state=encoder_state)
if target_seq != None:
embed_target = tf.nn.embedding_lookup(embedding, target_seq,
name='embed_target')
helper = tf.contrib.seq2seq.TrainingHelper(
embed_target, target_seq_len, time_major=False)
else:
# TODO: start tokens and end tokens are hard code
helper = tf.contrib.seq2seq.GreedyEmbeddingHelper(
embedding, tf.fill([batch_size], 0), 1)
decoder = tf.contrib.seq2seq.BasicDecoder(decoder_cell, helper,
init_state, output_layer=projection_layer)
outputs, _, _ = tf.contrib.seq2seq.dynamic_decode(decoder,
maximum_iterations=100)
if target_seq != None:
return outputs.rnn_output
else:
return outputs.sample_id
# 获取损失
def seq_loss(output, target, seq_len):
target = target[:, 1:]
cost = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=output,
labels=target)
batch_size = tf.shape(target)[0]
loss_mask = tf.sequence_mask(seq_len, tf.shape(output)[1])
cost = cost * tf.to_float(loss_mask)
return tf.reduce_sum(cost) / tf.to_float(batch_size)
| true |
7ecc97c5ae9370ac5ac1c3443d57a04bd4c677a0 | Python | Onapsis/pywars | /example_data/test_run.py | UTF-8 | 549 | 2.515625 | 3 | [
"MIT"
] | permissive | import sys
from pywars.engine import BotPlayer, PyWarsGameController
def main(argv):
bot1_username = "user1"
bot2_username = "user2"
bots = [BotPlayer(bot1_username, argv[0]),
BotPlayer(bot2_username, argv[1])]
pywars_game = PyWarsGameController(bots)
for b in bots:
pywars_game.add_player(b.username, b.script)
pywars_game.run()
json_data = pywars_game.json_game_output()
print(json_data)
sys.exit(0)
if __name__ == "__main__":
main(["bots/bot1/script.py", "bots/bot2/script.py"])
| true |
a2ed149c46e7b24dd03981e7fb5504808328200a | Python | RichOrtiz/lab-07-conditionals | /oddcalc.py | UTF-8 | 537 | 3.4375 | 3 | [] | no_license | firstNum = input("Enter your first number: ")
secondNum = input("Enter your second number: ")
multiplied = int(firstNum) * int(secondNum)
divided = int(firstNum) / int(secondNum)
modulo = int(firstNum) % int(secondNum)
response = input("What operation do you want to do? Multiply(mult), Divide(div), or Modulo(mod)? ")
if (response == "mult"):
print(multiplied)
elif (response == "div"):
print(divided)
elif (response == "mod"):
print(modulo)
else:
print("*** iNvAlId OpErAtIoN ***")
| true |
f9dfab8cea36e55cbbf27b9a854f622eb8d4f515 | Python | JosephLevinthal/Research-projects | /5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/226/users/4163/codes/1618_2936.py | UTF-8 | 136 | 3.015625 | 3 | [] | no_license |
p=float(input("pressao"))
n=float(input("numero de mols"))
t=float(input("temperatura"))
r= 0.082
s= 273.15+t
v=n * r * s / p
print(v)
| true |
3caf781dd514ac5101d2983d9144e25d6977d16a | Python | huangketsudou/leetcode_python | /List/hasGroupsSizeX.py | UTF-8 | 656 | 3.3125 | 3 | [] | no_license | #数组的最大公约数
class Solution:
def hasGroupsSizeX(self, deck: List[int]) -> bool:
from collections import Counter
c=Counter(deck)
number=list(set(c.values()))
number.sort()
for i in range(min(number),1,-1):
flag=True
for j in number:
if j % i!=0:
flag=False
if flag:return True
return False
class Solution(object):
def hasGroupsSizeX(self, deck):
from fractions import gcd
vals = collections.Counter(deck).values()
return reduce(gcd, vals) >= 2
k=Solution()
print(k.hasGroupsSizeX([1]))
| true |
dbdf84bcb0b730bed1fa050c2dab869a1d03f06e | Python | namth2015/python | /1.DataScience/2.BigO/Green18/lec12_empty_room.py | UTF-8 | 806 | 3.734375 | 4 | [] | no_license | class node:
def __init__(self,data = None, next = None):
self.data = data
self.next = next
class linkedlist:
def __init__(self):
self.head = None
self.tail = None
def insertTail(self,data):
p = node(data,None)
if self.head == None:
self.head = self.tail = p
else:
self.tail.next = p
self.tail = p
def print(self):
itr = self.head
while itr:
print(itr.data[0], itr.data[1])
itr = itr.next
n = int(input())
ll = linkedlist()
for i in range(n):
temp = input().split()
ll.insertTail(temp)
ll_empty = linkedlist()
itr = ll.head
while itr:
if itr.data[2] == '0':
ll_empty.insertTail(itr.data)
itr = itr.next
ll_empty.print()
| true |
a890b45eaa6237c6c0659650037e24fcecd9febb | Python | MituaTandris/deduplicatio | /scratch.py | UTF-8 | 111 | 2.84375 | 3 | [] | no_license | handle = open("C:\output.txt", "w")
i=0
while i <1000:
handle.write("This is a test!")
i=i+1
handle.close() | true |
e7d1892ab9eba2d07c1a2d23f16de81e879bfacc | Python | kevinkatsura/TUBES-ALSTRUKDAT | /Tubes daspro/F03.py | UTF-8 | 4,856 | 2.53125 | 3 | [] | no_license | import csv
import globals as g
import F01 as a
csv.register_dialect("titikkoma", delimiter=";")
def Save_File():
if (not(g.Login_success)):
print("Silahkan login terlebih dahulu untuk menggunakan fitur ini.")
return;
Nama_File = input("Masukkan nama File User: ")
File_Save = open(Nama_File, 'w', newline='', encoding="utf8")
writer_Save = csv.writer(File_Save, dialect="titikkoma")
#menyalin data array ke dalam file writer
i=0
while (g.array_File_User[i][0]!='*' and i<1000): #mark ketika membaca '*' atau lebih dari 1000 data
tmp=(g.array_File_User[i][0],g.array_File_User[i][1],g.array_File_User[i][2],g.array_File_User[i][3],
g.array_File_User[i][4],g.array_File_User[i][5],g.array_File_User[i][6],g.array_File_User[i][7])
writer_Save.writerow(tmp)
i+=1
File_Save.close()
Nama_File = input("Masukkan nama File Daftar Wahana: ")
File_Save = open(Nama_File, 'w', newline='', encoding="utf8")
writer_Save = csv.writer(File_Save, dialect="titikkoma")
#menyalin data array ke dalam file writer
i=0
while (g.array_File_Wahana[i][0]!='*' and i < 1000): #mark ketika membaca '*' atau lebih dari 1000 data
tmp=(g.array_File_Wahana[i][0],g.array_File_Wahana[i][1],g.array_File_Wahana[i][2],g.array_File_Wahana[i][3],
g.array_File_Wahana[i][4])
writer_Save.writerow(tmp)
i+=1
File_Save.close()
Nama_File = input("Masukkan nama File Pembelian Tiket: ")
File_Save = open(Nama_File, 'w', newline='', encoding="utf8")
writer_Save = csv.writer(File_Save, dialect="titikkoma")
#menyalin data array ke dalam file writer
i=0
while (g.array_File_Pembelian[i][0]!='*' and i<1000): #mark ketika membaca '*' atau lebih dari 1000 data
tmp=(g.array_File_Pembelian[i][0],g.array_File_Pembelian[i][1],g.array_File_Pembelian[i][2],g.array_File_Pembelian[i][3])
writer_Save.writerow(tmp)
i+=1
File_Save.close()
Nama_File = input("Masukkan nama File Penggunaan Tiket: ")
File_Save = open(Nama_File, 'w', newline='', encoding="utf8")
writer_Save = csv.writer(File_Save, dialect="titikkoma")
#menyalin data array ke dalam file writer
i=0
while (g.array_File_Penggunaan[i][0]!='*' and i <1000): #mark ketika membaca '*' atau lebih dari 1000 data
tmp=(g.array_File_Penggunaan[i][0],g.array_File_Penggunaan[i][1],g.array_File_Penggunaan[i][2],g.array_File_Penggunaan[i][3])
writer_Save.writerow(tmp)
i+=1
File_Save.close()
Nama_File = input("Masukkan nama File Kepemilikan Tiket: ")
File_Save = open(Nama_File, 'w', newline='', encoding="utf8")
writer_Save= csv.writer(File_Save, dialect="titikkoma")
#menyalin data array ke dalam file writer
i=0
while (g.array_File_Kepemilikan[i][0]!='*' and i<1000): #mark ketika membaca '*' atau lebih dari 1000 data
tmp=(g.array_File_Kepemilikan[i][0],g.array_File_Kepemilikan[i][1],g.array_File_Kepemilikan[i][2])
writer_Save.writerow(tmp)
i+=1
File_Save.close()
Nama_File = input("Masukkan nama File Refund Tiket: ")
File_Save = open(Nama_File, 'w', newline='', encoding="utf8")
writer_Save = csv.writer(File_Save, dialect="titikkoma")
#menyalin data array ke dalam file writer
i=0
while (g.array_File_Refund[i][0]!='*' and i < 1000): #mark ketika membaca '*' atau lebih dari 1000 data
tmp=(g.array_File_Refund[i][0],g.array_File_Refund[i][1],g.array_File_Refund[i][2],g.array_File_Refund[i][3])
writer_Save.writerow(tmp)
i+=1
File_Save.close()
Nama_File = input("Masukkan nama File Kritik dan Saran: ")
File_Save = open(Nama_File, 'w', newline='', encoding="utf8")
writer_Save = csv.writer(File_Save, dialect="titikkoma")
#menyalin data array ke dalam file writer
i=0
while (g.array_File_Kritik[i][0]!='*' and i <1000): #mark ketika membaca '*' atau lebih dari 1000 data
tmp=(g.array_File_Kritik[i][0],g.array_File_Kritik[i][1],g.array_File_Kritik[i][2],g.array_File_Kritik[i][3])
writer_Save.writerow(tmp)
i+=1
File_Save.close()
Nama_File = input("Masukkan nama File Kehilangan Tiket: ")
File_Save = open(Nama_File, 'w', newline='', encoding="utf8")
writer_Save = csv.writer(File_Save, dialect="titikkoma")
#menyalin data array ke dalam file writer
i=0
while (g.array_File_Kehilangan[i][0]!='*' and i <1000): #mark ketika membaca '*' atau lebih dari 1000 data
tmp=(g.array_File_Kehilangan[i][0],g.array_File_Kehilangan[i][1],g.array_File_Kehilangan[i][2],g.array_File_Kehilangan[i][3])
writer_Save.writerow(tmp)
i+=1
File_Save.close()
print()
print("Data berhasil disimpan!")
| true |
12b4ce3bdda7808ae393191770b101b8d7702e99 | Python | hgdsraj/318FinalProject | /deprecated-neural-network-more-features/pair_images_by_time.py | UTF-8 | 1,296 | 2.546875 | 3 | [] | no_license | import sys
import shutil
from pyspark.sql import SparkSession, types, functions
from pyspark.sql.types import DoubleType, IntegerType, LongType, FloatType, ArrayType,DataType
spark = SparkSession.builder.appName('Weather Image Classifier').getOrCreate()
assert sys.version_info >= (3, 4) # make sure we have Python 3.4+
assert spark.version >= '2.2' # make sure we have Spark 2.2+
katkam_in_directory = sys.argv[1] # should be katkam-<rgb/greyscaled>-json
weather_in_directory = sys.argv[2] # should be cleaned-weather
out_directory = sys.argv[3] # should be cleaned-katkam-<rgb/greyscale>
def main():
df = spark.read.option('maxColumns', 100000).json(katkam_in_directory)
schema_file = open('schema')
schema_lines = [i.strip() for i in schema_file.readlines()]
schema = types.StructType([types.StructField(i, types.StringType(), False) for i in schema_lines])
schema_file.close()
weather = spark.read.option('maxColumns', 100000).csv(weather_in_directory, schema=schema)
df = df.select(df['time'].alias('Date/Time'), df['image'])
df = weather.join(df, 'Date/Time')
df = df.select(df['Date/Time'], df['image'])
df.write.json(out_directory, mode='overwrite')
shutil.rmtree(katkam_in_directory) #remove tempdir
if __name__=='__main__':
main() | true |
56a63161648771e48be0dc50ad21d3e0a6ba5f2e | Python | RuthAngus/astro-viz | /kawaler.py | UTF-8 | 2,507 | 2.921875 | 3 | [
"MIT"
] | permissive | """
Plot kawaler evolution.
"""
import numpy as np
import matplotlib.pyplot as plt
plotpar = {'axes.labelsize': 30,
'font.size': 25,
'legend.fontsize': 25,
'xtick.labelsize': 25,
'ytick.labelsize': 25,
'text.usetex': True}
plt.rcParams.update(plotpar)
def J2Omega(J, R, M):
return J / (R**2 * M)
def dJdt(Omega, R, M, n=2.5, a=1, c=1):
return Omega**(1 + 4*a*n/3) * R**(2-n) * M**(-n/3)
def dOdt(Omega, R, M):
dJ_dt = dJdt(Omega, R, M)
return J2Omega(dJ_dt, R, M)
def calc_periods(omega_init, R, M, delta_t, niter=100):
"""
Evolve a star fwd in time and calculate its rotation period.
"""
omega = omega_init*1
ps, os = [np.zeros(niter) for i in range(2)]
for i in range(niter):
omega -= dOdt(omega, R, M) * delta_t
os[i] = omega
ps[i] = 2 * np.pi / omega
return os, ps
def J_conserv(R, M):
return R**2 * M
if __name__ == "__main__":
plt.figure(figsize=(16, 9))
niter = 1000
R = np.array([1, 1, 1])
M = np.array([1, 1, 1])
period_init = np.array([3, 4, 4.5])
omega_init = 2 * np.pi / period_init
delta_t = 1e-3
rad_init = np.array([2, 2, 2])
nrad = 20
fudges = [.08, 0, -.05]
periods = np.zeros((3, nrad + niter))
# periods = np.zeros((3, niter))
# For three stars:
for i in range(3):
# Contraction onto the main sequence.
radii = np.linspace(rad_init[i], R[i], nrad)
masses = np.ones_like(radii) * M[i]
p_contract = J_conserv(radii, masses)
correction = period_init[i] - p_contract[-1]
p_contract += correction
periods[i, :nrad] = p_contract
# Kawaler spin down.
os, ps = calc_periods(omega_init[i], R[i], M[i], delta_t, niter=niter)
periods[i, nrad:] = ps
plt.plot((np.log10(np.arange(np.shape(periods)[1]) * delta_t * 5e6)
- np.log10(1e6)) * np.log10(5e6),
periods[i]+fudges[i])
# # Kawaler spin down.
# os, ps = calc_periods(omega_init[i], R[i], M[i], delta_t, niter=niter)
# plt.plot(np.log10(np.arange(len(ps)) * delta_t * 5e6),
# periods[i]+fudges[i])
print(np.log10(5e6))
print(np.log10(np.arange(np.shape(periods)[1]) * delta_t * 5e6 - 1e6)[1])
plt.xlabel("$\log_{10}(\mathrm{Time~[yr]})$")
plt.ylabel("$P_{\mathrm{rot}}~\mathrm{[Days]}$")
plt.xlim(0, np.log10(5e6))
plt.savefig("kawaler")
plt.close()
| true |
94df5c5f4f617ad5f412c5bba84c86cdd6895228 | Python | jhinebau/OpenPNM | /OpenPNM/Fluids/surface_tension.py | UTF-8 | 2,623 | 2.890625 | 3 | [
"MIT"
] | permissive | r"""
===============================================================================
Submodule -- surface_tension
===============================================================================
Some text here?
"""
import scipy as sp
def constant(fluid,network,propname,value,**params):
r"""
Assigns specified constant value
"""
network.set_pore_data(phase=fluid,prop=propname,data=value)
def na(fluid,network,propname,**params):
r"""
Assigns nonsensical, but numerical value of -1.
This ensurse stability of other methods
but introduces the possibility of being misused.
"""
value = -1
network.set_pore_data(phase=fluid,prop=propname,data=value)
def empirical(fluid,network,propname,a=[0],**params):
r"""
Uses a polynomial fit of empirical data to calculate property
"""
T = network.get_pore_data(phase=fluid,prop='temperature')
value = sp.zeros_like(T)
for i in range(0,sp.size(a)):
value = value + a[i]*(T**i)
network.set_pore_data(phase=fluid,prop=propname,data=value)
def Eotvos(fluid,network,propname,k,molar_density='molar_density', **params):
r'''
Documentation for this method is being updated, we are sorry for the inconvenience.
'''
Tc = fluid.get_pore_data(prop='Tc')
T = network.get_pore_data(phase=fluid,prop='temperature')
Vm = 1/network.get_pore_data(phase=fluid,prop=molar_density)
value = k*(Tc-T)/(Vm**(2/3))
network.set_pore_data(phase=fluid,prop=propname,data=value)
def GuggenheimKatayama(fluid,network,propname,K2,n,**params):
r'''
Documentation for this method is being updated, we are sorry for the inconvenience.
'''
T = network.get_pore_data(phase=fluid,prop='temperature')
Pc = fluid.get_pore_data(prop='Pc')
Tc = fluid.get_pore_data(prop='Tc')
sigma_o = K2*Tc**(1/3)*Pc**(2/3)
value = sigma_o*(1-T/Tc)**n
network.set_pore_data(phase=fluid,prop=propname,data=value)
def BrockBird_scaling(fluid,network,propname,sigma_o,To,**params):
r"""
Uses Brock_Bird model to adjust surface tension from it's value at a given reference temperature to temperature of interest
Parameters
----------
fluid : OpenPNM Fluid Object
sigma_o : float
Surface tension at reference temperature (N/m)
To : float
Temperature at reference conditions (K)
"""
Tc = fluid.get_pore_data(prop='Tc')
Ti = network.get_pore_data(phase=fluid,prop='temperature')
Tro = To/Tc
Tri = Ti/Tc
value = sigma_o*(1-Tri)**(11/9)/(1-Tro)**(11/9)
network.set_pore_data(phase=fluid,prop=propname,data=value)
| true |
c243e072188997ac1cd0bb7fa75993a9b0d71940 | Python | OllieJC/caesar | /app.py | UTF-8 | 1,646 | 3.59375 | 4 | [] | no_license | """
This is our web application! It's built using a 'framework' (a bunch of tools) called Flask http://flask.pocoo.org/
"""
from flask import Flask, render_template, request, url_for
app = Flask(__name__)
app.debug = True
@app.route('/')
def index():
return render_template('index.html')
@app.route('/encrypt', methods=['GET', 'POST'])
def encrypt():
if request.method == 'GET':
return render_template('encrypt.html')
#if the user POSTs us some data, let's encrypt it and return the encrypted message!
elif request.method == 'POST':
message = sanitise_text_input(request.form["message"])
key = sanitise_number_input(request.form['key'])
encryptedmessage = encrypt_caesar(message, key)
return render_template('encrypt.html', encrypted_message=encryptedmessage)
@app.route('/decrypt', methods=['GET', 'POST'])
def decrypt():
return render_template('decrypt.html')
# input validation functions
# https://www.owasp.org/index.php/Input_Validation_Cheat_Sheet
# - removes potentially hazardous characters
def sanitise_text_input(s):
import re, html
allowed_characters = "a-zA-Z0-9\_\,\s\.\-\!\?"
s = re.sub(r'[^{}]+'.format(allowed_characters), '', s)
return html.escape(s)
# - removes any non-numeric numbers and converts the string to an int
def sanitise_number_input(n):
import re
allowed_characters = "0-9"
n = re.sub(r'[^{}]+'.format(allowed_characters), '', str(n))
try:
return int(n)
except ValueError:
return 0
def encrypt_caesar(message, key):
#TODO
return
def decrypt_caesar(message):
#TODO
return
| true |
5ec7c74ba58eab05db857c1e08ed9cbbf0f495a9 | Python | lkkushan101/Python_Requests_TAF | /ReusableRequest/PutRequest.py | UTF-8 | 2,223 | 2.578125 | 3 | [] | no_license | import requests
import json
from requests_oauth2 import OAuth2BearerToken
from requests_oauthlib import OAuth1Session
def send_post_request(request_url,request_parameters):
r = requests.post(url=request_url, data = request_parameters)
return r
def send_get_request_with_Auth(request_url,request_parameters, user_name, password):
r = requests.post(request_url, request_parameters, auth=HTTPBasicAuth(user_name, password))
data = json.loads(r.content)
return data
def send_post_request_with_Auth1(request_url,request_parameters,client_key, client_secret, resource_owner_key,resource_owner_secret):
authentication = OAuth1Session('client_key',
client_secret='client_secret',
resource_owner_key='resource_owner_key',
resource_owner_secret='resource_owner_secret')
r = authentication.post(request_url, data=request_parameters)
data = json.loads(r.content)
return data
def send_post_request_with_header(request_url,request_parameters, header):
r = requests.post(url=request_url, data=request_parameters, header={header})
return r
def send_get_request_with_Auth_with_header(request_url,request_parameters, user_name, password,header):
r = requests.post(request_url, request_parameters, auth=HTTPBasicAuth(user_name, password), header={header})
data = json.loads(r.content)
return data
def send_post_request_with_Auth1_with_header(request_url,request_parameters,client_key, client_secret, resource_owner_key,resource_owner_secret,header):
authentication = OAuth1Session('client_key',
client_secret='client_secret',
resource_owner_key='resource_owner_key',
resource_owner_secret='resource_owner_secret')
r = authentication.post(request_url, data=request_parameters, header={header})
data = json.loads(r.content)
return data
def send_put_request_with_Auth2(request_url, access_token, request_parameters):
with requests.Session() as s:
s.auth = OAuth2BearerToken(access_token)
r = s.put(request_url, request_parameters)
r.raise_for_status()
data = r.json() | true |
0da5d56392d963672740029d660db32de00230a7 | Python | lukaszmichalskii/Fourier-Series | /src/fourier_series.py | UTF-8 | 4,134 | 2.953125 | 3 | [] | no_license | import math
import pygame as pygame
from pygame.math import Vector2
from src.discrete_fourier_transform import discrete_fourier_transform
from src.settings import Settings
from src.signal_generator import SignalGenerator
class FourierSeries:
    """Pygame visualiser that redraws a 2-D signal from its DFT epicycles.

    Two chains of rotating circles (one per axis) trace the x and y
    components of the signal; their combined tip position is accumulated in
    ``self.signal`` and drawn as the reconstructed curve.
    """
    def __init__(self):
        pygame.init()
        pygame.event.set_allowed([pygame.QUIT])
        self.clock = pygame.time.Clock()
        self.settings = Settings()
        self.screen = pygame.display.set_mode((self.settings.screen_width, self.settings.screen_height),
                                              flags=pygame.DOUBLEBUF | pygame.HWSURFACE | pygame.NOFRAME)
        self.signal_generator = SignalGenerator()
        self.x_signal = self.signal_generator.generate_signal()[0]
        self.y_signal = self.signal_generator.generate_signal()[1]
        # One DFT per axis; each term holds 'freq', 'amp' and 'phase'.
        self.fourierX = discrete_fourier_transform(self.x_signal)
        self.fourierY = discrete_fourier_transform(self.y_signal)
        self.time = 0
        self.signal = []
    def _check_events(self):
        """Process pending events; shut pygame down on window close."""
        for event in pygame.event.get():
            # Fix: the original compared the Event object itself against the
            # QUIT constant (``event == pygame.QUIT``), which is always False,
            # so the window could never be closed.
            if event.type == pygame.QUIT:
                pygame.quit()
                # Leave the main loop immediately; any further pygame call
                # after pygame.quit() would raise.
                raise SystemExit
    def _set_position(self):
        """Draw both epicycle chains and return their tip positions.

        :return: dict with the x-chain tip ('vector_x'), the y-chain tip
        ('vector_y') and the combined curve point ('vector').
        """
        vector_x = self._draw_fourier(self.settings.screen_width * 5 / 7,
                                      self.settings.screen_height / 5, 0, self.fourierX)
        vector_y = self._draw_fourier(self.settings.screen_width / 7,
                                      self.settings.screen_height / 2, math.pi / 2, self.fourierY)
        vector = Vector2(vector_x.x, vector_y.y)
        return {'vector_x': vector_x, 'vector_y': vector_y, 'vector': vector}
    def _draw_fourier(self, x, y, rotation, fourier):
        """Draw one chain of epicycles starting at (x, y).

        :param rotation: extra phase offset (pi/2 rotates the chain used for
        the y axis).
        :param fourier: DFT terms, each a dict with 'freq', 'amp', 'phase'.
        :return: tip position of the chain as a Vector2.
        """
        for term in fourier:
            # Centre of the current circle, before adding this term.
            prev_x = x
            prev_y = y
            freq = term.get('freq')
            radius = term.get('amp')
            phase = term.get('phase')
            x += radius * math.cos(freq * self.time + phase + rotation)
            y += radius * math.sin(freq * self.time + phase + rotation)
            # Idiom: use the ``+`` operator instead of calling __add__ directly.
            pygame.draw.circle(self.screen,
                               self.settings.circle_color,
                               self.settings.translate + Vector2(prev_x, prev_y),
                               radius, 1)
            pygame.draw.line(self.screen, self.settings.line_color,
                             self.settings.translate + Vector2(prev_x, prev_y),
                             self.settings.translate + Vector2(x, y), 1)
        return Vector2(x, y)
    def _draw_signal(self, surface, signal, color):
        """Draw the reconstructed curve through all recorded points."""
        for i in range(len(signal) - 1):
            pygame.draw.line(surface, color,
                             (signal[i].x, signal[i].y), (signal[i + 1].x, signal[i + 1].y))
    def _draw_position_lines(self, surface, vector_x, vector_y, vector, color):
        """Draw guide lines from each chain tip to the current curve point."""
        pygame.draw.line(surface, color, (vector_x.x, vector_x.y), (vector.x, vector.y))
        pygame.draw.line(surface, color, (vector_y.x, vector_y.y), (vector.x, vector.y))
        # Epicycles control: after one full revolution restart the trace.
        # NOTE(review): this reset logically belongs in _draw(); kept here to
        # preserve the original call order.
        if self.time > math.pi * 2:
            self.time = 0
            self.signal = []
    def _draw(self):
        """Advance time by one DFT step and render a single frame."""
        vectors = self._set_position()
        vector_x = vectors['vector_x']
        vector_y = vectors['vector_y']
        vector = vectors['vector']
        self.signal.insert(0, vector)
        # drawing section
        self._draw_position_lines(self.screen, vector_x, vector_y, vector, self.settings.line_color)
        self._draw_signal(self.screen, self.signal, self.settings.line_color)
        dt = 2 * math.pi / len(self.fourierY)
        self.time += dt
    def run(self):
        """Main loop: poll events, clear, draw, cap at 60 FPS, flip."""
        while True:
            self._check_events()
            self.screen.fill(self.settings.bg_color)
            self._draw()
            self.clock.tick(60)
            pygame.display.flip()
if __name__ == '__main__':
    # Entry point: build the visualiser and start its render loop.
    fs = FourierSeries()
    fs.run()
| true |
905af97897b80ddc8fcf7a6f32cfa7a72d8a4517 | Python | Aasthaengg/IBMdataset | /Python_codes/p03037/s770539167.py | UTF-8 | 297 | 2.75 | 3 | [] | no_license | n, m = map(int, input().split())
# Read the m intervals [l, r].  The answer is the size of the intersection
# of all of them; the upper bound n (read on the previous line) is unused.
LR = [list(map(int, input().split())) for _ in range(m)]
# Split into left and right endpoints.  (The unused bound-method aliases
# L_append / R_append were dead code and have been removed.)
L = [l for l, r in LR]
R = [r for l, r in LR]
# Cards common to every interval form the range [max(L), min(R)].
min_R = min(R)
max_L = max(L)
if min_R >= max_L:
    ans = min_R + 1 - max_L
else:
    ans = 0
print(ans)
2c9b928d6c0e2dc3ad76a90b637aaf3ebf6bcdcb | Python | kkelfkai/web_auto_pro | /cases/qq/test_qq_ddt_excel.py | UTF-8 | 917 | 2.515625 | 3 | [] | no_license | import unittest
import requests
import ddt
import os
from api.qqfortune.qqfortune import QQFortune
from common.readexcel import ExcelUtil
# Locate the test-data workbook relative to the project root (three directory
# levels above this file) and load every row as a dict; each row becomes one
# ddt-driven test case for the class below.
p = os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
filepath = os.path.join(p, "data", "test_data_qqfortune.xlsx")
sheetName = "Sheet1"
data = ExcelUtil(filepath, sheetName)
test_datas = data.dict_data()
print(test_datas)
@ddt.ddt
class TestQQFortune(unittest.TestCase):
    """Data-driven tests for the QQ fortune API: one case per Excel row."""
    def setUp(self):
        # Fresh HTTP session and API wrapper for every test case.
        self.s = requests.session()
        self.qq = QQFortune(self.s)
    def tearDown(self):
        self.s.close()
    @ddt.data(*test_datas)
    def test_qqfortune(self, data):
        # 1. Prepare the test data.
        key = data["key"]
        qq = data["qq"]
        exp = bool(data["exp"])
        # 2. Pass in the parameters & execute.
        act = self.qq.qqfortune(key, qq)
        self.assertTrue(exp == act)
if __name__ == '__main__':
    unittest.main()
ea0193a1c81344e10ac3d01cbdb4d32356a1388f | Python | J-AugustoManzano/livro_Python | /ExerciciosAprendizagem/Cap10/c10ex18.py | UTF-8 | 181 | 2.59375 | 3 | [] | no_license | try:
with open("ARQBIN05.BIN", "rb").close() as arquivo:
print("*** O arquivo foi aberto ***")
except FileNotFoundError:
print("*** O arquivo não existe ***")
| true |
8cd222311493d2f9259391db805cafb971cd3855 | Python | cook-cookie/nlp100knock | /sec9/nlp89.py | UTF-8 | 812 | 2.828125 | 3 | [] | no_license | import pickle
from scipy import io
import numpy as np
from nlp87 import cos_sim
fname_dict_index_t = 'dict_index_t'
fname_matrix_x300 = 'matrix_x300'
# Load the term-to-row-index dictionary.
# NOTE(review): pickle.load is only safe on trusted, locally generated files.
with open(fname_dict_index_t, 'rb') as data_file:
    dict_index_t = pickle.load(data_file)
# Load the word-vector matrix (one 300-dim row per term).
matrix_x300 = io.loadmat(fname_matrix_x300)['matrix_x300']
# Cosine similarity of every term against vec("Spain") - vec("Madrid") + vec("Athens").
vec = matrix_x300[dict_index_t['Spain']] - matrix_x300[dict_index_t['Madrid']] + matrix_x300[dict_index_t['Athens']]
distances = [cos_sim(vec, matrix_x300[i]) for i in range(0, len(dict_index_t))]
# Show the 10 most similar terms (argsort ascending, read from the back).
# Relies on dict key order matching the row indices stored in dict_index_t.
index_sorted = np.argsort(distances)
keys = list(dict_index_t.keys())
for index in index_sorted[:-11:-1]:
    print('{}\t{}'.format(keys[index], distances[index]))
| true |
1cdaeee5c4a108b6b132526d017a66abf014c6d1 | Python | thatcr/python-xll | /xll/test/test_register.py | UTF-8 | 762 | 3.03125 | 3 | [] | no_license | import inspect
from ..register import typetext_from_signature, ctype_from_signature
def test_typecode():
    """Type text and C type strings derive correctly from Python signatures."""

    def add(a: int, b: int) -> int:
        """
        Add two numbers together.
        Parameters:
        a:int The first number
        b:int The second number
        Returns:
        int: The sum of the numbers.
        """
        ...

    typed_sig = inspect.signature(add)
    # Fully annotated int parameters/return map to Excel's J (signed long) codes.
    assert typetext_from_signature(typed_sig) == "JJJ"
    expected_ctype = "signed long int (*)(signed long int, signed long int)"
    assert ctype_from_signature(typed_sig) == expected_ctype

    def untyped(a, b):
        ...

    untyped_sig = inspect.signature(untyped)
    # Missing annotations fall back to generic XLOPER12 handling.
    assert typetext_from_signature(untyped_sig) == "UUU"
    assert ctype_from_signature(untyped_sig) == "LPXLOPER12 (*)(LPXLOPER12, LPXLOPER12)"
| true |
d8f7e256ac335fdf777ae01b16dd9177ad26966a | Python | Kirchenprogrammierer/Cheats | /CD zum Buch "Einstieg in Python"/Programmbeispiele/GUI/gui_radio_ausfuehrung.py | UTF-8 | 779 | 3.375 | 3 | [] | no_license | import tkinter
def ende():
    """Close the main window and end the application."""
    main.destroy()
def anzeigen():
    """Display the currently selected colour in the label."""
    lb["text"] = "Auswahl: " + farbe.get()
main = tkinter.Tk()
# Widget variable shared by all radio buttons; starts on "rot" (red).
farbe = tkinter.StringVar()
farbe.set("rot")
# Group of radio buttons, one per colour, all bound to `farbe`.
rb1 = tkinter.Radiobutton(main, text="rot",
    variable=farbe, value="rot", command=anzeigen)
rb1.pack()
rb2 = tkinter.Radiobutton(main, text="gelb",
    variable=farbe, value="gelb", command=anzeigen)
rb2.pack()
rb3 = tkinter.Radiobutton(main, text="blau",
    variable=farbe, value="blau", command=anzeigen)
rb3.pack()
# Label showing the current selection.
lb = tkinter.Label(main, text = "Auswahl:")
lb.pack()
bende = tkinter.Button(main, text = "Ende",
    command = ende)
bende.pack()
main.mainloop()
| true |
46d488900687d7dae63db302fc2a711ff122a6d2 | Python | cheeyeo/deep_learning_computer_vision | /chapter29_facial_recognition/vgg_face2.py | UTF-8 | 399 | 2.90625 | 3 | [] | no_license | # Example of loading a VGG Face2 model
# keras_vggface provides 3 pre-trained VGGFace models:
# VGGFace(model='vgg16') => VGGFace model
# VGGFace(model='resnet50') => VGGFace2 model
# VGGFace(model='senet50') => VGGFace2 model
from keras_vggface.vggface import VGGFace
# Loads the ResNet50 VGGFace2 model; downloads pretrained weights on first
# use, so network access is presumably required -- confirm before running offline.
model = VGGFace(model='resnet50')
print('Input shape: {}'.format(model.inputs))
print('Output shape: {}'.format(model.outputs))
450ec4ee192d5b60d5e73b73c72616c98040bfc4 | Python | NREL/ditto | /tests/test_opendss_writer.py | UTF-8 | 6,533 | 2.578125 | 3 | [
"BSD-3-Clause"
] | permissive | # -*- coding: utf-8 -*-
"""
test_opendss_writer
----------------------------------
Tests for writing functions of the OpenDSS writer
"""
import logging
import os
import six
import tempfile
import pytest
import pytest as pt
logger = logging.getLogger(__name__)
def test_parse_wire():
    """A Wire's attributes map onto the expected OpenDSS WireData fields."""
    from ditto.store import Store
    from ditto.models.wire import Wire
    from ditto.writers.opendss.write import Writer

    store = Store()
    phase_wire = Wire(
        store,
        phase="A",
        nameclass="wire_test_phase",
        diameter=5,
        gmr=10,
        ampacity=500,
        emergency_ampacity=1000,
        resistance=5,
    )

    writer = Writer(output_path=tempfile.gettempdir())
    parsed = writer.parse_wire(phase_wire)

    expected = {
        "GMRac": 10,
        "Diam": 5,
        "normamps": 500,
        "emergamps": 1000,
        "Rac": 5,
    }
    for field, value in expected.items():
        assert parsed[field] == value
def test_parse_concentric_neutral_cable():
    """
    Tests the parsing of concentric neutral cables.
    The example used is:
    New CNDATA.250_1/3 k=13 DiaStrand=0.064 Rstrand=2.816666667 epsR=2.3
    ~ InsLayer=0.220 DiaIns=1.06 DiaCable=1.16 Rac=0.076705 GMRac=0.20568 diam=0.573
    ~ Runits=kft Radunits=in GMRunits=in
    """
    from ditto.store import Store
    from ditto.models.wire import Wire
    from ditto.writers.opendss.write import Writer
    m = Store()
    # DiTTo stores everything in SI units, hence the inch/kft -> meter
    # conversions noted per attribute below.
    wire = Wire(
        m,
        phase="A",
        nameclass="250_1/3",
        diameter=0.0145542,  # 0.573 inches in meters
        gmr=0.005224272,  # 0.20568 inches in meters
        ampacity=500,
        emergency_ampacity=1000,
        resistance=0.000251656824147,  # 0.076705 ohm/kft in ohm/meter
        concentric_neutral_resistance=0.00924103237205,  # 2.816666667 ohm/kft in ohm/meter
        concentric_neutral_diameter=0.0016256,  # 0.064 inches in meters
        concentric_neutral_outside_diameter=0.029464,  # 1.16 inches in meters
        concentric_neutral_nstrand=13,
    )
    output_path = tempfile.gettempdir()
    w = Writer(output_path=output_path)
    parsed_cable = w.parse_cable(wire)
    # The writer must keep the SI values and emit meter units throughout.
    assert parsed_cable["k"] == 13
    assert parsed_cable["DiaStrand"] == 0.0016256
    assert parsed_cable["Rstrand"] == 0.00924103237205
    assert parsed_cable["Diam"] == 0.0145542
    assert parsed_cable["DiaCable"] == 0.029464
    assert parsed_cable["Rac"] == 0.000251656824147
    assert parsed_cable["GMRac"] == 0.005224272
    assert parsed_cable["Runits"] == "m"
    assert parsed_cable["Radunits"] == "m"
    assert parsed_cable["GMRunits"] == "m"
def setup_line_test():
    """Setup a line with 4 wires.

    Returns a Line named "l1" with three phase wires (A, B, C) and one
    neutral; wire_C deliberately omits ``gmr`` (see inline comment) so
    downstream writers are exercised with a partially specified wire.
    """
    from ditto.store import Store
    from ditto.models.line import Line
    from ditto.models.wire import Wire
    m = Store()
    wire_A = Wire(
        m,
        phase="A",
        nameclass="wire_test_phase",
        diameter=5,
        gmr=10,
        ampacity=500,
        emergency_ampacity=1000,
        resistance=5,
    )
    wire_B = Wire(
        m,
        phase="B",
        nameclass="wire_test_phase",
        diameter=5,
        gmr=10,
        ampacity=500,
        emergency_ampacity=1000,
        resistance=5,
    )
    wire_C = Wire(
        m,
        phase="C",
        nameclass="wire_test_phase",
        diameter=5,  # Missing GMR
        ampacity=500,
        emergency_ampacity=1000,
        resistance=5,
    )
    wire_N = Wire(
        m,
        phase="N",
        nameclass="wire_test_neutral",
        diameter=5,
        gmr=10,
        ampacity=500,
        emergency_ampacity=1000,
        resistance=5,
    )
    line = Line(m, name="l1", wires=[wire_A, wire_B, wire_C, wire_N])
    return line
def test_write_wiredata():
    """write_wiredata() runs without error on a representative line."""
    from ditto.writers.opendss.write import Writer

    writer = Writer(output_path=tempfile.gettempdir())
    writer.write_wiredata([setup_line_test()])
def get_property_from_dss_string(string, property):
    """Return the value(s) of *property* found in a DSS definition string.

    Matching is case-insensitive and values are lower-cased. Returns a
    single string when the property occurs once, a list (in order of
    appearance) when it occurs several times, and None when it is absent.
    """
    wanted = property.lower()
    matches = []
    for token in string.split(" "):
        if "=" not in token:
            continue
        parts = [piece.strip() for piece in token.split("=")]
        if parts[0].lower() == wanted:
            matches.append(parts[1].lower())
    if not matches:
        return None
    if len(matches) == 1:
        return matches[0]
    return matches
def test_get_property_from_dss_string():
    """Single, repeated, and missing properties are all handled."""
    dss_line = (
        "New LineGeometry.Geometry_1 Nconds=4 Nphases=3 Units=km Cond=1 Wire=wire_test_phase Normamps=500.0 Emergamps=1000.0 Cond=2 Wire=wire_test_phase Normamps=500.0 Emergamps=1000.0 Cond=3 Wire=wire_test_phase_1 Normamps=500.0 Emergamps=1000.0 Cond=4 Wire=wire_test_neutral Normamps=500.0 Emergamps=1000.0 Reduce=y\n"
    )
    # Single occurrences come back as plain (lower-cased) strings.
    assert get_property_from_dss_string(dss_line, "reduce") == "y"
    assert get_property_from_dss_string(dss_line, "Nconds") == "4"
    assert get_property_from_dss_string(dss_line, "nphases") == "3"
    # Repeated occurrences come back as a list, in order of appearance.
    assert get_property_from_dss_string(dss_line, "Emergamps") == ["1000.0"] * 4
    # Unknown properties yield None.
    assert get_property_from_dss_string(dss_line, "crazy_property") is None
def test_write_linegeometry():
    """Test the write_linegeometry() method.

    Writes WireData first (the geometry references it), then checks the
    emitted LineGeometry.dss line field-by-field via the string helper.
    """
    from ditto.writers.opendss.write import Writer
    line = setup_line_test()
    output_path = tempfile.gettempdir()
    w = Writer(output_path=output_path)
    w.write_wiredata([line])
    w.write_linegeometry([line])
    with open(os.path.join(output_path, "LineGeometry.dss"), "r") as fp:
        lines = fp.readlines()
    # 4 conductors (A, B, C, N) but only 3 phases; units are SI meters.
    assert get_property_from_dss_string(lines[0], "reduce") == "y"
    assert get_property_from_dss_string(lines[0], "nconds") == "4"
    assert get_property_from_dss_string(lines[0], "nphases") == "3"
    assert get_property_from_dss_string(lines[0], "units") == "m"
    assert get_property_from_dss_string(lines[0], "normamps") == [
        "500.0",
        "500.0",
        "500.0",
        "500.0",
    ]
    assert get_property_from_dss_string(lines[0], "Emergamps") == [
        "1000.0",
        "1000.0",
        "1000.0",
        "1000.0",
    ]
def test_write_linecodes():
    """write_linecodes() runs without error on a representative line."""
    from ditto.writers.opendss.write import Writer

    writer = Writer(output_path=tempfile.gettempdir())
    writer.write_linecodes([setup_line_test()])
| true |
6ea734a718ca5ecc8f760c195df95316e1ce2a33 | Python | flexgp/novelty-prog-sys | /src/PonyGE2/src/scripts/stats_parser.py | UTF-8 | 10,319 | 2.875 | 3 | [
"MIT",
"GPL-3.0-only",
"GPL-1.0-or-later"
] | permissive | from sys import path
path.append("../src")
from utilities.algorithm.general import check_python_version
check_python_version()
import getopt
import sys
from os import getcwd, listdir, path, sep
import matplotlib
import numpy as np
np.seterr(all="raise")
import pandas as pd
matplotlib.use('Agg')
import matplotlib.pyplot as plt
plt.rc('font', family='Times New Roman')
def help_message():
    """Print usage instructions for this script to stdout."""
    header_lines = [
        "Welcome to PonyGE's post-run stats parser.",
        "-------------------",
        "The following are the available command line args.",
        "You must specify an experiment name.",
    ]
    option_rows = [
        ["\t--help:", "Shows this help message."],
        ["\t--experiment_name:", "The name of the containing folder in which target runs are saved, e.g. in results/[EXPERIMENT_NAME]."],
    ]
    print("\n".join(header_lines))
    # Left-justify the flag column so the descriptions line up.
    flag_width = max(len(row[0]) for row in option_rows)
    for row in sorted(option_rows):
        print(" ".join(cell.ljust(flag_width) for cell in row))
def parse_opts(command_line_args):
    """
    Parses command line arguments and returns usable variables which are used
    as inputs for other functions in this file.

    :param command_line_args: flags passed in from the command line at
    execution (argv-style; element 0 is the program name and is skipped).
    :return: experiment_name: the name of the containing folder of results.
    """
    usage_error = ("scripts.parse_stats.parse_opts\n"
                   "Error: in order to parse stats you need to specify the location"
                   " of the target stats files.\n"
                   " Run python stats_parser.py --help for more info.")

    try:
        opts, _args = getopt.getopt(command_line_args[1:], "",
                                    ["help", "experiment_name="])
    except getopt.GetoptError as err:
        # Surface the underlying getopt complaint before the usage message.
        print(str(err))
        raise Exception(usage_error)

    if not opts:
        # No recognised flags at all: the experiment name is mandatory.
        raise Exception(usage_error)

    experiment_name = None
    for opt, arg in opts:
        if opt == "--help":
            # Print help message and stop.
            help_message()
            exit()
        elif opt == "--experiment_name":
            # Containing folder for multiple runs under ./results.
            experiment_name = arg

    return experiment_name
def parse_stats_from_runs(experiment_name):
    """
    Analyses a list of given stats from a group of runs saved under an
    "experiment_name" folder. Creates a summary .csv file which can be used by
    plotting functions in utilities.save_plot. Saves a file of the format:

        run0_gen0       run1_gen0       . . .   run(n-1)_gen0
        run0_gen1       run1_gen1       . . .   run(n-1)_gen1
        ...
        run0_gen(n)     run1_gen(n)     . . .   run(n-1)_gen(n)

    Generated file is compatible with

        utilities.save_plot.save_average_plot_across_runs()

    Also writes a full_stats.csv with the per-generation mean and standard
    deviation of every stat, and a .pdf plot per stat.

    :param experiment_name: The name of a collecting folder within the
    ./results folder which holds multiple runs. Each run folder is assumed
    to contain a tab-separated stats.tsv file.
    :return: Nothing.
    """
    # Since results files are not kept in source directory, need to escape
    # one folder.
    file_path = path.join(getcwd(), "..", "results")
    # Check for use of experiment manager.
    if experiment_name:
        file_path = path.join(file_path, experiment_name)
    else:
        s = "scripts.parse_stats.parse_stats_from_runs\n" \
            "Error: experiment name not specified."
        raise Exception(s)
    # Find list of all runs contained in the specified folder.
    runs = [run for run in listdir(file_path) if
            path.isdir(path.join(file_path, run))]
    # Place to store the header for full stats file.
    header = ""
    # Array to store all stats
    full_stats = []
    # Get list of all stats to parse. Check stats file of first run from
    # runs folder.
    ping_file = path.join(file_path, str(runs[0]), "stats.tsv")
    # Load in data and get the names of all stats.
    stats = list(pd.read_csv(ping_file, sep="\t"))
    # Make list of stats we do not wish to parse.
    no_parse_list = ["gen", "total_inds", "time_adjust"]
    for stat in [stat for stat in stats if stat not in no_parse_list and
                 not stat.startswith("Unnamed")]:
        # Iterate over all stats.
        print("Parsing", stat)
        summary_stats = []
        # Iterate over all runs
        for run in runs:
            # Get file name
            file_name = path.join(file_path, str(run), "stats.tsv")
            # Load in data
            data = pd.read_csv(file_name, sep="\t")
            try:
                # Try to extract specific stat from the data.
                if list(data[stat]):
                    summary_stats.append(list(data[stat]))
                else:
                    s = "scripts.parse_stats.parse_stats_from_runs\n" \
                        "Error: stat %s is empty for run %s." % (stat, run)
                    raise Exception(s)
            except KeyError:
                # The requested stat doesn't exist.
                s = "scripts.parse_stats.parse_stats_from_runs\nError: " \
                    "stat %s does not exist in run %s." % (stat, run)
                raise Exception(s)
        try:
            # Generate numpy array of all stats
            # NOTE(review): this presumably requires every run to have the
            # same number of generations; ragged runs would produce an
            # object array and break nanmean -- confirm.
            summary_stats = np.array(summary_stats)
            # Append mean column for this stat to header.
            header = header + stat + "_mean,"
            summary_stats_mean = np.nanmean(summary_stats, axis=0)
            full_stats.append(summary_stats_mean)
            # Append std column for this stat to header.
            header = header + stat + "_std,"
            summary_stats_std = np.nanstd(summary_stats, axis=0)
            full_stats.append(summary_stats_std)
            summary_stats = np.transpose(summary_stats)
            # Save stats as a .csv file.
            np.savetxt(path.join(file_path, (stat + ".csv")), summary_stats,
                       delimiter=",")
            # Graph stat by calling graphing function.
            save_average_plot_across_runs(path.join(file_path, (stat +
                                                                ".csv")))
        except FloatingPointError:
            # np.seterr(all="raise") at module level turns float warnings
            # (e.g. all-NaN slices) into exceptions; skip such stats.
            print("scripts.stats_parser.parse_stats_from_runs\n"
                  "Warning: FloatingPointError encountered while parsing %s "
                  "stats." % (stat))
    # Convert and rotate full stats
    full_stats = np.array(full_stats)
    full_stats = np.transpose(full_stats)
    # Save full stats to csv file.
    np.savetxt(path.join(file_path, "full_stats.csv"), full_stats,
               delimiter=",", header=header[:-1])
def save_average_plot_across_runs(filename):
    """
    Saves an average plot of multiple runs. Input file data must be of the
    format:

        run0_gen0       run1_gen0       . . .   run(n-1)_gen0
        run0_gen1       run1_gen1       . . .   run(n-1)_gen1
        ...
        run0_gen(n)     run1_gen(n)     . . .   run(n-1)_gen(n)

    The required file can be generated using

        stats.parse_stats.parse_stats_from_runs()

    Generates a .pdf graph of average value with standard deviation.

    :param filename: the full file name of a .csv file containing the fitnesses
    of each generation of multiple runs. Must be comma separated.
    :return: Nothing.
    """
    # Get stat name from the filename. Used later for saving graph.
    stat_name = filename.split(sep)[-1].split(".")[0]
    # Load in data.
    # NOTE(review): [:, :-1] discards the final column. That is correct only
    # if the generating file ends each row with a trailing delimiter;
    # otherwise this silently drops the last run -- confirm.
    data = np.genfromtxt(filename, delimiter=',')[:, :-1]
    # Generate average and standard deviations of loaded data (across runs).
    ave = np.nanmean(data, axis=1)
    std = np.nanstd(data, axis=1)
    # Calculate max and min of standard deviation.
    stdmax = ave + std
    stdmin = ave - std
    # Generate generation range over which data is to be graphed.
    max_gens = len(ave)
    r = range(1, max_gens + 1)
    # Initialise figure plot.
    fig = plt.figure()
    ax1 = fig.add_subplot(1, 1, 1)
    # Plot data and standard deviation infill.
    ax1.plot(r, ave, color="blue")
    ax1.fill_between(r, stdmin, stdmax, color="DodgerBlue", alpha=0.5)
    # Set x-axis limits.
    plt.xlim(0, max_gens + 1)
    # Set title and axes.
    plt.title("Average " + stat_name)
    plt.xlabel('Generation', fontsize=14)
    plt.ylabel('Average ' + stat_name, fontsize=14)
    # Save graph under the same name as the original .csv file but with a
    # .pdf extension instead.
    new_filename = filename[:-3] + "pdf"
    plt.savefig(str(new_filename))
    plt.close()
if __name__ == "__main__":
    # Get experiment name from the command line parser.
    experiment_name = parse_opts(sys.argv)
    # Call statistics parser for experiment name.
    parse_stats_from_runs(experiment_name)
| true |
6f1de0530cc4c07299908189f172822e35f06a21 | Python | AMDonati/SMC-T | /src/train/SMC_loss.py | UTF-8 | 4,147 | 2.828125 | 3 | [] | no_license | import tensorflow as tf
import math
#TODO: redo the mathematical computation of the loss from the log likelihood to check that the formulas implemented are correct.
#TODO: ask Florian if I need to add a @tf.function to this function. cf https://www.tensorflow.org/tutorials/generative/cvae as an example.
def compute_SMC_ll_one_layer(list_means):
    '''
    compute log p(z_j / z_(0:j-1), Y(0:j-1)) for a single layer.
    :param list_means: list of four tensors [mean_z, mean_k, mean_v, mean_q],
    each of shape (B,P,S,D) -- TODO confirm shapes with the SMC Cell.
    :return:
    a tensor of shape (B,P,S): the sum over the depth dimension D of the
    squared means of z, k, v and q.
    NOTE(review): only the quadratic term of the Gaussian log-likelihood is
    computed; the log-det / constant terms are commented out below.
    '''
    mean_z = list_means[0]
    mean_k = list_means[1]
    mean_v = list_means[2]
    mean_q = list_means[3]
    # (B,P,S,D) -> (B,D,P,S) so that axis=1 in the reductions below is D.
    mean_z = tf.transpose(mean_z, perm=[0, 3, 1, 2])  # shape (B,D,P,S)
    mean_k = tf.transpose(mean_k, perm=[0, 3, 1, 2])  # shape (B,D,P,S)
    mean_v = tf.transpose(mean_v, perm=[0, 3, 1, 2])  # shape (B,D,P,S)
    mean_q = tf.transpose(mean_q, perm=[0, 3, 1, 2])  # shape (B,D,P,S)
    epsilon_part= tf.reduce_sum(tf.multiply(mean_z, mean_z), axis=1) + tf.reduce_sum(tf.multiply(mean_k, mean_k), axis=1) + tf.reduce_sum(tf.multiply(mean_v, mean_v), axis=1) + tf.reduce_sum(tf.multiply(mean_q, mean_q), axis=1)
    #det_part=tf.linalg.logdet(sigma) # 2*math.pi removed for easier debugging.
    #ll_one_layer = det_part + epsilon_part
    ll_one_layer = epsilon_part
    return ll_one_layer # (B,P,S)
def compute_SMC_log_likelihood(list_epsilon, list_sigma, sampling_weights):
    '''
    compute the SMC_log_likelihood for the multi_layer case by looping over layers.
    :param list_epsilon: list of epsilons containing the epsilon of every layer.
    :param list_sigma: list of sigmas containing the (same) sigma for each layer.
    :param sampling_weights: shape (B,P,1): w_T of the last_timestep from the SMC Cell/Layer.
    :return:
    A scalar computing log p (z_j / z_(0:j-1), Y_(0:j-1))

    NOTE(review): this function is broken mid-refactor -- see the two notes
    below. It cannot currently run to completion; the intended fix (adapt the
    per-layer call to the new list_means API and restore the commented-out
    weighted-sum block) needs the author's confirmation.
    '''
    ll_all_layers=[]
    for epsilon, sigma in zip(list_epsilon, list_sigma):
        # NOTE(review): compute_SMC_ll_one_layer now takes a single
        # ``list_means`` argument; this two-argument call raises TypeError.
        ll_one_layer=compute_SMC_ll_one_layer(epsilon, sigma) # shape (B,P,S)
        ll_all_layers.append(ll_one_layer)
    # stacking each loss by layer on a tensor of shape (B,L,P,S)
    total_ll=tf.stack(ll_all_layers, axis=1) # shape (B,L,P,S)
    # multiply by -1/2 and suming over layer dimension:
    total_ll=tf.reduce_sum(tf.scalar_mul(-1/2, total_ll), axis=1) # shape (B,P,S)
    # # mean over the seq_len dim
    # total_ll=tf.reduce_mean(total_ll, axis=-1) # shape (B,P)
    #
    # # weighted sum over particles dim using sampling_weights:
    # if len(tf.shape(sampling_weights)) == 3:
    #   sampling_weights = tf.squeeze(sampling_weights, axis=-1)
    # SMC_loss = tf.reduce_sum(sampling_weights * total_ll, axis=-1) # dim (B,)
    #
    # # mean over batch dim:
    # SMC_loss=tf.reduce_mean(SMC_loss, axis=0)
    # NOTE(review): SMC_loss is never assigned -- the block above that would
    # compute it is commented out -- so this return raises NameError.
    return SMC_loss
if __name__ == "__main__":
    # Manual smoke tests for the two loss functions above.
    # NOTE(review): both calls below use the *old* keyword signatures
    # (epsilon=, sigma=, list_epsilon=, list_sigma=) which no longer match
    # the refactored functions; this block raises TypeError as written.
    B=8
    P=5
    S=3
    L=4
    D=12
    sigma_scalar=1
    #--------------------------test of compute_ll_one_layer---------------------------------------------------
    sigma_tensor = tf.constant(sigma_scalar, shape=(D,), dtype=tf.float32)
    sigma = tf.Variable(tf.linalg.diag(sigma_tensor), dtype=tf.float32)
    # test for epsilon = zeros tensor
    epsilon = tf.zeros(shape=(B, P, S, D))
    SMC_loss_tensor=compute_SMC_ll_one_layer(epsilon=epsilon, sigma=sigma) # ok, test passed. return a zero tensor :-) when sigma=1 &
    #epsilon is a zero tensor.
    #--------------------------test of compute_log_likelihood------------------------------------------------
    #list_epsilon=[tf.random.normal(shape=(B,P,S,D)) for _ in range(L)]
    # test with all epsilon as zero tensor...
    list_epsilon=[tf.zeros(shape=(B,P,S,D)) for _ in range(L)]
    list_sigma=[sigma for _ in range(L)]
    sampling_weights=tf.random.uniform(shape=(B,P,1))
    # normalization:
    sampling_weights=sampling_weights/tf.expand_dims(tf.reduce_sum(sampling_weights, axis=1), axis=1)
    SMC_loss=compute_SMC_log_likelihood(list_epsilon=list_epsilon, list_sigma=list_sigma, sampling_weights=sampling_weights)
    print(SMC_loss.numpy()) # ok, test passed. return 0 when sigma is the identity matrix and epsilon is a zero tensor.
f3c9d6608891d59ade8516ee86eea17cd1c9920a | Python | SyedUmaidAhmed/IoT-with-Temperature-Sensor-DHT-11 | /temp_pers.py | UTF-8 | 1,152 | 2.515625 | 3 | [] | no_license | import adafruit_dht
from ISStreamer.Streamer import Streamer
import time
import board
# --------- User Settings ---------
SENSOR_LOCATION_NAME = "Student Labs"
BUCKET_NAME = "Temperature Monitor"
BUCKET_KEY = "EA3F6F8BCLTV"
# NOTE(review): hard-coded credential committed to source; rotate this key
# and load it from an environment variable or config file instead.
ACCESS_KEY = "ist_DW0n0QFM5mJAK4Hb9BujgTKbwsJyoyEG"
MINUTES_BETWEEN_READS = 1
METRIC_UNITS = False
# ---------------------------------
# DHT11 humidity/temperature sensor wired to GPIO 4.
dhtSensor = adafruit_dht.DHT11(board.D4) #GPIO 4
streamer = Streamer(bucket_name=BUCKET_NAME, bucket_key=BUCKET_KEY, access_key=ACCESS_KEY)
# Poll the sensor forever, pushing one reading to Initial State per interval.
while True:
    try:
        humidity = dhtSensor.humidity
        temp_c = dhtSensor.temperature
    except RuntimeError:
        # DHT sensors fail to read intermittently; retry immediately.
        print("RuntimeError, trying again...")
        continue
    if METRIC_UNITS:
        streamer.log(SENSOR_LOCATION_NAME + " Temperature(C)", temp_c)
    else:
        # Convert Celsius to Fahrenheit, formatted to 2 decimal places.
        temp_f = format(temp_c * 9.0 / 5.0 + 32.0, ".2f")
        streamer.log(SENSOR_LOCATION_NAME + " Temperature(F)", temp_f)
    humidity = format(humidity,".2f")
    streamer.log(SENSOR_LOCATION_NAME + " Humidity(%)", humidity)
    streamer.flush()
    time.sleep(60*MINUTES_BETWEEN_READS)
c683d0b286d72d45a22b8a5246c2083150509834 | Python | roydan24/roydangameproject | /gamercode.py | UTF-8 | 10,732 | 3.28125 | 3 | [] | no_license |
import random
import pygame
# ----- CONSTANTS
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
YELLOW = (255, 255, 0)
SKY_BLUE = (95, 165, 228)
GREEN = (34, 139, 34)
WIDTH = 800
HEIGHT = 600
TITLE = "Link's Coin Fall"
NUM_COINS = 30
# Create player class
class Player(pygame.sprite.Sprite):
    """Player sprite: left/right walking, gravity, jumping, and platform
    collision resolution against self.level.platform_list."""
    def __init__(self):
        super().__init__()
        self.image = pygame.image.load("./images/link copy.png")
        # scaling the image down .5x
        self.image = pygame.transform.scale(self.image, (64, 64))
        self.rect = self.image.get_rect()
        # Velocity in pixels per frame.
        self.vel_x = 0
        self.vel_y = 0
        # Move left/right
        # NOTE(review): leftover copy from update(); a no-op here (vel_x == 0).
        self.rect.x += self.vel_x
        # List of sprites we can bump against
        self.level = None
    def update(self):
        """ Move the player. """
        # Gravity
        self.calc_grav()
        # Move left/right
        self.rect.x += self.vel_x
        # See if we hit anything
        block_hit_list = pygame.sprite.spritecollide(self, self.level.platform_list, False)
        for block in block_hit_list:
            # If we are moving right,
            # set our right side to the left side of the item we hit
            if self.vel_x > 0:
                self.rect.right = block.rect.left
            elif self.vel_x < 0:
                # Otherwise if we are moving left, do the opposite.
                self.rect.left = block.rect.right
        # Move up/down
        self.rect.y += self.vel_y
        # Check and see if we hit anything
        block_hit_list = pygame.sprite.spritecollide(self, self.level.platform_list, False)
        for block in block_hit_list:
            # Reset our position based on the top/bottom of the object.
            if self.vel_y > 0:
                self.rect.bottom = block.rect.top
            elif self.vel_y < 0:
                self.rect.top = block.rect.bottom
            # Stop our vertical movement
            self.vel_y = 0
    def calc_grav(self):
        """ Calculate effect of gravity. """
        if self.vel_y == 0:
            self.vel_y = 1
        else:
            self.vel_y += .35
        # See if we are on the ground.
        if self.rect.y >= HEIGHT - self.rect.height and self.vel_y >= 0:
            self.vel_y = 0
            self.rect.y = HEIGHT - self.rect.height
    def jump(self):
        """ Called when user hits 'jump' button. """
        # move down a bit and see if there is a platform below us.
        # Move down 2 pixels because it doesn't work well if we only move down
        # 1 when working with a platform moving down.
        self.rect.y += 2
        platform_hit_list = pygame.sprite.spritecollide(self, self.level.platform_list, False)
        self.rect.y -= 2
        # If it is ok to jump, set our speed upwards
        if len(platform_hit_list) > 0 or self.rect.bottom >= HEIGHT:
            self.vel_y = -10
        # See if we hit anything
        # NOTE(review): this horizontal-collision pass is duplicated from
        # update(); the player has not moved horizontally since the last
        # update, so it looks like dead copy-paste -- confirm before removing.
        block_hit_list = pygame.sprite.spritecollide(self, self.level.platform_list, False)
        for block in block_hit_list:
            # If we are moving right,
            # set our right side to the left side of the item we hit
            if self.vel_x > 0:
                self.rect.right = block.rect.left
            elif self.vel_x < 0:
                # Otherwise if we are moving left, do the opposite.
                self.rect.left = block.rect.right
    # Player movement
    def go_left(self):
        self.vel_x = -4
    def go_right(self):
        self.vel_x = 4
    def stop(self):
        self.vel_x = 0
# Create enemy class
class Enemy(pygame.sprite.Sprite):
    """Goomba that patrols horizontally near the bottom of the screen."""
    def __init__(self, y_coord):
        """
        Arguments:
            y_coord - initial y-coordinate
        NOTE(review): y_coord is currently ignored -- centery is hard-coded
        to 570 below. Confirm intent before wiring it through, since the
        only caller passes Enemy(1) and relies on today's placement.
        """
        super().__init__()
        self.image = pygame.image.load("./images/goomba.png")
        self.image = pygame.transform.scale(self.image, (64, 64))
        self.rect = self.image.get_rect()
        # initial location middle of the screen at y_coord
        self.rect.centerx = WIDTH / 2
        self.rect.centery = 570
        # Horizontal patrol speed in pixels per frame.
        self.x_vel = 5
    def update(self):
        """Move the enemy side-to-side"""
        self.rect.x += self.x_vel
        # Keep enemy in the screen
        if self.rect.right > WIDTH or self.rect.left < 0:
            self.x_vel *= -1
# Create coin class
class Coin(pygame.sprite.Sprite):
    """Coin that falls from a random position and respawns at the top."""
    def __init__(self):
        super().__init__()
        self.image = pygame.image.load("./images/coin.png")
        self.image = pygame.transform.scale(self.image, (32, 32))
        self.rect = self.image.get_rect()
        # Random starting position anywhere on screen.
        self.rect.x, self.rect.y = (
            random.randrange(0, WIDTH),
            random.randrange(0, HEIGHT)
        )
        # Fall speed (pixels per frame), randomly slow or fast.
        self.vel_y = random.choice([1, 2])
    def update(self):
        """Update location of coin."""
        self.rect.y += self.vel_y
        # reset location if it reaches the bottom
        if self.rect.y > HEIGHT:
            self.rect.x = random.randrange(0, WIDTH)
            self.rect.y = random.randrange(-15, 0)
# May add platforms later on
class Platform(pygame.sprite.Sprite):
    """ Platform the user can jump on """
    def __init__(self, width, height):
        """ Platform constructor. Builds a solid white surface of the given
            width and height; the caller positions it afterwards by setting
            rect.x / rect.y (see Level_01). """
        super().__init__()
        self.image = pygame.Surface([width, height])
        self.image.fill(WHITE)
        self.rect = self.image.get_rect()
class Level(object):
    """ This is a generic super-class used to define a level.
        Create a child class for each level with level-specific
        info. """
    def __init__(self, player):
        """ Constructor. Pass in a handle to player. Needed for when moving platforms
            collide with the player. """
        # Sprite groups for everything living on this level.
        self.platform_list = pygame.sprite.Group()
        self.enemy_list = pygame.sprite.Group()
        self.player = player
        # Background image
        self.background = None
    # Update everything on this level
    def update(self):
        """ Update everything in this level."""
        self.platform_list.update()
        self.enemy_list.update()
    def draw(self, screen):
        """ Draw everything on this level. """
        # Draw all the sprite lists that we have
        self.platform_list.draw(screen)
        self.enemy_list.draw(screen)
# Create platforms for the level
class Level_01(Level):
    """ Definition for level 1. """
    def __init__(self, player):
        """ Create level 1. """
        # Call the parent constructor
        Level.__init__(self, player)
        # Array with width, height, x, and y of platform
        # NOTE(review): y=800 is below the 600-pixel-high screen, so this
        # platform is placed off-screen -- confirm whether that is intended.
        level = [
            [210, 70, 600, 800],
        ]
        # Go through the array above and add platforms
        for platform in level:
            block = Platform(platform[0], platform[1])
            block.rect.x = platform[2]
            block.rect.y = platform[3]
            block.player = self.player
            self.platform_list.add(block)
def main():
    """Entry point: run the coin-catching game loop until win, loss, or quit.

    The player moves left/right and jumps with the arrow keys, collects
    falling coins for points, and loses on contact with the enemy.
    """
    pygame.init()

    # ----- SCREEN PROPERTIES
    size = (WIDTH, HEIGHT)
    screen = pygame.display.set_mode(size)
    pygame.display.set_caption(TITLE)

    # ----- LOCAL VARIABLES
    done = False
    clock = pygame.time.Clock()
    score = 0

    # Sprite groups
    all_sprites = pygame.sprite.RenderUpdates()
    enemy_sprites = pygame.sprite.Group()
    coin_sprites = pygame.sprite.Group()

    # enemies
    enemy = Enemy(1)
    all_sprites.add(enemy)
    enemy_sprites.add(enemy)

    # Create player
    player = Player()
    all_sprites.add(player)

    # Create coins at random on-screen positions.
    for _ in range(NUM_COINS):
        coin = Coin()
        coin.rect.x = random.randrange(WIDTH - coin.rect.width)
        coin.rect.y = random.randrange(HEIGHT - coin.rect.height)
        all_sprites.add(coin)
        coin_sprites.add(coin)

    # Create all the levels and select the current one.
    level_list = [Level_01(player)]
    current_level = level_list[0]
    player.level = current_level

    # ----- MAIN LOOP
    while not done:
        # Single event pass per frame: pygame.event.get() drains the queue,
        # so the original second event loop later in the frame never saw any
        # events and has been removed.
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                done = True
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_LEFT:
                    player.go_left()
                if event.key == pygame.K_RIGHT:
                    player.go_right()
                if event.key == pygame.K_UP:
                    player.jump()
            if event.type == pygame.KEYUP:
                # Stop only if still moving in the released direction, so a
                # quick direction change is not cancelled by the old key-up.
                if event.key == pygame.K_LEFT and player.vel_x < 0:
                    player.stop()
                if event.key == pygame.K_RIGHT and player.vel_x > 0:
                    player.stop()

        # ----- LOGIC
        # all_sprites already contains the player, the enemy and every coin,
        # so a single call updates everything.  (The original additionally
        # called coin.update() in an explicit loop, making coins fall at
        # double speed.)
        all_sprites.update()
        # Update items in the level
        current_level.update()

        # Keep the player inside the horizontal screen bounds (done once per
        # frame, after movement has been applied; the original repeated this
        # clamp twice).
        if player.rect.right > WIDTH:
            player.rect.right = WIDTH
        if player.rect.left < 0:
            player.rect.left = 0

        # Player collides with coin: True removes collected coins from the group.
        coins_collected = pygame.sprite.spritecollide(player, coin_sprites, True)
        for _ in coins_collected:
            score += 1
            print(f"Score = {score}")
        if score == NUM_COINS:
            done = True
            print("You win!")

        # Enemy collides with player -> game over.
        if pygame.sprite.spritecollide(player, enemy_sprites, False):
            print("Game Over")
            done = True

        # ----- DRAW
        screen.fill(GREEN)
        current_level.draw(screen)
        all_sprites.draw(screen)

        # ----- UPDATE
        # The whole frame is redrawn each tick (screen.fill above), so a single
        # flip suffices; the extra pygame.display.update(dirty_rectangles) was
        # redundant.
        pygame.display.flip()
        clock.tick(60)

    pygame.quit()


if __name__ == "__main__":
    main()
f08049d7dbef74f9f36f4eef674299a3a80770b9 | Python | shaikhsahal125/ComputerSecurity-Rutgers | /assignment3/hw10/vdecrypt | UTF-8 | 1,285 | 3.09375 | 3 | [] | no_license | #!/usr/bin/env python3
import sys
def main():
    """Decrypt a Vigenere-style byte cipher.

    Usage: vdecrypt <keyfile> <input> <output>

    Each output byte is (input_byte - key_byte) mod 256, with the key bytes
    repeated cyclically.  An empty key file behaves as a zero key (output
    equals input) instead of crashing; the original code's dead `else`
    branch hinted at that intent but raised IndexError instead.
    """
    keyfile = sys.argv[1]     # path of the key file
    message = sys.argv[2]     # path of the input (encrypted) file
    ciphertext = sys.argv[3]  # path of the output file

    # Read both inputs fully; context managers guarantee the handles close
    # (the original read one byte at a time and only closed on success).
    with open(keyfile, "rb") as kf:
        key = kf.read()
    with open(message, "rb") as msg:
        data = msg.read()

    file_length = len(key)
    if not key:
        # Zero key: identity transform.
        key = b"\x00"

    with open(ciphertext, "wb") as out:
        # Cyclic key application, equivalent to the original reset-to-0 index.
        out.write(bytes(
            (data[i] - key[i % len(key)]) % 256 for i in range(len(data))
        ))

    print(f'keyfile={keyfile}, length={file_length}')


if __name__ == '__main__':
    main()
| true |
8167e93e0c1d84ab4b7c40cc3ae368b833cce74f | Python | cbare/Jazzopedia | /scripts/read_xml.py | UTF-8 | 710 | 2.84375 | 3 | [] | no_license | from lxml import etree
import sys
names = []
# Artist names to keep from the full Discogs dump.
jazz = ["Miles Davis", "John Coltrane", "Bill Evans", "Paul Chambers (3)", "Cannonball Adderley",
        "Lee Konitz", "Lennie Tristano", "McCoy Tyner", "Elvin Jones", "Keith Jarrett"]
doc = etree.parse('/Users/CBare/Documents/projects/jazz_discography/data/discogs_20161101_artists.xml')
for node in doc.xpath('/artists/artist'):
    # Full text content of the <name> element (joins any nested text nodes).
    name = ''.join(node.xpath('name')[0].itertext())
    if name in jazz:
        # Serialize only matching nodes.  (The original also serialized every
        # node into an unused variable `x` and kept a dead counter `i = 0`;
        # both removed.)
        print(etree.tostring(node, pretty_print=True, encoding='utf-8').decode('utf-8'))
        names.append(name)
print("artists:", len(names))
print("----------"*6)
for name in names[:100]:
    print(name)
print("----------"*6)
| true |
f0e24074253258f6c60753a72373a68f042bec49 | Python | frankcheng1991/master | /cheat sheet file/numpy_learn.py | UTF-8 | 9,895 | 3.890625 | 4 | [] | no_license | # Numpy notebook
# --- Python dictionary warm-up (note: this whole file uses Python 2
# print-statement syntax) ---
d = {}
d['fish'] = 'wet'
print d['fish']
print d.get('fish', 'N/A')
# Direct lookup of a missing key raises KeyError; .get() returns a default.
try:
    print d['monkey']
except:
    print 'error!'
print d.get('monkey', 'N/A') # Get an element with a default; prints "N/A"
'''
Numpy
Numpy provides a high-performance multidimensional array
and basic tools to compute with and manipulate these arrays.
'''
import numpy as np
#------------------ Part 1 : Arrays ------------------------#
# full array creating method: url = https://docs.scipy.org/doc/numpy/user/basics.creation.html#arrays-creation
'''
Arrays is a grid of values of the same type.
The number of dimensions is the rank of the array.
the shape of an array is a tuple of integers giving the size of the array along each dimension
'''
'''
for arrays from lower dimensional to high dimensional : column -> row -> width -> ...
1d array: ([1, 2, 3, 4, ...]) -> 1 row n columns
2d array or matrix: ([[1, 2, 3, ...], [4, 5, 6, ...], ...]) -> n row & m columns
r2 = np.array([[1, 2, 3], [4, 5, 6]])
print r2.shape # consequence: row = 2, column = 3
3d array: ([[[1, 2, 3, ...], [4, 5, 6, ...], ...], [[7, 8, 9, ...], [10, 11, 12, ...], ...]])
r3 = np.array([[[1, 2, 3, 1], [4, 5, 6, 1], [0, 0, 0, 1]], [[7, 8, 9, 1], [10, 11, 12, 1], [0, 0, 0, 1]]])
print r3.shape # consequence: width:2 -> row: 3 -> column: 4
'''
a = np.array([1, 2, 3]) # Create a rank 1 array
print type(a)
print a.shape # the shape of an array is a tuple of integers giving the size of the array along each dimension
print [ele for ele in a]
b = np.array([[1, 2, 3], [4, 5, 6]])
print b.shape # row = 2, column = 3
print b[0, 0], b[0, 1], b[1, 0]
# Mixed ints/floats/complex: numpy upcasts all elements to one common dtype.
print np.array([[1,2.0],[0,0],(1+1j,3.)])
# Numpy also provides many functions to create arrays:
a = np.zeros((3, 2)) # create 3 rows and 2 columns
print a
print a[2, 1] # the 0 on the last row and last column
b = np.ones((1, 2))
print b
c = np.full((2, 2), 7) # create a constant array
print c
d = np.eye(2) # create 2x2 identity matrix
print d
e = np.random.random((2, 2)) # Create an array filled with random values
print e
f = np.arange(10) # arange() will create arrays with regularly incrementing values.
print f
g = np.arange(2, 10, dtype = float)
print g
h = np.arange(2, 3, 0.1) # step is 0.1
print h
i = np.linspace(1., 4., 6) # create 6 points between 1 and 4 including 4
print i
# indices() returns one index-grid array per dimension of the given shape.
j = np.indices((2, 4))
print j
#------------------ Part 2 : Array indexing ------------------------#
# Full indexing url: https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html
# 1st way: Slicing (basic indexing) -> start:stop:step; output not include stop; when step negative, reverse
## index for array: [0, 1, 2, 3, ..., -3, -2, -1]
a = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]) # create 3x4 matrix, 2 "[" means 2 d
b = a[:2, 1:3] # take row 0, row 1; column 1 and column2 -> 2 by 2 2d matrix
print b
print a[0, 1]
c = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
print c[1:7:2] # start:stop:step
print c[-2: 10]
print c[-3: 3: -1] # reverse
d = np.array([[[1], [2], [3]], [[4], [5], [6]]])
d[1:2] # If the number of objects in the selection tuple is less than N , then : is assumed for any subsequent dimensions.
d[..., 0] # Ellipsis expand to the number of : objects needed to make a selection tuple of the same length as x.ndim.
#There may only be a single ellipsis present. It equals to d[:, :, 0]
d[..., 0].flatten()
d[:, np.newaxis, :, :] # Each newaxis object in the selection tuple serves to expand the dimensions of the resulting selection by one unit-length dimension. The added dimension is the position of the newaxis object in the selection tuple.
'''
Two ways of accessing the data in the middle row of the array.
Mixing integer indexing with slices yields an array of lower rank,
while using only slices yields an array of the same rank as the
original array:
'''
row_r1 = a[1, :] # Rank 1 view of the second row of a
row_r2 = a[1:2, :] # Rank 2 view of the second row of a
print row_r1, row_r1.shape
print row_r2, row_r2.shape
# Same rank-1 vs rank-2 distinction applies to columns:
col_r1 = a[:, 1]
col_r2 = a[:, 1:2]
print col_r1, col_r1.shape
print col_r2, col_r2.shape
# 2nd way: Integer array indexing (advanced indexing)
'''
a = np.array([
[1,2,3],
[4,5,6],
[7,8,9],
[10,11,12]
]) # 2d array: row = 4, column = 3
basic indexing example:
a[(0, 1)] # equals to a[0, 1]
a[[0, slice(None)]]
advanced indexing example:
a[(0, 1), ]
a[[0, 1]]
'''
a = np.array([[1, 2], [3, 4], [5, 6]])
print a[[0, 1, 2], [0, 1, 0]] # The above example of integer array indexing is equivalent to this:
print np.array([a[0, 0], a[1, 1], a[2, 0]]) # this is the basic indexing
print a[[0, 1, 2], [0, 1, 0]].shape
# Index arrays may repeat positions: same element can be picked twice.
print a[[0, 0], [1, 1]]
print np.array([a[0, 1], a[0, 1]])
# One useful trick with integer array indexing is selecting or mutating one element from each row of a matrix:
a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]])
print a
b = np.array([0, 2, 0, 1]) # # Create an array of indices
print a[np.arange(4), b] # Select one element from each row of a using the indices in b
a[np.arange(4), b] += 10 # Mutate one element from each row of a using the indices in b
print a
# Another example for broadcasting -> url: https://docs.scipy.org/doc/numpy/reference/ufuncs.html#ufuncs-broadcasting
a = np.array([
[0, 1, 2],
[3, 4, 5],
[6, 7, 8],
[9, 10, 11]
])
rows = np.array([0, 3], dtype = np.intp)
columns = np.array([0, 2], dtype = np.intp)
# Column index vector broadcast against a column of row indices -> 2x2 block.
print a[rows[:, np.newaxis], columns]
# np.ix_ builds the same open mesh of indices.
print a[np.ix_(rows, columns)]
# 3rd way: Boolean array indexing:
'''
Boolean array indexing lets you pick out arbitrary elements of an array.
Frequently this type of indexing is used to select the elements of an array that satisfy some condition.
Here is an example:
'''
a = np.array([[1, 2], [3, 4], [5, 6]])
bool_idx = (a > 2)
'''
Find the elements of a that are bigger than 2;
this returns a numpy array of Booleans of the same
shape as a, where each slot of bool_idx tells
whether that element of a is > 2.
'''
print bool_idx
'''
We use boolean array indexing to construct a rank 1 array
consisting of the elements of a corresponding to the True values
of bool_idx
'''
print a[bool_idx] # We can do all of the above in a single concise statement:
print a[a > 2]
# Care must only be taken to make sure that the boolean index has exactly as many dimensions as it is supposed to work with.
x = np.array([
[0, 1],
[1, 1],
[2, 2]
])
# sum(x) or x.sum() -> sum everything
# columnsum = x.sum(axis = 0)
rowsum = x.sum(-1) # or x.sum(1)
x[rowsum <= 2, :]
rowsum = x.sum(-1, keepdims = True) # If this is set to True, the axes which are reduced are left in the result as dimensions with size one. With this option, the result will broadcast correctly against the input array.
#x[rowsum <= 2]
rows = (x.sum(-1) % 2) == 0
columns = [0, 2]
#x[np.ix_(rows, columns)]
#------------------ Part 3 : Datatypes ------------------------#
# full data types introduction: https://docs.scipy.org/doc/numpy/reference/arrays.dtypes.html
x = np.array([1, 2], dtype = np.int64)
#------------------ Part 4 : Array math ------------------------#
## full math functions: url = https://docs.scipy.org/doc/numpy/reference/routines.math.html
## Here are other array mulnipulation: url = https://docs.scipy.org/doc/numpy/reference/routines.array-manipulation.html
x = np.array([[1, 2], [3, 4]], dtype = np.float64) # 2d matrix
y = np.array([[5, 6], [7, 8]], dtype = np.float64) # 2d matrix
v = np.array([9, 10]) # 1d array
w = np.array([11, 12]) # 1d array
## + : elementwise addition, operator and function forms are equivalent
print x + y
print np.add(x, y)
## -
print x - y
print np.subtract(x, y)
## * : not matrix maltiplication, just elementwise product:
print x * y
print np.multiply(x, y)
## /:
print x / y
print np.divide(x, y)
## root square:
print x ** 0.5
print np.sqrt(x)
## use 'dot' to compute the inner product of vectors
print x.dot(y) # alternative way below:
print np.dot(x, y)
print v.dot(w) # for vectors to compute inner product; while v*w compute elementwise product
print np.dot(v, x)
## sum
print x.shape # (row, column)
print np.sum(x) # sum of all element
print np.sum(x, axis = 0) # sum of each column
print np.sum(x, axis = 1) # sum of each row
## use 'T' to transpose
print x.T
print v.T # Note that taking the transpose of a rank 1 array does nothing, convert it to rank 2 first:
v2 = v[np.newaxis, :]
print v2.T
#------------------ Part 5 : Broadcasting ------------------------#
## URl = https://docs.scipy.org/doc/numpy/user/basics.broadcasting.html
## add the vector v to each row of the matrix x:
x = np.arange(1, 13).reshape(4,3)
v = np.array([1, 0, 1])
vv = np.tile(v, (4, 1)) # Stack 4 copies of v on top of each other
print vv
y = x + vv
print y
## numpy broadcasting allow us to perform this computation without actually creating multiple copies of v. Consider this version, using broadcasting:
y = x + v # Add v to each row of x using broadcasting
print y
'''
Explanation:
The line y = x + v works even though x has shape (4, 3) and v has shape (3,)
due to broadcasting; this line works as if v actually had shape (4, 3),
where each row was a copy of v, and the sum was performed elementwise.
Broadcasting two arrays together follows these rules:
1. If the arrays do not have the same rank, prepend the shape of the lower rank array with 1s until both shapes have the same length.
2. The two arrays are said to be compatible in a dimension if they have the same size in the dimension, or if one of the arrays has size 1 in that dimension.
3. The arrays can be broadcast together if they are compatible in all dimensions.
4. After broadcasting, each array behaves as if it had shape equal to the elementwise maximum of shapes of the two input arrays.
5. In any dimension where one array had size 1 and the other array had size greater than 1, the first array behaves as if it were copied along that dimension
'''
## in broadcasting, shape(3, ) equals to shape(1, 3); stretching must be from 1 -> n
1176eec64a879fe10cb37d127a6cf1c87d4fe7ae | Python | AbhishekDoshi26/python-programs | /Panda/drop_duplicates().py | UTF-8 | 421 | 2.765625 | 3 | [] | no_license | import pandas as pd
# Load the employee table, parsing the two date/time columns up front.
employees = pd.read_csv(
    'Datasets\employees.csv',
    parse_dates=["Start Date", "Last Login Time"],
)
# Normalize column dtypes, then sort and de-duplicate; each step rebinds the
# name instead of mutating in place, but the final frame is identical.
employees = employees.assign(**{
    'Senior Management': employees['Senior Management'].astype(bool),
    'Gender': employees['Gender'].astype("category"),
})
employees = employees.sort_values('First Name')
# Keep the first occurrence of each (First Name, Team) pair.
employees = employees.drop_duplicates(subset=['First Name', 'Team'])
print(employees)
| true |
f2b9053fe89a3269e9871de492a9b7b97b660bdc | Python | jldinh/multicell | /src/multicell/simulation_builder.py | UTF-8 | 2,490 | 3.03125 | 3 | [
"MIT"
] | permissive | # -*- coding: utf-8 -*-
"""
Created on Mon Nov 16 11:34:49 2015
@author: jl
"""
import numpy as np
from openalea.tissueshape import tovec, grid_tissue
from multicell import simulation
def generate_cell_grid(x, y=0, z=0, noise_amplitude=1e-3):
    """Builds the representation of a grid tissue of the specified shape.

    Parameters
    ----------
    x : int
        Number of cells along the x axis.
    y : int
        Number of cells along the y axis, not used if 0. (default: 0)
    z : int
        Number of cells along the z axis, not used if 0. (default: 0)
    noise_amplitude : double
        Amplitude of the random noise applied to the position of vertices.
        The perturbations applied to each vertex are independent and
        follow a uniform distribution between -noise_amplitude/2. and
        noise_amplitude/2. (default: 1e-3)

    Returns
    -------
    Topomesh
        Topomesh of the grid tissue
    dict
        Dictionary (vertex ID -> position) of the tissue
    """
    # Keep only the axes along which at least one cell was requested, so the
    # grid can be 1-D, 2-D or 3-D.
    shape = tuple([u for u in [x,y,z] if u >= 1])
    tissuedb = grid_tissue.regular_grid(shape)
    mesh = tissuedb.get_topology("mesh_id")
    pos = tovec(tissuedb.get_property("position") )
    # Barycenter of all vertex positions (Python 2 idioms: builtin reduce and
    # dict.itervalues).
    bary = reduce(lambda x,y: x + y,pos.itervalues() ) / len(pos)
    # Re-center the tissue on the origin and jitter every vertex independently
    # with uniform noise of the requested amplitude, one component per axis.
    pos = dict((pid,vec - bary + np.random.uniform(-noise_amplitude/2., noise_amplitude/2., len(shape))) for pid,vec in pos.iteritems())
    return mesh, pos
def generate_cell_grid_sim(x, y=0, z=0, noise_amplitude = 1e-3, sim_class=simulation.Simulation):
    """Builds a Simulation object with a grid-shaped tissue.

    Parameters
    ----------
    x : int
        Number of cells along the x axis
    y : int
        Number of cells along the y axis, not used if 0 (default: 0)
    z : int
        Number of cells along the z axis, not used if 0 (default: 0)
    noise_amplitude : double
        Amplitude of the random noise applied to the position of vertices.
        The perturbations applied to each vertex are independent and
        follow a uniform distribution between -noise_amplitude/2. and
        noise_amplitude/2. (default: 1e-3)
    sim_class : class
        Simulation class to instantiate (default: simulation.Simulation);
        allows callers to plug in a Simulation subclass.

    Returns
    -------
    Simulation
        Simulation object containing the grid.
    """
    sim = sim_class()
    # Build the grid topology/geometry and load it into the simulation.
    mesh, pos = generate_cell_grid(x, y, z, noise_amplitude)
    sim.import_topomesh(mesh, pos)
    return sim
| true |
981287fb679a01c68bd55345c85b4383efa1ec18 | Python | yemarn510/YM_Python | /02_PSP/PSP/unitest.py | UTF-8 | 2,577 | 3.03125 | 3 | [] | no_license | import unittest
import PSPQuickSortInput
import PSPQuickSortProcess
class TestStringMethods(unittest.TestCase):
    """Unit tests for PSPQuickSortInput.getArray and PSPQuickSortProcess.sort.

    Note: the original tests compared values with assertTrue(expected, actual);
    unittest treats the second argument of assertTrue as a *failure message*,
    so those assertions always passed.  They now use assertEqual where a
    comparison was clearly intended.
    """

    def test_getArray_success_with_valid_values(self):
        """
        This is testing for normal files
        """
        # prepare
        fileName = "10Lines"
        expectedResult = [12.0, 13.5, 1.0, 5.5,
                          9.0, 19.5, 12.0, 23.5, 5.0, 51.0]
        # execute
        actualResponse = PSPQuickSortInput.getArray(fileName)
        # assert (was the vacuous assertTrue(expected, actual))
        self.assertEqual(expectedResult, actualResponse)

    def test_getArray_error_with_not_existing_file(self):
        """
        This is test for Non Existing file
        """
        # prepare
        fileName = "10Lines1"
        # execute
        actualResponse = PSPQuickSortInput.getArray(fileName)
        # assert: only checks a truthy response is returned for a missing file
        self.assertTrue(actualResponse)

    def test_getArray_error_with_empty_value(self):
        """
        This is for testing empty files
        """
        # prepare
        fileName = "empty"
        # TODO: compare against this message with assertEqual once getArray's
        # error contract is confirmed.
        expectedResult = "\n The file is empty \n"
        # execute
        actualResponse = PSPQuickSortInput.getArray(fileName)
        # assert
        self.assertTrue(actualResponse)

    def test_getArray_error_inlude_strings(self):
        """
        This is for testing array including strings
        """
        # prepare
        fileName = "10ContStrings"
        # TODO: compare against this message with assertEqual once getArray's
        # error contract is confirmed.
        expectedResult = "\n The Lines contain Strings \n"
        # execute
        actualResponse = PSPQuickSortInput.getArray(fileName)
        # assert
        self.assertTrue(actualResponse)

    def test_getArray_success_contain_blanks(self):
        """
        This is for testing blank lines
        """
        # prepare
        fileName = "10ContBlanks"
        expectedResult = [2.0, 3.4, 5.9, 6.5, 12.0, 13.0]
        # execute
        actualResponse = PSPQuickSortInput.getArray(fileName)
        # assert (was the vacuous assertTrue(expected, actual))
        self.assertEqual(expectedResult, actualResponse)

    def test_sort_success_return_sortedArray(self):
        """
        This is the test for getting sortedArray
        """
        # prepare
        unsortedArray = [12.0, 13.5, 1.0, 5.5,
                         9.0, 19.5, 12.0, 23.5, 5.0, 51.0]
        expectedResult = [1.0, 5.0, 5.5, 9.0,
                          12.0, 12.0, 13.5, 19.5, 23.5, 51.0]
        # execute
        actualResponse = PSPQuickSortProcess.sort(unsortedArray)
        # assert
        self.assertEqual(expectedResult, actualResponse)


if __name__ == '__main__':
    unittest.main()
| true |
8c32af2d409081c58732f632552603ce63783178 | Python | MathewBurnett/Salty-Hamlet | /test_parseBurn.py | UTF-8 | 803 | 3.0625 | 3 | [] | no_license | from nose.tools import assert_equal, assert_true
import parseBurn
import datetime
def test_can_parse_kill():
    """parseHTML extracts link, ISK amount, pilot name and timestamp from a kill div."""
    html = r'<div class="col-4"><div><a href="https://zkillboard.com/kill/38481364" target="_blank"><img class="img-circle" src="static/28844.png" width="150px"></a><h3>8,364,738,569 ISK</h3><p>2014-04-27 20:01:00<br><a href="https://zkillboard.com/kill/38481364" target="_blank">Nantik23 <p></p></a></p></div></div>'
    kills = parseBurn.parseHTML(html)
    assert_true(len(kills) > 0, 'no kills were parsed')
    assert_equal(u'https://zkillboard.com/kill/38481364', kills[0].link)
    assert_equal(8364738569, kills[0].amount)
    assert_equal('Nantik23', kills[0].name)
    # 2014-04-27 20:01:00 -- written with plain decimal literals: the original
    # `(2014, 4, 27, 20, 01, 00)` used leading-zero (octal-style) tokens, which
    # are a SyntaxError on Python 3 and bad style on Python 2.
    date = datetime.datetime(2014, 4, 27, 20, 1, 0)
    assert_equal(date, kills[0].date)
09e484069ae320396d7e31bece427efeb1bd9701 | Python | JuanMaRo/Python-programming-exercises | /q017.py | UTF-8 | 562 | 3.71875 | 4 | [] | no_license | #deposit, withdrawal
def account():
    """Interactive console account: deposit, withdraw or quit (menu text in Spanish)."""
    dep = 0  # running balance
    while True:
        command = str(input('''
[d] deposito
[w] retiro
[s] salir
'''))
        if command == 'd':
            d = int(input('deposito de $'))
            dep += d
            # prints the updated *balance*, not the individual deposit amount
            print('deposito de ${}'.format(dep))
        elif command == 'w':
            w = int(input('retiro de $'))
            dep -= w
            # same message after a withdrawal: shows the remaining balance
            print('deposito de ${}'.format(dep))
        else:
            # any other key (e.g. 's' for "salir") exits the loop
            break


if __name__ == '__main__':
    account()
| true |
e80b1cff1a74a38cc8eaa17c91dbcced5bd2bce4 | Python | rishabhstein/PhD_Codes | /lib/FractalDimension.py | UTF-8 | 3,193 | 3.140625 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 16 09:47:15 2019
@author: daniel
"""
import numpy as np
import matplotlib.pyplot as plt
def fractal_dimension(array, max_box_size = None, min_box_size = 1, n_samples = 20, n_offsets = 0, plot = False):
    """Calculate the box-counting (Minkowski) fractal dimension of a numpy array.

    Args:
        array (np.ndarray): Array whose non-zero voxels define the set to measure.
        max_box_size (int): The largest box size, given as the power of 2 so that
            2**max_box_size gives the sidelength of the largest box.  Defaults to
            the largest power of 2 that fits in the smallest array dimension.
        min_box_size (int): The smallest box size, given as the power of 2 so that
            2**min_box_size gives the sidelength of the smallest box.  Default 1.
        n_samples (int): Number of scales to measure over.
        n_offsets (int): Number of grid offsets to search over to find the
            smallest covering N(s); 0 means a single zero offset.
        plot (bool): Set to True to show an analytical log-log plot of the fit
            (requires matplotlib to be importable as `plt` at module level).

    Returns:
        float: Fitted slope of log N(s) versus log 1/s (the fractal dimension).
    """
    # Determine the scales to measure on.
    if max_box_size is None:
        # Default max size is the largest power of 2 that fits in the smallest
        # dimension of the array.
        max_box_size = int(np.floor(np.log2(np.min(array.shape))))
    scales = np.floor(np.logspace(max_box_size, min_box_size, num=n_samples, base=2))
    scales = np.unique(scales)  # remove duplicates introduced by the floor

    # Coordinates of all non-zero voxels, one row per voxel.  np.argwhere
    # works for any dimensionality (the original zip(x, y, z) version was
    # restricted to 3-D arrays).
    voxels = np.argwhere(array > 0)

    # For each scale, the minimum number of boxes touched over all offsets.
    Ns = []
    for scale in scales:
        touched = []
        offsets = [0] if n_offsets == 0 else np.linspace(0, scale, n_offsets)
        for offset in offsets:
            # Histogram bin edges per dimension, shifted by the current offset.
            bin_edges = [np.arange(0, i, scale) for i in array.shape]
            bin_edges = [np.hstack([0 - offset, x + offset]) for x in bin_edges]
            H1, _ = np.histogramdd(voxels, bins=bin_edges)
            touched.append(np.sum(H1 > 0))
        Ns.append(touched)
    Ns = np.array(Ns)

    # From all coverings found, keep the smallest one at each scale.
    Ns = Ns.min(axis=1)

    # Collapse to one (scale, count) pair per distinct count: the smallest
    # scale that produced each count.
    unique_Ns = np.unique(Ns)
    scales = np.array([np.min(scales[Ns == x]) for x in unique_Ns])
    # Drop empty coverings.  The original truncated with `scales[:len(Ns)]`,
    # which keeps the *wrong* scales whenever a zero count is removed; masking
    # both arrays with the same filter keeps scale/count pairs aligned.
    nonzero = unique_Ns > 0
    Ns = unique_Ns[nonzero]
    scales = scales[nonzero]

    # Fit log N = D * log(1/s) + c; the slope D is the fractal dimension.
    coeffs = np.polyfit(np.log(1 / scales), np.log(Ns), 1)

    if plot:
        fig, ax = plt.subplots(figsize=(8, 6))
        ax.scatter(np.log(1 / scales), np.log(Ns), c="teal", label="Measured ratios")
        ax.set_ylabel("$\\log N(\\epsilon)$")
        ax.set_xlabel("$\\log 1/ \\epsilon$")
        fitted_y_vals = np.polyval(coeffs, np.log(1 / scales))
        ax.plot(np.log(1 / scales), fitted_y_vals, "k--",
                label=f"Fit: {np.round(coeffs[0], 3)}X+{coeffs[1]}")
        ax.legend()
    return coeffs[0]
| true |
313fbe728150c06e08570ae64a7f89ad1f33e5b0 | Python | dr-dos-ok/Code_Jam_Webscraper | /solutions_python/Problem_53/967.py | UTF-8 | 961 | 2.890625 | 3 | [] | no_license | import sys
fin = sys.stdin


def change_status(status_neigbord, status):
    """Return the next state of a snapper given its left neighbour's state.

    A powered ("ON") neighbour toggles this snapper; otherwise the snapper
    keeps its current state unchanged.
    """
    if status_neigbord != "ON":
        return status
    return "ON" if status == "OFF" else "OFF"
# Brute-force simulation of the Snapper Chain: perform K snaps on N snappers
# and report whether the final snapper is powered (Python 2 print syntax).
for case in range(1, int(fin.readline()) + 1):
    N, K = map(int, fin.readline().split())
    #print N, ":", K
    snappers = ["OFF" for x in range(0,N)]
    for i in range(0,K):
        #print i
        # Work from a snapshot so all snappers react to the pre-snap state.
        snappers_copy = list(snappers)
        #print i,":", snappers_copy
        for j in range(0, N):
            if j == 0 and snappers_copy[0] == "OFF":
                # First snapper turns on; nothing downstream had power.
                snappers[0] = "ON"
                break
            elif j == 0 and snappers_copy[0] == "ON":
                snappers[0] = "OFF"
            elif j > 0 and snappers_copy[j-1] == "OFF":
                # Power stops at the first unpowered snapper: flip it and stop.
                snappers[j-1] = "ON"
                break
            else:
                # Otherwise this snapper toggles based on its powered neighbour.
                snappers[j] = change_status(snappers_copy[j-1], snappers_copy[j])
        #print j,snappers
    # The chain is lit only when every snapper ends up ON.
    if "OFF" in snappers:
        print "Case #%d: %s" % (case, "OFF")
    else:
        print "Case #%d: %s" % (case, "ON")
| true |
8b9dec243778e9ddb755b983bd518a3f9c168178 | Python | vkuznet/WMCore | /src/python/WMCore/Database/DBCreator.py | UTF-8 | 3,666 | 2.859375 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/python
"""
_DBCreator_
Base class for formatters that create tables.
"""
from builtins import str
from WMCore.Database.DBFormatter import DBFormatter
from WMCore.WMException import WMException
from WMCore.WMExceptions import WMEXCEPTION
class DBCreator(DBFormatter):
    """
    _DBCreator_

    Generic class for creating database tables.
    """

    def __init__(self, logger, dbinterface):
        """
        _init_

        Call the constructor of the parent class and create empty dictionaries
        to hold table create statements, index statements, constraint
        statements and insert statements.
        """
        DBFormatter.__init__(self, logger, dbinterface)
        self.create = {}
        self.constraints = {}
        self.inserts = {}
        self.indexes = {}

    def _processStatement(self, statement, conn, transaction):
        """
        _processStatement_

        Execute a single SQL statement, logging and re-wrapping any failure
        as a WMException (code WMCORE-2) whose message carries the statement
        text and the underlying error.
        """
        try:
            self.dbi.processData(statement,
                                 conn = conn,
                                 transaction = transaction)
        except Exception as e:
            msg = WMEXCEPTION['WMCORE-2'] + '\n\n' + \
                  str(statement) + '\n\n' + str(e)
            self.logger.debug(msg)
            raise WMException(msg, 'WMCORE-2')

    def execute(self, conn = None, transaction = False):
        """
        _execute_

        Create tables, indexes and constraints and run the permanent-data
        inserts, in that order.  Table creation iterates the keys of
        self.create in sorted order, which lets schemas control the order in
        which dependent tables are created; the other dictionaries run in
        their natural iteration order.  (The four previously duplicated
        try/except loops are factored into _processStatement.)
        """
        # create tables (sorted so key names can impose creation order)
        for key in sorted(self.create.keys()):
            self._processStatement(self.create[key], conn, transaction)
        # create indexes
        for key in self.indexes:
            self._processStatement(self.indexes[key], conn, transaction)
        # set constraints
        for key in self.constraints:
            self._processStatement(self.constraints[key], conn, transaction)
        # insert permanent data
        for key in self.inserts:
            self._processStatement(self.inserts[key], conn, transaction)
        return True

    def __str__(self):
        """
        _str_

        Return a well formatted text representation of the schema held in the
        self.create, self.constraints, self.inserts, self.indexes dictionaries.
        """
        string = ''
        for statements in (self.create, self.constraints, self.inserts, self.indexes):
            for key in statements:
                string = string + statements[key].lstrip() + '\n'
        return string
| true |
36f9c7f3ff441ec16abbd0001d760f77fd83eaab | Python | JBXVI/Python3HK-JBXVI | /ARP/NETSCANNER/netscanner.py | UTF-8 | 427 | 2.859375 | 3 | [] | no_license | #this program will print ip of all ip in the range
import scapy.all as scapy
def scan(ip):
    """ARP-scan *ip* (a single address or CIDR range) and print IP/MAC pairs."""
    # Broadcast an ARP "who-has" for the target range at the Ethernet layer.
    request = scapy.ARP(pdst=ip)
    ether = scapy.Ether(dst="ff:ff:ff:ff:ff:ff")
    answered = scapy.srp(ether / request, timeout=1, verbose=False)[0]
    # Each answer is a (sent, received) pair; the reply packet carries the
    # responding host's IP (psrc) and MAC address (hwsrc).
    for _, reply in answered:
        print(reply.psrc, "\t\t\t", reply.hwsrc)


scan("192.168.1.1/24")  # enter any ip range
| true |
23ea952ae182acf52619237bb75e8f37f64f3049 | Python | kaustubhsingh/CodeSomething | /Python/swap.py | UTF-8 | 121 | 3.515625 | 4 | [] | no_license |
def Swap(A, x, y):
    """Exchange the elements of sequence A at positions x and y in place."""
    # Tuple assignment swaps without an explicit temporary variable.
    A[x], A[y] = A[y], A[x]
# Demo: show the list before and after swapping positions 1 and 2.
A = [4, 5, 2, 3]
print(A)
Swap(A, 1, 2)
print(A)
4606614f845efa847a4700ab427fd3b70697edb9 | Python | junglefive/BleProduceTestHelper | /img_conver_to_black.py | UTF-8 | 386 | 2.703125 | 3 | [
"MIT"
] | permissive | from PIL import Image
import numpy as np
img_name = "logo.png"
# Load the image as 8-bit grayscale ('L') and display it for inspection.
img = Image.open(img_name).convert('L')
img.show()
img_arry = np.array(img)
# Binarize in a single vectorized pass instead of the original per-pixel
# Python double loop: pixels at or below the threshold 20 become black (0),
# everything else becomes white (255).  astype keeps the uint8 dtype that
# Image.fromarray expects for mode 'L'.
img_arry = np.where(img_arry <= 20, 0, 255).astype(np.uint8)
img_save = Image.fromarray(img_arry)
# Overwrite the source file with the thresholded result.
img_save.save(img_name)
8642b9ecebeda1afa0e3c9a2820d07e8e17479be | Python | sumz95/HackNotts | /application/forms.py | UTF-8 | 1,404 | 2.53125 | 3 | [] | no_license | from flask.ext.wtf import Form
from wtforms import TextField, validators
class EnterDBInfo(Form):
    # Free-text note to store in the database; required, max 128 characters.
    dbNotes = TextField(label='Items to add to DB',
                        description="db_enter",
                        validators=[validators.required(),
                                    validators.Length(min=0, max=128,
                                                      message=u'Enter 128 characters or less')])
    # Name of the user submitting the note; same length restriction.
    # NOTE(review): the label is copied verbatim from dbNotes -- presumably it
    # should describe the username field; confirm before using it in templates.
    dbUsername = TextField(label='Items to add to DB',
                           description="db_enter",
                           validators=[validators.required(),
                                       validators.Length(min=0, max=128,
                                                         message=u'Enter 128 characters or less')])
class RetrieveDBInfo(Form):
    # How many rows to fetch from the database.
    # NOTE(review): the regex ^\d{1}$ accepts exactly one digit (0-9), while
    # the error message promises "between 0 and 10" -- one of the two needs
    # fixing; confirm the intended range before changing either.
    numRetrieve = TextField(label='Number of DB Items to Get',
                            description="db_get",
                            validators=[validators.required(),
                                        validators.Regexp('^\d{1}$',
                                                          message=u'Enter a number between 0 and 10')])
class LoginForm(Form):
    # Login username; required, max 128 characters.
    # NOTE(review): label/description look copy-pasted from EnterDBInfo rather
    # than written for a login form -- confirm before relying on them in views.
    dbUser = TextField(label='Items to add to DB',
                       description="db_enter",
                       validators=[validators.required(),
                                   validators.Length(min=0, max=128,
                                                     message=u'Enter 128 characters or less')])
    # Login password; same validation as the username field.
    dbPass = TextField(label='Items to add to DB',
                       description="db_enter",
                       validators=[validators.required(),
                                   validators.Length(min=0, max=128,
                                                     message=u'Enter 128 characters or less')])
| true |
57d06b7e8f0a606bda98779191ce0924e5a83c05 | Python | vitorskt/ExerciciosPython | /arquivo pythonico.py | UTF-8 | 113 | 2.65625 | 3 | [] | no_license | with open('a.txt', 'w') as file:
file.write('Linha1\n')
file.write('Linha2\n')
file.write('Linha3\n') | true |
1d07fbec0032cec57478923b5c2825ec115bfa45 | Python | georggoetz/hackerrank-py | /Algorithm/Implementation/bomberman_game.py | UTF-8 | 1,888 | 3.171875 | 3 | [] | no_license | # http://www.hackerrank.com/challenges/bomber-man
def print_grid(grid):
    """Print the grid one row per line, cells joined with no separator."""
    for row in grid:
        print(''.join(row))
def blow_up(grid1, grid2):
    """Detonate every bomb in grid1 onto grid2.

    Each 'O' in grid1 clears its own cell and its four in-bounds orthogonal
    neighbours in grid2.  Dimensions are derived from grid1 itself instead
    of the module-level r/c globals (which only exist when run as a script),
    so the function now also works standalone and on grids of any size.
    grid2 is modified in place; grid1 is only read.
    """
    if not grid1:
        return
    rows, cols = len(grid1), len(grid1[0])
    for i in range(rows):
        for j in range(cols):
            if grid1[i][j] != 'O':
                continue
            grid2[i][j] = '.'
            if i + 1 < rows:
                grid2[i + 1][j] = '.'
            if i - 1 >= 0:
                grid2[i - 1][j] = '.'
            if j + 1 < cols:
                grid2[i][j + 1] = '.'
            if j - 1 >= 0:
                grid2[i][j - 1] = '.'
if __name__ == '__main__':
    # r rows, c columns, n seconds of simulation.
    r, c, n = map(int, input().split())
    # The board is periodic after the first detonation: per the traces below,
    # even seconds show a fully planted grid and odd seconds alternate between
    # two complementary blast patterns with period 4.
    if n > 0 and n % 2 == 0:
        # Even n: every cell has just been replanted with a bomb.
        for i in range(r):
            print('O' * c)
    else:
        initial_grid = []
        grid1 = []
        grid2 = []
        # Read the starting board; grid1/grid2 begin fully planted ('O') and
        # get carved out by successive detonations.
        for i in range(r):
            initial_grid.append([])
            grid1.append([])
            grid2.append([])
            for v in input():
                initial_grid[i].append(v)
                grid1[i].append('O')
                grid2[i].append('O')
        if n < 2:
            # Nothing has exploded yet.
            print_grid(initial_grid)
        else:
            blow_up(initial_grid, grid1)
            if n % 4 == 3:
                # First blast pattern (t = 3, 7, ...).
                print_grid(grid1)
            else:
                # Second blast pattern (t = 1 mod 4, for n >= 5).
                blow_up(grid1, grid2)
                print_grid(grid2)
# t = 0
# .......
# ...O...
# ....O..
# .......
# OO.....
# OO.....
#
# t = 1
# .......
# ...O...
# ....O..
# .......
# OO.....
# OO.....
#
# t = 2
# OOOOOOO
# OOOOOOO
# OOOOOOO
# OOOOOOO
# OOOOOOO
# OOOOOOO
#
# t = 3
# OOO.OOO
# OO...OO
# OOO...O
# ..OO.OO
# ...OOOO
# ...OOOO
#
# t = 4
# OOOOOOO
# OOOOOOO
# OOOOOOO
# OOOOOOO
# OOOOOOO
# OOOOOOO
#
# t = 5
# .......
# ...O...
# ....O..
# .......
# OO.....
# OO.....
#
# t = 6
# OOOOOOO
# OOOOOOO
# OOOOOOO
# OOOOOOO
# OOOOOOO
# OOOOOOO
#
# t = 7
# OOO.OOO
# OO...OO
# OOO...O
# ..OO.OO
# ...OOOO
# ...OOOO
| true |
1f50fda90d402fa151f9cbcf5d374daaa498244a | Python | Jennivine/AIOC | /2016/2 Farmer Drama.py | UTF-8 | 794 | 3.390625 | 3 | [] | no_license | '''Question 2: Farmer Drama'''
#prepares the list, reads in data
with open("farmin.txt","r") as f: #won't run because farmin.txt doesn't exist
a = f.readline()
a = list(f.readline().replace("\n","").split(" "))
a = map(int,a)
def f(xs, n):
    """Perform one step on the list xs; return the updated (xs, step_count)."""
    if xs[0] == xs[-1]:
        # equal ends: remove both, no step counted
        del xs[0]
        del xs[-1]
        return (xs, n)
    if xs[0] > xs[-1]:
        # smaller right end merges into its neighbour
        merged = xs.pop()
        xs[-1] += merged
        return (xs, n + 1)
    # smaller left end merges into its neighbour
    merged = xs.pop(0)
    xs[0] += merged
    return (xs, n + 1)
def solve(xs):
    """Apply f repeatedly until at most one element remains; return step count."""
    steps = 0
    while len(xs) > 1:
        xs, steps = f(xs, steps)
    return steps
#with open("farmout.txt","w") as fileout:
# fileout.write(str(solve(a)))
| true |
468cfe5a3ff3ce9b86a772d20d1d8e9bc21d4d7e | Python | wescottj98/ngram | /ngram.py | UTF-8 | 2,078 | 3.703125 | 4 | [] | no_license | ngram = {}
def addWord(word):
    """Count one occurrence of the n-gram given as a sequence of words."""
    key = " ".join(word)
    # bump the existing count in the global ngram dict, or start at 1
    ngram[key] = ngram.get(key, 0) + 1
wSpace = {}
def sortSpace(string):
    """Bucket an n-gram line into the global wSpace dict by its m-value.

    The m-value is the number of spaces between words, i.e. the n-gram
    order minus one; buckets are keyed by that count as a string.
    """
    spaces = string.count(' ')  # idiomatic replacement for the manual loop
    # append to the existing bucket, creating it on first use
    wSpace.setdefault(str(spaces), []).append(string)
# optional code for removing stop words
rmStopWords = []
stopWords = ["a","is","the"]
def removeStopWords(word):
    """Append word to the global rmStopWords list unless it is a stop word."""
    if word not in stopWords:
        rmStopWords.append(word)
# parameters
# text = "a good puzzle is a fun puzzle"
text = open('data.txt', 'r').read()
m = 2
removeStopWordsFromText = False
# n refers to number of words in text for calculating complexity
# complexity is ~ < 2n*m + 2n
# O complexity is Linear O(n) (assuming n>m)
words = text.split()
cnt = 1
for word in words:
removeStopWords(word)
if removeStopWordsFromText:
words = rmStopWords
# n*m complexity
# go through text and sort each word into a dictionary where
# the word is the key and the value is the number of times that word appears
# the operation stated above takes place m times
while cnt < m+1:
for i in range(0, len(words)):
# condition prevents out of bounds condition and extra counting of the
# last word
if not i+cnt > len(words):
addWord(words[i:i+cnt])
cnt = cnt + 1
# information about ngrams is collected, still needs to be organized to
# fit the specifications for output
ngrams = []
# < n complexity
for key, value in ngram.items():
ngrams.append(key+ " " +str(value))
# < n complexity
for string in ngrams:
sortSpace(string)
# < n*m complexity
for key, value in wSpace.items():
v = sorted(value)
for i in v:
print(i)
| true |
661dfc6ddbddab2f32efe7efb4a4b8bd5a281677 | Python | alexia-avelar/EntregaFinalProyecto_Coatl | /logic/usuario_logic.py | UTF-8 | 2,884 | 2.703125 | 3 | [] | no_license | from core.dx_logic import Logic
from persist_objects.usuario_obj import UsuarioObj
class UsuarioLogic(Logic):
    """Data-access logic for the `usuario` table of the `dbcine` schema.

    SECURITY NOTE(review): every method below builds SQL by interpolating
    caller-supplied values with f-strings, which is open to SQL injection;
    these should be migrated to parameterized queries at the driver layer.
    """
    def __init__(self):
        super().__init__()
        self.tableName = "usuario"
    def getAllUsuarios(self):
        """Return every row of the usuario table as a plain list."""
        usuarioList = super().getAllRows(self.tableName)
        usuarioObjList = []
        for element in usuarioList:
            usuarioObjList.append(element)
        return usuarioObjList
    def createUsuarioObj(self, user, email, password):
        """Build a plain dict representing a user (no database access)."""
        usuarioDict = dict(
            user=user,
            email=email,
            password=password,
        )
        return usuarioDict
    def insertUsuario(self, user, email, password):
        """Insert a new user row; return the driver's non-query result."""
        database = self.database
        # SECURITY: values interpolated into SQL (injection risk)
        sql = (
            f"INSERT INTO `dbcine`.`usuario`(`idusuario`,`user`,`email`, `password`) "
            + f"VALUES(0,'{user}','{email}','{password}');"
        )
        row = database.executeNonQueryRows(sql)
        return row
    def updateUsuarioByEmailPassword(self, user, email, password):
        """Update the user name of the row matching email and password."""
        database = self.database
        # SECURITY: values interpolated into SQL (injection risk)
        sql = (
            "UPDATE `dbcine`.`usuario` "
            + f"SET `user` = '{user}', `email` = '{email}', `password` = '{password}' "
            + f"WHERE `email` = '{email}' and `password` = '{password}';"
        )
        row = database.executeNonQueryRows(sql)
        return row
    def getUserById(self, idusuario):
        """Fetch one user row by primary key."""
        database = self.database
        # SECURITY: value interpolated into SQL (injection risk)
        sql = "SELECT * FROM `dbcine`.`usuario` " + f"where idusuario ={idusuario};"
        usuarioDict = database.executeQueryOneRow(sql)
        return usuarioDict
    def getUserByEmailPassword(self, email, password):
        """Fetch one user row matching the given credentials."""
        database = self.database
        # SECURITY: values interpolated into SQL (injection risk)
        sql = (
            "SELECT * FROM `dbcine`.`usuario` "
            + f"WHERE `email` = '{email}' and `password` = '{password}';"
        )
        usuarioDict = database.executeQueryOneRow(sql)
        return usuarioDict
    def deleteUsuarioByEmailPassword(self, email, password):
        """Delete the row(s) matching the given credentials."""
        database = self.database
        # SECURITY: values interpolated into SQL (injection risk)
        sql = (
            "DELETE FROM `dbcine`.`usuario` "
            + f"WHERE `email` = '{email}' and `password` = '{password}';"
        )
        row = database.executeNonQueryRows(sql)
        return row
    def deleteUsuarioById(self, idusuario):
        """Delete the row with the given primary key."""
        database = self.database
        # SECURITY: value interpolated into SQL (injection risk)
        sql = "DELETE FROM `dbcine`.`usuario` " + f"WHERE idusuario = {idusuario};"
        row = database.executeNonQueryRows(sql)
        return row
    def verificarUsuario(self, email, password):
        """Return the user row for the credentials, or None if not found.

        NOTE(review): the locally built `sql` string is never executed
        (the lookup is delegated to getUserByEmailPassword), and the
        if/else below returns `user` on both branches.
        """
        user = {}
        database = self.database
        sql = f"""SELECT * FROM `dbcine`.`usuario`
        WHERE email='{email}' and password='{password}';"""
        user = self.getUserByEmailPassword(email, password)
        if user is None:
            return user
        else:
            return user
edc461331d1adf5dea12b0c571928e193036709e | Python | pablonavajas/ES2B4Assignment1 | /TaskA3-3 | UTF-8 | 1,415 | 4.34375 | 4 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#Python Template for ES2B4 Assignment
#Name: PABLO NAVAJAS HELGUERO
#ID : 1604648
#Assignment: assignment_a3_3.py
#Section: A3-3
### My code:
#Ask user to enter the value
NumberA= input("Introduce number: ")
#Set the value of the Factorial to 1 to use it in the loop
Factorial=1
#Create a loop so that we can exit it in case of Error
for a in NumberA:
#Check if the Input is a number
try:
NumberB = int(NumberA)
#In case it is not a number, inform user and exit program
except:
print("That is not a NUMBER. You need to enter a valid number. ")
break
#In case it is a number smaller than zero:
if NumberB<0:
#Inform user factorials of negative numbers do not exist and exit program
print("Sorry, factorials of negative numbers do not exist.")
break
#In case it is zero:
elif NumberB==0:
#Inform user that the factorial of 0 is 1, and close program
print("The factorial of 0 is 1.")
break
#In case it is a valid number:
else:
#Create a loop that finds the Factorial of a number
for i in range(1,NumberB + 1):
Factorial = Factorial * i
#Print the solution and close program
print ("The factorial of", NumberA, " is ", Factorial,".")
break
| true |
337cfb34cb1139f2a9f4f490abaa20472d6d8ba5 | Python | veriAjinkya/Peculiar | /evaluator/evaluator.py | UTF-8 | 2,459 | 2.890625 | 3 | [] | no_license | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import logging
import sys
from sklearn.metrics import recall_score, precision_score, f1_score, accuracy_score
def read_answers(filename):
    """Load the gold-label file: one "<id> <label>" pair per line."""
    answers = {}
    with open(filename) as fh:
        for raw in fh:
            key, label = raw.strip().split()
            answers[key] = label
    return answers
def read_predictions(filenames):
    """Load one prediction dict (id -> label) per input file.

    Lines look like "<id> <label>".  When a line mentions 'txt', the id
    is treated as a file path and reduced to its basename without the
    4-character extension.
    """
    all_predictions = []
    for path in filenames:
        current = {}
        with open(path) as fh:
            for raw in fh:
                stripped = raw.strip()
                key, label = stripped.split()
                if 'txt' in stripped:
                    key = key.split('/')[-1][:-4]
                current[key] = label
        all_predictions.append(current)
    return all_predictions
def calculate_scores(answers, predictions):
    """Compute macro Recall/Precision/F1 and Accuracy per prediction set.

    answers: dict id -> gold label; predictions: list of dicts id -> label.
    Exits the process if any gold id is missing from a prediction set.
    Returns a list with one score dict per prediction set.
    """
    scores = []
    for prediction in predictions:
        y_trues, y_preds = [], []
        for key in answers:
            if key not in prediction:
                # NOTE(review): keys are plain id strings here, so
                # key[0]/key[1] print the first two characters, not a pair
                logging.error(
                    "Missing prediction for ({},{}) pair.".format(key[0], key[1]))
                sys.exit()
            y_trues.append(answers[key])
            y_preds.append(prediction[key])
        score = {}
        score['Recall'] = recall_score(
            y_trues, y_preds, average='macro')
        # NOTE(review): key name 'Prediction' presumably means 'Precision'
        score['Prediction'] = precision_score(
            y_trues, y_preds, average='macro')
        score['F1'] = f1_score(
            y_trues, y_preds, average='macro')
        score["Accuracy"] = accuracy_score(y_trues, y_preds)
        scores.append(score)
    return scores
def evaluate(answers_file, predictions_files):
    """Score every predictions file against the gold answers and print results."""
    answers = read_answers(answers_file)
    scores = calculate_scores(answers, read_predictions(predictions_files))
    # scores is aligned one-to-one with predictions_files
    for path, score in zip(predictions_files, scores):
        print(path)
        print(score)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(
description='Evaluate leaderboard predictions for smart contract dataset.')
parser.add_argument('--answers', '-a',
help="filename of the labels, in txt format.")
parser.add_argument('--predictions', '-p', nargs='+',
help="filenames of the leaderboard predictions, in txt format.")
args = parser.parse_args()
evaluate(args.answers, args.predictions)
| true |
decd00b1a64bcdb40c64fa0f3674353b2cfce53b | Python | zxwtry/OJ | /python/leetcode/P140_WordBreakII.py | UTF-8 | 912 | 3.15625 | 3 | [] | no_license | #coding=utf-8
'''
url: leetcode.com/problems/word-break-ii
@author: zxwtry
@email: zxwtry@qq.com
@date: 2017年5月30日
@details: Solution: 75ms 71.43%
'''
class Solution(object):
    def search(self, s, d, m):
        """Return every sentence that segments s using words from d.

        Results per suffix are memoized in m (suffix -> list of sentences).
        """
        if len(s) == 0:
            return [""]
        if s in m:
            return m[s]
        sentences = []
        remaining = len(s)
        for word in d:
            if not s.startswith(word):
                continue
            # no separator when the word consumes the whole suffix
            sep = "" if remaining == len(word) else " "
            for tail in self.search(s[len(word):], d, m):
                sentences.append(word + sep + tail)
        m[s] = sentences
        return sentences
    def wordBreak(self, s, d):
        """
        :type s: str
        :type d: List[str]
        :rtype: list[str]
        """
        return self.search(s, d, {})
if __name__ == "__main__":
s = "catsanddog"
d = ["cat", "cats", "and", "sand", "dog"]
print(Solution().wordBreak(s, d)) | true |
086dfec9df803181878eb39f8211e194a5a579ea | Python | mgbo/My_Exercise | /2018/Other/Строки_String/str_3.py | UTF-8 | 190 | 3.171875 | 3 | [] | no_license |
before,s,after = "I have 2 small bombs and bomds big bomb".partition("bomb")
print (before)
print (s)
print (after)
n = input()
n_1 = n.split()
print (n_1)
n_2 = n.split(',')
print (n_2) | true |
62addc482726cf37c8aa8a13c1ee113cf74fe354 | Python | vitorbarbosa123/lp1-python | /Semana4/Miniteste/acertou_menos/acerto.py | UTF-8 | 358 | 3.578125 | 4 | [] | no_license | quantidade_alunos = int(input())
acertos = 0
acertou = '.'
erros = 0
for i in range(quantidade_alunos):
performace_aluno = input()
nota = 0
for j in performace_aluno:
if j != acertou:
nota += 1
if nota > erros:
erros = nota
acertos = i + 1
print(f'O aluno {acertos} errou {erros} teste(s).') | true |
582a8694ce26b62fa4c484b0304dbf9a2a51ef03 | Python | michizhou/automatic-octo-waddle | /python-projects/homework-2/myz227_hw2_q3.py | UTF-8 | 299 | 3.34375 | 3 | [] | no_license | def factors(num):
big_factor_lst = []
for factor in range(1,int(num ** (1/2.0)) + 1):
if num % factor == 0:
yield factor
if factor ** 2 != num:
big_factor_lst.append(num // factor)
for index in reversed(big_factor_lst):
yield index | true |
87d42157dbfc83ad325ddc8d3553cf0a2796f388 | Python | rajan3010/leetcode | /word_search.py | UTF-8 | 1,320 | 3.890625 | 4 | [] | no_license | def backtrack(r, c, word, board):
rows=len(board)
cols=len(board[0])
#print(word)
#Check if word has been completely checked
if len(word)==0:
return True #Nothing else to be checked
#Check all the boundary conditions and grid value checks
if (r<0 or r >=rows) or (c<0 or c>=cols) or (board[r][c]!=word[0]):
return False
#Now since the grid letter is same as the first letter in the word, mark it with a special character to avoid confusions during backtracking
#print(board[r][c])
temp=board[r][c]
board[r][c]='*'
ret=False
#continue with backtracking for the adjacent cells
for x,y in [(-1,0),(0,1),(1,0),(0,-1)]:
ret=backtrack(r+x, c+y, word[1:], board)
if ret:
break
#Replace the changed special character cell back to original
board[r][c]=temp
return ret
def wordSearch(board, word):
    """Return True if word can be traced in board via 4-adjacent cells."""
    total_rows, total_cols = len(board), len(board[0])
    # try every cell as a potential starting point for the backtracking search
    for row in range(total_rows):
        for col in range(total_cols):
            if backtrack(row, col, word, board):
                return True
    return False
board = [["A","B","C","E"],["S","F","C","S"],["A","D","E","E"]]
word = "ABCB"
print(wordSearch(board, word)) | true |
37be35c37c27a3d02267b370fd61504254936f65 | Python | iaroslav-ai/noxer | /noxer/gm/metrics.py | UTF-8 | 5,869 | 2.9375 | 3 | [
"MIT"
] | permissive | """
Evaluation metrics for quality of outputs of generative models.
"""
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
from sklearn.dummy import DummyClassifier
from sklearn.svm import SVC, LinearSVC
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.metrics import r2_score
import numpy as np
def condidional_similarity(G_true, G_pred, C_true=None, C_pred=None,
                           cross_testing=False):
    """Compares the similarity of two distributions using a set
    of samples from "truth" distribution G_true and "predicted"
    distribution G_pred. Is useful for estimation of quality
    of GAN's and VAE's and the like.
    Parameters
    ----------
    G_true : array-like of shape = (n_samples, n_outputs)
        Samples from "ground truth" distribution.
    G_pred : array-like of shape = (n_samples, n_outputs)
        Samples from an estimated sampler.
    C_true : array-like of shape = (n_samples, n_features)
        Condition for "ground truth" distribution.
    C_pred : array-like of shape = (n_samples, n_features)
        Condition for estimated sampler.
    cross_testing : bool, optional
        Whether to use cross-validation like approach for testing.
    Returns
    -------
    z : float
        The similarity score for two distributions, calculated
        from the generalization estimate of the model that is
        trained to distinguish between two sets of samples.
    """
    # NOTE(review): function name contains a typo ("condidional"), kept for
    # backward compatibility; the `cross_testing` flag is currently unused.
    pipe = Pipeline([
        ('scaler', StandardScaler()),
        ('model', DummyClassifier())
    ])
    dummy_search = {
        'model__strategy': ["stratified", "most_frequent", "uniform"]
    }
    lin_search = {
        'model': [LinearSVC()],
        'model__penalty': ['l1', 'l2'],
        'model__dual': [False],
        'model__C': 10 ** np.linspace(-10, 10),
        'model__max_iter': [10000],
    }
    gb_search = {
        'model': [GradientBoostingClassifier()],
        'model__learning_rate': [1.0, 0.1, 0.01, 0.001],
        'model__n_estimators': [2 ** i for i in range(11)],
    }
    # grid search picks the best of dummy / linear SVM / gradient boosting
    model = GridSearchCV(
        pipe,
        [dummy_search, lin_search, gb_search],  # svc_search
        n_jobs=-1,
        verbose=0
    )
    # normalize inputs: float arrays, at least 2-dimensional
    a = [G_true, G_pred, C_true, C_pred]
    for i, v in enumerate(a):
        if v is None:
            continue
        v = np.array(v)
        v = v.astype('float')
        if len(v.shape) == 1:
            v = v[:, np.newaxis]
        a[i] = v
    G_true, G_pred, C_true, C_pred = a
    X = np.row_stack([G_true, G_pred])
    X = X.reshape((len(X), -1))
    # add condition to the discriminatory features
    if C_true is not None and C_pred is not None:
        C = np.row_stack([C_true, C_pred])
        C = C.reshape((len(C), -1))
        X = np.column_stack([X, C])
    # label 1 = "true" sample, 0 = generated sample
    y = np.concatenate([
        np.ones(len(G_true)),
        np.zeros(len(G_pred))
    ])
    X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.5, stratify=y)
    score = model.fit(X_train, y_train).score(X_test, y_test)
    # scale the error to be in range from 0.0 to 1.0
    # (scale = majority-class fraction, the accuracy of a constant classifier)
    U, C = np.unique(y_test, return_counts=True)
    scale = max(C * 1.0) / sum(C * 1.0)
    score = (1.0 - score)/scale
    score = min(1.0, score)
    return score
def distribution_similarity(X_true, X_pred, cross_testing=False):
    """Compares the similarity of two distributions using a set
    of samples from "truth" distribution X_true and "predicted"
    distribution X_pred. Is useful for estimation of quality
    of GAN's and VAE's and the like.
    Parameters
    ----------
    X_true : array-like of shape = (n_samples, n_outputs)
        Samples from "ground truth" distribution.
    X_pred : array-like of shape = (n_samples, n_outputs)
        Samples from an estimated sampler.
    cross_testing : bool, optional
        Whether to use cross-validation like approach for testing.
    Returns
    -------
    z : float
        The similarity score for two distributions, calculated
        from the generalization estimate of the model that is
        trained to distinguish between two sets of samples.
    """
    # NOTE(review): `cross_testing` is currently unused.
    pipe = Pipeline([
        ('scaler', StandardScaler()),
        ('model', DummyClassifier())
    ])
    dummy_search = {
        'model__strategy': ["stratified", "most_frequent", "uniform"]
    }
    lin_search = {
        'model': [LinearSVC()],
        'model__penalty': ['l1', 'l2'],
        'model__dual': [False],
        'model__C': 10 ** np.linspace(-10, 10),
        'model__max_iter': [10000],
    }
    gb_search = {
        'model': [GradientBoostingClassifier()],
        'model__learning_rate': [1.0, 0.1, 0.01, 0.001],
        'model__n_estimators': [2 ** i for i in range(11)],
    }
    # grid search picks the best of dummy / linear SVM / gradient boosting
    model = GridSearchCV(
        pipe,
        [dummy_search, lin_search, gb_search],  # svc_search
        n_jobs=-1,
        verbose=0
    )
    X = np.row_stack([X_true, X_pred])
    X = X.reshape((len(X), -1))
    # label 1 = "true" sample, 0 = generated sample
    y = np.concatenate([
        np.ones(len(X_true)),
        np.zeros(len(X_pred))
    ])
    X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.5, stratify=y)
    score = model.fit(X_train, y_train).score(X_test, y_test)
    # scale the error to be in range from 0.0 to 1.0
    # (scale = majority-class fraction, the accuracy of a constant classifier)
    U, C = np.unique(y_test, return_counts=True)
    scale = max(C * 1.0) / sum(C * 1.0)
    score = (1.0 - score)/scale
    score = min(1.0, score)
    return score
def conditional_wasserstein_distance(C_true, G_true, C_fake, G_fake, clazz):
model = clazz
if __name__ == "__main__":
# example usage
X1 = np.random.randn(512,2)
for offset in [0.1, 0.2, 0.4, 0.8, 1.6, 3.2]:
X2 = np.random.randn(512,2) + offset
sim = distribution_similarity(X1, X2)
print(sim)
| true |
f8e44ca6c8fcd6267a741ec3ec5779ee01faf489 | Python | jw5971/PythonApp_for_DataProcessing | /examples/etl_feature_example.py | UTF-8 | 6,878 | 2.703125 | 3 | [] | no_license | import pandas as pd
import sys
import frelabpy.utils.misc_util as miscu
SAMPLE_ASSIGN_CONFIG = {
"col_const": {
"ASSIGN_CURRENCY": "USD",
"ASSIGN_ACCOUNT": "SFELDMAN",
"ASSIGN_COMMENTS": ""
},
"col_var": None,
"plugin": None
}
SAMPLE_DATAFRAME = pd.DataFrame({'TYPE': ['PDF', 'XLS'],
'ABBREV': ['NY', 'LDN']})
class EtlFeature(object):
    """Base class for ETL features: holds a name and a dict configuration."""

    def __init__(self, name, config):
        """Store the feature name and its configuration dict."""
        self._name = name
        self._config = config

    @property
    def config(self):
        """The feature configuration dict."""
        return self._config

    @config.setter
    def config(self, value):
        if not isinstance(value, dict):
            raise ValueError("Provided config argument is of incorrect type")
        # BUG FIX: was `self_config = value`, a local variable that silently
        # discarded the new value; assign to the instance attribute instead.
        self._config = value

    @property
    def name(self):
        """The feature name (read-only)."""
        return self._name

    def identify(self):
        """Print a short description of this feature instance."""
        print(f'The name of this ETL feature instance is: {self._name}')

    def run(self):
        """Default no-op run; subclasses override with real ETL work."""
        print("Nothing to do by calling base")
class AssignEtlFeatureSimple(EtlFeature):
    """Example subclass that inherits all EtlFeature behavior unchanged."""
    pass
class AssignEtlFeatureOverrideMethod(EtlFeature):
    """Example subclass that overrides identify() with its own message."""
    def identify(self):
        print(f'The name of this Assign ETL feature instance is: {self._name}')
class AssignEtlFeature(EtlFeature):
    """ETL feature that adds configured constant-valued columns to a dataframe."""
    def __init__(self, name, config, df):
        # df is the dataframe the feature will mutate on run()
        super().__init__(name, config)
        self._df = df
    def run(self):
        """
        ETL feature to assign new columns to a given dataframe
        :return: df_target: pd.DataFrame; Resulted dataframe
        """
        df_target = self._df
        length = len(df_target.index)
        config_assign = dict()
        # Assign new columns, using static values.
        config_assign_const = miscu.eval_elem_mapping(self._config, 'col_const')
        if config_assign_const and isinstance(config_assign_const, dict):
            config_assign.update(config_assign_const)
        # Assign new columns, using variable values.
        config_assign_var = miscu.eval_elem_mapping(self._config, 'col_var')
        if config_assign_var and isinstance(config_assign_var, dict):
            config_assign.update(config_assign_var)
        # broadcast each configured value down the whole column
        for col_name, col_value in config_assign.items():
            df_target[col_name] = [col_value] * length
        return df_target
class PlugableFeature(object):
    """Mixin that resolves and applies an optional plugin callable from config."""
    def __init__(self, config):
        # resolve the callable referenced by the "plugin" config entry (may be None)
        self._plugin = miscu.eval_func(config, "plugin")
    @property
    def plugin(self):
        """The resolved plugin callable, or None when not configured."""
        return self._plugin
    def run(self, df):
        """Apply the plugin to df if one is configured; otherwise return df unchanged."""
        df_target = df
        if self._plugin:
            df_target = self._plugin(df)
        return df_target
class PlugableAssignEtlFeature(EtlFeature, PlugableFeature):
    """Assign feature that also applies an optional plugin to the result."""

    def __init__(self, name, config, df):
        EtlFeature.__init__(self, name, config)
        PlugableFeature.__init__(self, config)
        self._df = df

    def run(self):
        """
        ETL feature to assign new columns to a given dataframe, then apply
        the configured plugin (if any) to the result.
        :return: df_target: pd.DataFrame; Resulted dataframe
        """
        df_target = self._df
        length = len(df_target.index)
        config_assign = dict()
        # Assign new columns, using static values.
        config_assign_const = miscu.eval_elem_mapping(self._config, 'col_const')
        if config_assign_const and isinstance(config_assign_const, dict):
            config_assign.update(config_assign_const)
        # Assign new columns, using variable values.
        config_assign_var = miscu.eval_elem_mapping(self._config, 'col_var')
        if config_assign_var and isinstance(config_assign_var, dict):
            config_assign.update(config_assign_var)
        for col_name, col_value in config_assign.items():
            df_target[col_name] = [col_value] * length
        # BUG FIX: PlugableFeature.run is an instance method; the original
        # called it without `self`, passing df_target as `self` and raising
        # a TypeError for the missing `df` argument.
        df_target = PlugableFeature.run(self, df_target)
        return df_target
class EtlFeatureWithId(object):
    """ETL feature variant that tags each instance with an auto-incremented id."""

    next_id = 10  # class-level counter shared by all instances

    def __init__(self, name, config):
        self._name = name
        self._config = config
        self._id = EtlFeatureWithId.next_id
        EtlFeatureWithId.next_id += 1

    @property
    def config(self):
        """The feature configuration dict."""
        return self._config

    @config.setter
    def config(self, value):
        if not isinstance(value, dict):
            raise ValueError("Provided config argument is of incorrect type")
        # BUG FIX: was `self_config = value` (a discarded local variable).
        self._config = value

    @property
    def name(self):
        """The feature name (read-only)."""
        return self._name

    def identify(self):
        """Print the feature name together with its instance id."""
        print(f'{self._name} ETL feature instance with id {self._id}')

    def run(self):
        """Default no-op run; subclasses override."""
        print("Nothing to do by calling base")
class EtlFeatureWithStaticId(object):
    """ETL feature variant whose auto-increment id is drawn via helper methods."""

    next_id = 10  # class-level counter shared by all instances

    @staticmethod
    def _get_next_id():
        """Return the current counter value and advance it (staticmethod form)."""
        target = EtlFeatureWithStaticId.next_id
        EtlFeatureWithStaticId.next_id += 1
        return target

    @classmethod
    def _get_next_id2(cls):
        """classmethod twin of _get_next_id operating on the same counter."""
        target = cls.next_id
        cls.next_id += 1
        return target

    def __init__(self, name, config):
        self._name = name
        self._config = config
        self._id = EtlFeatureWithStaticId._get_next_id()

    @property
    def config(self):
        """The feature configuration dict."""
        return self._config

    @config.setter
    def config(self, value):
        if not isinstance(value, dict):
            raise ValueError("Provided config argument is of incorrect type")
        # BUG FIX: was `self_config = value` (a discarded local variable).
        self._config = value

    @property
    def name(self):
        """The feature name (read-only)."""
        return self._name

    def identify(self):
        """Print the feature name together with its instance id."""
        print(f'{self._name} ETL feature instance with id {self._id}')

    def run(self):
        """Default no-op run; subclasses override."""
        print("Nothing to do by calling base")
def main(argv):
try:
etl_feature = EtlFeature("EtlFeature", SAMPLE_ASSIGN_CONFIG)
etl_feature.identify()
etl_feature.run()
etl_feature_simple = AssignEtlFeatureSimple("AssignEtlFeatureSimple", SAMPLE_ASSIGN_CONFIG)
etl_feature_simple.identify()
etl_feature_simple.run()
etl_feature_override = AssignEtlFeatureOverrideMethod("AssignEtlFeatureOverride", SAMPLE_ASSIGN_CONFIG)
etl_feature_override.identify()
etl_feature_override.run()
etl_feature_assign = AssignEtlFeature("AssignEtlFeatureOverride", SAMPLE_ASSIGN_CONFIG, SAMPLE_DATAFRAME)
etl_feature_assign.identify()
df_target = etl_feature_assign.run()
etl_feature_assign_plugable = PlugableAssignEtlFeature("PlugableAssignEtlFeature", SAMPLE_ASSIGN_CONFIG,
SAMPLE_DATAFRAME)
etl_feature_assign_plugable.identify()
#df_target = etl_feature_assign_plugable.run()
etl_feature = EtlFeatureWithId("EtlFeatyreWithId", SAMPLE_ASSIGN_CONFIG)
etl_feature.identify()
print("Exit")
except Exception as gen_exc:
print(gen_exc)
if __name__ == '__main__':
# Call main process.
main(sys.argv[1:])
| true |
cf110c26ca3df3cc5423b5dd8fe0c19d04d1085f | Python | AEljarrat/nionutils | /nion/utils/Process.py | UTF-8 | 3,039 | 3.109375 | 3 | [
"Apache-2.0"
] | permissive | """
Utility classes for implementing task queues and sets.
"""
# standard libraries
import asyncio
import copy
import queue
import threading
# third party libraries
# None
# local libraries
# None
class TaskQueue(queue.Queue):
    """FIFO queue of zero-argument callables."""

    def perform_tasks(self):
        """Run (and remove) every task that was queued when this call started."""
        pending = self.qsize()
        while pending > 0 and not self.empty():
            try:
                task = self.get(False)
            except queue.Empty:
                pass
            else:
                task()
                self.task_done()
                pending -= 1

    def clear_tasks(self):
        """Discard every task queued at call time without running it."""
        pending = self.qsize()
        while pending > 0 and not self.empty():
            try:
                self.get(False)
            except queue.Empty:
                pass
            else:
                self.task_done()
                pending -= 1
# keeps a set of tasks to do when perform_tasks is called.
# each task is associated with a key. overwriting a key
# will discard any task currently associated with that key.
class TaskSet(object):
    """Pending tasks keyed by name; adding with an existing key replaces it."""

    def __init__(self):
        self.__tasks = dict()
        self.__lock = threading.RLock()

    def add_task(self, key, task):
        """Register task under key, replacing any task already stored there."""
        with self.__lock:
            self.__tasks[key] = task

    def clear_task(self, key):
        """Drop the task stored under key, if any."""
        with self.__lock:
            self.__tasks.pop(key, None)

    def perform_tasks(self):
        """Run and forget every currently registered task."""
        with self.__lock:
            pending = dict(self.__tasks)
            self.__tasks.clear()
        for task in pending.values():
            task()
def close_event_loop(event_loop: asyncio.AbstractEventLoop) -> None:
    """Shut down an asyncio event loop cleanly.

    Stops the loop, awaits all outstanding tasks, explicitly shuts down the
    default executor (works around CPython issue 28464), then closes the loop.
    """
    # give event loop one chance to finish up
    event_loop.stop()
    event_loop.run_forever()
    # wait for everything to finish, including tasks running in executors
    # this assumes that all outstanding tasks finish in a reasonable time (i.e. no infinite loops).
    # asyncio.all_tasks exists on newer Pythons; fall back to the old
    # Task.all_tasks classmethod otherwise
    all_tasks_fn = getattr(asyncio, "all_tasks", None)
    if not all_tasks_fn:
        all_tasks_fn = asyncio.Task.all_tasks
    tasks = all_tasks_fn(loop=event_loop)
    if tasks:
        gather_future = asyncio.gather(*tasks, return_exceptions=True)
    else:
        # work around bad design in gather (always uses global event loop in Python 3.8)
        gather_future = event_loop.create_future()
        gather_future.set_result([])
    event_loop.run_until_complete(gather_future)
    # due to a bug in Python libraries, the default executor needs to be shutdown explicitly before the event loop
    # see http://bugs.python.org/issue28464 . this bug manifests itself in at least one way: an intermittent failure
    # in test_document_controller_releases_itself. reproduce by running the contents of that test in a loop of 100.
    _default_executor = getattr(event_loop, "_default_executor", None)
    if _default_executor:
        _default_executor.shutdown()
    event_loop.close()
| true |
9216b22aee0f1151422372a2918401e921df6220 | Python | shentaowang/KalmanGui | /filter/kalman/tests/test02_verification.py | UTF-8 | 1,846 | 2.65625 | 3 | [
"MIT"
] | permissive | from filterpy.kalman import UnscentedKalmanFilter as UKF
from filterpy.kalman import MerweScaledSigmaPoints
import numpy as np
observe_file = 'data02_observe.txt';
true_file = 'data02_true.txt'
fp = open(observe_file,'r')
observe_data = []
for line in fp.readlines():
line_data = line.split('\t')
line_data = line_data[0:len(line_data)-1]
line_data = [float(i) for i in line_data]
observe_data.append(line_data)
observe_data = np.array(observe_data)
print('the observe data shape is:')
print(observe_data.shape)
fp = open(true_file,'r')
true_data = []
for line in fp.readlines():
line_data = line.split('\t')
line_data = line_data[0:len(line_data)-1]
line_data = [float(i) for i in line_data]
true_data.append(line_data)
true_data = np.array(true_data)
print('the ture data shape is:')
print(true_data.shape)
sigmas = MerweScaledSigmaPoints(4, alpha=0.01, beta=2., kappa=0)
def trans_f(x, dt):
    """Constant-velocity transition: advance state [px, py, vx, vy] by dt."""
    transition = np.array([[1, 0, dt, 0],
                           [0, 1, 0, dt],
                           [0, 0, 1, 0],
                           [0, 0, 0, 1]])
    return transition @ x
def trans_h(x):
    """Measurement model: map [px, py, vx, vy] to [range, bearing, range-rate]."""
    px, py, vx, vy = x[0], x[1], x[2], x[3]
    range_sq = px * px + py * py
    rng = np.sqrt(range_sq)
    bearing = np.arctan(py / px)
    range_rate = (px * vx + py * vy) / range_sq
    return np.array([rng, bearing, range_rate])
sigmas = MerweScaledSigmaPoints(4, alpha=0.01, beta=2., kappa=0)
ukf = UKF(dim_x=4, dim_z=3, fx=trans_f,
hx=trans_h, dt=0.106, points=sigmas)
ukf.x = np.array([8.46642, 0, 0, 0])
ukf.P = np.eye(4,4)
ukf.R = np.diag([0.09,0.009, 0.09])
ukf.Q = np.diag([0, 0, 0.09, 0.09])
filtered_data = []
for z in observe_data:
ukf.predict()
ukf.update(z)
filtered_data.append(ukf.x.copy())
filtered_data = np.array( filtered_data)
print('the filtered data shape is:')
print( filtered_data.shape)
data_gap = filtered_data - true_data
MSE_X_0 = np.dot(data_gap[:,0].T,data_gap[:,0])/data_gap.shape[0]
print("MSE between filtered an true dim:%d is:%f")%(0,MSE_X_0) | true |
1f7353638464715ccc16f5bce6723c4f4852f6b5 | Python | daniapm/Python-Proyect | /05_lectura_diccionario_ejemplo.py | UTF-8 | 417 | 3.171875 | 3 | [] | no_license | # LEER UN CSV ESCRITO EN FORMA DE DICCIONARIO
# importamos el modulo de csv para trabajar archivos de este tipo
import csv
with open("productos.csv", "r", newline="\n", encoding="utf8") as csvfile:
reader = csv.DictReader(csvfile)
for producto in reader:
print(producto["nombre"], producto["codigo"], producto["tipoproducto"], producto["tipoflete"], producto["cantidad"], producto["precio"]) | true |
245b2bf58268b9182a62eb44e18145abcd1f6f1b | Python | xiang-daode/Python3_codes | /T028_5个人年龄.py | UTF-8 | 577 | 4.09375 | 4 | [] | no_license | # 在这里写上你的代码 :-)
'''
题目028:有5个人坐在一起,
问第五个人多少岁?他说比第4个人大两岁。
问第4个人,他说比第3个人大两岁。
问第3个人,又说比第2个人大两岁。
问第2个人,说比第1个人大两岁。
最后问第一个人,他说是10岁。
请问第五个人多大?
'''
def age(x):
    """Age of the x-th person: the first is 10, each next is 2 years older."""
    if x <= 1:
        return 10
    return age(x - 1) + 2
def tm028():
    '''
    Note: this is the recursive approach from the official solution;
    without recursion the problem is just mental arithmetic.
    Prints the fifth person's age.
    '''
    print(age(5))
tm028()
| true |
168c7a5cdb4c4a215f3ea22ed3e2484703c1e771 | Python | Prev/leetcode | /problems/48-rotate-image/solution-quadratic.py | UTF-8 | 1,347 | 3.625 | 4 | [] | no_license | """
Problem: https://leetcode.com/problems/permutations/
Author: Youngsoo Lee
Time complexity: O(n^2)
Spcae complexity: O(1)
"""
from typing import List
class Solution:
    def _dfs(self, matrix: List[List[int]], i: int, j: int,
             init_i: int, init_j: int) -> int:
        """Rotate the 4-cycle containing (i, j) one step clockwise.

        Walks the cycle (i, j) -> (j, n-1-i) -> ... until it returns to the
        starting cell (init_i, init_j); on unwind, each value is moved to
        its destination, and the value that maps onto the start cell is
        written there last.  Returns that value.
        """
        n = len(matrix)
        ni, nj = j, (n - i - 1)  # clockwise destination of (i, j)
        if (ni, nj) == (init_i, init_j):
            return matrix[i][j]
        ret = self._dfs(matrix, ni, nj, init_i, init_j)
        matrix[ni][nj] = matrix[i][j]
        if (i, j) == (init_i, init_j):
            matrix[i][j] = ret
        return ret

    def rotate(self, matrix: List[List[int]]) -> None:
        """
        Rotate the n x n matrix 90 degrees clockwise.
        Do not return anything, modify matrix in-place instead.
        """
        n = len(matrix)
        # Starting a cycle from each top-edge cell of each ring (excluding
        # the last cell of the edge) covers every cell exactly once.
        # BUG FIX: removed the stray debug print(i, i + j) from the loop.
        for i in range((n + 1) // 2):
            for j in range(n - 2 * i - 1):
                self._dfs(matrix, i, i + j, i, i + j)
if __name__ == '__main__':
s = Solution()
def test_solution(matrix, expected):
s.rotate(matrix)
assert matrix == expected
test_solution([[1,2,3],[4,5,6],[7,8,9]], [[7,4,1],[8,5,2],[9,6,3]])
test_solution([[5,1,9,11],[2,4,8,10],[13,3,6,7],[15,14,12,16]],
[[15,13,2,5],[14,3,4,1],[12,6,8,9],[16,7,10,11]])
test_solution([[1]], [[1]])
test_solution([[1,2],[3,4]], [[3,1],[4,2]])
| true |
7e016585c3769a7a51b1c092baea4caf5df884ee | Python | sfujiwara/ProjectEuler | /Python/problem090.py | UTF-8 | 1,522 | 3.59375 | 4 | [] | no_license | # -*- coding: utf-8 -*-
import itertools
def is_possible(d1, d2):
    """Return True if dice d1, d2 (10-entry boolean lists indexed by digit)
    can display every two-digit square 01, 04, 09, 16, 25, 36, 49, 64, 81,
    with 6 and 9 treated as interchangeable."""
    def shows(die, digit):
        # a face showing 6 or 9 can stand for either digit
        if digit in (6, 9):
            return die[6] or die[9]
        return die[digit]

    squares = [(0, 1), (0, 4), (0, 9), (1, 6), (2, 5),
               (3, 6), (4, 9), (6, 4), (8, 1)]
    for a, b in squares:
        # the pair must be displayable with the digits in either order
        if not ((shows(d1, a) and shows(d2, b)) or
                (shows(d1, b) and shows(d2, a))):
            return False
    return True
if __name__ == '__main__':
ans = 0
for i in itertools.combinations(range(10), 6):
for j in itertools.combinations(range(10), 6):
dice1 = [False] * 10
dice2 = [False] * 10
for k in i:
dice1[k] = True
for k in j:
dice2[k] = True
if is_possible(dice1, dice2):
ans += 1
print 'Answer:', ans / 2
| true |
716a89c746f25ef4dec76c9d48fb0b10b6aeee9c | Python | oliwiam96/Human-computer-interaction | /genderRecognitionByVoice.py | UTF-8 | 2,279 | 2.703125 | 3 | [] | no_license | from __future__ import division
import os
import re
import sys
import scipy.io.wavfile
from scipy.fftpack import fft
import wave
import scipy.signal as decim
from scipy.io import wavfile
def testForAll():
ile_ok = 0
ile = 0
for file in os.listdir('./'):
if re.match('^.*\.wav$', file) and not re.match('err.wav$', file) and not re.match('kopia.wav$', file):
decision = WhichSex(file)
name = file
name = name[:-4]
name = name[-1]
if name == decision:
ile_ok += 1
ile += 1
print("ile ok ", ile_ok)
print("ile w sumie ", ile)
def WhichSex(nazwa):
    """Classify the speaker of WAV file `nazwa` as male or female.

    Re-writes the file to a scratch copy (works around files wave can
    open but scipy cannot), takes the FFT magnitude of the first channel,
    zeroes bins below 60 Hz (not a human voice), multiplies the spectrum
    with its decimations by 2, 3 and 4 (harmonic product spectrum), and
    reads the peak frequency.  Returns 'M' (male) when the dominant
    frequency is below 175 Hz, otherwise 'K' (female).
    """
    copyName = 'copy.wav'
    if nazwa == copyName:
        copyName = "copy2.wav"
    # re-writing via the wave module fixes files scipy refuses to open
    waveFile = wave.open(nazwa, 'r')
    copy = wave.open(copyName, 'w')
    copy.setnchannels(waveFile.getnchannels())
    copy.setsampwidth(waveFile.getsampwidth())
    copy.setframerate(waveFile.getframerate())
    copy.setnframes(waveFile.getnframes())
    copy.writeframes(waveFile.readframes(waveFile.getnframes()))
    copy.close()
    w, signal = scipy.io.wavfile.read(copyName)  # w = sampling frequency
    if waveFile.getnchannels() > 1:
        signal = [s[0] for s in signal]  # keep only the first channel
    f_signal = fft(signal)
    f_signal = abs(f_signal)
    #print(f_signal)
    for i in range(len(f_signal)):
        if i/len(signal)*w < 60: # for freq < 60 it is definetely not a human voice- set to zero
            f_signal[i] = 0
    f_signal_2 = decim.decimate(x = f_signal, q = 2, zero_phase = True)
    f_signal_3 = decim.decimate(x = f_signal, q = 3, zero_phase = True)
    f_signal_4 = decim.decimate(x = f_signal, q = 4, zero_phase = True)
    f_wynik = f_signal_4
    for i in range(len(f_signal_4)):
        f_wynik[i] = f_signal[i] * f_signal_2[i] * f_signal_3[i] * f_signal_4[i]
    indexOfMaxi = 0
    maxi = f_wynik[0]
    for i in range(len(f_wynik)):
        if f_wynik[i] > maxi:
            maxi = f_wynik[i]
            indexOfMaxi = i
    f_voice = (indexOfMaxi/len(signal))* w  # dominant voice frequency (result)
    # BUG FIX: the original called waveFile.close() after the return
    # statements (unreachable), leaking the handle; close it before deciding.
    waveFile.close()
    if f_voice < 175:  # decision threshold
        return 'M'
    else:
        return 'K'
if __name__ == "__main__":
print(WhichSex(sys.argv[1]))
| true |
f18eb6e4269d282b98c74646ef1acc41e4f3cf55 | Python | jduell12/cs-module-project-hash-tables | /d2_lecture.py | UTF-8 | 1,795 | 4.03125 | 4 | [] | no_license | #linked list for Hash tables
class Node:
    """A single element of a singly linked list."""

    def __init__(self, value):
        # Payload plus pointer to the successor (None marks the tail).
        self.value = value
        self.next = None
class LinkedList:
    """Singly linked list keeping a running element count.

    Intended as a hash-table bucket: supports head insertion, value lookup
    and deletion of the first node carrying a given value.
    """

    def __init__(self):
        self.head = None
        self.count = 0

    def insert_at_head(self, node):
        """Prepend an existing Node to the list."""
        node.next = self.head
        self.head = node
        self.count += 1

    def find(self, value):
        """Return 'Node(<value>)' for the first match, or None if absent."""
        node = self.head
        while node is not None:
            if node.value == value:
                return "Node(%d)" % node.value
            node = node.next
        return None

    def delete(self, value):
        """Unlink the first node holding *value*.

        Returns the removed value, or None when the list is empty or the
        value is not present.
        """
        if self.head is None:
            return None
        # Special case: the value sits at the head.
        if self.head.value == value:
            removed = self.head
            self.head = removed.next
            removed.next = None
            self.count -= 1
            return removed.value
        # General case: walk with a trailing pointer.
        previous, node = self.head, self.head.next
        while node is not None:
            if node.value == value:
                previous.next = node.next
                node.next = None
                self.count -= 1
                return node.value
            previous, node = node, node.next
        return None

    def __str__(self):
        # Render as: "count: N\n v1 --> v2 --> ... "
        parts = []
        node = self.head
        while node is not None:
            parts.append(" %d " % node.value)
            node = node.next
        return "count: %d\n" % self.count + "-->".join(parts)
# Quick demo: build the list 20 --> 40 --> 10, then remove the middle node.
ll = LinkedList()
for v in (10, 40, 20):
    ll.insert_at_head(Node(v))
print(ll)
ll.delete(40)
print(ll)