seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 โ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k โ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
35885471303 | """
This script ensures that the specified network devices are operating as desired.
This includes:
* Are up and configured via NMCLI
* Are using manual IP addresses
* Are using an MTU of 9000
* Finally, that the DHCP Server is running on these adapters
Background: For an unknown reason on the FOPS machine, the 10GBit network
adapters will not persist their nmcli settings.
"""
import nmcli
import click
import logging
import typing
import ipaddress
import subprocess
import time
import tempfile
from pathlib import Path
log = logging.getLogger("nmcli-dhcp-manager")
logging.basicConfig(level=logging.DEBUG)
DHCPD_CONF_STUB = """
subnet {interface} netmask {netmask} {{
range {range_start} {range_stop};
option routers {router_ipv4};
}}
"""
@click.command()
@click.option(
    "--devices",
    "-d",
    help="A comma separated list of devices to manage. E.g. enp3s0,enp4s0",
    required=True,
)
@click.option(
    "--ip-addresses",
    "-ip",
    help="IP address range to use. Each extra adapter will use an incremented /24 range!",
    default="192.168.1.1/24",
    show_default=True,
)
@click.option(
    "--mtu",
    "-m",
    help="MTU to set",
    default=9000,
    show_default=True,
)
def cli(devices: str, ip_addresses: str, mtu: int):
    """Configure each matched device with a static IP and MTU, then restart DHCP.

    The first matched connection receives the first host address of the given
    network (e.g. 192.168.1.1/24); each subsequent connection receives the
    first host of the next /24-sized range (192.168.2.1/24, and so on).
    """
    # Split and sanitise. strip() already removes whitespace on both ends, so
    # the original trailing .rstrip() was redundant.
    input_devices = [device_str.lower().strip() for device_str in devices.split(",")]
    ip_interface = ipaddress.IPv4Interface(ip_addresses)
    # Fixed log message: it previously read f"...control f{input_devices}",
    # which printed a stray literal 'f'.
    log.debug(f"Attempting to control {input_devices}")
    log.debug(f"NMCLI devices: {[dev.device for dev in nmcli.device()]}")
    devices_to_control = get_matched_devices(input_devices)
    log.debug(f"Matched devices: {_get_device_str_list(devices_to_control)}")
    connections_to_control = get_matched_connections(devices_to_control)
    log.debug(
        f"Matched connections: {[connection.name for connection in connections_to_control]}"
    )
    prefix_len = ip_interface.network.prefixlen
    ip_interface_current = ip_interface
    device_interface_list = []
    for connection in connections_to_control:
        # Assign the first host address of the current network, keeping the
        # original prefix length.
        ip_address = next(ip_interface_current.network.hosts())  # type: ignore
        ip_interface_actual = ipaddress.IPv4Interface(
            f"{ip_address.compressed}/{prefix_len}"
        )
        set_static_ip(connection, ip_interface_actual)
        set_mtu(connection, mtu)
        reset_connection(connection)
        device_interface_list.append((connection, ip_interface_actual))
        log.info(f"Reset {connection.name} with {ip_interface_actual}")
        # Advance to the next /24-sized range. BUG FIX: the original added 255,
        # which drifts (192.168.1.1 -> 192.168.2.0 -> 192.168.2.255) so the
        # third adapter landed back in 192.168.2.0/24 and reused the same IP.
        # Adding a full 256 addresses moves cleanly to the next network.
        next_ip = ip_interface_current.ip + 256
        ip_interface_current = ipaddress.IPv4Interface(
            f"{next_ip.compressed}/{prefix_len}"
        )
    log.debug("Waiting 10 seconds for connections to settle")
    time.sleep(10.0)
    restart_dhcp(device_interface_list)
def get_matched_devices(
    devices_str: "typing.List[str]",
) -> "typing.List[nmcli.data.device.Device]":
    """Return the nmcli devices whose lower-cased name appears in devices_str."""
    return [dev for dev in nmcli.device() if dev.device.lower() in devices_str]
def get_matched_connections(
    devices: typing.List[nmcli.data.device.Device],
) -> typing.List[nmcli.data.connection.Connection]:
    """Return the nmcli connections bound to any of the given devices.

    Inactive connections report their device as "--", so for those we fall
    back to the connection's configured 'connection.interface-name' property.
    """
    matched_connections = []
    for sys_connection in nmcli.connection():
        device = sys_connection.device
        # if connection is inactive device == "--"
        if device == "--":
            device = nmcli.connection.show(sys_connection.name)[
                "connection.interface-name"
            ]
        if device in _get_device_str_list(devices):
            matched_connections.append(sys_connection)
    return matched_connections
def _get_device_str_list(
devices: typing.List[nmcli.data.device.Device],
) -> typing.List[str]:
return [device.device for device in devices]
def set_static_ip(
    connection: nmcli.data.connection.Connection, ip_interface: ipaddress.IPv4Interface
):
    """Switch the connection to a manual (static) IPv4 address.

    NOTE(review): the gateway is set to the interface's *own* address, which
    only makes sense if this host acts as the router for the subnet (it does
    run the DHCP server) — confirm this is intentional.
    """
    nmcli.connection.modify(
        connection.name,
        {
            "ipv4.addresses": ip_interface.compressed,
            "ipv4.gateway": ip_interface.ip.compressed,
            "ipv4.method": "manual",
        },
    )
def set_mtu(connection: nmcli.data.connection.Connection, mtu: int):
    """Set the connection's ethernet MTU (nmcli expects the value as a string)."""
    nmcli.connection.modify(
        connection.name,
        {"802-3-ethernet.mtu": str(mtu)},
    )
def reset_connection(connection: nmcli.data.connection.Connection):
    """Bounce the connection (down, then up) so modified settings take effect."""
    try:
        nmcli.connection.down(connection.name, wait=60)
        time.sleep(1.0)
    except Exception as _:
        pass  # happens if device is already down. We mainly care about up.
    nmcli.connection.up(connection.name, wait=60)
def create_new_dhcpd_conf(device_interface_list):
    """(Unfinished) Render a dhcpd.conf from the managed interfaces and install it.

    NOTE: everything below the raise is unreachable scaffolding kept as a
    sketch for the eventual implementation — the format() fields are still
    empty placeholders and the final copy command is commented out.
    """
    raise NotImplementedError("Todo")
    file_buffer = ""
    for device, interface in device_interface_list:
        file_buffer += DHCPD_CONF_STUB.format(
            interface="", netmask="", range_start="", range_stop="", router_ipv4=""
        )
    with tempfile.NamedTemporaryFile(mode="wt") as tmpfile:
        tmpfile.write(file_buffer)
        # Sudo replace old file
        command_cp = [
            "sudo",
            "--non-interactive",
            "-E",
            "cp",
            f"{Path(tmpfile.name).resolve()}",
            f"{Path('/etc/dhcp/dhcpd.conf').resolve()}",
        ]
        # subprocess.check_call(command_cp)
def restart_dhcp(device_interface_list):
    """Restart isc-dhcp-server via systemctl and verify it reaches 'active'.

    Note: it is not (yet) in scope to dynamically update the
    '/etc/dhcp/dhcpd.conf' file.

    Raises:
        subprocess.CalledProcessError: if the restart command itself fails.
        ValueError: if the service does not report ActiveState=active.
    """
    # create_new_dhcpd_conf(device_interface_list)
    command_restart = [
        "sudo",
        "--non-interactive",
        "-E",
        "systemctl",
        "restart",
        "isc-dhcp-server",
    ]
    command_status = [
        "systemctl",
        "show",
        "isc-dhcp-server",
        "--no-page",
    ]
    try:
        subprocess.check_call(command_restart)
        output_str_lines = (
            subprocess.check_output(command_status).decode().splitlines(keepends=False)
        )
        # Parse 'Key=Value' lines. Split only on the FIRST '=' so values that
        # themselves contain '=' (e.g. ExecStart command lines) survive, and
        # skip any line without a '=' instead of raising IndexError.
        output = {}
        for line in output_str_lines:
            key, sep, value = line.partition("=")
            if sep:
                output[key] = value
        if output.get("ActiveState") != "active":
            raise ValueError("Failed to get DHCP server running")
    except subprocess.CalledProcessError:
        log.error("Could not restart isc-dhcp-server")
        raise  # bare raise preserves the original traceback
if __name__ == "__main__":
cli()
| PlantandFoodResearch/machine-vision-acquisition | src/utils/nmcli-dhcp-manager.py | nmcli-dhcp-manager.py | py | 6,720 | python | en | code | 4 | github-code | 90 |
17616094138 | import datetime
import json
import re
import os
from io import BytesIO
from git import Repo, Git
import requests
import subprocess
import tempfile
import uuid
import bson
import zipfile
import base64
import shutil
from bson.binary import Binary
from pathlib import Path
from dotted_dict import DottedDict
from subprocess import check_output
from common.utilities import *
from common.config import *
from database.db_handler import *
from definitions.response_models import *
from definitions.enums import *
from definitions.request_models import *
from definitions.mongo_models import *
from celery_task.worker import celery_worker
@celery_worker.task(name='build_stub_from_url.task', bind=False)
def build_stub_from_url(url: HttpUrl, language: SupportedLanguages, user: str, operation_id: str, project_name: str = None):
    # Celery entry point: generate a server stub from a remote OpenAPI spec URL.
    return build_stub(language, user, operation_id, project_name, url)
@celery_worker.task(name='build_stub_from_spec.task', bind=False)
def build_stub_from_spec(spec: dict, language: SupportedLanguages, user: str, operation_id: str, project_name: str = None):
    # Celery entry point: generate a server stub from an inline OpenAPI spec dict.
    return build_stub(language, user, operation_id, project_name, None, spec)
@celery_worker.task(name='build_sdk_from_url.task', bind=False)
def build_sdk_from_url(url: HttpUrl, language: SupportedLanguages, user: str, operation_id: str, project_name: str = None):
    # Celery entry point: generate a client SDK from a remote OpenAPI spec URL.
    return build_sdk(language, user, operation_id, project_name, url)
@celery_worker.task(name='build_sdk_from_spec.task', bind=False)
def build_sdk_from_spec(spec: dict, language: SupportedLanguages, user: str, operation_id: str, project_name: str = None):
    # Celery entry point: generate a client SDK from an inline OpenAPI spec dict.
    return build_sdk(language, user, operation_id, project_name, None, spec)
def push_to_git(project_name, user, zip_file):
    """Extract the generated zip over /tmp/<project_name> and push it to the project's git remote.

    Returns None on success (or when pushing is disabled), or the error
    message string if anything fails.
    """
    settings_search = GenericMongoHandler(PROJECT_SETTINGS).find_one({'project': project_name})
    # NOTE(review): `settings` is not defined in this function — the lookup
    # above is bound to `settings_search` and then never used. Unless a
    # `settings` object arrives via one of the star imports, this line raises
    # NameError; it likely should read `settings_search.get("push_to_git")`.
    if settings.get("push_to_git"):
        try:
            user_ssh_key = GenericMongoHandler(USER_SSH_KEYS).find_one({'user': user})
            if user_ssh_key:
                ssh_key = user_ssh_key.get('ssh_key')
            else:
                ssh_key = None
                logger.warning('User does not have an ssh key set for repo')
            with tempfile.NamedTemporaryFile() as file_object:
                git_ssh_cmd = 'ssh'
                if ssh_key:
                    # NOTE(review): NamedTemporaryFile defaults to binary mode,
                    # so this assumes ssh_key is bytes — confirm what the
                    # USER_SSH_KEYS collection actually stores.
                    file_object.write(ssh_key)
                    git_ssh_cmd = f'ssh -i {file_object.name}'
                repo = Repo(f'/tmp/{project_name}')
                # Note implement capability to push to a new branch by cloning branch first to check validity of git url
                # Repo.clone_from(push_to_git, f'/tmp/{project_name}',env=dict(GIT_SSH_COMMAND=git_ssh_cmd))
                with zipfile.ZipFile(BytesIO(zip_file)) as zip_ref:
                    zip_ref.extractall(f'/tmp/{project_name}')
                with repo.git.custom_environment(GIT_SSH_COMMAND=git_ssh_cmd):
                    repo.git.add(update=True)
                    repo.index.commit('sdk update')
                    origin = repo.remote(name='origin')
                    origin.push()
        except Exception as e:
            logger.error(e)
            return str(e)
def store_generated_zip(search, encoded, operation_id, project_name, type=LogTypes.SDK.value):
    """Persist the generated archive bytes in Mongo and mark the task FINISHED.

    `encoded` is the raw zip payload; `search` identifies the task document.
    """
    documents = GenericMongoHandler(DOCUMENTS)
    handler = GenericMongoHandler(TASKS)
    # NOTE(review): user_handler is created but never used in this function.
    user_handler = GenericMongoHandler(USER_SSH_KEYS)
    documents.store({
        'uuid': operation_id,
        'file': encoded,
        'type': type,
        'datetime': get_timestamp(),
        'project': project_name
        }
    )
    logger.info("Task has been stored succesfully")
    handler.update(search, {'status': TaskState.FINISHED.value})
def build_sdk(language, user, operation_id, project_name = None, url = None, spec = None):
    """Generate a client SDK via the OpenAPI generator and store the resulting zip.

    Exactly one of `url` / `spec` should be provided. On success the zip is
    stored in Mongo, optionally pushed to git, and the BuildLogs dict is
    returned; on any failure the task is marked FAILED and an OperationError
    dict is returned instead.
    """
    search = {'uuid': operation_id}
    handler = GenericMongoHandler(TASKS)
    builds = GenericMongoHandler(BUILDS)
    try:
        headers = {'Content-Type':'application/json'}
        if url:
            request_data = OpenAPIRequest(openAPIUrl=url).dict()
        else:
            request_data = OpenAPIRequest(spec=spec).dict()
        language = str(language.lower())
        handler.update(search, {'status': TaskState.RUNNING.value})
        generator_url = f'http://{OPENAPI_GENERATOR}/api/gen/clients/{language}'
        logger.info(f"Sending Request to generator url:{generator_url}")
        response = requests.post(url=generator_url, json=request_data, headers=headers)
        logger.info("Received response")
        logger.info(response.text)
        data = BuildLogs(
            user = user,
            logs = response.text,
            project = project_name,
            url = url,
            datetime = get_timestamp(),
            language = language,
            operation_id = operation_id
        )
        builds.store(data.dict())
        if response.status_code != 200:
            raise Exception("Generator error")
        # The generator replies with a JSON body containing a download link;
        # fetch the generated archive from that link.
        response = json.loads(response.text)
        link = response.get('link')
        response = requests.get(url=link, allow_redirects=True)
        if response.status_code != 200:
            raise Exception("Failed to download generated file")
        store_generated_zip(search, response.content, operation_id, project_name)
        git_error = push_to_git(project_name, user, response.content)
        data.git_error = git_error
        return data.dict()
    except Exception as e:
        logger.error(e)
        update = {'status': TaskState.FAILED.value, 'error': str(e)}
        handler.update(search, update)
        return OperationError(error=str(e)).dict()
def build_stub(language, user, operation_id, project_name = None, url = None, spec = None):
    """Generate a server stub via the OpenAPI generator and store the resulting zip.

    Mirrors build_sdk() but targets the /servers/ generator endpoint and tags
    the stored build logs/documents as STUB. Returns the BuildLogs dict on
    success, or an OperationError dict on failure.
    """
    search = {'uuid': operation_id}
    handler = GenericMongoHandler(TASKS)
    builds = GenericMongoHandler(BUILDS)
    try:
        headers = {'Content-Type':'application/json'}
        if url:
            request_data = OpenAPIRequest(openAPIUrl=url).dict()
        else:
            request_data = OpenAPIRequest(spec=spec).dict()
        language = str(language.lower())
        handler.update(search, {'status': TaskState.RUNNING.value})
        generator_url = f'http://{OPENAPI_GENERATOR}/api/gen/servers/{language}'
        logger.info(f"Sending Request to generator url:{generator_url}")
        response = requests.post(url=generator_url, json=request_data, headers=headers)
        logger.info("Received response")
        logger.info(response.text)
        data = BuildLogs(
            user = user,
            logs = response.text,
            project = project_name,
            url = url,
            type = LogTypes.STUB.value,
            datetime = get_timestamp(),
            language = language,
            operation_id = operation_id
        )
        builds.store(data.dict())
        if response.status_code != 200:
            raise Exception("Generator error")
        # The generator replies with a JSON body containing a download link.
        response = json.loads(response.text)
        link = response.get('link')
        response = requests.get(url=link, allow_redirects=True)
        if response.status_code != 200:
            raise Exception("Failed to download generated file")
        # BUG FIX: pass the raw zip bytes (response.content) rather than the
        # requests.Response object itself — build_sdk() already does this, and
        # store_generated_zip() expects a payload it can write to Mongo.
        store_generated_zip(search, response.content, operation_id, project_name, LogTypes.STUB.value)
        return data.dict()
    except Exception as e:
        logger.error(e)
        update = {'status': TaskState.FAILED.value, 'error': str(e)}
        handler.update(search, update)
        return OperationError(error=str(e)).dict()
| grmono/openapi-ui | app/celery_task/generate_tasks.py | generate_tasks.py | py | 6,621 | python | en | code | 4 | github-code | 90 |
38906526920 | from lc import *
class Solution:
    def originalDigits(self, s: str) -> str:
        """Rebuild the ascending digit string from a scrambled English spelling.

        Each digit is counted via a letter that identifies it uniquely once
        earlier digits are subtracted out: z->0, w->2, u->4, x->6, g->8 are
        unique outright; 1, 3, 5, 7, 9 are then resolved by elimination.
        """
        c = s.count
        counts = [0] * 10
        counts[0] = c('z')
        counts[2] = c('w')
        counts[4] = c('u')
        counts[6] = c('x')
        counts[8] = c('g')
        counts[1] = c('o') - counts[0] - counts[2] - counts[4]
        counts[3] = c('h') - counts[8]
        counts[5] = c('f') - counts[4]
        counts[7] = c('s') - counts[6]
        counts[9] = c('i') - counts[6] - counts[8] - counts[5]
        return "".join(str(digit) * counts[digit] for digit in range(10))
# Golfed one-liner variant (redefines Solution, shadowing the class above).
# The string 'z ozwu w hg u fu x sx g ixgfU' encodes, per digit i, its
# identifying letter followed by the letters whose counts must be subtracted;
# an UPPERCASE letter means add instead of subtract ([1,-1][c<'a'] is -1 for
# uppercase, which flips the sign back).
class Solution:
    def originalDigits(self, s: str) -> str:
        return ''.join(str(i)*(s.count(v[0])-sum(s.count(c.lower())*[1,-1][c<'a'] for c in v[1:])) for i,v in enumerate('z ozwu w hg u fu x sx g ixgfU'.split()))
test('''
423. Reconstruct Original Digits from English
Medium
688
2323
Add to List
Share
Given a string s containing an out-of-order English representation of digits 0-9, return the digits in ascending order.
Example 1:
Input: s = "owoztneoer"
Output: "012"
Example 2:
Input: s = "fviefuro"
Output: "45"
Constraints:
1 <= s.length <= 105
s[i] is one of the characters ["e","g","f","i","h","o","n","s","r","u","t","w","v","x","z"].
s is guaranteed to be valid.
''')
| joric/oneliners | leetcode/reconstruct-original-digits-from-english.py | reconstruct-original-digits-from-english.py | py | 1,293 | python | en | code | 23 | github-code | 90 |
26407541373 | import requests
from time import sleep
# takes server list outputs locations (each only once) the servers are in.
# takes server list outputs locations (each only once) the servers are in.
def get_unique_locations(list_of_servers):
    """Reverse-geocode every distinct (lat, long) pair found in the server list.

    Coordinates are de-duplicated first so each location is geocoded exactly
    once; the 0.1s sleep between requests is a crude rate limit for the
    Google geocoding API.
    """
    unique_locations = []
    resolved_locations = []
    for aServer in list_of_servers:
        latLongDic = {"lat": aServer["location"]["lat"], "long": aServer["location"]["long"]}
        # Linear membership scan — fine for the small server lists used here.
        if latLongDic not in unique_locations:
            unique_locations.append(latLongDic)
    # print(unique_locations)
    for eachLocation in unique_locations:
        geo_address_list = get_location_name(eachLocation)
        sleep(0.1)
        # geo_address_list = get_location_name(latitude=latitude, longitude=longitude)
        resolved_locations.append(geo_address_list)
    # print(resolved_locations)
    return resolved_locations
def get_location_name(location_dic):
    """Reverse-geocode one {'lat','long'} dict via the Google geocoding API.

    Returns a list shaped like [country_words, location_dic, name_list], where
    name_list collects lower-cased city/locality/area names.

    NOTE(review): the request carries no API key, so Google may reply with
    REQUEST_DENIED / empty results — in which case `r.json()['results'][0]`
    raises IndexError. Confirm how callers expect failures to surface.
    """
    latitude = location_dic["lat"]
    longitude = location_dic["long"]
    url = 'https://maps.googleapis.com/maps/api/geocode/json'
    params = "latlng={lat},{lon}&sensor={sen}".format(
        lat=latitude,
        lon=longitude,
        sen='false'
    )
    final_url = url + "?" + params
    r = requests.get(final_url)
    geo_address_list = []
    name_list = []
    results = r.json()['results'][0]['address_components']
    # print(results)
    country = town = None
    geo_address_list.append(location_dic)
    for c in results:
        # Collect both short and long variants where available.
        if "administrative_area_level_2" in c['types']:
            city_name1 = c['short_name']
            name_list.append(city_name1.lower())
        if "locality" in c['types']:
            city_name2 = c['long_name']
            name_list.append(city_name2.lower())
        if "administrative_area_level_1" in c['types']:
            area_name = c['long_name']
            name_list.append(area_name.lower())
        # Intentionally repeated check: also record the short form of level 1.
        if "administrative_area_level_1" in c['types']:
            area_name_short = c['short_name']
            name_list.append(area_name_short.lower())
        if "country" in c['types']:
            country = c['short_name']
            geo_address_list.insert(0, country.lower().split(" "))
    geo_address_list.insert(2, name_list)
    # print(geo_address_list)
    return geo_address_list
| elanozturk/openpyn-nordvpn | openpyn/locations.py | locations.py | py | 2,190 | python | en | code | null | github-code | 90 |
36842242517 | import unittest
import time
from sources.web.national_archives import NationalArchivesScraper
from sources.web.history_net import HistoryNetScraper
from sources.web.bbc import BBCScraper
from sources.web.google_scholar import GoogleScholarScraper
from sources.web.reuters import ReutersScraper
from sources.web.nature import NatureScraper
from sources.web.npr import NPRScraper
class TimedTestCase(unittest.TestCase):
    """TestCase base that times a callable and prints the elapsed seconds."""

    def time_method(self, method, *args, **kwargs):
        """Invoke *method* with the given args, report wall-clock time, and return its result."""
        started = time.time()
        outcome = method(*args, **kwargs)
        elapsed = time.time() - started
        print(f'{self.__class__.__name__}.{method.__name__} took {elapsed:.2f} seconds')
        return outcome
class TestNationalArchivesScraper(TimedTestCase):
    """Live smoke test for NationalArchivesScraper.search (requires network access)."""
    def setUp(self):
        self.scraper = NationalArchivesScraper()
    def test_search(self):
        # A generic query should yield a non-empty list of results.
        query = 'test'
        results = self.time_method(self.scraper.search, query)
        self.assertIsNotNone(results)
        self.assertIsInstance(results, list)
        self.assertGreater(len(results), 0)
class TestHistoryNetScraper(TimedTestCase):
    """Live smoke test for HistoryNetScraper.search (requires network access)."""
    def setUp(self):
        self.scraper = HistoryNetScraper()
    def test_search(self):
        query = 'test'
        results = self.time_method(self.scraper.search, query)
        self.assertIsNotNone(results)
        self.assertIsInstance(results, list)
        self.assertGreater(len(results), 0)
class TestBBCScraper(TimedTestCase):
    """Live smoke test for BBCScraper.search (requires network access)."""
    def setUp(self):
        self.scraper = BBCScraper()
    def test_search(self):
        query = 'test'
        results = self.time_method(self.scraper.search, query)
        self.assertIsNotNone(results)
        self.assertIsInstance(results, list)
        self.assertGreater(len(results), 0)
class TestGoogleScholarScraper(TimedTestCase):
    """Live smoke test for GoogleScholarScraper.search (requires network access)."""
    def setUp(self):
        self.scraper = GoogleScholarScraper()
    def test_search(self):
        query = 'test'
        results = self.time_method(self.scraper.search, query)
        self.assertIsNotNone(results)
        self.assertIsInstance(results, list)
        self.assertGreater(len(results), 0)
class TestReutersScraper(TimedTestCase):
    """Live smoke test for ReutersScraper.search (requires network access)."""
    def setUp(self):
        self.scraper = ReutersScraper()
    def test_search(self):
        query = 'test'
        results = self.time_method(self.scraper.search, query)
        self.assertIsNotNone(results)
        self.assertIsInstance(results, list)
        self.assertGreater(len(results), 0)
class TestNatureScraper(TimedTestCase):
    """Live smoke test for NatureScraper.search (requires network access)."""
    def setUp(self):
        self.scraper = NatureScraper()
    def test_search(self):
        query = 'test'
        results = self.time_method(self.scraper.search, query)
        self.assertIsNotNone(results)
        self.assertIsInstance(results, list)
        self.assertGreater(len(results), 0)
class TestNPRScraper(TimedTestCase):
    """Live smoke test for NPRScraper.search (requires network access)."""
    def setUp(self):
        self.scraper = NPRScraper()
    def test_search(self):
        query = 'test'
        results = self.time_method(self.scraper.search, query)
        self.assertIsNotNone(results)
        self.assertIsInstance(results, list)
        self.assertGreater(len(results), 0) | silasnevstad/verifi | sources/web/web_tests.py | web_tests.py | py | 3,162 | python | en | code | 0 | github-code | 90 |
74085720617 | from .daily_dialog import load_daily_dialog
from .curiosity_dialogs import load_curiosity_dialogs
from .multiwoz_v22 import load_multiwoz_v22
from .metawoz import load_metawoz
from .taskmaster import load_taskmaster1, load_taskmaster2, load_taskmaster3
def load_multiple_datasets(datasets, split):
    """Load each named dataset for *split*, preserving order.

    Unknown dataset names are silently skipped (same behavior as the
    original if/elif chain).
    """
    loaders = {
        "curiosity_dialogs": load_curiosity_dialogs,
        "daily_dialog": load_daily_dialog,
        "multi_woz_v22": load_multiwoz_v22,
        "meta_woz": load_metawoz,
        "taskmaster1": load_taskmaster1,
        "taskmaster2": load_taskmaster2,
        "taskmaster3": load_taskmaster3,
    }
    return [loaders[name](split) for name in datasets if name in loaders]
| ErikEkstedt/datasets_turntaking | datasets_turntaking/dataset/conversational/utils.py | utils.py | py | 944 | python | en | code | 7 | github-code | 90 |
12187897404 | import os
import pickle
import random
import re
class model:
    """First-order Markov text model: maps each word to its observed successors."""

    # Matches a word plus up to three trailing punctuation characters.
    word_pattern = r'[\w]+[.,...?!;:]{0,3}'

    def __init__(self):
        # BUG FIX: right_words used to be a shared mutable CLASS attribute, so
        # every instance (and every fit) accumulated into the same dict. It is
        # now per-instance state.
        self.right_words = {}

    def fit(self, directory, model):
        """Train on every file in *directory* (or on typed-in text if None) and pickle the table to *model*.

        Words are lower-cased; each word maps to the list of words observed
        immediately after it.
        """
        if directory is None:
            text = input("Введите текст: ")
        else:
            text = ""
            for name in os.listdir(directory):
                path = directory + "/" + name
                with open(path, "r", encoding='utf-8') as f:
                    text += f.read()
        words = re.findall(self.word_pattern, text)
        # Pair each word with its successor (zip handles the empty/1-word case).
        for current, following in zip(words, words[1:]):
            self.right_words.setdefault(current.lower(), []).append(following.lower())
        with open(model, "wb") as f:
            pickle.dump(self.right_words, f)

    def generate(self, file, length, prefix):
        """Print *length* words starting from *prefix* (random start if None), using the pickled model in *file*.

        NOTE: raises KeyError if a generated word has no recorded successor
        (e.g. the final word of the training corpus) — unchanged behavior.
        """
        with open(file, "rb") as f:
            best_words = pickle.load(f)
        if prefix is None:
            prefix = random.choice(list(best_words.keys()))
        word = prefix
        for _ in range(length):
            print(word, end=" ")
            word = random.choice(best_words[word])
if __name__ == '__main__':
    # CLI: train the model on --dir (or typed-in text) and pickle it to --model.
    # NOTE(review): generate() is never invoked here — there is no CLI path
    # for text generation yet.
    import argparse
    parser = argparse.ArgumentParser(description="обучение модели")
    parser.add_argument("--dir", type=str, help="путь к директории, в которой лежит коллекция документов")
    parser.add_argument("--model", type=str, required=True, help="путь к файлу, в который сохраняется модель")
    args = parser.parse_args()
    test_model = model()
test_model.fit(args.dir, args.model) | PKovyrzin/text-generator | train.py | train.py | py | 1,792 | python | en | code | 0 | github-code | 90 |
7613256658 | from django.urls import path
from .views import UserListView,UserDetailView,UserCreateView,VerifyEmail,UserUpdateView, CreateBlog,BlogDetailView,BlogListView,EditBLog
# ,BlogCreateView
# Route table: account management, email verification, and blog CRUD views.
urlpatterns=[
    path('User',UserListView.as_view()),
    path('User/<email>',UserDetailView.as_view()),
    path('create/account/', UserCreateView.as_view()),
    path('Update/account/',UserUpdateView.as_view()),
    path('email-verify/', VerifyEmail.as_view(),name='email-verify'),
    path('Writing/Publish/', CreateBlog.as_view(),name='writing-publish'),
    path('<url>/<pk>', BlogDetailView.as_view(),name='Blog'),
    # NOTE(review): name='Blog' is reused by the route below; reverse('Blog')
    # will resolve to only one of them — confirm which is intended.
    path('', BlogListView.as_view(),name='Blog'),
    path('Edit/<pk>', EditBLog.as_view(),name='Blogedit'),
    # path('Writing/Publish/', BlogCreateView.as_view(),name='writing-publish'),
] | ThetEstinGsalt/RavenScribe | Backend/Publishing_Fetching/api/urls.py | urls.py | py | 805 | python | en | code | 1 | github-code | 90 |
44554676957 | """
Lowest Common Ancestor of Binary Search Tree
Given a binary search tree (BST), find the lowest common ancestor (LCA) of two given nodes in the BST.
According to the definition of LCA on Wikipedia:
โThe lowest common ancestor is defined between two nodes p and q as the lowest node in T that has both p and q as descendants (where we allow a node to be a descendant of itself).โ
Understand
- Return the lowest common ancestor of two nodes
- Node can be a descendant of itself
- [2,4], p = 2, 4 = q, ancestor is 2
Match
- Binary Search Tree
Plan
- create pointer to keep track of current node
- keep traversing tree while cur isn't null
- if both nodes are less than the current node, move to the left
- if both nodes are more than the current node, move to the right
- else
- one node could be greater than and one node could be less than current node
- one node could equal the current node
- whatever the case, return the current node as thats the common ancestor of both of them
Implement
Review
- root = [6,2,8,0,4,7,9,null,null,3,5], p = 4, q = 5
- 4,5 < 6, cur = 2
- 4,5 > 2, cur = 4
- return 4
Evaluate
- Time Complexity: O(h) where h is the tree height - one node per level; O(log n) for a balanced BST, O(n) worst case for a skewed tree
- Space Complexity: O(1) - not creating any new data structs
"""
def lowestCommonAncestor(root, p, q):
    """Return the lowest common ancestor of nodes p and q in the BST rooted at root.

    Returns None when any of the three inputs is missing. A node counts as an
    ancestor of itself.
    """
    if not (root and p and q):
        return None
    node = root
    while node:
        if p.val < node.val and q.val < node.val:
            # Both targets live in the left subtree.
            node = node.left
        elif p.val > node.val and q.val > node.val:
            # Both targets live in the right subtree.
            node = node.right
        else:
            # p and q straddle (or one equals) node: this is the split point.
            return node
| kpham841/LeetCode_Python | Tree/Lowest_Common_Ancestor_BST.py | Lowest_Common_Ancestor_BST.py | py | 1,760 | python | en | code | 0 | github-code | 90 |
37361272561 | from fractions import Fraction
from typing import List
import concurrent.futures
import time
"""
cuncurrent features
parallelized only LU matrices inversion
23.0 secs on d-500
"""
class Matrix:
    def __init__(self, matrix:List[List[int|float]]) -> None:
        # Square matrix to invert; size is its dimension (rows == cols assumed).
        self.input_matrix = matrix
        self.size = len(matrix)
        # L starts as the identity and accumulates elimination multipliers.
        self.L = self.create_identity_matrix()
    # handy method to print the matrix to the console
    # (the method name shadows the builtin only as an attribute; the builtin
    # print is still what's called inside)
    def print(self, matrix:List[List[int|float]]) -> None:
        for i in matrix:
            for j in i:
                print(j, end=" ")
            print('')
# add the identity matrix to the original matrix from the right
def create_identity_matrix(self) -> List[List[int]]:
matrix = []
for i in range(self.size):
row = []
for j in range(self.size):
if i == j:
row.append(1)
else:
row.append(0)
matrix.append(row)
return matrix
    def inverse_LU(self, main_matrix):
        """Invert a (triangular) factor by Gauss-Jordan elimination.

        Mutates main_matrix in place, mirroring every row operation on an
        identity matrix, which thereby becomes the inverse (returned).
        """
        i_matrix = self.create_identity_matrix()
        # Eliminate all off-diagonal entries.
        for i in range(self.size):
            for j in range(self.size):
                if j != i:
                    ratio = main_matrix[j][i] / main_matrix[i][i]
                    for k in range(self.size):
                        main_matrix[j][k] -= main_matrix[i][k] * ratio
                        i_matrix[j][k] -= i_matrix[i][k] * ratio
        # Scale each row so the diagonal becomes 1.
        for i in range(self.size):
            ratio = main_matrix[i][i]
            for j in range(self.size):
                main_matrix[i][j] = main_matrix[i][j] / ratio
                i_matrix[i][j] = i_matrix[i][j] / ratio
        return i_matrix
    def decompose(self, i, is_floats):
        """Eliminate column i below the diagonal, recording multipliers in self.L.

        NOTE(review): this mirrors the elimination loop inlined in inverse()
        and does not appear to be called anywhere in this file.
        """
        for j in range(self.size-1, i, -1):
            if i != j and self.input_matrix[j][i] != 0:
                if is_floats:
                    ratio = self.input_matrix[j][i] / self.input_matrix[i][i]
                else:
                    # use exact rational arithmetic to avoid float rounding error
                    ratio = Fraction(self.input_matrix[j][i], self.input_matrix[i][i])
                self.L[j][i] = ratio
                for k in range(self.size):
                    self.input_matrix[j][k] = self.input_matrix[j][k] - self.input_matrix[i][k] * ratio
    # Gauss elimination + LU decomposition
    def inverse(self, is_floats:bool) -> List[List[int|float]]:
        """Invert input_matrix: LU-decompose, invert L and U in parallel, multiply.

        Only the two triangular-factor inversions are submitted to the
        process pool; the decomposition itself runs sequentially here.
        Mutates input_matrix (it becomes U during elimination).
        """
        with concurrent.futures.ProcessPoolExecutor() as executor:
            # start = time.time()
            # multiprocessing this thing is literally counter-productive
            # Forward elimination: zero out below-diagonal entries, storing
            # multipliers in self.L (this duplicates decompose() inline).
            for i in range(self.size):
                for j in range(self.size-1, i, -1):
                    if i != j and self.input_matrix[j][i] != 0:
                        if is_floats:
                            ratio = self.input_matrix[j][i] / self.input_matrix[i][i]
                        else:
                            # use exact rational arithmetic to avoid float rounding error
                            ratio = Fraction(self.input_matrix[j][i], self.input_matrix[i][i])
                        self.L[j][i] = ratio
                        for k in range(self.size):
                            self.input_matrix[j][k] = self.input_matrix[j][k] - self.input_matrix[i][k] * ratio
            U = self.input_matrix
            # god bless this pdf file
            # http://home.cc.umanitoba.ca/~farhadi/Math2120/Inverse%20Using%20LU%20decomposition.pdf
            # parallizeable for fucking sure
            # Invert both triangular factors concurrently in worker processes.
            future_L = executor.submit(self.inverse_LU, self.L)
            future_U = executor.submit(self.inverse_LU, U)
            inverse_L = future_L.result()
            inverse_U = future_U.result()
            # multiply these bastards: A^-1 = U^-1 * L^-1
            final_matrix = []
            for i in range(self.size):
                row = []
                for j in range(self.size):
                    val = 0
                    for k in range(self.size):
                        val += inverse_U[i][k] * inverse_L[k][j]
                    row.append(val)
                final_matrix.append(row)
            # print(f"{time.time() - start}s elapsed")
            return final_matrix | SosnoviyBor/CourseWerk-y3-s2 | algorithms/failures/gauss/1_cf_onlyLU.py | 1_cf_onlyLU.py | py | 4,232 | python | en | code | 0 | github-code | 90 |
# Column positions within each comma-separated order line.
ITEM_NAME_COLUMN = 0
QUANTITY_COLUMN = 1

def load_orders(path):
    """Read an order file and return a list of {'item', 'quantity'} dicts.

    Each line looks like "<item name>,<quantity>"; quantity is parsed as int.
    Blank lines (including a trailing newline at EOF) are skipped — the
    original crashed on them with an IndexError.
    """
    orders = []
    # Open the file as read only; iterate lazily instead of readlines().
    with open(path, 'r') as order_file:
        for line in order_file:
            line = line.rstrip()
            if not line:
                continue
            order = line.split(',')
            item = {
                'item': order[ITEM_NAME_COLUMN],
                'quantity': int(order[QUANTITY_COLUMN]),
            }
            orders.append(item)
    return orders
# This function takes a list of dictionaries and saves data into the file
def save_orders(path, orders):
    """Write orders (list of {'item','quantity'} dicts) to path as '<item>,<qty>' lines."""
    with open(path, 'w') as order_file:
        order_file.writelines(
            f'{entry["item"]},{entry["quantity"]}\n' for entry in orders
        )
| davidl0673/pythonstuff | order_cli/orders.py | orders.py | py | 1,162 | python | en | code | 0 | github-code | 90 |
37436729671 | #!/usr/bin/env python
from PyQt4 import QtCore, QtGui
import time, re, hashlib, datetime
from urllib.request import urlopen, urlretrieve
from bs4 import BeautifulSoup
class FetchThread(QtCore.QThread):
signal = QtCore.pyqtSignal(list)
def __init__(self):
QtCore.QThread.__init__(self)
self.value = 0
self.folder_name = datetime.datetime.today().strftime("%Y%m%d") + '/'
def __del__(self):
self.wait()
    def get_info(self, target):
        """Resolve an FC2 video id to (title, flv_url), or None on failure.

        FC2's ginfo endpoint requires a 'mimi' token: the MD5 hex digest of
        the video id concatenated with a fixed magic suffix. The response is
        an '&'-separated key=value blob that is parsed positionally below —
        fragile by construction, hence the nested try/except fallbacks.
        """
        FC2magick = '_gGddgPfeaf_gzyr'
        hash_target = (target + FC2magick).encode('utf-8')
        mini = hashlib.md5(hash_target).hexdigest()
        ginfo_url = 'http://video.fc2.com/ginfo.php?mimi=' + mini + '&v=' + target + '&upid=' + target + '&otag=1'
        soup = BeautifulSoup(urlopen(ginfo_url, timeout=3).read(), "lxml")
        try:
            filepath = soup.p.string
            flv_url = filepath.split('&')[0].split('=')[1] + '?' + filepath.split('&')[1]
            try:
                title = filepath.split('&')[14].split('=')[1] # title(need encode)
                if len(title) < 4:
                    title = filepath.split('&')[15].split('=')[1]
                # file_name = folder_name + title + ".flv"
            except:
                return None
        except:
            # Fallback: parse the raw soup text when there is no <p> node.
            try:
                filepath = str(soup).replace(";", "").split("&")
                flv_url = filepath[0].split('=')[1] + '?' + filepath[1]
                title = filepath[14].split('=')[1]
            except:
                return None
        if not flv_url.startswith('http'):
            # print('flv_url error')
            return None
        return title, flv_url
    def run(self):
        """Scrape FC2's 'recent popular' page and emit [(id, title, flv_url), ...] via self.signal."""
        baseurl = 'http://video.fc2.com/a/recentpopular.php?page=1'
        r = urlopen(baseurl, timeout=5)
        soup = BeautifulSoup(r.read(), "lxml")
        links = soup.findAll("a")
        targets = set()
        # Matches both /content/<id> and /a/content/<id> video URLs.
        regex = re.compile(r"http://video\.fc2\.com(?:/a)?/content/(\w+)/?$")
        movie_list = []
        for link in links:
            url = link.get("href").split("&")[0]
            match = regex.search(url)
            if match is None:
                continue
            target = match.group(1)
            if target in targets:
                continue  # skip duplicate video ids
            result = self.get_info(target)
            if result is None:
                continue
            title, flv_url = result
            targets.add(target)
            movie_list.append((target, title, flv_url))
        self.signal.emit(list(movie_list))
class DownloadThread(QtCore.QThread):
    """Worker thread that downloads one movie and drives its progress bar."""
    signal = QtCore.pyqtSignal(list)
    def __init__(self, movie_info, row):
        QtCore.QThread.__init__(self)
        self.row = row
        # row layout: item 0 = QCheckBox (title), item 1 = QProgressBar
        self.b = row.itemAt(0).widget()
        self.b.setText(movie_info[1] + ' downloading...')
        self.bar = row.itemAt(1).widget()
        self.movie_info = movie_info
    def __del__(self):
        self.wait()
    def reporthook(self,*a):
        # urlretrieve reporthook args: (blocks_transferred, block_size, total_size)
        percentage = round(100.0 * a[0] * a[1] / a[2], 2)
        self.bar.setValue(percentage)
    def run(self):
        # folder_name is a module-level global assigned in the __main__ block.
        file_name = folder_name + self.movie_info[1] + ".flv"
        urlretrieve(self.movie_info[2], file_name, self.reporthook)
class Window(QtGui.QWidget):
    """Main window: a settings group plus a download group; movie rows are
    appended after fetching."""
    def __init__(self, parent=None):
        super(Window, self).__init__(parent)
        self.grid = QtGui.QVBoxLayout()
        self.button_list = []
        self.setting_group = self.create_setting_group()
        self.download_group = self.create_download_group()
        self.grid.addWidget(self.setting_group)
        self.grid.addWidget(self.download_group)
        self.setLayout(self.grid)
        self.setWindowTitle("fc2_downloader")
        self.resize(480, 320)
    def create_setting_group(self):
        """Build the "Setting" group box (ranking type + censorship radio)."""
        groupbox = QtGui.QGroupBox("Setting",self)
        ranking_type = QtGui.QLabel('Ranking type')
        ranking_type_button = QtGui.QComboBox(self)
        ranking_type_button.addItems(("weekly", "half-yearly", "yearly"))
        ranking_type_button.setCurrentIndex(0)
        # ranking_type_button.setEditable(True)
        # ranking_type_button.lineEdit().setReadOnly(True)
        # ranking_type_button.lineEdit().setAlignment(QtCore.Qt.AlignCenter)
        layout1 = QtGui.QHBoxLayout()
        layout1.addWidget(ranking_type)
        layout1.addWidget(ranking_type_button)
        uncensored = QtGui.QLabel('Uncensored')
        b1 = QtGui.QRadioButton("normal")
        b2 = QtGui.QRadioButton("only uncensored")
        bg1 = QtGui.QHBoxLayout()
        bg1.addWidget(b1)
        # bg1.addStretch(1)
        bg1.addWidget(b2)
        layout2 = QtGui.QHBoxLayout()
        layout2.addWidget(uncensored)
        layout2.addLayout(bg1)
        setting_layout = QtGui.QVBoxLayout()
        setting_layout.addLayout(layout1)
        setting_layout.addLayout(layout2)
        groupbox.setLayout(setting_layout)
        return groupbox
    def create_download_group(self):
        """Build the "download" group box with Fetch/Download buttons."""
        groupbox = QtGui.QGroupBox("download")
        btn_exec = QtGui.QPushButton(u'Fetch Movie Info')
        btn_exec.clicked.connect(self.execute)
        btn_download = QtGui.QPushButton(u'Download')
        btn_download.clicked.connect(self.download)
        buttons = QtGui.QHBoxLayout()
        buttons.addWidget(btn_exec)
        buttons.addWidget(btn_download)
        vbox = QtGui.QVBoxLayout()
        vbox.addLayout(buttons)
        groupbox.setLayout(vbox)
        return groupbox
    def execute(self):
        """Start a FetchThread; results arrive via result_fetch()."""
        self.thread = FetchThread()
        self.thread.signal.connect(self.result_fetch)
        # Old-style signal connection for the built-in finished() signal.
        QtCore.QObject.connect(self.thread, QtCore.SIGNAL("finished()"), self.done)
        self.thread.start()
        return
    def result_fetch(self, movie_lists):
        """Add one checkbox + progress-bar row per fetched movie."""
        groupbox = QtGui.QGroupBox("movie")
        vbox = QtGui.QVBoxLayout()
        self.row = []
        self.movie_lists = movie_lists
        for target, title, flv_url in movie_lists:
            b = QtGui.QCheckBox(title)
            b.setChecked(True)
            bar = QtGui.QProgressBar(self)
            bar.setValue(0)
            row = QtGui.QVBoxLayout()
            row.addWidget(b)
            row.addWidget(bar)
            self.row.append(row)
            vbox.addLayout(row)
        # NOTE(review): button_list is never appended to, so this is always 0;
        # len(self.row) looks like the intended count — confirm.
        self.movie_num = len(self.button_list)
        groupbox.setLayout(vbox)
        self.grid.addWidget(groupbox)
    def done(self):
        # QtGui.QMessageBox.information(self, "Done!", "Done fetching posts!")
        print('done')
    def download(self):
        """Spawn a DownloadThread for every checked movie row."""
        self.thread_list = []
        for i,row in enumerate(self.row):
            if row.itemAt(0).widget().isChecked():
                self.thread = DownloadThread(self.movie_lists[i], row)
                self.thread.start()
                self.thread_list.append(self.thread)
if __name__ == '__main__':
    import sys, os
    # Downloads are stored in a date-stamped folder, e.g. "20240131/".
    folder_name = datetime.datetime.today().strftime("%Y%m%d") + '/'
    try:
        os.mkdir(folder_name)
    except FileExistsError:
        print("already exist")
    app = QtGui.QApplication(sys.argv)
    clock = Window()
    clock.show()
    sys.exit(app.exec_())
# BOJ 7568 (๋ฉ์น / "bulk"): a person's rank is 1 plus the number of people
# who are strictly bigger in BOTH weight and height.


def compute_ranks(frames):
    """Return the bulk rank of every (weight, height) pair in *frames*.

    A pair's rank is 1 + the count of pairs strictly larger in both
    coordinates; ties or partial dominance do not affect the rank.
    """
    ranks = []
    for weight, height in frames:
        # Strict inequalities make the self-comparison a no-op, so no
        # explicit i != j check is needed.
        bigger = sum(1 for w, h in frames if weight < w and height < h)
        ranks.append(bigger + 1)
    return ranks


if __name__ == '__main__':
    N = int(input())
    frames = [list(map(int, input().split())) for _ in range(N)]
    # Matches the original output exactly: space-separated, trailing space,
    # no final newline.
    print(*compute_ranks(frames), sep=' ', end=' ')
| SteadyKim/Algorism | language_PYTHON/BJ7568.py | BJ7568.py | py | 357 | python | en | code | 0 | github-code | 90 |
import pandas as pd
import requests
from dotenv import dotenv_values
from sqlalchemy import create_engine
import mysql.connector
# ETL: copy dimension tables from the OLTP "nyt" database into the
# "nyt_warehouse" star schema, then build the article fact table.
env_variables = dotenv_values()
DB_PASSWORD = env_variables.get('DB_PASSWORD')
engine = create_engine(f"mysql+mysqlconnector://root:{DB_PASSWORD}@localhost:3306/nyt")
warehouse_engine = create_engine(f"mysql+mysqlconnector://root:{DB_PASSWORD}@localhost:3306/nyt_warehouse")
# Straight copies: geo, des and keywords facets become dimensions.
sql = "SELECT * FROM geo_facet;"
geo_facet_df = pd.read_sql(sql, engine)
geo_facet_df.to_sql(name="geo_dim", con=warehouse_engine, if_exists='append', index=False)
sql = "SELECT * FROM des_facet;"
des_df = pd.read_sql(sql, engine)
des_df.to_sql(name="des_dim", con=warehouse_engine, if_exists='append', index=False)
sql = "SELECT * FROM keywords;"
keywords_df = pd.read_sql(sql, engine)
keywords_df.to_sql(name="keywords_dim", con=warehouse_engine, if_exists='append', index=False)
sql = "select * from article"
oltp_article = pd.read_sql(sql, engine)
olap_articles = []
# One fact row per OLTP article; row[0] is the DataFrame index, so payload
# columns start at row[1].
# NOTE(review): the per-row SELECTs below interpolate values via f-strings
# (SQL-injection-prone) and query inside the loop (slow) — consider
# parameterized queries and pre-loaded lookup dicts.
for row in oltp_article.itertuples():
    curr_olap_row = {}
    curr_olap_row['id'] = row[1]
    curr_olap_row['url'] = row[2]
    curr_olap_row['source'] = row[3]
    published_date = row[4]
    # Resolve date -> time_dim surrogate key.
    curr_olap_row['published_fk'] = int(pd.read_sql(f"select time_key from time_dim where full_date='{published_date}'", warehouse_engine)['time_key'])
    updated_date = row[5]
    ts = pd.Timestamp(updated_date)
    dt = ts.to_pydatetime().date()
    curr_olap_row['updated_fk'] = int(pd.read_sql(f"select time_key from time_dim where full_date='{dt}'", warehouse_engine)['time_key'])
    section = row[6]
    sql = "SELECT * FROM section_dim;"
    section_df = pd.read_sql(sql, warehouse_engine)
    if section in list(section_df['section_name']):
        curr_olap_row['section_fk'] = int(pd.read_sql(f"select section_key from section_dim where section_name='{section}'", warehouse_engine)['section_key'])
    else:
        # Unknown section: mint the next surrogate key and rewrite the dim.
        max_section_key = section_df['section_key'].max()
        new_row = {'section_key': max_section_key + 1, 'section_name': row[6]}
        curr_olap_row['section_fk'] = max_section_key + 1
        # NOTE(review): DataFrame.append was removed in pandas 2.x — pin
        # pandas < 2 or switch to pd.concat.
        section_df = section_df.append(new_row, ignore_index=True)
        section_df.to_sql(name="section_dim", con=warehouse_engine, if_exists='replace', index=False)
    curr_olap_row['subsection'] = row[7]
    curr_olap_row['title'] = row[8]
    curr_olap_row['abstract'] = row[9]
    curr_olap_row['byline'] = row[10]
    this_type = row[11]
    sql = "SELECT * FROM type_dim;"
    type_df = pd.read_sql(sql, warehouse_engine)
    if this_type in list(type_df['content_type']):
        curr_olap_row['type_fk'] = int(pd.read_sql(f"select type_key from type_dim where content_type='{this_type}'", warehouse_engine)['type_key'])
    else:
        # Same mint-and-replace pattern for unseen content types.
        max_type_key = type_df['type_key'].max()
        new_row = {'type_key': max_type_key + 1, 'content_type': row[11]}
        curr_olap_row['type_fk'] = max_type_key + 1
        type_df = type_df.append(new_row, ignore_index=True)
        type_df.to_sql(name="type_dim", con=warehouse_engine, if_exists='replace', index=False)
    extract_date = row[12]
    ts = pd.Timestamp(extract_date)
    dt = ts.to_pydatetime().date()
    curr_olap_row['time_fk'] = int(pd.read_sql(f"select time_key from time_dim where full_date='{dt}'", warehouse_engine)['time_key'])
    olap_articles.append(curr_olap_row)
# Load the assembled fact rows.
olap_df = pd.DataFrame(olap_articles)
olap_df.to_sql(name="article_fact", con=warehouse_engine, if_exists='append', index=True)
| danishminhas1/articles_etl_pipeline | Articles_ETL_Pipeline/warehouse.py | warehouse.py | py | 3,595 | python | en | code | 0 | github-code | 90 |
# 10026 : ์ ๋ก์์ฝ (red-green color blindness)
import sys
from collections import deque
# Count connected color regions twice: once as seen normally, once as seen
# by a red-green colorblind person (R and G merged).
n = int(sys.stdin.readline())
graph = [list(sys.stdin.readline().rstrip()) for _ in range(n)]
visited = [[0 for _ in range(n)] for _ in range(n)]
d = [(1, 0), (-1, 0), (0, 1), (0, -1)]  # 4-neighborhood offsets
q = deque()
normal = 0    # region count with normal vision
weakness = 0  # region count with R/G merged
def bfs(x, y):
    """Flood-fill the same-color region containing (x, y).

    Mutates the module-level `visited` grid and reuses the shared deque `q`
    (empty on entry and on exit).
    """
    visited[x][y] = 1
    q.append((x, y))
    while q:
        x, y = q.popleft()
        for i in range(4):
            nx = d[i][0]+x
            ny = d[i][1]+y
            if nx >= n or ny >= n or nx < 0 or ny < 0:
                continue
            # Spread only into unvisited cells of the same color.
            if visited[nx][ny] == 0 and graph[x][y] == graph[nx][ny]:
                q.append((nx, ny))
                visited[nx][ny] = 1
# Pass 1: one BFS per unvisited cell = one region.
for i in range(n):
    for j in range(n):
        if visited[i][j] == 0:
            bfs(i, j)
            normal += 1
# ์ ๋ก์์ฝ์ R๊ณผ G๋ฅผ ๊ตฌ๋ถํ์ง ๋ชปํ๋ฏ๋ก G๋ฅผ R๋ก ๋ณ๊ฒฝ
# (Colorblind pass: merge G into R so they count as one color.)
for i in range(n):
    for j in range(n):
        if graph[i][j] == 'G':
            graph[i][j] = 'R'
# ๋ฐฉ๋ฌธ ์ฒดํฌํ๋ ๋ฐฐ์ด ์ด๊ธฐํ (reset the visited grid)
visited = [[0 for _ in range(n)] for _ in range(n)]
# ์ ๋ก์์ฝ์ผ๋ bfs ์ํ. visited๊ฐ 0์ผ๋๋ง๋ค 1์ฉ ์ฆ๊ฐ (pass 2)
for i in range(n):
    for j in range(n):
        if visited[i][j] == 0:
            bfs(i, j)
            weakness += 1
print(normal, weakness)
| yuhalog/algorithm | BOJ/DFSใปBFS/10026.py | 10026.py | py | 1,296 | python | en | code | 0 | github-code | 90 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.core.management import call_command
from django.db import models, migrations
def load_groups_fixture(apps, schema_editor):
    # Load the initial auth groups fixture shipped with the recipes app.
    call_command('loaddata', 'groups_initial_data', app_label='recipes')
def load_users_fixture(apps, schema_editor):
    # Load the initial users fixture.
    call_command('loaddata', 'users_initial_data', app_label='recipes')
def load_ingredients_fixture(apps, schema_editor):
    # Load the initial ingredients fixture.
    call_command('loaddata', 'ingredients_initial_data', app_label='recipes')
def load_recipes_fixture(apps, schema_editor):
    # Load the initial recipes fixture.
    call_command('loaddata', 'recipes_initial_data', app_label='recipes')
class Migration(migrations.Migration):
    """Data migration: seed groups, users, ingredients and recipes.

    Order matters — recipes reference users/ingredients loaded before them.
    """
    dependencies = [
        ('recipes', '0003_auto_20160720_1252'),
    ]
    operations = [
        migrations.RunPython(load_groups_fixture),
        migrations.RunPython(load_users_fixture),
        migrations.RunPython(load_ingredients_fixture),
        migrations.RunPython(load_recipes_fixture),
    ]
| nessa/serenity | amuseapi/recipes/migrations/0004_auto_20160720_1252.py | 0004_auto_20160720_1252.py | py | 1,004 | python | en | code | 0 | github-code | 90 |
import requests
from .. import tbot
from telethon import Button, events
@tbot.on(events.NewMessage(pattern="[/!]anime"))
async def _(e):
    """Handle /anime or !anime: fetch the latest anime-news item and reply
    with its title, summary, image and a link button."""
    # NOTE(review): blocking requests call inside an async handler — a
    # non-blocking client would avoid stalling the event loop.
    f = requests.get('https://anime-news-api-production-5b50.up.railway.app/').json()
    y = f['image']     # image URL sent as the message file
    z = f['post_url']  # link for the inline button
    lol = f['title']
    ok = f['info']
    msg = (f'**Title:**\n{lol}\n\n')
    msg += (f'**Info:**\n{ok}\n')
    await e.respond(msg, file=y, buttons=Button.url('Click', f'{z}'))
| TAMILVIP007/anime-news | anime/plugins/anime.py | anime.py | py | 440 | python | en | code | 0 | github-code | 90 |
18539773039 | n = int(input())
nmax = 55556
prime = [True]*nmax
prime[0] = prime[1] = False
for i in range(2, int(nmax**0.5)+1):
if not prime[i]: continue
for j in range(2*i, nmax, i):
prime[j] = False
arr = []
for i in range(2, nmax):
if not prime[i]: continue
if i%10 == 3:
arr.append(i)
if len(arr) == n: break
print(*arr, sep=' ') | Aasthaengg/IBMdataset | Python_codes/p03362/s323872639.py | s323872639.py | py | 343 | python | en | code | 0 | github-code | 90 |
def contains_pattern(grid, pattern):
    """Return True iff the 2-D string *pattern* occurs inside *grid*.

    Both arguments are lists of equal-length strings; rows of *pattern*
    must match a contiguous rectangle of *grid* exactly.
    """
    rows, cols = len(grid), len(grid[0])
    prows, pcols = len(pattern), len(pattern[0])
    for top in range(rows - prows + 1):
        for left in range(cols - pcols + 1):
            if all(grid[top + r][left:left + pcols] == pattern[r]
                   for r in range(prows)):
                return True
    return False


if __name__ == '__main__':
    n, m = map(int, input().split())
    A = [input() for _ in range(n)]
    B = [input() for _ in range(m)]
    # Bug fix: the original could print "Yes" once per match found in the
    # same top row before breaking; the verdict is printed exactly once.
    print('Yes' if contains_pattern(A, B) else 'No')
import numpy as np
import pickle
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from get_parameters import *
############################################
####Initialization of Parameters############
############################################
l=32                       # lattice side length
lattice_shape=(l,l)
nsamples=1000              # number of configurations used per temperature
index_set=range(0,32,1)    # temperature indices to analyse
T_vals=np.linspace(0.01,2,32)
S=[]                       # NOTE(review): unused — candidate for removal
sp_heat=[]
mag=[]
mag_err=[]
######################################
#########Opening saved data###########
######################################
# NOTE(review): a `with open(...)` block would guarantee the handle closes.
pkl_file=open(str(lattice_shape)+'lattices.pkl','rb')
allTlattices=pickle.load(pkl_file)
pkl_file.close()
#allTlattices contains 32 lists for each temperature
#Each list contains 10000 spin configurations
# Compute specific heat and magnetization (mean +/- std) per temperature,
# using only the last `nsamples` (equilibrated) configurations.
for index in index_set:
    temp=T_vals[index]
    lattices=allTlattices[index][-nsamples:]
    sp_heat.append(get_specific_heat(lattices,temp))
    [mag_mean,mag_std]=get_mean_magnetization(lattices)
    mag.append(mag_mean)
    mag_err.append(mag_std)
#################################
######Observing vortices#########
#################################
data=(get_vorticity_configuration(allTlattices[20][9999])) #first index indicates the temperature index, second index is a no between 1-10000
im = plt.imshow(data, interpolation='none')
plt.figure(figsize=(8,4))
values=range(-7,8)  # vorticity levels shown in the legend
colors = [ im.cmap(im.norm(value)) for value in values]
patches = [ mpatches.Patch(color=colors[i], label="Level {l}".format(l=values[i]) ) for i in range(len(values)) ]
plt.legend(handles=patches, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0. )
plt.grid(True)
plt.show()
###########################
######Specific Heat########
###########################
plt.plot(T_vals,sp_heat)
plt.xlabel('Temperature')
plt.ylabel('Specific Heat')
plt.show()
#################################
##########Magnetization##########
#################################
plt.errorbar(T_vals,mag,mag_err)
plt.xlabel('Temperature')
plt.ylabel('Magnetization')
plt.show()
| japneet644/Random-codes | extracting_graphs.py | extracting_graphs.py | py | 1,993 | python | en | code | 0 | github-code | 90 |
def max_halving_rounds(values):
    """Number of times every element of *values* can simultaneously be
    divided by 2 before some element becomes odd.

    Equivalent to the minimum power-of-two factor over all elements.
    Returns 0 for an empty sequence (the original looped forever there).
    Note: a zero element would loop forever here, as in the original.
    """
    current = list(values)
    if not current:
        return 0
    rounds = 0
    while all(v % 2 == 0 for v in current):
        current = [v // 2 for v in current]
        rounds += 1
    return rounds


if __name__ == '__main__':
    n = int(input())
    a = list(map(int, input().split()))
    print(max_halving_rounds(a))
from .base_options import BaseOptions
from datetime import datetime
class InferOptions(BaseOptions):
    """This class includes inference options.
    It also includes shared options defined in BaseOptions.
    """
    def initialize(self, parser):
        """Add inference-only CLI options and mark the phase flags."""
        parser = BaseOptions.initialize(self, parser)  # define shared options
        # Inference always uses the 'infer' phase with the inference dataset.
        parser.set_defaults(phase='infer', dataset_mode='inference')
        parser.add_argument('--results_dir', type=str, default='./results/', help='saves results here.')
        # Default run id is a timestamp so repeated runs do not collide.
        parser.add_argument('--data_name', type=str, default=datetime.now().strftime("%Y%m%d%H%M%S"), help='identifier to distinguish different runs')
        parser.add_argument('--image_path', type=str, required=True, help='path to input image')
        parser.add_argument('--view', type=float, nargs=2, required=False, help='specified view, in the format of [elevation azimuth]')
        self.isTrain, self.isTest, self.isInfer = False, False, True
        return parser
| bennyguo/sketch2model | options/infer_options.py | infer_options.py | py | 983 | python | en | code | 47 | github-code | 90 |
def can_reach(s, x, y):
    """Decide the "FT Robot" problem: starting at the origin facing +x,
    each 'F' in *s* moves one unit forward and each 'T' turns 90 degrees
    (left or right, chosen freely). Return True iff (x, y) is reachable.
    """
    # Lengths of consecutive 'F' runs; each 'T' starts a new run.
    runs = [0]
    for ch in s:
        if ch == 'F':
            runs[-1] += 1
        else:
            runs.append(0)
    # After the first run, even-indexed runs move along x, odd along y,
    # each with a free +/- sign. The very first run is forced to +x.
    x_runs = runs[2::2]
    y_runs = runs[1::2]
    for target, segments in ((x - runs[0], x_runs), (y, y_runs)):
        total = sum(segments)
        if total < abs(target):
            return False
        # Bitset subset-sum DP: bit (total + v) set <=> offset v reachable.
        reachable = 1 << total
        for length in segments:
            reachable = (reachable << length) | (reachable >> length)
        if not (reachable >> (total + target)) & 1:
            return False
    return True


if __name__ == '__main__':
    s = input()
    x, y = map(int, input().split())
    print('Yes' if can_reach(s, x, y) else 'No')
# 4. ะ ะตะฐะปะธะทัะนัะต ะฑะฐะทะพะฒัะน ะบะปะฐัั Car. ะฃ ะดะฐะฝะฝะพะณะพ ะบะปะฐััะฐ ะดะพะปะถะฝั ะฑััั ัะปะตะดัััะธะต ะฐััะธะฑััั: speed, color, name,
# is_police (ะฑัะปะตะฒะพ). ะ ัะฐะบะถะต ะผะตัะพะดั: go, stop, turn(direction), ะบะพัะพััะต ะดะพะปะถะฝั ัะพะพะฑัะฐัั, ััะพ ะผะฐัะธะฝะฐ ะฟะพะตั
ะฐะปะฐ,
# ะพััะฐะฝะพะฒะธะปะฐัั, ะฟะพะฒะตัะฝัะปะฐ (ะบัะดะฐ). ะะฟะธัะธัะต ะฝะตัะบะพะปัะบะพ ะดะพัะตัะฝะธั
 ะบะปะฐััะพะฒ: TownCar, SportCar, WorkCar, PoliceCar.
# ะะพะฑะฐะฒััะต ะฒ ะฑะฐะทะพะฒัะน ะบะปะฐัั ะผะตัะพะด show_speed, ะบะพัะพััะน ะดะพะปะถะตะฝ ะฟะพะบะฐะทัะฒะฐัั ัะตะบัััั ัะบะพัะพััั ะฐะฒัะพะผะพะฑะธะปั.
# ะะปั ะบะปะฐััะพะฒ TownCar ะธ WorkCar ะฟะตัะตะพะฟัะตะดะตะปะธัะต ะผะตัะพะด show_speed. ะัะธ ะทะฝะฐัะตะฝะธะธ ัะบะพัะพััะธ ัะฒััะต 60 (TownCar) ะธ 40 (WorkCar)
# ะดะพะปะถะฝะพ ะฒัะฒะพะดะธัััั ัะพะพะฑัะตะฝะธะต ะพ ะฟัะตะฒััะตะฝะธะธ ัะบะพัะพััะธ.
# ะกะพะทะดะฐะนัะต ัะบะทะตะผะฟะปััั ะบะปะฐััะพะฒ, ะฟะตัะตะดะฐะนัะต ะทะฝะฐัะตะฝะธั ะฐััะธะฑััะพะฒ.
# ะัะฟะพะปะฝะธัะต ะดะพัััะฟ ะบ ะฐััะธะฑััะฐะผ, ะฒัะฒะตะดะธัะต ัะตะทัะปััะฐั. ะัะฟะพะปะฝะธัะต ะฒัะทะพะฒ ะผะตัะพะดะพะฒ ะธ ัะฐะบะถะต ะฟะพะบะฐะถะธัะต ัะตะทัะปััะฐั.
import random
class Car:
def __init__(self, speed, color, name):
self.speed = speed
self.color = color
self.name = name
self.is_police = False
def go(self):
print(f'{self.name} {self.color} ัะฒะตัะฐ ะฟะพะตั
ะฐะปะฐ.', end=" ") if self.is_police is False else print(
f'ะะพะปะธัะตะนัะบะฐั ะผะฐัะธะฝะฐ {self.name} ะฟะพะตั
ะฐะปะฐ.', end=" ")
def stop(self):
print(f'{self.name} ะพััะฐะฝะพะฒะธะปะฐัั.\n')
def turn(self, direction):
print(f'{self.name} ะฟะพะฒะตัะฝัะปะฐ {direction}.', end=" ")
def show_speed(self):
print(f'ะขะตะบััะฐั ัะบะพัะพััั {self.name} {self.speed} ะบะผ/ั.', end=" ")
class TownCar(Car):
    """City car: warns when moving faster than 60 km/h."""

    def show_speed(self):
        # 60 km/h is the town speed limit; otherwise defer to the base class.
        if self.speed > 60:
            print(f'{self.name} ะดะฒะธะถะตััั ัะพ ัะบะพัะพัััั {self.speed} ะบะผ/ั ะธ ะฟัะตะฒััะธะปะฐ ะดะพะฟัััะธะผัั ัะบะพัะพััั!', end=" ")
        else:
            Car.show_speed(self)
class SportCar(Car):
    # Sports car: no speed-limit override, inherits all Car behavior.
    pass
class WorkCar(Car):
    """Work car: warns when moving faster than 40 km/h."""

    def show_speed(self):
        # Work vehicles are limited to 40 km/h.
        if self.speed > 40:
            print(f'{self.name} ะดะฒะธะถะตััั ัะพ ัะบะพัะพัััั {self.speed} ะบะผ/ั ะธ ะฟัะตะฒััะธะปะฐ ะดะพะฟัััะธะผัั ัะบะพัะพััั!', end=" ")
        else:
            Car.show_speed(self)
class PoliceCar(Car):
    """A police car: identical to Car but reports itself via is_police."""

    def __init__(self, speed, color, name):
        Car.__init__(self, speed, color, name)
        # Flips the flag that Car.go() checks to pick the police message.
        self.is_police = True
# Demo: create one instance of each subclass, print its attributes and
# exercise go/turn/show_speed/stop with a random turn direction.
auto_1 = TownCar(75, "ะฑะตะปะพะณะพ", "Mazda")
print(
    f'auto_1:\nspeed: {auto_1.speed}, color: {auto_1.color}, name: {auto_1.name}, is_police: {auto_1.is_police}')
auto_1.go(), auto_1.turn(random.choice(['ะฝะฐะปะตะฒะพ', 'ะฝะฐะฟัะฐะฒะพ'])), auto_1.show_speed(), auto_1.stop()
auto_2 = SportCar(300, "ะบัะฐัะฝะพะณะพ", "Maserati")
print(
    f'auto_2:\nspeed: {auto_2.speed}, color: {auto_2.color}, name: {auto_2.name}, is_police: {auto_2.is_police}')
auto_2.go(), auto_2.turn(random.choice(['ะฝะฐะปะตะฒะพ', 'ะฝะฐะฟัะฐะฒะพ'])), auto_2.show_speed(), auto_2.stop()
auto_3 = WorkCar(40, "ะถัะปัะพะณะพ", "ะะะะตะปั")
print(
    f'auto_3:\nspeed: {auto_3.speed}, color: {auto_3.color}, name: {auto_3.name}, is_police: {auto_3.is_police}')
auto_3.go(), auto_3.turn(random.choice(['ะฝะฐะปะตะฒะพ', 'ะฝะฐะฟัะฐะฒะพ'])), auto_3.show_speed(), auto_3.stop()
auto_4 = PoliceCar(100, "ัะธะฝะตะณะพ", "Lada")
print(
    f'auto_4:\nspeed: {auto_4.speed}, color: {auto_4.color}, name: {auto_4.name}, is_police: {auto_4.is_police}')
auto_4.go(), auto_4.turn(random.choice(['ะฝะฐะปะตะฒะพ', 'ะฝะฐะฟัะฐะฒะพ'])), auto_4.show_speed(), auto_4.stop()
| Xuhen17/Python_Basic | lesson6/less6_task4.py | less6_task4.py | py | 3,812 | python | ru | code | 0 | github-code | 90 |
import geometry
import pygame
from vector2 import Vector2, UP, DOWN, LEFT, RIGHT
from vector2 import ZERO as ZERO_VECTOR
from bindable_event import BindableEvent
from input_handler import InputHandler
from geometry import Ray_Result
GRAVITY = Vector2(0, 100)
class InteractiveRectangle(geometry.Rectangle):
    """Rectangle that fires an event when the player touches it."""
    def __init__(self, x, y, w, h, color=None):
        super().__init__(x, y, w, h, color)
        # Fired with the touching Player; subclasses subscribe handlers.
        self.on_touched = BindableEvent.new()
    def touch(self, player):
        self.on_touched.fire(player)
class Player:
    """Player-controlled rectangle with gravity, jumping, a directional
    teleport, and swept-AABB collision against `object_list`."""
    def __init__(self, object_list: dict, x=0, y=0, max_speed=200, acceleration=250, friction=400):
        self.pos = Vector2.new(x, y)
        self.vel = Vector2()
        self.max_speed = max_speed
        self.acceleration = acceleration
        self.friction = friction
        self.rect = geometry.Rectangle(x, y, 40, 40, color=(230, 50, 50))
        self.jumped = False       # airborne from a jump
        self.teleported = False   # one teleport allowed per airtime
        self.alive = True
        self.object_list = object_list
        # NOTE(review): other code here uses BindableEvent.new(); confirm the
        # plain constructor is equivalent.
        self.died = BindableEvent()
        self.connection_on_died = self.died.connect(self.on_died)
        self.connection_on_space = InputHandler.input_began.connect(self.on_space)
    def on_space(self, inputted):
        # Space: jump when grounded, otherwise teleport once per airtime.
        if inputted.key == pygame.K_SPACE:
            #print("Space pressed")
            if not self.jumped:
                self.jumped = True
                self.vel = Vector2(self.vel.x, -100)
            elif not self.teleported:
                self.teleport()
    def on_died(self):
        self.alive = False
        #print("died")
    def draw(self):
        self.rect.draw()
    def update(self, elapsedTime):
        """Advance one frame: input, gravity, collision resolution, movement."""
        if not self.alive:
            return
        pressed = pygame.key.get_pressed()
        input_vel = Vector2()
        if pressed[pygame.K_a]:
            input_vel += LEFT
        if pressed[pygame.K_d]:
            input_vel += RIGHT
        # Accelerate toward the input direction, or apply friction when idle;
        # only the horizontal component is steered.
        if input_vel != ZERO_VECTOR:
            vel = Vector2(self.vel.x, 0).move_toward(input_vel * self.max_speed, self.acceleration)
        else:
            vel = Vector2(self.vel.x, 0).move_toward(ZERO_VECTOR, self.friction)
        vel += GRAVITY
        self.vel += vel * elapsedTime
        self.rect.velocity = self.vel
        # Collect collision candidates and resolve nearest-first by hit time.
        collisions = []
        for rectangle in self.object_list:
            result = geometry.dynamic_rect_vs_rect(self.rect, rectangle, elapsedTime)
            if isinstance(result, Ray_Result):
                collisions.append([rectangle, result.Time])
        collisions.sort(key=lambda s: s[1])
        for i in range(len(collisions)):
            rect = collisions[i][0]
            # Re-test: resolving an earlier hit may have changed the velocity.
            result = geometry.dynamic_rect_vs_rect(self.rect, rect, elapsedTime)
            if isinstance(result, Ray_Result):
                # print("Pos:", self.rect.position)
                # print("Size:", self.rect.size)
                # print("Target Pos:", rect.position)
                # print("Point:", result.Point)
                # print("Time:", result.Time)
                # Cancel the velocity component along the contact normal.
                self.rect.velocity += result.Normal * Vector2(abs(self.rect.velocity.x), abs(self.rect.velocity.y)) * (1 - result.Time)
                # Landing on top of something restores jump and teleport.
                if result.Normal == DOWN:
                    self.jumped = False
                    self.teleported = False
                if isinstance(rect, InteractiveRectangle):
                    rect.touch(self)
        self.vel = self.rect.velocity
        #print(self.vel)
        self.pos += self.vel * elapsedTime
        self.rect.position = self.pos
        # Falling below the window kills the player.
        if self.pos.y > pygame.display.get_surface().get_height():
            self.died.fire()
    def teleport(self):
        """Blink 80 units in the held WASD direction, clipped so the player
        stops at the first rectangle in the way."""
        pressed = pygame.key.get_pressed()
        direction = ZERO_VECTOR
        if pressed[pygame.K_a]:
            direction += LEFT
        if pressed[pygame.K_d]:
            direction += RIGHT
        if pressed[pygame.K_s]:
            direction += UP
        if pressed[pygame.K_w]:
            direction += DOWN
        if direction == ZERO_VECTOR:
            return
        #print("Teleported")
        direction = direction.unit * 80
        # Find the earliest obstruction along the teleport path, if any.
        resolve = None
        for rect in self.object_list:
            if geometry.rect_vs_rect(self.rect.position + direction, self.rect.size, rect.position, rect.size):
                res = geometry.dynamic_rect_vs_rect(self.rect, rect, 1/60, direction=direction)
                if isinstance(res, Ray_Result):
                    if not resolve:
                        resolve = res
                    elif resolve.Time > res.Time:
                        resolve = res
                    #resolve = res if not resolve or resolve and resolve.Time < res.Time else resolve
                    #self.rect.position = res.Point - self.rect.size / 2
        if not resolve:
            self.rect.position += direction
        else:
            # Stop centred on the collision point instead of inside the wall.
            self.rect.position = resolve.Point - self.rect.size / 2
        self.pos = self.rect.position
        self.vel = self.vel * 0.5  # dampen momentum after a blink
        self.teleported = True
        self.jumped = False
class KillerRectangle(InteractiveRectangle):
    """A lethal rectangle (red by default): kills any player that touches it."""

    def __init__(self, x, y, w, h, color=(255, 0, 0)):
        super().__init__(x, y, w, h, color)
        # Route this rectangle's touch event straight into the kill handler.
        self.on_touched.connect(self.touch)

    @staticmethod
    def touch(player: Player):
        player.died.fire()
class Solution(object):
    """LeetCode 343: split n into at least two positive integers so that
    their product is maximized; return that product."""

    def integerBreak(self, n):
        # best[k] holds the maximum product for splitting k (k >= 2).
        best = [0] * (n + 1)
        for total in range(2, n + 1):
            for first in range(1, total):
                remainder = total - first
                # Either keep the remainder whole or use its best split.
                candidate = first * max(remainder, best[remainder])
                if candidate > best[total]:
                    best[total] = candidate
        return best[n]
print(list(range(-1,-5,-1)))
print(list(range(-5,-1)))
''' Problem Statement : Insertion sort in a Linked list
Algorithm: 1) Create an empty sorted (or result) list
2) Traverse the given list, do following for every node.
a) Insert current node in sorted way in sorted or result list.
3) Change head of given linked list to head of sorted (or result) list.'''
# Node class
class Node:
    """A single node of a singly linked list."""
    # Constructor to initialize the node object
    def __init__(self, data):
        self.data = data  # payload
        self.next = None  # next node in the list (None = tail)
# function to sort a singly linked list using insertion sort
def insertionSort(head_ref):
    """Sort a singly linked list ascending via insertion sort.

    Detaches each node from the input list and inserts it into a growing
    sorted list; returns the head of the sorted list.
    """
    result = None          # head of the sorted list built so far
    node = head_ref
    while node is not None:
        successor = node.next  # save: sortedInsert rewires node.next
        result = sortedInsert(result, node)
        node = successor
    return result
# function to insert a new_node in a list. Note that this
# function expects a pointer to head_ref as this can modify the
# head of the input linked list (similar to push())
def sortedInsert(head_ref, new_node):
    """Insert *new_node* into the ascending list at *head_ref*.

    Returns the (possibly new) head of the list.
    """
    # New head when the list is empty or new_node sorts before the head.
    if head_ref is None or head_ref.data >= new_node.data:
        new_node.next = head_ref
        return new_node
    # Walk to the last node whose successor is still smaller.
    scan = head_ref
    while scan.next is not None and scan.next.data < new_node.data:
        scan = scan.next
    new_node.next = scan.next
    scan.next = new_node
    return head_ref
# BELOW FUNCTIONS ARE JUST UTILITY TO TEST sortedInsert
# Function to print linked list */
def printList(head):
    """Print each node's data separated by spaces (no trailing newline)."""
    node = head
    while node is not None:
        print(node.data, end=" ")
        node = node.next
# A utility function to insert a node
# at the beginning of linked list
def push(head_ref, new_data):
    """Prepend a node carrying *new_data*; returns the new head."""
    node = Node(0)
    node.data = new_data    # store the payload
    node.next = head_ref    # old head becomes the second node
    return node
# Driver program to test above functions
# Build the list 30 -> 3 -> 4 -> 20 -> 5, then sort it in place.
a = None
a = push(a, 5)
a = push(a, 20)
a = push(a, 4)
a = push(a, 3)
a = push(a, 30)
print("Linked List before sorting ")
printList(a)
a = insertionSort(a)
print("\nLinked List after sorting ")
printList(a)
| manvi0308/100DaysOfAlgo | Day 22/InsertionSortInLinkedList.py | InsertionSortInLinkedList.py | py | 2,504 | python | en | code | 33 | github-code | 90 |
from f_utils import u_tester
from algo.ucs import UCS
from algo.astar import AStar
from model.point import Point
from model.grid_blocks import GridBlocks
class TestUCS:
    """Self-running test suite for the UCS search algorithm."""
    def __init__(self):
        # Running the tests is a side effect of construction.
        u_tester.print_start(__file__)
        self.__tester_optimal_path()
        self.__tester_expanded_nodes()
        u_tester.print_finish(__file__)
    @staticmethod
    def __tester_optimal_path():
        """UCS must find paths of the same length as A* on random grids."""
        p0 = True
        for _ in range(100):
            grid = GridBlocks(rows=10, cols=10, percent_blocks=25)
            start, goal = grid.points_random(amount=2)
            ucs = UCS(grid, start, goal)
            ucs.run()
            astar = AStar(grid, start, goal)
            astar.run()
            if not len(ucs.optimal_path()) == len(astar.optimal_path()):
                p0 = False
                break
        u_tester.run(p0)
    @staticmethod
    def __tester_expanded_nodes():
        """UCS must expand exactly 17 nodes on a fixed walled 5x5 grid."""
        grid = GridBlocks(rows=5)
        # Vertical wall in column 2 with a single gap at row 0.
        grid.set_block(1, 2)
        grid.set_block(2, 2)
        grid.set_block(3, 2)
        grid.set_block(4, 2)
        start = Point(3, 1)
        goal = Point(3, 3)
        ucs = UCS(grid, start, goal)
        ucs.run()
        p0 = ucs.expanded_nodes() == 17
        u_tester.run(p0)
if __name__ == '__main__':
    TestUCS()
| valdas1966/kg | algo/testers/t_ucs.py | t_ucs.py | py | 1,274 | python | en | code | 0 | github-code | 90 |
import re
def uncollapse(digits):
    """Split a run of concatenated digit words into a space-separated string.

    e.g. "onetwo" -> "one two". Assumes the input is made only of the
    words zero..nine (otherwise the loop never terminates, as before).
    """
    words = ('zero', 'one', 'two', 'three', 'four',
             'five', 'six', 'seven', 'eight', 'nine')
    found = []
    while digits:
        for word in words:
            if digits.startswith(word):
                found.append(word)
                digits = digits[len(word):]
    return ' '.join(found)
print(uncollapse("fivethreefivesixthreenineonesevenoneeight"))
def finance(n):
    """Return [sum(range(i + 2, n + i)) for each i in 0..n-1]."""
    # Each entry is the sum of the n - 2 integers from i+2 up to n+i-1.
    return [sum(range(i + 2, n + i)) for i in range(n)]
print(finance(6))
import re
def solution(roman):
    """Convert a Roman numeral string to an integer.

    Every symbol is first counted additively; then each subtractive pair
    present in the string gets a one-off correction (e.g. I+V = 6 -> IV = 4).
    """
    symbol_values = {'I': 1, 'V': 5, 'X': 10, 'L': 50,
                     'C': 100, 'D': 500, 'M': 1000}
    pair_corrections = {'IV': -2, 'IX': -2, 'XL': -20,
                        'XC': -20, 'CD': -200, 'CM': -200}
    total = sum(symbol_values[ch] for ch in roman if ch in symbol_values)
    for pair, correction in pair_corrections.items():
        if pair in roman:
            total += correction
    return total
print(solution('IV'))
def xmastree(n):
    """Build a centred ASCII tree of *n* levels.

    Level k (1-based) has 2k-1 '#' characters padded with '_' to the widest
    row; two one-'#' trunk rows are appended. Raises IndexError for n == 0,
    matching the original behavior.
    """
    widths = [2 * level - 1 for level in range(1, n + 1)]
    span = widths[-1]  # widest row
    rows = []
    for width in widths:
        pad = '_' * ((span - width) // 2)
        rows.append(pad + '#' * width + pad)
    trunk_pad = '_' * ((span - 1) // 2)
    trunk = trunk_pad + '#' + trunk_pad
    rows.extend([trunk, trunk])
    return rows
import math
def convert(degrees):
    """Convert decimal degrees to a [degrees, minutes, seconds] list."""
    whole = int(degrees)
    fraction = degrees - whole
    minutes_float = fraction * 60
    # Round to 4 decimals first so values like 19.99999999 count as a
    # full minute, exactly as the original did.
    minutes = int(round(minutes_float, 4))
    seconds = round((minutes_float - minutes) * 60)
    return [whole, minutes, seconds]
print(convert(0.0001398888888888889))
print(convert(91.33333333333333))
';;' | atebelskis/CodeWars-tasks | CD_7.py | CD_7.py | py | 1,667 | python | en | code | 0 | github-code | 90 |
#!/usr/bin/python
from datetime import datetime, date, timedelta
class Student:
def __init__(self, sid, name, address, birthday):
self.id = sid
self.name = name
self.address = address
self.birthday = birthday
self.datetime_birthday = datetime.strptime(birthday, "%d-%m-%Y")
self.grades = []
def get_age(self):
return (date.today() - self.datetime_birthday.date()) // timedelta(days=365.2425)
def get_average(self):
return sum([i.grade for i in self.grades]) / len(self.grades)
@staticmethod
def str_grades(grades):
string = ""
len_grades = len(grades)
for i, grade in enumerate(grades):
string += str(grade)
if i != len_grades - 1:
string += ", "
return string
def __str__(self, *args, **kwargs):
return_str = "\n\t".join(
["Student = {",
"id = %d" % self.id,
"name = %s" % self.name,
"address = %s" % self.address,
"birthday = %s" % self.birthday
])
return return_str + "\n\tgrades = " + self.str_grades(self.grades) + "\n}\n"
| vampy/university | individual-project/lab-src/code/student.py | student.py | py | 1,189 | python | en | code | 4 | github-code | 90 |
class Node:
    """Single element of the linked stack."""

    def __init__(self, value=None):
        self.data = value      # payload
        self.nextNode = None   # node directly beneath this one


class Stack:
    """LIFO stack backed by a singly linked list."""

    def __init__(self):
        self.head = None       # top of the stack
        self.listSize = 0      # number of stored elements

    def push(self, value):
        node = Node(value)
        node.nextNode = self.head
        self.head = node
        self.listSize += 1

    def pop(self):
        # Returns None on an empty stack, matching peek().
        if self.head is None:
            return None
        top = self.head.data
        self.head = self.head.nextNode
        self.listSize -= 1
        return top

    def size(self):
        return self.listSize

    def peek(self):
        return None if self.head is None else self.head.data
# When you are ready, uncomment this block
# Do not edit anything below this line
# Demo: push, inspect, pop, re-push, then drain the stack.
A = Stack() # Initiate the stack
A.push("Value 01") # push values
A.push("Value 02")
print("Size={}".format(A.size())) # check the size of stack
print("Top element is {}".format(A.peek())) # display the top element of stack
c = A.pop() # pop elements in stack, store the removed element in a variable if required
A.push("Value 03")
A.push(c)
# Popping all elements from the stack:
while A.size() > 0:
    print( A.pop() )
| ravi-prakash1907/Problem-Solving-with-Python | notes/sem2/LL_Stack.py | LL_Stack.py | py | 1,213 | python | en | code | 0 | github-code | 90 |
N,K=map(int,input().split())
A=list(map(int,input().split()))
# One[p] = how many of the A values have bit (39 - p) set; positions are
# indexed to line up with the 40-character binary string of K below.
One=[0]*40
OneK=format(K, '040b')
flg=True
# Count set bits by repeatedly taking each value's low bit and shifting;
# stop early once every value has been consumed to zero.
for i in range(40):
    for j in range(N):
        if A[j]==0:
            continue
        flg=False
        One[39-i]+=A[j]&1
        A[j]=A[j]>>1
    if flg:
        break
    flg=True
ans=0
i=0
# Phase 1: while K has a leading 0 at this bit, X must also take 0 there,
# so only the inputs' own set bits contribute.
while i<40:
    if OneK[i]=='1':
        break
    ans+=pow(2,39-i)*One[i]
    i+=1
# Phase 2: from K's first 1-bit on, greedily decide each bit of X.
# flg records that a strictly smaller prefix was taken, freeing later bits.
# NOTE(review): intricate greedy for maximizing sum of (X xor A_i) with
# X <= K — correctness not re-verified here.
flg=False
while i<40:
    if One[i]>=N/2:
        # Majority of inputs already have this bit: keep X's bit at 0.
        ans+=pow(2,39-i)*One[i]
        if OneK[i]=='1':
            flg=True
    else:
        if flg or OneK[i]=='1':
            # Allowed to set X's bit: flip the minority into a majority.
            ans+=pow(2,39-i)*(N-One[i])
        else:
            ans+=pow(2,39-i)*One[i]
    i+=1
print(ans)
MOD = 998244353


def count_trees(n, distances, mod=MOD):
    """Count rooted trees on *n* labelled vertices where vertex i sits at
    depth distances[i] below the root (vertex 0), modulo *mod*.

    Each vertex at depth d may choose any vertex at depth d-1 as its
    parent, so the answer is the product over d of cnt[d-1] ** cnt[d].
    It is zero unless vertex 0 is the unique vertex at depth 0.
    """
    depth_count = [0] * n
    for d in distances:
        depth_count[d] += 1
    if distances[0] != 0 or depth_count[0] != 1:
        return 0
    result = 1
    for d in range(1, n):
        result = result * pow(depth_count[d - 1], depth_count[d], mod) % mod
    return result


if __name__ == '__main__':
    n = int(input())
    D = list(map(int, input().split()))
    print(count_trees(n, D))
# Cryptopals challenge 8: detect the ECB-encrypted line — just look for a
# duplicated 16-byte (32 hex chars) block.


def find_ecb_ciphertext(ciphertexts, block_hex_len=32):
    """Return the first hex-encoded ciphertext containing a repeated
    *block_hex_len*-character block (16 bytes by default), or None.

    A duplicate block means two identical plaintext blocks encrypted to the
    same output — the classic ECB fingerprint. Set-based duplicate check
    replaces the original O(n^2) pairwise scan.
    """
    for ciphertext in ciphertexts:
        blocks = [ciphertext[i:i + block_hex_len]
                  for i in range(0, len(ciphertext), block_hex_len)]
        if len(set(blocks)) != len(blocks):
            return ciphertext
    return None


if __name__ == '__main__':
    with open('08.txt', 'rb') as f:
        lines = [line.strip() for line in f.readlines()]
    hit = find_ecb_ciphertext(lines)
    if hit is not None:
        blocks = [hit[i:i + 32] for i in range(0, len(hit), 32)]
        duplicated = next(b for b in blocks if blocks.count(b) > 1)
        print("Found a match: ", duplicated, duplicated)
        print("Full ciphertext: ", hit)
| prasantadh/cryptopals | challenge08.py | challenge08.py | py | 562 | python | en | code | 0 | github-code | 90 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
import tensorflow as tf
from moonlight.image import decode_music_score_png
from moonlight.staves import staffline_distance
class StafflineDistanceTest(tf.test.TestCase):
  """Tests for staffline distance/thickness estimation on images."""
  def testCorpusImage(self):
    # A real scanned score should yield the known distance and thickness.
    filename = os.path.join(tf.resource_loader.get_data_files_path(),
                            '../testdata/IMSLP00747-000.png')
    image_contents = open(filename, 'rb').read()
    image_t = decode_music_score_png(tf.constant(image_contents))
    staffdist_t, staffthick_t = (
        staffline_distance.estimate_staffline_distance_and_thickness(image_t,))
    with self.test_session() as sess:
      staffdist, staffthick = sess.run((staffdist_t, staffthick_t))
    # Manually determined values for the image.
    self.assertAllEqual(staffdist, [16])
    self.assertEquals(staffthick, 2)
  def testZeros(self):
    # All white (0) shouldn't be picked up as a music score.
    image_t = tf.zeros((512, 512), dtype=tf.uint8)
    staffdist_t, staffthick_t = (
        staffline_distance.estimate_staffline_distance_and_thickness(image_t))
    with self.test_session() as sess:
      staffdist, staffthick = sess.run((staffdist_t, staffthick_t))
    # No staves detected: empty distances and sentinel thickness -1.
    self.assertAllEqual(staffdist, [])
    self.assertEqual(staffthick, -1)
  def testSpeckles(self):
    # Random speckles shouldn't be picked up as a music score.
    tf.set_random_seed(1234)
    # ~10% of pixels set to 255, the rest 0.
    image_t = tf.where(
        tf.random_uniform((512, 512)) < 0.1,
        tf.fill((512, 512), tf.constant(255, tf.uint8)),
        tf.fill((512, 512), tf.constant(0, tf.uint8)))
    staffdist_t, staffthick_t = (
        staffline_distance.estimate_staffline_distance_and_thickness(image_t))
    with self.test_session() as sess:
      staffdist, staffthick = sess.run((staffdist_t, staffthick_t))
    self.assertAllEqual(staffdist, [])
    self.assertEqual(staffthick, -1)
if __name__ == '__main__':
  tf.test.main()
| tensorflow/moonlight | moonlight/staves/staffline_distance_test.py | staffline_distance_test.py | py | 1,994 | python | en | code | 321 | github-code | 90 |
import os
import re
FULLPRINT = False
COMPACTPRINT = False
BETTERPRINT = True
def extract_number(text):
    """Return the first number that follows a colon in *text*.

    Values with a decimal point (optionally signed) and bare unsigned
    integers are recognised. Because float() is tried first and succeeds
    on integer tokens too, integers are returned as floats in practice.
    Returns None when no number follows a colon.
    """
    match = re.search(r":\s*([-+]?\d*\.\d+|\d+)", text)
    if match is None:
        # No number found after a colon.
        return None
    token = match.group(1)
    try:
        return float(token)
    except ValueError:
        return int(token)
def process_files(filename, data):
    """Collect metric values from every copy of *filename* under the CWD.

    Walks the current working directory recursively; every file whose
    name equals *filename* (and whose path does not contain
    "_outside_conv") is scanned line by line. For each line starting
    with one of the strings in *data*, the trailing number is extracted
    with extract_number(). Returns a list of dicts, one per file, each
    containing a "Test" key (last 80 chars of the path) plus the metrics
    that were found.
    """
    result = []
    # Get the current working directory
    current_path = os.getcwd()
    # Function to process each file and update the result array
    def process_file(file_path):
        with open(file_path, 'r') as file:
            lines = file.readlines()
        current_dict = {}
        # Truncate long paths so the tables below stay readable.
        current_dict["Test"] = file_path[-80:]
        for line in lines:
            for datum in data:
                if (line.strip()).startswith(datum):
                    current_dict[datum] = extract_number(line.strip())
        # Only keep files that produced at least one metric besides "Test".
        if current_dict and len(current_dict.keys()) > 1:
            result.append(current_dict)
    # Walk through all directories and subdirectories
    for root, _, files in os.walk(current_path):
        for file in files:
            if file == filename:
                file_path = os.path.join(root, file)
                # NOTE(review): "_outside_conv" paths are deliberately
                # excluded -- presumably out-of-convergence runs; confirm.
                if "_outside_conv" not in file_path:
                    process_file(file_path)
    return result
# Gather PandA/Vivado log metrics for optimised and unoptimised runs and
# print them in one of three report formats (selected by the flags above).
filename_to_find_opt = "panda_log_opt.txt"
filename_to_find_no_opt = "panda_log.txt"
strings_to_search = ["Average execution", "Luts", "Time", "Power", "Registers", "DSPs", "BRAMs", "Design slack", "Frequency", "AreaxTime"]
result_opt = process_files(filename_to_find_opt, strings_to_search)
result_no_opt = process_files(filename_to_find_no_opt, strings_to_search)
# Full print: wide fixed-width table, interleaving opt/no-opt rows.
if FULLPRINT:
    print("----> Full print:")
    print(f"{'Test':<60}", end="")
    for string in strings_to_search:
        print(f"{string:<20}", end="")
    print("")
    count = 0
    # Interleave the two result lists pairwise: opt row, then no-opt row.
    for res in [val for pair in zip(result_opt, result_no_opt) for val in pair]:
        print(f"{res['Test']:<60}", end="")
        for string in strings_to_search:
            if string in res:
                print(f"{res[string]:<20}", end="")
            else:
                print(f"{'error':<20}", end="")
        count += 1
        # Blank line after every opt/no-opt pair.
        if count % 2 == 0:
            print("")
        print("")
# Compact print: one row per test, opt and no-opt values side by side.
if COMPACTPRINT:
    print("\n\n----> Compact print:")
    for res_op, res_no_op in zip(result_opt, result_no_opt):
        print(f"{res_op['Test'][:-18]} ", end="")
        for string in strings_to_search:
            if string in res_op:
                print(f"{res_op[string]:.8f} ", end="")
            else:
                print(f"{'error'} ", end="")
            if string in res_no_op:
                print(f"{res_no_op[string]:.8f} ", end="")
            else:
                print(f"{'error'} ", end="")
        print("")
# Better print: rewrite test names from Windows paths, then show
# "metric: opt -> no_opt" pairs per test.
if BETTERPRINT:
    print("\n\n----> Better print:")
    def fix_name(res):
        # Derive a friendly test name from the Windows-style path tokens.
        name_tokens = res['Test'].split('\\')
        name = name_tokens[-2]
        if name_tokens[-3] != 'Tests':
            name = name_tokens[-3] + '(' + name + ')'
        if 'opt' in name_tokens[-1]:
            name += ' - OPT'
        name = name.replace('Compute', '')
        name = name.replace('FromPanda_mm_float_inside_opt', 'MatrixProduct')
        res['Test'] = name
    for res in result_opt:
        fix_name(res)
    for res in result_no_opt:
        fix_name(res)
    #def print_dictionary(dictionary):
    #    formatted_items = [f"{key}: {value}" for key, value in dictionary.items()]
    #    formatted_string = ", ".join(formatted_items)
    #    print(formatted_string)
    def print_dictionaries(dic1, dic2):
        # Print "key: dic1_value -> dic2_value" for each metric.
        formatted_items = [f"{key}: {dic1[key]} -> {dic2[key]}" if key != 'Test' else f"Test: {dic2[key]}" for key in dic1.keys()]
        formatted_string = ", ".join(formatted_items)
        print(formatted_string)
    if len(result_no_opt) != len(result_opt):
        raise Exception("Length missmatch between the two result arrays...")
    for i in range(len(result_opt)):
        #print_dictionary(result_opt[i])
        #print_dictionary(result_no_opt[i])
        print_dictionaries(result_opt[i], result_no_opt[i])
    #for res in result_opt:
    #    for i in res.items():
    #        print(i)
    #print(result_no_opt)
36678354113 | import json
import os.path
from apiclient import errors
from oauth2client.client import AccessTokenCredentialsError
from django.conf import settings
from django.db.models import F
from django.shortcuts import get_object_or_404, redirect
from django.http import (
HttpResponse, HttpResponseBadRequest, HttpResponseServerError,
HttpResponseForbidden, HttpResponseNotFound)
from django.contrib.auth.decorators import (
login_required, user_passes_test)
from django.contrib.auth.models import User
from django.views.generic.base import View
from django.views.generic.detail import SingleObjectMixin
from django.views.generic import ListView, TemplateView, RedirectView
from django.views.generic.edit import UpdateView
from django.core.urlresolvers import reverse, reverse_lazy
from django.contrib import messages
from organizations.utils import org_permission_required, active_organization
from unicoremc.models import Project, Localisation, AppType, ProjectRepo
from unicoremc.forms import ProjectForm
from unicoremc import constants, exceptions
from unicoremc import tasks, utils
def repos_json(request):
    """Return the known repositories as a JSON response.

    Deliberately not @login_required: repository listings are public.
    Pass ?refresh=true to bypass any cached listing.
    """
    wants_refresh = request.GET.get('refresh', 'false') == 'true'
    payload = json.dumps(utils.get_repos(wants_refresh))
    return HttpResponse(payload, content_type='application/json')
@login_required
def teams_json(request):
    """Return the teams as JSON. Login required: teams aren't public."""
    payload = json.dumps(utils.get_teams())
    return HttpResponse(payload, content_type='application/json')
def health_json(request, project_id):
    """Run the project's configured health check and report the outcome.

    Returns 400 when no health check path is configured, 500 when the
    check fails, otherwise a JSON success payload.
    """
    project = get_object_or_404(Project, pk=project_id)
    if not project.marathon_health_check_path:
        return HttpResponseBadRequest('Health check not configured.')
    check = utils.get_health(project)
    if check.status_code != 200:
        return HttpResponseServerError(
            'Health check failed: %d. %s' %
            (check.status_code, check.content))
    return HttpResponse(
        json.dumps({'success': True}), content_type='application/json')
@login_required
def update_marathon_exists_json(request, project_id):
    """Reconcile the project's workflow state with Marathon.

    A 'done' project that no longer exists on Marathon is marked
    'missing'; a 'missing' project that reappeared is re-activated.
    exists_on_marathon() is only called lazily inside the conditions so
    at most one remote check is made. Returns the resulting state as
    JSON.
    """
    project = get_object_or_404(Project, pk=project_id)
    workflow = project.get_website_manager().workflow
    if project.state == 'done' and not project.exists_on_marathon():
        workflow.take_action('missing')
        project.save()
    elif project.state == 'missing' and project.exists_on_marathon():
        workflow.take_action('activate')
        project.save()
    return HttpResponse(
        json.dumps({'state': project.state}), content_type='application/json')
class ProjectViewMixin(View):
    """Base mixin for project views.

    Wraps the view in (innermost-first) social-auth, organization
    permission and login checks. Subclasses may set ``permissions``
    (permission codenames) and ``social_auth`` (a provider name whose
    association is required before the view runs).
    """

    pk_url_kwarg = 'project_id'
    permissions = []
    social_auth = None

    @classmethod
    def as_view(cls):
        """Build the view, layering on the configured access checks."""
        view = super(ProjectViewMixin, cls).as_view()
        if cls.social_auth:
            # Users without an association for the required provider are
            # redirected into the social auth begin flow.
            view = user_passes_test(
                lambda u: u.social_auth.filter(
                    provider=cls.social_auth).exists(),
                login_url=reverse_lazy(
                    'social:begin', args=(cls.social_auth,)))(view)
        if cls.permissions:
            view = org_permission_required(cls.permissions)(view)
        return login_required(view)

    def dispatch(self, request, *args, **kwargs):
        return super(ProjectViewMixin, self).dispatch(request, *args, **kwargs)

    def get_projects_queryset(self, request):
        """Projects visible to this request.

        With an active organization: that organization's projects.
        Without one: all projects for superusers, none otherwise.
        """
        organization = active_organization(request)
        if organization is None:
            if request.user.is_superuser:
                return Project.objects.all()
            return Project.objects.none()
        return Project.objects.filter(organization=organization)
class NewProjectView(ProjectViewMixin, TemplateView):
    """Create a new project from repo / app-type / country selections."""
    # TODO: base this on CreateView instead of TemplateView
    template_name = 'unicoremc/new_project.html'
    permissions = ['unicoremc.add_project']

    def get_context_data(self):
        """Provide form choices: countries, languages, app types and the
        finished ('done') repos of projects this user can see."""
        projects = self.get_projects_queryset(self.request)
        project_pks = projects.values_list('pk', flat=True)
        context = super(NewProjectView, self).get_context_data()
        context.update({
            'countries': constants.COUNTRY_CHOICES,
            'languages': Localisation.objects.all(),
            'app_types': AppType.objects.all(),
            'project_repos': ProjectRepo.objects.filter(
                project__in=project_pks,
                project__state='done'
            ).order_by('project'),
        })
        return context

    def post(self, request, *args, **kwargs):
        """Create (or fetch) the project and kick off provisioning."""
        app_type = request.POST.get('app_type')
        app_type = AppType.objects.get(pk=int(app_type))
        base_repo = request.POST.get('base_repo')
        project_repos = request.POST.getlist('project_repos[]')
        repo_count = len(project_repos) + (1 if base_repo else 0)
        # validate base repos and app type
        if not repo_count:
            return HttpResponseBadRequest('No repo selected')
        if (repo_count > 1 and app_type.project_type == AppType.UNICORE_CMS):
            return HttpResponseBadRequest(
                '%s does not support multiple repos' % (AppType.UNICORE_CMS,))
        country = request.POST.get('country')
        user_id = request.POST.get('user_id')
        team_id = request.POST.get('team_id')
        docker_cmd = request.POST.get('docker_cmd')
        user = User.objects.get(pk=user_id)
        # (app_type, country) identifies a project; defaults only apply
        # on first creation.
        project, created = Project.objects.get_or_create(
            application_type=app_type,
            country=country,
            defaults={
                'team_id': int(team_id),
                'owner': user,
                'organization': active_organization(self.request),
                'marathon_health_check_path': '/health/',
                'docker_cmd':
                    docker_cmd or
                    utils.get_default_docker_cmd(app_type, country)
            })
        project.external_repos.add(*project_repos)
        if base_repo:
            ProjectRepo.objects.get_or_create(
                project=project,
                defaults={'base_url': base_repo})
        # For consistency with existing apps, all new apps will also have
        # country domain urls in addition to the generic urls
        project.frontend_custom_domain = project.get_country_domain()
        project.cms_custom_domain = project.content_url()
        project.save()
        if created:
            # Provisioning is slow; run it asynchronously.
            tasks.start_new_project.delay(project.id)
        return HttpResponse(json.dumps({'success': True}),
                            content_type='application/json')
class HomepageView(ProjectViewMixin, ListView):
    """List the projects visible to the current user/organization."""
    template_name = 'unicoremc/home.html'

    def get_queryset(self):
        return self.get_projects_queryset(self.request)
class ProjectEditView(ProjectViewMixin, UpdateView):
    """Edit a project's advanced settings and push the changes out."""
    form_class = ProjectForm
    template_name = 'unicoremc/advanced.html'
    permissions = ['unicoremc.change_project']

    def get_queryset(self):
        return self.get_projects_queryset(self.request)

    def get_success_url(self):
        return reverse("home")

    def form_valid(self, form):
        """Save the form, bump the project version, regenerate settings
        and update the marathon app."""
        response = super(ProjectEditView, self).form_valid(form)
        project = self.get_object()
        # F() expression increments project_version in the database
        # without racing concurrent writers.
        Project.objects.filter(
            pk=project.pk).update(project_version=F('project_version') + 1)
        # Re-fetch so the in-memory instance reflects the bumped version.
        project = self.get_object()
        project.create_or_update_hub_app()
        project.create_pyramid_settings()
        project.create_nginx()
        try:
            project.update_marathon_app()
        except exceptions.MarathonApiException:
            # Settings were still regenerated; surface the failure to
            # the user rather than aborting the whole save.
            messages.error(
                self.request, 'Unable to update project in marathon')
        return response
class ManageGAView(ProjectViewMixin, TemplateView):
    """Create Google Analytics profiles for finished projects.

    Requires a google-oauth2 association; the user's access token is
    used against the GA API.
    """
    # TODO: base this on UpdateView instead of TemplateView
    template_name = 'unicoremc/manage_ga.html'
    permissions = ['unicoremc.change_project']
    social_auth = 'google-oauth2'

    def get_context_data(self):
        """Provide the user's GA accounts and the 'done' projects."""
        social = self.request.user.social_auth.get(provider='google-oauth2')
        accounts = utils.get_ga_accounts(social.extra_data['access_token'])
        projects = self.get_projects_queryset(self.request)
        context = super(ManageGAView, self).get_context_data()
        context.update({
            'projects': projects.filter(state='done'),
            'accounts': [
                {'id': a.get('id'), 'name': a.get('name')} for a in accounts],
        })
        return context

    def get(self, request, *args, **kwargs):
        try:
            return super(ManageGAView, self).get(request, *args, **kwargs)
        except AccessTokenCredentialsError:
            # Stale/expired token: restart the OAuth flow.
            return redirect('social:begin', 'google-oauth2')

    def post(self, request, *args, **kwargs):
        """Create a GA profile for the project, once only."""
        project_id = request.POST.get('project_id')
        account_id = request.POST.get('account_id')
        social = request.user.social_auth.get(provider='google-oauth2')
        access_token = social.extra_data['access_token']
        project = get_object_or_404(
            self.get_projects_queryset(self.request), pk=project_id)
        if not project.ga_profile_id:
            try:
                name = u'%s %s' % (
                    project.app_type.upper(), project.get_country_display())
                new_profile_id = utils.create_ga_profile(
                    access_token, account_id, project.frontend_url(), name)
                project.ga_profile_id = new_profile_id
                project.ga_account_id = account_id
                project.save()
                # Regenerate settings so the app picks up the profile id.
                project.create_pyramid_settings()
                return HttpResponse(
                    json.dumps({'ga_profile_id': new_profile_id}),
                    content_type='application/json')
            except errors.HttpError:
                return HttpResponseServerError("Unable to create new profile")
        return HttpResponseForbidden("Project already has a profile")
class ResetHubAppKeyView(ProjectViewMixin, SingleObjectMixin, RedirectView):
    """Reset the project's hub app key, then redirect back to 'advanced'."""
    permissions = ['unicoremc.change_project']
    permanent = False
    pattern_name = 'advanced'

    def get_queryset(self):
        return self.get_projects_queryset(self.request)

    def get(self, request, *args, **kwargs):
        project = self.get_object()
        app = project.hub_app()
        # Projects without a hub app are a no-op; still redirect.
        if app is not None:
            app.reset_key()
            # Regenerate settings so the new key takes effect.
            project.create_pyramid_settings()
        return super(ResetHubAppKeyView, self).get(request, *args, **kwargs)
class AppLogView(ProjectViewMixin, TemplateView):
    """Show the project's running Marathon tasks and their logs."""
    template_name = 'unicoremc/app_logs.html'
    social_auth = 'google-oauth2'

    def get_context_data(self, *args, **kwargs):
        context = super(AppLogView, self).get_context_data(*args, **kwargs)
        project = get_object_or_404(self.get_projects_queryset(self.request),
                                    pk=kwargs['project_id'])
        tasks = project.infra_manager.get_project_marathon_tasks()
        context.update({
            'project': project,
            'tasks': tasks,
            # Marathon task ids are "<app_id>.<task_id>"; templates only
            # need the task part.
            'task_ids': [t['id'].split('.', 1)[1] for t in tasks],
            # ?n= overrides how many log lines to scroll back.
            'scroll_backlog': (
                self.request.GET.get('n') or settings.LOGDRIVER_BACKLOG)
        })
        return context
class HealthCheckView(ProjectViewMixin, ListView):
    """Health-check dashboard over the projects the user can change."""
    template_name = 'unicoremc/health_check.html'
    permissions = ['unicoremc.change_project']

    def get_queryset(self):
        return self.get_projects_queryset(self.request)
class AppEventSourceView(ProjectViewMixin, View):
    """Stream a task's stdout/stderr via an nginx X-Accel-Redirect.

    The response body is served by nginx from the log driver path; this
    view only validates access and computes the internal redirect URL.
    """
    social_auth = 'google-oauth2'

    def get(self, request, project_id, task_id, path):
        project = get_object_or_404(self.get_projects_queryset(request),
                                    pk=project_id)
        # ?n= controls the scrollback size passed through to the backend.
        n = request.GET.get('n') or settings.LOGDRIVER_BACKLOG
        if path not in ['stdout', 'stderr']:
            return HttpResponseNotFound('File not found.')
        # NOTE: I'm piecing together the app_id and task_id here
        # so as to not need to expose both in the templates.
        task = project.infra_manager.get_project_task_log_info(
            '%s.%s' % (project.app_id, task_id))
        response = HttpResponse()
        response['X-Accel-Redirect'] = '%s?n=%s' % (os.path.join(
            settings.LOGDRIVER_PATH, task['task_host'],
            task['task_dir'], path), n)
        # Disable nginx buffering so the log streams live.
        response['X-Accel-Buffering'] = 'no'
        return response
class ProjectRestartView(ProjectViewMixin, View):
    """Trigger a Marathon restart for a project, then return home."""

    def get(self, request, project_id):
        project = get_object_or_404(Project, pk=project_id)
        try:
            project.marathon_restart_app()
        except exceptions.MarathonApiException:
            messages.error(
                self.request, 'App restart failed. Please try again.')
        else:
            messages.info(self.request, 'App restart sent.')
        return redirect('home')
| universalcore/unicore-mc | unicoremc/views.py | views.py | py | 12,876 | python | en | code | 0 | github-code | 90 |
34343582220 | """This module contains inclusion (subsethood) measures for type-1 sets."""
from decimal import Decimal
from .. import global_settings as gs
def szmidt_pacprzyk(fs):
    """Return a ratio-based entropy of *fs* over the discretised x axis.

    For each x, the (lower, upper) membership interval contributes
    1 - max(1 - upper, lower) to the numerator and 1 - min(1 - upper,
    lower) to the denominator; the ratio is normalised by the x
    discretisation level and rounded via gs.rnd.
    """
    numerator = 0
    denominator = 0
    for x in gs.get_x_points():
        lower, upper = fs.calculate_membership(x)
        numerator += 1 - max(1 - upper, lower)
        denominator += 1 - min(1 - upper, lower)
    return gs.rnd((numerator / denominator) / Decimal(gs.global_x_disc))
def zeng_li(fs):
    """Calculate entropy based on the sum of upper and lower memberships."""
    total = 0
    for x in gs.get_x_points():
        lower, upper = fs.calculate_membership(x)
        total += abs(upper + lower - 1)
    return gs.rnd(1 - (total / Decimal(gs.global_x_disc)))
| arthurcaio92/pyT2FTS | fuzzycreator/measures/entropy_it2.py | entropy_it2.py | py | 755 | python | en | code | 0 | github-code | 90 |
def simpleIt(a, b):
    """Return gcd(a, b) computed with the iterative Euclidean algorithm."""
    while b != 0:
        a, b = b, a % b
    return a
def simpleRec(a, b):
    """Return gcd(a, b) computed with the recursive Euclidean algorithm.

    Bug fix: the original raised ZeroDivisionError for b == 0; by
    definition gcd(a, 0) == a, so that case now returns a directly.
    """
    if b == 0:
        return a
    r = a % b
    if r != 0:
        return simpleRec(b, r)
    else:
        return b
def extendedIt(a, b):
    """Iterative extended Euclid: return (g, u, v) with u*a + v*b == g."""
    old_r, r = a, b
    old_u, u = 1, 0
    old_v, v = 0, 1
    while r != 0:
        q = old_r // r
        old_r, r = r, old_r - q * r
        old_u, u = u, old_u - q * u
        old_v, v = v, old_v - q * v
    return old_r, old_u, old_v
def extendedRecInit(a, b):
    """Return (g, u, v) with u*a + v*b == g, seeding the recursion with
    the identity Bezout coefficients (u0, u1, v0, v1) = (1, 0, 0, 1)."""
    return extendedRec(a, b, 1, 0, 0, 1)
def extendedRec(a, b, u0, u1, v0, v1):
    """Recursive extended Euclid step.

    Carries the Bezout coefficient pairs (u0, u1) and (v0, v1) through
    the recursion and returns (g, u, v) for the original inputs.
    """
    if b == 0:
        return a, u0, v0
    q = a // b
    return extendedRec(b, a - q * b, u1, u0 - q * u1, v1, v0 - q * v1)
def revModul(a, n):
    """Return the inverse of a modulo n, or 0 when a is not invertible.

    Runs the extended Euclidean algorithm tracking only the coefficient
    of a; prints whether a is invertible as a side effect.
    """
    r0, r1 = n, a
    v0, v1 = 0, 1
    while r1 != 0:
        q = r0 // r1
        r0, r1 = r1, r0 - r1 * q
        v0, v1 = v1, v0 - v1 * q
    if r0 == 1:
        print (a, " is reversible modulo ", n)
        return v0 % n
    print (a, " is not reversible modulo ", n)
    return 0
15819195727 |
def compressing(string):
    """Return "<count>_<char>" strings for each distinct character.

    Characters appear in order of first occurrence; each count is the
    character's total number of occurrences in *string* (note: not a
    run-length encoding). Runs in a single O(n) pass instead of the
    original O(n * unique) string.count / list-membership scan.
    """
    counts = {}
    for letter in string:
        counts[letter] = counts.get(letter, 0) + 1
    # dicts preserve insertion order, so first-occurrence order is kept.
    return [f"{count}_{letter}" for letter, count in counts.items()]
if __name__=="__main__":
result =compressing("z")
result = sorted(result)
print(result) | abdallah-abdelsabour/mastring_4_critical_Skills_USing_python | list/compressing.py | compressing.py | py | 337 | python | en | code | 2 | github-code | 90 |
38789775461 | import tensorflow as tf
from tensorflow.keras.layers import Embedding, LSTM, Dense, Dropout
class Encoder(tf.keras.Model):
    """Two-layer LSTM encoder over embedded input token sequences.

    call() takes [input_sequence, initial_states] and returns the second
    LSTM's full sequence output plus its final hidden and cell states.
    """

    def __init__(self, inp_vocab_size, embedding_dim, lstm_size, input_length):
        super().__init__()
        # Fixed seed so weight initialisation is reproducible.
        seed = 42
        self.inp_vocab_size = inp_vocab_size
        self.embedding_dim = embedding_dim
        self.lstm_size = lstm_size
        self.input_length = input_length
        self.embedding = Embedding(
            input_dim=self.inp_vocab_size,
            output_dim=self.embedding_dim,
            embeddings_initializer=tf.keras.initializers.RandomNormal(
                mean=0, stddev=1, seed=seed
            ),
            input_length=self.input_length,
            mask_zero=True,  # token id 0 is treated as padding
            name="Encoder_Embedding",
        )
        self.lstm1 = LSTM(
            self.lstm_size,
            return_state=True,
            return_sequences=True,
            kernel_initializer=tf.keras.initializers.glorot_uniform(seed=seed),
            recurrent_initializer=tf.keras.initializers.orthogonal(seed=seed),
            name="Encoder_LSTM1",
        )
        self.lstm2 = LSTM(
            self.lstm_size,
            return_state=True,
            return_sequences=True,
            kernel_initializer=tf.keras.initializers.glorot_uniform(seed=seed),
            recurrent_initializer=tf.keras.initializers.orthogonal(seed=seed),
            name="Encoder_LSTM2",
        )

    def call(self, input):
        """input = [token_ids, initial_states]; returns
        (sequence_output, last_hidden_state, last_cell_state)."""
        input_sequence, states = input[0], input[1]
        input_embedded = self.embedding(input_sequence)
        # LSTM2 is chained on LSTM1's output and final states.
        self.enc_output, self.last_hidden_state, self.last_current_state = self.lstm1(
            input_embedded, initial_state=states
        )
        self.enc_output, self.last_hidden_state, self.last_current_state = self.lstm2(
            self.enc_output, [self.last_hidden_state, self.last_current_state]
        )
        return self.enc_output, self.last_hidden_state, self.last_current_state

    def initialize_states(self, batch_size):
        """Return zero (hidden, cell) states of shape (batch_size, lstm_size)."""
        # Initialized with tf.zeros
        self.first_hidden_state, self.first_current_state = tf.zeros(
            [batch_size, self.lstm_size]
        ), tf.zeros([batch_size, self.lstm_size])
        return self.first_hidden_state, self.first_current_state
class Attention(tf.keras.Model):
    """Dot-product attention over encoder outputs, with the encoder
    outputs first projected through a Dense layer.

    NOTE(review): scoring_function is stored but never consulted in
    call() -- only this one scoring scheme is implemented; confirm
    whether other scoring functions were intended.
    """

    def __init__(self, lstm_size, scoring_function):
        super(Attention, self).__init__()
        self.lstm_size = lstm_size
        self.scoring_function = scoring_function
        self.W = tf.keras.layers.Dense(lstm_size)

    def call(self, input):
        """input = [decoder_hidden_state, encoder_output]; returns
        (context_vector, attention_weights)."""
        decoder_hidden_state, encoder_output = input[0], input[1]
        decoder_hidden_state = tf.expand_dims(decoder_hidden_state, axis=2)
        output = self.W(encoder_output)
        score = tf.keras.layers.Dot(axes=(2, 1))([output, decoder_hidden_state])
        # Softmax over the time axis yields the attention distribution.
        attention_weights = tf.nn.softmax(score, axis=1)
        context_vector = tf.reduce_sum(attention_weights * encoder_output, axis=1)
        return context_vector, attention_weights
class Timestep_Decoder(tf.keras.Model):
    """Single-timestep attention decoder: embeds one target token,
    attends over the encoder output, runs two LSTM layers and projects
    to output-vocabulary logits.
    """

    def __init__(
        self,
        out_vocab_size,
        embedding_dim,
        input_length,
        lstm_size,
        scoring_function,
        embedding_matrix=None,
    ):
        super().__init__()
        # Fixed seed so weight initialisation is reproducible.
        seed = 42
        self.out_vocab_size = out_vocab_size
        self.embedding_dim = embedding_dim
        self.input_length = input_length
        self.lstm_size = lstm_size
        self.scoring_function = scoring_function
        self.attention = Attention(self.lstm_size, self.scoring_function)
        self.embedding_matrix = embedding_matrix
        if self.embedding_matrix is None:
            # Trainable embedding initialised randomly.
            self.embedding = Embedding(
                input_dim=self.out_vocab_size,
                output_dim=self.embedding_dim,
                embeddings_initializer=tf.keras.initializers.RandomNormal(
                    mean=0, stddev=1, seed=seed
                ),
                input_length=self.input_length,
                mask_zero=True,
                name="embedding_layer_decoder",
            )
        else:
            # Frozen embedding initialised from the provided matrix.
            self.embedding = Embedding(
                input_dim=self.out_vocab_size,
                output_dim=self.embedding_dim,
                embeddings_initializer=tf.keras.initializers.Constant(
                    self.embedding_matrix
                ),
                trainable=False,
                input_length=self.input_length,
                mask_zero=True,
                name="embedding_layer_decoder",
            )
        self.lstm1 = LSTM(
            self.lstm_size,
            return_state=True,
            return_sequences=True,
            kernel_initializer=tf.keras.initializers.glorot_uniform(seed=seed),
            recurrent_initializer=tf.keras.initializers.orthogonal(seed=seed),
            name="Timestep_Decoder_LSTM1",
        )
        self.lstm2 = LSTM(
            self.lstm_size,
            return_state=True,
            return_sequences=True,
            kernel_initializer=tf.keras.initializers.glorot_uniform(seed=seed),
            recurrent_initializer=tf.keras.initializers.orthogonal(seed=seed),
            name="Timestep_Decoder_LSTM2",
        )
        self.dense = Dense(out_vocab_size)  # projection to vocab logits

    def call(self, input):
        """input = [input_token, encoder_output, hidden, cell]; returns
        (logits, new_hidden, new_cell) for one decoding step."""
        input_token, encoder_output, encoder_hidden, encoder_current = (
            input[0],
            input[1],
            input[2],
            input[3],
        )
        embedded_token = self.embedding(input_token)
        # Attend with the incoming hidden state over the encoder output.
        context_vector, attention_weights = self.attention(
            [encoder_hidden, encoder_output]
        )
        query_with_time_axis = tf.expand_dims(context_vector, 1)
        # Concatenate the context with the embedded token as LSTM input.
        out_concat = tf.concat([query_with_time_axis, embedded_token], axis=-1)
        dec_output, encoder_hidden, encoder_current = self.lstm1(
            out_concat, [encoder_hidden, encoder_current]
        )
        dec_output, encoder_hidden, encoder_current = self.lstm2(
            dec_output, [encoder_hidden, encoder_current]
        )
        out = self.dense(tf.reshape(dec_output, (-1, dec_output.shape[2])))
        return out, encoder_hidden, encoder_current
class Decoder(tf.keras.Model):
    """Full-sequence decoder: applies Timestep_Decoder once per target
    timestep (teacher forcing -- the ground-truth token at each step is
    taken from decoder_input) and stacks the per-step logits.
    """

    def __init__(
        self,
        out_vocab_size,
        embedding_dim,
        input_length,
        lstm_size,
        scoring_function,
        embedding_matrix=None,
    ):
        super().__init__()
        self.out_vocab_size = out_vocab_size
        self.embedding_dim = embedding_dim
        self.input_length = input_length
        self.lstm_size = lstm_size
        self.scoring_function = scoring_function
        self.embedding_matrix = embedding_matrix
        self.timestepdecoder = Timestep_Decoder(
            self.out_vocab_size,
            self.embedding_dim,
            self.input_length,
            self.lstm_size,
            self.scoring_function,
            self.embedding_matrix,
        )

    @tf.function
    def call(self, input):
        """input = [decoder_input, encoder_output, hidden, cell]; returns
        logits of shape (batch, target_len, out_vocab_size)."""
        decoder_input, encoder_output, encoder_hidden, encoder_current = (
            input[0],
            input[1],
            input[2],
            input[3],
        )
        # TensorArray so the loop is graph-compatible under @tf.function.
        all_outputs = tf.TensorArray(
            tf.float32, size=tf.shape(decoder_input)[1], name="output_array"
        )
        loop = tf.shape(decoder_input)[1]
        for timestep in range(loop):
            # Feed the ground-truth token for this step (teacher forcing)
            # and thread the LSTM states through the loop.
            output, encoder_hidden, encoder_current = self.timestepdecoder(
                [
                    decoder_input[:, timestep : timestep + 1],
                    encoder_output,
                    encoder_hidden,
                    encoder_current,
                ]
            )
            all_outputs = all_outputs.write(timestep, output)
        # (time, batch, vocab) -> (batch, time, vocab)
        all_outputs = tf.transpose(all_outputs.stack(), [1, 0, 2])
        return all_outputs
class Attention_Based_Encoder_Decoder(tf.keras.Model):
    """End-to-end seq2seq model: Encoder -> attention Decoder.

    Vocabulary sizes are incremented by one to reserve index 0 for
    padding (both embeddings use mask_zero=True).
    """

    def __init__(
        self,
        input_length,
        inp_vocab_size,
        out_vocab_size,
        lstm_size,
        scoring_function,
        batch_size,
        embedding_dim,
        embedding_matrix=None,
    ):
        super().__init__()
        self.input_length = input_length
        self.inp_vocab_size = inp_vocab_size + 1
        self.out_vocab_size = out_vocab_size + 1
        self.lstm_size = lstm_size
        self.scoring_function = scoring_function
        self.batch_size = batch_size
        self.embedding_dim = embedding_dim
        self.embedding_matrix = embedding_matrix
        self.encoder = Encoder(
            inp_vocab_size=self.inp_vocab_size,
            embedding_dim=self.embedding_dim,
            lstm_size=self.lstm_size,
            input_length=self.input_length,
        )
        self.decoder = Decoder(
            out_vocab_size=self.out_vocab_size,
            embedding_dim=self.embedding_dim,
            lstm_size=self.lstm_size,
            scoring_function=self.scoring_function,
            input_length=self.input_length,
            embedding_matrix=self.embedding_matrix,
        )

    def call(self, data):
        """data = [encoder_input_ids, decoder_input_ids]; returns the
        decoder's per-timestep vocabulary logits."""
        enc_inp, dec_inp = data[0], data[1]
        initial_state = self.encoder.initialize_states(
            self.batch_size
        )  # Initialized Encoder state
        encoder_output, encoder_hidden, encoder_current = self.encoder(
            [enc_inp, initial_state]
        )  # Encoder
        final_output = self.decoder(
            [dec_inp, encoder_output, encoder_hidden, encoder_current]
        )  # Decoder
        return final_output
| renata-nerenata/Formal-vs-informal-translator | src/models/transformer.py | transformer.py | py | 9,462 | python | en | code | 0 | github-code | 90 |
8009571585 | from splinter import Browser
from bs4 import BeautifulSoup as bs
import time
def init_browser():
    """Launch a visible Chrome splinter browser for scraping.

    Expects 'chromedriver.exe' to be on the path / in the CWD.
    """
    # @NOTE: Replace the path with your actual path to the chromedriver
    executable_path = {"executable_path": "chromedriver.exe"}
    return Browser("chrome", **executable_path, headless=False)
def scrape_info():
    """Scrape several NASA/Mars pages and return the results as a dict.

    Collects the latest news title/paragraph, the featured JPL image
    URL, the latest InSight weather tweet, basic Mars facts and the
    (static) hemisphere image URLs. Network-dependent; 'featured_img'
    and 'mars_weather' may be None when the page layout changed.
    """
    browser = init_browser()

    # --- Latest news title and paragraph ---
    url = "https://mars.nasa.gov/news/?page=0&per_page=40&order=publish_date+desc%2Ccreated_at+desc&search=&category=19%2C165%2C184%2C204&blank_scope=Latest"
    browser.visit(url)
    time.sleep(.25)
    soup = bs(browser.html, "html.parser")
    title = soup.find("div", class_="content_title").get_text()
    news_p = soup.find("div", class_="rollover_description_inner").get_text()

    # --- Latest featured image ---
    url = "https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars"
    browser.visit(url)
    time.sleep(.25)
    soup = bs(browser.html, 'html.parser')
    a = soup.find("footer").find("a")
    # Bug fix: featured_image_url was unbound (NameError at dict build)
    # when the anchor lacked the attribute; default to None instead.
    featured_image_url = None
    if a.has_attr('data-fancybox-href'):
        relative_url = a['data-fancybox-href']
        featured_image_url = "https://www.jpl.nasa.gov" + relative_url

    # --- Mars weather (latest InSight tweet) ---
    url = "https://twitter.com/MarsWxReport"
    browser.visit(url)
    time.sleep(.25)
    soup = bs(browser.html, 'html.parser')
    weather_rs = soup.find_all(
        'p', class_="TweetTextSize TweetTextSize--normal js-tweet-text tweet-text")
    tweets = [w.get_text() for w in weather_rs if "InSight" in w.get_text()]
    # Bug fix: guard against no matching tweets (was an IndexError).
    mars_weather = None
    if tweets:
        mw = tweets[0].replace('\n', ' ').replace('Insight s', 'S')
        mars_weather = mw.split('pic', 1)[0]

    # --- Mars facts table ---
    url = "https://space-facts.com/mars/"
    browser.visit(url)
    time.sleep(.25)
    soup = bs(browser.html, 'html.parser')
    t = soup.find("table")
    mars_stuff = t.find_all("span", class_="mars-s")
    mars_dict = {"Diameter": mars_stuff[0].get_text(),
                 "Mass": mars_stuff[1].get_text(),
                 "Moons": mars_stuff[2].get_text(),
                 "DistancefromSun": mars_stuff[3].get_text(),
                 "LengthofYear": mars_stuff[4].get_text(),
                 "Temperature": mars_stuff[5].get_text()}

    # --- Hemisphere images (static URLs) ---
    hemisphere_image_urls = [
        {"title": "Cerberus Hemisphere", "img_url": "https://astropedia.astrogeology.usgs.gov/download/Mars/Viking/cerberus_enhanced.tif/full.jpg"},
        {"title": "Schiaparelli Hemisphere", "img_url": "https://astropedia.astrogeology.usgs.gov/download/Mars/Viking/schiaparelli_enhanced.tif/full.jpg"},
        {"title": "Syrtis Major Hemisphere", "img_url": "https://astropedia.astrogeology.usgs.gov/download/Mars/Viking/syrtis_major_enhanced.tif/full.jpg"},
        {"title": "Valles Marineris Hemisphere", "img_url": "https://astropedia.astrogeology.usgs.gov/download/Mars/Viking/valles_marineris_enhanced.tif/full.jpg"}
    ]

    # Store all scraped Mars data in a dictionary
    mars_data = {
        "newstitle": title,
        "news_p": news_p,
        "featured_img": featured_image_url,
        "mars_weather": mars_weather,
        "mars_dimensions": mars_dict,
        "hemisphere_images": hemisphere_image_urls
    }

    # Close the browser after scraping
    browser.quit()
    return mars_data
| jcgraham440/Mars-scraper | scrape_mars.py | scrape_mars.py | py | 3,580 | python | en | code | 1 | github-code | 90 |
24682339539 | import os
import sys
import crayons
import subprocess
# Batch-compress every video in the given directory with HandBrakeCLI.
# Usage: python compress.py <directory>

# Check if there's a cmdline argument for directory provided
if len(sys.argv) < 2:
    print("No input directory provided!")
    exit()

# get the folder
input_path = sys.argv[1]

# Bail out if the folder isn't in the current working directory.
entries = os.listdir()
if input_path not in entries:
    print(crayons.red(f"[WARNING] No directory called: {input_path} found", True))
    exit()
else:
    # NOTE(review): "โ" looks like a mojibake'd check mark -- confirm
    # the intended glyph.
    print(crayons.green(f"[โ] ", True) + f"Directory found!")

# make our own post-compressed folder, if it doesn't exist
if not os.path.isdir("post-compressed clips"):
    print(crayons.magenta("[NOTICE] ", True) + f" No path for ./post-compressed clips/ found, creating it.")
    os.mkdir("post-compressed clips")

compression_input_path = os.listdir(input_path)
input_path = ".\\" + input_path
os.chdir(input_path)
for uncompressed_video in compression_input_path:
    # Bug fix: 'is' compares object identity, not equality, so these
    # checks never reliably matched and the HandBrake binary / output
    # dir could be fed back into the encoder; use '==' instead.
    if uncompressed_video == "HandBrakeCLI.exe":
        continue
    if uncompressed_video == "compressed":
        continue
    print(crayons.blue(f".\\HandBrakeCLI.exe -i '{uncompressed_video}\' -o '{uncompressed_video}_compressed' -e x264 -q 30 -B 160", True))
    subprocess.run([".\\HandBrakeCLI.exe", "-i", uncompressed_video, "-o", ".\\compressed\\" + uncompressed_video, "-e", "x264", "-q", "30", "-B", "160"])
| kenanarica/QOL_handbrake_script | compress.py | compress.py | py | 1,633 | python | en | code | 0 | github-code | 90 |
5363348726 | import os
from datacube_ows.cube_pool import cube
from datacube_ows.ows_configuration import OWSConfig, get_config, read_config
src_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
def test_metadata_export():
    """export_metadata() should key folders by name, not by position."""
    cfg = get_config(refresh=True)
    export = cfg.export_metadata()
    assert "folder.0.title" not in export
    assert "folder.sentinel2.title" in export
    # assert layers.platforms
    # for p in layers:
    #     assert p.products
    #     for prd in p.products:
    #         assert prd.styles
    #         assert layers.product_index[prd.name] == prd
    #         assert prd.title
def test_missing_metadata_file(monkeypatch):
    """A nonexistent message file must not apply any overrides.

    OWSConfig keeps state in class attributes; save them up front and
    restore in the finally block so this test cannot leak into others.
    """
    cached_cfg = OWSConfig._instance
    cached_reg = OWSConfig._metadata_registry
    cached_inh_reg = OWSConfig._inheritance_registry
    cached_catalog = OWSConfig._msg_src
    monkeypatch.chdir(src_dir)
    try:
        # Reset all class-level caches so the config is rebuilt fresh.
        OWSConfig._instance = None
        OWSConfig._metadata_registry = {}
        OWSConfig._inheritance_registry = {}
        OWSConfig._msg_src = None
        raw_cfg = read_config()
        raw_cfg["global"]["message_file"] = "integration_tests/cfg/non-existent.po"
        raw_cfg["global"]["translations_directory"] = None
        raw_cfg["global"]["languages"] = ["en"]
        cfg = OWSConfig(refresh=True, cfg=raw_cfg)
        with cube() as dc:
            cfg.make_ready(dc)
        # No overrides from the (missing) message file should appear.
        assert "Over-ridden" not in cfg.title
        assert "aardvark" not in cfg.title
    finally:
        OWSConfig._instance = cached_cfg
        OWSConfig._metadata_registry = cached_reg
        OWSConfig._inheritance_registry = cached_inh_reg
        OWSConfig._msg_src = cached_catalog
def test_metadata_file_ignore(monkeypatch):
    """ignore_msgfile=True must skip an existing message file.

    OWSConfig keeps state in class attributes; save them up front and
    restore in the finally block so this test cannot leak into others.
    """
    cached_cfg = OWSConfig._instance
    cached_reg = OWSConfig._metadata_registry
    cached_inh_reg = OWSConfig._inheritance_registry
    cached_catalog = OWSConfig._msg_src
    monkeypatch.chdir(src_dir)
    try:
        # Reset all class-level caches so the config is rebuilt fresh.
        OWSConfig._instance = None
        OWSConfig._metadata_registry = {}
        OWSConfig._inheritance_registry = {}
        OWSConfig._msg_src = None
        raw_cfg = read_config()
        raw_cfg["global"]["message_file"] = "integration_tests/cfg/message.po"
        cfg = OWSConfig(refresh=True, cfg=raw_cfg, ignore_msgfile=True)
        with cube() as dc:
            cfg.make_ready(dc)
        # The message file exists but must be ignored: no overrides.
        assert "Over-ridden" not in cfg.title
        assert "aardvark" not in cfg.title
    finally:
        OWSConfig._instance = cached_cfg
        OWSConfig._metadata_registry = cached_reg
        OWSConfig._inheritance_registry = cached_inh_reg
        OWSConfig._msg_src = cached_catalog
def test_metadata_read(monkeypatch, product_name):
    """A valid message file overrides titles/abstracts at every config level."""
    # Only the singleton instance is cached here (the registries are rebuilt).
    cached_cfg = OWSConfig._instance
    monkeypatch.chdir(src_dir)
    try:
        OWSConfig._instance = None
        raw_cfg = read_config()
        raw_cfg["global"]["message_file"] = "integration_tests/cfg/message.po"
        cfg = OWSConfig(refresh=True, cfg=raw_cfg)
        with cube() as dc:
            cfg.make_ready(dc)
        # Global title is overridden by the catalog.
        assert "Over-ridden" in cfg.title
        assert "aardvark" in cfg.title
        # Folder: abstract overridden, title untouched.
        folder = cfg.folder_index["folder.sentinel2"]
        assert "Over-ridden" not in folder.title
        assert "Over-ridden" in folder.abstract
        assert "bunny-rabbit" in folder.abstract
        # Layer title overridden.
        lyr = cfg.product_index[product_name]
        assert "Over-ridden" in lyr.title
        assert "chook" in lyr.title
        # Style titles: simple_rgb overridden, blue untouched.
        styl = lyr.style_index["simple_rgb"]
        assert "Over-ridden" in styl.title
        assert "donkey" in styl.title
        styl = lyr.style_index["blue"]
        assert "Over-ridden" not in styl.title
    finally:
        OWSConfig._instance = cached_cfg
| opendatacube/datacube-ows | integration_tests/test_layers.py | test_layers.py | py | 3,685 | python | en | code | 62 | github-code | 90 |
35281039147 | from yacs.config import CfgNode as CN
_C = CN()
_C.DATASET = CN()
# Path to directory containing the train, validation and test dataset.
_C.DATASET.DATA_DIR = ''
# Training data file. NOTE(review): previous comment said "input mfcc
# features" -- naming mismatch; confirm against the data-loading code.
_C.DATASET.TRAIN_FILE = ''
# Validation data file (previous comment said "labels"; verify).
_C.DATASET.VAL_FILE = ''
# Test data file (previous comment said "original labels"; verify).
_C.DATASET.TEST_FILE = ''
_C.DATALOADER = CN()
# Batch size.
_C.DATALOADER.BATCH_SIZE= 12
# Number of subprocesses to use for data loading.
_C.DATALOADER.NUM_WORKERS = 4
# Cross-validation switch (name-based; confirm usage in the dataloader).
_C.DATALOADER.CROSS_VALIDATE = True
# Number of folds for cross-validation.
_C.DATALOADER.K_FOLD = 5
_C.MODEL = CN()
# Model name.
_C.MODEL.NAME = ''
# Number of ground truth classes.
_C.MODEL.NUM_CLASSES = 23
# Name of the loss function (see also LOSSES below).
_C.MODEL.LOSS_FUNC = ''
# List of loss functions to use. Each list item should have a NAME and ARGS
# which is a dictionary of arguments to pass to the loss function class.
_C.LOSSES = []
_C.TRAIN_ARGS = CN()
# Base learning rate.
_C.TRAIN_ARGS.BASE_LR = 0.001
# Weight decay in optimizer.
_C.TRAIN_ARGS.WEIGHT_DECAY = 0.001
# Number of epochs to train for.
_C.TRAIN_ARGS.NUM_EPOCHS = 30
# The factor to reduce the current learning rate by.
_C.TRAIN_ARGS.LR_SCHEDULER_FACTOR = 0.1
# Number of epochs with no improvement after which learning rate will be reduced.
_C.TRAIN_ARGS.LR_SCHEDULER_PATIENCE = 2
# Minimum learning rate.
_C.TRAIN_ARGS.MIN_LR = 1e-6
# Number of validation checks with no improvement after which training will be stopped. 0 = no early stopping.
_C.TRAIN_ARGS.EARLY_STOPPING_PATIENCE = 0
# Name of LR scheduler to use.
_C.TRAIN_ARGS.LR_SCHEDULER = 'CosineAnnealingLR'
# Name of the optimizer to use.
_C.TRAIN_ARGS.OPTIMIZER = 'SGD'
# Number of epochs for the first restart.
_C.TRAIN_ARGS.WARM_RESTART_EPOCH = 20
# Warm-up length in epochs (scheduler-specific; confirm).
_C.TRAIN_ARGS.WARM_UP_EPOCH = 10
# Scheduler horizon in epochs (scheduler-specific; confirm).
_C.TRAIN_ARGS.MAX_EPOCHS = 10
# Cycle length in epochs for cyclical schedulers (confirm).
_C.TRAIN_ARGS.CYCLICAL_EPOCHS = 100
# Name of the run to log in mlflow.
_C.RUN_NAME = ''
# Device to run training on.
_C.DEVICE = 'cuda'
# Number of GPUs to use for training.
_C.NUM_GPUS = 4
# Keep only the top k checkpoints. k = -1 to keep all checkpoints.
_C.SAVE_TOP_K = 3
# Save frequency in number of epochs.
_C.SAVE_FREQ = 1
# Random seed.
_C.SEED = 42
def get_cfg_defaults() -> CN:
    """Return a fresh, independent copy of the default experiment config."""
    defaults = _C.clone()
    return defaults
def get_cfg_from_yaml(yaml_path: str) -> CN:
    """Load the defaults and overlay them with values from a YAML file.

    Args:
        yaml_path: Path to a YAML file containing the custom configuration.

    Returns:
        The merged configuration node.
    """
    merged = get_cfg_defaults()
    merged.merge_from_file(yaml_path)
    return merged
| zili98/ELEC576-Deep-Learning-Final-Project | src/config.py | config.py | py | 2,559 | python | en | code | 0 | github-code | 90 |
41836235585 | import logging
import socket
import sys
from threading import Event, Thread
import communicate
import flask
import serial
from flask import Flask, flash, json, render_template
from flask.config import Config
from flask_socketio import SocketIO, emit
from pymongo import MongoClient, errors
# On startup the app has to load database settings first
class App(Flask):
    """Flask application that loads camera/scale/db/weights settings from
    JSON files at startup and can persist them back to disk."""

    def __init__(self, import_name, camera_params=None, scale_params=None, db_params=None):
        super().__init__(import_name)
        self.weights_file = 'settings/weights.json'
        # Load every settings file into memory once at startup.
        with open(camera_params, 'r') as fileo:
            self.camera_params = json.load(fileo)
        with open(scale_params, 'r') as fileo:
            self.scale_params = json.load(fileo)
        with open(db_params, 'r') as fileo:
            self.db_params = json.load(fileo)
        with open(self.weights_file, 'r') as fileo:
            self.weights = json.load(fileo)
        # Remember the paths so write_settings() saves back to the same files.
        self.camera_file = camera_params
        self.scale_file = scale_params
        self.db_file = db_params
        self.active_weight = None
        try:
            self.camera_port = communicate.create_camera_port(
                self.camera_params)
        except Exception:
            # Narrowed from a bare 'except:'; the app still starts without a camera.
            self.camera_port = None
        self.last_camera_string = None

    def write_settings(self, dest):
        """Persist one settings group ('camera', 'db', 'scale' or 'weights')
        to its JSON file. Unknown destinations are silently ignored, matching
        the previous if-chain behaviour."""
        targets = {
            'camera': (self.camera_file, self.camera_params),
            'db': (self.db_file, self.db_params),
            'scale': (self.scale_file, self.scale_params),
            'weights': (self.weights_file, self.weights),
        }
        if dest in targets:
            path, data = targets[dest]
            with open(path, 'w') as fileo:
                json.dump(data, fileo)
# Global application object wired to the on-disk JSON settings files.
app = App(__name__, camera_params='settings/camera.json',
          scale_params='settings/scale.json',
          db_params='settings/db.json')
logging.basicConfig(level=logging.DEBUG)
# Socket.IO server used to push scale results to connected browsers.
socketio = SocketIO(app, async_mode=None, logger=True, engineio_logger=True)
# Background worker thread and its stop signal (started on first client connect).
thread = Thread()
thread_stop_event = Event()
def check_camera():
    """Background worker: poll the camera for a code string, then read the
    matching scale's weight, write the result to the DB and emit it over
    socket.io. Runs until thread_stop_event is set."""
    logging.info(">>>>>>>>>>>>>>>def check camera")
    # Event.isSet() was removed in Python 3.12; is_set() is the supported name.
    while not thread_stop_event.is_set():
        for scale in app.scale_params['scale']:
            scale_name = scale['name']
            # IF SCALE IS ACTIVE IN CASE OF 2 SCALE
            if scale['active']:
                logging.info("0 - PROCESS RUNS FOR SCALE: {} \n".format(scale['name']))
                logging.info("1 - GET DATA FROM CAMERA \n")
                # Only query when the serial port exists AND an active weight
                # has been configured for this scale. active_weight starts as
                # None, so guard before subscripting (the original raised
                # TypeError/KeyError here before any part was activated).
                if app.camera_port and app.active_weight and app.active_weight.get(scale_name):
                    camera_string = communicate.query_camera_string(app.camera_port)
                else:
                    socketio.sleep(1)
                    continue
                print("-----------------------------------------------------------")
                print(camera_string)
                print("-----------------------------------------------------------")
                # camera_string = "HHAR2502301##Ca##131945##T04222##S0002130##N01###"
                # A valid camera string contains exactly 13 '#' separators.
                if camera_string and (camera_string.count('#') == 13):
                    # check if first char is valid
                    if camera_string[0].isalnum() is False:
                        app.last_camera_string = camera_string[1:]
                    else:
                        app.last_camera_string = camera_string
                    logging.info("2 - CAMERA STRING: {}\n".format(app.last_camera_string))
                    socketio.sleep(5)
                    result = communicate.scale_get_weight((scale["scale_ip"], scale['scale_port']), app)
                    logging.info("3 - SCALE IP: {} PORT: {}\n".format(scale["scale_ip"], scale["scale_port"]))
                    # Inside the configured limits -> good part, otherwise bad.
                    if (result <= app.active_weight[scale_name]['hl']) and (result >= app.active_weight[scale_name]['ll']):
                        resp = communicate.write_weight_to_db(app.db_params, result, app)
                        logging.info("4 - WRITE TO DATABASE\n")
                    else:
                        logging.info("4 - WRITE TO DATABASE FALSE\n")
                        resp = communicate.write_weight_to_db(app.db_params, result, app, False)
                    socketio.emit('newnumber', {'number': str(resp)}, namespace='/test')
                    logging.info("END - PROCESS \n")
                    logging.info("===============================================================")
        socketio.sleep(1)
@socketio.on('connect', namespace='/test')
def test_connect():
    """Start the camera-polling background task on the first client connect."""
    global thread
    print('Client connected')
    # Thread.isAlive() was removed in Python 3.9; is_alive() is the correct API.
    if not thread.is_alive():
        print("Starting Thread", file=sys.stderr)
        thread = socketio.start_background_task(check_camera)
@socketio.on('disconnect', namespace='/test')
def test_disconnect():
    # Only logs the disconnect; no thread shutdown is performed here.
    print('Client disconnected')
@app.route('/')
def test():
    """Render the main page with current weights, scale settings and the
    names of the active parts per scale."""
    title = "Default Page"
    # Re-read both settings files so the page always reflects on-disk state.
    with open(app.weights_file, 'r') as fileo:
        app.weights = json.load(fileo)
    with open(app.scale_file, 'r') as fileo:
        app.scale_params = json.load(fileo)
    # Default label shown for a scale with no active part.
    active = {'scale_a': "No Part active", 'scale_b': "No Part active"}
    if app.active_weight:
        if app.active_weight.get('ScaleA'):
            active['scale_a'] = app.active_weight['ScaleA']["part_name"]
        if app.active_weight.get('ScaleB'):
            active['scale_b'] = app.active_weight['ScaleB']["part_name"]
    return render_template('base.html', title=title, weights=app.weights, scale_params=app.scale_params, active_weight=active)
@app.route('/settings')
def settings():
    """Render the settings page with the current db/scale/camera parameters."""
    title = "Settings"
    return render_template('settings.html', title=title, db_params=app.db_params, scale_params=app.scale_params,
                           camera_params=app.camera_params)
@app.route('/console')
def console():
    """Render the console page."""
    return render_template('console.html')
@app.route('/console-output')
def console_output():
    # NOTE(review): renders the same template as /console -- likely a placeholder.
    return render_template('console.html')
@app.route('/settings/db', methods=["GET", "POST"])
def set_db_params():
    """Replace the in-memory DB settings with the submitted form and persist."""
    # NOTE(review): flask.request.form is an ImmutableMultiDict, not a plain
    # dict -- confirm json.dump in App.write_settings serialises it as intended.
    app.db_params = flask.request.form
    app.write_settings('db')
    return app.make_response('OK')
@app.route('/settings/camera', methods=["GET", "POST"])
def set_camera_params():
    """Replace the in-memory camera settings with the submitted form and persist."""
    # NOTE(review): flask.request.form is an ImmutableMultiDict, not a plain
    # dict -- confirm it serialises as intended in App.write_settings.
    app.camera_params = flask.request.form
    app.write_settings('camera')
    return app.make_response('OK')
@app.route('/settings/camera/checkconnection', methods=["GET", "POST"])
def check_camera_connection():
    """Re-open the serial camera port using the current settings.

    Always replies 'OK'; any failure in create_camera_port propagates as a
    server error (the original behaviour -- the commented-out traceback
    handler was dead code and has been removed).
    """
    print(" >> Checking cammera connection", file=sys.stderr)
    app.logger.info(app.camera_params)
    # Close a previously opened port before re-creating it.
    if app.camera_port:
        if app.camera_port.is_open:
            app.camera_port.close()
    app.camera_port = communicate.create_camera_port(app.camera_params)
    app.logger.info(app.camera_port)
    return app.make_response('OK')
@app.route('/settings/scale', methods=["GET", "POST"])
def set_scale_parames():
    """Update ip/port of the scale selected by form field 'scale_id' and persist."""
    args = flask.request.form.to_dict()
    app.logger.info(app.scale_params)
    scale_id = int(args['scale_id'])
    app.scale_params['scale'][scale_id]['scale_ip'] = args['scale_ip']
    app.scale_params['scale'][scale_id]['scale_port'] = args['scale_port']
    app.logger.info(app.scale_params)
    app.write_settings('scale')
    return app.make_response('OK')
@app.route('/settings/scale/enable', methods=["GET", "POST"])
def set_scale_b():
    """Enable/disable the second scale (index 1) from form field 'active'."""
    args = flask.request.form
    app.logger.info(app.scale_params)
    args = dict(args)
    # NOTE(review): dict(MultiDict) semantics vary by Werkzeug version;
    # args['active'] may be a str or a list here, so [0] takes either the
    # first character or the first item. Works for single-digit '0'/'1' --
    # confirm for other inputs.
    active = int(args['active'][0])
    app.scale_params['scale'][1]['active'] = active
    app.logger.info(app.scale_params)
    app.write_settings('scale')
    return app.make_response('OK')
@app.route('/settings/weights', methods=["GET", "POST"])
def set_weights():
    """Replace all stored weight entries for one scale with the submitted list.

    Expects JSON {'weight': [...], 'scale_id': ...}; entries belonging to the
    other scale are kept untouched.
    """
    weights = flask.request.json['weight']
    scale_id = flask.request.json['scale_id']
    # Keep only the entries that belong to other scales, then append the new ones.
    app.weights = [entry for entry in app.weights if entry['scale'] != scale_id]
    app.weights = app.weights + weights
    app.write_settings('weights')
    return app.make_response("OK")
@app.route('/settings/scale/checkconnection', methods=['GET', 'POST'])
def check_scale_connection():
    """Probe the scale's TCP endpoint; reply 'OK' or the error text."""
    params = flask.request.form
    try:
        probe = socket.create_connection(
            (params['scale_ip'], params['scale_port']), 0.5)
        probe.close()
    except Exception as e:
        return app.make_response('{}'.format(e))
    return app.make_response('OK')
@app.route('/settings/camera_ip/checkconnection', methods=['GET', 'POST'])
def check_ip_camera_connection():
    """Probe a network camera: connect, send a 'T' command, print the reply.

    Replies 'OK' on success or the exception text on failure.
    """
    params = flask.request.form
    camera_ip = params['camera_ip']
    camera_port = params['camera_port']
    print("==================================")
    print("checking camera ip connection")
    print(f"camera ip: {camera_ip}, camera_port: {camera_port} ")
    print("==================================")
    try:
        sock = socket.create_connection((camera_ip, camera_port), 0.5)
        # Test command sent to the camera; reply is only printed for debugging.
        command = 'T\r\n'
        command = bytes(command, 'ascii')
        sock.sendall(command)
        response = sock.recv(4096)
        sock.shutdown(socket.SHUT_RDWR)
        sock.close()
        print("==================================")
        print("camera response")
        print(response.decode('ascii'))
        print("==================================")
        return app.make_response('OK')
    except Exception as e:
        return app.make_response('{}'.format(e))
@app.route('/settings/db/checkconnection', methods=['GET', 'POST'])
def check_db_connection():
    """Try to reach MongoDB with the submitted parameters.

    Empty form fields are simply omitted from the connection arguments.
    Replies 'OK' or a truncated 'Error: ...' message.
    """
    params = flask.request.form
    res = 'OK'
    try:
        conn_params = {}
        if params['db_ip'] != '':
            conn_params.update({'host': params['db_ip']})
        if params['db_port'] != '':
            conn_params.update({'port': int(params['db_port'])})
        if params['db_user'] != '':
            conn_params.update({'username': params['db_user']})
        if params['db_password'] != '':
            conn_params.update({'password': params['db_password']})
        client = MongoClient(**conn_params, serverSelectionTimeoutMS=3000)
        # 'ismaster' is a cheap ping that forces server selection (3s timeout).
        client.admin.command('ismaster')
        # client.server_info()
        # return app.make_response('OK')
    except Exception as e:
        res = 'Error: {}...'.format(str(e)[:20])
    return app.make_response('{}'.format(res))
@app.route('/set_active_weight', methods=['GET', 'POST'])
def set_active_weight():
    """Store the submitted part/weight limits for one scale and push them to it.

    Replies "<result>#<part_name>#<scale>" on success, "NOK" on failure.
    """
    params = flask.request.form
    get_data = params.to_dict()
    # ScaleA -> index 0, ScaleB -> index 1 in app.scale_params['scale'].
    scale_index = 0
    scale_name = get_data['scale']
    if get_data['scale'] == 'ScaleB':
        scale_index = 1
    # Lazily initialise the active-weight registry ('is None', not '== None').
    if app.active_weight is None:
        app.active_weight = {}
    # Accept both ',' and '.' as decimal separator.
    app.active_weight[scale_name] = {"weight": float(params['weight'].replace(',', '.')),
                                     'll': float(params['ll'].replace(',', '.')),
                                     'hl': float(params['hl'].replace(',', '.')),
                                     "part_name": params["part_name"]
                                     }
    try:
        result = communicate.scale_set_weight((app.scale_params['scale'][scale_index]['scale_ip'],
                                               app.scale_params['scale'][scale_index]['scale_port']),
                                              params['weight'],
                                              params['ll'],
                                              params['hl'])
        return app.make_response(str(result) + "#" + params["part_name"] + "#" + params['scale'])
    except Exception:
        # Narrowed from a bare 'except:' so SystemExit/KeyboardInterrupt propagate.
        return app.make_response("NOK")
@app.route('/get_weight', methods=['GET', 'POST'])
def get_weight():
    """Read the current weight from the scale, persist it, return the DB response."""
    app.logger.info("def get_weight()")
    # NOTE(review): other handlers read per-scale settings from
    # app.scale_params['scale'][i]; here 'name'/'scale_ip'/'scale_port' are
    # read from the TOP level of scale_params. Verify against the settings
    # JSON layout -- this looks inconsistent and may raise KeyError.
    scale_name = app.scale_params["name"]
    result = communicate.scale_get_weight(
        (app.scale_params["scale_ip"], app.scale_params['scale_port']), app)
    if app.active_weight is None:
        return app.make_response("Error. Please set active weight first. Weight is {}".format(result))
    # In-limits result is stored as good, otherwise flagged as bad.
    if (result <= app.active_weight[scale_name]['hl']) and (result >= app.active_weight[scale_name]['ll']):
        resp = communicate.write_weight_to_db(app.db_params, result, app)
        return app.make_response(str(resp))
    else:
        resp = communicate.write_weight_to_db(
            app.db_params, result, app, False)
        return app.make_response(str(resp))
        # return app.make_response('Weight is out of limits')
if __name__ == '__main__':
socketio.run(app, debug=True)
| arborin/part_information_tracking_system | PartInformationTrackingSystem_v1/app.py | app.py | py | 14,450 | python | en | code | 0 | github-code | 90 |
15622954928 | import os
import fnmatch
import pickle
start_dir = "fortune1"
dirfileinfo=[]
filepathtemp=' '
filecontenttemp=' '
for dirpath, dirs, files in os.walk("fortune1"):
for single_file in files:
if fnmatch.fnmatch(single_file, "*txt"):
filepathtemp=str(os.path.abspath(single_file));
f = open(os.path.join(dirpath, single_file))
filecontenttemp=str(f.read());
temp=(filepathtemp,filecontenttemp);
dirfileinfo.append(temp);
f.close()
print(dirfileinfo);
output_file="raw_data.txt"
out=open(output_file,"ba")
pickle.dump(dirfileinfo,out)
out.close()
| prashanth291989/Python | fifth_week_python_assignment/FileTraverse.py | FileTraverse.py | py | 570 | python | en | code | 0 | github-code | 90 |
19019440385 | import io, os, sys, bisect
input = io.BytesIO(os.read(0, os.fstat(0).st_size)).readline
def subset_sum_in_range ():
    """Count subsets of the n input numbers whose sum lies in [a, b].

    Meet-in-the-middle: enumerate all subset sums of each half of the array,
    sort the second half's sums, and for each sum of the first half count the
    complementary range via binary search (O(2^(n/2) * n) overall).
    """
    n, a, b = map(int, input().decode().split()) ; nums_arr = [int(input().decode()) for i in range(n)]
    def generate_subset_sum_array (left, right):
        # All 2^(right-left+1) subset sums of nums_arr[left..right]; bit j of
        # the counter selects element left+j. (The inner while mutates the
        # loop variable i, which is safe for a Python for-range loop.)
        size = 2 ** (right - left + 1) ; ssa = []
        for i in range(size):
            j, curr_sum = left, 0
            while ((i != 0) and (j <= right)):
                if ((i & 1) != 0): curr_sum += nums_arr[j]
                i >>= 1 ; j += 1
            ssa.append(curr_sum)
        return ssa
    mid = (n - 1) // 2 ; arr1, arr2 = generate_subset_sum_array(0, mid), generate_subset_sum_array((mid + 1), (n - 1))
    arr2.sort() ; ans = 0
    for i in range(len(arr1)):
        # Count sums s in arr2 with a <= arr1[i] + s <= b.
        ans += (bisect.bisect_right(arr2, (b - arr1[i])) - bisect.bisect_left(arr2, (a - arr1[i])))
    sys.stdout.write(str(ans) + '\n')
subset_sum_in_range()
| Tejas07PSK/lb_dsa_cracker | Searching & Sorting/Subset Sums/solution.py | solution.py | py | 914 | python | en | code | 2 | github-code | 90 |
38304416150 |
# given a list of ints of even length, return a new list length 2 containing the middle two elements from the original list
# the original list will be length 2 or more
def make_middle(nums):
new_list = []
if len(nums) > 1 and len(nums) % 2 == 0:
new_list.append(nums[int(len(nums)/2)-1])
new_list.append(nums[int(len(nums)/2)])
return new_list
print(make_middle([1, 2, 3, 4]))
print(make_middle([7, 1, 2, 3, 4, 9]))
print(make_middle([1, 2]))
| jemtca/CodingBat | Python/List-1/make_middle.py | make_middle.py | py | 476 | python | en | code | 0 | github-code | 90 |
18419333669 | s = input()
# Count characters of s by value and index parity:
#   e0/e1 = count of '0'/'1' at odd indices, o0/o1 = count at even indices.
e0=e1=o0=o1=0
for i in range(len(s)):
    if s[i]=="0" :
        if i%2!=0 :
            e0+=1
        else:
            o0+=1
    else:
        if i%2!=0 :
            e1+=1
        else:
            o1+=1
# print(e0,e1,o0,o1)
# ot = number of even indices (ceil(n/2)); et = number of odd indices (floor(n/2)).
ot = len(s)//2 if len(s)%2==0 else (len(s)+1)//2
et = len(s)//2
# Cheaper of the two parity assignments, as formulated by the original author.
ans = min(abs(e0-et)+abs(o1-ot),abs(e1-et)+abs(o0-ot))
| Aasthaengg/IBMdataset | Python_codes/p03073/s042605926.py | s042605926.py | py | 375 | python | fr | code | 0 | github-code | 90 |
42078303223 | from setuptools import setup
from pkg_resources import parse_requirements
with open('requirements.txt') as f:
requirements = [str(req) for req in parse_requirements(f)]
setup(
name='AudioBookBot',
version='1.0.0',
author='Agcon, pr0maxxx, MrGreys0n',
description='Converts text from books into audio',
packages=['main'],
install_requires=requirements,
extras_require={
'docs': [
'sphinx',
'sphinx_rtd_theme'
]
},
entry_points={
'console_scripts': [
'audiobookbot = main.audiobookbot:main'
]
}
)
| Agcon/AudioBookBot | setup.py | setup.py | py | 638 | python | en | code | 0 | github-code | 90 |
11223656299 | import pygame
from sys import exit
from pygame.locals import *
import random
import math
def list_duplicates_of(seq, item):
    """Return every index at which *item* occurs in *seq* (empty if absent).

    Uses seq.index with an advancing start offset, so for strings this also
    finds (non-overlapping-start) substring matches.
    """
    positions = []
    search_from = 0
    while True:
        try:
            found = seq.index(item, search_from)
        except ValueError:
            return positions
        positions.append(found)
        search_from = found + 1
def product(*args, repeat=1):
    """Yield the Cartesian product of the input iterables (like itertools.product)."""
    pools = [tuple(pool) for pool in args] * repeat
    combos = [()]
    for pool in pools:
        # Extend every partial combination with each element of the next pool.
        combos = [combo + (item,) for combo in combos for item in pool]
    yield from combos
def empty_list_remove(input_list):
    """Return a copy of *input_list* with all falsy elements removed."""
    # Comprehension replaces the manual append loop; truthiness test unchanged.
    return [ele for ele in input_list if ele]
def longest_sublist(nested_list):
    """Return the length of the longest sub-list in *nested_list*.

    Raises ValueError on an empty outer list, like the original max([]).
    """
    return max(len(sub) for sub in nested_list)
# Randomly generate a grid and place stations (blue = bus stops to serve).
colors = ["blue", "grey"]
station_colors, xy_list, blue_stations = [], [], []
rows = random.randint(7, 12)
station_amount = random.randint(2, (rows - 1) * (rows - 2))
for i in range(station_amount):
    station_color = random.choice(colors)
    xy = [random.randint(0, rows - 3), random.randint(1, rows - 1)]
    # Re-roll until the coordinate is unique.
    while xy in xy_list:
        xy = [random.randint(0, rows - 3), random.randint(1, rows - 1)]
    xy_list.append(xy)
    station_colors.append(station_color)
    if station_color == "blue":
        blue_stations.append(xy)
# Guarantee at least one blue station exists.
if "blue" not in station_colors:
    index = random.randint(0, len(station_colors) - 1)
    station_colors[index] = "blue"
    blue_stations.append(xy_list[index])
# Group the blue stations by their x (column) value, preserving column order.
blue_stations.sort(key=lambda x: x[0])
x = blue_stations[0][0]
n = 0
xy_list2 = [[blue_stations[0]]]
for i in blue_stations[1:]:
    if i[0] == x:
        xy_list2[n].append(i)
    else:
        xy_list2.append([i])
        x = blue_stations[blue_stations.index(i)][0]
        n += 1
# Build bus routes: repeatedly pick one station per column group such that
# the total Euclidean path length is minimal, then remove the used stations.
path = []
for i in range(longest_sublist(xy_list2)):
    paths = list(product(*xy_list2))
    buh = 0
    d = []
    for e, ii in enumerate(paths):
        for x in range(len(ii)-1):
            buh += math.dist(ii[x], ii[x+1])
        d.append(buh)
        buh = 0
    ind = d.index(min(d))
    path.append(list(paths[ind]))
    for b in range(len(xy_list2)):
        for j in path[i]: # may be different?
            if j in xy_list2[b]:
                xy_list2[b].remove(j)
    xy_list2 = empty_list_remove(xy_list2)
print(xy_list2)
print(paths)
print(path)
pygame.init()
screen = pygame.display.set_mode((800, 400), pygame.RESIZABLE)
pygame.display.set_caption("Simulation")
fullscreen = False
clock = pygame.time.Clock()
text = "Smarta bussar"
test_font = pygame.font.SysFont("arial", 100)
text_surface = test_font.render(text, False, "black")
text_width, text_height = test_font.size(text)
bus_length = 50
bus = pygame.surface.Surface((bus_length, 20))
bus.fill("red")
road_thickness = 25
speed = 5
# Bus state: current target station index, which route is driven, whether the
# last station on the route is still lit, and whether a (re)start is pending.
node = 0
path_number = -1
last_station_on = True
start = True
while True:
    screen.fill("lightgrey")
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            pygame.quit()
            exit()
        if event.type == VIDEORESIZE:
            if not fullscreen:
                screen = pygame.display.set_mode((event.w, event.h), pygame.RESIZABLE)
                # Resizing restarts the current route (path_number is
                # decremented so the restart block re-selects it).
                start = True
                path_number -= 1
        if event.type == KEYDOWN:
            if event.key == K_f:
                # 'f' toggles fullscreen; also restarts the current route.
                fullscreen = not fullscreen
                if fullscreen:
                    screen = pygame.display.set_mode((0, 0), pygame.FULLSCREEN)
                else:
                    screen = pygame.display.set_mode((800, 400), pygame.RESIZABLE)
                start = True
                path_number -= 1
    if start:
        # Place the bus at the left edge, on the middle row, and advance to
        # the next route while any blue station remains.
        bus_start = screen.get_height() * int(rows / 2) / rows
        bus_rect = bus.get_rect(center = (0, bus_start))
        node = 0
        if path_number != len(path) - 1:
            path_number += 1
        last_station_on = True
        if "blue" in station_colors:
            start = False
    # Road building
    for i in range(1, rows):
        pygame.draw.rect(screen, ("black"), pygame.Rect(0, screen.get_height() / rows * i - road_thickness / 2, screen.get_width(), road_thickness))
        pygame.draw.rect(screen, ("black"), pygame.Rect(screen.get_width() / rows * i - road_thickness / 2, 0, road_thickness, screen.get_height()))
    for i in range(station_amount):
        pygame.draw.rect(screen, (station_colors[i]), pygame.Rect((screen.get_width() * 1.5 + xy_list[i][0] * screen.get_width()) / rows, screen.get_height() * xy_list[i][1] / rows, 15, 15))
    # Bus Movement: move right to the target column, then vertically to the
    # target row; on arrival mark the station grey and advance to the next.
    if bus_rect.centerx < screen.get_width() / rows * (path[path_number][node][0] + 1):
        bus_rect.centerx += speed
    elif bus_rect.centery < screen.get_height() / rows * path[path_number][node][1] - speed:
        bus_rect.centery += speed
    elif bus_rect.centery > screen.get_height() / rows * path[path_number][node][1] + speed:
        bus_rect.centery -= speed
    elif node != len(path[path_number]) - 1:
        station_colors[xy_list.index(path[path_number][node])] = "grey"
        node += 1
    elif bus_rect.left < screen.get_width():
        bus_rect.centerx += speed
        if last_station_on:
            station_colors[xy_list.index(path[path_number][node])] = "grey"
            last_station_on = False
    else:
        start = True
    screen.blit(bus, bus_rect)
    # NOTE(review): the y-coordinate uses text_width, not text_height --
    # looks like a typo; confirm intended title placement.
    screen.blit(text_surface, ((screen.get_width() - text_width)/ 2, (screen.get_height() - text_width) / 10))
    pygame.display.update()
clock.tick(60) | ossan05/blixtlas | pygame-test.py | pygame-test.py | py | 5,590 | python | en | code | 0 | github-code | 90 |
6438584611 | # ์ ์ด์ ์ ์ฒด๋ฅผ ๋ค์ง์ ์ผ์ด ์๋์,,,?
S = list(input())
idx = []
start = S[0] # initialise with the first character
for i in range(len(S)):
    if S[i] != start: # found a character different from the current reference
        idx.append(i) # record its index
        start = S[i] # that character becomes the new reference
# 1 or 2 change points need at least 1 operation, 3 or 4 need 2, and so on --
# hence round the number of change points up to a multiple of 2 and halve it.
if len(idx) % 2 == 1: # odd: add 1, then divide by 2
    cnt = (len(idx) + 1) // 2
elif len(idx) % 2 == 0: # zero or even: divide by 2
    cnt = len(idx) // 2
print(cnt) | namoo1818/SSAFY_Algorithm_Study | ๋ฐฐ๋ฏผ์ง/3-3.py | 3-3.py | py | 758 | python | ko | code | 0 | github-code | 90 |
3995225488 | #a~b ์ ์์ ํฉ ๊ตฌํ๊ธฐ with ์ ๋ ฌ
# for-loop version
print('a๋ถํฐ b๊น์ง ์ ์์ ํฉ ๊ตฌํ๊ธฐ')
a = int(input('a : '))
b = int(input('b : '))
if a>b :
    a,b = b,a # swap so that a <= b (ascending order)
# NOTE(review): 'sum' shadows the built-in of the same name.
sum = 0
for i in range(a, b+1):
    sum += i
print(sum) | WonyJeong/algorithm-study | kkxxh/basic/b35.py | b35.py | py | 275 | python | ko | code | 2 | github-code | 90 |
20369185391 | class Solution:
def champagneTower(self, poured: int, query_row: int, query_glass: int) -> float:
levels = [[0]*i for i in range(1,query_row+2)]
levels[0] = [poured]
for i in range(len(levels)-1):
for j in range(len(levels[i])):
if levels[i][j]-1 <= 0: continue
temp = (levels[i][j]-1)/2.0
levels[i+1][j] = levels[i+1][j]+temp
levels[i+1][j+1] = levels[i+1][j+1]+temp
return min(1,levels[query_row][query_glass]) | RishabhSinha07/Competitive_Problems_Daily | 799-champagne-tower/799-champagne-tower.py | 799-champagne-tower.py | py | 560 | python | en | code | 1 | github-code | 90 |
19116925832 | import sqlite3
from datetime import date
import yfinance as yf
import config
def updatePrice(max_date):
    """Download prices from max_date to today and upsert them into stock_price.

    Returns the number of rows successfully inserted/replaced.
    """
    connection = sqlite3.connect(config.DB_FILE)
    cursor = connection.cursor()
    # NOTE(review): this SELECT returns only count(id), but the loop below
    # reads row[0] as stock_id and row[1] as the ticker symbol -- row[1] will
    # raise IndexError. The query likely should select id and the symbol
    # column instead; verify against the schema.
    cursor.execute("""SELECT count(id) from stock where Special IS NULL OR Special='SNP500'""")
    rows = cursor.fetchall()
    totalUpdatedPrice = 0
    for row in rows:
        df = yf.download(row[1], start=max_date, end=date.today().isoformat())
        for df_date, j in df.iterrows():
            date_value = df_date.strftime('%Y-%m-%d')
            stock_id = row[0]
            try:
                cursor.execute(
                    """INSERT or REPLACE INTO stock_price (stock_id,date,open,high,low,close,volume) VALUES (?,?,?,?,
                    ?,?,?)""",
                    (stock_id, date_value, j['Open'], j['High'], j['Low'], j['Close'], j['Volume']))
                totalUpdatedPrice += 1
            except:
                # NOTE(review): bare except silently drops failed rows.
                pass
    connection.commit()
    return totalUpdatedPrice
| adityakdevin/trading | lateststockprices.py | lateststockprices.py | py | 998 | python | en | code | 0 | github-code | 90 |
29626764057 | #
# @lc app=leetcode id=9 lang=python
#
# [9] Palindrome Number
#
class Solution(object):
    def isPalindrome(self, x):
        """
        Return True if the integer x reads the same forwards and backwards,
        without converting it to a string: each round compares the leading
        and trailing digit, then strips both.

        :type x: int
        :rtype: bool
        """
        if x < 0:
            return False
        # ranger becomes the place value of x's most significant digit.
        ranger = 1
        while x // ranger >= 10:
            ranger *= 10
        while x:
            left = x // ranger
            right = x % 10
            if left != right:
                return False
            # Strip the leading and trailing digits. Floor division is
            # essential here: the original used true division, which under
            # Python 3 turns x into a float and breaks the digit math
            # (e.g. 1221 was reported as not a palindrome).
            x = (x % ranger) // 10
            ranger //= 100
        return True
if __name__ == '__main__':
"""
ไธ่ฝ็จbit_length()๏ผๅ ไธบๆฏๅ่ฟๅถ็ๅๆใ
convert to str is easy but involve extra memory
needs to do without convert to str. Also pitfalls about bit manipulation.
"""
s = Solution()
print(s.isPalindrome(-1001))
| zhch-sun/leetcode_szc | 9.palindrome-number.py | 9.palindrome-number.py | py | 1,037 | python | en | code | 0 | github-code | 90 |
18279034029 | from functools import reduce
from fractions import gcd
import math
import bisect
import itertools
import sys
sys.setrecursionlimit(10**7)
input = sys.stdin.readline
INF = float("inf")
# Main processing
def main():
    """Minimum total cost to reach at least H using the N (A, B) items.

    Unbounded-knapsack DP: dp[i] = minimum cost to reach total exactly i,
    each item contributing A[n] at cost B[n] and reusable any number of
    times. The answer is the cheapest dp[i] with i >= H.
    """
    H, N = map(int, input().split())
    A = [0]*N
    B = [0]*N
    for i in range(N):
        A[i], B[i] = map(int, input().split())
    # Fixed-size table; indices past H only matter for overshoot totals.
    dp = [INF] * 10000100
    dp[0] = 0
    for i in range(H):
        if dp[i] == INF:
            continue
        for n in range(N):
            dp[i+A[n]] = min(dp[i+A[n]], dp[i] + B[n])
    # Any total >= H is acceptable; take the cheapest.
    ans = INF
    for i in range(H, 10000100):
        ans = min(ans, dp[i])
    print(ans)
if __name__ == '__main__':
main() | Aasthaengg/IBMdataset | Python_codes/p02787/s169833720.py | s169833720.py | py | 680 | python | en | code | 0 | github-code | 90 |
74056775016 | import time
import imaplib
import RPi.GPIO as GPIO
# GPIO pins (BCM numbering) used to display the unread count in binary.
chan_list = (18, 25, 12, 16, 23, 21)
print('press ctl-c to stop')
try:
    while True:
        # SECURITY: credentials are hard-coded in source -- move them to an
        # environment variable or config file outside version control.
        M = imaplib.IMAP4_SSL('imap.gmail.com')
        M.login('drewatkinson5@gmail.com', 'password')
        M.select()
        unread_count = len(M.search(None, 'UnSeen')[1][0].split())
        M.logout()
        print(str(unread_count) + ' unread in your inbox')
        # Convert the count to binary, least significant bit first.
        output_values = []
        while unread_count > 0:
            output_values.append(unread_count % 2)
            unread_count = unread_count // 2
        GPIO.setmode(GPIO.BCM)
        GPIO.setup(chan_list, GPIO.OUT)
        if len(output_values) > len(chan_list):
            # Count does not fit in 6 bits: light every LED as an overflow signal.
            output_values = []
            for i in range(len(chan_list)):
                output_values.append(1)
        elif len(output_values) < len(chan_list):
            # Pad with zeros up to one value per pin.
            while len(output_values) < len(chan_list):
                output_values.append(0)
        GPIO.output(chan_list, output_values)
        print('sleeping for 1 minute')
        time.sleep(60)
except KeyboardInterrupt:
    GPIO.cleanup()
| drewatk/raspberry-pi | gmailchecker.py | gmailchecker.py | py | 1,126 | python | en | code | 0 | github-code | 90 |
15296528812 | import datetime
from django.forms import (ModelForm, inlineformset_factory, forms)
from django import forms
from django.forms.extras.widgets import SelectDateWidget
from core.forms import LibraryForm, LibraryQuantificationAndStorageForm, SaveDefault, SequencingForm, LaneForm
from .models import *
class TenxChipForm(ModelForm):
    """Plain ModelForm exposing every TenxChip field."""
    class Meta:
        model = TenxChip
        fields = "__all__"
class TenxPoolForm(ModelForm):
    """TenxPool form; pool_name is excluded (set outside the form)."""
    class Meta:
        model = TenxPool
        exclude = ['pool_name']
        fields = "__all__"
        widgets = {
            # NOTE(review): date.today() is evaluated at import time, so the
            # selectable year range is frozen for the life of the process.
            'constructed_date':
            SelectDateWidget(
                years=range(
                    2015,
                    datetime.date.today().year + 5,
                ),
                empty_label=('year', 'month', 'day'),
            )
        }
class TenxLibraryForm(LibraryForm):
    """Library form; on creation, Jira credential fields replace the manual
    jira_ticket field (the ticket is created automatically)."""
    field_order = [
        'chips',
        'sample',
        'description',
        'result',
        'num_sublibraries',
        'relates_to_dlp',
        'relates_to_tenx',
        'projects',
        'jira_ticket',
    ]
    def __init__(self, *args, **kwargs):
        super(TenxLibraryForm, self).__init__(*args, **kwargs)
        # No primary key yet -> this is a create form.
        if not self.instance.pk:
            # Get Jira info
            self.fields['additional_title'] = forms.CharField(max_length=100)
            self.fields['jira_user'] = forms.CharField(max_length=100)
            self.fields['jira_password'] = forms.CharField(widget=forms.PasswordInput, )
            # Remove the field which allows explicitly setting the Jira
            # ticket ID (since it's done automatically)
            self.fields.pop('jira_ticket')
    class Meta:
        model = TenxLibrary
        exclude = ['name']
        labels = {
            # NOTE(review): labels keys must be model field names; 'primary
            # sample' (with a space) matches no field and is ignored by Django.
            'primary sample': ('*Sample'),
        }
        help_texts = {
            'sample': ('Sequencing ID (usually SA ID) of the sample composing the majority of the library.'),
            'well_partition':
            ('Was this well split into multiple libraries? If so, please choose a UNIQUE well partition. This will be added as the suffix to the library name.'
             )
        }
class TenxLibraryQuantificationAndStorageForm(LibraryQuantificationAndStorageForm):
    """
    Clean uploaded 10x-related files.
    """
    # Re-uses the parent Meta options, only re-pointing at the 10x model.
    class Meta(LibraryQuantificationAndStorageForm.Meta):
        model = TenxLibraryQuantificationAndStorage
# Inline formsets attach the per-library detail records to a TenxLibrary form.
# NOTE(review): date.today() in the widget year ranges is evaluated at import
# time, so the ranges are frozen for the life of the process.
TenxLibrarySampleDetailInlineFormset = inlineformset_factory(
    TenxLibrary,
    TenxLibrarySampleDetail,
    form=SaveDefault,
    can_delete=False,
    fields="__all__",
    exclude=[""],
    widgets={
        'sample_prep_date':
        SelectDateWidget(
            years=range(
                2015,
                datetime.date.today().year + 5,
            ),
            empty_label=('year', 'month', 'day'),
        )
    },
)
TenxLibraryConstructionInfoInlineFormset = inlineformset_factory(
    TenxLibrary,
    TenxLibraryConstructionInformation,
    form=SaveDefault,
    can_delete=False,
    fields="__all__",
    widgets={
        'submission_date':
        SelectDateWidget(
            years=range(
                2015,
                datetime.date.today().year + 5,
            ),
            empty_label=('year', 'month', 'day'),
        )
    },
)
# Uses the dedicated form class so uploaded 10x files are cleaned.
TenxLibraryQuantificationAndStorageInlineFormset = inlineformset_factory(
    TenxLibrary,
    TenxLibraryQuantificationAndStorage,
    can_delete=False,
    form=TenxLibraryQuantificationAndStorageForm,
    fields="__all__",
)
class TenxSequencingForm(SequencingForm):
    """Sequencing form; Jira credentials are mandatory only on creation."""
    def __init__(self, *args, **kwargs):
        super(TenxSequencingForm, self).__init__(*args, **kwargs)
        # No primary key -> create form: require Jira credentials.
        if not self.instance.pk:
            self.fields['jira_user'] = forms.CharField(max_length=100)
            self.fields['jira_password'] = forms.CharField(widget=forms.PasswordInput)
        else:
            self.fields['jira_user'] = forms.CharField(max_length=100, required=False)
            self.fields['jira_password'] = forms.CharField(widget=forms.PasswordInput, required=False)
    class Meta(SequencingForm.Meta):
        model = TenxSequencing
        labels = {
            'tenx_pool': ('*TENX POOL'),
        }
class TenxLaneForm(ModelForm):
    """Plain ModelForm for TenxLane, reusing LaneForm's Meta options."""
    class Meta(LaneForm.Meta):
        fields = "__all__"
        model = TenxLane
class TenxGSCSubmissionForm(forms.Form):
    """Collects submitter details and the TenX pool for a GSC submission."""
    name = forms.CharField(max_length=50, widget=forms.TextInput())
    email = forms.EmailField(max_length=50, widget=forms.EmailInput())
    # Pass the callable itself (not its result) so "today" is evaluated each
    # time the form is instantiated, not once at module import time.
    date = forms.DateField(widget=forms.SelectDateWidget(), initial=datetime.date.today)
    tenxpools = forms.ChoiceField(
        widget=forms.Select(),
        # Callable choices are re-evaluated per form, so pools created after
        # server start-up appear without a restart (and there is no DB query
        # at import time, which also breaks collectstatic/migrations on an
        # empty database).
        choices=lambda: [(pool.id, pool.pool_name) for pool in TenxPool.objects.all().order_by('id')],
        label="TenX Pool",
    )
| molonc/colossus | tenx/forms.py | forms.py | py | 4,775 | python | en | code | 3 | github-code | 90 |
18396866789 | from itertools import product
# input (original multibyte comment was garbled in the dump)
N,M=map(int,input().split())  # N switches, M bulbs
# each bulb line: k, s_1..s_k -> the (1-based) switches wired to that bulb
ks=[list(map(int,input().split())) for _ in range(M)]
# p[j]: required parity (0/1) of "on" switches for bulb j to light
p=[int(x) for x in input().split()]
#print(ks)
ans=0
# brute force all 2**N on/off assignments of the switches
for i in product([0,1],repeat=N):
    ok=True
    for j in range(M):
        j_on_cnt=0
        # count how many of bulb j's switches are currently on
        for k in ks[j][1:]:
            j_on_cnt+=i[k-1]
        # bulb j lights only when the on-count parity matches p[j]
        if j_on_cnt%2 != p[j]:
            ok=False
            break
    if ok:
        ans+=1
print(ans) | Aasthaengg/IBMdataset | Python_codes/p03031/s187554410.py | s187554410.py | py | 440 | python | en | code | 0 | github-code | 90 |
73173158696 | import matplotlib
matplotlib.use('Agg')
import datetime
import os
import argparse
import csv
import numpy as np
import scipy
from scipy.stats import norm
import matplotlib.pyplot as plt
# setting graphs
font = {'size' : 14}
fontLeg = {'fontsize': 11}
matplotlib.rc('font', **font)
matplotlib.rc('legend', **fontLeg)
def split(data, index):
    """Group consecutive rows of *data* sharing the same value at *index*.

    A new group is opened whenever row[index] differs from the previous
    row's value.  Returns a list of row lists.
    """
    groups = []
    previous = "*_"  # sentinel that never matches a real value
    for row in data:
        if row[index] != previous:
            previous = row[index]
            groups.append([])
        groups[-1].append(row)
    return groups
def column(data, index):
    """Extract column *index* from every row of *data*, converted to float."""
    return [float(row[index]) for row in data]
def plot_di_max(data, i):
    """Bar-plot how often each distinct value occurs in *data* for object *i*.

    Saves the figure as Web_object_<i>_Dimax.pdf in the module-level
    `directory`; the figure is closed unless `verbose` is set.
    """
    bar_width = 1 / 1.5
    distinct = set(data)
    xs = [v for v in distinct]
    ys = [data.count(v) for v in distinct]
    plt.figure()
    plt.title("Web object " + str(i))
    plt.bar(xs, ys, bar_width, color="blue")
    plt.xlabel("values (n)")
    plt.ylabel("quantity (m)")
    plt.savefig(directory + "/Web_object_" + str(i) + "_Dimax.pdf")
    if not verbose:
        plt.close()
def plot_fitting(x, y, yFitted, i):
    """Scatter raw revenue/download points and overlay the fitted line.

    Axes are scaled to thousands.  Saves Web_object_<i>_fitting.pdf into
    the module-level `directory`; closes the figure unless `verbose`.
    """
    downloads_k = [v / 1000 for v in x]
    plt.figure()
    plt.title("Web object " + str(i))
    plt.plot(downloads_k, [v / 1000 for v in y], 'o',
             label='Original data', markersize=.4, color="gray")
    plt.plot(downloads_k, [v / 1000 for v in yFitted], 'r',
             label='Fitted line', color="black")
    plt.xlabel("downloads (n x1000)")
    plt.ylabel("revenues (n x1000)")
    plt.legend()
    plt.savefig(directory + "/Web_object_" + str(i) + "_fitting.pdf")
    if not verbose:
        plt.close()
def plot_all_revenues(xs, ys):
    """Plot the fitted revenue/download line of every object on one figure.

    xs/ys are parallel lists: xs[i] are download counts, ys[i] the fitted
    revenues of object i.  Only the two endpoints of each line are drawn
    (the fit is linear, so two points suffice).  Axes in thousands.
    """
    if(len(xs) != len(ys)):
        print("[Error] plot_all_revenues: size of x and y does not match")
        return
    plt.figure()
    plt.title("Revenues/downloads for all object")
    # per-object line width / style / marker (supports up to 10 objects)
    lw = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
    ls = ["-", "--", ":", "-", "--", ":", "-", "--", ":", "-"]
    m = ["x", "x", "x", "^", "^", "^", "o", "o", "o", "s"]
    for i, x in enumerate(xs):
        y = ys[i]
        plt.plot([x[0]/1000, x[-1]/1000], [y[0]/1000, y[-1]/1000], marker=m[i], linewidth=lw[i], ls=ls[i], label='Object'+str(i+1))
    plt.xlabel("downloads (n x1000)")
    plt.ylabel("revenues (n x1000)")
    plt.legend()
    plt.savefig(directory+"/all_revenues_downloads.pdf")
    if(not verbose):
        plt.close()
def plot_gaussian(data, bin, left, right):
    """Histogram the pooled noise and overlay two zero-mean Gaussian pdfs.

    data  : noise samples
    bin   : number of histogram bins
    left/right : candidate standard deviations (confidence-interval bounds),
                 drawn as N(0, left) and N(0, right) curves.
    Saves gaussian_noise.pdf into the module-level `directory`.
    """
    plt.figure()
    # `normed` was removed from matplotlib's hist() in 3.x; `density=True`
    # is the equivalent normalisation flag.
    plt.hist(data, bins=bin, density=True, alpha=0.6, color='gray')
    mu = np.mean(data)
    xmin, xmax = plt.xlim()
    x = np.linspace(xmin, xmax, 100)
    # overlay the two candidate fits, both centred at zero
    p1 = norm.pdf(x, 0, left)
    p2 = norm.pdf(x, 0, right)
    plt.plot(x, p1, 'k', linewidth=1)
    plt.plot(x, p2, 'k', linewidth=1)
    title = "Fitting: mu = %.2f, std = [%.2f, %.2f]" % (mu, left, right)
    plt.xlabel("noise values (n)")
    plt.ylabel("probability (p)")
    plt.title(title)
    plt.savefig(directory+"/gaussian_noise.pdf")
    if(not verbose):
        plt.close()
# argumentparser
parser = argparse.ArgumentParser(description='Process some integers.')
parser.add_argument('-vb', '--verbose',
action='store_true',
help="Print some graphs")
parser.add_argument('-vvb', '--vverbose',
action='store_true',
help="Print some graphs and save all graphs in /images")
config = parser.parse_args()
verbose = config.verbose
vverbose = config.vverbose
# prepare working space
now = datetime.datetime.now()
directory = os.path.join(os.getcwd(), "images", datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S'))
try:
os.makedirs(directory)
except:
print("impossible to create folder "+directory+" to store images. Using ./images")
directory = './images'
with open('baldessari.csv', "r") as f:
reader = csv.DictReader( f )
subsets = split(reader, "i")
full_noise = []
clean_revenues_list = []
downloads_list = []
std_list = []
for i, subset in enumerate(subsets):
# load esperimental parameters
times = column(subset, "t")
downloads = column(subset, "x")
revenues = column(subset, "y")
# compute D_MAX
sortedDiffDownloads = np.sort(np.diff(downloads)).tolist()
dMax = sortedDiffDownloads[-1]
if(verbose and i==7 or vverbose):
plot_di_max(sortedDiffDownloads, i)
# compute ALPHA
A = np.vstack([downloads, np.ones(len(downloads))]).T
m, c = np.linalg.lstsq(A, revenues, rcond=-1)[0]
clean_revenues = [v*m+c for v in downloads]
downloads_list.append(downloads)
clean_revenues_list.append(clean_revenues)
print("Object: " + str(i) + " Dmax: " + str(dMax) + " alpha: " + str(m))
if(verbose and i==7 or vverbose):
plot_fitting(downloads, revenues, clean_revenues, i)
noise = [a - b for a, b in zip(revenues, clean_revenues)]
std_list.append(np.std(noise))
full_noise = full_noise + noise
# compute std interval
print("----------------------------------")
left = scipy.stats.t.ppf(0.025, 14)
m = np.mean(std_list)
s = np.std(std_list)
print("confidence interval for sigma")
# compute sigma interval
confidence_level = [90, 95, 99]
for confidence in confidence_level:
p = (-confidence + 100)/100
n = len(std_list)
t_p2 = scipy.stats.t.ppf(1-p/2, n-1)
interval = t_p2*s/np.sqrt(n)
interval_left = m - interval
interval_right = m + interval
print(confidence,'% [',interval_left,',',interval_right,']')
if(verbose or vverbose):
plot_all_revenues(downloads_list, clean_revenues_list)
plot_gaussian(full_noise, 30, interval_left, interval_right)
print("----------------------------------")
print("The graphs are saved as pdf inside \033[92m", directory, "\033[0m")
print("If you want to plot graphs in console comment line2 of ./main.py and remove plt.close()")
plt.show()
| balde73/SPE-assignment-1 | main.py | main.py | py | 5,742 | python | en | code | 0 | github-code | 90 |
18321848069 | n, T = map(int, input().split())
food = []
# each dish: (a, b) = time to eat, satisfaction
for _ in range(n):
    a, b = map(int, input().split())
    food.append((a, b))
# dp1[i][j]: best satisfaction using a subset of the first i dishes in time j
dp1 = [[0]*T for _ in range(1+n)]
# dp2[i][j]: best satisfaction using a subset of dishes i..n-1 in time j
dp2 = [[0]*T for _ in range(1+n)]
# prefix 0/1-knapsack
for i in range(n):
    for j in range(T):
        dp1[i+1][j] = dp1[i][j]
        if j - food[i][0] >= 0:
            dp1[i+1][j] = max(dp1[i+1][j], dp1[i][j-food[i][0]] + food[i][1])
# suffix 0/1-knapsack
for i in range(n-1, -1, -1):
    for j in range(T):
        dp2[i][j] = dp2[i+1][j]
        if j - food[i][0] >= 0:
            dp2[i][j] = max(dp2[i][j], dp2[i+1][j-food[i][0]] + food[i][1])
res = 0
# try each dish i as the "last order" (it may finish after T, so its eating
# time is free); the rest must fit into T-1 units, split j / T-1-j between
# the prefix and suffix tables
for i in range(n):
    for j in range(T):
        res = max(res, food[i][1] + dp1[i][j] + dp2[i+1][T-1-j])
print(res) | Aasthaengg/IBMdataset | Python_codes/p02863/s250319356.py | s250319356.py | py | 696 | python | en | code | 0 | github-code | 90 |
18520983309 | import sys
import sqlite3
from sqlite3 import Error
import pickle
michelin = pickle.load(open('michelin_restaurants.bin', 'rb'))
yelp_restaurants = pickle.load(open('yelp_restaurants.bin', 'rb'))
yelp_reviews = pickle.load(open('yelp_reviews.bin', 'rb'))
create_table_sql = """PRAGMA foreign_keys = ON;
CREATE TABLE IF NOT EXISTS state (
id INT PRIMARY KEY,
name VARCHAR(100) NOT NULL
);
CREATE TABLE IF NOT EXISTS city (
id INT PRIMARY KEY,
name VARCHAR(100) NOT NULL
);
CREATE TABLE IF NOT EXISTS restaurant (
id INT PRIMARY KEY,
name VARCHAR(100) NOT NULL,
rating FLOAT,
url VARCHAR(300),
price VARCHAR(4),
review_count INT,
street VARCHAR(100),
city VARCHAR(100),
state VARCHAR(2),
country VARCHAR(100),
zip_code INT,
phone VARCHAR(20),
image_url VARCHAR(300)
);
CREATE TABLE IF NOT EXISTS category (
id INT PRIMARY KEY,
title VARCHAR(100)
);
CREATE TABLE IF NOT EXISTS restaurant_by_category (
restaurant_id INT,
category_id INT,
FOREIGN KEY (restaurant_id) REFERENCES restaurant,
FOREIGN KEY (category_id) REFERENCES category
);
CREATE TABLE IF NOT EXISTS review (
url VARCHAR(300) PRIMARY KEY,
restaurant_id INT,
rating FLOAT,
name VARCHAR(100),
time VARCHAR(100),
text VARCHAR(300)
);
"""
try:
conn = sqlite3.connect('database.db')
c = conn.cursor()
c.executescript(create_table_sql)
c.close()
conn.close()
except Error as e:
print(e)
state_id = city_id = restaurant_id = category_id = 0
seen_states, seen_cities, seen_categories = set(), set(), set()
sql = ""
for place in michelin.keys():
print("Building SQL string for {0} restaurants...".format(place))
for r in yelp_restaurants[place]:
if r['name'] in michelin[place].keys():
sql += """INSERT INTO restaurant VALUES({0}, "{1}", {2}, "{3}", "{4}", {5}, "{6}", "{7}", "{8}", "{9}", {10}, "{11}", "{12}");\n""".format(
restaurant_id,
r['name'].replace('\"', '\'').replace(':', ''),
r['rating'],
r['url'].replace('\"', '\'').replace(':', ''),
r['price'].replace('\"', '\'').replace(':', ''),
r['review_count'],
r['location']['address1'].replace('\"', '\'').replace(':', ''),
r['location']['city'].replace('\"', '\'').replace(':', ''),
r['location']['state'].replace('\"', '\'').replace(':', ''),
r['location']['country'].replace('\"', '\'').replace(':', ''),
r['location']['zip_code'].replace('\"', '\'').replace(':', ''),
r['display_phone'],
r['image_url'],
)
if r['location']['city'] not in seen_cities:
sql += """INSERT INTO city VALUES({0}, "{1}");\n""".format(
city_id,
r['location']['city'].replace('\"', '\'').replace(':', '')
)
seen_cities.add(r['location']['city'])
city_id += 1
if r['location']['state'] not in seen_states:
sql += """INSERT INTO state VALUES({0}, "{1}");\n""".format(
state_id,
r['location']['state'].replace('\"', '\'').replace(':', '')
)
seen_states.add(r['location']['state'])
state_id += 1
for category in set([c['title'] for c in r['categories']]):
if category not in seen_categories:
sql += """INSERT INTO category VALUES({0}, "{1}");\n""".format(
category_id,
category.replace('\"', '\'').replace(':', '').replace('(', '')
)
seen_categories.add(category)
category_id += 1
sql += """INSERT INTO restaurant_by_category VALUES({0}, {1});\n""".format(
restaurant_id,
list(sorted(seen_categories)).index(category)
)
for review in yelp_reviews[place][r['id']]['reviews']:
sql += """INSERT INTO review VALUES("{0}", {1}, {2}, "{3}", "{4}", "{5}");\n""".format(
review['url'].replace('\"', '\''),
restaurant_id,
review['rating'],
review['user']['name'].replace('\"', '\'').replace(':', ''),
review['time_created'].replace('\"', '\'').replace(':', ''),
review['text'].replace('\"', '\'').replace(':', '')
)
restaurant_id += 1
try:
print("Adding records to the database...")
conn = sqlite3.connect('database.db')
c = conn.cursor()
c.executescript(sql)
conn.commit()
conn.close()
except Error as e:
print(e)
| a-rich/Yelp-with-Michelin-Restaurants | data_processing_database_scripts/build_database.py | build_database.py | py | 5,326 | python | en | code | 1 | github-code | 90 |
13135059923 | import csv
# Join rest_byzip onto rest_zip_crossjoin0 by the first two columns,
# appending the matched count (or "0") as a third column.
# NOTE(review): none of these file handles is ever closed — consider `with`.
reader1=csv.reader(open('rest_byzip', 'r'), delimiter=',')
reader2=csv.reader(open('rest_zip_crossjoin0', 'r'), delimiter=',')
writer=csv.writer(open('rest_zip_crossjoin', 'w'), delimiter=',')
for row1 in reader1:
    # NOTE(review): reader2 is a one-shot iterator — it is exhausted after
    # the first row1 iteration, so later outer iterations scan nothing.
    for row2 in reader2:
        if row1[0] == row2[0] and row1[1] == row2[1]:
            row2.append(row1[2])
            break
        else:
            row2.append("0")
            writer.writerow(row2)
    # NOTE(review): this re-writes the last row2 seen (duplicate output for
    # non-matching rows) — confirm against the intended join semantics.
    writer.writerow(row2)
# NOTE(review): reader2 is already exhausted here, so this loop never runs.
for row_check in reader2:
    if len(row_check) != 3:
        row_check.append("0")
writer.writerow(row_check) | DiHou/RestaurantSiteRecommend | Hive/2 rest_number_match_python/rest_zip_match.py | rest_zip_match.py | py | 511 | python | en | code | 0 | github-code | 90 |
18578818389 | N, Y = map(int, input().split())
# Find i 10000-yen, j 5000-yen and (N-i-j) 1000-yen bills totalling Y.
c =False  # found flag
for i in range(N+1):
    for j in range(N+1-i):
        if Y == 10000*i + 5000*j + 1000*(N-i-j):
            a = [str(i), str(j), str(N-i-j)]
            print(" ".join(a))
            c = True
            break
    if c:
        break
# no combination of N bills sums to Y
if not c:
    print("-1 -1 -1")
| Aasthaengg/IBMdataset | Python_codes/p03471/s637447754.py | s637447754.py | py | 311 | python | en | code | 0 | github-code | 90 |
40026570216 | class Solution:
def largestRectangleArea(self, heights):
"""
:type heights: List[int]
:rtype: int
"""
heights.append(0)
stack=[heights[0]]
res = 0
for i,h in enumerate(heights[1:]):
if h>=stack[-1]:
stack.append(h)
else:
width=1
while len(stack)>0 and stack[-1]>h:
h_p=stack.pop()
res = h_p*width if h_p*width>res else res
width+=1
for i in range(width):
stack.append(h)
return res | lanpartis/LeetCodePractice | 84.py | 84.py | py | 625 | python | en | code | 0 | github-code | 90 |
70036864297 | from django.urls import path
from . import views
from .views import *
urlpatterns = [
path('register/', register, name='register'),
path('login/', user_login, name='login'),
path('logout/', user_logout, name='logout'),
path('test/', test, name='test'),
path('', Home.as_view(), name='home'),
path('category/<str:slug>/', PostByCategory.as_view(), name='category'),
path('tag/<str:slug>/', PostsByTag.as_view(), name='tag'),
path('post/<str:slug>/', GetPost.as_view(), name='post'),
path('search/', Search.as_view(), name='search'),
path('', IndexView.as_view()),
path('new/', NewOrderView.as_view(), name='new_order'),
path('take/<int:oid>', test, name='take_order'),
path('edit-page/', views.edit_page, name='edit-page'),
]
| Bagrat88/D9.5. | News_Portal/urls.py | urls.py | py | 780 | python | en | code | 0 | github-code | 90 |
32110384485 | #import os
import re
data=open("data.txt","r")
def delete(d):
    """Generator yielding *d* once, with every 'c' and 'o' removed."""
    yield re.sub("[co]", "", d)
# for d in data:
# s=delete(d)
# print(next(s))
s=delete(data.read())
print(next(s))
| pp2-22B030444/pp2-22B030444 | TSIS6/generator.py | generator.py | py | 232 | python | en | code | 0 | github-code | 90 |
33681478915 | # Required Imports
import os
from flask import Flask, request, jsonify
from firebase_admin import credentials, firestore, initialize_app
from flask_cors import CORS
# Initialize Flask App
app = Flask(__name__)
CORS(app)
# Initialize Firestore DB
cred = credentials.Certificate('key.json')
default_app = initialize_app(cred)
db = firestore.client()
device_ref = db.collection('device')
@app.route('/', methods=['GET'])
def home():
    """Landing page for the device back-end."""
    banner = '<h1><center>Welcome to Device Back-End!</center></h1>'
    return banner
""" Device """
@app.route('/device', methods=['GET'])
def get_device():
    """Return one device (``?id=...``) or the whole collection, as JSON."""
    try:
        # Check if ID was passed to URL query
        device_id = request.args.get('id')
        if device_id:
            device = device_ref.document(device_id).get()
            return jsonify(device.to_dict()), 200
        else:
            # no id given: materialise every document in the collection
            devices = [doc.to_dict() for doc in device_ref.stream()]
            return jsonify(devices), 200
    except Exception as e:
        # NOTE(review): broad catch returns the error text with implicit 200
        return f"An error occurred: {e}"
@app.route('/device', methods=['POST'])
def add_device():
    """Create (or overwrite) a device document keyed by the body's ``id_device``."""
    try:
        id = request.json['id_device']
        # set() replaces the entire document if the id already exists
        device_ref.document(id).set(request.json)
        return jsonify({"success": True}), 200
    except Exception as e:
        return f"An error occurred: {e}"
@app.route('/device', methods=['PUT'])
def update_device():
    """Merge the JSON body into the existing device document keyed by ``id``.

    NOTE(review): POST reads ``id_device`` while PUT reads ``id`` — confirm
    the asymmetry is intentional on the client side.
    """
    try:
        id = request.json['id']
        device_ref.document(id).update(request.json)
        return jsonify({"success": True}), 200
    except Exception as e:
        return f"An error occurred: {e}"
@app.route('/device', methods=['DELETE'])
def delete_device():
    """Delete the device document named by the ``?id=`` query parameter."""
    try:
        # Check for ID in URL query
        id = request.args.get('id')
        device_ref.document(id).delete()
        return jsonify({"success": True}), 200
    except Exception as e:
        return f"An error occurred: {e}"
port = int(os.environ.get('PORT', 8080))
if __name__ == '__main__':
app.run(threaded=True, host='0.0.0.0', port=port)
| bluesunkennie/BE_API | app.py | app.py | py | 2,061 | python | en | code | 0 | github-code | 90 |
73361224298 | import cv2
import numpy as np
from scipy import signal
import matplotlib.pyplot as plt
import math
#%%
# Start of problem 1
def GaussianFilt(img, win, sigma):
    """Smooth *img* with a win x win Gaussian kernel of the given sigma.

    Returns the full 2-D convolution with 'symm' boundary handling, so the
    output is (rows+win-1, cols+win-1) — same as the original behaviour.

    Fixes: the kernel centre offset was hard-coded to 2, which is only
    correct for win == 5 (for win == 7, as used by Harris(), negative
    indices silently wrapped around).  Also replaces np.int, removed in
    NumPy 1.24, with the builtin int.
    """
    half = (win - 1) // 2
    g = np.ones((win, win))
    for x in range(-half, half + 1):
        for y in range(-half, half + 1):
            # centre the kernel with `half`, not a hard-coded 2
            g[x + half, y + half] = np.exp(-(x**2 + y**2) / (2 * sigma**2)) / (2 * np.pi * sigma**2)
    return signal.convolve2d(img, g, boundary='symm')
#%%
def Gaussian_1d(win, sigma):
    """Return (gx, gy): x- and y-derivative-of-Gaussian kernels, win x win.

    Fixes: the centre offset was hard-coded to 2 (only valid for win == 5);
    np.int (removed in NumPy 1.24) replaced by int arithmetic; the unused
    full Gaussian array is no longer kept around.
    """
    half = (win - 1) // 2
    gx = np.ones((win, win))
    gy = np.ones((win, win))
    for x in range(-half, half + 1):
        for y in range(-half, half + 1):
            g = np.exp(-(x**2 + y**2) / (2 * sigma**2)) / (2 * np.pi * sigma**2)
            gx[x + half, y + half] = -x * g / sigma**2
            gy[x + half, y + half] = -y * g / sigma**2
    return gx, gy
#%%
def Harris(img,sigma,win,swin):
    """Harris corner detector: draws and shows the top-50 corners on *img*.

    img   : grayscale image (2-D array)
    sigma : Gaussian scale for the derivative kernels
    win   : derivative-kernel size; swin : smoothing-kernel size
    Returns (ft, dimg): the kept [score, (col, row)] corners sorted by
    score, and the image with circles drawn on it.  Opens a cv2 window
    and blocks until a key is pressed.
    """
    rows,cols = img.shape[:2]
    # image gradients via derivative-of-Gaussian kernels
    gx, gy=Gaussian_1d(win,sigma)
    Ix=signal.convolve2d(img,gx,boundary = 'symm')
    Iy=signal.convolve2d(img,gy,boundary = 'symm')
    # products for the structure tensor, smoothed at scale 2*sigma
    Ix2=Ix**2
    Iy2=Iy**2
    Ixy=Iy*Ix
    Ix2_s=GaussianFilt(Ix2,swin,2*sigma)
    Iy2_s=GaussianFilt(Iy2,swin,2*sigma)
    Ixy_s=GaussianFilt(Ixy,swin,2*sigma)
    H = np.zeros((rows,cols))
    for i in range(3,rows-3):
        for o in range(3,cols-3):
            # 5x5-window sums of the structure tensor entries
            a00 = np.sum(Ix2_s[i-2:i+3,o-2:o+3])
            a01 = np.sum(Ixy_s[i-2:i+3,o-2:o+3])
            a11 = np.sum(Iy2_s[i-2:i+3,o-2:o+3])
            # Harris response det(A) - k*trace(A)^2 with k = 0.06
            H[i,o] = a00*a11-a01**2 - 0.06*(a00+a11)**2
    # clamp edge responses and normalise to [0, 255]
    H[H<0] = 0
    H = H/np.max(H)*255
    # non-maximum suppression over 5x5 neighbourhoods
    cft=[]
    for i in range(2,rows-2):
        for j in range(2,cols-2):
            local = np.array([H[i-2:i+3,j-2:j+3]])
            if np.max(local) == H[i,j] and np.max(local)!= 0:
                # store (col, row) so the point can be passed to cv2 directly
                cft.append([H[i,j],(j,i)])
    # keep the N strongest corners
    # NOTE(review): if fewer than 50 maxima survive, the drawing loop below
    # raises IndexError — confirm inputs always yield >= 50 corners.
    N=50
    ft=sorted(cft,reverse=True)[0:N]
    # print(ft)
    for i in range(N):
        dimg=cv2.circle(img,ft[i][1],3,(255,255,255),-1)
    cv2.imshow('corner detecter',dimg)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
    return ft, dimg
#%%
img=cv2.imread('BK_left.jpg',0)
rimg=cv2.imread('BK_right.jpg',0)
sigma=1
win=4*sigma+1
swin=6*sigma+1
ft,dimg = Harris(img,sigma,win,swin)
#%%
# create new images by rotating and resizing
rows,cols = img.shape[:2]
nimg1=cv2.resize(img,(cols/2,rows/2))
nimg2=cv2.resize(img,(cols*2,rows*2))
M=cv2.getRotationMatrix2D(((cols/2,rows/2)),30,1)
nimg3=cv2.warpAffine(nimg1,M,(cols,rows))
M=cv2.getRotationMatrix2D(((cols/2,rows/2)),-20,1)
nimg4 = cv2.warpAffine(nimg2,M,(cols*2,rows*2))
#cv2.imshow('new1',nimg1)
#cv2.imshow('new2',nimg2)
cv2.imshow('new image, downsize',nimg3)
cv2.imshow('new image, upsize',nimg4)
#
cv2.waitKey(0)
cv2.destroyAllWindows()
ft4, dimg4 = Harris(nimg4,sigma,win,swin)
# end of problem 1
#%%
# start of problem 2
def MagAndAngle(img,win,sigma,N):
    """Per-pixel gradient magnitude and quantised orientation of *img*.

    Returns (mag, ori_q): magnitude and orientation-bin arrays sized like
    img.  N is overwritten to 8 below, so the parameter value is ignored.
    """
    w,h = img.shape[:2]
    gx, gy=Gaussian_1d(win,sigma)
    # NOTE(review): full convolution output is larger than img, but only the
    # img-sized top-left region is indexed below — confirm this is intended.
    Ix=signal.convolve2d(img,gx)
    Iy=signal.convolve2d(img,gy)
    ori = np.zeros((w,h))
    mag = np.zeros((w,h))
    for x in range(1,w-1):
        for y in range(1,h-1):
            # central differences taken from the 3x3 neighbourhood of Ix
            # NOTE(review): only Ix is sampled; Iy is computed but unused.
            l = Ix[x-1:x+2,y-1:y+2]
            mag[x,y] = np.sqrt( (l[2,1] - l[0,1])**2 + (l[1,2] - l[1,0])**2 )
            # NOTE(review): raises ZeroDivisionError when the vertical
            # difference is zero; math.atan2 would avoid that.
            ori[x,y] = math.atan( (l[1,2] - l[1,0]) / (l[2,1] - l[0,1]) )
    #quantisize
    N = 8
    # NOTE(review): atan returns radians but q = 45 is a degree step, so the
    # quantisation below almost always lands in bin 0 — likely a bug.
    q = 45
    ori_q = np.floor( (ori + q/2)/q )
    for i in range(1,w-1):
        for j in range(1,h-1):
            if ori_q[i,j] == N:
                # NOTE(review): assigns the dead local `x`; probably meant
                # `ori_q[i,j] = 0` to wrap bin N back to 0.
                x = 0
    return mag, ori_q
#%%
def SIFT(img,featureP):
    """Build a SIFT-like descriptor for one feature point.

    NOTE(review): this function appears unfinished/broken in several places
    (flagged inline); it is documented as-is rather than rewritten.
    """
    w,h = img.shape[:2]
    win = 16  # descriptor patch size
    N=8       # orientation bins
    # NOTE(review): featureP[:][1] is just featureP[1]; expects (x, y) there.
    x,y = featureP[:][1]
    mag,ori = MagAndAngle(img,5,1,N)
    # NOTE(review): win/2 is a float on Python 3, so these slice indices
    # raise TypeError — integer division (win//2) was probably intended.
    patchMag = mag[x-win/2 : 1+x+win/2 , y-win/2 : 1+y+win/2]
    w_patchMag = GaussianFilt(patchMag, 3, sigma = win/2)
    # global orientation histogram over the whole image
    hog = [0]*N
    for i in range(w):
        for j in range(h):
            for p in range(N):
                if ori[i,j] == p:
                    # NOTE(review): indexes w_patchMag with image coords,
                    # which exceed the 16x16 patch — IndexError risk.
                    hog[p] = hog[p] + w_patchMag[i][j]
    hog = list(hog)
    patchOri = hog.index(max(hog))  # dominant orientation (unused below)
    # 4x4 grid of per-cell histograms
    hog44 = [[0]*4 for i in range(4)]
    w_patchMag44 = [[0]*4 for i in range(4)]
    patchOri44 = [[0]*4 for i in range(4)]
    hogs = []
    for m in range(4):
        for n in range(4):
            hog44[m][n] = [0]*N
            # NOTE(review): slice m:4*m is empty when m == 0 and is not a
            # 4x4 cell tiling — likely meant 4*m:4*(m+1).
            w_patchMag44[m][n] = w_patchMag[m:4*m,n:4*n]
            for i in range(w):
                for j in range(h):
                    for p in range(N):
                        if ori[i,j] == p:
                            # NOTE(review): `hog` is a flat list, so
                            # hog[m][n][p] raises TypeError; hog44 intended?
                            hog44[m][n][p] = hog[m][n][p] + w_patchMag44[m][n][i][j]
            hog44[m][n] = list(hog44[m][n])
            patchOri44[m][n] = hog44[m][n].index(max(hog[m][n]))
            # rotate each cell histogram so its dominant bin comes first
            hog44[m][n] = hog44[m][n][hog44[m][n].index(max(hog[m][n]))::] + hog44[m][n][:hog44[m][n].index(max(hog[m][n])):]
            hogs = hogs + hog44[m][n]
    # normalize
    # NOTE(review): np.linalg.norm returns a scalar, so the clamp/item
    # assignment below cannot work; normalising the vector itself
    # (hogs / norm) was presumably intended.
    hogs_n = np.linalg.norm(hogs, ord = 2)
    hogs_n[hogs_n > 0.2] = 0.2
    hogs_rn = np.linalg.norm(hogs_n, ord = 2)
    return hog, hogs_rn
#%%
def match(ft1, ft2, r):
    """Match feature points between two lists using Lowe's ratio test.

    ft1/ft2 hold [score, (x, y)] entries.  A pair is kept when the nearest
    neighbour in ft2 is closer than r times the second-nearest.  Returns a
    list of [index_in_ft1, index_in_ft2] pairs.
    """
    pairs = []
    for i, feat in enumerate(ft1):
        x1, y1 = feat[1]
        # Euclidean distances from feat to every candidate in ft2
        dists = np.zeros(len(ft2))
        for j, cand in enumerate(ft2):
            dists[j] = ((x1 - cand[1][0])**2 + (y1 - cand[1][1])**2) ** 0.5
        dists = list(dists)
        best = dists.index(min(dists))
        best_val = dists[best]
        # mask the best entry so min() now finds the second-nearest
        dists[best] = max(dists)
        second_val = dists[dists.index(min(dists))]
        if best_val / second_val < r:
            pairs.append([i, best])
    return pairs
# end of problem 2
#%%
# start of problem 3
# feature points and descriptors of original image already generated before.
M2=cv2.getRotationMatrix2D(((cols/2,rows/2)),2,1)
rimg=cv2.warpAffine(img,M2,(cols,rows))
ftr, rdimg = Harris(rimg,sigma,win,swin)
#%%
#show in one image
def showMatchingPairs(dimg1,dimg2,matchPairs):
    """Place dimg1 and dimg2 side by side and draw lines between matches.

    matchPairs holds [index_into_ft, index_into_ftr] pairs.
    NOTE(review): the feature lists are read from the module-level globals
    `ft` and `ftr`, not from parameters — this only works for the script's
    own pair of images.
    """
    rows,cols = dimg1.shape[:2]
    rows2,cols2 = dimg2.shape[:2]
    # canvas wide enough for both images, second image offset by `cols`
    bimg = np.zeros((rows, cols + cols2))
    bimg[0:rows,0:cols] = dimg1[:,:]
    bimg[0:rows,cols:cols + cols2] = dimg2[:,:]
    for i in range(np.shape(matchPairs)[0]):
        pt1 = ft[matchPairs[i][0]]
        pt2 = ftr[matchPairs[i][1]]
        pt2[1] = list(pt2[1])
        # pt2[1][1] = pt2[1][1] + cols
        cv2.line(bimg, (pt1[1][0],pt1[1][1]), (pt2[1][0] + cols, pt2[1][1]), (255,255,255))
        # print([pt1[1][1],pt1[1][0],[pt2[1][1], pt2[1][0]]])
    return bimg
matchPairs = match(ft,ftr,0.3)
#cv2.imshow('matching',bimg)
#cv2.waitKey(0)
#cv2.destroyAllWindows()
bimg = showMatchingPairs(dimg,rdimg,matchPairs)
plt.figure(figsize = (15,15))
plt.imshow(bimg, cmap = plt.cm.gray)
# end of problem 3
| kkkacey/ImageAndVideoProcessing | Homework/5/HW5_problem1_3.py | HW5_problem1_3.py | py | 7,017 | python | en | code | 0 | github-code | 90 |
24216307881 | import csv
import json
csv_file = open('kinoafisha_data.csv', 'r', encoding='cp1251')
json_file = open('kinoafisha_data.json', 'w', encoding='utf-8')
with open('kinoafisha_data.csv') as f:
size = len(f.readlines())
fieldnames = ('position', 'title', 'genres', 'year', 'countries', 'rate', 'link')
reader = csv.DictReader(csv_file, fieldnames)
i = 1
json_file.write('[\n')
for row in reader:
json.dump(row, json_file, indent=4, ensure_ascii=False)
if i != size:
json_file.write(',\n')
i += 1
json_file.write('\n]')
| VladHound/Information_Retrieval | csv_to_json.py | csv_to_json.py | py | 562 | python | en | code | 0 | github-code | 90 |
27682122627 | #this is in work, it is not finished yet
import math
def mitternachtsformel (a,b,c):
    """Return the two real roots of a*x**2 + b*x + c = 0 as (x1, x2).

    Raises ValueError (from math.sqrt) for a negative discriminant and
    ZeroDivisionError for a == 0, matching the original behaviour.
    """
    # compute the square root of the discriminant once instead of twice
    root = math.sqrt(b**2 - 4*a*c)
    return (-b + root) / (2*a), (-b - root) / (2*a)
def discriminant(a, b, c):
    """Return the discriminant b*b - 4*a*c of the quadratic a, b, c."""
    return b * b - 4 * a * c
a = int(input("Enter a: "))
b = int(input("Enter b: "))
c = int(input("Enter c: "))
print(mitternachtsformel(a,b,c))
print(discriminant(a,b,c))
| dazyfreez/smaller-python-projects | math/even_better_calc.py | even_better_calc.py | py | 394 | python | en | code | 2 | github-code | 90 |
18227593159 | # C - gacha
n = int(input())
s = []
c = 1  # at least one distinct item once n >= 1
for i in range(n):
    s.append(input())
# sort so equal strings become adjacent, then count boundaries between runs
s.sort()
for j in range(1,n):
    if s[j-1] != s[j]:
        c += 1
print(c)
| Aasthaengg/IBMdataset | Python_codes/p02701/s376075235.py | s376075235.py | py | 166 | python | en | code | 0 | github-code | 90 |
19418977938 | import cv2
import firebase_admin
from firebase_admin import credentials
from firebase_admin import db
import numpy as np
import base64,time
import os
import xlwt
import xlrd
from xlutils.copy import copy
from PIL import Image
from io import BytesIO
import re
cred = credentials.Certificate("smkitchendb-firebase-adminsdk-a8z9b-9634b4119e.json")
firebase_admin.initialize_app(cred,{'databaseURL':'https://smkitchendb.firebaseio.com'})
ref = db.reference('/')
grabcutImages_ref=ref.child('TestImages')
desktopImages_ref=ref.child('DesktopTestImages')
class TestStorage:
    """Uploads a mobile photo, its grabcut crop and a predicted label to the
    Firebase 'TestImages' collection as base64-encoded JPEGs."""
    def __init__(self, mobilePath, grabcutMobilePath,predictedLabel):
        # local file paths of the raw and grabcut-cropped images
        self.mobilePath = mobilePath
        self.grabcutMobilePath = grabcutMobilePath
        self.predictedLabel = predictedLabel
    # convert image from dtype('uint8') to Base64
    def convertImageToBytes(self,category):
        """Read the 'mobile' or 'grabcut' image and return it as a base64 JPEG string.

        NOTE(review): any other category leaves `path` unbound and raises
        UnboundLocalError.
        """
        if category=='grabcut':
            path=self.grabcutMobilePath
        elif category=='mobile':
            path=self.mobilePath
        img=cv2.imread(path)
        retval, buffer = cv2.imencode('.jpg', img)
        jpg_as_text = base64.b64encode(buffer).decode("utf-8")
        return jpg_as_text
    def saveTestDataToFireBase(self):
        """Push both encoded images and the predicted label as one new record."""
        mobileByteString=self.convertImageToBytes('mobile')
        grabcutByteString=self.convertImageToBytes('grabcut')
        testChildRef =grabcutImages_ref.push()
        testChildRef.set({
            "Mobile_Image":mobileByteString,
            "Mobile_GrabcutImage":grabcutByteString,
            "Predicted_Label":self.predictedLabel
        })
        # print(fName)
        print("Image is saved to Firebase")
# t1=TestStorage("E:/NutritionTracking/TestCases/Test_Images/WebApple.jpg","E:/NutritionTracking/TestCases/Test_Images/PhotoApple.jpg","Apple")
# t2=TestStorage("E:/NutritionTracking/TestCases/Test_Images/Banana81.jpg","E:/NutritionTracking/TestCases/Test_Images/Banana80.jpg","Banana")
# t1.saveTestDataToFireBase()
# t2.saveTestDataToFireBase()
class TestImagesLoader:
    """Downloads every record of the Firebase 'TestImages' collection and
    writes the decoded JPEGs into E:/TEST/Mobile and E:/TEST/Grabcut."""
    def __init__(self):
        # snapshot of all records, keyed by push id
        self.snapshot = grabcutImages_ref.order_by_key().get()
    # convert image from Base64 to dtype('uint8')
    def convertBytesToImage(self,val,timestamp,category):
        """Decode a base64 JPEG string and save it as <timestamp>.jpg in the
        folder selected by *category* ('grabcut' or 'mobile')."""
        jpg_original = base64.b64decode(val)
        nparr = np.frombuffer(jpg_original, np.uint8)
        extractImg = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
        if category=="grabcut":
            filename="E:/TEST/Grabcut/"+timestamp+'.jpg'
        elif category=="mobile":
            filename="E:/TEST/Mobile/"+timestamp+'.jpg'
        # NOTE(review): an unknown category leaves `filename` unbound.
        cv2.imwrite(filename, extractImg)
    # Rename the name of the files in the test directory
    def renameFiles(self):
        """Rename downloaded files to MobileGrabCut_<n>.jpg / Mobile_<n>.jpg."""
        for count, filename in enumerate(os.listdir("E:/TEST/Grabcut/")):
            os.rename(os.path.join("E:/TEST/Grabcut/",filename), os.path.join("E:/TEST/Grabcut/","MobileGrabCut_" + str(count) + ".jpg"))
        for count, filename in enumerate(os.listdir("E:/TEST/Mobile/")):
            os.rename(os.path.join("E:/TEST/Mobile/",filename), os.path.join("E:/TEST/Mobile/","Mobile_" + str(count) + ".jpg"))
    def loadImagesFromFirebase(self):
        """Recreate the local folders, clear old files and download all images."""
        #create folder if folder does not exist
        if not os.path.exists("E:/TEST"):
            path = os.path.join("E:/","TEST")
            os.mkdir(path)
        if not os.path.exists("E:/TEST/Grabcut/"):
            path = os.path.join("E:/TEST/","Grabcut")
            os.mkdir(path)
        if not os.path.exists("E:/TEST/Mobile/"):
            path = os.path.join("E:/TEST/","Mobile")
            os.mkdir(path)
        # First clear existing files in respective folders and reload files
        if os.stat("E:/TEST/Grabcut/").st_size >0:
            for f in os.listdir("E:/TEST/Grabcut/"):
                os.remove(os.path.join("E:/TEST/Grabcut/", f))
        if os.stat("E:/TEST/Mobile/").st_size >0:
            for f in os.listdir("E:/TEST/Mobile/"):
                os.remove(os.path.join("E:/TEST/Mobile/", f))
        # Load image from Firebase into the local directory of pc
        for key, val in self.snapshot.items():
            # print(grabcutImages_ref.child(key).child("Predicted_Label").get() )
            mobileByteString=grabcutImages_ref.child(key).child("Mobile_Image").get()
            grabcutByteString=grabcutImages_ref.child(key).child("Mobile_GrabcutImage").get()
            # shared timestamp pairs the mobile image with its grabcut crop
            timestamp=str(time.time())
            self.convertBytesToImage(mobileByteString,timestamp,"mobile")
            self.convertBytesToImage(grabcutByteString,timestamp,"grabcut")
        print("All Mobile Images loaded in: "+"E:/TEST/Mobile/\n")
        print("All Cropped Mobiles Images loaded in: "+"E:/TEST/Grabcut/\n")
class ImageDifferenceExcelWriter:
def __init__(self):
rb = xlrd.open_workbook('E:\\NutritionTracking\\ImageClassifier_and_MeasureImageDifference\\Testcase.xls')
self.wb=copy(rb)
self.wsheet = self.wb.get_sheet(0)
self.rsheet=rb.sheet_by_index(0)
self.wHSVsheet = self.wb.get_sheet(1)
self.rHSVsheet=rb.sheet_by_index(1)
self.wsheetColor = self.wb.get_sheet(2)
self.rsheetColor=rb.sheet_by_index(2)
self.newIndex=None
self.testCaseId=None
self.testChildRef =desktopImages_ref.push()
self.testChildRef.set({
"TestCase_id":self.testCaseId,
"Desktop_Image":" ",
"Mobile_Image":" ",
"Mobile_GrabcutImage":" ",
"Desktop_GrabcutImage":" ",
"Overlay_Image":" ",
"Web_Histogram":" ",
"Mobile_Histogram":" "
})
# Generate Testcase Id and generate new entry for test result
def setRowIndex(self):
self.newIndex= self.rsheet.nrows
rowId=str(self.newIndex)
d=0
print("Row Index: "+rowId)
for c in rowId:
if c.isdigit():
d=d+1
if d==1:
self.testCaseId="T00"+str(rowId)
elif d==2:
self.testCaseId="T0"+str(rowId)
elif d==3:
self.testCaseId="T"+str(rowId)
self.wsheet.write(self.newIndex,0,self.testCaseId)
self.wHSVsheet.write(self.newIndex,0,self.testCaseId)
self.wsheetColor.write(self.newIndex,0,self.testCaseId)
# Save Images as bytestring in Firebase
def saveTestImageToFirebase(self,path,category):
img=cv2.imread(path)
img=cv2.resize(img,(384,400),3)
retval, buffer = cv2.imencode('.jpg', img)
jpg_as_text = base64.b64encode(buffer).decode("utf-8")
if category=='desktop_image':
self.testChildRef.update({
'Desktop_Image': jpg_as_text
})
elif category=='mobile_image':
self.testChildRef.update({
'Mobile_Image': jpg_as_text
})
elif category=='grabcutDesktop_image':
self.testChildRef.update({
'Desktop_GrabcutImage': jpg_as_text
})
elif category=='grabcutMobile_image':
self.testChildRef.update({
'Mobile_GrabcutImage': jpg_as_text
})
elif category=='overlay_image':
self.testChildRef.update({
'Overlay_Image': jpg_as_text
})
elif category=='web_histogram':
self.testChildRef.update({
'Web_Histogram': jpg_as_text
})
elif category=='mobile_histogram':
self.testChildRef.update({
'Mobile_Histogram': jpg_as_text
})
# store bitmaps in Ms Excel workbook
def writeTestImages(self,webPath,mobilePath):
wwPath='E:\\NutritionTracking\\ImageClassifier_and_MeasureImageDifference\\Backup_images\\Web_Images'
mmPath='E:\\NutritionTracking\\ImageClassifier_and_MeasureImageDifference\\Backup_images\\Mobile_Images'
if self.rsheet.nrows>1:
wdirs=os.listdir("E:\\NutritionTracking\\ImageClassifier_and_MeasureImageDifference\\Backup_images\\Web_Images")
mdirs=os.listdir("E:\\NutritionTracking\\ImageClassifier_and_MeasureImageDifference\\Backup_images\\Mobile_Images")
iw=len(wdirs)-1
im=len(mdirs)-1
iw-=1
im-=1
for rowIndex in range(1,self.rsheet.nrows):
testCaseId=str(self.rsheet.cell(rowIndex,0).value)
wPath=os.path.join(wwPath,wdirs[iw])
wImg=Image.open(wPath)
wArr = BytesIO()
wImg.save(wArr, format='bmp')
self.wsheet.insert_bitmap_data(wArr.getvalue(),rowIndex,1)
wImg.close()
iw-=1
mPath=os.path.join(mmPath,mdirs[im])
mImg=Image.open(mPath)
mArr = BytesIO()
mImg.save(mArr, format='bmp')
self.wsheet.insert_bitmap_data(mArr.getvalue(),rowIndex,3)
mImg.close()
im-=1
if iw<0 and im<0:
break
self.wb.save('Testcase.xls')
self.saveTestImageToFirebase(webPath,"desktop_image")
webimg = Image.open(webPath)
webimg = webimg.resize((round(webimg.size[0]/30), round(webimg.size[1]/30)))
wimage_parts = webimg.split()
rw = wimage_parts[0]
gw = wimage_parts[1]
bw = wimage_parts[2]
webimg = Image.merge("RGB", (rw, gw, bw))
filename="Web_Image"+self.testCaseId+'.jpg'
filepath=os.path.join(wwPath,filename)
webimg.save(filepath)
webArr = BytesIO()
webimg.save(webArr, format='bmp')
self.wsheet.insert_bitmap_data(webArr.getvalue(),self.newIndex,1)
self.wb.save('Testcase.xls')
webimg.close()
self.saveTestImageToFirebase(mobilePath,"mobile_image")
mobileimg = Image.open(mobilePath)
mobileimg = mobileimg.resize( (round(mobileimg.size[0]/30),round(mobileimg.size[1]/30) ))
mimage_parts = mobileimg.split()
rm = mimage_parts[0]
gm = mimage_parts[1]
bm = mimage_parts[2]
mobileimg = Image.merge("RGB", (rm, gm, bm))
filename="Mobile_Image"+self.testCaseId+'.jpg'
filepath=os.path.join(mmPath,filename)
mobileimg.save(filepath)
mobileArr = BytesIO()
mobileimg.save(mobileArr, format='bmp')
self.wsheet.insert_bitmap_data(mobileArr.getvalue(),self.newIndex,3)
self.wb.save('Testcase.xls')
mobileimg.close()
def writeGrabcutImages(self):
webpath=os.path.join('E:\\NutritionTracking\\TestCases\\ImageProcessed_Pictures\\Grabcut','WebImageCropped.png')
mobilepath=os.path.join('E:\\NutritionTracking\\TestCases\\ImageProcessed_Pictures\\Grabcut','PhotoImageCropped.png')
wwPath="E:\\NutritionTracking\\ImageClassifier_and_MeasureImageDifference\\Backup_images\\Web_Grabcut"
mmPath="E:\\NutritionTracking\\ImageClassifier_and_MeasureImageDifference\\Backup_images\\Mobile_Grabcut"
if self.rsheet.nrows>1:
wdirs=os.listdir("E:\\NutritionTracking\\ImageClassifier_and_MeasureImageDifference\\Backup_images\\Web_Grabcut")
mdirs=os.listdir("E:\\NutritionTracking\\ImageClassifier_and_MeasureImageDifference\\Backup_images\\Mobile_Grabcut")
iw=len(wdirs)-1
im=len(mdirs)-1
iw-=1
im-=1
for rowIndex in range(1,self.rsheet.nrows):
testCaseId=str(self.rsheet.cell(rowIndex,0).value)
wPath=os.path.join(wwPath,wdirs[iw])
wImg=Image.open(wPath)
wArr = BytesIO()
wImg.save(wArr, format='bmp')
self.wsheet.insert_bitmap_data(wArr.getvalue(),rowIndex,10)
wImg.close()
iw-=1
mPath=os.path.join(mmPath,mdirs[im])
mImg=Image.open(mPath)
mArr = BytesIO()
mImg.save(mArr, format='bmp')
self.wsheet.insert_bitmap_data(mArr.getvalue(),rowIndex,11)
mImg.close()
im-=1
if iw<0 and im<0:
break
self.wb.save('Testcase.xls')
self.saveTestImageToFirebase(webpath,"grabcutDesktop_image")
webimg = Image.open(webpath)
webimg = webimg.resize((round(webimg.size[0]/30), round(webimg.size[1]/30)))
wimage_parts = webimg.split()
rw = wimage_parts[0]
gw = wimage_parts[1]
bw = wimage_parts[2]
webimg = Image.merge("RGB", (rw, gw, bw))
filename="Web_ImageGrabcut"+self.testCaseId+'.png'
filepath=os.path.join(wwPath,filename)
webimg.save(filepath)
webArr = BytesIO()
webimg.save(webArr, format='bmp')
self.wsheet.insert_bitmap_data(webArr.getvalue(),self.newIndex,10)
self.wb.save('Testcase.xls')
webimg.close()
self.saveTestImageToFirebase(webpath,"grabcutMobile_image")
mobileimg = Image.open(mobilepath)
mobileimg = mobileimg.resize( (round(mobileimg.size[0]/30),round(mobileimg.size[1]/30) ))
mimage_parts = mobileimg.split()
rm = mimage_parts[0]
gm = mimage_parts[1]
bm = mimage_parts[2]
mobileimg = Image.merge("RGB", (rm, gm, bm))
filename="Mobile_ImageGrabcut"+self.testCaseId+'.png'
filepath=os.path.join(mmPath,filename)
mobileimg.save(filepath)
mobileArr = BytesIO()
mobileimg.save(mobileArr, format='bmp')
self.wsheet.insert_bitmap_data(mobileArr.getvalue(),self.newIndex,11)
self.wb.save('Testcase.xls')
mobileimg.close()
    def writeHistograms(self):
        """Embed HSV histogram images into the HSV worksheet.

        Web histogram goes into column 3, mobile histogram into column 4.
        Previously archived histograms are re-embedded for the existing
        rows, then the current run's histograms are uploaded to Firebase,
        shrunk, archived and written into this test case's row.
        """
        webpath=os.path.join('E:\\NutritionTracking\\TestCases\\ImageProcessed_Pictures\\Histograms','WebImageHistogram.jpg')
        mobilepath=os.path.join('E:\\NutritionTracking\\TestCases\\ImageProcessed_Pictures\\Histograms','PhotoImageHistogram.jpg')
        wwPath="E:\\NutritionTracking\\ImageClassifier_and_MeasureImageDifference\\Backup_images\\Web_Histograms"
        mmPath="E:\\NutritionTracking\\ImageClassifier_and_MeasureImageDifference\\Backup_images\\Mobile_Histograms"
        if self.rsheet.nrows>1:
            wdirs=os.listdir("E:\\NutritionTracking\\ImageClassifier_and_MeasureImageDifference\\Backup_images\\Web_Histograms")
            mdirs=os.listdir("E:\\NutritionTracking\\ImageClassifier_and_MeasureImageDifference\\Backup_images\\Mobile_Histograms")
            # Start at the second-newest backup and walk backwards, one per row.
            iw=len(wdirs)-1
            im=len(mdirs)-1
            iw-=1
            im-=1
            for rowIndex in range(1,self.rsheet.nrows):
                # testCaseId is read but unused in this loop.
                testCaseId=str(self.rsheet.cell(rowIndex,0).value)
                wPath=os.path.join(wwPath,wdirs[iw])
                wImg=Image.open(wPath)
                wArr = BytesIO()
                wImg.save(wArr, format='bmp')
                self.wHSVsheet.insert_bitmap_data(wArr.getvalue(),rowIndex,3)
                wImg.close()
                iw-=1
                mPath=os.path.join(mmPath,mdirs[im])
                mImg=Image.open(mPath)
                mArr = BytesIO()
                mImg.save(mArr, format='bmp')
                self.wHSVsheet.insert_bitmap_data(mArr.getvalue(),rowIndex,4)
                mImg.close()
                im-=1
                # Stop once both backup folders are exhausted.
                if iw<0 and im<0:
                    break
            self.wb.save('Testcase.xls')
        # Current run: upload, shrink (width /10, height /30), drop alpha,
        # archive a copy and embed the web histogram.
        self.saveTestImageToFirebase(webpath,"web_histogram")
        webimg = Image.open(webpath)
        webimg = webimg.resize((round(webimg.size[0]/10), round(webimg.size[1]/30)))
        wimage_parts = webimg.split()
        rw = wimage_parts[0]
        gw = wimage_parts[1]
        bw = wimage_parts[2]
        webimg = Image.merge("RGB", (rw, gw, bw))
        filename="Web_ImageHistogram"+self.testCaseId+'.png'
        filepath=os.path.join(wwPath,filename)
        webimg.save(filepath)
        webArr = BytesIO()
        webimg.save(webArr, format='bmp')
        self.wHSVsheet.insert_bitmap_data(webArr.getvalue(),self.newIndex,3)
        self.wb.save('Testcase.xls')
        webimg.close()
        # Same treatment for the mobile histogram (column 4).
        self.saveTestImageToFirebase(mobilepath,"mobile_histogram")
        mobileimg = Image.open(mobilepath)
        mobileimg = mobileimg.resize( (round(mobileimg.size[0]/10),round(mobileimg.size[1]/30) ))
        mimage_parts = mobileimg.split()
        rm = mimage_parts[0]
        gm = mimage_parts[1]
        bm = mimage_parts[2]
        mobileimg = Image.merge("RGB", (rm, gm, bm))
        filename="Mobile_ImageHistogram"+self.testCaseId+'.png'
        filepath=os.path.join(mmPath,filename)
        mobileimg.save(filepath)
        mobileArr = BytesIO()
        mobileimg.save(mobileArr, format='bmp')
        self.wHSVsheet.insert_bitmap_data(mobileArr.getvalue(),self.newIndex,4)
        self.wb.save('Testcase.xls')
        mobileimg.close()
    def writeOverlayImage(self):
        """Embed the overlay (overlapped web+mobile) image into column 12.

        Re-embeds archived overlay backups for the existing rows, then
        uploads the current overlay to Firebase, shrinks and archives it,
        and writes it into this test case's row.
        """
        ovlpath=os.path.join('E:\\NutritionTracking\\TestCases\\ImageProcessed_Pictures\\Overlay\\Overlapped Images.png')
        ovl_Path='E:\\NutritionTracking\\ImageClassifier_and_MeasureImageDifference\\Backup_images\\Overlay_Images'
        if self.rsheet.nrows>1:
            ovldirs=os.listdir("E:\\NutritionTracking\\ImageClassifier_and_MeasureImageDifference\\Backup_images\\Overlay_Images")
            # Start at the second-newest backup and walk backwards, one per row.
            iovl=len(ovldirs)-1
            iovl-=1
            for rowIndex in range(1,self.rsheet.nrows):
                # testCaseId is read but unused in this loop.
                testCaseId=str(self.rsheet.cell(rowIndex,0).value)
                ovlPath=os.path.join(ovl_Path,ovldirs[iovl])
                ovlImg=Image.open(ovlPath)
                ovlArr = BytesIO()
                ovlImg.save(ovlArr, format='bmp')
                self.wsheet.insert_bitmap_data(ovlArr.getvalue(),rowIndex,12)
                ovlImg.close()
                iovl-=1
                if iovl<0:
                    break
            self.wb.save('Testcase.xls')
        # Current run: upload, shrink to 1/30 size, drop alpha, archive,
        # and embed into this test case's row.
        self.saveTestImageToFirebase(ovlpath,"overlay_image")
        ovlImg = Image.open(ovlpath)
        ovlImg = ovlImg.resize((round(ovlImg.size[0]/30), round(ovlImg.size[1]/30)))
        ovlimage_parts = ovlImg.split()
        rl = ovlimage_parts[0]
        gl = ovlimage_parts[1]
        bl = ovlimage_parts[2]
        ovlImg = Image.merge("RGB", (rl, gl, bl))
        filename="overlayed_Image"+self.testCaseId+'.png'
        filepath=os.path.join(ovl_Path,filename)
        ovlImg.save(filepath)
        ovlArr = BytesIO()
        ovlImg.save(ovlArr, format='bmp')
        self.wsheet.insert_bitmap_data(ovlArr.getvalue(),self.newIndex,12)
        self.wb.save('Testcase.xls')
        ovlImg.close()
# Store label in Ms Excel Workbook
def writePredictedLabels(self,webPrediction,mobilePrediction):
self.wsheet.write(self.newIndex,2,webPrediction)
self.wsheet.write(self.newIndex,4,mobilePrediction)
self.wb.save('Testcase.xls')
# Store HSV Range and Feedback in Ms Excel Workbook
def writeHSVRange(self,hsvRange,hsvFeedback):
self.wHSVsheet.write(self.newIndex,1,hsvRange)
self.wHSVsheet.write(self.newIndex,2,hsvFeedback)
self.wb.save('Testcase.xls')
# Store Number of pixels for web image and mobile image,size difference as well as its feedback
def writeSizeComparison(self,web_size,mobile_size,size_diff,size_feedback):
self.wsheet.write(self.newIndex,6,web_size)
self.wsheet.write(self.newIndex,7,mobile_size)
self.wsheet.write(self.newIndex,8,size_diff)
self.wsheet.write(self.newIndex,9,size_feedback)
self.wb.save('Testcase.xls')
# Store number of pixels for curries and banana with respective to color
def writePixelsOfColor(self,riceColor,dhalColor,sambalColor,bananaColor):
self.wsheetColor.write(self.newIndex,1,riceColor)
self.wsheetColor.write(self.newIndex,2,dhalColor)
self.wsheetColor.write(self.newIndex,3,sambalColor)
self.wsheetColor.write(self.newIndex,4,bananaColor)
self.wb.save('Testcase.xls')
def writePercentOfColor(self,percent_riceColor,percent_DhalColor,percent_SambalColor,percent_BananaColor):
self.wsheetColor.write(self.newIndex,5,percent_riceColor)
self.wsheetColor.write(self.newIndex,6,percent_DhalColor)
self.wsheetColor.write(self.newIndex,7,percent_SambalColor)
self.wsheetColor.write(self.newIndex,8,percent_BananaColor)
self.wb.save('Testcase.xls')
# Two methods erase image files in Directory
def clearBackupPhase1(self):
if os.stat('E:\\NutritionTracking\\ImageClassifier_and_MeasureImageDifference\\Backup_images\\Web_Grabcut').st_size >0:
for f in os.listdir('E:\\NutritionTracking\\ImageClassifier_and_MeasureImageDifference\\Backup_images\\Web_Grabcut'):
os.remove(os.path.join('E:\\NutritionTracking\\ImageClassifier_and_MeasureImageDifference\\Backup_images\\Web_Grabcut', f))
if os.stat('E:\\NutritionTracking\\ImageClassifier_and_MeasureImageDifference\\Backup_images\\Mobile_Grabcut').st_size >0:
for f in os.listdir('E:\\NutritionTracking\\ImageClassifier_and_MeasureImageDifference\\Backup_images\\Mobile_Grabcut'):
os.remove(os.path.join('E:\\NutritionTracking\\ImageClassifier_and_MeasureImageDifference\\Backup_images\\Mobile_Grabcut', f))
if os.stat('E:\\NutritionTracking\\ImageClassifier_and_MeasureImageDifference\\Backup_images\\Mobile_Images').st_size >0:
for f in os.listdir('E:\\NutritionTracking\\ImageClassifier_and_MeasureImageDifference\\Backup_images\\Mobile_Images'):
os.remove(os.path.join('E:\\NutritionTracking\\ImageClassifier_and_MeasureImageDifference\\Backup_images\\Mobile_Images', f))
if os.stat('E:\\NutritionTracking\\ImageClassifier_and_MeasureImageDifference\\Backup_images\\Web_Images').st_size >0:
for f in os.listdir('E:\\NutritionTracking\\ImageClassifier_and_MeasureImageDifference\\Backup_images\\Web_Images'):
os.remove(os.path.join('E:\\NutritionTracking\\ImageClassifier_and_MeasureImageDifference\\Backup_images\\Web_Images', f))
print("Backup images Phase 1 are all Cleared")
def clearBackupPhase2(self):
if os.stat('E:\\NutritionTracking\\ImageClassifier_and_MeasureImageDifference\\Backup_images\\Overlay_Images').st_size >0:
for f in os.listdir('E:\\NutritionTracking\\ImageClassifier_and_MeasureImageDifference\\Backup_images\\Overlay_Images'):
os.remove(os.path.join('E:\\NutritionTracking\\ImageClassifier_and_MeasureImageDifference\\Backup_images\\Overlay_Images', f))
if os.stat('E:\\NutritionTracking\\ImageClassifier_and_MeasureImageDifference\\Backup_images\\Web_Histograms').st_size >0:
for f in os.listdir('E:\\NutritionTracking\\ImageClassifier_and_MeasureImageDifference\\Backup_images\\Web_Histograms'):
os.remove(os.path.join('E:\\NutritionTracking\\ImageClassifier_and_MeasureImageDifference\\Backup_images\\Web_Histograms', f))
if os.stat('E:\\NutritionTracking\\ImageClassifier_and_MeasureImageDifference\\Backup_images\\Mobile_Histograms').st_size >0:
for f in os.listdir('E:\\NutritionTracking\\ImageClassifier_and_MeasureImageDifference\\Backup_images\\Mobile_Histograms'):
os.remove(os.path.join('E:\\NutritionTracking\\ImageClassifier_and_MeasureImageDifference\\Backup_images\\Mobile_Histograms', f))
print("Backup images Phase 2 are all Cleared")
# Erase record for the worksheets in Excel Workbook
def clearExcelSheets(self):
endIndex=self.rsheet.nrows
self.wsheet._cell_overwrite_ok = True
self.wsheetColor._cell_overwrite_ok = True
# print(self.wsheet._cell_overwrite_ok )
# print(self.wsheetColor._cell_overwrite_ok )
if self.rsheet.nrows>1:
for rowIndex in range(1,endIndex):
#print(self.rsheetColor.cell(rowIndex,0))
self.wsheet.write(rowIndex,0,"")
self.wsheet.write(rowIndex,1,"")
self.wsheet.write(rowIndex,2,"")
self.wsheet.write(rowIndex,3,"")
self.wsheet.write(rowIndex,4,"")
self.wsheet.write(rowIndex,5,"")
self.wsheet.write(rowIndex,5,"")
self.wsheet.write(rowIndex,6,"")
self.wsheet.write(rowIndex,7,"")
self.wsheet.write(rowIndex,8,"")
self.wsheet.write(rowIndex,9,"")
self.wsheet.write(rowIndex,10,"")
self.wsheet.write(rowIndex,11,"")
self.wsheet.write(rowIndex,12,"")
self.wHSVsheet.write(rowIndex,0,"")
self.wHSVsheet.write(rowIndex,1,"")
self.wHSVsheet.write(rowIndex,2,"")
self.wHSVsheet.write(rowIndex,3,"")
self.wHSVsheet.write(rowIndex,4,"")
self.wsheetColor.write(rowIndex,0,"")
self.wsheetColor.write(rowIndex,1,"")
self.wsheetColor.write(rowIndex,2,"")
self.wsheetColor.write(rowIndex,3,"")
self.wsheetColor.write(rowIndex,4,"")
self.wsheetColor.write(rowIndex,5,"")
self.wsheetColor.write(rowIndex,6,"")
self.wsheetColor.write(rowIndex,7,"")
self.wsheetColor.write(rowIndex,8,"")
desktopImages_ref.delete()
self.wsheet._cell_overwrite_ok = False
self.wsheetColor._cell_overwrite_ok = False
self.wb.save('Testcase.xls')
print("Excel Sheets are cleared")
# Adjust width of columns and height of rows
def autoAdjustExcelSheet(self):
#print(self.wsheet.row(self.newIndex).height)
endIndex1=self.rsheet.nrows
endIndex2=self.rHSVsheet.nrows
if self.rsheet.nrows>1:
for rowIndex in range(1,endIndex1):
self.wsheet.row(rowIndex).height_mismatch = True
self.wsheet.row(rowIndex).height=4500
if self.rHSVsheet.nrows>1:
for rowIndex in range(1,endIndex2):
self.wHSVsheet.row(rowIndex).height_mismatch = True
self.wHSVsheet.row(rowIndex).height=5500
#print(self.wsheet.row(rowIndex).height)
self.wsheet.col(0).width=15220
self.wsheet.col(1).width=15220
self.wsheet.col(2).width=15220
self.wsheet.col(3).width=15220
self.wsheet.col(4).width=15220
self.wsheet.col(5).width=15220
self.wsheet.col(6).width=15220
self.wsheet.col(7).width=15220
self.wsheet.col(8).width=15220
self.wsheet.col(9).width=15220
self.wsheet.col(10).width=15220
self.wsheet.col(11).width=15220
self.wsheet.col(12).width=15220
self.wHSVsheet.col(0).width=15220
self.wHSVsheet.col(1).width=15220
self.wHSVsheet.col(2).width=15220
self.wHSVsheet.col(3).width=60000
self.wHSVsheet.col(4).width=60000
self.wsheetColor.col(0).width=17220
self.wsheetColor.col(1).width=17220
self.wsheetColor.col(2).width=17220
self.wsheetColor.col(3).width=17220
self.wsheetColor.col(4).width=17220
self.wsheetColor.col(5).width=17220
self.wsheetColor.col(6).width=17220
self.wsheetColor.col(7).width=17220
self.wsheetColor.col(8).width=17220
self.wsheet.row(self.newIndex).height_mismatch = True
self.wsheet.row(self.newIndex).height=4500
self.wHSVsheet.row(self.newIndex).height_mismatch = True
self.wHSVsheet.row(self.newIndex).height=5500
self.wb.save('Testcase.xls')
| KajavathananM/NutritionTracking_DesktopApplication | NutritionTracking_DesktopApplication/NutritionTracking/ImageClassifier_and_MeasureImageDifference/TestStorageController.py | TestStorageController.py | py | 26,118 | python | en | code | 0 | github-code | 90 |
import h5py
import numpy as np
import matplotlib.pyplot as plt
import os
import core
from scipy.ndimage import gaussian_filter1d
# Embed TrueType (editable) fonts when exporting figures to PDF.
plt.rcParams['pdf.fonttype'] = 42
fig, ax = plt.subplots(
    nrows=1, ncols=1, figsize = (8, 6), constrained_layout = True)
# core.dataset_search() is a project helper; presumably it returns the
# available behaviour datasets and their names -- confirm against core.py.
datasets, dataset_names = core.dataset_search()
# One trace colour per dataset.
dset_colors = ['pink', 'blue']
for dset in range(len(datasets)):
    # Post-reversal performance, nested per mouse (trial window of 1000).
    postrev_performance_nested = (
        datasets[dset].get_post_reversal_performance(1000))
    # Object array: one smoothed mean trace (or empty array) per mouse.
    mouse_mean = np.empty(len(datasets[dset].mouse_list), dtype=np.ndarray)
    for mouse in range(len(postrev_performance_nested)):
        if len(postrev_performance_nested[mouse]) > 0:
            performance_array = core.as_array(postrev_performance_nested[mouse])
            # Average across sessions, then smooth with a Gaussian kernel.
            mean_performance = np.mean(performance_array, axis=0)
            conv_mean_performance = gaussian_filter1d(mean_performance, sigma=15)
            mouse_mean[mouse] = conv_mean_performance
            ax.plot(conv_mean_performance, color=dset_colors[dset],
                    label=datasets[dset].mouse_list[mouse], lw=0.75)
        else:
            # Mice without data contribute an empty trace.
            mouse_mean[mouse] = np.array([])
    # Population mean across mice, ignoring NaNs, drawn as a thicker line.
    population_array = core.as_array(mouse_mean)
    population_mean = np.nanmean(population_array, axis=0)
    ax.plot(population_mean, color=dset_colors[dset],
            lw=2, label='Population mean')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.set_ylim(0,1)
ax.set_xlabel('Training Day')
ax.set_ylabel('Fraction Correct Trials')
ax.legend()
plt.show()
| smail031/behavior_analysis | postrev_performance.py | postrev_performance.py | py | 1,538 | python | en | code | 0 | github-code | 90 |
69964892136 | import cv2
import numpy as np
import mxnet as mx
from sklearn.preprocessing import normalize
from reid.insightface.mtcnn import MtcnnDetector
from reid.insightface.utils import preprocess
def get_embedder(ctx, image_size, model_prefix: str, layer):
    """Load an MXNet checkpoint and return a module whose output is `layer`.

    The checkpoint at epoch 0 of `model_prefix` is loaded, truncated at
    `layer` (its '<layer>_output' internal symbol), bound for a single
    3-channel image of `image_size` (height, width) and parameterised.
    """
    sym, arg_params, aux_params = mx.model.load_checkpoint(model_prefix, 0)
    sym = sym.get_internals()[layer + '_output']
    module = mx.mod.Module(symbol=sym, context=ctx, label_names=None)
    height, width = image_size
    module.bind(data_shapes=[('data', (1, 3, height, width))])
    module.set_params(arg_params, aux_params)
    return module
class ArcFaceModel:
    """Face-embedding pipeline: MTCNN detection/alignment followed by an
    ArcFace-style embedder that produces an L2-normalised feature vector."""
    def __init__(self, embedder_path, mtcnn_path, image_size=(112, 112)):
        # image_size: (height, width) expected by the embedding network.
        self.image_size = image_size
        self.ctx = mx.cpu()
        # 'fc1' is the embedding layer of the checkpoint.
        self.embedder = get_embedder(self.ctx, image_size, embedder_path, 'fc1')
        self.detector = MtcnnDetector(
            model_folder=mtcnn_path,
            ctx=self.ctx,
            accurate_landmark=True,
            threshold=[0.6, 0.7, 0.8]
        )
    def predict(self, image):
        """Detect a face in `image` and return its embedding, or None if no face."""
        embedding = None
        preprocessed_img, bbox, landmark = self.detect(image)
        if preprocessed_img is not None:
            embedding = self.embed(preprocessed_img)
        return embedding
    def align(self, image, bbox, landmark):
        """Warp the detected face to the target size and convert to CHW RGB."""
        # MTCNN landmarks arrive as a flat (10,) vector; reshape to (5, 2).
        landmark = landmark.reshape((2, 5)).T
        preprocessed_img = preprocess(image, bbox, landmark, image_size=self.image_size)
        # OpenCV images are BGR; the embedder expects RGB in CHW order.
        preprocessed_img = cv2.cvtColor(preprocessed_img, cv2.COLOR_BGR2RGB)
        preprocessed_img = np.transpose(preprocessed_img, (2, 0, 1))
        return preprocessed_img, bbox, landmark
    def detect(self, image):
        """Run MTCNN on `image`; return the aligned first face or (None, None, None)."""
        bboxes, landmarks = self.detector.detect_face(image)
        if bboxes is None:
            return None, None, None
        # Column 4 holds the detection score; the rest is the box.
        bboxes, scores = bboxes[:, :4], bboxes[:, 4]
        # Only the first detection is used.
        return self.align(image, bboxes[0], landmarks[0])
    def embed(self, image):
        """Return the L2-normalised embedding vector for a preprocessed face."""
        input_blob = np.expand_dims(image, axis=0)
        data = mx.nd.array(input_blob)
        db = mx.io.DataBatch(data=(data,))
        self.embedder.forward(db, is_train=False)
        embedding = self.embedder.get_outputs()[0].asnumpy()
        embedding = normalize(embedding).flatten()
        return embedding
| amirassov/topcoder-facial-marathon | reid/insightface/model.py | model.py | py | 2,267 | python | en | code | 10 | github-code | 90 |
class Town:
    """A town located at integer grid coordinates (x, y)."""

    def __init__(self, x, y):
        # Cartesian coordinates on the plane.
        self.x = x
        self.y = y
from itertools import permutations

# Read N towns as "x y" integer pairs, one per line after the count.
N = int(input())
towns = []
for n in range(N):
    d = input().split()
    x, y = map(int, d)
    towns.append(Town(x, y))

# Brute force: average tour length over every possible visiting order.
# (N! orders -- only feasible for the small N this exercise guarantees.)
roots = list(permutations(towns))
distances = 0
for root in roots:
    distance = 0
    for i in range(len(root) - 1):
        start = root[i]
        end = root[i + 1]
        distance += ((start.x - end.x) ** 2 + (start.y - end.y) ** 2) ** (1 / 2)
    distances += distance

average = distances / len(roots)
print(average)
5528616791 | """
Back Testing - Trading Strategy - RSI , MV and Bollianger Band
"""
import os
import backtrader.sizers
import pandas as pd
import yfinance as yf
import backtrader as bt
import backtrader.analyzers as btanalyzer
import numpy as np
from self.tradingSetup.backtrader.Strategy.RSI import rsi
from self.tradingSetup.backtrader.Strategy.mvav import mvav
from self.tradingSetup.backtrader.Strategy.BBand_Strategy import BBand_Strategy
cwd = os.getcwd()
print(f"Current working directory: {cwd}")
# Widen pandas' console output so wide OHLC frames print without wrapping.
DESIRED_WIDTH = 320
pd.set_option('display.width', DESIRED_WIDTH)
pd.set_option('display.max_columns', 30)
pd.set_option('display.max_rows', 2000)
class GenericCSV(bt.feeds.GenericCSVData):
    """CSV data feed extended with pivot/RSIpivot/divsignal lines,
    mapped to CSV columns 7, 8 and 9 respectively."""
    lines = ('pivot', 'RSIpivot', 'divsignal')
    params = (('pivot', 7), ('RSIpivot', 8), ('divsignal', 9))
class BacktestBackTrader:
    """Wrapper around a backtrader Cerebro engine pre-configured for backtests."""
    def __init__(self, ):
        # cheat_on_open: orders created on bar close execute at the next open.
        self.cerebro = bt.Cerebro(stdstats=False, cheat_on_open=True)
    def back_trader(self, data):
        """Run the RSI strategy over `data`, attach broker settings, sizers,
        observers and analyzers, execute the backtest and plot the result."""
        # Add Strategy
        self.cerebro.addstrategy(rsi)
        # Add Data
        self.cerebro.adddata(data)
        self.cerebro.broker.setcash(100000000.0)
        self.cerebro.broker.setcommission(commission=0.000000001)
        # ADD Observer
        self.cerebro.addobserver(bt.observers.BuySell)
        self.cerebro.addobserver(bt.observers.Value)
        # NOTE(review): two PercentSizer registrations (100% then 10%);
        # presumably only one takes effect -- confirm which is intended.
        self.cerebro.addsizer(backtrader.sizers.PercentSizer, percents=100)
        self.cerebro.addsizer(bt.sizers.PercentSizer, percents=10)
        # ADD Analyzer
        self.cerebro.addanalyzer(btanalyzer.SharpeRatio, _name="sharpe")
        self.cerebro.addanalyzer(btanalyzer.DrawDown, _name="drawdown")
        self.cerebro.addanalyzer(btanalyzer.Transactions, _name="tran")
        self.cerebro.addanalyzer(btanalyzer.TradeAnalyzer, _name="Trade")
        self.cerebro.addanalyzer(btanalyzer.Returns, _name="returns")
        self.cerebro.addanalyzer(bt.analyzers.TimeReturn, timeframe=bt.TimeFrame.NoTimeFrame)
        self.cerebro.addanalyzer(bt.analyzers.TradeAnalyzer, _name="ta")
        backtest_result = self.cerebro.run(maxcpus=1, stdstats=False, runonce=False, safediv=False)
        # Graph plotting
        self.cerebro.plot(iplot=True, volume=False)
if __name__ == "__main__":
    # Download one month of 5-minute candles from Yahoo Finance.
    df = yf.download(tickers='INFY.NS', period='1mo', interval='5m', progress=False)
    df = df.reset_index()
    df.rename(columns={"Datetime": "Date"}, inplace=True)
    df.Date = pd.to_datetime(df.Date)
    # Strip the timezone so backtrader's CSV parser accepts the timestamps.
    df['Date'] = df['Date'].dt.tz_localize(None)
    df = df.set_index("Date")
    # Bug fix: the original mapped 'Open' to the *builtin* `open` function
    # instead of the string 'open', producing a column literally named
    # <built-in function open>.
    df = df.rename(columns={'Open': 'open', 'High': 'high', 'Low': 'low', 'Close': 'close'})
    # Placeholder columns required by the GenericCSV feed (cols 7-9).
    df["pivot"] = np.nan
    df["RSIpivot"] = np.nan
    df["divsignal"] = np.nan
    df.to_csv("backtest_backtrader.csv")
    data1 = GenericCSV(dataname="backtest_backtrader.csv")
    # data1 = bt.feeds.PandasData(dataname=df)
    BacktestBackTrader().back_trader(data=data1)
| ankitrawat85/QuantProjects | Backtesting_backtrader/code/backtest_backTrader.py | backtest_backTrader.py | py | 2,951 | python | en | code | 0 | github-code | 90 |
27087872038 | import os
import stat
import re
import llnl.util.tty as tty
import spack.paths
import spack.modules
# Character limit for shebang line. Using Linux's 127 characters
# here, as it is the shortest I could find on a modern OS.
shebang_limit = 127


def shebang_too_long(path):
    """Return True if the file at `path` starts with a shebang line longer
    than `shebang_limit` bytes (including the trailing newline).

    Non-files and files without a ``#!`` prefix return False.
    """
    if not os.path.isfile(path):
        return False
    with open(path, 'rb') as script:
        magic = script.read(2)  # renamed from `bytes`: don't shadow the builtin
        if magic != b'#!':
            return False
        line = magic + script.readline()
    return len(line) > shebang_limit
def filter_shebang(path):
    """Adds a second shebang line, using sbang, at the beginning of a file."""
    with open(path, 'r') as original_file:
        original = original_file.read()

    # Line that will be prepended to the script.
    new_sbang_line = '#!/bin/bash %s/bin/sbang\n' % spack.paths.prefix

    # Nothing to do if the file was already patched.
    if original.startswith(new_sbang_line):
        return

    # Newlines are excluded from the patterns so that a mere mention of
    # "lua"/"node" later in the document cannot trigger a rewrite.
    # Lua comments start with --, so the second line uses --! instead of #!.
    if re.search(r'^#!(/[^/\n]*)*lua\b', original):
        original = re.sub(r'^#', '--', original)
    # node.js uses //! instead of #! on the second line.
    if re.search(r'^#!(/[^/\n]*)*node\b', original):
        original = re.sub(r'^#', '//', original)

    # Temporarily grant write permission when the file is read-only.
    saved_mode = None
    if not os.access(path, os.W_OK):
        saved_mode = os.stat(path).st_mode
        os.chmod(path, saved_mode | stat.S_IWRITE)

    with open(path, 'w') as new_file:
        new_file.write(new_sbang_line)
        new_file.write(original)

    # Restore original permissions.
    if saved_mode is not None:
        os.chmod(path, saved_mode)

    tty.warn("Patched overlong shebang in %s" % path)
def filter_shebangs_in_directory(directory, filenames=None):
    """Patch every regular file in `directory` whose shebang is too long.

    If `filenames` is None the directory is listed; otherwise only the
    given names are considered.
    """
    if filenames is None:
        filenames = os.listdir(directory)
    for name in filenames:
        path = os.path.join(directory, name)
        # Only regular files can carry a shebang.
        if not os.path.isfile(path):
            continue
        # Skip symlinks whose target escapes this package's prefix.
        if os.path.islink(path):
            if not os.path.realpath(path).startswith(directory + os.sep):
                continue
        if shebang_too_long(path):
            filter_shebang(path)
def post_install(spec):
    """This hook edits scripts so that they call /bin/bash
    $spack_prefix/bin/sbang instead of something longer than the
    shebang limit.
    """
    if spec.external:
        # External packages are not managed by spack; leave them untouched.
        tty.debug('SKIP: shebang filtering [external package]')
        return
    for dirpath, _dirnames, files in os.walk(spec.prefix):
        filter_shebangs_in_directory(dirpath, files)
| matzke1/spack | lib/spack/spack/hooks/sbang.py | sbang.py | py | 3,033 | python | en | code | 2 | github-code | 90 |
43573604211 | from array import *
from pip._vendor.distlib.compat import raw_input
import random
def randomNumbers():
    """Read `n` integers from the user, then print their sum and count.

    Bug fix: the old element counter started at 1, so the reported
    "Number of elements" was always one more than the actual count; the
    count is simply `n`.
    """
    n = int(input("Write the number of elements: "))
    total = 0
    for _ in range(n):
        total += int(input("Write the numbers: "))
    print("{} {}".format("Total sum of elements: ", total))
    print("{} {}".format("Number of elements: ", n))
def convertDegreesCelsiusToFahrenheit():
    """Ask for a Celsius value, print the Fahrenheit equivalent, return to menu."""
    celsius = int(input("Celsius â: "))
    converted = (celsius * 9 / 5) + 32
    print(str(converted) + "ยฐF")
    mainMenu()
def convertMetersToFeet():
    """Ask for a length in metres, print it in feet, return to menu."""
    meters = int(input("Enter the meters: "))
    FEET_PER_METER = 3.281
    converted = float(meters) * FEET_PER_METER
    print(str(converted) + "ft")
    mainMenu()
def convertDollarToPesos():
    """Ask for a dollar amount, print the Dominican-peso equivalent, return to menu."""
    dollar = int(input("Dollar amount: "))
    PESOS_PER_DOLLAR = 58.80
    converted = float(dollar * PESOS_PER_DOLLAR)
    print("The amount in pesos: " + str(converted))
    mainMenu()
def mainMenu():
    """Show the conversion menu and dispatch to the selected option."""
    print("\n 1. Convertir grados a Celsius a Fahrenheit \n 2. Convertir dรณlar a pesos \n 3. Convertir metros a pies "
          "\n 4. Salir")
    selection = int(input("\nEnter your choice: "))
    actions = {
        1: convertDegreesCelsiusToFahrenheit,
        2: convertDollarToPesos,
        3: convertMetersToFeet,
        4: exit,
    }
    if selection in actions:
        actions[selection]()
    else:
        print("Unknown Option Selected!. Enter 1-4")
def Multiply():
    """Print the 1-12 multiplication table for every multiple of 5 below 1000."""
    for base in range(5, 1000, 5):
        for factor in range(1, 13):
            print(base, "*", factor, "=", base * factor)
def AFP():
    """Annualised pension-fund (AFP) contribution factor: 2.87% monthly x 12."""
    monthly_rate = 0.0287
    return monthly_rate * 12
def SFS():
    """Annualised health-insurance (SFS) contribution factor: 3.04% monthly x 12."""
    monthly_rate = 0.0304
    return monthly_rate * 12
def annualSalary(salary):
    """Print and return the annual salary for a monthly `salary`."""
    annual = salary * 12
    print("Annual Salary: ", "RD$", annual)
    return float(annual)
def ISFCaculator():
    """Estimate the monthly ISR (income tax) for the salary typed by the user.

    Annual brackets (RD$): up to 416,220 / 624,329 / 867,123 / above.
    """
    salary = float(input("Enter your salary: "))
    bracket_one = 416220
    bracket_two = 624329
    bracket_three = 867123
    annual = annualSalary(salary)
    if annual <= bracket_one:
        isr = float(annual * AFP() * SFS())
    elif annual <= bracket_two:
        excess = annual - bracket_one
        isr = float(excess * 0.15 * AFP() * SFS())
    elif annual <= bracket_three:
        excess = annual - bracket_two
        isr = float(312116 + (excess * 0.15 * AFP() * SFS()))
    else:
        excess = annual - bracket_three
        isr = float(79776 + (excess * 0.25 * AFP() * SFS()))
    print("Your ISR: ", "RD$", int(isr / 12))
def cashMachine():
    """Simulate an ATM withdrawal, breaking the amount into 1000/500/100 bills."""
    print("Select bank: \n 1. ABC \n 2. other")
    bank = int(input())
    if bank == 1:
        cash_money = int(input("Enter the amount ro retire: "))
        for denomination in (1000, 500, 100):
            count = cash_money / denomination
            # Cap check kept inside the loop to mirror the original flow.
            if cash_money > 10000:
                print("Your limit: RD$10000")
                break
            elif count > 0:
                print(int(count), "bills", denomination, "Dominican Pesos")
                cash_money %= denomination
    if bank == 2:
        print("This bank is not available")
# Script entry sequence: exercise each feature in turn.
randomNumbers()
mainMenu()
Multiply()
ISFCaculator()
cashMachine()
| Jhalinson/Python | Practices/Practice2.py | Practice2.py | py | 3,198 | python | en | code | 1 | github-code | 90 |
# Convert a PDF to a Word document via the MS Word COM automation interface.
# (The original header said the opposite of what the code does.)
# !pip install pypiwin32 (this is a pre installed library if not found install it)
import win32com.client

# Access MS Word application to read the file
word = win32com.client.Dispatch("Word.Application")
word.visible = 0

# File Paths
pdfDoc = "path\\to\\pdf\\samplepdf.pdf"
wordDoc = "path\\to\\word\\NewDoc.docx"

# Open the PDF in Word, then save it back out as a .docx document.
wordObj = word.Documents.Open(pdfDoc)
# Bug fix: SaveAs expects the target *file name*; the original passed the
# document COM object itself (SaveAs(wordObj, ...)).
wordObj.SaveAs(wordDoc, FileFormat=16)  # 16 = wdFormatDocumentDefault (.docx)
# for more file formats refer the link "https://docs.microsoft.com/en-us/office/vba/api/word.wdsaveformat"
wordObj.Close()
word.Quit()
| sprao-cs/Python-Scripts | pdf2word.py | pdf2word.py | py | 654 | python | en | code | 0 | github-code | 90 |
import sys

# Fast input: read whole lines straight from stdin (intentionally shadows
# the builtin `input`, a common competitive-programming idiom).
input = sys.stdin.readline


def MI():
    """Read one line and return its whitespace-separated fields as ints."""
    fields = input().split()
    return map(int, fields)
def main():
    """BFS in steps of exactly three edges.

    Reads a directed graph with n vertices and m edges plus a start `s`
    and target `t` (all 1-indexed), and prints the minimum k such that t
    is reachable from s by a walk of exactly 3*k edges, or -1 if no such
    walk exists.
    """
    n,m=MI()
    G=[[] for _ in range(n)]
    for _ in range(m):
        u,v=MI()
        u-=1
        v-=1
        G[u].append(v)
    s,t=MI()
    s-=1
    t-=1
    # Separate visited flags for vertices reached after 1, 2 and 3 edges
    # within a step (i.e. per residue class of the walk length mod 3).
    fi=[True]*n
    se=[True]*n
    th=[True]*n
    th[s]=False
    dq=[s]
    depth=0
    while dq:
        depth+=1
        # tank2/tank3 hold the frontier after 1 and 2 edges of this step;
        # tank1 becomes the next step's 3-edge frontier.
        tank1=[]
        tank2=[]
        tank3=[]
        for p in dq:
            for c in G[p]:
                if fi[c]:
                    fi[c]=False
                    tank2.append(c)
        for p in tank2:
            for c in G[p]:
                if se[c]:
                    se[c]=False
                    tank3.append(c)
        for p in tank3:
            for c in G[p]:
                if th[c]:
                    th[c]=False
                    tank1.append(c)
                    # Target reached after exactly 3*depth edges.
                    if c==t:
                        print(depth)
                        exit()
        dq=tank1
    print(-1)
# Standard entry-point guard (dataset residue stripped from the call line).
if __name__ == '__main__':
    main()
25122708676 | # Utilizando a funรงรฃo input para coletar dados do usuรกrio
nome = input('Qual seu nome? ') #o programa sรณ continua se o usuario apertar enter
print(f'O seu nome รฉ {nome}')
numero1 = input('Digite um nรบmero: ')
numero2 = input('Digite outro nรบmero: ')
int_numero_1 = int(numero1)
int_numero_2 = int(numero2)
print(f'A soma รฉ: {int_numero_1 + int_numero_2}') | Remoguima/Curso_Python | aula15.py | aula15.py | py | 368 | python | pt | code | 0 | github-code | 90 |
37780740399 | """
From Map to Graph
Universidad Panamericana Campus Mixcoac
Inteligencia Artificial
Enrique Ulises Bรกez Gรณmez Tagle
Ivรกn Cruz Ledesma
Mauricio Pรฉrez Aguirre
April 26 2023
v 1.0
R:: Mauricio Pรฉrez Aguirre
"""
from queue import PriorityQueue
import time
def BeamSearch(graph, heuristics, start, goal):
# get the beam width from the user
beam_width = int(input("Beam Width: "))
start_time = time.time()
# if the start node is the same as the goal node, return the start node as the solution
if start == goal:
return [start]
# initialize the priority queue with the start node and its priority
frontier = PriorityQueue()
explored = set()
parents = {}
frontier.put((start, 0))
parents[start] = None
while not frontier.empty():
# select the top k nodes from the priority queue
candidates = []
for _ in range(beam_width):
if not frontier.empty():
candidates.append(frontier.get())
for candidate, _ in candidates:
# if the goal node has been reached, reconstruct the path and return it
if candidate == goal:
path = []
while candidate is not None:
path.append(candidate)
candidate = parents[candidate]
end_time = time.time()
print("Tiempo de ejecuciรณn: ",
end_time - start_time, "segundos")
return path[::-1]
# add the current node to the set of explored nodes
explored.add(candidate)
# expand the current node by visiting its neighboring nodes
for neighbor in graph.get_neighbors(candidate):
# if the neighbor has not been explored, calculate its priority and add it to the priority queue
if neighbor not in explored:
new_cost = heuristics.get_weight(neighbor, goal)
priority = new_cost
frontier.put((neighbor, priority))
# keep track of the parent node to reconstruct the path later
parents[neighbor] = candidate
# if no solution is found, return None
end_time = time.time()
print("Tiempo de ejecuciรณn: ", end_time - start_time, "segundos")
return None
| HeinrichGomTag/Artificial-Intelligence-Projects | Kikin-Informed-Search-Algorithms/Beam.py | Beam.py | py | 2,411 | python | en | code | 0 | github-code | 90 |
22126818016 | """
General Character commands usually available to all characters
"""
from django.conf import settings
from evennia.utils import utils, evtable
COMMAND_DEFAULT_CLASS = utils.class_from_module(settings.COMMAND_DEFAULT_CLASS)
# limit symbol import for API
__all__ = ("CmdLook", "CmdInventory", "CmdSetDesc", "CmdGet", "CmdDrop",
"CmdGive", "CmdSay", "CmdWhisper", "CmdPose", "CmdAccess")
class CmdLook(COMMAND_DEFAULT_CLASS):
"""
look at location or object
Usage:
look
look <obj>
look *<account>
Observes your location or objects in your vicinity.
"""
key = "look"
aliases = ["l", "ls"]
locks = "cmd:all()"
arg_regex = r"\s|$"
def func(self):
"""
Handle the looking.
"""
caller = self.caller
if not self.args:
target = caller.location
if not target:
caller.msg("You have no location to look at!")
return
else:
target = caller.search(self.args)
if not target:
return
if not target.access(self, "view"):
try:
return "Could not view '%s'." % target.get_display_name(self)
except AttributeError:
return "Could not view '%s'." % target.key
self.msg(target.return_appearance(caller))
class CmdInventory(COMMAND_DEFAULT_CLASS):
    """
    view inventory
    Usage:
      inventory
      inv
    Shows your inventory.
    """
    key = "inventory"
    aliases = ["inv", "i"]
    locks = "cmd:all()"
    arg_regex = r"$"
    def func(self):
        """check inventory"""
        carried = self.caller.contents
        if carried:
            # header followed by one line per carried object
            output = "You are carrying:" + "".join(
                "\n%s" % obj.name for obj in carried)
        else:
            output = "You are not carrying anything."
        self.caller.msg(output)
class CmdGet(COMMAND_DEFAULT_CLASS):
    """
    pick up something
    Usage:
      get <obj>
    Picks up an object from your location and puts it in
    your inventory.
    """
    key = "get"
    aliases = "grab"
    locks = "cmd:all()"
    arg_regex = r"\s|$"
    def func(self):
        """implements the command."""
        caller = self.caller
        if not self.args:
            caller.msg("Get what?")
            return
        target = caller.search(self.args, location=caller.location)
        if not target:
            # search() already told the caller why it failed
            return
        if caller == target:
            caller.msg("You can't get yourself.")
            return
        if not target.access(caller, 'get'):
            caller.msg("You can't get that.")
            return
        # move silently, then narrate the pickup ourselves
        target.move_to(caller, quiet=True)
        caller.msg("You pick up %s." % target.name)
        caller.location.msg_contents(
            "%s picks up %s." % (caller.name, target.name),
            exclude=caller)
class CmdDrop(COMMAND_DEFAULT_CLASS):
    """
    drop something
    Usage:
      drop <obj>
    Lets you drop an object from your inventory into the
    location you are currently in.
    """
    key = "drop"
    locks = "cmd:all()"
    arg_regex = r"\s|$"
    def func(self):
        """Implement command"""
        caller = self.caller
        if not self.args:
            caller.msg("Drop what?")
            return
        # DROP by definition acts on carried items, so restrict the
        # search to the caller's own contents.
        dropped = caller.search(
            self.args, location=caller,
            nofound_string="You aren't carrying %s." % self.args,
            multimatch_string="You carry more than one %s:" % self.args)
        if not dropped:
            return
        dropped.move_to(caller.location, quiet=True)
        caller.msg("You drop %s." % (dropped.name,))
        caller.location.msg_contents(
            "%s drops %s." % (caller.name, dropped.name),
            exclude=caller)
class CmdGive(COMMAND_DEFAULT_CLASS):
    """
    give away something to someone
    Usage:
      give <inventory obj> <to||=> <target>
    Gives an item from your inventory to another character,
    placing it in their inventory.
    """
    # BUG FIX: help text said "Gives an items"; this docstring is the
    # in-game help entry, so the typo was user-visible.
    key = "give"
    rhs_split = ("=", " to ")  # Prefer = delimiter, but allow " to " usage.
    locks = "cmd:all()"
    arg_regex = r"\s|$"
    def func(self):
        """Implement give"""
        caller = self.caller
        if not self.args or not self.rhs:
            caller.msg("Usage: give <inventory object> = <target>")
            return
        item = caller.search(
            self.lhs, location=caller,
            nofound_string="You aren't carrying %s." % self.lhs,
            multimatch_string="You carry more than one %s:" % self.lhs)
        recipient = caller.search(self.rhs)
        if not item or not recipient:
            # search() already reported the failure
            return
        if recipient == caller:
            caller.msg("You keep %s to yourself." % item.key)
            return
        if not item.location == caller:
            caller.msg("You are not holding %s." % item.key)
            return
        # hand the object over
        caller.msg("You give %s to %s." % (item.key, recipient.key))
        item.move_to(recipient, quiet=True)
        recipient.msg("%s gives you %s." % (caller.key, item.key))
class CmdSetDesc(COMMAND_DEFAULT_CLASS):
    """
    describe yourself
    Usage:
      setdesc <description>
    Add a description to yourself. This
    will be visible to people when they
    look at you.
    """
    key = "setdesc"
    locks = "cmd:all()"
    arg_regex = r"\s|$"
    def func(self):
        """add the description"""
        caller = self.caller
        if not self.args:
            caller.msg("You must add a description.")
            return
        # store on the typeclass attribute handler; whitespace trimmed
        caller.db.desc = self.args.strip()
        caller.msg("You set your description.")
class CmdSay(COMMAND_DEFAULT_CLASS):
    """
    speak as your character
    Usage:
      say <message>
    Talk to those in your current location.
    """
    key = "say"
    aliases = ['"', "'"]
    locks = "cmd:all()"
    def func(self):
        """Run the say command"""
        caller = self.caller
        speech = self.args
        if not speech:
            caller.msg("Say what?")
            return
        # echo to the speaker, then broadcast to everyone else in the room
        caller.msg('You say, "%s"' % speech)
        caller.location.msg_contents(
            text='%s says, "%s"' % (caller.name, speech),
            from_obj=caller, exclude=caller)
class CmdWhisper(COMMAND_DEFAULT_CLASS):
    """
    Speak privately as your character to another
    Usage:
      whisper <character> = <message>
    Talk privately to one or more characters in your current location, without
    others in the room being informed.
    """
    key = "whisper"
    locks = "cmd:all()"
    def func(self):
        """Run the whisper command"""
        caller = self.caller
        if not self.lhs or not self.rhs:
            caller.msg("Usage: whisper <character> = <message>")
            return
        receiver = caller.search(self.lhs)
        if not receiver:
            caller.msg("Whisper to whom?")
            # BUG FIX: missing return -- execution previously fell through
            # and dereferenced receiver.name on None, raising AttributeError.
            return
        speech = self.rhs
        if not speech:
            caller.msg("Whisper what?")
            return
        caller.msg('You whisper to %s, "%s"' % (receiver.name, speech))
        receiver.msg('%s whispers, "%s"' % (caller.name, speech))
class CmdPose(COMMAND_DEFAULT_CLASS):
    """
    strike a pose
    Usage:
      pose <pose text>
      pose's <pose text>
    Example:
      pose is standing by the wall, smiling.
       -> others will see:
      Tom is standing by the wall, smiling.
    Describe an action being taken. The pose text will
    automatically begin with your name.
    """
    key = "pose"
    aliases = [":", "emote"]
    locks = "cmd:all()"
    def parse(self):
        """
        Custom parsing: when the emote starts with a special character
        (e.g. 's), the pose must attach directly to the caller's name,
        so no separating space is inserted.
        """
        args = self.args
        if args and args[0] not in ("'", ",", ":"):
            args = " %s" % args.strip()
        self.args = args
    def func(self):
        """Hook function"""
        caller = self.caller
        if not self.args:
            caller.msg("What do you want to do?")
            return
        # broadcast "<name><pose text>" to the whole room (poser included)
        caller.location.msg_contents(text="%s%s" % (caller.key, self.args),
                                     from_obj=self.caller)
class CmdAccess(COMMAND_DEFAULT_CLASS):
    """
    show your current game access
    Usage:
      access
    This command shows you the permission hierarchy and
    which permission groups you are a member of.
    """
    key = "access"
    aliases = ["groups", "hierarchy"]
    locks = "cmd:all()"
    arg_regex = r"$"
    def func(self):
        """Load the permission groups"""
        caller = self.caller
        hierarchy_full = settings.PERMISSION_HIERARCHY
        string = "\n|wPermission Hierarchy|n (climbing):\n %s" % ", ".join(hierarchy_full)
        # BUG FIX: the original dereferenced caller.account unconditionally
        # here while only checking hasattr(caller, 'account') further below,
        # crashing for characters without an attached account.
        account = getattr(caller, 'account', None)
        if account and account.is_superuser:
            cperms = "<Superuser>"
            pperms = "<Superuser>"
        else:
            cperms = ", ".join(caller.permissions.all())
            pperms = ", ".join(account.permissions.all()) if account else ""
        string += "\n|wYour access|n:"
        string += "\nCharacter |c%s|n: %s" % (caller.key, cperms)
        if account:
            string += "\nAccount |c%s|n: %s" % (account.key, pperms)
        caller.msg(string)
| CloudKeeper/SimpleEvennia | commands/general.py | general.py | py | 9,781 | python | en | code | 0 | github-code | 90 |
72143624618 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
from gratipay.testing.billing import BillingHarness
class TestNMassPays(BillingHarness):
    # Integration tests for the /dashboard/nmasspays endpoint, which reports
    # the number of masspays recorded in the *penultimate* payday window.
    def setUp(self):
        BillingHarness.setUp(self)
        # NOTE(review): the trailing .username discards its value -- the call
        # is made only for the side effect of creating the admin participant.
        self.make_participant('admin', claimed_time='now', is_admin=True).username
        team = self.make_team(owner=self.homer, is_approved=True)
        self.obama.set_payment_instruction(team, '20.00')
    def post_masspays(self, n):
        # Record n admin-entered outbound exchanges ("masspays") for homer.
        # NOTE(review): unicode() is the Python 2 builtin -- this module is
        # py2 code (see the __future__ import at the top of the file).
        for i in range(n):
            self.client.PxST( '/~homer/history/record-an-exchange'
                            , { 'amount': '-20'
                              , 'fee': '0'
                              , 'note': 'Exchange!'
                              , 'status': 'succeeded'
                              , 'ref': 'transactionidref'
                              , 'route_id': unicode(self.homer_route.id)
                               }
                            , auth_as='admin'
                             )  # responds with 302
    def test_returns_zero_for_no_paydays(self):
        assert self.client.GET('/dashboard/nmasspays').body == '0'
    def test_returns_zero_for_one_payday(self):
        self.run_payday()
        assert self.client.GET('/dashboard/nmasspays').body == '0'
    def test_returns_zero_for_penultimate_payday_with_no_masspays(self):
        self.run_payday(); self.post_masspays(2)
        self.run_payday()
        self.run_payday(); self.post_masspays(1)
        assert self.client.GET('/dashboard/nmasspays').body == '0'
    def test_returns_three_for_penultimate_payday_with_three_masspays(self):
        # only the masspays between the 4th and 5th paydays should count
        self.run_payday(); self.post_masspays(1)
        self.run_payday(); self.post_masspays(4)
        self.run_payday(); self.post_masspays(2)
        self.run_payday(); self.post_masspays(3)
        self.run_payday(); self.post_masspays(8)
        assert self.client.GET('/dashboard/nmasspays').body == '3'
| gratipay/gratipay.com | tests/py/test_dashboard.py | test_dashboard.py | py | 1,963 | python | en | code | 1,121 | github-code | 90 |
3784498806 | import re
from bs4 import BeautifulSoup
from pre_commit_test.local_lib import LocalLibClass
GLOBAL_VARIABLE = 10
# change for tests
class MainClass(object):
    """Demo class exercising stdlib, third-party and local-library imports
    for pre-commit hook testing."""

    def __init__(self):
        self.first_variable = 1
        # NOTE(review): range(10, 1000, 2) yields even numbers only, so
        # n % 10 == 3 can never be true and this list is always empty --
        # confirm whether that is intentional.
        self.second_variable = [
            n for n in range(10, 1000, 2) if n % 10 == 3
        ]
        self.third_variable = (
            'This is a very very very long string that shouldn\'t be in one '
            'line only because it has more than 80 characters')
        self.local_lib_reference = LocalLibClass()
        self.fourth_variable = self.local_lib_reference.sum(
            self.first_variable, self.first_variable)
        self.regex = re.compile(r'\d+')
        self.soup = BeautifulSoup()
| fvendrameto/pre-commit-test | main_file.py | main_file.py | py | 747 | python | en | code | 0 | github-code | 90 |
73530702698 | import os, sys
import argparse
from tqdm import tqdm
from functools import partial
from argparse import Namespace
# jnp.set_default_tensor_type(torch.FloatTensor)
argparser = argparse.ArgumentParser()
# general args:
argparser.add_argument("--seed", type=int, help="seed", default = 0)
argparser.add_argument("--visible_GPUs", type=str, help = "which GPUs are visible and therefore usable", default="0")
argparser.add_argument("--float_bits", type=int, help="whether use 32 bit or 64 bit floating number for computation", default = 64)
argparser.add_argument("--data_folder", type=str, help='folder for the data', default="")
argparser.add_argument("--data_mult", type=float, help='multiplier for the data', default=1)
# args for overlapping DDM:
argparser.add_argument("--model_saving_path", type=str, help="the root dir to save checkpoints", default="")
argparser.add_argument("--flow_model_name", type=str, help="model for steady flow", default="")
argparser.add_argument("--domain_sizex", type=int, help="number of pixels in x direction of subdomain", default=16)
argparser.add_argument("--domain_sizey", type=int, help="number of pixels in y direction of subdomain", default=16)
argparser.add_argument("--overlap_pixels", type=int, help="the # of overlapping pixels of adjacent subdomain", default = 10)
argparser.add_argument("--starting_x", type=int, help="index of starting x", default = 0)
argparser.add_argument("--starting_y", type=int, help="index of starting y", default = 0)
argparser.add_argument("--x_patches", type=int, help='# of subdomains in x', default=1)
argparser.add_argument("--y_patches", type=int, help='# ofsubdomains in y', default=1)
argparser.add_argument("--DDM_iters", type=int, help="number of iterations for the overlapping schwarz's algorithm", default = 10)
argparser.add_argument("--momentum", type=float, help="next_iter_bc_batch = momentum*last_iter_bc_batch + (1-momentum)*new_bc_batch", default = 0)
# args for solver:
argparser.add_argument("--bc_type", type=str, help="boundary condition type, in ['dirichlet', 'neumann', 'robin']", default = 'dirichlet')
argparser.add_argument("--Re", type=float, help="Reynolds number", default = 0)
argparser.add_argument("--a0", type=float, help="robin b.c.: g_normal = 1/Re*du/dn + a0*u, g_tangent = 1/Re*du/dn + b0*u", default = 0)
argparser.add_argument("--b0", type=float, help="robin b.c.: g_normal = 1/Re*du/dn + a0*u, g_tangent = 1/Re*du/dn + b0*u", default = 0)
argparser.add_argument("--nu", type=float, help="kinetic viscosity", default = 0.1)
# args for plotting:
argparser.add_argument("--div_k", type=int, help="plot per div_k iterations", default = 5)
# argparser.add_argument("--save_data", type=int, help="if == 1, save data", default = 0)
args = argparser.parse_args()
print("verify visible: ", args.visible_GPUs)
os.environ["CUDA_VISIBLE_DEVICES"] = args.visible_GPUs
import jax
import equinox as eqx
import jax.numpy as jnp
# from jax.config import config
# config.update("jax_enable_x64", True)
import numpy as np
import torch
import pandas as pd
from torch.utils.data import random_split, DataLoader
import time
import pickle
sys.path.append("../util")
sys.path.append("../solver")
from JAX_DDM_util import *
from transform import *
from LBM_solver import *
# from JAX_SM_FNO_steady_flow import FNO_multimodal_2d as SM_FNO_flow
from DDM_dataset_flow import DDM_Dataset
from matplotlib import pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
# import torch.distributed as dist
import cv2
def get_flowing_indices(batched_data):
    """Return the indices of batch entries containing at least one
    non-obstacle pixel (entries where the mask is not all ones)."""
    reduce_axes = tuple(range(1, batched_data.ndim))
    open_pixels = jnp.sum(1 - batched_data, axis=reduce_axes)
    return tuple(jnp.nonzero(open_pixels)[0].tolist())
@partial(jax.jit, static_argnums = [0,1,2])
def combine_batch(total_size, flow_indices, shape, flow_batch):
    # Scatter the per-subdomain solver outputs back into a dense batch of
    # `total_size` entries, inserting all-zero entries for fully-blocked
    # subdomains (those whose index does not appear in `flow_indices`).
    # `flow_indices` must be sorted ascending (it comes from jnp.nonzero in
    # get_flowing_indices); total_size/flow_indices/shape are static so
    # jax.jit can unroll the Python while-loop.
    combined = []
    count = 0
    head_model=0
    while count < total_size:
        if count==flow_indices[head_model]:
            combined.append(flow_batch[head_model])
            # clamp so the pointer never runs past the last flowing entry;
            # each index matches at most once, so no entry is duplicated
            head_model = min(len(flow_indices)-1, head_model+1)
            count += 1
        else:
            # blocked subdomain: fill with zeros of one entry's shape
            combined.append(jnp.zeros((shape[1],shape[2],shape[3])))
            count += 1
    return jnp.stack(combined).reshape(shape)
def thicken_line_data(d, th):
    """Dilate the nonzero pixels of 2D array `d` by `th` pixels along both
    axes (cross-shaped structuring element, implemented via shifted sums).

    Values add up where dilated neighborhoods overlap; the output has the
    same shape as the input.
    """
    padded = np.pad(d, ((th, th), (th, th)), mode='constant', constant_values=0)
    base = padded.copy()
    for offset in range(1, th + 1):
        padded = (padded
                  + np.roll(base, offset, axis=0) + np.roll(base, -offset, axis=0)
                  + np.roll(base, offset, axis=1) + np.roll(base, -offset, axis=1))
    # strip the padding again so rolled values cannot wrap around
    return padded[th:-th, th:-th]
def setup_plot_data(args, data):
    """Render an obstacle mask as an RGB image for plotting.

    Obstacle pixels (value > 1e-3) are drawn in red-ish, background pixels
    (value < 1e-3) in beige.  Removed: a dead `np.zeros` initialization
    that was immediately overwritten, the unused inlet/outlet colors, and
    the large commented-out inlet/outlet drawing blocks.

    Parameters
    ----------
    args : Namespace
        Unused; kept for interface compatibility with existing callers.
    data : array-like, shape (H, W)
        Obstacle mask.

    Returns
    -------
    np.ndarray, shape (H, W, 3)
        Colored setup image.
    """
    obstacle_color = np.array([245, 112, 108], dtype=np.uint8)
    background_color = np.array([255, 231, 195], dtype=np.uint8)
    data = np.asarray(data)
    # keep both strict comparisons so values exactly equal to the 1e-3
    # threshold stay black, exactly as before
    is_obstacle = data[:, :, None] > 1e-3
    is_background = data[:, :, None] < 1e-3
    return is_obstacle * obstacle_color + is_background * background_color
@partial(jax.jit, static_argnums=[0])
def momentum_bc_batch_update(momentum, top_bc_batch, bottom_bc_batch, left_bc_batch, right_bc_batch, new_top_bc_batch, new_bottom_bc_batch, new_left_bc_batch, new_right_bc_batch):
    """Blend the previous boundary-condition batches with the freshly
    computed ones: out = momentum*old + (1-momentum)*new, per side.
    Returns the four updated batches as (top, bottom, left, right)."""
    previous = (top_bc_batch, bottom_bc_batch, left_bc_batch, right_bc_batch)
    fresh = (new_top_bc_batch, new_bottom_bc_batch, new_left_bc_batch, new_right_bc_batch)
    return tuple(momentum * old + (1 - momentum) * new
                 for old, new in zip(previous, fresh))
def plot_helper(data,title,path):
    # Save a single imshow heatmap of `data` to `path` with a right-hand
    # colorbar and a transparent background.
    # NOTE(review): `title` is currently unused -- the plt.title call below
    # is commented out.
    plt.figure()
    ax = plt.gca()
    im = ax.imshow(data)
    # carve a slim axis off the right edge so the colorbar matches the
    # image height
    divider = make_axes_locatable(ax)
    cax = divider.append_axes("right", size="5%", pad=0.05)
    plt.colorbar(im, cax=cax)
    # plt.title(title)
    plt.savefig(path, transparent=True)
    plt.close()
@partial(jax.jit, static_argnums=[3,4,5,6,7,8,9,10])
def prepare_batched_data(DDM_obstacle, DDM_rho, DDM_v, model_bs, x_patches, y_patches, domain_sizex, domain_sizey, overlap_pixels, starting_x, starting_y):
    # Slice the global obstacle/density/velocity fields into a batch of
    # x_patches*y_patches overlapping subdomains of size
    # (domain_sizex, domain_sizey).  Adjacent subdomains share
    # `overlap_pixels` pixels; all sizing/offset arguments are static so
    # the slice bounds are compile-time constants under jax.jit.
    # pad img, source, pml and Hy to be size_x by size_y
    obstacle_batch = [DDM_obstacle[starting_x+i*(domain_sizex-overlap_pixels) : starting_x+domain_sizex+i*(domain_sizex-overlap_pixels),\
            starting_y+j*(domain_sizey-overlap_pixels) : starting_y+domain_sizey+j*(domain_sizey-overlap_pixels)] for i in range(x_patches) for j in range(y_patches)]
    obstacle_batch = jnp.stack(obstacle_batch).reshape(model_bs,domain_sizex,domain_sizey)
    rho_batch = [DDM_rho[starting_x+i*(domain_sizex-overlap_pixels):starting_x+domain_sizex+i*(domain_sizex-overlap_pixels),\
            starting_y+j*(domain_sizey-overlap_pixels):starting_y+domain_sizey+j*(domain_sizey-overlap_pixels)] for i in range(x_patches) for j in range(y_patches)]
    rho_batch = jnp.stack(rho_batch).reshape(model_bs,domain_sizex,domain_sizey)
    # batched sources and pmls:
    # velocity keeps its trailing component axis (ux, uy)
    v_batch = [DDM_v[starting_x+i*(domain_sizex-overlap_pixels):starting_x+domain_sizex+i*(domain_sizex-overlap_pixels),\
            starting_y+j*(domain_sizey-overlap_pixels):starting_y+domain_sizey+j*(domain_sizey-overlap_pixels)] for i in range(x_patches) for j in range(y_patches)]
    v_batch = jnp.stack(v_batch).reshape(model_bs,domain_sizex,domain_sizey,2)
    return obstacle_batch, v_batch, rho_batch
@partial(jax.jit, static_argnums=[3])
def prepare_loaded_data(DDM_obstacle, DDM_rho, DDM_v, data_mult):
    """Drop the leading (batch-of-one) axis from the loaded fields and
    scale density and velocity by `data_mult` (obstacle mask unscaled)."""
    obstacle = DDM_obstacle[0]
    rho = DDM_rho[0] * data_mult
    velocity = DDM_v[0] * data_mult
    return obstacle, rho, velocity
def main(args):
    """Run the overlapping Schwarz domain-decomposition driver.

    Loads samples from the DDM dataset, carves each into overlapping
    subdomains, iteratively solves each subdomain with the LBM solver while
    exchanging boundary conditions between neighbors, plots intermediate
    reconstructions every `args.div_k` iterations, assembles the frames
    into a per-sample video, and finally plots the convergence curves.
    """
    key = jax.random.PRNGKey(args.seed)
    print(args)
    jax_devices = jax.devices('gpu')
    print("jax_devices: ", jax_devices)
    # with jax:
    solver = lbm_solver(ulB = 0.04, nulb=args.nu, maxIter=10000, write_step=1000, p_change_iter=10, p_change_th=1e-3, Re=args.Re, a0=args.a0, b0=args.b0)
    ds = DDM_Dataset(args.data_folder, data_type=np.float32 if args.float_bits==32 else np.float64)
    torch.manual_seed(42)
    DDM_loader = DataLoader(ds, batch_size=1, shuffle=True, num_workers=0)
    # full reconstructed field size, given patch counts and overlaps
    total_shape = args.domain_sizex+(args.x_patches-1)*(args.domain_sizex-args.overlap_pixels), \
                  args.domain_sizey+(args.y_patches-1)*(args.domain_sizey-args.overlap_pixels)
    print("x_patches: ", args.x_patches)
    print("y_patches: ", args.y_patches)
    model_bs = args.x_patches*args.y_patches
    size_x = args.domain_sizex+(args.x_patches-1)*(args.domain_sizex-args.overlap_pixels)
    size_y = args.domain_sizey+(args.y_patches-1)*(args.domain_sizey-args.overlap_pixels)
    print("size: ", size_x, size_y, total_shape)
    # df = pd.DataFrame(columns=['epoch','train_loss', 'train_phys_reg', 'test_loss', 'test_phys_reg'])
    convergence_data = []
    for sample_id, sample_batched in enumerate(DDM_loader):
        # only process the first 3 samples
        if sample_id == 3:
            break
        # if sample_id not in [0]:
        #     continue
        # NOTE(review): the next four lines are duplicated in the original
        this_converge = []
        this_data = {}
        this_converge = []
        this_data = {}
        time1 = time.time()
        # --- load sample and carve into overlapping subdomains ---
        DDM_obstacle, DDM_rho, DDM_v = prepare_loaded_data(jnp.asarray(sample_batched['obstacle']), jnp.asarray(sample_batched['rho']), jnp.asarray(sample_batched['v']), args.data_mult)
        obstacle_batch, v_batch, rho_batch = prepare_batched_data(DDM_obstacle, DDM_rho, DDM_v, model_bs, args.x_patches, args.y_patches, args.domain_sizex, args.domain_sizey, args.overlap_pixels, args.starting_x, args.starting_y)
        # inner (subdomain-to-subdomain) BCs start at zero; global BCs come
        # from the ground-truth field on the outer boundary
        top_bc_batch_inner, bottom_bc_batch_inner, left_bc_batch_inner, right_bc_batch_inner = init_zero_inner_bc_batch(args.x_patches, args.y_patches, args.domain_sizex, args.domain_sizey, 3)
        if args.bc_type == 'dirichlet':
            top_bc_batch_global, bottom_bc_batch_global, left_bc_batch_global, right_bc_batch_global = flow_global_bc_dirichlet(args.x_patches, args.y_patches, args.domain_sizex, args.domain_sizey, args.overlap_pixels, DDM_v[args.starting_x:args.starting_x+size_x, args.starting_y:args.starting_y+size_y])
        elif args.bc_type == 'robin':
            top_bc_batch_global, bottom_bc_batch_global, left_bc_batch_global, right_bc_batch_global = flow_global_bc_robin(args.x_patches, args.y_patches, args.Re, args.a0, args.b0, args.domain_sizex, args.domain_sizey, args.overlap_pixels, DDM_v[args.starting_x:args.starting_x+size_x, args.starting_y:args.starting_y+size_y])
        top_bc_batch, bottom_bc_batch, left_bc_batch, right_bc_batch = top_bc_batch_inner+top_bc_batch_global, bottom_bc_batch_inner+bottom_bc_batch_global, left_bc_batch_inner+left_bc_batch_global, right_bc_batch_inner+right_bc_batch_global
        time2 = time.time()
        print("data prepare time: ", time2-time1)
        colored_setup = setup_plot_data(args, DDM_obstacle[args.starting_x:args.starting_x+size_x, args.starting_y:args.starting_y+size_y])
        # get the indices of the batched data for each model:
        flow_idx = get_flowing_indices(obstacle_batch)
        print(flow_idx)
        assert len(flow_idx) > 0
        # color-scale limits taken from the ground truth for stable plots
        this_vmax_rho = jnp.max(DDM_rho[args.starting_x:args.starting_x+size_x, args.starting_y:args.starting_y+size_y])
        this_vmin_rho = jnp.min(DDM_rho[args.starting_x:args.starting_x+size_x, args.starting_y:args.starting_y+size_y])
        # print(f"this_vmax_rho: {this_vmax_rho}, this_vmin_rho: {this_vmin_rho}")
        this_vmax_vx = jnp.max(DDM_v[args.starting_x:args.starting_x+size_x, args.starting_y:args.starting_y+size_y, 0])
        this_vmin_vx = jnp.min(DDM_v[args.starting_x:args.starting_x+size_x, args.starting_y:args.starting_y+size_y, 0])
        # print(f"this_vmax_vx: {this_vmax_vx}, this_vmin_vx: {this_vmin_vx}")
        this_vmax_vy = jnp.max(DDM_v[args.starting_x:args.starting_x+size_x, args.starting_y:args.starting_y+size_y, 1])
        this_vmin_vy = jnp.min(DDM_v[args.starting_x:args.starting_x+size_x, args.starting_y:args.starting_y+size_y, 1])
        # print(f"this_vmax_vy: {this_vmax_vy}, this_vmin_vy: {this_vmin_vy}")
        corner_indices, rest_indices = global_corner_rest_indices(args, x_patches=args.x_patches, y_patches=args.y_patches)
        # --- Schwarz iterations: solve subdomains, exchange boundaries ---
        for k in range(args.DDM_iters):
            time3 = time.time()
            if (k+1)%10==0:
                print(k+1)
            # with autocast():
            # top_bc_batch, bottom_bc_batch, left_bc_batch, right_bc_batch = combine_bc_batch(top_bc_batch, bottom_bc_batch, left_bc_batch, right_bc_batch)
            # flow_logits = flow_model_eval(steady_flow_model, flow_idx, obstacle_batch, top_bc_batch, bottom_bc_batch, left_bc_batch, right_bc_batch)
            flow_logits = []
            for i in tqdm(range(args.x_patches*args.y_patches)):
                # assemble the dense per-subdomain BC fields (only the
                # one-pixel rim is nonzero) from the four edge batches
                bc_ux = jnp.zeros((args.domain_sizex, args.domain_sizey))
                bc_ux = bc_ux.at[0:1,:].set(top_bc_batch[i,:,:,0])
                bc_ux = bc_ux.at[-1:,:].set(bottom_bc_batch[i,:,:,0])
                bc_ux = bc_ux.at[:,0:1].set(left_bc_batch[i,:,:,0])
                bc_ux = bc_ux.at[:,-1:].set(right_bc_batch[i,:,:,0])
                bc_uy = jnp.zeros((args.domain_sizex, args.domain_sizey))
                bc_uy = bc_uy.at[0:1,:].set(top_bc_batch[i,:,:,1])
                bc_uy = bc_uy.at[-1:,:].set(bottom_bc_batch[i,:,:,1])
                bc_uy = bc_uy.at[:,0:1].set(left_bc_batch[i,:,:,1])
                bc_uy = bc_uy.at[:,-1:].set(right_bc_batch[i,:,:,1])
                bc_p = jnp.zeros((args.domain_sizex, args.domain_sizey))
                bc_p = bc_p.at[0:1,:].set(top_bc_batch[i,:,:,2])
                bc_p = bc_p.at[-1:,:].set(bottom_bc_batch[i,:,:,2])
                bc_p = bc_p.at[:,0:1].set(left_bc_batch[i,:,:,2])
                bc_p = bc_p.at[:,-1:].set(right_bc_batch[i,:,:,2])
                output_vx, output_vy, output_p, _, _, failed_nan = solver.solve(i, bc_ux, bc_uy, bc_p, obstacle_batch[i], bc_type=args.bc_type, write_video=True)
                if failed_nan:
                    raise ValueError("nan")
                flow_logits.append(jnp.stack((output_vx, output_vy, output_p), axis=2))
            flow_logits = jnp.asarray(flow_logits)
            time4 = time.time()
            logits = combine_batch(args.x_patches*args.y_patches, flow_idx, (v_batch.shape[0], v_batch.shape[1], v_batch.shape[2], 3), flow_logits)
            plt.rcParams["font.size"] = "8"
            # --- periodic plotting of the reconstructed fields ---
            if (k+1)%args.div_k==0:
                # reconstruct the whole field
                reconstructed = reconstruct(logits, x_patches=args.x_patches, y_patches=args.y_patches, d_sx=args.domain_sizex, d_sy=args.domain_sizey, ol=args.overlap_pixels, c=3)
                intermediate_v, intermediate_rho = reconstructed[:,:,:2], reconstructed[:,:,2]
                this_obstacle = DDM_obstacle[args.starting_x:args.starting_x+size_x, args.starting_y:args.starting_y+size_y]
                this_rho = DDM_rho[args.starting_x:args.starting_x+size_x, args.starting_y:args.starting_y+size_y]
                this_v = DDM_v[args.starting_x:args.starting_x+size_x, args.starting_y:args.starting_y+size_y]
                # relative L1 errors against ground truth, excluding corners
                loss_rho = jnp.mean(jnp.abs(intermediate_rho.flatten()[rest_indices] - this_rho.flatten()[rest_indices]))/ \
                           jnp.mean(jnp.abs(this_rho))
                loss_vx = jnp.mean(jnp.abs(intermediate_v[:,:,0].flatten()[rest_indices] - this_v[:,:,0].flatten()[rest_indices]))/ \
                          jnp.mean(jnp.abs(this_v[:,:,0]))
                loss_vy = jnp.mean(jnp.abs(intermediate_v[:,:,1].flatten()[rest_indices] - this_v[:,:,1].flatten()[rest_indices]))/ \
                          jnp.mean(jnp.abs(this_v[:,:,1]))
                fig, axs = plt.subplots(4,2)
                for a in axs.flatten():
                    a.set_xticks([])
                    a.set_yticks([])
                axs[0,0].imshow(colored_setup)
                axs[0,1].imshow(colored_setup)
                im = axs[1,0].imshow(this_v[:,:,0], cmap='seismic')
                # plt.colorbar(im, ax=axs[1])
                im = axs[1,1].imshow(intermediate_v[:,:,0], cmap='seismic', vmax=this_vmax_vx, vmin=this_vmin_vx)
                annotate_x = 0.5
                annotate_y = -0.15
                annotate_content = f"Iteration: {(k+1):d}"
                plt.annotate(annotate_content, (annotate_x, annotate_y), xycoords="axes fraction", ha="center", fontsize=10)
                annotate_x = 0.5
                annotate_y = -0.3
                annotate_content = f"rel. L1 loss: {loss_vx:.3f}"
                plt.annotate(annotate_content, (annotate_x, annotate_y), xycoords="axes fraction", ha="center", fontsize=10)
                im = axs[2,0].imshow(this_v[:,:,1], cmap='seismic')
                # plt.colorbar(im, ax=axs[1])
                im = axs[2,1].imshow(intermediate_v[:,:,1], cmap='seismic', vmax=this_vmax_vy, vmin=this_vmin_vy)
                annotate_x = -3
                annotate_y = -0.15
                annotate_content = f"Iteration: {(k+1):d}"
                plt.annotate(annotate_content, (annotate_x, annotate_y), xycoords="axes fraction", ha="center", fontsize=10)
                annotate_x = -3
                annotate_y = -0.3
                annotate_content = f"rel. L1 loss: {loss_vy:.3f}"
                plt.annotate(annotate_content, (annotate_x, annotate_y), xycoords="axes fraction", ha="center", fontsize=10)
                im = axs[3,0].imshow(this_rho, cmap='seismic')
                # plt.colorbar(im, ax=axs[1])
                im = axs[3,1].imshow(intermediate_rho, cmap='seismic', vmax=this_vmax_rho, vmin=this_vmin_rho)
                # plt.colorbar(im, ax=axs[2])
                plt.savefig(f'frames/s_{sample_id}_frame_{k:04d}.png', bbox_inches='tight', transparent=True, dpi=600)
                plt.close()
                this_converge.append(1/2*(loss_vx+loss_vy))
            time5 = time.time()
            # Then prepare the data for next iteration:
            # new_top_bc_batch, new_bottom_bc_batch, new_left_bc_batch, new_right_bc_batch = new_iter_bc_batchs_periodic(logits, obstacle_batch, , args.x_patches, args.y_patches, domain_sizex=args.domain_sizex, domain_sizey=args.domain_sizey, overlap_pixels=args.overlap_pixels)
            if args.bc_type == 'dirichlet':
                new_top_bc_batch_inner, new_bottom_bc_batch_inner, new_left_bc_batch_inner, new_right_bc_batch_inner = new_iter_inner_bcs_flow_dirichlet(logits, args.x_patches, args.y_patches, d_sx=args.domain_sizex, d_sy=args.domain_sizey, ol=args.overlap_pixels, c=3)
            elif args.bc_type == 'robin':
                new_top_bc_batch_inner, new_bottom_bc_batch_inner, new_left_bc_batch_inner, new_right_bc_batch_inner = new_iter_inner_bcs_flow_robin(logits, args.x_patches, args.y_patches, args.Re, args.a0, args.b0, d_sx=args.domain_sizex, d_sy=args.domain_sizey, ol=args.overlap_pixels, c=3)
            new_top_bc_batch, new_bottom_bc_batch, new_left_bc_batch, new_right_bc_batch = new_top_bc_batch_inner+top_bc_batch_global, new_bottom_bc_batch_inner+bottom_bc_batch_global, new_left_bc_batch_inner+left_bc_batch_global, new_right_bc_batch_inner+right_bc_batch_global
            top_bc_batch, bottom_bc_batch, left_bc_batch, right_bc_batch = momentum_bc_batch_update(args.momentum, top_bc_batch, bottom_bc_batch, left_bc_batch, right_bc_batch, new_top_bc_batch, new_bottom_bc_batch, new_left_bc_batch, new_right_bc_batch)
            time6 = time.time()
            if k<5:
                print(f"model inference time: {time4-time3}, \
                    plot time: {time5-time4}, update next time: {time6-time5}")
            print(f"total step time: {time6-time3}")
        plt.close()
        # --- assemble saved frames into a per-sample video ---
        video_filename = f'video_{sample_id}.mp4'
        fourcc = cv2.VideoWriter_fourcc(*'mp4v') # Codec for MP4 video
        fps = 5 # Frames per second
        frame = cv2.imread(f'frames/s_{sample_id}_frame_{(args.div_k-1):04d}.png') # Load the first frame to get dimensions
        frame_height, frame_width, _ = frame.shape
        video_writer = cv2.VideoWriter(video_filename, fourcc, fps, (frame_width, frame_height))
        for i in range(args.div_k,args.DDM_iters+1,args.div_k):
            filename = f'frames/s_{sample_id}_frame_{(i-1):04d}.png'
            frame = cv2.imread(filename)
            video_writer.write(frame)
        video_writer.release()
        convergence_data.append(this_converge)
    # --- convergence-curve summary over all processed samples ---
    convergence_data = np.array(convergence_data)
    iters = args.div_k*np.array(range(1, convergence_data.shape[1]+1))
    plt.figure()
    plt.plot(iters, convergence_data.T)
    domain_size = args.data_folder.split('_')[-3]
    plt.title("domain size: %s x %s, mean: %.4f, std: %.4f" % (domain_size, domain_size, np.mean(convergence_data[:,-1]), np.std(convergence_data[:,-1])))
    plt.ylim(0,1.0)
    plt.xlabel("Iterations")
    plt.ylabel("Relative L1 loss")
    plt.savefig("eval_convergence.png", transparent=True, dpi=300)
    plt.close()
    # df = df.append({'epoch': step+1, 'lr': str(model.lr_scheduler.get_last_lr()),
    #                 'train_loss': train_loss.item(),
    #                 'test_loss': test_loss.item(),
    #                 }, ignore_index=True)
    # df.to_csv(model_path + '/'+'df.csv',index=False)
if __name__ == '__main__':
    # args are parsed at import time by the argparse block above
    main(args)
| ChenkaiMao97/MAML_EM_simulation | DDM/overlapping_solver_JAX_steady_flow/overlapping_solver_jax_video.py | overlapping_solver_jax_video.py | py | 23,288 | python | en | code | 3 | github-code | 90 |
20749920079 | import sys
def solve():
    """Solve AtCoder ABC187 problem D.

    Aoki starts with every town's a_i votes.  Visiting a town moves its
    a_i votes away from Aoki and gains a_i + b_i for Takahashi, so each
    visit swings the margin by 2*a_i + b_i.  Greedily visit towns in
    descending swing order and print the minimum number of visits needed
    for Takahashi to take the lead, then exit.

    Reads the input from stdin; removed an unused `mod` constant.
    """
    input = sys.stdin.readline
    n = int(input().rstrip('\n'))
    ab = []
    takahashi = 0
    aoki = 0
    for _ in range(n):
        a, b = map(int, input().rstrip('\n').split())
        aoki += a
        # sort key: margin swing gained by visiting this town
        ab.append((2 * a + b, a, b))
    ab.sort(reverse=True)
    cnt = 0
    for _, a, b in ab:
        cnt += 1
        takahashi += a + b
        aoki -= a
        if takahashi > aoki:
            print(cnt)
            exit()
| tabi-code/AtCoder | problems/abc187/abc187_d.py | abc187_d.py | py | 531 | python | en | code | 0 | github-code | 90 |
38724537579 | import numpy as np
from utils.vocab import Vocab
def padding(sentence, max_len, vocab):
    """Normalize one sentence for decoding.

    Truncates to at most `max_len` words, replaces out-of-vocabulary words
    with the UNK token, wraps the result in START/STOP markers, and appends
    PAD tokens so every sentence carries max_len content slots.

    :param sentence: raw whitespace-separated sentence
    :param max_len: maximum number of content tokens to keep
    :param vocab: Vocab instance providing word2id and the special tokens
    :return: processed sentence as one space-joined string
    """
    tokens = sentence.strip().split()[:max_len]
    known = [tok if tok in vocab.word2id else vocab.UNKNOWN_TOKEN
             for tok in tokens]
    framed = [vocab.START_DECODING] + known + [vocab.STOP_DECODING]
    framed.extend([vocab.PAD_TOKEN] * (max_len - len(tokens)))
    return ' '.join(framed)
def transform_to_ids(sentence, vocab):
    """Map each whitespace-separated token of `sentence` to its vocabulary
    id via vocab.get_id_by_word, returning the list of ids."""
    return [vocab.get_id_by_word(token) for token in sentence.split()]
def seg_text_to_ids(seg_text_path, vocab, max_len):
    """Read a segmented-text file and convert every line into a padded
    list of vocabulary ids (see padding/transform_to_ids)."""
    print('ๅผๅงๆฐๆฎ่ฟ่ก้ขๅค็...')
    with open(seg_text_path, 'r', encoding='utf-8') as f:
        return [transform_to_ids(padding(line, max_len, vocab), vocab)
                for line in f.readlines()]
def save_dataset_file(dataset, path):
    """Write the id matrix to `path` as comma-separated integer rows."""
    np.savetxt(path, dataset, delimiter=",", fmt="%d")
if __name__ == '__main__':
    # Script entry point: convert the segmented test set into padded id
    # sequences using the word-frequency vocabulary.
    test_seg_text_path = '../resource/gen/test_x_cut.txt'
    test_dataset_path = '../resource/gen/test_x_dataset.txt'
    vocab_file = '../resource/gen/vocabs_w_f.txt'
    max_len = 200  # maximum number of content tokens kept per line
    vocab = Vocab(vocab_file, vocab_max_size=None)
    dataset = seg_text_to_ids(test_seg_text_path, vocab, max_len)
save_dataset_file(dataset, test_dataset_path) | yikisng/NLP-Project-01-QA_Abstract_Reasoning | data_processor/dataset_processor.py | dataset_processor.py | py | 1,682 | python | en | code | 0 | github-code | 90 |
36759592183 | from datetime import datetime, time
from itertools import groupby
import requests
from flask import Flask, jsonify, render_template, request
from pymongo import MongoClient
app = Flask(__name__)
app.jinja_env.add_extension('jinja2.ext.do')
client = MongoClient('localhost', 27017)
db = client.metcast
API_KEY = 'cd62149871d972ab50a11189467f1bd6'
FIND_URL = 'http://api.openweathermap.org/data/2.5/find'
FORECAST_URL = 'http://api.openweathermap.org/data/2.5/forecast'
DAYS = ('ะะพะฝะตะดะตะปัะฝะธะบ', 'ะัะพัะฝะธะบ', 'ะกัะตะดะฐ', 'ะงะตัะฒะตัะณ', 'ะััะฝะธัะฐ', 'ะกัะฑะฑะพัะฐ',
'ะะพัะบัะตัะตะฝัะต')
NOON = time(12, 00)
def normalize_wind_deg(deg):
    """Snap a wind direction in degrees to the nearest supported icon sector.

    Sectors are spaced ~22.5 degrees apart; a reading within +/-12 degrees of a
    sector matches it (first match wins), anything else falls back to 0.
    NOTE(review): 313 and 336 look like typos for 315 and 338 — confirm against
    the wind-icon CSS classes before changing.
    """
    sectors = (0, 23, 45, 68, 90, 113, 135, 158, 180, 203, 225, 248, 270, 293,
               313, 336)
    return next((s for s in sectors if s - 12 < deg < s + 12), 0)
def normalize_icon(own_icon):
    """Translate an OpenWeatherMap icon id (e.g. '10d') to a weather-icons CSS class.

    Unknown ids fall back to the 'not available' icon.
    """
    css_classes = {
        '01d': 'wi wi-day-sunny', '01n': 'wi wi-night-clear',
        '02d': 'wi wi-day-cloudy', '02n': 'wi wi-night-alt-cloudy',
        '03d': 'wi wi-cloud', '03n': 'wi wi-cloud',
        '04d': 'wi wi-cloudy', '04n': 'wi wi-cloudy',
        '09d': 'wi wi-rain', '09n': 'wi wi-rain',
        '10d': 'wi wi-day-rain', '10n': 'wi wi-night-alt-hail',
        '11d': 'wi wi-thunderstorm', '11n': 'wi wi-thunderstorm',
        '13d': 'wi wi-snow', '13n': 'wi wi-snow',
    }
    return css_classes.get(own_icon, 'wi wi-na')
@app.route('/')
def index():
    """Render the 5-day forecast page.

    Location resolution order: explicit city ``id``, ``lat``/``lon`` pair,
    free-text ``q``, else a hard-coded default id. Forecasts cached in MongoDB
    younger than three hours are reused; otherwise the OpenWeatherMap forecast
    API is queried, the response reshaped for the template, and cached.
    """
    params = dict()
    if request.args.get('id'):
        params['id'] = request.args.get('id')
    elif request.args.get('lat') and request.args.get('lon'):
        params['lat'] = request.args.get('lat')
        params['lon'] = request.args.get('lon')
    elif request.args.get('q'):
        params['q'] = request.args.get('q')
    else:
        params['id'] = 701404  # Melitopol
    params['units'] = request.args.get('units', 'metric')
    params['lang'] = request.args.get('lang', 'ru')
    params['APPID'] = API_KEY
    now = datetime.now()
    zero = datetime.fromtimestamp(0)
    # Cache lookup: reuse the first stored forecast younger than three hours.
    for forecast in db.forecast.find(params):
        delta = now - forecast.get('inner_dt', zero)
        if delta.seconds <= 3 * 60 * 60:
            response = forecast
            from_db = True
            print('_______from_db_______')
            print('date:', now)
            print('city:', response['city'])
            print('_____________________')
            break
    else:
        # for-else: no cached document broke out of the loop -> call the API.
        data = requests.get(FORECAST_URL, params=params).json()
        from_db = False
        for forecast in data['list']:
            forecast['dt'] = datetime.fromtimestamp(forecast['dt'])
        response = {'weather': [], 'inner_dt': now}
        response.update(params)
        now_date = datetime.now().date()
        num = 0
        # Group the 3-hourly entries by calendar day; keep at most five days.
        for key, weather in groupby(data['list'], lambda x: (x['dt'].weekday(), x['dt'].day)):
            num += 1
            if num > 5: break
            weekday, day = key
            result = dict()
            result['date'] = '{weekday} {day}'.format(weekday=DAYS[weekday],
                                                      day=day)
            weather_list = list(weather)
            daily_forecast = weather_list[0]
            result['list'] = []
            for forecast in weather_list:
                # Today's summary uses the day's first entry; other days use noon.
                if forecast['dt'].date() == now_date:
                    daily_forecast = weather_list[0]
                elif forecast['dt'].time() == NOON:
                    daily_forecast = forecast
                normal_icon = normalize_icon(forecast['weather'][0]['icon'])
                forecast['weather'][0]['icon'] = normal_icon
                forecast['time'] = forecast.pop('dt').strftime('%H:%M')
                normal_wind_deg = normalize_wind_deg(forecast['wind']['deg'])
                forecast['wind']['deg'] = normal_wind_deg
                result['list'].append(forecast)
            result['temp'] = daily_forecast['main']['temp']
            result['icon'] = daily_forecast['weather'][0]['icon']
            result['description'] = daily_forecast['weather'][0]['description']
            response['weather'].append(result)
        response['city'] = data['city']
    # from_db is False only on the API path, so fresh responses get cached.
    if not from_db:
        db.forecast.insert_one(response)
    units = 'celsius' if params['units'] == 'metric' else 'fahrenheit'
    return render_template('index.html', data=response, units=units)
@app.route('/find')
def find():
    """Proxy city-search queries to the OpenWeatherMap *find* endpoint.

    Returns the raw upstream body; a JSON 404 marker when ``q`` is missing.
    """
    query = request.args.get('q')
    if not query:
        return jsonify({'code': 404})
    params = {
        'q': query,
        'units': request.args.get('units', 'metric'),
        'lang': request.args.get('lang', 'ru'),
        'APPID': API_KEY,
    }
    return requests.get(FIND_URL, params=params).content
if __name__ == '__main__':
    # NOTE(review): debug=True combined with host 0.0.0.0 exposes the Werkzeug
    # debugger to the whole network — keep for local development only.
    app.run(debug=True, host='0.0.0.0')
| dbond762/metcast | app.py | app.py | py | 4,850 | python | en | code | 0 | github-code | 90 |
2806683956 | import random
def pick6():
    """Return a lottery ticket: six random integers, each in [1, 99]."""
    return [random.randint(1, 99) for _ in range(6)]
def num_matches(winning, ticket):
    """Count positions where *ticket* equals *winning* (position-sensitive)."""
    return sum(1 for win, tix in zip(winning, ticket) if win == tix)
# def num_matches(winning, ticket):
# matches = 0
# for num in ticket:
# if num in winning:
# matches += 1
# return matches
winnings = {6: 25000000, 5: 1000000, 4: 50000, 3: 100, 2: 0, 1: 0, 0: 0}
balance = 0
earnings = 0
expenses = 0
num_of_matches = {0: 0, 1: 0, 2: 0, 3: 0, 4: 0, 5: 0, 6: 0}
winning_ticket = pick6()
for n in range(1000000):
current_ticket = pick6()
balance -= 2
expenses += 2
matches = num_matches(winning_ticket, current_ticket)
balance += winnings[matches]
earnings += winnings[matches]
num_of_matches[matches] += 1
# if matches == 6:
# balance += 25000000
# earnings += 25000000
# m6 += 1
# elif matches == 5:
# balance += 1000000
# earnings += 1000000
# m5 += 1
# elif matches == 4:
# balance += 50000
# earnings += 50000
# m4 += 1
# elif matches == 3:
# balance += 100
# earnings += 100
# m3 += 1
# elif matches == 2:
# balance += 7
# earnings += 7
# m2 += 1
# elif matches == 1:
# balance += 4
# earnings += 4
# m1 += 1
# else:
# m0 += 1
print("balance:", balance)
print("expenses:", expenses)
print("earnings:", earnings)
print("roi:", (earnings - expenses)/expenses)
print(num_of_matches) | PdxCodeGuild/class_salmon | code/merritt/archive/lab14.py | lab14.py | py | 1,814 | python | en | code | 5 | github-code | 90 |
25538736091 | '''
1. check directions of reads containing DNM
2. if there are indels in reads, if any, could be misaligned reads
Author: Y.Lin
'''
import pysam, vcfpy
import sys, os
def main():
    """Annotate candidate DNM sites in a trio VCF with read-level BAM filters.

    Usage: BAMfilter.py <vcf> <ped>
    For each record, reads overlapping the site in the child's and both
    parents' BAMs are screened for indel-containing CIGARs and secondary
    alignments; failures are added as FILTER tags. Writes
    ``<child>.AB_DP_BAM.filter.vcf`` (all sites) and
    ``<child>_pass.AB_DP_BAM.filter.vcf`` (PASS-only).
    """
    VCF = sys.argv[1]
    PEDfile = sys.argv[2]
    try:
        reader = vcfpy.Reader.from_path(VCF)
    except Exception:
        sys.exit('Cannot find VCF file')
    # BUG FIX: the original called getPED(PedFile) — an undefined name
    # (the argument variable is PEDfile) — raising NameError at runtime.
    ped_list = getPED(PEDfile)
    c = p = m = None
    for sample in reader.header.samples.names:
        if sample in ped_list:
            c = sample
            p, m = ped_list[sample]
    if c is None:
        # The original would crash with NameError further down in this case.
        sys.exit('No complete trio found among VCF samples')
    pBAM = p + '.dedup.20k.bam'
    mBAM = m + '.dedup.20k.bam'
    cBAM = c + '.dedup.20k.bam'
    writer = vcfpy.Writer.from_path('./{}.AB_DP_BAM.filter.vcf'.format(c), reader.header)
    writer_PASS_only = vcfpy.Writer.from_path('./{}_pass.AB_DP_BAM.filter.vcf'.format(c), reader.header)
    for record in reader:
        CHROM = record.CHROM
        POS = record.POS
        # One pass per trio member; prefix tags filters by member (c/p/m),
        # exactly as the original triplicated loops did.
        for prefix, bam in (('c', cBAM), ('p', pBAM), ('m', mBAM)):
            for read in AlignedRead(bam, CHROM, POS):
                if not checkGaps(read):
                    record.add_filter(prefix + "Gaps")
                if not secMappedReads(read):
                    record.add_filter(prefix + "secMapped")
        # Output all sites.
        writer.write_record(record)
        # Output PASS-only sites.
        if record.FILTER == ["PASS"]:
            writer_PASS_only.write_record(record)
def getPED(PedFile):
    """Parse a PED pedigree file into ``{child: [father, mother]}``.

    Lines starting with '#' and blank lines are ignored. Only children whose
    father AND mother fields are not '.' (i.e. complete trios) are kept.
    """
    trios = {}
    with open(PedFile, 'r') as handle:
        for raw_line in handle:
            fields = raw_line.split()
            if raw_line.startswith("#") or not fields:
                continue
            child, father, mother, *_ = fields
            if father != '.' and mother != '.':
                trios[child] = [father, mother]
    return trios
def getVCFInfo(VCF):
    """Return (CHROM, POS) of the *first* record in the VCF.

    NOTE(review): the return statement sits inside the loop, so only the first
    record is ever inspected; this helper appears unused by main() — confirm
    before relying on it.
    """
    try:
        reader = vcfpy.Reader.from_path(VCF)
        #print(reader.header)
    except:
        sys.exit('Cannot find VCF file')
    for record in reader:
        CHROM = record.CHROM
        POS = record.POS
        return CHROM, POS
def AlignedRead(BAM, CHROM, POS):
    """Yield QC-passing reads overlapping POS (1-based) in the given BAM.

    Filters out duplicates, mapping quality < 30, and improper pairs.
    Fix: the AlignmentFile was never closed (file-handle leak when iterating
    many sites); the ``with`` block closes it once the generator is exhausted.
    """
    with pysam.AlignmentFile(BAM, 'rb') as samfile:
        for read in samfile.fetch(CHROM, POS - 1, POS + 1):
            if not read.is_duplicate and read.mapping_quality >= 30 and read.is_proper_pair:
                yield read
def checkGaps(read):
    """Return False when the read's CIGAR contains an insertion or deletion.

    CIGAR example: 3M1I3M1D5M -> 3 matches, 1 insertion, 3 matches,
    1 deletion, 5 matches. A gapped read at a candidate DNM site may indicate
    misalignment. The original built per-read 0/1 counters and compared their
    ratio to 0.5 — for a single read that reduces to this membership test.
    """
    cigar = read.cigarstring
    return not ('I' in cigar or 'D' in cigar)
def secMappedReads(read):
    """Return False for a properly-paired secondary alignment, True otherwise.

    A secondary alignment overlapping the candidate site suggests the read may
    map elsewhere. The original's per-read 0/1 ratio-vs-0.5 logic reduces to
    this single boolean for one read.
    """
    return not (read.is_secondary and read.is_proper_pair)
if "__main__":
main()
| Lin-Yuying/GuppyGermlineDNMs | BAMfilter.py | BAMfilter.py | py | 3,350 | python | en | code | 1 | github-code | 90 |
32428571135 | """
TorchText๋ก ์ธ์ด ๋ฒ์ญํ๊ธฐ
===================================
์ด ํํ ๋ฆฌ์ผ์์๋ ``torchtext`` ์ ์ ์ฉํ ์ฌ๋ฌ ํด๋์ค๋ค๊ณผ ์ํ์ค ํฌ ์ํ์ค(sequence-to-sequence, seq2seq)๋ชจ๋ธ์ ํตํด
์์ด์ ๋
์ผ์ด ๋ฌธ์ฅ๋ค์ด ํฌํจ๋ ์ ๋ช
ํ ๋ฐ์ดํฐ ์
์ ์ด์ฉํด์ ๋
์ผ์ด ๋ฌธ์ฅ์ ์์ด๋ก ๋ฒ์ญํด ๋ณผ ๊ฒ์
๋๋ค.
์ด ํํ ๋ฆฌ์ผ์
PyTorch ์ปค๋ฎค๋ํฐ ๋ฉค๋ฒ์ธ `Ben Trevett <https://github.com/bentrevett>`__ ์ด ์์ฑํ
`ํํ ๋ฆฌ์ผ <https://github.com/bentrevett/pytorch-seq2seq/blob/master/3%20-%20Neural%20Machine%20Translation%20by%20Jointly%20Learning%20to%20Align%20and%20Translate.ipynb>`__ ์ ๊ธฐ์ดํ๊ณ ์์ผ๋ฉฐ
`Seth Weidman <https://github.com/SethHWeidman/>`__ ์ด Ben์ ํ๋ฝ์ ๋ฐ๊ณ ๋ง๋ค์์ต๋๋ค.
์ด ํํ ๋ฆฌ์ผ์ ํตํด ์ฌ๋ฌ๋ถ์ ๋ค์๊ณผ ๊ฐ์ ๊ฒ์ ํ ์ ์๊ฒ ๋ฉ๋๋ค:
- ``torchtext`` ์ ์๋์ ๊ฐ์ ์ ์ฉํ ํด๋์ค๋ค์ ํตํด ๋ฌธ์ฅ๋ค์ NLP๋ชจ๋ธ๋ง์ ์์ฃผ ์ฌ์ฉ๋๋ ํํ๋ก ์ ์ฒ๋ฆฌํ ์ ์๊ฒ ๋ฉ๋๋ค:
- `TranslationDataset <https://torchtext.readthedocs.io/en/latest/datasets.html#torchtext.datasets.TranslationDataset>`__
- `Field <https://torchtext.readthedocs.io/en/latest/data.html#torchtext.data.Field>`__
- `BucketIterator <https://torchtext.readthedocs.io/en/latest/data.html#torchtext.data.BucketIterator>`__
"""
######################################################################
# `Field` ์ `TranslationDataset`
# ----------------
# ``torchtext`` ์๋ ์ธ์ด ๋ณํ ๋ชจ๋ธ์ ๋ง๋ค๋ ์ฝ๊ฒ ์ฌ์ฉํ ์ ์๋ ๋ฐ์ดํฐ์
์ ๋ง๋ค๊ธฐ ์ ํฉํ ๋ค์ํ ๋๊ตฌ๊ฐ ์์ต๋๋ค.
# ๊ทธ ์ค์์๋ ์ค์ํ ํด๋์ค ์ค ํ๋์ธ `Field <https://github.com/pytorch/text/blob/master/torchtext/data/field.py#L64>`__ ๋
# ๊ฐ ๋ฌธ์ฅ์ด ์ด๋ป๊ฒ ์ ์ฒ๋ฆฌ๋์ด์ผ ํ๋์ง ์ง์ ํ๋ฉฐ, ๋ ๋ค๋ฅธ ์ค์ํ ํด๋์ค๋ก๋ `TranslationDataset` ์ด ์์ต๋๋ค.
# ``torchtext`` ์๋ ์ด ์ธ์๋ ๋น์ทํ ๋ฐ์ดํฐ์
๋ค์ด ์๋๋ฐ, ์ด๋ฒ ํํ ๋ฆฌ์ผ์์๋ `Multi30k dataset <https://github.com/multi30k/dataset>`__ ์ ์ฌ์ฉํ ๊ฒ์
๋๋ค.
# ์ด ๋ฐ์ดํฐ ์
์ ํ๊ท ์ฝ 13๊ฐ์ ๋จ์ด๋ก ๊ตฌ์ฑ๋ ์ฝ ์ผ๋ง ๊ฐ์ ๋ฌธ์ฅ์ ์์ด์ ๋
์ผ์ด ๋ ์ธ์ด๋ก ํฌํจํ๊ณ ์์ต๋๋ค.
#
# ์ฐธ๊ณ : ์ด ํํ ๋ฆฌ์ผ์์์ ํ ํฐํ(tokenization)์๋ `Spacy <https://spacy.io>`__ ๊ฐ ํ์ํฉ๋๋ค.
# Spacy๋ ์์ด ์ด ์ธ์ ๋ค๋ฅธ ์ธ์ด์ ๋ํ ๊ฐ๋ ฅํ ํ ํฐํ ๊ธฐ๋ฅ์ ์ ๊ณตํ๊ธฐ ๋๋ฌธ์ ์ฌ์ฉํฉ๋๋ค. ``torchtext`` ๋
# `basic_english`` ํ ํฌ๋์ด์ ๋ฅผ ์ ๊ณตํ ๋ฟ ์๋๋ผ ์์ด์ ์ฌ์ฉํ ์ ์๋ ๋ค๋ฅธ ํ ํฌ๋์ด์ ๋ค(์์ปจ๋ฐ
# `Moses <https://bitbucket.org/luismsgomes/mosestokenizer/src/default/>`__ )์ ์ง์ํฉ๋๋ค๋ง, ์ธ์ด ๋ฒ์ญ์ ์ํด์๋ ๋ค์ํ ์ธ์ด๋ฅผ
# ๋ค๋ฃจ์ด์ผ ํ๊ธฐ ๋๋ฌธ์ Spacy๊ฐ ๊ฐ์ฅ ์ ํฉํฉ๋๋ค.
#
# ์ด ํํ ๋ฆฌ์ผ์ ์คํํ๋ ค๋ฉด, ์ฐ์ ``pip`` ๋ ``conda`` ๋ก ``spacy`` ๋ฅผ ์ค์นํ์ธ์. ๊ทธ ๋ค์,
# Spacy ํ ํฌ๋์ด์ ๊ฐ ์ธ ์์ด์ ๋
์ผ์ด์ ๋ํ ๋ฐ์ดํฐ๋ฅผ ๋ค์ด๋ก๋ ๋ฐ์ต๋๋ค.
#
# ::
#
# python -m spacy download en
# python -m spacy download de
#
# Spacy๊ฐ ์ค์น๋์ด ์๋ค๋ฉด, ๋ค์ ์ฝ๋๋ ``TranslationDataset`` ์ ์๋ ๊ฐ ๋ฌธ์ฅ์ ``Field`` ์ ์ ์๋
# ๋ด์ฉ์ ๊ธฐ๋ฐ์ผ๋ก ํ ํฐํํ ๊ฒ์
๋๋ค.
from torchtext.datasets import Multi30k
from torchtext.data import Field, BucketIterator
SRC = Field(tokenize = "spacy",
tokenizer_language="de",
init_token = '<sos>',
eos_token = '<eos>',
lower = True)
TRG = Field(tokenize = "spacy",
tokenizer_language="en",
init_token = '<sos>',
eos_token = '<eos>',
lower = True)
train_data, valid_data, test_data = Multi30k.splits(exts = ('.de', '.en'),
fields = (SRC, TRG))
######################################################################
# ์ด์ ``train_data`` ๋ฅผ ์ ์ํ์ผ๋, ``torchtext`` ์ ``Field`` ์ ์๋ ์์ฒญ๋๊ฒ ์ ์ฉํ ๊ธฐ๋ฅ์
# ๋ณด๊ฒ ๋ ๊ฒ์
๋๋ค : ๋ฐ๋ก ``build_vovab`` ๋ฉ์๋(method)๋ก ๊ฐ ์ธ์ด์ ์ฐ๊ด๋ ์ดํ๋ค์ ๋ง๋ค์ด ๋ผ ๊ฒ์
๋๋ค.
SRC.build_vocab(train_data, min_freq = 2)
TRG.build_vocab(train_data, min_freq = 2)
######################################################################
# ์ ์ฝ๋๊ฐ ์คํ๋๋ฉด, ``SRC.vocab.stoi`` ๋ ์ดํ์ ํด๋นํ๋ ํ ํฐ์ ํค๋ก, ๊ด๋ จ๋ ์์ธ์ ๊ฐ์ผ๋ก ๊ฐ์ง๋
# ์ฌ์ (dict)์ด ๋ฉ๋๋ค. ``SRC.vocab.itos`` ์ญ์ ์ฌ์ (dict)์ด์ง๋ง, ํค์ ๊ฐ์ด ์๋ก ๋ฐ๋์
๋๋ค. ์ด ํํ ๋ฆฌ์ผ์์๋
# ๊ทธ๋ค์ง ์ค์ํ์ง ์์ ๋ด์ฉ์ด์ง๋ง, ์ด๋ฐ ํน์ฑ์ ๋ค๋ฅธ ์์ฐ์ด ์ฒ๋ฆฌ ๋ฑ์์ ์ ์ฉํ๊ฒ ์ฌ์ฉํ ์ ์์ต๋๋ค.
######################################################################
# ``BucketIterator``
# ----------------
# ๋ง์ง๋ง์ผ๋ก ์ฌ์ฉํด ๋ณผ ``torchtext`` ์ ํนํ๋ ๊ธฐ๋ฅ์ ๋ฐ๋ก ``BucketIterator`` ์
๋๋ค.
# ์ฒซ ๋ฒ์งธ ์ธ์๋ก ``TranslationDataset`` ์ ์ ๋ฌ๋ฐ๊ธฐ ๋๋ฌธ์ ์ฌ์ฉํ๊ธฐ๊ฐ ์ฝ์ต๋๋ค. ๋ฌธ์์์๋ ๋ณผ ์ ์๋ฏ
# ์ด ๊ธฐ๋ฅ์ ๋น์ทํ ๊ธธ์ด์ ์์ ๋ค์ ๋ฌถ์ด์ฃผ๋ ๋ฐ๋ณต์(iterator)๋ฅผ ์ ์ํฉ๋๋ค. ๊ฐ๊ฐ์ ์๋ก์ด ์ํฌํฌ(epoch)๋ง๋ค
# ์๋ก ์์ธ ๊ฒฐ๊ณผ๋ฅผ ๋ง๋๋๋ฐ ํ์ํ ํจ๋ฉ์ ์๋ฅผ ์ต์ํ ํฉ๋๋ค. ๋ฒ์ผํ
๊ณผ์ ์์ ์ฌ์ฉ๋๋ ์ ์ฅ ๊ณต๊ฐ์ ํ๋ฒ ์ดํด๋ณด์๊ธฐ ๋ฐ๋๋๋ค.
import torch
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
BATCH_SIZE = 128
train_iterator, valid_iterator, test_iterator = BucketIterator.splits(
(train_data, valid_data, test_data),
batch_size = BATCH_SIZE,
device = device)
######################################################################
# ์ด ๋ฐ๋ณต์๋ค์ ``DataLoader`` ์ ๋ง์ฐฌ๊ฐ์ง๋ก ํธ์ถํ ์ ์์ต๋๋ค. ์๋ ``train`` ๊ณผ
# ``evaluation`` ํจ์์์ ๋ณด๋ฉด, ๋ค์๊ณผ ๊ฐ์ด ๊ฐ๋จํ ํธ์ถํ ์ ์์์ ์ ์ ์์ต๋๋ค :
# ::
#
# for i, batch in enumerate(iterator):
#
# ๊ฐ ``batch`` ๋ ``src`` ์ ``trg`` ์์ฑ์ ๊ฐ์ง๊ฒ ๋ฉ๋๋ค.
#
# ::
#
# src = batch.src
# trg = batch.trg
######################################################################
# ``nn.Module`` ๊ณผ ``Optimizer`` ์ ์ํ๊ธฐ
# ----------------
# ๋๋ถ๋ถ์ ``torchtext`` ๊ฐ ์์์ ํด์ค๋๋ค : ๋ฐ์ดํฐ์
์ด ๋ง๋ค์ด์ง๊ณ ๋ฐ๋ณต์๊ฐ ์ ์๋๋ฉด, ์ด ํํ ๋ฆฌ์ผ์์
# ์ฐ๋ฆฌ๊ฐ ํด์ผ ํ ์ผ์ด๋ผ๊ณ ๋ ๊ทธ์ ``nn.Module`` ์ ``Optimizer`` ๋ฅผ ๋ชจ๋ธ๋ก์ ์ ์ํ๊ณ ํ๋ จ์ํค๋ ๊ฒ์ด ์ ๋ถ์
๋๋ค.
#
#
# ์ด ํํ ๋ฆฌ์ผ์์ ์ฌ์ฉํ ๋ชจ๋ธ์ `์ด๊ณณ <https://arxiv.org/abs/1409.0473>`__ ์์ ์ค๋ช
ํ๊ณ ์๋ ๊ตฌ์กฐ๋ฅผ ๋ฐ๋ฅด๊ณ ์์ผ๋ฉฐ,
# ๋ ์์ธํ ๋ด์ฉ์ `์ฌ๊ธฐ <https://github.com/SethHWeidman/pytorch-seq2seq/blob/master/3%20-%20Neural%20Machine%20Translation%20by%20Jointly%20Learning%20to%20Align%20and%20Translate.ipynb>`__
# ๋ฅผ ์ฐธ๊ณ ํ์๊ธฐ ๋ฐ๋๋๋ค.
#
# ์ฐธ๊ณ : ์ด ํํ ๋ฆฌ์ผ์์ ์ฌ์ฉํ๋ ๋ชจ๋ธ์ ์ธ์ด ๋ฒ์ญ์ ์ํด ์ฌ์ฉํ ์์ ๋ชจ๋ธ์
๋๋ค. ์ด ๋ชจ๋ธ์ ์ฌ์ฉํ๋ ๊ฒ์
# ์ด ์์
์ ์ ๋นํ ํ์ค ๋ชจ๋ธ์ด๊ธฐ ๋๋ฌธ์ด์ง, ๋ฒ์ญ์ ์ ํฉํ ๋ชจ๋ธ์ด๊ธฐ ๋๋ฌธ์ ์๋๋๋ค. ์ฌ๋ฌ๋ถ์ด ์ต์ ๊ธฐ์ ํธ๋ ๋๋ฅผ
# ์ ๋ฐ๋ผ๊ฐ๊ณ ์๋ค๋ฉด ์ ์์๊ฒ ์ง๋ง, ํ์ฌ ๋ฒ์ญ์์ ๊ฐ์ฅ ๋ฐ์ด๋ ๋ชจ๋ธ์ Transformers์
๋๋ค. PyTorch๊ฐ
# Transformer ๋ ์ด์ด๋ฅผ ๊ตฌํํ ๋ด์ฉ์ `์ฌ๊ธฐ <https://pytorch.org/docs/stable/nn.html#transformer-layers>`__
# ์์ ํ์ธํ ์ ์์ผ๋ฉฐ ์ด ํํ ๋ฆฌ์ผ์ ๋ชจ๋ธ์ด ์ฌ์ฉํ๋ "attention" ์ Transformer ๋ชจ๋ธ์์ ์ ์ํ๋
# ๋ฉํฐ ํค๋ ์
ํ ์ดํ
์
(multi-headed self-attention) ๊ณผ๋ ๋ค๋ฅด๋ค๋ ์ ์ ์๋ ค๋๋ฆฝ๋๋ค.
import random
from typing import Tuple
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch import Tensor
class Encoder(nn.Module):
    """Bidirectional GRU encoder.

    Embeds source token ids, runs a bi-GRU, and projects the concatenated
    final forward/backward hidden states to the decoder's hidden size.
    """

    def __init__(self,
                 input_dim: int,
                 emb_dim: int,
                 enc_hid_dim: int,
                 dec_hid_dim: int,
                 dropout: float):
        super().__init__()
        self.input_dim = input_dim
        self.emb_dim = emb_dim
        self.enc_hid_dim = enc_hid_dim
        self.dec_hid_dim = dec_hid_dim
        self.dropout = dropout
        self.embedding = nn.Embedding(input_dim, emb_dim)
        self.rnn = nn.GRU(emb_dim, enc_hid_dim, bidirectional=True)
        self.fc = nn.Linear(enc_hid_dim * 2, dec_hid_dim)
        self.dropout = nn.Dropout(dropout)

    def forward(self, src: Tensor) -> Tuple[Tensor]:
        embedded = self.dropout(self.embedding(src))
        outputs, hidden = self.rnn(embedded)
        # Last layer's forward (-2) and backward (-1) final states, fused into
        # a single decoder-sized initial state.
        last_forward = hidden[-2, :, :]
        last_backward = hidden[-1, :, :]
        hidden = torch.tanh(self.fc(torch.cat((last_forward, last_backward), dim=1)))
        return outputs, hidden
class Attention(nn.Module):
    """Additive (Bahdanau-style) attention over encoder outputs.

    Produces a softmax-normalised weight per source position for a given
    decoder hidden state.
    """

    def __init__(self,
                 enc_hid_dim: int,
                 dec_hid_dim: int,
                 attn_dim: int):
        super().__init__()
        self.enc_hid_dim = enc_hid_dim
        self.dec_hid_dim = dec_hid_dim
        # Score input: bi-directional encoder state + decoder state.
        self.attn_in = (enc_hid_dim * 2) + dec_hid_dim
        self.attn = nn.Linear(self.attn_in, attn_dim)

    def forward(self,
                decoder_hidden: Tensor,
                encoder_outputs: Tensor) -> Tensor:
        src_len = encoder_outputs.shape[0]
        # Broadcast the decoder state across every source position.
        query = decoder_hidden.unsqueeze(1).repeat(1, src_len, 1)
        keys = encoder_outputs.permute(1, 0, 2)  # -> (batch, src_len, 2*enc_hid)
        energy = torch.tanh(self.attn(torch.cat((query, keys), dim=2)))
        scores = energy.sum(dim=2)
        return F.softmax(scores, dim=1)
class Decoder(nn.Module):
    """Single-step GRU decoder with additive attention over encoder outputs."""
    def __init__(self,
                 output_dim: int,
                 emb_dim: int,
                 enc_hid_dim: int,
                 dec_hid_dim: int,
                 dropout: int,
                 attention: nn.Module):
        super().__init__()
        self.emb_dim = emb_dim
        self.enc_hid_dim = enc_hid_dim
        self.dec_hid_dim = dec_hid_dim
        self.output_dim = output_dim
        self.dropout = dropout
        self.attention = attention
        self.embedding = nn.Embedding(output_dim, emb_dim)
        # GRU input = [embedded previous token ; attention-weighted context].
        self.rnn = nn.GRU((enc_hid_dim * 2) + emb_dim, dec_hid_dim)
        self.out = nn.Linear(self.attention.attn_in + emb_dim, output_dim)
        self.dropout = nn.Dropout(dropout)
    def _weighted_encoder_rep(self,
                              decoder_hidden: Tensor,
                              encoder_outputs: Tensor) -> Tensor:
        # Attention weights -> convex combination of encoder states via bmm.
        a = self.attention(decoder_hidden, encoder_outputs)
        a = a.unsqueeze(1)
        encoder_outputs = encoder_outputs.permute(1, 0, 2)
        weighted_encoder_rep = torch.bmm(a, encoder_outputs)
        weighted_encoder_rep = weighted_encoder_rep.permute(1, 0, 2)
        return weighted_encoder_rep
    def forward(self,
                input: Tensor,
                decoder_hidden: Tensor,
                encoder_outputs: Tensor) -> Tuple[Tensor]:
        # Add a length-1 sequence axis so a single token fits the GRU API.
        input = input.unsqueeze(0)
        embedded = self.dropout(self.embedding(input))
        weighted_encoder_rep = self._weighted_encoder_rep(decoder_hidden,
                                                          encoder_outputs)
        rnn_input = torch.cat((embedded, weighted_encoder_rep), dim = 2)
        output, decoder_hidden = self.rnn(rnn_input, decoder_hidden.unsqueeze(0))
        embedded = embedded.squeeze(0)
        output = output.squeeze(0)
        weighted_encoder_rep = weighted_encoder_rep.squeeze(0)
        # Project [GRU output ; context ; embedding] to vocabulary logits.
        output = self.out(torch.cat((output,
                                     weighted_encoder_rep,
                                     embedded), dim = 1))
        return output, decoder_hidden.squeeze(0)
class Seq2Seq(nn.Module):
    """Wires encoder and decoder together and unrolls over the target length."""
    def __init__(self,
                 encoder: nn.Module,
                 decoder: nn.Module,
                 device: torch.device):
        super().__init__()
        self.encoder = encoder
        self.decoder = decoder
        self.device = device
    def forward(self,
                src: Tensor,
                trg: Tensor,
                teacher_forcing_ratio: float = 0.5) -> Tensor:
        """Return per-step vocabulary logits of shape (trg_len, batch, vocab).

        Step 0 of the output buffer stays zero (it corresponds to <sos>).
        """
        batch_size = src.shape[1]
        max_len = trg.shape[0]
        trg_vocab_size = self.decoder.output_dim
        outputs = torch.zeros(max_len, batch_size, trg_vocab_size).to(self.device)
        encoder_outputs, hidden = self.encoder(src)
        # The first input to the decoder is the <sos> token.
        output = trg[0,:]
        for t in range(1, max_len):
            output, hidden = self.decoder(output, hidden, encoder_outputs)
            outputs[t] = output
            # Teacher forcing: with the given probability feed the ground-truth
            # token for the next step instead of the model's own prediction.
            teacher_force = random.random() < teacher_forcing_ratio
            top1 = output.max(1)[1]
            output = (trg[t] if teacher_force else top1)
        return outputs
INPUT_DIM = len(SRC.vocab)
OUTPUT_DIM = len(TRG.vocab)
# Original tutorial hyper-parameters, kept for reference:
# ENC_EMB_DIM = 256
# DEC_EMB_DIM = 256
# ENC_HID_DIM = 512
# DEC_HID_DIM = 512
# ATTN_DIM = 64
# ENC_DROPOUT = 0.5
# DEC_DROPOUT = 0.5
# Reduced sizes so the demo trains quickly (e.g. on CPU).
ENC_EMB_DIM = 32
DEC_EMB_DIM = 32
ENC_HID_DIM = 64
DEC_HID_DIM = 64
ATTN_DIM = 8
ENC_DROPOUT = 0.5
DEC_DROPOUT = 0.5
enc = Encoder(INPUT_DIM, ENC_EMB_DIM, ENC_HID_DIM, DEC_HID_DIM, ENC_DROPOUT)
attn = Attention(ENC_HID_DIM, DEC_HID_DIM, ATTN_DIM)
dec = Decoder(OUTPUT_DIM, DEC_EMB_DIM, ENC_HID_DIM, DEC_HID_DIM, DEC_DROPOUT, attn)
model = Seq2Seq(enc, dec, device).to(device)
def init_weights(m: nn.Module):
    """Initialise weight tensors from N(0, 0.01); zero every other parameter."""
    for name, param in m.named_parameters():
        is_weight = 'weight' in name
        if is_weight:
            nn.init.normal_(param.data, mean=0, std=0.01)
        else:
            nn.init.constant_(param.data, 0)
model.apply(init_weights)
optimizer = optim.Adam(model.parameters())
def count_parameters(model: nn.Module):
    """Return the number of trainable scalar parameters in *model*."""
    trainable = (p.numel() for p in model.parameters() if p.requires_grad)
    return sum(trainable)
print(f'The model has {count_parameters(model):,} trainable parameters')
######################################################################
# ์ฐธ๊ณ : ์ธ์ด ๋ฒ์ญ์ ์ฑ๋ฅ ์ ์๋ฅผ ๊ธฐ๋กํ๋ ค๋ฉด, ``nn.CrossEntropyLoss`` ํจ์๊ฐ ๋จ์ํ
# ํจ๋ฉ์ ์ถ๊ฐํ๋ ๋ถ๋ถ์ ๋ฌด์ํ ์ ์๋๋ก ํด๋น ์์ธ๋ค์ ์๋ ค์ค์ผ ํฉ๋๋ค.
PAD_IDX = TRG.vocab.stoi['<pad>']
criterion = nn.CrossEntropyLoss(ignore_index=PAD_IDX)
######################################################################
# ๋ง์ง๋ง์ผ๋ก ์ด ๋ชจ๋ธ์ ํ๋ จํ๊ณ ํ๊ฐํฉ๋๋ค :
import math
import time
def train(model: nn.Module,
          iterator: BucketIterator,
          optimizer: optim.Optimizer,
          criterion: nn.Module,
          clip: float):
    """Run one training epoch over *iterator*; return the mean batch loss."""
    model.train()
    epoch_loss = 0
    for _, batch in enumerate(iterator):
        src = batch.src
        trg = batch.trg
        optimizer.zero_grad()
        output = model(src, trg)
        # Drop the initial <sos> step and flatten for the token-level loss.
        output = output[1:].view(-1, output.shape[-1])
        trg = trg[1:].view(-1)
        loss = criterion(output, trg)
        loss.backward()
        # Gradient clipping guards against exploding RNN gradients.
        torch.nn.utils.clip_grad_norm_(model.parameters(), clip)
        optimizer.step()
        epoch_loss += loss.item()
    return epoch_loss / len(iterator)
def evaluate(model: nn.Module,
             iterator: BucketIterator,
             criterion: nn.Module):
    """Compute the mean batch loss over *iterator* without updating weights."""
    model.eval()
    epoch_loss = 0
    with torch.no_grad():
        for _, batch in enumerate(iterator):
            src = batch.src
            trg = batch.trg
            output = model(src, trg, 0)  # turn off teacher forcing
            # Same reshaping as train(): skip <sos>, flatten to token level.
            output = output[1:].view(-1, output.shape[-1])
            trg = trg[1:].view(-1)
            loss = criterion(output, trg)
            epoch_loss += loss.item()
    return epoch_loss / len(iterator)
def epoch_time(start_time: int,
               end_time: int):
    """Split an elapsed wall-clock interval into whole (minutes, seconds)."""
    elapsed = end_time - start_time
    mins, secs = divmod(int(elapsed), 60)
    return mins, secs
N_EPOCHS = 10
CLIP = 1
# NOTE(review): best_valid_loss is initialised but never updated — checkpoint
# saving on validation improvement was likely intended here.
best_valid_loss = float('inf')
for epoch in range(N_EPOCHS):
    start_time = time.time()
    train_loss = train(model, train_iterator, optimizer, criterion, CLIP)
    valid_loss = evaluate(model, valid_iterator, criterion)
    end_time = time.time()
    epoch_mins, epoch_secs = epoch_time(start_time, end_time)
    # PPL = exp(cross-entropy): the conventional language-model metric.
    print(f'Epoch: {epoch+1:02} | Time: {epoch_mins}m {epoch_secs}s')
    print(f'\tTrain Loss: {train_loss:.3f} | Train PPL: {math.exp(train_loss):7.3f}')
    print(f'\t Val. Loss: {valid_loss:.3f} | Val. PPL: {math.exp(valid_loss):7.3f}')
test_loss = evaluate(model, test_iterator, criterion)
print(f'| Test Loss: {test_loss:.3f} | Test PPL: {math.exp(test_loss):7.3f} |')
######################################################################
# ๋ค์ ๋จ๊ณ
# --------------
#
# - ``torchtext`` ๋ฅผ ์ฌ์ฉํ Ben Trevett์ ํํ ๋ฆฌ์ผ์ `์ด๊ณณ <https://github.com/bentrevett/>`__ ์์ ํ์ธํ ์ ์์ต๋๋ค.
# - ``nn.Transformer`` ์ ``torchtext`` ์ ๋ค๋ฅธ ๊ธฐ๋ฅ๋ค์ ์ด์ฉํ ๋ค์ ๋จ์ด ์์ธก์ ํตํ ์ธ์ด ๋ชจ๋ธ๋ง ํํ ๋ฆฌ์ผ์ ์ดํด๋ณด์ธ์. | uramoon/oss15 | docs/_downloads/e733d8cec5d7c07a409a12a4273a4a28/torchtext_translation_tutorial.py | torchtext_translation_tutorial.py | py | 17,278 | python | ko | code | 0 | github-code | 90 |
25254478412 | import sys
from itertools import combinations
numList = [int(sys.stdin.readline()) for i in range(9)]
sumList = sum(numList) - 100
for comb in combinations(numList, 2):
if sum(comb) == sumList :
numList.remove(comb[0])
numList.remove(comb[1])
break
print('\n'.join(map(str, numList))) | choinara0/Algorithm | Baekjoon/BruteForce Algorithm/3040๋ฒ - ๋ฐฑ์ค ๊ณต์ฃผ์ ์ผ๊ณฑ ๋์์ด/3040๋ฒ - ๋ฐฑ์ค ๊ณต์ฃผ์ ์ผ๊ณฑ ๋์์ด.py | 3040๋ฒ - ๋ฐฑ์ค ๊ณต์ฃผ์ ์ผ๊ณฑ ๋์์ด.py | py | 315 | python | en | code | 0 | github-code | 90 |
43954736860 | import re
from collections import *
from functools import *
# Advent of Code 2021, day 8: seven-segment display decoding.
inp = []
arr = []
for l in open("i1"):
    l = l.strip()
    a,b = l.split("|")
    arr.append(b.split())  # four output digit patterns (right of '|')
    inp.append(a.split())  # ten unique signal patterns (left of '|')
# Part 1: count outputs whose segment count uniquely identifies 1, 7, 4 or 8.
res = 0
for x in arr:
    for y in x:
        if len(y) in (2,3,4,7):
            res += 1
print(res)
# Part 2: deduce each digit's wire set per line, decode the 4-digit outputs,
# and sum them.
res = 0
for i, x in enumerate(arr):
    y = inp[i]
    d = {}
    # Sorting by length guarantees 1/7/4/8 are resolved before the ambiguous
    # 5- and 6-segment digits, which are disambiguated against them below.
    for w in sorted(y, key=len):
        word = set(w)
        size = len(w)
        if size == 2:
            d[1] = word
        elif size == 3:
            d[7] = word
        elif size == 4:
            d[4] = word
        elif size == 7:
            d[8] = word
        elif size == 5:
            # 3 contains all of 7's segments; 5 shares three of 4's; else 2.
            if len(word & d[7]) == 3:
                d[3] = word
            elif len(word & d[4]) == 3:
                d[5] = word
            else:
                d[2] = word
        elif size == 6:
            # 6 and 9 contain all of 5's segments; 6 lacks one of 7's.
            if len(word & d[5]) == 5:
                if len(word & d[7]) == 2:
                    d[6] = word
                else:
                    d[9] = word
            else:
                d[0] = word
    string = ''.join([str(k) for ele in x for k in d.keys() if d[k] == set(ele)])
    res += int(string)
print(res)
| Scheir/AdventOfCode | AoC21/d8/8.py | 8.py | py | 1,184 | python | en | code | 0 | github-code | 90 |
18363814759 | N = int(input())
P = list(map(int,input().split()))
R = sorted(P)
import copy
answer = 'NO'
for i in range(0,N-1):
for j in range(i+1,N):
Q = copy.deepcopy(P)
A = Q[i]
B = Q[j]
Q[i] = B
Q[j] = A
if Q == R:
answer = 'YES'
break
if P == R:
answer = 'YES'
print(answer) | Aasthaengg/IBMdataset | Python_codes/p02958/s236343760.py | s236343760.py | py | 347 | python | en | code | 0 | github-code | 90 |
11871104175 | import json
from datetime import datetime
from django.test import TestCase
from blog import views
class BlogURLTest(TestCase):
    """Smoke tests for /blog/ URL routing and template selection."""
    def test_url_blog_redirection(self):
        # /blog/ resolves and returns HTTP 200.
        response = self.client.get("/blog/")
        self.assertEqual(response.status_code, 200)
    def test_blogs_template_is_called(self):
        # The view renders the blogs.html template.
        with self.assertTemplateUsed("blogs.html"):
            response = self.client.get("/blog/")
            self.assertEqual(response.status_code, 200)
class BlogModelFullQueryOneBasicBlog(TestCase):
    """blog_model_full_query() should return the single fixture entry's fields."""
    fixtures = ["one_blog.json"]
    def test_returns_one_blog_with_fields(self):
        # Build the expectation from the same JSON fixture Django installed.
        with open("./blog/fixtures/one_blog.json", encoding="utf-8") as json_fixtures:
            fixtures_list = json.load(json_fixtures)
        expected_entries = [
            json_entry["fields"]
            for json_entry in fixtures_list
            if json_entry["model"] == "blog.Blog"
        ]
        # The view presumably blanks the text and stamps today's date for this
        # fixture — TODO confirm against views.blog_model_full_query().
        for entry in expected_entries:
            entry["text"] = ""
            entry["date"] = datetime.now().date()
        actual_entries = views.blog_model_full_query()
        self.assertListEqual(actual_entries, expected_entries)
class BlogModelFullQueryThreeBlogs(TestCase):
    """blog_model_full_query() should return all three fixture entries' fields."""
    fixtures = ["three_blogs.json"]
    def test_returns_three_blogs_with_fields(self):
        # Build the expectation from the same JSON fixture Django installed.
        with open(
            "./blog/fixtures/three_blogs.json", encoding="utf-8"
        ) as json_fixtures:
            fixtures_list = json.load(json_fixtures)
        expected_entries = [
            json_entry["fields"]
            for json_entry in fixtures_list
            if json_entry["model"] == "blog.Blog"
        ]
        # Fixture dates are ISO strings; the view returns date objects.
        for entry in expected_entries:
            entry["date"] = datetime.strptime(entry["date"], "%Y-%m-%d").date()
        actual_entries = views.blog_model_full_query()
        self.assertListEqual(actual_entries, expected_entries)
# to run the tests on command line
# python ./manage.py test blog.tests.test_views
#
# About the tests of BlogURLTest class:
# The first test "test_url_blog_redirection" tests that
# the http://localhost:8000/blog/ calls something.
# The second test "test_blogs_template_is_called" tests that
# the http://localhost:8000/blog/ calls the blogs.html template
# The tests about the content of the templates are in the test_templates.py module
| xrochard/personal_portfolio | blog/tests/test_views.py | test_views.py | py | 2,296 | python | en | code | 0 | github-code | 90 |
17933261429 | import sys
sys.setrecursionlimit(4100000)
import math
INF = 10**9
def main():
    """Read two whitespace-separated tokens and print their lexicographic
    relation: '<', '=' or '>'."""
    left, right = input().split()
    if left < right:
        print('<')
    elif left > right:
        print('>')
    else:
        print('=')
if __name__ == '__main__':
main()
| Aasthaengg/IBMdataset | Python_codes/p03547/s840871523.py | s840871523.py | py | 244 | python | en | code | 0 | github-code | 90 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.