blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2
values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313
values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107
values | src_encoding stringclasses 20
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 4 6.02M | extension stringclasses 78
values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
fb8482304ccdd802e4bafa88b1cf9ccc15fecb76 | 76c31e0a8e23bd3641f7d2270124c532fbb85e6e | /Problems/1108.Defanging_an_IP_Address.py | be041ce2239c4499ed8e6d10018c28fcfbeb321d | [] | no_license | Kenfernandezjr/Leetcode | f916cc88157352f62ca55b688f15bb46c0779c79 | efedbaa299c6d57514f91d4f047c645e228ff4ad | refs/heads/master | 2023-08-19T10:58:55.410748 | 2021-10-01T21:17:28 | 2021-10-01T21:17:28 | 288,283,770 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 480 | py | class Solution:
def defangIPaddr(self, address: str) -> str:
# completed this 3 different ways
# First
changeList = list(address)
for i in range(len(changeList)):
if changeList[i] is ".":
changeList[i] = "[.]"
return("".join(changeList))
# Second
return "".join(["[.]" if i is "." else i for i in list(address)])
# Third
return address.replace(".", "[.]")
| [
"kenfernandezjr@github.com"
] | kenfernandezjr@github.com |
5f2966ec1f36c07526da05e9373204e613eb4e7c | bb8c3267f410e920ef234fd346357a12f5814156 | /import-components-container-tools-c9s-3.0.py | a2b3bff48cfa900d7196cd2819d1fc9f9c9f4b0e | [] | no_license | mmathesius/stream-module-testing | a4f1d9b7b7ccfc8e2daeb6794380ea07efa9743f | 8e5c5aa1d35313d8d87657783458a01866d71922 | refs/heads/main | 2023-06-06T00:10:12.179381 | 2021-06-15T16:45:03 | 2021-06-15T16:45:03 | 365,357,704 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,054 | py | #!/usr/bin/python3
# Usage: import-components.py namespace/component#ref [ ... ]
# When True, only re-upload lookaside cache contents; the git push is skipped.
resync_cache_only = False
# Overwritten from the --dry-run command line flag in __main__ below.
dry_run = None
# set if using an alternate destination namespace, None to use standard namespace
alt_ns = "temp"
import argparse
import git
import logging
import os
import pyrpkg
import random
import regex
import string
import sys
import tempfile
# path to the lib directory of a checkout of https://github.com/fedora-eln/distrobaker
sys.path = ["/home/merlinm/github/fedora-eln/distrobaker/lib"] + sys.path
import distrobaker
from distrobaker import (
clone_destination_repo,
configure_repo,
fetch_upstream_repo,
logger,
parse_sources,
repo_push,
split_module,
split_scmurl,
sync_repo_merge,
sync_repo_pull,
)
# brute force configuration: a hard-coded DistroBaker config mapping instead
# of one loaded from a config file.  The commented-out entries are the old
# pkgs.devel.redhat.com endpoints kept for reference.
c = {
    "main": {
        "source": {
            # "scm": "git://pkgs.devel.redhat.com",
            # "cache": {
            #     "url": "http://pkgs.devel.redhat.com/repo",
            #     "cgi": "http://pkgs.devel.redhat.com/lookaside/upload.cgi",
            #     "path": "%(name)s/%(filename)s/%(hashtype)s/%(hash)s/%(filename)s",
            # },
            "scm": "ssh://git@gitlab.com/redhat/centos-stream",
            "cache": {
                "url": "https://sources.stream.rdu2.redhat.com/sources",
                "cgi": "https://sources.stream.rdu2.redhat.com/lookaside/upload.cgi",
                "path": "%(name)s/%(filename)s/%(hashtype)s/%(hash)s/%(filename)s",
            },
        },
        "destination": {
            "scm": "ssh://git@gitlab.com/redhat/centos-stream",
            "cache": {
                "url": "https://sources.stream.rdu2.redhat.com/sources",
                "cgi": "https://sources.stream.rdu2.redhat.com/lookaside/upload.cgi",
                "path": "%(name)s/%(filename)s/%(hashtype)s/%(hash)s/%(filename)s",
            },
        },
        # Identity used for the sync commits.
        "git": {
            "author": "Merlin Mathesius",
            "email": "mmathesi@redhat.com",
            "message": "Component import",
        },
        "control": {
            "build": "false",
            "merge": "true",
            "exclude": {
                "rpms": {},
                "modules": {},
            },
        },
        # %-style templates expanded with component/stream/ref in
        # import_component() below.
        "defaults": {
            "rpms": {
                # "source": "%(component)s.git",
                "source": "%(component)s.git#c9s",
                # "destination": "%(component)s.git",
                "destination": "%(component)s.git#stream-container-tools-3.0-rhel-9.0.0-beta",
            },
            "modules": {
                "source": "%(component)s.git#%(stream)s",
                "destination": "%(component)s.git#%(stream)s",
                "rpms": {
                    # "source": "%(component)s.git",
                    "source": "%(component)s.git#c9s",
                    # "destination": "%(component)s.git#%(ref)s",
                    "destination": "%(component)s.git#stream-container-tools-3.0-rhel-9.0.0-beta",
                },
            },
            "cache": {
                "source": "%(component)s",
                "destination": "%(component)s",
            },
        },
    },
    # Per-component overrides; empty, so the defaults above always apply.
    "comps": {
        "rpms": {},
        "modules": {},
    },
}
# copy configurable values into lib/distrobaker
distrobaker.c = c
# extract default value from lib/distrobaker
retry = distrobaker.retry
# Local checkout directory template, expanded per component.
repo_base = "/home/merlinm/stream-module-testing/repos/%(component)s"
logging.basicConfig(level=logging.DEBUG)
# revised sync_cache() from lib/distrobaker that allows an alternate
# destination namespace to be specified
def sync_cache(comp, sources, ns="rpms", dns=None, scacheurl=None):
    """Synchronizes lookaside cache contents for the given component.

    Expects a set of (filename, hash, hashtype) tuples to synchronize, as
    returned by parse_sources().  Each file missing from the destination
    cache is downloaded from the source cache into a temporary directory
    and uploaded to the destination cache, with up to `retry` attempts
    per file.

    :param comp: The component name
    :param sources: The set of source tuples
    :param ns: The component namespace
    :param dns: An alternate destination namespace to use when uploading,
                defaults to value of 'ns'
    :param scacheurl: Optional source lookaside cache url for modular RPM
                      components
    :returns: The number of files processed, or None on error
    """
    dns = dns if dns else ns
    if "main" not in c:
        logger.critical("DistroBaker is not configured, aborting.")
        return None
    if comp in c["main"]["control"]["exclude"][ns]:
        logger.critical(
            "The component %s/%s is excluded from sync, aborting.", ns, comp
        )
        return None
    logger.debug("Synchronizing %d cache file(s) for %s/%s.", len(sources), ns, comp)
    # A custom source cache URL is only informational; the configured one is
    # always used, so mismatches are just warned about.
    if scacheurl:
        if scacheurl != c["main"]["source"]["cache"]["url"]:
            logger.warning(
                "The custom source lookaside cache URL for %s/%s (%s) doesn't "
                "match configuration (%s), ignoring.",
                ns,
                comp,
                scacheurl,
                c["main"]["source"]["cache"]["url"],
            )
    scache = pyrpkg.lookaside.CGILookasideCache(
        "sha512",
        c["main"]["source"]["cache"]["url"],
        c["main"]["source"]["cache"]["cgi"],
    )
    scache.download_path = c["main"]["source"]["cache"]["path"]
    dcache = pyrpkg.lookaside.CGILookasideCache(
        "sha512",
        c["main"]["destination"]["cache"]["url"],
        c["main"]["destination"]["cache"]["cgi"],
    )
    dcache.download_path = c["main"]["destination"]["cache"]["path"]
    tempdir = tempfile.TemporaryDirectory(prefix="cache-{}-{}-".format(ns, comp))
    logger.debug("Temporary directory created: %s", tempdir.name)
    if comp in c["comps"][ns]:
        scname = c["comps"][ns][comp]["cache"]["source"]
        dcname = c["comps"][ns][comp]["cache"]["destination"]
    else:
        scname = c["main"]["defaults"]["cache"]["source"] % {"component": comp}
        # BUG FIX: the destination cache name was expanded from the "source"
        # template; use the "destination" template, mirroring the per-component
        # branch above.  (Identical templates in the current config masked
        # this, but it breaks as soon as the two templates diverge.)
        dcname = c["main"]["defaults"]["cache"]["destination"] % {"component": comp}
    for s in sources:
        # There's no API for this and .upload doesn't let us override it
        dcache.hashtype = s[2]
        # for/else: the else runs only if no attempt succeeded (no break).
        for attempt in range(retry):
            try:
                if not dcache.remote_file_exists(
                    "{}/{}".format(dns, dcname), s[0], s[1]
                ):
                    logger.debug(
                        "File %s for %s/%s (%s/%s) not available in the "
                        "destination cache, downloading.",
                        s[0],
                        ns,
                        comp,
                        dns,
                        dcname,
                    )
                    scache.download(
                        "{}/{}".format(ns, scname),
                        s[0],
                        s[1],
                        os.path.join(tempdir.name, s[0]),
                        hashtype=s[2],
                    )
                    logger.debug(
                        "File %s for %s/%s (%s/%s) successfully downloaded. "
                        "Uploading to the destination cache.",
                        s[0],
                        ns,
                        comp,
                        ns,
                        scname,
                    )
                    if not dry_run:
                        dcache.upload(
                            "{}/{}".format(dns, dcname),
                            os.path.join(tempdir.name, s[0]),
                            s[1],
                        )
                        # Fixed a stray ")" in this log message.
                        logger.debug(
                            "File %s for %s/%s (%s/%s) successfully uploaded "
                            "to the destination cache.",
                            s[0],
                            ns,
                            comp,
                            dns,
                            dcname,
                        )
                    else:
                        logger.debug(
                            "Running in dry run mode, not uploading %s for %s/%s (%s/%s).",
                            s[0],
                            ns,
                            comp,
                            dns,
                            dcname,
                        )
                else:
                    logger.debug(
                        "File %s for %s/%s (%s/%s) already uploaded, skipping.",
                        s[0],
                        ns,
                        comp,
                        dns,
                        dcname,
                    )
            except Exception:
                logger.warning(
                    "Failed attempt #%d/%d handling %s for %s/%s (%s/%s -> %s/%s), retrying.",
                    attempt + 1,
                    retry,
                    s[0],
                    ns,
                    comp,
                    ns,
                    scname,
                    dns,
                    dcname,
                    exc_info=True,
                )
            else:
                break
        else:
            logger.error(
                "Exhausted lookaside cache synchronization attempts for %s/%s "
                "while working on %s, skipping.",
                ns,
                comp,
                s[0],
            )
            return None
    return len(sources)
def import_component(bscm):
    """Import (sync) one component from the source SCM to the destination SCM,
    including its lookaside cache files.

    :param bscm: split SCM mapping with "ns", "comp" and "ref" keys, as
                 produced by split_scmurl().
    :returns: None.  Failures are logged and cause an early return; success
              also falls through to return None.
    """
    ns = bscm["ns"]
    comp = bscm["comp"]
    ref = bscm["ref"]
    logger.info("Importing %s/%s#%s.", ns, comp, ref)
    # Modules are named "name:stream"; split so the templates below can use
    # both parts.  RPMs have no stream.
    if ns == "modules":
        ms = split_module(comp)
        cname = ms["name"]
        sname = ms["stream"]
    else:
        cname = comp
        sname = ""
    # Per-component overrides take precedence over the global defaults.
    if comp in c["comps"][ns]:
        csrc = c["comps"][ns][comp]["source"]
        cdst = c["comps"][ns][comp]["destination"]
    else:
        csrc = c["main"]["defaults"][ns]["source"]
        cdst = c["main"]["defaults"][ns]["destination"]
    # append #ref if not already present
    if "#" not in csrc:
        csrc += "#%(ref)s"
    if "#" not in cdst:
        cdst += "#%(ref)s"
    csrc = csrc % {
        "component": cname,
        "stream": sname,
        "ref": ref,
    }
    cdst = cdst % {
        "component": cname,
        "stream": sname,
        "ref": ref,
    }
    sscm = split_scmurl("{}/{}/{}".format(c["main"]["source"]["scm"], ns, csrc))
    # alt_ns, when set, redirects the destination into an alternate namespace.
    dscm = split_scmurl(
        "{}/{}/{}".format(
            c["main"]["destination"]["scm"], alt_ns if alt_ns else ns, cdst
        )
    )
    dscm["ref"] = dscm["ref"] if dscm["ref"] else "master"
    logger.debug("source scm = %s", sscm)
    logger.debug("destination scm = %s", dscm)
    # Local working directory for this component's checkout.
    gitdir = repo_base % {
        "component": cname,
        "stream": sname,
        "ref": ref,
    }
    logger.debug("repo directory = %s", gitdir)
    # clone destination repo
    repo = clone_destination_repo(ns, comp, dscm, gitdir)
    if repo is None:
        logger.error("Failed to clone destination repo for %s/%s, skipping.", ns, comp)
        return None
    if fetch_upstream_repo(ns, comp, sscm, repo) is None:
        logger.error("Failed to fetch upstream repo for %s/%s, skipping.", ns, comp)
        return None
    if configure_repo(ns, comp, repo) is None:
        logger.error(
            "Failed to configure the git repository for %s/%s, skipping.",
            ns,
            comp,
        )
        return None
    # Snapshot the destination "sources" file BEFORE syncing so we can later
    # upload only the cache files the destination does not yet reference.
    logger.debug("Gathering destination files for %s/%s.", ns, comp)
    dsrc = parse_sources(comp, ns, os.path.join(repo.working_dir, "sources"))
    if dsrc is None:
        logger.error(
            "Error processing the %s/%s destination sources file, skipping.",
            ns,
            comp,
        )
        return None
    # Sync strategy is configured globally: merge the histories or plain pull.
    if c["main"]["control"]["merge"]:
        if sync_repo_merge(ns, comp, repo, bscm, sscm, dscm) is None:
            logger.error("Failed to sync merge repo for %s/%s, skipping.", ns, comp)
            return None
    else:
        if sync_repo_pull(ns, comp, repo, bscm) is None:
            logger.error("Failed to sync pull repo for %s/%s, skipping.", ns, comp)
            return None
    # Re-read "sources" after the sync; it now reflects the source branch.
    logger.debug("Gathering source files for %s/%s.", ns, comp)
    ssrc = parse_sources(comp, ns, os.path.join(repo.working_dir, "sources"))
    if ssrc is None:
        logger.error(
            "Error processing the %s/%s source sources file, skipping.",
            ns,
            comp,
        )
        return None
    # In resync mode, re-upload everything the destination referenced;
    # otherwise upload only the newly referenced files.
    if resync_cache_only:
        srcdiff = dsrc
    else:
        srcdiff = ssrc - dsrc
    if srcdiff:
        logger.debug("Source files for %s/%s differ.", ns, comp)
        if sync_cache(comp, srcdiff, ns, dns=alt_ns) is None:
            logger.error("Failed to synchronize sources for %s/%s, skipping.", ns, comp)
            return None
    else:
        logger.debug("Source files for %s/%s are up-to-date.", ns, comp)
    logger.debug("Component %s/%s successfully synchronized.", ns, comp)
    if not resync_cache_only:
        if repo_push(ns, comp, repo, dscm) is None:
            logger.error("Failed to push %s/%s, skipping.", ns, comp)
            return None
    else:
        logger.info(
            "Re-syncing cache only; not attempting to push repo for %s/%s.", ns, comp
        )
    logger.info("Successfully synchronized %s/%s.", ns, comp)
if __name__ == "__main__":
    # CLI entry point: each positional argument is an SCM fragment of the
    # form namespace/component#ref.
    parser = argparse.ArgumentParser(
        description="Import components into the redhat/centos-stream/temp namespace in gitlab."
    )
    parser.add_argument(
        "comps", metavar="comps", nargs="+", help="The components to import"
    )
    parser.add_argument(
        "-n",
        "--dry-run",
        action="store_true",
        help="Do not upload or push",
        default=False,
    )
    args = parser.parse_args()
    distrobaker.loglevel(logging.DEBUG)
    logger.debug("Logging configured")
    # Propagate the dry-run flag into the distrobaker library as well as the
    # module-level flag used by sync_cache().
    distrobaker.dry_run = dry_run = args.dry_run
    if dry_run:
        logger.info("Dry run enabled. Nothing will be uploaded/pushed.")
    for rec in args.comps:
        logger.info("Processing argument %s.", rec)
        bscm = split_scmurl(rec)
        import_component(bscm)
| [
"mmathesi@redhat.com"
] | mmathesi@redhat.com |
33d009eea8b27f5d5ce8f5bd571e075cd8fbe8ec | 3f762b26d57b0969969f458143653c25367367a0 | /blueberry/tests/connectivity/bluetooth_connection_test.py | 767af38da620e81cfdf0ae8dc2f32fae28bd1784 | [
"Apache-2.0"
] | permissive | wao/android_system_bt | 6c9f6f1a47294cb338d280e7a96234947fe485de | 4bbd11f366c720f010ab60020a0a6a8eea64c850 | refs/heads/master | 2023-08-17T05:03:42.132485 | 2021-08-19T19:24:58 | 2021-09-16T06:07:46 | 407,167,081 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,177 | py | """Tests for Bluetooth connection with Android device and a Bluetooth device."""
import time
from mobly import test_runner
from blueberry.utils import blueberry_base_test
# Connection state change sleep time in seconds.
CONNECTION_STATE_CHANGE_SLEEP_SEC = 5
class BluetoothConnectionTest(blueberry_base_test.BlueberryBaseTest):
  """Test Class for Bluetooth connection testing.

  Attributes:
    primary_device: A primary device under test.
    derived_bt_device: A Bluetooth device which is used to connected to the
      primary device in the test.
  """

  def setup_class(self):
    """Pairs and connects the two devices once for the whole test class."""
    super().setup_class()
    self.primary_device = self.android_devices[0]
    self.primary_device.init_setup()
    self.derived_bt_device = self.derived_bt_devices[0]
    # Start from a clean slate on the remote device before pairing.
    self.derived_bt_device.factory_reset_bluetooth()
    self.mac_address = self.derived_bt_device.get_bluetooth_mac_address()
    self.derived_bt_device.activate_pairing_mode()
    self.primary_device.pair_and_connect_bluetooth(self.mac_address)

  def setup_test(self):
    """Verifies both audio profiles are connected before every test."""
    super().setup_test()
    # Checks if A2DP and HSP profiles are connected.
    self.wait_for_a2dp_and_hsp_connection_state(connected=True)
    # Buffer between tests.
    time.sleep(CONNECTION_STATE_CHANGE_SLEEP_SEC)

  def wait_for_a2dp_and_hsp_connection_state(self, connected):
    """Asserts that A2DP and HSP connections are in the expected state.

    Args:
      connected: bool, True if the expected state is connected else False.
    """
    self.primary_device.wait_for_a2dp_connection_state(self.mac_address,
                                                       connected)
    self.primary_device.wait_for_hsp_connection_state(self.mac_address,
                                                      connected)

  def test_disconnect_and_connect(self):
    """Test for DUT disconnecting and then connecting to the remote device."""
    self.primary_device.log.info('Disconnecting the device "%s"...' %
                                 self.mac_address)
    self.primary_device.disconnect_bluetooth(self.mac_address)
    self.wait_for_a2dp_and_hsp_connection_state(connected=False)
    # Buffer time for connection state change.
    time.sleep(CONNECTION_STATE_CHANGE_SLEEP_SEC)
    self.primary_device.log.info('Connecting the device "%s"...' %
                                 self.mac_address)
    self.primary_device.connect_bluetooth(self.mac_address)
    self.wait_for_a2dp_and_hsp_connection_state(connected=True)

  def test_reconnect_when_enabling_bluetooth(self):
    """Test for DUT reconnecting to the remote device when Bluetooth enabled."""
    self.primary_device.log.info('Turning off Bluetooth...')
    self.primary_device.sl4a.bluetoothToggleState(False)
    self.primary_device.wait_for_bluetooth_toggle_state(enabled=False)
    self.primary_device.wait_for_disconnection_success(self.mac_address)
    time.sleep(CONNECTION_STATE_CHANGE_SLEEP_SEC)
    self.primary_device.log.info('Turning on Bluetooth...')
    self.primary_device.sl4a.bluetoothToggleState(True)
    self.primary_device.wait_for_bluetooth_toggle_state(enabled=True)
    # Expect the DUT to reconnect automatically once Bluetooth is back on.
    self.primary_device.wait_for_connection_success(self.mac_address)
    self.wait_for_a2dp_and_hsp_connection_state(connected=True)

  def test_reconnect_when_connected_device_powered_on(self):
    """Test for the remote device reconnecting to DUT.

    Tests that DUT is disconnected when the remote device is powered off,
    and then reconnected when the remote device is powered on.
    """
    self.primary_device.log.info(
        'The connected device "%s" is being powered off...' % self.mac_address)
    self.derived_bt_device.power_off()
    self.primary_device.wait_for_disconnection_success(self.mac_address)
    self.wait_for_a2dp_and_hsp_connection_state(connected=False)
    time.sleep(CONNECTION_STATE_CHANGE_SLEEP_SEC)
    self.derived_bt_device.power_on()
    self.primary_device.log.info(
        'The connected device "%s" is being powered on...' % self.mac_address)
    self.primary_device.wait_for_connection_success(self.mac_address)
    self.wait_for_a2dp_and_hsp_connection_state(connected=True)
if __name__ == '__main__':
  # Hand control to Mobly's test runner, which discovers the test_* methods.
  test_runner.main()
| [
"jizhengchu@google.com"
] | jizhengchu@google.com |
5f1a9598ca6ede14f8e919dfc37e6770ef5e5f5b | 28576c22f2eeecfc67a0919254258737598f77a2 | /python/hamcalc/stdio/trig.py | 23c39d4e3f0288ef63689cb39a2d27efc55a30bd | [] | no_license | slott56/HamCalc-2.1 | 5e3b40b302c13569806fe2f18734e639b17a988e | 382724dfcad867ed8c4134a93a6bbc1c83dc306b | refs/heads/master | 2020-04-25T21:55:51.298097 | 2013-07-16T13:24:33 | 2013-07-16T13:24:33 | 9,798,987 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,949 | py | """Trigonometric functions
"""
import hamcalc.math.trig as trig
from hamcalc.stdio import *
import math
import runpy
def functions( angle ):
    """Print a table of the related angles of `angle` and its six
    trigonometric function values.

    :param angle: angle in radians (the program's internal standard unit).
    """
    # The four related angles x, pi-x, pi+x, 2*pi-x share the same reference
    # angle, so the function values below apply (up to sign) to all of them.
    a0, a1, a2, a3 = (angle, math.pi-angle, math.pi+angle, 2*math.pi-angle)
    print( "TRIGONOMETRIC FUNCTIONS".center(80) )
    print()
    print(" ANGLES:" )
    print(" Deg/Min/Sec.......= {0:>12s} {1:>12s} {2:>12s} {3:>12s}".format(trig.DEG_MIN_SEC.from_std(a0), trig.DEG_MIN_SEC.from_std(a1), trig.DEG_MIN_SEC.from_std(a2), trig.DEG_MIN_SEC.from_std(a3)) )
    print(" Decimal degrees...= {0:12.6f} {1:12.6f} {2:12.6f} {3:12.6f}".format(trig.DEGREE.from_std(a0), trig.DEGREE.from_std(a1), trig.DEGREE.from_std(a2), trig.DEGREE.from_std(a3)) )
    print(" Radians...........= {0:12.6f} {1:12.6f} {2:12.6f} {3:12.6f}".format(trig.RADIAN.from_std(a0), trig.RADIAN.from_std(a1), trig.RADIAN.from_std(a2), trig.RADIAN.from_std(a3)) )
    print()
    print(" FUNCTIONS of all the above angles:" )
    print(" Sine..........Sin = {0:12.6f}".format( math.sin(a0) ) )
    print(" Cosine........Cos = {0:12.6f}".format( math.cos(a0) ) )
    print(" Tangent.......Tan = {0:12.6f}".format( math.tan(a0) ) )
    # Reciprocal functions are derived from sin/cos/tan.
    print(" Cotangent.....Cot = {0:12.6f}".format( 1/math.tan(a0) ) )
    print(" Secant........Sec = {0:12.6f}".format( 1/math.cos(a0) ) )
    print(" Cosecant......Csc = {0:12.6f}".format( 1/math.sin(a0) ) )
# Interactive menu loop: the user supplies an angle (in one of three units)
# or the value of one trig function, and the full table is printed.
# Note: `z` is reused both as the menu choice and as a numeric intermediate
# in the 'h'/'i' branches; by the time the loop condition is re-tested it can
# be a float, which simply compares unequal to 'z' and continues the loop.
print( trig.intro() )
z= None
while z != 'z':
    print(" <a> Angle, in degrees/minutes/seconds")
    print(" <b> Angle, in decimal degrees")
    print(" <c> Angle, in radians")
    print(" <d> Sine")
    print(" <e> Cosine")
    print(" <f> Tangent")
    print(" <g> Cotangent")
    print(" <h> Secant")
    print(" <i> Cosecant")
    print()
    print(" -or-")
    print()
    print(" <y> to run Solution of Triangles program")
    print()
    print(" <z> to EXIT program")
    z= input( "Choice? " )
    if z == 'a':
        angle_raw= input_float( "ENTER: Angle, in degrees minutes and seconds? " )
        # input_float returns None on bad input; re-show the menu.
        if angle_raw is None: continue
        angle= trig.DEG_MIN_SEC.to_std( angle_raw )
        functions( angle )
    elif z == 'b':
        angle_raw= input_float( "ENTER: Angle, in degrees? " )
        if angle_raw is None: continue
        angle= trig.DEGREE.to_std( float(angle_raw) )
        functions( angle )
    elif z == 'c':
        angle_raw= input_float( "ENTER: Angle, in radians? " )
        if angle_raw is None: continue
        angle= trig.RADIAN.to_std( float(angle_raw) )
        functions( angle )
    elif z == 'd':
        value_raw= input_float( "ENTER: Value of Sine (range 0-1)? " )
        if value_raw is None: continue
        angle= math.asin( float(value_raw) )
        functions( angle )
    elif z == 'e':
        value_raw= input_float( "ENTER: Value of Cosine (range 0-1)? " )
        if value_raw is None: continue
        angle= math.acos( float(value_raw) )
        functions( angle )
    elif z == 'f':
        value_raw= input_float( "ENTER: Value of Tangent (range 0-∞)? " )
        if value_raw is None: continue
        angle= math.atan( float(value_raw) )
        functions( angle )
    elif z == 'g':
        # arccot(v) computed as atan2(1, v).
        value_raw= input_float( "ENTER: Value of Cotangent (range 0-∞)? " )
        if value_raw is None: continue
        angle= math.atan2( 1, float(value_raw) )
        functions( angle )
    elif z == 'h':
        # arcsec(v) = pi/2 - arcsin(1/v), with arcsin via atan2.
        value_raw= input_float( "ENTER: Value of Secant (range 0-∞)? " )
        if value_raw is None: continue
        z= 1/float(value_raw)
        angle= math.pi/2-math.atan2(z,math.sqrt(1-z**2))
        functions( angle )
    elif z == 'i':
        # arccsc(v) = arcsin(1/v), with arcsin via atan2.
        value_raw= input_float( "ENTER: Value of Cosecant (range 0-∞)? " )
        if value_raw is None: continue
        z= 1/float(value_raw)
        angle= math.atan2(z,math.sqrt(1-z**2))
        functions( angle )
    elif z == 'y':
        runpy.run_module( 'hamcalc.stdio.solutri' )
| [
"slott56@gmail.com"
] | slott56@gmail.com |
08daa46e4e5fe6003d67697fdc33c22dab11bdcd | 55c250525bd7198ac905b1f2f86d16a44f73e03a | /Python/Flask/Book_evaluator/venv/Lib/site-packages/passlib/crypto/scrypt/__init__.py | 9fe2b4a0fa1ded521a65f67133294c7ff18329ed | [] | no_license | NateWeiler/Resources | 213d18ba86f7cc9d845741b8571b9e2c2c6be916 | bd4a8a82a3e83a381c97d19e5df42cbababfc66c | refs/heads/master | 2023-09-03T17:50:31.937137 | 2023-08-28T23:50:57 | 2023-08-28T23:50:57 | 267,368,545 | 2 | 1 | null | 2022-09-08T15:20:18 | 2020-05-27T16:18:17 | null | UTF-8 | Python | false | false | 129 | py | version https://git-lfs.github.com/spec/v1
oid sha256:e7834ebeec8f7f56f60e8475fe5ba818941616523db21b7e6649ac46e5bcf229
size 6854
| [
"nateweiler84@gmail.com"
] | nateweiler84@gmail.com |
83a1c88e7526001016e921efbf9661fccf5fea16 | b0ce657c3445ca572ab4cb04be400ddfdf5621b6 | /Code/.ipynb_checkpoints/Problem1-checkpoint.py | f21e4a7b3bebd4a5cc44858786d3bb7340c8c15b | [] | no_license | h-gokul/LaneDetection | 8b2811419c3c772ffae76cd18ae1d8e5379ecc05 | 96627f6f4a13bef93a306f310e613213820c683d | refs/heads/master | 2023-05-07T13:38:15.577836 | 2021-05-21T03:51:45 | 2021-05-21T03:51:45 | 369,406,726 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,098 | py | import cv2
from misc.utils import *
import numpy as np
import os
import matplotlib.pyplot as plt
import argparse
def vidRead(path = '../Data/Project2_Dataset1/Night Drive - 2689.mp4'):
    """Read every frame of the video at `path` into memory.

    Frames are converted from OpenCV's BGR channel order to RGB.
    :returns: list of RGB frames (numpy arrays); empty if the file
              cannot be opened.
    """
    imgs = []
    cap = cv2.VideoCapture(path)
    while(True):
        ret, frame = cap.read()
        if ret:
            # frame = cv2.undistort(frame, mtx, dist, None, mtx)
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            imgs.append(frame)
        else:
            # ret is False at end-of-stream (or on a read error).
            break
    cap.release()
    return imgs
def histogram(im_flat):
    """Compute a 256-bin intensity histogram of a flattened 8-bit image.

    :param im_flat: 1-D iterable of integer pixel values in [0, 255]
                    (e.g. a flattened uint8 image channel).
    :returns: length-256 float array; element i is the count of value i.
    """
    bins = 256
    # np.bincount replaces the original per-pixel Python loop with a single
    # vectorized C pass; minlength pads the result up to 256 bins.
    counts = np.bincount(np.asarray(im_flat, dtype=np.int64), minlength=bins)
    # Return float64 to match the original np.zeros(bins) accumulator dtype.
    return counts.astype(np.float64)
def cumulate_distribution(A):
    """Return the cumulative sum of A (the CDF of a histogram).

    :param A: 1-D sequence or array of numbers.
    :returns: numpy array where element i is sum(A[:i+1]).
    """
    # np.cumsum is the vectorized equivalent of the original running-sum
    # loop, and additionally handles an empty input (the loop indexed A[0]).
    return np.cumsum(A)
def normalize(c):
    """Linearly rescale the values of `c` onto the range [0, 255]."""
    values = np.asarray(c)
    lo = values.min()
    hi = values.max()
    return (values - lo) * 255 / (hi - lo)
def HistogramEqualization(im):
    """Equalize each channel of `im` independently via its normalized CDF.

    reference: https://github.com/torywalker/histogram-equalizer/blob/master/HistogramEqualization.ipynb

    :param im: HxWxC image array with integer pixel values in [0, 255].
    :returns: HxWxC int32 array of equalized channels (stacked with dstack).
    """
    for i in range(im.shape[2]):
        im_ = im[:,:,i]
        im_flat = im_.flatten()
        # Histogram -> CDF -> CDF rescaled to [0, 255]: the equalization LUT.
        h = histogram(im_flat)
        c = cumulate_distribution(h)
        # c_norm = np.int32(cv2.normalize(c,None, 0,255,cv2.NORM_MINMAX))
        c_norm = np.int32(normalize(c))
        # Map every pixel through the LUT by fancy-indexing with the pixel
        # values, then restore the 2-D shape (width from im.shape[1]).
        im_eq = c_norm[im_flat]
        im_eq = im_eq.reshape(-1,im.shape[1])
        if i==0:
            im_eqs = np.array(im_eq)
        else:
            im_eqs = np.dstack((im_eqs,im_eq))
    return im_eqs
def AdjustGamma(im_, gamma=1.0):
    """Apply gamma correction to an image via a 256-entry lookup table.

    :param im_: 8-bit image array (left unmodified; a copy is used).
    :param gamma: gamma value; >1 brightens, <1 darkens.
    :returns: gamma-corrected image of the same shape.
    """
    inv_gamma = 1.0 / gamma
    # Precompute out = 255 * (in/255) ** (1/gamma) for every intensity level.
    table = np.array(
        [((level / 255.0) ** inv_gamma) * 255 for level in np.arange(0, 256)]
    ).astype("uint8")
    # apply gamma correction using the lookup table
    im = im_.copy()
    return cv2.LUT(im, table)
def main():
    """CLI entry point: run histogram equalization (Mode 1) or gamma
    correction (Mode 2) over a video and write the result as a new video."""
    # Parse Command Line arguments
    Parser = argparse.ArgumentParser()
    Parser.add_argument('--DataPath', default="../Data/Project2_Dataset1/Night Drive - 2689.mp4", help='Data path of images, Default: ../Data/Project2_Dataset1/Night Drive - 2689.mp4')
    Parser.add_argument('--SavePath', default="../Outputs/Problem1/", help='Save file name, Default: ../Outputs/Problem1.mp4')
    Parser.add_argument('--Mode', default=1, help='1 for histogram_equalization, 2 gamma correction for , Default: 2')
    Args = Parser.parse_args()
    DataPath = Args.DataPath
    SavePath = Args.SavePath
    Mode = int(Args.Mode)
    if(not (os.path.isdir(SavePath))):
        print(SavePath, " was not present, creating the folder...")
        os.makedirs(SavePath)
    if Mode ==1:
        SaveFileName = SavePath + 'hist_eq.mp4'
    else:
        SaveFileName = SavePath + 'gamma_corr.mp4'
    cap = cv2.VideoCapture(DataPath)
    # Props 3 and 4 are CAP_PROP_FRAME_WIDTH / CAP_PROP_FRAME_HEIGHT.
    w = int(cap.get(3))
    h = int(cap.get(4))
    # print(cap, w, h)
    if Mode ==1:
        # Equalization is slow, so it runs on a 1/3-scale frame.
        w = w//3
        h = h//3
    ################### Load video writer ##############
    result = cv2.VideoWriter(SaveFileName,
                             cv2.VideoWriter_fourcc(*'DIVX'),
                             30, (w, h))
    ################### Run the code ##############
    i = 0
    while(True):
        ret, im = cap.read()
        if ret:
            i+=1
            if Mode ==1:
                im = cv2.resize(im, (w,h))
                out = HistogramEqualization(im)
                print('running equalization on frame :', i)
                # cv2.imshow('Histogram Equalization', out)
            elif Mode ==2:
                out = AdjustGamma(im, gamma = 2.0)
                print('running gamma correction on frame :', i)
                cv2.imshow('Gamma Correction', out)
            result.write(np.uint8(out))
            # 'q' aborts processing early.
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        else:
            break
    cap.release()
    result.release()
    cv2.destroyAllWindows()
if __name__ == '__main__':
    # Run only when executed as a script, not on import.
    main()
| [
"hgokul69@gmail.com"
] | hgokul69@gmail.com |
3e474975db04026ec4ff253fb4e9cae7cebcd831 | a9ce8afcd7f46865e8f164b054df6557a52f9d74 | /create_csv_files/create_primary_energy_minus_one.py | ded0771e5d3101e83ff22d8246ccf2be584d31a8 | [] | no_license | fatemetardasti96/variation_scenario | dc004766c517d72f986f58485df027d5d9cbf1e3 | 49b26cfde7d958e5499edc81d1016fdea462b879 | refs/heads/master | 2023-07-05T19:34:28.605476 | 2021-08-25T09:23:39 | 2021-08-25T09:23:39 | 393,187,590 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 587 | py | from write_to_csv import seperator_to_csv
def primary_energy_minus_one_detail_block(cwd):
    """Write the CSV time-series block for an unlimited (-1) primary resource.

    :param cwd: project root; the file is written under <cwd>/TimeSeries/.
    """
    date = "2016-01-01_00:00"
    # One row per table entry: comment header, base series, value series,
    # and the table terminator.
    rows = [
        ["#comment", "unlimited -1 primary ressource"],
        ["base", "#type", "DVP_const", "#data", date, 1E15],
        ["value", "#type", "DVP_linear", "#data", date, -1],
        ["#endtable"],
    ]
    target = cwd + '/TimeSeries/PrimaryEnergyUnlimited_minusOne.csv'
    seperator_to_csv(rows, target)
"fateme.tardasti@rwth-aachen.de"
] | fateme.tardasti@rwth-aachen.de |
669058b04ef29cc7831d55492242fc55d1df1197 | 464b867648ffa7afb444d9754cf4d1ffbf25d2bf | /Experimental_QtUI_Scripts/006_Tab_View/TabView_main.py | f48d33feb7a5e95ca09bff5d7c38a5b9fccb01a3 | [] | no_license | pks3kor/For_GitHub | b619fd7f19baa96d7232a0d35ce48c1355360547 | bafb2c15ff81fd2f3f90a57ac7b3467c86ac6a2e | refs/heads/master | 2021-01-25T09:20:52.146374 | 2018-06-10T14:44:04 | 2018-06-10T14:44:04 | 93,822,114 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 577 | py | """
Author : Pankaj soni
"""
from PySide import QtCore, QtGui
import sys
from Tab_View import Ui_Form
# Initialize main GUI and use its resources.
# NOTE: this file is Python 2 (print statement, old PySide bindings).
app = QtGui.QApplication(sys.argv)
Form = QtGui.QWidget()
ui = Ui_Form()
ui.setupUi(Form)
#############################
# Write your own function here and bind them with buttons usied in GUI form
def sayHello():
    # Simple demo handler for the first button.
    print "Hello there!!!"
# now bind the above functions with buttons
ui.pushButton.clicked.connect(sayHello)
# `quit` is the builtin; clicking the second button exits the app.
ui.pushButton_2.clicked.connect(quit)
# To display main form and GUI
Form.show()
# Enter the Qt event loop; exit the process with its return code.
sys.exit(app.exec_())
"pks3kor@gmail.com"
] | pks3kor@gmail.com |
02b1265e60e889ce02aff9853a966b296c3a3785 | 70cb4b240ce366c1b889ab6d5cf2f09e5fce6b3f | /webapp/src/app.py | ad5b53b154363d75ff23e0d1b96077a11b03d57b | [] | no_license | djwhatle/stock-cluster-apb | f6ed7e8cfe9a1ed69f08e2ad6fadbf89a042185c | 76ad7bcc26b3d3416b0df6269d8d4825844d8b01 | refs/heads/master | 2021-07-24T11:35:23.104123 | 2017-11-01T06:07:03 | 2017-11-01T06:07:03 | 107,734,790 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,224 | py | from bottle import route, request, run, get, static_file, template, view, debug
import os, boto3, string, random
# AWS connection settings for SQS, SNS and S3, read entirely from the
# environment (injected by the service bindings).  Missing values are
# reported at startup by validate_aws_vars().
aws_vars = {
    'sqs': {
        'access_key': os.environ.get('SQS_AWS_ACCESS_KEY'),
        'secret_key': os.environ.get('SQS_AWS_SECRET_KEY'),
        'queue_arn': os.environ.get('SQS_QUEUE_ARN'),
        'queue_name': os.environ.get('SQS_QUEUE_NAME'),
        'queue_url': os.environ.get('SQS_QUEUE_URL'),
        'region': os.environ.get('SQS_REGION')
    },
    'sns': {
        'access_key': os.environ.get('SNS_AWS_ACCESS_KEY'),
        'secret_key': os.environ.get('SNS_AWS_SECRET_KEY'),
        'topic_arn': os.environ.get('SNS_TOPIC_ARN'),
        'region': os.environ.get('SNS_AWS_REGION')
    },
    's3': {
        'access_key': os.environ.get('S3_BUCKET_AWS_ACCESS_KEY_ID'),
        'secret_key': os.environ.get('S3_BUCKET_AWS_SECRET_ACCESS_KEY'),
        'bucket_arn': os.environ.get('S3_BUCKET_ARN'),
        'bucket_name': os.environ.get('S3_BUCKET_NAME'),
        'region': os.environ.get('S3_BUCKET_REGION')
    }
}
@route('/')
@route('/index')
@view('index')
def index():
    # Flags tell the index template which credential sets are missing so it
    # can show warnings and disable the controls that depend on them.
    return dict(no_sqs_creds=(not aws_vars['sqs']['secret_key']),
                no_sns_creds=(not aws_vars['sns']['secret_key']),
                no_s3_creds=(not aws_vars['s3']['secret_key']),
                disable_controls=((not aws_vars['sqs']['secret_key'])
                                  or (not aws_vars['sns']['secret_key']))
                or (not aws_vars['s3']['secret_key']))
@route('/get_queue_length')
def get_queue_length():
    # JSON endpoint polled by the frontend for the current queue length.
    return dict(queue_length=read_sqs_queue_length(aws_vars))
@route('/queue_clustering_job')
def queue_clustering_job():
    # Enqueue a clustering job built from the request's query parameters.
    return publish_to_sqs_queue(aws_vars)
@route('/admin_panel')
@view('admin_panel')
def admin_panel():
    # The admin_panel template needs no context data.
    return None
# For Static files
@get("/static/css/<filename:re:.*\.css>")
def css(filename):
    # Serve stylesheets from static/css.
    return static_file(filename, root="static/css")
@get("/static/font/<filename:re:.*\.(eot|otf|svg|ttf|woff|woff2?)>")
def font(filename):
    # Serve web fonts from static/font.
    return static_file(filename, root="static/font")
@get("/static/img/<filename:re:.*\.(jpg|png|gif|ico|svg)>")
def img(filename):
    # Serve images from static/img.
    return static_file(filename, root="static/img")
@get("/static/js/<filename:re:.*\.js>")
def js(filename):
    # Serve scripts from static/js.
    return static_file(filename, root="static/js")
@get("/static/stocks/<filename:re:.*\.stocks>")
def stocks(filename):
    # Serve stock dataset files from static/stocks.
    return static_file(filename, root="static/stocks")
def id_generator(size=128, chars=string.ascii_uppercase + string.digits):
    """Build a random identifier of `size` characters drawn from `chars`.

    Used for SQS FIFO MessageGroupId / MessageDeduplicationId values.
    """
    picked = [random.choice(chars) for _ in range(size)]
    return "".join(picked)
def validate_aws_vars(aws_vars):
    """Check that every configured AWS variable has a value.

    Prints one line for each missing/empty variable and returns True only
    when all values are present.
    """
    ok = True
    for svc, var_set in aws_vars.items():
        for var_name, var_val in var_set.items():
            if var_val:
                continue
            print(svc + " env var not found or empty: " + var_name)
            ok = False
    return ok
def read_sqs_queue_length(aws_vars):
    """Return the number of messages received in one poll of the SQS queue.

    NOTE(review): receive_messages() returns at most one small batch per
    call, so this is a sample of pending messages rather than the true
    queue depth — confirm whether the ApproximateNumberOfMessages queue
    attribute was intended instead.
    """
    sqs = boto3.resource(
        'sqs',
        region_name=aws_vars['sqs']['region'],
        aws_access_key_id=aws_vars['sqs']['access_key'],
        aws_secret_access_key=aws_vars['sqs']['secret_key']
    )
    # Read messages from queue
    queue = sqs.Queue(url=aws_vars['sqs']['queue_url'])
    return len(queue.receive_messages())
def publish_to_sqs_queue(aws_vars):
    """Publish a clustering-job message built from the request query string
    onto the SQS queue.

    NOTE(review): the JSON key is "datasetName" but the query parameter read
    is "datesetName" — confirm which spelling the frontend actually sends.
    """
    sqs = boto3.resource(
        'sqs',
        region_name=aws_vars['sqs']['region'],
        aws_access_key_id=aws_vars['sqs']['access_key'],
        aws_secret_access_key=aws_vars['sqs']['secret_key']
    )
    queue = sqs.Queue(aws_vars['sqs']['queue_url'])
    # msg_body = request.query
    msg_body = '{{"datasetName": "{}", "startDate": "{}", "endDate": "{}" }}'.format(request.query.datesetName, request.query.startDate, request.query.endDate)
    # Random group/dedup ids make every message distinct (required for FIFO
    # queues, as suggested by the MessageGroupId/MessageDeduplicationId args).
    return queue.send_message(MessageBody=msg_body, MessageGroupId=id_generator(), MessageDeduplicationId=id_generator())
if __name__ == '__main__':
    # Warn (but still start) when any AWS binding variable is missing.
    if not validate_aws_vars(aws_vars):
        print("WARNING: One or more expected environment variables is missing. Ensure that binding with SQS, SNS, and S3 was successful.")
    run(host='0.0.0.0', port=8080, debug=True, reloader=True)
| [
"derekwhatley@gmail.com"
] | derekwhatley@gmail.com |
4243789910b4eafd8accc3ebfb9dcf544bb61fdc | 11e8e0b0abe952aff4cbe71e354700f759438468 | /run.py | 1e5bdd99a0b2b0ece9e92c3de2fa84368792613c | [] | no_license | jaovito/flask-auth | 8f71292ac5b918605d26dc88d6ccee25a851800b | a86971ab2a386e84376b0160ae04132d2141b4a9 | refs/heads/main | 2023-06-29T07:31:40.111304 | 2021-08-01T01:42:28 | 2021-08-01T01:42:28 | 391,495,217 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 70 | py | from app import app
if __name__ == '__main__':
app.run(port=3333) | [
"junior.vitorio.dias@gmail.com"
] | junior.vitorio.dias@gmail.com |
213ec361cd162f8a0733d4d4d34eb2d36e119bac | 36739fb573c1a99d9301f2140680d0d7cc4a3833 | /assignment1/assignment1/.env/lib/python2.7/sre_compile.py | 656519efbb26d009764be380d1110adc91408134 | [] | no_license | jingshuangliu22/cs224n | 21db34c91846ea0dea095e2032b2016cc11a7f84 | 1951a4b7a25c142c860d456f3f7a3afa32171a51 | refs/heads/master | 2020-12-30T11:28:20.855167 | 2017-05-17T12:25:02 | 2017-05-17T12:25:02 | 91,565,500 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 55 | py | /Users/jingshuang/anaconda/lib/python2.7/sre_compile.py | [
"6044475200@qq.com"
] | 6044475200@qq.com |
54fbbd3895749f2d1352a00e42ae0caa3ce23495 | ff4b8ded01d77bea6ede5d2312a16c050987629f | /gym_chrome_dino/game/dino_game.py | 646b3218a8cd95b710e59ede2bdb821c92138e1c | [
"MIT"
] | permissive | HoaiDuyLe/rl-dino-run | eedf9fa0dc2a56de518b7d9fc970d6b2fa1b969c | 89083f3ef8fc7211c3145eb949776a155bb675a1 | refs/heads/master | 2022-04-02T19:39:26.672916 | 2020-02-15T13:24:28 | 2020-02-15T13:24:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,239 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2018 Elvis Yu-Jing Lin <elvisyjlin@gmail.com>
# Licensed under the MIT License - https://opensource.org/licenses/MIT
import os
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.keys import Keys
from gym_chrome_dino.utils.helpers import download_chromedriver
class DinoGame():
def __init__(self, render=False, accelerate=False, autoscale=False):
# if not os.path.exists('chromedriver') and not os.path.exists('chromedriver.exe'):
# download_chromedriver()
chromedriver_path = './chromedriver'
options = Options()
options.add_argument('--disable-infobars')
options.add_argument('--mute-audio')
options.add_argument('--no-sandbox')
options.add_argument('--window-size=800,600')
if not render:
options.add_argument('--headless')
self.driver = webdriver.Chrome(executable_path=chromedriver_path, options=options)
# self.driver.get('chrome://dino')
self.driver.get('https://elvisyjlin.github.io/t-rex-runner/')
self.defaults = self.get_parameters() # default parameters
if not accelerate:
self.set_parameter('config.ACCELERATION', 0)
if not autoscale:
self.driver.execute_script('Runner.instance_.setArcadeModeContainerScale = function(){};')
self.press_space()
def get_parameters(self):
params = {}
params['config.ACCELERATION'] = self.driver.execute_script('return Runner.config.ACCELERATION;')
return params
def is_crashed(self):
return self.driver.execute_script('return Runner.instance_.crashed;')
def is_inverted(self):
return self.driver.execute_script('return Runner.instance_.inverted;')
def is_paused(self):
return self.driver.execute_script('return Runner.instance_.paused;')
def is_playing(self):
return self.driver.execute_script('return Runner.instance_.playing;')
def press_space(self):
return self.driver.find_element_by_tag_name('body').send_keys(Keys.SPACE)
def press_up(self):
return self.driver.find_element_by_tag_name('body').send_keys(Keys.UP)
def press_down(self):
return self.driver.find_element_by_tag_name('body').send_keys(Keys.DOWN)
def pause(self):
return self.driver.execute_script('Runner.instance_.stop();')
def resume(self):
return self.driver.execute_script('Runner.instance_.play();')
def restart(self):
return self.driver.execute_script('Runner.instance_.restart();')
def close(self):
self.driver.close()
def get_score(self):
digits = self.driver.execute_script('return Runner.instance_.distanceMeter.digits;');
return int(''.join(digits))
def get_canvas(self):
return self.driver.execute_script('return document.getElementsByClassName("runner-canvas")[0].toDataURL().substring(22);')
def set_parameter(self, key, value):
self.driver.execute_script('Runner.{} = {};'.format(key, value))
def restore_parameter(self, key):
self.set_parameter(self, key, self.defaults[key]) | [
"yzheng51@sheffield.ac.uk"
] | yzheng51@sheffield.ac.uk |
544ca9209f8eaffd8963bc47a621318a1f43b4ec | 556e1797e9e013a675b058cfd97af513d94de15a | /initial_tables/schema.py | 9ccb2315dd9bc2a36b0010fc77e5d0005aab6223 | [] | no_license | EizoAssik/DB-ExCr | 08adfaaba3b707064097e07050930c1257b31238 | 86e9d472065d0920e4985c10d28069f180d8fff9 | refs/heads/master | 2018-12-28T07:40:35.915772 | 2014-06-12T09:03:16 | 2014-06-12T09:03:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,817 | py | """
CREATED AT 4/24/14 11:10 AM AS A PART OF Project DBSC
"""
from ply import yacc, lex
tokens = ('NAME', 'REF', 'PRI', 'LP', 'RP', 'LSP', 'RSP', 'LCP', 'RCP', 'COMMA')
t_NAME = r"[A-Za-z0-9_]+"
t_REF = r"\*"
t_PRI = r"\+"
t_LP = r'\('
t_RP = r'\)'
t_LSP = r'\['
t_RSP = r"\]"
t_LCP = r'\{'
t_RCP = r'\}'
t_COMMA = r','
t_ignore = ' \n\r\tS;'
def t_error(t):
print('Lexing Err @', t)
# CREATE IF NOT EXISTS ...
def p_schema(p):
""" schema : cine
| schema cine
"""
if len(p) is 2:
p[0] = [p[1]]
else:
p[0] = p[1] + [p[2]]
# cine, CREATE TABLE IF NOT EXISTS
def p_cine(p):
""" cine : term LCP terms RCP """
# cine 是一个 term - terms 对
p[0] = (p[1], p[3])
def p_type(p):
""" type : NAME LP terms RP """
# type 是一个 NAME - terms 对
p[0] = (p[1], p[3])
def p_term(p):
""" term : NAME
| REF term
| PRI term
| term LSP type RSP
"""
# term是一个四元组
# (NAME, isPrimary, isForeign, type/None)
if len(p) is 2:
p[0] = (p[1], False, False, None)
if len(p) is 3:
if p[1] is '+':
p[0] = (p[2][0], True, p[2][2], None)
if p[1] is '*':
p[0] = (p[2][0], p[2][1], True, None)
if len(p) is 5:
p[0] = (p[1][0], p[1][1], p[1][2], p[3])
def p_terms(p):
""" terms : term
| terms COMMA
| terms COMMA term
"""
# terms 是一或多个 term 构成的 list
if len(p) is 2:
p[0] = [p[1]]
if len(p) is 3:
p[0] = p[1]
if len(p) is 4:
p[0] = p[1] + [p[3]]
def parse(filename):
lexer = lex.lex()
parser = yacc.yacc()
with open(filename) as file:
schema = file.read()
r = parser.parse(schema, debug=0)
return r
| [
"eizoassik@gmail.com"
] | eizoassik@gmail.com |
6d23701514aa04997b08269566d74de35d31863a | 86024673fba8f9f46f4a43fe38e1c1cace121997 | /vjezba-15.py | 914f5cd09cece516289fbfe9945ba0bd7b0020b8 | [] | no_license | LaraDuretic/PZW | 5d292eb7fa3ca26748dcf035752cd13fae925738 | 2863068cbcb59fd84711459f1d31d69dcc7b6584 | refs/heads/main | 2023-09-03T10:07:45.915753 | 2021-11-17T10:40:01 | 2021-11-17T10:40:01 | 411,620,906 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 340 | py | # To do aplikacija
lista_zadataka = []
print("Dobrodošli u To do aplikaciju. Za izlaz odaberite x")
while True:
zadatak = input ("Unesi novi zadatak: ")
if zadatak.lower() != "x":
lista_zadataka.append(zadatak)
else:
break
print("Lista zadataka")
for x in lista_zadataka:
print(x)
| [
"noreply@github.com"
] | noreply@github.com |
2bc93fa19cb05690f43b36a680d47a50c3e69ae8 | 4cc7f348b7ef6e9d5abcf98d10c360864f2d2800 | /sko/PSO.py | da24e59a8068801d58146ccf614e4c2329adcb36 | [
"Python-2.0",
"MIT"
] | permissive | zkcz/scikit-opt | 6886ba5fd66c0e79b5bc4f101f47d556fef1612b | bc884b6408af4c91fa406391e75f570a25496c4b | refs/heads/master | 2020-10-01T13:21:30.549707 | 2019-12-11T05:50:51 | 2019-12-11T05:50:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,897 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Time : 2019/8/20
# @Author : github.com/guofei9987
import numpy as np
from sko.tools import func_transformer
from .base import SkoBase
class PSO(SkoBase):
"""
Do PSO (Particle swarm optimization) algorithm.
This algorithm was adapted from the earlier works of J. Kennedy and
R.C. Eberhart in Particle Swarm Optimization [IJCNN1995]_.
The position update can be defined as:
.. math::
x_{i}(t+1) = x_{i}(t) + v_{i}(t+1)
Where the position at the current step :math:`t` is updated using
the computed velocity at :math:`t+1`. Furthermore, the velocity update
is defined as:
.. math::
v_{ij}(t + 1) = w * v_{ij}(t) + c_{p}r_{1j}(t)[y_{ij}(t) − x_{ij}(t)]
+ c_{g}r_{2j}(t)[\hat{y}_{j}(t) − x_{ij}(t)]
Here, :math:`cp` and :math:`cg` are the cognitive and social parameters
respectively. They control the particle's behavior given two choices: (1) to
follow its *personal best* or (2) follow the swarm's *global best* position.
Overall, this dictates if the swarm is explorative or exploitative in nature.
In addition, a parameter :math:`w` controls the inertia of the swarm's
movement.
.. [IJCNN1995] J. Kennedy and R.C. Eberhart, "Particle Swarm Optimization,"
Proceedings of the IEEE International Joint Conference on Neural
Networks, 1995, pp. 1942-1948.
Parameters
--------------------
func : function
The func you want to do optimal
dim : int
Number of dimension, which is number of parameters of func.
pop : int
Size of population, which is the number of Particles. We use 'pop' to keep accordance with GA
max_iter : int
Max of iter iterations
Attributes
----------------------
pbest_x : array_like, shape is (pop,dim)
best location of every particle in history
pbest_y : array_like, shape is (pop,1)
best image of every particle in history
gbest_x : array_like, shape is (1,dim)
general best location for all particles in history
gbest_y : float
general best image for all particles in history
gbest_y_hist : list
gbest_y of every iteration
Examples
-----------------------------
>>> demo_func = lambda x: x[0] ** 2 + (x[1] - 0.05) ** 2 + x[2] ** 2
>>> pso = PSO(func=demo_func, dim=3)
>>> gbest_x, gbest_y = pso.run()
>>> print('best_x is ', pso.gbest_x, 'best_y is ', pso.gbest_y)
>>> pso.plot_history()
"""
def __init__(self, func, dim, pop=40, max_iter=150, lb=None, ub=None, w=0.8, c1=0.5, c2=0.5):
self.func = func_transformer(func)
self.w = w # inertia
self.cp, self.cg = c1, c2 # parameters to control personal best, global best respectively
self.pop = pop # number of particles
self.dim = dim # dimension of particles, which is the number of variables of func
self.max_iter = max_iter # max iter
self.has_constraints = not (lb is None and ub is None)
self.lb = -np.ones(self.dim) if lb is None else np.array(lb)
self.ub = np.ones(self.dim) if ub is None else np.array(ub)
assert self.dim == len(self.lb) == len(self.ub), 'dim == len(lb) == len(ub) must holds'
assert np.all(self.ub > self.lb), 'All upper-bound values must be greater than lower-bound values'
self.X = np.random.uniform(low=self.lb, high=self.ub, size=(self.pop, self.dim))
v_high = self.ub - self.lb
self.V = np.random.uniform(low=-v_high, high=v_high, size=(self.pop, self.dim)) # speed of particles
self.Y = self.cal_y() # y = f(x) for all particles
self.pbest_x = self.X.copy() # personal best location of every particle in history
self.pbest_y = self.Y.copy() # best image of every particle in history
self.gbest_x = np.zeros((1, self.dim)) # global best location for all particles
self.gbest_y = np.inf # global best y for all particles
self.gbest_y_hist = [] # gbest_y of every iteration
self.update_gbest()
# record verbose values
self.record_mode = False
self.record_value = {'X': [], 'V': [], 'Y': []}
def update_V(self):
r1 = np.random.rand(self.pop, self.dim)
r2 = np.random.rand(self.pop, self.dim)
self.V = self.w * self.V + \
self.cp * r1 * (self.pbest_x - self.X) + \
self.cg * r2 * (self.gbest_x - self.X)
def update_X(self):
self.X = self.X + self.V
if self.has_constraints:
self.X = np.clip(self.X, self.lb, self.ub)
def cal_y(self):
# calculate y for every x in X
self.Y = np.array([self.func(x) for x in self.X]).reshape(-1, 1)
return self.Y
def update_pbest(self):
'''
personal best
:return:
'''
self.pbest_x = np.where(self.pbest_y > self.Y, self.X, self.pbest_x)
self.pbest_y = np.where(self.pbest_y > self.Y, self.Y, self.pbest_y)
def update_gbest(self):
'''
global best
:return:
'''
if self.gbest_y > self.Y.min():
self.gbest_x = self.X[self.Y.argmin(), :]
self.gbest_y = self.Y.min()
def recorder(self):
if not self.record_mode:
return
self.record_value['X'].append(self.X)
self.record_value['V'].append(self.V)
self.record_value['Y'].append(self.Y)
def run(self, max_iter=None):
self.max_iter = max_iter or self.max_iter
for iter_num in range(self.max_iter):
self.update_V()
self.recorder()
self.update_X()
self.cal_y()
self.update_pbest()
self.update_gbest()
self.gbest_y_hist.append(self.gbest_y)
return self
fit = run
| [
"guofei9987@foxmail.com"
] | guofei9987@foxmail.com |
54794aabde093af896a26d62f7eb36fab7a37823 | c0c2719820ded6d7793fc9910c6d76116f48aa25 | /tango_with_django_project/rango/migrations/0005_category_slug.py | eb8c559ae28f8a1f28190a050b55d992f23e60a7 | [] | no_license | leo123qw/ASD2 | 5cb02a7c65bfe5d1ace935c83ecb0ee7be444e2d | a8938187cf9a2af17c47757bf201e7aed8628352 | refs/heads/master | 2021-01-13T16:21:05.968626 | 2017-02-07T17:35:13 | 2017-02-07T17:35:13 | 79,949,756 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 480 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-01-26 18:51
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('rango', '0004_auto_20170126_1705'),
]
operations = [
migrations.AddField(
model_name='category',
name='slug',
field=models.SlugField(default=''),
preserve_default=False,
),
]
| [
"2229755z@student.gla.ac.uk"
] | 2229755z@student.gla.ac.uk |
31cb4886a450673f2d8b2cf2eb0e7434adbabdfe | b140bb49ab8c092cf52d0f8fbefe2da6aad77c48 | /exceptions.py | b832e156d68ff9705a1efbaf1bf59b7327ef772f | [] | no_license | freefalI/translator | e2d3c24e608fe8aa8fdfabbec8a341853bc939c9 | 5a12930c4b6a3d7d3488b48bc4410cf3370eafe6 | refs/heads/master | 2020-04-16T21:14:17.796776 | 2019-02-24T19:49:00 | 2019-02-24T19:49:00 | 165,917,651 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 805 | py |
class TranslatorException(Exception):
def __init__(self, message=None, line=None):
msg = "RAISED ON LINE {},\n".format(line) if line else ""
msg += "MESSAGE: {}".format(message) if message else "There is no message"
super().__init__(msg)
class LexicException(TranslatorException):
pass
class NotFoundLexemException(LexicException):
pass
class NotFoundIdnException(NotFoundLexemException):
pass
class NotFoundConException(NotFoundLexemException):
pass
class SemanticException(TranslatorException):
pass
class UndefinedVariableException(SemanticException):
pass
class VariableRedeclarationException(SemanticException):
pass
class NoEndOfProgramSymbolException(TranslatorException):
pass
class SyntaxException(TranslatorException):
pass | [
"freefalI@users.noreply.github.com"
] | freefalI@users.noreply.github.com |
7d8ec831d0e7abcce415e5b9743f82e08eaa5960 | b78ed085d7f4860893715d27d26df2c1a7c760e3 | /练习/Dec2Bin.py | e1a85e591d998f58ddaf05ffeca12b23518dcc2b | [] | no_license | maxzjs/python_learning | e5c1a1916c8dbeb05c30b967bd36c33f0b4119e9 | 90c9c1cfaae2e7855ae350e5e36c1264a5b69006 | refs/heads/master | 2023-04-15T07:31:07.852946 | 2023-04-12T02:47:07 | 2023-04-12T02:47:07 | 150,445,238 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 239 | py | # 十进制转二进制
#dec = input('输入十进制数:')
import os,time
def Dec2Bin(dec):
result = ''
if dec:
result = Dec2Bin(dec // 2)
return result + str(dec % 2)
else:
return result
print(Dec2Bin(32))
os.system("pause") | [
"626703431@qq.com"
] | 626703431@qq.com |
81808d6ccec7cb6c07f735067c807d038bbe08a9 | c031f254141897f861501782bb53e10901cc6379 | /백준/banghyungjin_1339.py | 1be1d46d440ba50133994f44f5a0951974318c8f | [] | no_license | Banghyungjin/coding_test | 0673d23f819791ab7a2052f787f007b828d7fb62 | 0ada8528f3c35017e77a54701fa001835dadeaef | refs/heads/master | 2023-07-13T17:11:10.456195 | 2021-08-20T13:29:06 | 2021-08-20T13:29:06 | 182,887,687 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,884 | py | import sys
num_of_letters = int(sys.stdin.readline().split()[0]) # 단어의 개수
letters = {} # 알파벳과 그 알파벳이 나온 숫자 딕셔너리
answer = 0 # 정답
for i in range(num_of_letters): # 단어의 개수 만큼
input_letter = sys.stdin.readline().split()[0] # 단어들을 읽어옴
for letter in range(len(input_letter)): # 각 읽어온 단어들을 알파벳 하나씩 나눔
if not(input_letter[letter] in letters): # letters에 해당 알파벳이 없으면
letters[input_letter[letter]] = 10 ** (len(input_letter) - letter - 1) # 새로 넣음 이때 key는 알파벳, value는 해당 알파벳이 가리키는 자리수
else: # letters에 해당 알파벳이 있으면
letters[input_letter[letter]] += 10 ** (len(input_letter) - letter - 1) # 해당 원소의 value에 해당 알파벳이 가리키는 자리수 더해줌
letters = sorted(letters.items(), reverse=True, key=lambda x: (x[1])) # letters를 각 원소의 value 값으로 정렬
for i in range(len(letters)): # letters를 처음부터 탐색
answer += letters[i][1] * (9 - i) # 순서대로 9부터 역순으로 대입 후 value에 곱해서 answer에 더함
print(answer) # 정답 출력
| [
"37135305+Banghyungjin@users.noreply.github.com"
] | 37135305+Banghyungjin@users.noreply.github.com |
036e1f00cafaa98cabbe1f94fd404050425f7bd6 | 7ed2f04f9d92587f97a226ee7e4381e3664dbc8d | /The Ultimate Project.py | 70bb73cc23fdae54ca8b13992648fb932f6cf9e9 | [] | no_license | Jubayer-Hossain-Abir-404/Problem-Solving-Using-Python-and-Numerical-Methods | 3e11d717f27a02ccb8af56ffe45b0211a7358cf1 | 2c23f477cee1e9dbb4b3f9c5b89d208caf69f7a8 | refs/heads/main | 2023-06-06T20:37:54.513575 | 2021-07-17T17:10:11 | 2021-07-17T17:10:11 | 386,996,426 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,303 | py | import math
def g(x):
return (-0.88318*pow(10,-2))/(-0.50598*pow(10,-10)*pow(x,2)+0.38292*pow(10,-7)*x+0.74363*pow(10,-4))
def iteration(xi):
n=0
while(n<5):
xi_plus=g(xi)
n+=1
xi_minus=xi
xi=xi_plus
return xi
def calculate(n,T,alpha,delD):
sum_Ti=0.0
sum_Ti_square=0.0
sum_Ti_cube=0.0
sum_Ti_four=0.0
sum_ai=0.0
sum_aiTi=0.0
sum_ai2=0.0
for i in range(0,7):
sum_Ti=sum_Ti+T[i]
sum_Ti_square=sum_Ti_square+pow(T[i],2)
sum_Ti_cube=sum_Ti_cube+pow(T[i],3)
sum_Ti_four=sum_Ti_four+pow(T[i],4)
sum_ai=sum_ai+alpha[i]
sum_aiTi=sum_aiTi+(alpha[i]*T[i])
sum_ai2=sum_ai2+(alpha[i]*(T[i]*T[i]))
a=[0.0]*3
for i in range(0,3):
a[i]=[0.0]*3
b=[0.0]*3
p=3
for i in range(0,p):
for j in range(0,p):
if(i+j==0):
a[i][j]=n
elif(i+j==1):
a[i][j]=sum_Ti
elif(i+j==2):
a[i][j]=sum_Ti_square
elif(i+j==3):
a[i][j]=sum_Ti_cube
elif(i+j==4):
a[i][j]=sum_Ti_four
for i in range(0,p):
if(i==0):
b[i]=sum_ai
elif(i==1):
b[i]=sum_aiTi
elif(i==2):
b[i]=sum_ai2
for i in range(0,p-1):
for j in range(i+1,p):
factor=a[j][i]/a[i][i]
for k in range(i+1,p):
a[j][k]=a[j][k]-factor*a[i][k]
b[j]=b[j]-factor*b[i]
x=[0.0]*3
x[p-1]=b[p-1]/a[p-1][p-1]
i=p-2
while(i>=0):
value=b[i]
for j in range(i+1,p):
value=value-a[i][j]*x[j]
x[i]=value/a[i][i]
i=i-1
print "Vector of Variable"
for i in range(0,p):
print x[i]
print "\n"
print "Therefore second order polinomial is:\n"
print "-1.2278*10^-11*T^2+6.1946*10^-9*T+6.0150*10^-6\n"
print "Since the desired contraction is at least ",delD," and it's going to be needed in the next equation"
print "\nSo after itegrating we can find the equation where upper limit is Tf which we need to find and the lower limit is 80\n"
print "The equation is:\n"
print "-0.50598*10^-10*Tf^3+0.38292*10^-7*Tf^2+0.74363*10^-4*Tf+0.88318*10^-2=0\n"
print "Now in order to find the root simple fixed iteration is being used and the found root will be the temperature to cool the trunion\n"
Tf=iteration(0)
print Tf, "temperature is needed to cool the trunion in order to get the desired contraction\n"
print "-108 degree farenheit is not going to work to contract the trunion\n"
def main():
D=12.363
print "Trunion Outside Diameter,D = ",D
print "\nhub inner Diameter = 12.358\n"
print "Diametric Clearance = 0.01\n"
delD=D-12.358
delD=delD+0.01
print "So, the desired contraction is:",delD
print "\nHere ,alpha = coefficient of thermal expansion coefficient at room temperature\n"
print "Tf = Temperature of fluid needed\n"
print "Troom = room temperature\n"
Troom=80.0
n=7.0
T=[80.0,40.0,-40.0,-120.0,-200.0,-280.0,-340.0]
alpha=[0.00000647,0.00000624,0.00000572,0.00000509,0.00000430,0.00000333,0.00000245]
calculate(n,T,alpha,delD)
if __name__=="__main__":
main()
| [
"41603187+Jubayer-Hossain-Abir-404@users.noreply.github.com"
] | 41603187+Jubayer-Hossain-Abir-404@users.noreply.github.com |
118477199ec7566e310b67d75ad1cdeeca56855c | 3e59724306fac40aee85a69df70af05baf6c120b | /pywr_models/models/stanislaus/_parameters/Donnells_Reservoir_Storage_Value.py | d15e6651ec3525a39edc20006e96790c3d1460d1 | [] | no_license | mlmaskey/sierra-pywr | 9e632ecf85aeb0345a1489c866625ecd62693613 | 80bf954cb26011aee4a84dc82b001e8d260ae525 | refs/heads/master | 2023-01-31T21:49:05.663574 | 2020-12-12T02:55:24 | 2020-12-12T02:55:24 | 318,676,217 | 0 | 0 | null | 2020-12-05T01:32:05 | 2020-12-05T01:32:04 | null | UTF-8 | Python | false | false | 911 | py | from parameters import WaterLPParameter
from math import exp
class Donnells_Reservoir_Storage_Value(WaterLPParameter):
def _value(self, timestep, scenario_index):
base_cost = -60
if self.model.mode == 'planning':
return base_cost
elev = self.model.nodes[self.res_name].get_level(scenario_index)
offset = 100
max_elev = 1498.7
k = 0.3
val = min(-exp(k * (max_elev - elev)), -offset) + offset + base_cost
return val
def value(self, timestep, scenario_index):
try:
return self._value(timestep, scenario_index)
except Exception as err:
print('\nERROR for parameter {}'.format(self.name))
print('File where error occurred: {}'.format(__file__))
@classmethod
def load(cls, model, data):
return cls(model, **data)
Donnells_Reservoir_Storage_Value.register()
| [
"herr.rhein@gmail.com"
] | herr.rhein@gmail.com |
68a1ff303501253341e14cb21abc7e096179a871 | 13f0b9a6ca148fe40cdbd73398b12d2f8c0e8571 | /scripts/flashing/firmware_utils.py | ad76c9ed80e473c9999f6922cf5c2a1c610f36a4 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | simonlingoogle/connectedhomeip | 84670f016ae68b80f636738a2647dc8dc2cea287 | 47e8b60c0a903b385accde9f49635187646dd577 | refs/heads/master | 2023-01-11T08:29:44.455068 | 2020-09-25T03:40:35 | 2020-09-25T03:40:35 | 298,519,978 | 0 | 0 | Apache-2.0 | 2020-09-25T08:54:56 | 2020-09-25T08:54:55 | null | UTF-8 | Python | false | false | 9,845 | py | #!/usr/bin/env python3
# Copyright (c) 2020 Project CHIP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utitilies to flash or erase a device."""
import argparse
import errno
import os
import stat
import subprocess
import sys
import textwrap
# Here are the options that can be use to configure a `Flasher`
# object (as dictionary keys) and/or passed as command line options.
OPTIONS = {
# Configuration options define properties used in flashing operations.
'configuration': {
# Script configuration options.
'verbose': {
'help': 'Report more verbosely',
'default': 0,
'short-option': 'v',
'argument': {
'action': 'count',
},
# Levels:
# 0 - error message
# 1 - action to be taken
# 2 - results of action, even if successful
# 3+ - details
},
},
# Action control options specify operations that Flasher.action() or
# the function interface flash_command() will perform.
'operations': {
# Action control options.
'erase': {
'help': 'Erase device',
'default': False,
'argument': {
'action': 'store_true'
},
},
'application': {
'help': 'Flash an image',
'default': None,
'argument': {
'metavar': 'FILE'
},
},
'verify-application': {
'help': 'Verify the image after flashing',
'default': False,
'argument': {
'action': 'store_true'
},
},
# 'reset' is a three-way switch; if None, action() will reset the
# device if and only if an application image is flashed. So, we add
# an explicit option to set it false.
'reset': {
'help': 'Reset device after flashing',
'default': None, # None = Reset iff application was flashed.
'argument': {
'action': 'store_true'
},
},
'skip-reset': {
'help': 'Do not reset device after flashing',
'default': None, # None = Reset iff application was flashed.
'argument': {
'dest': 'reset',
'action': 'store_false'
},
}
},
}
class Flasher:
"""Manage flashing."""
def __init__(self, options=None, platform=None):
self.options = options or {}
self.platform = platform
self.parser = argparse.ArgumentParser(
description='Flash {} device'.format(platform or 'a'))
self.group = {}
self.err = 0
self.define_options(OPTIONS)
self.module = __name__
self.argv0 = None
self.tool = {}
def define_options(self, options):
"""Define options, including setting defaults and argument parsing."""
for group, group_options in options.items():
if group not in self.group:
self.group[group] = self.parser.add_argument_group(group)
for key, info in group_options.items():
argument = info.get('argument', {})
option = argument.get('dest', key)
# Set default value.
if option not in self.options:
self.options[option] = info['default']
# Add command line argument.
names = ['--' + key]
if 'short-option' in info:
names += ['-' + info['short-option']]
self.group[group].add_argument(
*names,
help=info['help'],
default=self.options[option],
**argument)
# Record tool options.
if 'tool' in info:
self.tool[option] = info['tool']
def status(self):
"""Return the current error code."""
return self.err
def actions(self):
"""Perform actions on the device according to self.options."""
raise NotImplementedError()
def log(self, level, *args):
"""Optionally log a message to stderr."""
if self.options['verbose'] >= level:
print(*args, file=sys.stderr)
def run_tool_logging(self,
tool,
arguments,
name,
pass_message=None,
fail_message=None,
fail_level=0):
"""Run a tool with log messages."""
self.log(1, name)
if self.run_tool(tool, arguments).err:
self.log(fail_level, fail_message or ('FAILED: ' + name))
else:
self.log(2, pass_message or (name + ' complete'))
return self
def run_tool(self, tool, arguments):
"""Run an external tool."""
command = [self.options[tool]] + arguments
self.log(3, 'Execute:', *command)
try:
self.err = subprocess.call(command)
except FileNotFoundError as exception:
self.err = exception.errno
if self.err == errno.ENOENT:
# This likely means that the program was not found.
# But if it seems OK, rethrow the exception.
if self.verify_tool(tool):
raise exception
return self
def verify_tool(self, tool):
"""Run a command to verify that an external tool is available.
Prints a configurable error and returns False if not.
"""
command = [i.format(**self.options) for i in self.tool[tool]['verify']]
try:
err = subprocess.call(command)
except OSError as ex:
err = ex.errno
if err:
note = self.tool[tool].get('error', 'Unable to execute {tool}.')
note = textwrap.dedent(note).format(tool=tool, **self.options)
# textwrap.fill only handles single paragraphs:
note = '\n\n'.join((textwrap.fill(p) for p in note.split('\n\n')))
print(note, file=sys.stderr)
return False
return True
def find_file(self, filename, dirs=None):
"""Resolve a file name; also checks the script directory."""
if os.path.isabs(filename) or os.path.exists(filename):
return filename
dirs = dirs or []
if self.argv0:
dirs.append(os.path.dirname(self.argv0))
for d in dirs:
name = os.path.join(d, filename)
if os.path.exists(name):
return name
raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT),
filename)
def optional_file(self, filename, dirs=None):
"""Resolve a file name, if present."""
if filename is None:
return None
return self.find_file(filename, dirs)
def parse_argv(self, argv):
"""Handle command line options."""
self.argv0 = argv[0]
args = self.parser.parse_args(argv[1:])
for key, value in vars(args).items():
self.options[key.replace('_', '-')] = value
return self
def flash_command(self, argv):
"""Perform device actions according to the command line."""
return self.parse_argv(argv).actions().status()
def make_wrapper(self, argv):
"""Generate script to flash a device.
The generated script is a minimal wrapper around `flash_command()`,
containing any option values that differ from the class defaults.
"""
# Note: this modifies the argument parser, so the same Flasher instance
# should not be used for both parse_argv() and make_wrapper().
self.parser.description = 'Generate a flashing script.'
self.parser.add_argument(
'--output',
metavar='FILENAME',
required=True,
help='flashing script name')
self.argv0 = argv[0]
args = self.parser.parse_args(argv[1:])
# Find any option values that differ from the class defaults.
# These will be inserted into the wrapper script.
defaults = []
for key, value in vars(args).items():
key = key.replace('_', '-')
if key in self.options and value != self.options[key]:
defaults.append(' {}: {},'.format(repr(key), repr(value)))
script = """
import sys
DEFAULTS = {{
{defaults}
}}
import {module}
if __name__ == '__main__':
sys.exit({module}.Flasher(DEFAULTS).flash_command(sys.argv))
"""
script = ('#!/usr/bin/env python3' + textwrap.dedent(script).format(
module=self.module, defaults='\n'.join(defaults)))
try:
with open(args.output, 'w') as script_file:
script_file.write(script)
os.chmod(args.output, (stat.S_IXUSR | stat.S_IRUSR | stat.S_IWUSR
| stat.S_IXGRP | stat.S_IRGRP
| stat.S_IXOTH | stat.S_IROTH))
except OSError as exception:
print(exception, sys.stderr)
return 1
return 0
| [
"noreply@github.com"
] | noreply@github.com |
d04dfe1509c30e44084d28e7679e5a6f432e69e7 | 980a0761e4cc0f28112e21fbd685945f486a8033 | /MC_reconstruction/digi_raw/mc_lbl/crabConfig.py | eea543e2279dcd9d4c984797ba55564ceb3c99dc | [] | no_license | jniedzie/LightByLight2018 | f5d8369a286ea8cc308e28a53ef8ac17a19e59d4 | 281b2b35488c4c67cba9526204c0b1143b5c7735 | refs/heads/master | 2023-09-01T04:01:00.933114 | 2023-08-15T08:34:08 | 2023-08-15T08:34:08 | 152,394,057 | 1 | 13 | null | 2023-03-28T06:40:01 | 2018-10-10T09:03:06 | Python | UTF-8 | Python | false | false | 1,049 | py | from CRABClient.UserUtilities import config, getUsernameFromSiteDB
config = config()
#config.section_('General')
config.General.requestName = 'digi_raw_lbl'
config.General.workArea = 'crab_projects'
config.General.transferOutputs = True
config.General.transferLogs = True
#config.section_('JobType')
config.JobType.pluginName = 'Analysis'
config.JobType.psetName = 'step2_DIGI_L1_DIGI2RAW_HLT.py'
config.JobType.maxMemoryMB = 4000
config.Data.outputPrimaryDataset = 'digi_raw_lbl'
config.Data.userInputFiles = open('input.txt').readlines()
config.Data.splitting = 'FileBased'
config.Data.unitsPerJob = 1
#NJOBS = 2000 # This is not a configuration parameter, but an auxiliary variable that we use in the next line.
#config.Data.totalUnits = config.Data.unitsPerJob * NJOBS
#config.section_('Data')
config.Data.outLFNDirBase = '/store/group/phys_diffraction/lbyl_2018/mc_lbl/digi_raw'
config.Data.allowNonValidInputDataset = True
config.Data.publication = True
config.Data.outputDatasetTag = 'digi_raw_lbl'
config.Site.storageSite = 'T2_CH_CERN'
| [
"jeremi.niedziela@cern.ch"
] | jeremi.niedziela@cern.ch |
ec3a563f26ecd67ae8ca764601e6d0337155a7de | f2b7a1cf286ffbd2fbeb3366dfc1f50548ada1fc | /lavre_gain/util/plotter.py | 8650b028155e11205170d397ea508a1c6fd03e59 | [] | no_license | dmorais/AI | 277cf4d6c10a5a1c34f445e45686607e9b92b129 | a3a4f4b80b8406ab7ff82420df0120480d3d9fcd | refs/heads/master | 2020-12-30T16:01:29.738844 | 2017-09-11T13:56:58 | 2017-09-11T13:56:58 | 91,193,589 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,899 | py | import matplotlib.pyplot as plt
from matplotlib import style
def plot_raw(x_label, raw, corrected, size):
style.use('seaborn-darkgrid2')
x = list(range(size - 1))
fig = plt.figure()
ax1 = plt.subplot2grid((8, 1), (0, 0),
rowspan=3, colspan=1)
plt.title(r"Rentabilidade da Carteira vs. Indicadores Economicos")
plt.ylabel("% a.m.")
ax2 = plt.subplot2grid((8, 1), (4, 0),
rowspan=3, colspan=1, sharex=ax1)
plt.title(r"Rentabilidade da Carteira vs. Indicadores Economicos Corrigidos")
plt.ylabel("% a.m.")
for index, i_corrected in zip(raw, corrected):
plt.xticks(x, x_label)
# add numbers to each point on the graph
for i, j in enumerate(x):
ax1.text(x[i], index[j + 1], str(index[j + 1]))
ax2.text(x[i], i_corrected[j + 1], str(i_corrected[j + 1]))
# plot graph
if index[0] == "MINHA CARTEIRA":
ax1.plot(x, index[1:size], '-', label=index[0], dashes=[9, 5, 10, 5], color='b')
ax2.plot(x, i_corrected[1:size], '-', label=i_corrected[0], dashes=[9, 5, 10, 5], color='b')
else:
ax1.plot(x, index[1:size], label=index[0], linewidth=1)
ax2.plot(x, i_corrected[1:size], label=i_corrected[0], linewidth=1)
# put legend below the graph
plt.legend(loc='upper center', bbox_to_anchor=(0.5, -0.25),
fancybox=False, shadow=False, ncol=5)
# hide x axis legend on ax1
plt.setp(ax1.get_xticklabels(), visible=False)
# set the x axis to start at zero
ax1.set_xlim(xmin=0)
ax2.set_xlim(xmin=0)
plt.show()
def table_correction(tables):
for table in tables:
culmu = 1
for i in range(1, len(tables[1])):
culmu *= (table[i] / 100 + 1)
table[i] = "{:.2f}".format((culmu - 1) * 100)
return tables
| [
"dmorais@ccs.usherbrooke.ca"
] | dmorais@ccs.usherbrooke.ca |
0ce9848fd48d69c34d629a9dd386f1705127fa21 | ab0172a250897f8f053fef98560ddcb0a9207213 | /utils.py | bc42a9ca2d0f638ee5fc97f436f6f9e1c26bf971 | [] | no_license | bigfishtwo/scale_layer | 127c7a28a502b4b104d9980196ebdaf12a15877a | e7ef26c922359882869585d5323b068b20c3cc4f | refs/heads/main | 2023-06-27T19:05:16.069265 | 2021-08-01T10:48:49 | 2021-08-01T10:48:49 | 391,007,402 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 22,359 | py | import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import pandas as pd
import torchvision
from torchvision import datasets, models, transforms
from torch.utils.data import Dataset, DataLoader
import torch.nn.functional as F
import matplotlib.pyplot as plt
import matplotlib as mpl
import time
import os
import copy
import scipy
from skimage import io, transform
import math
from PIL import Image
from sklearn import metrics
from sklearn.model_selection import train_test_split
import albumentations as A
class DogCatDataset(Dataset):
    """Dogs-vs-cats images stored under ``<root>/dogs`` and ``<root>/cats``.

    One instance serves a single class (dogs -> label 0, cats -> label 1);
    concatenate a dog and a cat instance for the full dataset.
    """

    def __init__(self, root_dir, train, test, isdog, transform=None):
        """
        Args:
            root_dir (string): Directory with all the images.
            train (bool): whether this instance serves the training split.
            test (bool): whether this instance serves the test split.
            isdog (bool): serve dog images (label 0) instead of cats (label 1).
            transform (callable, optional): albumentations transform applied
                to each image (called as transform(image=...)['image']).
        """
        self.root_dir = root_dir
        self.train = train
        self.test = test
        self.transform = transform
        self.labels = 'dogs' if isdog else 'cats'

    def __len__(self):
        # The split size is simply the number of files in the class folder.
        return len(os.listdir(self.root_dir + '/' + self.labels))

    def __getitem__(self, index):
        if torch.is_tensor(index):
            index = index.tolist()
        # File numbering: test images start at 8000, validation at 6000,
        # training at 0 (image files share one numbering scheme).
        if self.test:
            offset = 8000
        elif self.train:
            offset = 0
        else:
            offset = 6000
        if self.labels == 'dogs':
            label = 0
            img_name = "{}/dogs/dog.{}.jpg".format(self.root_dir, index + offset)
        else:
            label = 1
            img_name = "{}/cats/cat.{}.jpg".format(self.root_dir, index + offset)
        image = Image.open(img_name)
        if self.transform:
            image = self.transform(image=np.array(image))['image']
        # Rescale uint8 images to [0, 1] and move channels first (C, H, W).
        divisor = 255.0 if image.dtype == np.uint8 else 1
        image = torch.from_numpy(
            np.moveaxis(image / divisor, -1, 0).astype(np.float32))
        return image, label
class ISICDataset(Dataset):
    """ISIC skin-lesion images with one-hot labels taken from a DataFrame."""

    def __init__(self, root_dir, labels, transform):
        """
        Args:
            root_dir (string): Directory with all the images.
            labels (DataFrame): first column is the image id (file name
                without extension), remaining columns are one-hot class flags.
            transform (callable): albumentations transform applied to each
                image (called as transform(image=...)['image']).
        """
        self.root_dir = root_dir
        self.labels = labels
        self.transform = transform

    def __len__(self):
        # TODO: change number of images
        # return self.labels.shape[0]
        return 520

    def __getitem__(self, index):
        if torch.is_tensor(index):
            index = index.tolist()
        img_name = os.path.join(self.root_dir, self.labels.iloc[index, 0])
        image = Image.open(img_name + '.jpg')
        # Collapse the one-hot class flags into a single class index.
        one_hot = self.labels.iloc[index, 1:].astype(int).to_numpy()
        label = np.argmax(one_hot)
        if self.transform:
            image = self.transform(image=np.array(image))['image']
        # Rescale uint8 images to [0, 1] and move channels first (C, H, W).
        divisor = 255.0 if image.dtype == np.uint8 else 1
        image = torch.from_numpy(
            np.moveaxis(image / divisor, -1, 0).astype(np.float32))
        return image, label
class ISICTest(Dataset):
    """Unlabelled ISIC test images; yields (image id, image) pairs."""

    def __init__(self, root_dir, transform=None):
        """
        Args:
            root_dir (string): Directory with all the images.
            transform (callable, optional): torchvision-style transform
                applied directly to the PIL image.
        """
        self.root_dir = root_dir
        self.file_names = os.listdir(self.root_dir)
        self.transform = transform

    def __len__(self):
        # TODO: change number of images
        return len(self.file_names)

    def __getitem__(self, index):
        if torch.is_tensor(index):
            index = index.tolist()
        file_name = self.file_names[index]
        image = Image.open(os.path.join(self.root_dir, file_name))
        if self.transform:
            image = self.transform(image)
        # Strip the extension so callers get the bare ISIC image id.
        return file_name.split('.')[0], image
class AlbumentationDataset(Dataset):
    """ISIC dataset split deterministically into three parts (80/10/10).

    NOTE(review): when ``test`` is True this instance serves the *middle*
    10% (y_val) and the remaining case serves the last 10% (y_test) -- the
    names look swapped; confirm intent before relying on the splits.
    """

    def __init__(self, root_dir, csv_file, train, test, transform):
        self.root_dir = root_dir
        self.train = train
        self.test = test
        self.csv_file = pd.read_csv(csv_file)
        # Deterministic split (shuffle=False): first 80%, next 10%, last 10%.
        y_train, y_validation = train_test_split(self.csv_file, test_size=0.2, shuffle=False)
        y_val, y_test = train_test_split(y_validation, test_size=0.5, shuffle=False)
        if self.train:
            self.labels = y_train
        elif self.test:
            self.labels = y_val
        else:
            self.labels = y_test
        self.transform = transform

    def __len__(self):
        return self.labels.shape[0]

    def __getitem__(self, index):
        img_name = os.path.join(self.root_dir, self.labels.iloc[index, 0])
        image = Image.open(img_name + '.jpg')
        # Collapse the one-hot class flags into a single class index.
        one_hot = self.labels.iloc[index, 1:].astype(int).to_numpy()
        label = np.argmax(one_hot)
        if self.transform:
            image = self.transform(image=np.array(image))['image']
        # Rescale uint8 images to [0, 1] and move channels first (C, H, W).
        divisor = 255.0 if image.dtype == np.uint8 else 1
        image = torch.from_numpy(
            np.moveaxis(image / divisor, -1, 0).astype(np.float32))
        return image, label
def run():
    """Enable multiprocessing support for frozen (e.g. Windows) executables."""
    torch.multiprocessing.freeze_support()
def get_device():
    """Return the first CUDA device when available, otherwise the CPU."""
    use_cuda = torch.cuda.is_available()
    return torch.device("cuda:0" if use_cuda else "cpu")
def load_mnist(data_dir, batch_size):
    """Build train/val/test DataLoaders over FashionMNIST.

    The validation and test loaders both wrap the official test split
    (train=False); train and val are shuffled, test is not. The dataset
    must already be downloaded under ``data_dir`` (download=False).

    Args:
        data_dir: root directory containing the dataset files.
        batch_size: mini-batch size for every loader.

    Returns:
        dict with 'train', 'val' and 'test' DataLoaders.
    """
    to_tensor = transforms.Compose([transforms.ToTensor()])

    def make_set(is_train):
        # One fresh dataset object per loader, mirroring separate instances.
        return torchvision.datasets.FashionMNIST(
            root=data_dir, train=is_train, download=False, transform=to_tensor)

    dataloaders_dict = {
        'train': torch.utils.data.DataLoader(make_set(True),
                                             batch_size=batch_size, shuffle=True),
        'val': torch.utils.data.DataLoader(make_set(False),
                                           batch_size=batch_size, shuffle=True),
        'test': torch.utils.data.DataLoader(make_set(False),
                                            batch_size=batch_size, shuffle=False),
    }
    return dataloaders_dict
def load_dogs(batch_size, resolution, phase):
    """Build a DataLoader over the small dogs-vs-cats dataset for one phase.

    Args:
        batch_size: mini-batch size.
        resolution: (height, width) the images are resized to.
        phase: 'train', 'test', or anything else for the validation split.

    Returns:
        a DataLoader over the concatenated dog + cat datasets (unshuffled).
    """
    # phase -> (is_train, is_test, directory suffix)
    phase_map = {'train': (True, False, r'\train'),
                 'test': (False, True, r'\test')}
    is_train, is_test, suffix = phase_map.get(phase, (False, False, r'\validation'))

    augment = A.Compose([
        A.Resize(resolution[0], resolution[1]),
        A.HorizontalFlip(p=0.5),
        A.VerticalFlip(p=0.5),
        A.Rotate(limit=(-90, 90)),
        A.RandomBrightnessContrast(),
    ])

    root = 'D:\Subjects\PycharmProjects\Onmyoji\data\dogs_small' + suffix
    dogs = DogCatDataset(root_dir=root, train=is_train, test=is_test,
                         isdog=True, transform=augment)
    cats = DogCatDataset(root_dir=root, train=is_train, test=is_test,
                         isdog=False, transform=augment)
    combined = torch.utils.data.ConcatDataset([dogs, cats])
    return torch.utils.data.DataLoader(combined, batch_size=batch_size,
                                       shuffle=False, num_workers=1)
def my_collate(batch):
    """Collate function that keeps images at their original sizes.

    Default collation stacks images into one tensor, which is impossible
    for mixed sizes; here the images stay in a plain Python list and only
    the labels are tensorised.
    """
    images = [sample[0] for sample in batch]
    labels = torch.LongTensor([sample[1] for sample in batch])
    return [images, labels]
def load_isic(batch_size, sampled=False):
    """Build shuffled train/val/test DataLoaders for the ISIC 2019 data.

    Splits the ground-truth table 80/10/10 at random, applies flip, rotate
    and brightness augmentation, and keeps variable-sized images by using
    the list-based ``my_collate``.

    Args:
        batch_size: mini-batch size.
        sampled: read the reduced, pre-sampled file list instead of the
            full ground truth table.

    Returns:
        dict with 'train', 'resize', 'val' and 'test' DataLoaders
        ('resize' is an alias of the 'val' loader).
    """
    if sampled:
        table_path = './data/ISIC/ISIC_2019_Training_Input/sampled_files.csv'
    else:
        table_path = './data/ISIC/ISIC_2019_Training_GroundTruth.csv'
    table = pd.read_csv(table_path)

    # Random 80/10/10 split of the label table.
    y_train, y_rest = train_test_split(table, test_size=0.2, shuffle=True)
    y_val, y_test = train_test_split(y_rest, test_size=0.5, shuffle=True)

    augment = A.Compose([
        # A.Resize(resolution,resolution),
        A.HorizontalFlip(p=0.5),
        A.VerticalFlip(p=0.5),
        A.Rotate(limit=(-180, 180)),
        A.RandomBrightnessContrast(),
    ])

    image_root = './data/ISIC/ISIC_2019_Training_Input/ISIC_2019_Training_Input'

    def make_loader(split, shuffle):
        dataset = ISICDataset(root_dir=image_root, labels=split, transform=augment)
        return torch.utils.data.DataLoader(dataset, batch_size=batch_size,
                                           shuffle=shuffle, collate_fn=my_collate,
                                           num_workers=1)

    val_loader = make_loader(y_val, True)
    return {'train': make_loader(y_train, True),
            'resize': val_loader,
            'val': val_loader,
            'test': make_loader(y_test, False)}
def kfold_load_isic(labels, batch_size, resolution):
    """DataLoader over an arbitrary label subset, for k-fold cross-validation.

    Args:
        labels: DataFrame slice of the ground-truth table for this fold.
        batch_size: mini-batch size.
        resolution: square side length the images are resized to.

    Returns:
        a shuffled DataLoader over this fold's images.
    """
    augment = A.Compose([
        A.Resize(resolution, resolution),
        A.HorizontalFlip(p=0.5),
        A.VerticalFlip(p=0.5),
        A.Rotate(limit=(-90, 90)),
        A.RandomBrightnessContrast(),
    ])
    fold_set = ISICDataset(
        root_dir='./data/ISIC/ISIC_2019_Training_Input/ISIC_2019_Training_Input',
        labels=labels,
        transform=augment)
    return torch.utils.data.DataLoader(fold_set, batch_size=batch_size,
                                       shuffle=True, num_workers=1)
def set_dataloader(dataset_name, batch_size, resolution):
    """Build the dataloader dict and class count for a named dataset.

    Args:
        dataset_name: one of 'mnist', 'dogs', 'isic', 'isic_sampled'.
        batch_size: mini-batch size for every loader.
        resolution: target image resolution (unused for 'mnist' and for the
            ISIC loaders, which keep original image sizes).

    Returns:
        (num_classes, dataloaders_dict); (0, None) for an unknown name.
    """
    if dataset_name == 'mnist':
        data_dir = "./data/FashionMNIST"
        dataloaders_dict = load_mnist(data_dir, batch_size)
        # Number of classes in the dataset
        num_classes = 10
    elif dataset_name == 'dogs':
        dataloaders_dict = {'train': load_dogs(batch_size, resolution, 'train'),
                            'val': load_dogs(batch_size, resolution, 'val'),
                            'test': load_dogs(batch_size, resolution, 'test')}
        # Number of classes in the dataset
        num_classes = 2
    elif dataset_name == 'isic':
        # Fix: load_isic(batch_size, sampled=False) already returns the full
        # dict of loaders; the old per-phase calls passed (batch_size,
        # resolution, phase), which does not match its signature and raised
        # TypeError at runtime.
        dataloaders_dict = load_isic(batch_size)
        num_classes = 9
    elif dataset_name == 'isic_sampled':
        dataloaders_dict = load_isic(batch_size, sampled=True)
        num_classes = 9
    else:
        num_classes = 0
        dataloaders_dict = None
    return num_classes, dataloaders_dict
def show_img(img):
    """Display a (C, H, W) tensor that was normalised to roughly [-1, 1]."""
    unnormalized = img / 2 + 0.5
    as_array = unnormalized.numpy()
    print(as_array.shape)
    # matplotlib expects channels last: (H, W, C).
    plt.imshow(np.transpose(as_array, (1, 2, 0)))
    plt.show()
def save_logs(hist, output_directory, dataset_name):
    """Write the training history to ``<output_directory>/history_<dataset_name>.csv``.

    Args:
        hist: dict mapping metric name -> list of per-step values.
        output_directory: destination directory (must already exist).
        dataset_name: suffix used in the file name.
    """
    frame = pd.DataFrame.from_dict(hist, orient='index').transpose()
    out_path = '{}/history_{}.csv'.format(output_directory, dataset_name)
    frame.to_csv(out_path, index=False)
def save_paras(paras, output_directory, dataset_name):
    """Write the run parameters to ``<output_directory>/paras_<dataset_name>.csv``.

    Args:
        paras: dict mapping parameter name -> value.
        output_directory: destination directory (must already exist).
        dataset_name: suffix used in the file name.
    """
    frame = pd.DataFrame.from_dict(paras, orient='index', columns=['parameters'])
    out_path = '{}/paras_{}.csv'.format(output_directory, dataset_name)
    frame.to_csv(out_path, index=True)
def plot_loss_res(loss, res_x, res_y, output_directory, dataset_name, loss_type):
    """Plot a loss series against the x and y resolutions, save and show it.

    Args:
        loss: loss values (train, valid or test).
        res_x: resolutions along x, one per loss value.
        res_y: resolutions along y, one per loss value.
        output_directory: directory the PNG is written to.
        dataset_name: used in the output file name.
        loss_type: label for the y axis and title ('train', 'valid', ...).
    """
    plt.title(loss_type + " loss and resolution")
    handle_x, = plt.plot(res_x, loss, 'o', label='x')
    handle_y, = plt.plot(res_y, loss, 'o', label='y')
    plt.legend(handles=[handle_x, handle_y])
    plt.xlabel("Resolution")
    plt.ylabel(loss_type)
    # TODO: change directory
    plt.savefig(output_directory + "/Loss_resolution_" + dataset_name + ".png",
                bbox_inches='tight')
    plt.show()
def plot_scatter(dir):
    """Scatter x/y resolutions from a history CSV, coloured by loss.

    Args:
        dir: path to a CSV with 'resolution_x', 'resolution_y' and 'loss'
            columns.
    """
    history = pd.read_csv(dir)
    xs = np.array(history['resolution_x'])
    ys = np.array(history['resolution_y'])
    losses = np.array(history['loss'])
    points = plt.scatter(xs, ys, c=losses, cmap=mpl.cm.cool)
    plt.xlabel('x')
    plt.ylabel('y')
    colorbar = plt.colorbar(points)
    colorbar.set_label('loss')
    plt.title('x, y, loss--dogs')
    plt.show()
def plot_roc(fpr,tpr, roc_auc, nums_class,output_directory,dataset_name):
    """Plot per-class, micro- and macro-averaged ROC curves and save the figure.

    Args:
        fpr: dict of false-positive-rate arrays keyed by class index, plus
            'micro' and 'macro' entries (as produced by ``calculate_roc``).
        tpr: matching dict of true-positive-rate arrays.
        roc_auc: matching dict of AUC values.
        nums_class: number of classes.
        output_directory: directory the PNG is written to.
        dataset_name: used in the plot title and output file name.
    """
    # plot roc curve
    plt.figure(figsize=(14, 8))
    lw = 2
    # One curve per class.
    for i in range(nums_class):
        plt.plot(fpr[i], tpr[i], lw=lw,
                 label='ROC curve of class'+str(i)+'(area = %0.2f)' % roc_auc[i])
    plt.plot(fpr["micro"], tpr["micro"],
             label='micro-average ROC curve (area = {0:0.2f})'
             ''.format(roc_auc["micro"]),
             color='deeppink', linestyle=':', linewidth=lw)
    plt.plot(fpr["macro"], tpr["macro"],
             label='macro-average ROC curve (area = {0:0.2f})'
             ''.format(roc_auc["macro"]),
             color='navy', linestyle=':', linewidth=lw)
    # Chance-level diagonal for reference.
    plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.05])
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.title('Receiver operating characteristic ' + dataset_name)
    plt.legend(loc="lower right")
    plt.savefig(output_directory + "/ROC_curve_" + dataset_name + ".png", bbox_inches='tight')
    plt.show()
def calculate_f1(y_true, y_pred):
    """Return the class-support-weighted F1 score of the predictions."""
    score = metrics.f1_score(y_true, y_pred, average='weighted')
    return score
def calculate_metrics(y_true, y_pred):
    """Return (weighted F1, weighted Jaccard, accuracy, confusion matrix).

    All averaged scores use sklearn's 'weighted' mode (per-class scores
    weighted by class support).
    """
    return (metrics.f1_score(y_true, y_pred, average='weighted'),
            metrics.jaccard_score(y_true, y_pred, average='weighted'),
            metrics.accuracy_score(y_true, y_pred),
            metrics.confusion_matrix(y_true, y_pred))
def calculate_roc(y_true, y_score, num_classes):
    """Compute per-class, micro- and macro-averaged ROC curves.

    Args:
        y_true: 1-D array of integer class labels.
        y_score: DataFrame of per-class scores, one column per class.
        num_classes: number of classes.

    Returns:
        (fpr, tpr, roc_auc): dicts keyed by class index plus 'micro' and
        'macro' aggregate entries.
    """
    fpr = dict()
    tpr = dict()
    roc_auc = dict()
    # One-hot encode the integer labels.
    one_hot_labels = np.zeros((y_true.shape[0], num_classes), dtype=int)
    for idx, j in enumerate(y_true):
        one_hot_labels[idx][j] = 1
    prediction = y_score.to_numpy()
    # Per-class ROC curves.
    for i in range(num_classes):
        fpr[i], tpr[i], _ = metrics.roc_curve(one_hot_labels[:, i], prediction[:, i])
        roc_auc[i] = metrics.auc(fpr[i], tpr[i])
    # Micro-average: pool all decisions into a single binary problem.
    fpr["micro"], tpr["micro"], _ = metrics.roc_curve(one_hot_labels.ravel(), prediction.ravel())
    roc_auc["micro"] = metrics.auc(fpr["micro"], tpr["micro"])
    # Macro-average: interpolate every per-class curve onto a common FPR grid.
    all_fpr = np.unique(np.concatenate([fpr[i] for i in range(num_classes)]))
    mean_tpr = np.zeros_like(all_fpr)
    for i in range(num_classes):
        # Fix: scipy.interp was a deprecated alias that has been removed
        # from modern SciPy; np.interp is the drop-in replacement.
        mean_tpr += np.interp(all_fpr, fpr[i], tpr[i])
    # Average the interpolated curves and compute the aggregate AUC.
    mean_tpr /= num_classes
    fpr["macro"] = all_fpr
    tpr["macro"] = mean_tpr
    roc_auc["macro"] = metrics.auc(fpr["macro"], tpr["macro"])
    return fpr, tpr, roc_auc
def shades_of_gray(img, mink_norm=6):
    """White-balance an image with the Shades-of-Gray colour-constancy method.

    Estimates the scene illuminant per channel via a Minkowski p-norm of the
    pixel values, normalises the estimate to unit length, and divides the
    image (rescaled to [0, 1]) by the scaled illuminant.

    Args:
        img: H x W x 3 array with values in [0, 255]; not modified.
        mink_norm: Minkowski norm order p (p=6 is the common choice;
            p=1 reduces to the Gray-World assumption).

    Returns:
        The corrected image as floats roughly in [0, 1].
    """
    # Per-channel illuminant estimate: mean(channel ** p) ** (1/p).
    illum = [np.power(np.mean(np.power(img[:, :, channel], mink_norm), (0, 1)),
                      1 / mink_norm)
             for channel in range(3)]
    # Normalise the estimate to a unit vector.
    magnitude = np.sqrt(np.sum(np.power(illum, 2)))
    illum = np.divide(illum, magnitude)
    gain = illum * np.sqrt(3)
    corrected_image = img / 255.
    for channel in range(3):
        corrected_image[:, :, channel] /= gain[channel]
    return corrected_image
def convert_image_np(inp):
    """Convert a (C, H, W) Tensor to a (H, W, C) numpy image."""
    channels_last = inp.numpy().transpose((1, 2, 0))
    return channels_last
def visualize_transform(model, dataloader, resolution):
    """Show one dataset image next to its resized copy, side by side.

    Args:
        model: unused here; kept for call-site compatibility.
        dataloader: loader whose first batch supplies the image (assumes
            my_collate-style batches where batch[0] is a list of images
            -- TODO confirm against callers).
        resolution: target square resolution for the resized copy.
    """
    with torch.no_grad():
        # Get a batch of training data
        # self.model.load_state_dict(self.best_model)
        data = next(iter(dataloader))[0]
        input_tensor = data[0]
        # skimage resize of the (C, H, W) tensor to (3, resolution, resolution).
        transformed_input_tensor = transform.resize(input_tensor, (3,resolution,resolution))
        in_grid = convert_image_np(
            torchvision.utils.make_grid(input_tensor))
        # Channels last for matplotlib.
        out_grid = transformed_input_tensor.transpose((1, 2, 0))
        # Plot the results side-by-side
        f, axarr = plt.subplots(1, 2)
        axarr[0].imshow(in_grid)
        axarr[0].set_title('Dataset Images')
        axarr[1].imshow(out_grid)
        axarr[1].set_title('Transformed Images')
        plt.ioff()
        plt.show()
def visualization_cnn(model, best_model):
    """Load the best weights into *model* and display its first conv filters.

    Walks the model's direct children (and one level into nn.Sequential
    containers) collecting nn.Conv2d layers, then shows the first conv
    layer's filters as a grid of grayscale images.

    Args:
        model: the network architecture to load weights into.
        best_model: a state dict compatible with *model*.
    """
    model.load_state_dict(best_model)
    model_weights = [] # we will save the conv layer weights in this list
    conv_layers = [] # we will save the conv layers in this list
    # get all the model children as list
    model_children = list(model.children())
    # counter to keep count of the conv layers
    counter = 0
    # append all the conv layers and their respective weights to the list
    for i in range(len(model_children)):
        if type(model_children[i]) == nn.Conv2d:
            counter += 1
            model_weights.append(model_children[i].weight)
            conv_layers.append(model_children[i])
        elif type(model_children[i]) == nn.Sequential:
            # NOTE: only one nesting level is inspected; deeper containers
            # are not walked.
            for j in range(len(model_children[i])):
                child = model_children[i][j]
                if type(child) == nn.Conv2d:
                    counter += 1
                    model_weights.append(child.weight)
                    conv_layers.append(child)
    print(f"Total convolutional layers: {counter}")
    # visualize the first conv layer filters
    plt.figure(figsize=(20, 17))
    for i, filter in enumerate(model_weights[0]):
        # Assumes the filter count is a multiple of 8 -- TODO confirm.
        plt.subplot(int(len(model_weights[0]) / 8), 8,
                    i + 1)  # (8, 8) because in conv0 we have 7x7 filters and total of 64 (see printed shapes)
        # Only the first input channel of each filter is shown.
        plt.imshow(filter[0, :, :].cpu().detach(), cmap='gray')
        plt.axis('off')
    # plt.savefig('../outputs/filter.png')
    plt.show()
def count_param(model):
    """Return the total number of scalar parameters in *model*."""
    return sum(p.numel() for p in model.parameters())
def transform_fix(self, x):
    """Apply a fixed affine (pure scaling) warp to every image in *x*.

    Builds an identity affine matrix scaled by ``self.scale`` and resamples
    each image onto a ``self.resolution``-sized grid on the GPU.

    Args:
        x: iterable of (C, H, W) image tensors -- presumably variable-sized;
            TODO confirm against callers.

    Returns:
        (x, scaled): the warped batch (stacked along dim 0) and the integer
        ``resolution / |scale|``. NOTE(review): 'scaled' is computed but the
        warp itself always samples a resolution-sized grid -- confirm how
        callers use it.
    """
    s = self.scale.item()
    theta = torch.tensor([[[1, 0, 0], [0, 1, 0]]],
                         dtype=torch.float, requires_grad=True)
    # Scaling the whole identity matrix scales both axes uniformly.
    theta = torch.mul(theta, self.scale)
    scaled = int(self.resolution / abs(s))
    output = None
    # NOTE(review): device is hard-coded to 'cuda'; fails on CPU-only hosts.
    grid = F.affine_grid(theta, [1, 3, self.resolution, self.resolution]).to('cuda')
    for img in x:
        img = F.grid_sample(img.unsqueeze(0), grid)
        # Accumulate the warped images into a single batch tensor.
        output = torch.cat((output, img), 0) if output is not None else img
    x = output
    return x, scaled
| [
"noreply@github.com"
] | noreply@github.com |
98acdbd0cb4d39fcd02dec2ee1444546fad112b3 | a79a807697d54baa63e80a35a0037cd00ab45e28 | /Python-Stack/Django/DjangoIntro/Counter/assignment_three/counter/urls.py | 836969244c63649d19bb9d193a652fbd4474b74a | [] | no_license | YasmeenMuhtaseb/Assignments | 153eb2a3bf9815ffd1f23d272beed7271339e981 | ccec0c28bc173989a9288956cceedda84f577810 | refs/heads/master | 2023-03-21T17:19:46.796728 | 2021-03-10T20:08:24 | 2021-03-10T20:08:24 | 330,404,190 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 239 | py | from django.urls import path
from . import views
urlpatterns = [
path('', views.index),
path('destroy_session',views.destroy),
path('increment',views.increment),
path('incrementByInserted',views.incrementByInserted)
] | [
"y.muhtaseb95@gmail.com"
] | y.muhtaseb95@gmail.com |
c2f34abffbd6b332cda3d124b36df870c5f80437 | fd74ba74d33469098218c582a0a639cf49ab1950 | /csv_reader.py | e6b86df4c4a53dab7e9ff55b5adf54291f799966 | [] | no_license | tedljw/bert_chinese_classify | 71a7b71622dbc8163f73dfb58bc05fad8eb096cf | 557f273cf716c0530ef60a533c5ba30ef23c8fa4 | refs/heads/master | 2020-05-22T07:33:27.583594 | 2019-05-21T08:01:38 | 2019-05-21T08:01:38 | 186,267,474 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 876 | py | import pandas
import random
import json
random.seed(233)
def save_data(path, data_list):
    """Write (sentence, label) pairs to *path* as one JSON object per line.

    Args:
        path: output file path.
        data_list: iterable of (sentence, label) pairs.
    """
    lines = []
    for sentence, label in data_list:
        lines.append(json.dumps({'question': sentence, 'label': label}))
    with open(path, 'w', encoding='utf-8') as w:
        w.write('\n'.join(lines))
datas = pandas.read_csv('./test.csv')
label_list = datas['label'].tolist()
stc_list = datas['question'].tolist()
c = list(zip(stc_list, label_list))
random.shuffle(c)
stc_list[:], label_list[:] = zip(*c)
train_len = int(0.75 * len(stc_list))
test_len = int(0.9 * len(stc_list))
train_data = c[:train_len]
valid_data = c[train_len:test_len]
test_data = c[test_len:]
save_data('data/train.json', train_data)
save_data('data/valid.json', valid_data)
save_data('data/test.json', test_data)
| [
"ljwhero@163.com"
] | ljwhero@163.com |
ba0fba26f9ca4e4466527155c41fa14dfdc80358 | 1aca8e3a2ce8196c677be60d5f782a795defe13a | /problemaC.py | ffa9d2e66c7f36710d0786031ca4e08f583a2f59 | [] | no_license | AtilioA/TProg1 | af2182eae3cd83160c8ab167fd8e855ec9cc7232 | f0771e7bb76d9d22e29d2bfd76ef113694f87ed8 | refs/heads/master | 2020-05-31T16:42:42.072575 | 2019-06-29T16:02:07 | 2019-06-29T16:02:07 | 190,387,730 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,264 | py | # Módulo com funções para solucionar o problema C
# Autores: Atílio Antônio Dadalto e Ícaro Madalena do Nascimento
# Disciplina: Programação I, ministrada por Jordana Sarmenghi Salamon em 2019/1
from problemaA import distancia_total
from problemaB import caminhos_percorridos, ids_robos
from roboAux import tupla2
# ============ QUESTÃO C ============ #
# c) Exiba os caminhos percorridos por todos os robôs que entraram no terreno de busca,
# ordenados crescentemente pela distância total percorrida;
# COMPREENSÃO DO PROBLEMA: deve-se calcular as distâncias percorridas por todos os robôs, adicionando-as
# em uma tupla que será ordenada a partir das distâncias, impressa na tela e então retornada
def tupla_caminhos_percorridos(listaRobos, ids, distsTotais, caminhos):
    """Zip robot ids, total distances and paths into (id, dist, path) tuples.

    Mirrors the output format required by question C. Iteration stops at
    the shortest of the three lists; any empty input is returned as-is.
    ``listaRobos`` is unused here but kept for interface compatibility.

    Args:
        listaRobos: full robot data list (unused).
        ids: robot ids.
        distsTotais: total distance travelled per robot.
        caminhos: path travelled per robot.

    Returns:
        list of (id, total distance, path) tuples.
    """
    if not ids:
        return ids
    if not distsTotais:
        return distsTotais
    if not caminhos:
        return caminhos
    return [(rid, dist, path)
            for rid, dist, path in zip(ids, distsTotais, caminhos)]
def merge_ordenada_tupla(l1, l2):
    """Merge two tuple lists already sorted by their second element.

    Uses ``tupla2`` as the sort key. Stable: on ties the element from
    ``l1`` comes first. Returns a new merged, sorted list (an empty input
    short-circuits to the other list, as in the original).
    """
    if not l1:
        return l2
    if not l2:
        return l1
    merged = []
    i = j = 0
    # Standard two-pointer merge instead of the recursive formulation.
    while i < len(l1) and j < len(l2):
        if tupla2(l1[i]) <= tupla2(l2[j]):
            merged.append(l1[i])
            i += 1
        else:
            merged.append(l2[j])
            j += 1
    merged.extend(l1[i:])
    merged.extend(l2[j:])
    return merged
def merge_sort_tupla(lista):
    """Merge-sort a list of tuples by their second element (via ``tupla2``).

    Args:
        lista: list of numeric tuples.

    Returns:
        a new list sorted ascending by the tuples' second element.
    """
    if len(lista) <= 1:
        return lista
    mid = len(lista) // 2
    left = merge_sort_tupla(lista[:mid])
    right = merge_sort_tupla(lista[mid:])
    return merge_ordenada_tupla(left, right)
def distancias_totais_robos(listaRobos):
    """Total distance travelled by each robot, in caminhos_percorridos order.

    Args:
        listaRobos: full robot data list.

    Returns:
        list of total distances, one per robot path.
    """
    return [distancia_total(caminho)
            for caminho in caminhos_percorridos(listaRobos)]
# Função principal do problema C
def caminhos_robos_crescente(listaRobos):
    """Return each robot's (id, total distance, path) tuple, sorted ascending
    by total distance (question C's main function).

    Prints a message and returns None for an empty robot list.
    NOTE(review): nothing in this body visibly raises ValueError, so the
    try/except appears vacuous unless one of the imported helpers raises
    it -- confirm before removing.
    """
    try:
        if not listaRobos: # ValueError
            print("Lista vazia não possui robôs.")
        else:
            ids = ids_robos(listaRobos)
            caminhos = caminhos_percorridos(listaRobos)
            distsTotais = distancias_totais_robos(listaRobos)
            tuplasComDistancias = tupla_caminhos_percorridos(listaRobos, ids, distsTotais, caminhos)
            listaTuplasOrdenadasPorDistancia = merge_sort_tupla(tuplasComDistancias)
            return listaTuplasOrdenadasPorDistancia
    except ValueError:
        pass
| [
"atiliodadalto@hotmail.com"
] | atiliodadalto@hotmail.com |
73f72b765974115b3bd6a2e5fc4171242eb45a4e | 907f1fe8d0403d535fdbd05e020abd6e01f6b03d | /code_sacro/pre_train.py | e1e2f420b03c3d88a2915b6d169dbfeae41f9a7c | [] | no_license | mikami520/Sacrocolpopexy-workflow-analysis | 95641741b08e28f79ff4da5ac290559084bccc13 | 49a88702ccac0e8b97030dab7afb3f539951fbe5 | refs/heads/master | 2023-05-09T03:56:28.119126 | 2021-06-04T02:02:21 | 2021-06-04T02:02:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,800 | py | #! /usr/bin/env python
from model import Conv3_pre
from utils.UCF101 import UCF101
import numpy as np
import pickle
import time
import psutil
import os
import scipy.io as scio
import torch
import torchvision
import torchvision.transforms as transforms
import torch.nn as nn
import torch.autograd as autograd
import torch.optim as optim
def dataloder():
    """Load the pickled UCF101 training batches and labels from disk.

    Each pickle was saved in two parts; the second part is appended to the
    first.

    Returns:
        (train_sets, train_labels): lists of pre-built batch tensors.
    """
    base = './data/UCF101-16-112-112/'

    def load_pickle(name):
        # Context manager guarantees the file handle is closed.
        with open(base + name, 'rb') as handle:
            return pickle.load(handle)

    train_sets = load_pickle('train_set1.pickle')
    train_sets.extend(load_pickle('train_set2.pickle'))
    train_labels = load_pickle('train_labels1.pickle')
    train_labels.extend(load_pickle('train_labels2.pickle'))
    return train_sets, train_labels
def reset_train_sets(train_sets, train_labels):
    """Re-sample every training batch in place from a fresh UCF101 pass.

    Args:
        train_sets: list of batch tensors, overwritten in place.
        train_labels: list of label tensors, overwritten in place.

    Returns:
        The same (train_sets, train_labels) lists after refreshing.
    """
    dataset = UCF101()
    batch_num = dataset.set_mode('train')
    print('reconstructing the train set...')
    for i in range(batch_num):
        batch_x, batch_y = dataset[i]
        train_sets[i] = batch_x
        train_labels[i] = batch_y
    return train_sets, train_labels
def main():
    """Pre-train Conv3 on pre-batched UCF101 clips with SGD + momentum,
    re-sampling the training batches every 7 epochs, then save the weights
    (params_conv3.pkl) and loss/accuracy curves (results_output.mat).
    """
    device = torch.device("cuda:1" if torch.cuda.is_available() else "cpu")
    model = Conv3_pre().to(device)
    print('loading data')
    start = time.time()
    train_sets, train_labels = dataloder()
    batch_num = len(train_sets)
    elapsed = (time.time() - start)
    print("Data loded, time used:", elapsed)
    # Initializing necessary components
    loss_func = nn.NLLLoss().to(device)
    optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)
    accuracy_stas = []
    loss_stas = []
    counter = 0
    # Batch order is shuffled every epoch via this index array.
    train_idx = np.arange(batch_num)
    # myUCF101 = UCF101()
    # className = myUCF101.get_className()
    # batch_num = myUCF101.set_mode('train')
    print('Start training')
    start = time.time()
    for epoch in range(28):
        print('epoch:', epoch + 1)
        running_loss = 0.0
        running_accuracy = 0.0
        np.random.shuffle(train_idx)
        # reconstruct the train set for every 7 epochs
        if epoch % 7 == 0 and epoch != 0:
            train_sets, train_labels = reset_train_sets(train_sets, train_labels)
        for batch_idx in range(batch_num):
            inputs = train_sets[train_idx[batch_idx]].to(device)
            labels = train_labels[train_idx[batch_idx]].long().to(device)
            # inputs, labels = myUCF101[batch_idx]
            # inputs = inputs.to(device)
            # labels = labels.long().to(device)
            optimizer.zero_grad()
            output = model.forward_cov(inputs)
            loss = loss_func(output, labels)
            loss.backward()
            # torch.nn.utils.clip_grad_norm(model.parameters(), 10)
            optimizer.step()
            # Calculate the loss with new parameters
            running_loss += loss.item()
            current_loss = running_loss / (batch_idx + 1)
            # Calculate the accuracy
            # NOTE(review): this second forward pass doubles the compute per
            # batch just to measure post-update accuracy -- confirm intended.
            output = model.forward_cov(inputs)
            _, predicted_labels = torch.max(output.data, 1)
            correct_pred = (predicted_labels == labels).sum().item()
            total_pred = predicted_labels.size(0)
            batch_accuracy = correct_pred / total_pred
            running_accuracy += batch_accuracy
            current_accuracy = running_accuracy / (batch_idx + 1) * 100
            counter += 1
            if counter % 10 == 0:
                # save the loss and accuracy
                accuracy_stas.append(current_accuracy)
                loss_stas.append(current_loss)
            if batch_idx % 300 == 299:
                print('[Epoch: %d Batch: %5d] loss: %.3f accuracy: %.3f'
                      % (epoch + 1, batch_idx + 1, current_loss, current_accuracy))
        print('[Final results of epoch: %d] loss: %.3f accuracy: %.3f'
              % (epoch + 1, current_loss, current_accuracy))
    elapsed = (time.time() - start)
    print("Training finished, time used:", elapsed / 60, 'min')
    torch.save(model.state_dict(), 'params_conv3.pkl')
    data_path = 'results_output.mat'
    scio.savemat(data_path, {'accuracy': np.asarray(accuracy_stas), 'loss': np.asarray(loss_stas)})
if __name__ == '__main__':
main()
| [
"zyt948715472@icloud.com"
] | zyt948715472@icloud.com |
8934d26a9954c241c559584809ce7bfe4daa42b8 | 3e4556b982455bc5b44c7f823ed3199c654f4f6f | /delicious_hometown/migrations/0023_choice_market.py | 12efdd1bf7e9389789bca1e4dc837815ce25cfbe | [] | no_license | changwoo360/MyProjects | 42f7661571e1a548f276075c2e258e79abc78f6d | 92f3174d2fd03a86dfd3614872d7dddf90d34d35 | refs/heads/master | 2020-03-31T05:01:28.887379 | 2018-11-20T14:02:45 | 2018-11-20T14:02:45 | 151,929,977 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 733 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2018-10-12 08:26
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django makemigrations; avoid hand-editing the
    # operations -- create a follow-up migration instead.

    dependencies = [
        ('delicious_hometown', '0022_auto_20181012_1135'),
    ]

    operations = [
        # Creates the choice_market table: an auto PK plus a foreign key to
        # delicious_hometown.FoodMaterial (rows are deleted in cascade with
        # their material).
        migrations.CreateModel(
            name='choice_market',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('market_material', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='delicious_hometown.FoodMaterial')),
            ],
        ),
    ]
| [
"noreply@github.com"
] | noreply@github.com |
eb06707c02b708b16b20562078f0ccd02b5cca34 | 76dab6591cb9c7ee566b76a0adc7b0b0c4086592 | /main/tests/test_models.py | 7185a7137b6e46e6c02f4727e6bb80c1f7e2792a | [] | no_license | gray-adeyi/booktime | 87962321e380cfa779b24f2bd6fa8c434687d084 | fb54bc35739b28b5a71a5cf0c1067f38140559ba | refs/heads/main | 2023-04-05T02:44:01.992984 | 2021-05-03T01:37:01 | 2021-05-03T01:37:25 | 363,434,043 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,490 | py | from decimal import Decimal
from django.test import TestCase
from main import models
class TestModel(TestCase):
    """Exercises the Product manager and the basket -> order conversion."""

    def test_active_manager_works(self):
        """active() must exclude products flagged active=False."""
        models.Product.objects.create(
            name="The cathedral and the bazaar",
            price=Decimal("10.00"))
        models.Product.objects.create(
            name="Pride and Prejudice",
            price=Decimal("2.00"))
        models.Product.objects.create(
            name="A Tale of Two Cities",
            price=Decimal("2.00"),
            active=False)
        self.assertEqual(len(models.Product.objects.active()), 2)

    def test_create_order_works(self):
        """create_order copies the addresses and basket lines onto the order
        and logs the creation."""
        p1 = models.Product.objects.create(
            name="The cathedral and the bazaar",
            price=Decimal("10.00"),
        )
        p2 = models.Product.objects.create(
            name="Pride and Prejudice", price=Decimal("2.00")
        )
        user1 = models.User.objects.create_user(
            "user1", "pw432joij"
        )
        billing = models.Address.objects.create(
            user=user1,
            name="John Kimball",
            address1="127 Strudel road",
            city="London",
            country="uk",
        )
        shipping = models.Address.objects.create(
            user=user1,
            name="John Kimball",
            address1="123 Deacon road",
            city="London",
            country="uk",
        )
        basket = models.Basket.objects.create(user=user1)
        models.BasketLine.objects.create(
            basket=basket, product=p1
        )
        models.BasketLine.objects.create(
            basket=basket, product=p2
        )
        # Order creation should emit at least one INFO log record.
        with self.assertLogs("main.models", level="INFO") as cm:
            order = basket.create_order(billing, shipping)
        self.assertGreaterEqual(len(cm.output), 1)
        order.refresh_from_db()
        # Fix: assertEquals is a deprecated alias removed in Python 3.12;
        # use assertEqual throughout.
        self.assertEqual(order.user, user1)
        self.assertEqual(
            order.billing_address1, "127 Strudel road"
        )
        self.assertEqual(
            order.shipping_address1, "123 Deacon road"
        )
        # add more checks here
        self.assertEqual(order.lines.all().count(), 2)
        lines = order.lines.all()
        self.assertEqual(lines[0].product, p1)
        self.assertEqual(lines[1].product, p2)
| [
"adeyigbenga005@gmail.com"
] | adeyigbenga005@gmail.com |
a3fc82185c9fb7f8be05a7dfcc5642cbb1451ae1 | 5ccf17b4708c7ef12ae17d8f089eeea57e1717f6 | /mazeGenerator/mazeGenerator.pyde | f8955658619403bff81f4e40fb622a3cd0883fdb | [] | no_license | perewodchik/processing | d71b3057fae584b94ae499e8c5bc443936e3ec22 | 6caf07c259ae4775cc6b151b92f315fa1ad4f294 | refs/heads/master | 2020-05-15T16:32:33.694728 | 2019-04-23T20:29:29 | 2019-04-23T20:29:29 | 182,391,187 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,416 | pyde | from random import *
backgroundColor = "#B0E19B"
startColor = "#37BA004"
endColor = "#2B8F00"
wayColor = "#41DB00"
wallColor = "#CB0024"
windowWidth = 900
windowHeight = 900
mazeSize = 10
""":
This is a maze generator using DSU with a possible solution
There is also BFS for finding path from start to finish.
CONTROLS:
LEFT MOUSE - set starting position
RIGHT MOUSE - set ending position
D - create a maze or generate a new one
S - show/hide solution
C - clear current maze
So, i didn't play for it initially, but you can also draw a maze of your own!
In order to do that, simply hold MIDDLE MOUSE and hover over the cells
that should become the walls. It will automatically generate the solution
once you're finished building the walls
"""
class Point:
    """A simple 2D grid coordinate with x (column) and y (row)."""

    def __init__(self, x_, y_):
        self.x, self.y = x_, y_
startPoint = Point(0,0)
endPoint = Point(mazeSize - 1, mazeSize - 1)
#We will keep the connections as one dimensional arra,
#where row is defined using // operator,
#and column is defines using % operator
dirx = [-1, 0, 0, 1]
diry = [ 0,-1, 1, 0]
unionLink = [i for i in range(mazeSize * mazeSize)]
unionSize = [1 for i in range(mazeSize * mazeSize)]
Maze = [[0 for i in range(mazeSize)] for i in range(mazeSize)]
showPath = 0
pathFound = 0
def unionFind(x):
    """Return the DSU root of cell x, compressing the path along the way.

    Cells are flat indices into the mazeSize*mazeSize grid; parent links live
    in the module-level unionLink list. Path compression makes repeated finds
    near O(1) without changing which root is returned.
    """
    x = int(x)
    # First walk up to the root...
    root = x
    while root != unionLink[root]:
        root = unionLink[root]
    # ...then re-point every node on the walked path directly at the root.
    while x != root:
        unionLink[x], x = root, unionLink[x]
    return root
def unionSame(f, s):
    """True when cells f and s already belong to the same DSU component."""
    root_f = unionFind(f)
    root_s = unionFind(s)
    return root_f == root_s
def unionUnite(f, s):
    """Merge the DSU components of cells f and s (union by size)."""
    root_a = unionFind(f)
    root_b = unionFind(s)
    if root_a == root_b:
        return
    # Keep root_a as the larger (or equally large) component
    if unionSize[root_a] < unionSize[root_b]:
        root_a, root_b = root_b, root_a
    unionSize[root_a] += unionSize[root_b]
    unionLink[root_b] = root_a
""":
The core of the program, the function program has been written for!
It uses disjoint set union to build a maze in such a way so there will
always be a solution. The way it works is it fills the maze with walls
except for start and finish. Then it chooses a random cell and removes
the wall. If there are other non-wall cells, then they are joined. This
process repeats until a solution is found.
"""
def createMaze():
    """Generate a random maze with a guaranteed start-to-finish path.

    Fills the grid with walls, then repeatedly opens a random cell and unites
    its DSU component with any open 4-neighbours, until the start and finish
    cells end up in the same component.
    """
    global unionLink
    global unionSize
    global Maze
    Maze = [[1 for i in range(mazeSize)] for i in range(mazeSize)]
    unionLink = [i for i in range(mazeSize * mazeSize)]
    unionSize = [1 for i in range(mazeSize * mazeSize)]
    Maze[startPoint.y][startPoint.x] = 0
    Maze[endPoint.y][endPoint.x] = 0
    # Flat indices of the start and finish cells
    start = startPoint.y * mazeSize + startPoint.x
    finish = endPoint.y * mazeSize + endPoint.x
    while unionFind(start) != unionFind(finish):
        # Pick a random cell that is neither start nor finish, and open it
        curPoint = randint(0, mazeSize * mazeSize - 1)
        while(curPoint == start or curPoint == finish):
            curPoint = randint(0, mazeSize * mazeSize - 1)
        curX = curPoint % mazeSize
        curY = curPoint // mazeSize
        Maze[curY][curX] = 0
        # Unite the freshly opened cell with every open in-bounds neighbour
        for i in range(4):
            newY = curY + diry[i]
            newX = curX + dirx[i]
            newPoint = newY * mazeSize + newX
            if(newX >= 0 and newX < mazeSize and newY >= 0 and newY < mazeSize and
                Maze[newY][newX] == 0 and unionFind(newPoint) != unionFind(curPoint)):
                unionUnite(newPoint, curPoint)
""":
An implementation of breadth-first search to find
shortest path. The solution is stored in the Maze
array it has a value of 2
"""
def createPath():
    """Find the shortest start-to-end path with BFS and mark it in Maze as 2.

    If no path exists, the Maze is left unchanged. Note: dict.has_key implies
    this runs under Python 2 (Processing Python mode).
    """
    global Maze
    used = {}
    distance = {}  # NOTE(review): populated nowhere -- appears to be dead
    parent = {}
    start = (startPoint.x, startPoint.y)
    used[start] = 1
    parent[start] = start
    queue = [start]
    while queue:
        curPoint = queue.pop(0)
        curX = curPoint[0]
        curY = curPoint[1]
        if curPoint == (endPoint.x, endPoint.y):
            break
        # Expand the four neighbours that are in bounds, unvisited and not walls
        for i in range(4):
            newX = curX + dirx[i]
            newY = curY + diry[i]
            if(newX >= 0 and newX < mazeSize and newY >= 0 and newY < mazeSize and not used.has_key((newX,newY)) and Maze[newY][newX] != 1):
                queue.append((newX,newY))
                used[(newX,newY)] = 1
                parent[(newX,newY)] = curPoint
    # No parent entry for the end cell means it was never reached
    if not parent.has_key((endPoint.x,endPoint.y)):
        return
    # Walk the parent chain backwards (excluding start and end themselves)
    wayBack = []
    curNode = parent[(endPoint.x,endPoint.y)]
    while curNode != start:
        wayBack.append(curNode)
        curNode = parent[curNode]
    for cell in wayBack:
        curX = cell[0]
        curY = cell[1]
        Maze[curY][curX] = 2
def drawGrid():
    """Draw the cell grid lines plus the start and end cell markers."""
    stroke(0)
    # Vertical then horizontal grid lines
    for i in range(1,mazeSize):
        line(i * windowWidth / mazeSize, 0, i * windowWidth / mazeSize, windowHeight)
    for j in range(1, mazeSize):
        line(0, j * windowHeight / mazeSize, windowWidth, j * windowHeight / mazeSize)
    # Highlight the start and end cells
    fill(startColor)
    rect(startPoint.x * windowWidth / mazeSize , startPoint.y * windowHeight / mazeSize, windowWidth / mazeSize, windowHeight / mazeSize)
    fill(endColor)
    rect(endPoint.x * windowWidth / mazeSize , endPoint.y * windowHeight / mazeSize, windowWidth / mazeSize, windowHeight / mazeSize)
def drawMaze():
    """Draw every wall cell (Maze value 1) as a filled rectangle."""
    fill(wallColor)
    for i in range(mazeSize):
        for j in range(mazeSize):
            if Maze[i][j] == 1:
                rect(j * windowWidth / mazeSize, i * windowHeight / mazeSize, windowWidth / mazeSize, windowHeight / mazeSize)
def drawPath():
    """Draw the solution cells (Maze value 2) unless the display is toggled off."""
    global showPath
    if showPath == 0:
        return
    fill(wayColor)
    for i in range(mazeSize):
        for j in range(mazeSize):
            if Maze[i][j] == 2:
                rect(j * windowWidth / mazeSize, i * windowHeight / mazeSize, windowWidth / mazeSize, windowHeight / mazeSize)
def clearMaze():
    """Reset every cell of the global Maze to empty (0)."""
    global Maze
    Maze = [[0] * mazeSize for _ in range(mazeSize)]
def clearPath():
    """Remove the solution marks (2) from the Maze, leaving walls intact."""
    global Maze
    for row in range(mazeSize):
        for col in range(mazeSize):
            if Maze[row][col] == 2:
                Maze[row][col] = 0
def setup():
    """Processing entry point: initialise the window and the initial path."""
    size(windowWidth, windowHeight)
    stroke(255)
    background(backgroundColor)
    createPath()
def draw():
    """Processing draw loop: repaint the background, grid, walls and solution."""
    background(backgroundColor)
    drawGrid()
    drawMaze()
    drawPath()
def mousePressed():
    """Set the start (left click) or end (right click) cell from the mouse position."""
    clearPath()
    if mouseButton == LEFT:
        global startPoint
        # Map pixel coordinates to cell coordinates
        startPoint = Point( int( (mouseX % windowWidth) * mazeSize / windowWidth), int( (mouseY % windowHeight) * mazeSize / windowHeight))
    if mouseButton == RIGHT:
        global endPoint
        endPoint = Point( int( (mouseX % windowWidth) * mazeSize / windowWidth), int( (mouseY % windowHeight) * mazeSize / windowHeight))
def mouseDragged():
    """While the middle button is held, turn hovered cells into walls."""
    global Maze
    if mouseButton == CENTER:
        wallX = int( (mouseX % windowWidth) * mazeSize / windowWidth)
        wallY = int( (mouseY % windowHeight) * mazeSize / windowHeight)
        Maze[wallY][wallX] = 1
def mouseReleased():
    """Recompute the solution once the user finishes drawing walls."""
    createPath()
def keyPressed():
    """Keyboard controls: C clears the maze, S toggles the solution, D generates one."""
    global showPath
    if key == 'c' or key == 'C':
        clearMaze()
        createPath()
    if key == 's' or key == 'S':
        # Bitwise NOT toggles 0 <-> -1; both are used as truthy/falsy flags
        showPath = ~showPath
    if key == 'd' or key == 'D':
        createMaze()
        createPath()
| [
"noreply@github.com"
] | noreply@github.com |
c0e32ca735b155f4c09cec0c9f7385ae2d3dcce8 | c9c4f05df86ee81fad560ca6321974b6ab414132 | /python3/solutions/skocimis.py | f95a23b1ee54101da3e6a1c32ca86a1987515fbe | [] | no_license | ionelh/kattis | 520ce63d887127ed1c9a5e57c7473e4688bb9f76 | 9f335c352b152081fb0ca128532a7c2038db6fe4 | refs/heads/master | 2021-01-23T21:49:46.517516 | 2017-08-07T17:29:28 | 2017-08-07T17:29:28 | 83,111,740 | 6 | 4 | null | 2018-04-26T10:42:10 | 2017-02-25T06:31:26 | JavaScript | UTF-8 | Python | false | false | 475 | py | # Problem: https://open.kattis.com/problems/skocimis
########## GENERATE TEST CASES LOCALLY ##########
dummy_input = [
    # '2 3 5',
    '3 5 9'
]

def input():
    """Shadow the builtin input() so the script replays canned test lines."""
    if dummy_input:
        return dummy_input.pop(0)
    raise EOFError()

################## CODE #####################
kangaroos = [int(elt) for elt in input().split(' ')]
# The middle kangaroo can make (largest gap between neighbours - 1) jumps
result = max(kangaroos[1] - kangaroos[0], kangaroos[2] - kangaroos[1])
print(result - 1)
| [
"ionel.hindorean@gmail.com"
] | ionel.hindorean@gmail.com |
8a840e7b2d610e51b72c8e3d920adace9b441c9e | 6b4c76462ebb3491447e8eca9d83b00087b41c23 | /oflibpytorch/utils.py | ba2cba2b2a9112d904e613170de02327fbb432fb | [
"MIT"
] | permissive | snehashis1997/oflibpytorch | 19586626b0be1752ef2588bb50e0a7354316ed48 | 5c1e28ad0cf00b25a7dd040c50b92407cbefe8ff | refs/heads/main | 2023-06-14T10:28:43.437528 | 2021-07-09T17:27:18 | 2021-07-09T17:27:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 20,847 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright: 2021, Claudio S. Ravasio
# License: MIT (https://opensource.org/licenses/MIT)
# Author: Claudio S. Ravasio, PhD student at University College London (UCL), research assistant at King's College
# London (KCL), supervised by:
# Dr Christos Bergeles, PI of the Robotics and Vision in Medicine (RViM) lab in the School of Biomedical Engineering &
# Imaging Sciences (BMEIS) at King's College London (KCL)
# Prof Lyndon Da Cruz, consultant ophthalmic surgeon, Moorfields Eye Hospital, London UK
#
# This file is part of oflibpytorch
import math
import torch
import torch.nn.functional as f
from scipy.interpolate import griddata
import numpy as np
import cv2
from typing import Any, Union
# Flow-vector magnitude below which vectors are treated as zero
# (used by threshold_vectors and the early-out in apply_flow)
DEFAULT_THRESHOLD = 1e-3
def get_valid_vecs(vecs: Any, desired_shape: Union[tuple, list] = None, error_string: str = None) -> torch.Tensor:
    """Validate an array / tensor input and return it as a float 2-H-W flow tensor.

    :param vecs: Numpy array or torch tensor, shape 2-H-W (assumed first) or H-W-2
    :param desired_shape: Optional (H, W) the input is checked against
    :param error_string: Optional prefix prepended to error messages
    :return: Torch tensor of shape 2-H-W, dtype float
    """
    if error_string is None:
        error_string = ''
    # Type and dimensionality checks
    if not isinstance(vecs, (np.ndarray, torch.Tensor)):
        raise TypeError(error_string + "Input is not a numpy array or a torch tensor")
    if len(vecs.shape) != 3:
        raise ValueError(error_string + "Input is not 3-dimensional")
    # Convert numpy input to a float tensor on the CPU
    if isinstance(vecs, np.ndarray):
        vecs = torch.tensor(vecs, dtype=torch.float, device='cpu')
    # Bring the channel dimension to the front if the input was H-W-2
    if vecs.shape[0] != 2:
        if vecs.shape[2] != 2:  # neither 2-H-W nor H-W-2
            raise ValueError(error_string + "Input needs to be shape H-W-2 or 2-H-W")
        vecs = move_axis(vecs, -1, 0)
    # Optional shape check against the desired (H, W)
    if desired_shape is not None:
        if vecs.shape[1] != desired_shape[0] or vecs.shape[2] != desired_shape[1]:
            raise ValueError(error_string + "Input shape H or W does not match the desired shape")
    # Reject NaN / Inf values
    if not torch.isfinite(vecs).all():
        raise ValueError(error_string + "Input contains NaN, Inf or -Inf values")
    return vecs.float()
def get_valid_ref(ref: Any) -> str:
    """Validate a flow reference and return it.

    :param ref: Candidate flow reference; None defaults to 't'
    :return: 's' or 't'
    """
    if ref is None:
        return 't'
    if not isinstance(ref, str):
        raise TypeError("Error setting flow reference: Input is not a string")
    if ref not in ('s', 't'):
        raise ValueError("Error setting flow reference: Input is not 's' or 't', but {}".format(ref))
    return ref
def get_valid_device(device: Any) -> str:
    """Validate a tensor device string and return it.

    :param device: Candidate device; None defaults to 'cpu'
    :return: 'cpu' or 'cuda'
    """
    if device is None:
        return 'cpu'
    if device not in ('cpu', 'cuda'):
        raise ValueError("Error setting tensor device: Input is not 'cpu' or 'cuda', but {}".format(device))
    if device == 'cuda' and not torch.cuda.is_available():
        raise ValueError("Error setting tensor device: Input is 'cuda', but cuda is not available")
    return device
def get_valid_padding(padding: Any, error_string: str = None) -> list:
    """Validate a padding specification.

    :param padding: List or tuple of four non-negative integers [top, bot, left, right]
    :param error_string: Optional prefix prepended to error messages
    :return: The validated padding, unchanged
    """
    error_string = '' if error_string is None else error_string
    if not isinstance(padding, (list, tuple)):
        raise TypeError(error_string + "Padding needs to be a list [top, bot, left, right]")
    if len(padding) != 4:
        raise ValueError(error_string + "Padding list needs to be a list of length 4 [top, bot, left, right]")
    if not all(isinstance(item, int) for item in padding):
        raise ValueError(error_string + "Padding list [top, bot, left, right] items need to be integers")
    # BUG FIX: this previously checked `item > 0`, rejecting a legitimate zero
    # padding even though the error message promises "0 or larger".
    if not all(item >= 0 for item in padding):
        raise ValueError(error_string + "Padding list [top, bot, left, right] items need to be 0 or larger")
    return padding
def validate_shape(shape: Any) -> None:
    """Check that shape is a list or tuple of two positive integers.

    BUG FIX: the return annotation previously claimed Union[tuple, list],
    but the function has no return statement; it only validates and raises.

    :param shape: Candidate (H, W) shape
    :raises TypeError: If shape is not a list or tuple
    :raises ValueError: If shape has the wrong length or non-positive entries
    """
    if not isinstance(shape, (list, tuple)):
        raise TypeError("Error creating flow from matrix: Dims need to be a list or a tuple")
    if len(shape) != 2:
        raise ValueError("Error creating flow from matrix: Dims need to be a list or a tuple of length 2")
    # Keep the original evaluation order (value check before type check)
    if any((item <= 0 or not isinstance(item, int)) for item in shape):
        raise ValueError("Error creating flow from matrix: Dims need to be a list or a tuple of integers above zero")
def move_axis(input_tensor: torch.Tensor, source: int, destination: int) -> torch.Tensor:
    """Reorder tensor dimensions like np.moveaxis.

    :param input_tensor: Input torch tensor, e.g. N-H-W-C
    :param source: Current position of the dimension to move (may be negative)
    :param destination: Target position for that dimension (may be negative)
    :return: Output torch tensor, e.g. N-C-H-W
    """
    ndims = input_tensor.dim()
    source %= ndims
    destination %= ndims
    # Build the full permutation: drop `source`, then re-insert it at `destination`
    order = [dim for dim in range(ndims) if dim != source]
    order.insert(destination, source)
    return input_tensor.permute(*order)
def to_numpy(tensor: torch.Tensor, switch_channels: bool = None) -> np.ndarray:
    """Convert a tensor to a numpy array, moving it off the GPU if necessary.

    :param tensor: Input tensor
    :param switch_channels: Move channels from first to last dimension; defaults to False
    :return: Numpy array, with channels switched if requested
    """
    if switch_channels is None:
        switch_channels = False
    with torch.no_grad():
        # .cpu() for CUDA tensors, .detach() otherwise, before .numpy()
        host_tensor = tensor.cpu() if tensor.device.type == 'cuda' else tensor.detach()
        arr = host_tensor.numpy()
    if switch_channels:
        arr = np.moveaxis(arr, 0, -1)
    return arr
def to_tensor(array: np.ndarray, switch_channels: bool = None, device: str = None) -> torch.Tensor:
    """Convert a numpy array to a torch tensor.

    :param array: Input array
    :param switch_channels: Move channels from last to first dimension; defaults to False
    :param device: Tensor device, 'cpu' or 'cuda' (if available); defaults to 'cpu'
    :return: Torch tensor on the requested device
    """
    if switch_channels:  # None and False are both falsy, matching the old default
        array = np.moveaxis(array, -1, 0)
    target_device = 'cpu' if device is None else device
    return torch.tensor(array).to(target_device)
def show_masked_image(img: Union[torch.Tensor, np.ndarray], mask: Union[torch.Tensor, np.ndarray] = None) -> np.ndarray:
    """Mimics flow.show(), for an input image and a mask.

    Dims the invalid area, outlines the valid area, and blocks on a cv2
    window until a key is pressed.

    :param img: Torch tensor of shape :math:`(3, H, W)` or numpy array of shape :math:`(H, W, 3)`, BGR input image
    :param mask: Torch tensor or numpy array of shape :math:`(H, W)`, boolean mask showing the valid area
    :return: Masked image, in BGR colour space
    """
    if isinstance(img, torch.Tensor):
        img = to_numpy(img, switch_channels=True)
    # Default mask: everything valid
    if mask is None:
        mask = np.ones(img.shape[:2], 'bool')
    elif isinstance(mask, torch.Tensor):
        mask = to_numpy(mask)
    hsv = cv2.cvtColor(np.round(img).astype('uint8'), cv2.COLOR_BGR2HSV)
    # Flatten the value channel of invalid pixels to dim them
    hsv[np.invert(mask), 2] = 180
    # Outline the valid region in black
    contours, hierarchy = cv2.findContours((255 * mask).astype('uint8'),
                                           cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    cv2.drawContours(hsv, contours, -1, (0, 0, 0), 1)
    bgr = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
    cv2.imshow("Image masked by valid area", bgr)
    cv2.waitKey()  # blocks until a key press
    return bgr
def flow_from_matrix(matrix: torch.Tensor, shape: Union[list, tuple]) -> torch.Tensor:
    """Flow calculated from a transformation matrix

    NOTE: This corresponds to a flow with reference 's': based on meshgrid in image 1, warped to image 2, flow vectors
    at each meshgrid point in image 1 corresponding to (warped end points in image 2 - start points in image 1)

    :param matrix: Transformation matrix, torch tensor of shape 3-3
    :param shape: List or tuple [H, W] containing required size of the flow field
    :return: Flow field according to cv2 standards, torch tensor 2-H-W
    """
    # Make default vector field and populate it with homogeneous coordinates
    h, w = shape
    device = matrix.device
    ones = torch.ones(shape).to(device)
    # NOTE(review): relies on torch.meshgrid's default 'ij' indexing, so grid_x
    # varies along rows; newer torch versions warn unless indexing= is passed
    grid_x, grid_y = torch.meshgrid(torch.arange(0, h), torch.arange(0, w))
    # Stack as (x, y, 1) homogeneous coordinates per pixel
    default_vec_hom = torch.stack((grid_y.to(torch.float).to(device),
                                   grid_x.to(torch.float).to(device),
                                   ones), dim=-1)
    # Calculate the flow from the difference of the transformed default vectors, and the original default vector field
    transformed_vec_hom = torch.matmul(matrix.to(torch.float), default_vec_hom.unsqueeze(-1)).squeeze(-1)
    # De-homogenise (divide by w) and move the channel dimension to the front
    transformed_vec = transformed_vec_hom[..., 0:2] / transformed_vec_hom[..., 2:3]
    transformed_vec = move_axis(transformed_vec - default_vec_hom[..., 0:2], -1, 0)
    return transformed_vec
def matrix_from_transforms(transform_list: list) -> torch.Tensor:
    """Combine a list of transforms into a single 3x3 transformation matrix.

    The list is composed in reverse so the first transform is applied first.

    :param transform_list: List of [transform name, value 1, ..., value n] entries;
        supported names are 'translation', 'rotation' and 'scaling'
    :return: Transformation matrix as torch tensor of shape 3-3
    """
    combined = torch.eye(3)
    for transform in transform_list[::-1]:
        combined = combined @ matrix_from_transform(transform[0], transform[1:])
    return combined
def matrix_from_transform(transform: str, values: list) -> torch.Tensor:
    """Build a 3x3 transformation matrix for a single transform.

    :param transform: Transform type: 'translation', 'rotation' or 'scaling'
        (anything else yields the identity matrix)
    :param values: Transform values as list:
        'translation': [hor shift in px, ver shift in px]
        'rotation': [hor centre, ver centre, angle in degrees counter-clockwise]
        'scaling': [hor centre, ver centre, scaling fraction]
    :return: Transformation matrix as torch tensor of shape 3-3
    """
    matrix = torch.eye(3)
    if transform == 'translation':
        matrix[0, 2] = values[0]
        matrix[1, 2] = values[1]
    elif transform == 'scaling':
        # Scale about (values[0], values[1]): shift to origin, scale, shift back
        to_origin = matrix_from_transform('translation', [-values[0], -values[1]])
        from_origin = matrix_from_transform('translation', values[:2])
        matrix[0, 0] = values[2]
        matrix[1, 1] = values[2]
        matrix = from_origin @ matrix @ to_origin
    elif transform == 'rotation':
        # Rotate about (values[0], values[1]); signs differ from the usual
        # rotation matrix [[+, -], [+, +]] because the 'y' axis points down
        rot = math.radians(values[2])
        to_origin = matrix_from_transform('translation', [-values[0], -values[1]])
        from_origin = matrix_from_transform('translation', values[:2])
        matrix[0:2, 0:2] = torch.tensor([[math.cos(rot), math.sin(rot)], [-math.sin(rot), math.cos(rot)]])
        matrix = from_origin @ matrix @ to_origin
    return matrix
def reverse_transform_values(transform_list: list) -> list:
    """Invert the values of each transform so the result undoes the original.

    :param transform_list: List of [transform name, value 1, ..., value n] entries;
        supported names are 'translation', 'rotation' and 'scaling'
    :return: List of reversed transforms (unknown names are dropped, as before)
    """
    reversed_transforms = []
    for entry in transform_list:
        name = entry[0]
        if name == 'translation':  # negate both shifts
            reversed_transforms.append([name, -entry[1], -entry[2]])
        elif name == 'scaling':  # invert the scaling fraction, keep the centre
            reversed_transforms.append([name, entry[1], entry[2], 1 / entry[3]])
        elif name == 'rotation':  # negate the angle, keep the centre
            reversed_transforms.append([name, entry[1], entry[2], -entry[3]])
    return reversed_transforms
def normalise_coords(coords: torch.Tensor, shape: Union[tuple, list]) -> torch.Tensor:
    """Normalise pixel coordinates to the [-1, 1] range expected by grid_sample.

    Coordinate locations start "mid-pixel" and end "mid-pixel" (pixel box model):
        Pixels | 0 | 1 | 2 |
                 |   |   |
        Grid    -1   0   1

    :param coords: Tensor of any shape ending in a dim of 2, ordered (x, y) = [hor, ver]
    :param shape: Flow / image size as [ver, hor]
    :return: Normalised coordinates (input is not modified)
    """
    scaled = 2. * coords  # makes a copy, so the caller's tensor is untouched
    scaled[..., 0] /= (shape[1] - 1)  # x is horizontal, so divide by W - 1
    scaled[..., 1] /= (shape[0] - 1)  # y is vertical, so divide by H - 1
    return scaled - 1
def apply_flow(flow: torch.Tensor, target: torch.Tensor, ref: str = None, mask: torch.Tensor = None) -> torch.Tensor:
    """Warps target according to flow of given reference.

    't'-reference flows are warped on the GPU/CPU via grid_sample; 's'-reference
    flows fall back to scipy griddata interpolation on the CPU. The output keeps
    the target's original dtype (rounded for integer dtypes).

    :param flow: Torch tensor 2-H-W containing the flow vectors in cv2 convention (1st channel hor, 2nd channel ver)
    :param target: Torch tensor H-W, C-H-W, or N-C-H-W containing the content to be warped
    :param ref: Reference of the flow, 't' or 's'. Defaults to 't'
    :param mask: Torch tensor H-W containing the flow mask, only relevant for 's' flows. Defaults to True everywhere
    :return: Torch tensor of the same shape as the target, with the content warped by the flow
    """
    # Check if all flow vectors are almost zero
    if torch.all(torch.norm(flow, dim=0) <= DEFAULT_THRESHOLD):  # If the flow field is actually 0 or very close
        return target
    # Set up
    ref = get_valid_ref(ref)
    device = flow.device.type
    h, w = flow.shape[1:]
    # Prepare target dtype, device, and shape; dtype is restored at the end
    target_dtype = target.dtype
    target = target.to(torch.float)
    if target.device != flow.device:
        target = target.to(flow.device)
    target_dims = target.dim()
    if target_dims == 2:  # shape H-W to 1-1-H-W
        target = target.unsqueeze(0).unsqueeze(0)
    elif target_dims == 3:  # shape C-H-W to 1-C-H-W
        target = target.unsqueeze(0)
    # Warp target
    if ref == 't':
        # Prepare grid: sample each output pixel at (its position - flow vector)
        grid_x, grid_y = torch.meshgrid(torch.arange(0, h), torch.arange(0, w))
        grid = torch.stack((grid_y, grid_x), dim=-1).to(torch.float).to(device)
        field = normalise_coords(grid.unsqueeze(0) - flow.unsqueeze(-1).transpose(-1, 0), (h, w))
        if target.shape[0] > 1:  # target wasn't just unsqueezed, but has a true N dimension
            field = field.repeat(target.shape[0], 1, 1, 1)
        # align_corners only exists (and defaults differently) from torch 1.4 on
        torch_version = globals()['torch'].__version__
        if int(torch_version[0]) == 1 and float(torch_version[2:4]) <= 3:
            result = f.grid_sample(target, field)
        else:
            # noinspection PyArgumentList
            result = f.grid_sample(target, field, align_corners=True)
        # Comment on grid_sample: given grid_sample(input, grid), the input is sampled at grid points.
        # For this to work:
        #   - input is shape NCHW (containing data vals in C)
        #   - grid is shape NHW2, where 2 is [x, y], each in limits [-1, 1]
        #   - grid locations by default start "mid-pixel", end "mid-pixel" (box model): Pixels | 0 | 1 | 2 |
        #                                                                                        |   |   |
        #                                                                               Grid   -1    0    1
        #   - in practice, this box model leads to artefacts around the corners (might be fixable), setting align_corner
        #     to True fixes this.
        #   - x and y are spatially defined as follows, same as the cv2 convention (e.g. Farnebäck flow)
        #         -1    0    1
        #     -1   +---------+--> x
        #          |         |
        #      0   |  image  |
        #          |         |
        #      1   +---------+
        #          v
        #          y
    else:  # ref == 's'
        # Get the positions of the unstructured points with known values
        field = to_numpy(flow, True).astype('float32')
        x, y = np.mgrid[:field.shape[0], :field.shape[1]]
        positions = np.swapaxes(np.vstack([x.ravel(), y.ravel()]), 0, 1)
        flow_flat = np.reshape(field[..., ::-1], (-1, 2))  # Shape H*W-2
        pos = positions + flow_flat
        # Get the known values themselves
        target_np = np.moveaxis(to_numpy(target), 1, -1)  # from N-C-H-W to N-H-W-C
        target_flat = np.reshape(target_np, (target.shape[0], -1, target.shape[1]))  # from N-H-W-C to N-H*W-C
        # Mask points, if required: invalid flow positions are excluded as sources
        if mask is not None:
            pos = pos[to_numpy(mask.flatten())]
            target_flat = target_flat[:, to_numpy(mask.flatten())]
        # Perform interpolation of regular grid from unstructured data
        results = np.copy(target_np)
        for i in range(target_flat.shape[0]):  # Perform griddata for each "batch" member
            result = griddata(pos, target_flat[i], (x, y), method='linear')
            # NaNs (pixels outside the convex hull of warped points) become 0
            results[i] = np.nan_to_num(result)
        # Make sure the output is returned with the same dtype as the input, if necessary rounded
        result = torch.tensor(np.moveaxis(results, -1, 1)).to(flow.device)
    # Reduce target to original shape
    if target_dims == 2:  # shape 1-1-H-W to H-W
        result = result.squeeze(0).squeeze(0)
    elif target_dims == 3:  # shape 1-C-H-W to C-H-W
        result = result.squeeze(0)
    # Return target with original dtype, rounding if necessary
    # noinspection PyUnresolvedReferences
    if not target_dtype.is_floating_point:
        result = torch.round(result)
    result = result.to(target_dtype)
    return result
def threshold_vectors(vecs: torch.Tensor, threshold: Union[float, int] = None) -> torch.Tensor:
    """Set all flow vectors with a magnitude below the threshold to zero.

    :param vecs: Input flow torch tensor, shape 2-H-W (not modified)
    :param threshold: Threshold as float or int; defaults to DEFAULT_THRESHOLD
    :return: Clone of vecs with sub-threshold vector magnitudes zeroed
    """
    if threshold is None:
        threshold = DEFAULT_THRESHOLD
    magnitudes = torch.norm(vecs, dim=0)
    # Work on a clone; `result` also avoids shadowing the module alias `f`
    result = vecs.clone()
    result[:, magnitudes < threshold] = 0
    return result
| [
"CSRavasio@users.noreply.github.com"
] | CSRavasio@users.noreply.github.com |
3ec23889b0b41f273a08c57ccb91e806f23972c4 | a49ebe04a533f5359592c7222b2212b49002f066 | /merge/test/test_01.py | 2cd07044698cb655e9188b3bdb7b264c8a6ecead | [] | no_license | prem1982/airassessmentreporting | ffb23e87e120ade54dee971b215186d12968fd43 | 0c5a3714de0e157b14f92cb3a2bd5611d2dc6e97 | refs/heads/master | 2020-03-28T14:49:34.876496 | 2018-09-12T18:50:13 | 2018-09-12T18:50:13 | 148,526,579 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,561 | py | import unittest
import os.path
from airassessmentreporting.merge import *
from airassessmentreporting.airutility import yesno
from abstractmergetestcase import AbstractMergeTestCase
from airassessmentreporting.testutility import ( integer_compare,
mixed_compare, to_str, compare_tables )
def truncated_compare( x, y ):
    """Compare two strings on their first 12 characters; None matches only None."""
    if x is None or y is None:
        return x is None and y is None
    return x[:12] == y[:12]
# Human-readable names for the join-type constants, used to build case names
_JOIN_NAMES = {
    JOIN_TYPE_LEFT:'LEFT',
    JOIN_TYPE_INNER:'INNER',
    JOIN_TYPE_FULL:'OUTER'
}
# Column comparison specs: (output column, specimen column, comparator or None
# for plain equality)
OUT_COLUMNS = (
    ( 'char_1', 'char_1', None ),
    ( 'char_2', 'char_2', None ),
#    ( 'ethnicity', 'ethnicity', None ),
    ( 'gender', 'gender', None ),
    ( 'n1', 'n1', None ),
    ( 'n2', 'n2', None ),
    ( 'num_1', 'num_1', integer_compare ),
    ( 'num_2', 'num_2', integer_compare ),
    ( 'studentfnm', 'studentfnm', truncated_compare ),
    ( 'studentid', 'studentid', None ),
    ( 'studentlnm', 'studentlnm', None ),
    ( 'barcode_char', 'barcode_char', None ),
    ( 'barcode_num', 'barcode_num', integer_compare ),
)
# Fuzzy-match report columns for the spec built in the test constructor (version A)
FUZZY_COLUMNS_A = (
    ( 'barcode_num', 'tmp1barcode_num', integer_compare ),
    ( 'lfuzzykey_1_1', 'tmp1studentlnm', mixed_compare ),
    ( 'lfuzzykey_1_2', 'tmp1studentfnm', mixed_compare ),
    ( 'rfuzzykey_1_1', 'tmp2studentlnm', mixed_compare ),
    ( 'rfuzzykey_1_2', 'tmp2studentfnm', mixed_compare ),
)
# Fuzzy-match report columns for the Excel-read spec (version B): the primary
# key column is named 'primary1' instead of 'barcode_num'
FUZZY_COLUMNS_B = (
    ( 'primary1', 'tmp1barcode_num', integer_compare ),
    ( 'lfuzzykey_1_1', 'tmp1studentlnm', mixed_compare ),
    ( 'lfuzzykey_1_2', 'tmp1studentfnm', mixed_compare ),
    ( 'rfuzzykey_1_1', 'tmp2studentlnm', mixed_compare ),
    ( 'rfuzzykey_1_2', 'tmp2studentfnm', mixed_compare ),
)
'''A python implementation of the checks that are performed in MergeMacro_test1.sas
'''
class MergeTest01( AbstractMergeTestCase ):
    """Python port of the checks performed in MergeMacro_test1.sas.

    Runs every permutation of duplicate-handling flags and join types through
    the merge definition, then compares output, remainder and fuzzy-match
    tables against Excel specimens produced by the original SAS run.
    """

    def test_01(self):
        '''Run the same set of tests as were performed in SAS, and compare the
        results
        '''
        answer_dir = os.path.join( self.run_context.logs_dir, 'merge_test_01' )
        if not os.path.exists( answer_dir ):
            os.makedirs( answer_dir )
        answer_file = os.path.join( answer_dir, 'log' )
        succeed = self._doMergePermutations( None, answer_file, 0.8, 'A' )
        self.assertTrue( succeed, "Merge tests failed. See logs in {}".format( answer_dir ) )

    def test_01b( self ):
        '''Repeat the test using a merge spec read from Excel instead of the one
        created in the constructor
        '''
        answer_dir = os.path.join( self.run_context.logs_dir, 'merge_test_01b' )
        if not os.path.exists( answer_dir ):
            os.makedirs( answer_dir )
        answer_file = os.path.join( answer_dir, 'log' )
        spec_file = os.path.join( self.run_context.tests_safe_dir, 'merge_spec.xls' )
        read_spec_file( spec_file, self.merge_def )
        succeed = self._doMergePermutations( None, answer_file, 0.8, 'B' )
        self.assertTrue( succeed, "Merge tests failed. See logs in {}".format( answer_dir ) )

    def _doMergePermutations( self, spec_file, answer_file, similarity_threshold, fuzzy_version ):
        """Run every dup-flag / join-type permutation and compare all outputs.

        Returns True only if every permutation's comparisons succeeded.
        """
        # merge.createSpecFileIfNotExists( spec_file )
        # self.merge_def.fieldSpec = merge.readSpecFile( spec_file )
        self.merge_def.similarity_thresholds = similarity_threshold
        succeed = True
        for allow_dups_left in ( True, False ):
            self.merge_def.allow_dups_left = allow_dups_left
            for allow_dups_right in ( True, False ):
                self.merge_def.allow_dups_right = allow_dups_right
                # allow_dups_both only varies when both sides allow duplicates
                dups_both_permutations = ( True, False ) if ( allow_dups_left and allow_dups_right ) else ( False, )
                for allow_dups_both in dups_both_permutations:
                    self.merge_def.allow_dups_both = allow_dups_both
                    for join_type in ( JOIN_TYPE_LEFT, JOIN_TYPE_INNER, JOIN_TYPE_FULL ):
                        self.merge_def.join_type = join_type
                        case_name = "_".join( ( yesno(allow_dups_left),
                                                yesno(allow_dups_right),
                                                yesno(allow_dups_both),
                                                _JOIN_NAMES[ join_type ] ) )
                        self.merge_def.table_name = 'mergeOut_' + case_name
                        self.merge_def.fuzzy_report_table = 'fuzzy_'+ case_name
                        self.merge_def.left_remain_table = 'left_remain_' + case_name
                        self.merge_def.right_remain_table = 'right_remain_' + case_name
                        # BUG FIX: these conditions previously compared the
                        # builtin `type` (always False) instead of `join_type`,
                        # so remainder tables were never suppressed.
                        if join_type == JOIN_TYPE_FULL:
                            self.merge_def.left_remain_table = self.merge_def.right_remain_table = None
                        elif join_type == JOIN_TYPE_LEFT:
                            self.merge_def.left_remain_table = None
                        self.merge_def.execute()
                        del self.merge_def['fk_right_1']
                        del self.merge_def['fk_left_1']
                        result = self.compare_output_tables( case_name, answer_file )
                        if self.merge_def.left_remain_table is not None:
                            result = result and self.compare_remain_tables( case_name, answer_file, 1, 'left' )
                        # BUG FIX: this guard previously re-checked
                        # left_remain_table before the right-side comparison.
                        if self.merge_def.right_remain_table is not None:
                            result = result and self.compare_remain_tables( case_name, answer_file, 2, 'right' )
                        if fuzzy_version == 'A':
                            result = result and self.compare_fuzzy_tables_a( case_name, answer_file )
                        else:
                            result = result and self.compare_fuzzy_tables_b( case_name, answer_file )
                        succeed = succeed and result
                        self.run_context.info( "{1}: Merge test 01 for case {0}".format( case_name, 'PASSED' if result else 'FAILED' ) )
        return succeed

    def compare_output_tables( self, case_name, answer_file ):
        """Compare the merge output table to its Excel specimen."""
        log_name = answer_file + '_OUTPUT_' + case_name
        specimen_name = 'DS_OUT_{}.xls'.format( case_name )
        specimen_name = os.path.join( self.run_context.tests_safe_dir,
                                      'merge_outputs', specimen_name )
        # Sort by rounded barcode, then the n1/n2 tie-breakers
        sort_fun = lambda row: ( None if row.barcode_num is None else int( float( row.barcode_num ) + 0.5 ),
                                 row.n1,
                                 row.n2 )
        return compare_tables( log_name, self.merge_def.table_name, specimen_name,
                               OUT_COLUMNS, sort_fun, sort_fun, self.db_context, 0 )

    def compare_remain_tables(self, case_name, answer_file, specimen_side, output_side ):
        # TODO: placeholder -- remainder-table comparison is not implemented yet
        return True

    def compare_fuzzy_tables_a(self, case_name, answer_file ):
        """Compare the fuzzy-match report (constructor-built spec) to its specimen."""
        log_name = answer_file + '_FUZZY_REPORT_' + case_name
        table_name = 'fuzzy_{}'.format( case_name )
        specimen_name = 'FUZZY_{}.xls'.format( case_name )
        specimen_name = os.path.join( self.run_context.tests_safe_dir, 'merge_outputs', specimen_name )
        def table_sort( row ):
            barcode_num = None if row.barcode_num is None else int( float( row.barcode_num ) + 0.5 )
            return ( barcode_num, to_str( row.lfuzzykey_1_1 ), to_str( row.lfuzzykey_1_2 ), to_str( row.rfuzzykey_1_1 ),
                     to_str( row.rfuzzykey_1_2 ) )
        def specimen_sort( row ):
            barcode_num = None if row['tmp1barcode_num'] is None else int( row['tmp1barcode_num'] )
            return ( barcode_num, to_str( row['tmp1studentlnm'] ), to_str( row['tmp1studentfnm'] ),
                     to_str( row['tmp2studentlnm'] ), to_str( row['tmp2studentfnm'] ) )
        return compare_tables( log_name, table_name, specimen_name,
                               FUZZY_COLUMNS_A, table_sort, specimen_sort, self.db_context, 0 )

    def compare_fuzzy_tables_b(self, case_name, answer_file ):
        """Compare the fuzzy-match report (Excel-read spec) to its specimen."""
        log_name = answer_file + '_FUZZY_REPORT_' + case_name
        table_name = 'fuzzy_{}'.format( case_name )
        specimen_name = 'FUZZY_{}.xls'.format( case_name )
        specimen_name = os.path.join( self.run_context.tests_safe_dir, 'merge_outputs', specimen_name )
        def table_sort( row ):
            barcode_num = None if row.primary1 is None else int( float( row.primary1 ) + 0.5 )
            return ( barcode_num, to_str( row.lfuzzykey_1_1 ), to_str( row.lfuzzykey_1_2 ), to_str( row.rfuzzykey_1_1 ),
                     to_str( row.rfuzzykey_1_2 ) )
        def specimen_sort( row ):
            barcode_num = None if row['tmp1barcode_num'] is None else int( float( row['tmp1barcode_num'] ) + 0.5 )
            return ( barcode_num, to_str( row['tmp1studentlnm'] ), to_str( row['tmp1studentfnm'] ),
                     to_str( row['tmp2studentlnm'] ), to_str( row['tmp2studentfnm'] ) )
        return compare_tables( log_name, table_name, specimen_name,
                               FUZZY_COLUMNS_B, table_sort, specimen_sort, self.db_context, 0 )
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main() | [
"prem1pre@gmail.com"
] | prem1pre@gmail.com |
f715bb4188f7502c3b3cfec70e313d25dcebe13e | 37abefa640c3074a26c7c884d5cbfb3d0bc90c6d | /Customer/migrations/0001_initial.py | 2d12173f5b9b51e2daa3287d0991164ea965497c | [] | no_license | dishbail/PostureApp | 439f8cc6463e8f7f60479f51b437d4293485909f | 93194bc199588b051fe66a51269f56eb135c5a78 | refs/heads/main | 2023-04-15T00:59:46.037618 | 2021-04-23T22:24:17 | 2021-04-23T22:24:17 | 335,673,081 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,520 | py | # Generated by Django 3.0.7 on 2021-02-17 17:05
import datetime
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
from django.utils.timezone import utc
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Customer',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200, null=True)),
('phone', models.CharField(max_length=200, null=True)),
('email', models.CharField(max_length=200, null=True)),
('profile_pic', models.ImageField(blank=True, default='profile1.png', null=True, upload_to='')),
('date_created', models.DateTimeField(blank=True, default=datetime.datetime(2021, 2, 17, 17, 5, 57, 946781, tzinfo=utc), null=True)),
('user', models.OneToOneField(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='SittingRecord',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date_created', models.DateTimeField(blank=True, default=datetime.datetime(2021, 2, 17, 17, 5, 57, 947771, tzinfo=utc), null=True)),
('sitting_time_in_min', models.FloatField(default=0)),
('customer', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='Customer.Customer')),
],
),
migrations.CreateModel(
name='PostureRecord',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date_created', models.DateTimeField(blank=True, default=datetime.datetime(2021, 2, 17, 17, 5, 57, 947771, tzinfo=utc), null=True)),
('posture_value', models.CharField(choices=[(1, 'Correct Posture'), (0, 'Incorrect Posture')], max_length=30, null=True)),
('confidence_value', models.FloatField(default=1)),
('customer', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='Customer.Customer')),
],
),
]
| [
"rebeccagolm32@gmail.com"
] | rebeccagolm32@gmail.com |
a9b0a4dd2f123ad3af0d2e9e35f3602686baf7b5 | 4dca81f7e7c3ae60397c40fde44faec71b720b17 | /chapter5_convolutional_neural_networks/5.3_channels.py | cf3500c5e2172f6cb784686dd1e9ce8c5234d7c2 | [
"MIT"
] | permissive | Rosetta-Leong/pytorch_learning | 5b46411c417f5d14604ca7fe6105564b8864573e | 86cf2252dd210a5fc613a65dbb773bdf1e2b734f | refs/heads/main | 2023-07-05T23:44:54.430716 | 2021-08-11T09:26:47 | 2021-08-11T09:26:47 | 384,839,559 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,584 | py | # -*-coding:utf-8-*-
# @File : 2021/8/4 下午4:26
# @Author : Rosetta0
# @File : 5.3_channels.py
import torch
from d2l import torch as d2l
#多通道输入互相关运算
def corr2d_multi_in(X, K):
return sum(d2l.corr2d(x, k) for x, k in zip(X, K))
#使用 zip() 函数“压缩”多个序列时,它会分别取各序列中第 1 个元素、第 2 个元素、... 第 n 个元素,各自组成新的元组
#此处相当于先取第一个通道与其对应二维卷积核做互相关,然后依次是第二个...
#多通道输入、输出
def corr2d_multi_in_out(X, K):
return torch.stack([corr2d_multi_in(X, k) for k in K], 0)
#1 * 1卷积(利用全连接实现)
def corr2d_multi_in_out_1x1(X, K):
c_i, h, w = X.shape
c_o = K.shape[0]
X = X.reshape((c_i, h * w))
K = K.reshape((c_o, c_i))
Y = torch.matmul(K, X)
return Y.reshape((c_o, h, w))
if __name__ == "__main__":
#多输入单输出
X = torch.tensor([[[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]],
[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]])
K = torch.tensor([[[0.0, 1.0], [2.0, 3.0]], [[1.0, 2.0], [3.0, 4.0]]])
# print(corr2d_multi_in(X, K))
#多输入多输出
K = torch.stack((K, K + 1, K + 2), 0)
# print(K.shape)
# print(K)
# print(corr2d_multi_in_out(X, K))
# 1 * 1卷积
X = torch.normal(0, 1, (3, 3, 3))
K = torch.normal(0, 1, (2, 3, 1, 1))
Y1 = corr2d_multi_in_out_1x1(X, K)
Y2 = corr2d_multi_in_out(X, K)
assert float(torch.abs(Y1 - Y2).sum()) < 1e-6 | [
"lxsnu11678911@gmail.com"
] | lxsnu11678911@gmail.com |
37e3105b16c43a36e130e2227fd973d23acab791 | fcc2e8ea496e3bbc4e14e0dd8c97d785b08f0905 | /extract-swf-from-ppt.py | 15ca657b201450b4f5242e71fcebc0cd6900ca23 | [
"Apache-2.0"
] | permissive | PeterUpfold/extract-swf-from-ppt | 0eda19266f01a713176c076c63827861c986577f | bbb47cad6d6b365e39841adbc09a40183ad2ec09 | refs/heads/master | 2022-02-28T21:51:50.985305 | 2019-10-31T15:51:47 | 2019-10-31T15:51:47 | 208,244,221 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,304 | py | #!/usr/bin/env python3
#
# Extract SWF from PPT
#
# Use LibreOffice's unoconv to convert PowerPoint 97-2003 format files into PPTX, extract Flash files (*.swf)
# from inside the presentation and dump these to a folder.
# Copyright 2019 Test Valley School.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from tqdm import tqdm
import zipfile
#require csplitb from Pip
argparser = argparse.ArgumentParser(description='Convert PowerPoint 97-2003 fiels and extract Flash files (*.swf) from inside them.')
argparser.add_argument('-i', '--input-dir', dest='input_dir', help='The directory containing source files', required=True)
argparser.add_argument('-o', '--output-dir', dest='output_dir', help='The directory where output files should be written.', required=True)
args = argparser.parse_args()
if not os.path.isdir(args.input_dir):
raise ValueError("The input directory specified is not a directory.")
# check output path
if not os.path.exists(args.output_dir):
raise ValueError("The output directory specified does not exist.")
if not os.path.isdir(args.output_dir):
raise ValueError("The output directory specified is not a directory.")
print("Converting to PPTX...")
for src_filename in tqdm(os.listdir(args.input_dir)):
if src_filename.endswith(".ppt"):
print(src_filename)
print(['unoconv', '-f', 'pptx', '-o', os.path.abspath(args.output_dir), src_filename])
subprocess.run(['unoconv', '-f', 'pptx', '-o', os.path.abspath(args.output_dir), src_filename], cwd=args.input_dir)
print("Extracting PPTX to ZIP...")
# loop over output folder and investigate zips for bin files
for pptx_file in tqdm(os.listdir(args.output_dir)):
print(pptx_file)
if pptx_file.endswith(".pptx"):
zip_pptx = zipfile.ZipFile(os.path.join(args.output_dir, pptx_file), 'r')
for entry_info in zip_pptx.infolist():
#print(entry_info.filename)
if entry_info.filename.endswith('.bin'):
zip_pptx.extract(entry_info, path=args.output_dir)
anim_dest=os.path.join(args.output_dir, pptx_file + ".Animations", entry_info.filename)
os.renames(old=os.path.join(args.output_dir, entry_info.filename), new=anim_dest)
print("Extracted bin animation ", anim_dest)
subprocess.run(['csplitb', '--prefix', pptx_file, '--suffix', '.swf', '--number', '2', '465753', anim_dest])
for anim_dest_file in os.listdir(os.getcwd()):
if anim_dest_file.endswith(".swf"):
print("Move ", anim_dest_file, "into place")
os.rename(src=os.path.join(os.getcwd(), anim_dest_file), dst=os.path.join(args.output_dir, pptx_file + ".Animations", anim_dest_file)) | [
"upfoldp@testvalley.hants.sch.uk"
] | upfoldp@testvalley.hants.sch.uk |
655afc59bdf886491d282fd26968e9a37ec13aee | bb46e44301da95b8ed1b2b3af6caf8ef6f0e91f2 | /scripts/LeistungsdatenScraper.py | 220547f258d3d684357c11f13892fb1ebaf13336 | [] | no_license | vgoel30/European-Soccer-Diversity | ccca31a2233e19cd304d5c3d7cb80413d01aa359 | a62364e3876cecf074a8aee2ea8976bf7a9c5576 | refs/heads/master | 2021-03-24T13:20:22.569083 | 2018-06-10T10:48:31 | 2018-06-10T10:48:31 | 117,543,936 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,499 | py | from bs4 import BeautifulSoup
from pprint import pprint
import requests
import json
headers = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/47.0.2526.106 Safari/537.36'}
main_url = "https://www.transfermarkt.com/"
'''
https://www.transfermarkt.com/paris-saint-germain/leistungsdaten/verein/583/plus/0?reldata=FR1%262016
'''
country = 'Spain'
directory = '../data/Leistungsdaten/' + country + '/'
file_name = '../data/Yearly teams/' + country + '.json'
years = [year for year in range(1995,2017)]
players_dict = {}
with open(file_name) as data_file:
data = json.load(data_file)
for year in years:
players_dict[year] = {}
year_data = data[str(year)]
for team_id in year_data.keys():
i = 0
team_name = year_data[team_id]
players_dict[year][team_name] = {}
url = main_url + team_name + "/leistungsdaten/verein/" + team_id + "/plus/0?reldata=ES1%26" + str(year)
print(url)
request = requests.get(url, headers=headers)
html_data = request.text
soup = BeautifulSoup(html_data, 'lxml')
table = soup.find("table", { "class" : "items" })
for row in table.findAll("tr"):
cells = row.findAll('td')
if len(cells) > 0:
try:
#name
name = cells[3].find("a", {"class" : "spielprofil_tooltip"})['title']
#pprint(name)
#age
age = cells[5].getText()
#pprint(age)
#nationality
nationality = cells[6].find("img", {"class" : "flaggenrahmen"})['title']
#pprint(nationality)
#appearances
appearances = cells[8].getText()
if not appearances.isdigit():
appearances = 0
#pprint(appearances)
#minutes
minutes = cells[10].getText()
# if not minutes.isdigit():
# minutes = 0
#pprint(minutes)
players_dict[year][team_name][i] = {'name':name, 'age':int(age), 'nationality':nationality, 'appearances':int(appearances), 'minutes':minutes}
i += 1
except:
pass
pprint(players_dict)
out_file = directory + str(year) + '.json'
with open(out_file, 'w') as outfile:
json.dump(players_dict, outfile, sort_keys=True, indent=4)
#exit() | [
"varun.goel@stonybrook.edu"
] | varun.goel@stonybrook.edu |
342d9ea7626bfa91902ea78fa41d7879ef2d8cd6 | e90577849e71431c8c0b580c16ad2d9c7d1e03ba | /recurrsion.py | 1b834613096bb995060901862805207b37cdeece | [] | no_license | ArtemonCoder/hangman | bfa74cc2a55c1120be7d5f174386e18b781ac51a | 48deae7d33a1958363726368ade306a496b377d3 | refs/heads/master | 2020-03-24T03:56:58.177534 | 2018-07-29T10:55:21 | 2018-07-29T10:55:21 | 142,438,268 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 685 | py | def bottles_of_beer(bob):
""" Печатает текст песенки про 99 бутылок пива.
:param bob: Должно быть целым числом.
"""
if bob < 1:
print("""Нет бутылок пива на стене. Нет бутылок
пива.""")
return
tmp = bob
bob -= 1
print("""{} бутылок пива на стене. {} бутылок пива.
Возьми одну, пусти по кругу, {} бутылок пива на
стене.
""".format(tmp,
tmp,
bob))
bottles_of_beer(bob)
bottles_of_beer(99)
| [
"artkuryshev@yandex.ru"
] | artkuryshev@yandex.ru |
2cc1ad9577f5e70e29c22b33730e0ab7a2a2afb9 | d1cd25f84f7b7ba631cdf66578a907fdf85abd62 | /blog/migrations/0001_initial.py | 86461cf25a6a527875065c659258381032977f86 | [] | no_license | honghuynhit/gadjango | 7818b34044a4b30d07468b9c5b5e96ba25cd4ea5 | b5ccbdbbe26098391e498b6857007046cc8c22c4 | refs/heads/master | 2023-08-25T20:34:00.399111 | 2020-06-27T11:55:58 | 2020-06-27T11:55:58 | 275,237,875 | 0 | 0 | null | 2021-09-22T19:19:39 | 2020-06-26T20:03:57 | Python | UTF-8 | Python | false | false | 986 | py | # Generated by Django 3.0.7 on 2020-06-26 19:48
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200)),
('text', models.TextField()),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('published_date', models.DateTimeField(blank=True, null=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| [
"honghuynhit@gmail.com"
] | honghuynhit@gmail.com |
e188b05ee0eefdc3df896a57bbe3cadf04e3dd5e | b9bce55f33bd7b028f192230fe7f0fa43e47849b | /DeprecatedFiles/ALTERNATE_9.0.py | a83f130febbb0c93179da5bf9e9774cc7af1409d | [] | no_license | bpayami/FinalProject17 | e3c67768c27783fd1f0f13cfa0e6e8c2af9a8c1a | ee3d889794f1691f96ee8575de441e4456f9a496 | refs/heads/master | 2021-09-06T03:54:48.599560 | 2018-02-02T07:42:58 | 2018-02-02T07:42:58 | 116,167,866 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 47,014 | py | #TO-DO LIST
#GAME CONTENT
#change nowakowski cookbook name
#GAME ENDING
#figure out ending message
#GRADING
#finish adding comments
#MISC.
#slow type of 'processing'
#bug test!
#http://copy.r74n.com/ascii-art
#http://www.chris.com/ascii/
import cmd
#visual 'map' of the game that the user can call to see from within the cmd loop
def map():
    """Print the ASCII floor plan of the mansion for the player to reference.

    NOTE(review): this shadows the builtin ``map``; nothing in the visible
    part of this file uses the builtin, but confirm the rest of the cmd loop
    does not rely on it before renaming.
    """
    print("""
                                            +---------------+
                                            |               |
         T H E  M A P                       |               |
             O F                            |               |
                                            |    Kitchen    |
   T H E  M A N S I O N                     |               |
                                            |               |
                                            |               |
                            +---------------+-------O-------+---------------+
                            |               |    |     |    |               |
                            |               |    |     |    |               |
                            |               |______|    |______|            |
                            |     Study     O______  D  ______O   Library   |
                            |               |    |     |    |               |
                            |               |    |     |    |               |
                            |               |    |     |    |               |
            +---------------+-------O-------+______|    |______+-------O-------+---------------+
            |               |    |     |    |   ____  ____    |    |     |    |               |
            |               |    |     |    |  |    |regards|    |    |     |    |               |
            |      Art      |______|    |______|  | +--0--+ |  |______|    |______|    Locked     |
            |    Gallery    O______  B  ______ |  | ??? |  | ______  C  ______0    Closet     |
            |               |    |     |    |  | +-----+ |  |    |     |    |               |
            |               |    |     |    |  |___________|  |    |     |    |               |
            |               |    |     |    |______     ______|    |     |    |               |
            +---------------+-------O-------+      |   |      +-------O-------+---------------+
            |               |    |     |    |      |   |      |    |     |    |               |
            |               |    |     |    |      |   |      |    |     |    |               |
            |     Grand     |______|    |______|   |   |   |______|    |______|    Trophy     |
            |    Ballroom   O______  A  ______O    |   |    ______  A  ______O     Room       |
            |               |    |     |    |      |   |      |    |     |    |               |
            |               |    |     |    |      |   |      |    |     |    |               |
            |               |    |     |    |      |   |      |    |     |    |               |
            +---------------+-------O-------+---------------+
                                            |               |
                                            |               |
                                            |     Start     |
                                            |    Here...    |
                                            |               |
                                            |               |
                                            |               |
                                            +---------------+
    """)
#dictionary containing information for all rooms in world
worldRooms = {
    # Schema for each entry (rooms are keyed either by their plain name or by
    # a '<room> from <previous location>' reference name used for orientation):
    #   'DOORS'  -- relative direction ('FORWARD'/'BACK'/'LEFT'/'RIGHT') -> destination
    #   'DESC'   -- text shown when the player enters the room
    #   'GROUND' -- display names of the items present in the room
    'Start': {
        'DOORS': {'FORWARD': 'Wing A'},
        'DESC': 'You wake up in a room with no memory of how you got there. The last thing you remember was freaking out over midterms -- so how did you end up here?\nYou slowly stagger to your feet and try to get your bearings. Surveying the room for any hints as to where you could be you notice a welcome sign\non the other side of the room-- Hmmm... you wonder what it says',
        'GROUND': ['Welcome Sign']},
    'Wing A from Start': {
        'DOORS': {'FORWARD': 'Main Hall', 'BACK': 'Start', 'LEFT': 'Grand Ballroom', 'RIGHT': 'Trophy Room'}},
    'Grand Ballroom': {
        'DOORS': {'LEFT': 'Wing B', 'RIGHT': 'Wing A'},
        'DESC': 'Wow what a gorgeous room! It has tall ceilings, an expansive dance floor, and a glittering chandelier! Near the edge of the dancefloor you notice some shattered glass and in the opposite corner there is an abandoned pair of heels. There\'s also what seems to be a trapdoor in the far corner in the room.',
        'GROUND': ['Shattered Glass', 'Trapdoor', 'Dancefloor', 'Chandelier', 'High Heels']},
    'Wing A from Grand Ballroom': {
        'DOORS': {'FORWARD': 'Trophy Room', 'BACK': 'Grand Ballroom', 'LEFT': 'Main Hall', 'RIGHT': 'Start'},},
    'Wing B from Grand Ballroom': {
        'DOORS': {'FORWARD': 'Study', 'BACK': 'Grand Ballroom', 'LEFT': 'Art Gallery', 'RIGHT': 'Main Hall'}},
    'Trophy Room': {
        'DOORS': {'LEFT': 'Wing A', 'RIGHT': 'Wing C'},
        'DESC': 'Looking around in awe, you notice that the trophy room is more like a Magnet Memorabilia Hall of Fame! The room is full of glass shelves with various items and objects on display. A couple displays that catch your eye is a rubber band, a virendragon plush, a copy of Arthur\'s speakeasy performance, and a Purdue University acceptance letter.',
        'GROUND': ['Rubber Band', 'Virendragon', 'Arthur\'s Speakeasy Performance', 'Purdue Acceptance Letter']},
    'Wing A from Trophy Room': {
        'DOORS': {'FORWARD': 'Grand Ballroom', 'BACK': 'Trophy Room', 'LEFT': 'Start', 'RIGHT': 'Main Hall'}},
    'Wing C from Trophy Room': {
        'DOORS': {'FORWARD': 'Library', 'BACK': 'Trophy Room', 'LEFT': 'Main Hall', 'RIGHT': 'Locked Closet'}},
    'Art Gallery': {
        'DOORS': {'FORWARD': 'Wing B'},
        'DESC': 'You can\'t help but feel on edge as you enter into the room, not knowing what to expect. Near the very entrance you see a sign with text written on it. On one of the side tables you see, a moose costume?? You continue around the room to take in all the artwork. You see a Harambe tribute that truly does him justice. You also see a closed curtain with three different spotlights shining down on it. And wait... is that the Mona Lisa?!?',
        'GROUND': ['Do Not Take This Sign Sign', 'Moose Costume', 'Mona Lisa', 'Harambe Tribute', 'Code #1', 'Curtain']},
    'Wing B from Art Gallery': {
        'DOORS': {'FORWARD': 'Main Hall', 'BACK': 'Art Gallery', 'LEFT': 'Study', 'RIGHT': 'Grand Ballroom'}},
    'Study': {
        'DOORS': {'LEFT': 'Wing D', 'RIGHT': 'Wing B'},
        'DESC': 'In the center of the room is a desk with a spinny chair (yay!) On top of the desk sits a mug and you hear the satisfying sound of the printer finish printing something. There\'s also a pile of documents and handwritten responses sitting on the desk... Are they DBQ\'s??',
        'GROUND': ['DBQ\'s', 'Mug', 'Printer', 'Quiz Cover Sheet', 'Spinny Chair']},
    'Wing B from Study': {
        'DOORS': {'FORWARD': 'Grand Ballroom', 'BACK': 'Study', 'LEFT': 'Main Hall', 'RIGHT': 'Art Gallery'}},
    'Wing D from Study': {
        'DOORS': {'FORWARD': 'Library', 'BACK': 'Study', 'LEFT': 'Kitchen', 'RIGHT': 'Long Corridor'}},
    'Library': {
        'DOORS': {'LEFT': 'Wing C', 'RIGHT': 'Wing D'},
        'DESC': 'Your inner book-lover can\'t help but be in awe of the massive bookselves that line the room from floor to ceiling. You can\'t help but wish you had the time to read them all. You also notice an eerily familiar statue in the center of the library... \nYou definitely know it from somewhere-- why can\'t you place it?',
        # Fixed two typos so the list matches the titles given in the Bookshelf
        # item description ('Cookbook By Mr. Nowakowski', 'Women And Economics').
        'GROUND': ['Statue', 'Bookshelf', 'Cookbook By Mr. Nowakowski', 'Varoun And The Sea Of Stories', 'The Communist Manifesto', 'Women And Economics', 'The Crucible', 'The Answer To The Great Pants Debate', 'To Build A Fire', 'How To Survive Magnet Guide']},
    'Wing C from Library': {
        'DOORS': {'FORWARD': 'Trophy Room', 'BACK': 'Library', 'LEFT': 'Locked Closet', 'RIGHT': 'Main Hall'}},
    'Wing D from Library': {
        'DOORS': {'FORWARD': 'Study', 'BACK': 'Library', 'LEFT': 'Long Corridor', 'RIGHT': 'Kitchen'}},
    'Kitchen': {
        'DOORS': {'FORWARD': 'Wing D'},
        'DESC': 'The delightful smell of food fills the air, making you hungry. The chef flashes a grin, as he slides a box marked "Not Human Organs" under a table with his foot. Averting, your eyes you notice that there\'s a fridge, toaster, microwave, and cupboard in different corners of the room. There\'s also a bag of popcorn on the counter -- your favorite!',
        'GROUND': ['Pink Goldfish', 'Fridge', 'Toaster', 'Toast', 'Box', 'Bag Of Popcorn', 'Key']},
    'Wing D from Kitchen': {
        'DOORS': {'FORWARD': 'Long Corridor', 'BACK': 'Kitchen', 'LEFT': 'Library', 'RIGHT': 'Study'}},
    # The four Main Hall entries are the same physical room seen from each
    # wing; Room.nextTurn displays them all as plain 'Main Hall'.
    'Main Hall from Wing A': {
        'DOORS': {'LEFT': 'Wing B', 'RIGHT': 'Wing C'},
        'DESC': 'You are now standing in the main hall leading to the other wings of the mansion.'},
    'Main Hall from Wing B': {
        'DOORS': {'LEFT': 'Wing D', 'RIGHT': 'Wing A'},
        'DESC': 'You are now standing in the main hall leading to the other wings of the mansion.'},
    'Main Hall from Wing C': {
        'DOORS': {'LEFT': 'Wing A', 'RIGHT': 'Wing D'},
        'DESC': 'You are now standing in the main hall leading to the other wings of the mansion.'},
    'Main Hall from Wing D': {
        'DOORS': {'FORWARD': 'Long Corridor', 'LEFT': 'Wing C', 'RIGHT': 'Wing B'},
        'DESC': 'You are now standing in the main hall leading to the other wings of the mansion.'},
    'Wing A from Main Hall': {
        'DOORS': {'FORWARD': 'Start', 'LEFT': 'Trophy Room', 'RIGHT': 'Grand Ballroom'}},
    'Wing B from Main Hall': {
        'DOORS': {'FORWARD': 'Art Gallery', 'LEFT': 'Grand Ballroom', 'RIGHT': 'Study'}},
    'Wing C from Main Hall': {
        'DOORS': {'FORWARD': 'Locked Closet', 'LEFT': 'Library', 'RIGHT': 'Trophy Room'}},
    'Wing D from Main Hall': {
        'DOORS': {'FORWARD': 'Kitchen', 'LEFT': 'Study', 'RIGHT': 'Library'}},
    'Locked Closet': {
        'DOORS': {'FORWARD': 'Locked Room', 'BACK': 'Wing C'},
        'DESC': 'This room is locked and you need a key to enter.'},
    'Locked Room': {
        'DOORS': {'FORWARD': 'Wing C'},
        'DESC': 'you see doors foo',  # TODO: placeholder description, still to be written (see TO-DO list at top)
        'GROUND': ['Code #2', 'Door B', 'Door C', 'Door D']},
    'Wing C from Locked Closet': {
        'DOORS': {'FORWARD': 'Main Hall', 'LEFT': 'Trophy Room', 'RIGHT': 'Library', 'BACK': 'Locked Closet'}},
    'Long Corridor': {
        'DOORS': {'FORWARD': 'Final Room', 'BACK': 'Wing D'},
        'DESC': 'These doors are locked and you need both escape codes in order to proceed!'},
    'Wing D from Long Corridor': {
        'DOORS': {'FORWARD': 'Kitchen', 'LEFT': 'Study', 'RIGHT': 'Library'},
        'DESC': ''},
    'Locked Doors': {
        'DOORS': {'FORWARD': 'Final Room', 'BACK': 'Wing D'},
        'DESC': ' yeet yahhh '},  # TODO: placeholder description
    'Final Room': {
        'DOORS': {'BACK': 'Wing B'},
        'DESC': ' need this?? '},  # TODO: placeholder description
    }
#dictionary containing information for just the wings -- often referred to in order to convert player perspective to proper directions
worldWings = {
    # For each wing, the compass keys (NORTH/EAST/SOUTH/WEST) give the room in
    # that fixed direction on the map; per the comment above, these are used to
    # convert the player's relative perspective into proper directions.
    # 'DESC' is the text shown while standing in the wing.
    'Wing A': {
        'DESC': 'You are standing in the middle of the wing, looking at the rooms you can next travel to.',
        'NORTH': 'Main Hall from Wing A',
        'EAST': 'Trophy Room',
        'SOUTH': 'Start',
        'WEST': 'Grand Ballroom'},
    'Wing B': {
        'DESC': 'You are standing in the middle of the wing, looking at the rooms you can next travel to.',
        'NORTH': 'Study',
        'EAST': 'Main Hall from Wing B',
        'SOUTH': 'Grand Ballroom',
        'WEST': 'Art Gallery'},
    'Wing C': {
        'DESC': 'You are standing in the middle of the wing, looking at the rooms you can next travel to.',
        'NORTH': 'Library',
        'EAST': 'Locked Closet',
        'SOUTH': 'Trophy Room',
        'WEST': 'Main Hall from Wing C'},
    'Wing D': {
        'DESC': 'You are standing in the middle of the wing, looking at the rooms you can next travel to.',
        'NORTH': 'Kitchen',
        'EAST': 'Library',
        'SOUTH': 'Main Hall from Wing D',
        'WEST': 'Study'}
    }
#class used for all rooms (except for the wings)
class Room(object):
    """A single non-wing location in the mansion.

    The four wall flags say which relative exits (front/back/left/right) are
    open; the destination behind each open exit is looked up in the
    module-level ``worldRooms`` dictionary under this room's name.
    """

    def __init__(self, name, desc, front=True, back=True, left=True, right=True, locked=False):
        self.name = name
        # Display name used for the 'Main Hall from Wing X' reference rooms,
        # which are all shown to the player as simply 'Main Hall'.
        self.exception = 'Main Hall'
        self.walls = {'front': front, 'back': back, 'left': left, 'right': right}
        self.desc = desc
        self.locked = locked

    # general function to print room location, description, and other rooms relative to it
    def nextTurn(self):
        """Print the room's heading, its description, and every open exit."""
        # The Main Hall is stored under disambiguated reference keys but is
        # displayed under its plain name.
        if self.name in ('Main Hall from Wing A', 'Main Hall from Wing B',
                         'Main Hall from Wing C', 'Main Hall from Wing D'):
            heading = self.exception
        else:
            heading = self.name
        print(heading)
        print("=" * len(heading))
        # prints the description of the room
        print(self.desc)
        # just for formatting reasons :)
        print(' ')
        # Map each wall flag to the direction key used in worldRooms['DOORS'].
        directions = {'front': 'FORWARD', 'back': 'BACK', 'left': 'LEFT', 'right': 'RIGHT'}
        for wall, is_open in self.walls.items():
            # BUG FIX: the original loop executed ``break`` on the first
            # closed wall, so any exits listed after a closed one (e.g. in
            # rooms with only LEFT/RIGHT doors) were never printed.
            if not is_open:
                continue
            direction = directions[wall]
            print(f"{direction.capitalize()}: " + worldRooms[self.name]['DOORS'][direction])
#class used for just the wings -- has special accomodation to convert perspective to proper directions
class Wing(object):
    """A wing hub connecting rooms.

    Exits are flagged by compass direction, and the printable door list is
    resolved through ``worldRooms`` using the player's previous location
    (the module-level ``prevlocation``) to pick the right perspective entry.
    """

    def __init__(self, name, desc, north=True, south=True, east=True, west=True):
        self.name = name
        self.desc = desc
        self.walls = {'north': north, 'south': south, 'east': east, 'west': west}

    def altnextTurn(self):
        """Print the wing heading, its description, and the reachable rooms."""
        print(self.name)
        print("=" * len(self.name))
        print(self.desc)
        print(' ')
        # The door list does not depend on which particular wall is open, so
        # it is printed once as long as at least one wall is open.
        if not any(self.walls.values()):
            return
        halls = ['Main Hall from Wing A', 'Main Hall from Wing B',
                 'Main Hall from Wing C', 'Main Hall from Wing D']
        # Build the '<wing> from <previous location>' key used by worldRooms.
        if prevlocation in halls:
            lookup = f'{self.name} from Main Hall'
        else:
            lookup = f'{self.name} from {prevlocation}'
        for direction, destination in worldRooms[lookup]['DOORS'].items():
            print(f"{direction}: {destination}".title())
#class used for all objects
class Object(object):
    """An interactable item placed somewhere in the mansion.

    ``official`` is the display name, ``names`` the aliases it can be
    referred to by, the ``*mg`` strings are the messages for taking,
    dropping, and eating it, and the boolean flags describe what the player
    may do with it (and whether doing so is fatal).  Note that ``validity``
    is accepted for call compatibility but is not stored on the instance.
    """

    def __init__(self, official, desc, names, takemg='', dropmg='', eatmg='',
                 takeable=True, edible=False, usable=False, validity=False,
                 lookdeadly=False, takedeadly=False):
        attributes = (
            ('official', official), ('desc', desc), ('names', names),
            ('takemg', takemg), ('dropmg', dropmg), ('eatmg', eatmg),
            ('takeable', takeable), ('edible', edible), ('usable', usable),
            ('lookdeadly', lookdeadly), ('takedeadly', takedeadly),
        )
        for attribute, value in attributes:
            setattr(self, attribute, value)
#list containing information (filled in paramaters) that correspond with the Object class
worldItems = [
#items in start
Object('Welcome Sign', 'The sign reads, "Welcome to the game, Missing from Magnet! The objective is to find the two needed codes to escape. You can type "help" for a list\nof commands or type "map" to reference the map. Best of luck!', ['welcome sign', 'welcome'], takeable=False),
#items in grand ballroom
Object('Trapdoor', 'You cautiously approach the trapdoor, debating whether you should open it or not. Mustering up some courage you lift the door of the trap door and are rewarded with a hand grabbing your ankle and dragging you down with it! \nLol you dead.', ['trapdoor'], takeable=False, lookdeadly=True),
Object('Shattered Glass', 'Why is there just a pile of broken glass on the floor?', ['shattered glass', 'glass'], edible=True, takemg='Ouch.', eatmg='\nSeriously? What is wrong with you? Did you seriously just eat shards of glass?!? \n\n...What? \nOh.. You have the Sanservino midterm next. -- Proceed as you were.'),
Object('Dancefloor', 'Looking at the dancefloor, you are suddenly reminded of your inability to dance...\nYou die immediately of extreme cringe.', ['dancefloor', 'dance', 'floor'], takeable=False, lookdeadly=True),
Object('Chandelier', 'Wow, the crystals on the chandelier are stunning!', ['chandelier'], takeable=False),
Object('High Heels', 'You take a closer look and realize that SCORE! These EXPENSIVE, these is RED BOTTOMS, these is BLOODY SHOES!', ['high heels', 'heels', 'red bottoms', 'louboutins'], takemg='Same girl, same.'),
#items in trophy room
Object('Rubber Band', 'Upon closer inspection you realize that the rubber band is THE very rubber band that Limo used to shoot down a wasp in the AIT bridge in a \nSINGLE. SHOT.\n...we are just mere mortals', ['rubber band']),
Object('Virendragon', 'You pick up the virendragon plush and are startled to find that this is no ordinary plush --\nIn fact, when you squeeze the plush, it lets out a vicious roar:\n\nREEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEE!!!!!', ['virendragon', 'virendragon plush', 'plush'], dropmg='Why would you get rid of such a magnificent plush?'),
Object('Arthur\'s Speakeasy Performance', 'Sorry, legends only.', ['arthur\'s speakeasy performance', 'speakeasy', 'speakeasy performance', 'performance'], takeable=False, takemg='Sorry, legends only.'),
Object('Purdue Acceptance Letter', 'YEAHHHHHHH PURDUEEEEEEE', ['purdue acceptance letter', 'purdue', 'purdue acceptance', 'acceptance letter', 'letter', 'purdue letter']),
#items in art gallery
Object('Do Not Take This Sign Sign','The sign reads, "Do not take this sign."', ['do not take this sign sign', 'sign']),
Object('Moose Costume', 'You lift up the moose costume and hold it up against yourself-- It appears to be exactly your size! \n& bonus: it looks super fuzzy warm :)', ['moose costume', 'moose', 'costume'], takedeadly=True, takemg='From the distance you hear an ominous music draw closer and closer -- louder and louder\nYou can finally start making some of the words out... \n\n DRESS UP LIKE A MOOSE DURING HUNTING SEASOOOOOOOON \n\n\nDumb ways to dieeeeeeeee\nso many dumb ways to dieeeeeeeee\nDumb ways to die-ie-ieeeeeeeee\nso many dumb ways to dieeeee'),
Object('Mona Lisa', 'Could it be?? There\'s always been rumors that the real Mona Lisa had been secretly stolen but the painting right before you is enough to put all those rumors to rest...', ['mona lisa', 'painting'], takemg='\nWoah, when you move the painting you notice a folded piece of paper hidden in a crack in the wall!'),
Object('Harambe Tribute', 'You spend a moment to take it all in and silently brush a tear from the corner of your eye...\nHe will forever live on in our hearts.', ['harambe tribute', 'harambe', 'tribute'], takeable=False),
Object('Curtain', 'When you move the curtain aside you are quite surprised to find a life size oil painting of Blaise\'s "octodab"!\n#iconic', ['curtain'], takeable=False),
Object('Code #1', 'The piece of paper reads: \n * Code #1 -- 42 72 69 65 6c 6c 61', ['code #1', 'folded piece of paper', 'piece of paper', 'paper'], edible=True, eatmg='WHY WOULD YOU DO THAT YOU IDIOT??! I hope you have a good memory...'),
#items in study
Object('DBQ\'s', 'Wait... what\'s that on the desk?? Is that the DBQ\'s from freshmen year that Mr. McMenamin STILL hasn\'t graded??', ['dbq\'s', 'dbq', 'dbqs'], takeable=False, takemg='You can\'t take these yet! They still haven\'t been graded!'),
Object('Mug', 'You pick up the mug and turn it around to see what it says. It reads:\n\n WORLD\'S BEST BOSS', ['mug'], takeable=False, takemg='You\'re not Michael Scott!!'),
Object('Printer', 'It looks like someone must have left something on the printer. You look at the top page and it says:\n\n "QUIZ COVER SHEET"\n\n...Well it was left here after all.. Would just a peek hurt?', ['printer'], takeable=False),
Object('Quiz Cover Sheet', 'After a quick glance over each shoulder and and quick breath, you lift up the cover sheet to see what\'s underneath. \nScrawled in Sharpie on the page underneath is,\n\n yOu PlAyEd YoUrSeLf SoN!!!', ['quiz cover sheet', 'quiz', 'cover sheet', 'cover', 'sheet'], takeable=False),
Object('Spinny Chair', 'Come on, who can resisted spinning in a spinny chair?', ['spinny chair', 'chair'], takeable=False),
#items in kitchen
Object('Pink Goldfish', 'You have just discovered Mr. Moskowitz\'s secret stash of pink goldfish!!', ['pink goldfish', 'cupboard', 'goldfish'], edible=True, eatmg='Oooooo I\'m telling Mr. Moskowitz that you ate his goldfish!'),
Object('Fridge', 'Don\'t you know that there\'s never anything that you want when you check the fridge?! Game or not, you\'re about to dramatically starve to death', ['fridge'], takeable=False, lookdeadly=True),
Object('Toaster', 'You look inside the toaster and find toast!', ['toaster'], takeable=False),
Object('Toast', 'Although you go to Magnet, you apparently still thought it was a good idea to take the toast out with a fork!', ['toast'], lookdeadly=True, takedeadly=True, takemg='Although you go to Magnet, you apparently still thought it was a good idea to take the toast out with a fork!'),
Object('Box', 'Duhhhh, he pushed it under the table for a reason dummy! You can\'t look!', ['box'], takeable=False),
Object('Bag Of Popcorn', 'Just an unpopped bag of popcorn.', ['bag of popcorn', 'popcorn', 'popcorn bag', 'bag']),
Object('Key', 'You open the microwave and find a key sitting inside! It\'s a good thing you didn\'t turn it on to try to pop the popcorn!', ['key', 'microwave']),
#items in library
Object('Statue', 'You get closer to the statue and suddenly realize why it looks so familiar!! It\'s King Neptune from the Spongebob Squarepants movie! The statue has a paper bag over its head and you decide to lift it up. The second you do a bliding light emits from the baldness of his crownless head and you fall to the floor while hearing echoing screams of "BALD, BALD, BALD, MYYYY EYESSSSSSSSS"', ['statue', 'familiar statue', 'statue of king neptune', 'king neptune'], takeable=False, lookdeadly=True),
Object('Bookshelf', 'Wow, there sure are a lot of books! You see Cookbook By Mr. Nowakowski, Varoun And The Sea Of Stories, Women And Economics, The Crucible, The Answer To The Great Pants Debate, To Build A Fire, and the How To Survive Magnet Guide.', ['bookshelf', 'bookshelves'], takeable=False),
Object('Cookbook By Mr. Nowakoski', 'A limited edition, first edition copy of former executive chef, Mr. Peter Nowakoski\'s cookbook that has a recipe for every occasion!', ['cookbook by mr. nowakoski', 'cookbook by mr nowakoski', 'cookbook'], takemg='Good choice!'),
Object('Varoun And The Sea Of Stories', 'Flashbacks to freshmen year...', ['varoun and the sea of stories', 'varoun', 'sea of stories']),
Object('Women And Economics', 'Umm, yes please!', ['women and economics']),
Object('The Crucible', 'You open the massive literature textbook to the Crucible, but when you start to crack open the book you swear you hear a voice! Startled, you slam the book closed and tentatively begin to crack the book back open. You hear a rasping voice wheeze out, "more weighhhhhht!"', ['the crucible', 'crucible']),
Object('The Answer To The Great Pants Debate', 'I\'m sorry but this knowledge is too powerful...', ['the answer to the great pants debate', 'answer', 'debate', 'great pants debate', 'the great pants debate']),
Object('To Build A Fire', 'Moral of the story: Gotta get back to the boys!', ['to build a fire']),
Object('How To Survive Magnet Guide', 'HA! Yeahhhhh this doesn\'t exist.', ['how to survive magnet guide', 'survival guide', 'guide'], edible=True, eatmg='That might be its most useful use yet!'),
#locked room
Object('Code #2', 'You stuck with your gut and chose right! Your reward is the second needed code sequence.\n\nDon\'t forget to type "take code" to add it to your inventory.', ['door a', 'a', 'code #2', 'code']),
Object('Door B', 'Story Time!: “Once there was an Ugly Barnacle. He was so ugly, that everyone died! The End.”\n\n...You\'re that Ugly Barnacle.', ['door b', 'b'], takeable=False, lookdeadly=True),
Object('Door C', 'Some believe that by choosing "C" you have a higher chance of guessing correctly! ...Well today\'s not that day.', ['door c', 'c'], takeable=False, lookdeadly=True),
Object('Door D', 'You open the door to only to find yourself staring at your reflection in the mirror! You die partly from shock and partly from sheer ugliness.', ['door d', 'd'], takeable=False, lookdeadly=True),
]
#master command loop that allows a variety of user inputted commands
class TextAdventureCmd(cmd.Cmd):
    """One-turn command interpreter for the game.

    Every do_* handler communicates with the surrounding while-loop through
    module-level globals (user_input, quit, endgame, inventory, location,
    worldItems, worldRooms) and returns True to exit cmdloop() so the loop
    below can act on the new state.
    """
    prompt = '\n> '
    def default(self, arg):
        # Fallback shown for any unrecognized command line.
        print('I do not understand that command. Type "help" for a list of commands.')
    def do_quit(self, arg):
        """Quit the game."""
        global quit
        quit = 'True'
        return True #by returning True (and this applies to all functions containing 'return True' that are withing this command loop) it exits the command loop and then runs code from the middle of the while loop that follows
    def do_forward(self, arg):
        """Move in the forward direction, if possible."""
        global user_input
        user_input = 'FORWARD'
        return True
    def do_back(self,arg):
        """Move in the back direction, if possible."""
        global user_input
        user_input = 'BACK'
        return True
    def do_left(self, arg):
        """Move in the left direction, if possible."""
        global user_input
        user_input = 'LEFT'
        return True
    def do_right(self, arg):
        """Move in the right direction, if possible."""
        global user_input
        user_input = 'RIGHT'
        return True
    #methods with shortened names:
    do_f = do_forward
    do_b = do_back
    do_l = do_left
    do_r = do_right
    def do_look(self, item):
        """look <item> - Look at an item within the room."""
        global endgame
        choice = item.lower()
        current = ''
        # NOTE(review): the loops below leave `current`/`validity` holding
        # whatever the *last* matching item set; `validity` is only bound if
        # worldItems is non-empty (it always is in this file).
        #checks if user inputted item is within the list of items
        for item in worldItems:
            if choice in item.names:
                current = item
            if choice not in item.names:
                validity = 'no'
        #sets validity as positive if the object is in the list of items
        for item in worldItems:
            if choice in item.names:
                validity = 'yes'
                current.validity = True
        #checks each item in the list to see whether the item is plausible based on its criteria
        for item in worldItems:
            if choice in item.names:
                if current.official in worldRooms[location]['GROUND']: #checks if item is in current room
                    print(current.desc) #prints item description
                    if current.lookdeadly == True: #if looking at the item is deadly, user is deffered from game
                        endgame = 'True'
                        return True
                if current.official not in worldRooms[location]['GROUND']: #if the item is plausible but not in the current room, it prints message
                    print('You do not see that item.')
        #this will print a message if the user enters an item that doesn't exist or an improper input
        if validity != 'yes':
            print('You do not see that item.')
    def do_take(self, item):
        """take <item> - Take an item within the room."""
        global endgame
        #changes user input
        choice = item.lower()
        current = ''
        #checks to see if item is in list of items, and if not, the item's validity is set to "no"
        for item in worldItems:
            if choice in item.names:
                current = item
            if choice not in item.names:
                validity = 'no'
        #changes valid items to be positive
        for item in worldItems:
            if choice in item.names:
                validity = 'yes'
                current.validity = True
        #checks each item in list of items and goes through checklist of criteria
        for item in worldItems:
            if choice in item.names:
                if current.official in worldRooms[location]['GROUND']: #checks if item is in current room in order to proceed
                    if current.takeable == True: #checks if item is takeable
                        if current.takedeadly == True: #if taking the item is deadly, the game is ended
                            print(current.takemg)
                            endgame = 'True'
                            return True
                        inventory.append(current.official) #if not, the item is added to the player's inventory
                        print(f'"{current.official}" has been added to your inventory.')
                        if current.takemg != '': #if the item has a special 'take description' it will be displayed
                            print(current.takemg)
                    else: #if item is not takeable, message is displayed
                        if current.takemg != '':
                            print(current.takemg)
                        else:
                            print('You can not take that item.')
                if current.official not in worldRooms[location]['GROUND']: #if item is not in current room, message is displayed
                    print('That item is not here to take.')
        if validity != 'yes': #if the item does not exist or is entered wrong, message is displayed
            print('That item is not here to take.')
    def do_drop(self, item):
        """drop <item> - Drop an item and remove it from your inventory."""
        #changes user input
        choice = item.title()
        current = ''
        #checks if item is in list of items
        for item in worldItems:
            if choice.lower() in item.names:
                current = item
        if choice in inventory: #checks if item is in inventory
            inventory.remove(choice) #if yes, the item is removed
            print(inventory) #the current inventory is displayed
            if current.dropmg != '': #checks if there is a special 'drop message'
                print(current.dropmg)
        else:
            print('You do not have that item in your inventory to remove.') #if item is not inventory, displays message
    def do_eat(self, item):
        """eat <item> - Eat an item that is in your inventory."""
        #changes user input format
        choice = item.lower()
        current = ''
        #checks to see if reference name is in list of items
        for item in worldItems:
            if choice in item.names:
                current = item
        if choice.title() in inventory: #checks to see if item is in inventory
            if current.edible == True: #checks to see if the item is edible
                inventory.remove(choice.title()) #if yes, the item is removed from the player's inventory
                print(f'You just ate "{choice}."') #prints message
                if current.eatmg != '': #checks to see if there is special 'eat message'
                    print(current.eatmg) #and if so, prints the message
            else:
                print('You can not eat that item.') #if item is not edible, displays message
        else:
            print('That item is not in your inventory to eat.') #pretty self explanatory lol
    def do_use(self, item):
        """use <item> - Use an item that is in your inventory."""
        #changes user input
        choice = item.lower()
        current = ''
        #checks to relate reference name back to official name within list of items
        for item in worldItems:
            if choice in item.names:
                current = item
        if choice.title() in inventory: #checks to see if item is in inventory
            if current.usable == True: #if usable is true, message is displayed
                print(f'You just used your {choice}.')
            else:
                print('You can not use that item.') #if usable is false, message is displayed
        else:
            print('That item is not in your inventory to use.') #if item is not in inventory, message is displayed
    def do_inventory(self, arg):
        """This will show your current inventory."""
        print(inventory)
    def do_map(self, arg):
        """View the map of the mansion."""
        # NOTE(review): presumably calls a module-level map() helper defined
        # earlier in the file that shadows the builtin -- confirm; the builtin
        # map() with no arguments would raise TypeError.
        map()
#Begginning Fancy Stuff
print("""
▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬ஜ۩۞۩ஜ▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬
M I S S I N G F R O M M A G N E T
▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬ஜ۩۞۩ஜ▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬
~ (Type "help" for commands.) ~
""")
#Beginning Conditions
# Mutable global game state shared by TextAdventureCmd and the main loop.
location = 'Start'
# NOTE(review): the starting inventory already contains the Key and both
# escape codes -- this looks like a debug/testing leftover; confirm intended.
inventory = ['Key', 'Code #1', 'Code #2']
endgame = ''
#slowed printing for game ending
import sys,time,random
typing_speed = 100 #wpm
def slow_type(t, wpm=None):
    """Print the string *t* one character at a time, teletype style.

    t:   text to print (written to stdout, flushed per character).
    wpm: optional typing speed in words per minute.  When None, the
         module-level `typing_speed` constant is used, which matches the
         original behaviour exactly; passing a value lets callers speed the
         effect up or slow it down per call.
    """
    speed = typing_speed if wpm is None else wpm
    for l in t:
        sys.stdout.write(l)
        sys.stdout.flush()
        # Random per-character delay so the output looks hand-typed.
        time.sleep(random.random() * 10.0 / speed)
# ---------------------------------------------------------------------------
# Main game loop: render the current room, run one TextAdventureCmd command
# loop, then move the player.  All state lives in module-level globals
# (location, prevlocation, inventory, endgame, quit, user_input).
# ---------------------------------------------------------------------------
while True:
    # NOTE(review): `x != 'Wing A' or 'Wing B' or ...` is always True (every
    # non-empty string literal is truthy), so this branch runs every turn.
    if location != 'Wing A' or 'Wing B' or 'Wing C' or 'Wing D':
        location = location
        # Build a Room for every location, keyed by its squashed lowercase name.
        parameters = {
        'start': Room(location, worldRooms[location]['DESC'], back=False, left=False, right=False),
        'grandballroom': Room(location, worldRooms[location]['DESC'], front=False, back=False),
        'trophyroom': Room(location, worldRooms[location]['DESC'], front=False, back=False),
        'artgallery': Room(location, worldRooms[location]['DESC'], back=False, left=False, right=False),
        'locked room': Room(location, worldRooms[location]['DESC'], back=False, left=False, right=False),
        'study': Room(location, worldRooms[location]['DESC'], front=False, back=False),
        'library': Room(location, worldRooms[location]['DESC'], front=False, back=False),
        'secretattic': Room(location, worldRooms[location]['DESC'], front=False, back=False, left=False, right=False),
        'kitchen': Room(location, worldRooms[location]['DESC'], back=False, left=False, right=False),
        'longcorridor': Room('Locked Doors', worldRooms[location]['DESC'], left=False, right=False),
        'lockedcloset': Room(location, worldRooms[location]['DESC'], left=False, right=False),
        'mainhallfromwinga': Room(location, worldRooms[location]['DESC'], front=False, back=False),
        'mainhallfromwingb': Room(location, worldRooms[location]['DESC'], front=False, back=False),
        'mainhallfromwingc': Room(location, worldRooms[location]['DESC'], front=False, back=False),
        'mainhallfromwingd': Room(location, worldRooms[location]['DESC'],front=False, back=False)}
        # Squash the display name (e.g. "Main Hall from Wing A") into the key.
        location_modified = location.lower()
        if ' ' in (location_modified):
            location_modified = location_modified.replace(" ", "")
        self = parameters[location_modified]
        self.nextTurn()
        TextAdventureCmd().cmdloop()
        # NOTE(review): before do_quit ever runs, `quit` is the interpreter
        # builtin (not the string 'True'), so this comparison is safely False.
        if quit == 'True':
            print('\n\nThanks for playing!')
            break
        if endgame == 'True':
            print('\nThanks for playing -- try again')
            break
        # NOTE(review): always True (same truthiness bug as above); the else
        # branch below is unreachable.
        if user_input == 'FORWARD' or 'BACK' or 'LEFT' or 'RIGHT':
            print(' \n \n')
            newlocation = worldRooms[location]['DOORS'][user_input]
            prevlocation = location
            location = newlocation
        else:
            print('Sorry, I don\'t recognize that command. Enter "help" for the list of commands.')
    # --- Final Room: the player must enter both code sequences to escape. ---
    if location == 'Final Room':
        print(location)
        print("=" * len(location))
        finalRoom = Room(location, worldRooms[location]['DESC'], front=False, back=False, left=False, right=False, locked=True)
        fr = finalRoom
        end = ''
        if fr.locked == True:
            # NOTE(review): operator-precedence bug -- this parses as
            # ('Code #1') and ('Code #2' in inventory), so only Code #2 is
            # actually checked for.
            if 'Code #1' and 'Code #2' in inventory:
                print('Congratulations! You have found both of the code sequences! Enter "look codes" to see the sequences again.')
                while True:
                    # NOTE(review): once `end` is True this spins forever
                    # (sets quit then `continue`s back here) instead of
                    # breaking out to the quit check of the outer loop.
                    if end == True:
                        quit = 'True'
                        continue
                    displayCodes = (input(f'{inventory} \n\n> ')).lower()
                    if displayCodes == 'look codes':
                        print(""" * Code #1 -- 42 72 69 65 6c 6c 61\n * Code #2 -- 41 70 72 69 6c""")
                        while True:
                            code1 = input('\nEnter code sequence #1: ')
                            if code1 == '42 72 69 65 6c 6c 61':
                                slow_type(' processing...\n processing...\n')
                                code2 = input('Enter code sequence #2: ')
                                if code2 == '41 70 72 69 6c':
                                    slow_type(' processing...\n processing...\n')
print("""
..
.. .., .
,,.
. ..
,, .** ,@%
,, . ..., &% .,. @@
,*,. #@* . (&.
,*. ,,,. . * /%@ *@*
./@@(. .%, . ,. /(/,...,,(%@@@@&, *&%
*@@&. .&. .. .*@, ./@&@@@ &&.
*@@, (( *&* ,@@/ .#@& /&* *%&* /@,
@@, /@% #%( ., (*@. . .*. #@, (%@* . .%@ #@/ .@#
*@&. ,@, /%( *@@*(@#.*& . @@*/* @#%&&* *&% #. (@/ #, (@
(@@ ,&,,@@* @(.(.%( ,# * #&. /&@* #.(@@. *@( /@, ,@% ,%#, .
@@( /@@@/ *# ,*.@* ( *. .&, %(.&&,& ,@@, &@. .@&. #@* . .%( (@(
,@& %. ( ,#/ /&@, ,@%# .&@, .%*&*%# &,.%@@ *@&. .(
#@# ., ,. (*##. .&&, /@( %@, // .#. . &@ *,/&@/
(@& *#. ,%. . #@, @@,&/ /@ ,*
@@% .%( ., ,* && ,%%. , #,
*&%/,,/%&(. .. ,/ *@/ . .
. .* *@&..,,... .,,,,.,,,******,,..
.,,****%****,,%@&,,*****,,,*,***********,...,,***,
.,********,,,****,,*/%#*/%. ..,,,***.
.,********,,.. ...,***
.***, ...,,**********************************,
*****,,.. ....
.,,***************,. .
..**, /.
**.
.,..
""")
                                    print('You\'ve entered the proper codes to escape!!\nto be continued...')
                                    end = True
                                    break
                                else:
                                    print('Hmm... you must have entered the code wrong. Try entering it again.\n')
                                    continue
                            else:
                                print('Hmm... you must have entered the code wrong. Try entering it again.\n')
                                continue
                    else:
                        print('\nSorry, I don\'t recognize that command. Try entering "look codes" again.')
                        continue
        # NOTE(review): this `else` pairs with `if fr.locked == True:` above,
        # and fr.locked is always True here, so the branch never runs; it was
        # presumably meant to pair with the inventory check instead -- confirm.
        else:
            print('This room is locked and you are unable to enter without the needed escape codes. Keep looking for the codes!\n')
            prevlocation = 'Long Corridor'
            location = 'Wing D'
    # --- Locked Room: opening the door requires the Key item. ---
    if location == 'Locked Room':
        print(location)
        print("=" * len(location))
        lockedCloset = Room(location, worldRooms[location]['DESC'], front=False, back=False, left=False, right=False, locked=True)
        lc = lockedCloset
        if lc.locked == True:
            useKey = (input('You need to use a key to unlock this door. Type "use key" to use your key and enter the room. \n\n> ')).lower()
            while True:
                if useKey == 'use key':
                    if 'Key' in inventory:
                        print(f'\n\n{location}')
                        print("=" * len(location))
                        print('You have unlocked the door and entered the room!\n')
                        print('As you glance around, you notice that the room is bare except for four doors each labeled a letter: "A", "B", "C", & "D." \nBehind one of the doors is the essential code vital to escape. I guess this is just good practice for having to guess on multiple choice tests!')
                        print(f'\nForward: '+ worldRooms[location]['DOORS']['FORWARD'])
                        location = 'Locked Room'
                        prevlocation = 'Locked Closet'
                        TextAdventureCmd().cmdloop()
                        print('\n\n')
                        location = 'Wing C'
                        break
                    else:
                        print('This room is locked and you are unable to enter without the key. Keep looking for the key!\n')
                        prevlocation = 'Locked Closet'
                        location = 'Wing C'
                        break
                else:
                    useKey = (input('\nSorry, I don\'t recognize that command. Try entering "use key" again. \n\n> ')).lower()
                    continue
    if endgame == 'True':
        print('\nThanks for playing -- try again')
        break
    # NOTE(review): always True -- `x == 'Wing A' or 'Wing B' ...` short
    # circuits on the truthy string literal 'Wing B'.
    if location == 'Wing A' or 'Wing B' or 'Wing C' or 'Wing D':
        location = location
        parameters = {
        'winga': Wing(location, worldWings[location]['DESC']),
        'wingb': Wing(location, worldWings[location]['DESC']),
        'wingc': Wing(location, worldWings[location]['DESC']),
        'wingd': Wing(location, worldWings[location]['DESC'])}
        location_modified = location.lower()
        if ' ' in (location_modified):
            location_modified = location_modified.replace(" ", "")
        self = parameters[location_modified]
        self.altnextTurn()
        print(' ')
        user_input = str(input('Which way would you like to go next? ')).upper()
        # Expand one-letter shortcuts to the full direction names.
        if user_input not in ['FORWARD', 'BACK', 'LEFT', 'RIGHT']:
            if user_input == 'F':
                user_input = 'FORWARD'
            if user_input == 'B':
                user_input = 'BACK'
            if user_input == 'L':
                user_input = 'LEFT'
            if user_input == 'R':
                user_input = 'RIGHT'
        # One re-prompt if the input still is not a valid direction.
        if user_input not in ['FORWARD', 'BACK', 'LEFT', 'RIGHT']:
            user_input = str(input('\nSorry, you can\'t go that way. Which way would you like to go next? ')).upper()
            if user_input not in ['FORWARD', 'BACK', 'LEFT', 'RIGHT']:
                if user_input == 'F':
                    user_input = 'FORWARD'
                if user_input == 'B':
                    user_input = 'BACK'
                if user_input == 'L':
                    user_input = 'LEFT'
                if user_input == 'R':
                    user_input = 'RIGHT'
        print(' \n \n')
        # Wing rooms are keyed as "<Wing> from <previous room>" in worldRooms.
        if prevlocation not in ['Main Hall from Wing A', 'Main Hall from Wing B', 'Main Hall from Wing C', 'Main Hall from Wing D']:
            name_5 = str(f'{location} from {prevlocation}')
        else:
            name_5 = (f'{location} from Main Hall')
        newlocation = worldRooms[name_5]['DOORS'][user_input]
        prevlocation = location
        location = newlocation
    if location == 'Main Hall':
        location = str(f'{location} from {prevlocation}')
    else:
        print('Sorry, you are unable to go in that direction.')
| [
"bpayami@ucvts.tec.nj.us"
] | bpayami@ucvts.tec.nj.us |
39c564c01ad80cc148d2600ecf6c163390b54e21 | e84f63e7c8a105c6da6e98802762646d4c34d139 | /manage.py | 7d53e8e2bb6b9ad87e3156332179657a59f32270 | [] | no_license | ganeshkuikel/barber_project | dfed8591d94730427fd483500f2da4b73382c170 | 589ea1aaa1f0a653f35ca155095ba1e71d74fff9 | refs/heads/master | 2020-07-05T19:03:28.894825 | 2019-08-17T04:26:09 | 2019-08-17T04:26:09 | 202,739,316 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 631 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Django entry point: pin the default settings module, then delegate to
    django.core.management with the raw command-line arguments."""
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'smbarbershp.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        # Re-raise with a friendlier hint while keeping the original cause.
        message = (
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        )
        raise ImportError(message) from exc
    execute_from_command_line(sys.argv)


if __name__ == '__main__':
    main()
| [
"ganeshkuikel66@gmail.com"
] | ganeshkuikel66@gmail.com |
32507acd78f501ec54d3ee9e35911dfe8ca480b6 | 03dfcd4bd41ff9ba76e67895e96a9794ad003a31 | /sandbox/internet/web-scraping/myparser.py | 82a2e133b52c265a643c1d4c02ec7e0966db8a05 | [] | no_license | gittygitgit/python-sandbox | 71ca68fcc90745931737f7aeb61306ac3417ce60 | 3b3e0eaf4edad13aabe51eb3258ebe9e6b951c67 | refs/heads/master | 2021-01-19T02:41:17.047711 | 2018-11-22T18:07:15 | 2018-11-22T18:07:15 | 39,742,770 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,720 | py | #!/usr/bin/python
import HTMLParser
class MyParse(HTMLParser.HTMLParser):
    """Parser that keeps a stack of the currently open tags and, for <a>
    tags, records the position of the href attribute on the stack entry.

    NOTE(review): this module is Python 2 only -- it imports `HTMLParser`
    (renamed `html.parser` in Python 3), relies on `map()` returning a list,
    and calls `has_key()`.
    """
    def __init__(self):
        #super() does not work for this class
        HTMLParser.HTMLParser.__init__(self)
        # Stack of [tag_name, attrs] entries for every tag currently open.
        self.tag_stack = []
        # NOTE(review): never written to after initialisation -- dead attribute?
        self.attr_stack = []
    def handle_endtag(self, tag):
        #take the tag off the stack if it matches the next close tag
        #if you are expecting unmatched tags, then this needs to be more robust
        # NOTE(review): raises IndexError if a close tag arrives while the
        # stack is empty.
        if self.tag_stack[len(self.tag_stack)-1][0] == tag:
            self.tag_stack.pop()
    def handle_data(self, data):
        #'data' is the text between tags, not necessarily
        #matching tags
        #this gives you a link to the last tag
        # NOTE(review): IndexError if text appears before the first start tag.
        tstack = self.tag_stack[len(self.tag_stack)-1]
        #do something with the text
    def handle_starttag(self, tag, attrs):
        #add tag to the stack
        self.tag_stack.append([tag, attrs])
        #if this tag is a link
        if tag =="a":
            #these next few lines find if there is a hyperlink in the tag
            # 0/1 list marking which attribute tuple is an 'href' (works only
            # because Python 2 map() returns a list with .index()).
            tloc = map(lambda x: 1 if x[0]=='href' else 0,attrs)
            try:
                #did we find any hyperlinks
                attr_loc = tloc.index(1)
            except:
                pass
            # attr_loc only exists if we found a hyperlink
            # NOTE(review): vars().has_key() is a fragile (and Python 2 only)
            # way of testing whether the try block above succeeded.
            if vars().has_key('attr_loc'):
                #append to the last item in the stack the location of the hyperlink
                #note, this does not increase the length of the stack
                #as we are putting it inside the last item on the stack
                self.tag_stack[len(self.tag_stack)-1].append(attr_loc)
            #now we can do what we need with the hyperlink
| [
"grudkowm@Michaels-Air-2.fios-router.home"
] | grudkowm@Michaels-Air-2.fios-router.home |
a33a45c3bb847c2488e53b3f8ce4ab04c98892ea | d8f0f334cdf334688c0006c4a8f67f32ab950b77 | /lab03/cdavies/primes.py | 89619bdeb6e466ddd8ee9a202c51dec83e0438e6 | [] | no_license | acheney/python-labs | e892198c1dcb2a3df525616e3c0af2a8680551be | eabacfef432f436cf6af6a8235aa7d28fd966c61 | refs/heads/master | 2021-01-18T09:08:03.650541 | 2013-01-27T22:17:38 | 2013-01-27T22:17:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 868 | py | # primes.py
# Makes a list of primes and finds any twin primes among them.
#
# Connor Davies
# 1/10/13
def main():
    """Prompt for an upper bound n, print every prime in [2, n] and count
    the twin-prime pairs among them."""
    # int() instead of eval(): eval() would execute arbitrary user input.
    n = int(input("Please enter a number greater than 1:"))
    prev = 2  # most recently seen prime (2 is printed unconditionally below)
    total = 0  # number of twin-prime pairs found
    # The old banner claimed "the first n primes"; the loop actually lists
    # the primes *up to* n, so say that.
    print("The primes up to", n, "are: ", end="")
    print(2, end="")
    for i in range(3, n + 1):  # loop through all candidates up to n
        if isPrime(i):
            print(",", i, end="")  # print the prime on the same line
            if i - prev == 2:  # twin primes differ by exactly 2
                total = total + 1
            prev = i  # remember this prime for the next twin check
    print()
    print("Amongst these there are", total, "twin primes.")
def isPrime(x):
    """Return True if x is a prime number.

    Fixes: the original returned True for every x < 2 (0, 1, negatives)
    because range(2, x) was empty, and trial-divided all the way to x-1.
    Trial division now stops once i*i > x.
    """
    if x < 2:
        return False
    i = 2
    while i * i <= x:  # divisors above sqrt(x) would have a partner below it
        if x % i == 0:
            return False
        i += 1
    return True
main() | [
"connordavies11@gmail.com"
] | connordavies11@gmail.com |
cf60eb518d39c69bceb479291a1a42f71beca59e | 214d0bfb30dc17b12d786804d07085542fa1eef2 | /getOTToLStats.py | fea77671c5ab3f9547d0c1d98bd27163808ae8b5 | [] | no_license | Katzlab/OTToL-Scripts | 76a1922ca845dec35cc0fd57fabc0785d0839419 | b30ec4356627206808677006342be15b20a450c4 | refs/heads/master | 2018-12-28T07:56:20.855159 | 2012-09-10T13:36:52 | 2012-09-10T13:36:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 885 | py | ###############################################
#This script reads in the OTToL file and outputs the # of species, genera and 'above genera'
###############################################
print "This script reads in the OTToL file and outputs the # of species, genera and 'above genera'"
def getStats(file):
    # Tally the taxonomy rank found in the second-to-last tab-separated
    # column of every line of an OTToL file, then report the totals.
    # (Python 2 script; print is used in its parenthesised single-argument
    # form, which behaves identically under Python 2.)
    species_total = 0
    genus_total = 0
    other_total = 0
    for line in open(file, 'r').readlines():
        rank = line.split('\t')[-2]
        if rank == 'species':
            species_total = species_total + 1
        elif rank == 'genus':
            genus_total = genus_total + 1
        else:
            other_total = other_total + 1
    print('species: ' + str(species_total))
    print('genera: ' + str(genus_total))
    print('other: ' + str(other_total))
def main():
file = raw_input('What file do you want to check? ' )
try:
x = open(file,'r')
except:
print ' Trouble opening that file. Try again. '
main()
getStats(file)
main() | [
"jgrant@smith.edu"
] | jgrant@smith.edu |
eb8cfcd9d3cfa4307c32ac076282793074731f1e | 4b594a54a58d85da48d0c123403edec41adf474b | /EvaluationOfModelPicks.py | d0f54cd2ff3a5cb1331ae6c790868b3f090c18bc | [] | no_license | EWiliams0590/StockPredictor | 66b0f168aa3d60bdf341c114cf715e4d6f6f337a | 2cf6844d87664974301ee943df581efff2fc5681 | refs/heads/main | 2023-06-10T22:37:58.790406 | 2021-06-28T17:46:55 | 2021-06-28T17:46:55 | 346,145,137 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,124 | py | ### For determining how well the stock picks did for the week
import pandas as pd
from datetime import date, timedelta
import yfinance as yf
def add_SP_increase(date):
    """Append an 'SP_increases' column to StockPicks<date>.csv.

    For every ticker in the file, fetch daily prices from the day after
    *date* through today (via yfinance) and record the best fractional gain
    over that ticker's close on *date*.  The CSV is rewritten in place.

    Parameters
    ----------
    date : datetime.date
        The date the picks were made; also keys the CSV file name and the
        "Close on <date>" column.

    Returns
    -------
    None
    """
    # NOTE(review): the parameter shadows the datetime.date class imported at
    # module level; date.today() below works only because today() is a
    # classmethod reachable through the instance -- confirm intended.
    today = date.today()
    filename = f'StockPicks{date}.csv'
    df = pd.read_csv(filename).set_index('Ticker')
    tickers = df.index
    SP_increases = []
    for ticker in tickers:
        # Close price recorded on the pick date.
        curr_close = df.loc[ticker][f"Close on {date}"]
        ticker_name = yf.Ticker(ticker)
        start = date + timedelta(days=1)
        end = today
        # Daily history from the day after the pick through today.
        ticker_df = ticker_name.history(start=start, end=end, actions=False)
        closes = ticker_df['Close']
        max_close = closes.max() # largest close
        # Best fractional gain relative to the pick-date close.
        SP_increase = (max_close-curr_close)/curr_close
        SP_increases.append(SP_increase)
    df['SP_increases'] = SP_increases
    df.to_csv(filename)
# NOTE(review): this rebinds the module-level name `date` from the
# datetime.date class to a concrete date instance; any later call of
# date(...) in this module would fail.
date = date(2021, 6, 21)
add_SP_increase(date)
| [
"noreply@github.com"
] | noreply@github.com |
c623f87b22b649226f52dc2e56f8651ae57fca85 | 484da6ff9bda06183c3d3bbda70c6d11e1ad6b67 | /.history/main_20191007162714.py | 3c0a5d2227f2184e32bd46b4e6485a71ab54093b | [] | no_license | Shynar88/TSP | 009a88bbddb29214921de4d0cf1761dea61b7b75 | 889751ab7d6a91469e86c6583f3c91b85857edd9 | refs/heads/master | 2020-08-06T22:40:49.217474 | 2020-01-14T13:41:44 | 2020-01-14T13:41:44 | 213,185,830 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,330 | py | import argparse
import math
import random
import operator
class City():
    """A node of the TSP instance: an identifier plus 2-D coordinates."""

    def __init__(self, index, x_coord, y_coord):
        self.index = index
        self.x_coord = x_coord
        self.y_coord = y_coord

    def __repr__(self):
        # Render just the coordinate pair, e.g. "[3, 4]".
        return f"[{self.x_coord}, {self.y_coord}]"

    def get_distance_to(self, other):
        """Euclidean distance from this city to *other*."""
        delta_x = other.x_coord - self.x_coord
        delta_y = other.y_coord - self.y_coord
        return math.sqrt(delta_x ** 2 + delta_y ** 2)
class Instance():
    """One candidate TSP tour plus its cached length and fitness."""

    def __init__(self, route):
        self.route = route
        self.route_distance = self.get_route_distance()
        self.fitness = self.get_fitness()

    def get_route_distance(self):
        """Total length of the closed tour (last city connects back to the first)."""
        total = 0
        n = len(self.route)
        for i, src in enumerate(self.route):
            dest = self.route[(i + 1) % n]  # wraps around to the start
            total += src.get_distance_to(dest)
        return total

    def get_fitness(self):
        """Fitness is the inverse tour length: shorter tours are fitter."""
        return 1 / self.route_distance
class GeneticAlgorithm():
    """Genetic algorithm for the travelling-salesman problem.

    Operates on Instance objects (candidate tours over cities_list) using the
    classic scheme: tournament selection, ordered crossover (OX1), random
    swap mutation, and elitism.
    """
    def __init__(self, population_size, mat_pool_size, tournament_size, elite_size, max_generations, crossover_rate, mutation_rate, cities_list):
        self.population_size = population_size
        self.mat_pool_size = mat_pool_size
        self.tournament_size = tournament_size
        self.elite_size = elite_size
        self.max_generations = max_generations
        # NOTE(review): crossover_rate is stored but never consulted by
        # crossover(); every selected pair currently produces a child.
        self.crossover_rate = crossover_rate
        self.mutation_rate = mutation_rate
        self.cities_list = cities_list
    def generate_instance(self):
        """Return a new Instance over a random permutation of all cities."""
        return Instance(random.sample(self.cities_list, len(self.cities_list)))
    def create_initial_population(self):
        """Step 1: build population_size random tours."""
        return [self.generate_instance() for _ in range(self.population_size)]
    def crossover(self, p1, p2):
        """Ordered crossover (OX1): keep a random slice of p1 in place and
        fill the remaining positions with p2's cities in their p2 order.
        (Method survey: https://www.hindawi.com/journals/cin/2017/7430125/)
        """
        # Redraw bounds until the slice is non-empty (hi > li); requires
        # routes of length >= 2.
        li = 0
        hi = 0
        while hi <= li:
            li = int(random.random() * len(p1.route))
            hi = int(random.random() * len(p1.route))
        chunk = p1.route[li:hi]
        remainder = [city for city in p2.route if city not in chunk]
        # p2's leftover cities fill the positions around the inherited slice.
        child_route = remainder[:li] + chunk + remainder[li:]
        return Instance(child_route)
    def mutate(self, instance):
        """Step 5: with probability mutation_rate, swap two random positions
        of the route in place.

        NOTE(review): the cached route_distance/fitness of *instance* are not
        recomputed after the swap (unchanged from the original behaviour).
        """
        route = instance.route
        if random.random() < self.mutation_rate:
            i1, i2 = random.sample(range(len(self.cities_list)), 2)
            route[i1], route[i2] = route[i2], route[i1]
    def selection(self, population):
        """Step 3: tournament selection -- repeatedly draw tournament_size
        random instances and keep the fittest, until the pool is full."""
        mating_pool = []
        while len(mating_pool) < self.mat_pool_size:
            participants = random.sample(population, self.tournament_size)
            mating_pool.append(max(participants, key=operator.attrgetter('fitness')))
        return mating_pool
    def generate_path(self):
        """Run the GA for max_generations generations and return the best
        (shortest) route distance found.

        Fix: the previous version always returned the hard-coded 0.
        """
        population = self.create_initial_population()
        for generation in range(self.max_generations):
            print(f"generation number: {generation}")
            mating_pool = self.selection(population)
            # Elitism: the top elite_size tours survive unchanged.
            ranked = sorted(population, key=lambda inst: inst.fitness, reverse=True)
            new_population = ranked[:self.elite_size]
            # Fill the rest of the next generation with mutated offspring.
            while len(new_population) < self.population_size:
                parents = random.sample(mating_pool, 2)
                child = self.crossover(parents[0], parents[1])
                self.mutate(child)
                new_population.append(child)
            population = new_population
        best = max(population, key=operator.attrgetter('fitness'))
        return best.route_distance
# parses command line arguments
#is mating pool size also a hyperparameter???????
def parse_arguments():
    """Parse the GA hyper-parameters from the command line.

    Returns the tuple (input path, population size, mating-pool size,
    tournament size, elite size, max generations, crossover rate,
    mutation rate).
    """
    # (flag, type, default, help) for every option, in output order.
    options = (
        ('-p', str, "a280.tsp", "path to the input file"),
        ('-s', int, 50, "population size"),
        ('-ms', int, 25, "mating pool size"),
        ('-ts', int, 5, "tournament size"),
        ('-e', int, 20, "elite_size"),
        ('-mg', int, 50, "max generations"),
        ('-cr', float, 0.3, "crossover rate"),
        ('-mr', float, 0.1, "mutation rate"),
    )
    parser = argparse.ArgumentParser()
    for flag, kind, default, text in options:
        parser.add_argument(flag, type=kind, default=default, help=text)
    args = parser.parse_args()
    return args.p, args.s, args.ms, args.ts, args.e, args.mg, args.cr, args.mr
# parses file, returns list of city coordinates(ex: [(x1, y1), ...])
def parse(file_path):
    """Parse a TSPLIB-style .tsp file.

    Coordinates start on the 7th line and run to EOF; each data line is
    "<index> <x> <y>".  Returns a list of (index, x, y) tuples.

    Fix: the coordinates are now converted to float.  They were previously
    kept as strings, which made City.get_distance_to raise a TypeError
    ("str ** 2") as soon as a distance was computed.
    """
    cities = []
    with open(file_path) as f:  # 'with' guarantees the handle is closed
        for _ in range(6):  # skip the 6 header lines
            f.readline()
        for line in f:
            parts = line.split()
            if len(parts) == 3:  # skips the trailing "EOF" marker line
                cities.append((parts[0], float(parts[1]), float(parts[2])))
    return cities
def create_cities(coordinates_list):
    """Build a City for every (index, x, y) tuple produced by parse()."""
    return [City(entry[0], entry[1], entry[2]) for entry in coordinates_list]
def main():
    """Script entry point: read the hyper-parameters from the CLI, load the
    TSP instance, and construct the GA solver.

    The eight debug print() calls that echoed every parameter were removed,
    as the original '#delete prints' marker requested.
    """
    path, population_size, mat_pool_size, tournament_size, elite_size, max_generations, crossover_rate, mutation_rate = parse_arguments()
    coordinates_list = parse(path)
    cities_list = create_cities(coordinates_list)
    gen_algo = GeneticAlgorithm(population_size, mat_pool_size, tournament_size, elite_size, max_generations, crossover_rate, mutation_rate, cities_list)
    # TODO: run the solver once the pipeline is verified end to end:
    # distance = gen_algo.generate_path()
    # print(distance)
if __name__ == "__main__":
main() | [
"shynar@mindslab.ai"
] | shynar@mindslab.ai |
5f0cdf0d2784b52685ced9bb53eddefc839f98bf | b89ae940c8982baa3067fb15279c9c1c40b8f2e7 | /2020-04-23 Residual Block and ROI pooling Pytorch/1_Residual Block/wrong1.py | e815b3ca1748e5bb4300b8924bfee3e8d095f730 | [] | no_license | albertovitto/VCS | c04e7a7d95a5b8193266dd1fb3390da0224a6335 | 4286596ee7d3e627f9f2681770f72e8d68150a59 | refs/heads/master | 2022-12-17T02:13:20.463482 | 2020-09-19T11:04:58 | 2020-09-19T11:04:58 | 242,100,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,305 | py | import torch
import torch.nn as nn
class ResidualBlock(nn.Module):
def __init__(self, inplanes, planes, stride=1):
super(ResidualBlock, self).__init__()
self.stride, self.inplanes, self.planes = stride, inplanes, planes
self.conv1 = nn.Conv2d(
inplanes, planes, kernel_size=3, stride=stride, padding=0, bias=False
)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(
planes, planes, kernel_size=3, stride=1, padding=0, bias=False
)
self.bn2 = nn.BatchNorm2d(planes)
def forward(self, x):
residual = x
y = self.conv1(x)
y = self.bn1(y)
y = self.relu(y)
y = self.conv2(y)
y = self.bn2(y)
if (self.stride > 1) or (self.inplanes != self.planes):
residual = nn.Sequential(
nn.Conv2d(
self.inplanes,
self.planes,
kernel_size=1,
stride=self.stride,
padding=0,
bias=False,
),
nn.BatchNorm2d(self.planes),
)
residual = torch.Tensor(residual)
y += residual
y = self.relu(y)
return y
| [
"albertovitto16@gmail.com"
] | albertovitto16@gmail.com |
64b0ec5dfdc7b0d1977c397a0e8e10ae0763203e | 9789753e890cd519ee9c27fb358d96744fdf14db | /tieba_autoanswer.py | bcfd3b28c1bd4c94bcc60f28f928e239dc10450f | [] | no_license | txb1989hehe/shtxbDB | a3c65755344303a9fa73e4eea81db461dfa3db10 | ab147b0ea9fe0e93fb5a74aa228a5485b19311ce | refs/heads/master | 2021-04-12T08:28:38.567434 | 2018-10-18T08:50:15 | 2018-10-18T08:50:15 | 126,261,803 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,616 | py | # ---------- nothing is impossiable -------------
# __author__ = Luke_Tang
# __time__ = 2018.10.17
# __main__ = 百度貼吧自動回復增加經驗值
# -----------------------------------------------
from selenium import webdriver
import time
from urllib.parse import quote
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as ec
from selenium.webdriver.common.by import By
import re
from selenium.common.exceptions import TimeoutException
import random
KEYWORD = '魔靈召喚'
offset = 0
url = 'https://tieba.baidu.com/f?kw=' + quote(KEYWORD)+ '&pn=' + str(offset)
brower = webdriver.Chrome()
brower.get(url)
brower.maximize_window()
brower.delete_all_cookies()
wait = WebDriverWait(brower,20)
brower.find_element_by_css_selector('.u_login > div:nth-child(1) > a:nth-child(1)').click()
time.sleep(2)
login = wait.until(ec.element_to_be_clickable((By.ID,'TANGRAM__PSP_11__footerULoginBtn')))
login.click()
# print(login)
user = wait.until(ec.presence_of_element_located((By.ID,'TANGRAM__PSP_11__userName')))
user.send_keys('txb1989hehe')
psword = wait.until(ec.presence_of_element_located((By.ID,'TANGRAM__PSP_11__password')))
psword.send_keys('kuang803#')
login_button = wait.until(ec.element_to_be_clickable((By.ID,'TANGRAM__PSP_11__submit')))
login_button.click()
time.sleep(2)
try:
quit_button = wait.until(ec.element_to_be_clickable((By.ID,'TANGRAM__26__header_a')))
quit_button.click()
time.sleep(2)
except NoSuchElementException:
pass
count = 0
while True:
count =+ 1
new_url = brower.find_element_by_xpath('//*[@id="thread_list"]/li[2]/div/div[2]/div[1]/div[1]/a').get_attribute('href')
# print(brower.window_handles)
str_cookie = 'TIEBA_USERTYPE=fa1d9a0fa7623ca6978c0a06; TIEBAUID=628f5f105828c41f45750eea; ' \
'rpln_guide=1; bdshare_firstime=1516933161145; __' \
'cfduid=da12aac6da48e8df217b5dc8f86419fcc1524019599; ' \
'Hm_lvt_287705c8d9e2073d13275b18dbd746dc=1536546604,1536546795,1537343585,1538038213; ' \
'BAIDUID=F2B750422FF031BC4A21E67A67D25F0C:FG=1; PSTM=1538620169; ' \
'BIDUPSID=C253C687607D8956C42898BEBB81CF1C; ' \
'BDORZ=B490B5EBF6F3CD402E515D22BCDA1598; ' \
'H_PS_PSSID=1469_21088_20927; wise_device=0; 189532523_FRSVideoUploadTip=1; ' \
'Hm_lvt_98b9d8c2fd6608d564bf2ac2ae642948=1539682858,1539758862,1539760381,1539764529; ' \
'baidu_broswer_setup_txb1989hehe=0; ' \
'BDUSS=FlY0pQc3VkcmpXa3ZQM2tkWDIwRWFYVW9rU1l4UndWS2pGcHdjbFhPZy1YdTliQVFBQUFBJCQAAAAAAAAAAAEAAABrCUwLdHhiMTk4OWhlaGUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAD7Rx1s-0cdbNG; ' \
'STOKEN=1154be55c84aa0e2a6a90fa3c0fc1f01e2f9981735f908a92aee6f0be3320681; ' \
'Hm_lpvt_98b9d8c2fd6608d564bf2ac2ae642948=1539823039; delPer=0; PSINO=3'
lst = re.findall('([\s\S]*?)=([\s\S]*?); ', str_cookie+'; ')
for i in lst:
ck = {'name':i[0],'value':i[1]}
# print(ck)
brower.add_cookie(ck)
if len(brower.window_handles) < 2:
brower.execute_script('window.open()')
brower.switch_to_window(brower.window_handles[1])
else:
brower.switch_to_window(brower.window_handles[1])
brower.get(new_url)
# print(brower.get_cookies())
new_wait = WebDriverWait(brower,20)
# brower.execute_script('window.scrollTo(0, document.body.scrollHeight)')
content = ['殺不盡的歐洲狗,我只能說66666666',
'大水熊的歐氣在號召你們~',
'貼吧里的人越來越少,哎']
new_wait.until(ec.element_to_be_clickable((By.ID,'ueditor_replace'))).click()
js = "document.getElementById('ueditor_replace').innerHTML='"+ random.choice(content) + "'"
brower.execute_script(js)
# brower.find_element_by_xpath('//*[@id="ueditor_replace"]/p').send_keys(content)
time.sleep(3)
try:
brower.execute_script('window.scrollTo(0, document.body.scrollHeight)')
bt = new_wait.until(ec.element_to_be_clickable((By.XPATH, '//*[@class="j_floating"]/a')))
bt.click()
# print(bt.text)
# brower.find_element_by_css_selector('.lzl_panel_submit').click()
time.sleep(50)
print('回帖成功,回帖數量:' + str(count))
# new_wait.until(ec.element_to_be_clickable((By.CSS_SELECTOR, '.lzl_panel_submit'))).click()
# brower.find_element_by_css_selector('.lzl_panel_submit').click()
except TimeoutException:
print('回復失敗~')
brower.close()
brower.switch_to_window(brower.window_handles[0])
brower.refresh()
# print(brower.window_handles)
# brower.close()
| [
"noreply@github.com"
] | noreply@github.com |
63901810b92d4e20b3098301bbd464048df71d71 | 0d303fb64dc9545394df739159630291030c0bc3 | /game.py | 8f35f84dd148d77834130a97f008c7b218b23d65 | [] | no_license | S-Rawlani/Snake-game | 561a0f6768e0f6a11dcc23e1de6495136bb1428a | 7ade28b343349013ce68a5c89bb7273cf422bd69 | refs/heads/master | 2020-03-22T16:46:26.306035 | 2018-07-09T23:00:33 | 2018-07-09T23:00:33 | 140,350,288 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,335 | py | import random
import curses
s= curses.initscr() #initialize screen
curses.curs_set(0)
sh, sw= s.getmaxyx() #width and height of window
w= curses.newwin(sh,sw,0,0)
w.keypad(1)
w.timeout(100)
snk_x=sw/4
snk_y=sh/2
snake=[
[snk_y,snk_x], #bodyparts of snake
[snk_y, snk_x-1],
[snk_y, snk_x-2]
]
food= [sh//2, sw//2] #food is at the center of the screen
w.addch(food[0], food[1], curses.ACS_PI) #adding food to the screen. here food is pie
key= curses.KEY_RIGHT #initial movement of snake to right
while True:
next_key= w.getch()
key= key if next_key== -1 else next_key
#hloosing condition
if snake[0][0] in [0,sh] or snake[0][1] in [0,sw] or snake[0] in snake[1:]:
curses.endwin()
quit()
new_head= [snake[0][0], snake[0][1]] #new head of snake
if key== curses.KEY_DOWN:
new_head[0]+= 1
if key== curses.KEY_UP:
new_head[0]-= 1
if key== curses.KEY_LEFT:
new_head[1]-= 1
if key== curses.KEY_RIGHT:
new_head[1]+= 1
snake.insert(0, new_head)
if snake[0]== food:
food=None
while food is None:
nf= [
random.randint(1,sh-1),
random.randint(1,sw-1)
]
food= nf if nf not in snake else None
w.addch(food[0], food[1], curses.ACS_PI)
else:
tail= snake.pop()
w.addch(int(tail[0]), int(tail[1]), ' ')
w.addch(int(snake[0][0]), int(snake[0][1]), curses.ACS_CKBOARD)
| [
"srawlani.30@gmail.com"
] | srawlani.30@gmail.com |
255610234d501625fe610cb167ad316e94a03388 | f901ee260d5cb8ec2d1f295aa9221e58e402dc66 | /gui.py | e9a6f40162be810c3b1ee585cabca3d4b47c49ab | [
"Apache-2.0"
] | permissive | mgp/huger-status-monitor | 3500d1852aeaeb6fd25957eeadbebfd72944c9b8 | 0064a416bd401c9f179558d0b2aa54edd581ee92 | refs/heads/master | 2020-05-24T15:42:10.994685 | 2014-01-16T00:01:11 | 2014-01-16T00:01:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,118 | py | import sys
from PyQt5.QtWidgets import QApplication, QInputDialog, QMainWindow
from collections import deque
from datetime import datetime, timedelta
def _make_hbox(*widgets):
"""Returns an QHBoxLayout with the given widgets."""
hbox = QHBoxLayout()
for widget in widgets:
hbox.addWidget(widget)
return hbox
def _show_error_message(parent, error_message):
"""Displays a QErrorMessage with the given error message."""
error_message = QErrorMessage(parent)
error_message.ShowMessage(error_message)
class ConnectDialog(QtGui.QWidget):
"""The server connection dialog upon startup."""
def __init__(self):
super(ConnectDialog, self).__init__()
self._output_filename = None
self._show_ui()
def _show_file_dialog(self):
self._output_filename = QFileDialog.getOpenFileName(self, 'Choose output file')
def _show_ui(self):
vbox = QtGui.QVBoxLayout()
# Add the server text field.
server_label = QLabel('Server')
self._server_edit = QLineEdit()
vbox.addLayout(_make_hbox(server_label, self._server_edit))
# Add the polling rate text field.
poll_label = QLabel('Seconds/poll')
self._poll_slider = QSlider()
self._poll_slider.setRange(5, 60)
self._poll_slider.setTickInterval(1)
vbox.addLayout(_make_hbox(poll_label, self._poll_slider))
# Add the output file chooser.
output_label = QLabel('Ouptut')
self._output_edit = QLineEdit()
vbox.addLayout(_make_hbox(output_label, self._output_edit))
vbox.addStretch(1)
# Add the Connect and Quit buttons.
self._connect_button = QtGui.QPushButton("Connect")
self._quit_button = QtGui.QPushButton("Quit")
hbox = QHBoxLayout()
hbox.addStretch(1)
hbox.addWidget(self._connect_button)
hbox.addWidget(self._quit_button)
vbox.addLayout(hbox)
# Configure this window.
self.setLayout(vbox)
self.setGeometry(300, 300, 300, 150)
self.setWindowTitle('Connect to Server')
self.show()
def _validate():
# Validate the server address.
server_parts = server_label.text().strip().split(':')
if len(parts) != 2:
_show_error_message(self, "Server must have form [address]:[port]")
return
server_address, server_port = server_parts
poll_length = self._poll_slider.value()
# Validate the output filename.
if self._output_filename == None:
_show_error_message(self, "Must choose an output file")
return
elif os.path.isdir(self._output_filename):
_show_error_message(self, "Cannot choose a directory")
return
# TODO: Move on.
class PlayerRow:
def _empty_label(self):
return QLabel("-")
def __init__(self, player_name):
self._player_name = player_name
self._use_checkbox = QCheckBox(self._player_name)
# Labels for the statistics.
self._total_label = self._empty_label()
self._avg_label = self._empty_label()
self._stddev_label = self._empty_label()
self._new_label = self._empty_label()
# Labels for the player ranks.
self._new_rank_label = self._empty_label()
self._stddev_rank_label = self._empty_label()
self._combined_rank_label = self._empty_label()
def update_stats(self, stats):
"""Updates the statistics for this player."""
self._total_label.setText(stats.total())
self._avg_label.setText(stats.avg())
self._stddev_label.setText(stats.std_dev())
self._new_label.setText(stats.new())
def _get_rank_text(self, rank):
# Create text.
if rank == 1:
text = "1<sup>st</sup>"
elif rank == 2:
text = "2<sup>nd</sup>"
elif rank == 3:
text = "3<sup>rd</sup>"
else:
text = "%s<sup>th</sup>" % rank
# Color text.
if rank == 1:
text = "<font color=\"green\">%s</font>" % text
return text
def update_poll(self, new_rank, stddev_rank, combined_rank):
"""Updates the ranks for this player."""
self._new_rank_label.setText(self._get_rank_text(new_rank))
self._stddev_rank_label.setText(self._get_rank_text(stddev_rank))
self._combined_rank_label.setText(self._get_rank_text(combined_rank))
def add_to_row(self, row, grid):
"""Adds this player to the given grid at the given row."""
for column, widget in enumerate((
self._use_checkbox,
self._total_label,
self._avg_label,
self._stddev_label,
self._new_label,
self._new_rank_label,
self._stddev_rank_label,
self._combined_rank_label)):
grid.addWidget(widget, row, column)
ObservedPlayer = namedtuple('ObservedPlayer', ['name', 'deque_time'])
class ServerMonitor(QMainWindow):
# Enum value for updating the Monitor instance.
_UPDATE_MONITOR = 'update_monitor'
# Enum value for updating the file containing the best player to spectate.
_UPDATE_SPEC = 'update_spec'
def __init__(self, server_address):
super(ServerMonitor, self).__init__()
self._server_address = server_address
self._show_ui()
self._obs_player_queue = deque()
self._next_update_monitor_time = None
self._next_update_spec_time = None
self._next_update_timer = None
def _get_next_action(self):
"""Returns the next action that the update thread should perform."""
if self._next_update_monitor_time and self._next_update_spec_time:
# Return the time that is closer in the future.
if self._next_update_monitor_time < self._next_update_spec_time:
return ServerMonitor._UPDATE_MONITOR
else:
return ServerMonitor._UPDATE_SPEC
elif self._next_update_monitor_time:
return ServerMonitor._UPDATE_MONITOR
elif self._next_update_spec_time:
return ServerMonitor._UPDATE_SPEC
else:
return None
def _get_seconds_until_next_action(self, action, now=None):
"""Returns the time in seconds until running the given action.
This method returns None if there is no action."""
if not action:
return None
if now is None:
now = datetime.utcnow()
if action == ServerMonitor._UPDATE_MONITOR:
# Return the time until we should update the monitor.
return (self._next_update_monitor_time - now).total_seconds()
elif action == ServerMonitor._UPDATE_SPEC:
# Return the time until we should update the best spectator.
return (self._next_update_spec_time - now).total_seconds()
else:
raise ValueError('Invalid action: %s' % action)
def _schedule_next_update_timer(self, seconds_until):
self._next_update_timer = Timer(seconds_until, self._update)
def _update_spec_file(self):
"""Writes the best player to spectate to the file."""
if not self._obs_player_queue:
return
# Write the player name to the file.
obs_player = self._obs_player_queue.popleft()
with open(self._output_filename, 'w') as f:
f.write('spec_player "%s"' % player_name)
# Update the next time at which this method should be run.
if self._obs_player_queue:
self._next_update_spec_time = self._obs_player_queue[0].deque_time
else:
self._next_update_spec_time = None
def _update(self):
"""The method that the update thread executes."""
while True:
next_action = self._get_next_action()
if not next_action:
# No next action to execute, so exit.
break
seconds_until_next_action = self._get_time_until_next_action()
if seconds_until_next_action > 0:
# Run this action at the given time in the future.
self._schedule_next_update_timer(seconds_until_next_action)
break
# Run the next action now.
if next_action == ServerMontior._UPDATE_MONITOR:
# TODO
pass
elif next_action == ServerMonitor._UPDATE_SPEC:
self._update_spec_file()
def _show_ui(self):
self._weight_slider = QSlider()
self._weight_slider.setRange(0, 100)
self._weight_slider.setTickInterval(25)
def _populate_grid(self):
self._grid = QGridLayout()
self.setLayout(grid)
def main():
app = QApplication(sys.argv)
server_address, success = QInputDialog.getText(
None, 'Connect to server', 'Enter server address:')
if success:
print 'server_address=%s' % server_address
sys.exit(app.exec_())
if __name__ == '__main__':
main()
| [
"michael.g.parker@gmail.com"
] | michael.g.parker@gmail.com |
bb5e27e12fcd57aa9ed9dcc922c61ab48228b112 | ca0a353659fa96abb4150417e036e194572911d9 | /agendavenv/bin/django-admin | 7b41f936fb831d4f3a2d4d7044bed3282d4b6c1f | [] | no_license | serleonc/experimento-git | 30b6f4aba6050bc8b987e7e9c036e9a1d9bf714a | ec0222a77ac6717b78a9770779eeb6c06678ddbf | refs/heads/master | 2021-01-12T18:26:36.646484 | 2016-10-19T16:25:59 | 2016-10-19T16:25:59 | 71,377,506 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 296 | #!/home/aqtiva/Documentos/agendavenv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from django.core.management import execute_from_command_line
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(execute_from_command_line())
| [
"serleonc@gmail.com"
] | serleonc@gmail.com | |
97c91dcfae21a4d82f6a3a6dbe04691efd0046d0 | 523c390bef6593b58a99ccbed9dca14bf69da116 | /calc/app.py | d2ac4e093dbecd8933be439bb63a477c42bdce09 | [] | no_license | LLGwinn/flask-greet-calc | 32adf0b050619868f308a3ee1f6075553324d3b2 | 1b9200f02741b818f9c40ec083b0a234d423025a | refs/heads/master | 2023-07-26T05:02:17.171890 | 2021-09-13T22:56:25 | 2021-09-13T22:56:25 | 406,153,302 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,347 | py | from flask import Flask, request
from operations import add, sub, mult, div
app = Flask(__name__)
@app.route('/add')
def add_nums():
""" Multiply search parameters a, b and return result """
a = int(request.args.get('a'))
b = int(request.args.get('b'))
sum = add(a, b)
return str(sum)
@app.route('/sub')
def subtract_nums():
""" Subtract search parameters a, b and return result """
a = int(request.args.get('a'))
b = int(request.args.get('b'))
difference = sub(a, b)
return str(difference)
@app.route('/mult')
def multiply_nums():
""" Multiply search parameters a, b and return result """
a = int(request.args.get('a'))
b = int(request.args.get('b'))
product = mult(a, b)
return str(product)
@app.route('/div')
def divide_nums():
""" Divide search parameters a, b and return result """
a = int(request.args.get('a'))
b = int(request.args.get('b'))
dividend = div(a, b)
return str(dividend)
# FURTHER STUDY
opers = {
'add' : add(a, b),
'sub' : sub(a, b),
'mult' : mult(a, b),
'div' : div(a, b)
}
@app.route('/math/<oper>')
def do_math(oper):
""" Perform math operation based on path parameter and search parameters a, b """
a = int(request.args.get('a'))
b = int(request.args.get('b'))
return str(opers[oper])
| [
"ells.gwinn@gmail.com"
] | ells.gwinn@gmail.com |
742f5ac79f5d96da0a8637746f3ce813ab3367c1 | 3a2999fe1324ee8566f6df7073b3f29006578f78 | /AsyncLine/handler.py | a211c2eae9b2d9b25f42e9ad5b4f4c0537091e11 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | Alnyz/AsyncLine | ea168e706131ecf509f7fcdc24828acc458e4579 | e20453cf41afb9861103b67f9d8f33756806df7f | refs/heads/master | 2023-07-16T12:57:18.573907 | 2023-07-10T06:39:39 | 2023-07-10T06:39:39 | 198,466,683 | 1 | 0 | MIT | 2023-07-10T06:39:41 | 2019-07-23T16:16:41 | Python | UTF-8 | Python | false | false | 958 | py | from .filters import Filter
import AsyncLine
class BaseClient:
def add_handler(self, *args, **kwgs):
pass
class Handler:
def __init__(self, callback: callable, filters=None):
self.callback = callback
self.filters = filters
class MessageHandler(Handler):
def __init__(self, callback: callable, filters=None):
super().__init__(callback, filters)
class HookMessage(BaseClient):
def hooks(self=None, filters=None, type: int = 0):
def decorator(func):
if isinstance(self, AsyncLine.Client):
self.add_handler(type, MessageHandler(func, filters).callback, filters)
elif isinstance(self, Filter) or self is None:
func.line_plugin = (
MessageHandler(func, filters), type)
try:
return func.line_plugin
except AttributeError:
return func
return decorator
class Methods(HookMessage):
pass | [
"katro.coplax@gmail.com"
] | katro.coplax@gmail.com |
fd012d58c9ab68256ae74c1857f9758c98a77d18 | a0be30533cbbdc12dba5fdfa8d61088f0845c185 | /src/cost_func.py | e11d792bae6fb9a15eec6d2fe4a12c5aefe2f3a6 | [] | no_license | mmssouza/cbir | 1fbf2a115a799e683d86bd43e85742b8c7a52cef | 372d67c1b16abaa682802fce547f5f028b99fb4a | refs/heads/master | 2021-06-04T07:35:27.468533 | 2017-10-25T13:28:09 | 2017-10-25T13:28:09 | 37,946,123 | 0 | 1 | null | 2017-10-05T12:56:25 | 2015-06-23T21:29:50 | Python | UTF-8 | Python | false | false | 1,225 | py | #!/usr/bin/python
import subprocess
import shlex
import tempfile
import os
dist = "JS"
path = "../datasets/1400_mpeg7/"
def cost_func(args):
args = shlex.split(str(args.tolist()).lstrip('[').rstrip(']'))
args = [a.strip(',') for a in args]
#aii_args = args[0:4]
#curv_args = args[4:8]
#angle_args = args[8:13]
#cd_args = args[13:17]
curv_args = args[0:4]
angle_args = args[4:7]
cd_args = args[7:9]
rank_args = args[9:12]
tmp0 = tempfile.NamedTemporaryFile(suffix ='.pkl',dir='/tmp',delete = False)
tmp1 = tempfile.NamedTemporaryFile(suffix ='.pkl',dir='/tmp',delete = False)
tmp2 = tempfile.NamedTemporaryFile(suffix ='.pkl',dir='/tmp',delete = False)
#print "passo 1 - Extracao caracteristicas"
p_curv = subprocess.Popen(['./gera_curvatura_sig.py',path]+curv_args+[tmp0.name])
p_angle = subprocess.Popen(['./gera_angle_sig.py',path]+angle_args+[tmp1.name])
p_cd = subprocess.Popen(['./gera_cd_sig.py',path]+cd_args+[tmp2.name])
p_cd.wait()
p_angle.wait()
p_curv.wait()
# print "passo 2 - Bull eye"
res = subprocess.check_output(['./rank40.py',tmp0.name,tmp1.name,tmp2.name,dist]+rank_args)
os.remove(tmp0.name)
os.remove(tmp1.name)
os.remove(tmp2.name)
# print res
return float(res)
| [
"marcelo.mssouza@gmail.com"
] | marcelo.mssouza@gmail.com |
2cf9a70811ec72e263f4b6c70a8cc1bbd65c0e75 | 830acb926cc5cf5a12f2045c8497d6f4aa1c2ef2 | /HyperNews Portal/Problems/Limitations/task.py | 4a672123fc6df90e8780fdc4bcd6bac7ef3634d1 | [] | no_license | BuyankinM/JetBrainsAcademyProjects | ca2223875ea4aab3ee7fceedc8e293bdb6e1fdcf | d5f9fcde4298af714960b2755f762141de796694 | refs/heads/main | 2023-02-26T05:47:26.070972 | 2021-02-03T22:10:53 | 2021-02-03T22:10:53 | 335,762,702 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 77 | py | import math
real_number = float(input())
check = math.isfinite(real_number) | [
"mbuyankin@gmail.com"
] | mbuyankin@gmail.com |
851943613ee8d23464eee2cc06ee144417c8ba6b | d39aebf96919b55d1a2a6b6f33e5514752af8269 | /hspo-kg-builder/data-lifting/mimic/notes_cui_extraction/map_extracted_umls_files.py | 33bd062fdf75be8e851a29da34a2540e18e202dc | [
"Apache-2.0"
] | permissive | IBM/hspo-ontology | 7f4bdb64d7aaad592bbbf31f303a94654536dc3a | 14541ec322a7d7588287da8e20dcc283a88b8b05 | refs/heads/main | 2023-08-30T23:53:37.862110 | 2023-08-30T15:56:33 | 2023-08-30T15:56:33 | 542,597,882 | 31 | 5 | Apache-2.0 | 2022-10-12T12:30:43 | 2022-09-28T13:13:34 | null | UTF-8 | Python | false | false | 1,009 | py | import argparse
import os
from utils_ import find_json_files, read_json, save_json
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--umls_codes_path", default=None, type=str, required=True,
help = "The path where the extracted UMLS codes are stored.")
parser.add_argument("--output_path", default=None, type=str, required=True,
help = "The path where the task-valid extracted UMLS files are going to be stored.")
args = parser.parse_args()
extracted_files = find_json_files(args.umls_codes_path)
mapping = read_json('../data/processed_data/key_mapping_from_total_to_task_valid.json')
if not os.path.exists(args.output_path):
os.makedirs(args.output_path)
for ex_f in extracted_files:
ex_f_dict = read_json(args.umls_codes_path + ex_f + '.json')
try:
save_json(ex_f_dict, args.output_path + mapping[ex_f] + '.json')
except:
pass | [
"christheodoropoulos42@gmail.com"
] | christheodoropoulos42@gmail.com |
0b13a187ec32ce7aa897761988d4c15a6c652734 | ab3d5ea4bf0e48914ed14fcf16e5b1d752f199ba | /pcg_libraries/src/pcg_gazebo/parsers/sdf/pose.py | c59190b53c6490da0a3566d7aeb02a72a6f5997d | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"CC0-1.0",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-public-domain"
] | permissive | boschresearch/pcg_gazebo_pkgs | 5f1004d0de874d4d1abc4eb695777013027158b2 | 1c112d01847ca4f8da61ce9b273e13d13bc7eb73 | refs/heads/master | 2020-06-11T06:28:36.228431 | 2020-02-07T13:05:28 | 2020-02-07T13:05:28 | 193,876,180 | 44 | 3 | NOASSERTION | 2020-02-07T12:00:55 | 2019-06-26T09:45:05 | Python | UTF-8 | Python | false | false | 2,348 | py | # Copyright (c) 2019 - The Procedural Generation for Gazebo authors
# For information on the respective copyright owner see the NOTICE file
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..types import XMLVector
class Pose(XMLVector):
_NAME = 'pose'
_TYPE = 'sdf'
_ATTRIBUTES = dict(
frame=''
)
def __init__(self):
XMLVector.__init__(self, 6)
self.reset()
@property
def frame(self):
return self.attributes['frame']
@frame.setter
def frame(self, value):
assert isinstance(value, str)
self.attributes['frame'] = value
@property
def pose(self):
return self.value
@pose.setter
def pose(self, value):
XMLVector._set_value(self, value)
@property
def x(self):
return self.value[0]
@x.setter
def x(self, value):
assert self._is_scalar(value)
self.value[0] = float(value)
@property
def y(self):
return self.value[1]
@y.setter
def y(self, value):
assert self._is_scalar(value)
self.value[1] = float(value)
@property
def z(self):
return self.value[2]
@z.setter
def z(self, value):
assert self._is_scalar(value)
self.value[2] = float(value)
@property
def roll(self):
return self.value[3]
@roll.setter
def roll(self, value):
assert self._is_scalar(value)
self.value[3] = float(value)
@property
def pitch(self):
return self.value[4]
@pitch.setter
def pitch(self, value):
assert self._is_scalar(value)
self.value[4] = float(value)
@property
def yaw(self):
return self.value[5]
@yaw.setter
def yaw(self, value):
assert self._is_scalar(value)
self.value[5] = float(value)
| [
"Musa.Marcusso@de.bosch.com"
] | Musa.Marcusso@de.bosch.com |
ebebad6c7731a9504ee607513be35017d718188d | 8a5ab3d33e3b653c4c64305d81a85f6a4582d7ac | /PySide/QtCore/QPointF.py | 2c350c353ea530cc837989441679f71325e61e69 | [
"Apache-2.0"
] | permissive | sonictk/python-skeletons | be09526bf490856bb644fed6bf4e801194089f0d | 49bc3fa51aacbc2c7f0c7ab86dfb61eefe02781d | refs/heads/master | 2020-04-06T04:38:01.918589 | 2016-06-09T20:37:43 | 2016-06-09T20:37:43 | 56,334,503 | 0 | 0 | null | 2016-04-15T16:30:42 | 2016-04-15T16:30:42 | null | UTF-8 | Python | false | false | 3,951 | py | # encoding: utf-8
# module PySide.QtCore
# from /corp.blizzard.net/BFD/Deploy/Packages/Published/ThirdParty/Qt4.8.4/2015-05-15.163857/prebuilt/linux_x64_gcc41_python2.7_ucs4/PySide/QtCore.so
# by generator 1.138
# no doc
# no imports
from _Object import _Object
class QPointF(_Object):
# no doc
def isNull(self, *args, **kwargs): # real signature unknown
pass
def manhattanLength(self, *args, **kwargs): # real signature unknown
pass
def setX(self, *args, **kwargs): # real signature unknown
pass
def setY(self, *args, **kwargs): # real signature unknown
pass
def toPoint(self, *args, **kwargs): # real signature unknown
pass
def toTuple(self, *args, **kwargs): # real signature unknown
pass
def x(self, *args, **kwargs): # real signature unknown
pass
def y(self, *args, **kwargs): # real signature unknown
pass
def __add__(self, y): # real signature unknown; restored from __doc__
""" x.__add__(y) <==> x+y """
pass
def __copy__(self, *args, **kwargs): # real signature unknown
pass
def __div__(self, y): # real signature unknown; restored from __doc__
""" x.__div__(y) <==> x/y """
pass
def __eq__(self, y): # real signature unknown; restored from __doc__
""" x.__eq__(y) <==> x==y """
pass
def __ge__(self, y): # real signature unknown; restored from __doc__
""" x.__ge__(y) <==> x>=y """
pass
def __gt__(self, y): # real signature unknown; restored from __doc__
""" x.__gt__(y) <==> x>y """
pass
def __iadd__(self, y): # real signature unknown; restored from __doc__
""" x.__iadd__(y) <==> x+=y """
pass
def __init__(self, *more): # real signature unknown; restored from __doc__
""" x.__init__(...) initializes x; see help(type(x)) for signature """
pass
def __isub__(self, y): # real signature unknown; restored from __doc__
""" x.__isub__(y) <==> x-=y """
pass
def __le__(self, y): # real signature unknown; restored from __doc__
""" x.__le__(y) <==> x<=y """
pass
def __lt__(self, y): # real signature unknown; restored from __doc__
""" x.__lt__(y) <==> x<y """
pass
def __mul__(self, y): # real signature unknown; restored from __doc__
""" x.__mul__(y) <==> x*y """
pass
def __neg__(self): # real signature unknown; restored from __doc__
""" x.__neg__() <==> -x """
pass
def __ne__(self, y): # real signature unknown; restored from __doc__
""" x.__ne__(y) <==> x!=y """
pass
def __nonzero__(self): # real signature unknown; restored from __doc__
""" x.__nonzero__() <==> x != 0 """
pass
def __radd__(self, y): # real signature unknown; restored from __doc__
""" x.__radd__(y) <==> y+x """
pass
def __rdiv__(self, y): # real signature unknown; restored from __doc__
""" x.__rdiv__(y) <==> y/x """
pass
def __reduce__(self, *args, **kwargs): # real signature unknown
pass
def __repr__(self): # real signature unknown; restored from __doc__
""" x.__repr__() <==> repr(x) """
pass
def __rmul__(self, y): # real signature unknown; restored from __doc__
""" x.__rmul__(y) <==> y*x """
pass
def __rsub__(self, y): # real signature unknown; restored from __doc__
""" x.__rsub__(y) <==> y-x """
pass
def __rtruediv__(self, y): # real signature unknown; restored from __doc__
""" x.__rtruediv__(y) <==> y/x """
pass
def __sub__(self, y): # real signature unknown; restored from __doc__
""" x.__sub__(y) <==> x-y """
pass
def __truediv__(self, y): # real signature unknown; restored from __doc__
""" x.__truediv__(y) <==> x/y """
pass
__new__ = None
| [
"yliangsiew@blizzard.com"
] | yliangsiew@blizzard.com |
2c7e76ed5b0970288bb384219f3ee8396939b802 | 414667ed0107dfaa1c8d72acc4b50ab93e6e4ca9 | /tools/common/screen.py | 387f12e0752d580bc7b0bf870059af13c050a8d1 | [] | no_license | edpro/amp-firmware | c8d6d964f96cb3e13828c1c763ca16e7da987884 | dcae28dc4253bd2faa936cad2380418778fa80de | refs/heads/master | 2022-02-01T19:49:52.416154 | 2021-12-29T11:03:40 | 2021-12-29T11:03:40 | 182,949,978 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,147 | py | import ctypes
import os
win_console_initialized = False
def scr_init():
global win_console_initialized
if win_console_initialized:
return
# enable ANSI colors in Win10 console
if os.name == "nt":
kernel32 = ctypes.windll.kernel32
kernel32.SetConsoleMode(kernel32.GetStdHandle(-11), 7)
win_console_initialized = True
class Colors:
BLACK = '\033[30m'
RED = '\033[31m'
GREEN = '\033[32m'
YELLOW = '\033[33m'
BLUE = '\033[34m'
MAGENTA = '\033[35m'
CYAN = '\033[36m'
GRAY = '\033[37m'
LIGHT_RED = '\033[91m'
LIGHT_GREEN = '\033[92m'
LIGHT_YELLOW = '\033[93m'
LIGHT_BLUE = '\033[94m'
LIGHT_MAGENTA = '\033[95m'
LIGHT_CYAN = '\033[96m'
LIGHT_WHITE = '\033[97m'
RED_BG = '\033[1;41m'
RESET = '\033[0m'
def scr_print(msg: str, color: str):
print(f'{color}{msg}{Colors.RESET}')
def scr_prompt(msg: str):
return input(f"\n{Colors.CYAN}>> {msg}{Colors.RESET}")
def scr_clear():
if os.name == 'nt':
os.system('cls')
else:
os.system('clear')
def scr_pause():
input("Press <ENTER> to continue...")
| [
"canab.ua@gmail.com"
] | canab.ua@gmail.com |
8f70224e0b7fda45a9cd3bef0e6bf6072b8719a3 | 1640efbc4085f4df030ad6d6c5340d2679319437 | /degrees/util.py | fc4af7fd47ed21cbf6b12af2ea483e2a7791dee5 | [] | no_license | neel7202/AI50 | e982582f9a728270e4062aa84d0a215ff9adee58 | 6af1911f398519ad15b0a603b70af332ec1eb627 | refs/heads/main | 2023-08-17T08:50:04.337462 | 2021-09-21T05:13:05 | 2021-09-21T05:13:05 | 408,694,509 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 910 | py | class Node():
def __init__(self, movie, data, parent):
self.movie = movie
self.data = data
self.parent = parent
class StackFrontier():
    """Frontier for depth-first search: nodes leave in LIFO order."""

    def __init__(self):
        self.frontier = []

    def add(self, node):
        """Push *node* onto the frontier."""
        self.frontier.append(node)

    def contains_state(self, state):
        # NOTE(review): assumes each node exposes a `state` attribute;
        # the Node class in this file defines movie/data/parent -- confirm.
        return any(node.state == state for node in self.frontier)

    def empty(self):
        """True when no nodes remain."""
        return not self.frontier

    def remove(self):
        """Pop and return the most recently added node."""
        if self.empty():
            raise Exception("empty frontier")
        *remaining, node = self.frontier
        self.frontier = remaining
        return node
class QueueFrontier(StackFrontier):
    """Frontier for breadth-first search: nodes leave in FIFO order."""

    def remove(self):
        """Pop and return the oldest node in the frontier."""
        if self.empty():
            raise Exception("empty frontier")
        node, *remaining = self.frontier
        self.frontier = remaining
        return node
| [
"noreply@github.com"
] | noreply@github.com |
33b7519144745001acf5c7efd0107fbdde324283 | 6ff33cd108f2e5ff0996580f7ccc0e11bc288564 | /rnn2.py | 0f69a5101bed79dd17cffc15d4108000af9f1ed8 | [] | no_license | Thanagaraj2016/Recurrent-neural-network | 5b9492dfd1eb1a0526a7dda67e041368970fd190 | e0cfe0383f147b85e8187a91d66f83935c878e9d | refs/heads/main | 2023-07-10T20:17:25.054784 | 2021-07-29T01:20:19 | 2021-07-29T01:20:19 | 390,556,678 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,070 | py |
# NOTE(review): this script references re, np, Tokenizer, pad_sequences,
# train_test_split and the Keras classes (Sequential, Embedding, LSTM,
# Dense) without importing them, and the later experiments use
# max_features / x_train / y_train / x_test / y_test that are never
# defined here -- presumably the original imports and data setup were
# lost; confirm before running.
import pandas as pd
data = pd.read_csv('Senti.csv')
# Keeping only the necessary columns
data = data[['text','sentiment']]
data = data[data.sentiment != "Neutral"]
# Normalize tweets: lowercase, strip non-alphanumerics, blank out "rt".
data['text'] = data['text'].apply(lambda x: x.lower())
data['text'] = data['text'].apply((lambda x: re.sub('[^a-zA-z0-9\s]','',x)))
for idx,row in data.iterrows():
    row[0] = row[0].replace('rt',' ')
# Tokenize to integer sequences and pad them to a common length.
max_fatures = 2000
tokenizer = Tokenizer(nb_words=max_fatures, split=' ')
tokenizer.fit_on_texts(data['text'].values)
X = tokenizer.texts_to_sequences(data['text'].values)
X = pad_sequences(X)
Y = pd.get_dummies(data['sentiment']).values
X_train, X_test, Y_train, Y_test = train_test_split(X,Y, test_size = 0.33, random_state = 42)
print('Shape of training samples:',X_train.shape,Y_train.shape)
print('Shape of testing samples:',X_test.shape,Y_test.shape)
# Model 1: embedding + single LSTM, softmax over the two sentiment classes.
model = Sequential()
model.add(Embedding(max_fatures, 128 ,input_length = X.shape[1], dropout=0.2))
model.add(LSTM(128))
model.add(Dense(2, activation='softmax'))
model.compile(loss = 'categorical_crossentropy', optimizer='adam',metrics = ['accuracy'])
print(model.summary())
batch_size = 32
model.fit(X_train, Y_train, epochs = 5, batch_size=batch_size, verbose = 2)
score,acc = model.evaluate(X_test, Y_test, verbose = 2, batch_size = batch_size)
print("Score: %.2f" % (score))
print("Accuracy: %.2f" % (acc))
# Score a single hand-written sentence with the trained model.
text = 'We are going to Delhi'
tester = np.array([text])
tester = pd.DataFrame(tester)
tester.columns = ['text']
tester['text'] = tester['text'].apply(lambda x: x.lower())
tester['text'] = tester['text'].apply((lambda x: re.sub('[^a-zA-z0-9\s]','',x)))
max_fatures = 2000
test = tokenizer.texts_to_sequences(tester['text'].values)
test = pad_sequences(test)
if X.shape[1]>test.shape[1]:
    # Left-pad to the training sequence length, then restore 2-D shape.
    test = np.pad(test[0], (X.shape[1]-test.shape[1],0), 'constant')
    test = np.array([test])
prediction = model.predict(test)
print('Prediction value:',prediction[0])
# The experiments below vary embedding size, dropout and depth on a
# separate (binary, x_train/y_train) dataset that is not defined above.
model = Sequential()
model.add(Embedding(max_features, 8))
model.add(LSTM(8, dropout=0.0, recurrent_dropout=0.0))
model.add(Dense(1, activation='sigmoid'))
model.summary()
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model.fit(x_train, y_train, batch_size=batch_size, epochs=1, validation_data=(x_test, y_test))
score, acc = model.evaluate(x_test, y_test, batch_size=batch_size)
print('Test score:', score)
print('Test accuracy:', acc)
#Influence of Embedding
model = Sequential()
model.add(Embedding(max_features, 4))
model.add(LSTM(16, dropout=0.0, recurrent_dropout=0.0))
model.add(Dense(1, activation='sigmoid'))
model.summary()
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model.fit(x_train, y_train, batch_size=batch_size, epochs=1, validation_data=(x_test, y_test))
score, acc = model.evaluate(x_test, y_test, batch_size=batch_size)
print('Test score:', score)
print('Test accuracy:', acc)
#Influence of Dropout
#Dropout with probability
model = Sequential()
model.add(Embedding(max_features, 32))
model.add(LSTM(8, dropout=0.5, recurrent_dropout=0.5))
model.add(Dense(1, activation='sigmoid'))
model.summary()
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model.fit(x_train, y_train, batch_size=batch_size, epochs=1, validation_data=(x_test, y_test))
score, acc = model.evaluate(x_test, y_test, batch_size=batch_size)
print('Test score:', score)
print('Test accuracy:', acc)
#rnn with 2 layers
model = Sequential()
model.add(Embedding(max_features, 8))
model.add(LSTM(8, dropout=0.0, recurrent_dropout=0.0, return_sequences=True))
model.add(LSTM(8, dropout=0.0, recurrent_dropout=0.0, return_sequences=True))
model.add(LSTM(8, dropout=0.0, recurrent_dropout=0.0))
model.add(Dense(1, activation='sigmoid'))
model.summary()
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model.fit(x_train, y_train, batch_size=batch_size, epochs=1, validation_data=(x_test, y_test))
score, acc = model.evaluate(x_test, y_test, batch_size=batch_size)
print('Test score:', score)
print('Test accuracy:', acc)
| [
"noreply@github.com"
] | noreply@github.com |
293f7a573690bb972f45d9f251ef6e4aeede5f90 | a47a53a9d2da6ac101779e82b4e915dc5c87137c | /youku/youku/mysql_pipeline.py | 427304a0a07aed49dfbbc9f6e7317f1e27504f69 | [] | no_license | xuhaomin/spider | 19cfe17c42f5032bbda63d6992c8fa486bac7bc5 | 57c3f8e71537106b0c9bbdb2041ffa8e8b63a73e | refs/heads/master | 2021-01-19T04:49:50.396945 | 2018-03-28T02:09:39 | 2018-03-28T02:09:39 | 87,399,140 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,356 | py | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
import pymysql
from .settings import *
class youkuPipeline(object):
    """Scrapy item pipeline that persists scraped items into MySQL.

    Connection settings (DB_HOST, DB_PORT, ...) are expected from the
    project's settings module, imported at module level.
    """

    def __init__(self):
        self.conn = pymysql.connect(
            host=DB_HOST,
            port=DB_PORT,
            user=DB_USER,
            password=DB_PW,
            charset=DB_CHARSET,
            db=DB_NAME)
        self.table = DB_TABLE

    def insert_sql(self, params):
        """Build a parameterized INSERT statement for *params*.

        Returns an ``(sql, args)`` pair for ``cursor.execute``.  Values
        are bound via ``%s`` placeholders instead of being interpolated
        into the SQL text, which closes the SQL-injection / quoting hole
        the previous string formatting had with scraped (untrusted)
        data.  When the item carries a ``rank`` field, an ``ON DUPLICATE
        KEY UPDATE`` clause refreshes the rank of an existing row.
        """
        columns = ', '.join(str(k) for k in params)
        placeholders = ', '.join(['%s'] * len(params))
        args = [str(v) for v in params.values()]
        sql = 'INSERT INTO {table} ({columns}) VALUE ({placeholders})'.format(
            table=self.table, columns=columns, placeholders=placeholders)
        if 'rank' in params:
            sql += ' ON DUPLICATE KEY UPDATE rank=%s'
            args.append(int(params['rank']))
        return sql, args

    def process_item(self, item, spider):
        """Insert *item*; roll back and log the error on failure."""
        cursor = self.conn.cursor()
        sql, args = self.insert_sql(item)
        try:
            cursor.execute(sql, args)
            self.conn.commit()
        except Exception as e:
            self.conn.rollback()
            print(e)
        finally:
            # The cursor was previously leaked; close it in every case.
            cursor.close()
        return item
def close_spider(self, spider):
self.conn.close() | [
"xhmgreat@sina.com"
] | xhmgreat@sina.com |
3513e1d9367d0cfc6fceab60b8d91581828f9654 | 4569d707a4942d3451f3bbcfebaa8011cc5a128d | /schedulingtoolsplugin/0.11/trunk/schedulingtools/model.py | c9b2e5b43a4f7235a58ac98e6a57acdf3c6ad85c | [] | no_license | woochica/trachacks | 28749b924c897747faa411876a3739edaed4cff4 | 4fcd4aeba81d734654f5d9ec524218b91d54a0e1 | refs/heads/master | 2021-05-30T02:27:50.209657 | 2013-05-24T17:31:23 | 2013-05-24T17:31:23 | 13,418,837 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,078 | py | # from pysqlite2 import dbapi2 as sqlite
from trac.core import *
try:
import pysqlite2.dbapi2 as sqlite
have_pysqlite = 2
except ImportError:
try:
import sqlite3 as sqlite
have_pysqlite = 2
except ImportError:
try:
import sqlite
have_pysqlite = 1
except ImportError:
have_pysqlite = 0
class Availability(object):
    """One named availability window stored in the AVAILABILITY table."""

    @classmethod
    def find(cls, db, name):
        """Return the Availability row matching *name*, or None.

        *name* is passed as a bind parameter instead of being formatted
        into the SQL text, fixing the quoting / SQL-injection bug the
        old ``'%s' % name`` interpolation had (save/delete already used
        bind parameters).
        """
        cursor = db.cursor()
        cursor.execute("SELECT name,validFrom,validUntil,weekdays,resources,"
                       "workFrom,workUntil FROM AVAILABILITY WHERE name=%s",
                       (name,))
        av = None
        for row in cursor:
            av = Availability()
            (av.name, av.validFrom, av.validUntil, av.weekdays,
             av.resources, av.workFrom, av.workUntil) = row
        cursor.close()
        return av

    def save(self, db, name):
        """Update the row previously stored under *name*; insert if absent."""
        cursor = db.cursor()
        cursor.execute("UPDATE AVAILABILITY SET name=%s,validFrom=%s,validUntil=%s,weekdays=%s,resources=%s,workFrom=%s,workUntil=%s WHERE name=%s",
                       (self.name, self.validFrom, self.validUntil, self.weekdays, self.resources, self.workFrom, self.workUntil, name))
        if cursor.rowcount == 0:
            # No existing row matched -- create one instead.
            cursor.execute("INSERT INTO AVAILABILITY (name,validFrom,validUntil,weekdays,resources,workFrom,workUntil) VALUES (%s,%s,%s,%s,%s,%s,%s)",
                           (self.name, self.validFrom, self.validUntil, self.weekdays, self.resources, self.workFrom, self.workUntil))
        cursor.close()
        db.commit()

    def delete(self, db):
        """Remove this availability's row, matched by name."""
        cursor = db.cursor()
        cursor.execute("DELETE FROM AVAILABILITY WHERE name=%s", [self.name])
        cursor.close()
        db.commit()
class Availabilities(object):
    """Bulk operations over the AVAILABILITY table."""

    @classmethod
    def get(cls, db):
        """Return every row as a list of Availability objects.

        Creates the AVAILABILITY table on first use (the initial SELECT
        fails with OperationalError when it does not exist yet).
        """
        cursor = db.cursor()
        query = ("SELECT name,validFrom,validUntil,weekdays,resources,"
                 "workFrom,workUntil FROM AVAILABILITY")
        try:
            cursor.execute(query)
        except sqlite.OperationalError:
            cursor.execute("CREATE TABLE AVAILABILITY(name varchar(255),validFrom varchar(20),validUntil varchar(20),weekdays varchar(255),resources varchar(255),workFrom char(5),workUntil char(5))")
            cursor.execute(query)
        attributes = ('name', 'validFrom', 'validUntil', 'weekdays',
                      'resources', 'workFrom', 'workUntil')
        result = []
        for row in cursor:
            av = Availability()
            for attr, value in zip(attributes, row):
                setattr(av, attr, value)
            result.append(av)
        cursor.close()
        return result

    @classmethod
    def reset(cls, db):
        """Drop the AVAILABILITY table entirely."""
        cursor = db.cursor()
        cursor.execute("DROP TABLE AVAILABILITY")
        db.commit()
| [
"viola@7322e99d-02ea-0310-aa39-e9a107903beb"
] | viola@7322e99d-02ea-0310-aa39-e9a107903beb |
209332580efe025fb4d46261a1772fc630f5f6c4 | 101588fbd9fe43dd4de13db42a4feffbf3a2ace0 | /Flask files/run.py | 5c79ca5b5e87243a37e07b33fb782e5ae62a5b7b | [] | no_license | eudesgsantos/ReTest | 0a9585b4405d49e3c10a7a10efc87a56b46691ff | 66dd53c0159beebf6cb7c4058132190b77ab8a9a | refs/heads/master | 2022-12-16T19:30:34.266522 | 2019-06-27T13:51:03 | 2019-06-27T13:51:03 | 184,690,542 | 3 | 6 | null | 2022-12-08T05:15:50 | 2019-05-03T02:56:21 | HTML | UTF-8 | Python | false | false | 76 | py | from pingsite import app
if __name__ == "__main__":
app.run(debug=True) | [
"cvjso@cesar.school"
] | cvjso@cesar.school |
8bc830421f90192c9d3663b600fc48d9d96980eb | 61bfe506ead460fc3896fd16001ffd1250977fde | /Python/basic_op/src/submodule.py | 63d9d6d8cf4dc4b8e8ce1cf4f76fa79a6aed25fb | [] | no_license | jasonleakey/CrapCodes | 9de63eb033a25dd2e6f288c3e293861d4fabf130 | 8ea109c242356a020c9c9d094f90632b6a7e98af | refs/heads/master | 2021-01-20T06:57:03.907537 | 2013-03-18T05:48:44 | 2013-03-18T05:48:44 | 8,847,779 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,183 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# File name : submodule.py
# Establishment date: 2009-1-21
# Author : jasonleakey
# Copyright (R) 2009 jasonleakey
# <jasonleakey2005@gmail.com>
# <QQ:174481438>
# --------------------------------------------------------------------
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
# Python 2 syntax.  Runs on both import and direct execution.
# (The string says: "the program runs from here!")
print '程序从此运行!'
if __name__ == '__main__':
    # Executed directly: tell the user this module must be imported.
    # (The string says: "this module cannot be run on its own!")
    print '此模块不能单独运行!'
else:
    # Imported as a submodule: expose a greeting helper and a version tag.
    def sayhi():
        # (The string says: "Hi, submodule~")
        print 'Hi, 子模块~'
    version = '0.1'
| [
"yetianhuang.cs@gmail.com"
] | yetianhuang.cs@gmail.com |
fb697cb562885666f1ac01b27f37e56eacaf2b56 | 9b1ad2dcc4e5ee341f68e513954b070d5265be57 | /django/qa/module310/migrations/0001_initial.py | cc27c6a7b8b303e2b688fa47ba8cc79274af986a | [] | no_license | tomkitchen/qa | 453081a32f744cd83371073db17ac3f26a66e6e2 | 7f43ab45cdbc578700cb298ce9a1e17a5912e148 | refs/heads/master | 2020-04-23T02:56:22.395093 | 2019-03-27T09:13:50 | 2019-03-27T09:13:50 | 170,861,616 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 577 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Initial schema: Module310Card, a subclass of flashcards.Card
    using multi-table inheritance (the auto-created ``card_ptr`` link
    doubles as the primary key)."""

    dependencies = [
        ('flashcards', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Module310Card',
            fields=[
                # Parent link to flashcards.Card; also the primary key.
                ('card_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='flashcards.Card')),
            ],
            options={
            },
            bases=('flashcards.card',),
        ),
    ]
| [
"tomkitchen@Toms-MBP.lan"
] | tomkitchen@Toms-MBP.lan |
d19329ceaa1048a1107204badcf34fb17c7d6788 | 2d3cf4f3b8ea1bfa8690fa72480fe7999494efc6 | /yolo.py | 999b2a66bda3b397f7f8e3079d71640a48ad5646 | [] | no_license | khushbukella/Object-Detection | 07ae963d7194fa7be9dc00de3662226a423a23c5 | 598ff82c2af9c51b5682bec968362a2c80785684 | refs/heads/master | 2021-04-02T03:43:02.286290 | 2020-03-18T13:45:13 | 2020-03-18T13:45:13 | 248,196,447 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,168 | py | #USAGE
#python yolo.py --image images/baggage_claim.jpg --yolo yolo-coco
# import the necessary packages
import numpy as np
import argparse
import time
import cv2
import os
# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required=True,
    help="path to input image")
ap.add_argument("-y", "--yolo", required=True,
    help="base path to YOLO directory")
ap.add_argument("-c", "--confidence", type=float, default=0.9,
    help="minimum probability to filter weak detections")
ap.add_argument("-t", "--threshold", type=float, default=0.7,
    help="threshold when applying non-maxima suppression")
args = vars(ap.parse_args())
# load the COCO class labels our YOLO model was trained on
labelsPath = os.path.sep.join([args["yolo"], "coco.names"])
# NOTE(review): the file handle is never closed -- consider a `with` block.
LABELS = open(labelsPath).read().strip().split("\n")
# initialize a list of colors to represent each possible class label
np.random.seed(42)
COLORS = np.random.randint(0, 255, size=(len(LABELS), 3),
    dtype="uint8")
# derive the paths to the YOLO weights and model configuration
weightsPath = os.path.sep.join([args["yolo"],"yolov3.weights"])
configPath = os.path.sep.join([args["yolo"],"yolov3.cfg"])
# load our YOLO object detector trained on COCO dataset (80 classes)
print("[INFO] loading YOLO from disk...")
net = cv2.dnn.readNetFromDarknet(configPath, weightsPath)
# load our input image and grab its spatial dimensions
# NOTE(review): cv2.imread returns None for an unreadable path, which
# would crash on the next line -- confirm inputs are validated upstream.
image = cv2.imread(args["image"])
(H, W) = image.shape[:2]
# determine only the *output* layer names that we need from YOLO
ln = net.getLayerNames()
ln = [ln[i[0] - 1] for i in net.getUnconnectedOutLayers()]
# construct a blob from the input image and then perform a forward
# pass of the YOLO object detector, giving us our bounding boxes and
# associated probabilities
blob = cv2.dnn.blobFromImage(image, 1 / 255.0, (416, 416),
    swapRB=True, crop=False)
net.setInput(blob)
start = time.time()
layerOutputs = net.forward(ln)
end = time.time()
# show timing information on YOLO
print("[INFO] YOLO took {:.6f} seconds".format(end - start))
# initialize our lists of detected bounding boxes, confidences, and
# class IDs, respectively
boxes = []
confidences = []
classIDs = []
# loop over each of the layer outputs
for output in layerOutputs:
    # loop over each of the detections
    for detection in output:
        # extract the class ID and confidence (i.e., probability) of
        # the current object detection
        scores = detection[5:]
        classID = np.argmax(scores)
        confidence = scores[classID]
        # filter out weak predictions by ensuring the detected
        # probability is greater than the minimum probability
        if confidence > args["confidence"]:
            # scale the bounding box coordinates back relative to the
            # size of the image, keeping in mind that YOLO actually
            # returns the center (x, y)-coordinates of the bounding
            # box followed by the boxes' width and height
            box = detection[0:4] * np.array([W, H, W, H])
            (centerX, centerY, width, height) = box.astype("int")
            # use the center (x, y)-coordinates to derive the top and
            # and left corner of the bounding box
            x = int(centerX - (width / 2))
            y = int(centerY - (height / 2))
            # update our list of bounding box coordinates, confidences,
            # and class IDs
            boxes.append([x, y, int(width), int(height)])
            confidences.append(float(confidence))
            classIDs.append(classID)
# apply non-maxima suppression to suppress weak, overlapping bounding
# boxes
idxs = cv2.dnn.NMSBoxes(boxes, confidences, args["confidence"],
    args["threshold"])
# ensure at least one detection exists
if len(idxs) > 0:
    # loop over the indexes we are keeping
    for i in idxs.flatten():
        # extract the bounding box coordinates
        (x, y) = (boxes[i][0], boxes[i][1])
        (w, h) = (boxes[i][2], boxes[i][3])
        # draw a bounding box rectangle and label on the image
        color = [int(c) for c in COLORS[classIDs[i]]]
        cv2.rectangle(image, (x, y), (x + w, y + h), color, 2)
        text = "{}: {:.4f}".format(LABELS[classIDs[i]], confidences[i])
        cv2.putText(image, text, (x, y - 5), cv2.FONT_HERSHEY_SIMPLEX,
            0.5, color, 2)
# show the output image
cv2.imshow("Image", image)
cv2.waitKey(0) | [
"noreply@github.com"
] | noreply@github.com |
82c61a9738f287ed1685f5069dc74e93b59f453a | a4df5a27c4f2564da80d1b944988b47b04be951a | /www/common/orm.py | a865540bed3ede3bed2653e2e2dc1674ece0a558 | [] | no_license | yanchengdegithub/python3web | 9ede4ca3f31d6936f3db201ff92d5e0acc575d70 | 565fbc69dc9f5939054a6a7ae7a2f6fbf8268180 | refs/heads/master | 2021-08-08T03:35:11.341235 | 2017-11-09T13:51:38 | 2017-11-09T13:51:38 | 104,431,887 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,631 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
__author__ = 'yancheng'
import asyncio, logging
import aiomysql
def log(sql, args=()):
    """Log a SQL statement and its bound arguments at INFO level."""
    # Lazy %-style arguments: the message is only formatted when the
    # INFO level is actually enabled.
    logging.info('SQL: %s ARGS: %s', sql, args)
async def create_pool(loop, **kw):
    """Create the module-wide aiomysql connection pool.

    Required keywords: ``user``, ``password`` and ``dbname`` (note the
    key is 'dbname', not 'db').  Optional keywords with defaults:
    host, port, charset, autocommit, maxsize, minsize.
    """
    logging.info('create database connection pool...')
    global __pool
    __pool = await aiomysql.create_pool(
        host=kw.get('host', 'localhost'),
        port=kw.get('port', 3306),
        user=kw['user'],
        password=kw['password'],
        db=kw['dbname'],
        charset=kw.get('charset', 'utf8'),
        autocommit=kw.get('autocommit', True),
        maxsize=kw.get('maxsize', 10),
        minsize=kw.get('minsize', 1),
        loop=loop
    )
async def close_pool():
    """Close the pool and wait until all connections are released."""
    logging.info('close database connection pool...')
    global __pool
    __pool.close()
    await __pool.wait_closed()
async def select(sql, args, size=None):
    """Run a SELECT and return the result rows as a list of dicts.

    `sql` uses '?' placeholders; they are rewritten to pymysql's '%s'
    style before execution.  When `size` is given, at most `size` rows
    are fetched.
    """
    log(sql, args)
    global __pool
    async with __pool.get() as conn:
        async with conn.cursor(aiomysql.DictCursor) as cur:
            await cur.execute(sql.replace('?', '%s'), args or ())
            rows = await (cur.fetchmany(size) if size else cur.fetchall())
            logging.info('rows returned: %s' % len(rows))
            return rows
async def execute(sql, args, autocommit=True):
    """Run an INSERT/UPDATE/DELETE statement and return the cursor.

    With ``autocommit=False`` an explicit transaction is opened and
    rolled back on any error before re-raising.  The cursor has already
    been closed by the inner ``async with`` when it is returned, but
    callers in this module (Model.save/update/remove) read its
    rowcount/lastrowid attributes afterwards.
    """
    log(sql, args)
    async with __pool.get() as conn:
        if not autocommit:
            await conn.begin()
        try:
            async with conn.cursor(aiomysql.DictCursor) as cur:
                await cur.execute(sql.replace('?', '%s'), args)
                #affected = cur.rowcount
                #lastrowid = cur.lastrowid
            if not autocommit:
                await conn.commit()
        except BaseException as e:
            # Roll back our explicit transaction, then propagate.
            if not autocommit:
                await conn.rollback()
            raise
        return cur
def create_args_string(num):
    """Return *num* comma-separated '?' placeholders, e.g. 3 -> '?, ?, ?'."""
    # str.join over a repeated list replaces the manual append loop.
    return ', '.join(['?'] * num)
class Field(object):
    """Describes one mapped column: name, SQL type, PK flag and default."""

    def __init__(self, name, column_type, primary_key, default):
        self.name = name
        self.column_type = column_type
        self.primary_key = primary_key
        self.default = default

    def __str__(self):
        return '<{}, {}:{}>'.format(self.__class__.__name__,
                                    self.column_type, self.name)
class StringField(Field):
    # VARCHAR column; `ddl` carries the SQL type, default 'varchar(100)'.
    def __init__(self, name=None, primary_key=False, default=None, ddl='varchar(100)'):
        super().__init__(name, ddl, primary_key, default)


class BooleanField(Field):
    # BOOLEAN column; never usable as a primary key.
    def __init__(self, name=None, default=False):
        super().__init__(name, 'boolean', False, default)


class IntegerField(Field):
    # BIGINT column.
    def __init__(self, name=None, primary_key=False, default=0):
        super().__init__(name, 'bigint', primary_key, default)


class FloatField(Field):
    # REAL column.
    def __init__(self, name=None, primary_key=False, default=0.0):
        super().__init__(name, 'real', primary_key, default)


class TextField(Field):
    # TEXT column; never a primary key.
    def __init__(self, name=None, default=None):
        super().__init__(name, 'text', False, default)
class ModelMetaclass(type):
    """Metaclass that turns Field class attributes into an ORM mapping.

    Collects every Field attribute into ``__mappings__``, determines the
    single primary key, removes the Field objects from the class body,
    and precomputes the default SELECT/INSERT/UPDATE/DELETE SQL.
    """

    def __new__(cls, name, bases, attrs):
        # Leave the Model base class itself untouched:
        if name=='Model':
            return type.__new__(cls, name, bases, attrs)
        tableName = attrs.get('__table__', None) or name
        logging.info('found model: %s (table: %s)' % (name, tableName))
        mappings = dict()
        fields = []
        primaryKey = None
        for k, v in attrs.items():
            if isinstance(v, Field):
                logging.info(' found mapping: %s ==> %s' % (k, v))
                mappings[k] = v
                if v.primary_key:
                    # Found the primary key:
                    if primaryKey:
                        # Bug fix: StandardError does not exist in
                        # Python 3 -- raising it crashed with NameError.
                        raise RuntimeError('Duplicate primary key for field: %s' % k)
                    primaryKey = k
                else:
                    fields.append(k)
        if not primaryKey:
            raise RuntimeError('Primary key not found.')
        for k in mappings.keys():
            attrs.pop(k)
        escaped_fields = list(map(lambda f: '`%s`' % f, fields))
        attrs['__mappings__'] = mappings  # attribute -> column mapping
        attrs['__table__'] = tableName
        attrs['__primary_key__'] = primaryKey  # name of the PK attribute
        attrs['__fields__'] = fields  # all attributes except the primary key
        attrs['__select__'] = 'select `%s`, %s from `%s`' % (primaryKey, ', '.join(escaped_fields), tableName)
        attrs['__insert__'] = 'insert into `%s` (%s, `%s`) values (%s)' % (tableName, ', '.join(escaped_fields), primaryKey, create_args_string(len(escaped_fields) + 1))
        attrs['__update__'] = 'update `%s` set %s where `%s`=?' % (tableName, ', '.join(map(lambda f: '`%s`=?' % (mappings.get(f).name or f), fields)), primaryKey)
        attrs['__delete__'] = 'delete from `%s` where `%s`=?' % (tableName, primaryKey)
        return type.__new__(cls, name, bases, attrs)
class Model(dict, metaclass=ModelMetaclass):
    """Base class for ORM models.

    A Model instance is a dict of column values with attribute-style
    access; the metaclass supplies __table__/__mappings__ and the
    precomputed CRUD SQL.  All database methods are coroutines that use
    the module-level connection pool via select()/execute().
    """

    def __init__(self, **kw):
        super(Model, self).__init__(**kw)

    def __getattr__(self, key):
        # Attribute access falls through to the underlying dict.
        try:
            return self[key]
        except KeyError:
            raise AttributeError(r"'Model' object has no attribute '%s'" % key)

    def __setattr__(self, key, value):
        self[key] = value

    def getValue(self, key):
        # Plain lookup; None when the attribute is absent (no defaults).
        return getattr(self, key, None)

    def getValueOrDefault(self, key):
        # Like getValue, but falls back to (and stores) the Field's
        # default; callable defaults are invoked, so e.g. time.time or
        # id-generator functions work.
        value = getattr(self, key, None)
        if value is None:
            field = self.__mappings__[key]
            if field.default is not None:
                value = field.default() if callable(field.default) else field.default
                logging.debug('using default value for %s: %s' % (key, str(value)))
                setattr(self, key, value)
        return value

    @classmethod
    async def findAll(cls, where=None, args=None, **kw):
        """Find all objects matching the optional *where* clause.

        Supports ``orderBy`` and ``limit`` (int, or a 2-tuple of
        offset/count) keyword arguments; raises ValueError for any
        other limit shape.
        """
        sql = [cls.__select__]
        if where:
            sql.append('where')
            sql.append(where)
        if args is None:
            args = []
        orderBy = kw.get('orderBy', None)
        if orderBy:
            sql.append('order by')
            sql.append(orderBy)
        limit = kw.get('limit', None)
        if limit is not None:
            sql.append('limit')
            if isinstance(limit, int):
                sql.append('?')
                args.append(limit)
            elif isinstance(limit, tuple) and len(limit) == 2:
                sql.append('?, ?')
                args.extend(limit)
            else:
                raise ValueError('Invalid limit value: %s' % str(limit))
        rs = await select(' '.join(sql), args)
        return [cls(**r) for r in rs]

    @classmethod
    async def findNumber(cls, selectField, where=None, args=None):
        """Return a scalar (e.g. count(*)) for *selectField*, or None."""
        sql = ['select %s _num_ from `%s`' % (selectField, cls.__table__)]
        if where:
            sql.append('where')
            sql.append(where)
        rs = await select(' '.join(sql), args, 1)
        if len(rs) == 0:
            return None
        return rs[0]['_num_']

    @classmethod
    async def find(cls, pk):
        """Find one object by primary key; None when no row matches."""
        rs = await select('%s where `%s`=?' % (cls.__select__, cls.__primary_key__), [pk], 1)
        if len(rs) == 0:
            return None
        return cls(**rs[0])

    async def save(self):
        """INSERT this object; return the new row's lastrowid, or 0."""
        args = list(map(self.getValueOrDefault, self.__fields__))
        args.append(self.getValueOrDefault(self.__primary_key__))
        cur = await execute(self.__insert__, args)
        rows = cur.rowcount
        if rows != 1:
            logging.warn('failed to insert record: affected rows: %s' % rows)
            return 0
        return cur.lastrowid

    async def update(self):
        """UPDATE the row matching this object's primary key."""
        args = list(map(self.getValue, self.__fields__))
        args.append(self.getValue(self.__primary_key__))
        cur = await execute(self.__update__, args)
        rows = cur.rowcount
        if rows != 1:
            logging.warn('failed to update by primary key: affected rows: %s' % rows)
async def remove(self):
args = [self.getValue(self.__primary_key__)]
cur = await execute(self.__delete__, args)
rows = cur.rowcount
if rows != 1:
logging.warn('failed to remove by primary key: affected rows: %s' % rows) | [
"yanchengdee@aliyun.com"
] | yanchengdee@aliyun.com |
87f4d49b6166eb3cabd30a9cd22eaf7974bf10d7 | 4b723652507d3d03eb630cc5e72f8d4444719296 | /app/migrations/0023_auto_20190628_0726.py | 8204d935c8b32329d82dc915fc897d73ce4b5a72 | [] | no_license | abinba/sharewood | 00ee57c9c6ce23bfc5679954de755368438fbac0 | 758170090b1383bf9fa6192ccfc48284a056143e | refs/heads/master | 2020-07-09T11:49:29.622054 | 2020-07-02T21:32:30 | 2020-07-02T21:32:30 | 203,961,250 | 1 | 0 | null | 2020-07-02T21:32:31 | 2019-08-23T08:51:35 | Python | UTF-8 | Python | false | false | 688 | py | # Generated by Django 2.2.2 on 2019-06-28 07:26
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
    """Auto-generated: refreshes the `date` defaults on comments/records.

    NOTE(review): the defaults are datetimes frozen at makemigrations
    time (and `comments.date` is naive while `records.date` is
    UTC-aware) -- presumably ``default=timezone.now`` was intended in
    the models; confirm.
    """

    dependencies = [
        ('app', '0022_auto_20190627_1728'),
    ]

    operations = [
        migrations.AlterField(
            model_name='comments',
            name='date',
            field=models.DateField(default=datetime.datetime(2019, 6, 28, 7, 26, 42, 265841)),
        ),
        migrations.AlterField(
            model_name='records',
            name='date',
            field=models.DateField(default=datetime.datetime(2019, 6, 28, 7, 26, 42, 265408, tzinfo=utc)),
        ),
    ]
| [
"arcturus5340@gmail.com"
] | arcturus5340@gmail.com |
286590a9fe52b4359057b9360cd7b7a404aa8d70 | fe18994a1880f347d8004383434842286b9dccd3 | /python_stack/flask/flask_fundamentals/Dojo_Survey/server.py | 335aa9e1c425addeaea0b58be5359da8056a3f95 | [] | no_license | Anbousi/Python | 682d5b00555ab3183d06afddb4c5f6e1d5739f6c | 4f05dd8ec62e80a28ca607feae976d9220a62227 | refs/heads/master | 2023-05-06T03:37:28.878915 | 2021-05-30T19:11:28 | 2021-05-30T19:11:28 | 364,501,098 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 695 | py | from flask import Flask , render_template , request , redirect
app = Flask(__name__)
@app.route('/')
def main():
    # Landing page: renders the survey form.
    return render_template('index.html')
@app.route('/result' , methods=['POST'])
def result():
    # Handle the survey form POST and echo the answers back.
    name_form = request.form['name']
    location_form = request.form['location']
    language_form = request.form['language']
    comment_form = request.form['comment']
    # NOTE(review): indexing request.form aborts with 400 when the field
    # is missing (e.g. an unchecked checkbox) -- presumably
    # request.form.get() / getlist() was intended here; confirm.
    radio_form = request.form['radio']
    check_form = request.form['check']
    print(check_form)
    # radio_form and check_form are read but not passed to the template.
    return render_template('result.html' , name_form = name_form , location_form = location_form , language_form = language_form , comment_form = comment_form )
if __name__ == '__main__':
app.run(debug = True) | [
"anbousi@gmail.com"
] | anbousi@gmail.com |
cdd26c51960b9afdca9e6096e66ea1df910dc336 | bb0de80e3744537dfbb44dc8c1b7c0c84c95d5ac | /model/__init__.py | d2f4494d88e672b32fccd9470469527236f70c26 | [
"Apache-2.0"
] | permissive | MOE-LYON/KLNews | 4b2670c4a39781de2fa3de62dbf13e2e6a65724b | 076a044a72eec08f15c05306fa7d56fcae09dff4 | refs/heads/master | 2022-06-19T21:12:45.531971 | 2020-05-06T00:09:21 | 2020-05-06T00:09:21 | 260,695,899 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 114 | py | from flask_sqlalchemy import SQLAlchemy
# Shared SQLAlchemy handle.  The model modules below import `db`, so it
# must be created first -- the late imports avoid a circular import.
db = SQLAlchemy()

from .category import Category
from .news import News
| [
"wrisboy@gmail.com"
] | wrisboy@gmail.com |
fd9b2b5f03d3dc5bc61de7795ca876950a6683e0 | 7e0393251012e91213dddfd9c93f6b6b73ca2bfe | /tests/unit/test_drizzle_error.py | e7d05e37e25a68aede0c2312672d6c495e350d23 | [
"MIT"
] | permissive | josephhardinee/cloudnetpy | ff4cc0303d7f2ae40f2d3466298257659ff3ccde | c37760db3cdfe62ae769f8090ba621803ec9a92c | refs/heads/master | 2021-03-06T15:37:51.529776 | 2020-02-13T09:05:29 | 2020-02-13T09:05:29 | 246,207,849 | 0 | 0 | MIT | 2020-03-10T04:29:48 | 2020-03-10T04:26:16 | null | UTF-8 | Python | false | false | 3,347 | py | import numpy as np
import numpy.testing as testing
import pytest
from cloudnetpy.products import drizzle_error as de
# Shared test inputs: a 2x3 drizzle-diameter field, boolean category
# masks (drizzle / small / tiny), and (error, bias) input tuples.
DRIZZLE_PARAMETERS = {'Do': np.array([[0.0001, 0.01, 0.000001],
                                      [0.001, 0.000001, 0.0001]])}
DRIZZLE_INDICES = {'drizzle': np.array([[1, 1, 1], [1, 1, 1]], dtype=bool),
                   'small': np.array([[1, 0, 0], [0, 0, 1]], dtype=bool),
                   'tiny': np.array([[0, 0, 1], [0, 1, 0]], dtype=bool)}
ERROR_INPUT = (np.array([[0.01, 0.34, 0.5],
                         [0.2, 0.3, 0.56]]), 0.14)
BIAS_INPUT = (0.01, 0.57)
@pytest.mark.parametrize('key, value', [
    ('drizzle', [False, True, True, True]),
    ('small', [False, True, False, False]),
    ('tiny', [False, False, False, True])])
def test_get_drizzle_indices(key, value):
    # Diameter thresholds determine the drizzle/small/tiny masks.
    dia = np.array([-1, 2 * 1e-5, 1, 1e-6])
    d = de._get_drizzle_indices(dia)
    testing.assert_array_equal(d[key], value)


@pytest.mark.parametrize('key', [
    'Do_error', 'drizzle_lwc_error', 'drizzle_lwf_error', 'S_error'])
def test_calc_parameter_errors(key):
    # Only checks the expected keys are present, not their values.
    x = de._calc_parameter_errors(DRIZZLE_INDICES, ERROR_INPUT)
    assert key in x.keys()


@pytest.mark.parametrize('key', [
    'Do_bias', 'drizzle_lwc_bias', 'drizzle_lwf_bias'])
def test_calc_parameter_biases(key):
    x = de._calc_parameter_biases(BIAS_INPUT)
    assert key in x.keys()
@pytest.fixture
def results():
    # Combined error + bias dictionary, as downstream code receives it.
    errors = de._calc_parameter_errors(DRIZZLE_INDICES, ERROR_INPUT)
    biases = de._calc_parameter_biases(BIAS_INPUT)
    return {**errors, **biases}


@pytest.mark.parametrize('key', [
    'drizzle_N_error', 'v_drizzle_error', 'mu_error'])
def test_add_supplementary_errors(results, key):
    x = de._add_supplementary_errors(results, DRIZZLE_INDICES, ERROR_INPUT)
    assert key in x.keys()


def test_calc_v_error(results):
    # With Do_error == 2 everywhere, the tiny-category fall-speed error
    # is expected to be doubled (4).
    results['Do_error'] = np.array([[2, 2, 2], [2, 2, 2]])
    x = de._add_supplementary_errors(results, DRIZZLE_INDICES, ERROR_INPUT)
    testing.assert_almost_equal(x['v_drizzle_error'][DRIZZLE_INDICES['tiny']], 4)


@pytest.mark.parametrize('key', [
    'drizzle_N_bias', 'v_drizzle_bias'])
def test_add_supplementary_biases(results, key):
    x = de._add_supplementary_biases(results, BIAS_INPUT)
    assert key in x.keys()
def test_calc_error():
    # _calc_error with unit weights should match l2norm_weighted directly.
    from cloudnetpy.utils import l2norm_weighted
    compare = l2norm_weighted(ERROR_INPUT, 1, 1)
    testing.assert_almost_equal(de._calc_error(1, 1, ERROR_INPUT), compare)


def test_stack_errors():
    # NOTE(review): this mutates the module-level DRIZZLE_INDICES dict,
    # so any test running after this one sees the modified 'drizzle'
    # mask -- confirm that ordering dependence is intended.
    DRIZZLE_INDICES['drizzle'] = np.array([[0, 1, 1], [1, 1, 0]], dtype=bool)
    compare = np.ma.array(ERROR_INPUT[0], mask=[[1, 0, 0], [0, 0, 1]])
    x = de._stack_errors(ERROR_INPUT[0], DRIZZLE_INDICES)
    testing.assert_array_almost_equal(x, compare)


@pytest.mark.parametrize("x, result", [
    (-1000, -1),
    (-100, -0.99999),
    (-10, -0.9),
    (-1, np.exp(-1 / 10 * np.log(10)) - 1)])
def test_db2lin(x, result):
    testing.assert_array_almost_equal(de.db2lin(x), result, decimal=5)


def test_db2lin_raise():
    # Values above the allowed dB range must be rejected.
    with pytest.raises(ValueError):
        de.db2lin(150)


@pytest.mark.parametrize("x, result", [
    (1e6, 60),
    (1e5, 50),
    (1e4, 40)])
def test_lin2db(x, result):
    testing.assert_array_almost_equal(de.lin2db(x), result, decimal=3)


def test_lin2db_raise():
    # Negative linear values have no dB representation.
    with pytest.raises(ValueError):
        de.lin2db(-1)
| [
"simo.tukiainen@fmi.fi"
] | simo.tukiainen@fmi.fi |
1abc67418dafabbb3f468f4ff08fea5c925b3bde | d86c5aa92a9763510b539776510ad9795d33ae89 | /September 2020/03-Multidimensional-Lists/03-Primary-Diagonal.py | cb353fab20268a1b0cc58deae94420f1b386b6f6 | [
"MIT"
] | permissive | eclipse-ib/Software-University-Professional-Advanced-Module | 42e3bd50ac5f0df8082add29f4113cffb87889e1 | 636385f9e5521840f680644824d725d074b93c9a | refs/heads/main | 2023-02-13T06:02:53.246980 | 2021-01-06T21:12:14 | 2021-01-06T21:12:14 | 306,282,871 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 767 | py | #Съкратен Вариант с комрехеншън:
# Read an NxN integer matrix from stdin and print the sum of its
# primary (top-left to bottom-right) diagonal.
size = int(input())
matrix = [[int(token) for token in input().split()] for _ in range(size)]
current_sum = sum(matrix[idx][idx] for idx in range(size))
print(current_sum)
#Вариант с range:
# size = int(input())
#
# matrix = [
# [int(el) for el in input().split()]
# for i in range(size)
# ]
#
# current_sum = 0
# for i in range(size):
# current_sum += matrix[i][i]
#
# print(current_sum)
# Вариант с ожхождане на матрицата:
# size = int(input())
#
# matrix = [
# [int(el) for el in input().split()]
# for i in range(size)
# ]
#
# index = 0
# current_sum = 0
# for j in matrix:
# current_sum += j[index]
# index += 1
# print(current_sum)
| [
"65770519+eclipse-ib@users.noreply.github.com"
] | 65770519+eclipse-ib@users.noreply.github.com |
77dde7595e4cc4aa796bd23875458cd41b251b56 | 7f8d33e4d7e080011825d9b053ca091e4e8a90cd | /blog/admin.py | c5a2cdc9f1df8e177b40bb6e1b203e94c308b21d | [] | no_license | ChristiWilson/portfolio | 216adf5b0a6206fcdf16bc7f9ba2c89972e2322d | 504668a7b9436af87ce8b266c33c08b5229c2c81 | refs/heads/master | 2020-05-16T06:37:29.872337 | 2019-04-24T23:37:01 | 2019-04-24T23:37:01 | 182,852,808 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 351 | py | from django.contrib import admin
from .models import Post, Category, Comment
class PostAdmin(admin.ModelAdmin):
pass
class CategoryAdmin(admin.ModelAdmin):
pass
class CommentAdmin(admin.ModelAdmin):
pass
admin.site.register(Post, PostAdmin)
admin.site.register(Category, CategoryAdmin)
admin.site.register(Comment, CategoryAdmin)
| [
"Christi@ChristiWilson.com"
] | Christi@ChristiWilson.com |
ce640e99ab9b4f9311a737f6a8f10585751a2bcf | 3ce592352627591346ea33ea0c2665ad879414e2 | /References/web-scraping/101scrapetest.py | 64aa6f9bfe35ee27d2cbd8684578f2c2e1fafc06 | [
"MIT"
] | permissive | royqh1979/python_libs_usage | 113df732ef106f4a5faae1343493756fd703c8c0 | 57546d5648d8a6b7aca7d7ff9481aa7cd4d8f511 | refs/heads/master | 2021-04-16T18:14:43.835482 | 2021-01-11T03:55:25 | 2021-01-11T03:55:25 | 249,374,754 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 290 | py | from urllib.request import urlopen
from bs4 import BeautifulSoup
html = urlopen('http://www.pythonscraping.com/pages/warandpeace.html')
bs = BeautifulSoup(html.read(),'html5lib')
print(bs.h1)
namelist = bs.findAll('span',{'class':'green'})
for name in namelist:
print(name.get_text()) | [
"royqh1979@gmail.com"
] | royqh1979@gmail.com |
1936e2b9dfb941c143b221ae1ef571e2fc8c5abf | 7629cd891d1352455d4ceed3c3b03042e7b3810f | /matplotlib/02.designGraph.py | 67a7d5ab51cc1bfb6bf685ef643ba25a83fce5fe | [] | no_license | mingginew88/study-python | 83962034df94ef44d7c1d09eeed6e9cf8a6918a3 | 5e45005c367ee45c1eb72717807d46568596653a | refs/heads/master | 2023-04-27T10:00:21.484293 | 2021-05-14T00:11:07 | 2021-05-14T00:11:07 | 328,588,027 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,106 | py | import matplotlib.pyplot as plt
#################################################
# 그래프 옵션
# color, marker, linestyle 옵션 사용 가능
# color의 경우 Hex Code 및 색상 이름 사용
# 사용법 (색상 + 마커 + 라인) 순으로 plot() 옵션으로 입력
# 색상 종류 - b(blue), g(green), r(red), c(cyan), m(magenta), y(yello), w(white), k(black) ...
# 마커 종류 - o(circle), .(point), ,(pixel), v(triangle-down), ^(triangle-up), 1(tri-down), 2(tri-up), s(square), *(star), +(plus), D(diamond)
# 라인 종류 - -(solid line), --(dashed line), -.(dash-dot line), :(dotted line)
#################################################
plt.plot([1, 2, 3, 4], [2, 4, 7, 11], 'b^--')
plt.plot([1, 2, 3, 4], [1, 3, 5, 7], 'r+-.')
plt.plot([1, 2, 3, 4], [3, 4, 6, 8], color='violet')
plt.plot([1, 2, 3, 4], [9, 5, 2, 1], color='lawngreen')
# X축 명칭 표기
plt.xlabel('x-axis')
# Y축 명칭 표기
plt.ylabel('y-axis')
# 축 범위 지정 - 반드시 네 개의 값 (xmin, xmax, ymin, ymax)이 있어야 함.
plt.axis([0, 5, 0, 15])
# pyplot을 이용한 시각화
plt.show()
| [
"seoyoungjun88@gmail.com"
] | seoyoungjun88@gmail.com |
9bfa2e020f3266107c5bc70f347db53f8663b448 | 8a2f35f106cd7b31690a6ca6998c6eea9a0d3fdb | /app.py | 31636d5c3af5fc508ccf7d1f7e013a606f40d4ff | [] | no_license | acserna/Capstone-Cloud | af752d12e17a9f2c35fe094b53909e88d4f83b8f | 6bb4d38b36dd536bab96f94a2e00f8a79a413f0d | refs/heads/master | 2023-04-06T16:08:21.185193 | 2021-04-19T03:26:42 | 2021-04-19T03:26:42 | 358,479,274 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 175 | py | from flask import Flask
app = Flask(__name__)
@app.route("/")
def hello():
return "Hello World, my name is Andres Camilo Nuevo"
if __name__ == "__main__":
app.run() | [
"andres.c.serna@gmail.com"
] | andres.c.serna@gmail.com |
dbaaec9420ba7a6285a256df2fa7755d3ae9209b | 3363c489c63fe6b0348fd104d7778decc2baf014 | /RayCastingPythonMaze-main/pycastermaze.py | 7ea73377dd703a367fda429dc8a60af57204094c | [
"MIT"
] | permissive | RyanIsCoding2021/RyanIsCoding2021 | 9040dc5c749d29ca0e643cf384390102854e927f | e4ec24d5ec38f27861fed4e73d158be40ae04993 | refs/heads/master | 2023-08-15T00:33:55.640357 | 2021-10-11T17:15:18 | 2021-10-11T17:15:18 | 386,810,072 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,960 | py | import numpy as np
from matplotlib import pyplot as plt
import keyboard
#random map generator
size = 15
mapa = [[list(np.random.uniform(0, 1, 3))] * size for i in range(size)]
for i in range(size-2):
for j in range(size-2):
if np.random.uniform() > 0.33:
mapa[i+1][j+1] = 0
posx, posy = (1, np.random.randint(1, size -1))
rot = np.pi/4
x, y = (posx, posy)
mapa[x][y] = 0
count = 0
while True:
testx, testy = (x, y)
if np.random.uniform() > 0.5:
testx = testx + np.random.choice([-1, 1])
else:
testy = testy + np.random.choice([-1, 1])
if testx > 0 and testx < size -1 and testy > 0 and testy < size -1:
if mapa[testx][testy] == 0 or count > 5:
count = 0
x, y = (testx, testy)
mapa[x][y] = 0
if x == size-2:
exitx, exity = (x, y)
break
else:
count = count+1
while True: #main game loop
plt.hlines(-0.6, 0, 60, colors='gray', lw=165, alpha=0.5)
plt.hlines(0.6, 0, 60, colors='lightblue', lw=165, alpha=0.5)
tilex, tiley, tilec = ([], [], [])
for i in range(60): #vision loop
rot_i = rot + np.deg2rad(i - 30)
x, y = (posx, posy)
sin, cos = (0.02*np.sin(rot_i), 0.02*np.cos(rot_i))
n = 0
while True: # ray loop
xx, yy = (x, y)
x, y = (x + cos, y + sin)
n = n+1
if abs(int(3*xx)-int(3*x)) > 0 or abs(int(3*yy)-int(3*y))>0:
tilex.append(i)
## tiley.append(-1/(0.02 * n))
tiley.append(-1/(0.02 * n*np.cos(np.deg2rad(i - 30))))
if int(x) == exitx and int(y) == exity:
tilec.append('b')
else:
tilec.append('k')
if mapa[int(x)][int(y)] != 0:
## h = np.clip(1/(0.02 * n), 0, 1)
h = np.clip(1/(0.02 * n*np.cos(np.deg2rad(i-30))), 0, 1)
c = np.asarray(mapa[int(x)][int(y)])*(0.3 + 0.7 * h**2)
break
plt.vlines(i, -h, h, lw = 8, colors = c) # draw vertical lines
plt.scatter(tilex, tiley, c=tilec) # draw tiles on the floor
plt.axis('off'); plt.tight_layout(); plt.axis([0, 60, -1, 1])
plt.draw(); plt.pause(0.0001); plt.clf()
# player's movement
key = keyboard.read_key()
x, y = (posx, posy)
if key == 'up':
x, y = (x + 0.3*np.cos(rot), y + 0.3*np.sin(rot))
elif key == 'down':
x, y = (x - 0.3*np.cos(rot), y - 0.3*np.sin(rot))
elif key == 'left':
rot = rot - np.pi/8
elif key == 'right':
rot = rot + np.pi/8
elif key == 'esc':
break
if mapa[int(x)][int(y)] == 0:
if int(posx) == exitx and int(posy) == exity:
break
posx, posy = (x, y)
plt.close()
| [
"ryan@Ryans-MacBook-Pro.local"
] | ryan@Ryans-MacBook-Pro.local |
db89b4926bf8f251c68f068747c97003c1c04fbc | cfac0f4f862180baae078bd7656ac41c8f946006 | /Day22/full.py | 53638388312ad69de0807e67cf6732d90355eefc | [] | no_license | RaspiKidd/AoC2017 | bcf4a8c161b48b2b8f89745d6ff5b741f023b5b7 | 2be828462cd5d56e2f8a8f636525359bb4de045e | refs/heads/master | 2021-09-01T20:07:34.228665 | 2017-12-28T14:25:08 | 2017-12-28T14:25:08 | 112,738,515 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,634 | py | def read():
with open("data.txt") as f:
inpt = f.read().splitlines()
offset = len(inpt) // 2
infected = set()
for r, line in enumerate(inpt):
for c, ch in enumerate(line):
if ch == '#':
infected.add((r - offset, c - offset))
return infected
infected = read()
dirs = [(-1, 0), (0, -1), (1, 0), (0, 1)]
d = 0
virusAt = (0, 0)
def burst():
global infected, d, virusAt
infectionCaused = False
if virusAt in infected:
d = (d - 1) % 4
infected.remove(virusAt)
else:
d = (d + 1) % 4
infected.add(virusAt)
infectionCaused = True
virusAt = (virusAt[0] + dirs[d][0], virusAt[1] + dirs[d][1])
return infectionCaused
numInfections = 0
for i in range(10000):
if burst():
numInfections += 1
# Part 1 answer
print(numInfections)
clean = 0
infected = 1
weak = 2
flagged = 3
state = {k: infected for k in read()}
virusAt = (0, 0)
def burst2():
global state, d, virusAt
infectionCaused = False
currentState = state.get(virusAt, 0)
if currentState == clean:
d = (d + 1) % 4
state[virusAt] = weak
elif currentState == weak:
state[virusAt] = infected
infectionCaused = True
elif currentState == infected:
d = (d - 1) % 4
state[virusAt] = flagged
else: # FLAGGED
d = (d + 2) % 4
del state[virusAt]
virusAt = (virusAt[0] + dirs[d][0], virusAt[1] + dirs[d][1])
return infectionCaused
numInfections = 0
for i in range(10000000):
if burst2():
numInfections += 1
# part 2 answer
print (numInfections)
| [
"kerry@raspikidd.com"
] | kerry@raspikidd.com |
571860097351a8417fdd22e08e51b2f2e645009c | 095b9710fdaa4d964b99ac836f98182a29004bf7 | /ui_modules/datepicker.py | 1cc5401b0f7cbaf4c416182e875568ff29775019 | [] | no_license | Xeronel/opportunity_tracker | 17920e0155905c1f2c1bc23d9b58e9c2c429cb59 | fbdb637c450dec8f1c6e09cdf3900077b4931fa5 | refs/heads/master | 2021-01-20T05:48:18.820804 | 2017-03-20T19:42:41 | 2017-03-20T19:42:41 | 89,810,078 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 604 | py | from tornado.web import UIModule
from .util import get_path
class DatePicker(UIModule):
def render(self, name="date", label='Select Date',
classes="col-lg-3 col-md-3", style="",
required=True, readonly=True):
return self.render_string(get_path('datepicker.html'),
classes=classes,
style=style,
required=required,
readonly=readonly,
label=label,
name=name)
| [
"treische@shealy-solutions.com"
] | treische@shealy-solutions.com |
9aa4a709b2fb9c4126573b81ea202c4b9b894974 | 476439d06825eda08d417e5192fac230cf7c7ba1 | /myapp/models.py | 39797b005ab69f55fd2d6d3954699f1d30b9a1a9 | [] | no_license | moden-py/django_education | 36ee7c41a6cda24f4b622cf26ffcd868ece819c8 | 608cf32f47b4934894b9aa5d5f2ef20edfbbfbc6 | refs/heads/master | 2021-01-10T06:07:15.230112 | 2015-11-11T13:07:00 | 2015-11-11T13:07:00 | 45,917,055 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 49 | py | # from django.db import models
MAIN_USER_ID = 2
| [
"moden.py@yandex.ru"
] | moden.py@yandex.ru |
04483072d03f682fa80d3014d1f0fa2fc7e36601 | 1a1b857c67768f20de0df42a7edb87edd57d9a33 | /Quick_Sort/Quick_Sort_Practice_November_2.py | 1b6901fe97a57dc5b190a20e25f2f70f0af9dfb8 | [] | no_license | LilySu/Python_Practice | 7c7eb30c549239f27680f410d365289b67813c5e | 26767e64742149813ecbc91815454836ffce8b6e | refs/heads/master | 2023-07-29T01:14:19.751490 | 2021-08-15T01:09:41 | 2021-08-15T01:09:41 | 279,446,861 | 1 | 2 | null | 2020-10-09T04:10:40 | 2020-07-14T01:05:55 | Jupyter Notebook | UTF-8 | Python | false | false | 760 | py | def swap(a, b, arr):
arr[a], arr[b] = arr[b], arr[a]
def partition(elements, start, end):
pivot_index = start
pivot = elements[pivot_index]
while start < end:
while start < len(elements) and elements[start] <= pivot:
start += 1
while elements[end] > pivot:
end -= 1
if start < end:
swap(start, end, elements)
swap(pivot_index, end, elements)
return end
def quick_sort(elements, start, end):
if start < end:
pi = partition(elements, start, end)
quick_sort(elements, start, pi - 1)
quick_sort(elements, pi + 1, end)
if __name__ == "__main__":
elements = [8, 24, 92, 14, 3, 47]
quick_sort(elements, 0, len(elements) - 1)
print(elements) | [
"LilySu@users.noreply.github.com"
] | LilySu@users.noreply.github.com |
23d719ceebb01cacaa4ea14c9639243a3cce8d14 | 09f5439cd40586e5404ae2eb41399b4c8deb0542 | /core/forms.py | 776eb8ad05a9f3140a3a48d99e7a71842de6ce52 | [] | no_license | citi-onboarding/IntegrarJr | fc7876bfe4494808ef25c00d1996500c97c887b0 | 22f1989adfd5f6942379d373091e80a0d442d667 | refs/heads/deploy | 2022-12-13T08:47:21.517613 | 2018-12-11T00:12:46 | 2018-12-11T00:12:46 | 136,086,659 | 1 | 0 | null | 2022-12-08T01:11:41 | 2018-06-04T21:48:34 | JavaScript | UTF-8 | Python | false | false | 908 | py | from django import forms
class Contato(forms.Form):
comoConheceuOp=(
('Selecione', 'Como conheceu a Integrar?'),
('Redes sociais', 'Redes sociais'),
('Indicação', 'Indicação'),
('Pesquisa', 'Pesquisa'),
('Outros', 'Outros'),
)
nameContato = forms.CharField(label="",widget=forms.TextInput(attrs={'placeholder': 'Nome'}))
mailContato = forms.EmailField(label="",widget=forms.TextInput(attrs={'placeholder': 'Email'}))
phoneContato = forms.CharField(label="",widget=forms.TextInput(attrs={'placeholder': 'Telefone'}))
subjectContato = forms.CharField(label="",widget=forms.TextInput(attrs={'placeholder': 'Assunto'}))
messageContato = forms.CharField(label="", widget=forms.Textarea(attrs={'width':"100%", 'cols' : "30", 'rows': "5", 'placeholder': 'Mensagem'}))
meetContato = forms.ChoiceField(label="", choices=comoConheceuOp)
| [
"gas5@cin.ufpe.br"
] | gas5@cin.ufpe.br |
1bb9148ce95fde056cc739590a46fc4e5b644e2b | d99a94729002af2a4fc26ef83388e6e5e4aca877 | /bran/cross_validation/plot.py | e876e0a74dba22db467b3af5efa9cdbbdc1dbe19 | [
"BSD-3-Clause"
] | permissive | KI-labs/BRAN | c12fd364bce12fc5fab0b01cc63798cdfa7b1ff1 | ec6bc8ec63c84224c9d4f028d355be8c895dc725 | refs/heads/master | 2021-10-27T01:28:49.274887 | 2020-02-26T12:48:31 | 2020-02-26T12:48:31 | 210,864,690 | 7 | 1 | null | 2020-02-26T12:48:33 | 2019-09-25T14:24:36 | Python | UTF-8 | Python | false | false | 2,033 | py | import numpy as np
from matplotlib import pyplot as plt
from sklearn.metrics import confusion_matrix
from sklearn.utils.multiclass import unique_labels
def plot_confusion_matrix(y_true, y_pred, classes,
normalize=False,
title=None,
cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
if not title:
if normalize:
title = 'Normalized confusion matrix'
else:
title = 'Confusion matrix, without normalization'
# Compute confusion matrix
cm = confusion_matrix(y_true, y_pred)
# Only use the labels that appear in the data
classes = classes[unique_labels(y_true, y_pred).astype(int)]
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print("Normalized confusion matrix")
else:
print('Confusion matrix, without normalization')
print(cm)
fig, ax = plt.subplots()
im = ax.imshow(cm, interpolation='nearest', cmap=cmap)
ax.figure.colorbar(im, ax=ax)
# We want to show all ticks...
ax.set(xticks=np.arange(cm.shape[1]),
yticks=np.arange(cm.shape[0]),
# ... and label them with the respective list entries
xticklabels=classes, yticklabels=classes,
title=title,
ylabel='True label',
xlabel='Predicted label')
# Rotate the tick labels and set their alignment.
plt.setp(ax.get_xticklabels(), rotation=45, ha="right",
rotation_mode="anchor")
# Loop over data dimensions and create text annotations.
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i in range(cm.shape[0]):
for j in range(cm.shape[1]):
ax.text(j, i, format(cm[i, j], fmt),
ha="center", va="center",
color="white" if cm[i, j] > thresh else "black")
fig.tight_layout()
return ax | [
"s.sathyakumari@kigroup.de"
] | s.sathyakumari@kigroup.de |
0968d72186a5995c5e8612179a024c0364d6d6f3 | 5d4ae2e0cc1835d728ec8da04ac2186f5ecd6402 | /Part1/model.py | 7b49a75e783e96739adf9c09c75e6807623033ca | [
"MIT"
] | permissive | RobCrichton/ToyClimateModel | 25d77849bab57d2f5aa1d4ab7d2cd5a34c82b1f8 | b5f2e7de61aa844f207b938e2faebc03ae925204 | refs/heads/main | 2023-04-13T23:34:40.250881 | 2021-04-29T08:01:22 | 2021-04-29T08:01:22 | 361,738,556 | 0 | 0 | MIT | 2021-04-29T08:01:23 | 2021-04-26T12:17:04 | null | UTF-8 | Python | false | false | 1,058 | py | # model
import numpy as np
import matplotlib.pyplot as plt
import time
t = 0
temperature_planet = 200
dt = 60 * 10
heat_capacity = 1E5
insolation = 1370
sigma = 5.67E-8
planet_radius = 6.4E6
def AreaOfACircle(radius):
return np.pi * radius ** 2
def SurfaceAreaOfASphere(radius):
return 4 * np.pi * radius ** 2
def StefanBoltzmann(temperature):
return sigma * temperature ** 4
planet_disc_area = AreaOfACircle(planet_radius)
planet_sphere_area = SurfaceAreaOfASphere(planet_radius)
plt.scatter(y = temperature_planet, x = t, \
s = 2, \
color = 'blue')
plt.ion()
plt.xlabel('Time (s)')
plt.ylabel('Temperature (K)')
plt.show()
while True:
temperature_planet += \
dt \
* (planet_disc_area * insolation - planet_sphere_area * StefanBoltzmann(temperature_planet)) \
/ (heat_capacity * planet_sphere_area)
t += dt
plt.scatter( \
y = temperature_planet, \
x = t, \
s = 2, \
color = 'blue')
plt.pause(0.001)
time.sleep(0.001)
| [
"kryrob@gmail.com"
] | kryrob@gmail.com |
5e39409a4c2f2614414bd07dd991770a07dcd61e | c6ec067c3ee9c2e2f60cf2a0c1cc53b7b1a1f9ff | /genfsm | 5add8e9e778aff5dffe818e5c55ba4bbac23d5c5 | [] | no_license | varungadh/cozmo-tools | 7d1917f3bcf1c4a693397f915162368063212102 | 019aebfe952e22be9bd5215fccad19d8713c6402 | refs/heads/master | 2021-01-25T10:21:27.605314 | 2018-02-26T16:53:29 | 2018-02-26T16:53:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 24,344 | #!/usr/bin/env python3
"""
Finite State Machine generator for the cozmo_fsm package.
Modeled after the Tekkotsu stateparser tool.
Usage: genfsm [infile.fsm | -] [outfile.py | -]
Use '-' to indicate standard input or standard output. If a
second argument is not supplied, writes to infile.py, or to
standard output if the input was '-'.
To enter state machine notation use a line that contains
just $setup ''', followed by the lines of the state machine,
and ending with a line contaning just '''. This will result
in a definition of a setup() method for the state node class
you are defining. Example:
class MyNode(StateNode):
$setup '''
Say("Hello") =C=> Forward(50)
'''
See the cozmo_fsm/examples directory for examples of .fsm files
and the .py files they generate.
Note: install termcolor package to get messages displayed in color.
Author: David S. Touretzky, Carnegie Mellon University
"""
import sys, time, re
try:
from termcolor import cprint
except:
def cprint(string, color=None, file=None):
print(string)
class Token:
def __repr__(self):
return "<%s>" % self.__class__.__name__
# Lexer tokens
def isIdentifier(self): return isinstance(self,Identifier)
def isColon(self): return isinstance(self,Colon)
def isNewline(self): return isinstance(self,Newline)
def isEqual(self): return isinstance(self,Equal)
def isArrowHead(self): return isinstance(self,ArrowHead)
def isComma(self): return isinstance(self,Comma)
def isLBrace(self): return isinstance(self,LBrace)
def isRBrace(self): return isinstance(self,RBrace)
def isArglist(self): return isinstance(self,Arglist)
# Parser stage 1 tokens
def isLabelRef(self): return isinstance(self,LabelRef)
def isLabelDef(self): return isinstance(self,LabelDef)
def isConstructorCall(self): return isinstance(self,ConstructorCall)
def isIdentifierList(self): return isinstance(self,IdentifierList)
# Parser stage 2 token
def isTransition(self) : return isinstance(self,Transition)
# Parser stage 3 tokens
def isNodeDefinition(self): return isinstance(self,NodeDefinition)
# Lexer tokens:
class Identifier(Token):
    """A bare name: a node label, a class name, or a ``self.attr`` reference."""
    def __init__(self, name):
        self.name = name
    def __repr__(self):
        return "<Identifier {}>".format(self.name)
# Punctuation and whitespace tokens.  Each gets its own (payload-free)
# class so the parser stages can dispatch on token type alone.
class Colon(Token): pass
class Equal(Token): pass
class ArrowHead(Token) : pass
class Comma(Token) : pass
class LBrace(Token) : pass
class RBrace(Token) : pass
class Newline(Token) : pass
class Arglist(Token):
    """The verbatim text of a parenthesized argument list, parentheses included."""
    def __init__(self, value):
        self.value = value
    def __repr__(self):
        return "<Arglist {}>".format(self.value)
# Parser stage 1 and 2 tokens:
class IdentifierList(Token):
    """A brace-enclosed, comma-separated group of node label references."""
    def __init__(self, label_refs):
        self.label_refs = label_refs
    def __repr__(self):
        return "<IdentifierList {}>".format(",".join(self.label_refs))
class LabelDef(Token):
    """A label definition, i.e. ``name:`` preceding a node or transition."""
    def __init__(self, label):
        self.label = label
    def __repr__(self):
        return "<LabelDef {}>".format(self.label)
class LabelRef(Token):
    """A reference to a node or transition label defined elsewhere."""
    def __init__(self, label):
        self.label = label
    def __repr__(self):
        return "<LabelRef {}>".format(self.label)
class ConstructorCall(Token):
    """A class name paired with the literal text of its argument list."""
    def __init__(self, name, arglist):
        self.name = name
        self.arglist = arglist
    def __repr__(self):
        return "<ConstructorCall {}{}>".format(self.name, self.arglist)
# Parser stage 3 tokens
class NodeDefinition(Token):
    """A fully-specified state node: its label, class name, and arglist text."""
    def __init__(self, label, node_type, arglist):
        self.label = label
        self.node_type = node_type
        self.arglist = arglist
    def __repr__(self):
        prefix = self.label + ':' if self.label else ''
        return "<NodeDefinition {}{}{}>".format(prefix, self.node_type, self.arglist)
class Transition(Token):
    """A transition arc: its class name, arglist text, and endpoint labels.

    sources and destinations start out empty; parser3 fills them in when it
    chains nodes and transitions together.
    """
    def __init__(self, label, trans_type, arglist):
        self.label = label
        self.trans_type = trans_type
        self.arglist = arglist
        self.sources = []
        self.destinations = []
    def __repr__(self):
        prefix = self.label + ':' if self.label else ''
        def fmt(nodes):
            # A single endpoint is shown bare; groups get {a,b,...} braces.
            return nodes[0] if len(nodes) == 1 else '{%s}' % ','.join(nodes)
        return "<Transition {}={}{}=>{}>".format(
            fmt(self.sources), prefix, self.trans_type, fmt(self.destinations))
# Fixed punctuation and whitespace lexemes, each mapped to the token class
# to emit (None means the character is silently discarded).  A multi-character
# lexeme must appear before any single-character prefix of itself.
_lex_punc_table = \
    (
     (' ', None),
     ('\t', None),
     ('\n', Newline),
     ('\r\n', Newline), # must precede '\r' in this list for line counting to work correctly
     ('\r', Newline),
     (':', Colon),
     (',', Comma),
     ('=>', ArrowHead), # must precede Equal in this list
     ('=', Equal),
     ('{', LBrace),
     ('}', RBrace)
    )
# Line number of the .fsm source currently being processed; reset by
# generate_machine for each pass and advanced by handle_newline.
current_line = 0

def handle_newline():
    """Advance the source line counter by one (used for error reporting)."""
    global current_line
    current_line = current_line + 1
def lexer(string):
    """Convert input string into a sequence of lexer tokens.

    Repeatedly strips one lexeme off the front of *string*: fixed
    punctuation/whitespace (via _lex_punc_table), '#' comments,
    parenthesized argument lists, and identifiers.  Unrecognized text is
    reported and the remainder of the offending line is discarded.
    """
    # Raw-string pattern: avoids invalid-escape-sequence warnings on
    # Python >= 3.12 (same regex text as before).  Matches an identifier
    # optionally prefixed with 'self.'.
    r_identifier = re.compile(r'((self\.)|)\w+')
    tokens = []
    while string:
        # Fixed punctuation / whitespace lexemes.
        was_in_table = False
        for chars, tok in _lex_punc_table:
            if string.startswith(chars):
                if tok:
                    this_token = tok()
                    tokens.append(this_token)
                    if this_token.isNewline(): handle_newline()
                string = string[len(chars):]
                was_in_table = True
                break
        if was_in_table: continue
        # Comment: discard through the end of the line (newline is kept).
        if string[0] == '#':
            string = lexer_skip_comment(string)
            continue
        # Argument list: capture the balanced parenthesized text verbatim.
        if string[0] == '(':
            arglist, string = lexer_build_arglist(string)
            tokens.append(arglist)
            continue
        # Identifier (possibly 'self.'-qualified).
        match_result = r_identifier.match(string)
        if match_result:
            endpos = match_result.span()[1]
            tokens.append(Identifier(string[0:endpos]))
            string = string[endpos:]
            continue
        # If we reach here, we've found something indigestible.
        report_line_error("syntax error at '%s'" % error_fragment(string))
        # Resynchronize at the next line, or give up at end of input.
        next_line = string.find('\n')
        if next_line > -1:
            string = string[next_line:]
            continue
        break
    return tokens
def lexer_skip_comment(string):
    """String begins with '#'.  Skip everything from there to the first newline.

    Returns the remainder of *string* starting at the line terminator; the
    terminator itself is kept so the lexer still emits a Newline token and
    the line count stays correct.  Raises Exception if no terminator exists.
    """
    # Cut at whichever terminator comes FIRST.  The previous version
    # preferred '\r' unconditionally, so in a mostly-'\n' file a later
    # '\r' could swallow entire source lines along with the comment.
    positions = [p for p in (string.find('\r'), string.find('\n')) if p != -1]
    if not positions:
        raise Exception('Missing newline at end of comment.')
    return string[min(positions):]
def lexer_build_arglist(string):
    """Helper for lexer.  Parses an argument list and returns it plus the remainder of the string.

    *string* starts at the opening '('.  A stack of expected closers is kept
    so nested (), [], {} pairs match correctly; characters inside single- or
    double-quoted strings are ignored for bracket matching.  (Backslash-escaped
    quotes inside string literals are not handled.)
    """
    ostring = string
    lookstack = [')']   # closers we still expect, innermost first
    pos = 1
    while lookstack and pos < len(string):
        ch = string[pos]
        if lookstack[0] == ch:
            del lookstack[0]
        elif lookstack[0] not in '\'"':   # not currently inside a string literal
            if ch == '(':
                lookstack.insert(0, ')')
            elif ch == '[':
                lookstack.insert(0, ']')
            elif ch == '{':
                lookstack.insert(0, '}')
            elif ch in '\'"':
                # Entering a quoted string: expect the matching quote and
                # ignore brackets until it closes.  (Previously quotes were
                # never pushed, so a bracket inside a string literal
                # corrupted the bracket matching.)
                lookstack.insert(0, ch)
            elif ch in ')]}':
                break
        pos += 1
    if lookstack:
        cleanstr = string.strip()
        p = min(pos, len(cleanstr)-1)
        report_line_error("Ill-formed argument list at '%s' near '%s'" %
                          (error_fragment(ostring), cleanstr[p]))
        return Arglist(''), ''
    return Arglist(string[0:pos]), string[pos:]
def parser1(lex_tokens):
    """Parser stage 1: group raw lexer tokens into LabelDef, ConstructorCall,
    LabelRef, and IdentifierList tokens.  Consumes lex_tokens destructively
    and returns the new token list."""
    p1tokens = []
    while lex_tokens:
        if lex_tokens[0].isIdentifier():
            # An identifier must be a label definition, constructor call, or label reference.
            if len(lex_tokens) > 1 and lex_tokens[1].isColon(): # Label definition
                p1tokens.append(LabelDef(lex_tokens[0].name))
                del lex_tokens[0:2]
                continue
            if len(lex_tokens) > 1 and lex_tokens[1].isArglist(): # Constructor call
                p1tokens.append(ConstructorCall(lex_tokens[0].name,lex_tokens[1].value))
                del lex_tokens[0:2]
                continue
            # Default case: identifier assumed to be a label reference.
            p1tokens.append(LabelRef(lex_tokens[0].name))
            del lex_tokens[0]
            continue
        # A left brace introduces a comma-separated list of label references.
        if lex_tokens[0].isLBrace():
            del lex_tokens[0]
            label_refs = []
            need_comma = False    # True when a comma must come next
            need_rbrace = True    # cleared once the closing brace is seen
            while lex_tokens:
                if lex_tokens[0].isRBrace():
                    p1tokens.append(IdentifierList(label_refs))
                    del lex_tokens[0]
                    need_rbrace = False
                    break
                if need_comma and lex_tokens[0].isComma():
                    del lex_tokens[0]
                    need_comma = False
                elif not need_comma and lex_tokens[0].isIdentifier():
                    label_refs.append(lex_tokens[0].name)
                    del lex_tokens[0]
                    need_comma = True
                else:
                    # Unexpected token inside the braces: report and skip it.
                    report_line_error('Syntax error in identifier list near %s.' %lex_tokens[0])
                    del lex_tokens[0]
            if not label_refs:
                report_line_error('Empty identifier list {}.')
            if label_refs and not need_comma:
                report_line_error('Trailing comma in identifier list {... ,}.')
            if need_rbrace:
                report_line_error('Missing right brace in identifier list {....')
            continue
        if lex_tokens[0].isRBrace():
            report_line_error('Extraneous right brace.')
            del lex_tokens[0]
            continue
        if lex_tokens[0].isEqual():
            p1tokens.append(lex_tokens[0])
            del lex_tokens[0]
            # Special handling for transitions: convert identifier to
            # labelref if followed by ":" or to constructor call with
            # no arguments if followed by "=>". Otherwise just
            # continue and we'll process "identifier (" on the next
            # iteration.
            if len(lex_tokens) < 2:
                report_line_error('Syntax error in transition near %s' %
                                  (lex_tokens[0] if len(lex_tokens) > 0 else 'end of line'))
                continue
            # Assemble optional label for transition.
            if lex_tokens[0].isIdentifier() and lex_tokens[1].isColon():
                p1tokens.append(LabelDef(lex_tokens[0].name))
                del lex_tokens[0:2]
                if len(lex_tokens) < 2:
                    report_line_error('Syntax error in transition near %s' %
                                      (lex_tokens[0] if len(lex_tokens) > 0 else 'end of line'))
                    continue
            # For transitions, an identifier with no arglist is still a constructor call.
            if lex_tokens[0].isIdentifier():
                if len(lex_tokens) >= 2 and not lex_tokens[1].isArglist():
                    p1tokens.append(ConstructorCall(lex_tokens[0].name,'()'))
                    del lex_tokens[0]
            continue
        # Default: pass the item (arrowhead, newline) through to the next stage.
        if lex_tokens[0].isNewline(): handle_newline()
        p1tokens.append(lex_tokens[0])
        del lex_tokens[0]
    return p1tokens
def parser2(p1tokens):
    """Parser stage 2: build NodeDefinition tokens (generating labels for
    unlabeled nodes) and Transition tokens (label and constructor only;
    sources/destinations are attached later by parser3)."""
    p2tokens = []
    while p1tokens:
        if p1tokens[0].isNewline():
            handle_newline()
            p2tokens.append(p1tokens.pop(0))
            continue
        # Must begin with a node reference or definition.
        if p1tokens[0].isLabelDef():
            label = p1tokens[0].label
            # labeled constructor call
            if p1tokens[1].isConstructorCall():
                call = p1tokens[1]
                p2tokens.append(NodeDefinition(label, call.name, call.arglist))
                del p1tokens[0:2]
                continue
            else:
                # Heuristic hint: a capitalized label reference was probably
                # meant to be a constructor call with no arguments.
                if p1tokens[1].isLabelRef() and p1tokens[1].label[0].isupper():
                    hint = "\n\tDid you mean '%s()' ?" % p1tokens[1].label
                else:
                    hint = ""
                report_line_error("Label '%s:' should be followed by a node definition, not %s.%s"
                                  % (label, p1tokens[1], hint))
                del p1tokens[0]
                continue
        if p1tokens[0].isConstructorCall():
            # Unlabeled constructor call: generate a label for it.
            call = p1tokens[0]
            label = gen_name(call.name)
            p2tokens.append(NodeDefinition(label, call.name, call.arglist))
            del p1tokens[0]
            continue
        if p1tokens[0].isEqual(): # start of a transition
            # NOTE(review): the p1tokens[0] lookups below assume more tokens
            # follow the '='; input truncated here would raise IndexError --
            # confirm earlier stages guarantee a following token.
            del p1tokens[0]
            label = None
            trans = None    # NOTE(review): appears unused
            # look for optional transition label
            if p1tokens[0].isLabelDef():
                label = p1tokens[0].label
                del p1tokens[0] # labeldef
            # look for transition constructor
            if p1tokens[0].isConstructorCall():
                trans_type = p1tokens[0].name
                trans_args = p1tokens[0].arglist
            else:
                report_line_error('Ill-formed transition: should not see %s here.' % p1tokens[0])
                del p1tokens[0]
                continue
            del p1tokens[0] # constructor
            if not p1tokens[0].isArrowHead():
                report_line_error("Error in transition: expected '=>' not %s." % p1tokens[0])
            del p1tokens[0] # arrowhead
            # Expand an abbreviated transition code (e.g. 'C') to its class
            # name; unknown codes pass through unchanged.
            trans_class = transition_names.get(trans_type,trans_type)
            if not label:
                label = gen_name(trans_class)
            p2tokens.append(Transition(label,trans_class,trans_args))
            continue
        # Pass an identifier list or label reference along unmodified.
        if p1tokens[0].isIdentifierList() or p1tokens[0].isLabelRef():
            p2tokens.append(p1tokens[0])
            del p1tokens[0]
            continue
        else:
            report_line_error("A %s token is not legal in this context." % p1tokens[0])
            del p1tokens[0]
            continue
    return p2tokens
# Maps the abbreviated transition codes used in .fsm notation (the X in
# '=X=>') to cozmo_fsm transition class names.  Codes not in this table are
# passed through unchanged (see parser2's transition_names.get call).
transition_names = dict(
    N = 'NullTrans',
    T = 'TimerTrans',
    C = 'CompletionTrans',
    S = 'SuccessTrans',
    F = 'FailureTrans',
    D = 'DataTrans',
    TM = 'TextMsgTrans',
    RND = 'RandomTrans',
    PILOT = 'PilotTrans',
    Tap = 'TapTrans',
    Aruco = 'ArucoTrans',
    Next = 'NextTrans',
    CNext = 'CNextTrans',
    SayData = 'SayDataTrans',
    Hear = 'HearTrans'
    )
def gen_name(base_name, name_counts=dict()):
    """Generate a unique lowercase label for an unlabeled node or transition.

    Successive calls with the same base name yield name1, name2, ...
    The mutable default dict is deliberate: it persists across calls and
    serves as the per-name counter store.  A leading 'self.' is stripped.
    """
    stem = base_name.lower()
    if stem.startswith('self.'):
        stem = stem[len('self.'):]
    next_count = name_counts.get(stem, 0) + 1
    name_counts[stem] = next_count
    return '%s%d' % (stem, next_count)
def parser3(p2tokens):
    """Parser stage 3: chain nodes and transitions by filling in each
    Transition's sources and destinations lists.  Returns the list of
    NodeDefinition and Transition tokens."""
    current_node = None          # labels of the node(s) most recently seen
    need_destination = False     # a transition is still awaiting its targets
    p3tokens = []
    must_transition = False      # a bare node reference must be followed by a transition
    while p2tokens:
        # Skip blank lines; a newline cancels the must-transition requirement.
        while p2tokens and p2tokens[0].isNewline():
            must_transition = False
            handle_newline()
            del p2tokens[0]
        if not p2tokens: break
        if p2tokens[0].isLabelRef():
            must_transition = True
            current_node = [p2tokens[0].label]
            del p2tokens[0]
        elif p2tokens[0].isNodeDefinition():
            must_transition = True
            current_node = [p2tokens[0].label]
            p3tokens.append(p2tokens[0])
            del p2tokens[0]
        elif p2tokens[0].isIdentifierList():
            must_transition = True
            current_node = p2tokens[0].label_refs
            del p2tokens[0]
        elif not current_node:
            report_line_error('Node reference expected before this transition: %s' % p2tokens[0])
        # node definition could be followed by newlines
        while p2tokens and p2tokens[0].isNewline():
            must_transition = False
            handle_newline()
            del p2tokens[0]
        if not p2tokens: break
        # next item must be a transition
        if p2tokens[0].isTransition():
            # check for source
            if not current_node:
                report_line_error('Transition %s has no source nodes.' % p2tokens[0].label)
            p2tokens[0].sources = current_node
            need_destination = True
            p3tokens.append(p2tokens[0])
            del p2tokens[0]
        elif must_transition:
            report_line_error("Expected a transition after '%s', not %s." %
                              (','.join(current_node), p2tokens[0]))
            del p2tokens[0]
            continue
        while p2tokens and p2tokens[0].isNewline():
            handle_newline()
            del p2tokens[0]
        if not p2tokens:
            report_line_error('Missing destination for transition %s.' % p3tokens[-1].label)
            continue
        # next item must be a destination for the transition
        if p2tokens[0].isLabelRef():
            current_node = [p2tokens[0].label]
            del p2tokens[0]
            if need_destination:
                if p3tokens[-1].isTransition():
                    p3tokens[-1].destinations = current_node
                    need_destination = False
            continue
        elif p2tokens[0].isNodeDefinition():
            # Leave the definition in place: it becomes the destination now
            # and is reprocessed as a source node on the next iteration.
            current_node = [p2tokens[0].label]
            p3tokens[-1].destinations = current_node
            continue # process the node definition on the next iteration
        elif p2tokens[0].isIdentifierList():
            current_node = p2tokens[0].label_refs
            del p2tokens[0]
            if need_destination:
                if p3tokens[-1].isTransition():
                    p3tokens[-1].destinations = current_node
                    need_destination = False
        else:
            raise Exception('parser3 is confused by %s.' % p2tokens)
    return p3tokens
def generate_machine(lines):
    """Parse the collected $setup lines and emit the generated setup() method.

    Runs the lexer and the three parser passes over the joined source,
    aborting as soon as any pass flags an error (the report_* helpers set
    the module-level found_error), then validates label definitions and
    references before writing the generated code to out_f via emit_line().
    """
    global indent_level, current_line, found_error
    found_error = False
    current_line = starting_line
    tok = lexer(''.join(lines))
    if found_error: return
    current_line = starting_line
    p1tokens = parser1(tok)
    if found_error: return
    current_line = starting_line
    p2tokens = parser2(p1tokens)
    if found_error: return
    current_line = starting_line
    p3tokens = parser3(p2tokens)
    if found_error: return
    # Collect every defined label; duplicates are reported as errors.
    labels = {}
    for item in p3tokens:
        if item.label in labels:
            report_global_error("Label '%s:' is multiply defined." % item.label)
        elif item.isNodeDefinition() or item.isTransition():
            labels[item.label] = item
        else:
            raise Exception("Problem in generate_machine: %s" % item)
    # Check for undefined references
    for item in p3tokens:
        if item.isTransition():
            for ref in item.sources + item.destinations:
                if ref not in labels:
                    # A capitalized name suggests the user meant a node
                    # constructor call rather than a label reference.
                    hint = (" Should it be %s() ?" % ref) if ref[0].isupper() else ""
                    report_global_error("Label '%s' was referenced but never defined.%s" %
                                        (ref,hint))
                    labels[ref] = None
    # Write out the state machine source as a comment (a docstring on setup()).
    emit_line('def setup(self):')
    indent_level += 4
    emit_line('"""')
    indent_level += 4
    indent = ' ' * 4
    out_f.write(indent + indent.join(lines))
    indent_level -= 4
    emit_line('"""')
    emit_line('')
    emit_line('# Code generated by genfsm on %s:' % time.strftime('%c'))
    emit_line('')
    # Generate the nodes, then the transitions
    for item in p3tokens:
        if item.isNodeDefinition():
            emit_line('%s = %s%s .set_name("%s") .set_parent(self)' %
                      (item.label, item.node_type, item.arglist, item.label))
    for item in p3tokens:
        if item.isTransition():
            emit_line('')
            emit_line('%s = %s%s .set_name("%s")' %
                      (item.label, item.trans_type, item.arglist, item.label))
            emit_line('%s .add_sources(%s) .add_destinations(%s)' %
                      (item.label, ','.join(item.sources), ','.join(item.destinations)))
    emit_line('')
    emit_line('return self')
indent_level = 0
def emit_line(line):
    """Write one line of generated code to out_f, indented by indent_level spaces."""
    indented = ' ' * indent_level + line
    out_f.write(indented + '\n')
def process_file():
    """Copy in_f to out_f verbatim, expanding each $setup block in place.

    A $setup line opens a state-machine block delimited either by braces
    or by a matching triple-quote pair; the enclosed lines are collected
    and handed to generate_machine().  line_cache keeps every input line
    (1-indexed) so error reports can echo the offending line.
    """
    global line_cache, current_line, starting_line, indent_level
    line_cache = [None] # dummy line 0
    current_line = 0
    # Matches e.g. `$setup {`, `$setup """`, `$setup '''`, optionally
    # followed by a trailing comment.
    r_setup = re.compile('^\s*\$setup\s*((\"\"\")|(\'\'\')|\{)\s*((\#.*)|)$')
    r_indent = re.compile('^\s*')
    while True:
        line = in_f.readline()
        if not line: break
        line_cache.append(line)
        current_line += 1
        # Echo lines to the output file until we reach a $setup line.
        if line.find('$setup') == -1:
            out_f.write(line)
            continue
        setup_match = r_setup.match(line)
        if not setup_match:
            report_line_error("Incorrect $setup syntax: '%s'" % line.strip())
            continue
        # First character of the opening delimiter decides the closer.
        delim = setup_match.group(1)[0]
        if delim == '{':
            close_delim = '}'
            r_end = re.compile('\s*\}\s*$')
        else:
            close_delim = delim * 3
            r_end = re.compile('^\s*' + close_delim)
        # Collect the lines of the state machine.
        starting_line = current_line + 1
        # Generated code is indented to match the $setup line itself.
        indent_level = r_indent.match(line).span()[1]
        lines = []
        while True:
            line = in_f.readline()
            if not line:
                report_line_error("State machine at line %s ended without closing %s." %
                                  (starting_line-1, close_delim))
                return
            current_line += 1
            line_cache.append(line)
            if r_end.match(line): break
            lines.append(line)
        # Now parse the collected lines and generate code.
        generate_machine(lines)
found_error = False
def report_line_error(error_text):
    """Print the offending cached source line plus `error_text` in red on
    stderr and latch the module-level found_error flag."""
    global found_error
    cprint(line_cache[current_line].rstrip(), color='red', file=sys.stderr)
    cprint('Line %d: %s\n' % (current_line, error_text), color='red', file=sys.stderr)
    found_error = True
def report_global_error(error_text):
    """Report an error not tied to a single source line; latches found_error."""
    global found_error
    cprint('Error: %s\n' % error_text, color='red', file=sys.stderr)
    found_error = True
def error_fragment(string):
    """Return a one-line preview of `string`: its first stripped line,
    truncated to 20 characters with a trailing '...' when longer."""
    first_line, _, _ = string.strip().partition('\n')
    if len(first_line) > 20:
        return first_line[:20] + "..."
    return first_line
if __name__ == '__main__':
    # Command line: genfsm [infile.fsm | -] [outfile.py | -]
    if len(sys.argv) < 2 or len(sys.argv) > 3:
        print('Usage: genfsm [infile.fsm | -] [outfile.py | -]')
        sys.exit(0)
    infile_name = sys.argv[1]
    if len(sys.argv) == 3:
        outfile_name = sys.argv[2]
    elif infile_name == '-':
        outfile_name = '-'
    else:
        # Default output name: replace the input extension with .py
        outfile_name = infile_name[0:infile_name.rfind('.')] + ".py"
    if infile_name == outfile_name:
        print("Output file name can't be the same as input file.\nDid you mean %s ?" %
              (infile_name[0:infile_name.rfind('.')] + ".fsm"))
        sys.exit(1)
    try:
        # '-' selects stdin/stdout instead of a named file.
        with (open(infile_name) if infile_name != '-' else sys.stdin) as in_f:
            try:
                with (open(outfile_name,'w') if outfile_name != '-' else sys.stdout) as out_f:
                    process_file()
                    if not found_error:
                        cprint('Wrote generated code to %s.' %
                               (outfile_name if outfile_name != '-' else 'standard output'),
                               color='green')
            except Exception as e:
                # NOTE(review): this handler also catches exceptions raised
                # while processing, not only while opening the output file,
                # so the message can be misleading.
                print('Error opening output file: %s' % e)
                import traceback
                traceback.print_exc()
                sys.exit(1)
    except Exception as e:
        print('Error opening input file: %s' % e)
        sys.exit(1)
    sys.exit(0)
| [
"dst@cs.cmu.edu"
] | dst@cs.cmu.edu | |
1475f679e11085d7a8cd28fd3cafdd6cc4d78916 | b31567d271c9dfc4041e4f95531028b3f3a704e1 | /Languages/short/itumbuso.py | 90e75f80de5a73859f62cbf6df93b449822816fa | [] | no_license | piantado/NumberSystemsCorpus | e45b194190617deb419bc508c8e7ca4926f36fc8 | f2bc76a6c5b8243e697b6d1284207281cf71925d | refs/heads/master | 2020-12-24T14:10:49.078848 | 2015-03-26T23:17:08 | 2015-03-26T23:17:08 | 16,675,092 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 621 | py | @article{connellprolegomena1994,
author = {Bruce Connel},
title = {The Lower Cross Languages: A Prologemona to the Classification of the Cross River Languages },
journal = {The Journal of West African Languages},
pages = {43-44},
year = {1994},
localfile = {afr5}
}
f[1] = "k\textepsilon\n"
f[2] = "iba"
f[3] = "ita"
f[4] = "inaa\ng"
f[5] = "ition"
f[6] = "it\textepsilon\k\textepsilon\n"
f[7] = "itjaba"
f[8] = "itja", "ita"
f[9] = "anan", "k\textepsilon\n"
f[10] = "dwob"
f[12] = "dwobeba"
f[15] = "\textepsilon\fuud"
f[20] = "\textepsilon\dib"
f[100] = "iki\textepsilon\" | [
"spiantado@gmail.com"
] | spiantado@gmail.com |
7f982cc85d4fdd3bb49da9a1c34b8cf4697d13d6 | bbe6d4089bd96189c42ebb0a6a3394ef71bdb58c | /gautamgs@bu.edu_hw1/gautamgs@bu.edu_hw1_1_5.py | f5cc0697fdf2c255ab3a3b5a390c787e8ec01144 | [] | no_license | gautam-ergo/Python-Assignments | c8f314ce0574e81d2b85e7020110a17b652ba1b8 | 0ebd7230e060b3d3c6972aec8648d8716171521c | refs/heads/master | 2020-03-30T02:56:33.695982 | 2019-03-25T15:19:21 | 2019-03-25T15:19:21 | 150,659,470 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 633 | py | """
CS 521 Information Structures with Python
#########################################
Module - HW 1
Creation Date - 09/18/2018
Student Name - Gautam Gowrishankar
Intent:
Using Python to Perform Mathematical Computations
"""
#For Displaying the current version of Python
import platform
print ("Python version being used for this code - ", platform.python_version(),'\n')
#Calculation
Numerator = (4.5 * 9.5)-(2.5 * 3)
Denominator = (45.5 - 3.5)
print ("Result of the expression (4.5 * 9.5)-(2.5 * 3) / (45.5 - 3.5) is = %.3f" %(Numerator/Denominator) )
print ("\nEnd of Program\n")
| [
"arunkumar.vls@gmail.com"
] | arunkumar.vls@gmail.com |
597faaffcc9cf04066be1f5093b42c4a300c83fe | a80f73c8b5f2b807b4ec6d1c5c1c781ba0bfdc3a | /projecteuler/problem_36.py | f7f3be92c181dddd068bda9115ec86cde7f93cc7 | [] | no_license | Himanshu-Mishr/projecteuler | 215d30c1b2742bb2e8f95336db3cdb4799f78680 | 419be91e480c9f29911f3370c443f0abb528f033 | refs/heads/master | 2021-01-13T02:30:24.313301 | 2014-09-07T05:22:39 | 2014-09-07T05:22:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 774 | py | #---------------------------------------------------#
# Title: problem 36 #
# Author: Himanshu Mishra #
# email: himanshu.m786@gmail.com #
#-__________________________________________________#
import time
"""
About the Program :-
problem 36 of project euler
"""
def main(limit=1000000):
    """Project Euler problem 36: sum the numbers below `limit` that are
    palindromic in both base 10 and base 2.

    `limit` defaults to 1000000 (the original problem bound), so existing
    callers of main() are unaffected.  Each double-base palindrome is
    printed as it is found, followed by the elapsed time.

    Returns 0 on completion (script-style exit status).
    """
    start = time.time()
    # Renamed from `sum`, which shadowed the builtin.
    total = 0
    for i in range(1, limit):
        decimal = str(i)
        # Palindrome test by slicing instead of list()+reverse().
        if decimal == decimal[::-1]:
            # Only check the binary form when the decimal form passes.
            bin_part = bin(i)
            binary = bin_part[2:]  # strip the '0b' prefix
            if binary == binary[::-1]:
                total += i
                print(i, bin_part, total)
    print("time taken is ", time.time() - start)
    return 0
# Script entry point.
if __name__ == '__main__':
    main()
"himanshu.m786@gmail.com"
] | himanshu.m786@gmail.com |
268724a43ed44dc6ecebf9539dbef83b464927dd | 83b800abbace7e9342b4147032374f449338d93f | /risc_control/src/Vision_controller.py | 72e26dfba69324a1700a9026be5064099d0e5f05 | [] | no_license | rajnikant1010/Public_ARdroneRISC | 8725b364c5de4cd94ad45b57b503d55fc44061f9 | a05bdca63691d89324f925a1c108e10b061b2321 | refs/heads/master | 2021-01-11T15:08:45.716708 | 2017-01-28T17:34:02 | 2017-01-28T17:34:02 | 80,298,674 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,885 | py | #!/usr/bin/env python
'''======================================================
Created by: Li Yin
Last updated: March 2015
File name: Vision_controller.py
Organization: RISC Lab, Utah State University
Notes: ECE6930 Final Project
======================================================'''
#================================#
# Libraries/modules Needed #
#================================#
import roslib; roslib.load_manifest('ardrone_tutorials')
roslib.load_manifest('risc_msgs')
import rospy
from math import *
import rospkg
import numpy as np
import scipy.linalg as la
import time
#=======================#
# Messages Needed #
#=======================#
from risc_msgs.msg import *
#========================#
# Globals #
#========================#
yaw_kp = 1.5
throttle_kp = .5
LM = Landmark()
LM_old = 0
width = 640
height = 360
#==================#
# Publishers #
#==================#
pub_ctrl = rospy.Publisher('/controls', Controls, queue_size = 1)
#================#
# Get Rois #
#================#
def GetRois(S):
    """ROS subscriber callback: cache the most recent 'pink' landmark.

    Scans the landmarks of the first object in the message and stores the
    one named 'pink' in the module-level LM for Basic_Controller to use.
    NOTE(review): only S.Obj[0] is inspected; presumably a single tracked
    object is expected — confirm against the publisher.
    """
    global LM
    for i in range(len(S.Obj[0].landmarks)):
        if S.Obj[0].landmarks[i].name == 'pink':
            LM = S.Obj[0].landmarks[i]
#========================#
# Basic Controller #
#========================#
def Basic_Controller():
    """Proportional yaw/throttle controller that centers the tracked
    landmark (module-level LM) in the camera image, then publishes the
    resulting command on /controls.

    NOTE(review): LM.x/LM.y are compared against LM_old (initialized to 0)
    as a has-a-detection sentinel; if no landmark was seen the error terms
    fall back to 0.  Confirm this sentinel logic against GetRois.
    """
    global LM, yaw_kp, width, height, throttle_kp
    # Gravity and vehicle mass used for the hover-thrust feedforward term.
    g = 9.81
    m = .45
    Ctrl = Controls()
    # set controls object array length to 1
    Ctrl.Obj = [Control()]*1
    #=============================================#
    # Get Errors: normalized offset of the        #
    # landmark from the image center, in [-1, 1]. #
    #=============================================#
    if LM.x != LM_old:
        yaw_error = -(LM.x - .5*width)/(.5*width)
    else:
        yaw_error = 0
    if LM.y != LM_old:
        T_error = -(LM.y - .5*height)/(.5*height)
    else:
        T_error = 0
    #==========================================#
    # Set Controls: simple proportional gains. #
    #==========================================#
    psi_cmd = yaw_kp*yaw_error
    T_cmd = T_error*throttle_kp
    # Controls for Ardrone
    # -phi = right... +phi = left
    # -theta = back... +theta = forward
    # -psi = right... +psi = left
    ctrl = Control()
    ctrl.name = "quad"
    ctrl.phi = 0
    ctrl.theta = 0
    ctrl.psi = psi_cmd
    # Hover thrust (m*g) plus the proportional correction.
    ctrl.T = g*m+T_cmd
    Ctrl.Obj[0] = ctrl
    pub_ctrl.publish(Ctrl)
#===================#
# Main #
#===================#
if __name__=='__main__':
    import sys
    rospy.init_node('Image_controller')
    #=====================================#
    #    Set up Publish/Subscribe Loop    #
    #=====================================#
    # Run the control loop at 30 Hz.
    r = rospy.Rate(30)
    while not rospy.is_shutdown():
        # NOTE(review): the subscriber is re-created on every iteration;
        # registering it once before the loop would likely suffice.
        sub_rois = rospy.Subscriber('/land_rois' , Observed_rois, GetRois)
        Basic_Controller()
        r.sleep()
| [
"raj.drdo@gmail.com"
] | raj.drdo@gmail.com |
fcd2a44ea9a00f0ce40db7edf17b71276a96c6a7 | 6baf51ce6eb5bd033f05995a20357c0d30e3a0ea | /train_mvr.py | c0a0a72dae263d57b14a356679106326cbe91a51 | [] | no_license | wx-b/DSS | e6412caba86c7efd37ac16f169d83cfbb2d53beb | d96260c8c0b926ba2fd43d82eb3e0afd970a046a | refs/heads/master | 2023-03-13T15:22:43.398263 | 2021-03-17T14:43:18 | 2021-03-17T14:43:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,914 | py | import argparse
import time
import numpy as np
import git
import os
import logging
import config
import torch
import torch.optim as optim
from DSS.utils import tolerating_collate
from DSS.misc.checkpoints import CheckpointIO
from DSS.utils.sampler import WeightedSubsetRandomSampler
from DSS import logger_py, set_deterministic_
set_deterministic_()
# Arguments
parser = argparse.ArgumentParser(
    description='Train implicit representations without 3D supervision.'
)
parser.add_argument('--config', type=str,
                    default="configs/donut_dss_complete.yml", help='Path to config file.')
parser.add_argument('--no-cuda', action='store_true', help='Do not use cuda.')
parser.add_argument('--exit-after', type=int, default=600,
                    help='Checkpoint and exit after specified number of '
                    'seconds with exit code 2.')
args = parser.parse_args()
# Experiment config: the chosen file is overlaid on configs/default.yaml.
cfg = config.load_config(args.config, 'configs/default.yaml')
is_cuda = (torch.cuda.is_available() and not args.no_cuda)
device = torch.device("cuda" if is_cuda else "cpu")
# Shorthands pulled out of the config for use in the training loop below.
out_dir = os.path.join(cfg['training']['out_dir'], cfg['name'])
backup_every = cfg['training']['backup_every']
exit_after = args.exit_after
lr = cfg['training']['learning_rate']
batch_size = cfg['training']['batch_size']
batch_size_val = cfg['training']['batch_size_val']
n_workers = cfg['training']['n_workers']
model_selection_metric = cfg['training']['model_selection_metric']
# +1 means "bigger is better", -1 means "smaller is better".
if cfg['training']['model_selection_mode'] == 'maximize':
    model_selection_sign = 1
elif cfg['training']['model_selection_mode'] == 'minimize':
    model_selection_sign = -1
else:
    raise ValueError('model_selection_mode must be '
                     'either maximize or minimize.')
# Output directory
if not os.path.exists(out_dir):
    os.makedirs(out_dir)
# Begin logging also to the log file
fileHandler = logging.FileHandler(os.path.join(out_dir, cfg.training.logfile))
fileHandler.setLevel(logging.DEBUG)
logger_py.addHandler(fileHandler)
# Record the current git commit for reproducibility.
repo = git.Repo(search_parent_directories=False)
sha = repo.head.object.hexsha
logger_py.debug('Git commit: %s' % sha)
# Data
train_dataset = config.create_dataset(cfg.data, mode='train')
val_dataset = config.create_dataset(cfg.data, mode='val')
val_loader = torch.utils.data.DataLoader(
    val_dataset, batch_size=batch_size_val, num_workers=int(n_workers // 2),
    shuffle=False, collate_fn=tolerating_collate,
)
# data_viz = next(iter(val_loader))
model = config.create_model(
    cfg, camera_model=train_dataset.get_cameras(), device=device)
# Create rendering objects from loaded data
cameras = train_dataset.get_cameras()
lights = train_dataset.get_lights()
# Optimizer: one parameter group per learnable point attribute, each with
# its own learning rate.
param_groups = []
if cfg.model.model_kwargs.learn_normals:
    param_groups.append(
        {"params": [model.normals], "lr": 0.01, 'betas': (0.5, 0.9)})
if cfg.model.model_kwargs.learn_points:
    param_groups.append(
        {"params": [model.points], "lr": 0.01, 'betas': (0.5, 0.9)})
if cfg.model.model_kwargs.learn_colors:
    param_groups.append(
        {"params": [model.colors], "lr": 1.0, 'betas': (0.5, 0.9)})
# optimizer = optim.SGD(param_groups, lr=lr)
# NOTE(review): the configured learning_rate `lr` is unused here; the Adam
# rates are hard-coded above — confirm this is intentional.
optimizer = optim.Adam(param_groups, lr=0.01, betas=(0.5, 0.9))
# Loads checkpoints
checkpoint_io = CheckpointIO(out_dir, model=model, optimizer=optimizer)
try:
    load_dict = checkpoint_io.load(cfg.training.resume_from)
except FileExistsError:
    # No checkpoint to resume from: start fresh.
    load_dict = dict()
epoch_it = load_dict.get('epoch_it', -1)
it = load_dict.get('it', -1)
# Save config to log directory
config.save_config(os.path.join(out_dir, 'config.yaml'), cfg)
# Update Metrics from loaded
model_selection_metric = cfg['training']['model_selection_metric']
metric_val_best = load_dict.get(
    'loss_val_best', -model_selection_sign * np.inf)
# Guard against a checkpoint saved with the opposite selection mode.
if metric_val_best == np.inf or metric_val_best == -np.inf:
    metric_val_best = -model_selection_sign * np.inf
logger_py.info('Current best validation metric (%s): %.8f'
               % (model_selection_metric, metric_val_best))
# Shorthands: iteration intervals for the periodic actions in the loop.
print_every = cfg['training']['print_every']
checkpoint_every = cfg['training']['checkpoint_every']
validate_every = cfg['training']['validate_every']
visualize_every = cfg['training']['visualize_every']
debug_every = cfg['training']['debug_every']
# last_epoch=epoch_it resumes the LR schedule where the checkpoint left off.
scheduler = optim.lr_scheduler.MultiStepLR(
    optimizer, cfg['training']['scheduler_milestones'],
    gamma=cfg['training']['scheduler_gamma'], last_epoch=epoch_it)
# Set mesh extraction to low resolution for fast visualization
# during training
cfg['generation']['resolution'] = 64
cfg['generation']['img_size'] = tuple(x // 4 for x in train_dataset.resolution)
generator = config.create_generator(cfg, model, device=device)
trainer = config.create_trainer(
    cfg, model, optimizer, scheduler, generator, None, val_loader, device=device)
# Print model
nparameters = sum(p.numel() for p in model.parameters())
logger_py.info('Total number of parameters: %d' % nparameters)
# Start training loop
t0 = time.time()
t0b = time.time()
# Per-sample weights for the weighted sampler (uniform to start with).
sample_weights = np.ones(len(train_dataset)).astype('float32')
# Main training loop; runs until the --exit-after time limit fires.
while True:
    epoch_it += 1
    # Rebuild the loader each epoch so updated sample_weights take effect.
    train_sampler = WeightedSubsetRandomSampler(
        list(range(len(train_dataset))), sample_weights)
    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, sampler=train_sampler,
                                               num_workers=n_workers, drop_last=True,
                                               collate_fn=tolerating_collate)
    trainer.train_loader = train_loader
    for batch in train_loader:
        it += 1
        loss = trainer.train_step(batch, cameras=cameras, lights=lights, it=it)
        # Visualize output
        if it > 0 and visualize_every > 0 and (it % visualize_every) == 0:
            logger_py.info('Visualizing')
            trainer.visualize(batch, it=it, vis_type='image',
                              cameras=cameras, lights=lights)
            trainer.visualize(
                batch, it=it, vis_type='pointcloud', cameras=cameras, lights=lights)
        # Print output
        if print_every > 0 and (it % print_every) == 0:
            logger_py.info('[Epoch %02d] it=%03d, loss=%.4f, time=%.4f'
                           % (epoch_it, it, loss, time.time() - t0b))
            t0b = time.time()
        # Debug visualization
        if it > 0 and debug_every > 0 and (it % debug_every) == 0:
            logger_py.info('Visualizing gradients')
            trainer.debug(batch, cameras=cameras, lights=lights, it=it,
                          mesh_gt=train_dataset.get_meshes())
        # Save checkpoint
        if it > 0 and (checkpoint_every > 0 and (it % checkpoint_every) == 0):
            logger_py.info('Saving checkpoint')
            print('Saving checkpoint')
            checkpoint_io.save('model.pt', epoch_it=epoch_it, it=it,
                               loss_val_best=metric_val_best)
        # Backup if necessary
        if it > 0 and (backup_every > 0 and (it % backup_every) == 0):
            logger_py.info('Backup checkpoint')
            checkpoint_io.save('model_%d.pt' % it, epoch_it=epoch_it, it=it,
                               loss_val_best=metric_val_best)
        # Run validation and adjust sampling rate
        if it > 0 and validate_every > 0 and (it % validate_every) == 0:
            # 'chamfer' needs a 3D evaluation; everything else is image-based.
            if model_selection_metric == 'chamfer':
                eval_dict = trainer.evaluate_3d(
                    val_loader, it, cameras=cameras, lights=lights)
            else:
                eval_dict = trainer.evaluate_2d(
                    val_loader, cameras=cameras, lights=lights)
            metric_val = eval_dict[model_selection_metric]
            logger_py.info('Validation metric (%s): %.4g' %
                           (model_selection_metric, metric_val))
            # Keep the best model according to model_selection_sign.
            if model_selection_sign * (metric_val - metric_val_best) > 0:
                metric_val_best = metric_val
                logger_py.info('New best model (loss %.4g)' % metric_val_best)
                checkpoint_io.backup_model_best('model_best.pt')
                checkpoint_io.save('model_best.pt', epoch_it=epoch_it, it=it,
                                   loss_val_best=metric_val_best)
                # save point cloud
                pointcloud = trainer.generator.generate_pointclouds(
                    {}, with_colors=False, with_normals=True)[0]
                pointcloud.export(os.path.join(trainer.val_dir, 'best.ply'))
        # Exit if necessary
        if exit_after > 0 and (time.time() - t0) >= exit_after:
            logger_py.info('Time limit reached. Exiting.')
            checkpoint_io.save('model.pt', epoch_it=epoch_it, it=it,
                               loss_val_best=metric_val_best)
            # Wait for any background worker threads before exiting.
            for t in trainer._threads:
                t.join()
            exit(3)
    # Make scheduler step after full epoch
    trainer.update_learning_rate(it)
"yifan.wang@inf.ethz.ch"
] | yifan.wang@inf.ethz.ch |
b2202dbb26af0e9a886cbb59f45bc8e82435f10b | 28ab5ae813a2d3ed5b9af0b2f0a2d1f839cdd806 | /resources/user.py | 8fec4a1869d15c4d495501a7fcc8a04de20bf3ac | [] | no_license | davitamirkhanyan/rest-api-stores | 27b008973aefa929abec96902bd93d15be6e141e | 0e64bf80ba27797a8b1b75ec7152a938a62cd3e1 | refs/heads/master | 2022-10-07T04:20:30.827586 | 2020-06-11T12:16:34 | 2020-06-11T12:16:34 | 271,399,298 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 856 | py | import sqlite3
from flask_restful import Resource, reqparse
from models.user import UserModel
class UserRegister(Resource):
    """Flask-RESTful resource for registering new users and listing users."""
    # Request parser for POST: both fields are mandatory in the payload.
    parser = reqparse.RequestParser()
    parser.add_argument('username',
                        type=str,
                        required=True,
                        help="This field cannot be blank."
                        )
    parser.add_argument('password',
                        type=str,
                        required=True,
                        help="This field cannot be blank."
                        )
    def post(self):
        """Create a user; 400 if the username is taken, 201 on success."""
        data = UserRegister.parser.parse_args()
        if UserModel.find_by_username(data['username']):
            return {"message": f"User with {data['username']} username already exists."}, 400
        user = UserModel(**data)
        user.save_to_db()
        return {"message": "User created successfully."}, 201
    def get(self):
        # NOTE(review): returns every stored user (including whatever
        # UserModel.json() exposes) without authentication — confirm
        # this endpoint is meant to be public.
        return {'users': [user.json() for user in UserModel.query.all()]}
| [
"damirkh@synopsys.com"
] | damirkh@synopsys.com |
a9054c017bab08437b04e3685a1cfddade94e6a7 | db9f71a28dd41ab4f2d5c6c1c966416dcdc62ff6 | /deeplearning/sentence_distance.py | bf5fdeb20b6bd3d5cb839303292e88b154adca34 | [] | no_license | ZephyrChenzf/BotHelperOffline | adc06c9118c938c965078725e0033b133fbca0ee | 67ca8ce237e15944bb12d1a0a7d04f934429d780 | refs/heads/master | 2021-04-15T15:16:52.903393 | 2018-03-23T11:35:29 | 2018-03-23T11:35:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,457 | py | # -*- coding: utf-8 -*-
import sys
import numpy as np
import pyemd
from numpy import exp, log, dot, zeros, outer, random, dtype, float32 as REAL,\
double, uint32, seterr, array, uint8, vstack, fromstring, sqrt, newaxis,\
ndarray, empty, sum as np_sum, prod, ones, ascontiguousarray
import cPickle as pickle
# import logging
# logger = logging.getLogger(__name__)
reload(sys)
sys.setdefaultencoding('utf-8')
def two_sentence_dis(sentence1, sentence2):
    """Word Mover's Distance between two tokenised sentences.

    Both arguments are sequences of tokens.  Tokens missing from the
    word2vec vocabulary are dropped; if either sentence ends up empty,
    or all pairwise embedding distances are zero, float('inf') is
    returned.  Otherwise the EMD between the two normalized
    bag-of-words histograms over the joint vocabulary is returned.
    """
    # NOTE(review): the model files are re-loaded from absolute paths on
    # every call; consider loading them once at module level.
    embedding = pickle.load(open('/home/wxr/blazer/BotHelperOffline/deeplearning/model/word2vec/embedding.pkl'))
    word_index = pickle.load(open('/home/wxr/blazer/BotHelperOffline/deeplearning/model/word2vec/word_index.pkl'))
    len_sentence1 = len(sentence1)
    len_sentence2 = len(sentence2)
    # Remove out-of-vocabulary words; tokens become embedding-matrix indices.
    # (`token in word_index` replaces the Python-2-only has_key call.)
    sentence1 = [word_index.get(token) for token in sentence1 if token in word_index]
    sentence2 = [word_index.get(token) for token in sentence2 if token in word_index]
    diff1 = len_sentence1 - len(sentence1)
    diff2 = len_sentence2 - len(sentence2)
    if diff1 > 0 or diff2 > 0:
        # Bug fix: the counts were passed logging-style, printing a tuple
        # instead of interpolating them into the message.
        print('Removed %d and %d OOV words from document 1 and 2 (respectively).'
              % (diff1, diff2))
    if len(sentence1) == 0 or len(sentence2) == 0:
        # Bug fix: the implicitly-concatenated literals were missing the
        # space between "were" and "in".
        print('At least one of the documents had no words that were'
              ' in the vocabulary. Aborting (returning inf).')
        return float('inf')

    # Joint vocabulary of the two sentences: small index -> embedding row id.
    dictionary_temp = list(set(sentence1 + sentence2))
    dictionary = dict(enumerate(dictionary_temp))
    vocab_len = len(dictionary)
    sen_set1 = set(sentence1)
    sen_set2 = set(sentence2)
    # Pairwise Euclidean distances between word vectors; only pairs with
    # t1 in sentence1 and t2 in sentence2 are filled (others stay 0).
    distance_matrix = np.zeros((vocab_len, vocab_len), dtype=double)
    for i, t1 in dictionary.items():
        for j, t2 in dictionary.items():
            if not t1 in sen_set1 or not t2 in sen_set2:
                continue
            # Euclidean distance between the two word embeddings.
            distance_matrix[i, j] = sqrt(np_sum((embedding[t1] - embedding[t2]) ** 2))
    if np_sum(distance_matrix) == 0.0:
        # `emd` gets stuck if the distance matrix contains only zeros.
        print('The distance matrix is all zeros. Aborting (returning inf).')
        return float('inf')

    def doc2bow(document, dictionary):
        # Map each joint-vocabulary index to the token's count in `document`.
        # The original scanned the whole dictionary for every token
        # (O(len(doc) * V)) with two identical if/else branches; a single
        # reverse-lookup table gives the same result in O(len(doc) + V).
        freq_dic = dict()
        for token in document:
            freq_dic[token] = freq_dic.get(token, 0) + 1
        token_to_key = dict((token, key) for key, token in dictionary.items())
        return dict((token_to_key[token], freq) for token, freq in freq_dic.items())

    def nbow(document):
        # Normalized bag-of-words histogram over the joint vocabulary.
        d = zeros(vocab_len, dtype=double)
        bow = doc2bow(document, dictionary)  # Word frequencies.
        doc_len = len(document)
        for (idx, freq) in bow.items():
            d[idx] = float(freq) / float(doc_len)  # Normalized word frequencies.
        return d

    # Compute nBOW representation of documents.
    d1 = nbow(sentence1)
    d2 = nbow(sentence2)
    # Compute WMD.
    return pyemd.emd(d1, d2, distance_matrix)
| [
"linyimin520812@gmail.com"
] | linyimin520812@gmail.com |
c86ac7ba43b4ac3fc8f414f3d318ea1681e66f51 | 25e6aceace81a2a29aebc8249c11d0f9c1fcafbf | /resourceprovider/controllers/cloudservices.py | b9f27225cadf811b58d8e42927d7cc6e578380b5 | [] | no_license | willholley/flask-azure | e9178f295ded252b4591978be96c3a57631bdfbb | fb35811ddc00ea0b7f0aad42a6a056f4bb57a820 | refs/heads/master | 2021-01-22T02:08:24.436127 | 2013-10-01T21:14:18 | 2013-10-01T21:14:18 | 13,322,133 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 321 | py | """
TODO: what are the cloudservice controllers?
"""
from resourceprovider.models import CloudService
def get(subscription_id, cloud_service_name):
    """
    This happens when a user views details about a purchased Cloud Service:
    delegate the lookup to the CloudService model.
    """
    return CloudService().get(subscription_id, cloud_service_name)
| [
"garbados@gmail.com"
] | garbados@gmail.com |
84db84fe9a7212dd1cd05e7d7ae4edf9936001a9 | a69cbe2a90650709335f088e8f44719ecb27c5e6 | /K-Means-Clustering/18 - Projeto - Universities.py | 4de574e8aef0e296d6bc0aa08dc284ea30fa84b0 | [] | no_license | diogoaraujogit/MachineLearning | 9dc4864f4f887cb266ad1c2c29b70f9f0ee7aead | 6e921611231e040c87374527f6502bb535a108e0 | refs/heads/master | 2020-07-08T09:39:56.941875 | 2019-08-25T20:00:59 | 2019-08-25T20:00:59 | 203,635,493 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,311 | py | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
#Dados das universidades
df = pd.read_csv('College_Data',index_col=0)
df.head()
df.info()
df.describe()
#Análise exploratória de dados
sns.set_style('whitegrid')
sns.lmplot('Room.Board','Grad.Rate',data=df, hue='Private',
palette='coolwarm',size=6,aspect=1,fit_reg=False)
sns.set_style('whitegrid')
sns.lmplot('Outstate','F.Undergrad',data=df, hue='Private',
palette='coolwarm',size=6,aspect=1,fit_reg=False)
sns.set_style('darkgrid')
g = sns.FacetGrid(df,hue="Private",palette='coolwarm',size=6,aspect=2)
g = g.map(plt.hist,'Outstate',bins=20,alpha=0.7)
sns.set_style('darkgrid')
g = sns.FacetGrid(df,hue="Private",palette='coolwarm',size=6,aspect=2)
g = g.map(plt.hist,'Grad.Rate',bins=20,alpha=0.7)
#Criaão de clusters
from sklearn.cluster import KMeans
kmeans = KMeans(n_clusters=2)
kmeans.fit(df.drop('Private',axis=1))
kmeans.cluster_centers_
#Avaliação
def converter(cluster):
    """Encode a 'Private' column value: 1 for 'Yes', 0 for anything else."""
    return 1 if cluster == 'Yes' else 0
# Evaluation: compare the K-Means labels against the true Private flag.
# NOTE(review): K-Means label ids are arbitrary, so the 0/1 assignment may
# be swapped relative to 'Cluster'.
df['Cluster'] = df['Private'].apply(converter)
from sklearn.metrics import confusion_matrix,classification_report
print(confusion_matrix(df['Cluster'],kmeans.labels_))
print(classification_report(df['Cluster'],kmeans.labels_))
| [
"daraujo.augusto@gmail.com"
] | daraujo.augusto@gmail.com |
a7434b6e64c87722607be70db7925fba7466d441 | 4d91f1b06664684c091f77587fd526ee483387f5 | /theta/gridpack/scripts/.svn/text-base/copydeps.py.svn-base | 73796249855d3e0caf1036fdd66bcb3027552d1a | [] | no_license | camclean/ZprimeCombo13TeV | a2a41b4e50d2650e590088c6e95cbe04789f28ee | ebbcb1edb692708b4912b47d28f0e0a3cac28ec6 | refs/heads/master | 2021-01-16T21:15:10.739769 | 2018-11-30T14:15:23 | 2018-11-30T14:15:23 | 62,053,181 | 1 | 3 | null | 2018-05-16T18:38:46 | 2016-06-27T12:03:51 | Makefile | UTF-8 | Python | false | false | 2,209 | #!/usr/bin/env python
# Usage:
# copydeps.py <binary> <target path> <ld path1> <ld path 2> <ld path 3> ...
# copies the shared object the binary depends on to the target path, ignoring any
# libraries which are resolved to the given ld paths, if using these as the LD_LIBRARY_PATH.
import sys, os, os.path, subprocess, shutil, stat
if len(sys.argv) < 3: raise RuntimeError, "too few arguments"
binary = os.path.realpath(sys.argv[1])
target_path = os.path.realpath(sys.argv[2])
ld_paths = map(os.path.realpath, sys.argv[3:])
old_ld_path = os.environ.get('LD_LIBRARY_PATH', None)
os.environ['LD_LIBRARY_PATH'] = ':'.join(ld_paths)
if old_ld_path:
os.environ['LD_LIBRARY_PATH'] += ':' + old_ld_path
out, err = '',''
p = subprocess.Popen(['ldd', binary], stderr = subprocess.PIPE, stdout=subprocess.PIPE)
while p.poll() is None:
(tmpout, tmperr) = p.communicate()
out += tmpout
err += tmperr
code = p.wait()
if code!=0:
print "error from 'ldd %s': " % binary, out, err
sys.exit(1)
#print out
lines = out.split('\n')
for line in lines:
if line.find('ld-linux') > -1:
p = line.find('ld-linux')
p_end = line.find(' ', p)
p_start = p
while p_start > 0 and line[p_start].strip()!="": p_start -= 1
ld_path = line[p_start+1:p_end]
shutil.copy2(ld_path, os.path.join(target_path, 'ld-linux.so'))
continue
if line.find('=>') == -1: continue
so_name, so_path = line.split('=>', 2)
so_path = so_path.strip()
so_name = so_name.strip()
if 'not found' in so_path: raise RuntimeError, "dependency %s not found." % so_name
if so_path.find('(') != -1:
so_path, dummy = so_path.split('(', 2)
so_path = so_path.strip()
if so_path=='': continue
so_path = os.path.realpath(so_path)
in_ld_paths = False
for ld_path in ld_paths:
if so_path.startswith(ld_path): in_ld_paths = True
if in_ld_paths:
#print "Skipping %s as it resolved already to one ld path" % so_path
continue
#print "Copying '%s' to %s" % (so_path, target_path)
target = os.path.join(target_path, so_name)
shutil.copy(so_path, target)
sres = os.stat(target)
os.chmod(target, sres.st_mode | stat.S_IRUSR | stat.S_IWUSR)
| [
"camclean@ucdavis.edu"
] | camclean@ucdavis.edu | |
d9deda7fe1a28a0cdd3dc2ef0b92d7e750e7a386 | 176ebc25c8f2cd361ca9135fc2c2b1df32386371 | /horizen_code/tools/inference_pyramid_aug.py | 3a0feb3d3085f77a78e4b528d723d1bad47fa739 | [
"MIT"
] | permissive | yangJirui/DOTA-DOAI | a3ba7c0e17ebad69eb24ad373d3ffb74f84b2cb3 | aec4a2085d62a941c1b3f50fc9a452b2c8d3cd9f | refs/heads/master | 2022-02-23T07:42:52.565128 | 2019-04-16T07:05:05 | 2019-04-16T07:05:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,312 | py | # -*- coding:utf-8 -*-
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import os, sys
sys.path.append("../")
import cv2
import numpy as np
from timeit import default_timer as timer
import tensorflow as tf
from data.io.image_preprocess import short_side_resize_for_inference_data
from libs.networks import build_whole_network
from help_utils.tools import *
from libs.box_utils import draw_box_in_img
# from libs.box_utils import coordinate_convert
from libs.label_name_dict.label_dict import LABEl_NAME_MAP, NAME_LABEL_MAP
from help_utils import tools
# from libs.box_utils import nms
from libs.box_utils.cython_utils.cython_nms import nms, soft_nms
from libs.configs import cfgs
from libs.box_utils.cython_utils.cython_bbox import bbox_overlaps
def flip_boxes_w(width, bboxes=None):
    """Mirror [x1, y1, x2, y2] boxes horizontally within an image of `width`.

    Returns a flipped copy of `bboxes`, or None when no boxes are given.
    """
    if bboxes is None:
        return None
    flipped = bboxes.copy()
    flipped[:, 0] = width - bboxes[:, 2] - 1  # new x1 from old x2
    flipped[:, 2] = width - bboxes[:, 0] - 1  # new x2 from old x1
    return flipped
def flip_boxes_h(height, bboxes=None):
    """Mirror [x1, y1, x2, y2] boxes vertically within an image of `height`.

    Returns a flipped copy of `bboxes`, or None when no boxes are given.
    """
    if bboxes is None:
        return None
    flipped = bboxes.copy()
    flipped[:, 1] = height - bboxes[:, 3] - 1  # new y1 from old y2
    flipped[:, 3] = height - bboxes[:, 1] - 1  # new y2 from old y1
    return flipped
def box_voting(top_dets, all_dets, thresh, scoring_method='ID', beta=1.0):
    """Apply bounding-box voting to refine `top_dets` by voting with `all_dets`.

    Each kept detection's coordinates are replaced by the score-weighted
    average of every candidate box that overlaps it by at least `thresh` IoU.
    See: https://arxiv.org/abs/1505.01749. Optional score averaging (not in the
    referenced paper) can be applied by setting `scoring_method` appropriately.

    Args:
        top_dets: [N, 5] array; each row is [x1, y1, x2, y2, score]
            (typically the post-NMS survivors).
        all_dets: [M, 5] array of all candidate detections, same layout.
        thresh: IoU threshold for a candidate to cast a vote.
        scoring_method: one of 'ID' (keep score), 'TEMP_AVG', 'AVG',
            'IOU_AVG', 'GENERALIZED_AVG', 'QUASI_SUM'.
        beta: temperature/exponent used by the averaging variants.

    Returns:
        [N, 5] refined copy of `top_dets`.

    Raises:
        NotImplementedError: for an unknown `scoring_method`.
    """
    top_dets_out = top_dets.copy()
    # BUGFIX: the `np.float` alias was removed in NumPy >= 1.24 and raised
    # AttributeError there; np.float64 is the exact dtype it aliased.
    top_boxes = top_dets[:, :4].astype(np.float64)
    all_boxes = all_dets[:, :4].astype(np.float64)
    all_scores = all_dets[:, 4]
    # IoU matrix [N, M] between kept boxes and all candidates.
    top_to_all_overlaps = bbox_overlaps(top_boxes, all_boxes)
    for k in range(top_dets_out.shape[0]):
        inds_to_vote = np.where(top_to_all_overlaps[k] >= thresh)[0]
        boxes_to_vote = all_boxes[inds_to_vote, :]
        ws = all_scores[inds_to_vote]
        # Score-weighted coordinate average; the box always overlaps itself
        # with IoU 1.0, so inds_to_vote is never empty.
        top_dets_out[k, :4] = np.average(boxes_to_vote, axis=0, weights=ws)
        if scoring_method == 'ID':
            # Identity, nothing to do
            pass
        elif scoring_method == 'TEMP_AVG':
            # Average probabilities (considered as P(detected class) vs.
            # P(not the detected class)) after smoothing with a temperature
            # hyperparameter.
            P = np.vstack((ws, 1.0 - ws))
            P_max = np.max(P, axis=0)
            X = np.log(P / P_max)
            X_exp = np.exp(X / beta)
            P_temp = X_exp / np.sum(X_exp, axis=0)
            P_avg = P_temp[0].mean()
            top_dets_out[k, 4] = P_avg
        elif scoring_method == 'AVG':
            # Combine new probs from overlapping boxes
            top_dets_out[k, 4] = ws.mean()
        elif scoring_method == 'IOU_AVG':
            # IoU-weighted average of the voters' scores.
            P = ws
            ws = top_to_all_overlaps[k, inds_to_vote]
            P_avg = np.average(P, weights=ws)
            top_dets_out[k, 4] = P_avg
        elif scoring_method == 'GENERALIZED_AVG':
            # Generalized (power) mean of the voters' scores.
            P_avg = np.mean(ws**beta)**(1.0 / beta)
            top_dets_out[k, 4] = P_avg
        elif scoring_method == 'QUASI_SUM':
            # Sum of scores, damped by the number of voters.
            top_dets_out[k, 4] = ws.sum() / float(len(ws))**beta
        else:
            raise NotImplementedError(
                'Unknown scoring method {}'.format(scoring_method)
            )
    return top_dets_out
def get_file_paths_recursive(folder=None, file_ext=None):
    """Return sorted paths of files in *folder* ending with *file_ext*.

    NOTE: despite the name, this does NOT descend into subdirectories — it
    lists only the files directly inside *folder* (kept for compatibility
    with existing callers).

    :param folder: directory to scan; None yields an empty list.
    :param file_ext: required filename suffix, e.g. '.png'.
    :return: list of paths (folder joined with each matching name),
        sorted by filename.
    """
    if folder is None:
        return []
    return [os.path.join(folder, name)
            for name in sorted(os.listdir(folder))
            if name.endswith(file_ext)]
def inference(det_net, file_paths, des_folder, h_len, w_len, h_overlap, w_overlap, save_res=False):
    """Run sliding-window, multi-scale, flip-augmented detection over large images.

    Each image is cut into overlapping h_len x w_len crops; every crop is run
    at each scale in cfgs.IMG_SHORT_SIDE_LEN, plus a horizontal-flip and a
    vertical-flip pass per scale. Crop-level boxes are mapped back to image
    coordinates, merged per class with (soft-)NMS, then refined by box voting.

    Args:
        det_net: detection network object exposing build_whole_detection_network()
            and get_restorer().
        file_paths: list of image file paths to process.
        des_folder: output folder for drawn visualizations (save_res=True only).
        h_len, w_len: crop height / width in pixels.
        h_overlap, w_overlap: overlap between neighboring crops in pixels.
        save_res: True -> draw and save annotated images;
                  False -> append DOTA Task2 per-class result txt files.

    Side effects: maintains a resume file (TMP_FILE) listing processed images
    so an interrupted run can skip completed work; the file is deleted when
    every image has been processed.
    """
    # if save_res:
    #     assert cfgs.SHOW_SCORE_THRSHOLD >= 0.5, \
    #         'please set score threshold (example: SHOW_SCORE_THRSHOLD = 0.5) in cfgs.py'
    #
    # else:
    #     assert cfgs.SHOW_SCORE_THRSHOLD < 0.005, \
    #         'please set score threshold (example: SHOW_SCORE_THRSHOLD = 0.00) in cfgs.py'
    # TMP_FILE = './tmp_concat.txt' if cfgs.USE_CONCAT else './tmp.txt'
    # Resume bookkeeping: one processed image path per line.
    TMP_FILE = './tmp1_%s.txt' % cfgs.VERSION
    # 1. preprocess img
    img_plac = tf.placeholder(dtype=tf.uint8, shape=[None, None, 3])
    img_batch = tf.cast(img_plac, tf.float32)
    if cfgs.MXNET_NORM:
        print("USe Mxnet Norm...\n")
        # MxNet-style normalization: scale to [0, 1], then per-channel mean/std.
        img_batch = img_batch / 255.0
        img_batch = img_batch - tf.constant(cfgs.MXNET_MEAN)
        img_batch = img_batch / tf.constant(cfgs.MXNET_STD)
    else:
        img_batch = img_batch - tf.constant([[cfgs.PIXEL_MEAN]])  # sub pixel mean at last
        if cfgs.NET_NAME.endswith(('b', 'd')):
            print("Note: Use Mxnet ResNet, But Do Not Norm Img like MxNet....")
            print('\n')
    # is_resize=False keeps the fed crop at its native resolution; the
    # multi-scale resizing is done per crop with cv2 further below.
    img_batch = short_side_resize_for_inference_data(img_tensor=img_batch,
                                                     target_shortside_len=cfgs.IMG_SHORT_SIDE_LEN[0],
                                                     is_resize=False)
    det_boxes_h, det_scores_h, det_category_h = det_net.build_whole_detection_network(input_img_batch=img_batch,
                                                                                      gtboxes_batch=None,
                                                                                      gtboxes_r_batch=None)
    init_op = tf.group(
        tf.global_variables_initializer(),
        tf.local_variables_initializer()
    )
    restorer, restore_ckpt = det_net.get_restorer()
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as sess:
        sess.run(init_op)
        if not restorer is None:
            restorer.restore(sess, restore_ckpt)
            print('restore model')
        # Load the list of already-processed images so a crashed run resumes
        # where it left off instead of re-detecting everything.
        if not os.path.exists(TMP_FILE):
            fw = open(TMP_FILE, 'w')
            fw.close()
        fr = open(TMP_FILE, 'r')
        pass_img = fr.readlines()
        fr.close()
        for count, img_path in enumerate(file_paths):
            fw = open(TMP_FILE, 'a+')
            # Skip images recorded as done in a previous run.
            if img_path + '\n' in pass_img:
                continue
            start = timer()
            img = cv2.imread(img_path)
            # Accumulators for all crops / scales / flips of this image
            # (image-level pixel coordinates).
            box_res = []
            label_res = []
            score_res = []
            imgH = img.shape[0]
            imgW = img.shape[1]
            ori_H = imgH
            ori_W = imgW
            # print(" ori_h, ori_w: ", imgH, imgW)
            # Zero-pad images smaller than one crop so slicing below is valid.
            if imgH < h_len:
                temp = np.zeros([h_len, imgW, 3], np.float32)
                temp[0:imgH, :, :] = img
                img = temp
                imgH = h_len
            if imgW < w_len:
                temp = np.zeros([imgH, w_len, 3], np.float32)
                temp[:, 0:imgW, :] = img
                img = temp
                imgW = w_len
            # Slide a h_len x w_len window with the configured overlap;
            # the last window in each direction is clamped to end exactly
            # at the image border.
            for hh in range(0, imgH, h_len - h_overlap):
                if imgH - hh - 1 < h_len:
                    hh_ = imgH - h_len
                else:
                    hh_ = hh
                for ww in range(0, imgW, w_len - w_overlap):
                    if imgW - ww - 1 < w_len:
                        ww_ = imgW - w_len
                    else:
                        ww_ = ww
                    src_img = img[hh_:(hh_ + h_len), ww_:(ww_ + w_len), :]
                    # Multi-scale test-time augmentation over the crop.
                    for short_size in cfgs.IMG_SHORT_SIDE_LEN:
                        max_len = 1200
                        # Resize so the crop's short side equals short_size,
                        # capping the long side at max_len.
                        if h_len < w_len:
                            new_h, new_w = short_size, min(int(short_size*float(w_len)/h_len), max_len)
                        else:
                            new_h, new_w = min(int(short_size*float(h_len)/w_len), max_len), short_size
                        # NOTE(review): cv2.resize expects dsize=(width, height);
                        # passing (new_h, new_w) is only correct while
                        # h_len == w_len (square crops, as used in __main__) —
                        # confirm before using rectangular crops.
                        img_resize = cv2.resize(src_img, (new_h, new_w))
                        # Pass 1: original orientation.
                        det_boxes_h_, det_scores_h_, det_category_h_ = \
                            sess.run(
                                [det_boxes_h, det_scores_h, det_category_h],
                                feed_dict={img_plac: img_resize[:, :, ::-1]}
                            )
                        # -=------
                        # Drop near-zero-score detections early.
                        valid = det_scores_h_ > 0.001
                        det_boxes_h_ = det_boxes_h_[valid]
                        det_scores_h_ = det_scores_h_[valid]
                        det_category_h_ = det_category_h_[valid]
                        # ---------
                        # Rescale boxes from the resized crop back to crop pixels.
                        det_boxes_h_[:, 0] = det_boxes_h_[:, 0] * w_len / new_w
                        det_boxes_h_[:, 1] = det_boxes_h_[:, 1] * h_len / new_h
                        det_boxes_h_[:, 2] = det_boxes_h_[:, 2] * w_len / new_w
                        det_boxes_h_[:, 3] = det_boxes_h_[:, 3] * h_len / new_h
                        if len(det_boxes_h_) > 0:
                            # Offset crop-local boxes into whole-image coordinates.
                            for ii in range(len(det_boxes_h_)):
                                box = det_boxes_h_[ii]
                                box[0] = box[0] + ww_
                                box[1] = box[1] + hh_
                                box[2] = box[2] + ww_
                                box[3] = box[3] + hh_
                                box_res.append(box)
                                label_res.append(det_category_h_[ii])
                                score_res.append(det_scores_h_[ii])
                        # Pass 2: horizontal flip; boxes are un-flipped before
                        # the same rescale/offset treatment.
                        img_resize_flip = cv2.flip(img_resize, 1)
                        det_boxes_h_flip_, det_scores_h_flip_, det_category_h_flip_ = \
                            sess.run(
                                [det_boxes_h, det_scores_h, det_category_h],
                                feed_dict={img_plac: img_resize_flip[:, :, ::-1]}
                            )
                        det_boxes_h_flip_ = flip_boxes_w(new_w, det_boxes_h_flip_)
                        valid = det_scores_h_flip_ > 0.001
                        det_boxes_h_flip_ = det_boxes_h_flip_[valid]
                        det_scores_h_flip_ = det_scores_h_flip_[valid]
                        det_category_h_flip_ = det_category_h_flip_[valid]
                        # ---------
                        det_boxes_h_flip_[:, 0] = det_boxes_h_flip_[:, 0] * w_len / new_w
                        det_boxes_h_flip_[:, 1] = det_boxes_h_flip_[:, 1] * h_len / new_h
                        det_boxes_h_flip_[:, 2] = det_boxes_h_flip_[:, 2] * w_len / new_w
                        det_boxes_h_flip_[:, 3] = det_boxes_h_flip_[:, 3] * h_len / new_h
                        if len(det_boxes_h_flip_) > 0:
                            for ii in range(len(det_boxes_h_flip_)):
                                box = det_boxes_h_flip_[ii]
                                box[0] = box[0] + ww_
                                box[1] = box[1] + hh_
                                box[2] = box[2] + ww_
                                box[3] = box[3] + hh_
                                box_res.append(box)
                                label_res.append(det_category_h_flip_[ii])
                                score_res.append(det_scores_h_flip_[ii])
                        # Pass 3: vertical flip, same un-flip/rescale/offset logic.
                        img_resize_flip = cv2.flip(img_resize, 0)
                        det_boxes_h_flip_, det_scores_h_flip_, det_category_h_flip_ = \
                            sess.run(
                                [det_boxes_h, det_scores_h, det_category_h],
                                feed_dict={img_plac: img_resize_flip[:, :, ::-1]}
                            )
                        det_boxes_h_flip_ = flip_boxes_h(new_h, det_boxes_h_flip_)
                        valid = det_scores_h_flip_ > 0.001
                        det_boxes_h_flip_ = det_boxes_h_flip_[valid]
                        det_scores_h_flip_ = det_scores_h_flip_[valid]
                        det_category_h_flip_ = det_category_h_flip_[valid]
                        # ---------
                        det_boxes_h_flip_[:, 0] = det_boxes_h_flip_[:, 0] * w_len / new_w
                        det_boxes_h_flip_[:, 1] = det_boxes_h_flip_[:, 1] * h_len / new_h
                        det_boxes_h_flip_[:, 2] = det_boxes_h_flip_[:, 2] * w_len / new_w
                        det_boxes_h_flip_[:, 3] = det_boxes_h_flip_[:, 3] * h_len / new_h
                        if len(det_boxes_h_flip_) > 0:
                            for ii in range(len(det_boxes_h_flip_)):
                                box = det_boxes_h_flip_[ii]
                                box[0] = box[0] + ww_
                                box[1] = box[1] + hh_
                                box[2] = box[2] + ww_
                                box[3] = box[3] + hh_
                                box_res.append(box)
                                label_res.append(det_category_h_flip_[ii])
                                score_res.append(det_scores_h_flip_[ii])
            box_res = np.array(box_res)
            label_res = np.array(label_res)
            score_res = np.array(score_res)
            # Per-class merge: (soft-)NMS followed by box voting.
            box_res_, label_res_, score_res_ = [], [], []
            # h_threshold = {'roundabout': 0.35, 'tennis-court': 0.35, 'swimming-pool': 0.4, 'storage-tank': 0.3,
            #                'soccer-ball-field': 0.3, 'small-vehicle': 0.4, 'ship': 0.35, 'plane': 0.35,
            #                'large-vehicle': 0.4, 'helicopter': 0.4, 'harbor': 0.3, 'ground-track-field': 0.4,
            #                'bridge': 0.3, 'basketball-court': 0.4, 'baseball-diamond': 0.3}
            # Per-class horizontal NMS IoU thresholds (all currently 0.5).
            h_threshold = {'turntable': 0.5, 'tennis-court': 0.5, 'swimming-pool': 0.5, 'storage-tank': 0.5,
                           'soccer-ball-field': 0.5, 'small-vehicle': 0.5, 'ship': 0.5, 'plane': 0.5,
                           'large-vehicle': 0.5, 'helicopter': 0.5, 'harbor': 0.5, 'ground-track-field': 0.5,
                           'bridge': 0.5, 'basketball-court': 0.5, 'baseball-diamond': 0.5, 'container-crane': 0.5}
            for sub_class in range(1, cfgs.CLASS_NUM + 1):
                index = np.where(label_res == sub_class)[0]
                if len(index) == 0:
                    continue
                tmp_boxes_h = box_res[index]
                tmp_label_h = label_res[index]
                tmp_score_h = score_res[index]
                tmp_boxes_h = np.array(tmp_boxes_h)
                # Pack [x1, y1, x2, y2, score] rows as required by the NMS kernels.
                tmp = np.zeros([tmp_boxes_h.shape[0], tmp_boxes_h.shape[1] + 1])
                tmp[:, 0:-1] = tmp_boxes_h
                tmp[:, -1] = np.array(tmp_score_h)
                # inx = nms.py_cpu_nms(dets=np.array(tmp, np.float32),
                #                      thresh=h_threshold[LABEL_NAME_MAP[sub_class]],
                #                      max_output_size=500)
                if cfgs.SOFT_NMS:
                    inx = soft_nms(np.array(tmp, np.float32), 0.5, Nt=h_threshold[LABEl_NAME_MAP[sub_class]],
                                   threshold=0.001, method=2)  # 2 means Gaussian
                else:
                    inx = nms(np.array(tmp, np.float32),
                              h_threshold[LABEl_NAME_MAP[sub_class]])
                # inx = inx[:500]  # max_outpus is 500
                # Refine survivors' coordinates by voting with ALL candidates
                # (IoU >= 0.9); 'ID' keeps the original scores.
                box_vote = box_voting(tmp[inx], tmp, thresh=0.9, scoring_method='ID')
                box_res_.append(box_vote[:, 0:-1])
                score_res_.append(box_vote[:, -1])
                label_res_.append(np.ones_like(box_vote[:, -1]) * sub_class)
            box_res_ = np.concatenate(box_res_, axis=0)
            score_res_ = np.concatenate(score_res_, axis=0)
            label_res_ = np.concatenate(label_res_, axis=0)
            time_elapsed = timer() - start
            if save_res:
                # Visualization path: keep only confident boxes and save a
                # drawn copy cropped back to the original (pre-padding) size.
                scores = np.array(score_res_)
                labels = np.array(label_res_)
                boxes = np.array(box_res_)
                valid_show = scores > cfgs.SHOW_SCORE_THRSHOLD
                scores = scores[valid_show]
                boxes = boxes[valid_show]
                labels = labels[valid_show]
                # NOTE(review): the image is MxNet-normalized here regardless of
                # cfgs.MXNET_NORM — presumably draw_boxes_with_label_and_scores
                # de-normalizes internally; confirm against that helper.
                det_detections_h = draw_box_in_img.draw_boxes_with_label_and_scores((np.array(img, np.float32) - np.array(cfgs.MXNET_MEAN))/np.array(cfgs.MXNET_STD),
                                                                                    boxes=boxes,
                                                                                    labels=labels,
                                                                                    scores=scores)
                det_detections_h = det_detections_h[:ori_H, :ori_W]
                save_dir = os.path.join(des_folder, cfgs.VERSION)
                tools.mkdir(save_dir)
                cv2.imwrite(save_dir + '/' + img_path.split('/')[-1].split('.')[0] + '_h_s%d_t%f.jpg' %(h_len, cfgs.FAST_RCNN_NMS_IOU_THRESHOLD),
                            det_detections_h)
                view_bar('{} cost {}s'.format(img_path.split('/')[-1].split('.')[0],
                                              time_elapsed), count + 1, len(file_paths))
            else:
                # eval txt
                CLASS_DOTA = NAME_LABEL_MAP.keys()
                # Task2
                # DOTA Task2 submission format: one txt per class, each line
                # "image_id score x1 y1 x2 y2"; files are opened in append mode.
                write_handle_h = {}
                txt_dir_h = os.path.join('txt_output', cfgs.VERSION + '_flip_voting_h')
                tools.mkdir(txt_dir_h)
                for sub_class in CLASS_DOTA:
                    if sub_class == 'back_ground':
                        continue
                    write_handle_h[sub_class] = open(os.path.join(txt_dir_h, 'Task2_%s.txt' % sub_class), 'a+')
                for i, hbox in enumerate(box_res_):
                    command = '%s %.3f %.1f %.1f %.1f %.1f\n' % (img_path.split('/')[-1].split('.')[0],
                                                                 score_res_[i],
                                                                 hbox[0], hbox[1], hbox[2], hbox[3])
                    write_handle_h[LABEl_NAME_MAP[label_res_[i]]].write(command)
                for sub_class in CLASS_DOTA:
                    if sub_class == 'back_ground':
                        continue
                    write_handle_h[sub_class].close()
                view_bar('{} cost {}s'.format(img_path.split('/')[-1].split('.')[0],
                                              time_elapsed), count + 1, len(file_paths))
            # Record this image as done for the resume mechanism.
            fw.write('{}\n'.format(img_path))
            fw.close()
    # Whole run finished cleanly; drop the resume file.
    os.remove(TMP_FILE)
if __name__ == "__main__":
    # Restrict TensorFlow to GPU 1.
    os.environ["CUDA_VISIBLE_DEVICES"] = '1'
    # DOTA test-set PNG images (hard-coded local path).
    file_paths = get_file_paths_recursive('/home/omnisky/DataSets/Dota/test/images/images', '.png')
    if cfgs.USE_CONCAT:
        # Variant network defined in a separate module; imported lazily so the
        # default build doesn't pay for it.
        from libs.networks import build_whole_network_Concat
        det_net = build_whole_network_Concat.DetectionNetwork(base_network_name=cfgs.NET_NAME,
                                                              is_training=False)
    else:
        det_net = build_whole_network.DetectionNetwork(base_network_name=cfgs.NET_NAME,
                                                       is_training=False)
    # 800x800 sliding crops with 200 px overlap; save_res=False means DOTA
    # Task2 txt files are written instead of visualization images.
    inference(det_net, file_paths, './demos', 800, 800,
              200, 200, save_res=False)
| [
"1192150908@qq.com"
] | 1192150908@qq.com |
2ab9f2f34b31a7152edd0e7524c21dddd1269df8 | 247389d0b916f972297fe3c38d262502a6cfa084 | /morse | ef7bed09c034b15fde12125c089eb665e1695bcd | [] | no_license | reteps/raspi | 2e69fee4eb96e4a43059f3125c79cf577e2b5bb6 | 96771f0525b3ad71c9b13a36de49b599c5769310 | refs/heads/master | 2021-09-28T05:22:32.999241 | 2017-07-26T13:24:51 | 2017-07-26T13:24:51 | 98,200,765 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,205 | #!/usr/bin/env python3
import RPi.GPIO as GPIO
import pilights, time
def to_morse_code(message):
    """Translate *message* into International Morse Code.

    Letters A-Z (case-insensitive), digits 0-9 and spaces are supported;
    a space is rendered as '/' so word boundaries survive in the output.

    Args:
        message: plain-text string to encode.

    Returns:
        The concatenated Morse symbols, e.g. "sos" -> "...---...".

    Raises:
        KeyError: if *message* contains any other character
            (e.g. punctuation).
    """
    morse_alphabet = {
        "A": ".-",    "B": "-...",  "C": "-.-.",  "D": "-..",   "E": ".",
        "F": "..-.",  "G": "--.",   "H": "....",  "I": "..",    "J": ".---",
        "K": "-.-",   "L": ".-..",  "M": "--",    "N": "-.",    "O": "---",
        "P": ".--.",  "Q": "--.-",  "R": ".-.",   "S": "...",   "T": "-",
        "U": "..-",   "V": "...-",  "W": ".--",   "X": "-..-",  "Y": "-.--",
        "Z": "--..",  " ": "/",
        # Generalization: digits previously crashed with KeyError; these are
        # the standard ITU Morse digit codes.
        "0": "-----", "1": ".----", "2": "..---", "3": "...--", "4": "....-",
        "5": ".....", "6": "-....", "7": "--...", "8": "---..", "9": "----.",
    }
    # str.join over a generator replaces the original quadratic
    # string-concatenation loop.
    return "".join(morse_alphabet[letter] for letter in message.upper())
# GPIO pin wired to the signal LED (presumably BCM numbering — confirm
# against the pilights.Lights implementation).
pin = 17
lights = pilights.Lights(pin)
# Read a message from the user and translate it to dots/dashes
# ('/' marks a word boundary).
raw_message = input("Message > ")
message = to_morse_code(raw_message)
# Timing constants in seconds: dot flash, dash flash, gap between symbols,
# extra pause at word boundaries.
shorttime = 0.1
longtime = 0.4
split = 0.2
word = 0.6
print(message)
# Blink the encoded message on the LED, one Morse element at a time.
for character in message:
    if character == ".":
        # Dot: short flash.
        lights.onoff(pin,shorttime)
    elif character == "/":
        # Word boundary: pause with the LED off.
        time.sleep(word)
    elif character == "-":
        # Dash: long flash.
        lights.onoff(pin,longtime)
    # Inter-symbol gap after every element.
    time.sleep(split)
| [
"peter.a.stenger@gmail.com"
] | peter.a.stenger@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.