max_stars_repo_path stringlengths 4 286 | max_stars_repo_name stringlengths 5 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.03M | content_cleaned stringlengths 6 1.03M | language stringclasses 111 values | language_score float64 0.03 1 | comments stringlengths 0 556k | edu_score float64 0.32 5.03 | edu_int_score int64 0 5 |
|---|---|---|---|---|---|---|---|---|---|---|
tests/test_cue.py | tebeka/cue | 8 | 6618151 | from pathlib import Path
import pytest
import yaml
import cue
here = Path(__file__).absolute().parent
ok_data_file = here / 'data_ok.yml'
with ok_data_file.open() as fp:
ok_data = fp.read()
bad_data_file = here / 'data_bad.yml'
with bad_data_file.open() as fp:
bad_data = fp.read()
cue_file = here / 'schema.cue'
with cue_file.open() as fp:
cue_data = fp.read()
def test_files_fail():
with pytest.raises(cue.Error):
cue.vet.files(cue_file, bad_data_file)
def test_files_ok():
cue.vet.files(cue_file, ok_data_file)
def test_data_str_fail():
with pytest.raises(cue.Error):
cue.vet.data(cue_data, bad_data, cue.YAML)
def test_data_bytes_fail():
schema, data = cue_data.encode('utf-8'), bad_data.encode('utf-8')
with pytest.raises(cue.Error):
cue.vet.data(schema, data, cue.YAML)
def test_data_str_ok():
cue.vet.data(cue_data, ok_data, cue.YAML)
def test_data_bytes_ok():
schema, data = cue_data.encode('utf-8'), ok_data.encode('utf-8')
cue.vet.data(schema, data, cue.YAML)
def test_validator_ok():
v = cue.Validator(cue_data)
obj = yaml.safe_load(ok_data)
v.validate(obj)
def test_validator_fail():
v = cue.Validator(cue_data)
obj = yaml.safe_load(bad_data)
with pytest.raises(cue.Error):
v.validate(obj)
| from pathlib import Path
import pytest
import yaml
import cue
here = Path(__file__).absolute().parent
ok_data_file = here / 'data_ok.yml'
with ok_data_file.open() as fp:
ok_data = fp.read()
bad_data_file = here / 'data_bad.yml'
with bad_data_file.open() as fp:
bad_data = fp.read()
cue_file = here / 'schema.cue'
with cue_file.open() as fp:
cue_data = fp.read()
def test_files_fail():
with pytest.raises(cue.Error):
cue.vet.files(cue_file, bad_data_file)
def test_files_ok():
cue.vet.files(cue_file, ok_data_file)
def test_data_str_fail():
with pytest.raises(cue.Error):
cue.vet.data(cue_data, bad_data, cue.YAML)
def test_data_bytes_fail():
schema, data = cue_data.encode('utf-8'), bad_data.encode('utf-8')
with pytest.raises(cue.Error):
cue.vet.data(schema, data, cue.YAML)
def test_data_str_ok():
cue.vet.data(cue_data, ok_data, cue.YAML)
def test_data_bytes_ok():
schema, data = cue_data.encode('utf-8'), ok_data.encode('utf-8')
cue.vet.data(schema, data, cue.YAML)
def test_validator_ok():
v = cue.Validator(cue_data)
obj = yaml.safe_load(ok_data)
v.validate(obj)
def test_validator_fail():
v = cue.Validator(cue_data)
obj = yaml.safe_load(bad_data)
with pytest.raises(cue.Error):
v.validate(obj)
| none | 1 | 2.222804 | 2 | |
src/comments.py | arrivance/gyazo-to-imgur | 2 | 6618152 | <gh_stars>1-10
"""
Name: Gyazo-to-imgur bot
Purpose: To convert Gyazo links to imgur links, as imgur is objectively better for RES users.
Author: arrivance
"""
import praw
import json
import utility
import re
from imgurpython import ImgurClient
"""
Configuration
"""
if utility.file_checker("login.json") == False:
print("You are required to make a login.json file for the program to work.")
# opens the login.json file with all of the authentication dtails
with open("login.json") as data_file:
# dumps all the login details into the program
login_details = json.load(data_file)
gyazo_regex = re.compile("https?:\/\/gyazo\.com\/[a-z0-9]+")
# initialises PRAW instance
# and creates a user agent
user_agent = login_details["reddit_ua"]
print("Gyazo to imgur converter by /u/arrivance")
print("User agent:", user_agent)
r = praw.Reddit(user_agent)
r.set_oauth_app_info(client_id=login_details["reddit_client_id"], client_secret=login_details["reddit_client_secret"], redirect_uri=login_details["reddit_redirect_uri"])
"""
reddit auth
"""
access_token = utility.reddit_oauth_token(login_details, user_agent)
# gets the access information
r.set_access_credentials({"identity", "submit"}, access_token)
# authenticates the user with reddit
authenticated_user = r.get_me()
"""
imgur auth
"""
# logins into the imgurclient using the login details provided
imgur_client = ImgurClient(login_details["imgur_client_id"], login_details["imgur_secret"])
if utility.file_checker("commented.json") == False:
structure = {
"comment_ids":"[]",
"disallowed":"[]",
"submission_ids":"[]"
}
print("It is recommended to follow Bottiquete, and to add a list of blacklisted subreddits to disallowed.")
utility.file_maker("commented.json", structure)
# always loops
while True:
# opens the json file
with open("commented.json") as data_file:
# dumps the json file
raw_json = json.load(data_file)
# puts the handled_comments and submissions in memory
handled_comments = raw_json["comment_ids"]
disallowed_subreddits = raw_json["disallowed"]
# checks all the comments being posted on reddit at all
all_comments = praw.helpers.comment_stream(r, "all", verbosity=3)
# goes through all the comments
for comment in all_comments:
matches = gyazo_regex.findall(comment.body.lower())
if len(matches) != 0 and comment.id not in handled_comments:
for link in matches:
gyazo_link = utility.gyazo_link_parser(link)
imgur_upload = utility.imgur_uploader(gyazo_link, imgur_client)
if imgur_upload != False:
utility.comment_poster(comment, utility.comment_prep(imgur_upload))
# and then appends the comment to the handled comments so we don't recheck
if comment.id not in handled_comments:
raw_json["comment_ids"].append(comment.id)
with open("commented.json", "w") as data_file:
json.dump(raw_json, data_file)
| """
Name: Gyazo-to-imgur bot
Purpose: To convert Gyazo links to imgur links, as imgur is objectively better for RES users.
Author: arrivance
"""
import praw
import json
import utility
import re
from imgurpython import ImgurClient
"""
Configuration
"""
if utility.file_checker("login.json") == False:
print("You are required to make a login.json file for the program to work.")
# opens the login.json file with all of the authentication dtails
with open("login.json") as data_file:
# dumps all the login details into the program
login_details = json.load(data_file)
gyazo_regex = re.compile("https?:\/\/gyazo\.com\/[a-z0-9]+")
# initialises PRAW instance
# and creates a user agent
user_agent = login_details["reddit_ua"]
print("Gyazo to imgur converter by /u/arrivance")
print("User agent:", user_agent)
r = praw.Reddit(user_agent)
r.set_oauth_app_info(client_id=login_details["reddit_client_id"], client_secret=login_details["reddit_client_secret"], redirect_uri=login_details["reddit_redirect_uri"])
"""
reddit auth
"""
access_token = utility.reddit_oauth_token(login_details, user_agent)
# gets the access information
r.set_access_credentials({"identity", "submit"}, access_token)
# authenticates the user with reddit
authenticated_user = r.get_me()
"""
imgur auth
"""
# logins into the imgurclient using the login details provided
imgur_client = ImgurClient(login_details["imgur_client_id"], login_details["imgur_secret"])
if utility.file_checker("commented.json") == False:
structure = {
"comment_ids":"[]",
"disallowed":"[]",
"submission_ids":"[]"
}
print("It is recommended to follow Bottiquete, and to add a list of blacklisted subreddits to disallowed.")
utility.file_maker("commented.json", structure)
# always loops
while True:
# opens the json file
with open("commented.json") as data_file:
# dumps the json file
raw_json = json.load(data_file)
# puts the handled_comments and submissions in memory
handled_comments = raw_json["comment_ids"]
disallowed_subreddits = raw_json["disallowed"]
# checks all the comments being posted on reddit at all
all_comments = praw.helpers.comment_stream(r, "all", verbosity=3)
# goes through all the comments
for comment in all_comments:
matches = gyazo_regex.findall(comment.body.lower())
if len(matches) != 0 and comment.id not in handled_comments:
for link in matches:
gyazo_link = utility.gyazo_link_parser(link)
imgur_upload = utility.imgur_uploader(gyazo_link, imgur_client)
if imgur_upload != False:
utility.comment_poster(comment, utility.comment_prep(imgur_upload))
# and then appends the comment to the handled comments so we don't recheck
if comment.id not in handled_comments:
raw_json["comment_ids"].append(comment.id)
with open("commented.json", "w") as data_file:
json.dump(raw_json, data_file) | en | 0.840897 | Name: Gyazo-to-imgur bot Purpose: To convert Gyazo links to imgur links, as imgur is objectively better for RES users. Author: arrivance Configuration # opens the login.json file with all of the authentication dtails # dumps all the login details into the program # initialises PRAW instance # and creates a user agent reddit auth # gets the access information # authenticates the user with reddit imgur auth # logins into the imgurclient using the login details provided # always loops # opens the json file # dumps the json file # puts the handled_comments and submissions in memory # checks all the comments being posted on reddit at all # goes through all the comments # and then appends the comment to the handled comments so we don't recheck | 3.031406 | 3 |
meme/migrations/0006_auto_20210212_0019.py | aryanndhir/Xmeme | 4 | 6618153 | <filename>meme/migrations/0006_auto_20210212_0019.py
# Generated by Django 3.1.6 on 2021-02-11 18:49
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('meme', '0005_auto_20210211_2112'),
]
operations = [
migrations.AlterField(
model_name='post',
name='date_posted',
field=models.DateTimeField(),
),
]
| <filename>meme/migrations/0006_auto_20210212_0019.py
# Generated by Django 3.1.6 on 2021-02-11 18:49
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('meme', '0005_auto_20210211_2112'),
]
operations = [
migrations.AlterField(
model_name='post',
name='date_posted',
field=models.DateTimeField(),
),
]
| en | 0.827678 | # Generated by Django 3.1.6 on 2021-02-11 18:49 | 1.348148 | 1 |
genofunk/refparser.py | rmcolq/genofunk | 1 | 6618154 | from Bio import SeqIO
import json
import logging
class ReferenceParser():
"""
Parses a genbank file containing multiple references and searches for
annotated CDS features. Writes json file with the relevant information.
"""
def __init__(self):
self.reference_genbank = None
self.reference_json = {"schema": "v1.1", 'features': {}, 'references': {}}
def load_reference_genbank(self, filepath):
# tests:
# what if no file
# simple case check
# expected attributes (reference,sequence), (genes,(start,end,strand))
self.reference_genbank = SeqIO.index(filepath, "genbank")
def identify_features(self):
for i in self.reference_genbank.keys():
# print(i)
locations = {}
length = None
for feature in self.reference_genbank[i].features:
if feature.type == 'source':
length = int(feature.location.end)
continue
if feature.type == 'gene':
continue
if feature.type == 'CDS':
# print(feature)
if "gene" in feature.qualifiers:
gene_id = feature.qualifiers['gene'][0]
self.reference_json['features'][gene_id] = {
"name": gene_id.lower(),
"type": "CDS"
}
if "note" in feature.qualifiers:
self.reference_json['features'][gene_id]["description"] = feature.qualifiers['note'][0],
if feature.location_operator == "join":
location_list = []
for loc in feature.location.parts:
d = {
"start": int(loc.start),
"end": int(loc.end),
"strand": loc.strand
}
location_list.append(d)
locations[feature.qualifiers['gene'][0]] = {"join": location_list}
else:
locations[feature.qualifiers['gene'][0]] = {
"start": int(feature.location.start),
"end": int(feature.location.end),
"strand": feature.location.strand
}
# else:
# print(feature.type)
# print(feature)
record = self.reference_genbank[i]
self.reference_json['references'][i] = {
'accession': i,
'description': record.description,
'length': length,
'locations': locations,
'sequence': str(record.seq)
}
def write_json(self, filepath):
with open(filepath, 'w') as json_file:
json.dump(self.reference_json, json_file)
def run(self, infilepath, outfilepath):
self.load_reference_genbank(infilepath)
self.identify_features()
self.write_json(outfilepath) | from Bio import SeqIO
import json
import logging
class ReferenceParser():
"""
Parses a genbank file containing multiple references and searches for
annotated CDS features. Writes json file with the relevant information.
"""
def __init__(self):
self.reference_genbank = None
self.reference_json = {"schema": "v1.1", 'features': {}, 'references': {}}
def load_reference_genbank(self, filepath):
# tests:
# what if no file
# simple case check
# expected attributes (reference,sequence), (genes,(start,end,strand))
self.reference_genbank = SeqIO.index(filepath, "genbank")
def identify_features(self):
for i in self.reference_genbank.keys():
# print(i)
locations = {}
length = None
for feature in self.reference_genbank[i].features:
if feature.type == 'source':
length = int(feature.location.end)
continue
if feature.type == 'gene':
continue
if feature.type == 'CDS':
# print(feature)
if "gene" in feature.qualifiers:
gene_id = feature.qualifiers['gene'][0]
self.reference_json['features'][gene_id] = {
"name": gene_id.lower(),
"type": "CDS"
}
if "note" in feature.qualifiers:
self.reference_json['features'][gene_id]["description"] = feature.qualifiers['note'][0],
if feature.location_operator == "join":
location_list = []
for loc in feature.location.parts:
d = {
"start": int(loc.start),
"end": int(loc.end),
"strand": loc.strand
}
location_list.append(d)
locations[feature.qualifiers['gene'][0]] = {"join": location_list}
else:
locations[feature.qualifiers['gene'][0]] = {
"start": int(feature.location.start),
"end": int(feature.location.end),
"strand": feature.location.strand
}
# else:
# print(feature.type)
# print(feature)
record = self.reference_genbank[i]
self.reference_json['references'][i] = {
'accession': i,
'description': record.description,
'length': length,
'locations': locations,
'sequence': str(record.seq)
}
def write_json(self, filepath):
with open(filepath, 'w') as json_file:
json.dump(self.reference_json, json_file)
def run(self, infilepath, outfilepath):
self.load_reference_genbank(infilepath)
self.identify_features()
self.write_json(outfilepath) | en | 0.749005 | Parses a genbank file containing multiple references and searches for annotated CDS features. Writes json file with the relevant information. # tests: # what if no file # simple case check # expected attributes (reference,sequence), (genes,(start,end,strand)) # print(i) # print(feature) # else: # print(feature.type) # print(feature) | 2.804527 | 3 |
Assignment of Week 4:/TutorialsPoint - Python Data Structure and Algorithms Tutorial.py | jakkcanada/Boot_Camp | 1 | 6618155 | <gh_stars>1-10
# Python - Linked Lists
# Creation of Linked list
class Node:
def __init__(self, dataval=None):
self.dataval = dataval
self.nextval = None
class SLinkedList:
def __init__(self):
self.headval = None
list1 = SLinkedList()
list1.headval = Node("Mon")
e2 = Node("Tue")
e3 = Node("Wed")
# Link first Node to second node
list1.headval.nextval = e2
# Link second Node to third node
e2.nextval = e3
# Traversing a Linked List
class Node:
def __init__(self, dataval=None):
self.dataval = dataval
self.nextval = None
class SLinkedList:
def __init__(self):
self.headval = None
def listprint(self):
printval = self.headval
while printval is not None:
print (printval.dataval)
printval = printval.nextval
list = SLinkedList()
list.headval = Node("Mon")
e2 = Node("Tue")
e3 = Node("Wed")
# Link first Node to second node
list.headval.nextval = e2
# Link second Node to third node
e2.nextval = e3
list.listprint()
# Output
Mon
Tue
Wed
# Insertion in a Linked List
class Node:
def __init__(self, dataval=None):
self.dataval = dataval
self.nextval = None
class SLinkedList:
def __init__(self):
self.headval = None
# Print the linked list
def listprint(self):
printval = self.headval
while printval is not None:
print (printval.dataval)
printval = printval.nextval
def AtBegining(self,newdata):
NewNode = Node(newdata)
# Update the new nodes next val to existing node
NewNode.nextval = self.headval
self.headval = NewNode
list = SLinkedList()
list.headval = Node("Mon")
e2 = Node("Tue")
e3 = Node("Wed")
list.headval.nextval = e2
e2.nextval = e3
list.AtBegining("Sun")
list.listprint()
# Output
Sun
Mon
Tue
Wed
# Inserting at the End
class Node:
def __init__(self, dataval=None):
self.dataval = dataval
self.nextval = None
class SLinkedList:
def __init__(self):
self.headval = None
# Function to add newnode
def AtEnd(self, newdata):
NewNode = Node(newdata)
if self.headval is None:
self.headval = NewNode
return
laste = self.headval
while(laste.nextval):
laste = laste.nextval
laste.nextval=NewNode
# Print the linked list
def listprint(self):
printval = self.headval
while printval is not None:
print (printval.dataval)
printval = printval.nextval
list = SLinkedList()
list.headval = Node("Mon")
e2 = Node("Tue")
e3 = Node("Wed")
list.headval.nextval = e2
e2.nextval = e3
list.AtEnd("Thu")
list.listprint()
# Output
Mon
Tue
Wed
Thu
# Inserting in between two Data Nodes
class Node:
def __init__(self, dataval=None):
self.dataval = dataval
self.nextval = None
class SLinkedList:
def __init__(self):
self.headval = None
# Function to add node
def Inbetween(self,middle_node,newdata):
if middle_node is None:
print("The mentioned node is absent")
return
NewNode = Node(newdata)
NewNode.nextval = middle_node.nextval
middle_node.nextval = NewNode
# Print the linked list
def listprint(self):
printval = self.headval
while printval is not None:
print (printval.dataval)
printval = printval.nextval
list = SLinkedList()
list.headval = Node("Mon")
e2 = Node("Tue")
e3 = Node("Thu")
list.headval.nextval = e2
e2.nextval = e3
list.Inbetween(list.headval.nextval,"Fri")
list.listprint()
# Output
Mon
Tue
Fri
Thu
# Removing an Item
class Node:
def __init__(self, data=None):
self.data = data
self.next = None
class SLinkedList:
def __init__(self):
self.head = None
def Atbegining(self, data_in):
NewNode = Node(data_in)
NewNode.next = self.head
self.head = NewNode
# Function to remove node
def RemoveNode(self, Removekey):
HeadVal = self.head
if (HeadVal is not None):
if (HeadVal.data == Removekey):
self.head = HeadVal.next
HeadVal = None
return
while (HeadVal is not None):
if HeadVal.data == Removekey:
break
prev = HeadVal
HeadVal = HeadVal.next
if (HeadVal == None):
return
prev.next = HeadVal.next
HeadVal = None
def LListprint(self):
printval = self.head
while (printval):
print(printval.data),
printval = printval.next
llist = SLinkedList()
llist.Atbegining("Mon")
llist.Atbegining("Tue")
llist.Atbegining("Wed")
llist.Atbegining("Thu")
llist.RemoveNode("Tue")
llist.LListprint()
# Output
Thu
Wed
Mon
# Python - Stack
class Stack:
def __init__(self):
self.stack = []
def add(self, dataval):
# Use list append method to add element
if dataval not in self.stack:
self.stack.append(dataval)
return True
else:
return False
# Use peek to look at the top of the stack
def peek(self):
return self.stack[-1]
AStack = Stack()
AStack.add("Mon")
AStack.add("Tue")
AStack.peek()
print(AStack.peek())
AStack.add("Wed")
AStack.add("Thu")
print(AStack.peek())
# Output
Tue
Thu
# POP from a Stack
class Stack:
def __init__(self):
self.stack = []
def add(self, dataval):
# Use list append method to add element
if dataval not in self.stack:
self.stack.append(dataval)
return True
else:
return False
# Use list pop method to remove element
def remove(self):
if len(self.stack) <= 0:
return ("No element in the Stack")
else:
return self.stack.pop()
AStack = Stack()
AStack.add("Mon")
AStack.add("Tue")
AStack.add("Wed")
AStack.add("Thu")
print(AStack.remove())
print(AStack.remove())
# Output
Thu
Wed
# Python - Queue
# Adding Elements
class Queue:
def __init__(self):
self.queue = list()
def addtoq(self,dataval):
# Insert method to add element
if dataval not in self.queue:
self.queue.insert(0,dataval)
return True
return False
def size(self):
return len(self.queue)
TheQueue = Queue()
TheQueue.addtoq("Mon")
TheQueue.addtoq("Tue")
TheQueue.addtoq("Wed")
print(TheQueue.size())
# Output
3
# Removing Element
class Queue:
def __init__(self):
self.queue = list()
def addtoq(self,dataval):
# Insert method to add element
if dataval not in self.queue:
self.queue.insert(0,dataval)
return True
return False
# Pop method to remove element
def removefromq(self):
if len(self.queue)>0:
return self.queue.pop()
return ("No elements in Queue!")
TheQueue = Queue()
TheQueue.addtoq("Mon")
TheQueue.addtoq("Tue")
TheQueue.addtoq("Wed")
print(TheQueue.removefromq())
print(TheQueue.removefromq())
# Output
Mon
Tue
# Python - Dequeue
import collections
DoubleEnded = collections.deque(["Mon","Tue","Wed"])
DoubleEnded.append("Thu")
print ("Appended at right - ")
print (DoubleEnded)
DoubleEnded.appendleft("Sun")
print ("Appended at right at left is - ")
print (DoubleEnded)
DoubleEnded.pop()
print ("Deleting from right - ")
print (DoubleEnded)
DoubleEnded.popleft()
print ("Deleting from left - ")
print (DoubleEnded)
# Output
Appended at right -
deque(['Mon', 'Tue', 'Wed', 'Thu'])
Appended at right at left is -
deque(['Sun', 'Mon', 'Tue', 'Wed', 'Thu'])
Deleting from right -
deque(['Sun', 'Mon', 'Tue', 'Wed'])
Deleting from left -
deque(['Mon', 'Tue', 'Wed'])
# Python - Advanced Linked list
# Creating Doubly linked list
class Node:
def __init__(self, data):
self.data = data
self.next = None
self.prev = None
class doubly_linked_list:
def __init__(self):
self.head = None
# Adding data elements
def push(self, NewVal):
NewNode = Node(NewVal)
NewNode.next = self.head
if self.head is not None:
self.head.prev = NewNode
self.head = NewNode
# Print the Doubly Linked list
def listprint(self, node):
while (node is not None):
print(node.data),
last = node
node = node.next
dllist = doubly_linked_list()
dllist.push(12)
dllist.push(8)
dllist.push(62)
dllist.listprint(dllist.head)
# Output
62 8 12
# Inserting into Doubly Linked List
# Create the Node class
class Node:
def __init__(self, data):
self.data = data
self.next = None
self.prev = None
# Create the doubly linked list
class doubly_linked_list:
def __init__(self):
self.head = None
# Define the push method to add elements
def push(self, NewVal):
NewNode = Node(NewVal)
NewNode.next = self.head
if self.head is not None:
self.head.prev = NewNode
self.head = NewNode
# Define the insert method to insert the element
def insert(self, prev_node, NewVal):
if prev_node is None:
return
NewNode = Node(NewVal)
NewNode.next = prev_node.next
prev_node.next = NewNode
NewNode.prev = prev_node
if NewNode.next is not None:
NewNode.next.prev = NewNode
# Define the method to print the linked list
def listprint(self, node):
while (node is not None):
print(node.data),
last = node
node = node.next
dllist = doubly_linked_list()
dllist.push(12)
dllist.push(8)
dllist.push(62)
dllist.insert(dllist.head.next, 13)
dllist.listprint(dllist.head)
# Output
62 8 13 12
# Appending to a Doubly linked list
# Create the node class
class Node:
def __init__(self, data):
self.data = data
self.next = None
self.prev = None
# Create the doubly linked list class
class doubly_linked_list:
def __init__(self):
self.head = None
# Define the push method to add elements at the begining
def push(self, NewVal):
NewNode = Node(NewVal)
NewNode.next = self.head
if self.head is not None:
self.head.prev = NewNode
self.head = NewNode
# Define the append method to add elements at the end
def append(self, NewVal):
NewNode = Node(NewVal)
NewNode.next = None
if self.head is None:
NewNode.prev = None
self.head = NewNode
return
last = self.head
while (last.next is not None):
last = last.next
last.next = NewNode
NewNode.prev = last
return
# Define the method to print
def listprint(self, node):
while (node is not None):
print(node.data),
last = node
node = node.next
dllist = doubly_linked_list()
dllist.push(12)
dllist.append(9)
dllist.push(8)
dllist.push(62)
dllist.append(45)
dllist.listprint(dllist.head)
# Output
62 8 12 9 45
# Python - Hash Table
# Accessing Values in Dictionary
# Declare a dictionary
dict = {'Name': 'Zara', 'Age': 7, 'Class': 'First'}
# Accessing the dictionary with its key
print "dict['Name']: ", dict['Name']
print "dict['Age']: ", dict['Age']
# Output
dict['Name']: Zara
dict['Age']: 7
# Updating Dictionary
# Declare a dictionary
dict = {'Name': 'Zara', 'Age': 7, 'Class': 'First'}
dict['Age'] = 8; # update existing entry
dict['School'] = "DPS School"; # Add new entry
print "dict['Age']: ", dict['Age']
print "dict['School']: ", dict['School']
# Output
dict['Age']: 8
dict['School']: DPS School
# Delete Dictionary Elements
dict = {'Name': 'Zara', 'Age': 7, 'Class': 'First'}
del dict['Name']; # remove entry with key 'Name'
dict.clear(); # remove all entries in dict
del dict ; # delete entire dictionary
print "dict['Age']: ", dict['Age']
print "dict['School']: ", dict['School']
# Output
dict['Age']:
Traceback (most recent call last):
File "test.py", line 8, in <module>
print "dict['Age']: ", dict['Age'];
TypeError: 'type' object is unsubscriptable
# Python - Binary Tree
# Create Root
class Node:
def __init__(self, data):
self.left = None
self.right = None
self.data = data
def PrintTree(self):
print(self.data)
root = Node(10)
root.PrintTree()
# Output
10
# Inserting into a Tree
class Node:
def __init__(self, data):
self.left = None
self.right = None
self.data = data
def insert(self, data):
# Compare the new value with the parent node
if self.data:
if data < self.data:
if self.left is None:
self.left = Node(data)
else:
self.left.insert(data)
elif data > self.data:
if self.right is None:
self.right = Node(data)
else:
self.right.insert(data)
else:
self.data = data
# Print the tree
def PrintTree(self):
if self.left:
self.left.PrintTree()
print( self.data),
if self.right:
self.right.PrintTree()
# Use the insert method to add nodes
root = Node(12)
root.insert(6)
root.insert(14)
root.insert(3)
root.PrintTree()
# Output
3 6 12 14
# Tree Traversal Algorithms
class Node:
def __init__(self, data):
self.left = None
self.right = None
self.data = data
# Insert Node
def insert(self, data):
if self.data:
if data < self.data:
if self.left is None:
self.left = Node(data)
else:
self.left.insert(data)
else data > self.data:
if self.right is None:
self.right = Node(data)
else:
self.right.insert(data)
else:
self.data = data
# Print the Tree
def PrintTree(self):
if self.left:
self.left.PrintTree()
print( self.data),
if self.right:
self.right.PrintTree()
# Inorder traversal
# Left -> Root -> Right
def inorderTraversal(self, root):
res = []
if root:
res = self.inorderTraversal(root.left)
res.append(root.data)
res = res + self.inorderTraversal(root.right)
return res
root = Node(27)
root.insert(14)
root.insert(35)
root.insert(10)
root.insert(19)
root.insert(31)
root.insert(42)
print(root.inorderTraversal(root))
# Output
[10, 14, 19, 27, 31, 35, 42]
# Pre-order Traversal
class Node:
def __init__(self, data):
self.left = None
self.right = None
self.data = data
# Insert Node
def insert(self, data):
if self.data:
if data < self.data:
if self.left is None:
self.left = Node(data)
else:
self.left.insert(data)
elif data > self.data:
if self.right is None:
self.right = Node(data)
else:
self.right.insert(data)
else:
self.data = data
# Print the Tree
def PrintTree(self):
if self.left:
self.left.PrintTree()
print( self.data),
if self.right:
self.right.PrintTree()
# Preorder traversal
# Root -> Left ->Right
def PreorderTraversal(self, root):
res = []
if root:
res.append(root.data)
res = res + self.PreorderTraversal(root.left)
res = res + self.PreorderTraversal(root.right)
return res
root = Node(27)
root.insert(14)
root.insert(35)
root.insert(10)
root.insert(19)
root.insert(31)
root.insert(42)
print(root.PreorderTraversal(root))
# Output
[27, 14, 10, 19, 35, 31, 42]
# Post-order Traversal
class Node:
def __init__(self, data):
self.left = None
self.right = None
self.data = data
# Insert Node
def insert(self, data):
if self.data:
if data < self.data:
if self.left is None:
self.left = Node(data)
else:
self.left.insert(data)
else if data > self.data:
if self.right is None:
self.right = Node(data)
else:
self.right.insert(data)
else:
self.data = data
# Print the Tree
def PrintTree(self):
if self.left:
self.left.PrintTree()
print( self.data),
if self.right:
self.right.PrintTree()
# Postorder traversal
# Left ->Right -> Root
def PostorderTraversal(self, root):
res = []
if root:
res = self.PostorderTraversal(root.left)
res = res + self.PostorderTraversal(root.right)
res.append(root.data)
return res
root = Node(27)
root.insert(14)
root.insert(35)
root.insert(10)
root.insert(19)
root.insert(31)
root.insert(42)
print(root.PostorderTraversal(root))
# Output
[10, 19, 14, 31, 42, 35, 27]
| # Python - Linked Lists
# Creation of Linked list
class Node:
def __init__(self, dataval=None):
self.dataval = dataval
self.nextval = None
class SLinkedList:
def __init__(self):
self.headval = None
list1 = SLinkedList()
list1.headval = Node("Mon")
e2 = Node("Tue")
e3 = Node("Wed")
# Link first Node to second node
list1.headval.nextval = e2
# Link second Node to third node
e2.nextval = e3
# Traversing a Linked List
class Node:
def __init__(self, dataval=None):
self.dataval = dataval
self.nextval = None
class SLinkedList:
def __init__(self):
self.headval = None
def listprint(self):
printval = self.headval
while printval is not None:
print (printval.dataval)
printval = printval.nextval
list = SLinkedList()
list.headval = Node("Mon")
e2 = Node("Tue")
e3 = Node("Wed")
# Link first Node to second node
list.headval.nextval = e2
# Link second Node to third node
e2.nextval = e3
list.listprint()
# Output
Mon
Tue
Wed
# Insertion in a Linked List
class Node:
def __init__(self, dataval=None):
self.dataval = dataval
self.nextval = None
class SLinkedList:
def __init__(self):
self.headval = None
# Print the linked list
def listprint(self):
printval = self.headval
while printval is not None:
print (printval.dataval)
printval = printval.nextval
def AtBegining(self,newdata):
NewNode = Node(newdata)
# Update the new nodes next val to existing node
NewNode.nextval = self.headval
self.headval = NewNode
list = SLinkedList()
list.headval = Node("Mon")
e2 = Node("Tue")
e3 = Node("Wed")
list.headval.nextval = e2
e2.nextval = e3
list.AtBegining("Sun")
list.listprint()
# Output
Sun
Mon
Tue
Wed
# Inserting at the End
class Node:
def __init__(self, dataval=None):
self.dataval = dataval
self.nextval = None
class SLinkedList:
def __init__(self):
self.headval = None
# Function to add newnode
def AtEnd(self, newdata):
NewNode = Node(newdata)
if self.headval is None:
self.headval = NewNode
return
laste = self.headval
while(laste.nextval):
laste = laste.nextval
laste.nextval=NewNode
# Print the linked list
def listprint(self):
printval = self.headval
while printval is not None:
print (printval.dataval)
printval = printval.nextval
list = SLinkedList()
list.headval = Node("Mon")
e2 = Node("Tue")
e3 = Node("Wed")
list.headval.nextval = e2
e2.nextval = e3
list.AtEnd("Thu")
list.listprint()
# Output
Mon
Tue
Wed
Thu
# Inserting in between two Data Nodes
class Node:
def __init__(self, dataval=None):
self.dataval = dataval
self.nextval = None
class SLinkedList:
def __init__(self):
self.headval = None
# Function to add node
def Inbetween(self,middle_node,newdata):
if middle_node is None:
print("The mentioned node is absent")
return
NewNode = Node(newdata)
NewNode.nextval = middle_node.nextval
middle_node.nextval = NewNode
# Print the linked list
def listprint(self):
printval = self.headval
while printval is not None:
print (printval.dataval)
printval = printval.nextval
list = SLinkedList()
list.headval = Node("Mon")
e2 = Node("Tue")
e3 = Node("Thu")
list.headval.nextval = e2
e2.nextval = e3
list.Inbetween(list.headval.nextval,"Fri")
list.listprint()
# Output
Mon
Tue
Fri
Thu
# Removing an Item
class Node:
def __init__(self, data=None):
self.data = data
self.next = None
class SLinkedList:
    """Singly linked list with prepend and remove-by-value operations."""
    def __init__(self):
        # An empty list has no head node.
        self.head = None
    def Atbegining(self, data_in):
        """Insert a new node holding *data_in* at the head of the list."""
        NewNode = Node(data_in)
        NewNode.next = self.head
        self.head = NewNode
    def RemoveNode(self, Removekey):
        """Unlink the first node whose payload equals *Removekey*.

        Does nothing when the key is absent or the list is empty.
        """
        HeadVal = self.head
        # Special case: the key sits in the head node.
        if HeadVal is not None:  # 'is' (not '==') for None comparisons
            if HeadVal.data == Removekey:
                self.head = HeadVal.next
                return
        # Scan for the node to delete, remembering its predecessor.
        prev = None
        while HeadVal is not None:
            if HeadVal.data == Removekey:
                break
            prev = HeadVal
            HeadVal = HeadVal.next
        if HeadVal is None:
            # Key not found anywhere in the list.
            return
        prev.next = HeadVal.next
    def LListprint(self):
        """Print every payload from head to tail, one per line."""
        printval = self.head
        while printval:
            print(printval.data)
            printval = printval.next
llist = SLinkedList()
llist.Atbegining("Mon")
llist.Atbegining("Tue")
llist.Atbegining("Wed")
llist.Atbegining("Thu")
llist.RemoveNode("Tue")
llist.LListprint()
# Output
Thu
Wed
Mon
# Python - Stack
class Stack:
    """LIFO stack backed by a list; duplicate values are silently refused."""
    def __init__(self):
        self.stack = []
    def add(self, dataval):
        """Push *dataval*; return False (and do nothing) if already present."""
        if dataval in self.stack:
            return False
        self.stack.append(dataval)
        return True
    def peek(self):
        """Return the top element without removing it."""
        return self.stack[-1]
AStack = Stack()
AStack.add("Mon")
AStack.add("Tue")
AStack.peek()
print(AStack.peek())
AStack.add("Wed")
AStack.add("Thu")
print(AStack.peek())
# Output
Tue
Thu
# POP from a Stack
class Stack:
    """LIFO stack backed by a list; duplicates refused, pop via remove()."""
    def __init__(self):
        self.stack = []
    def add(self, dataval):
        """Push *dataval*; return False (and do nothing) if already present."""
        if dataval not in self.stack:
            self.stack.append(dataval)
            return True
        else:
            return False
    def remove(self):
        """Pop and return the top element, or a message when the stack is empty."""
        if not self.stack:  # truthiness instead of len(...) <= 0
            return ("No element in the Stack")
        return self.stack.pop()
AStack = Stack()
AStack.add("Mon")
AStack.add("Tue")
AStack.add("Wed")
AStack.add("Thu")
print(AStack.remove())
print(AStack.remove())
# Output
Thu
Wed
# Python - Queue
# Adding Elements
class Queue:
    """FIFO queue: enqueue at index 0, dequeue from the tail."""
    def __init__(self):
        self.queue = list()
    def addtoq(self, dataval):
        """Enqueue *dataval* unless already present; report success."""
        if dataval in self.queue:
            return False
        self.queue.insert(0, dataval)
        return True
    def size(self):
        """Return the number of queued elements."""
        return len(self.queue)
TheQueue = Queue()
TheQueue.addtoq("Mon")
TheQueue.addtoq("Tue")
TheQueue.addtoq("Wed")
print(TheQueue.size())
# Output
3
# Removing Element
class Queue:
    """FIFO queue: new items go in at index 0, the oldest comes off the tail."""
    def __init__(self):
        self.queue = list()
    def addtoq(self, dataval):
        """Enqueue *dataval* unless already present; report success."""
        if dataval not in self.queue:
            self.queue.insert(0, dataval)
            return True
        return False
    def removefromq(self):
        """Dequeue and return the oldest element, or a message when empty."""
        if self.queue:  # truthiness instead of len(...) > 0
            return self.queue.pop()
        return ("No elements in Queue!")
TheQueue = Queue()
TheQueue.addtoq("Mon")
TheQueue.addtoq("Tue")
TheQueue.addtoq("Wed")
print(TheQueue.removefromq())
print(TheQueue.removefromq())
# Output
Mon
Tue
# Python - Dequeue
import collections
DoubleEnded = collections.deque(["Mon","Tue","Wed"])
DoubleEnded.append("Thu")
print ("Appended at right - ")
print (DoubleEnded)
DoubleEnded.appendleft("Sun")
print ("Appended at right at left is - ")
print (DoubleEnded)
DoubleEnded.pop()
print ("Deleting from right - ")
print (DoubleEnded)
DoubleEnded.popleft()
print ("Deleting from left - ")
print (DoubleEnded)
# Output
Appended at right -
deque(['Mon', 'Tue', 'Wed', 'Thu'])
Appended at right at left is -
deque(['Sun', 'Mon', 'Tue', 'Wed', 'Thu'])
Deleting from right -
deque(['Sun', 'Mon', 'Tue', 'Wed'])
Deleting from left -
deque(['Mon', 'Tue', 'Wed'])
# Python - Advanced Linked list
# Creating Doubly linked list
class Node:
def __init__(self, data):
self.data = data
self.next = None
self.prev = None
class doubly_linked_list:
    """Doubly linked list supporting push-at-head and forward printing."""
    def __init__(self):
        # An empty list has no head node.
        self.head = None
    def push(self, NewVal):
        """Insert a new node holding *NewVal* before the current head."""
        NewNode = Node(NewVal)
        NewNode.next = self.head
        if self.head is not None:
            self.head.prev = NewNode
        self.head = NewNode
    def listprint(self, node):
        """Print payloads from *node* forward to the tail."""
        while node is not None:
            # Dropped the unused 'last' local and the stray Python-2
            # trailing comma after print(...).
            print(node.data)
            node = node.next
dllist = doubly_linked_list()
dllist.push(12)
dllist.push(8)
dllist.push(62)
dllist.listprint(dllist.head)
# Output
62 8 12
# Inserting into Doubly Linked List
# Create the Node class
class Node:
def __init__(self, data):
self.data = data
self.next = None
self.prev = None
# Create the doubly linked list
class doubly_linked_list:
    """Doubly linked list with head insertion and insert-after."""
    def __init__(self):
        self.head = None
    def push(self, NewVal):
        """Insert a new node holding *NewVal* before the current head."""
        NewNode = Node(NewVal)
        NewNode.next = self.head
        if self.head is not None:
            self.head.prev = NewNode
        self.head = NewNode
    def insert(self, prev_node, NewVal):
        """Insert a new node holding *NewVal* immediately after *prev_node*.

        Silently does nothing when *prev_node* is None.
        """
        if prev_node is None:
            return
        NewNode = Node(NewVal)
        # Splice the node in, fixing all four affected links.
        NewNode.next = prev_node.next
        prev_node.next = NewNode
        NewNode.prev = prev_node
        if NewNode.next is not None:
            NewNode.next.prev = NewNode
    def listprint(self, node):
        """Print payloads from *node* forward to the tail."""
        while node is not None:
            # Dropped the unused 'last' local and the stray Python-2
            # trailing comma after print(...).
            print(node.data)
            node = node.next
dllist = doubly_linked_list()
dllist.push(12)
dllist.push(8)
dllist.push(62)
dllist.insert(dllist.head.next, 13)
dllist.listprint(dllist.head)
# Output
62 8 13 12
# Appending to a Doubly linked list
# Create the node class
class Node:
def __init__(self, data):
self.data = data
self.next = None
self.prev = None
# Create the doubly linked list class
class doubly_linked_list:
    """Doubly linked list with push-at-head and append-at-tail."""
    def __init__(self):
        self.head = None
    def push(self, NewVal):
        """Insert a new node holding *NewVal* before the current head."""
        NewNode = Node(NewVal)
        NewNode.next = self.head
        if self.head is not None:
            self.head.prev = NewNode
        self.head = NewNode
    def append(self, NewVal):
        """Append a new node holding *NewVal* at the tail of the list."""
        NewNode = Node(NewVal)
        NewNode.next = None
        if self.head is None:
            # Empty list: the new node becomes the head.
            NewNode.prev = None
            self.head = NewNode
            return
        # Walk to the last node and link the new node after it.
        last = self.head
        while last.next is not None:
            last = last.next
        last.next = NewNode
        NewNode.prev = last
        return
    def listprint(self, node):
        """Print payloads from *node* forward to the tail."""
        while node is not None:
            # Dropped the unused 'last' local and the stray Python-2
            # trailing comma after print(...).
            print(node.data)
            node = node.next
dllist = doubly_linked_list()
dllist.push(12)
dllist.append(9)
dllist.push(8)
dllist.push(62)
dllist.append(45)
dllist.listprint(dllist.head)
# Output
62 8 12 9 45
# Python - Hash Table
# Accessing Values in Dictionary
# Declare a dictionary
dict = {'Name': 'Zara', 'Age': 7, 'Class': 'First'}
# Accessing the dictionary with its key
print "dict['Name']: ", dict['Name']
print "dict['Age']: ", dict['Age']
# Output
dict['Name']: Zara
dict['Age']: 7
# Updating Dictionary
# Declare a dictionary
dict = {'Name': 'Zara', 'Age': 7, 'Class': 'First'}
dict['Age'] = 8; # update existing entry
dict['School'] = "DPS School"; # Add new entry
print "dict['Age']: ", dict['Age']
print "dict['School']: ", dict['School']
# Output
dict['Age']: 8
dict['School']: DPS School
# Delete Dictionary Elements
dict = {'Name': 'Zara', 'Age': 7, 'Class': 'First'}
del dict['Name']; # remove entry with key 'Name'
dict.clear(); # remove all entries in dict
del dict ; # delete entire dictionary
print "dict['Age']: ", dict['Age']
print "dict['School']: ", dict['School']
# Output
dict['Age']:
Traceback (most recent call last):
File "test.py", line 8, in <module>
print "dict['Age']: ", dict['Age'];
TypeError: 'type' object is unsubscriptable
# Python - Binary Tree
# Create Root
class Node:
    """Binary-tree node holding *data* with initially empty children."""
    def __init__(self, data):
        self.data = data
        self.left = None
        self.right = None
    def PrintTree(self):
        """Print this node's payload."""
        print(self.data)
root = Node(10)
root.PrintTree()
# Output
10
# Inserting into a Tree
class Node:
    """Binary search tree node; duplicate inserts are ignored."""
    def __init__(self, data):
        self.left = None
        self.right = None
        self.data = data
    def insert(self, data):
        """Insert *data* into the subtree rooted here, keeping BST order."""
        if not self.data:
            # Falsy payload: adopt the incoming value in place.
            self.data = data
            return
        if data < self.data:
            if self.left is None:
                self.left = Node(data)
            else:
                self.left.insert(data)
        elif data > self.data:
            if self.right is None:
                self.right = Node(data)
            else:
                self.right.insert(data)
        # data == self.data: duplicate, nothing to do.
    def PrintTree(self):
        """Print payloads in ascending (in-order) sequence."""
        if self.left:
            self.left.PrintTree()
        print(self.data)
        if self.right:
            self.right.PrintTree()
# Use the insert method to add nodes
root = Node(12)
root.insert(6)
root.insert(14)
root.insert(3)
root.PrintTree()
# Output
3 6 12 14
# Tree Traversal Algorithms
class Node:
    """Binary search tree node with insertion and in-order traversal."""
    def __init__(self, data):
        self.left = None
        self.right = None
        self.data = data
    def insert(self, data):
        """Insert *data* keeping BST order; duplicates are dropped."""
        if self.data:
            if data < self.data:
                if self.left is None:
                    self.left = Node(data)
                else:
                    self.left.insert(data)
            elif data > self.data:  # was 'else data > ...:', a SyntaxError
                if self.right is None:
                    self.right = Node(data)
                else:
                    self.right.insert(data)
        else:
            self.data = data
    def PrintTree(self):
        """Print payloads in ascending (in-order) sequence."""
        if self.left:
            self.left.PrintTree()
        print(self.data)
        if self.right:
            self.right.PrintTree()
    def inorderTraversal(self, root):
        """Return payloads of the subtree at *root* in sorted (left-root-right) order."""
        res = []
        if root:
            res = self.inorderTraversal(root.left)
            res.append(root.data)
            res = res + self.inorderTraversal(root.right)
        return res
root = Node(27)
root.insert(14)
root.insert(35)
root.insert(10)
root.insert(19)
root.insert(31)
root.insert(42)
print(root.inorderTraversal(root))
# Output
[10, 14, 19, 27, 31, 35, 42]
# Pre-order Traversal
class Node:
    """Binary search tree node with insertion and pre-order traversal."""
    def __init__(self, data):
        self.left = None
        self.right = None
        self.data = data
    def insert(self, data):
        """Insert *data* keeping BST order; duplicates are dropped."""
        if not self.data:
            # Falsy payload: adopt the incoming value in place.
            self.data = data
            return
        if data < self.data:
            if self.left is None:
                self.left = Node(data)
            else:
                self.left.insert(data)
        elif data > self.data:
            if self.right is None:
                self.right = Node(data)
            else:
                self.right.insert(data)
    def PrintTree(self):
        """Print payloads in ascending (in-order) sequence."""
        if self.left:
            self.left.PrintTree()
        print(self.data)
        if self.right:
            self.right.PrintTree()
    def PreorderTraversal(self, root):
        """Return payloads of the subtree at *root* in root-left-right order."""
        if not root:
            return []
        out = [root.data]
        out += self.PreorderTraversal(root.left)
        out += self.PreorderTraversal(root.right)
        return out
root = Node(27)
root.insert(14)
root.insert(35)
root.insert(10)
root.insert(19)
root.insert(31)
root.insert(42)
print(root.PreorderTraversal(root))
# Output
[27, 14, 10, 19, 35, 31, 42]
# Post-order Traversal
class Node:
    """Binary search tree node with insertion and post-order traversal."""
    def __init__(self, data):
        self.left = None
        self.right = None
        self.data = data
    def insert(self, data):
        """Insert *data* keeping BST order; duplicates are dropped."""
        if self.data:
            if data < self.data:
                if self.left is None:
                    self.left = Node(data)
                else:
                    self.left.insert(data)
            elif data > self.data:  # was 'else if ...:', not valid Python
                if self.right is None:
                    self.right = Node(data)
                else:
                    self.right.insert(data)
        else:
            self.data = data
    def PrintTree(self):
        """Print payloads in ascending (in-order) sequence."""
        if self.left:
            self.left.PrintTree()
        print(self.data)
        if self.right:
            self.right.PrintTree()
    def PostorderTraversal(self, root):
        """Return payloads of the subtree at *root* in left-right-root order."""
        res = []
        if root:
            res = self.PostorderTraversal(root.left)
            res = res + self.PostorderTraversal(root.right)
            res.append(root.data)
        return res
root = Node(27)
root.insert(14)
root.insert(35)
root.insert(10)
root.insert(19)
root.insert(31)
root.insert(42)
print(root.PostorderTraversal(root))
# Output
[10, 19, 14, 31, 42, 35, 27] | en | 0.664585 | # Python - Linked Lists # Creation of Linked list # Link first Node to second node # Link second Node to third node # Traversing a Linked List # Link first Node to second node # Link second Node to third node # Output # Insertion in a Linked List # Print the linked list # Update the new nodes next val to existing node # Output # Inserting at the End # Function to add newnode # Print the linked list # Output # Inserting in between two Data Nodes # Function to add node # Print the linked list # Output # Removing an Item # Function to remove node # Output # Python - Stack # Use list append method to add element # Use peek to look at the top of the stack # Output # POP from a Stack # Use list append method to add element # Use list pop method to remove element # Output # Python - Queue # Adding Elements # Insert method to add element # Output # Removing Element # Insert method to add element # Pop method to remove element # Output # Python - Dequeue # Output # Python - Advanced Linked list # Creating Doubly linked list # Adding data elements # Print the Doubly Linked list # Output # Inserting into Doubly Linked List # Create the Node class # Create the doubly linked list # Define the push method to add elements # Define the insert method to insert the element # Define the method to print the linked list # Output # Appending to a Doubly linked list # Create the node class # Create the doubly linked list class # Define the push method to add elements at the begining # Define the append method to add elements at the end # Define the method to print # Output # Python - Hash Table # Accessing Values in Dictionary # Declare a dictionary # Accessing the dictionary with its key # Output # Updating Dictionary # Declare a dictionary # update existing entry # Add new entry # Output # Delete Dictionary Elements # remove entry with key 'Name' # remove all entries in dict # delete entire dictionary # Output # Python - Binary Tree # 
Create Root # Output # Inserting into a Tree # Compare the new value with the parent node # Print the tree # Use the insert method to add nodes # Output # Tree Traversal Algorithms # Insert Node # Print the Tree # Inorder traversal # Left -> Root -> Right # Output # Pre-order Traversal # Insert Node # Print the Tree # Preorder traversal # Root -> Left ->Right # Output # Post-order Traversal # Insert Node # Print the Tree # Postorder traversal # Left ->Right -> Root # Output | 4.396301 | 4 |
kornia/augmentation/_2d/intensity/channel_shuffle.py | dichen-cd/kornia | 1 | 6618156 | from typing import Dict, Optional
import torch
from kornia.augmentation._2d.intensity.base import IntensityAugmentationBase2D
class RandomChannelShuffle(IntensityAugmentationBase2D):
    r"""Shuffle the channels of a batch of multi-dimensional images.

    .. image:: _static/img/RandomChannelShuffle.png

    Args:
        return_transform: if ``True`` return the matrix describing the transformation applied to each
            input tensor. If ``False`` and the input is a tuple the applied transformation won't be concatenated.
        same_on_batch: apply the same transformation across the batch.
        p: probability of applying the transformation.

    Examples:
        >>> rng = torch.manual_seed(0)
        >>> img = torch.arange(1*2*2*2.).view(1,2,2,2)
        >>> RandomChannelShuffle()(img)
        tensor([[[[4., 5.],
                  [6., 7.]],
        <BLANKLINE>
                 [[0., 1.],
                  [2., 3.]]]])

    To apply the exact augmentation again, you may take advantage of the previous parameter state:
        >>> input = torch.randn(1, 3, 32, 32)
        >>> aug = RandomChannelShuffle(p=1.)
        >>> (aug(input) == aug(input, params=aug._params)).all()
        tensor(True)
    """
    def __init__(
        self, return_transform: bool = False, same_on_batch: bool = False, p: float = 0.5, keepdim: bool = False
    ) -> None:
        super().__init__(
            p=p, return_transform=return_transform, same_on_batch=same_on_batch, p_batch=1.0, keepdim=keepdim
        )
    def generate_parameters(self, shape: torch.Size) -> Dict[str, torch.Tensor]:
        """Draw one random channel permutation per batch element.

        ``shape`` is unpacked as (B, C, H, W); argsort of uniform noise yields
        an independent random permutation of the C channel indices per row.
        """
        B, C, _, _ = shape
        channels = torch.rand(B, C).argsort(dim=1)
        return dict(channels=channels)
    def apply_transform(
        self, input: torch.Tensor, params: Dict[str, torch.Tensor], transform: Optional[torch.Tensor] = None
    ) -> torch.Tensor:
        """Reorder each sample's channels by its permutation in ``params['channels']``."""
        out = torch.empty_like(input)
        # Index each batch element with its own channel permutation.
        for i in range(out.shape[0]):
            out[i] = input[i, params["channels"][i]]
        return out
| from typing import Dict, Optional
import torch
from kornia.augmentation._2d.intensity.base import IntensityAugmentationBase2D
class RandomChannelShuffle(IntensityAugmentationBase2D):
r"""Shuffle the channels of a batch of multi-dimensional images.
.. image:: _static/img/RandomChannelShuffle.png
Args:
return_transform: if ``True`` return the matrix describing the transformation applied to each
input tensor. If ``False`` and the input is a tuple the applied transformation won't be concatenated.
same_on_batch: apply the same transformation across the batch.
p: probability of applying the transformation.
Examples:
>>> rng = torch.manual_seed(0)
>>> img = torch.arange(1*2*2*2.).view(1,2,2,2)
>>> RandomChannelShuffle()(img)
tensor([[[[4., 5.],
[6., 7.]],
<BLANKLINE>
[[0., 1.],
[2., 3.]]]])
To apply the exact augmenation again, you may take the advantage of the previous parameter state:
>>> input = torch.randn(1, 3, 32, 32)
>>> aug = RandomChannelShuffle(p=1.)
>>> (aug(input) == aug(input, params=aug._params)).all()
tensor(True)
"""
def __init__(
self, return_transform: bool = False, same_on_batch: bool = False, p: float = 0.5, keepdim: bool = False
) -> None:
super().__init__(
p=p, return_transform=return_transform, same_on_batch=same_on_batch, p_batch=1.0, keepdim=keepdim
)
def generate_parameters(self, shape: torch.Size) -> Dict[str, torch.Tensor]:
B, C, _, _ = shape
channels = torch.rand(B, C).argsort(dim=1)
return dict(channels=channels)
def apply_transform(
self, input: torch.Tensor, params: Dict[str, torch.Tensor], transform: Optional[torch.Tensor] = None
) -> torch.Tensor:
out = torch.empty_like(input)
for i in range(out.shape[0]):
out[i] = input[i, params["channels"][i]]
return out
| en | 0.522367 | Shuffle the channels of a batch of multi-dimensional images. .. image:: _static/img/RandomChannelShuffle.png Args: return_transform: if ``True`` return the matrix describing the transformation applied to each input tensor. If ``False`` and the input is a tuple the applied transformation won't be concatenated. same_on_batch: apply the same transformation across the batch. p: probability of applying the transformation. Examples: >>> rng = torch.manual_seed(0) >>> img = torch.arange(1*2*2*2.).view(1,2,2,2) >>> RandomChannelShuffle()(img) tensor([[[[4., 5.], [6., 7.]], <BLANKLINE> [[0., 1.], [2., 3.]]]]) To apply the exact augmenation again, you may take the advantage of the previous parameter state: >>> input = torch.randn(1, 3, 32, 32) >>> aug = RandomChannelShuffle(p=1.) >>> (aug(input) == aug(input, params=aug._params)).all() tensor(True) | 2.911251 | 3 |
systems/playout/pb_system_3_6.py | Julian-Theis/AVATAR | 7 | 6618157 | <filename>systems/playout/pb_system_3_6.py
from util.playout import standard_playout
import os
from conf.settings import DATA_PATH
WORK_PATH = os.path.abspath(os.getcwd())
if __name__ == "__main__":
    # Petri-net model and input/output artifact paths for the playout run.
    pn = "data/systems/pb_system_3_6.pnml"
    f_pop = "data/variants/pb_system_3_6_pop.txt"
    f_train = "data/variants/pb_system_3_6_train.txt"
    f_test = "data/variants/pb_system_3_6_test.txt"
    xes_train = "data/variants/pb_system_3_6_train.xes"
    csv_train = "data/variants/pb_system_3_6_train.csv"
    # Generate population/train/test variants plus XES/CSV train logs.
    standard_playout(pn=pn, f_pop=f_pop, f_train=f_train,f_test=f_test, xes_train=xes_train, csv_train=csv_train)
from util.playout import standard_playout
import os
from conf.settings import DATA_PATH
WORK_PATH = os.path.abspath(os.getcwd())
if __name__ == "__main__":
pn = "data/systems/pb_system_3_6.pnml"
f_pop = "data/variants/pb_system_3_6_pop.txt"
f_train = "data/variants/pb_system_3_6_train.txt"
f_test = "data/variants/pb_system_3_6_test.txt"
xes_train = "data/variants/pb_system_3_6_train.xes"
csv_train = "data/variants/pb_system_3_6_train.csv"
standard_playout(pn=pn, f_pop=f_pop, f_train=f_train,f_test=f_test, xes_train=xes_train, csv_train=csv_train) | none | 1 | 1.60804 | 2 | |
daemons/healthd.py | rexengineering/metaflow | 0 | 6618158 | <filename>daemons/healthd.py
import logging
import threading
import time
from etcd3.events import DeleteEvent, PutEvent
from quart import jsonify
import requests
from flowlib.bpmn_util import BPMNComponent
from flowlib.etcd_utils import get_etcd, get_next_level, get_keys_from_prefix
from flowlib.executor import get_executor
from flowlib.flowd_utils import get_log_format
from flowlib.quart_app import QuartApp
from flowlib.workflow import Workflow
from flowlib.constants import States, BStates, WorkflowKeys, WorkflowInstanceKeys
class HealthProbe:
    """Periodic HTTP health check for one BPMN task of a workflow.

    Each probe reschedules itself via ``threading.Timer`` and records the
    task's ``UP``/``DOWN`` status under the workflow's etcd key space.
    """
    def __init__(self, workflow: Workflow, task: BPMNComponent):
        self.workflow = workflow
        self.task = task
        self.key = WorkflowKeys.task_key(workflow.id, task.id)
        self.timer = None
        self.running = False
        self.status = None
        self.logger = logging.getLogger()
        self.etcd = get_etcd()
        self.wf_state_key = WorkflowKeys.state_key(workflow.id)
        health_path = self.task.health_properties.path
        if not health_path.startswith('/'):
            health_path = '/' + health_path
        self.url = f'http://{self.task.envoy_host}:{self.task.service_properties.port}{health_path}'
    def __call__(self):
        """Run one health check, record the result in etcd, and reschedule."""
        health_properties = self.task.health_properties
        try:
            response = requests.request(
                health_properties.method, self.url,
                data=health_properties.query, timeout=health_properties.timeout,
            )
            exception = None
        except requests.RequestException as exn:
            exception = exn
            response = exn.response
        result = 'UP' if exception is None and response.ok else 'DOWN'
        # Only record the probe result while the workflow's state key still
        # exists; otherwise the probe has been orphaned and must stop.
        success, _ = self.etcd.transaction(
            compare=[
                # arcane syntax from the etcd3 library...doesn't do what you think
                # https://github.com/kragniz/python-etcd3/blob/master/etcd3/transactions.py
                self.etcd.transactions.value(self.wf_state_key) != b''
            ],
            success=[self.etcd.transactions.put(self.key, result)],
            failure=[],
        )
        if not success:
            logging.warning(
                f"Probe for task {self.task.id} {self.workflow.id} was orphaned."
            )
            self.stop()
            return
        self.status = result
        self.logger.info(f'Status check for {self.task.id} is {result}')
        if self.running:
            self.timer = threading.Timer(self.task.health_properties.period, self)
            self.timer.start()
        return [self.task.id, self.status]
    def start(self):
        """Begin periodic checks; must not be called while already scheduled."""
        self.logger.info(f'Starting status checks for {self.task.id} ({self.url})')
        assert self.timer is None
        self.timer = threading.Timer(self.task.health_properties.period, self)
        self.running = True
        self.timer.start()
    def stop(self):
        """Cancel future checks and remove this probe's etcd key."""
        # Clear the running flag FIRST so an in-flight __call__ does not
        # reschedule a fresh timer right after we cancel the pending one
        # (previously stop() left running=True, leaking a live timer).
        self.running = False
        if self.timer is not None:
            logging.info(
                f"shutting down probe for BPMNComponent {self.task.id}"
            )
            self.timer.cancel()
            # Reset so a subsequent start() passes its assertion.
            self.timer = None
        else:
            logging.warning(
                f"at shutdown, no threading.timer for probe {self.task.id}"
            )
        self.etcd.delete(self.key)
class HealthManager:
    """Owns all HealthProbes and drives workflow state transitions in etcd.

    Watches the workflow key prefix for STARTING/STOPPING state changes and
    spawns background tasks that wait for probes to report UP or DOWN.
    """
    def __init__(self):
        self.etcd = get_etcd()
        self.executor = get_executor()
        # Load every workflow already registered under the etcd root.
        self.workflows = {
            workflow_id: Workflow.from_id(workflow_id)
            for workflow_id in get_next_level(WorkflowKeys.ROOT)
        }
        # workflow_id -> {task_id -> HealthProbe}
        self.probes = {}
        self.future = None
        self.cancel_watch = None
        self.logger = logging.getLogger()
    def __call__(self):
        """Watch loop: react to workflow state key changes (runs in executor)."""
        watch_iter, self.cancel_watch = self.etcd.watch_prefix(WorkflowKeys.ROOT)
        for event in watch_iter:
            key = event.key.decode('utf-8')
            value = event.value.decode('utf-8')
            if key.endswith('/state'):
                workflow_id = key.split('/')[3]
                if isinstance(event, PutEvent):
                    if value == States.STARTING:
                        # New deployment: build and start probes, then wait
                        # for all of them to come UP in the background.
                        assert workflow_id not in self.workflows.keys()
                        workflow = Workflow.from_id(workflow_id)
                        self.workflows[workflow_id] = workflow
                        self.probes[workflow_id] = {
                            component.id: HealthProbe(workflow, component)
                            for component in workflow.process.all_components
                            if component.health_properties is not None
                        }
                        for probe in self.probes[workflow_id].values():
                            probe.start()
                        self.future = self.executor.submit(self.wait_for_up, workflow)
                    elif value == States.STOPPING:
                        workflow = self.workflows[workflow_id]
                        self.future = self.executor.submit(self.stop_workflow, workflow)
                elif isinstance(event, DeleteEvent):
                    self.logger.info(f'{workflow_id} DELETE event - {value}')
                    # No action necessary because we stop the HealthProbes in the
                    # stop_workflow() function. This is good practice because we don't want
                    # a bunch of HealthProbes making calls to services that don't exist.
    def wait_for_up(self, workflow: Workflow):
        '''Waits for workflow to come up. If the workflow does not come up within the timeout
        (defined in the `WorkflowProperties`) then the workflow is transitioned to ERROR state.
        However, the Workflow can still be transitioned from ERROR to RUNNING if a probe
        succeeds afterwards.
        '''
        def timeout_catch():
            # Fires after deployment_timeout; only transitions to ERROR if the
            # workflow is still STARTING (replace() is atomic compare-and-swap).
            if not self.etcd.replace(workflow.keys.state, States.STARTING, States.ERROR):
                logging.info(
                    f"Appears that {workflow.id} came up before timeout."
                )
            else:
                logging.error(
                    f"Workflow {workflow.id} did not come up in time; transitioned to ERROR state."
                )
        try:
            self.logger.info(f'wait_for_up() called for workflow {workflow.id}')
            probes = self.probes[workflow.id]
            watch_iter, _ = self.etcd.watch_prefix(workflow.keys.probe)
            timeout_timer = threading.Timer(
                workflow.properties.deployment_timeout, timeout_catch)
            timeout_timer.start()
            for event in watch_iter:
                self.logger.info(f'wait_for_up(): Got {type(event)} to key {event.key}')
                crnt_state = self.etcd.get(workflow.keys.state)[0]
                if (crnt_state is None) or (crnt_state not in {BStates.STARTING, BStates.ERROR}):
                    # Someone else moved the workflow on (or deleted it);
                    # this waiter is no longer responsible.
                    self.logger.info(f'wait_for_up(): Workflow {workflow.id} is no '
                                     'longer starting up, cancelling further '
                                     'monitoring.')
                    break
                if isinstance(event, PutEvent):
                    if all(probe.status == 'UP' for probe in probes.values()):
                        # Every task reported UP: STARTING/ERROR -> RUNNING.
                        result = self.etcd.replace(workflow.keys.state,
                                                   crnt_state, States.RUNNING)
                        if result:
                            self.logger.info('wait_for_up(): State transition succeeded.')
                        else:
                            self.logger.error('wait_for_up(): State transition failed.')
                        return result
        except Exception as exn:
            logging.exception(f"failed on the waiting for up on {workflow.id}", exc_info=exn)
        # Fell out of the loop or raised: best-effort transition to ERROR.
        if not self.etcd.replace(workflow.keys.state, States.STARTING, States.ERROR):
            logging.error(
                f"Couldn't transition wf {workflow.id} to ERROR state."
            )
        return False
    def wait_for_down(self, workflow: Workflow):
        """Wait until every probe reports DOWN, then mark the workflow STOPPED.

        On timeout the watch is cancelled and the workflow goes to ERROR.
        """
        self.logger.info(f'wait_for_down() called for workflow {workflow.id}')
        probes = self.probes[workflow.id]
        watch_iter, cancel = self.etcd.watch_prefix(workflow.keys.probe)
        # The timer's action is to cancel the watch, which ends the for loop.
        timeout_timer = threading.Timer(
            workflow.properties.deployment_timeout, cancel)
        timeout_timer.start()
        for event in watch_iter:
            self.logger.info(f'wait_for_down(): Got {type(event)} to key {event.key}')
            if isinstance(event, PutEvent):
                if all(probe.status == 'DOWN' for probe in probes.values()):
                    # All tasks down: stop probes, forget the workflow, and
                    # transition STOPPING -> STOPPED.
                    for probe in probes.values():
                        probe.stop()
                    del self.probes[workflow.id]
                    del self.workflows[workflow.id]
                    result = self.etcd.replace(workflow.keys.state,
                                               States.STOPPING, States.STOPPED)
                    if result:
                        self.logger.info('wait_for_down(): State transition succeeded.')
                    else:
                        self.logger.error('wait_for_down(): State transition failed.')
                    return result
        # If we got here, then the deployment timed out before coming down.
        if not self.etcd.replace(workflow.keys.state, States.STOPPING, States.ERROR):
            logging.error(
                f"Couldn't transition wf {workflow.id} to ERROR state."
            )
        return False
    def stop_workflow(self, workflow: Workflow):
        '''
        Stopping a workflow means we need to wait for all the instances for that
        workflow to COMPLETE or ERROR. Then we need to delete the deployment for
        the workflow, and finally wait for all those tasks to go DOWN before
        finally marking the workflow as STOPPED.
        TODO: Do we need to enforce a timeout?
        '''
        self.logger.info(f'stop_workflow {workflow.id}')
        try:
            self.logger.info(f'Removing workflow {workflow.id}')
            workflow.remove()
        except Exception as exn:
            logging.exception(
                f"Failed to bring down workflow {workflow.id}",
                exc_info=exn,
            )
            self.etcd.replace(workflow.keys.state, BStates.STOPPING, BStates.ERROR)
        return self.wait_for_down(workflow)
    def start(self):
        """Recreate probes for known workflows (hot restart) and begin watching."""
        for workflow in self.workflows.values():
            probes = {
                component.id: HealthProbe(workflow, component)
                for component in workflow.process.all_components
                if component.health_properties is not None
            }
            for probe in probes.values():
                probe.start()
            self.probes[workflow.id] = probes
            workflow_state = self.etcd.get(workflow.keys.state)[0].decode()
            self.logger.info(f'Started probes for {workflow.id}, in state {workflow_state}')
            # Resume any in-flight transition that was interrupted by restart.
            if workflow_state in {States.STARTING, States.ERROR}:
                self.executor.submit(self.wait_for_up, workflow)
            elif workflow_state == States.STOPPING:
                self.executor.submit(self.stop_workflow, workflow)
        self.future = self.executor.submit(self)
    def stop(self):
        """Stop every probe and cancel the etcd watch."""
        probes = [
            probe
            for workflow in self.workflows.values()
            for probe in self.probes[workflow.id].values()
        ]
        for probe in probes:
            probe.stop()
        if self.cancel_watch:
            self.cancel_watch()
    def probe_all(self):
        ''' Force a health-check on every workflow rather than waiting for the
        timers to mature.
        '''
        return [self.probe(workflow_id) for workflow_id in self.probes.keys()]
    def probe(self, workflow_id):
        ''' Force an immediate health-check on workflow_id.
        '''
        return {workflow_id : [probe() for probe in self.probes[workflow_id].values()]}
class HealthApp(QuartApp):
    """HTTP front-end for the HealthManager (status view + forced probes)."""
    def __init__(self, **kws):
        super().__init__(__name__, **kws)
        self.manager = HealthManager()
        # Register routes manually since the handlers are bound methods.
        self.app.route('/')(self.root_route)
        self.app.route('/probe/<workflow_id>')(self.probe)
    def root_route(self):
        """Return the last known probe status for every workflow/task."""
        return jsonify({workflow_id: {
            task_id: str(probe)
            for task_id, probe in self.manager.probes[workflow_id].items()
        } for workflow_id in self.manager.workflows.keys()})
    def probe(self, workflow_id):
        """Force an immediate health check on one workflow, or 'all'."""
        if not self.manager.workflows:
            return jsonify({"result":"No workflows exist"})
        if workflow_id == 'all':
            return jsonify( self.manager.probe_all() )
        if workflow_id in self.manager.workflows.keys():
            return jsonify( self.manager.probe(workflow_id) )
        return jsonify({"result":f"Workflow '{workflow_id}' not found"})
    def _shutdown(self):
        # Stop probes and the etcd watch before the web app exits.
        self.manager.stop()
    def run(self):
        # Start background health management, then serve HTTP.
        self.manager.start()
        super().run()
if __name__ == '__main__':
# Two startup modes:
# Hot (re)start - Data already exists in etcd, reconstruct probes.
# Cold start - No workflow and/or probe data are in etcd.
logging.basicConfig(format=get_log_format('healthd'), level=logging.INFO)
app = HealthApp(bind='0.0.0.0:5050')
app.run()
| <filename>daemons/healthd.py
import logging
import threading
import time
from etcd3.events import DeleteEvent, PutEvent
from quart import jsonify
import requests
from flowlib.bpmn_util import BPMNComponent
from flowlib.etcd_utils import get_etcd, get_next_level, get_keys_from_prefix
from flowlib.executor import get_executor
from flowlib.flowd_utils import get_log_format
from flowlib.quart_app import QuartApp
from flowlib.workflow import Workflow
from flowlib.constants import States, BStates, WorkflowKeys, WorkflowInstanceKeys
class HealthProbe:
def __init__(self, workflow: Workflow, task: BPMNComponent):
self.workflow = workflow
self.task = task
self.key = WorkflowKeys.task_key(workflow.id, task.id)
self.timer = None
self.running = False
self.status = None
self.logger = logging.getLogger()
self.etcd = get_etcd()
self.wf_state_key = WorkflowKeys.state_key(workflow.id)
health_path = self.task.health_properties.path
if not health_path.startswith('/'):
health_path = '/' + health_path
self.url = f'http://{self.task.envoy_host}:{self.task.service_properties.port}{health_path}'
def __call__(self):
health_properties = self.task.health_properties
try:
response = requests.request(
health_properties.method, self.url,
data=health_properties.query, timeout=health_properties.timeout,
)
exception = None
except requests.RequestException as exn:
exception = exn
response = exn.response
result = 'UP' if exception is None and response.ok else 'DOWN'
success, _ = self.etcd.transaction(
compare=[
# arcane syntax from the etcd3 library...doesn't do what you think
# https://github.com/kragniz/python-etcd3/blob/master/etcd3/transactions.py
self.etcd.transactions.value(self.wf_state_key) != b''
],
success=[self.etcd.transactions.put(self.key, result)],
failure=[],
)
if not success:
logging.warning(
f"Probe for task {self.task.id} {self.workflow.id} was orphaned."
)
self.stop()
return
self.status = result
self.logger.info(f'Status check for {self.task.id} is {result}')
if self.running:
self.timer = threading.Timer(self.task.health_properties.period, self)
self.timer.start()
return [self.task.id, self.status]
def start(self):
self.logger.info(f'Starting status checks for {self.task.id} ({self.url})')
assert self.timer is None
self.timer = threading.Timer(self.task.health_properties.period, self)
self.running = True
self.timer.start()
def stop(self):
if self.timer is not None:
logging.info(
f"shutting down probe for BPMNComponent {self.task.id}"
)
self.timer.cancel()
else:
logging.warning(
f"at shutdown, no threading.timer for probe {self.task.id}"
)
self.etcd.delete(self.key)
class HealthManager:
    """Watches etcd for workflow lifecycle changes and manages HealthProbes.

    Keeps one HealthProbe per health-checked BPMN component of every known
    workflow, starts/stops them as workflows transition through STARTING and
    STOPPING, and drives the STARTING->RUNNING and STOPPING->STOPPED state
    transitions based on the aggregate probe results.
    """
    def __init__(self):
        # Shared clients; known workflows are reconstructed from etcd at
        # startup, which is what makes a hot restart possible.
        self.etcd = get_etcd()
        self.executor = get_executor()
        self.workflows = {
            workflow_id: Workflow.from_id(workflow_id)
            for workflow_id in get_next_level(WorkflowKeys.ROOT)
        }
        # Mapping: workflow_id -> {component_id: HealthProbe}
        self.probes = {}
        self.future = None
        self.cancel_watch = None
        self.logger = logging.getLogger()
    def __call__(self):
        """Main watch loop: react to workflow ``.../state`` changes in etcd."""
        watch_iter, self.cancel_watch = self.etcd.watch_prefix(WorkflowKeys.ROOT)
        for event in watch_iter:
            key = event.key.decode('utf-8')
            value = event.value.decode('utf-8')
            if key.endswith('/state'):
                # Assumes key layout /<a>/<b>/<c>/<workflow_id>/... so index 3
                # is the workflow id -- TODO confirm against WorkflowKeys.
                workflow_id = key.split('/')[3]
                if isinstance(event, PutEvent):
                    if value == States.STARTING:
                        assert workflow_id not in self.workflows.keys()
                        workflow = Workflow.from_id(workflow_id)
                        self.workflows[workflow_id] = workflow
                        # Probe only components that declare health properties.
                        self.probes[workflow_id] = {
                            component.id: HealthProbe(workflow, component)
                            for component in workflow.process.all_components
                            if component.health_properties is not None
                        }
                        for probe in self.probes[workflow_id].values():
                            probe.start()
                        self.future = self.executor.submit(self.wait_for_up, workflow)
                    elif value == States.STOPPING:
                        workflow = self.workflows[workflow_id]
                        self.future = self.executor.submit(self.stop_workflow, workflow)
                elif isinstance(event, DeleteEvent):
                    self.logger.info(f'{workflow_id} DELETE event - {value}')
                    # No action necessary because we stop the HealthProbes in the
                    # stop_workflow() function. This is good practice because we don't want
                    # a bunch of HealthProbes making calls to services that don't exist.
    def wait_for_up(self, workflow: Workflow):
        '''Waits for workflow to come up. If the workflow does not come up within the timeout
        (defined in the `WorkflowProperties`) then the workflow is transitioned to ERROR state.
        However, the Workflow can still be transitioned from ERROR to RUNNING if a probe
        succeeds afterwards.
        '''
        def timeout_catch():
            # Fires after deployment_timeout: atomically flips STARTING->ERROR;
            # if the replace fails the workflow already left STARTING.
            if not self.etcd.replace(workflow.keys.state, States.STARTING, States.ERROR):
                logging.info(
                    f"Appears that {workflow.id} came up before timeout."
                )
            else:
                logging.error(
                    f"Workflow {workflow.id} did not come up in time; transitioned to ERROR state."
                )
        try:
            self.logger.info(f'wait_for_up() called for workflow {workflow.id}')
            probes = self.probes[workflow.id]
            watch_iter, _ = self.etcd.watch_prefix(workflow.keys.probe)
            timeout_timer = threading.Timer(
                workflow.properties.deployment_timeout, timeout_catch)
            timeout_timer.start()
            for event in watch_iter:
                self.logger.info(f'wait_for_up(): Got {type(event)} to key {event.key}')
                crnt_state = self.etcd.get(workflow.keys.state)[0]
                # Stop monitoring once the workflow has left STARTING/ERROR.
                if (crnt_state is None) or (crnt_state not in {BStates.STARTING, BStates.ERROR}):
                    self.logger.info(f'wait_for_up(): Workflow {workflow.id} is no '
                                     'longer starting up, cancelling further '
                                     'monitoring.')
                    break
                if isinstance(event, PutEvent):
                    # All probes reporting UP => promote the workflow to RUNNING.
                    if all(probe.status == 'UP' for probe in probes.values()):
                        result = self.etcd.replace(workflow.keys.state,
                                                   crnt_state, States.RUNNING)
                        if result:
                            self.logger.info('wait_for_up(): State transition succeeded.')
                        else:
                            self.logger.error('wait_for_up(): State transition failed.')
                        return result
        except Exception as exn:
            logging.exception(f"failed on the waiting for up on {workflow.id}", exc_info=exn)
        # Fell through without returning: best-effort transition to ERROR.
        if not self.etcd.replace(workflow.keys.state, States.STARTING, States.ERROR):
            logging.error(
                f"Couldn't transition wf {workflow.id} to ERROR state."
            )
        return False
    def wait_for_down(self, workflow: Workflow):
        """Block until every probe reports DOWN, then mark workflow STOPPED.

        Returns True on a successful STOPPING->STOPPED transition; on timeout
        (watch cancelled by the timer) the workflow is moved to ERROR and
        False is returned.
        """
        self.logger.info(f'wait_for_down() called for workflow {workflow.id}')
        probes = self.probes[workflow.id]
        watch_iter, cancel = self.etcd.watch_prefix(workflow.keys.probe)
        # The timer cancels the watch, which ends the for-loop below.
        timeout_timer = threading.Timer(
            workflow.properties.deployment_timeout, cancel)
        timeout_timer.start()
        for event in watch_iter:
            self.logger.info(f'wait_for_down(): Got {type(event)} to key {event.key}')
            if isinstance(event, PutEvent):
                if all(probe.status == 'DOWN' for probe in probes.values()):
                    # Everything is down: tear down probes and bookkeeping.
                    for probe in probes.values():
                        probe.stop()
                    del self.probes[workflow.id]
                    del self.workflows[workflow.id]
                    result = self.etcd.replace(workflow.keys.state,
                                               States.STOPPING, States.STOPPED)
                    if result:
                        self.logger.info('wait_for_down(): State transition succeeded.')
                    else:
                        self.logger.error('wait_for_down(): State transition failed.')
                    return result
        # If we got here, then the deployment timed out before coming down.
        if not self.etcd.replace(workflow.keys.state, States.STOPPING, States.ERROR):
            logging.error(
                f"Couldn't transition wf {workflow.id} to ERROR state."
            )
        return False
    def stop_workflow(self, workflow: Workflow):
        '''
        Stopping a workflow means we need to wait for all the instances for that
        workflow to COMPLETE or ERROR. Then we need to delete the deployment for
        the workflow, and finally wait for all those tasks to go DOWN before
        finally marking the workflow as STOPPED.
        TODO: Do we need to enforce a timeout?
        '''
        self.logger.info(f'stop_workflow {workflow.id}')
        try:
            self.logger.info(f'Removing workflow {workflow.id}')
            workflow.remove()
        except Exception as exn:
            # Removal failed: record ERROR but still wait for probes below.
            logging.exception(
                f"Failed to bring down workflow {workflow.id}",
                exc_info=exn,
            )
            self.etcd.replace(workflow.keys.state, BStates.STOPPING, BStates.ERROR)
        return self.wait_for_down(workflow)
    def start(self):
        """Start probes for all known workflows, resume pending transitions,
        and kick off the main watch loop on the executor (hot-restart path)."""
        for workflow in self.workflows.values():
            probes = {
                component.id: HealthProbe(workflow, component)
                for component in workflow.process.all_components
                if component.health_properties is not None
            }
            for probe in probes.values():
                probe.start()
            self.probes[workflow.id] = probes
            workflow_state = self.etcd.get(workflow.keys.state)[0].decode()
            self.logger.info(f'Started probes for {workflow.id}, in state {workflow_state}')
            # Resume any transition that was in flight before the restart.
            if workflow_state in {States.STARTING, States.ERROR}:
                self.executor.submit(self.wait_for_up, workflow)
            elif workflow_state == States.STOPPING:
                self.executor.submit(self.stop_workflow, workflow)
        self.future = self.executor.submit(self)
    def stop(self):
        """Stop every probe and cancel the etcd watch (shutdown path)."""
        probes = [
            probe
            for workflow in self.workflows.values()
            for probe in self.probes[workflow.id].values()
        ]
        for probe in probes:
            probe.stop()
        if self.cancel_watch:
            self.cancel_watch()
    def probe_all(self):
        ''' Force a health-check rather than waiting for the timer to mature.
        '''
        return [self.probe(workflow_id) for workflow_id in self.probes.keys()]
    def probe(self, workflow_id):
        ''' Force a health-check on workflow_id
        '''
        return {workflow_id : [probe() for probe in self.probes[workflow_id].values()]}
class HealthApp(QuartApp):
    """HTTP front-end for healthd: exposes probe status and manual probing."""
    def __init__(self, **kws):
        super().__init__(__name__, **kws)
        self.manager = HealthManager()
        self.app.route('/')(self.root_route)
        self.app.route('/probe/<workflow_id>')(self.probe)
    def root_route(self):
        """Summarise every probe of every known workflow as JSON."""
        summary = {}
        for workflow_id in self.manager.workflows.keys():
            workflow_probes = self.manager.probes[workflow_id]
            summary[workflow_id] = {
                task_id: str(probe)
                for task_id, probe in workflow_probes.items()
            }
        return jsonify(summary)
    def probe(self, workflow_id):
        """Trigger an immediate health check for one workflow, or 'all'."""
        manager = self.manager
        if not manager.workflows:
            return jsonify({"result": "No workflows exist"})
        if workflow_id == 'all':
            return jsonify(manager.probe_all())
        if workflow_id in manager.workflows.keys():
            return jsonify(manager.probe(workflow_id))
        return jsonify({"result": f"Workflow '{workflow_id}' not found"})
    def _shutdown(self):
        """Hook called by QuartApp teardown: stop all probes."""
        self.manager.stop()
    def run(self):
        """Start the health manager, then serve HTTP."""
        self.manager.start()
        super().run()
if __name__ == '__main__':
    # Two startup modes:
    #   Hot (re)start - Data already exists in etcd, reconstruct probes.
    #   Cold start - No workflow and/or probe data are in etcd.
    # HealthManager.__init__/start handle both by loading whatever etcd holds.
    logging.basicConfig(format=get_log_format('healthd'), level=logging.INFO)
    app = HealthApp(bind='0.0.0.0:5050')
    app.run()
| en | 0.91357 | # arcane syntax from the etcd3 library...doesn't do what you think # https://github.com/kragniz/python-etcd3/blob/master/etcd3/transactions.py # No action necessary because we stop the HealthProbes in the # stop_workflow() function. This is good practice because we don't want # a bunch of HealthProbes making calls to services that don't exist. Waits for workflow to come up. If the workflow does not come up within the timeout (defined in the `WorkflowProperties`) then the workflow is transitioned to ERROR state. However, the Workflow can still be transitioned from ERROR to RUNNING if a probe succeeds afterwards. # If we got here, then the deployment timed out before coming down. Stopping a workflow means we need to wait for all the instances for that workflow to COMPLETE or ERROR. Then we need to delete the deployment for the workflow, and finally wait for all those tasks to go DOWN before finally marking the workflow as STOPPED. TODO: Do we need to enforce a timeout? Force a health-check rather than waiting for the timer to mature. Force a health-check on worfkow_id # Two startup modes: # Hot (re)start - Data already exists in etcd, reconstruct probes. # Cold start - No workflow and/or probe data are in etcd. | 1.846366 | 2 |
melanoma/bot/bot.py | vaaliferov/paranormal | 2 | 6618159 | <gh_stars>1-10
#!/usr/bin/env python3
import json
import numpy as np
import onnxruntime
from PIL import Image, ImageOps
from telegram.ext import Updater
from telegram.ext import Filters
from telegram.ext import MessageHandler
def pad(im):
    """Return (left, top, right, bottom) borders that make ``im`` square.

    Any odd remainder goes to the leading (left/top) edge, matching the
    original rounding: ceil on the leading border, floor on the trailing one.
    """
    width, height = im.size
    side = np.max([width, height])
    extra_w = side - width
    extra_h = side - height
    left, right = extra_w - extra_w // 2, extra_w // 2
    top, bottom = extra_h - extra_h // 2, extra_h // 2
    return (left, top, right, bottom)
def norm(x):
    """Standardise an RGB array using the ImageNet channel statistics."""
    imagenet_mean = np.array([0.485, 0.456, 0.406])
    imagenet_std = np.array([0.229, 0.224, 0.225])
    centered = x - imagenet_mean
    return centered / imagenet_std
def load_image(path, size):
    """Load an image, shrink it to fit a ``size`` x ``size`` box, pad it
    square, and return it as an HWC float array scaled to [0, 1]."""
    im = Image.open(path)
    im.thumbnail((size, size), Image.ANTIALIAS)
    im = ImageOps.expand(im, pad(im))
    return np.array(im) / 255.
def to_tensor(x):
    """Convert a [0,1] HWC image array into a float32 1xCxHxW batch."""
    normalised = np.float32(norm(x))
    chw = normalised.transpose(2, 0, 1)
    return chw[np.newaxis, ...]
def sigmoid(x):
    """Logistic function: squash any real input into the open interval (0, 1)."""
    denominator = 1.0 + np.exp(-x)
    return 1.0 / denominator
def load_model(path):
    """Create an ONNX Runtime inference session for the model at ``path``."""
    return onnxruntime.InferenceSession(path)
def predict(model, path):
    """Classify one image file with the ONNX model.

    Returns:
        tuple: ``(label, probability)`` where ``label`` is 1 when the sigmoid
        probability of the single output logit exceeds 0.5.
    """
    x = to_tensor(load_image(path, 224))
    inps = {model.get_inputs()[0].name: x}
    outs = model.run(None, inps)
    # Model emits a single raw logit for a batch of one.
    y = sigmoid(outs[0])[0][0]
    return int(y > 0.5), y
def handle_text(update, context):
    """Telegram text handler: prompt the user to send a photo instead."""
    update.message.reply_text('waiting for photos...')
def handle_photo(update, context):
    """Telegram photo handler: download the largest photo size and reply
    with the model's prediction and probability."""
    # photo[-1] is the highest-resolution rendition Telegram provides.
    file_id = update.message.photo[-1]['file_id']
    context.bot.getFile(file_id).download('in.jpg')
    pred, prob = predict(model, 'in.jpg')
    update.message.reply_text(f'{pred} ({prob:.4f})')
# Module-level wiring: load the model once, read the bot token, and register
# the Telegram handlers before polling starts.
model = load_model('model.onnx')
# Use a context manager so the config file handle is closed promptly
# (the original leaked it via a bare open()).
with open('config.json', 'r') as config_file:
    opt = json.load(config_file)
updater = Updater(opt['bot_token'])
dispatcher = updater.dispatcher
dispatcher.add_handler(MessageHandler(Filters.text, handle_text))
dispatcher.add_handler(MessageHandler(Filters.photo, handle_photo))
updater.start_polling()
updater.idle() | #!/usr/bin/env python3
import json
import numpy as np
import onnxruntime
from PIL import Image, ImageOps
from telegram.ext import Updater
from telegram.ext import Filters
from telegram.ext import MessageHandler
def pad(im):
w, h = im.size; m = np.max([w, h])
hp, hpr = (m - w) // 2, (m - w) % 2
vp, vpr = (m - h) // 2, (m - h) % 2
return (hp + hpr, vp + vpr, hp, vp)
def norm(x):
mean = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])
return (x - mean) / std
def load_image(path, size):
im = Image.open(path)
im.thumbnail((size, size), Image.ANTIALIAS)
im = ImageOps.expand(im, pad(im))
return np.array(im) / 255.
def to_tensor(x):
x = np.float32(norm(x))
x = x.transpose(2,0,1)
return x.reshape((1,) + x.shape)
def sigmoid(x):
return 1 / (1 + np.exp(-x))
def load_model(path):
return onnxruntime.InferenceSession(path)
def predict(model, path):
x = to_tensor(load_image(path, 224))
inps = {model.get_inputs()[0].name: x}
outs = model.run(None, inps)
y = sigmoid(outs[0])[0][0]
return int(y > 0.5), y
def handle_text(update, context):
update.message.reply_text('waiting for photos...')
def handle_photo(update, context):
file_id = update.message.photo[-1]['file_id']
context.bot.getFile(file_id).download('in.jpg')
pred, prob = predict(model, 'in.jpg')
update.message.reply_text(f'{pred} ({prob:.4f})')
model = load_model('model.onnx')
opt = json.load(open('config.json','r'))
updater = Updater(opt['bot_token'])
dispatcher = updater.dispatcher
dispatcher.add_handler(MessageHandler(Filters.text, handle_text))
dispatcher.add_handler(MessageHandler(Filters.photo, handle_photo))
updater.start_polling()
updater.idle() | fr | 0.221828 | #!/usr/bin/env python3 | 2.318749 | 2 |
python/testData/types/RecursiveTypeAliasInAnotherFile/other.py | jnthn/intellij-community | 2 | 6618160 | from typing import List, Union
MyType = Union[List['MyType'], int]
| from typing import List, Union
MyType = Union[List['MyType'], int]
| none | 1 | 2.085931 | 2 | |
pirt/interp/__init__.py | almarklein/pirt | 10 | 6618161 | <reponame>almarklein/pirt<filename>pirt/interp/__init__.py
# flake8: noqa
# Copyright 2014-2017(C) <NAME>
"""
The interp module implements several functions for interpolation,
implemented in Numba.
"""
# More low level functions
from ._cubic import get_cubic_spline_coefs
from ._misc import meshgrid
from ._backward import warp, awarp
from ._forward import project, aproject
from ._misc import make_samples_absolute #, uglyRoot
# More higher level functions
from ._func import deform_backward, deform_forward
from ._func import resize, imresize
from ._func import zoom, imzoom
# Special kinds of functionality
from ._sliceinvolume import SliceInVolume
# Aliases
interp = warp
| # flake8: noqa
# Copyright 2014-2017(C) <NAME>
"""
The interp module implements several functions for interpolation,
implemented in Numba.
"""
# More low level functions
from ._cubic import get_cubic_spline_coefs
from ._misc import meshgrid
from ._backward import warp, awarp
from ._forward import project, aproject
from ._misc import make_samples_absolute #, uglyRoot
# More higher level functions
from ._func import deform_backward, deform_forward
from ._func import resize, imresize
from ._func import zoom, imzoom
# Special kinds of functionality
from ._sliceinvolume import SliceInVolume
# Aliases
interp = warp | en | 0.539972 | # flake8: noqa # Copyright 2014-2017(C) <NAME> The interp module implements several functions for interpolation, implemented in Numba. # More low level functions #, uglyRoot # More higher level functions # Special kinds of functionality # Aliases | 1.970386 | 2 |
algorithms/utils.py | traai/async-deep-rl | 77 | 6618162 | import tensorflow as tf
import os
def restore_vars(saver, sess, game, alg_type, max_local_steps):
    """Restore the latest checkpoint for (game, algorithm), if one exists.

    Returns:
        int: the restored global step, or 0 after initialising fresh
        variables when no checkpoint is found.
    """
    # Q-learning checkpoints are further namespaced by the n-step setting.
    alg = alg_type + "{}/".format("_" + str(max_local_steps) + "_steps" if alg_type == 'q' else "")
    checkpoint_dir = 'checkpoints/' + game + '/' + alg
    check_or_create_checkpoint_dir(checkpoint_dir)
    path = tf.train.latest_checkpoint(checkpoint_dir)
    if path is None:
        sess.run(tf.initialize_all_variables())
        return 0
    else:
        saver.restore(sess, path)
        # TensorFlow checkpoint paths end in "-<global_step>".
        global_step = int(path[path.rfind("-") + 1:])
        return global_step
def save_vars(saver, sess, game, alg_type, max_local_steps, global_step):
    """Checkpoint the shared network parameters at ``global_step``.

    NOTE(review): the original docstring also mentioned saving score and
    epsilons, but only the TensorFlow variables are saved here.
    """
    alg = alg_type + "{}/".format("_" + str(max_local_steps) + "_steps" if alg_type == 'q' else "")
    checkpoint_dir = 'checkpoints/' + game + '/' + alg
    check_or_create_checkpoint_dir(checkpoint_dir)
    saver.save(sess, checkpoint_dir + "model", global_step=global_step)
def check_or_create_checkpoint_dir(checkpoint_dir):
    """Create ``checkpoint_dir`` (and any missing parents) if needed.

    ``exist_ok=True`` makes the call race-free and replaces the original
    exists-check plus broad ``except OSError: pass``, which could also hide
    genuine failures such as permission errors.
    """
    os.makedirs(checkpoint_dir, exist_ok=True)
# def save_shared_mem_vars(shared_mem_vars, game_name, alg_type,
# max_local_steps):
# checkpoint_dir = 'checkpoints/' + game_name + '/' + \
# {'0': 'Q/', '1': 'sarsa/', '2': 'a3c/'}[str(alg_type)] + \
# str(max_local_steps) + '_step' + '/'
#
# check_or_create_checkpoint_dir(checkpoint_dir)
# while True:
# g_step = shared_mem_vars['global_step'].val.value
# if g_step % 1000000 == 0:
# path = checkpoint_dir + 'vars-opt-' + str(g_step)
# np.save(path + '-learning', np.frombuffer(shared_mem_vars['learning_vars.vars'], ctypes.c_float))
# np.save(path + '-target', np.frombuffer(shared_mem_vars['target_vars.vars'], ctypes.c_float))
# for i in xrange(len(shared_mem_vars['opt_state.vars'])):
# np.save(path + '-opt' + str(i),
# np.frombuffer(shared_mem_vars['opt_state'].vars[i], ctypes.c_float))
| import tensorflow as tf
import os
def restore_vars(saver, sess, game, alg_type, max_local_steps):
""" Restore saved net, global step, and epsilons OR
create checkpoint directory for later storage. """
alg = alg_type + "{}/".format("_" + str(max_local_steps) + "_steps" if alg_type == 'q' else "")
checkpoint_dir = 'checkpoints/' + game + '/' + alg
check_or_create_checkpoint_dir(checkpoint_dir)
path = tf.train.latest_checkpoint(checkpoint_dir)
if path is None:
sess.run(tf.initialize_all_variables())
return 0
else:
saver.restore(sess, path)
global_step = int(path[path.rfind("-") + 1:])
return global_step
def save_vars(saver, sess, game, alg_type, max_local_steps, global_step):
""" Checkpoint shared net params, global score and step, and epsilons. """
alg = alg_type + "{}/".format("_" + str(max_local_steps) + "_steps" if alg_type == 'q' else "")
checkpoint_dir = 'checkpoints/' + game + '/' + alg
check_or_create_checkpoint_dir(checkpoint_dir)
saver.save(sess, checkpoint_dir + "model", global_step=global_step)
def check_or_create_checkpoint_dir(checkpoint_dir):
""" Create checkpoint directory if it does not exist """
if not os.path.exists(checkpoint_dir):
try:
os.makedirs(checkpoint_dir)
except OSError:
pass
# def save_shared_mem_vars(shared_mem_vars, game_name, alg_type,
# max_local_steps):
# checkpoint_dir = 'checkpoints/' + game_name + '/' + \
# {'0': 'Q/', '1': 'sarsa/', '2': 'a3c/'}[str(alg_type)] + \
# str(max_local_steps) + '_step' + '/'
#
# check_or_create_checkpoint_dir(checkpoint_dir)
# while True:
# g_step = shared_mem_vars['global_step'].val.value
# if g_step % 1000000 == 0:
# path = checkpoint_dir + 'vars-opt-' + str(g_step)
# np.save(path + '-learning', np.frombuffer(shared_mem_vars['learning_vars.vars'], ctypes.c_float))
# np.save(path + '-target', np.frombuffer(shared_mem_vars['target_vars.vars'], ctypes.c_float))
# for i in xrange(len(shared_mem_vars['opt_state.vars'])):
# np.save(path + '-opt' + str(i),
# np.frombuffer(shared_mem_vars['opt_state'].vars[i], ctypes.c_float))
| en | 0.339613 | Restore saved net, global step, and epsilons OR
create checkpoint directory for later storage. Checkpoint shared net params, global score and step, and epsilons. Create checkpoint directory if it does not exist # def save_shared_mem_vars(shared_mem_vars, game_name, alg_type, # max_local_steps): # checkpoint_dir = 'checkpoints/' + game_name + '/' + \ # {'0': 'Q/', '1': 'sarsa/', '2': 'a3c/'}[str(alg_type)] + \ # str(max_local_steps) + '_step' + '/' # # check_or_create_checkpoint_dir(checkpoint_dir) # while True: # g_step = shared_mem_vars['global_step'].val.value # if g_step % 1000000 == 0: # path = checkpoint_dir + 'vars-opt-' + str(g_step) # np.save(path + '-learning', np.frombuffer(shared_mem_vars['learning_vars.vars'], ctypes.c_float)) # np.save(path + '-target', np.frombuffer(shared_mem_vars['target_vars.vars'], ctypes.c_float)) # for i in xrange(len(shared_mem_vars['opt_state.vars'])): # np.save(path + '-opt' + str(i), # np.frombuffer(shared_mem_vars['opt_state'].vars[i], ctypes.c_float)) | 2.446492 | 2 |
setup.py | carlba/media-server-utils | 1 | 6618163 | # coding=utf-8
from setuptools import setup, find_packages
# Package metadata for media_server_utils.
setup(name="media_server_utils",
      version="0.1.0",
      options={},
      description="Various utils to manage a Media Server",
      author="carlba",
      packages=find_packages(),
      # Runtime dependency used by the CLI entry point.
      install_requires=['click'],
      # Expose the CLI command on the user's PATH after install.
      entry_points={
          'console_scripts': [
              'add_torrents_from_folder = media_server_utils.cli:add_torrents_from_folder'
          ]
      }
      )
| # coding=utf-8
from setuptools import setup, find_packages
setup(name="media_server_utils",
version="0.1.0",
options={},
description="Various utils to manage a Media Server",
author="carlba",
packages=find_packages(),
install_requires=['click'],
entry_points={
'console_scripts': [
'add_torrents_from_folder = media_server_utils.cli:add_torrents_from_folder'
]
}
)
| en | 0.644078 | # coding=utf-8 | 1.189313 | 1 |
picture/urls.py | waytai/picture | 0 | 6618164 | <reponame>waytai/picture<gh_stars>0
# -*- encoding: utf8 -*-
from django.conf.urls import patterns, include, url
from views import login_view , signin , start_template
import settings
from views import image_explain ,process_img,contact,about
from loadpicture.views import load_image , upload
# Uncomment the next two lines to enable the admin:
# from django.contrib import admin
# admin.autodiscover()
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'picture.views.home', name='home'),
# url(r'^picture/', include('picture.foo.urls')),
# Uncomment the admin/doc line below to enable admin documentation:
# url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
# url(r'^admin/', include(admin.site.urls)),
url( '^assets/(?P<path>.*)$', 'django.views.static.serve',
{ 'document_root': settings.MEDIA_URL}),
url( '^img/(?P<path>.*)$', 'django.views.static.serve',
{ 'document_root': settings.Img_dir}),
url('^$' , signin),
url('^start_template/$' , start_template),
# just explain why I start this project
url('^image_explain/$' , image_explain),
url('^process_img/$' , process_img),
url('^contact/$' , contact),
url('^about/$' , about),
url('^load_image/$' , load_image),
url('^upload/$' , upload),
)
| # -*- encoding: utf8 -*-
from django.conf.urls import patterns, include, url
from views import login_view , signin , start_template
import settings
from views import image_explain ,process_img,contact,about
from loadpicture.views import load_image , upload
# Uncomment the next two lines to enable the admin:
# from django.contrib import admin
# admin.autodiscover()
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'picture.views.home', name='home'),
# url(r'^picture/', include('picture.foo.urls')),
# Uncomment the admin/doc line below to enable admin documentation:
# url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
# url(r'^admin/', include(admin.site.urls)),
url( '^assets/(?P<path>.*)$', 'django.views.static.serve',
{ 'document_root': settings.MEDIA_URL}),
url( '^img/(?P<path>.*)$', 'django.views.static.serve',
{ 'document_root': settings.Img_dir}),
url('^$' , signin),
url('^start_template/$' , start_template),
# just explain why I start this project
url('^image_explain/$' , image_explain),
url('^process_img/$' , process_img),
url('^contact/$' , contact),
url('^about/$' , about),
url('^load_image/$' , load_image),
url('^upload/$' , upload),
) | en | 0.588518 | # -*- encoding: utf8 -*- # Uncomment the next two lines to enable the admin: # from django.contrib import admin # admin.autodiscover() # Examples: # url(r'^$', 'picture.views.home', name='home'), # url(r'^picture/', include('picture.foo.urls')), # Uncomment the admin/doc line below to enable admin documentation: # url(r'^admin/doc/', include('django.contrib.admindocs.urls')), # Uncomment the next line to enable the admin: # url(r'^admin/', include(admin.site.urls)), # just explain why I start this project | 2.01902 | 2 |
reverse_geocoder/schemas.py | aruneko/reverse_geocoding | 0 | 6618165 | from typing import List, Tuple
from pydantic import BaseModel
class GeoJsonGeometry(BaseModel):
type: str
coordinates: List[Tuple[float, float]]
class GeocodingProps(BaseModel):
address: str
class GeoJsonFeature(BaseModel):
type: str
geometry: GeoJsonGeometry
properties: GeocodingProps
class GeoJson(BaseModel):
type: str
features: List[GeoJsonFeature]
class Coordinate(BaseModel):
    """A WGS84 latitude/longitude pair."""
    lat: float
    lon: float
    def to_wkt(self):
        """Render as a WKT POINT string (note: WKT order is lon then lat)."""
        return f"POINT({self.lon} {self.lat})"
| from typing import List, Tuple
from pydantic import BaseModel
class GeoJsonGeometry(BaseModel):
type: str
coordinates: List[Tuple[float, float]]
class GeocodingProps(BaseModel):
address: str
class GeoJsonFeature(BaseModel):
type: str
geometry: GeoJsonGeometry
properties: GeocodingProps
class GeoJson(BaseModel):
type: str
features: List[GeoJsonFeature]
class Coordinate(BaseModel):
lat: float
lon: float
def to_wkt(self):
return f"POINT({self.lon} {self.lat})"
| none | 1 | 2.850692 | 3 | |
tests/test_ttl_enforcer_handler.py | zdjohn/sns-boomerang | 0 | 6618166 | from sns_boomerang.handlers import ttl_enforcer
from sns_boomerang.common.items import Job
def mock_flush():
    # Stand-in for Job.flush; deliberately does nothing so tests touch no storage.
    pass
def test_flush(monkeypatch):
    """Flushing succeeds when Job.flush is patched out with a no-op."""
    monkeypatch.setattr(Job, 'flush', mock_flush)
    assert ttl_enforcer.flush()
| from sns_boomerang.handlers import ttl_enforcer
from sns_boomerang.common.items import Job
def mock_flush():
pass
def test_flush(monkeypatch):
monkeypatch.setattr(Job, 'flush', mock_flush)
assert ttl_enforcer.flush()
| none | 1 | 1.961065 | 2 | |
jp.atcoder/agc051/agc051_a/27983452.py | kagemeka/atcoder-submissions | 1 | 6618167 | <gh_stars>1-10
# Read the single integer input D.
d = int(input())
# Standard competitive-programming prime modulus.
MOD = 998_244_353
# Answer is 2^(D-1) mod 998244353; three-argument pow performs modular
# exponentiation efficiently.
print(pow(2, d - 1, MOD))
| d = int(input())
MOD = 998_244_353
print(pow(2, d - 1, MOD)) | none | 1 | 2.955409 | 3 | |
hypergan/gans/configurable_gan.py | limberc/HyperGAN | 889 | 6618168 | import importlib
import json
import numpy as np
import os
import re
import sys
import time
import uuid
import copy
from hypergan.discriminators import *
from hypergan.distributions import *
from hypergan.generators import *
from hypergan.inputs import *
from hypergan.samplers import *
from hypergan.trainers import *
import hyperchamber as hc
from hyperchamber import Config
from hypergan.ops import TensorflowOps
import hypergan as hg
from hypergan.gan_component import ValidationException, GANComponent
from .base_gan import BaseGAN
class ConfigurableGAN(BaseGAN):
    """GAN whose discriminator terms are described by config strings such as
    ``"d0:g0(z0)/x0"``.

    NOTE(review): as found, this class did not even parse (two syntax errors,
    fixed below), and several methods reference undefined names at runtime;
    those spots are flagged with NOTE(review) comments rather than silently
    rewritten.
    """
    def __init__(self, *args, **kwargs):
        # Flat list of (term, component) data built in create(); see the
        # NOTE(review) there about its exact shape.
        self.d_terms = []
        # One discriminator component per configured term line.
        self.Ds = []
        BaseGAN.__init__(self, *args, **kwargs)
    def create_encoder(self):
        """Build the encoder component from config."""
        return self.create_component(self.config.encoder)
    def create_latent(self, zi):
        """Build the latent component from config.

        NOTE(review): the ``zi`` argument is ignored here -- confirm intended.
        """
        return self.create_component(self.config.latent)
    def create_generator(self):
        """Build the generator component from config."""
        return self.create_component(self.config.generator)
    def parse_opts(self, opts):
        """Parse a "name=value,name=value" option string into a Config."""
        options = {}
        for opt in opts.split(","):
            if opt == "":
                continue
            name, val = opt.split("=")
            value = self.configurable_param(val)
            options[name] = value
        return hc.Config(options)
    def required(self):
        """Config keys that must be present."""
        return "terms".split()
    def create_term(self, term):
        """Instantiate the component for a term expression such as ``g0(z0)``.

        Raises:
            ValidationException: when ``term`` matches no known pattern.
        """
        # NOTE(review): removed a dangling ``for match in`` line here that
        # made the original file a SyntaxError; the loop below performs the
        # pattern matching.
        matching = {
            "gN(eN(xN))": self.create_generator,
            "gN(zN)": self.create_generator
        }
        for regex, method in matching.items():
            # Turn the human-readable pattern into a real regex: "N" becomes
            # an optional integer capture, ",options" an option-string capture.
            regex_subbed = regex.replace("(", '\(').replace(")", '\)').replace("N", "(\d+)?").replace(",options", "([-,=\w\d\.\(\)]+)?")
            regex = re.compile(regex_subbed)
            args = re.match(regex, term)
            if args:
                return method(*args.groups())
        raise ValidationException("Could not match term: " + term)
    def forward_term(self, term):
        """Evaluate a term expression (generator output or raw input).

        Raises:
            ValidationException: when ``term`` matches no known pattern.
        """
        matching = {
            "gN(eN(xN))": self.geN,
            "gN(zN)": self.gzN,
            "xN": self.xN
        }
        for regex, method in matching.items():
            regex_subbed = regex.replace("(", '\(').replace(")", '\)').replace("N", "(\d+)?").replace(",options", "([-,=\w\d\.\(\)]+)?")
            regex = re.compile(regex_subbed)
            args = re.match(regex, term)
            if args:
                return method(*args.groups())
        raise ValidationException("Could not match term: " + term)
    def create(self):
        """Build latent, discriminators and trainer from ``config.terms``."""
        config = self.config
        self.latent = hc.Config({"sample": self.zN(0)})
        self.discriminators = []
        self.losses = []
        for i, term in enumerate(self.config.terms):
            dN, args = term.split(":")
            d_terms = args.split("/")
            terms = []
            for dt in d_terms:
                # NOTE(review): ``+=`` with a tuple extends the flat list with
                # interleaved term strings and components, but g_parameters()
                # below indexes ``term[1]`` as if entries were pairs -- the
                # two are inconsistent; confirm the intended shape.
                terms += (term, self.create_term(dt))
            reuse = False
            dN = re.findall("\d+", dN)[0]
            dN = int(dN)
            tfname = "d" + str(dN)
            D = self.create_component(config.discriminator)
            self.Ds.append(D)
            self.d_terms += terms
        self.trainer = self.create_component(config.trainer)
    def create_controls(self, z_shape):
        """Constant direction/slider tensors used by samplers.

        NOTE(review): ``tf`` is not imported by this module (only
        TensorflowOps is); calling this raises NameError unless tensorflow is
        made available elsewhere -- confirm.
        """
        direction = tf.constant(0.0, shape=z_shape, name='direction') * 1.00
        slider = tf.constant(0.0, name='slider', dtype=tf.float32) * 1.00
        return direction, slider
    def forward_pass(self):
        """Collect discriminator inputs for the real and fake branches.

        NOTE(review): the original body was a SyntaxError (a ``for`` loop
        over the undefined name ``d_terms`` with no body).  It is kept as a
        documented no-op over ``self.d_terms``; the intended routing of each
        term into ``d_reals``/``d_fakes`` still needs to be restored.
        """
        d_reals = []
        d_fakes = []
        for terms in self.d_terms:
            # TODO(review): evaluate ``terms`` (see forward_term) and append
            # the result to d_reals or d_fakes as appropriate.
            pass
        return d_reals, d_fakes
    def forward_loss(self):
        """Combine per-discriminator losses into ``self.loss``.

        NOTE(review): as found, this method references several undefined
        names (``d_reals``, ``d_fakes``, ``config``, ``d``, ``d_terms``,
        ``i``) and sums ``l.sample`` over plain lists; it cannot run as
        written.  The body is preserved verbatim pending reconstruction.
        """
        losses = []
        for d_real, d_fake in zip(d_reals, d_fakes):
            loss = self.create_component(config.loss, discriminator=d, split=len(d_terms))
            d_loss, g_loss = loss.forward(d_real, d_fake)
            d_loss = [self.configurable_param(config.term_gammas[i]) * d_loss, self.configurable_param(config.term_gammas[i]) * g_loss]
            losses += [[d_loss, g_loss]]
        self.loss = hc.Config({
            'sample': [sum([l.sample[0] for l in losses]), sum([l.sample[1] for l in losses])]
        })
    def g_parameters(self):
        """Parameters of the generator-side components created from terms."""
        params = []
        for d_terms in self.d_terms:
            for term in d_terms:
                params += term[1].parameters()
        return params
    def d_parameters(self):
        """Parameters of every discriminator."""
        params = []
        for m in self.Ds:
            params += m.parameters()
        return params
| import importlib
import json
import numpy as np
import os
import re
import sys
import time
import uuid
import copy
from hypergan.discriminators import *
from hypergan.distributions import *
from hypergan.generators import *
from hypergan.inputs import *
from hypergan.samplers import *
from hypergan.trainers import *
import hyperchamber as hc
from hyperchamber import Config
from hypergan.ops import TensorflowOps
import hypergan as hg
from hypergan.gan_component import ValidationException, GANComponent
from .base_gan import BaseGAN
class ConfigurableGAN(BaseGAN):
def __init__(self, *args, **kwargs):
self.d_terms = []
self.Ds = []
BaseGAN.__init__(self, *args, **kwargs)
def create_encoder(self):
return self.create_component(self.config.encoder)
def create_latent(self, zi):
return self.create_component(self.config.latent)
def create_generator(self):
return self.create_component(self.config.generator)
def parse_opts(self, opts):
options = {}
for opt in opts.split(","):
if opt == "":
continue
name, val = opt.split("=")
value = self.configurable_param(val)
options[name]=value
return hc.Config(options)
def required(self):
return "terms".split()
def create_term(self, term):
for match in
matching = {
"gN(eN(xN))": self.create_generator,
"gN(zN)": self.create_generator
}
for regex, method in matching.items():
regex_subbed = regex.replace("(", '\(').replace(")", '\)').replace("N", "(\d+)?").replace(",options", "([-,=\w\d\.\(\)]+)?")
regex = re.compile(regex_subbed)
args = re.match(regex, term)
if args:
return method(*args.groups())
raise ValidationException("Could not match term: " + term)
def forward_term(self, term):
matching = {
"gN(eN(xN))": self.geN,
"gN(zN)": self.gzN,
"xN": self.xN
}
for regex, method in matching.items():
regex_subbed = regex.replace("(", '\(').replace(")", '\)').replace("N", "(\d+)?").replace(",options", "([-,=\w\d\.\(\)]+)?")
regex = re.compile(regex_subbed)
args = re.match(regex, term)
if args:
return method(*args.groups())
raise ValidationException("Could not match term: " + term)
def create(self):
config = self.config
self.latent = hc.Config({"sample": self.zN(0)})
self.discriminators = []
self.losses = []
for i,term in enumerate(self.config.terms):
dN, args = term.split(":")
d_terms = args.split("/")
terms = []
for dt in d_terms:
terms += (term,self.create_term(dt))
reuse = False
dN = re.findall("\d+", dN)[0]
dN = int(dN)
tfname = "d"+str(dN)
D = self.create_component(config.discriminator)
self.Ds.append(D)
self.d_terms += terms
self.trainer = self.create_component(config.trainer)
def create_controls(self, z_shape):
direction = tf.constant(0.0, shape=z_shape, name='direction') * 1.00
slider = tf.constant(0.0, name='slider', dtype=tf.float32) * 1.00
return direction, slider
def forward_pass(self):
d_reals = []
d_fakes = []
for terms in d_terms:
return d_reals, d_fakes
def forward_loss(self):
losses = []
for d_real, d_fake in zip(d_reals, d_fakes):
loss = self.create_component(config.loss, discriminator=d, split=len(d_terms))
d_loss, g_loss = loss.forward(d_real, d_fake)
d_loss = [self.configurable_param(config.term_gammas[i]) * d_loss, self.configurable_param(config.term_gammas[i]) * g_loss]
losses += [[d_loss, g_loss]]
self.loss = hc.Config({
'sample': [sum([l.sample[0] for l in losses]), sum([l.sample[1] for l in losses])]
})
def g_parameters(self):
params = []
for d_terms in self.d_terms:
for term in d_terms:
params += term[1].parameters()
return params
def d_parameters(self):
params = []
for m in self.Ds:
params += m.parameters()
return params
| none | 1 | 1.992675 | 2 | |
joystick/forms.py | d9w/joystick | 1 | 6618169 | from flask_wtf import Form
from wtforms import TextField, HiddenField, DecimalField, validators
from .models import Console
def console_name_unique(form, field):
    """WTForms validator: reject a name already used by an existing Console.

    Uses ``any()`` with a generator so iteration can stop at the first match
    instead of first materialising a list of every console name.

    Raises:
        validators.ValidationError: when the name is already taken.
    """
    if any(console.name == field.data for console in Console.query.all()):
        raise validators.ValidationError(message='Console name already exists')
class ConsoleForm(Form):
    """Create/edit form for a Console; the name must be globally unique."""
    # Discriminator so the frontend can tell which form type was submitted.
    type = HiddenField(default='console')
    name = TextField('Name', [validators.Required(), validators.Length(min=2, max=50), console_name_unique])
class CommandForm(Form):
    """Base form for commands: a type discriminator plus the command string."""
    type = HiddenField(default='command')
    cmd = TextField('Command', [validators.Required(), validators.Length(min=1, max=255)])
class ButtonForm(CommandForm):
    """Command triggered by a one-shot button press."""
    type = HiddenField(default='button')
class ShellForm(CommandForm):
    """Command run through an interactive shell widget."""
    type = HiddenField(default='shell')
class LoopForm(CommandForm):
    """Command executed repeatedly on a fixed interval."""
    type = HiddenField(default='loop')
    # Seconds between runs; must be non-negative.
    interval = DecimalField('Interval', [validators.Required(), validators.NumberRange(min=0)])
    # NOTE(review): a DecimalField for a start date suggests a UNIX
    # timestamp is expected here -- confirm against the scheduler code.
    start_date = DecimalField('Start', [validators.Optional()])
| from flask_wtf import Form
from wtforms import TextField, HiddenField, DecimalField, validators
from .models import Console
def console_name_unique(form, field):
if field.data in [c.name for c in Console.query.all()]:
raise validators.ValidationError(message='Console name already exists')
class ConsoleForm(Form):
type = HiddenField(default='console')
name = TextField('Name', [validators.Required(), validators.Length(min=2, max=50), console_name_unique])
class CommandForm(Form):
type = HiddenField(default='command')
cmd = TextField('Command', [validators.Required(), validators.Length(min=1, max=255)])
class ButtonForm(CommandForm):
type = HiddenField(default='button')
class ShellForm(CommandForm):
type = HiddenField(default='shell')
class LoopForm(CommandForm):
type = HiddenField(default='loop')
interval = DecimalField('Interval', [validators.Required(), validators.NumberRange(min=0)])
start_date = DecimalField('Start', [validators.Optional()])
| none | 1 | 2.769679 | 3 | |
src/stow/server.py | rossmacarthur/stow | 3 | 6618170 | <reponame>rossmacarthur/stow
from flask import Flask
from flask_migrate import Migrate

from stow import models
from stow.config import Config
from stow.patches import register_patches
from stow.views import api, web

# Initialize main app
app = Flask(__name__, template_folder='../templates')
app.config.from_object(Config)
# Re-render templates on change (development convenience).
app.jinja_env.auto_reload = True
register_patches(app)

with app.app_context():
    # Initialize extensions
    models.bcrypt.init_app(app)
    models.db.init_app(app)
    web.login_manager.init_app(app)
    # Migrate database
    # NOTE(review): the Migrate instance registers itself on construction;
    # the `manager` name is bound but never read afterwards.
    manager = Migrate(app, models.db, directory='src/migrations')
    # Register API views
    app.register_blueprint(api.bp, url_prefix='/api')
    # Register Web views
    app.register_blueprint(web.bp)
| from flask import Flask
from flask_migrate import Migrate
from stow import models
from stow.config import Config
from stow.patches import register_patches
from stow.views import api, web
# Initialize main app
app = Flask(__name__, template_folder='../templates')
app.config.from_object(Config)
app.jinja_env.auto_reload = True
register_patches(app)
with app.app_context():
# Initialize extensions
models.bcrypt.init_app(app)
models.db.init_app(app)
web.login_manager.init_app(app)
# Migrate database
manager = Migrate(app, models.db, directory='src/migrations')
# Register API views
app.register_blueprint(api.bp, url_prefix='/api')
# Register Web views
app.register_blueprint(web.bp) | en | 0.679477 | # Initialize main app # Initialize extensions # Migrate database # Register API views # Register Web views | 1.904909 | 2 |
tests/integration/hub_usage/dummyhub_slow/tests/test_dummy.py | Rohitpandit021/jina | 15,179 | 6618171 | <reponame>Rohitpandit021/jina
def test_dummy_executor():
    """Smoke test: passes as long as the dummy hub executor module imports."""
| def test_dummy_executor():
pass | none | 1 | 0.71606 | 1 | |
s_store_api/utils/views.py | Saknowman/django-s-store-api | 1 | 6618172 | <filename>s_store_api/utils/views.py
from django.db import transaction
from django.http import Http404
from rest_framework import status, exceptions
from rest_framework.response import Response
class PermissionDeniedResponseConverterMixin:
    """View mixin mapping permission failures to HTTP errors.

    No message -> 404 (hide the resource's existence); credentials present
    but not authenticated -> 401; otherwise -> 403 with the message.
    """
    # noinspection PyMethodMayBeStatic
    def permission_denied(self, request, message=None):
        # Without a message, pretend the resource does not exist.
        if message is None:
            raise Http404
        # Authenticators were configured but none succeeded.
        if request.authenticators and not request.successful_authenticator:
            raise exceptions.NotAuthenticated()
        raise exceptions.PermissionDenied(detail=message)
def multi_create(view_set, request, *args, **kwargs):
    """Create one or many instances atomically through a DRF view set.

    Accepts either a single object or a list in ``request.data``; the whole
    operation runs inside one transaction so a partial bulk create rolls
    back. Returns a 201 response with the serialized data.
    """
    with transaction.atomic():
        # many=True when the payload is a list -> bulk create.
        serializer = view_set.get_serializer(data=request.data, many=isinstance(request.data, list))
        serializer.is_valid(raise_exception=True)
        view_set.perform_create(serializer)
        headers = view_set.get_success_headers(serializer.data)
        return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers)
| <filename>s_store_api/utils/views.py
from django.db import transaction
from django.http import Http404
from rest_framework import status, exceptions
from rest_framework.response import Response
class PermissionDeniedResponseConverterMixin:
# noinspection PyMethodMayBeStatic
def permission_denied(self, request, message=None):
if message is None:
raise Http404
if request.authenticators and not request.successful_authenticator:
raise exceptions.NotAuthenticated()
raise exceptions.PermissionDenied(detail=message)
def multi_create(view_set, request, *args, **kwargs):
with transaction.atomic():
serializer = view_set.get_serializer(data=request.data, many=isinstance(request.data, list))
serializer.is_valid(raise_exception=True)
view_set.perform_create(serializer)
headers = view_set.get_success_headers(serializer.data)
return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers)
| en | 0.322074 | # noinspection PyMethodMayBeStatic | 2.057893 | 2 |
app/alembic/versions/d28eba89a9a5_.py | jberends/fastapi-tinyurl | 0 | 6618173 | <gh_stars>0
"""empty message
Revision ID: d28eba89a9a5
Revises:
Create Date: 2021-04-05 15:26:02.942452
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
# NOTE(review): '<KEY>' is an anonymization placeholder -- the module
# docstring says the real revision id is d28eba89a9a5; restore it before
# running migrations.
revision = '<KEY>'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
    """Create the shortened_urls table and its indexes."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('shortened_urls',
    sa.Column('id', sa.Integer(), nullable=False),
    # NOTE(review): 'utcnow()' is not a SQL function on common backends
    # (CURRENT_TIMESTAMP is the portable default) -- confirm this default
    # actually works against the target database.
    sa.Column('created_at', sa.DateTime(), server_default=sa.text('utcnow()'), nullable=True),
    sa.Column('updated_at', sa.DateTime(), nullable=True),
    sa.Column('long', sa.Unicode(length=24576), nullable=True),
    sa.Column('short', sa.Unicode(length=64), nullable=True),
    sa.Column('is_active', sa.Boolean(), nullable=True),
    sa.PrimaryKeyConstraint('id'),
    # NOTE(review): UNIQUE on a boolean column permits at most one True and
    # one False row -- verify this constraint is intentional.
    sa.UniqueConstraint('is_active')
    )
    op.create_index(op.f('ix_shortened_urls_id'), 'shortened_urls', ['id'], unique=False)
    op.create_index(op.f('ix_shortened_urls_short'), 'shortened_urls', ['short'], unique=False)
    # ### end Alembic commands ###
def downgrade():
    """Drop the shortened_urls indexes and table (reverse of upgrade)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_index(op.f('ix_shortened_urls_short'), table_name='shortened_urls')
    op.drop_index(op.f('ix_shortened_urls_id'), table_name='shortened_urls')
    op.drop_table('shortened_urls')
    # ### end Alembic commands ###
| """empty message
Revision ID: d28eba89a9a5
Revises:
Create Date: 2021-04-05 15:26:02.942452
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '<KEY>'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('shortened_urls',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('created_at', sa.DateTime(), server_default=sa.text('utcnow()'), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('long', sa.Unicode(length=24576), nullable=True),
sa.Column('short', sa.Unicode(length=64), nullable=True),
sa.Column('is_active', sa.Boolean(), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('is_active')
)
op.create_index(op.f('ix_shortened_urls_id'), 'shortened_urls', ['id'], unique=False)
op.create_index(op.f('ix_shortened_urls_short'), 'shortened_urls', ['short'], unique=False)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_shortened_urls_short'), table_name='shortened_urls')
op.drop_index(op.f('ix_shortened_urls_id'), table_name='shortened_urls')
op.drop_table('shortened_urls')
# ### end Alembic commands ### | en | 0.506247 | empty message Revision ID: d28eba89a9a5 Revises: Create Date: 2021-04-05 15:26:02.942452 # revision identifiers, used by Alembic. # ### commands auto generated by Alembic - please adjust! ### # ### end Alembic commands ### # ### commands auto generated by Alembic - please adjust! ### # ### end Alembic commands ### | 1.806061 | 2 |
lib/googlecloudsdk/calliope/base.py | IsaacHuang/google-cloud-sdk | 0 | 6618174 | # Copyright 2013 Google Inc. All Rights Reserved.
"""Base classes for calliope commands and groups.
"""
import abc
from googlecloudsdk.calliope import usage_text
class LayoutException(Exception):
  """An exception for when a command or group .py file has the wrong types."""
  # Raised by _Common.FromModule when a module defines zero, or more than
  # one, _Common subclass.
class _Common(object):
  """Base class for Command and Group.

  Attributes:
    config: {str:object}, A set of key-value pairs that will persist (as long
      as they are JSON-serializable) between command invocations. Can be used
      for caching.
  """

  __metaclass__ = abc.ABCMeta
  # NOTE(review): _cli_holder is declared but never read in this class; the
  # classmethods below reference cls._cli_generator, which is not declared
  # in the visible code -- confirm which attribute the CLI loader assigns.
  _cli_holder = None
  _is_hidden = False
  _release_stage = None

  def __init__(self):
    pass

  @staticmethod
  def FromModule(module):
    """Get the type implementing CommandBase from the module.

    Args:
      module: module, The module resulting from importing the file containing a
        command.

    Returns:
      type, The custom class that implements CommandBase.

    Raises:
      LayoutException: If there is not exactly one type inheriting
        CommonBase.
    """
    command_type = None
    # Scan every module-level name for _Common subclasses; exactly one must
    # exist, otherwise the command file layout is wrong.
    for thing in module.__dict__.values():
      if issubclass(type(thing), type) and issubclass(thing, _Common):
        if command_type:
          raise LayoutException(
              'More than one _CommonBase subclasses in %s' % module.__file__)
        command_type = thing
    if not command_type:
      raise LayoutException(
          'No _CommonBase subclasses in %s' % module.__file__)
    return command_type

  @staticmethod
  def Args(parser):
    """Set up arguments for this command.

    Args:
      parser: An argparse.ArgumentParser-like object. It is mocked out in order
        to capture some information, but behaves like an ArgumentParser.
    """
    pass

  @classmethod
  def IsHidden(cls):
    # True when the @Hidden decorator was applied to the class.
    return cls._is_hidden

  @classmethod
  def ReleaseStage(cls):
    # Annotation set by the @Alpha/@Beta decorators, or None.
    return cls._release_stage

  @classmethod
  def GetExecutionFunction(cls, *args):
    """Get a fully bound function that will call another gcloud command.

    This class method can be called at any time to generate a function that will
    execute another gcloud command. The function itself can only be executed
    after the gcloud CLI has been build i.e. after all Args methods have
    been called.

    Args:
      *args: str, The args for the command to execute. Each token should be a
        separate string and the tokens should start from after the 'gcloud'
        part of the invocation.

    Returns:
      A bound function to call the gcloud command.
    """
    def ExecFunc():
      # Build the CLI lazily and run it without argcomplete processing.
      return cls._cli_generator.Generate().Execute(list(args),
                                                   call_arg_complete=False)
    return ExecFunc

  @classmethod
  def GetCLIGenerator(cls):
    """Get a generator function that can be used to execute a gcloud command.

    Returns:
      A bound generator function to execute a gcloud command.
    """
    return cls._cli_generator.Generate

  @classmethod
  def Execute(cls, *args):
    """Execute another gcloud command.

    This calls GetExecutionFunction and then directly executes it. See that
    method for full information.

    Args:
      *args: str, The args for the command to execute.

    Returns:
      The result of running the gcloud command.
    """
    return cls.GetExecutionFunction(*args)()
class Command(_Common):
  """Command is a base class for commands to implement.

  Attributes:
    context: {str:object}, A set of key-value pairs that can be used for
      common initialization among commands.
    entry_point: CommandGroup, The top-level group representing the containing
      command hierarchy.
    command: Command, The Command object representing this command.
    group: base.Group, The instance of the group class above this command. You
      can use this to access common methods within a group.
    format: func(obj), A function that prints objects to stdout using the
      user-chosen formatting option.
  """

  __metaclass__ = abc.ABCMeta

  def __init__(self, context, entry_point, command, group):
    super(Command, self).__init__()
    self.context = context
    self.entry_point = entry_point
    self.command = command
    self.group = group
    self.format = None  # This attribute is set before .Run() is called.

  @abc.abstractmethod
  def Run(self, args):
    """Run the command.

    Args:
      args: argparse.Namespace, An object that contains the values for the
        arguments specified in the .Args() method.

    Returns:
      A python object that is given back to the python caller, or sent to the
      .Display() method in CLI mode.
    """
    raise NotImplementedError('CommandBase.Run is not overridden')

  def Display(self, args, result):
    """Print the result for a human to read from the terminal.

    Args:
      args: argparse.Namespace: The same namespace given to the corresponding
        .Run() invocation.
      result: object, The object return by the corresponding .Run() invocation.
    """
    # Default is a no-op: subclasses override to pretty-print `result`.
    pass
class Group(_Common):
  """Group is a base class for groups to implement."""

  def __init__(self):
    super(Group, self).__init__()

  def Filter(self, context, args):
    """Modify the context that will be given to this group's commands when run.

    Args:
      context: {str:object}, A set of key-value pairs that can be used for
        common initialization among commands.
      args: argparse.Namespace: The same namespace given to the corresponding
        .Run() invocation.
    """
    # Default is a no-op: subgroups override to inject shared state.
    pass
class Argument(object):
  """A reusable argparse argument specification.

  Stores the positional and keyword arguments for parser.add_argument so the
  same argument can be attached to several parsers.
  """

  def __init__(self, *args, **kwargs):
    """Captures the argument configuration.

    Args:
      *args: The positional args to parser.add_argument.
      **kwargs: The keyword args to parser.add_argument; the special key
        'detailed_help' is stripped out here and attached to the created
        argument instead of being passed to argparse.
    """
    self.__detailed_help = kwargs.pop('detailed_help', None)
    self.__args = args
    self.__kwargs = kwargs

  def AddToParser(self, parser):
    """Adds this argument to the given parser.

    Args:
      parser: The argparse parser.

    Returns:
      The result of parser.add_argument().
    """
    new_arg = parser.add_argument(*self.__args, **self.__kwargs)
    if self.__detailed_help:
      new_arg.detailed_help = self.__detailed_help
    return new_arg
def Hidden(cmd_class):
  """Decorator that hides a calliope command or group from help text.

  Args:
    cmd_class: base._Common, A calliope command or group.

  Returns:
    The same class, marked as hidden.
  """
  # pylint: disable=protected-access
  setattr(cmd_class, '_is_hidden', True)
  return cmd_class
def Alpha(cmd_class):
  """Decorator for annotating a command or group as ALPHA.

  Args:
    cmd_class: base._Common, A calliope command or group.

  Returns:
    A modified version of the provided class.
  """
  # pylint: disable=protected-access
  # Marks the release stage so help text renders an (ALPHA) annotation.
  cmd_class._release_stage = usage_text.ReleaseStageAnnotation.ALPHA
  return cmd_class
def Beta(cmd_class):
  """Decorator for annotating a command or group as BETA.

  Args:
    cmd_class: base._Common, A calliope command or group.

  Returns:
    A modified version of the provided class.
  """
  # pylint: disable=protected-access
  # Marks the release stage so help text renders a (BETA) annotation.
  cmd_class._release_stage = usage_text.ReleaseStageAnnotation.BETA
  return cmd_class
| # Copyright 2013 Google Inc. All Rights Reserved.
"""Base classes for calliope commands and groups.
"""
import abc
from googlecloudsdk.calliope import usage_text
class LayoutException(Exception):
"""An exception for when a command or group .py file has the wrong types."""
class _Common(object):
"""Base class for Command and Group.
Attributes:
config: {str:object}, A set of key-value pairs that will persist (as long
as they are JSON-serializable) between command invocations. Can be used
for caching.
"""
__metaclass__ = abc.ABCMeta
_cli_holder = None
_is_hidden = False
_release_stage = None
def __init__(self):
pass
@staticmethod
def FromModule(module):
"""Get the type implementing CommandBase from the module.
Args:
module: module, The module resulting from importing the file containing a
command.
Returns:
type, The custom class that implements CommandBase.
Raises:
LayoutException: If there is not exactly one type inheriting
CommonBase.
"""
command_type = None
for thing in module.__dict__.values():
if issubclass(type(thing), type) and issubclass(thing, _Common):
if command_type:
raise LayoutException(
'More than one _CommonBase subclasses in %s' % module.__file__)
command_type = thing
if not command_type:
raise LayoutException(
'No _CommonBase subclasses in %s' % module.__file__)
return command_type
@staticmethod
def Args(parser):
"""Set up arguments for this command.
Args:
parser: An argparse.ArgumentParser-like object. It is mocked out in order
to capture some information, but behaves like an ArgumentParser.
"""
pass
@classmethod
def IsHidden(cls):
return cls._is_hidden
@classmethod
def ReleaseStage(cls):
return cls._release_stage
@classmethod
def GetExecutionFunction(cls, *args):
"""Get a fully bound function that will call another gcloud command.
This class method can be called at any time to generate a function that will
execute another gcloud command. The function itself can only be executed
after the gcloud CLI has been build i.e. after all Args methods have
been called.
Args:
*args: str, The args for the command to execute. Each token should be a
separate string and the tokens should start from after the 'gcloud'
part of the invocation.
Returns:
A bound function to call the gcloud command.
"""
def ExecFunc():
return cls._cli_generator.Generate().Execute(list(args),
call_arg_complete=False)
return ExecFunc
@classmethod
def GetCLIGenerator(cls):
"""Get a generator function that can be used to execute a gcloud command.
Returns:
A bound generator function to execute a gcloud command.
"""
return cls._cli_generator.Generate
@classmethod
def Execute(cls, *args):
"""Execute another gcloud command.
This calls GetExecutionFunction and then directly executes it. See that
method for full information.
Args:
*args: str, The args for the command to execute.
Returns:
The result of running the gcloud command.
"""
return cls.GetExecutionFunction(*args)()
class Command(_Common):
"""Command is a base class for commands to implement.
Attributes:
context: {str:object}, A set of key-value pairs that can be used for
common initialization among commands.
entry_point: CommandGroup, The top-level group representing the containing
command hierarchy.
command: Command, The Command object representing this command.
group: base.Group, The instance of the group class above this command. You
can use this to access common methods within a group.
format: func(obj), A function that prints objects to stdout using the
user-chosen formatting option.
"""
__metaclass__ = abc.ABCMeta
def __init__(self, context, entry_point, command, group):
super(Command, self).__init__()
self.context = context
self.entry_point = entry_point
self.command = command
self.group = group
self.format = None # This attribute is set before .Run() is called.
@abc.abstractmethod
def Run(self, args):
"""Run the command.
Args:
args: argparse.Namespace, An object that contains the values for the
arguments specified in the .Args() method.
Returns:
A python object that is given back to the python caller, or sent to the
.Display() method in CLI mode.
"""
raise NotImplementedError('CommandBase.Run is not overridden')
def Display(self, args, result):
"""Print the result for a human to read from the terminal.
Args:
args: argparse.Namespace: The same namespace given to the corresponding
.Run() invocation.
result: object, The object return by the corresponding .Run() invocation.
"""
pass
class Group(_Common):
"""Group is a base class for groups to implement.
"""
def __init__(self):
super(Group, self).__init__()
def Filter(self, context, args):
"""Modify the context that will be given to this group's commands when run.
Args:
context: {str:object}, A set of key-value pairs that can be used for
common initialization among commands.
args: argparse.Namespace: The same namespace given to the corresponding
.Run() invocation.
"""
pass
class Argument(object):
"""A class that allows you to save an argument configuration for reuse."""
def __init__(self, *args, **kwargs):
"""Creates the argument.
Args:
*args: The positional args to parser.add_argument.
**kwargs: The keyword args to parser.add_argument.
"""
try:
self.__detailed_help = kwargs.pop('detailed_help')
except KeyError:
self.__detailed_help = None
self.__args = args
self.__kwargs = kwargs
def AddToParser(self, parser):
"""Adds this argument to the given parser.
Args:
parser: The argparse parser.
Returns:
The result of parser.add_argument().
"""
arg = parser.add_argument(*self.__args, **self.__kwargs)
if self.__detailed_help:
arg.detailed_help = self.__detailed_help
return arg
def Hidden(cmd_class):
"""Decorator for hiding calliope commands and groups.
Decorate a subclass of base.Command or base.Group with this function, and the
decorated command or group will not show up in help text.
Args:
cmd_class: base._Common, A calliope command or group.
Returns:
A modified version of the provided class.
"""
# pylint: disable=protected-access
cmd_class._is_hidden = True
return cmd_class
def Alpha(cmd_class):
"""Decorator for annotating a command or group as ALPHA.
Args:
cmd_class: base._Common, A calliope command or group.
Returns:
A modified version of the provided class.
"""
# pylint: disable=protected-access
cmd_class._release_stage = usage_text.ReleaseStageAnnotation.ALPHA
return cmd_class
def Beta(cmd_class):
"""Decorator for annotating a command or group as BETA.
Args:
cmd_class: base._Common, A calliope command or group.
Returns:
A modified version of the provided class.
"""
# pylint: disable=protected-access
cmd_class._release_stage = usage_text.ReleaseStageAnnotation.BETA
return cmd_class
| en | 0.739039 | # Copyright 2013 Google Inc. All Rights Reserved. Base classes for calliope commands and groups. An exception for when a command or group .py file has the wrong types. Base class for Command and Group. Attributes: config: {str:object}, A set of key-value pairs that will persist (as long as they are JSON-serializable) between command invocations. Can be used for caching. Get the type implementing CommandBase from the module. Args: module: module, The module resulting from importing the file containing a command. Returns: type, The custom class that implements CommandBase. Raises: LayoutException: If there is not exactly one type inheriting CommonBase. Set up arguments for this command. Args: parser: An argparse.ArgumentParser-like object. It is mocked out in order to capture some information, but behaves like an ArgumentParser. Get a fully bound function that will call another gcloud command. This class method can be called at any time to generate a function that will execute another gcloud command. The function itself can only be executed after the gcloud CLI has been build i.e. after all Args methods have been called. Args: *args: str, The args for the command to execute. Each token should be a separate string and the tokens should start from after the 'gcloud' part of the invocation. Returns: A bound function to call the gcloud command. Get a generator function that can be used to execute a gcloud command. Returns: A bound generator function to execute a gcloud command. Execute another gcloud command. This calls GetExecutionFunction and then directly executes it. See that method for full information. Args: *args: str, The args for the command to execute. Returns: The result of running the gcloud command. Command is a base class for commands to implement. Attributes: context: {str:object}, A set of key-value pairs that can be used for common initialization among commands. 
entry_point: CommandGroup, The top-level group representing the containing command hierarchy. command: Command, The Command object representing this command. group: base.Group, The instance of the group class above this command. You can use this to access common methods within a group. format: func(obj), A function that prints objects to stdout using the user-chosen formatting option. # This attribute is set before .Run() is called. Run the command. Args: args: argparse.Namespace, An object that contains the values for the arguments specified in the .Args() method. Returns: A python object that is given back to the python caller, or sent to the .Display() method in CLI mode. Print the result for a human to read from the terminal. Args: args: argparse.Namespace: The same namespace given to the corresponding .Run() invocation. result: object, The object return by the corresponding .Run() invocation. Group is a base class for groups to implement. Modify the context that will be given to this group's commands when run. Args: context: {str:object}, A set of key-value pairs that can be used for common initialization among commands. args: argparse.Namespace: The same namespace given to the corresponding .Run() invocation. A class that allows you to save an argument configuration for reuse. Creates the argument. Args: *args: The positional args to parser.add_argument. **kwargs: The keyword args to parser.add_argument. Adds this argument to the given parser. Args: parser: The argparse parser. Returns: The result of parser.add_argument(). Decorator for hiding calliope commands and groups. Decorate a subclass of base.Command or base.Group with this function, and the decorated command or group will not show up in help text. Args: cmd_class: base._Common, A calliope command or group. Returns: A modified version of the provided class. # pylint: disable=protected-access Decorator for annotating a command or group as ALPHA. 
Args: cmd_class: base._Common, A calliope command or group. Returns: A modified version of the provided class. # pylint: disable=protected-access Decorator for annotating a command or group as BETA. Args: cmd_class: base._Common, A calliope command or group. Returns: A modified version of the provided class. # pylint: disable=protected-access | 2.585406 | 3 |
dampp/packages/frontend/main_window.py | s3h4n/DAMPP | 1 | 6618175 | <gh_stars>1-10
from ...src import constants
from ..backend import DockerHelper
from ..backend import FileHelper
from ..backend import ValidateHelper
from .dialogs import About
from .dialogs import EditPort
from .dialogs import NewProject
from .dialogs import Error
from .dialogs import Confirm
from PyQt5 import QtCore, QtGui, QtWidgets
from pathlib import Path
from sys import exit
import time
class Ui_MainWindow(object):
"""
Ui_MainWindow is the main window of the application.
:param object: self
:type object: object
"""
def __init__(self) -> None:
    """
    __init__ initializes the main window of the application.
    """
    # Filesystem layout pulled from the shared constants module.
    self.home = Path.home()
    self.main_directory = constants.MAIN_DIR
    self.env_file_name = constants.ENV_FILE_NAME
    self.public_directory = constants.PUBLIC_DIR
    # Backend helpers: docker control, file operations, environment checks.
    self.docker = DockerHelper()
    self.file = FileHelper()
    self.validate = ValidateHelper()
    # Reusable dialogs shown from menu actions and error paths.
    self.error = Error()
    self.confirm = Confirm()
    self.about = About()
    self.edit_port_dialog = EditPort()
    self.new_project = NewProject()
def setupUi(self, MainWindow: QtWidgets.QMainWindow) -> None:
    """
    setupUi sets up the main window of the application.

    :param MainWindow: MainWindow
    :type MainWindow: QMainWindow
    """
    # Abort early when required external tools are missing.
    # NOTE(review): dependancy_check() is invoked twice (once for the test,
    # once for the message), and a failed check exits with status 0; a
    # single call plus a nonzero exit code would be cleaner -- TODO confirm.
    if self.validate.dependancy_check() != True:
        self.error.show(self.validate.dependancy_check())
        exit(0)
    MainWindow.setObjectName("MainWindow")
    MainWindow.setFixedSize(800, 600)
    MainWindow.setWindowTitle("DAMPP")
    self.centralwidget = QtWidgets.QWidget(MainWindow)
    self.centralwidget.setObjectName("centralwidget")
    # --- Project selector row -------------------------------------------
    self.plocation_label = QtWidgets.QLabel(self.centralwidget)
    self.plocation_label.setGeometry(QtCore.QRect(20, 10, 180, 50))
    self.plocation_label.setObjectName("plocation_label")
    self.plocation = QtWidgets.QComboBox(self.centralwidget)
    self.plocation.setGeometry(QtCore.QRect(20, 60, 590, 50))
    self.plocation.setObjectName("plocation")
    self.plocation.setPlaceholderText("Please Select a Project")
    self.load_projects()
    self.plocation.currentTextChanged.connect(self.goto_project)
    # --- Action buttons (disabled until a project is selected) ----------
    self.start_stop_btn = QtWidgets.QPushButton(self.centralwidget)
    self.start_stop_btn.setGeometry(QtCore.QRect(630, 60, 150, 50))
    self.start_stop_btn.setCheckable(True)
    self.start_stop_btn.setChecked(False)
    self.start_stop_btn.setEnabled(False)
    self.start_stop_btn.setObjectName("start_stop_btn")
    self.start_stop_btn.clicked.connect(self.service_state)
    self.lhost_btn = QtWidgets.QPushButton(self.centralwidget)
    self.lhost_btn.setGeometry(QtCore.QRect(630, 160, 150, 50))
    self.lhost_btn.setEnabled(False)
    self.lhost_btn.setObjectName("lhost_btn")
    self.lhost_btn.clicked.connect(self.open_localhost)
    self.pma_btn = QtWidgets.QPushButton(self.centralwidget)
    self.pma_btn.setGeometry(QtCore.QRect(630, 230, 150, 50))
    self.pma_btn.setEnabled(False)
    self.pma_btn.setObjectName("pma_btn")
    self.pma_btn.clicked.connect(self.open_pma)
    self.flocation_btn = QtWidgets.QPushButton(self.centralwidget)
    self.flocation_btn.setGeometry(QtCore.QRect(630, 300, 150, 50))
    self.flocation_btn.setEnabled(False)
    self.flocation_btn.setObjectName("flocation_btn")
    self.flocation_btn.clicked.connect(self.open_project)
    # --- Output log -----------------------------------------------------
    self.op_log = QtWidgets.QTextBrowser(self.centralwidget)
    self.op_log.setGeometry(QtCore.QRect(20, 160, 590, 370))
    font = QtGui.QFont()
    font.setFamily("Monospace")  # fixed-width font for command output
    self.op_log.setFont(font)
    self.op_log.setObjectName("op_log")
    self.op_log_label = QtWidgets.QLabel(self.centralwidget)
    self.op_log_label.setGeometry(QtCore.QRect(20, 110, 100, 50))
    self.op_log_label.setObjectName("op_log_label")
    self.line = QtWidgets.QFrame(self.centralwidget)
    self.line.setGeometry(QtCore.QRect(130, 135, 650, 3))
    self.line.setFrameShape(QtWidgets.QFrame.Shape.HLine)
    self.line.setFrameShadow(QtWidgets.QFrame.Shadow.Sunken)
    self.line.setObjectName("line")
    MainWindow.setCentralWidget(self.centralwidget)
    # --- Menu bar, status bar and actions -------------------------------
    self.menubar = QtWidgets.QMenuBar(MainWindow)
    self.menubar.setGeometry(QtCore.QRect(0, 0, 800, 31))
    self.menubar.setObjectName("menubar")
    self.menuFile = QtWidgets.QMenu(self.menubar)
    self.menuFile.setObjectName("menuFile")
    self.menuTools = QtWidgets.QMenu(self.menubar)
    self.menuTools.setObjectName("menuTools")
    self.menuHelp = QtWidgets.QMenu(self.menubar)
    self.menuHelp.setObjectName("menuHelp")
    MainWindow.setMenuBar(self.menubar)
    self.statusbar = QtWidgets.QStatusBar(MainWindow)
    self.statusbar.setObjectName("statusbar")
    MainWindow.setStatusBar(self.statusbar)
    self.actionNew_Project = QtWidgets.QAction(MainWindow)
    self.actionNew_Project.setObjectName("actionNew_Project")
    self.actionNew_Project.triggered.connect(self.create_project)
    self.actionQuit = QtWidgets.QAction(MainWindow)
    self.actionQuit.setObjectName("actionQuit")
    self.actionEdit_Ports = QtWidgets.QAction(MainWindow)
    self.actionEdit_Ports.setObjectName("actionEdit_Ports")
    self.actionEdit_Ports.setEnabled(False)
    self.actionEdit_Ports.triggered.connect(self.edit_ports)
    self.actionRemove_Services = QtWidgets.QAction(MainWindow)
    self.actionRemove_Services.setObjectName("actionRemove_Services")
    self.actionRemove_Services.setEnabled(False)
    self.actionRemove_Services.triggered.connect(self.remove_services)
    self.actionDAMPP_Help = QtWidgets.QAction(MainWindow)
    self.actionDAMPP_Help.setObjectName("actionDAMPP_Help")
    self.actionAbout = QtWidgets.QAction(MainWindow)
    self.actionAbout.setObjectName("actionAbout")
    self.actionAbout.triggered.connect(self.about.show)
    self.menuFile.addAction(self.actionNew_Project)
    self.menuFile.addSeparator()
    self.menuFile.addAction(self.actionQuit)
    self.menuTools.addAction(self.actionEdit_Ports)
    self.menuTools.addAction(self.actionRemove_Services)
    self.menuHelp.addAction(self.actionDAMPP_Help)
    self.menuHelp.addSeparator()
    self.menuHelp.addAction(self.actionAbout)
    self.menubar.addAction(self.menuFile.menuAction())
    self.menubar.addAction(self.menuTools.menuAction())
    self.menubar.addAction(self.menuHelp.menuAction())
    self.retranslateUi(MainWindow)
    self.actionQuit.triggered.connect(self.exit_app)
    QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow: QtWidgets.QMainWindow) -> None:
"""
retranslateUi translates the UI.
:param MainWindow: The main window.
:type MainWindow: QMainWindow
"""
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "DAMPP"))
self.start_stop_btn.setText(_translate("MainWindow", "Start"))
self.plocation_label.setText(_translate("MainWindow", "Project Location"))
self.op_log_label.setText(_translate("MainWindow", "Output Log"))
self.lhost_btn.setText(_translate("MainWindow", "Localhost"))
self.pma_btn.setText(_translate("MainWindow", "PhpMyAdmin"))
self.flocation_btn.setText(_translate("MainWindow", "File Location"))
self.menuFile.setTitle(_translate("MainWindow", "File"))
self.menuTools.setTitle(_translate("MainWindow", "Tools"))
self.menuHelp.setTitle(_translate("MainWindow", "Help"))
self.actionNew_Project.setText(_translate("MainWindow", "New Project"))
self.actionNew_Project.setShortcut(_translate("MainWindow", "Ctrl+N"))
self.actionQuit.setText(_translate("MainWindow", "Quit"))
self.actionQuit.setShortcut(_translate("MainWindow", "Ctrl+Q"))
self.actionEdit_Ports.setText(_translate("MainWindow", "Edit Ports"))
self.actionEdit_Ports.setShortcut(_translate("MainWindow", "Ctrl+Shift+E"))
self.actionRemove_Services.setText(_translate("MainWindow", "Remove Services"))
self.actionRemove_Services.setShortcut(_translate("MainWindow", "Ctrl+Shift+R"))
self.actionDAMPP_Help.setText(_translate("MainWindow", "DAMPP Help"))
self.actionDAMPP_Help.setShortcut(_translate("MainWindow", "Ctrl+H"))
self.actionAbout.setText(_translate("MainWindow", "About "))
def load_projects(self) -> None:
"""
load_projects loads the projects from the main directory.
"""
self.plocation.clear()
for project in self.file.list_directory(f"{self.home}/{self.main_directory}"):
self.plocation.addItem(project)
def goto_project(self) -> None:
"""
goto_project goes to the selected project.
"""
self.directory = self.plocation.currentText()
self.file.change_directory(self.directory)
if self.validate.requirement_check() != True:
self.button_state(False)
self.action_state(False)
self.create_log(self.validate.requirement_check())
else:
self.button_state(True)
self.action_state(True)
self.create_log(
"<span style='color:green;'>All the requirements are met.</span>"
)
self.port = self.file.find_ports(self.env_file_name)
def create_log(self, message: str) -> None:
"""
create_log creates the log.
:param message: The message to be displayed.
:type message: str
"""
self.current_time = time.localtime()
self.current_time = time.strftime("%H:%M:%S", self.current_time)
self.op_log.append(
"<html><body>"
+ "<b style='color:blue;'>"
+ f"[{self.current_time}]"
+ "</b>"
+ " >>> "
+ message
+ "<br/></body></html>"
)
def button_state(self, state: bool) -> None:
"""
button_state changes the state of the buttons.
:param state: The state of the buttons.
:type state: bool
"""
self.start_stop_btn.setEnabled(state)
self.lhost_btn.setEnabled(state)
self.pma_btn.setEnabled(state)
self.flocation_btn.setEnabled(state)
def action_state(self, state: bool) -> None:
"""
action_state changes the state of the actions.
:param state: The state of the actions.
:type state: bool
"""
self.actionEdit_Ports.setEnabled(state)
self.actionRemove_Services.setEnabled(state)
def service_state(self) -> None:
"""
service_state changes the state of the services.
"""
ready_msg_1 = "Starting..."
ready_msg_2 = "Stopping..."
success_msg_1 = "Service started."
success_msg_2 = "Service stopped."
error_msg_1 = "Service failed to start."
error_msg_2 = "Service failed to stop."
btn_state = self.start_stop_btn.isChecked()
if btn_state:
self.create_log(ready_msg_1)
self.start_stop_btn.setText("Stop")
if self.docker.start():
self.create_log(success_msg_1)
else:
self.create_log(error_msg_1)
self.error.show(error_msg_1)
else:
self.create_log(ready_msg_2)
self.start_stop_btn.setText("Start")
if self.docker.stop():
self.create_log(success_msg_2)
else:
self.create_log(error_msg_2)
self.error.show(error_msg_2)
def open_localhost(self) -> None:
"""
open_localhost opens the localhost.
"""
ready_msg = "Opening localhost..."
success_msg = "Opened localhost."
warning_msg = "Please start the service first."
error_msg = "Failed to open localhost."
url = f"http://localhost:{self.port['WEB_PORT']}"
if self.start_stop_btn.isChecked():
self.create_log(ready_msg)
try:
self.file.open_this(url)
self.create_log(success_msg)
except:
self.create_log(error_msg)
self.error.show(error_msg)
else:
self.create_log(warning_msg)
self.error.show(warning_msg)
def open_pma(self) -> None:
"""
open_pma opens the phpmyadmin.
"""
ready_msg = "Opening phpmyadmin..."
success_msg = "Opened phpmyadmin."
warning_msg = "Please start the service first."
error_msg = "Failed to open phpmyadmin."
url = f"http://localhost:{self.port['PMA_PORT']}"
if self.start_stop_btn.isChecked():
self.create_log(ready_msg)
try:
self.file.open_this(url)
self.create_log(success_msg)
except:
self.create_log(error_msg)
self.error.show(error_msg)
else:
self.create_log(warning_msg)
self.error.show(warning_msg)
def open_project(self) -> None:
"""
open_project opens the project.
"""
success_msg = "Opened project."
error_msg = "Failed to open project folder."
url = f"{self.directory}/{self.public_directory}"
self.create_log("Opening project...")
try:
self.file.open_this(url)
self.create_log(success_msg)
except:
self.create_log(error_msg)
self.error.show(error_msg)
def create_project(self) -> None:
"""
create_project creates the project.
"""
ready_msg = "Adding new project..."
success_msg = "Project created."
error_msg = "Failed to create project."
self.create_log(ready_msg)
if self.new_project.show():
self.create_log(success_msg)
else:
self.create_log(error_msg)
self.error.show(error_msg)
self.load_projects()
def exit_app(self) -> None:
"""
exit_app will exit the application.
"""
ready_msg = "Stopping services..."
confirm_msg = "Are you sure you want to quit?"
success_msg = "Exited."
cancel_msg = "Exiting canceled."
if self.confirm.show(confirm_msg):
self.create_log(ready_msg)
self.docker.stop()
self.create_log(success_msg)
exit()
else:
self.create_log(cancel_msg)
def edit_ports(self) -> None:
"""
edit_ports will edit the ports.
"""
ready_msg = "Editing ports..."
success_msg = "Ports edited."
warning_msg = "Please start the service first."
error_msg = "Failed to edit ports."
self.create_log(ready_msg)
if not self.start_stop_btn.isChecked():
result = self.edit_port_dialog.show()
if result != False:
self.create_log(success_msg)
else:
self.create_log(error_msg)
self.error.show(error_msg)
else:
self.create_log(warning_msg)
self.error.show(warning_msg)
self.port = self.file.find_ports(self.env_file_name)
def remove_services(self) -> None:
"""
remove_services will remove the services.
"""
ready_msg = "Removing services..."
confirm_msg = "Are you sure you want to remove the services?"
success_msg = "Services removed."
warning_msg = "Please start the service first."
error_msg = "Failed to remove services."
cancel_msg = "Services removal canceled."
if not self.start_stop_btn.isChecked():
self.create_log(ready_msg)
if self.confirm.show(confirm_msg):
if self.docker.remove():
self.create_log(success_msg)
else:
self.create_log(error_msg)
self.error.show(error_msg)
else:
self.create_log(cancel_msg)
else:
self.create_log(warning_msg)
self.error.show(warning_msg)
| from ...src import constants
from ..backend import DockerHelper
from ..backend import FileHelper
from ..backend import ValidateHelper
from .dialogs import About
from .dialogs import EditPort
from .dialogs import NewProject
from .dialogs import Error
from .dialogs import Confirm
from PyQt5 import QtCore, QtGui, QtWidgets
from pathlib import Path
from sys import exit
import time
class Ui_MainWindow(object):
"""
Ui_MainWindow is the main window of the application.
:param object: self
:type object: object
"""
def __init__(self) -> None:
    """
    __init__ initializes the main window of the application.

    Sets up filesystem constants, the backend helpers and the dialog
    windows used by the UI; no widgets are created here (see setupUi).
    """
    # Base paths: projects live under <home>/<MAIN_DIR>.
    self.home = Path.home()
    self.main_directory = constants.MAIN_DIR
    self.env_file_name = constants.ENV_FILE_NAME
    self.public_directory = constants.PUBLIC_DIR
    # Backend helpers for docker control, file access and validation.
    self.docker = DockerHelper()
    self.file = FileHelper()
    self.validate = ValidateHelper()
    # Dialogs shown from menu actions and error paths.
    self.error = Error()
    self.confirm = Confirm()
    self.about = About()
    self.edit_port_dialog = EditPort()
    self.new_project = NewProject()
def setupUi(self, MainWindow: QtWidgets.QMainWindow) -> None:
    """
    setupUi sets up the main window of the application.

    Builds every widget, menu and action, wires their signals, and
    aborts the application when the system dependencies are missing.

    :param MainWindow: MainWindow
    :type MainWindow: QMainWindow
    """
    # Run the dependency check once and reuse the result; the original
    # called dependancy_check() twice (once for the test, once for the
    # error dialog). On failure the helper returns the message to show.
    dependency_status = self.validate.dependancy_check()
    if dependency_status != True:
        self.error.show(dependency_status)
        exit(0)
    # --- Main window and central widget ---
    MainWindow.setObjectName("MainWindow")
    MainWindow.setFixedSize(800, 600)
    MainWindow.setWindowTitle("DAMPP")
    self.centralwidget = QtWidgets.QWidget(MainWindow)
    self.centralwidget.setObjectName("centralwidget")
    # --- Project selector ---
    self.plocation_label = QtWidgets.QLabel(self.centralwidget)
    self.plocation_label.setGeometry(QtCore.QRect(20, 10, 180, 50))
    self.plocation_label.setObjectName("plocation_label")
    self.plocation = QtWidgets.QComboBox(self.centralwidget)
    self.plocation.setGeometry(QtCore.QRect(20, 60, 590, 50))
    self.plocation.setObjectName("plocation")
    self.plocation.setPlaceholderText("Please Select a Project")
    self.load_projects()
    self.plocation.currentTextChanged.connect(self.goto_project)
    # --- Action buttons (disabled until a valid project is chosen) ---
    self.start_stop_btn = QtWidgets.QPushButton(self.centralwidget)
    self.start_stop_btn.setGeometry(QtCore.QRect(630, 60, 150, 50))
    self.start_stop_btn.setCheckable(True)
    self.start_stop_btn.setChecked(False)
    self.start_stop_btn.setEnabled(False)
    self.start_stop_btn.setObjectName("start_stop_btn")
    self.start_stop_btn.clicked.connect(self.service_state)
    self.lhost_btn = QtWidgets.QPushButton(self.centralwidget)
    self.lhost_btn.setGeometry(QtCore.QRect(630, 160, 150, 50))
    self.lhost_btn.setEnabled(False)
    self.lhost_btn.setObjectName("lhost_btn")
    self.lhost_btn.clicked.connect(self.open_localhost)
    self.pma_btn = QtWidgets.QPushButton(self.centralwidget)
    self.pma_btn.setGeometry(QtCore.QRect(630, 230, 150, 50))
    self.pma_btn.setEnabled(False)
    self.pma_btn.setObjectName("pma_btn")
    self.pma_btn.clicked.connect(self.open_pma)
    self.flocation_btn = QtWidgets.QPushButton(self.centralwidget)
    self.flocation_btn.setGeometry(QtCore.QRect(630, 300, 150, 50))
    self.flocation_btn.setEnabled(False)
    self.flocation_btn.setObjectName("flocation_btn")
    self.flocation_btn.clicked.connect(self.open_project)
    # --- Output log ---
    self.op_log = QtWidgets.QTextBrowser(self.centralwidget)
    self.op_log.setGeometry(QtCore.QRect(20, 160, 590, 370))
    font = QtGui.QFont()
    font.setFamily("Monospace")
    self.op_log.setFont(font)
    self.op_log.setObjectName("op_log")
    self.op_log_label = QtWidgets.QLabel(self.centralwidget)
    self.op_log_label.setGeometry(QtCore.QRect(20, 110, 100, 50))
    self.op_log_label.setObjectName("op_log_label")
    self.line = QtWidgets.QFrame(self.centralwidget)
    self.line.setGeometry(QtCore.QRect(130, 135, 650, 3))
    self.line.setFrameShape(QtWidgets.QFrame.Shape.HLine)
    self.line.setFrameShadow(QtWidgets.QFrame.Shadow.Sunken)
    self.line.setObjectName("line")
    MainWindow.setCentralWidget(self.centralwidget)
    # --- Menu bar, status bar and actions ---
    self.menubar = QtWidgets.QMenuBar(MainWindow)
    self.menubar.setGeometry(QtCore.QRect(0, 0, 800, 31))
    self.menubar.setObjectName("menubar")
    self.menuFile = QtWidgets.QMenu(self.menubar)
    self.menuFile.setObjectName("menuFile")
    self.menuTools = QtWidgets.QMenu(self.menubar)
    self.menuTools.setObjectName("menuTools")
    self.menuHelp = QtWidgets.QMenu(self.menubar)
    self.menuHelp.setObjectName("menuHelp")
    MainWindow.setMenuBar(self.menubar)
    self.statusbar = QtWidgets.QStatusBar(MainWindow)
    self.statusbar.setObjectName("statusbar")
    MainWindow.setStatusBar(self.statusbar)
    self.actionNew_Project = QtWidgets.QAction(MainWindow)
    self.actionNew_Project.setObjectName("actionNew_Project")
    self.actionNew_Project.triggered.connect(self.create_project)
    self.actionQuit = QtWidgets.QAction(MainWindow)
    self.actionQuit.setObjectName("actionQuit")
    self.actionEdit_Ports = QtWidgets.QAction(MainWindow)
    self.actionEdit_Ports.setObjectName("actionEdit_Ports")
    self.actionEdit_Ports.setEnabled(False)
    self.actionEdit_Ports.triggered.connect(self.edit_ports)
    self.actionRemove_Services = QtWidgets.QAction(MainWindow)
    self.actionRemove_Services.setObjectName("actionRemove_Services")
    self.actionRemove_Services.setEnabled(False)
    self.actionRemove_Services.triggered.connect(self.remove_services)
    self.actionDAMPP_Help = QtWidgets.QAction(MainWindow)
    self.actionDAMPP_Help.setObjectName("actionDAMPP_Help")
    self.actionAbout = QtWidgets.QAction(MainWindow)
    self.actionAbout.setObjectName("actionAbout")
    self.actionAbout.triggered.connect(self.about.show)
    self.menuFile.addAction(self.actionNew_Project)
    self.menuFile.addSeparator()
    self.menuFile.addAction(self.actionQuit)
    self.menuTools.addAction(self.actionEdit_Ports)
    self.menuTools.addAction(self.actionRemove_Services)
    self.menuHelp.addAction(self.actionDAMPP_Help)
    self.menuHelp.addSeparator()
    self.menuHelp.addAction(self.actionAbout)
    self.menubar.addAction(self.menuFile.menuAction())
    self.menubar.addAction(self.menuTools.menuAction())
    self.menubar.addAction(self.menuHelp.menuAction())
    self.retranslateUi(MainWindow)
    self.actionQuit.triggered.connect(self.exit_app)
    QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow: QtWidgets.QMainWindow) -> None:
    """
    retranslateUi translates the UI.

    Assigns all user-visible texts and keyboard shortcuts through Qt's
    translation machinery so they can be localized.

    :param MainWindow: The main window.
    :type MainWindow: QMainWindow
    """
    _translate = QtCore.QCoreApplication.translate
    MainWindow.setWindowTitle(_translate("MainWindow", "DAMPP"))
    self.start_stop_btn.setText(_translate("MainWindow", "Start"))
    self.plocation_label.setText(_translate("MainWindow", "Project Location"))
    self.op_log_label.setText(_translate("MainWindow", "Output Log"))
    self.lhost_btn.setText(_translate("MainWindow", "Localhost"))
    self.pma_btn.setText(_translate("MainWindow", "PhpMyAdmin"))
    self.flocation_btn.setText(_translate("MainWindow", "File Location"))
    self.menuFile.setTitle(_translate("MainWindow", "File"))
    self.menuTools.setTitle(_translate("MainWindow", "Tools"))
    self.menuHelp.setTitle(_translate("MainWindow", "Help"))
    self.actionNew_Project.setText(_translate("MainWindow", "New Project"))
    self.actionNew_Project.setShortcut(_translate("MainWindow", "Ctrl+N"))
    self.actionQuit.setText(_translate("MainWindow", "Quit"))
    self.actionQuit.setShortcut(_translate("MainWindow", "Ctrl+Q"))
    self.actionEdit_Ports.setText(_translate("MainWindow", "Edit Ports"))
    self.actionEdit_Ports.setShortcut(_translate("MainWindow", "Ctrl+Shift+E"))
    self.actionRemove_Services.setText(_translate("MainWindow", "Remove Services"))
    self.actionRemove_Services.setShortcut(_translate("MainWindow", "Ctrl+Shift+R"))
    self.actionDAMPP_Help.setText(_translate("MainWindow", "DAMPP Help"))
    self.actionDAMPP_Help.setShortcut(_translate("MainWindow", "Ctrl+H"))
    # NOTE(review): the trailing space in "About " looks unintentional --
    # confirm before changing, since it is a user-visible string.
    self.actionAbout.setText(_translate("MainWindow", "About "))
def load_projects(self) -> None:
    """
    load_projects loads the projects from the main directory.

    Clears the project combo box and repopulates it with every entry
    found under <home>/<main_directory>.
    """
    projects_root = f"{self.home}/{self.main_directory}"
    self.plocation.clear()
    for entry in self.file.list_directory(projects_root):
        self.plocation.addItem(entry)
def goto_project(self) -> None:
    """
    goto_project goes to the selected project.

    Switches the working directory to the selected project, checks the
    project requirements, and enables/disables the UI accordingly.
    """
    self.directory = self.plocation.currentText()
    self.file.change_directory(self.directory)
    # Call requirement_check() once and reuse the result; the original
    # called it twice (once for the test, once for the log message).
    requirement_status = self.validate.requirement_check()
    if requirement_status != True:
        self.button_state(False)
        self.action_state(False)
        # On failure the helper returns the message to display.
        self.create_log(requirement_status)
    else:
        self.button_state(True)
        self.action_state(True)
        self.create_log(
            "<span style='color:green;'>All the requirements are met.</span>"
        )
        self.port = self.file.find_ports(self.env_file_name)
def create_log(self, message: str) -> None:
    """
    create_log creates the log.

    Appends *message* to the output log, prefixed with the current
    wall-clock time rendered in blue.

    :param message: The message to be displayed.
    :type message: str
    """
    self.current_time = time.strftime("%H:%M:%S", time.localtime())
    entry = (
        "<html><body>"
        f"<b style='color:blue;'>[{self.current_time}]</b>"
        f" >>> {message}<br/></body></html>"
    )
    self.op_log.append(entry)
def button_state(self, state: bool) -> None:
    """
    button_state changes the state of the buttons.

    Enables or disables every action button on the main window at once.

    :param state: The state of the buttons.
    :type state: bool
    """
    for button in (
        self.start_stop_btn,
        self.lhost_btn,
        self.pma_btn,
        self.flocation_btn,
    ):
        button.setEnabled(state)
def action_state(self, state: bool) -> None:
    """
    action_state changes the state of the actions.

    Enables or disables the project-specific menu actions together.

    :param state: The state of the actions.
    :type state: bool
    """
    for action in (self.actionEdit_Ports, self.actionRemove_Services):
        action.setEnabled(state)
def service_state(self) -> None:
    """
    service_state changes the state of the services.

    Starts the docker services when the toggle button is checked and
    stops them when it is unchecked, logging the outcome either way.
    """
    starting = self.start_stop_btn.isChecked()
    if starting:
        ready_msg = "Starting..."
        success_msg = "Service started."
        error_msg = "Service failed to start."
        self.create_log(ready_msg)
        self.start_stop_btn.setText("Stop")
        succeeded = self.docker.start()
    else:
        ready_msg = "Stopping..."
        success_msg = "Service stopped."
        error_msg = "Service failed to stop."
        self.create_log(ready_msg)
        self.start_stop_btn.setText("Start")
        succeeded = self.docker.stop()
    if succeeded:
        self.create_log(success_msg)
    else:
        self.create_log(error_msg)
        self.error.show(error_msg)
def open_localhost(self) -> None:
    """
    open_localhost opens the localhost.

    Opens http://localhost:<WEB_PORT> in the default handler; refuses
    (with a warning dialog) when the services are not running.
    """
    ready_msg = "Opening localhost..."
    success_msg = "Opened localhost."
    warning_msg = "Please start the service first."
    error_msg = "Failed to open localhost."
    url = f"http://localhost:{self.port['WEB_PORT']}"
    if self.start_stop_btn.isChecked():
        self.create_log(ready_msg)
        try:
            self.file.open_this(url)
            self.create_log(success_msg)
        # Catch Exception rather than a bare except so that
        # KeyboardInterrupt/SystemExit are not swallowed.
        except Exception:
            self.create_log(error_msg)
            self.error.show(error_msg)
    else:
        self.create_log(warning_msg)
        self.error.show(warning_msg)
def open_pma(self) -> None:
    """
    open_pma opens the phpmyadmin.

    Opens http://localhost:<PMA_PORT> in the default handler; refuses
    (with a warning dialog) when the services are not running.
    """
    ready_msg = "Opening phpmyadmin..."
    success_msg = "Opened phpmyadmin."
    warning_msg = "Please start the service first."
    error_msg = "Failed to open phpmyadmin."
    url = f"http://localhost:{self.port['PMA_PORT']}"
    if self.start_stop_btn.isChecked():
        self.create_log(ready_msg)
        try:
            self.file.open_this(url)
            self.create_log(success_msg)
        # Catch Exception rather than a bare except so that
        # KeyboardInterrupt/SystemExit are not swallowed.
        except Exception:
            self.create_log(error_msg)
            self.error.show(error_msg)
    else:
        self.create_log(warning_msg)
        self.error.show(warning_msg)
def open_project(self) -> None:
    """
    open_project opens the project.

    Opens the project's public directory with the system file handler.
    """
    success_msg = "Opened project."
    error_msg = "Failed to open project folder."
    url = f"{self.directory}/{self.public_directory}"
    self.create_log("Opening project...")
    try:
        self.file.open_this(url)
        self.create_log(success_msg)
    # Catch Exception rather than a bare except so that
    # KeyboardInterrupt/SystemExit are not swallowed.
    except Exception:
        self.create_log(error_msg)
        self.error.show(error_msg)
def create_project(self) -> None:
    """
    create_project creates the project.

    Shows the new-project dialog, logs the outcome, and refreshes the
    project list regardless of success.
    """
    self.create_log("Adding new project...")
    if self.new_project.show():
        self.create_log("Project created.")
    else:
        failure = "Failed to create project."
        self.create_log(failure)
        self.error.show(failure)
    self.load_projects()
def exit_app(self) -> None:
    """
    exit_app will exit the application.

    Asks for confirmation; on "yes" stops the docker services and
    terminates the process, otherwise just logs the cancellation.
    """
    if not self.confirm.show("Are you sure you want to quit?"):
        self.create_log("Exiting canceled.")
        return
    self.create_log("Stopping services...")
    self.docker.stop()
    self.create_log("Exited.")
    exit()
def edit_ports(self) -> None:
    """
    edit_ports will edit the ports.

    Shows the port-editing dialog (only while the services are stopped)
    and then re-reads the port mapping from the .env file.
    """
    ready_msg = "Editing ports..."
    success_msg = "Ports edited."
    warning_msg = "Please start the service first."
    error_msg = "Failed to edit ports."
    self.create_log(ready_msg)
    # Port edits are only allowed while the services are stopped.
    if not self.start_stop_btn.isChecked():
        result = self.edit_port_dialog.show()
        # NOTE(review): any value other than False (including None) is
        # treated as success -- presumably the dialog returns False on
        # cancel/failure; confirm it cannot return None.
        if result != False:
            self.create_log(success_msg)
        else:
            self.create_log(error_msg)
            self.error.show(error_msg)
    else:
        self.create_log(warning_msg)
        self.error.show(warning_msg)
    # Refresh self.port so subsequent open_* calls use the new ports.
    self.port = self.file.find_ports(self.env_file_name)
def remove_services(self) -> None:
    """
    remove_services will remove the services.

    After user confirmation, removes the docker services for the
    current project; refuses while the services are running.
    """
    ready_msg = "Removing services..."
    confirm_msg = "Are you sure you want to remove the services?"
    success_msg = "Services removed."
    warning_msg = "Please start the service first."
    error_msg = "Failed to remove services."
    cancel_msg = "Services removal canceled."
    # Removal is only allowed while the services are stopped.
    if not self.start_stop_btn.isChecked():
        self.create_log(ready_msg)
        if self.confirm.show(confirm_msg):
            if self.docker.remove():
                self.create_log(success_msg)
            else:
                self.create_log(error_msg)
                self.error.show(error_msg)
        else:
            self.create_log(cancel_msg)
    else:
        self.create_log(warning_msg)
        self.error.show(warning_msg)
arrays/frogJump/Solution.py | shahbagdadi/py-algo-n-ds | 0 | 6618176 | <filename>arrays/frogJump/Solution.py
from typing import List
from collections import defaultdict
class Solution:
    """Frog Jump (LeetCode 403).

    Decide whether the frog can hop from the first stone to the last,
    given that after a jump of k units the next jump must be k-1, k or
    k+1 units, always landing on a stone.
    """

    def canCross(self, stones: List[int]) -> bool:
        """Return True iff the last stone in *stones* is reachable."""
        # The very first jump is always exactly 1 unit long.
        if stones[1] != 1:
            return False
        # jumps_to[pos]: set of jump sizes that can land on stone `pos`.
        jumps_to = {position: set() for position in stones}
        jumps_to[1].add(1)
        for position in stones[:-1]:
            for last_jump in jumps_to[position]:
                for step in (last_jump - 1, last_jump, last_jump + 1):
                    target = position + step
                    if step > 0 and target in jumps_to:
                        jumps_to[target].add(step)
        return len(jumps_to[stones[-1]]) > 0
# Quick manual check of Solution.canCross on a sample input.
solver = Solution()
stones = [0, 1, 3, 5, 6, 8, 12, 17]
# stones = [0, 1, 2, 3, 4, 8, 9, 11]
answer = solver.canCross(stones)
print(answer)
from typing import List
from collections import defaultdict
class Solution:
def canCross(self, stones: List[int]) -> bool:
if stones[1] != 1:
return False
d = {x: set() for x in stones}
d[1].add(1) # since first stone is always 0 and jump to stone[1] is 1
for x in stones[:-1]:
for j in d[x]:
for k in range(j-1, j+2):
if k > 0 and x+k in d:
d[x+k].add(k)
return bool(d[stones[-1]])
s = Solution()
ip = [0,1,3,5,6,8,12,17]
# ip = [0,1,2,3,4,8,9,11]
ans = s.canCross(ip)
print(ans) | en | 0.985528 | # since first stone is always 0 and jump to stone[1] is 1 # ip = [0,1,2,3,4,8,9,11] | 3.72062 | 4 |
searchapp/__init__.py | MehwishUmer/flask_search-master | 0 | 6618177 | # Programmer: <NAME>
# Email: <EMAIL>
# WWW: sinafathi.com
from flask import Flask
from config import Config

# Create the Flask application and load settings from the Config object.
searchapp = Flask(__name__)
searchapp.config.from_object(Config)
# Enable Jinja2's "do" extension so templates can execute statements.
searchapp.jinja_env.add_extension('jinja2.ext.do')

# Imported after the app object exists to avoid a circular import.
from searchapp import routes

from flask_bootstrap import Bootstrap # import twitter bootstrap library

bootstrap = Bootstrap(searchapp)
| # Programmer: <NAME>
# Email: <EMAIL>
# WWW: sinafathi.com
from flask import Flask
from config import Config
searchapp = Flask(__name__)
searchapp.config.from_object(Config)
searchapp.jinja_env.add_extension('jinja2.ext.do')
from searchapp import routes
from flask_bootstrap import Bootstrap # import twitter bootstrap library
bootstrap = Bootstrap(searchapp)
| en | 0.219732 | # Programmer: <NAME> # Email: <EMAIL> # WWW: sinafathi.com # import twitter bootstrap library | 1.802958 | 2 |
emmaa/tests/test_lambda.py | pagreene/emmaa | 6 | 6618178 | import boto3
import pickle
import unittest
from indra_reading.batch.monitor import BatchMonitor
from emmaa.aws_lambda_functions.model_tests import lambda_handler, QUEUE
from emmaa.util import make_date_str, get_s3_client
RUN_STATI = ['SUBMITTED', 'PENDING', 'RUNNABLE', 'RUNNING']
DONE_STATI = ['SUCCEEDED', 'FAILED']
def __get_jobs(batch):
    """Return a mapping of job status -> list of job ids in QUEUE.

    :param batch: a boto3 Batch client (must expose ``list_jobs``).
    """
    job_ids = {}
    for status in RUN_STATI + DONE_STATI:
        resp = batch.list_jobs(jobQueue=QUEUE, jobStatus=status)
        # Membership test on the dict directly; ``in resp.keys()`` is an
        # anti-idiom that builds a keys view for nothing.
        if 'jobSummaryList' in resp:
            job_ids[status] = [s['jobId'] for s in resp['jobSummaryList']]
    return job_ids
@unittest.skip('Local test without starting up batch job not yet implemented')
def test_handler():
    """Test the lambda handler locally.

    Feeds a synthetic S3 put-event to the lambda, waits for the batch
    job it submits, and checks that results were written back to S3.
    """
    dts = make_date_str()
    key = f'models/test/test_model_{dts}.pkl'
    # Minimal S3 event payload shaped like the one AWS delivers.
    event = {'Records': [{'s3': {'object': {'key': key}}}]}
    context = None
    res = lambda_handler(event, context)
    print(res)
    assert res['statusCode'] == 200, res
    assert res['result'] == 'SUCCESS', res
    assert res['job_id'], res
    job_id = res['job_id']
    results = {}
    # Block until the submitted batch job finishes, collecting outcomes.
    monitor = BatchMonitor(QUEUE, [{'jobId': job_id}])
    monitor.watch_and_wait(result_record=results)
    print(results)
    assert job_id in [job_def['jobId'] for job_def in results['succeeded']], \
        results['failed']
    s3 = get_s3_client()
    # dts[:10] is the date part of the timestamp string.
    s3_res = s3.list_objects(Bucket='emmaa', Prefix='results/test/' + dts[:10])
    print(s3_res.keys())
    assert s3_res, s3_res
@unittest.skip('Unfinished test. See comments in code')
def test_s3_response():
    """Change a file on s3 and check for the correct response."""
    # This will be a white-box test. We will check progress at various stages.
    s3 = get_s3_client()
    batch = boto3.client('batch')
    # Define some fairly random parameters.
    key = f'models/test/model_{make_date_str()}.pkl'
    data = {'test_message': 'Hello world!'}
    # This should trigger the lambda to start a batch job.
    s3.put_object(Bucket='emmaa', Key=key, Body=pickle.dumps(data))
    # TODO
    # 1. verify that lambda has started a batch job
    # 2. kill batch job
    # 3. delete uploaded pickle
| import boto3
import pickle
import unittest
from indra_reading.batch.monitor import BatchMonitor
from emmaa.aws_lambda_functions.model_tests import lambda_handler, QUEUE
from emmaa.util import make_date_str, get_s3_client
RUN_STATI = ['SUBMITTED', 'PENDING', 'RUNNABLE', 'RUNNING']
DONE_STATI = ['SUCCEEDED', 'FAILED']
def __get_jobs(batch):
job_ids = {}
for status in RUN_STATI + DONE_STATI:
resp = batch.list_jobs(jobQueue=QUEUE, jobStatus=status)
if 'jobSummaryList' in resp.keys():
job_ids[status] = [s['jobId'] for s in resp['jobSummaryList']]
return job_ids
@unittest.skip('Local test without starting up batch job not yet implemented')
def test_handler():
"""Test the lambda handler locally."""
dts = make_date_str()
key = f'models/test/test_model_{dts}.pkl'
event = {'Records': [{'s3': {'object': {'key': key}}}]}
context = None
res = lambda_handler(event, context)
print(res)
assert res['statusCode'] == 200, res
assert res['result'] == 'SUCCESS', res
assert res['job_id'], res
job_id = res['job_id']
results = {}
monitor = BatchMonitor(QUEUE, [{'jobId': job_id}])
monitor.watch_and_wait(result_record=results)
print(results)
assert job_id in [job_def['jobId'] for job_def in results['succeeded']], \
results['failed']
s3 = get_s3_client()
s3_res = s3.list_objects(Bucket='emmaa', Prefix='results/test/' + dts[:10])
print(s3_res.keys())
assert s3_res, s3_res
@unittest.skip('Unfinished test. See comments in code')
def test_s3_response():
"""Change a file on s3 and check for the correct response."""
# This will be a white-box test. We will check progress at various stages.
s3 = get_s3_client()
batch = boto3.client('batch')
# Define some fairly random parameters.
key = f'models/test/model_{make_date_str()}.pkl'
data = {'test_message': 'Hello world!'}
# This should trigger the lambda to start a batch job.
s3.put_object(Bucket='emmaa', Key=key, Body=pickle.dumps(data))
# TODO
# 1. verify that lambda has started a batch job
# 2. kill batch job
# 3. delete uploaded pickle
| en | 0.922263 | Test the lambda handler locally. Change a file on s3 and check for the correct response. # This will be a white-box test. We will check progress at various stages. # Define some fairly random parameters. # This should trigger the lambda to start a batch job. # TODO # 1. verify that lambda has started a batch job # 2. kill batch job # 3. delete uploaded pickle | 1.94814 | 2 |
backend/main/views.py | varenius/honte | 1 | 6618179 | import random
from django.http import HttpResponse
from rest_framework import viewsets
from rest_framework.response import Response
from rest_framework.decorators import api_view
from .models import Player
from .models import Game
from .serializers import PlayerSerializer
from .serializers import GameSerializer
from .name_generator import get_name
class PlayerViewSet(viewsets.ModelViewSet):
    """
    API endpoint that allows players to be viewed or edited.
    """
    # Players are listed in ascending rating order.
    queryset = Player.objects.all().order_by('rating')
    serializer_class = PlayerSerializer
class GameViewSet(viewsets.ModelViewSet):
    """
    API endpoint that allows games to be viewed or edited.
    """
    # Default (unordered) queryset over all games.
    queryset = Game.objects.all()
    serializer_class = GameSerializer
def add_player(request):
    """Create a player with a random name, a base rating and a random pin.

    Returns an empty HTTP 200 response.
    """
    player = Player()
    player.first_name, player.last_name = get_name(False)
    player.rating = 1000
    # NOTE(review): randint's upper bound is inclusive, so this can yield
    # the 6-digit pin 100000 -- confirm whether 10000-99999 was intended.
    player.egd_pin = random.randint(10000, 100000)
    player.save()
    # The original passed 200 as the response *body* (HttpResponse(200));
    # return an empty body with an explicit 200 status instead.
    return HttpResponse(status=200)
@api_view(['GET'])
def add_game(request):
    """Create a game between two random players with a random result.

    Picks two players (possibly the same one twice -- random.choices
    samples with replacement), draws a result at random, and assigns a
    winner when the result is a win. Returns the serialized game.
    """
    all_players = Player.objects.values_list('pk', flat=True)
    player1_pk, player2_pk = random.choices(population=all_players, k=2)
    player1 = Player.objects.get(pk=player1_pk)
    player2 = Player.objects.get(pk=player2_pk)
    # Game.Results.choices yields (value, label) pairs; keep the value.
    result_id, _ = random.choice(Game.Results.choices)
    winner = None
    if result_id == Game.Results.WON:
        winner = random.choice([player1, player2])
    # Bug fix: the original saved the whole (value, label) tuple into the
    # `result` field; the model field expects just the choice value.
    game = Game.objects.create(player1=player1, player2=player2,
                               result=result_id, winner=winner)
    return Response(GameSerializer(game, context={'request': request}).data)
| import random
from django.http import HttpResponse
from rest_framework import viewsets
from rest_framework.response import Response
from rest_framework.decorators import api_view
from .models import Player
from .models import Game
from .serializers import PlayerSerializer
from .serializers import GameSerializer
from .name_generator import get_name
class PlayerViewSet(viewsets.ModelViewSet):
"""
API endpoint that allows players to be viewed or edited.
"""
queryset = Player.objects.all().order_by('rating')
serializer_class = PlayerSerializer
class GameViewSet(viewsets.ModelViewSet):
"""
API endpoint that allows games to be viewed or edited.
"""
queryset = Game.objects.all()
serializer_class = GameSerializer
def add_player(request):
player = Player()
player.first_name, player.last_name = get_name(False)
player.rating = 1000
player.egd_pin = random.randint(10000, 100000)
player.save()
return HttpResponse(200)
@api_view(['GET'])
def add_game(request):
all_players = Player.objects.values_list('pk', flat=True)
player1_pk, player2_pk = random.choices(population=all_players, k=2)
player1 = Player.objects.get(pk=player1_pk)
player2 = Player.objects.get(pk=player2_pk)
result=random.choice(Game.Results.choices)
result_id, _ = result
winner = None
if result_id == Game.Results.WON:
winner = random.choice([player1, player2])
game = Game.objects.create(player1=player1, player2=player2, result=result, winner=winner)
return Response(GameSerializer(game, context={'request': request}).data)
| en | 0.953595 | API endpoint that allows players to be viewed or edited. API endpoint that allows games to be viewed or edited. | 2.712292 | 3 |
train.py | Devanshu-singh-VR/Covid19-CT-ImageSegmentation | 0 | 6618180 | import torch
import torch.nn as nn
from torch.utils.data import DataLoader
import albumentations as A
import cv2
from data import COVIData
from model_v2 import Unet
from albumentations.pytorch import ToTensorV2
import torch.optim as optim
import matplotlib.pyplot as plt
# Hyper-parameters
learning_rate = 0.001
epochs = 200
batch_size = 50
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
image_path = '/kaggle/input/covid-segmentation/images_medseg.npy'
mask_path = '/kaggle/input/covid-segmentation/masks_medseg.npy'
out_channels = 4
# model
model = Unet(1, out_channels).to(device)
loss_f = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=learning_rate)
scaler = torch.cuda.amp.GradScaler()
# Albumentations transformation
transforms = A.Compose(
[
A.Resize(width=100, height=100),
#A.Normalize(mean=[0], std=[1], max_pixel_value=255.0),
ToTensorV2()
]
)
# for testing
test = image_test[1]
mask = mask_test[1]
augmentation = transforms(image=test, mask=mask)
test_image = augmentation['image'].unsqueeze(0)
test_mask = augmentation['mask'].permute(2, 0, 1).unsqueeze(0)
# load the dataset
dataset = COVIData(image_path, mask_path, transforms=transforms)
loader = DataLoader(dataset, batch_size=batch_size, shuffle=True, pin_memory=True)
# training baby
for epoch in range(epochs):
print(f'Epochs [{epoch}/{epochs}]')
losses = []
# Testing
model.eval()
pred = model(test_image.to(device))
pred = pred[0].permute(1, 2, 0).to('cpu').detach().numpy()
image = test_image[0].permute(1, 2, 0)
plt.imshow(image)
print(image.shape)
plt.show()
mask = test_mask[0].clone().permute(1, 2 ,0)
mask[..., 0] = mask[..., 0]*255
mask[..., 1] = mask[..., 1]*255
mask[..., 2] = mask[..., 2]*255
mask[..., 3] = mask[..., 3]*255
plt.imshow(mask[..., 1:4])
plt.show()
mask = np.expand_dims(np.argmax(pred, axis=2), axis=2) * 85
print(mask.shape)
#mask[..., 0] = mask[..., 0]*255
#mask[..., 1] = mask[..., 1]*255
#mask[..., 2] = mask[..., 2]*255
#mask[..., 3] = mask[..., 3]*255
plt.imshow(mask)
plt.show()
model.train()
for batch_idx, (train, label) in enumerate(loader):
train = train.to(device)
label = label.to(device).permute(0, 3, 1, 2)
with torch.cuda.amp.autocast():
score = model(train)
# reshaping for cross entropy loss
score = score.reshape(score.shape[0], out_channels, -1)
label = label.argmax(dim=1).reshape(score.shape[0], -1)
# loss value
loss = loss_f(score, label)
optimizer.zero_grad()
scaler.scale(loss).backward()
scaler.step(optimizer)
scaler.update()
losses.append(loss)
print(f'Loss {epoch} = {sum(losses)/len(losses)}')
| import torch
import torch.nn as nn
from torch.utils.data import DataLoader
import albumentations as A
import cv2
from data import COVIData
from model_v2 import Unet
from albumentations.pytorch import ToTensorV2
import torch.optim as optim
import matplotlib.pyplot as plt
# Hyper-parameters
learning_rate = 0.001
epochs = 200
batch_size = 50
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
image_path = '/kaggle/input/covid-segmentation/images_medseg.npy'
mask_path = '/kaggle/input/covid-segmentation/masks_medseg.npy'
out_channels = 4
# model
model = Unet(1, out_channels).to(device)
loss_f = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=learning_rate)
scaler = torch.cuda.amp.GradScaler()
# Albumentations transformation
transforms = A.Compose(
[
A.Resize(width=100, height=100),
#A.Normalize(mean=[0], std=[1], max_pixel_value=255.0),
ToTensorV2()
]
)
# for testing
test = image_test[1]
mask = mask_test[1]
augmentation = transforms(image=test, mask=mask)
test_image = augmentation['image'].unsqueeze(0)
test_mask = augmentation['mask'].permute(2, 0, 1).unsqueeze(0)
# load the dataset
dataset = COVIData(image_path, mask_path, transforms=transforms)
loader = DataLoader(dataset, batch_size=batch_size, shuffle=True, pin_memory=True)
# training baby
for epoch in range(epochs):
print(f'Epochs [{epoch}/{epochs}]')
losses = []
# Testing
model.eval()
pred = model(test_image.to(device))
pred = pred[0].permute(1, 2, 0).to('cpu').detach().numpy()
image = test_image[0].permute(1, 2, 0)
plt.imshow(image)
print(image.shape)
plt.show()
mask = test_mask[0].clone().permute(1, 2 ,0)
mask[..., 0] = mask[..., 0]*255
mask[..., 1] = mask[..., 1]*255
mask[..., 2] = mask[..., 2]*255
mask[..., 3] = mask[..., 3]*255
plt.imshow(mask[..., 1:4])
plt.show()
mask = np.expand_dims(np.argmax(pred, axis=2), axis=2) * 85
print(mask.shape)
#mask[..., 0] = mask[..., 0]*255
#mask[..., 1] = mask[..., 1]*255
#mask[..., 2] = mask[..., 2]*255
#mask[..., 3] = mask[..., 3]*255
plt.imshow(mask)
plt.show()
model.train()
for batch_idx, (train, label) in enumerate(loader):
train = train.to(device)
label = label.to(device).permute(0, 3, 1, 2)
with torch.cuda.amp.autocast():
score = model(train)
# reshaping for cross entropy loss
score = score.reshape(score.shape[0], out_channels, -1)
label = label.argmax(dim=1).reshape(score.shape[0], -1)
# loss value
loss = loss_f(score, label)
optimizer.zero_grad()
scaler.scale(loss).backward()
scaler.step(optimizer)
scaler.update()
losses.append(loss)
print(f'Loss {epoch} = {sum(losses)/len(losses)}')
| en | 0.487507 | # Hyper-parameters # model # Albumentations transformation #A.Normalize(mean=[0], std=[1], max_pixel_value=255.0), # for testing # load the dataset # training baby # Testing #mask[..., 0] = mask[..., 0]*255 #mask[..., 1] = mask[..., 1]*255 #mask[..., 2] = mask[..., 2]*255 #mask[..., 3] = mask[..., 3]*255 # reshaping for cross entropy loss # loss value | 2.221254 | 2 |
data_structures_and_algorithms/data_structures/sorting_algorithms/insertion_sort.py | aghyadalbalkhi-ASAC/data-structures-and-algorithms-python-401 | 0 | 6618181 | <reponame>aghyadalbalkhi-ASAC/data-structures-and-algorithms-python-401
def insertionSort(arr):
    """Sort ``arr`` in place with insertion sort and return it.

    Raises:
        AttributeError: if ``arr`` is empty (preserved for backward
        compatibility with callers that rely on this behaviour).
    """
    if len(arr) == 0:
        raise AttributeError('Array is empty')
    # arr[0] is trivially sorted, so start from index 1 (the original
    # started at 0, doing a useless self-assignment on the first pass).
    for i in range(1, len(arr)):
        temp = arr[i]  # first unsorted element
        j = i - 1      # last index of the sorted prefix
        # Shift sorted elements one slot right while they exceed temp.
        while j >= 0 and temp < arr[j]:
            arr[j + 1] = arr[j]
            j -= 1
        # Drop temp into the hole opened by the shifting.
        arr[j + 1] = temp
    return arr


if __name__ == '__main__':
    arr = [10, 77, 23, 55, 2]
    print(insertionSort(arr))
# loop through array elements [from 1 to last element]
if len(arr) == 0:
raise AttributeError ('Array is empty')
for i in range(len(arr)):
#point to the last sorted element
j = i-1
# the first unsorted element
temp = arr[i]
# shift element while the unsorted element less than current sorted element
while j >= 0 and temp < arr[j] :
arr[j + 1] = arr[j]
j -= 1
# replace the value of current element with unsorted one
arr[j + 1] = temp
return arr
if __name__ == '__main__':
arr = [10,77,23,55,2]
print(insertionSort(arr)) | en | 0.687072 | # loop through array elements [from 1 to last element] #point to the last sorted element # the first unsorted element # shift element while the unsorted element less than current sorted element # replace the value of current element with unsorted one | 4.400147 | 4 |
config.py | slmtpz/Cafemium | 0 | 6618182 | import urllib
import os
MONGO = {
'USERNAME': os.environ['MONGO_USERNAME'],
'PASSWORD': urllib.parse.quote(os.environ['MONGO_PASSWORD']),
'HOSTPORT': os.environ['MONGO_HOSTPORT'],
'DATABASE': os.environ['MONGO_DATABASE']
}
MONGO_URI = "mongodb://%s:%s@%s/%s" % (MONGO['USERNAME'], MONGO['PASSWORD'], MONGO['HOSTPORT'], MONGO['DATABASE'])
| import urllib
import os
MONGO = {
'USERNAME': os.environ['MONGO_USERNAME'],
'PASSWORD': urllib.parse.quote(os.environ['MONGO_PASSWORD']),
'HOSTPORT': os.environ['MONGO_HOSTPORT'],
'DATABASE': os.environ['MONGO_DATABASE']
}
MONGO_URI = "mongodb://%s:%s@%s/%s" % (MONGO['USERNAME'], MONGO['PASSWORD'], MONGO['HOSTPORT'], MONGO['DATABASE'])
| none | 1 | 2.346791 | 2 | |
py-impl/ramp_client.py | santhnm2/ramp-sigmod2014-code | 35 | 6618183 |
from bloom_filter import BloomFilter
from collections import defaultdict
from data_item import DataItem
from ramp_server import Partition, RAMPAlgorithm
BLOOM_FILTER_SIZE = 20
BLOOM_FILTER_HASHES = 4
class Client:
    """RAMP client coordinating reads/writes across partition servers.

    Implements the client side of the three RAMP variants (Fast, Small,
    Hybrid) against the ``Partition`` servers in ``self.partitions``.
    """

    def __init__(self, id, partitions, algorithm):
        # The client id occupies the low 10 bits of every timestamp,
        # so it must fit in 10 bits.
        assert(id < 1024)
        self.id = id
        self.sequence_number = 0
        self.partitions = partitions
        self.algorithm = algorithm

    def key_to_partition(self, key):
        """Return the partition responsible for ``key`` (hash partitioning)."""
        return self.partitions[hash(key) % len(self.partitions)]

    def next_timestamp(self):
        """Return a fresh, per-client-unique timestamp.

        Layout: sequence number in the high bits, client id in the low 10
        bits.  The original ``self.sequence_number << 10 + self.id`` shifted
        by ``10 + id`` because ``+`` binds tighter than ``<<``, producing
        id-dependent gaps and possible cross-client collisions.
        """
        self.sequence_number += 1
        return (self.sequence_number << 10) + self.id

    def put_all(self, kvps):
        """Write all key/value pairs in ``kvps`` as one read-atomic transaction."""
        timestamp = self.next_timestamp()
        txn_keys = None
        if self.algorithm == RAMPAlgorithm.Fast:
            # RAMP-Fast attaches the full write set as item metadata.
            txn_keys = kvps.keys()
        bloom_filter = None
        if self.algorithm == RAMPAlgorithm.Hybrid:
            # RAMP-Hybrid attaches a Bloom filter of the write set instead.
            bloom_filter = BloomFilter(BLOOM_FILTER_SIZE, BLOOM_FILTER_HASHES)
            bloom_filter.list_to_bloom(kvps.keys())
        # Two-phase write: prepare on every partition, then commit everywhere.
        for key in kvps:
            self.key_to_partition(key).prepare(key,
                                               DataItem(kvps[key],
                                                        timestamp,
                                                        txn_keys,
                                                        bloom_filter),
                                               timestamp)
        for key in kvps:
            self.key_to_partition(key).commit(key, timestamp)

    def get_all(self, keys):
        """Read ``keys`` read-atomically and return {key: plain value}."""
        results = self.get_all_items(keys)
        # Strip the DataItem metadata; callers only see the raw values.
        for key in results:
            if results[key]:
                results[key] = results[key].value
        return results

    def get_all_items(self, keys):
        """Read ``keys`` read-atomically, returning raw DataItems.

        Dispatches on the configured RAMP variant; each variant does a first
        round of reads and then a repair round for any fractured reads.
        """
        if self.algorithm == RAMPAlgorithm.Fast:
            results = {}
            for key in keys:
                results[key] = self.key_to_partition(key).getRAMPFast(key, None)
            # vlatest[k] = highest commit timestamp any sibling write saw for k.
            vlatest = defaultdict(lambda: -1)
            for value in results.values():
                if value is None:
                    continue
                for tx_key in value.txn_keys:
                    if vlatest[tx_key] < value.timestamp:
                        vlatest[tx_key] = value.timestamp
            # Second round: re-fetch any key whose first read is older than
            # a sibling write from the same transaction.
            for key in keys:
                if key in vlatest and (results[key] is None or
                                       results[key].timestamp < vlatest[key]):
                    results[key] = self.key_to_partition(key).getRAMPFast(key, vlatest[key])
            return results
        elif self.algorithm == RAMPAlgorithm.Small:
            # First round collects the latest committed timestamps; the
            # second round reads with that timestamp set.
            ts_set = set()
            for key in keys:
                last_commit = self.key_to_partition(key).getRAMPSmall(key, None)
                if last_commit:
                    ts_set.add(last_commit)
            results = {}
            for key in keys:
                results[key] = self.key_to_partition(key).getRAMPSmall(key, ts_set)
            return results
        elif self.algorithm == RAMPAlgorithm.Hybrid:
            results = {}
            for key in keys:
                results[key] = self.key_to_partition(key).getRAMPHybrid(key, None)
            # Per key, gather timestamps newer than our first read and ask
            # the partition for a matching (possibly fresher) version.
            for key in keys:
                current_result = results[key]
                key_ts_set = set()
                for value in results.values():
                    if value and (not current_result or value.timestamp > current_result.timestamp):
                        key_ts_set.add(value.timestamp)
                if len(key_ts_set) > 0:
                    second_round_result = self.key_to_partition(key).getRAMPHybrid(key,
                                                                                   key_ts_set)
                    if second_round_result:
                        results[key] = second_round_result
            return results
|
from bloom_filter import BloomFilter
from collections import defaultdict
from data_item import DataItem
from ramp_server import Partition, RAMPAlgorithm
BLOOM_FILTER_SIZE = 20
BLOOM_FILTER_HASHES = 4
class Client:
def __init__(self, id, partitions, algorithm):
assert(id < 1024)
self.id = id
self.sequence_number = 0
self.partitions = partitions
self.algorithm = algorithm
def key_to_partition(self, key):
return self.partitions[hash(key) % len(self.partitions)]
def next_timestamp(self):
self.sequence_number += 1
return self.sequence_number << 10 + self.id
def put_all(self, kvps):
timestamp = self.next_timestamp()
txn_keys = None
if self.algorithm == RAMPAlgorithm.Fast:
txn_keys = kvps.keys()
bloom_filter = None
if self.algorithm == RAMPAlgorithm.Hybrid:
bloom_filter = BloomFilter(BLOOM_FILTER_SIZE, BLOOM_FILTER_HASHES)
bloom_filter.list_to_bloom(kvps.keys())
for key in kvps:
self.key_to_partition(key).prepare(key,
DataItem(kvps[key],
timestamp,
txn_keys,
bloom_filter),
timestamp)
for key in kvps:
self.key_to_partition(key).commit(key, timestamp)
def get_all(self, keys):
results = self.get_all_items(keys)
# remove metadata
for key in results:
if results[key]:
results[key] = results[key].value
return results
def get_all_items(self, keys):
if self.algorithm == RAMPAlgorithm.Fast:
results = {}
for key in keys:
results[key] = self.key_to_partition(key).getRAMPFast(key, None)
vlatest = defaultdict(lambda: -1)
for value in results.values():
if value == None:
continue
for tx_key in value.txn_keys:
if vlatest[tx_key] < value.timestamp:
vlatest[tx_key] = value.timestamp
for key in keys:
if key in vlatest and (results[key] == None or
results[key].timestamp < vlatest[key]):
results[key] = self.key_to_partition(key).getRAMPFast(key, vlatest[key])
return results
elif self.algorithm == RAMPAlgorithm.Small:
ts_set = set()
for key in keys:
last_commit = self.key_to_partition(key).getRAMPSmall(key, None)
if last_commit:
ts_set.add(last_commit)
results = {}
for key in keys:
results[key] = self.key_to_partition(key).getRAMPSmall(key, ts_set)
return results
elif self.algorithm == RAMPAlgorithm.Hybrid:
results = {}
for key in keys:
results[key] = self.key_to_partition(key).getRAMPHybrid(key, None)
for key in keys:
current_result = results[key]
key_ts_set = set()
for value in results.values():
if value and (not current_result or value.timestamp > current_result.timestamp):
key_ts_set.add(value.timestamp)
if len(key_ts_set) > 0:
second_round_result = self.key_to_partition(key).getRAMPHybrid(key,
key_ts_set)
if second_round_result:
results[key] = second_round_result
return results
| ar | 0.084632 | # remove metadata | 2.252585 | 2 |
core/pbm/models.py | PanDAWMS/panda-bigmon-core-new | 3 | 6618184 | """
pbm.models
"""
from django.db import models
class DailyLog(models.Model):
    """One row of daily aggregated job statistics.

    Each record counts job definitions and jobs for a given date, broken
    down by category, site, cloud, user DN, country and jobset.  Backed by
    the legacy ``dailylogv3`` table.
    """
    dailylogid = models.BigIntegerField(null=False, db_column='DAILYLOGID', blank=True)
    logdate = models.DateField(null=False, db_column='LOGDATE', blank=True)
    # Short category code, at most 3 characters.
    category = models.CharField(max_length=3, db_column='CATEGORY', blank=True, null=False)
    site = models.CharField(max_length=300, db_column='SITE', blank=True, null=True)
    cloud = models.CharField(max_length=300, db_column='CLOUD', blank=True, null=True)
    # Distinguished name of the submitting user.
    dnuser = models.CharField(max_length=300, db_column='DNUSER', blank=True, null=True)
    # Aggregated daily counters.
    jobdefcount = models.BigIntegerField(db_column='JOBDEFCOUNT')
    jobcount = models.BigIntegerField(db_column='JOBCOUNT')
    country = models.CharField(max_length=300, db_column='COUNTRY', blank=True, null=True)
    jobset = models.CharField(max_length=300, db_column='JOBSET', blank=True, null=True)

    class Meta:
        app_label = 'pbm'
        db_table = u'dailylogv3'
| """
pbm.models
"""
from django.db import models
class DailyLog(models.Model):
dailylogid = models.BigIntegerField(null=False, db_column='DAILYLOGID', blank=True)
logdate = models.DateField(null=False, db_column='LOGDATE', blank=True)
category = models.CharField(max_length=3, db_column='CATEGORY', blank=True, null=False)
site = models.CharField(max_length=300, db_column='SITE', blank=True, null=True)
cloud = models.CharField(max_length=300, db_column='CLOUD', blank=True, null=True)
dnuser = models.CharField(max_length=300, db_column='DNUSER', blank=True, null=True)
jobdefcount = models.BigIntegerField(db_column='JOBDEFCOUNT')
jobcount = models.BigIntegerField(db_column='JOBCOUNT')
country = models.CharField(max_length=300, db_column='COUNTRY', blank=True, null=True)
jobset = models.CharField(max_length=300, db_column='JOBSET', blank=True, null=True)
class Meta:
app_label = 'pbm'
db_table = u'dailylogv3'
| ca | 0.288952 | pbm.models | 2.256466 | 2 |
basic/node.py | oltionzefi/daily-coding-problem | 0 | 6618185 | class Node:
def __init__(self, data, left=None, right=None):
self.data = data
self.left = left
self.right = right
def inorder_traversal(node):
    """Print the tree's values in-order (left, root, right), space-separated."""
    pending = []
    current = node
    while pending or current is not None:
        # Descend as far left as possible, remembering the path.
        while current is not None:
            pending.append(current)
            current = current.left
        current = pending.pop()
        print(current.data, end=" ")
        current = current.right
| class Node:
def __init__(self, data, left=None, right=None):
self.data = data
self.left = left
self.right = right
def inorder_traversal(node):
if not node:
return
inorder_traversal(node.left)
print(node.data, end=" ")
inorder_traversal(node.right)
| none | 1 | 3.923115 | 4 | |
lakey_finicity/resources/institutions.py | jeremydeanlakey/lakey-finicity-python | 1 | 6618186 | from typing import Optional
from lakey_finicity.api_http_client import ApiHttpClient
from lakey_finicity.models import Institution
from lakey_finicity.queries.institutions_query import InstitutionsQuery
from lakey_finicity.responses.institution_detail_response import InstitutionDetailResponse
class Institutions(object):
    """Thin wrapper around the Finicity institution endpoints."""

    def __init__(self, http_client: ApiHttpClient):
        self.__http_client = http_client

    def query(self, search_term: Optional[str] = None) -> InstitutionsQuery:
        """Build a lazily-evaluated institutions query.

        :param search_term: search text to match against the name, urlHomeApp, or urlLogonApp
        :return:
        """
        return InstitutionsQuery(self.__http_client, search_term)

    # https://community.finicity.com/s/article/Get-Institution
    def get(self, institution_id: str) -> Institution:
        """Get details for the specified institution without the login form.

        :param institution_id: ID of the institution to retrieve
        :return:
        """
        response = self.__http_client.get(f"/institution/v2/institutions/{institution_id}")
        return InstitutionDetailResponse.from_dict(response.json()).institution
| from typing import Optional
from lakey_finicity.api_http_client import ApiHttpClient
from lakey_finicity.models import Institution
from lakey_finicity.queries.institutions_query import InstitutionsQuery
from lakey_finicity.responses.institution_detail_response import InstitutionDetailResponse
class Institutions(object):
def __init__(self, http_client: ApiHttpClient):
self.__http_client = http_client
def query(self, search_term: Optional[str] = None) -> InstitutionsQuery:
"""
:param search_term: search text to match against the name, urlHomeApp, or urlLogonApp
:return:
"""
return InstitutionsQuery(self.__http_client, search_term)
# https://community.finicity.com/s/article/Get-Institution
def get(self, institution_id: str) -> Institution:
"""Get details for the specified institution without the login form.
:param institution_id: ID of the institution to retrieve
:return:
"""
path = f"/institution/v2/institutions/{institution_id}"
response = self.__http_client.get(path)
response_dict = response.json()
return InstitutionDetailResponse.from_dict(response_dict).institution
| en | 0.684318 | :param search_term: search text to match against the name, urlHomeApp, or urlLogonApp :return: # https://community.finicity.com/s/article/Get-Institution Get details for the specified institution without the login form. :param institution_id: ID of the institution to retrieve :return: | 2.733891 | 3 |
nornir/failed_tasks/failed_task.py | twin-bridges/pynet-ons | 1 | 6618187 | <gh_stars>1-10
from nornir import InitNornir
from nornir.core.exceptions import NornirExecutionError
from nornir.plugins.tasks.networking import netmiko_send_command
if __name__ == "__main__":
import ipdb
ipdb.set_trace()
nr = InitNornir(config_file="config.yaml")
aggr_result = nr.run(task=netmiko_send_command, command_string="show configuration")
print(aggr_result.failed)
print(aggr_result.failed_hosts.keys())
vmx1 = aggr_result.failed_hosts["vmx1"]
print(vmx1.exception)
try:
aggr_result.raise_on_error()
except NornirExecutionError:
print("We can cause this exception to be raised")
| from nornir import InitNornir
from nornir.core.exceptions import NornirExecutionError
from nornir.plugins.tasks.networking import netmiko_send_command
if __name__ == "__main__":
import ipdb
ipdb.set_trace()
nr = InitNornir(config_file="config.yaml")
aggr_result = nr.run(task=netmiko_send_command, command_string="show configuration")
print(aggr_result.failed)
print(aggr_result.failed_hosts.keys())
vmx1 = aggr_result.failed_hosts["vmx1"]
print(vmx1.exception)
try:
aggr_result.raise_on_error()
except NornirExecutionError:
print("We can cause this exception to be raised") | none | 1 | 2.137326 | 2 | |
webapp/element43/apps/feedreader/models.py | Ososope/eve_online | 0 | 6618188 | from django.db import models
#
# Newsfeed
#
class Feed(models.Model):
"""
Holds information about a news-feed which gets updated regularly by a Celery task.
"""
url = models.URLField(help_text='Newsfeed URL')
name = models.CharField(help_text='Name of the feed', max_length=100)
icon_file = models.CharField(help_text="Name of the feed's icon file", max_length=100)
next_update = models.DateTimeField(help_text='Timestamp for next update')
class Meta(object):
verbose_name = "Newsfeed"
verbose_name_plural = "Newsfeeds"
#
# News Item
#
class FeedItem(models.Model):
"""
Holds information about a news item in a news-feed.
"""
feed = models.ForeignKey('feedreader.Feed', help_text='FKey relationship to feed table')
title = models.CharField(help_text='Title of the item', max_length=100)
description = models.TextField(help_text='Short description of the item')
link = models.URLField(help_text='Link to the text')
published = models.DateTimeField(help_text='Time the item was published')
class Meta(object):
verbose_name = "Feed Item"
verbose_name_plural = "Feed Items"
| from django.db import models
#
# Newsfeed
#
class Feed(models.Model):
"""
Holds information about a news-feed which gets updated regularly by a Celery task.
"""
url = models.URLField(help_text='Newsfeed URL')
name = models.CharField(help_text='Name of the feed', max_length=100)
icon_file = models.CharField(help_text="Name of the feed's icon file", max_length=100)
next_update = models.DateTimeField(help_text='Timestamp for next update')
class Meta(object):
verbose_name = "Newsfeed"
verbose_name_plural = "Newsfeeds"
#
# News Item
#
class FeedItem(models.Model):
"""
Holds information about a news item in a news-feed.
"""
feed = models.ForeignKey('feedreader.Feed', help_text='FKey relationship to feed table')
title = models.CharField(help_text='Title of the item', max_length=100)
description = models.TextField(help_text='Short description of the item')
link = models.URLField(help_text='Link to the text')
published = models.DateTimeField(help_text='Time the item was published')
class Meta(object):
verbose_name = "Feed Item"
verbose_name_plural = "Feed Items"
| en | 0.864616 | # # Newsfeed # Holds information about a news-feed which gets updated regularly by a Celery task. # # News Item # Holds information about a news item in a news-feed. | 2.509565 | 3 |
test/__init__.py | CriimBow/VIA4CVE | 109 | 6618189 | <filename>test/__init__.py
import traceback
# Unit-test fixtures: for each source collection, one known CVE plus a
# dotted lookup path into the merged CVE document ('%' means "iterate the
# list at this level") and the value expected at that path.
tests = {'D2sec': {'cve': 'CVE-2009-3534', 'key': "d2sec.%.name", 'val': "LionWiki 3.0.3 LFI"},
         'ExploitDB': {'cve': 'CVE-2009-4186', 'key': "exploit-db.%.id", 'val': "10102"},
         'IAVM': {'cve': 'CVE-2007-0214', 'key': "iavm.id", 'val': "2007-A-0014"},
         'MSBulletin': {'cve': 'CVE-2016-7241', 'key': "msbulletin.%.bulletin_id", 'val': "MS16-142"},
         'OVAL': {'cve': 'CVE-2007-5730', 'key': "oval.%.id", 'val': "oval:org.mitre.oval:def:10000"},
         'RedHatInfo': {'cve': 'CVE-2003-0858', 'key': "redhat.advisories.%.rhsa.id", 'val': "RHSA-2003:315"},
         'Saint': {'cve': 'CVE-2006-6183', 'key': "saint.%.id", 'val': "ftp_3cservertftp"},
         'VendorStatements': {'cve': 'CVE-1999-0524', 'key': "statements.%.contributor", 'val': "<NAME>"},
         'VMWare': {'cve': 'CVE-2015-5177', 'key': "vmware.%.id", 'val': "VMSA-2015-0007"},
         }
# Module-level verbosity default; not read below (callers pass `verbose`
# explicitly) -- presumably kept for external use, TODO confirm.
_verbose = False
def testAll(cves, testdata, verbose):
    """Run every configured unit test and print an overall summary."""
    failed_tests = set()
    for name, data in testdata.items():
        passed = test(cves, name, data['cve'], data['key'], data['val'], verbose)
        if not passed:
            failed_tests.add(name)
    if verbose:
        return
    if failed_tests:
        print("[-] Some unit tests failed!")
        for failure in failed_tests:
            print(" -> %s" % failure)
    else:
        print("[+] All tests successful")
def test(cves, collection, cve, key, val, verbose):
successful = False
def check_level(_map, key, val):
global successful
if type(key) == str: key = key.split(".")
for level, part in enumerate(key):
if level == len(key)-1:
if part == '%':
for item in _map:
if item == val: successful = True
else:
if _map[part] == val: successful = True
if part != "%": _map = _map[part]
else:
for item in _map:
check_level(item, key[level+1:], val)
break
return successful
try:
if check_level(cves[cve], key, val):
if verbose: print("[+] %s test succeeded!"%collection)
return True
else:
if verbose: print("[-] %s test not succesful!"%collection)
except Exception as e:
if verbose:
print("[-] %s test failed! %s"%(collection, e))
traceback.print_exc()
return False
| <filename>test/__init__.py
import traceback
tests = {'D2sec': {'cve': 'CVE-2009-3534', 'key': "d2sec.%.name", 'val': "LionWiki 3.0.3 LFI"},
'ExploitDB': {'cve': 'CVE-2009-4186', 'key': "exploit-db.%.id", 'val': "10102"},
'IAVM': {'cve': 'CVE-2007-0214', 'key': "iavm.id", 'val': "2007-A-0014"},
'MSBulletin': {'cve': 'CVE-2016-7241', 'key': "msbulletin.%.bulletin_id", 'val': "MS16-142"},
'OVAL': {'cve': 'CVE-2007-5730', 'key': "oval.%.id", 'val': "oval:org.mitre.oval:def:10000"},
'RedHatInfo': {'cve': 'CVE-2003-0858', 'key': "redhat.advisories.%.rhsa.id", 'val': "RHSA-2003:315"},
'Saint': {'cve': 'CVE-2006-6183', 'key': "saint.%.id", 'val': "ftp_3cservertftp"},
'VendorStatements': {'cve': 'CVE-1999-0524', 'key': "statements.%.contributor", 'val': "<NAME>"},
'VMWare': {'cve': 'CVE-2015-5177', 'key': "vmware.%.id", 'val': "VMSA-2015-0007"},
}
_verbose = False
def testAll(cves, testdata, verbose):
failed_tests = set()
for name, data in testdata.items():
if not test(cves, name, data['cve'], data['key'], data['val'], verbose):
failed_tests.add(name)
if not verbose:
if len(failed_tests) != 0:
print("[-] Some unit tests failed!")
for failure in failed_tests: print(" -> %s"%failure)
else: print("[+] All tests successful")
def test(cves, collection, cve, key, val, verbose):
successful = False
def check_level(_map, key, val):
global successful
if type(key) == str: key = key.split(".")
for level, part in enumerate(key):
if level == len(key)-1:
if part == '%':
for item in _map:
if item == val: successful = True
else:
if _map[part] == val: successful = True
if part != "%": _map = _map[part]
else:
for item in _map:
check_level(item, key[level+1:], val)
break
return successful
try:
if check_level(cves[cve], key, val):
if verbose: print("[+] %s test succeeded!"%collection)
return True
else:
if verbose: print("[-] %s test not succesful!"%collection)
except Exception as e:
if verbose:
print("[-] %s test failed! %s"%(collection, e))
traceback.print_exc()
return False
| none | 1 | 2.113378 | 2 | |
players/GlobalTimeABPlayer.py | MPTG94/AI-HW2 | 0 | 6618190 | """
MiniMax Player with AlphaBeta pruning and global time
"""
import statistics
import time
import numpy as np
from copy import deepcopy
from SearchAlgos import AlphaBeta, GameState, GameUtils
from players.AbstractPlayer import AbstractPlayer
# TODO: you can import more modules, if needed
import utils
class Player(AbstractPlayer):
def __init__(self, game_time):
AbstractPlayer.__init__(self, game_time) # keep the inheritance of the parent's (AbstractPlayer) __init__()
# TODO: initialize more fields, if needed, and the AlphaBeta algorithm from SearchAlgos.py
self.utils = GameUtils
self.game_time = game_time
self.initial_game_time = game_time
self.total_runtime_by_turn = {}
self.runtime_limits = []
def set_game_params(self, board):
"""Set the game parameters needed for this player.
This function is called before the game starts.
(See GameWrapper.py for more info where it is called)
input:
- board: np.array, of the board.
No output is expected.
"""
# TODO: erase the following line and implement this function.
self.board = board
self.prev_board = None
self.my_pos = np.full(9, -1)
self.rival_pos = np.full(9, -1)
self.turn = 0
self.next_depth_limit = np.inf
# Extra time management params
self.initial_balance_factor = (1 / 20)
self.curr_iteration_runtime = self.game_time * self.initial_balance_factor
self.safe_runtime_extension = 0.01
# early: turn >=25
self.phase2_early_extension = 1.2
# late: turn >=45
self.phase2_late_extension = 1.5
self.phase2_large_blocked_num_factor = (1 / 40)
self.phase2_large_dead_num_factor = (1 / 40)
def make_move(self, time_limit):
"""Make move with this Player.
input:
- time_limit: float, time limit for a single turn.
output:
- direction: tuple, specifing the Player's movement
"""
# TODO: erase the following line and implement this function.
print(f'======================== Starting turn {self.turn} =========================')
move_start_time = time.time()
curr_time_limit = self.curr_iteration_runtime
self.runtime_limits.append(curr_time_limit)
state = GameState(deepcopy(self.board), self.prev_board, self.my_pos, self.rival_pos, self.turn,
time.time() + curr_time_limit - self.safe_runtime_extension)
search_algo = AlphaBeta(self.utils.utility_method, self.utils.successor_func, None, self.utils.check_goal)
depth = 1
best_move = (None, None)
while True:
try:
if self.turn < 18 and depth == 5:
break
elif self.turn >= 18 and depth == 7:
break
elif depth > self.next_depth_limit:
break
print(f'Starting depth {depth}, with time limit: {curr_time_limit}')
start_time = time.time()
temp_move = search_algo.search(state, depth, True)
end_time = time.time()
print(f'{depth}: {end_time - start_time}')
if temp_move[1] is not None:
print(f'found move')
best_move = temp_move
try:
self.total_runtime_by_turn[self.turn].append(end_time - start_time)
except KeyError:
self.total_runtime_by_turn[self.turn] = [end_time - start_time]
print(self.total_runtime_by_turn)
else:
# TODO: are we sure this is fine?
print(f'GOT NONE!')
break
except TimeoutError:
break
depth += 1
move = best_move[1]
# ALIVE COUNT
our_dead_count = 9 - len(GameUtils.get_soldier_position_by_player_index(self.board, 1))
rival_dead_count = 9 - len(GameUtils.get_soldier_position_by_player_index(self.board, 2))
# BLOCKED COUNT
our_blocked_count = GameUtils.count_blocked_soldiers_by_player_index(self.board, 1)
rival_blocked_count = GameUtils.count_blocked_soldiers_by_player_index(self.board, 2)
self.prev_board = deepcopy(self.board)
new_state = GameState(self.board, self.prev_board, self.my_pos, self.rival_pos, self.turn,
time.time() + time_limit)
GameUtils.perform_move(new_state, move, 1)
self.turn += 1
# Need to look at the time the current iteration took
curr_iteration_runtime = time.time() - move_start_time
# if self.turn > 18:
# # ALIVE COUNT
# new_our_dead_count = 9 - len(GameUtils.get_soldier_position_by_player_index(new_state.board, 1))
# new_rival_dead_count = 9 - len(GameUtils.get_soldier_position_by_player_index(new_state.board, 2))
#
# # BLOCKED COUNT
# new_our_blocked_count = GameUtils.count_blocked_soldiers_by_player_index(new_state.board, 1)
# new_rival_blocked_count = GameUtils.count_blocked_soldiers_by_player_index(new_state.board, 2)
# if new_rival_blocked_count + new_our_blocked_count >= 6 and \
# new_our_blocked_count + new_rival_blocked_count > our_blocked_count + rival_blocked_count:
# self.curr_iteration_runtime = curr_iteration_runtime + self.game_time * self.phase2_large_blocked_num_factor
# print(f'#1# adjusted time to: {self.curr_iteration_runtime}')
# elif new_rival_blocked_count + new_our_blocked_count < 6 and \
# new_our_blocked_count + new_rival_blocked_count < our_blocked_count + rival_blocked_count:
# self.curr_iteration_runtime = curr_iteration_runtime - self.game_time * self.phase2_large_blocked_num_factor
# print(f'#2# adjusted time to: {self.curr_iteration_runtime}')
# if new_rival_dead_count + new_our_dead_count >= 7 and \
# new_our_dead_count + new_rival_dead_count > our_dead_count + rival_dead_count:
# self.curr_iteration_runtime = curr_iteration_runtime + self.game_time * self.phase2_large_dead_num_factor
# print(f'#3# adjusted time to: {self.curr_iteration_runtime}')
# elif new_rival_dead_count + new_our_dead_count < 7 and \
# new_our_dead_count + new_rival_dead_count < our_dead_count + rival_dead_count:
# self.curr_iteration_runtime = curr_iteration_runtime - self.game_time * self.phase2_large_dead_num_factor
# print(f'#4# adjusted time to: {self.curr_iteration_runtime}')
# else:
# self.curr_iteration_runtime = curr_iteration_runtime
if self.curr_iteration_runtime < self.initial_game_time * self.initial_balance_factor:
if len(self.total_runtime_by_turn[0]) > 1:
self.curr_iteration_runtime = self.total_runtime_by_turn[0][1] * 50
move_end_time = time.time()
# Update remaining game time
self.game_time -= move_end_time - move_start_time
if self.game_time > 100:
self.curr_iteration_runtime = 10
if 50 < self.game_time < 100:
self.curr_iteration_runtime = 5
if 35 < self.game_time < 50:
self.curr_iteration_runtime = 2.5
if 10 < self.game_time < 35:
self.curr_iteration_runtime = 1
if 5 < self.game_time < 10:
self.curr_iteration_runtime = 0.5
if self.game_time < 5:
self.curr_iteration_runtime = 0.3
if self.game_time < 1:
self.curr_iteration_runtime = 0.032
current_turn_num = self.turn - 1
# if len(self.total_runtime_by_turn[current_turn_num]) > 3 and self.total_runtime_by_turn[current_turn_num][
# 3] * 30 < self.game_time < self.total_runtime_by_turn[current_turn_num][
# 3] * 70:
# self.next_depth_limit = 4
# if len(self.total_runtime_by_turn[current_turn_num]) > 2 and self.total_runtime_by_turn[current_turn_num][
# 2] * 30 < self.game_time < self.total_runtime_by_turn[current_turn_num][
# 2] * 70:
# self.next_depth_limit = 3
# if len(self.total_runtime_by_turn[current_turn_num]) > 1 and self.total_runtime_by_turn[current_turn_num][
# 1] * 30 < self.game_time < self.total_runtime_by_turn[current_turn_num][
# 1] * 70:
# self.next_depth_limit = 2
# else:
# self.next_depth_limit = 1
# print(self.runtime_limits)
print(f'Time remaining: {self.initial_game_time - self.game_time}')
return move
def set_rival_move(self, move):
    """Update your info, given the new position of the rival.
    input:
        - move: tuple (rival_pos, rival_soldier, my_dead_pos); my_dead_pos is
          -1 when none of our soldiers was removed this turn.
    No output is expected
    """
    # TODO: erase the following line and implement this function.
    rival_pos, rival_soldier, my_dead_pos = move
    if self.turn < 18:
        # Currently, still in the first part of the game (placement phase:
        # each side drops its 9 soldiers during the first 18 turns).
        # Update the board to include the new enemy soldier
        self.board[rival_pos] = 2
        # In the array containing the positions of all enemy soldiers, put in the index of the new soldier,
        # it's position on the board
        self.rival_pos[rival_soldier] = rival_pos
    else:
        # Now in the second part of the game: the soldier moved, so vacate
        # its previous square before occupying the new one.
        rival_prev_pos = self.rival_pos[rival_soldier]
        self.board[rival_prev_pos] = 0
        self.board[rival_pos] = 2
        self.rival_pos[rival_soldier] = rival_pos
        if my_dead_pos != -1:
            # The enemy player has killed one of our soldiers
            self.board[my_dead_pos] = 0
            # Get from the board the index of the killed soldier
            dead_soldier = int(np.where(self.my_pos == my_dead_pos)[0][0])
            # Mark our killed soldier as dead (-2) in our soldiers array
            self.my_pos[dead_soldier] = -2
    self.turn += 1
########## helper functions in class ##########
# TODO: add here helper functions in class, if needed
def calculate_actual_turn_runtime(self):
    """Return the total search time recorded for the current turn.

    Sums the per-depth runtimes logged in total_runtime_by_turn[self.turn].
    Raises KeyError when nothing was recorded for this turn (same behavior
    as the original loop).
    """
    # Use the builtin sum() instead of an accumulator named `sum`,
    # which shadowed the builtin in the original.
    return sum(self.total_runtime_by_turn[self.turn])
########## helper functions for AlphaBeta algorithm ##########
# TODO: add here the utility, succ, and perform_move functions used in AlphaBeta algorithm
| """
MiniMax Player with AlphaBeta pruning and global time
"""
import statistics
import time
import numpy as np
from copy import deepcopy
from SearchAlgos import AlphaBeta, GameState, GameUtils
from players.AbstractPlayer import AbstractPlayer
# TODO: you can import more modules, if needed
import utils
class Player(AbstractPlayer):
def __init__(self, game_time):
    """Create the player with a global time budget of `game_time` seconds."""
    AbstractPlayer.__init__(self, game_time)  # keep the inheritance of the parent's (AbstractPlayer) __init__()
    # TODO: initialize more fields, if needed, and the AlphaBeta algorithm from SearchAlgos.py
    self.utils = GameUtils  # static game helpers used by the search
    self.game_time = game_time  # remaining global clock, decremented after every move
    self.initial_game_time = game_time  # original budget, kept for ratio-based budgeting
    self.total_runtime_by_turn = {}  # turn number -> list of per-depth search runtimes
    self.runtime_limits = []  # per-turn budgets actually used (debug/telemetry)
def set_game_params(self, board):
    """Set the game parameters needed for this player.
    This function is called before the game starts.
    (See GameWrapper.py for more info where it is called)
    input:
        - board: np.array, of the board.
    No output is expected.
    """
    # TODO: erase the following line and implement this function.
    self.board = board
    self.prev_board = None
    # 9 slots per side: each value is that soldier's board position,
    # -1 = not placed yet, -2 = dead (see set_rival_move).
    self.my_pos = np.full(9, -1)
    self.rival_pos = np.full(9, -1)
    self.turn = 0
    self.next_depth_limit = np.inf  # optional cap on iterative-deepening depth
    # Extra time management params
    self.initial_balance_factor = (1 / 20)  # first turn gets 1/20 of the clock
    self.curr_iteration_runtime = self.game_time * self.initial_balance_factor
    self.safe_runtime_extension = 0.01  # margin subtracted from each search deadline
    # early: turn >=25
    self.phase2_early_extension = 1.2
    # late: turn >=45
    self.phase2_late_extension = 1.5
    # NOTE(review): the two extension factors above are not referenced in the
    # visible code, and the two factors below are used only by commented-out
    # adaptive-time logic in make_move.
    self.phase2_large_blocked_num_factor = (1 / 40)
    self.phase2_large_dead_num_factor = (1 / 40)
def make_move(self, time_limit):
    """Make move with this Player using iterative-deepening AlphaBeta.
    input:
        - time_limit: float, time limit for a single turn.
    output:
        - direction: tuple, specifying the Player's movement
    """
    # TODO: erase the following line and implement this function.
    print(f'======================== Starting turn {self.turn} =========================')
    move_start_time = time.time()
    # Per-turn budget is managed by this class, not by the engine's time_limit.
    curr_time_limit = self.curr_iteration_runtime
    self.runtime_limits.append(curr_time_limit)
    # The deadline baked into the state is slightly earlier than the budget
    # (safe_runtime_extension) so the search can unwind before time is up.
    state = GameState(deepcopy(self.board), self.prev_board, self.my_pos, self.rival_pos, self.turn,
                      time.time() + curr_time_limit - self.safe_runtime_extension)
    search_algo = AlphaBeta(self.utils.utility_method, self.utils.successor_func, None, self.utils.check_goal)
    # Iterative deepening: keep the best complete result from the deepest
    # finished search; a TimeoutError raised by the search aborts the loop.
    depth = 1
    best_move = (None, None)
    while True:
        try:
            # Depth caps: up to 4 plies during placement (turn < 18),
            # up to 6 plies afterwards, and never past next_depth_limit.
            if self.turn < 18 and depth == 5:
                break
            elif self.turn >= 18 and depth == 7:
                break
            elif depth > self.next_depth_limit:
                break
            print(f'Starting depth {depth}, with time limit: {curr_time_limit}')
            start_time = time.time()
            temp_move = search_algo.search(state, depth, True)
            end_time = time.time()
            print(f'{depth}: {end_time - start_time}')
            if temp_move[1] is not None:
                print(f'found move')
                best_move = temp_move
                # Record how long each completed depth took on this turn.
                try:
                    self.total_runtime_by_turn[self.turn].append(end_time - start_time)
                except KeyError:
                    self.total_runtime_by_turn[self.turn] = [end_time - start_time]
                print(self.total_runtime_by_turn)
            else:
                # TODO: are we sure this is fine?
                print(f'GOT NONE!')
                break
        except TimeoutError:
            break
        depth += 1
    move = best_move[1]
    # ALIVE COUNT
    our_dead_count = 9 - len(GameUtils.get_soldier_position_by_player_index(self.board, 1))
    rival_dead_count = 9 - len(GameUtils.get_soldier_position_by_player_index(self.board, 2))
    # BLOCKED COUNT
    our_blocked_count = GameUtils.count_blocked_soldiers_by_player_index(self.board, 1)
    rival_blocked_count = GameUtils.count_blocked_soldiers_by_player_index(self.board, 2)
    # NOTE(review): the four counters above feed only the commented-out
    # adaptive-time block below; they are currently unused.
    self.prev_board = deepcopy(self.board)
    new_state = GameState(self.board, self.prev_board, self.my_pos, self.rival_pos, self.turn,
                          time.time() + time_limit)
    GameUtils.perform_move(new_state, move, 1)
    self.turn += 1
    # Need to look at the time the current iteration took
    curr_iteration_runtime = time.time() - move_start_time
    # NOTE(review): curr_iteration_runtime is only used by the commented-out
    # block below — confirm before deleting either.
    # if self.turn > 18:
    #     # ALIVE COUNT
    #     new_our_dead_count = 9 - len(GameUtils.get_soldier_position_by_player_index(new_state.board, 1))
    #     new_rival_dead_count = 9 - len(GameUtils.get_soldier_position_by_player_index(new_state.board, 2))
    #
    #     # BLOCKED COUNT
    #     new_our_blocked_count = GameUtils.count_blocked_soldiers_by_player_index(new_state.board, 1)
    #     new_rival_blocked_count = GameUtils.count_blocked_soldiers_by_player_index(new_state.board, 2)
    #     if new_rival_blocked_count + new_our_blocked_count >= 6 and \
    #             new_our_blocked_count + new_rival_blocked_count > our_blocked_count + rival_blocked_count:
    #         self.curr_iteration_runtime = curr_iteration_runtime + self.game_time * self.phase2_large_blocked_num_factor
    #         print(f'#1# adjusted time to: {self.curr_iteration_runtime}')
    #     elif new_rival_blocked_count + new_our_blocked_count < 6 and \
    #             new_our_blocked_count + new_rival_blocked_count < our_blocked_count + rival_blocked_count:
    #         self.curr_iteration_runtime = curr_iteration_runtime - self.game_time * self.phase2_large_blocked_num_factor
    #         print(f'#2# adjusted time to: {self.curr_iteration_runtime}')
    #     if new_rival_dead_count + new_our_dead_count >= 7 and \
    #             new_our_dead_count + new_rival_dead_count > our_dead_count + rival_dead_count:
    #         self.curr_iteration_runtime = curr_iteration_runtime + self.game_time * self.phase2_large_dead_num_factor
    #         print(f'#3# adjusted time to: {self.curr_iteration_runtime}')
    #     elif new_rival_dead_count + new_our_dead_count < 7 and \
    #             new_our_dead_count + new_rival_dead_count < our_dead_count + rival_dead_count:
    #         self.curr_iteration_runtime = curr_iteration_runtime - self.game_time * self.phase2_large_dead_num_factor
    #         print(f'#4# adjusted time to: {self.curr_iteration_runtime}')
    #     else:
    #         self.curr_iteration_runtime = curr_iteration_runtime
    if self.curr_iteration_runtime < self.initial_game_time * self.initial_balance_factor:
        if len(self.total_runtime_by_turn[0]) > 1:
            # Re-seed the budget from the depth-2 runtime of the first turn.
            # NOTE(review): raises KeyError if turn 0 recorded no runtimes — confirm.
            self.curr_iteration_runtime = self.total_runtime_by_turn[0][1] * 50
    move_end_time = time.time()
    # Update remaining game time
    self.game_time -= move_end_time - move_start_time
    # Fixed budget ladder keyed on the remaining global clock.
    # NOTE(review): the boundary values 100/50/35/10/5 fall in none of the
    # open intervals, so the budget is left unchanged exactly there — confirm intended.
    if self.game_time > 100:
        self.curr_iteration_runtime = 10
    if 50 < self.game_time < 100:
        self.curr_iteration_runtime = 5
    if 35 < self.game_time < 50:
        self.curr_iteration_runtime = 2.5
    if 10 < self.game_time < 35:
        self.curr_iteration_runtime = 1
    if 5 < self.game_time < 10:
        self.curr_iteration_runtime = 0.5
    if self.game_time < 5:
        self.curr_iteration_runtime = 0.3
    if self.game_time < 1:
        self.curr_iteration_runtime = 0.032
    current_turn_num = self.turn - 1
    # NOTE(review): current_turn_num is only used by the commented-out
    # depth-limit heuristic below.
    # if len(self.total_runtime_by_turn[current_turn_num]) > 3 and self.total_runtime_by_turn[current_turn_num][
    #         3] * 30 < self.game_time < self.total_runtime_by_turn[current_turn_num][
    #         3] * 70:
    #     self.next_depth_limit = 4
    # if len(self.total_runtime_by_turn[current_turn_num]) > 2 and self.total_runtime_by_turn[current_turn_num][
    #         2] * 30 < self.game_time < self.total_runtime_by_turn[current_turn_num][
    #         2] * 70:
    #     self.next_depth_limit = 3
    # if len(self.total_runtime_by_turn[current_turn_num]) > 1 and self.total_runtime_by_turn[current_turn_num][
    #         1] * 30 < self.game_time < self.total_runtime_by_turn[current_turn_num][
    #         1] * 70:
    #     self.next_depth_limit = 2
    # else:
    #     self.next_depth_limit = 1
    # print(self.runtime_limits)
    print(f'Time remaining: {self.initial_game_time - self.game_time}')
    return move
def set_rival_move(self, move):
    """Update your info, given the new position of the rival.
    input:
        - move: tuple (position, soldier index, our killed position or -1).
    No output is expected
    """
    new_pos, soldier_idx, killed_pos = move
    placement_phase = self.turn < 18
    if not placement_phase:
        # Movement phase: the soldier left its old square, so vacate it first.
        old_pos = self.rival_pos[soldier_idx]
        self.board[old_pos] = 0
    # Occupy the new square and remember where this rival soldier now stands.
    self.board[new_pos] = 2
    self.rival_pos[soldier_idx] = new_pos
    if not placement_phase and killed_pos != -1:
        # The rival removed one of our soldiers: clear its square, find which
        # of ours stood there, and mark that soldier as dead (-2).
        self.board[killed_pos] = 0
        victim = int(np.where(self.my_pos == killed_pos)[0][0])
        self.my_pos[victim] = -2
    self.turn += 1
########## helper functions in class ##########
# TODO: add here helper functions in class, if needed
def calculate_actual_turn_runtime(self):
    """Return the total search time recorded for the current turn.

    Sums the per-depth runtimes logged in total_runtime_by_turn[self.turn].
    Raises KeyError when nothing was recorded for this turn (same behavior
    as the original loop).
    """
    # Use the builtin sum() instead of an accumulator named `sum`,
    # which shadowed the builtin in the original.
    return sum(self.total_runtime_by_turn[self.turn])
########## helper functions for AlphaBeta algorithm ##########
# TODO: add here the utility, succ, and perform_move functions used in AlphaBeta algorithm
| en | 0.671514 | MiniMax Player with AlphaBeta pruning and global time # TODO: you can import more modules, if needed # keep the inheritance of the parent's (AbstractPlayer) __init__() # TODO: initialize more fields, if needed, and the AlphaBeta algorithm from SearchAlgos.py Set the game parameters needed for this player. This function is called before the game starts. (See GameWrapper.py for more info where it is called) input: - board: np.array, of the board. No output is expected. # TODO: erase the following line and implement this function. # Extra time management params # early: turn >=25 # late: turn >=45 Make move with this Player. input: - time_limit: float, time limit for a single turn. output: - direction: tuple, specifing the Player's movement # TODO: erase the following line and implement this function. # TODO: are we sure this is fine? # ALIVE COUNT # BLOCKED COUNT # Need to look at the time the current iteration took # if self.turn > 18: # # ALIVE COUNT # new_our_dead_count = 9 - len(GameUtils.get_soldier_position_by_player_index(new_state.board, 1)) # new_rival_dead_count = 9 - len(GameUtils.get_soldier_position_by_player_index(new_state.board, 2)) # # # BLOCKED COUNT # new_our_blocked_count = GameUtils.count_blocked_soldiers_by_player_index(new_state.board, 1) # new_rival_blocked_count = GameUtils.count_blocked_soldiers_by_player_index(new_state.board, 2) # if new_rival_blocked_count + new_our_blocked_count >= 6 and \ # new_our_blocked_count + new_rival_blocked_count > our_blocked_count + rival_blocked_count: # self.curr_iteration_runtime = curr_iteration_runtime + self.game_time * self.phase2_large_blocked_num_factor # print(f'#1# adjusted time to: {self.curr_iteration_runtime}') # elif new_rival_blocked_count + new_our_blocked_count < 6 and \ # new_our_blocked_count + new_rival_blocked_count < our_blocked_count + rival_blocked_count: # self.curr_iteration_runtime = curr_iteration_runtime - self.game_time * self.phase2_large_blocked_num_factor # 
print(f'#2# adjusted time to: {self.curr_iteration_runtime}') # if new_rival_dead_count + new_our_dead_count >= 7 and \ # new_our_dead_count + new_rival_dead_count > our_dead_count + rival_dead_count: # self.curr_iteration_runtime = curr_iteration_runtime + self.game_time * self.phase2_large_dead_num_factor # print(f'#3# adjusted time to: {self.curr_iteration_runtime}') # elif new_rival_dead_count + new_our_dead_count < 7 and \ # new_our_dead_count + new_rival_dead_count < our_dead_count + rival_dead_count: # self.curr_iteration_runtime = curr_iteration_runtime - self.game_time * self.phase2_large_dead_num_factor # print(f'#4# adjusted time to: {self.curr_iteration_runtime}') # else: # self.curr_iteration_runtime = curr_iteration_runtime # Update remaining game time # if len(self.total_runtime_by_turn[current_turn_num]) > 3 and self.total_runtime_by_turn[current_turn_num][ # 3] * 30 < self.game_time < self.total_runtime_by_turn[current_turn_num][ # 3] * 70: # self.next_depth_limit = 4 # if len(self.total_runtime_by_turn[current_turn_num]) > 2 and self.total_runtime_by_turn[current_turn_num][ # 2] * 30 < self.game_time < self.total_runtime_by_turn[current_turn_num][ # 2] * 70: # self.next_depth_limit = 3 # if len(self.total_runtime_by_turn[current_turn_num]) > 1 and self.total_runtime_by_turn[current_turn_num][ # 1] * 30 < self.game_time < self.total_runtime_by_turn[current_turn_num][ # 1] * 70: # self.next_depth_limit = 2 # else: # self.next_depth_limit = 1 # print(self.runtime_limits) Update your info, given the new position of the rival. input: - move: tuple, the new position of the rival. No output is expected # TODO: erase the following line and implement this function. 
# Currently, still in the first part of the game # Update the board to include the new enemy soldier # In the array containing the positions of all enemy soldiers, put in the index of the new soldier, # it's position on the board # Now in the second part of the game # The enemy player has killed one of our soldiers # Get from the board the index of the killed soldier # Mark our killed soldier as dead in our soldiers array ########## helper functions in class ########## # TODO: add here helper functions in class, if needed ########## helper functions for AlphaBeta algorithm ########## # TODO: add here the utility, succ, and perform_move functions used in AlphaBeta algorithm | 3.028507 | 3 |
expensetracker/expenses/serializers.py | Oscarious/ExpenseTracker-fullstack | 0 | 6618191 | from django.db.models import fields
from rest_framework import serializers
from expenses.models import Transaction
# transaction serializer
class TransactionSerializer(serializers.ModelSerializer):
    """Serializes Transaction instances to/from JSON for the REST API."""

    class Meta:
        # Expose every field defined on the Transaction model.
        model = Transaction
        fields = '__all__'
from rest_framework import serializers
from expenses.models import Transaction
# transaction serializer
class TransactionSerializer(serializers.ModelSerializer):
    """Serializes Transaction instances to/from JSON for the REST API."""

    class Meta:
        # Expose every field defined on the Transaction model.
        model = Transaction
        fields = '__all__'
pipeline/scrapers/planalto/laws.py | juridics/brazilian-legal-text-dataset | 1 | 6618192 | <gh_stars>1-10
from selenium import webdriver
from selenium.common.exceptions import WebDriverException
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from pipeline.utils import WorkProgress, DatasetManager, PathUtil
WAIT_TIMEOUT = 10
class PlanaltoLawScraper:
    """Scrapes Brazilian federal legislation pages from planalto.gov.br.

    Index URLs come in two flavours: pages whose links point straight at law
    detail pages ("two level deep") and pages whose links point at further
    index pages ("three level deep"). Each law page is saved as an .html file
    under output/mlm/planalto/<category>/.
    """

    def __init__(self):
        self.work_progress = WorkProgress()
        self.dataset_manager = DatasetManager()
        # Index page -> law detail pages.
        self.two_level_deep_urls = [
            'http://www4.planalto.gov.br/legislacao/portal-legis/legislacao-1/codigos-1',
            'http://www4.planalto.gov.br/legislacao/portal-legis/legislacao-1/estatutos'
        ]
        # Index page -> intermediate index pages -> law detail pages.
        self.three_level_deep_urls = [
            'http://www4.planalto.gov.br/legislacao/portal-legis/legislacao-1/leis-ordinarias',
            'http://www4.planalto.gov.br/legislacao/portal-legis/legislacao-1/leis-complementares-1',
            'http://www4.planalto.gov.br/legislacao/portal-legis/legislacao-1/medidas-provisorias',
            'http://www4.planalto.gov.br/legislacao/portal-legis/legislacao-1/decretos1',
            'http://www4.planalto.gov.br/legislacao/portal-legis/legislacao-1/decretos-leis',
            'http://www4.planalto.gov.br/legislacao/portal-legis/legislacao-1/decretos-nao-numerados1',
            'http://www4.planalto.gov.br/legislacao/portal-legis/legislacao-1/leis-delegadas-1'
        ]
        self.rootpath = PathUtil.build_path('output', 'mlm', 'planalto')

    def execute(self):
        """Run the full scrape for both URL groups."""
        self.work_progress.show('Starting scraper laws from planalto')
        self._process_two_level_deep()
        self._process_three_level_deep()
        self.work_progress.show('Scraper has finished!')

    def _process_three_level_deep(self):
        # Each top URL lists intermediate index pages; scrape each of those.
        for url in self.three_level_deep_urls:
            self.work_progress.show(f'Getting links to internal pages from {url}')
            foldername = self._get_foldername(url)
            targetpath = self._create_folder(self.rootpath, foldername)
            index_page = IndexPage(url)
            # Titles are unused at this level; only the sub-index URLs matter.
            _, hrefs = index_page.get_links()
            for href in hrefs:
                self._process_index(targetpath, href)

    def _process_two_level_deep(self):
        for url in self.two_level_deep_urls:
            self._process_index(self.rootpath, url)

    def _process_index(self, rootpath, url):
        """Scrape every law page linked from one index page into its own folder."""
        self.work_progress.show(f'Getting links to internal pages from {url}')
        foldername = self._get_foldername(url)
        targetpath = self._create_folder(rootpath, foldername)
        index = IndexPage(url)
        names, hrefs = index.get_links()
        for name, href in zip(names, hrefs):
            self._process_detail(targetpath, name, href)

    def _process_detail(self, targetpath, name, href):
        """Download one law page and persist it as <name>.html; log failures."""
        try:
            detail = DetailPage(href)
            content = detail.get_content()
            filename = f'{name}.html'
            filepath = PathUtil.join(targetpath, filename)
            self.dataset_manager.to_file(filepath, content)
            # Fix: the original f-string had no placeholder; report which file was written.
            self.work_progress.show(f'A file {filename} was created.')
        except WebDriverException:
            # Keep scraping the remaining links even if one page fails.
            self.work_progress.show(f'Getting error {name} in {href}')

    @staticmethod
    def _get_foldername(url):
        """Last path segment of the URL, used as the category folder name."""
        return url.split('/')[-1]

    @staticmethod
    def _create_folder(rootpath, foldername):
        """Create (if needed) and return the directory for one category."""
        return PathUtil.create_dir(rootpath, foldername)
class IndexPage:
    """A legislation index page: opens `url` in Firefox and extracts its links."""

    def __init__(self, url):
        self.driver = webdriver.Firefox()
        self.driver.get(url)

    def __del__(self):
        # Best-effort cleanup; tying browser shutdown to GC timing is fragile.
        # NOTE(review): consider an explicit close() or context manager.
        self.driver.close()
        self.driver.quit()

    def get_links(self):
        """Return (titles, hrefs) for all document links on the index page.

        Waits up to WAIT_TIMEOUT seconds for the links container, drops .doc
        and .pdf links, and derives each title from the URL's last segment.
        """
        xpath_container = "//table[@class='visaoQuadrosTabela'] | //div[@id='parent-fieldname-text']"
        condition = EC.presence_of_element_located((By.XPATH, xpath_container))
        container = WebDriverWait(self.driver, WAIT_TIMEOUT).until(condition)
        # NOTE(review): find_elements_by_tag_name was removed in Selenium 4;
        # this code assumes Selenium 3.x.
        links = container.find_elements_by_tag_name('a')
        hrefs = [link.get_attribute('href') for link in links]
        hrefs = [href for href in hrefs if not href.endswith('.doc') and not href.endswith('.pdf')]
        titles = [href.split('/')[-1].replace('.htm', '') for href in hrefs]
        return titles, hrefs
class DetailPage:
    """A single law page: opens `url` in Firefox and captures its HTML."""

    def __init__(self, url):
        self.driver = webdriver.Firefox()
        self.driver.get(url)

    def __del__(self):
        # Best-effort cleanup; tying browser shutdown to GC timing is fragile.
        # NOTE(review): consider an explicit close() or context manager.
        self.driver.close()
        self.driver.quit()

    def get_content(self):
        """Return the full page source once at least one <p> element exists."""
        condition = EC.presence_of_element_located((By.TAG_NAME, 'p'))
        WebDriverWait(self.driver, WAIT_TIMEOUT).until(condition)
        html = self.driver.page_source
        return html
| from selenium import webdriver
from selenium.common.exceptions import WebDriverException
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from pipeline.utils import WorkProgress, DatasetManager, PathUtil
WAIT_TIMEOUT = 10
class PlanaltoLawScraper:
    """Scrapes Brazilian federal legislation pages from planalto.gov.br.

    Index URLs come in two flavours: pages whose links point straight at law
    detail pages ("two level deep") and pages whose links point at further
    index pages ("three level deep"). Each law page is saved as an .html file
    under output/mlm/planalto/<category>/.
    """

    def __init__(self):
        self.work_progress = WorkProgress()
        self.dataset_manager = DatasetManager()
        # Index page -> law detail pages.
        self.two_level_deep_urls = [
            'http://www4.planalto.gov.br/legislacao/portal-legis/legislacao-1/codigos-1',
            'http://www4.planalto.gov.br/legislacao/portal-legis/legislacao-1/estatutos'
        ]
        # Index page -> intermediate index pages -> law detail pages.
        self.three_level_deep_urls = [
            'http://www4.planalto.gov.br/legislacao/portal-legis/legislacao-1/leis-ordinarias',
            'http://www4.planalto.gov.br/legislacao/portal-legis/legislacao-1/leis-complementares-1',
            'http://www4.planalto.gov.br/legislacao/portal-legis/legislacao-1/medidas-provisorias',
            'http://www4.planalto.gov.br/legislacao/portal-legis/legislacao-1/decretos1',
            'http://www4.planalto.gov.br/legislacao/portal-legis/legislacao-1/decretos-leis',
            'http://www4.planalto.gov.br/legislacao/portal-legis/legislacao-1/decretos-nao-numerados1',
            'http://www4.planalto.gov.br/legislacao/portal-legis/legislacao-1/leis-delegadas-1'
        ]
        self.rootpath = PathUtil.build_path('output', 'mlm', 'planalto')

    def execute(self):
        """Run the full scrape for both URL groups."""
        self.work_progress.show('Starting scraper laws from planalto')
        self._process_two_level_deep()
        self._process_three_level_deep()
        self.work_progress.show('Scraper has finished!')

    def _process_three_level_deep(self):
        # Each top URL lists intermediate index pages; scrape each of those.
        for url in self.three_level_deep_urls:
            self.work_progress.show(f'Getting links to internal pages from {url}')
            foldername = self._get_foldername(url)
            targetpath = self._create_folder(self.rootpath, foldername)
            index_page = IndexPage(url)
            # Titles are unused at this level; only the sub-index URLs matter.
            _, hrefs = index_page.get_links()
            for href in hrefs:
                self._process_index(targetpath, href)

    def _process_two_level_deep(self):
        for url in self.two_level_deep_urls:
            self._process_index(self.rootpath, url)

    def _process_index(self, rootpath, url):
        """Scrape every law page linked from one index page into its own folder."""
        self.work_progress.show(f'Getting links to internal pages from {url}')
        foldername = self._get_foldername(url)
        targetpath = self._create_folder(rootpath, foldername)
        index = IndexPage(url)
        names, hrefs = index.get_links()
        for name, href in zip(names, hrefs):
            self._process_detail(targetpath, name, href)

    def _process_detail(self, targetpath, name, href):
        """Download one law page and persist it as <name>.html; log failures."""
        try:
            detail = DetailPage(href)
            content = detail.get_content()
            filename = f'{name}.html'
            filepath = PathUtil.join(targetpath, filename)
            self.dataset_manager.to_file(filepath, content)
            # Fix: the original f-string had no placeholder; report which file was written.
            self.work_progress.show(f'A file {filename} was created.')
        except WebDriverException:
            # Keep scraping the remaining links even if one page fails.
            self.work_progress.show(f'Getting error {name} in {href}')

    @staticmethod
    def _get_foldername(url):
        """Last path segment of the URL, used as the category folder name."""
        return url.split('/')[-1]

    @staticmethod
    def _create_folder(rootpath, foldername):
        """Create (if needed) and return the directory for one category."""
        return PathUtil.create_dir(rootpath, foldername)
class IndexPage:
    """A legislation index page: opens `url` in Firefox and extracts its links."""

    def __init__(self, url):
        self.driver = webdriver.Firefox()
        self.driver.get(url)

    def __del__(self):
        # Best-effort cleanup; tying browser shutdown to GC timing is fragile.
        # NOTE(review): consider an explicit close() or context manager.
        self.driver.close()
        self.driver.quit()

    def get_links(self):
        """Return (titles, hrefs) for all document links on the index page.

        Waits up to WAIT_TIMEOUT seconds for the links container, drops .doc
        and .pdf links, and derives each title from the URL's last segment.
        """
        xpath_container = "//table[@class='visaoQuadrosTabela'] | //div[@id='parent-fieldname-text']"
        condition = EC.presence_of_element_located((By.XPATH, xpath_container))
        container = WebDriverWait(self.driver, WAIT_TIMEOUT).until(condition)
        # NOTE(review): find_elements_by_tag_name was removed in Selenium 4;
        # this code assumes Selenium 3.x.
        links = container.find_elements_by_tag_name('a')
        hrefs = [link.get_attribute('href') for link in links]
        hrefs = [href for href in hrefs if not href.endswith('.doc') and not href.endswith('.pdf')]
        titles = [href.split('/')[-1].replace('.htm', '') for href in hrefs]
        return titles, hrefs
class DetailPage:
    """A single law page: opens `url` in Firefox and captures its HTML."""

    def __init__(self, url):
        self.driver = webdriver.Firefox()
        self.driver.get(url)

    def __del__(self):
        # Best-effort cleanup; tying browser shutdown to GC timing is fragile.
        # NOTE(review): consider an explicit close() or context manager.
        self.driver.close()
        self.driver.quit()

    def get_content(self):
        """Return the full page source once at least one <p> element exists."""
        condition = EC.presence_of_element_located((By.TAG_NAME, 'p'))
        WebDriverWait(self.driver, WAIT_TIMEOUT).until(condition)
        html = self.driver.page_source
        return html
docker/test/integration/minifi/processors/GetFile.py | kevdoran/nifi-minifi-cpp | 0 | 6618193 | from ..core.Processor import Processor
class GetFile(Processor):
    """MiNiFi GetFile processor definition used by the integration tests.

    Watches `input_dir`, keeps the source file, and auto-terminates the
    'success' relationship.
    """

    def __init__(self, input_dir="/tmp/input", schedule=None):
        # Fix: the original used a mutable dict as a default argument, which
        # is shared across every GetFile instance; use a None sentinel instead.
        if schedule is None:
            schedule = {'scheduling period': '2 sec'}
        super(GetFile, self).__init__('GetFile',
                                      properties={'Input Directory': input_dir, 'Keep Source File': 'true'},
                                      schedule=schedule,
                                      auto_terminate=['success'])
| from ..core.Processor import Processor
class GetFile(Processor):
    """MiNiFi GetFile processor definition used by the integration tests.

    Watches `input_dir`, keeps the source file, and auto-terminates the
    'success' relationship.
    """

    def __init__(self, input_dir="/tmp/input", schedule=None):
        # Fix: the original used a mutable dict as a default argument, which
        # is shared across every GetFile instance; use a None sentinel instead.
        if schedule is None:
            schedule = {'scheduling period': '2 sec'}
        super(GetFile, self).__init__('GetFile',
                                      properties={'Input Directory': input_dir, 'Keep Source File': 'true'},
                                      schedule=schedule,
                                      auto_terminate=['success'])
| none | 1 | 2.591072 | 3 | |
pedlar/utils.py | nuric/pedlar | 61 | 6618194 | """pedlar utility functions."""
def calc_profit(order, bid: float, ask: float, leverage: float = 100):
    """Compute the profit of a given order.

    Returns (closing_price, profit): a buy closes at `bid`, a sell at `ask`,
    and the profit is the price move times leverage and lot size, converted
    back through the closing price.
    """
    # BIG ASSUMPTION: the account currency equals the base currency
    # (e.g. a GBP account trading GBPUSD) — no other exchange rates are
    # streamed to us, so cross-currency conversion is not handled.
    if order.type == "buy":
        close_price = bid
        price_delta = close_price - order.price
    else:
        close_price = ask
        price_delta = order.price - close_price
    profit = price_delta * leverage * order.volume * 1000 * (1 / close_price)
    return close_price, round(profit, 2)
| """pedlar utility functions."""
def calc_profit(order, bid: float, ask: float, leverage: float = 100):
    """Compute the profit of a given order.

    Returns (closing_price, profit): a buy closes at `bid`, a sell at `ask`,
    and the profit is the price move times leverage and lot size, converted
    back through the closing price.
    """
    # BIG ASSUMPTION: the account currency equals the base currency
    # (e.g. a GBP account trading GBPUSD) — no other exchange rates are
    # streamed to us, so cross-currency conversion is not handled.
    if order.type == "buy":
        close_price = bid
        price_delta = close_price - order.price
    else:
        close_price = ask
        price_delta = order.price - close_price
    profit = price_delta * leverage * order.volume * 1000 * (1 / close_price)
    return close_price, round(profit, 2)
| en | 0.893717 | pedlar utility functions. Compute the profit of a given order, return closing price and profit. # BIG ASSUMPTION, account currency is the same as base currency # Ex. GBP account trading on GBPUSD since we don't have other # exchange rates streaming to us to handle conversion # The closing price of the order # Price difference | 3.478703 | 3 |
todo/commands/group/preset.py | tomasdanjonsson/td-cli | 154 | 6618195 | <filename>todo/commands/group/preset.py
from todo.commands.base import Command
from todo.renderers import RenderOutput
class Preset(Command):
    """Command that marks a todo group as the default ("preset") group."""

    def run(self, args):
        # Looks the group up by name; raises if it does not exist.
        group = self._get_group_or_raise(args.name)
        # group[0] appears to be the group's name; a falsy value means the
        # implicit global group — TODO confirm against the group service.
        self.service.group.use(group[0])
        RenderOutput("Set {blue}{group_name}{reset} as default").render(
            group_name=group[0] or "global"
        )
| <filename>todo/commands/group/preset.py
from todo.commands.base import Command
from todo.renderers import RenderOutput
class Preset(Command):
    """Command that marks a todo group as the default ("preset") group."""

    def run(self, args):
        # Looks the group up by name; raises if it does not exist.
        group = self._get_group_or_raise(args.name)
        # group[0] appears to be the group's name; a falsy value means the
        # implicit global group — TODO confirm against the group service.
        self.service.group.use(group[0])
        RenderOutput("Set {blue}{group_name}{reset} as default").render(
            group_name=group[0] or "global"
        )
| none | 1 | 2.283221 | 2 | |
benchgen/generators/ansible_cis.py | ansible-lockdown/BenchmarkGenerator | 3 | 6618196 | <reponame>ansible-lockdown/BenchmarkGenerator
from os import path
import re
from pathlib import Path
from jinja2 import Environment, FileSystemLoader
env = Environment(loader=FileSystemLoader(Path(Path(__file__).parent, '..', '..', 'templates').resolve()))
def generate(data, parser, output_path):
    """Generate Ansible CIS output for the benchmark in `data`.

    get_tagged_rule_ids returns either one rule-set mapping (single output)
    or a list of {'suffix', 'rules'} entries (one output per role).
    `parser` is accepted for interface compatibility but unused here.
    """
    tagged = get_tagged_rule_ids(data['profiles'])
    if not isinstance(tagged, list):
        render_rule_set(data, tagged, output_path)
        return
    for entry in tagged:
        suffix = entry["suffix"] or ''
        render_rule_set(data, entry['rules'], f'{output_path}{suffix}')
def render_rule_set(data, rule_set, output_path):
    """Render one Ansible task file per benchmark section (1-6) plus a manifest.

    data: parsed benchmark; expects 'groups', each with 'number' and 'rules'.
    rule_set: mapping of tag name (e.g. 'level1') -> set of rule idrefs.
    output_path: directory to create and write sectionN.yml / manifest.txt into.
    """
    Path(output_path).mkdir(parents=True, exist_ok=True)
    manifest = []
    # Fix: the original iterated enumerate(range(1, 7)) and used i + 1,
    # which is always equal to `section`; iterate the section number directly.
    for section in range(1, 7):
        manifest.append(f'\nsection{section}')
        # Groups whose dotted number starts with this section's digit.
        groups = list(filter(lambda g: g['number'].startswith(str(section)), data['groups']))
        tasks = []
        for group in groups:
            for rule in group['rules']:
                # Tag the task with every level whose rule set selects it,
                # plus a per-rule tag.
                tags = []
                for t, r in rule_set.items():
                    if rule['id'] in r:
                        tags.append(t)
                tags.append(f'rule_{rule["number"]}')
                # Human-readable name: the id text after the rule number,
                # with underscores turned back into spaces.
                rule_name = re.sub(r'_', ' ', rule['id'].split(f'{rule["number"]}_')[1].strip())
                tasks.append({
                    'name': rule_name,
                    'number': rule['number'],
                    'tags': tags
                })
        sort_by_number(tasks)
        manifest.extend(map(lambda t: f'{t["number"]} - {t["name"]}', tasks))
        render_tasks(tasks, path.join(output_path, f'section{section}.yml'))
    with open(path.join(output_path, 'manifest.txt'), 'w', encoding='utf-8') as file:
        file.write('\n'.join(manifest))
def get_tagged_rule_ids(profiles):
    """Map benchmark profiles onto level1/level2 rule-id sets.

    Returns either a single {'level1': set, 'level2': set} mapping, or — for
    Domain Controller / Member Server benchmarks — a list of two
    {'suffix', 'rules'} entries, one per role. Raises Exception for any
    unsupported profile combination.
    """

    def _rule_ids(profile):
        # Set of rule idrefs selected by one profile.
        return {sel['idref'] for sel in profile['selections']}

    def _profile(profile_id):
        # First profile with this exact id, or None.
        return next((p for p in profiles if p['id'] == profile_id), None)

    count = len(profiles)
    # A single Level 1 profile -> one level1 rule set.
    if count == 1 and 'Level_1' in profiles[0]['id']:
        return {'level1': _rule_ids(profiles[0])}
    # A Level 1 / Level 2 pair -> one rule set per level.
    if count == 2 and 'Level_1' in profiles[0]['id'] and 'Level_2' in profiles[1]['id']:
        return {
            'level1': _rule_ids(profiles[0]),
            'level2': _rule_ids(profiles[1]),
        }
    # Exactly four profiles with Server variants -> use the Server pair,
    # ignoring the rest.
    if count == 4:
        srv1 = _profile('xccdf_org.cisecurity.benchmarks_profile_Level_1_-_Server')
        srv2 = _profile('xccdf_org.cisecurity.benchmarks_profile_Level_2_-_Server')
        if srv1 and srv2:
            return {
                'level1': _rule_ids(srv1),
                'level2': _rule_ids(srv2),
            }
    # Domain Controller / Member Server split -> one output per role.
    if count >= 4:
        dc1 = _profile('xccdf_org.cisecurity.benchmarks_profile_Level_1_-_Domain_Controller')
        dc2 = _profile('xccdf_org.cisecurity.benchmarks_profile_Level_2_-_Domain_Controller')
        ms1 = _profile('xccdf_org.cisecurity.benchmarks_profile_Level_1_-_Member_Server')
        ms2 = _profile('xccdf_org.cisecurity.benchmarks_profile_Level_2_-_Member_Server')
        if dc1 and dc2 and ms1 and ms2:
            return [
                {
                    'suffix': '-Domain_Controller',
                    'rules': {'level1': _rule_ids(dc1), 'level2': _rule_ids(dc2)},
                },
                {
                    'suffix': '-Member_Server',
                    'rules': {'level1': _rule_ids(ms1), 'level2': _rule_ids(ms2)},
                },
            ]
    raise Exception(f'Generator does not support the following profiles: {list(map(lambda p: p["id"], profiles))}')
def render_tasks(tasks, output_path):
    """Render the task list through the Jinja2 template and write it to output_path."""
    # `env` is the module-level Jinja2 Environment rooted at the project's
    # templates directory.
    template = env.get_template('ansible_cis.yml.j2')
    result = template.render(tasks=tasks)
    with open(output_path, 'w', encoding='utf-8') as file:
        file.write(result)
def sort_by_number(items):
    """Sort dicts in place by their dotted 'number' key, numerically per segment."""
    def _numeric_key(item):
        # '1.10' -> [1, 10], so it sorts after '1.2' ([1, 2]).
        return [int(part) for part in item['number'].split('.')]
    items.sort(key=_numeric_key)
import re
from pathlib import Path
from jinja2 import Environment, FileSystemLoader
env = Environment(loader=FileSystemLoader(Path(Path(__file__).parent, '..', '..', 'templates').resolve()))
def generate(data, parser, output_path):
    """Generate Ansible CIS output for the benchmark in `data`.

    get_tagged_rule_ids returns either one rule-set mapping (single output)
    or a list of {'suffix', 'rules'} entries (one output per role).
    `parser` is accepted for interface compatibility but unused here.
    """
    tagged = get_tagged_rule_ids(data['profiles'])
    if not isinstance(tagged, list):
        render_rule_set(data, tagged, output_path)
        return
    for entry in tagged:
        suffix = entry["suffix"] or ''
        render_rule_set(data, entry['rules'], f'{output_path}{suffix}')
def render_rule_set(data, rule_set, output_path):
    """Render one Ansible task file per benchmark section (1-6) plus a manifest.

    data: parsed benchmark; expects 'groups', each with 'number' and 'rules'.
    rule_set: mapping of tag name (e.g. 'level1') -> set of rule idrefs.
    output_path: directory to create and write sectionN.yml / manifest.txt into.
    """
    Path(output_path).mkdir(parents=True, exist_ok=True)
    manifest = []
    # Fix: the original iterated enumerate(range(1, 7)) and used i + 1,
    # which is always equal to `section`; iterate the section number directly.
    for section in range(1, 7):
        manifest.append(f'\nsection{section}')
        # Groups whose dotted number starts with this section's digit.
        groups = list(filter(lambda g: g['number'].startswith(str(section)), data['groups']))
        tasks = []
        for group in groups:
            for rule in group['rules']:
                # Tag the task with every level whose rule set selects it,
                # plus a per-rule tag.
                tags = []
                for t, r in rule_set.items():
                    if rule['id'] in r:
                        tags.append(t)
                tags.append(f'rule_{rule["number"]}')
                # Human-readable name: the id text after the rule number,
                # with underscores turned back into spaces.
                rule_name = re.sub(r'_', ' ', rule['id'].split(f'{rule["number"]}_')[1].strip())
                tasks.append({
                    'name': rule_name,
                    'number': rule['number'],
                    'tags': tags
                })
        sort_by_number(tasks)
        manifest.extend(map(lambda t: f'{t["number"]} - {t["name"]}', tasks))
        render_tasks(tasks, path.join(output_path, f'section{section}.yml'))
    with open(path.join(output_path, 'manifest.txt'), 'w', encoding='utf-8') as file:
        file.write('\n'.join(manifest))
def get_tagged_rule_ids(profiles):
# If there is a single Level 1 profile produce one level1 rule set
if len(profiles) == 1 and 'Level_1' in profiles[0]['id']:
return {
'level1': set(map(lambda r: r['idref'], profiles[0]['selections']))
}
# If there are 2 profiles, Level 1 and Level 2, produce a rule set for each
if len(profiles) == 2 and 'Level_1' in profiles[0]['id'] and 'Level_2' in profiles[1]['id']:
return {
'level1': set(map(lambda r: r['idref'], profiles[0]['selections'])),
'level2': set(map(lambda r: r['idref'], profiles[1]['selections']))
}
# If there are 4 profiles, two designated as Server, use the server profiles for level1 and level2 and ignore the others
if len(profiles) == 4:
level1Server = next((p for p in profiles if p['id'] == 'xccdf_org.cisecurity.benchmarks_profile_Level_1_-_Server'), None)
level2Server = next((p for p in profiles if p['id'] == 'xccdf_org.cisecurity.benchmarks_profile_Level_2_-_Server'), None)
if level1Server and level2Server:
return {
'level1': set(map(lambda r: r['idref'], level1Server['selections'])),
'level2': set(map(lambda r: r['idref'], level2Server['selections']))
}
# If there are Domain Member/Controller Level 1 and Level 2 profiles, return one rule set for each. This produces multiple outputs.
if len(profiles) >= 4:
level1Domain = next((p for p in profiles if p['id'] == 'xccdf_org.cisecurity.benchmarks_profile_Level_1_-_Domain_Controller'), None)
level2Domain = next((p for p in profiles if p['id'] == 'xccdf_org.cisecurity.benchmarks_profile_Level_2_-_Domain_Controller'), None)
level1Member = next((p for p in profiles if p['id'] == 'xccdf_org.cisecurity.benchmarks_profile_Level_1_-_Member_Server'), None)
level2Member = next((p for p in profiles if p['id'] == 'xccdf_org.cisecurity.benchmarks_profile_Level_2_-_Member_Server'), None)
if level1Domain and level2Domain and level1Member and level2Member:
return [{
'suffix': '-Domain_Controller',
'rules': {
'level1': set(map(lambda r: r['idref'], level1Domain['selections'])),
'level2': set(map(lambda r: r['idref'], level2Domain['selections']))
}
},{
'suffix': '-Member_Server',
'rules': {
'level1': set(map(lambda r: r['idref'], level1Member['selections'])),
'level2': set(map(lambda r: r['idref'], level2Member['selections']))
}
}]
raise Exception(f'Generator does not support the following profiles: {list(map(lambda p: p["id"], profiles))}')
def render_tasks(tasks, output_path):
template = env.get_template('ansible_cis.yml.j2')
result = template.render(tasks=tasks)
with open(output_path, 'w', encoding='utf-8') as file:
file.write(result)
def sort_by_number(items):
items.sort(key=lambda item: [int(n) for n in item['number'].split('.')]) | en | 0.854519 | # If there is a single Level 1 profile produce one level1 rule set # If there are 2 profiles, Level 1 and Level 2, produce a rule set for each # If there are 4 profiles, two designated as Server, use the server profiles for level1 and level2 and ignore the others # If there are Domain Member/Controller Level 1 and Level 2 profiles, return one rule set for each. This produces multiple outputs. | 2.476251 | 2 |
venv/lib/python3.8/site-packages/cryptography/hazmat/primitives/asymmetric/ec.py | Retraces/UkraineBot | 1 | 6618197 | /home/runner/.cache/pip/pool/03/fa/f2/935d1111bc02e27722178b940d0aab748e043ece786451a07da3c6964d | /home/runner/.cache/pip/pool/03/fa/f2/935d1111bc02e27722178b940d0aab748e043ece786451a07da3c6964d | none | 1 | 0.842711 | 1 | |
extractor-python/filters.py | TheBiggerGuy/iridium-toolk | 2 | 6618198 |
# Copyright 2012 <NAME> <<EMAIL>>
#
# This file is part of CommPy.
#
# CommPy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# CommPy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
============================================
Pulse Shaping Filters (:mod:`commpy.filters`)
============================================
.. autosummary::
:toctree: generated/
rcosfilter -- Class representing convolutional code trellis.
rrcosfilter -- Convolutional Encoder.
gaussianfilter -- Convolutional Decoder using the Viterbi algorithm.
"""
import numpy as np
__all__=['rcosfilter', 'rrcosfilter', 'gaussianfilter']
def rcosfilter(N, alpha, Ts, Fs):
"""
Generates a raised cosine (RC) filter (FIR) impulse response.
Parameters
----------
N : int
Length of the filter in samples.
alpha: float
Roll off factor (Valid values are [0, 1]).
Ts : float
Symbol period in seconds.
Fs : float
Sampling Rate in Hz.
Returns
-------
h_rc : 1-D ndarray (float)
Impulse response of the raised cosine filter.
time_idx : 1-D ndarray (float)
Array containing the time indices, in seconds, for the impulse response.
"""
T_delta = 1/float(Fs)
time_idx = ((np.arange(N)-N/2))*T_delta
sample_num = np.arange(N)
h_rc = np.zeros(N, dtype=float)
for x in sample_num:
t = (x-N/2)*T_delta
if t == 0.0:
h_rc[x] = 1.0
elif alpha != 0 and t == Ts/(2*alpha):
h_rc[x] = (np.pi/4)*(np.sin(np.pi*t/Ts)/(np.pi*t/Ts))
elif alpha != 0 and t == -Ts/(2*alpha):
h_rc[x] = (np.pi/4)*(np.sin(np.pi*t/Ts)/(np.pi*t/Ts))
else:
h_rc[x] = (np.sin(np.pi*t/Ts)/(np.pi*t/Ts))* \
(np.cos(np.pi*alpha*t/Ts)/(1-(((2*alpha*t)/Ts)*((2*alpha*t)/Ts))))
return time_idx, h_rc
def rrcosfilter(N, alpha, Ts, Fs):
"""
Generates a root raised cosine (RRC) filter (FIR) impulse response.
Parameters
----------
N : int
Length of the filter in samples.
alpha: float
Roll off factor (Valid values are [0, 1]).
Ts : float
Symbol period in seconds.
Fs : float
Sampling Rate in Hz.
Returns
---------
h_rrc : 1-D ndarray of floats
Impulse response of the root raised cosine filter.
time_idx : 1-D ndarray of floats
Array containing the time indices, in seconds, for
the impulse response.
"""
T_delta = 1/float(Fs)
time_idx = ((np.arange(N)-N/2))*T_delta
sample_num = np.arange(N)
h_rrc = np.zeros(N, dtype=float)
for x in sample_num:
t = (x-N/2)*T_delta
if t == 0.0:
h_rrc[x] = 1.0 - alpha + (4*alpha/np.pi)
elif alpha != 0 and t == Ts/(4*alpha):
h_rrc[x] = (alpha/np.sqrt(2))*(((1+2/np.pi)* \
(np.sin(np.pi/(4*alpha)))) + ((1-2/np.pi)*(np.cos(np.pi/(4*alpha)))))
elif alpha != 0 and t == -Ts/(4*alpha):
h_rrc[x] = (alpha/np.sqrt(2))*(((1+2/np.pi)* \
(np.sin(np.pi/(4*alpha)))) + ((1-2/np.pi)*(np.cos(np.pi/(4*alpha)))))
else:
h_rrc[x] = (np.sin(np.pi*t*(1-alpha)/Ts) + \
4*alpha*(t/Ts)*np.cos(np.pi*t*(1+alpha)/Ts))/ \
(np.pi*t*(1-(4*alpha*t/Ts)*(4*alpha*t/Ts))/Ts)
return time_idx, h_rrc
def gaussianfilter(N, alpha, Ts, Fs):
"""
Generates a gaussian filter (FIR) impulse response.
Parameters
----------
N : int
Length of the filter in samples.
alpha: float
Roll off factor (Valid values are [0, 1]).
Ts : float
Symbol period in seconds.
Fs : float
Sampling Rate in Hz.
Returns
-------
h_gaussian : 1-D ndarray of floats
Impulse response of the gaussian filter.
time_index : 1-D ndarray of floats
Array containing the time indices for the impulse response.
"""
T_delta = 1/float(Fs)
time_idx = ((np.arange(N)-N/2))*T_delta
h_gaussian = (np.sqrt(np.pi)/alpha)*np.exp(-((np.pi*time_index/alpha)*(np.pi*time_index/alpha)))
return time_idx, h_gaussian
def rectfilter(N, Ts, Fs):
h_rect = np.ones(N)
T_delta = 1/float(Fs)
time_idx = ((np.arange(N)-N/2))*T_delta
return time_idx, h_rect |
# Copyright 2012 <NAME> <<EMAIL>>
#
# This file is part of CommPy.
#
# CommPy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# CommPy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
============================================
Pulse Shaping Filters (:mod:`commpy.filters`)
============================================
.. autosummary::
:toctree: generated/
rcosfilter -- Class representing convolutional code trellis.
rrcosfilter -- Convolutional Encoder.
gaussianfilter -- Convolutional Decoder using the Viterbi algorithm.
"""
import numpy as np
__all__=['rcosfilter', 'rrcosfilter', 'gaussianfilter']
def rcosfilter(N, alpha, Ts, Fs):
"""
Generates a raised cosine (RC) filter (FIR) impulse response.
Parameters
----------
N : int
Length of the filter in samples.
alpha: float
Roll off factor (Valid values are [0, 1]).
Ts : float
Symbol period in seconds.
Fs : float
Sampling Rate in Hz.
Returns
-------
h_rc : 1-D ndarray (float)
Impulse response of the raised cosine filter.
time_idx : 1-D ndarray (float)
Array containing the time indices, in seconds, for the impulse response.
"""
T_delta = 1/float(Fs)
time_idx = ((np.arange(N)-N/2))*T_delta
sample_num = np.arange(N)
h_rc = np.zeros(N, dtype=float)
for x in sample_num:
t = (x-N/2)*T_delta
if t == 0.0:
h_rc[x] = 1.0
elif alpha != 0 and t == Ts/(2*alpha):
h_rc[x] = (np.pi/4)*(np.sin(np.pi*t/Ts)/(np.pi*t/Ts))
elif alpha != 0 and t == -Ts/(2*alpha):
h_rc[x] = (np.pi/4)*(np.sin(np.pi*t/Ts)/(np.pi*t/Ts))
else:
h_rc[x] = (np.sin(np.pi*t/Ts)/(np.pi*t/Ts))* \
(np.cos(np.pi*alpha*t/Ts)/(1-(((2*alpha*t)/Ts)*((2*alpha*t)/Ts))))
return time_idx, h_rc
def rrcosfilter(N, alpha, Ts, Fs):
"""
Generates a root raised cosine (RRC) filter (FIR) impulse response.
Parameters
----------
N : int
Length of the filter in samples.
alpha: float
Roll off factor (Valid values are [0, 1]).
Ts : float
Symbol period in seconds.
Fs : float
Sampling Rate in Hz.
Returns
---------
h_rrc : 1-D ndarray of floats
Impulse response of the root raised cosine filter.
time_idx : 1-D ndarray of floats
Array containing the time indices, in seconds, for
the impulse response.
"""
T_delta = 1/float(Fs)
time_idx = ((np.arange(N)-N/2))*T_delta
sample_num = np.arange(N)
h_rrc = np.zeros(N, dtype=float)
for x in sample_num:
t = (x-N/2)*T_delta
if t == 0.0:
h_rrc[x] = 1.0 - alpha + (4*alpha/np.pi)
elif alpha != 0 and t == Ts/(4*alpha):
h_rrc[x] = (alpha/np.sqrt(2))*(((1+2/np.pi)* \
(np.sin(np.pi/(4*alpha)))) + ((1-2/np.pi)*(np.cos(np.pi/(4*alpha)))))
elif alpha != 0 and t == -Ts/(4*alpha):
h_rrc[x] = (alpha/np.sqrt(2))*(((1+2/np.pi)* \
(np.sin(np.pi/(4*alpha)))) + ((1-2/np.pi)*(np.cos(np.pi/(4*alpha)))))
else:
h_rrc[x] = (np.sin(np.pi*t*(1-alpha)/Ts) + \
4*alpha*(t/Ts)*np.cos(np.pi*t*(1+alpha)/Ts))/ \
(np.pi*t*(1-(4*alpha*t/Ts)*(4*alpha*t/Ts))/Ts)
return time_idx, h_rrc
def gaussianfilter(N, alpha, Ts, Fs):
"""
Generates a gaussian filter (FIR) impulse response.
Parameters
----------
N : int
Length of the filter in samples.
alpha: float
Roll off factor (Valid values are [0, 1]).
Ts : float
Symbol period in seconds.
Fs : float
Sampling Rate in Hz.
Returns
-------
h_gaussian : 1-D ndarray of floats
Impulse response of the gaussian filter.
time_index : 1-D ndarray of floats
Array containing the time indices for the impulse response.
"""
T_delta = 1/float(Fs)
time_idx = ((np.arange(N)-N/2))*T_delta
h_gaussian = (np.sqrt(np.pi)/alpha)*np.exp(-((np.pi*time_index/alpha)*(np.pi*time_index/alpha)))
return time_idx, h_gaussian
def rectfilter(N, Ts, Fs):
h_rect = np.ones(N)
T_delta = 1/float(Fs)
time_idx = ((np.arange(N)-N/2))*T_delta
return time_idx, h_rect | en | 0.718218 | # Copyright 2012 <NAME> <<EMAIL>> # # This file is part of CommPy. # # CommPy is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # CommPy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. ============================================ Pulse Shaping Filters (:mod:`commpy.filters`) ============================================ .. autosummary:: :toctree: generated/ rcosfilter -- Class representing convolutional code trellis. rrcosfilter -- Convolutional Encoder. gaussianfilter -- Convolutional Decoder using the Viterbi algorithm. Generates a raised cosine (RC) filter (FIR) impulse response. Parameters ---------- N : int Length of the filter in samples. alpha: float Roll off factor (Valid values are [0, 1]). Ts : float Symbol period in seconds. Fs : float Sampling Rate in Hz. Returns ------- h_rc : 1-D ndarray (float) Impulse response of the raised cosine filter. time_idx : 1-D ndarray (float) Array containing the time indices, in seconds, for the impulse response. Generates a root raised cosine (RRC) filter (FIR) impulse response. Parameters ---------- N : int Length of the filter in samples. alpha: float Roll off factor (Valid values are [0, 1]). Ts : float Symbol period in seconds. Fs : float Sampling Rate in Hz. Returns --------- h_rrc : 1-D ndarray of floats Impulse response of the root raised cosine filter. time_idx : 1-D ndarray of floats Array containing the time indices, in seconds, for the impulse response. 
Generates a gaussian filter (FIR) impulse response. Parameters ---------- N : int Length of the filter in samples. alpha: float Roll off factor (Valid values are [0, 1]). Ts : float Symbol period in seconds. Fs : float Sampling Rate in Hz. Returns ------- h_gaussian : 1-D ndarray of floats Impulse response of the gaussian filter. time_index : 1-D ndarray of floats Array containing the time indices for the impulse response. | 2.175612 | 2 |
tests/integration/cases/c10.py | Ezra-H/autodist | 127 | 6618199 | <filename>tests/integration/cases/c10.py
import os
import numpy as np
import tensorflow as tf
from autodist.autodist import IS_AUTODIST_CHIEF
from autodist.const import ENV
from autodist.checkpoint.saver import Saver
from autodist.strategy import AllReduce, Parallax, PartitionedAR, RandomAxisPartitionAR
def main(autodist):
# Test saver on NFS system
TRUE_W = 3.0
TRUE_b = 2.0
NUM_EXAMPLES = 1000
EPOCHS = 1
seed = 456 if bool(ENV.AUTODIST_WORKER.val) else 123
np.random.seed(seed)
inputs = np.random.randn(NUM_EXAMPLES)
noises = np.random.randn(NUM_EXAMPLES)
outputs = inputs * TRUE_W + TRUE_b + noises
class MyIterator:
def initialize(self):
return tf.zeros(1)
def get_next(self):
return inputs
inputs_iterator = MyIterator()
with tf.Graph().as_default(), autodist.scope():
x = tf.compat.v1.placeholder(shape=[None], dtype=tf.float32)
y = tf.compat.v1.placeholder(shape=[None], dtype=tf.float32)
W = tf.Variable(5.0, name='W')
b = tf.Variable(0.0, name='b')
def train_step(x):
def f(x):
return W * x + b
def l(predicted_y, desired_y):
return tf.reduce_mean(tf.square(predicted_y - desired_y))
major_version, _, _ = tf.version.VERSION.split('.')
if major_version == '1':
optimizer = tf.train.GradientDescentOptimizer(0.01)
else:
optimizer = tf.optimizers.SGD(0.01)
with tf.GradientTape() as tape:
loss = l(f(x), y)
vs = [W, b]
gradients = tf.gradients(loss, vs)
train_op = optimizer.apply_gradients(zip(gradients, vs))
return loss, train_op, b
assert EPOCHS == 1
fetches = train_step(x)
saver = Saver([W, b])
session = autodist.create_distributed_session()
for epoch in range(EPOCHS):
l_val, _, _ = session.run(fetches=fetches, feed_dict={x: inputs_iterator.get_next(), y: outputs})
print('loss:', l_val)
# Seperate the fetches of var to guarantee the state
W_val, b_val = session.run([W, b])
# Try to save the two variables
checkpoint_dir = '/tmp/ckpt_c10/'
if not os.path.exists(checkpoint_dir):
os.mkdir(checkpoint_dir)
# Only save the model on master node if autodist is used with NFS.
checkpoint_suffix = 'c10'
checkpoint_name = checkpoint_dir + checkpoint_suffix
if IS_AUTODIST_CHIEF:
saver.save(session, checkpoint_name, global_step=epoch)
print('Checkpoint saved at {%s}' % checkpoint_name)
else:
print("Skip saving on worker nodes.")
# check the checkpoint existence only on master node
checkpoint = checkpoint_name + '-' + str(epoch)
if IS_AUTODIST_CHIEF:
assert(os.path.exists(checkpoint + '.meta')) # meta file
assert(os.path.exists(checkpoint + '.index')) # meta file
assert(os.path.exists(checkpoint + '.data-00000-of-00001')) # meta file
print('Checkpoint {} exists which saved by master.'.format(checkpoint))
else:
assert(not os.path.exists(checkpoint + '.meta')) # meta file
assert(not os.path.exists(checkpoint + '.index')) # meta file
assert(not os.path.exists(checkpoint + '.data-00000-of-00001')) # meta file
print("Checkpoint saving skipped on worker nodes confirmed.")
| <filename>tests/integration/cases/c10.py
import os
import numpy as np
import tensorflow as tf
from autodist.autodist import IS_AUTODIST_CHIEF
from autodist.const import ENV
from autodist.checkpoint.saver import Saver
from autodist.strategy import AllReduce, Parallax, PartitionedAR, RandomAxisPartitionAR
def main(autodist):
# Test saver on NFS system
TRUE_W = 3.0
TRUE_b = 2.0
NUM_EXAMPLES = 1000
EPOCHS = 1
seed = 456 if bool(ENV.AUTODIST_WORKER.val) else 123
np.random.seed(seed)
inputs = np.random.randn(NUM_EXAMPLES)
noises = np.random.randn(NUM_EXAMPLES)
outputs = inputs * TRUE_W + TRUE_b + noises
class MyIterator:
def initialize(self):
return tf.zeros(1)
def get_next(self):
return inputs
inputs_iterator = MyIterator()
with tf.Graph().as_default(), autodist.scope():
x = tf.compat.v1.placeholder(shape=[None], dtype=tf.float32)
y = tf.compat.v1.placeholder(shape=[None], dtype=tf.float32)
W = tf.Variable(5.0, name='W')
b = tf.Variable(0.0, name='b')
def train_step(x):
def f(x):
return W * x + b
def l(predicted_y, desired_y):
return tf.reduce_mean(tf.square(predicted_y - desired_y))
major_version, _, _ = tf.version.VERSION.split('.')
if major_version == '1':
optimizer = tf.train.GradientDescentOptimizer(0.01)
else:
optimizer = tf.optimizers.SGD(0.01)
with tf.GradientTape() as tape:
loss = l(f(x), y)
vs = [W, b]
gradients = tf.gradients(loss, vs)
train_op = optimizer.apply_gradients(zip(gradients, vs))
return loss, train_op, b
assert EPOCHS == 1
fetches = train_step(x)
saver = Saver([W, b])
session = autodist.create_distributed_session()
for epoch in range(EPOCHS):
l_val, _, _ = session.run(fetches=fetches, feed_dict={x: inputs_iterator.get_next(), y: outputs})
print('loss:', l_val)
# Seperate the fetches of var to guarantee the state
W_val, b_val = session.run([W, b])
# Try to save the two variables
checkpoint_dir = '/tmp/ckpt_c10/'
if not os.path.exists(checkpoint_dir):
os.mkdir(checkpoint_dir)
# Only save the model on master node if autodist is used with NFS.
checkpoint_suffix = 'c10'
checkpoint_name = checkpoint_dir + checkpoint_suffix
if IS_AUTODIST_CHIEF:
saver.save(session, checkpoint_name, global_step=epoch)
print('Checkpoint saved at {%s}' % checkpoint_name)
else:
print("Skip saving on worker nodes.")
# check the checkpoint existence only on master node
checkpoint = checkpoint_name + '-' + str(epoch)
if IS_AUTODIST_CHIEF:
assert(os.path.exists(checkpoint + '.meta')) # meta file
assert(os.path.exists(checkpoint + '.index')) # meta file
assert(os.path.exists(checkpoint + '.data-00000-of-00001')) # meta file
print('Checkpoint {} exists which saved by master.'.format(checkpoint))
else:
assert(not os.path.exists(checkpoint + '.meta')) # meta file
assert(not os.path.exists(checkpoint + '.index')) # meta file
assert(not os.path.exists(checkpoint + '.data-00000-of-00001')) # meta file
print("Checkpoint saving skipped on worker nodes confirmed.")
| en | 0.755975 | # Test saver on NFS system # Seperate the fetches of var to guarantee the state # Try to save the two variables # Only save the model on master node if autodist is used with NFS. # check the checkpoint existence only on master node # meta file # meta file # meta file # meta file # meta file # meta file | 2.032664 | 2 |
detect_secrets/plugins/aws.py | paulo-sampaio/detect-secrets | 2,212 | 6618200 | <filename>detect_secrets/plugins/aws.py<gh_stars>1000+
"""
This plugin searches for AWS key IDs
"""
import hashlib
import hmac
import re
import string
import textwrap
from datetime import datetime
from typing import cast
from typing import List
from typing import Union
import requests
from ..constants import VerifiedResult
from ..util.code_snippet import CodeSnippet
from .base import RegexBasedDetector
class AWSKeyDetector(RegexBasedDetector):
"""Scans for AWS keys."""
secret_type = 'AWS Access Key'
denylist = (
re.compile(r'AKIA[0-9A-Z]{16}'),
# This examines the variable name to identify AWS secret tokens.
# The order is important since we want to prefer finding `AKIA`-based
# keys (since they can be verified), rather than the secret tokens.
re.compile(r'aws.{0,20}?[\'\"]([0-9a-zA-Z/+]{40})[\'\"]'),
)
def verify( # type: ignore[override] # noqa: F821
self,
secret: str,
context: CodeSnippet,
) -> VerifiedResult:
# As this verification process looks for multi-factor secrets, by assuming that
# the identified secret token is the key ID (then looking for the corresponding secret).
# we quit early if it fails our assumptions.
if not self.denylist[0].match(secret):
return VerifiedResult.UNVERIFIED
secret_access_key_candidates = get_secret_access_keys(context)
if not secret_access_key_candidates:
return VerifiedResult.UNVERIFIED
for candidate in secret_access_key_candidates:
if verify_aws_secret_access_key(secret, candidate):
return VerifiedResult.VERIFIED_TRUE
return VerifiedResult.VERIFIED_FALSE
def get_secret_access_keys(content: CodeSnippet) -> List[str]:
# AWS secret access keys are 40 characters long.
# e.g. some_function('AKIA...', '[secret key]')
# e.g. secret_access_key = '[secret key]'
regex = re.compile(
r'(=|,|\() *([\'"]?)([%s]{40})(\2)(\))?' % (
re.escape(string.ascii_letters + string.digits + '+/=')
),
)
return [
match[2]
for line in content
for match in regex.findall(line)
]
def verify_aws_secret_access_key(key: str, secret: str) -> bool: # pragma: no cover
"""
Using requests, because we don't want to require boto3 for this one
optional verification step.
Loosely based off:
https://docs.aws.amazon.com/general/latest/gr/sigv4-signed-request-examples.html
"""
now = datetime.utcnow()
amazon_datetime = now.strftime('%Y%m%dT%H%M%SZ')
headers = {
# This is a required header for the signing process
'Host': 'sts.amazonaws.com',
'X-Amz-Date': amazon_datetime,
}
body = {
'Action': 'GetCallerIdentity',
'Version': '2011-06-15',
}
# Step #1: Canonical Request
signed_headers = ';'.join(
map(
lambda x: x.lower(),
headers.keys(),
),
)
canonical_request = textwrap.dedent("""
POST
/
{headers}
{signed_headers}
{hashed_payload}
""")[1:-1].format(
headers='\n'.join([
'{}:{}'.format(header.lower(), value)
for header, value in headers.items()
]),
signed_headers=signed_headers,
# Poor man's method, but works for this use case.
hashed_payload=hashlib.sha256(
'&'.join([
'{}={}'.format(header, value)
for header, value in body.items()
]).encode('utf-8'),
).hexdigest(),
)
# Step #2: String to Sign
region = 'us-east-1'
scope = '{request_date}/{region}/sts/aws4_request'.format(
request_date=now.strftime('%Y%m%d'),
# STS is a global service; this is just for latency control.
region=region,
)
string_to_sign = textwrap.dedent("""
AWS4-HMAC-SHA256
{request_datetime}
{scope}
{hashed_canonical_request}
""")[1:-1].format(
request_datetime=amazon_datetime,
scope=scope,
hashed_canonical_request=hashlib.sha256(
canonical_request.encode('utf-8'),
).hexdigest(),
)
# Step #3: Calculate signature
signing_key = _sign(
cast(
bytes, _sign(
cast(
bytes, _sign(
cast(
bytes, _sign(
'AWS4{}'.format(secret).encode('utf-8'),
now.strftime('%Y%m%d'),
),
),
region,
),
),
'sts',
),
),
'aws4_request',
)
signature = _sign(
cast(bytes, signing_key),
string_to_sign,
hex=True,
)
# Step #4: Add to request headers
headers['Authorization'] = (
'AWS4-HMAC-SHA256 '
f'Credential={key}/{scope}, '
f'SignedHeaders={signed_headers}, '
f'Signature={cast(str, signature)}'
)
# Step #5: Finally send the request
response = requests.post(
'https://sts.amazonaws.com',
headers=headers,
data=body,
)
if response.status_code == 403:
return False
return True
def _sign(key: bytes, message: str, hex: bool = False) -> Union[str, bytes]: # pragma: no cover
value = hmac.new(key, message.encode('utf-8'), hashlib.sha256)
if not hex:
return value.digest()
return value.hexdigest()
| <filename>detect_secrets/plugins/aws.py<gh_stars>1000+
"""
This plugin searches for AWS key IDs
"""
import hashlib
import hmac
import re
import string
import textwrap
from datetime import datetime
from typing import cast
from typing import List
from typing import Union
import requests
from ..constants import VerifiedResult
from ..util.code_snippet import CodeSnippet
from .base import RegexBasedDetector
class AWSKeyDetector(RegexBasedDetector):
"""Scans for AWS keys."""
secret_type = 'AWS Access Key'
denylist = (
re.compile(r'AKIA[0-9A-Z]{16}'),
# This examines the variable name to identify AWS secret tokens.
# The order is important since we want to prefer finding `AKIA`-based
# keys (since they can be verified), rather than the secret tokens.
re.compile(r'aws.{0,20}?[\'\"]([0-9a-zA-Z/+]{40})[\'\"]'),
)
def verify( # type: ignore[override] # noqa: F821
self,
secret: str,
context: CodeSnippet,
) -> VerifiedResult:
# As this verification process looks for multi-factor secrets, by assuming that
# the identified secret token is the key ID (then looking for the corresponding secret).
# we quit early if it fails our assumptions.
if not self.denylist[0].match(secret):
return VerifiedResult.UNVERIFIED
secret_access_key_candidates = get_secret_access_keys(context)
if not secret_access_key_candidates:
return VerifiedResult.UNVERIFIED
for candidate in secret_access_key_candidates:
if verify_aws_secret_access_key(secret, candidate):
return VerifiedResult.VERIFIED_TRUE
return VerifiedResult.VERIFIED_FALSE
def get_secret_access_keys(content: CodeSnippet) -> List[str]:
# AWS secret access keys are 40 characters long.
# e.g. some_function('AKIA...', '[secret key]')
# e.g. secret_access_key = '[secret key]'
regex = re.compile(
r'(=|,|\() *([\'"]?)([%s]{40})(\2)(\))?' % (
re.escape(string.ascii_letters + string.digits + '+/=')
),
)
return [
match[2]
for line in content
for match in regex.findall(line)
]
def verify_aws_secret_access_key(key: str, secret: str) -> bool: # pragma: no cover
"""
Using requests, because we don't want to require boto3 for this one
optional verification step.
Loosely based off:
https://docs.aws.amazon.com/general/latest/gr/sigv4-signed-request-examples.html
"""
now = datetime.utcnow()
amazon_datetime = now.strftime('%Y%m%dT%H%M%SZ')
headers = {
# This is a required header for the signing process
'Host': 'sts.amazonaws.com',
'X-Amz-Date': amazon_datetime,
}
body = {
'Action': 'GetCallerIdentity',
'Version': '2011-06-15',
}
# Step #1: Canonical Request
signed_headers = ';'.join(
map(
lambda x: x.lower(),
headers.keys(),
),
)
canonical_request = textwrap.dedent("""
POST
/
{headers}
{signed_headers}
{hashed_payload}
""")[1:-1].format(
headers='\n'.join([
'{}:{}'.format(header.lower(), value)
for header, value in headers.items()
]),
signed_headers=signed_headers,
# Poor man's method, but works for this use case.
hashed_payload=hashlib.sha256(
'&'.join([
'{}={}'.format(header, value)
for header, value in body.items()
]).encode('utf-8'),
).hexdigest(),
)
# Step #2: String to Sign
region = 'us-east-1'
scope = '{request_date}/{region}/sts/aws4_request'.format(
request_date=now.strftime('%Y%m%d'),
# STS is a global service; this is just for latency control.
region=region,
)
string_to_sign = textwrap.dedent("""
AWS4-HMAC-SHA256
{request_datetime}
{scope}
{hashed_canonical_request}
""")[1:-1].format(
request_datetime=amazon_datetime,
scope=scope,
hashed_canonical_request=hashlib.sha256(
canonical_request.encode('utf-8'),
).hexdigest(),
)
# Step #3: Calculate signature
signing_key = _sign(
cast(
bytes, _sign(
cast(
bytes, _sign(
cast(
bytes, _sign(
'AWS4{}'.format(secret).encode('utf-8'),
now.strftime('%Y%m%d'),
),
),
region,
),
),
'sts',
),
),
'aws4_request',
)
signature = _sign(
cast(bytes, signing_key),
string_to_sign,
hex=True,
)
# Step #4: Add to request headers
headers['Authorization'] = (
'AWS4-HMAC-SHA256 '
f'Credential={key}/{scope}, '
f'SignedHeaders={signed_headers}, '
f'Signature={cast(str, signature)}'
)
# Step #5: Finally send the request
response = requests.post(
'https://sts.amazonaws.com',
headers=headers,
data=body,
)
if response.status_code == 403:
return False
return True
def _sign(key: bytes, message: str, hex: bool = False) -> Union[str, bytes]: # pragma: no cover
value = hmac.new(key, message.encode('utf-8'), hashlib.sha256)
if not hex:
return value.digest()
return value.hexdigest()
| en | 0.809203 | This plugin searches for AWS key IDs Scans for AWS keys. # This examines the variable name to identify AWS secret tokens. # The order is important since we want to prefer finding `AKIA`-based # keys (since they can be verified), rather than the secret tokens. # type: ignore[override] # noqa: F821 # As this verification process looks for multi-factor secrets, by assuming that # the identified secret token is the key ID (then looking for the corresponding secret). # we quit early if it fails our assumptions. # AWS secret access keys are 40 characters long. # e.g. some_function('AKIA...', '[secret key]') # e.g. secret_access_key = '[secret key]' # pragma: no cover Using requests, because we don't want to require boto3 for this one optional verification step. Loosely based off: https://docs.aws.amazon.com/general/latest/gr/sigv4-signed-request-examples.html # This is a required header for the signing process # Step #1: Canonical Request POST / {headers} {signed_headers} {hashed_payload} # Poor man's method, but works for this use case. # Step #2: String to Sign # STS is a global service; this is just for latency control. AWS4-HMAC-SHA256 {request_datetime} {scope} {hashed_canonical_request} # Step #3: Calculate signature # Step #4: Add to request headers # Step #5: Finally send the request # pragma: no cover | 2.626962 | 3 |
Oasis_chiller/OasisChiller_LL.py | vstadnytskyi/drivers | 0 | 6618201 | <gh_stars>0
"""
Oasis Chiller Communication Low Level code
"""
from numpy import nan, mean, std, asarray, array, concatenate, delete, round, vstack, hstack, zeros, transpose, split
from serial import Serial
from time import time, sleep, clock
import sys
import os.path
import struct
from pdb import pm
from time import gmtime, strftime
import logging
from persistent_property import persistent_property
from struct import pack, unpack
from timeit import Timer
__version__ = '0.0.1' #
__date__ = "01-11-2018"
class driver(object):
    """Low-level serial driver for an Oasis chiller.

    Scans COM ports on construction to locate the device, then exposes raw
    serial primitives (_write/_readN/_inquire) and temperature commands
    built on the chiller's single-byte command protocol.

    NOTE(review): the byte handling (concatenating pack() output with str
    commands such as '\xe1') assumes Python 2 / pyserial 2.x string
    semantics -- confirm before running under Python 3.
    """

    def __init__(self):
        # tested dec 17, 2017
        print('bbb')
        self._find_port()
        # drop any stale bytes left in the OS buffers from a previous session
        self.ser.flushInput()
        self.ser.flushOutput()
        print("initialization of the driver is complete")

    def _find_port(self):
        """Scan COM0..COM255 and keep the first port that answers the 'A' probe."""
        # The probe sends command 'A' and expects the first reply byte to be
        # 'A' as well; any port that fails the probe is closed again.
        for i in range(256):
            com_port = 'COM' + str(i)
            try:
                self.ser = Serial(com_port, baudrate=9600, timeout=0.1)
                sleep(2)  # give the device time to settle after the port opens
                try:
                    print('Oasis is found at port %r' % i)
                    if self._inquire('A', 3)[0] == 'A':
                        print("the requested device is connected to COM Port %r" % self.ser.port)
                    else:
                        print("Oasis is not found")
                        self.ser.close()
                        print("closing com port")
                except:  # probe failed: not our device, release the port
                    self.ser.close()
            except:  # port does not exist or is busy -- keep scanning
                pass

    # --- basic serial communication primitives ---

    def _readall(self):
        """Read and return every byte currently buffered on the port."""
        # tested dec 17, 2017
        return self.ser.readall()

    def _readN(self, N):
        """Read exactly N bytes if they are already buffered, else return nan."""
        # tested dec 17, 2017
        data = ""
        if self._waiting()[0] >= N:
            data = self.ser.read(N)
            if len(data) != N:
                # BUG FIX: the original format string used '%N', an invalid
                # %-conversion specifier that raised ValueError on this path.
                print("%r bytes were requested and only %r were read" % (N, len(data)))
                data = nan
        else:
            data = nan
        return data

    def _write(self, command):
        """Flush the output buffer and send *command*."""
        # tested dec 17, 2017
        self.ser.flushOutput()
        self.ser.write(command)

    def _flush(self):
        """Discard both the input and the output buffer."""
        # tested dec 17, 2017
        self.ser.flushInput()
        self.ser.flushOutput()

    def _inquire(self, command, N):
        """Send *command* and wait until exactly N reply bytes have arrived.

        Returns the N bytes read, or nan when the read fails.
        NOTE(review): the wait loop never times out, so a device that
        replies with a different byte count blocks forever -- this matches
        the original behavior.
        """
        # tested dec 17, 2017
        self.ser.write(command)
        sleep(0.3)
        while self.ser.inWaiting() != N:
            sleep(0.1)
        if self.ser.inWaiting() == N:
            result = self._readN(N)
        else:
            result = nan
        return result

    def _waiting(self):
        """Return [bytes pending on input, bytes pending on output]."""
        # tested dec 17, 2017
        return [self.ser.inWaiting(), self.ser.outWaiting()]

    def _close_port(self):
        # tested dec 17, 2017
        self.ser.close()

    def _open_port(self):
        # tested dec 17, 2017
        self.ser.open()

    # --- chiller commands ---

    def set_temperature(self, temperature):
        """Program the chiller setpoint (degrees C, 0.1 degree resolution)."""
        # The protocol encodes the setpoint as a little-endian int16 holding
        # tenths of a degree, appended to the 0xE1 command byte.
        local_byte = pack('h', round(temperature * 10, 0))
        byte_temp = local_byte[0] + local_byte[1]
        self._inquire('\xe1' + byte_temp, 1)

    def get_set_temperature(self):
        """Return the programmed setpoint in degrees C."""
        res = self._inquire('\xc1', 3)
        temperature = unpack('h', res[1:3])[0] / 10.
        return temperature

    def get_actual_temperature(self):
        """Return the measured coolant temperature in degrees C."""
        res = self._inquire('\xc9', 3)
        temperature = unpack('h', res[1:3])[0] / 10.
        return temperature

    def get_faults(self):
        """Return (flag, code): flag is 1 when any fault bit is set, else 0."""
        res_temp = self._inquire('\xc8', 2)
        res = unpack('b', res_temp[1])[0]
        if res == 0:
            result = (0, res)
        else:
            result = (1, res)
        return result
# Manual smoke test: instantiate the driver (which runs the COM-port scan)
# and print example commands the operator can paste into an interactive shell.
if __name__ == "__main__": #for testing
    dev = driver()
    print('the object dev(port %r) was created. Few test from below can be used.' % dev.ser.port)
    print('dev.get_actual_temperature()')
    print('dev.set_temperature(15)')
    print('dev.get_set_temperature()')
| """
Oasis Chiller Communication Low Level code
"""
from numpy import nan, mean, std, asarray, array, concatenate, delete, round, vstack, hstack, zeros, transpose, split
from serial import Serial
from time import time, sleep, clock
import sys
import os.path
import struct
from pdb import pm
from time import gmtime, strftime
import logging
from persistent_property import persistent_property
from struct import pack, unpack
from timeit import Timer
__version__ = '0.0.1' #
__date__ = "01-11-2018"
class driver(object):
def __init__(self):
#tested dec 17, 2017
print('bbb')
self._find_port()
self.ser.flushInput()
self.ser.flushOutput()
print("initialization of the driver is complete")
def _find_port(self):
#this function will scan com ports and find DI-245 devices by sending command A1 and receiving a word 2450 back
#import serial.tools.list_ports
#lst = serial.tools.list_ports.comports()
#print([comport.device for comport in serial.tools.list_ports.comports()])
for i in range(256):
com_port = 'COM' + str(i)
#print('trying ' + com_port)
try:
self.ser = Serial(com_port, baudrate=9600, timeout=0.1)
sleep(2)
try:
print('Oasis is found at port %r' % i)
if self._inquire('A',3)[0] == 'A':
print("the requested device is connected to COM Port %r" % self.ser.port)
else:
print("Oasis is not found")
self.ser.close()
print("closing com port")
except:
self.ser.close()
except:
pass
"""Set and Get persistent_property"""
# functions for persistent properties if needed
"""Basic serial communication functions"""
def _readall(self):
#tested dec 17, 2017
return self.ser.readall()
def _readN(self,N):
#tested dec 17, 2017
data = ""
if self._waiting()[0] >= N:
data = self.ser.read(N)
if len(data) != N:
print("%r where requested to read and only %N where read" % (N,len(data)))
data = nan
else:
data = nan
return data
def _write(self,command):
#tested dec 17, 2017
self.ser.flushOutput()
self.ser.write(command)
def _flush(self):
#tested dec 17, 2017
self.ser.flushInput()
self.ser.flushOutput()
def _inquire(self,command, N):
#tested dec 17, 2017
self.ser.write(command)
sleep(0.3)
while self.ser.inWaiting() != N:
sleep(0.1)
if self.ser.inWaiting() == N:
result = self._readN(N)
else:
result = nan
return result
def _waiting(self):
#tested dec 17, 2017
return [self.ser.inWaiting(),self.ser.outWaiting()]
def _close_port(self):
#tested dec 17, 2017
self.ser.close()
def _open_port(self):
#tested dec 17, 2017
self.ser.open()
def set_temperature(self,temperature):
local_byte = pack('h',round(temperature*10,0))
byte_temp = local_byte[0]+local_byte[1]
self._inquire('\xe1'+byte_temp,1)
def get_set_temperature(self):
res = self._inquire('\xc1',3)
temperature = unpack('h',res[1:3])[0]/10.
return temperature
def get_actual_temperature(self):
res = self._inquire('\xc9',3)
temperature = unpack('h',res[1:3])[0]/10.
return temperature
def get_faults(self):
res_temp = self._inquire('\xc8',2)
res = unpack('b',res_temp[1])[0]
if res == 0:
result = (0,res)
else:
result = (1,res)
return result
if __name__ == "__main__": #for testing
dev = driver()
print('the object dev(port %r) was created. Few test from below can be used.' % dev.ser.port)
print('dev.get_actual_temperature()')
print('dev.set_temperature(15)')
print('dev.get_set_temperature()') | en | 0.686785 | Oasis Chiller Communication Low Level code # #tested dec 17, 2017 #this function will scan com ports and find DI-245 devices by sending command A1 and receiving a word 2450 back #import serial.tools.list_ports #lst = serial.tools.list_ports.comports() #print([comport.device for comport in serial.tools.list_ports.comports()]) #print('trying ' + com_port) Set and Get persistent_property # functions for persistent properties if needed Basic serial communication functions #tested dec 17, 2017 #tested dec 17, 2017 #tested dec 17, 2017 #tested dec 17, 2017 #tested dec 17, 2017 #tested dec 17, 2017 #tested dec 17, 2017 #tested dec 17, 2017 #for testing | 2.593733 | 3 |
comps/models/heatlist_dancer.py | dlanghorne0428/dancesport-tracker-projec | 0 | 6618202 | <filename>comps/models/heatlist_dancer.py
from django.db import models
from comps.models.comp import Comp
from rankings.models import Dancer
class Heatlist_Dancer(models.Model):
    '''Minimal information about one dancer parsed out of a heat list.'''
    # dancer name, stored in "last, first middle" order
    name = models.CharField(max_length=100, blank=True)
    # identifier used to pull this dancer's scoresheet results
    code = models.CharField(max_length = 20)
    # the ranking Dancer record this heatlist entry resolves to
    alias = models.ForeignKey("rankings.Dancer", on_delete=models.SET_NULL, null=True)
    # the competition whose heat list produced this entry
    comp = models.ForeignKey("comps.Comp", on_delete=models.CASCADE, null=True)
    # set when the raw name could not be auto-converted and needs user attention
    formatting_needed = models.BooleanField(default=False)

    def format_name(self, orig_name, simple=True, split_on=1):
        '''Convert *orig_name* into "last, first" order.

        With simple=True only two-word names are converted; anything else
        flags the record for manual formatting and returns None.  With
        simple=False the comma is placed after the first *split_on* words.
        '''
        parts = orig_name.split()
        if simple:
            if len(parts) == 2:
                return parts[1] + ', ' + parts[0]
            print("format needed: " + orig_name)
            self.formatting_needed = True
            return None
        if len(parts) == 1:
            return orig_name
        surname = " ".join(parts[split_on:])
        given = "".join(" " + word for word in parts[:split_on])
        return surname + "," + given

    def load_from_comp_mngr(self, line):
        '''Populate name and code from one line of a CompMngr heat list.'''
        # the name sits between a fixed-width prefix and the closing </td> tag
        name_end = line.find("</td>")
        self.name = line[8:name_end]
        # the code follows the TABLE_CODE_ marker, terminated by a single quote
        code_start = line.find("TABLE_CODE_") + len("TABLE_CODE_")
        self.code = line[code_start:line.find("'", code_start)]

    def load_from_comp_org(self, line):
        '''Populate name and code from one line of a CompOrganizer heat list.'''
        # the dancer id is the quoted value of the "id" key
        id_start = line.find('"id":"') + len('"id":"')
        self.code = line[id_start:line.find('"', id_start)]
        if self.code != "0":
            # the dancer name is the quoted value of the "name" key
            name_start = line.find('"name":"') + len('"name":"')
            raw_name = line[name_start:line.find('"', name_start)]
            converted = self.format_name(raw_name)
            self.name = raw_name if converted is None else converted
        else:
            print("Error - invalid code")

    def load_from_ndca_premier(self, line):
        '''Populate name and code from one line of an NDCA Premier heat list.'''
        pieces = line.split(">")
        raw_name = pieces[1]
        converted = self.format_name(raw_name)
        self.name = raw_name if converted is None else converted
        # the quoted id follows "competitor=" inside the anchor tag
        marker_pos = pieces[0].find("competitor=") + len("competitor=")
        self.code = pieces[0][marker_pos + 1:-1]

    def load_from_ndca_premier_feed(self, json_record):
        '''Populate name and code from one JSON record of an NDCA Premier feed.'''
        name_parts = json_record["Name"]
        two_clean_parts = (len(name_parts) == 2 and
                           name_parts[0] is not None and
                           name_parts[1] is not None)
        if two_clean_parts:
            self.name = name_parts[1] + ", " + name_parts[0]
        else:
            # unusual shape: keep the original order and flag for review
            self.formatting_needed = True
            self.name = name_parts[0]
            for extra in name_parts[1:]:
                if extra is not None:
                    self.name += " " + extra
        self.code = json_record["ID"]

    def load_from_o2cm(self, line):
        '''Populate name and code from one line of an o2cm.com heat list.'''
        pieces = line.split(">")
        self.name = pieces[1]
        # the quoted id follows "VALUE=" inside the tag
        value_pos = pieces[0].find("VALUE=") + len("VALUE=")
        self.code = pieces[0][value_pos + 1:-1]

    def load_from_file(self, line):
        '''Populate name and code from one line of the custom file format.'''
        pieces = line.split(":")
        self.name = pieces[0]
        self.code = pieces[1]

    def __str__(self):
        return "{} {}".format(self.name, str(self.comp))
| <filename>comps/models/heatlist_dancer.py
from django.db import models
from comps.models.comp import Comp
from rankings.models import Dancer
class Heatlist_Dancer(models.Model):
'''Define minimal info about a dancer read in from a heatlist.'''
# the name field is in last, first middle format
name = models.CharField(max_length=100, blank=True)
# the code field is used to obtain scoresheet results for this dancer
code = models.CharField(max_length = 20)
# the dancer object that matches this name
alias = models.ForeignKey("rankings.Dancer", on_delete=models.SET_NULL, null=True)
# the comp object that created this heatlist_dancer
comp = models.ForeignKey("comps.Comp", on_delete=models.CASCADE, null=True)
# flag to indicate if the name needs additional formatting by the user
formatting_needed = models.BooleanField(default=False)
def format_name(self, orig_name, simple=True, split_on=1):
'''This method converts a name into last, first format.
If simple is true, the method will not attempt to format names with three or more fields.
If simple is false, the split_on field will determine where to put the comma'''
fields = orig_name.split()
if simple:
if len(fields) == 2:
return fields[1] + ', ' + fields[0]
else:
print("format needed: " + orig_name)
self.formatting_needed = True
return None
elif len(fields) == 1:
return(orig_name)
else:
name = ""
for f in range(split_on, len(fields)):
if f > split_on:
name += " "
name += fields[f]
name += ","
for f in range(0, split_on):
name += " " + fields[f]
return name
def load_from_comp_mngr(self, line):
'''This method populates the object from a line of text from a CompMngr heatlist.'''
# get the name
start_pos = 8
end_pos = line.find("</td>")
self.name = line[start_pos:end_pos]
# find the code
start_pos = line.find("TABLE_CODE_") + len("TABLE_CODE_")
end_pos = line.find("'", start_pos)
self.code = line[start_pos:end_pos]
def load_from_comp_org(self, line):
'''This method populates the object from a line of text from a heatlist in CompOrganizer format.'''
# find the ID code for this dancer
start_pos = line.find('"id":"') + len('"id":"')
end_pos = line.find('"', start_pos)
self.code = line[start_pos:end_pos]
if self.code != "0":
# find the dancer's name
start_pos = line.find('"name":"') + len('"name":"')
end_pos = line.find('"', start_pos)
orig_name = line[start_pos:end_pos]
new_name = self.format_name(orig_name)
if new_name is None:
self.name = orig_name
else:
self.name = new_name
else:
print("Error - invalid code")
def load_from_ndca_premier(self, line):
'''This method populates the object from a line of text from a heatlist in NDCA Premier format.'''
# find the dancer's name
fields = line.split(">")
orig_name = fields[1]
new_name = self.format_name(orig_name)
if new_name is None:
self.name = orig_name
else:
self.name = new_name
# find the ID code for this dancer
pos = fields[0].find("competitor=") + len("competitor=")
self.code = fields[0][pos+1:-1]
def load_from_ndca_premier_feed(self, json_record):
'''This method populates the object from a JSON object from a heatlist in NDCA Premier format.'''
# find the dancer's name
name_field = json_record["Name"]
if len(name_field) == 2 and name_field[0] is not None and name_field[1] is not None:
self.name = name_field[1] + ", " + name_field[0]
else:
self.formatting_needed = True
self.name = name_field[0]
for f in range(1, len(name_field)):
if name_field[f] is not None:
self.name += " "
self.name += name_field[f]
# find the ID code for this dancer
self.code = json_record["ID"]
def load_from_o2cm(self, line):
'''This method populates the object from a line of text from a heatlist in o2cm.com format.'''
# find the dancer's name
fields = line.split(">")
self.name = fields[1]
# find the ID code for this dancer
pos = fields[0].find("VALUE=") + len("VALUE=")
self.code = fields[0][pos+1:-1]
def load_from_file(self, line):
'''This method populates the object from a line of text from a heatlist in custom file format.'''
# find the dancer's name
fields = line.split(":")
self.name = fields[0]
self.code = fields[1]
def __str__(self):
return self.name + ' ' + str(self.comp)
| en | 0.854739 | Define minimal info about a dancer read in from a heatlist. # the name field is in last, first middle format # the code field is used to obtain scoresheet results for this dancer # the dancer object that matches this name # the comp object that created this heatlist_dancer # flag to indicate if the name needs additional formatting by the user This method converts a name into last, first format. If simple is true, the method will not attempt to format names with three or more fields. If simple is false, the split_on field will determine where to put the comma This method populates the object from a line of text from a CompMngr heatlist. # get the name # find the code This method populates the object from a line of text from a heatlist in CompOrganizer format. # find the ID code for this dancer # find the dancer's name This method populates the object from a line of text from a heatlist in NDCA Premier format. # find the dancer's name # find the ID code for this dancer This method populates the object from a JSON object from a heatlist in NDCA Premier format. # find the dancer's name # find the ID code for this dancer This method populates the object from a line of text from a heatlist in o2cm.com format. # find the dancer's name # find the ID code for this dancer This method populates the object from a line of text from a heatlist in custom file format. # find the dancer's name | 2.992829 | 3 |
src/sample/zad1.py | TestowanieAutomatyczneUG/laboratorium-7-Sienkowski99 | 0 | 6618203 | <filename>src/sample/zad1.py
class Hamming:
    """Compute the Hamming distance between two equal-length sequences."""

    def distance(self, a, b):
        """Return the number of positions at which *a* and *b* differ.

        Raises:
            ValueError: if the sequences differ in length (the Hamming
                distance is only defined for equal-length sequences).
        """
        if len(a) != len(b):
            # original raised ValueError('err'); same type, clearer message
            raise ValueError('sequences must have equal length')
        # idiomatic pairwise comparison instead of an index-based count loop
        return sum(1 for x, y in zip(a, b) if x != y)
class Hamming:
def distance(self, a, b):
if len(a) != len(b):
raise ValueError('err')
result = 0
for i in range(0, len(a)):
if a[i] != b[i]:
result += 1
return result | none | 1 | 3.201353 | 3 | |
setup.py | ChrisDickson/URLShortener | 0 | 6618204 | <filename>setup.py
from setuptools import find_packages, setup
# Packaging metadata for the URLShortener Flask application.
setup(
    name='URLShortener',
    version='0.1',
    author="<NAME>",
    author_email="<EMAIL>",
    url="https://github.com/ChrisDickson/URLShortener",
    packages=find_packages(),  # auto-discover all packages in the project
    include_package_data=True,  # ship non-code files declared in MANIFEST.in
    zip_safe=False,
    install_requires=[
        'flask', 'Flask-MySQL'
    ],
    classifiers=[
        "Programming Language :: Python :: 3",
    ],
)
| <filename>setup.py
from setuptools import find_packages, setup
setup(
name='URLShortener',
version='0.1',
author="<NAME>",
author_email="<EMAIL>",
url="https://github.com/ChrisDickson/URLShortener",
packages=find_packages(),
include_package_data=True,
zip_safe=False,
install_requires=[
'flask', 'Flask-MySQL'
],
classifiers=[
"Programming Language :: Python :: 3",
],
)
| none | 1 | 1.278827 | 1 | |
bookstore/users/models.py | xitizbasnet/book-store | 0 | 6618205 | <filename>bookstore/users/models.py<gh_stars>0
from django.db import models
from django.contrib.auth.base_user import AbstractBaseUser
# Create your models here.
class User(AbstractBaseUser):
    """Custom user model stub for the bookstore app.

    NOTE(review): AbstractBaseUser subclasses normally declare
    USERNAME_FIELD (and usually a custom manager); confirm whether this
    model is intentionally left unconfigured.
    """
    pass
from django.db import models
from django.contrib.auth.base_user import AbstractBaseUser
# Create your models here.
class User(AbstractBaseUser):
pass | en | 0.963489 | # Create your models here. | 1.569527 | 2 |
h2o-py/tests/testdir_misc/pyunit_expr_as_list.py | suhassatish/h2o-dev | 0 | 6618206 | <reponame>suhassatish/h2o-dev
import sys
sys.path.insert(1, "../../")
import h2o
from h2o.expr import Expr
def expr_as_list(ip,port):
    """Exercise h2o.as_list() on Expr objects in each of their states.

    Both indexing forms (expr[int] and expr[tuple]) are checked while the
    underlying expression is pending, remote, and local, since as_list must
    return the same values in all three cases.  Python 2 syntax; intended
    to be driven by h2o.run_test.
    """
    # Connect to h2o
    h2o.init(ip,port)
    iris = h2o.import_frame(path=h2o.locate("smalldata/iris/iris_wheader.csv"))
    print "iris:"
    iris.show()
    ###################################################################
    # expr[int], expr is pending
    res = 2 - iris
    res2 = h2o.as_list(res[0])
    assert abs(res2[3][0] - -2.6) < 1e-10 and abs(res2[17][0] - -3.1) < 1e-10 and abs(res2[24][0] - -2.8) < 1e-10, \
        "incorrect values"
    # expr[int], expr is remote (the first as_list call materialized it)
    res3 = h2o.as_list(res[0])
    assert abs(res3[3][0] - -2.6) < 1e-10 and abs(res3[17][0] - -3.1) < 1e-10 and abs(res3[24][0] - -2.8) < 1e-10, \
        "incorrect values"
    # expr[int], expr is local
    expr = h2o.as_list(Expr([1,2,3]))
    res4 = expr[2]
    assert res4 == 3, "incorrect values"
    # expr[tuple], expr._data is pending
    res = 2 - iris
    res5 = h2o.as_list(res[5,2])
    assert abs(res5[0][0] - 0.3) < 1e-10, "incorrect values"
    # expr[tuple], expr._data is remote
    res6 = h2o.as_list(res[5,2])
    assert abs(res6[0][0] - 0.3) < 1e-10, "incorrect values"
    # expr[tuple], expr._data is local
    expr = h2o.as_list(Expr([[1,2,3], [4,5,6]]))
    assert expr[1][1] == 5, "incorrect values"
# Entry point: run this check through the h2o test harness.
if __name__ == "__main__":
    h2o.run_test(sys.argv, expr_as_list)
sys.path.insert(1, "../../")
import h2o
from h2o.expr import Expr
def expr_as_list(ip,port):
# Connect to h2o
h2o.init(ip,port)
iris = h2o.import_frame(path=h2o.locate("smalldata/iris/iris_wheader.csv"))
print "iris:"
iris.show()
###################################################################
# expr[int], expr is pending
res = 2 - iris
res2 = h2o.as_list(res[0])
assert abs(res2[3][0] - -2.6) < 1e-10 and abs(res2[17][0] - -3.1) < 1e-10 and abs(res2[24][0] - -2.8) < 1e-10, \
"incorrect values"
# expr[int], expr is remote
res3 = h2o.as_list(res[0])
assert abs(res3[3][0] - -2.6) < 1e-10 and abs(res3[17][0] - -3.1) < 1e-10 and abs(res3[24][0] - -2.8) < 1e-10, \
"incorrect values"
# expr[int], expr is local
expr = h2o.as_list(Expr([1,2,3]))
res4 = expr[2]
assert res4 == 3, "incorrect values"
# expr[tuple], expr._data is pending
res = 2 - iris
res5 = h2o.as_list(res[5,2])
assert abs(res5[0][0] - 0.3) < 1e-10, "incorrect values"
# expr[tuple], expr._data is remote
res6 = h2o.as_list(res[5,2])
assert abs(res6[0][0] - 0.3) < 1e-10, "incorrect values"
# expr[tuple], expr._data is local
expr = h2o.as_list(Expr([[1,2,3], [4,5,6]]))
assert expr[1][1] == 5, "incorrect values"
if __name__ == "__main__":
h2o.run_test(sys.argv, expr_as_list) | en | 0.280093 | # Connect to h2o ################################################################### # expr[int], expr is pending # expr[int], expr is remote # expr[int], expr is local # expr[tuple], expr._data is pending # expr[tuple], expr._data is remote # expr[tuple], expr._data is local | 2.759959 | 3 |
surface/constants/vision.py | ymber/surface | 5 | 6618207 | """
Computer vision constants.
"""
| """
Computer vision constants.
"""
| en | 0.679871 | Computer vision constants. | 1.006566 | 1 |
GED.py | jensengroup/GED | 0 | 6618208 | <filename>GED.py
'''
Written by <NAME>, 2020
'''
from rdkit import Chem
import networkx as nx
def get_graph(mol):
    """Convert an RDKit molecule into a weighted NetworkX graph.

    The graph is built from the bond-order adjacency matrix (useBO=True),
    with each atom's atomic number written onto the matrix diagonal, so
    self-loop weights encode element identity and edge weights encode
    bond order.

    NOTE(review): Chem.Kekulize mutates the caller's mol in place.
    NOTE(review): nx.from_numpy_matrix was removed in NetworkX 3.0
    (from_numpy_array is the replacement) -- confirm the pinned version.
    """
    Chem.Kekulize(mol)
    atoms = [atom.GetAtomicNum() for atom in mol.GetAtoms()]
    am = Chem.GetAdjacencyMatrix(mol,useBO=True)
    for i,atom in enumerate(atoms):
        am[i,i] = atom
    G = nx.from_numpy_matrix(am)
    return G
# Reference molecule: benzene.
mol1 = Chem.MolFromSmiles('c1ccccc1')
#mol2 = Chem.MolFromSmiles('c1cnccc1')
# Probe molecule: hexatriene -- six carbons like benzene but acyclic, so the
# distance reflects the ring opening and the bond-order differences.
mol2 = Chem.MolFromSmiles('C=CC=CC=C')
G1 = get_graph(mol1)
G2 = get_graph(mol2)
# Edge weights carry bond orders (and atomic numbers on the self-loops), so
# edge_match must compare 'weight' for a chemically meaningful edit distance.
GDE = nx.graph_edit_distance(G1, G2, edge_match=lambda a,b: a['weight'] == b['weight'])
print(GDE)
| <filename>GED.py
'''
Written by <NAME>, 2020
'''
from rdkit import Chem
import networkx as nx
def get_graph(mol):
Chem.Kekulize(mol)
atoms = [atom.GetAtomicNum() for atom in mol.GetAtoms()]
am = Chem.GetAdjacencyMatrix(mol,useBO=True)
for i,atom in enumerate(atoms):
am[i,i] = atom
G = nx.from_numpy_matrix(am)
return G
mol1 = Chem.MolFromSmiles('c1ccccc1')
#mol2 = Chem.MolFromSmiles('c1cnccc1')
mol2 = Chem.MolFromSmiles('C=CC=CC=C')
G1 = get_graph(mol1)
G2 = get_graph(mol2)
GDE = nx.graph_edit_distance(G1, G2, edge_match=lambda a,b: a['weight'] == b['weight'])
print(GDE)
| en | 0.546343 | Written by <NAME>, 2020 #mol2 = Chem.MolFromSmiles('c1cnccc1') | 2.660368 | 3 |
bin/utility.py | partamonov/cli-cloudlets | 3 | 6618209 | """
Copyright 2020 Akamai Technologies, Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import json
class Utility(object):
    """Helper routines shared by the cloudlet CLI commands."""

    def do_cloudlet_code_map(self):
        """
        Map cloudlet type abbreviations to their numeric cloudlet ids.

        Returns
        -------
        cloudlet_code : dict
            Mapping of abbreviation (e.g. 'ER', 'ALB') to integer cloudlet id.
            (Original docstring said "string"; the function returns a dict.)
        """
        cloudlet_code= {'ER': 0, 'VP': 1,'FR': 3, 'IG' : 4,
                        'AP': 5, 'AS': 6, 'CD': 7, 'IV': 8, 'ALB': 9}
        return cloudlet_code

    def get_policy_by_name(self, session, cloudlet_object, policy_name, root_logger):
        """
        Fetch details of the policy whose name matches *policy_name*.

        Parameters
        -----------
        session : <string>
            An EdgeGrid Auth akamai session object
        cloudlet_object: <object>
        policy_name: <string>
        root_logger: <object>
            Logger used for error reporting

        Returns
        -------
        policy_info : dict
            Details of the matching policy; empty dict when no name matched.
            NOTE(review): terminates the process via exit(-1) when the
            listing request fails -- callers cannot recover from that path.
        """
        policy_info = dict()
        cloudlet_policies_response = cloudlet_object.list_policies(session)
        if cloudlet_policies_response.status_code == 200:
            for policy in cloudlet_policies_response.json():
                if policy_name is not None:
                    # case-insensitive name comparison
                    if(str(policy["name"].lower()) == str(policy_name).lower()):
                        policy_info = policy
        else:
            root_logger.info('ERROR: Unable to fetch policies')
            root_logger.info(json.dumps(cloudlet_policies_response.json(), indent=4))
            exit(-1)
        #If policy_info is empty, we check for not null after return
        return policy_info

    def get_policy_by_id(self, session, cloudlet_object, policy_id, root_logger):
        """
        Fetch details of the policy with the given id.

        Parameters
        -----------
        session : <string>
            An EdgeGrid Auth akamai session object
        cloudlet_object: <object>
        policy_id: <int>
        root_logger: <object>
            Logger used for error reporting

        Returns
        -------
        policy_info : dict
            Details of the policy.
            NOTE(review): terminates the process via exit(-1) when the
            lookup fails, so the empty-dict fallback is never returned.
        """
        policy_info = dict()
        policy_response = cloudlet_object.get_policy(session,policy_id)
        if policy_response.status_code == 200:
            policy_info = policy_response.json()
        else:
            root_logger.info('ERROR: Unable to find existing policy')
            root_logger.info(json.dumps(policy_response.json(), indent=4))
            exit(-1)
        #If policy_info is empty, we check for not null after return
        return policy_info

    def get_latest_version(self, session, cloudlet_object, policy_id, root_logger):
        """
        Fetch the latest version number of a policy.

        Parameters
        -----------
        session : <string>
            An EdgeGrid Auth akamai session object
        cloudlet_object: <object>
        policy_id: <int>
        root_logger: <object>
            Logger used for error reporting

        Returns
        -------
        version : str
            Latest policy version number (page_size=1 makes the first
            list entry the newest version).
            NOTE(review): terminates the process via exit(-1) when no
            version exists or the request fails.
        """
        policy_versions_response = cloudlet_object.list_policy_versions(session, policy_id, page_size=1)
        if policy_versions_response.status_code ==200:
            #If for some reason, can't find a version
            if len(policy_versions_response.json()) > 0:
                version = str(policy_versions_response.json()[0]['version'])
            else:
                root_logger.info('ERROR: Unable to find latest version. Check if version exists')
                exit(-1)
        else:
            root_logger.info('ERROR: Unable to fetch policy versions')
            root_logger.info(json.dumps(policy_versions_response.json(), indent=4))
            exit(-1)
        return version
| """
Copyright 2020 Akamai Technologies, Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import json
class Utility(object):
def do_cloudlet_code_map(self):
"""
Function to map cloudlet abbrevations to code/id
Parameters
-----------
Returns
-------
cloudlet_code : cloudlet_code
(cloudlet_code) string with cloudlet code
"""
cloudlet_code= {'ER': 0, 'VP': 1,'FR': 3, 'IG' : 4,
'AP': 5, 'AS': 6, 'CD': 7, 'IV': 8, 'ALB': 9}
return cloudlet_code
def get_policy_by_name(self, session, cloudlet_object, policy_name, root_logger):
"""
Function to fetch policy details
Parameters
-----------
session : <string>
An EdgeGrid Auth akamai session object
cloudlet_object: <object>
policy_name: <string>
Returns
-------
policy_info : policy_info
(policy_info) Dictionary containing all the details of policy
"""
policy_info = dict()
cloudlet_policies_response = cloudlet_object.list_policies(session)
if cloudlet_policies_response.status_code == 200:
for policy in cloudlet_policies_response.json():
if policy_name is not None:
if(str(policy["name"].lower()) == str(policy_name).lower()):
policy_info = policy
else:
root_logger.info('ERROR: Unable to fetch policies')
root_logger.info(json.dumps(cloudlet_policies_response.json(), indent=4))
exit(-1)
#If policy_info is empty, we check for not null after return
return policy_info
def get_policy_by_id(self, session, cloudlet_object, policy_id, root_logger):
"""
Function to fetch policy details
Parameters
-----------
session : <string>
An EdgeGrid Auth akamai session object
cloudlet_object: <object>
policy_id: <int>
Returns
-------
policy_info : policy_info
(policy_info) Dictionary containing all the details of policy
"""
policy_info = dict()
policy_response = cloudlet_object.get_policy(session,policy_id)
if policy_response.status_code == 200:
policy_info = policy_response.json()
else:
root_logger.info('ERROR: Unable to find existing policy')
root_logger.info(json.dumps(policy_response.json(), indent=4))
exit(-1)
#If policy_info is empty, we check for not null after return
return policy_info
def get_latest_version(self, session, cloudlet_object, policy_id, root_logger):
"""
Function to fetch latest version
Parameters
-----------
session : <string>
An EdgeGrid Auth akamai session object
cloudlet_object: <object>
policy_id: <int>
Returns
-------
policy_version : policy_version
(policy_version) integer (latest policy version)
"""
policy_versions_response = cloudlet_object.list_policy_versions(session, policy_id, page_size=1)
if policy_versions_response.status_code ==200:
#If for some reason, can't find a version
if len(policy_versions_response.json()) > 0:
version = str(policy_versions_response.json()[0]['version'])
else:
root_logger.info('ERROR: Unable to find latest version. Check if version exists')
exit(-1)
else:
root_logger.info('ERROR: Unable to fetch policy versions')
root_logger.info(json.dumps(policy_versions_response.json(), indent=4))
exit(-1)
return version
| en | 0.660229 | Copyright 2020 Akamai Technologies, Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Function to map cloudlet abbrevations to code/id Parameters ----------- Returns ------- cloudlet_code : cloudlet_code (cloudlet_code) string with cloudlet code Function to fetch policy details Parameters ----------- session : <string> An EdgeGrid Auth akamai session object cloudlet_object: <object> policy_name: <string> Returns ------- policy_info : policy_info (policy_info) Dictionary containing all the details of policy #If policy_info is empty, we check for not null after return Function to fetch policy details Parameters ----------- session : <string> An EdgeGrid Auth akamai session object cloudlet_object: <object> policy_id: <int> Returns ------- policy_info : policy_info (policy_info) Dictionary containing all the details of policy #If policy_info is empty, we check for not null after return Function to fetch latest version Parameters ----------- session : <string> An EdgeGrid Auth akamai session object cloudlet_object: <object> policy_id: <int> Returns ------- policy_version : policy_version (policy_version) integer (latest policy version) #If for some reason, can't find a version | 2.039542 | 2 |
setup.py | velascoluis/MLMDStorePlugIn | 0 | 6618210 | <gh_stars>0
from setuptools import setup, find_packages
# Use the project README as the PyPI long description.
with open("README.md", "r") as fh:
    long_description = fh.read()
setup(
    name='mlflow-mldmstore',
    version='0.1',
    description='Plugin that provides MLMD Tracking Store functionality for MLflow',
    long_description=long_description,
    long_description_content_type="text/markdown",
    author='<NAME>',
    author_email='<EMAIL>',
    url="https://github.com/velascoluis",
    packages=find_packages(),
    install_requires=[
        'mlflow', 'kubeflow-metadata'
    ],
    # Register the MLMD store as an MLflow tracking-store plugin handling
    # the "http" URI scheme.
    entry_points={
        "mlflow.tracking_store": [
            "http=mlmdstore.store.tracking.mlmd_store:MLMDStore"
        ]
    },
)
| from setuptools import setup, find_packages
with open("README.md", "r") as fh:
long_description = fh.read()
setup(
name='mlflow-mldmstore',
version='0.1',
description='Plugin that provides MLMD Tracking Store functionality for MLflow',
long_description=long_description,
long_description_content_type="text/markdown",
author='<NAME>',
author_email='<EMAIL>',
url="https://github.com/velascoluis",
packages=find_packages(),
install_requires=[
'mlflow', 'kubeflow-metadata'
],
entry_points={
"mlflow.tracking_store": [
"http=mlmdstore.store.tracking.mlmd_store:MLMDStore"
]
},
) | none | 1 | 1.305017 | 1 | |
python-webrtc/python/webrtc/interfaces/media_stream.py | MarshalX/python-webrtc | 81 | 6618211 | <filename>python-webrtc/python/webrtc/interfaces/media_stream.py<gh_stars>10-100
#
# Copyright 2022 Il`ya (Marshal) <https://github.com/MarshalX>. All rights reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE.md file in the root of the project.
#
from typing import TYPE_CHECKING, List, Optional
import wrtc
from webrtc import WebRTCObject, MediaStreamTrack
if TYPE_CHECKING:
import webrtc
class MediaStream(WebRTCObject):
    """The MediaStream interface represents a stream of media content. A stream consists of several tracks,
    such as video or audio tracks. Each track is specified as an instance of :obj:`webrtc.MediaStreamTrack`.
    """

    #: Native ``wrtc`` binding class wrapped by this object.
    _class = wrtc.MediaStream

    @property
    def id(self) -> str:
        """:obj:`str`: A String containing 36 characters denoting a
        universally unique identifier (UUID) for the object."""
        return self._native_obj.id

    @property
    def active(self) -> bool:
        """:obj:`bool`: A value that returns `true` if the :obj:`webrtc.MediaStream` is active, or `false` otherwise."""
        return self._native_obj.active

    def get_audio_tracks(self) -> List['webrtc.MediaStreamTrack']:
        """Returns a :obj:`list` of the :obj:`webrtc.MediaStreamTrack` objects
        stored in the :obj:`webrtc.MediaStream` object that have their kind attribute set to "audio".
        The order is not defined, and may not only vary from one machine to another, but also from one call to another.
        """
        return MediaStreamTrack._wrap_many(self._native_obj.getAudioTracks())

    def get_video_tracks(self) -> List['webrtc.MediaStreamTrack']:
        """Returns a :obj:`list` of the :obj:`webrtc.MediaStreamTrack` objects stored in the :obj:`webrtc.MediaStream`
        object that have their kind attribute set to "video". The order is not defined,
        and may not only vary from one machine to another, but also from one call to another.
        """
        return MediaStreamTrack._wrap_many(self._native_obj.getVideoTracks())

    def get_tracks(self) -> List['webrtc.MediaStreamTrack']:
        """Returns a :obj:`list` of all :obj:`webrtc.MediaStreamTrack` objects stored in the :obj:`webrtc.MediaStream`
        object, regardless of the value of the kind attribute. The order is not defined,
        and may not only vary from one machine to another, but also from one call to another.
        """
        return MediaStreamTrack._wrap_many(self._native_obj.getTracks())

    def get_track_by_id(self, track_id: str) -> Optional['webrtc.MediaStreamTrack']:
        """Returns the track whose ID corresponds to the one given in parameters, :obj:`track_id`.
        If no parameter is given, or if no track with that ID does exist, it returns :obj:`None`.
        If several tracks have the same ID, it returns the first one.
        """
        # NOTE(review): assumes MediaStreamTrack._wrap maps a null native result
        # to None, as the docstring promises — confirm in the wrtc bindings.
        return MediaStreamTrack._wrap(self._native_obj.getTrackById(track_id))

    def add_track(self, track: 'webrtc.MediaStreamTrack'):
        """Stores a copy of the :obj:`webrtc.MediaStreamTrack` given as argument. If the track has already been added
        to the :obj:`webrtc.MediaStream` object, nothing happens.
        """
        return self._native_obj.addTrack(track._native_obj)

    def remove_track(self, track: 'webrtc.MediaStreamTrack'):
        """Removes the :obj:`webrtc.MediaStreamTrack` given as argument. If the track is not part of the
        :obj:`webrtc.MediaStream` object, nothing happens.
        """
        return self._native_obj.removeTrack(track._native_obj)

    def clone(self) -> 'webrtc.MediaStream':
        """Returns a clone of the :obj:`webrtc.MediaStream` object.
        The clone will, however, have a unique value for :obj:`id`."""
        return self._wrap(self._native_obj.clone())

    #: Alias for :attr:`get_audio_tracks`
    getAudioTracks = get_audio_tracks
    #: Alias for :attr:`get_video_tracks`
    getVideoTracks = get_video_tracks
    #: Alias for :attr:`get_tracks`
    getTracks = get_tracks
    #: Alias for :attr:`get_track_by_id`
    getTrackById = get_track_by_id
    #: Alias for :attr:`add_track`
    addTrack = add_track
    #: Alias for :attr:`remove_track`
    removeTrack = remove_track
| <filename>python-webrtc/python/webrtc/interfaces/media_stream.py<gh_stars>10-100
#
# Copyright 2022 Il`ya (Marshal) <https://github.com/MarshalX>. All rights reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE.md file in the root of the project.
#
from typing import TYPE_CHECKING, List, Optional
import wrtc
from webrtc import WebRTCObject, MediaStreamTrack
if TYPE_CHECKING:
import webrtc
class MediaStream(WebRTCObject):
"""The MediaStream interface represents a stream of media content. A stream consists of several tracks,
such as video or audio tracks. Each track is specified as an instance of :obj:`webrtc.MediaStreamTrack`.
"""
_class = wrtc.MediaStream
@property
def id(self) -> str:
""":obj:`str`: A String containing 36 characters denoting a
universally unique identifier (UUID) for the object."""
return self._native_obj.id
@property
def active(self) -> bool:
""":obj:`bool`: A value that returns `true` if the :obj:`webrtc.MediaStream` is active, or `false` otherwise."""
return self._native_obj.active
def get_audio_tracks(self) -> List['webrtc.MediaStreamTrack']:
"""Returns a :obj:`list` of the :obj:`webrtc.MediaStreamTrack` objects
stored in the :obj:`webrtc.MediaStream` object that have their kind attribute set to "audio".
The order is not defined, and may not only vary from one machine to another, but also from one call to another.
"""
return MediaStreamTrack._wrap_many(self._native_obj.getAudioTracks())
def get_video_tracks(self) -> List['webrtc.MediaStreamTrack']:
"""Returns a :obj:`list` of the :obj:`webrtc.MediaStreamTrack` objects stored in the :obj:`webrtc.MediaStream`
object that have their kind attribute set to "video". The order is not defined,
and may not only vary from one machine to another, but also from one call to another.
"""
return MediaStreamTrack._wrap_many(self._native_obj.getVideoTracks())
def get_tracks(self) -> List['webrtc.MediaStreamTrack']:
"""Returns a :obj:`list` of all :obj:`webrtc.MediaStreamTrack` objects stored in the :obj:`webrtc.MediaStream`
object, regardless of the value of the kind attribute. The order is not defined,
and may not only vary from one machine to another, but also from one call to another.
"""
return MediaStreamTrack._wrap_many(self._native_obj.getTracks())
def get_track_by_id(self, track_id: str) -> Optional['webrtc.MediaStreamTrack']:
"""Returns the track whose ID corresponds to the one given in parameters, :obj:`track_id`.
If no parameter is given, or if no track with that ID does exist, it returns :obj:`None`.
If several tracks have the same ID, it returns the first one.
"""
return MediaStreamTrack._wrap(self._native_obj.getTrackById(track_id))
def add_track(self, track: 'webrtc.MediaStreamTrack'):
"""Stores a copy of the :obj:`webrtc.MediaStreamTrack` given as argument. If the track has already been added
to the :obj:`webrtc.MediaStream` object, nothing happens.
"""
return self._native_obj.addTrack(track._native_obj)
def remove_track(self, track: 'webrtc.MediaStreamTrack'):
"""Removes the :obj:`webrtc.MediaStreamTrack` given as argument. If the track is not part of the
:obj:`webrtc.MediaStream` object, nothing happens.
"""
return self._native_obj.removeTrack(track._native_obj)
def clone(self) -> 'webrtc.MediaStream':
"""Returns a clone of the :obj:`webrtc.MediaStream` object.
The clone will, however, have a unique value for :obj:`id`."""
return self._wrap(self._native_obj.clone())
#: Alias for :attr:`get_audio_tracks`
getAudioTracks = get_audio_tracks
#: Alias for :attr:`get_video_tracks`
getVideoTracks = get_video_tracks
#: Alias for :attr:`get_tracks`
getTracks = get_tracks
#: Alias for :attr:`get_track_by_id`
getTrackById = get_track_by_id
#: Alias for :attr:`add_track`
addTrack = add_track
#: Alias for :attr:`remove_track`
removeTrack = remove_track
| en | 0.836094 | # # Copyright 2022 Il`ya (Marshal) <https://github.com/MarshalX>. All rights reserved. # # Use of this source code is governed by a BSD-style license # that can be found in the LICENSE.md file in the root of the project. # The MediaStream interface represents a stream of media content. A stream consists of several tracks, such as video or audio tracks. Each track is specified as an instance of :obj:`webrtc.MediaStreamTrack`. :obj:`str`: A String containing 36 characters denoting a universally unique identifier (UUID) for the object. :obj:`bool`: A value that returns `true` if the :obj:`webrtc.MediaStream` is active, or `false` otherwise. Returns a :obj:`list` of the :obj:`webrtc.MediaStreamTrack` objects stored in the :obj:`webrtc.MediaStream` object that have their kind attribute set to "audio". The order is not defined, and may not only vary from one machine to another, but also from one call to another. Returns a :obj:`list` of the :obj:`webrtc.MediaStreamTrack` objects stored in the :obj:`webrtc.MediaStream` object that have their kind attribute set to "video". The order is not defined, and may not only vary from one machine to another, but also from one call to another. Returns a :obj:`list` of all :obj:`webrtc.MediaStreamTrack` objects stored in the :obj:`webrtc.MediaStream` object, regardless of the value of the kind attribute. The order is not defined, and may not only vary from one machine to another, but also from one call to another. Returns the track whose ID corresponds to the one given in parameters, :obj:`track_id`. If no parameter is given, or if no track with that ID does exist, it returns :obj:`None`. If several tracks have the same ID, it returns the first one. Stores a copy of the :obj:`webrtc.MediaStreamTrack` given as argument. If the track has already been added to the :obj:`webrtc.MediaStream` object, nothing happens. Removes the :obj:`webrtc.MediaStreamTrack` given as argument. 
If the track is not part of the :obj:`webrtc.MediaStream` object, nothing happens. Returns a clone of the :obj:`webrtc.MediaStream` object. The clone will, however, have a unique value for :obj:`id`. #: Alias for :attr:`get_audio_tracks` #: Alias for :attr:`get_video_tracks` #: Alias for :attr:`get_tracks` #: Alias for :attr:`get_track_by_id` #: Alias for :attr:`add_track` #: Alias for :attr:`remove_track` | 2.464116 | 2 |
d6/d6.py | thomasburgess/adv2020 | 0 | 6618212 | from typing import List
import gzip
def read(file: str) -> List[List[str]]:
    """Load a gzipped answer file and split it into groups.

    The file is read as text; blank lines separate groups, and each group
    becomes a list of its non-empty lines.
    """
    with gzip.open(file, "rt") as handle:
        raw_groups = handle.read().split("\n\n")
    groups = []
    for chunk in raw_groups:
        groups.append([line for line in chunk.split("\n") if line])
    return groups
def nyes1(group: List[str]) -> int:
    """Count the distinct questions that *anyone* in the group answered yes.

    Each member's answers are a string of question letters; the result is
    the size of the union of all members' letters.
    """
    return len(set().union(*group))
def nyes2(group: List[str]) -> int:
    """Count the questions that *everyone* in the group answered yes.

    Intersects the character sets of all members' answer strings.  An empty
    group has no common answers and yields 0; the original
    ``set.intersection(*[])`` call raised ``TypeError`` in that case (which
    ``read`` can produce for runs of 3+ consecutive newlines in the input).
    """
    if not group:
        return 0
    return len(set.intersection(*[set(s) for s in group]))
def main():
    """Solve both puzzle parts for the bundled input and print the totals."""
    groups = read("input_d6.txt.gz")
    print(sum(nyes1(g) for g in groups))
    print(sum(nyes2(g) for g in groups))


if __name__ == '__main__':
    main()
import gzip
def read(file: str) -> List[List[str]]:
with gzip.open(file, "rt") as f:
return [[j for j in i.split("\n") if len(j)>0] for i in f.read().split("\n\n")]
def nyes1(group: List[str]) -> int:
return len(set("".join(group)))
def nyes2(group: List[str]) -> int:
return len(set.intersection(*[set(s) for s in group]))
def main():
arr = read("input_d6.txt.gz")
print(sum(map(nyes1, arr)))
print(sum(map(nyes2, arr)))
if __name__ == '__main__':
main() | none | 1 | 3.267085 | 3 | |
machine_learning/similarity/dtw/dtw_demo2.py | caserwin/daily-learning-python | 1 | 6618213 | <reponame>caserwin/daily-learning-python<filename>machine_learning/similarity/dtw/dtw_demo2.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019-05-14 15:35
# @Author : erwin
# Demo: DTW distances with dtaidistance, hierarchical clustering of the
# resulting distance matrix, and navigation of the cluster tree via the
# project's helper classes.
from dtaidistance import dtw
import pandas as pd
import numpy as np
from dtaidistance import clustering
from common.util_function import *
from machine_learning.similarity.dtw.hierarchical_helper import ClusterHelper
from machine_learning.similarity.dtw.hierarchical_helper import HierarchicalHelper
print_line("distance_matrix_fast 测试")
# Pairwise DTW distances between three short example series.
s1 = [0, 0, 1, 2, 1, 0, 1, 0]
s2 = [0, 1, 2, 0, 0, 0, 0, 0]
s3 = [0, 0, 1, 2, 1, 0, 0, 0]
distance12, paths12 = dtw.warping_paths(s1, s2)
distance13, paths13 = dtw.warping_paths(s1, s3)
distance23, paths23 = dtw.warping_paths(s2, s3)
print("distance12:", distance12, " distance13:", distance13, " distance23:", distance23, "\n")
# Build a small matrix of series (columns become the clustered series after
# the transpose below); the trailing None is filled with 0.
data = np.array([[0, 0, 0, 1, 3],
                 [0, 1, 0, 2, 4],
                 [1, 2, 1, 1, 5],
                 [2, 0, 2, 2, 1],
                 [1, 0, 1, 1, 0],
                 [0, 0, 0, 2, 0],
                 [1, 0, 0, 1, 1],
                 [0, 0, 0, 2, None]])
df = pd.DataFrame(data=data).fillna(0)
series = np.matrix(df.T, dtype=np.double)
ds = dtw.distance_matrix_fast(series)
print_br(ds)
print_line("Hierarchical clustering")
# Cluster the series with a linkage tree driven by fast DTW distances.
model3 = clustering.LinkageTree(dtw.distance_matrix_fast, {})
model3.fit(series)
model3.plot(show_ts_label=True, show_tr_label=True)
model3.plot(filename="./clustered.png", show_ts_label=True, show_tr_label=True)
print(model3.to_dot())
# Build the tree data structure for the clustering result.
tree_helper = HierarchicalHelper(model3)
print(tree_helper.root, tree_helper.root.isLeaf())
print(tree_helper.root.left_node, tree_helper.root.left_node.isLeaf())
print(tree_helper.root.right_node, tree_helper.root.right_node.isLeaf())
# Build the child-node -> parent-node mapping.
cls_helper = ClusterHelper(model3, len(series))
print(cls_helper.toMap())
# Return all clusters for a requested number of clusters.
cluster_keys = tree_helper.getClusterByNum(tree_helper.root, 3, {})
for i in cluster_keys:
    print(i)
# Return all clusters given a minimum distance threshold.
cluster_keys = tree_helper.getClusterByDist(tree_helper.root, 6, {})
for i in cluster_keys:
    print(i)
# Collect every descendant node under a given node.
nodes = []
tree_helper.iterTree(tree_helper.root.right_node, nodes)
for node in nodes:
    print(node)
# Look up a node instance by its index.
print("=" * 10)
print(tree_helper.idx_node_map.get(0))
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019-05-14 15:35
# @Author : erwin
from dtaidistance import dtw
import pandas as pd
import numpy as np
from dtaidistance import clustering
from common.util_function import *
from machine_learning.similarity.dtw.hierarchical_helper import ClusterHelper
from machine_learning.similarity.dtw.hierarchical_helper import HierarchicalHelper
print_line("distance_matrix_fast 测试")
s1 = [0, 0, 1, 2, 1, 0, 1, 0]
s2 = [0, 1, 2, 0, 0, 0, 0, 0]
s3 = [0, 0, 1, 2, 1, 0, 0, 0]
distance12, paths12 = dtw.warping_paths(s1, s2)
distance13, paths13 = dtw.warping_paths(s1, s3)
distance23, paths23 = dtw.warping_paths(s2, s3)
print("distance12:", distance12, " distance13:", distance13, " distance23:", distance23, "\n")
data = np.array([[0, 0, 0, 1, 3],
[0, 1, 0, 2, 4],
[1, 2, 1, 1, 5],
[2, 0, 2, 2, 1],
[1, 0, 1, 1, 0],
[0, 0, 0, 2, 0],
[1, 0, 0, 1, 1],
[0, 0, 0, 2, None]])
df = pd.DataFrame(data=data).fillna(0)
series = np.matrix(df.T, dtype=np.double)
ds = dtw.distance_matrix_fast(series)
print_br(ds)
print_line("Hierarchical clustering")
model3 = clustering.LinkageTree(dtw.distance_matrix_fast, {})
model3.fit(series)
model3.plot(show_ts_label=True, show_tr_label=True)
model3.plot(filename="./clustered.png", show_ts_label=True, show_tr_label=True)
print(model3.to_dot())
# 构建树的数据结构
tree_helper = HierarchicalHelper(model3)
print(tree_helper.root, tree_helper.root.isLeaf())
print(tree_helper.root.left_node, tree_helper.root.left_node.isLeaf())
print(tree_helper.root.right_node, tree_helper.root.right_node.isLeaf())
# 建立子节点到父节点的映射
cls_helper = ClusterHelper(model3, len(series))
print(cls_helper.toMap())
# 根据指定类别数,返回所有类别
cluster_keys = tree_helper.getClusterByNum(tree_helper.root, 3, {})
for i in cluster_keys:
print(i)
# 根据指定最小距离,返回所有类别
cluster_keys = tree_helper.getClusterByDist(tree_helper.root, 6, {})
for i in cluster_keys:
print(i)
# 返回一个节点下所有子节点
nodes = []
tree_helper.iterTree(tree_helper.root.right_node, nodes)
for node in nodes:
print(node)
# 根据idx 返回节点实例
print("=" * 10)
print(tree_helper.idx_node_map.get(0)) | zh | 0.836659 | #!/usr/bin/env python # -*- coding: utf-8 -*- # @Time : 2019-05-14 15:35 # @Author : erwin # 构建树的数据结构 # 建立子节点到父节点的映射 # 根据指定类别数,返回所有类别 # 根据指定最小距离,返回所有类别 # 返回一个节点下所有子节点 # 根据idx 返回节点实例 | 2.417136 | 2 |
go/apps/opt_out/vumi_app.py | lynnUg/vumi-go | 0 | 6618214 | # -*- test-case-name: go.apps.opt_out.tests.test_vumi_app -*-
from twisted.internet.defer import inlineCallbacks
from vumi import log
from go.vumitools.app_worker import GoApplicationWorker
from go.vumitools.opt_out import OptOutStore
class OptOutApplication(GoApplicationWorker):
    """Vumi Go application worker that records opt-out requests.

    Every inbound user message handled by this worker is treated as an
    opt-out and persisted to the owning account's :class:`OptOutStore`.
    """

    worker_name = 'opt_out_application'

    @inlineCallbacks
    def consume_user_message(self, message):
        """Record an opt-out for the sender of *message* and acknowledge it.

        Runs as a Twisted ``inlineCallbacks`` coroutine: each ``yield``
        waits on a Deferred.
        """
        msg_mdh = self.get_metadata_helper(message)
        if not msg_mdh.has_user_account():
            # We don't have an account to opt out of.
            # Since this can only happen for redirected messages, assume we
            # aren't dealing with an API.
            yield self.reply_to(
                message, "Your opt-out was received but we failed to link it "
                "to a specific service, please try again later.")
            return
        account_key = yield msg_mdh.get_account_key()
        opt_out_store = OptOutStore(self.manager, account_key)
        from_addr = message.get("from_addr")
        # Note: for now we are hardcoding addr_type as 'msisdn'
        # as only msisdn's are opting out currently
        yield opt_out_store.new_opt_out("msisdn", from_addr, message)
        if message.get('transport_type') == 'http_api':
            # API clients get a machine-readable JSON acknowledgement.
            yield self.reply_to(
                message, '{"msisdn":"%s","opted_in": false}' % (from_addr,))
        else:
            yield self.reply_to(message, "You have opted out")

    def process_command_start(self, user_account_key, conversation_key):
        """Log conversation start-up, then defer to the base implementation."""
        log.debug('OptOutApplication started: %s' % (conversation_key,))
        return super(OptOutApplication, self).process_command_start(
            user_account_key, conversation_key)
| # -*- test-case-name: go.apps.opt_out.tests.test_vumi_app -*-
from twisted.internet.defer import inlineCallbacks
from vumi import log
from go.vumitools.app_worker import GoApplicationWorker
from go.vumitools.opt_out import OptOutStore
class OptOutApplication(GoApplicationWorker):
worker_name = 'opt_out_application'
@inlineCallbacks
def consume_user_message(self, message):
msg_mdh = self.get_metadata_helper(message)
if not msg_mdh.has_user_account():
# We don't have an account to opt out of.
# Since this can only happen for redirected messages, assume we
# aren't dealing with an API.
yield self.reply_to(
message, "Your opt-out was received but we failed to link it "
"to a specific service, please try again later.")
return
account_key = yield msg_mdh.get_account_key()
opt_out_store = OptOutStore(self.manager, account_key)
from_addr = message.get("from_addr")
# Note: for now we are hardcoding addr_type as 'msisdn'
# as only msisdn's are opting out currently
yield opt_out_store.new_opt_out("msisdn", from_addr, message)
if message.get('transport_type') == 'http_api':
yield self.reply_to(
message, '{"msisdn":"%s","opted_in": false}' % (from_addr,))
else:
yield self.reply_to(message, "You have opted out")
def process_command_start(self, user_account_key, conversation_key):
log.debug('OptOutApplication started: %s' % (conversation_key,))
return super(OptOutApplication, self).process_command_start(
user_account_key, conversation_key)
| en | 0.915294 | # -*- test-case-name: go.apps.opt_out.tests.test_vumi_app -*- # We don't have an account to opt out of. # Since this can only happen for redirected messages, assume we # aren't dealing with an API. # Note: for now we are hardcoding addr_type as 'msisdn' # as only msisdn's are opting out currently | 1.68784 | 2 |
winter/web/output_processor.py | DmitryKhursevich/winter | 9 | 6618215 | import abc
from typing import Any
from typing import Callable
from typing import List
from typing import Optional
import dataclasses
from rest_framework.request import Request as DRFRequest
from winter.core import ComponentMethod
from winter.core import annotate
class IOutputProcessor(abc.ABC):
    """Process controller method returned value so that it can be put to HttpResponse body.
    Common usage is to serialize some DTO to dict."""

    @abc.abstractmethod
    def process_output(self, output, request: DRFRequest):  # pragma: no cover
        """Transform *output* (the controller's return value) into a
        response-body-ready value for *request*."""
        return output
@dataclasses.dataclass
class OutputProcessorAnnotation:
    """Annotation carrying the output processor registered directly on a method."""
    # Processor to use for the annotated method's return value.
    output_processor: IOutputProcessor
class IOutputProcessorResolver(abc.ABC):
    """
    Resolves IOutputProcessor for a given body type.
    Due to python dynamic typing it's called after every controller method call.
    """

    @abc.abstractmethod
    def is_supported(self, body: Any) -> bool:  # pragma: no cover
        """Return True if this resolver can supply a processor for *body*."""
        return False

    @abc.abstractmethod
    def get_processor(self, body: Any) -> IOutputProcessor:  # pragma: no cover
        """Return the processor for *body*; only called when is_supported(body) is True."""
        pass
# Fallback resolvers, consulted in registration order by get_output_processor().
_registered_resolvers: List[IOutputProcessorResolver] = []
def register_output_processor(method: Callable, output_processor: IOutputProcessor):
    """Attach *output_processor* to *method* as a single-instance annotation."""
    decorator = annotate(OutputProcessorAnnotation(output_processor), single=True)
    return decorator(method)
def register_output_processor_resolver(output_processor_resolver: IOutputProcessorResolver):
    """Add a fallback resolver; resolvers are tried in registration order."""
    _registered_resolvers.append(output_processor_resolver)
def get_output_processor(method: ComponentMethod, body: Any) -> Optional[IOutputProcessor]:
    """Find the output processor for *method* and its return value *body*.

    A processor annotated directly on the method wins; otherwise the first
    registered resolver that supports *body* supplies one.  Returns None
    when neither source applies.
    """
    annotation = method.annotations.get_one_or_none(OutputProcessorAnnotation)
    if annotation is not None:
        return annotation.output_processor
    return next(
        (resolver.get_processor(body)
         for resolver in _registered_resolvers
         if resolver.is_supported(body)),
        None,
    )
| import abc
from typing import Any
from typing import Callable
from typing import List
from typing import Optional
import dataclasses
from rest_framework.request import Request as DRFRequest
from winter.core import ComponentMethod
from winter.core import annotate
class IOutputProcessor(abc.ABC):
"""Process controller method returned value so that it can be put to HttpResponse body.
Common usage is to serializer some DTO to dict."""
@abc.abstractmethod
def process_output(self, output, request: DRFRequest): # pragma: no cover
return output
@dataclasses.dataclass
class OutputProcessorAnnotation:
output_processor: IOutputProcessor
class IOutputProcessorResolver(abc.ABC):
"""
Resolves IOutputProcessor for a given body type.
Due to python dynamic typing it's called after every controller method call.
"""
@abc.abstractmethod
def is_supported(self, body: Any) -> bool: # pragma: no cover
return False
@abc.abstractmethod
def get_processor(self, body: Any) -> IOutputProcessor: # pragma: no cover
pass
_registered_resolvers: List[IOutputProcessorResolver] = []
def register_output_processor(method: Callable, output_processor: IOutputProcessor):
return annotate(OutputProcessorAnnotation(output_processor), single=True)(method)
def register_output_processor_resolver(output_processor_resolver: IOutputProcessorResolver):
_registered_resolvers.append(output_processor_resolver)
def get_output_processor(method: ComponentMethod, body: Any) -> Optional[IOutputProcessor]:
output_processor_annotation = method.annotations.get_one_or_none(OutputProcessorAnnotation)
if output_processor_annotation is not None:
return output_processor_annotation.output_processor
for resolver in _registered_resolvers:
if resolver.is_supported(body):
return resolver.get_processor(body)
return None
| en | 0.882383 | Process controller method returned value so that it can be put to HttpResponse body. Common usage is to serializer some DTO to dict. # pragma: no cover Resolves IOutputProcessor for a given body type. Due to python dynamic typing it's called after every controller method call. # pragma: no cover # pragma: no cover | 2.553204 | 3 |
scripts/modules/extractors/fodt/tpm2_partx_extraction_navigator_fodt.py | evolation/tpm2simulator | 41 | 6618216 | # -*- coding: utf-8 -*-
# custom stuff:
from bs4 import Tag
from modules import comment, constants
class ExtractionNavigator(object):
    """Base navigator for extracting code sections from a .fodt specification
    document.

    Subclasses or mixins must implement :meth:`extract_function`,
    :meth:`next_function` and :meth:`next_entry`.
    """

    def __init__(self):
        # Output directory for extracted command sources.
        self.COMMAND_PATH = constants.SRC_PATH + constants.TPM_PATH + "command/"
        self.comments = comment.Comment()
        # Accumulates extracted functions; returned by extract_fodt().
        self.functions = []

    # The selector function mainly serves the purpose of finding the next tag,
    # whose string is a part of the code module (it will be interpreted as a
    # comment). Hence, the selector looks for valid tags including the
    # 'text:list', 'text:p', and 'table:table' tags. In case the tag is of type
    # 'text:p', the selector additionally looks for the text-style of type
    # 'Text_', representing an outlined comment within the code.
    @staticmethod
    def selector(tag):
        """Return True if *tag* is part of a code module: a text list, a
        'Text_'-styled paragraph, or a table."""
        if isinstance(tag, Tag):
            if tag.name == constants.XML_TEXT_LIST:
                return True
            elif (tag.name == constants.XML_TEXT_P
                    and tag.has_attr(constants.XML_TEXT_STYLE_NAME)
                    and "Text_" in tag[constants.XML_TEXT_STYLE_NAME]):
                return True
            elif tag.name == constants.XML_TABLE_TABLE:
                return True
        return False

    # Extracts section according to given name
    # Parameters:
    # entry
    # name_section
    # name_folder
    def extract_section(self, entry, name_section, name_folder):
        """Advance over level-1 headings starting at *entry* until one matches
        *name_section*, then delegate extraction to extract_function()."""
        # find correct section
        while isinstance(entry, Tag) and entry.get_text().strip() != name_section:
            entry = entry.find_next(constants.XML_TEXT_H, {constants.XML_TEXT_OUTLINE_LEVEL: '1'})
        # couldn't find the right section
        if entry is None:
            return
        print("[+] Section name: {0}".format(entry.get_text().strip()))
        self.extract_function(entry, name_section, name_folder)

    # Function not implemented
    def extract_function(self, main_entry, name_section, name_folder):
        """
        interface 'extract_function' must be implemented by the child class or mixin
        """
        raise NotImplementedError("[-] 'extract_function' not yet implemented...")

    # Function not implemented
    def next_function(self, entry):
        """
        interface 'next_function' must be implemented by the child class or mixin
        """
        raise NotImplementedError("[-] 'next_function' not yet implemented...")

    # Function not implemented
    def next_entry(self, entry):
        """
        interface 'next_entry' must be implemented by the child class or mixin
        """
        raise NotImplementedError("[-] 'next_entry' not yet implemented...")

    # Extracts all functions from xml file
    # Parameters:
    # xml
    # folders
    # Returns:
    # list of functions
    def extract_fodt(self, xml, folders):
        """Extract every section listed in *folders* (section name -> folder
        name) from the parsed *xml* document and return the accumulated
        list of functions."""
        entry = xml.find(constants.XML_TEXT_H, {constants.XML_TEXT_OUTLINE_LEVEL: '1'})
        for section in folders:
            self.extract_section(entry, section, folders[section])
        return self.functions
| # -*- coding: utf-8 -*-
# custom stuff:
from bs4 import Tag
from modules import comment, constants
class ExtractionNavigator(object):
"""
"""
def __init__(self):
self.COMMAND_PATH = constants.SRC_PATH + constants.TPM_PATH + "command/"
self.comments = comment.Comment()
self.functions = []
# The selector function mainly serves the purpose of finding the next tag,
# whose string is a part of the code module (it will be interpreted as a
# comment). Hence, the selector looks for valid tags including the
# 'text:list', 'text:p', and 'table:table' tags. In case the tag is of type
# 'text:p', the selector additionally looks for the text-style of type
# 'Text_', representing an outlined comment within the code.
@staticmethod
def selector(tag):
"""
"""
if isinstance(tag, Tag):
if tag.name == constants.XML_TEXT_LIST:
return True
elif (tag.name == constants.XML_TEXT_P
and tag.has_attr(constants.XML_TEXT_STYLE_NAME)
and "Text_" in tag[constants.XML_TEXT_STYLE_NAME]):
return True
elif tag.name == constants.XML_TABLE_TABLE:
return True
return False
# Extracts section according to given name
# Parameters:
# entry
# name_section
# name_folder
def extract_section(self, entry, name_section, name_folder):
# find correct section
while isinstance(entry, Tag) and entry.get_text().strip() != name_section:
entry = entry.find_next(constants.XML_TEXT_H, {constants.XML_TEXT_OUTLINE_LEVEL: '1'})
# couldn't find the right section
if entry is None:
return
print("[+] Section name: {0}".format(entry.get_text().strip()))
self.extract_function(entry, name_section, name_folder)
# Function not implemented
def extract_function(self, main_entry, name_section, name_folder):
"""
interface 'extract_function' must be implemented by the child class or mixin
"""
raise NotImplementedError("[-] 'extract_function' not yet implemented...")
# Function not implemented
def next_function(self, entry):
"""
interface 'next_function' must be implemented by the child class or mixin
"""
raise NotImplementedError("[-] 'next_function' not yet implemented...")
# Function not implemented
def next_entry(self, entry):
"""
interface 'next_entry' must be implemented by the child class or mixin
"""
raise NotImplementedError("[-] 'next_entry' not yet implemented...")
# Extracts all functions from xml file
# Parameters:
# xml
# folders
# Returns:
# list of functions
def extract_fodt(self, xml, folders):
entry = xml.find(constants.XML_TEXT_H, {constants.XML_TEXT_OUTLINE_LEVEL: '1'})
for section in folders:
self.extract_section(entry, section, folders[section])
return self.functions
| en | 0.794634 | # -*- coding: utf-8 -*- # custom stuff: # The selector function mainly serves the purpose of finding the next tag, # whose string is a part of the code module (it will be interpreted as a # comment). Hence, the selector looks for valid tags including the # 'text:list', 'text:p', and 'table:table' tags. In case the tag is of type # 'text:p', the selector additionally looks for the text-style of type # 'Text_', representing an outlined comment within the code. # Extracts section according to given name # Parameters: # entry # name_section # name_folder # find correct section # couldn't find the right section # Function not implemented interface 'extract_function' must be implemented by the child class or mixin # Function not implemented interface 'next_function' must be implemented by the child class or mixin # Function not implemented interface 'next_entry' must be implemented by the child class or mixin # Extracts all functions from xml file # Parameters: # xml # folders # Returns: # list of functions | 2.883431 | 3 |
vultr/v1_server_ipv4.py | nickruhl/python-vultr | 117 | 6618217 | <filename>vultr/v1_server_ipv4.py
'''Partial class to handle Vultr Server (IPv4) API calls'''
from .utils import VultrBase, update_params
class VultrServerIPv4(VultrBase):
    '''Vultr API endpoints for managing a server's IPv4 addresses.

    Thin wrapper over the /v1/server/*_ipv4 endpoints: each method merges
    the caller-supplied params with the required fields and issues the
    request through VultrBase.
    '''
    def __init__(self, api_key):
        VultrBase.__init__(self, api_key)

    def create(self, subid, params=None):
        '''POST /v1/server/create_ipv4 (account scope).

        Adds a new IPv4 address to the server identified by *subid*.
        Billing starts immediately and the server is rebooted unless the
        API is told otherwise; a reboot is required before the address
        can be configured.
        https://www.vultr.com/api/#server_create_ipv4
        '''
        query = update_params(params, {'SUBID': subid})
        return self.request('/v1/server/create_ipv4', query, 'POST')

    def destroy(self, subid, ipaddr, params=None):
        '''POST /v1/server/destroy_ipv4 (account scope).

        Removes a secondary IPv4 address *ipaddr* from the server; the
        server is hard-restarted, so halt it gracefully first.
        https://www.vultr.com/api/#server_destroy_ipv4
        '''
        query = update_params(params, {
            'SUBID': subid,
            'ip': ipaddr
        })
        return self.request('/v1/server/destroy_ipv4', query, 'POST')

    def list(self, subid, params=None):
        '''GET /v1/server/list_ipv4 (account scope).

        Lists the IPv4 information of a virtual machine; only available
        while the machine is in the "active" state.
        https://www.vultr.com/api/#server_list_ipv4
        '''
        query = update_params(params, {'SUBID': subid})
        return self.request('/v1/server/list_ipv4', query, 'GET')

    def reverse_default(self, subid, ipaddr, params=None):
        '''POST /v1/server/reverse_default_ipv4 (account scope).

        Resets the reverse DNS entry for *ipaddr* to the original
        setting; DNS changes may take 6-12 hours to become active.
        https://www.vultr.com/api/#server_reverse_default_ipv4
        '''
        query = update_params(params, {
            'SUBID': subid,
            'ip': ipaddr
        })
        return self.request('/v1/server/reverse_default_ipv4', query, 'POST')

    def reverse_set(self, subid, ipaddr, entry, params=None):
        '''POST /v1/server/reverse_set_ipv4 (account scope).

        Sets the reverse DNS entry for *ipaddr* to *entry*; DNS changes
        may take 6-12 hours to become active.
        https://www.vultr.com/api/#server_reverse_set_ipv4
        '''
        query = update_params(params, {
            'SUBID': subid,
            'ip': ipaddr,
            'entry': entry
        })
        return self.request('/v1/server/reverse_set_ipv4', query, 'POST')
| <filename>vultr/v1_server_ipv4.py
'''Partial class to handle Vultr Server (IPv4) API calls'''
from .utils import VultrBase, update_params
class VultrServerIPv4(VultrBase):
    '''Handles Vultr Server (IPv4) API calls'''

    def __init__(self, api_key):
        VultrBase.__init__(self, api_key)

    def create(self, subid, params=None):
        ''' /v1/server/create_ipv4
        POST - account
        Attach an additional IPv4 address to a server. Billing starts
        immediately; the server is rebooted unless requested otherwise,
        and a reboot is required before the new address can be configured.
        Link: https://www.vultr.com/api/#server_create_ipv4
        '''
        merged = update_params(params, {'SUBID': subid})
        return self.request('/v1/server/create_ipv4', merged, 'POST')

    def destroy(self, subid, ipaddr, params=None):
        ''' /v1/server/destroy_ipv4
        POST - account
        Remove a secondary IPv4 address from a server. The server is
        hard-restarted; halt the machine gracefully first if possible.
        Link: https://www.vultr.com/api/#server_destroy_ipv4
        '''
        merged = update_params(params, {'SUBID': subid, 'ip': ipaddr})
        return self.request('/v1/server/destroy_ipv4', merged, 'POST')

    def list(self, subid, params=None):
        ''' /v1/server/list_ipv4
        GET - account
        Retrieve IPv4 details for a virtual machine; information is only
        available while the machine is in the "active" state.
        Link: https://www.vultr.com/api/#server_list_ipv4
        '''
        merged = update_params(params, {'SUBID': subid})
        return self.request('/v1/server/list_ipv4', merged, 'GET')

    def reverse_default(self, subid, ipaddr, params=None):
        ''' /v1/server/reverse_default_ipv4
        POST - account
        Restore the reverse DNS entry for an IPv4 address of a virtual
        machine to its original value. Upon success, DNS changes may
        take 6-12 hours to become active.
        Link: https://www.vultr.com/api/#server_reverse_default_ipv4
        '''
        merged = update_params(params, {'SUBID': subid, 'ip': ipaddr})
        return self.request('/v1/server/reverse_default_ipv4', merged, 'POST')

    def reverse_set(self, subid, ipaddr, entry, params=None):
        ''' /v1/server/reverse_set_ipv4
        POST - account
        Set a reverse DNS entry for an IPv4 address of a virtual machine.
        Upon success, DNS changes may take 6-12 hours to become active.
        Link: https://www.vultr.com/api/#server_reverse_set_ipv4
        '''
        merged = update_params(params, {'SUBID': subid, 'ip': ipaddr, 'entry': entry})
        return self.request('/v1/server/reverse_set_ipv4', merged, 'POST')
| en | 0.81505 | Partial class to handle Vultr Server (IPv4) API calls Handles Vultr Server (IPv4) API calls /v1/server/create_ipv4 POST - account Add a new IPv4 address to a server. You will start being billed for this immediately. The server will be rebooted unless you specify otherwise. You must reboot the server before the IPv4 address can be configured. Link: https://www.vultr.com/api/#server_create_ipv4 /v1/server/destroy_ipv4 POST - account Removes a secondary IPv4 address from a server. Your server will be hard-restarted. We suggest halting the machine gracefully before removing IPs. Link: https://www.vultr.com/api/#server_destroy_ipv4 /v1/server/list_ipv4 GET - account List the IPv4 information of a virtual machine. IP information is only available for virtual machines in the "active" state. Link: https://www.vultr.com/api/#server_list_ipv4 /v1/server/reverse_default_ipv4 POST - account Set a reverse DNS entry for an IPv4 address of a virtual machine to the original setting. Upon success, DNS changes may take 6-12 hours to become active. Link: https://www.vultr.com/api/#server_reverse_default_ipv4 /v1/server/reverse_set_ipv4 POST - account Set a reverse DNS entry for an IPv4 address of a virtual machine. Upon success, DNS changes may take 6-12 hours to become active. Link: https://www.vultr.com/api/#server_reverse_set_ipv4 | 2.943004 | 3 |
noteapp/noteapp/views/index.py | Redamarx/opentok-web-samples | 0 | 6618218 | from flask import Blueprint, render_template
# Blueprint named after this module; templates are resolved from ./templates.
bp = Blueprint(__name__, __name__, template_folder='templates')


@bp.route('/')
def show():
    """Render the landing page."""
    return render_template("index.html")
| from flask import Blueprint, render_template
bp = Blueprint( __name__ , __name__,template_folder='templates')
@bp.route('/')
def show():
return render_template("index.html")
| none | 1 | 2.139767 | 2 | |
core/telegram.py | SheetWithoutShit/sws-core | 0 | 6618219 | <filename>core/telegram.py
"""This module provides functionality for async functionality with telegram."""
import os
from .http import HTTPRequest
class TelegramBot(HTTPRequest):
    """Async client for the Telegram Bot API."""

    def __init__(self, timeout=60):
        """Initialize the client; the bot token is read from TELEGRAM_BOT_TOKEN."""
        super().__init__(timeout)
        # Keep the raw token around and build the per-bot API root once.
        self.token = os.environ["TELEGRAM_BOT_TOKEN"]
        self.api = f"https://api.telegram.org/bot{self.token}"

    async def send_message(self, chat_id, text, **kwargs):
        """
        Send a text message to the given chat. Extra Bot API options
        (silent notifications, parse mode, etc.) are passed through kwargs
        and override the defaults.
        """
        params = {"chat_id": chat_id, "text": text}
        params.update(kwargs)
        return await self.get(f"{self.api}/sendMessage", params=params)
| <filename>core/telegram.py
"""This module provides functionality for async functionality with telegram."""
import os
from .http import HTTPRequest
class TelegramBot(HTTPRequest):
"""Class that provides async interactions with telegram bot."""
def __init__(self, timeout=60):
"""Initialize client session for async telegram bot interactions."""
super().__init__(timeout)
token = os.environ["TELEGRAM_BOT_TOKEN"]
self.token = token
self.api = f"https://api.telegram.org/bot{token}"
async def send_message(self, chat_id, text, **kwargs):
"""
Send message to user by chat_id. Can be provided extra options
such as: silent notifications, change parse mode, etc.
"""
endpoint = f"{self.api}/sendMessage"
params = {"chat_id": chat_id, "text": text, **kwargs}
return await self.get(endpoint, params=params)
| en | 0.690931 | This module provides functionality for async functionality with telegram. Class that provides async interactions with telegram bot. Initialize client session for async telegram bot interactions. Send message to user by chat_id. Can be provided extra options such as: silent notifications, change parse mode, etc. | 2.981668 | 3 |
02_variable_and_simple_data_types/birthday.py | simonhoch/python_basics | 0 | 6618220 | <reponame>simonhoch/python_basics<gh_stars>0
age = 23
# Pick the correct English ordinal suffix for the age.
# 11-13 are special-cased ("11th", not "11st").
if 10 <= age % 100 <= 13:
    suffix = "th"
else:
    suffix = {1: "st", 2: "nd", 3: "rd"}.get(age % 10, "th")
# Fixed typo "Happpy" -> "Happy"; suffix is now derived from age instead of
# being hard-coded to "rd".
message = "Happy " + str(age) + suffix + " Birthday!"
print(message)
| age = 23
message = "Happpy " + str(age) + "rd Birthday!"
print (message) | none | 1 | 2.918842 | 3 | |
redis_queue/redis.py | lorne-luo/quicksilver | 0 | 6618221 | import redis
import config
queue_redis = redis.StrictRedis(host=config.REDIS_HOST,
port=config.REDIS_PORT,
db=config.REDIS_DB,
decode_responses=True)
status_redis = queue_redis
| import redis
import config
queue_redis = redis.StrictRedis(host=config.REDIS_HOST,
port=config.REDIS_PORT,
db=config.REDIS_DB,
decode_responses=True)
status_redis = queue_redis
| none | 1 | 1.60779 | 2 | |
xgds_timeseries/views.py | xgds/xgds_timeseries | 0 | 6618222 | # __BEGIN_LICENSE__
# Copyright (c) 2015, United States Government, as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All rights reserved.
#
# The xGDS platform is licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0.
#
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
# __END_LICENSE__
import json
import traceback
from dateutil.parser import parse as dateparser
from django.conf import settings
from django.http import HttpResponseForbidden, JsonResponse, HttpResponseNotAllowed
from geocamUtil.loader import getModelByName
from geocamUtil.datetimeJsonEncoder import DatetimeJsonEncoder
from xgds_core.util import get_all_subclasses
from xgds_timeseries.models import TimeSeriesModel
def get_time_series_classes(skip_example=True):
    """
    Collect the fully qualified names of all TimeSeriesModel subclasses.
    :param skip_example: True to leave out the example classes, false otherwise
    :return: a list of 'app_label.ClassName' strings
    """
    return ['%s.%s' % (cls._meta.app_label, cls.__name__)
            for cls in get_all_subclasses(TimeSeriesModel)
            if not (skip_example and 'xample' in cls.__name__)]
def get_time_series_classes_json(request, skip_example=True):
    """
    JSON endpoint wrapping get_time_series_classes.
    :param skip_example: True to leave out the example classes, false otherwise
    :return: JsonResponse containing the list of class names
    """
    names = get_time_series_classes(skip_example)
    return JsonResponse(names, safe=False)
def get_time_series_classes_metadata(skip_example=True, flight_ids=None):
    """
    Return a list of dictionaries of time series classes and their titles.
    :param skip_example: True to skip the example classes, false otherwise
    :param flight_ids: an optional list of flight ids; when given, classes
        with no data for those flights are omitted
    :return: a list of dicts with model_name/title/stateful/sse_type keys
    """
    result = []
    for the_class in get_all_subclasses(TimeSeriesModel):
        if skip_example and 'xample' in the_class.__name__:  # skip example classes
            continue
        # When flights are given, only include classes that have data for them.
        # (Folding the two branches removes the previously duplicated literal.)
        if flight_ids and not check_flight_values_exist(the_class, flight_ids):
            continue
        result.append({'model_name': '%s.%s' % (the_class._meta.app_label, the_class.__name__),
                       'title': str(the_class.title),
                       'stateful': 'true' if the_class.stateful else 'false',
                       'sse_type': the_class.getSseType(),
                       })
    return result
def get_time_series_classes_metadata_json(request, skip_example=True):
    """
    JSON endpoint wrapping get_time_series_classes_metadata.
    :param request: request.POST may carry flight ids under 'flight_ids'
        or 'flight_ids[]'
    :param skip_example: True to skip the example classes, false otherwise
    :return: JsonResponse with the metadata list
    """
    flight_ids = None
    # Accept both the plain and the jQuery-style array parameter name.
    for key in ('flight_ids', 'flight_ids[]'):
        if key in request.POST:
            flight_ids = request.POST.getlist(key, None)
            break
    metadata = get_time_series_classes_metadata(skip_example, flight_ids)
    return JsonResponse(metadata, safe=False)
def unravel_post(post_dict):
    """
    Extract the query parameters this app cares about from a POST dict.
    :param post_dict: QueryDict from request.POST
    :return: a PostData instance with any recognized values filled in
    """
    class PostData(object):
        model = None
        channel_names = None
        flight_ids = None
        start_time = None
        end_time = None
        filter_dict = None
        time = None
        downsample = None

    result = PostData()

    # model name is required to resolve the model class
    model_name = post_dict.get('model_name', None)
    if model_name:
        result.model = getModelByName(model_name)

    result.channel_names = post_dict.getlist('channel_names', None)

    # Accept both the plain and the jQuery-style array parameter name.
    if 'flight_ids' in post_dict:
        result.flight_ids = post_dict.getlist('flight_ids', None)
    elif 'flight_ids[]' in post_dict:
        result.flight_ids = post_dict.getlist('flight_ids[]', None)

    # Datetimes arrive as strings; parse whichever are present.
    for attr in ('start_time', 'end_time', 'time'):
        raw = post_dict.get(attr, None)
        if raw:
            setattr(result, attr, dateparser(raw))

    raw_filter = post_dict.get('filter', None)
    if raw_filter:
        result.filter_dict = json.loads(raw_filter)

    raw_downsample = post_dict.get('downsample', None)
    if raw_downsample is not None:
        result.downsample = int(raw_downsample)

    return result
def get_min_max(model, start_time=None, end_time=None, flight_ids=None, filter_dict=None, channel_names=None):
    """
    Compute min/max values for the given model and filters.
    :param model: the model class to query
    :param start_time: datetime lower bound
    :param end_time: datetime upper bound
    :param flight_ids: flight ids to restrict the query to
    :param filter_dict: dictionary of any other filter terms
    :param channel_names: channels of interest (dynamic models always use
        the model's full channel list instead)
    :return: the min/max values from the model's manager
    """
    common = dict(start_time=start_time,
                  end_time=end_time,
                  flight_ids=flight_ids,
                  filter_dict=filter_dict)
    if getattr(model, 'dynamic', False):
        return model.objects.get_dynamic_min_max(
            channel_names=model.get_channel_names(),
            dynamic_value=model.dynamic_value,
            dynamic_separator=model.dynamic_separator,
            **common)
    return model.objects.get_min_max(channel_names=channel_names, **common)
def get_min_max_json(request):
    """
    JSON endpoint returning min/max values for a POSTed query.
    :param request: POST must carry model_name plus optional filters
    :return: JsonResponse with the values; 204 when nothing matched,
        405 with a traceback on error, 403 for non-POST requests
    """
    if request.method != 'POST':
        return HttpResponseForbidden()
    try:
        query = unravel_post(request.POST)
        values = get_min_max(model=query.model,
                             start_time=query.start_time,
                             end_time=query.end_time,
                             flight_ids=query.flight_ids,
                             filter_dict=query.filter_dict,
                             channel_names=query.channel_names)
        if values:
            return JsonResponse(values, encoder=DatetimeJsonEncoder)
        return JsonResponse({'status': 'error', 'message': 'No min/max values were found.'}, status=204)
    except Exception:
        return HttpResponseNotAllowed(["POST"], content=traceback.format_exc())
def get_packed_list(model, values, channel_names):
    """
    Pack value dictionaries into lists ordered like the model's fields.
    :param model: the model (its manager supplies the field ordering)
    :param values: iterable of per-sample dictionaries
    :param channel_names: channels of interest, used to pick the fields
    :return: a list of lists, one inner list per sample
    """
    fields = model.objects.get_fields(channel_names)
    return [[entry[f] for f in fields] for entry in values]
def get_values_list(model, channel_names, flight_ids, start_time, end_time, filter_dict, packed=True,
                    downsample=settings.XGDS_TIMESERIES_DOWNSAMPLE_DATA_SECONDS):
    """
    Fetch time series values for a model under the given filters.
    :param model: the model to query
    :param channel_names: channels of interest
    :param flight_ids: flight ids to restrict the query to
    :param start_time: datetime lower bound
    :param end_time: datetime upper bound
    :param filter_dict: dictionary of any other filter terms
    :param packed: True for a list of lists (field order, no keys),
        False for a list of dicts
    :param downsample: number of seconds to skip between samples
    :return: a list of lists or a list of dicts
    """
    if getattr(model, 'dynamic', False):
        values = model.objects.get_dynamic_values(start_time, end_time, flight_ids, filter_dict,
                                                  channel_names, downsample)
    else:
        values = model.objects.get_values(start_time, end_time, flight_ids, filter_dict,
                                          channel_names, downsample)
    if packed:
        return get_packed_list(model, values, channel_names)
    return list(values)
def get_values_json(request, packed=True,
                    downsample=settings.XGDS_TIMESERIES_DOWNSAMPLE_DATA_SECONDS):
    """
    Returns a JsonResponse of the data values described by the filters in the POST dictionary
    :param request: the request
    :request.POST:
    : model_name: The fully qualified name of the model, ie xgds_braille_app.Environmental
    : channel_names: The list of channel names you are interested in
    : flight_ids: The list of flight ids to filter by
    : start_time: Isoformat start time
    : end_time: Isoformat end time
    : filter: Json string of a dictionary to further filter the data
    : downsample: number of seconds to downsample by, takes priority
    :param packed: true to return a list of lists (no keys), false to return a list of dicts
    :param downsample: Number of seconds to downsample or skip when filtering data
    :return: a JsonResponse with a list of dicts with all the results
    """
    if request.method == 'POST':
        try:
            post_values = unravel_post(request.POST)
            # A POSTed downsample overrides the keyword default.
            if post_values.downsample is not None:
                downsample = int(post_values.downsample)
            values = get_values_list(post_values.model, post_values.channel_names, post_values.flight_ids,
                                     post_values.start_time, post_values.end_time, post_values.filter_dict,
                                     packed, downsample)
            if values:
                return JsonResponse(values, encoder=DatetimeJsonEncoder, safe=False)
            else:
                return JsonResponse({'status': 'error', 'message': 'No values were found.'}, status=204)
        except Exception:
            # Fixed: the original called HttpResponseNotAllowed(e.message);
            # e.message does not exist on Python 3 exceptions, and the first
            # argument must be the permitted-methods list. This mirrors the
            # handler used by get_min_max_json.
            return HttpResponseNotAllowed(["POST"], content=traceback.format_exc())
    return HttpResponseForbidden()
def check_flight_values_exist(model, flight_ids):
    """
    Check whether the model has any data for the given flights.
    :param model: the model to query
    :param flight_ids: list of flight ids to check
    :return: True when at least one matching record exists
    """
    return model.objects.get_flight_data(flight_ids).exists()
def get_flight_values_list(model, flight_ids, channel_names, packed=True, downsample=0):
    """
    Fetch all values of a model for the given flights.
    :param model: the model to query
    :param flight_ids: flight ids to restrict the query to
    :param channel_names: channels of interest
    :param packed: True for a list of lists (field order), False for dicts
    :param downsample: number of seconds to skip between samples
    :return: a list of lists or a list of dicts
    """
    if getattr(model, 'dynamic', False):
        values = model.objects.get_dynamic_flight_values(
            flight_ids,
            channel_names=model.get_channel_names(),
            dynamic_value=model.dynamic_value,
            dynamic_separator=model.dynamic_separator,
            downsample=downsample)
    else:
        values = model.objects.get_flight_values(flight_ids, channel_names, downsample)
    if packed:
        return get_packed_list(model, values, channel_names)
    return list(values)
def get_flight_values_time_list(model, flight_ids, channel_names, packed=True, time=None, downsample=0):
    """
    Return the single sample of a model nearest to the given time.
    :param model: the model to query
    :param flight_ids: flight ids to restrict the query to
    :param channel_names: channels of interest
    :param packed: True for a list of lists (field order), False for dicts
    :param time: the time for which we are looking for the data (required)
    :param downsample: accepted for signature symmetry with
        get_flight_values_list (get_flight_values_time_json forwards it);
        a single-sample lookup has nothing to downsample, so it is ignored.
        Previously the forwarded kwarg raised TypeError.
    :return: a one-element list with the nearest sample, or None
    :raises Exception: if time is not provided
    """
    if not time:
        raise Exception('Time is required')
    values = model.objects.get_values_at_time(time, flight_ids, channel_names)
    if not values:
        return None
    if not packed:
        return [values.first()]
    return get_packed_list(model, [values.first()], channel_names)
def get_flight_values_json(request, packed=True, downsample=0):
    """
    JSON endpoint returning all values of a model for the POSTed flights.
    :param request: the request
    :request.POST:
    : model_name: the fully qualified name of the model, ie xgds_braille_app.Environmental
    : channel_names: channels of interest
    : flight_ids: flight ids to filter by
    : downsample: number of seconds to downsample by, takes priority
    :param packed: True for a list of lists, False for a list of dicts
    :param downsample: number of seconds to skip when getting data samples
    :return: JsonResponse with the results; 204 when nothing matched,
        405 with a traceback on error, 403 for non-POST requests
    """
    if request.method != 'POST':
        return HttpResponseForbidden()
    try:
        query = unravel_post(request.POST)
        if query.downsample is not None:
            downsample = int(query.downsample)
        values = get_flight_values_list(query.model, query.flight_ids, query.channel_names,
                                        packed=packed, downsample=downsample)
        if values:
            return JsonResponse(values, encoder=DatetimeJsonEncoder, safe=False)
        return JsonResponse({'status': 'error', 'message': 'No values were found.'}, status=204)
    except Exception:
        return HttpResponseNotAllowed(["POST"], content=traceback.format_exc())
def get_flight_values_time_json(request, packed=True, downsample=0):
    """
    Returns a JsonResponse with the single data sample nearest the POSTed time.
    :param request: the request
    :request.POST:
    : model_name: The fully qualified name of the model, ie xgds_braille_app.Environmental
    : channel_names: The list of channel names you are interested in
    : flight_ids: The list of flight ids to filter by
    : time: The nearest time for the data
    :param packed: true to return a list of lists, false to return a list of dicts
    :param downsample: unused; a single nearest-time sample has nothing to downsample
    :return: a JsonResponse with a one-element list of results
    """
    if request.method == 'POST':
        try:
            post_values = unravel_post(request.POST)
            # Fixed: the original forwarded downsample=... to
            # get_flight_values_time_list, whose signature does not accept it,
            # raising TypeError on every request. Downsampling is meaningless
            # for a single-sample lookup, so it is simply not forwarded.
            values = get_flight_values_time_list(post_values.model, post_values.flight_ids,
                                                 post_values.channel_names,
                                                 packed=packed, time=post_values.time)
            if values:
                return JsonResponse(values, encoder=DatetimeJsonEncoder, safe=False)
            else:
                return JsonResponse({'status': 'error', 'message': 'No values were found.'}, status=204)
        except Exception:
            # Fixed: e.message is Python 2 only and HttpResponseNotAllowed
            # takes the permitted-methods list first; mirror the other endpoints.
            return HttpResponseNotAllowed(["POST"], content=traceback.format_exc())
    return HttpResponseForbidden()
def get_channel_descriptions(model, channel_name=None):
    """
    Look up channel descriptions on the given model.
    :param model: the model
    :param channel_name: optional single channel name
    :return: the description for that channel, or all descriptions when
        no channel name was given; may be None
    """
    if channel_name:
        return model.get_channel_description(channel_name)
    return model.get_channel_descriptions()
def get_channel_descriptions_json(request):
    """
    Returns a JsonResponse of the channel descriptions described by the model
    :param request: the request
    :param request.POST.model_name: the fully qualified name of the model
    :param request.POST.channel_name: (optional) the name of the channel
    :return: JsonResponse with the result; 204 for bad parameters,
        405 with a traceback on error, 403 for non-POST requests
    """
    if request.method == 'POST':
        try:
            model_name = request.POST.get('model_name', None)
            # model name is required
            if model_name:
                model = getModelByName(model_name)
                if model:
                    channel_name = request.POST.get('channel_name', None)
                    descriptions = get_channel_descriptions(model, channel_name)
                    if descriptions:
                        # Fixed: dict.iteritems() is Python 2 only and the
                        # original mutated the dict while iterating it; build a
                        # fresh plain-dict payload instead.
                        payload = {key: value if isinstance(value, dict) else value.__dict__
                                   for key, value in descriptions.items()}
                        return JsonResponse(payload)
            return JsonResponse({'error': 'bad parameters'}, status=204)
        except Exception:
            return HttpResponseNotAllowed(["POST"], content=traceback.format_exc())
    return HttpResponseForbidden()
| # __BEGIN_LICENSE__
# Copyright (c) 2015, United States Government, as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All rights reserved.
#
# The xGDS platform is licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0.
#
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
# __END_LICENSE__
import json
import traceback
from dateutil.parser import parse as dateparser
from django.conf import settings
from django.http import HttpResponseForbidden, JsonResponse, HttpResponseNotAllowed
from geocamUtil.loader import getModelByName
from geocamUtil.datetimeJsonEncoder import DatetimeJsonEncoder
from xgds_core.util import get_all_subclasses
from xgds_timeseries.models import TimeSeriesModel
def get_time_series_classes(skip_example=True):
"""
Return a list of time series classes
:param skip_example: True to skip the example classes, false otherwise
:return: a list of [app_label.classname] for classes that extend TimeSeriesModel
"""
list_result = []
for the_class in get_all_subclasses(TimeSeriesModel):
if skip_example and 'xample' in the_class.__name__: # skip example classes
continue
list_result.append('%s.%s' % (the_class._meta.app_label, the_class.__name__))
return list_result
def get_time_series_classes_json(request, skip_example=True):
"""
Return a json response with the list of time series classes
:param skip_example: True to skip the example classes, false otherwise
:return:
"""
return JsonResponse(get_time_series_classes(skip_example), safe=False)
def get_time_series_classes_metadata(skip_example=True, flight_ids=None):
"""
Return a list of dictionaries of time series classes and their titles
:param skip_example: True to skip the example classes, false otherwise
:param flight_ids: an optional list of flight ids; this will check for each timeseries data type for the given flights
:return: a list of dictionaries
"""
result = []
for the_class in get_all_subclasses(TimeSeriesModel):
if skip_example and 'xample' in the_class.__name__: # skip example classes
continue
if flight_ids:
if check_flight_values_exist(the_class, flight_ids):
result.append({'model_name': '%s.%s' % (the_class._meta.app_label, the_class.__name__),
'title': str(the_class.title),
'stateful': 'true' if the_class.stateful else 'false',
'sse_type': the_class.getSseType(),
})
else:
# no flight ids do not filter
result.append({'model_name': '%s.%s' % (the_class._meta.app_label, the_class.__name__),
'title': str(the_class.title),
'stateful': 'true' if the_class.stateful else 'false',
'sse_type': the_class.getSseType(),
})
return result
def get_time_series_classes_metadata_json(request, skip_example=True):
"""
Return a json response with the list of time series classes metadata
:param request: request.POST should contain a list of flight ids
:param skip_example: True to skip the example classes, false otherwise
:return:
"""
flight_ids = None
if 'flight_ids' in request.POST:
flight_ids = request.POST.getlist('flight_ids', None)
elif 'flight_ids[]' in request.POST:
flight_ids = request.POST.getlist('flight_ids[]', None)
return JsonResponse(get_time_series_classes_metadata(skip_example, flight_ids), safe=False)
def unravel_post(post_dict):
"""
Read the useful contents of the post dictionary
:param post_dict:
:return: the PostData properly filled out
"""
class PostData(object):
model = None
channel_names = None
flight_ids = None
start_time = None
end_time = None
filter_dict = None
time = None
downsample = None
result = PostData()
model_name = post_dict.get('model_name', None)
# model name is required
if model_name:
result.model = getModelByName(model_name)
result.channel_names = post_dict.getlist('channel_names', None)
if 'flight_ids' in post_dict:
result.flight_ids = post_dict.getlist('flight_ids', None)
elif 'flight_ids[]' in post_dict:
result.flight_ids = post_dict.getlist('flight_ids[]', None)
start_time_string = post_dict.get('start_time', None)
if start_time_string:
result.start_time = dateparser(start_time_string)
end_time_string = post_dict.get('end_time', None)
if end_time_string:
result.end_time = dateparser(end_time_string)
time_string = post_dict.get('time', None)
if time_string:
result.time = dateparser(time_string)
filter_json = post_dict.get('filter', None)
if filter_json:
result.filter_dict = json.loads(filter_json)
result.downsample = post_dict.get('downsample', None)
if result.downsample is not None:
result.downsample = int(result.downsample)
return result
def get_min_max(model, start_time=None, end_time=None, flight_ids=None, filter_dict=None, channel_names=None):
"""
Returns a dict with the min max values
:param model: The model to use
:param start_time: datetime of start time
:param end_time: datetime of end time
:param flight_ids: The list of channel names you are interested in
:param filter_dict: a dictionary of any other filter
:param channel_names: The list of channel names you are interested in
:return: a list of dicts with the min max values.
"""
if hasattr(model, 'dynamic') and model.dynamic:
return model.objects.get_dynamic_min_max(
start_time=start_time,
end_time=end_time,
flight_ids=flight_ids,
filter_dict=filter_dict,
channel_names=model.get_channel_names(),
dynamic_value=model.dynamic_value,
dynamic_separator=model.dynamic_separator,
)
return model.objects.get_min_max(start_time=start_time,
end_time=end_time,
flight_ids=flight_ids,
filter_dict=filter_dict,
channel_names=channel_names)
def get_min_max_json(request):
"""
Returns a JsonResponse with min and max values
:param request:
:return:
"""
if request.method == 'POST':
try:
post_values = unravel_post(request.POST)
values = get_min_max(model=post_values.model,
start_time=post_values.start_time,
end_time=post_values.end_time,
flight_ids=post_values.flight_ids,
filter_dict=post_values.filter_dict,
channel_names=post_values.channel_names)
if values:
return JsonResponse(values, encoder=DatetimeJsonEncoder)
else:
return JsonResponse({'status': 'error', 'message': 'No min/max values were found.'}, status=204)
except Exception as e:
return HttpResponseNotAllowed(["POST"], content=traceback.format_exc())
return HttpResponseForbidden()
def get_packed_list(model, values, channel_names):
"""
Returns a list of lists with the values in the same order as the fields
:param model: the model
:param values: the iterable values, each value is a dictionary
:return: a list of lists
"""
fields = model.objects.get_fields(channel_names)
packed = []
for entry in values:
packed_entry = []
for f in fields:
packed_entry.append(entry[f])
packed.append(packed_entry)
return packed
def get_values_list(model, channel_names, flight_ids, start_time, end_time, filter_dict, packed=True,
downsample=settings.XGDS_TIMESERIES_DOWNSAMPLE_DATA_SECONDS):
"""
Returns a list of dicts of the data values
:param model: The model to use
:param channel_names: The list of channel names you are interested in
:param flight_ids: The list of channel names you are interested in
:param start_time: datetime of start time
:param end_time: datetime of end time
:param filter_dict: a dictionary of any other filter
:param packed: true to return a list of lists (no keys), false to return a list of dicts
:param downsample: Number of seconds to downsample or skip when filtering data
:return: a list of dicts with the results.
"""
if hasattr(model, 'dynamic') and model.dynamic:
values = model.objects.get_dynamic_values(start_time, end_time, flight_ids, filter_dict, channel_names,
downsample)
else:
values = model.objects.get_values(start_time, end_time, flight_ids, filter_dict, channel_names, downsample)
if not packed:
return list(values)
else:
return get_packed_list(model, values, channel_names)
def get_values_json(request, packed=True,
downsample=settings.XGDS_TIMESERIES_DOWNSAMPLE_DATA_SECONDS):
"""
Returns a JsonResponse of the data values described by the filters in the POST dictionary
:param request: the request
:request.POST:
: model_name: The fully qualified name of the model, ie xgds_braille_app.Environmental
: channel_names: The list of channel names you are interested in
: flight_ids: The list of flight ids to filter by
: start_time: Isoformat start time
: end_time: Isoformat end time
: filter: Json string of a dictionary to further filter the data
:param packed: true to return a list of lists (no keys), false to return a list of dicts
:param downsample: Number of seconds to downsample or skip when filtering data
:return: a JsonResponse with a list of dicts with all the results
"""
if request.method == 'POST':
try:
post_values = unravel_post(request.POST)
if post_values.downsample is not None:
downsample = int(post_values.downsample)
values = get_values_list(post_values.model, post_values.channel_names, post_values.flight_ids,
post_values.start_time, post_values.end_time, post_values.filter_dict,
packed, downsample)
if values:
return JsonResponse(values, encoder=DatetimeJsonEncoder, safe=False)
else:
return JsonResponse({'status': 'error', 'message': 'No values were found.'}, status=204)
except Exception as e:
return HttpResponseNotAllowed(e.message)
return HttpResponseForbidden()
def check_flight_values_exist(model, flight_ids):
"""
:param model: the model
:param flight_ids: list of flight ids to check
:return: Returns true if there are values of this type for all the given flight ids
"""
values = model.objects.get_flight_data(flight_ids)
return values.exists()
def get_flight_values_list(model, flight_ids, channel_names, packed=True, downsample=0):
"""
Returns a list of dicts of the data values
:param model: The model to use
:param flight_ids: The list of channel names you are interested in
:param packed: true to return a list of lists, false to return a list of dicts
:param downsample: number of seconds to skip between data samples
:return: a list of dicts with the results.
"""
if hasattr(model, 'dynamic') and model.dynamic:
values = model.objects.get_dynamic_flight_values(
flight_ids,
channel_names=model.get_channel_names(),
dynamic_value=model.dynamic_value,
dynamic_separator=model.dynamic_separator,
downsample=downsample
)
else:
values = model.objects.get_flight_values(flight_ids, channel_names, downsample)
if not packed:
return list(values)
else:
result = get_packed_list(model, values, channel_names)
return result
def get_flight_values_time_list(model, flight_ids, channel_names, packed=True, time=None):
"""
Returns a list of one dict of the data values
:param model: The model to use
:param flight_ids: The list of channel names you are interested in
:param packed: true to return a list of lists, false to return a list of dicts
:param time: the time for which we are looking for the data
:return: a list of dicts with the results.
"""
if not time:
raise Exception('Time is required')
values = model.objects.get_values_at_time(time, flight_ids, channel_names)
if not values:
return None
if not packed:
# print 'values time for %s:' % str(model)
# print str([values.first()])
return [values.first()]
else:
result = get_packed_list(model, [values.first()], channel_names)
return result
def get_flight_values_json(request, packed=True, downsample=0):
"""
Returns a JsonResponse of the data values described by the filters in the POST dictionary
:param request: the request
:request.POST:
: model_name: The fully qualified name of the model, ie xgds_braille_app.Environmental
: channel_names: The list of channel names you are interested in
: flight_ids: The list of flight ids to filter by
: downsample: number of seconds to downsample by, takes priority
:param packed: true to return a list of lists, false to return a list of dicts
:param downsample: number of seconds to skip when getting data samples
:return: a JsonResponse with a list of dicts with all the results
"""
if request.method == 'POST':
try:
post_values = unravel_post(request.POST)
if post_values.downsample is not None:
downsample = int(post_values.downsample)
values = get_flight_values_list(post_values.model, post_values.flight_ids, post_values.channel_names,
packed=packed, downsample=downsample)
if values:
return JsonResponse(values, encoder=DatetimeJsonEncoder, safe=False)
else:
return JsonResponse({'status': 'error', 'message': 'No values were found.'}, status=204)
except Exception as e:
return HttpResponseNotAllowed(["POST"], content=traceback.format_exc())
return HttpResponseForbidden()
def get_flight_values_time_json(request, packed=True, downsample=0):
"""
Returns a JsonResponse of the data values described by the filters in the POST dictionary
:param request: the request
:request.POST:
: model_name: The fully qualified name of the model, ie xgds_braille_app.Environmental
: channel_names: The list of channel names you are interested in
: flight_ids: The list of flight ids to filter by
: time: The nearest time for the data
:param packed: true to return a list of lists, false to return a list of dicts
:param downsample: number of seconds to skip between data samples
:return: a JsonResponse with a list of dicts with all the results
"""
if request.method == 'POST':
try:
post_values = unravel_post(request.POST)
if post_values.downsample is not None:
downsample = int(post_values.downsample)
values = get_flight_values_time_list(post_values.model, post_values.flight_ids, post_values.channel_names,
packed=packed, time=post_values.time, downsample=downsample)
if values:
return JsonResponse(values, encoder=DatetimeJsonEncoder, safe=False)
else:
return JsonResponse({'status': 'error', 'message': 'No values were found.'}, status=204)
except Exception as e:
return HttpResponseNotAllowed(e.message)
return HttpResponseForbidden()
def get_channel_descriptions(model, channel_name=None):
"""
Returns a dictionary of channel descriptions for the given model
:param model: the model
:param channel_name: the channel name
:return: dictionary of results, or None
"""
if not channel_name:
return model.get_channel_descriptions()
else:
return model.get_channel_description(channel_name)
def get_channel_descriptions_json(request):
"""
Returns a JsonResponse of the channel descriptions described by the model
:param request: the request
:param request.POST.model_name: the fully qualified name of the model
:param request.POST.channel_name: (optional) the name of the channel
:return: JsonResponse with the result.
"""
if request.method == 'POST':
try:
model_name = request.POST.get('model_name', None)
# model name is required
if model_name:
model = getModelByName(model_name)
if model:
channel_name = request.POST.get('channel_name', None)
result = get_channel_descriptions(model, channel_name)
if result:
for key, value in result.iteritems():
if not isinstance(value, dict):
result[key] = value.__dict__
return JsonResponse(result)
return JsonResponse({'error': 'bad parameters'}, status=204)
except Exception as e:
return HttpResponseNotAllowed(["POST"], content=traceback.format_exc())
return HttpResponseForbidden()
| en | 0.784276 | # __BEGIN_LICENSE__ # Copyright (c) 2015, United States Government, as represented by the # Administrator of the National Aeronautics and Space Administration. # All rights reserved. # # The xGDS platform is licensed under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0. # # Unless required by applicable law or agreed to in writing, software distributed # under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR # CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. # __END_LICENSE__ Return a list of time series classes :param skip_example: True to skip the example classes, false otherwise :return: a list of [app_label.classname] for classes that extend TimeSeriesModel # skip example classes Return a json response with the list of time series classes :param skip_example: True to skip the example classes, false otherwise :return: Return a list of dictionaries of time series classes and their titles :param skip_example: True to skip the example classes, false otherwise :param flight_ids: an optional list of flight ids; this will check for each timeseries data type for the given flights :return: a list of dictionaries # skip example classes # no flight ids do not filter Return a json response with the list of time series classes metadata :param request: request.POST should contain a list of flight ids :param skip_example: True to skip the example classes, false otherwise :return: Read the useful contents of the post dictionary :param post_dict: :return: the PostData properly filled out # model name is required Returns a dict with the min max values :param model: The model to use :param start_time: datetime of start time :param end_time: datetime of end time :param flight_ids: The 
list of channel names you are interested in :param filter_dict: a dictionary of any other filter :param channel_names: The list of channel names you are interested in :return: a list of dicts with the min max values. Returns a JsonResponse with min and max values :param request: :return: Returns a list of lists with the values in the same order as the fields :param model: the model :param values: the iterable values, each value is a dictionary :return: a list of lists Returns a list of dicts of the data values :param model: The model to use :param channel_names: The list of channel names you are interested in :param flight_ids: The list of channel names you are interested in :param start_time: datetime of start time :param end_time: datetime of end time :param filter_dict: a dictionary of any other filter :param packed: true to return a list of lists (no keys), false to return a list of dicts :param downsample: Number of seconds to downsample or skip when filtering data :return: a list of dicts with the results. 
Returns a JsonResponse of the data values described by the filters in the POST dictionary :param request: the request :request.POST: : model_name: The fully qualified name of the model, ie xgds_braille_app.Environmental : channel_names: The list of channel names you are interested in : flight_ids: The list of flight ids to filter by : start_time: Isoformat start time : end_time: Isoformat end time : filter: Json string of a dictionary to further filter the data :param packed: true to return a list of lists (no keys), false to return a list of dicts :param downsample: Number of seconds to downsample or skip when filtering data :return: a JsonResponse with a list of dicts with all the results :param model: the model :param flight_ids: list of flight ids to check :return: Returns true if there are values of this type for all the given flight ids Returns a list of dicts of the data values :param model: The model to use :param flight_ids: The list of channel names you are interested in :param packed: true to return a list of lists, false to return a list of dicts :param downsample: number of seconds to skip between data samples :return: a list of dicts with the results. Returns a list of one dict of the data values :param model: The model to use :param flight_ids: The list of channel names you are interested in :param packed: true to return a list of lists, false to return a list of dicts :param time: the time for which we are looking for the data :return: a list of dicts with the results. 
# print 'values time for %s:' % str(model) # print str([values.first()]) Returns a JsonResponse of the data values described by the filters in the POST dictionary :param request: the request :request.POST: : model_name: The fully qualified name of the model, ie xgds_braille_app.Environmental : channel_names: The list of channel names you are interested in : flight_ids: The list of flight ids to filter by : downsample: number of seconds to downsample by, takes priority :param packed: true to return a list of lists, false to return a list of dicts :param downsample: number of seconds to skip when getting data samples :return: a JsonResponse with a list of dicts with all the results Returns a JsonResponse of the data values described by the filters in the POST dictionary :param request: the request :request.POST: : model_name: The fully qualified name of the model, ie xgds_braille_app.Environmental : channel_names: The list of channel names you are interested in : flight_ids: The list of flight ids to filter by : time: The nearest time for the data :param packed: true to return a list of lists, false to return a list of dicts :param downsample: number of seconds to skip between data samples :return: a JsonResponse with a list of dicts with all the results Returns a dictionary of channel descriptions for the given model :param model: the model :param channel_name: the channel name :return: dictionary of results, or None Returns a JsonResponse of the channel descriptions described by the model :param request: the request :param request.POST.model_name: the fully qualified name of the model :param request.POST.channel_name: (optional) the name of the channel :return: JsonResponse with the result. # model name is required | 2.031564 | 2 |
diesel/__init__.py | byrgazov/diesel | 0 | 6618223 | # vim:ts=4:sw=4:expandtab
#from . import events
from .log import levels as loglevels
from .core import sleep, Loop, wait, fire, thread, until, Connection, UDPSocket, ConnectionClosed, ClientConnectionClosed, signal
from .core import until_eol, send, receive, call, first, fork, fork_child, label, fork_from_thread
from .core import ParentDiedException, ClientConnectionError, TerminateLoop, datagram
from .app import Application, Service, UDPService, quickstart, quickstop, Thunk
from .client import Client, UDPClient
#from .resolver import resolve_dns_name, DNSResolutionError
#from .runtime import is_running
#from .hub import ExistingSignalHandler
| # vim:ts=4:sw=4:expandtab
#from . import events
from .log import levels as loglevels
from .core import sleep, Loop, wait, fire, thread, until, Connection, UDPSocket, ConnectionClosed, ClientConnectionClosed, signal
from .core import until_eol, send, receive, call, first, fork, fork_child, label, fork_from_thread
from .core import ParentDiedException, ClientConnectionError, TerminateLoop, datagram
from .app import Application, Service, UDPService, quickstart, quickstop, Thunk
from .client import Client, UDPClient
#from .resolver import resolve_dns_name, DNSResolutionError
#from .runtime import is_running
#from .hub import ExistingSignalHandler
| en | 0.283816 | # vim:ts=4:sw=4:expandtab #from . import events #from .resolver import resolve_dns_name, DNSResolutionError #from .runtime import is_running #from .hub import ExistingSignalHandler | 1.387361 | 1 |
airflow_presto/operators/presto_kubernetes_operator.py | Qbizinc/airflow-presto | 1 | 6618224 | <gh_stars>1-10
from airflow.contrib.operators.kubernetes_pod_operator import KubernetesPodOperator
from airflow.utils.decorators import apply_defaults
class PrestoKubernetesOperator(KubernetesPodOperator):
"""
Executes a Presto SQL query in a Kubernetes Pod
:param sql: sql query string or path to sql file. (templated)
:type sql: str
:param output_path: if specified a path in s3 to upload the Query results in CSV. (templated)
:type output_path: str
:param output_cmd: if specified a cmd to be executed with the following pattern:
${OUTPUT_CMD} out.csv ${OUTPUT_PATH}.
:type output_cmd: str
:param image: Docker image you wish to launch. Use the one provided in this plugin.
:type image: str
:param name: name of the pod in which the task will run, will be used (plus a random
suffix) to generate a pod id (DNS-1123 subdomain, containing only [a-z0-9.-]).
:type name: str
:param cmds: entrypoint of the container. (templated)
The docker images's entrypoint is used if this is not provided.
:type cmds: list[str]
:param arguments: arguments of the entrypoint. (templated)
The docker image's CMD is used if this is not provided.
:type arguments: list[str]
:param image_pull_policy: Specify a policy to cache or always pull an image.
:type image_pull_policy: str
:param image_pull_secrets: Any image pull secrets to be given to the pod.
If more than one secret is required, provide a
comma separated list: secret_a,secret_b
:type image_pull_secrets: str
:param ports: ports for launched pod.
:type ports: list[airflow.kubernetes.pod.Port]
:param volume_mounts: volumeMounts for launched pod.
:type volume_mounts: list[airflow.kubernetes.volume_mount.VolumeMount]
:param volumes: volumes for launched pod. Includes ConfigMaps and PersistentVolumes.
:type volumes: list[airflow.kubernetes.volume.Volume]
:param labels: labels to apply to the Pod.
:type labels: dict
:param startup_timeout_seconds: timeout in seconds to startup the pod.
:type startup_timeout_seconds: int
:param name: name of the pod in which the task will run, will be used to
generate a pod id (DNS-1123 subdomain, containing only [a-z0-9.-]).
:type name: str
:param env_vars: Environment variables initialized in the container. (templated)
:type env_vars: dict
:param secrets: Kubernetes secrets to inject in the container.
They can be exposed as environment vars or files in a volume.
:type secrets: list[airflow.kubernetes.secret.Secret]
:param in_cluster: run kubernetes client with in_cluster configuration.
:type in_cluster: bool
:param cluster_context: context that points to kubernetes cluster.
Ignored when in_cluster is True. If None, current-context is used.
:type cluster_context: str
:param reattach_on_restart: if the scheduler dies while the pod is running, reattach and monitor
:type reattach_on_restart: bool
:param labels: labels to apply to the Pod.
:type labels: dict
:param startup_timeout_seconds: timeout in seconds to startup the pod.
:type startup_timeout_seconds: int
:param get_logs: get the stdout of the container as logs of the tasks.
:type get_logs: bool
:param annotations: non-identifying metadata you can attach to the Pod.
Can be a large range of data, and can include characters
that are not permitted by labels.
:type annotations: dict
:param resources: A dict containing resources requests and limits.
Possible keys are request_memory, request_cpu, limit_memory, limit_cpu,
and limit_gpu, which will be used to generate airflow.kubernetes.pod.Resources.
See also kubernetes.io/docs/concepts/configuration/manage-compute-resources-container
:type resources: dict
:param affinity: A dict containing a group of affinity scheduling rules.
:type affinity: dict
:param node_selectors: A dict containing a group of scheduling rules.
:type node_selectors: dict
:param config_file: The path to the Kubernetes config file. (templated)
:param config_file: The path to the Kubernetes config file. (templated)
If not specified, default value is ``~/.kube/config``
:type config_file: str
:param do_xcom_push: If do_xcom_push is True, the content of the file
/airflow/xcom/return.json in the container will also be pushed to an
XCom when the container completes.
:type do_xcom_push: bool
:param is_delete_operator_pod: What to do when the pod reaches its final
state, or the execution is interrupted.
If False (default): do nothing, If True: delete the pod
:type is_delete_operator_pod: bool
:param hostnetwork: If True enable host networking on the pod.
:type hostnetwork: bool
:param tolerations: A list of kubernetes tolerations.
:type tolerations: list tolerations
:param configmaps: A list of configmap names objects that we
want mount as env variables.
:type configmaps: list[str]
:param pod_runtime_info_envs: environment variables about
pod runtime information (ip, namespace, nodeName, podName).
:type pod_runtime_info_envs: list[airflow.kubernetes.pod_runtime_info_env.PodRuntimeInfoEnv]
:param security_context: security options the pod should run with (PodSecurityContext).
:type security_context: dict
:param dnspolicy: dnspolicy for the pod.
:type dnspolicy: str
:param schedulername: Specify a schedulername for the pod
:type schedulername: str
:param full_pod_spec: The complete podSpec
:type full_pod_spec: kubernetes.client.models.V1Pod
:param init_containers: init container for the launched Pod
:type init_containers: list[kubernetes.client.models.V1Container]
:param log_events_on_failure: Log the pod's events if a failure occurs
:type log_events_on_failure: bool
:param do_xcom_push: If True, the content of the file
/airflow/xcom/return.json in the container will also be pushed to an
XCom when the container completes.
:type do_xcom_push: bool
:param pod_template_file: path to pod template file
:type pod_template_file: str
"""
template_fields = ('cmds', 'arguments', 'env_vars', 'config_file', 'pod_template_file', 'sql',
'output_path')
template_ext = ('.sql', '.hql') # Older versions of Airflow dont work with single element tuples
ui_color = '#1e6fd9'
@apply_defaults
def __init__(self,
sql,
output_path=None,
output_cmd=None,
cmds=None,
arguments=None,
env_vars=None,
pod_template_file=None,
config_file=None,
*args,
**kwargs):
super(PrestoKubernetesOperator, self).__init__(*args,
cmds=cmds,
arguments=arguments,
env_vars=env_vars,
pod_template_file=pod_template_file,
config_file=config_file,
**kwargs)
self.sql = sql
self.output_path = output_path
self.output_cmd = output_cmd
self.cmds = cmds or []
self.arguments = arguments or []
self.env_vars = env_vars or {}
self.config_file = config_file
self.pod_template_file = pod_template_file
def execute(self, context):
self.log.info('Executing: %s', self.sql)
# The docker image provided with this plugin receives the query through ENV variable.
self.env_vars['QUERY'] = self.sql
if self.output_path:
self.env_vars['OUTPUT_PATH'] = self.output_path
self.log.info('OUTPUT_PATH: %s', self.output_path)
if self.output_cmd:
self.env_vars['OUTPUT_CMD'] = self.output_cmd
self.log.info('OUTPUT_CMD: %s', self.output_cmd)
super().execute(context)
| from airflow.contrib.operators.kubernetes_pod_operator import KubernetesPodOperator
from airflow.utils.decorators import apply_defaults
class PrestoKubernetesOperator(KubernetesPodOperator):
"""
Executes a Presto SQL query in a Kubernetes Pod
:param sql: sql query string or path to sql file. (templated)
:type sql: str
:param output_path: if specified a path in s3 to upload the Query results in CSV. (templated)
:type output_path: str
:param output_cmd: if specified a cmd to be executed with the following pattern:
${OUTPUT_CMD} out.csv ${OUTPUT_PATH}.
:type output_cmd: str
:param image: Docker image you wish to launch. Use the one provided in this plugin.
:type image: str
:param name: name of the pod in which the task will run, will be used (plus a random
suffix) to generate a pod id (DNS-1123 subdomain, containing only [a-z0-9.-]).
:type name: str
:param cmds: entrypoint of the container. (templated)
The docker images's entrypoint is used if this is not provided.
:type cmds: list[str]
:param arguments: arguments of the entrypoint. (templated)
The docker image's CMD is used if this is not provided.
:type arguments: list[str]
:param image_pull_policy: Specify a policy to cache or always pull an image.
:type image_pull_policy: str
:param image_pull_secrets: Any image pull secrets to be given to the pod.
If more than one secret is required, provide a
comma separated list: secret_a,secret_b
:type image_pull_secrets: str
:param ports: ports for launched pod.
:type ports: list[airflow.kubernetes.pod.Port]
:param volume_mounts: volumeMounts for launched pod.
:type volume_mounts: list[airflow.kubernetes.volume_mount.VolumeMount]
:param volumes: volumes for launched pod. Includes ConfigMaps and PersistentVolumes.
:type volumes: list[airflow.kubernetes.volume.Volume]
:param labels: labels to apply to the Pod.
:type labels: dict
:param startup_timeout_seconds: timeout in seconds to startup the pod.
:type startup_timeout_seconds: int
:param name: name of the pod in which the task will run, will be used to
generate a pod id (DNS-1123 subdomain, containing only [a-z0-9.-]).
:type name: str
:param env_vars: Environment variables initialized in the container. (templated)
:type env_vars: dict
:param secrets: Kubernetes secrets to inject in the container.
They can be exposed as environment vars or files in a volume.
:type secrets: list[airflow.kubernetes.secret.Secret]
:param in_cluster: run kubernetes client with in_cluster configuration.
:type in_cluster: bool
:param cluster_context: context that points to kubernetes cluster.
Ignored when in_cluster is True. If None, current-context is used.
:type cluster_context: str
:param reattach_on_restart: if the scheduler dies while the pod is running, reattach and monitor
:type reattach_on_restart: bool
:param labels: labels to apply to the Pod.
:type labels: dict
:param startup_timeout_seconds: timeout in seconds to startup the pod.
:type startup_timeout_seconds: int
:param get_logs: get the stdout of the container as logs of the tasks.
:type get_logs: bool
:param annotations: non-identifying metadata you can attach to the Pod.
Can be a large range of data, and can include characters
that are not permitted by labels.
:type annotations: dict
:param resources: A dict containing resources requests and limits.
Possible keys are request_memory, request_cpu, limit_memory, limit_cpu,
and limit_gpu, which will be used to generate airflow.kubernetes.pod.Resources.
See also kubernetes.io/docs/concepts/configuration/manage-compute-resources-container
:type resources: dict
:param affinity: A dict containing a group of affinity scheduling rules.
:type affinity: dict
:param node_selectors: A dict containing a group of scheduling rules.
:type node_selectors: dict
:param config_file: The path to the Kubernetes config file. (templated)
:param config_file: The path to the Kubernetes config file. (templated)
If not specified, default value is ``~/.kube/config``
:type config_file: str
:param do_xcom_push: If do_xcom_push is True, the content of the file
/airflow/xcom/return.json in the container will also be pushed to an
XCom when the container completes.
:type do_xcom_push: bool
:param is_delete_operator_pod: What to do when the pod reaches its final
state, or the execution is interrupted.
If False (default): do nothing, If True: delete the pod
:type is_delete_operator_pod: bool
:param hostnetwork: If True enable host networking on the pod.
:type hostnetwork: bool
:param tolerations: A list of kubernetes tolerations.
:type tolerations: list tolerations
:param configmaps: A list of configmap names objects that we
want mount as env variables.
:type configmaps: list[str]
:param pod_runtime_info_envs: environment variables about
pod runtime information (ip, namespace, nodeName, podName).
:type pod_runtime_info_envs: list[airflow.kubernetes.pod_runtime_info_env.PodRuntimeInfoEnv]
:param security_context: security options the pod should run with (PodSecurityContext).
:type security_context: dict
:param dnspolicy: dnspolicy for the pod.
:type dnspolicy: str
:param schedulername: Specify a schedulername for the pod
:type schedulername: str
:param full_pod_spec: The complete podSpec
:type full_pod_spec: kubernetes.client.models.V1Pod
:param init_containers: init container for the launched Pod
:type init_containers: list[kubernetes.client.models.V1Container]
:param log_events_on_failure: Log the pod's events if a failure occurs
:type log_events_on_failure: bool
:param do_xcom_push: If True, the content of the file
/airflow/xcom/return.json in the container will also be pushed to an
XCom when the container completes.
:type do_xcom_push: bool
:param pod_template_file: path to pod template file
:type pod_template_file: str
"""
template_fields = ('cmds', 'arguments', 'env_vars', 'config_file', 'pod_template_file', 'sql',
'output_path')
template_ext = ('.sql', '.hql') # Older versions of Airflow dont work with single element tuples
ui_color = '#1e6fd9'
@apply_defaults
def __init__(self,
sql,
output_path=None,
output_cmd=None,
cmds=None,
arguments=None,
env_vars=None,
pod_template_file=None,
config_file=None,
*args,
**kwargs):
super(PrestoKubernetesOperator, self).__init__(*args,
cmds=cmds,
arguments=arguments,
env_vars=env_vars,
pod_template_file=pod_template_file,
config_file=config_file,
**kwargs)
self.sql = sql
self.output_path = output_path
self.output_cmd = output_cmd
self.cmds = cmds or []
self.arguments = arguments or []
self.env_vars = env_vars or {}
self.config_file = config_file
self.pod_template_file = pod_template_file
def execute(self, context):
self.log.info('Executing: %s', self.sql)
# The docker image provided with this plugin receives the query through ENV variable.
self.env_vars['QUERY'] = self.sql
if self.output_path:
self.env_vars['OUTPUT_PATH'] = self.output_path
self.log.info('OUTPUT_PATH: %s', self.output_path)
if self.output_cmd:
self.env_vars['OUTPUT_CMD'] = self.output_cmd
self.log.info('OUTPUT_CMD: %s', self.output_cmd)
super().execute(context) | en | 0.645776 | Executes a Presto SQL query in a Kubernetes Pod :param sql: sql query string or path to sql file. (templated) :type sql: str :param output_path: if specified a path in s3 to upload the Query results in CSV. (templated) :type output_path: str :param output_cmd: if specified a cmd to be executed with the following pattern: ${OUTPUT_CMD} out.csv ${OUTPUT_PATH}. :type output_cmd: str :param image: Docker image you wish to launch. Use the one provided in this plugin. :type image: str :param name: name of the pod in which the task will run, will be used (plus a random suffix) to generate a pod id (DNS-1123 subdomain, containing only [a-z0-9.-]). :type name: str :param cmds: entrypoint of the container. (templated) The docker images's entrypoint is used if this is not provided. :type cmds: list[str] :param arguments: arguments of the entrypoint. (templated) The docker image's CMD is used if this is not provided. :type arguments: list[str] :param image_pull_policy: Specify a policy to cache or always pull an image. :type image_pull_policy: str :param image_pull_secrets: Any image pull secrets to be given to the pod. If more than one secret is required, provide a comma separated list: secret_a,secret_b :type image_pull_secrets: str :param ports: ports for launched pod. :type ports: list[airflow.kubernetes.pod.Port] :param volume_mounts: volumeMounts for launched pod. :type volume_mounts: list[airflow.kubernetes.volume_mount.VolumeMount] :param volumes: volumes for launched pod. Includes ConfigMaps and PersistentVolumes. :type volumes: list[airflow.kubernetes.volume.Volume] :param labels: labels to apply to the Pod. :type labels: dict :param startup_timeout_seconds: timeout in seconds to startup the pod. :type startup_timeout_seconds: int :param name: name of the pod in which the task will run, will be used to generate a pod id (DNS-1123 subdomain, containing only [a-z0-9.-]). 
:type name: str :param env_vars: Environment variables initialized in the container. (templated) :type env_vars: dict :param secrets: Kubernetes secrets to inject in the container. They can be exposed as environment vars or files in a volume. :type secrets: list[airflow.kubernetes.secret.Secret] :param in_cluster: run kubernetes client with in_cluster configuration. :type in_cluster: bool :param cluster_context: context that points to kubernetes cluster. Ignored when in_cluster is True. If None, current-context is used. :type cluster_context: str :param reattach_on_restart: if the scheduler dies while the pod is running, reattach and monitor :type reattach_on_restart: bool :param labels: labels to apply to the Pod. :type labels: dict :param startup_timeout_seconds: timeout in seconds to startup the pod. :type startup_timeout_seconds: int :param get_logs: get the stdout of the container as logs of the tasks. :type get_logs: bool :param annotations: non-identifying metadata you can attach to the Pod. Can be a large range of data, and can include characters that are not permitted by labels. :type annotations: dict :param resources: A dict containing resources requests and limits. Possible keys are request_memory, request_cpu, limit_memory, limit_cpu, and limit_gpu, which will be used to generate airflow.kubernetes.pod.Resources. See also kubernetes.io/docs/concepts/configuration/manage-compute-resources-container :type resources: dict :param affinity: A dict containing a group of affinity scheduling rules. :type affinity: dict :param node_selectors: A dict containing a group of scheduling rules. :type node_selectors: dict :param config_file: The path to the Kubernetes config file. (templated) :param config_file: The path to the Kubernetes config file. 
(templated) If not specified, default value is ``~/.kube/config`` :type config_file: str :param do_xcom_push: If do_xcom_push is True, the content of the file /airflow/xcom/return.json in the container will also be pushed to an XCom when the container completes. :type do_xcom_push: bool :param is_delete_operator_pod: What to do when the pod reaches its final state, or the execution is interrupted. If False (default): do nothing, If True: delete the pod :type is_delete_operator_pod: bool :param hostnetwork: If True enable host networking on the pod. :type hostnetwork: bool :param tolerations: A list of kubernetes tolerations. :type tolerations: list tolerations :param configmaps: A list of configmap names objects that we want mount as env variables. :type configmaps: list[str] :param pod_runtime_info_envs: environment variables about pod runtime information (ip, namespace, nodeName, podName). :type pod_runtime_info_envs: list[airflow.kubernetes.pod_runtime_info_env.PodRuntimeInfoEnv] :param security_context: security options the pod should run with (PodSecurityContext). :type security_context: dict :param dnspolicy: dnspolicy for the pod. :type dnspolicy: str :param schedulername: Specify a schedulername for the pod :type schedulername: str :param full_pod_spec: The complete podSpec :type full_pod_spec: kubernetes.client.models.V1Pod :param init_containers: init container for the launched Pod :type init_containers: list[kubernetes.client.models.V1Container] :param log_events_on_failure: Log the pod's events if a failure occurs :type log_events_on_failure: bool :param do_xcom_push: If True, the content of the file /airflow/xcom/return.json in the container will also be pushed to an XCom when the container completes. 
:type do_xcom_push: bool :param pod_template_file: path to pod template file :type pod_template_file: str # Older versions of Airflow dont work with single element tuples # The docker image provided with this plugin receives the query through ENV variable. | 2.355968 | 2 |
moire/nn/sparses/conjugate_embedding.py | speedcell4/moire | 2 | 6618225 | <filename>moire/nn/sparses/conjugate_embedding.py
import dynet as dy
import moire
from moire import Expression, ParameterCollection, nn
from moire.nn.initializers import Uniform
__all__ = [
'ConjugateEmbedding',
]
class ConjugateEmbedding(nn.Module):
    """Embedding whose vectors concatenate a frozen part and a trainable part.

    The first ``embedding_dim_fixed`` dimensions are looked up with
    ``update=False`` (never trained); the remaining
    ``embedding_dim_training`` dimensions are updated whenever
    ``moire.config.train`` is truthy.
    """

    def __init__(self, pc: ParameterCollection, num_embeddings: int,
                 embedding_dim_fixed: int, embedding_dim_training: int, initializer=Uniform()) -> None:
        super(ConjugateEmbedding, self).__init__(pc)
        self.num_embeddings = num_embeddings
        self.embedding_dim_fixed = embedding_dim_fixed
        self.embedding_dim_training = embedding_dim_training
        # Full vector size exposed to callers.
        self.embedding_dim = embedding_dim_fixed + embedding_dim_training
        # Two separate lookup tables: one frozen, one trainable.
        self.embedding_fixed = self.add_lookup((num_embeddings, embedding_dim_fixed), initializer)
        self.embedding_training = self.add_lookup((num_embeddings, embedding_dim_training), initializer)

    def __repr__(self):
        return '{} ({} tokens, {} dim)'.format(
            self.__class__.__name__, self.num_embeddings, self.embedding_dim)

    def __call__(self, ix: int) -> Expression:
        """Return the concatenated (fixed ++ trainable) vector for token *ix*."""
        frozen_part = dy.lookup(self.embedding_fixed, ix, update=False)
        trained_part = dy.lookup(self.embedding_training, ix, update=moire.config.train)
        return dy.concatenate([frozen_part, trained_part])
if __name__ == '__main__':
    # Smoke test: build a small embedding (100 tokens, 2 fixed + 3 trainable
    # dims), start a fresh computation graph, and print one vector's dim.
    embedding = ConjugateEmbedding(ParameterCollection(), 100, 2, 3)
    dy.renew_cg(True, True)
    moire.debug(embedding(2).dim())
| <filename>moire/nn/sparses/conjugate_embedding.py
import dynet as dy
import moire
from moire import Expression, ParameterCollection, nn
from moire.nn.initializers import Uniform
__all__ = [
'ConjugateEmbedding',
]
class ConjugateEmbedding(nn.Module):
    """Embedding whose vectors concatenate a frozen part and a trainable part.

    The first ``embedding_dim_fixed`` dimensions are looked up with
    ``update=False`` (never trained); the remaining
    ``embedding_dim_training`` dimensions are updated whenever
    ``moire.config.train`` is truthy.
    """

    def __init__(self, pc: ParameterCollection, num_embeddings: int,
                 embedding_dim_fixed: int, embedding_dim_training: int, initializer=Uniform()) -> None:
        super(ConjugateEmbedding, self).__init__(pc)
        self.num_embeddings = num_embeddings
        self.embedding_dim_fixed = embedding_dim_fixed
        self.embedding_dim_training = embedding_dim_training
        # Full vector size exposed to callers.
        self.embedding_dim = embedding_dim_fixed + embedding_dim_training
        # Two separate lookup tables: one frozen, one trainable.
        self.embedding_fixed = self.add_lookup((num_embeddings, embedding_dim_fixed), initializer)
        self.embedding_training = self.add_lookup((num_embeddings, embedding_dim_training), initializer)

    def __repr__(self):
        return '{} ({} tokens, {} dim)'.format(
            self.__class__.__name__, self.num_embeddings, self.embedding_dim)

    def __call__(self, ix: int) -> Expression:
        """Return the concatenated (fixed ++ trainable) vector for token *ix*."""
        frozen_part = dy.lookup(self.embedding_fixed, ix, update=False)
        trained_part = dy.lookup(self.embedding_training, ix, update=moire.config.train)
        return dy.concatenate([frozen_part, trained_part])
if __name__ == '__main__':
    # Smoke test: build a small embedding (100 tokens, 2 fixed + 3 trainable
    # dims), start a fresh computation graph, and print one vector's dim.
    embedding = ConjugateEmbedding(ParameterCollection(), 100, 2, 3)
    dy.renew_cg(True, True)
    moire.debug(embedding(2).dim())
| none | 1 | 2.138262 | 2 | |
py_Learn/ex16.py | tripdubroot/archive | 0 | 6618226 | from sys import argv
# ex16 from "Learn Python the Hard Way" (Python 2): overwrite the file named
# on the command line with three lines typed by the user.
script, filename = argv
print "We are going to erase %r." % filename
print "If you don't want that, hit CTRL-C (^C)."
print "If you want that, hit return."
# Last chance to abort before the file is destroyed.
raw_input("?")
print "Opening file..."
# 'w' mode already truncates the file; the explicit truncate() below is
# therefore redundant but harmless.
target = open(filename, 'w')
print "Truncating the file. Goodbye!"
target.truncate()
print "Now I'm going to ask you for three lines."
line1 = raw_input("Line 1: ")
line2 = raw_input("Line 2: ")
line3 = raw_input("Line 3: ")
print "I'm going to write these to the file."
# Write each captured line followed by an explicit newline.
target.write(line1)
target.write("\n")
target.write(line2)
target.write("\n")
target.write(line3)
target.write("\n")
print "And finally we close the file."
target.close()
script, filename = argv
print "We are going to erase %r." % filename
print "If you don't want that, hit CTRL-C (^C)."
print "If you want that, hit return."
raw_input("?")
print "Opening file..."
target = open(filename, 'w')
print "Truncating the file. Goodbye!"
target.truncate()
print "Now I'm going to ask you for three lines."
line1 = raw_input("Line 1: ")
line2 = raw_input("Line 2: ")
line3 = raw_input("Line 3: ")
print "I'm going to write these to the file."
target.write(line1)
target.write("\n")
target.write(line2)
target.write("\n")
target.write(line3)
target.write("\n")
print "And finally we close the file."
target.close() | none | 1 | 4.168036 | 4 | |
requestsapi/requestdata.py | furio/py-google-safelist | 6 | 6618227 | import requests
import responseobjects
__CLIENT_VERSION__ = "0.1.0"
__ANY_PLATFORM__ = "ANY_PLATFORM"
__THREAT_URL__ = "URL"
class RequestData(object):
    """Thin client for the Google Safe Browsing v4 REST API.

    The JSON request templates are built once in ``__init__`` and
    deep-copied per call so per-request mutations never leak back into
    the templates.
    """

    def __init__(self, apikey, companyname, maxsize=4096):
        self.__apikey = apikey
        # Template for threatListUpdates:fetch requests.
        self.__reqobj = {
            "client": {"clientId": companyname, "clientVersion": __CLIENT_VERSION__},
            "listUpdateRequests": [
                {"threatType": "", "platformType": __ANY_PLATFORM__, "threatEntryType": __THREAT_URL__,
                 "constraints": {"region": "US", "supportedCompressions": ["RAW"]}}
            ]}
        # Template for fullHashes:find requests.
        self.__detailobj = {
            "client": {"clientId": companyname, "clientVersion": __CLIENT_VERSION__},
            "clientStates": [],
            "threatInfo": {
                "threatTypes": [],
                "platformTypes": [__ANY_PLATFORM__],
                "threatEntryTypes": [__THREAT_URL__],
                "threatEntries": []
            }
        }
        # maxsize == -1 means "no explicit limit on update entries".
        if maxsize != -1:
            self.__reqobj['listUpdateRequests'][0]['constraints'].update({"maxUpdateEntries": maxsize})

    def getthreatlists(self):
        """Return the distinct threat type names offered by the service."""
        r = requests.get("https://safebrowsing.googleapis.com/v4/threatLists", {'key': self.__apikey})
        if r.status_code < 400:
            respObject = r.json()
            # With __ANY_PLATFORM__ the same list shows up multiple times,
            # so deduplicate via a set.
            return list(set([x["threatType"] for x in respObject["threatLists"]]))
        return []

    def getupdateforthreat(self, threat, clistate=None):
        """Fetch the list update for *threat*.

        Optional *clistate* resumes from a previous client state.
        Returns a ListUpdateResponse, or None on an HTTP error.
        """
        from copy import deepcopy
        # BUGFIX: dict.copy() is shallow, so the nested request dict was
        # shared with the template and a previously set 'state' leaked
        # into later calls.  deepcopy isolates each request.
        reqdict = deepcopy(self.__reqobj)
        reqdict['listUpdateRequests'][0]['threatType'] = threat
        if clistate is not None:
            reqdict['listUpdateRequests'][0]['state'] = clistate
        r = requests.post("https://safebrowsing.googleapis.com/v4/threatListUpdates:fetch?key=" + self.__apikey, json=reqdict)
        if r.status_code < 400:
            return responseobjects.ListUpdateResponse(r.json())
        return None

    def getthreatspecific(self, threatandstates, hashes):
        """Look up full hashes for the given hash prefixes.

        *threatandstates* is a list of (threat, clientstate) pairs and
        *hashes* a list of hash prefixes.  Returns a
        FullHashesFindResponse, or None on an HTTP error.
        """
        from copy import deepcopy
        # BUGFIX: the shallow copy shared the nested lists with the
        # template, so every call appended to the same lists and the
        # request body grew without bound.  deepcopy gives each call
        # fresh lists.
        reqdict = deepcopy(self.__detailobj)
        for tands in threatandstates:
            reqdict['clientStates'].append(tands[1])
            reqdict['threatInfo']['threatTypes'].append(tands[0])
        for hashprefix in hashes:
            reqdict['threatInfo']['threatEntries'].append({"hash": hashprefix})
        r = requests.post("https://safebrowsing.googleapis.com/v4/fullHashes:find?key=" + self.__apikey, json=reqdict)
        if r.status_code < 400:
            return responseobjects.FullHashesFindResponse(r.json())
        return None
| import requests
import responseobjects
__CLIENT_VERSION__ = "0.1.0"
__ANY_PLATFORM__ = "ANY_PLATFORM"
__THREAT_URL__ = "URL"
class RequestData(object):
    """Thin client for the Google Safe Browsing v4 REST API.

    The JSON request templates are built once in ``__init__`` and
    deep-copied per call so per-request mutations never leak back into
    the templates.
    """

    def __init__(self, apikey, companyname, maxsize=4096):
        self.__apikey = apikey
        # Template for threatListUpdates:fetch requests.
        self.__reqobj = {
            "client": {"clientId": companyname, "clientVersion": __CLIENT_VERSION__},
            "listUpdateRequests": [
                {"threatType": "", "platformType": __ANY_PLATFORM__, "threatEntryType": __THREAT_URL__,
                 "constraints": {"region": "US", "supportedCompressions": ["RAW"]}}
            ]}
        # Template for fullHashes:find requests.
        self.__detailobj = {
            "client": {"clientId": companyname, "clientVersion": __CLIENT_VERSION__},
            "clientStates": [],
            "threatInfo": {
                "threatTypes": [],
                "platformTypes": [__ANY_PLATFORM__],
                "threatEntryTypes": [__THREAT_URL__],
                "threatEntries": []
            }
        }
        # maxsize == -1 means "no explicit limit on update entries".
        if maxsize != -1:
            self.__reqobj['listUpdateRequests'][0]['constraints'].update({"maxUpdateEntries": maxsize})

    def getthreatlists(self):
        """Return the distinct threat type names offered by the service."""
        r = requests.get("https://safebrowsing.googleapis.com/v4/threatLists", {'key': self.__apikey})
        if r.status_code < 400:
            respObject = r.json()
            # With __ANY_PLATFORM__ the same list shows up multiple times,
            # so deduplicate via a set.
            return list(set([x["threatType"] for x in respObject["threatLists"]]))
        return []

    def getupdateforthreat(self, threat, clistate=None):
        """Fetch the list update for *threat*.

        Optional *clistate* resumes from a previous client state.
        Returns a ListUpdateResponse, or None on an HTTP error.
        """
        from copy import deepcopy
        # BUGFIX: dict.copy() is shallow, so the nested request dict was
        # shared with the template and a previously set 'state' leaked
        # into later calls.  deepcopy isolates each request.
        reqdict = deepcopy(self.__reqobj)
        reqdict['listUpdateRequests'][0]['threatType'] = threat
        if clistate is not None:
            reqdict['listUpdateRequests'][0]['state'] = clistate
        r = requests.post("https://safebrowsing.googleapis.com/v4/threatListUpdates:fetch?key=" + self.__apikey, json=reqdict)
        if r.status_code < 400:
            return responseobjects.ListUpdateResponse(r.json())
        return None

    def getthreatspecific(self, threatandstates, hashes):
        """Look up full hashes for the given hash prefixes.

        *threatandstates* is a list of (threat, clientstate) pairs and
        *hashes* a list of hash prefixes.  Returns a
        FullHashesFindResponse, or None on an HTTP error.
        """
        from copy import deepcopy
        # BUGFIX: the shallow copy shared the nested lists with the
        # template, so every call appended to the same lists and the
        # request body grew without bound.  deepcopy gives each call
        # fresh lists.
        reqdict = deepcopy(self.__detailobj)
        for tands in threatandstates:
            reqdict['clientStates'].append(tands[1])
            reqdict['threatInfo']['threatTypes'].append(tands[0])
        for hashprefix in hashes:
            reqdict['threatInfo']['threatEntries'].append({"hash": hashprefix})
        r = requests.post("https://safebrowsing.googleapis.com/v4/fullHashes:find?key=" + self.__apikey, json=reqdict)
        if r.status_code < 400:
            return responseobjects.FullHashesFindResponse(r.json())
        return None
| en | 0.315124 | # With __ANY_PLATFORM__ i get the same list multiple times # print r.text | 2.532339 | 3 |
analysis/plot_hop_distribution.py | dennis-tra/optimistic-provide | 1 | 6618228 | import numpy as np
import seaborn as sns
from analysis.model_peer_info import PeerInfo
from model_loader import ModelLoader
import matplotlib.pyplot as plt
def show_values_on_bars(axs, total):
    """Annotate every bar in *axs* with its height as a percentage of *total*.

    *axs* may be a single matplotlib Axes or a numpy array of Axes.
    """
    def _annotate(ax):
        for bar in ax.patches:
            label = '{:.1f}%'.format(100 * bar.get_height() / total)
            center_x = bar.get_x() + bar.get_width() / 2
            top_y = bar.get_y() + bar.get_height()
            ax.text(center_x, top_y, label, ha="center")

    if isinstance(axs, np.ndarray):
        # Flattened iteration over an arbitrary-shaped grid of Axes.
        for _, single_ax in np.ndenumerate(axs):
            _annotate(single_ax)
    else:
        _annotate(axs)
def plot():
    """Plot the distribution of DHT hops needed to discover ADD_PROVIDER peers.

    Reads measurements from ``../data`` and writes the histogram to
    ``../plots/hop_distribution.png``.
    """
    sns.set_theme()

    def calc_hop_count(peer_id: str, peer_infos: dict[str, PeerInfo], hop_count: int) -> int:
        # Walk the discovery chain backwards until a peer with an empty
        # 'discovered_from' (i.e. a starting peer) is reached.
        # NOTE(review): recursion assumes the chain is acyclic — a cycle in
        # the data would recurse until RecursionError; confirm with the
        # data model.
        peer_info = peer_infos[peer_id]
        if peer_info.discovered_from == "":
            return hop_count
        return calc_hop_count(peer_info.discovered_from, peer_infos, hop_count + 1)

    hop_count_distribution = []
    measurements = ModelLoader.open("../data")
    for measurement in measurements:
        for span in measurement.provider.spans:
            # Only spans that actually stored a provider record count.
            if span.type != "ADD_PROVIDER":
                continue
            hop_count = calc_hop_count(span.peer_id, measurement.provider.peer_infos, 0)
            hop_count_distribution += [hop_count]
    fig, ax = plt.subplots(figsize=(15, 6))
    sns.histplot(ax=ax, x=hop_count_distribution, bins=np.arange(0, 10))
    ax.set_xticks(np.arange(0, 10))
    ax.set_xlabel("Number of Hops")
    ax.set_ylabel("Count (log scale)")
    ax.set_yscale('log')
    ax.title.set_text(
        f"Number of Hops to Discover a Peer that was Selected to Store a Provider Record (Sample Size {len(hop_count_distribution)})")
    plt.tight_layout()
    # Annotate each bar with its share of the total sample.
    show_values_on_bars(ax, len(hop_count_distribution))
    plt.savefig("../plots/hop_distribution.png")
    plt.show()
if __name__ == '__main__':
    # Generate the hop-distribution plot when run as a script.
    plot()
| import numpy as np
import seaborn as sns
from analysis.model_peer_info import PeerInfo
from model_loader import ModelLoader
import matplotlib.pyplot as plt
def show_values_on_bars(axs, total):
    """Annotate every bar in *axs* with its height as a percentage of *total*.

    *axs* may be a single matplotlib Axes or a numpy array of Axes.
    """
    def _annotate(ax):
        for bar in ax.patches:
            label = '{:.1f}%'.format(100 * bar.get_height() / total)
            center_x = bar.get_x() + bar.get_width() / 2
            top_y = bar.get_y() + bar.get_height()
            ax.text(center_x, top_y, label, ha="center")

    if isinstance(axs, np.ndarray):
        # Flattened iteration over an arbitrary-shaped grid of Axes.
        for _, single_ax in np.ndenumerate(axs):
            _annotate(single_ax)
    else:
        _annotate(axs)
def plot():
    """Plot the distribution of DHT hops needed to discover ADD_PROVIDER peers.

    Reads measurements from ``../data`` and writes the histogram to
    ``../plots/hop_distribution.png``.
    """
    sns.set_theme()

    def calc_hop_count(peer_id: str, peer_infos: dict[str, PeerInfo], hop_count: int) -> int:
        # Walk the discovery chain backwards until a peer with an empty
        # 'discovered_from' (i.e. a starting peer) is reached.
        # NOTE(review): recursion assumes the chain is acyclic — a cycle in
        # the data would recurse until RecursionError; confirm with the
        # data model.
        peer_info = peer_infos[peer_id]
        if peer_info.discovered_from == "":
            return hop_count
        return calc_hop_count(peer_info.discovered_from, peer_infos, hop_count + 1)

    hop_count_distribution = []
    measurements = ModelLoader.open("../data")
    for measurement in measurements:
        for span in measurement.provider.spans:
            # Only spans that actually stored a provider record count.
            if span.type != "ADD_PROVIDER":
                continue
            hop_count = calc_hop_count(span.peer_id, measurement.provider.peer_infos, 0)
            hop_count_distribution += [hop_count]
    fig, ax = plt.subplots(figsize=(15, 6))
    sns.histplot(ax=ax, x=hop_count_distribution, bins=np.arange(0, 10))
    ax.set_xticks(np.arange(0, 10))
    ax.set_xlabel("Number of Hops")
    ax.set_ylabel("Count (log scale)")
    ax.set_yscale('log')
    ax.title.set_text(
        f"Number of Hops to Discover a Peer that was Selected to Store a Provider Record (Sample Size {len(hop_count_distribution)})")
    plt.tight_layout()
    # Annotate each bar with its share of the total sample.
    show_values_on_bars(ax, len(hop_count_distribution))
    plt.savefig("../plots/hop_distribution.png")
    plt.show()
if __name__ == '__main__':
    # Generate the hop-distribution plot when run as a script.
    plot()
| none | 1 | 2.297873 | 2 | |
HsinchuCityWebsite/HsinchuCityWebsite/HsinchuCityWebsite/__init__.py | kaochiuan/HsinchuCityWebsite | 2 | 6618229 | """
Package for HsinchuCityWebsite.
"""
| """
Package for HsinchuCityWebsite.
"""
| en | 0.528745 | Package for HsinchuCityWebsite. | 0.907641 | 1 |
scripts/calendar_view/utils/batch_utils.py | VP-GEO/cbm | 0 | 6618230 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# This file is part of CbM (https://github.com/ec-jrc/cbm).
# Author : <NAME>
# Credits : GTCAP Team
# Copyright : 2021 European Commission, Joint Research Centre
# License : 3-Clause BSD
import time
import geopandas
import download_utils, extract_utils, plot_utils
from glob import glob
import os
import lut
from osgeo import ogr
import datetime
import collections
import warnings
import calendar
def select_parcel(vector_file_name, parcel_id_column, parcel_id, logfile):
    """Load a parcel vector file and return the row(s) matching *parcel_id*.

    Timing is printed to stdout and appended to *logfile*.
    """
    log_handle = open(logfile, 'a')
    t0 = time.time()
    all_parcels = geopandas.read_file(vector_file_name)
    selected = all_parcels[all_parcels[parcel_id_column] == parcel_id]
    print(f"Parcel selected in: {time.time() - t0} seconds")
    print(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"), "\t", parcel_id,
          "\tbatch_utils.select_parcel:\t", "{0:.3f}".format(time.time() - t0),
          file=log_handle)
    log_handle.close()
    return selected
def run_get_scl_imagettes(parcel, parcel_id, crop, out_tif_folder_base,
                          search_window_start_date, search_window_end_date, search_split_days,
                          raw_chips_by_location_url, username, password, chipsize,
                          url_base, lon, lat, logfile
                          ):
    """Download the SCL (scene classification) imagettes for one parcel.

    The search window is split into chunks of *search_split_days* days and,
    for each chunk, the service is queried for the SCL imagettes available
    around (lon, lat); these are then downloaded under
    ``<out_tif_folder_base>/<parcel_id>_<crop>``.  Timing is appended to
    *logfile*.
    """
    fout = open(logfile, 'a')
    start = time.time()
    # get the list of SCL imagettes for the parcel in a given date range
    chip_folder = str(parcel_id) + '_' + crop
    out_tif_folder = out_tif_folder_base + "/" + chip_folder
    # lon, lat = download_utils.get_centroid_of_parcel(parcel)
    date_ranges = download_utils.split_date_range(search_window_start_date, search_window_end_date, search_split_days)
    for date_range in date_ranges:
        start_date = date_range[0]
        end_date = date_range[1]
        print("Getting SCL imagettes from" , start_date, "to", end_date)
        # Both the listing request and the download are retried until the
        # helpers report success (they return a was_error flag).
        was_error_1 = True
        was_error_2 = True
        while was_error_1:
            locurl, list_of_scl_imagettes, was_error_1 = download_utils.get_scl_imagettes(raw_chips_by_location_url, lon, lat,
                                                                                         start_date, end_date,
                                                                                         username, password, chipsize)
        while was_error_2:
            was_error_2 = download_utils.download_scl_imagettes(url_base, list_of_scl_imagettes, out_tif_folder, username, password)
    print(f"Got list of SCL imagettes and downloaded in: {time.time() - start} seconds")
    print(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"), "\t", parcel_id, "\tbatch_utils.run_get_scl_imagettes:\t", "{0:.3f}".format(time.time() - start), file=fout)
    fout.close()
def create_list_of_tiles_to_be_downloaded(parcel, parcel_id, crop, out_tif_folder_base, cloud_categories, logfile):
    """Return the tile names whose SCL imagette is cloud-free over *parcel*.

    Scans the downloaded ``*.SCL.tif`` files under
    ``<out_tif_folder_base>/<parcel_id>_<crop>`` and keeps the tiles where
    the parcel is not covered by any of *cloud_categories*.  Timing is
    appended to *logfile*.
    """
    warnings.simplefilter(action='ignore', category=FutureWarning)
    log_handle = open(logfile, 'a')
    t0 = time.time()
    scl_pattern = out_tif_folder_base + "/" + str(parcel_id) + '_' + crop + "/*/*.SCL.tif"
    # Tile name is the first dot-separated token of the SCL file name.
    tiles_to_download = [
        os.path.basename(scl_file).split(".")[0]
        for scl_file in glob(scl_pattern)
        if not download_utils.is_tile_cloudy_geopandas(scl_file, parcel, cloud_categories)
    ]
    print(f"List of tiles to be downloaded created in {time.time() - t0} seconds")
    print(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"), "\t", parcel_id, "\tbatch_utils.create_list_of_tiles_to_be_downloaded:\t", "{0:.3f}".format(time.time() - t0), file=log_handle)
    log_handle.close()
    return tiles_to_download
def run_get_and_download_band_imagettes(max_number_of_tiles_per_request, tiles_to_download, raw_chips_batch_url,
                                        lon, lat, bands, username, password, chipsize, url_base,
                                        parcel_id, crop, out_tif_folder_base, logfile):
    """Request and download the band imagettes for the cloud-free tiles.

    Runs the batch chip-extract query (JSON POST), gets back the download
    location of the extracted chips, and downloads them to
    ``<out_tif_folder_base>/<parcel_id>_<crop>``.  The tile list is split
    into batches of at most *max_number_of_tiles_per_request* tiles; each
    batch request and each download is retried until the helper reports
    success.

    NOTE(review): when len(tiles_to_download) is an exact multiple of the
    batch size (or smaller than it), the final "last bunch" request is
    issued with an empty tile slice — presumably harmless but wasteful;
    confirm against the service behaviour.
    """
    fout = open(logfile, 'a')
    start = time.time()
    chip_folder = str(parcel_id) + '_' + crop
    out_tif_folder = out_tif_folder_base + "/" + chip_folder
    # At least one request is always made, even for short tile lists.
    number_of_full_requests = len(tiles_to_download)//max_number_of_tiles_per_request
    if number_of_full_requests == 0:
        number_of_full_requests = 1
    for request in range(0,number_of_full_requests):
        list_of_band_imagettes = {}
        # Index window [start, end) of this batch within tiles_to_download.
        request_end_index = max_number_of_tiles_per_request*(request+1)
        request_start_index = request_end_index - max_number_of_tiles_per_request
        print("request number:", request)
        tiles_to_download_subset = tiles_to_download[request_start_index:request_end_index]
        was_error_1 = True
        was_error_2 = True
        while was_error_1:
            list_of_band_imagettes, was_error_1 = download_utils.get_band_imagettes(raw_chips_batch_url, lon, lat,
                                                                                    tiles_to_download_subset,
                                                                                    bands, username, password, chipsize )
        while was_error_2:
            was_error_2 = download_utils.download_band_imagettes(url_base, list_of_band_imagettes, out_tif_folder, username, password)
    # Remainder batch: picks up from where the last full batch ended
    # (request_end_index survives the loop above).
    last_request_end_index = len(tiles_to_download) + 1
    last_request_start_index = request_end_index
    print("last bunch")
    was_error_1 = True
    was_error_2 = True
    while was_error_1:
        list_of_band_imagettes, was_error_1 = download_utils.get_band_imagettes(raw_chips_batch_url, lon, lat,
                                                                                tiles_to_download[last_request_start_index:last_request_end_index],
                                                                                bands, username, password, chipsize )
    while was_error_2:
        was_error_2 = download_utils.download_band_imagettes(url_base, list_of_band_imagettes, out_tif_folder, username, password)
    print(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"), "\t", parcel_id, "\tbatch_utils.run_get_and_download_band_imagettes:\t", "{0:.3f}".format(time.time() - start), file=fout)
    fout.close()
    print(f"Got list of cloudfree bands and downloaded images: {time.time() - start} seconds")
def run_merge_bands(parcel_id, crop, out_tif_folder_base, logfile):
    """Merge the downloaded band tifs for one parcel.

    Delegates to ``download_utils.merge_bands`` (which walks the per-date
    folders) and appends the elapsed time to *logfile*.
    """
    log_handle = open(logfile, 'a')
    t0 = time.time()
    download_utils.merge_bands(parcel_id, crop, out_tif_folder_base)
    print(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"), "\t", parcel_id, "\tbatch_utils.run_merge_bands:\t", "{0:.3f}".format(time.time() - t0), file=log_handle)
    log_handle.close()
    print(f"Merging cloudfree bands images in {time.time() - t0} seconds")
def run_merge_4_bands(parcel_id, crop, out_tif_folder_base):
    """Merge the four downloaded bands for one parcel (no log-file variant).

    Delegates to ``download_utils.merge_4_bands``.
    """
    t0 = time.time()
    download_utils.merge_4_bands(parcel_id, crop, out_tif_folder_base)
    print(f"Merging 4 bands images in {time.time() - t0} seconds")
def run_lut_stretch(parcel_id, crop, out_tif_folder_base, left_percent, right_percent, lut_txt_file, logfile):
    """LUT-stretch every merged tif of one parcel with the 'magic' LUT.

    For each tif in ``<base>/<parcel_id>_<crop>_merged`` the per-band
    min/max (clipped at *left_percent*/*right_percent*) is recorded in
    *lut_txt_file* and a stretched copy is written to
    ``..._merged_lut_magic``.  Already-existing outputs are skipped.
    Timing is appended to *logfile*.
    """
    fout = open(logfile, 'a')
    start = time.time()
    chip_folder = str(parcel_id) + '_' + crop
    out_tif_folder = out_tif_folder_base + "/" + chip_folder
    lut_bands=[1,2,3]  # bands (1-based) to stretch
    merge_folder = out_tif_folder + "_merged"
    merge_lut_folder = out_tif_folder + "_merged_lut_magic"
    # merge_lut_folder = out_tif_folder + "_merged_lut_dynamic"
    if not os.path.exists(merge_lut_folder):
        os.makedirs(merge_lut_folder)
    merged_files_pattern = merge_folder + "/*.tif"
    merged_files = glob(merged_files_pattern)
    for merged_file in merged_files:
        merged_file_base = os.path.basename(merged_file)
        merged_file_path = os.path.dirname(merged_file)  # NOTE(review): unused
        tile_name = merged_file_base.split(".")[0]
        # get acquisition date from tile name
        acq_date = download_utils.get_acquisition_date_from_tile_name(tile_name)
        output = merge_lut_folder + "/" + tile_name + ".tif"
        # if the lut-stretched image already exists we do not create it again
        if os.path.isfile(output):
            print(tile_name + " already created")
        else:
            print("LUT stretching tile: ", tile_name, end="")
            lut.writeMinMaxToFile(merged_file, acq_date, lut_bands, left_percent, right_percent, lut_txt_file, tile_name)
            lut.lutStretchMagicLut(merged_file, output, lut_bands )
            # lut.lutStretch(merged_file, output, left_percent, right_percent, lut_bands )
            print("...done")
    print(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"), "\t", parcel_id, "\tbatch_utils.run_lut_stretch:\t", "{0:.3f}".format(time.time() - start), file=fout)
    fout.close()
    print(f"LUT stretch: {time.time() - start} seconds")
def run_lut_stretch_dynamic(parcel_id, crop, out_tif_folder_base, left_percent, right_percent, lut_txt_file, logfile):
    """LUT-stretch every merged tif of one parcel into the 'dynamic' folder.

    NOTE(review): apart from the output folder name and the log tag this is
    identical to run_lut_stretch — it still calls lutStretchMagicLut, while
    the per-image dynamic stretch (lut.lutStretch, commented out below)
    looks like the intended call for this variant; confirm with the authors.
    """
    fout = open(logfile, 'a')
    start = time.time()
    chip_folder = str(parcel_id) + '_' + crop
    out_tif_folder = out_tif_folder_base + "/" + chip_folder
    lut_bands=[1,2,3]  # bands (1-based) to stretch
    merge_folder = out_tif_folder + "_merged"
    # merge_lut_folder = out_tif_folder + "_merged_lut_magic"
    merge_lut_folder = out_tif_folder + "_merged_lut_dynamic"
    if not os.path.exists(merge_lut_folder):
        os.makedirs(merge_lut_folder)
    merged_files_pattern = merge_folder + "/*.tif"
    merged_files = glob(merged_files_pattern)
    for merged_file in merged_files:
        merged_file_base = os.path.basename(merged_file)
        merged_file_path = os.path.dirname(merged_file)  # NOTE(review): unused
        tile_name = merged_file_base.split(".")[0]
        # get acquisition date from tile name
        acq_date = download_utils.get_acquisition_date_from_tile_name(tile_name)
        output = merge_lut_folder + "/" + tile_name + ".tif"
        # if the lut-stretched image already exists we do not create it again
        if os.path.isfile(output):
            print(tile_name + " already created")
        else:
            print("LUT stretching tile: ", tile_name, end="")
            lut.writeMinMaxToFile(merged_file, acq_date, lut_bands, left_percent, right_percent, lut_txt_file, tile_name)
            lut.lutStretchMagicLut(merged_file, output, lut_bands )
            # lut.lutStretch(merged_file, output, left_percent, right_percent, lut_bands )
            print("...done")
    print(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"), "\t", parcel_id, "\tbatch_utils.run_lut_stretch_dynamic:\t", "{0:.3f}".format(time.time() - start), file=fout)
    fout.close()
    print(f"LUT stretch: {time.time() - start} seconds")
def get_merged_lutstretched_files_and_acquisition_dates(parcel_id, crop, out_tif_folder_base, logfile):
    """Return (acquisition_dates, file_paths) for the LUT-stretched tifs.

    Looks in ``<base>/<parcel_id>_<crop>_merged_lut_magic``; each date is
    derived from the tile name.  Timing is appended to *logfile*.
    """
    log_handle = open(logfile, 'a')
    t0 = time.time()
    lut_folder = out_tif_folder_base + "/" + str(parcel_id) + '_' + crop + "_merged_lut_magic"
    merged_lut_files = glob(lut_folder + "/*.tif")
    acq_dates = [
        download_utils.get_acquisition_date_from_tile_name(os.path.basename(path).split(".")[0])
        for path in merged_lut_files
    ]
    print(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"), "\t", parcel_id, "\tbatch_utils.get_merged_lutstretched_files_and_acquisition_dates:\t", "{0:.3f}".format(time.time() - t0), file=log_handle)
    log_handle.close()
    return acq_dates, merged_lut_files
def get_merged_lutstretched_files_and_acquisition_dates_dynamic(parcel_id, crop, out_tif_folder_base, logfile):
    """Return (acquisition_dates, file_paths) for the dynamic LUT-stretched tifs.

    Looks in ``<base>/<parcel_id>_<crop>_merged_lut_dynamic``; each date is
    derived from the tile name.  Timing is appended to *logfile*.
    """
    log_handle = open(logfile, 'a')
    t0 = time.time()
    lut_folder = out_tif_folder_base + "/" + str(parcel_id) + '_' + crop + "_merged_lut_dynamic"
    merged_lut_files = glob(lut_folder + "/*.tif")
    acq_dates = [
        download_utils.get_acquisition_date_from_tile_name(os.path.basename(path).split(".")[0])
        for path in merged_lut_files
    ]
    print(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"), "\t", parcel_id, "\tbatch_utils.get_merged_lutstretched_files_and_acquisition_dates_dynamic:\t", "{0:.3f}".format(time.time() - t0), file=log_handle)
    log_handle.close()
    return acq_dates, merged_lut_files
def get_merged_ndvi_files_and_acquisition_dates(parcel_id, crop, out_tif_folder_base):
    """Return (acquisition_dates, file_paths) for the merged NDVI tifs.

    Looks in ``<base>/<parcel_id>_<crop>_merged_ndvi``; each date is
    derived from the tile name.
    """
    ndvi_folder = out_tif_folder_base + "/" + str(parcel_id) + '_' + crop + "_merged_ndvi"
    ndvi_files = glob(ndvi_folder + "/*.tif")
    acq_dates = [
        download_utils.get_acquisition_date_from_tile_name(os.path.basename(path).split(".")[0])
        for path in ndvi_files
    ]
    return acq_dates, ndvi_files
def get_merged_ndwi_files_and_acquisition_dates(parcel_id, crop, out_tif_folder_base):
    """Return (acquisition_dates, file_paths) for the merged NDWI tifs.

    Looks in ``<base>/<parcel_id>_<crop>_merged_ndwi``; each date is
    derived from the tile name.
    """
    ndwi_folder = out_tif_folder_base + "/" + str(parcel_id) + '_' + crop + "_merged_ndwi"
    ndwi_files = glob(ndwi_folder + "/*.tif")
    acq_dates = [
        download_utils.get_acquisition_date_from_tile_name(os.path.basename(path).split(".")[0])
        for path in ndwi_files
    ]
    return acq_dates, ndwi_files
def get_merged_tif_files_and_acquisition_dates(parcel_id, crop, out_tif_folder_base):
    """Return (acquisition_dates, file_paths) for the merged band tifs.

    Looks in ``<base>/<parcel_id>_<crop>_merged``; each date is derived
    from the tile name.
    """
    merged_folder = out_tif_folder_base + "/" + str(parcel_id) + '_' + crop + "_merged"
    merged_files = glob(merged_folder + "/*.tif")
    acq_dates = [
        download_utils.get_acquisition_date_from_tile_name(os.path.basename(path).split(".")[0])
        for path in merged_files
    ]
    return acq_dates, merged_files
def get_merged_tif_files_and_acquisition_dates_in_dict(parcel_id, crop, out_tif_folder_base):
    """Return an OrderedDict mapping acquisition date -> merged tif path.

    Looks in ``<base>/<parcel_id>_<crop>_merged`` and sorts the entries by
    date.  If two files share a date, the later-globbed one wins (same as
    the original insertion behaviour).
    """
    merged_folder = out_tif_folder_base + "/" + str(parcel_id) + '_' + crop + "_merged"
    date_to_file = {
        download_utils.get_acquisition_date_from_tile_name(os.path.basename(path).split(".")[0]): path
        for path in glob(merged_folder + "/*.tif")
    }
    return collections.OrderedDict(sorted(date_to_file.items()))
def run_ndvi_creation(parcel_id, crop, out_tif_folder_base, logfile):
    """Create an NDVI tif for every merged band tif of one parcel.

    Outputs go to ``<base>/<parcel_id>_<crop>_merged_ndvi``; tiles whose
    NDVI output already exists are skipped.  Timing is appended to
    *logfile*.
    """
    fout = open(logfile, 'a')
    start = time.time()
    # create ndvi image
    chip_folder = str(parcel_id) + '_' + crop
    out_tif_folder = out_tif_folder_base + "/" + chip_folder
    lut_bands=[1,2,3]  # NOTE(review): unused in this function
    merge_folder = out_tif_folder + "_merged"
    merge_ndvi_folder = out_tif_folder + "_merged_ndvi"
    if not os.path.exists(merge_ndvi_folder):
        os.makedirs(merge_ndvi_folder)
    merged_files_pattern = merge_folder + "/*.tif"
    merged_files = glob(merged_files_pattern)
    for merged_file in merged_files:
        merged_file_base = os.path.basename(merged_file)
        merged_file_path = os.path.dirname(merged_file)  # NOTE(review): unused
        tile_name = merged_file_base.split(".")[0]
        # get acquisition date from tile name
        acq_date = download_utils.get_acquisition_date_from_tile_name(tile_name)  # NOTE(review): unused
        output = merge_ndvi_folder + "/" + tile_name + ".tif"
        # if the ndvi image is already created we do not create it again
        if os.path.isfile(output):
            print(tile_name + " ndvi already created")
        else:
            print("Creating NDVI for tile: ", tile_name, end="")
            extract_utils.calculate_ndvi(merged_file, output)
            print("...done")
    print(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"), "\t", parcel_id, "\tbatch_utils.run_ndvi_creation:\t", "{0:.3f}".format(time.time() - start), file=fout)
    fout.close()
    print(f"NDVI created in: {time.time() - start} seconds")
def run_ndwi_creation(parcel_id, crop, out_tif_folder_base, logfile):
    """Create an NDWI tif for every merged band tif of one parcel.

    Outputs go to ``<base>/<parcel_id>_<crop>_merged_ndwi``; tiles whose
    NDWI output already exists are skipped.  Timing is appended to
    *logfile*.
    """
    fout = open(logfile, 'a')
    start = time.time()
    # create ndwi image
    chip_folder = str(parcel_id) + '_' + crop
    out_tif_folder = out_tif_folder_base + "/" + chip_folder
    lut_bands=[1,2,3]  # NOTE(review): unused in this function
    merge_folder = out_tif_folder + "_merged"
    merge_ndwi_folder = out_tif_folder + "_merged_ndwi"
    if not os.path.exists(merge_ndwi_folder):
        os.makedirs(merge_ndwi_folder)
    merged_files_pattern = merge_folder + "/*.tif"
    merged_files = glob(merged_files_pattern)
    for merged_file in merged_files:
        merged_file_base = os.path.basename(merged_file)
        merged_file_path = os.path.dirname(merged_file)  # NOTE(review): unused
        tile_name = merged_file_base.split(".")[0]
        # get acquisition date from tile name
        acq_date = download_utils.get_acquisition_date_from_tile_name(tile_name)  # NOTE(review): unused
        output = merge_ndwi_folder + "/" + tile_name + ".tif"
        # if the ndwi image is already created we do not create it again
        if os.path.isfile(output):
            print(tile_name + " ndwi already created")
        else:
            print("Creating NDWI for tile: ", tile_name, end="")
            extract_utils.calculate_ndwi(merged_file, output)
            print("...done")
    print(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"), "\t", parcel_id, "\tbatch_utils.run_ndwi_creation:\t", "{0:.3f}".format(time.time() - start), file=fout)
    fout.close()
    print(f"NDWI created in: {time.time() - start} seconds")
def calculate_ndvi_statistics(parcel_id, crop, out_tif_folder_base, tiles_to_download, parcel, vector_file_name, parcel_id_column, logfile):
    """Extract per-date NDVI statistics for one parcel and write them to CSV.

    For every merged NDVI tif of the parcel, computes mean/count/std over
    the parcel geometry and writes one CSV row to
    ``<out_tif_folder_base>/ndvi/<parcel_id>_<crop>_ndvi.csv``.
    Timing is appended to *logfile*.
    """
    start = time.time()
    acq_dates, merged_ndvi_files = get_merged_ndvi_files_and_acquisition_dates(parcel_id, crop, out_tif_folder_base)
    chip_folder = str(parcel_id) + '_' + crop
    output_ndvi_folder = out_tif_folder_base + "/ndvi"
    output_ndvi_csv_file = output_ndvi_folder + "/" + chip_folder + "_ndvi.csv"
    # exist_ok avoids the exists()/makedirs() race of the original code.
    os.makedirs(output_ndvi_folder, exist_ok=True)
    # BUGFIX: the original re-opened the CSV with open(...) for every row
    # (header in "w", each row in "a") and never closed the handles.
    # Open it once for the whole run instead; the file content is identical.
    with open(output_ndvi_csv_file, "w") as csv_out:
        print("Field_ID,acq_date,ndvi_mean,ndvi_count,ndvi_std", file=csv_out)
        for merged_ndvi_file in merged_ndvi_files:
            tile_name = os.path.basename(merged_ndvi_file).split(".")[0]
            acq_date = download_utils.get_acquisition_date_from_tile_name(tile_name)
            ndvi_mean, ndvi_count, ndvi_std = extract_utils.extract_stats_for_one_parcel_geopandas_presel(merged_ndvi_file, parcel)
            print(parcel_id, acq_date, ndvi_mean, ndvi_count, ndvi_std, sep=',',
                  file=csv_out)
    with open(logfile, 'a') as fout:
        print(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"), "\t", parcel_id, "\tbatch_utils.calculate_ndvi_statistics:\t", "{0:.3f}".format(time.time() - start), file=fout)
    print(f"NDVI stats read in: {time.time() - start} seconds")
def calculate_ndwi_statistics(parcel_id, crop, out_tif_folder_base, tiles_to_download, parcel, vector_file_name, parcel_id_column, logfile):
    """Extract per-date NDWI statistics for one parcel into a CSV file.

    Writes one row (Field_ID, acq_date, mean, count, std) per merged NDWI
    tif of the parcel, then appends the elapsed time to *logfile*.
    Note: tiles_to_download, vector_file_name and parcel_id_column are
    kept for interface compatibility but are not used here.
    """
    start = time.time()
    acq_dates, merged_ndwi_files = get_merged_ndwi_files_and_acquisition_dates(parcel_id, crop, out_tif_folder_base)
    chip_folder = str(parcel_id) + '_' + crop
    output_ndwi_folder = out_tif_folder_base + "/ndwi"
    output_ndwi_csv_file = output_ndwi_folder + "/" + chip_folder + "_ndwi.csv"
    if not os.path.exists(output_ndwi_folder):
        os.makedirs(output_ndwi_folder)
    # Open the CSV once with a context manager instead of leaking a new
    # file handle on every print(file=open(...)) call.
    with open(output_ndwi_csv_file, "w") as fcsv:
        print("Field_ID,acq_date,ndwi_mean,ndwi_count,ndwi_std", file=fcsv)
        for merged_ndwi_file in merged_ndwi_files:
            tile_name = os.path.basename(merged_ndwi_file).split(".")[0]
            acq_date = download_utils.get_acquisition_date_from_tile_name(tile_name)
            ndwi_mean, ndwi_count, ndwi_std = extract_utils.extract_stats_for_one_parcel_geopandas_presel(merged_ndwi_file, parcel)
            print(parcel_id, acq_date, ndwi_mean, ndwi_count, ndwi_std, sep=',', file=fcsv)
    # Context manager so the log handle is closed even if printing raises.
    with open(logfile, 'a') as fout:
        print(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"), "\t", parcel_id, "\tbatch_utils.calculate_ndwi_statistics:\t", "{0:.3f}".format(time.time() - start), file=fout)
    print(f"ndwi stats read in: {time.time() - start} seconds")
def calculate_bs_statistics(parcel_id, crop, out_tif_folder_base, parcel, logfile, polarisation, orbit_orientation):
    """Extract per-date Sentinel-1 backscatter statistics for one parcel into a CSV.

    One row (Field_ID, acq_date, mean, count, std) per acquisition date;
    dates for which the extraction yields no mean are skipped.  Elapsed
    time is appended to *logfile*.
    """
    start = time.time()
    chip_folder = str(parcel_id) + '_' + crop
    output_s1_bs_folder = out_tif_folder_base + "/s1_bs"
    output_s1_bs_csv_file = output_s1_bs_folder + "/" + chip_folder + "_s1bs_" + polarisation + "_" + orbit_orientation + ".csv"
    acquisition_dates_and_s1_bs_files_dict = plot_utils.get_acquisition_dates_and_s1_bs_files_dict(out_tif_folder_base + "/" + chip_folder + "_s1_bs", polarisation, orbit_orientation)
    if not os.path.exists(output_s1_bs_folder):
        os.makedirs(output_s1_bs_folder)
    # Open the CSV once with a context manager instead of leaking a new
    # file handle on every print(file=open(...)) call.
    with open(output_s1_bs_csv_file, "w") as fcsv:
        print("Field_ID,acq_date,bs_mean,bs_count,bs_std", file=fcsv)
        for acq_date, s1_bs_file in acquisition_dates_and_s1_bs_files_dict.items():
            bs_mean, bs_count, bs_std = extract_utils.extract_stats_for_one_parcel_geopandas_presel_bs(s1_bs_file, parcel)
            # identity comparison with None (was `!= None`)
            if bs_mean is not None:
                print(parcel_id, acq_date, bs_mean, bs_count, bs_std, sep=',', file=fcsv)
    with open(logfile, 'a') as fout:
        print(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"), "\t", parcel_id, "\tbatch_utils.calculate_bs_statistics:\t", "{0:.3f}".format(time.time() - start), file=fout)
    print("S1 BS_" + polarisation + "_" + orbit_orientation + f" stats read in: {time.time() - start} seconds")
def get_all_parcel_ids_from_parcel_shape(parcel_shp, parcel_id_column, crop_name_column):
    """Return a list of (parcel_id, crop_name) pairs from the shapefile, sorted by id.

    Spaces in the crop name are replaced with underscores; a missing crop
    name becomes an empty string.
    """
    data_source = ogr.Open(parcel_shp)
    layer = data_source.GetLayer()
    parcel_id_crop_list = []
    for feature in layer:
        feature_id = feature.GetField(parcel_id_column)
        crop_name = feature.GetField(crop_name_column)
        if crop_name is None:
            crop_name = ""
        parcel_id_crop_list.append((feature_id, crop_name.replace(" ", "_")))
    return sorted(parcel_id_crop_list, key=getKey)
def getKey(item):
    """Sort-key helper: the first element (the parcel id) of a pair."""
    first_element = item[0]
    return first_element
# l = [[2, 3], [6, 7], [3, 34], [24, 64], [1, 43]]
# sorted(l, key=getKey)
def does_ndvi_csv_exist(parcel_id, crop, out_tif_folder_base):
    """Return True if the NDVI statistics CSV for this parcel/crop already exists."""
    chip_folder = str(parcel_id) + '_' + crop
    output_ndvi_folder = out_tif_folder_base + "/ndvi"
    output_ndvi_csv_file = output_ndvi_folder + "/" + chip_folder + "_ndvi.csv"
    # os.path.isfile already returns a bool -- no if/else needed
    return os.path.isfile(output_ndvi_csv_file)
def does_ndvi_graph_exist(parcel_id, out_tif_folder_base):
    """Return True if the NDVI graph jpg for this parcel already exists."""
    output_ndvi_graph_folder = out_tif_folder_base + "/ndvi_graphs"
    output_ndvi_graph_file = output_ndvi_graph_folder + "/parcel_id_" + str(parcel_id) + "_NDVI.jpg"
    # os.path.isfile already returns a bool -- no if/else needed
    return os.path.isfile(output_ndvi_graph_file)
def run_get_and_download_s1_bs_imagettes(raw_chips_s1_batch_url, out_s1_bs_folder,
                                         search_window_start_date, search_window_end_date,
                                         lon, lat, username, password, chipsize, url_base, logfile):
    """Query and download Sentinel-1 backscatter imagettes month by month.

    Walks the search window one calendar month at a time.  For each month the
    batch chip-extract query is retried until it succeeds, then the download
    is retried until it succeeds.  Elapsed time is appended to *logfile*.
    """
    fout = None
    start = time.time()
    dt_search_window_start_date = plot_utils.get_date_from_string(search_window_start_date)
    dt_search_window_end_date = plot_utils.get_date_from_string(search_window_end_date)
    act_start_date = dt_search_window_start_date
    while act_start_date < dt_search_window_end_date:
        act_end_date = last_day_of_month(act_start_date)
        # NOTE(review): as in the original three-branch version, the FIRST
        # month is never clipped to the window end date -- only later months
        # are.  Preserved verbatim; confirm whether this is intended.
        if act_start_date != dt_search_window_start_date and act_end_date > dt_search_window_end_date:
            act_end_date = dt_search_window_end_date
        # Retry both steps until the helpers report no error (True = retry).
        was_error_1 = True
        was_error_2 = True
        while was_error_1:
            list_of_s1_bs_imagettes, was_error_1 = download_utils.get_s1_bs_imagettes(
                raw_chips_s1_batch_url, lon, lat, str(act_start_date), str(act_end_date),
                username, password, chipsize)
        while was_error_2:
            was_error_2 = download_utils.download_s1_bs_imagettes(
                url_base, list_of_s1_bs_imagettes, out_s1_bs_folder, username, password)
        act_start_date = add_one_month(act_start_date)
    # Context manager so the log handle is closed even if printing raises.
    with open(logfile, 'a') as fout:
        print(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"), "\t\tbatch_utils.run_get_and_download_s1_bs_imagettes:\t", "{0:.3f}".format(time.time() - start), file=fout)
    print(f"Got list of cloudfree bands and downloaded images: {time.time() - start} seconds")
def run_rescale_s1_bs_images(out_s1_bs_folder, out_s1_bs_folder_rescale):
    """Rescale every downloaded S1 backscatter tif in the folder to uint16."""
    if not os.path.exists(out_s1_bs_folder_rescale):
        os.makedirs(out_s1_bs_folder_rescale)
    for raw_file in glob(out_s1_bs_folder + "/*.tif"):
        acq_date_name = os.path.basename(raw_file).split(".")[0]
        rescaled_file = out_s1_bs_folder_rescale + "/" + acq_date_name + ".tif"
        download_utils.rescale_s1_bs_image(raw_file, rescaled_file)
def run_lut_stretch_one_band_s1_bs(out_s1_bs_folder_rescale, out_s1_bs_folder_rescale_lut, s1_bs_left_percent, s1_bs_right_percent):
    """LUT-stretch every rescaled one-band S1 backscatter tif of the parcel."""
    if not os.path.exists(out_s1_bs_folder_rescale_lut):
        os.makedirs(out_s1_bs_folder_rescale_lut)
    for rescaled_file in glob(out_s1_bs_folder_rescale + "/*.tif"):
        actdate = os.path.basename(rescaled_file).split(".")[0]
        print(actdate)
        stretched_file = out_s1_bs_folder_rescale_lut + "/" + actdate + ".tif"
        lut.lut_stretch_one_band_s1_bs(rescaled_file, stretched_file, s1_bs_left_percent, s1_bs_right_percent)
def add_one_month(orig_date):
    """Return *orig_date* advanced by one calendar month.

    The day is clamped to the last valid day of the target month
    (e.g. Jan 31 -> Feb 28/29).
    """
    new_year = orig_date.year
    new_month = orig_date.month + 1
    # months run 1..12 in datetime.date
    if new_month > 12:
        new_year += 1
        new_month -= 12
    # Renamed local (was `last_day_of_month`) so it no longer shadows the
    # module-level last_day_of_month() helper.
    days_in_target_month = calendar.monthrange(new_year, new_month)[1]
    new_day = min(orig_date.day, days_in_target_month)
    return orig_date.replace(year=new_year, month=new_month, day=new_day)
def last_day_of_month(any_day):
    """Return the date of the last day of *any_day*'s month."""
    _, num_days = calendar.monthrange(any_day.year, any_day.month)
    return any_day.replace(day=num_days)
def run_lut_stretch_dynamic(parcel_id, crop, out_tif_folder_base, left_percent, right_percent, lut_txt_file, logfile):
    """LUT-stretch every merged tif of the parcel with dynamic percentile limits.

    Skips dates whose stretched output already exists.  Per-band min/max
    values are written to *lut_txt_file*; timing is appended to *logfile*.
    """
    start = time.time()
    chip_folder = str(parcel_id) + '_' + crop
    out_tif_folder = out_tif_folder_base + "/" + chip_folder
    lut_bands = [1, 2, 3]
    merge_folder = out_tif_folder + "_merged"
    merge_lut_folder = out_tif_folder + "_merged_lut_dynamic"
    if not os.path.exists(merge_lut_folder):
        os.makedirs(merge_lut_folder)
    for merged_file in glob(merge_folder + "/*.tif"):
        tile_name = os.path.basename(merged_file).split(".")[0]
        acq_date = download_utils.get_acquisition_date_from_tile_name(tile_name)
        output = merge_lut_folder + "/" + tile_name + ".tif"
        if os.path.isfile(output):
            # already stretched for this date -> skip
            print(tile_name + " already created")
        else:
            print("LUT stretching tile: ", tile_name, end="")
            lut.writeMinMaxToFile(merged_file, acq_date, lut_bands, left_percent, right_percent, lut_txt_file, tile_name)
            lut.lutStretch(merged_file, output, left_percent, right_percent, lut_bands)
            print("...done")
    # Context manager so the log handle is closed even if printing raises.
    with open(logfile, 'a') as fout:
        print(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"), "\t", parcel_id, "\tbatch_utils.run_lut_stretch_dynamic:\t", "{0:.3f}".format(time.time() - start), file=fout)
    print(f"LUT stretch dynamic: {time.time() - start} seconds")
def get_merged_dynamically_lutstretched_files_and_acquisition_dates(parcel_id, crop, out_tif_folder_base, logfile):
    """Return (acq_dates, files) for the dynamically LUT-stretched merged tifs.

    acq_dates[i] is the acquisition date parsed from the tile name of the
    i-th file.  Timing is appended to *logfile*.
    """
    start = time.time()
    chip_folder = str(parcel_id) + '_' + crop
    merge_lut_folder = out_tif_folder_base + "/" + chip_folder + "_merged_lut_dynamic"
    merged_lut_files = glob(merge_lut_folder + "/*.tif")
    acq_dates = []
    for merged_lut_file in merged_lut_files:
        tile_name = os.path.basename(merged_lut_file).split(".")[0]
        acq_dates.append(download_utils.get_acquisition_date_from_tile_name(tile_name))
    # Context manager so the log handle is closed even if printing raises.
    with open(logfile, 'a') as fout:
        print(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"), "\t", parcel_id, "\tbatch_utils.get_merged_dynamically_lutstretched_files_and_acquisition_dates:\t", "{0:.3f}".format(time.time() - start), file=fout)
    return acq_dates, merged_lut_files
# -*- coding: utf-8 -*-
# This file is part of CbM (https://github.com/ec-jrc/cbm).
# Author : <NAME>
# Credits : GTCAP Team
# Copyright : 2021 European Commission, Joint Research Centre
# License : 3-Clause BSD
import time
import geopandas
import download_utils, extract_utils, plot_utils
from glob import glob
import os
import lut
from osgeo import ogr
import datetime
import collections
import warnings
import calendar
def select_parcel(vector_file_name, parcel_id_column, parcel_id, logfile):
    """Read the parcel vector file and return the row(s) matching *parcel_id*.

    Returns the GeoDataFrame subset whose *parcel_id_column* equals
    *parcel_id*; appends the elapsed time to *logfile*.
    """
    start = time.time()
    parcels = geopandas.read_file(vector_file_name)
    parcel = parcels[parcels[parcel_id_column] == parcel_id]
    print(f"Parcel selected in: {time.time() - start} seconds")
    # Context manager so the log handle is closed even if printing raises.
    with open(logfile, 'a') as fout:
        print(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"), "\t", parcel_id, "\tbatch_utils.select_parcel:\t", "{0:.3f}".format(time.time() - start), file=fout)
    return parcel
def run_get_scl_imagettes(parcel, parcel_id, crop, out_tif_folder_base,
                          search_window_start_date, search_window_end_date, search_split_days,
                          raw_chips_by_location_url, username, password, chipsize,
                          url_base, lon, lat, logfile
                          ):
    """Download the SCL (scene classification) imagettes for one parcel.

    Splits the search window into sub-ranges of *search_split_days* days;
    for each range, retries the list query and then the download until the
    helpers report success.  Timing is appended to *logfile*.
    """
    start = time.time()
    chip_folder = str(parcel_id) + '_' + crop
    out_tif_folder = out_tif_folder_base + "/" + chip_folder
    date_ranges = download_utils.split_date_range(search_window_start_date, search_window_end_date, search_split_days)
    for date_range in date_ranges:
        start_date = date_range[0]
        end_date = date_range[1]
        print("Getting SCL imagettes from", start_date, "to", end_date)
        # Retry both steps until the helpers report no error (True = retry).
        was_error_1 = True
        was_error_2 = True
        while was_error_1:
            locurl, list_of_scl_imagettes, was_error_1 = download_utils.get_scl_imagettes(
                raw_chips_by_location_url, lon, lat, start_date, end_date,
                username, password, chipsize)
        while was_error_2:
            was_error_2 = download_utils.download_scl_imagettes(url_base, list_of_scl_imagettes, out_tif_folder, username, password)
    print(f"Got list of SCL imagettes and downloaded in: {time.time() - start} seconds")
    # Context manager so the log handle is closed even if printing raises.
    with open(logfile, 'a') as fout:
        print(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"), "\t", parcel_id, "\tbatch_utils.run_get_scl_imagettes:\t", "{0:.3f}".format(time.time() - start), file=fout)
def create_list_of_tiles_to_be_downloaded(parcel, parcel_id, crop, out_tif_folder_base, cloud_categories, logfile):
    """Return the tile names whose SCL imagette shows the parcel as cloud-free.

    Scans the downloaded *.SCL.tif files of the parcel and keeps the tiles
    that are not cloudy over the parcel geometry.  Timing is appended to
    *logfile*.
    """
    warnings.simplefilter(action='ignore', category=FutureWarning)
    start = time.time()
    chip_folder = str(parcel_id) + '_' + crop
    out_tif_folder = out_tif_folder_base + "/" + chip_folder
    downloaded_scl_files = glob(out_tif_folder + "/*/*.SCL.tif")
    tiles_to_download = []
    for downloaded_scl_file in downloaded_scl_files:
        is_tile_cloudy = download_utils.is_tile_cloudy_geopandas(downloaded_scl_file, parcel, cloud_categories)
        if not is_tile_cloudy:
            tile_name = os.path.basename(downloaded_scl_file).split(".")[0]
            tiles_to_download.append(tile_name)
    print(f"List of tiles to be downloaded created in {time.time() - start} seconds")
    # Context manager so the log handle is closed even if printing raises.
    with open(logfile, 'a') as fout:
        print(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"), "\t", parcel_id, "\tbatch_utils.create_list_of_tiles_to_be_downloaded:\t", "{0:.3f}".format(time.time() - start), file=fout)
    return tiles_to_download
def run_get_and_download_band_imagettes(max_number_of_tiles_per_request, tiles_to_download, raw_chips_batch_url,
                                        lon, lat, bands, username, password, chipsize, url_base,
                                        parcel_id, crop, out_tif_folder_base, logfile):
    """Download band imagettes for the cloud-free tiles in batched requests.

    Issues the batch chip-extract POST in chunks of at most
    *max_number_of_tiles_per_request* tiles, retrying each query/download
    until it succeeds, then fetches the remaining tiles in a final request.
    Timing is appended to *logfile*.
    """
    start = time.time()
    chip_folder = str(parcel_id) + '_' + crop
    out_tif_folder = out_tif_folder_base + "/" + chip_folder
    number_of_full_requests = len(tiles_to_download) // max_number_of_tiles_per_request
    if number_of_full_requests == 0:
        # always issue at least one request
        number_of_full_requests = 1
    for request in range(0, number_of_full_requests):
        request_end_index = max_number_of_tiles_per_request * (request + 1)
        request_start_index = request_end_index - max_number_of_tiles_per_request
        print("request number:", request)
        tiles_to_download_subset = tiles_to_download[request_start_index:request_end_index]
        # Retry both steps until the helpers report no error (True = retry).
        was_error_1 = True
        was_error_2 = True
        while was_error_1:
            list_of_band_imagettes, was_error_1 = download_utils.get_band_imagettes(
                raw_chips_batch_url, lon, lat, tiles_to_download_subset,
                bands, username, password, chipsize)
        while was_error_2:
            was_error_2 = download_utils.download_band_imagettes(url_base, list_of_band_imagettes, out_tif_folder, username, password)
    # NOTE(review): the "last bunch" request is issued even when the full
    # requests already covered every tile (the subset is then empty) --
    # behaviour preserved from the original; confirm the server tolerates
    # an empty tile list.
    last_request_end_index = len(tiles_to_download) + 1
    last_request_start_index = request_end_index
    print("last bunch")
    was_error_1 = True
    was_error_2 = True
    while was_error_1:
        list_of_band_imagettes, was_error_1 = download_utils.get_band_imagettes(
            raw_chips_batch_url, lon, lat,
            tiles_to_download[last_request_start_index:last_request_end_index],
            bands, username, password, chipsize)
    while was_error_2:
        was_error_2 = download_utils.download_band_imagettes(url_base, list_of_band_imagettes, out_tif_folder, username, password)
    # Context manager so the log handle is closed even if printing raises.
    with open(logfile, 'a') as fout:
        print(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"), "\t", parcel_id, "\tbatch_utils.run_get_and_download_band_imagettes:\t", "{0:.3f}".format(time.time() - start), file=fout)
    print(f"Got list of cloudfree bands and downloaded images: {time.time() - start} seconds")
def run_merge_bands(parcel_id, crop, out_tif_folder_base, logfile):
    """Merge the downloaded B08/B11/B04 band tifs for each not-yet-merged date.

    Delegates the actual work to download_utils.merge_bands; timing is
    appended to *logfile*.
    """
    start = time.time()
    download_utils.merge_bands(parcel_id, crop, out_tif_folder_base)
    # Context manager so the log handle is closed even if printing raises.
    with open(logfile, 'a') as fout:
        print(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"), "\t", parcel_id, "\tbatch_utils.run_merge_bands:\t", "{0:.3f}".format(time.time() - start), file=fout)
    print(f"Merging cloudfree bands images in {time.time() - start} seconds")
def run_merge_4_bands(parcel_id, crop, out_tif_folder_base):
    """Merge the four downloaded band tifs for each not-yet-merged date."""
    t0 = time.time()
    download_utils.merge_4_bands(parcel_id, crop, out_tif_folder_base)
    print(f"Merging 4 bands images in {time.time() - t0} seconds")
def run_lut_stretch(parcel_id, crop, out_tif_folder_base, left_percent, right_percent, lut_txt_file, logfile):
    """LUT-stretch every merged tif of the parcel using the 'magic' LUT.

    Skips dates whose stretched output already exists.  Per-band min/max
    values are written to *lut_txt_file*; timing is appended to *logfile*.
    """
    start = time.time()
    chip_folder = str(parcel_id) + '_' + crop
    out_tif_folder = out_tif_folder_base + "/" + chip_folder
    lut_bands = [1, 2, 3]
    merge_folder = out_tif_folder + "_merged"
    merge_lut_folder = out_tif_folder + "_merged_lut_magic"
    if not os.path.exists(merge_lut_folder):
        os.makedirs(merge_lut_folder)
    for merged_file in glob(merge_folder + "/*.tif"):
        tile_name = os.path.basename(merged_file).split(".")[0]
        acq_date = download_utils.get_acquisition_date_from_tile_name(tile_name)
        output = merge_lut_folder + "/" + tile_name + ".tif"
        if os.path.isfile(output):
            # already stretched for this date -> skip
            print(tile_name + " already created")
        else:
            print("LUT stretching tile: ", tile_name, end="")
            lut.writeMinMaxToFile(merged_file, acq_date, lut_bands, left_percent, right_percent, lut_txt_file, tile_name)
            lut.lutStretchMagicLut(merged_file, output, lut_bands)
            print("...done")
    # Context manager so the log handle is closed even if printing raises.
    with open(logfile, 'a') as fout:
        print(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"), "\t", parcel_id, "\tbatch_utils.run_lut_stretch:\t", "{0:.3f}".format(time.time() - start), file=fout)
    print(f"LUT stretch: {time.time() - start} seconds")
def run_lut_stretch_dynamic(parcel_id, crop, out_tif_folder_base, left_percent, right_percent, lut_txt_file, logfile):
    """LUT-stretch every merged tif of the parcel into the _merged_lut_dynamic folder.

    Skips dates whose stretched output already exists.  Per-band min/max
    values are written to *lut_txt_file*; timing is appended to *logfile*.

    NOTE(review): despite the 'dynamic' name this calls lut.lutStretchMagicLut;
    the commented-out lut.lutStretch call in the original suggests a dynamic
    percentile stretch may have been intended -- TODO confirm.  Behaviour is
    preserved unchanged here.
    """
    start = time.time()
    chip_folder = str(parcel_id) + '_' + crop
    out_tif_folder = out_tif_folder_base + "/" + chip_folder
    lut_bands = [1, 2, 3]
    merge_folder = out_tif_folder + "_merged"
    merge_lut_folder = out_tif_folder + "_merged_lut_dynamic"
    if not os.path.exists(merge_lut_folder):
        os.makedirs(merge_lut_folder)
    for merged_file in glob(merge_folder + "/*.tif"):
        tile_name = os.path.basename(merged_file).split(".")[0]
        acq_date = download_utils.get_acquisition_date_from_tile_name(tile_name)
        output = merge_lut_folder + "/" + tile_name + ".tif"
        if os.path.isfile(output):
            # already stretched for this date -> skip
            print(tile_name + " already created")
        else:
            print("LUT stretching tile: ", tile_name, end="")
            lut.writeMinMaxToFile(merged_file, acq_date, lut_bands, left_percent, right_percent, lut_txt_file, tile_name)
            lut.lutStretchMagicLut(merged_file, output, lut_bands)
            print("...done")
    # Context manager so the log handle is closed even if printing raises.
    with open(logfile, 'a') as fout:
        print(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"), "\t", parcel_id, "\tbatch_utils.run_lut_stretch_dynamic:\t", "{0:.3f}".format(time.time() - start), file=fout)
    print(f"LUT stretch: {time.time() - start} seconds")
def get_merged_lutstretched_files_and_acquisition_dates(parcel_id, crop, out_tif_folder_base, logfile):
    """Return (acq_dates, files) for the magic-LUT-stretched merged tifs.

    acq_dates[i] is the acquisition date parsed from the tile name of the
    i-th file.  Timing is appended to *logfile*.
    """
    start = time.time()
    chip_folder = str(parcel_id) + '_' + crop
    merge_lut_folder = out_tif_folder_base + "/" + chip_folder + "_merged_lut_magic"
    merged_lut_files = glob(merge_lut_folder + "/*.tif")
    acq_dates = []
    for merged_lut_file in merged_lut_files:
        tile_name = os.path.basename(merged_lut_file).split(".")[0]
        acq_dates.append(download_utils.get_acquisition_date_from_tile_name(tile_name))
    # Context manager so the log handle is closed even if printing raises.
    with open(logfile, 'a') as fout:
        print(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"), "\t", parcel_id, "\tbatch_utils.get_merged_lutstretched_files_and_acquisition_dates:\t", "{0:.3f}".format(time.time() - start), file=fout)
    return acq_dates, merged_lut_files
def get_merged_lutstretched_files_and_acquisition_dates_dynamic(parcel_id, crop, out_tif_folder_base, logfile):
    """Return (acq_dates, files) for the dynamically LUT-stretched merged tifs.

    acq_dates[i] is the acquisition date parsed from the tile name of the
    i-th file.  Timing is appended to *logfile*.
    """
    start = time.time()
    chip_folder = str(parcel_id) + '_' + crop
    merge_lut_folder = out_tif_folder_base + "/" + chip_folder + "_merged_lut_dynamic"
    merged_lut_files = glob(merge_lut_folder + "/*.tif")
    acq_dates = []
    for merged_lut_file in merged_lut_files:
        tile_name = os.path.basename(merged_lut_file).split(".")[0]
        acq_dates.append(download_utils.get_acquisition_date_from_tile_name(tile_name))
    # Context manager so the log handle is closed even if printing raises.
    with open(logfile, 'a') as fout:
        print(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"), "\t", parcel_id, "\tbatch_utils.get_merged_lutstretched_files_and_acquisition_dates_dynamic:\t", "{0:.3f}".format(time.time() - start), file=fout)
    return acq_dates, merged_lut_files
def get_merged_ndvi_files_and_acquisition_dates(parcel_id, crop, out_tif_folder_base):
    """Return (acq_dates, files) for the merged NDVI tifs of this parcel/crop."""
    ndvi_folder = out_tif_folder_base + "/" + str(parcel_id) + '_' + crop + "_merged_ndvi"
    ndvi_files = glob(ndvi_folder + "/*.tif")
    acq_dates = [
        download_utils.get_acquisition_date_from_tile_name(os.path.basename(path).split(".")[0])
        for path in ndvi_files
    ]
    return acq_dates, ndvi_files
def get_merged_ndwi_files_and_acquisition_dates(parcel_id, crop, out_tif_folder_base):
    """Return (acq_dates, files) for the merged NDWI tifs of this parcel/crop."""
    ndwi_folder = out_tif_folder_base + "/" + str(parcel_id) + '_' + crop + "_merged_ndwi"
    ndwi_files = glob(ndwi_folder + "/*.tif")
    acq_dates = [
        download_utils.get_acquisition_date_from_tile_name(os.path.basename(path).split(".")[0])
        for path in ndwi_files
    ]
    return acq_dates, ndwi_files
def get_merged_tif_files_and_acquisition_dates(parcel_id, crop, out_tif_folder_base):
    """Return (acq_dates, files) for the merged band tifs of this parcel/crop."""
    merged_folder = out_tif_folder_base + "/" + str(parcel_id) + '_' + crop + "_merged"
    merged_files = glob(merged_folder + "/*.tif")
    acq_dates = [
        download_utils.get_acquisition_date_from_tile_name(os.path.basename(path).split(".")[0])
        for path in merged_files
    ]
    return acq_dates, merged_files
def get_merged_tif_files_and_acquisition_dates_in_dict(parcel_id, crop, out_tif_folder_base):
    """Return an OrderedDict mapping acquisition date -> merged tif path, sorted by date."""
    merged_folder = out_tif_folder_base + "/" + str(parcel_id) + '_' + crop + "_merged"
    date_to_file = {}
    for tif_path in glob(merged_folder + "/*.tif"):
        tile_name = os.path.basename(tif_path).split(".")[0]
        date_to_file[download_utils.get_acquisition_date_from_tile_name(tile_name)] = tif_path
    return collections.OrderedDict(sorted(date_to_file.items()))
def run_ndvi_creation(parcel_id, crop, out_tif_folder_base, logfile):
    """Create an NDVI tif for every merged band tif that does not have one yet.

    Timing is appended to *logfile*.  (Removed the unused lut_bands and
    acq_date locals of the original.)
    """
    start = time.time()
    chip_folder = str(parcel_id) + '_' + crop
    out_tif_folder = out_tif_folder_base + "/" + chip_folder
    merge_folder = out_tif_folder + "_merged"
    merge_ndvi_folder = out_tif_folder + "_merged_ndvi"
    if not os.path.exists(merge_ndvi_folder):
        os.makedirs(merge_ndvi_folder)
    for merged_file in glob(merge_folder + "/*.tif"):
        tile_name = os.path.basename(merged_file).split(".")[0]
        output = merge_ndvi_folder + "/" + tile_name + ".tif"
        if os.path.isfile(output):
            # NDVI for this date already exists -> skip
            print(tile_name + " ndvi already created")
        else:
            print("Creating NDVI for tile: ", tile_name, end="")
            extract_utils.calculate_ndvi(merged_file, output)
            print("...done")
    # Context manager so the log handle is closed even if printing raises.
    with open(logfile, 'a') as fout:
        print(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"), "\t", parcel_id, "\tbatch_utils.run_ndvi_creation:\t", "{0:.3f}".format(time.time() - start), file=fout)
    print(f"NDVI created in: {time.time() - start} seconds")
def run_ndwi_creation(parcel_id, crop, out_tif_folder_base, logfile):
    """Create an NDWI tif for every merged band tif that does not have one yet.

    Timing is appended to *logfile*.  (Removed the unused lut_bands and
    acq_date locals of the original.)
    """
    start = time.time()
    chip_folder = str(parcel_id) + '_' + crop
    out_tif_folder = out_tif_folder_base + "/" + chip_folder
    merge_folder = out_tif_folder + "_merged"
    merge_ndwi_folder = out_tif_folder + "_merged_ndwi"
    if not os.path.exists(merge_ndwi_folder):
        os.makedirs(merge_ndwi_folder)
    for merged_file in glob(merge_folder + "/*.tif"):
        tile_name = os.path.basename(merged_file).split(".")[0]
        output = merge_ndwi_folder + "/" + tile_name + ".tif"
        if os.path.isfile(output):
            # NDWI for this date already exists -> skip
            print(tile_name + " ndwi already created")
        else:
            print("Creating NDWI for tile: ", tile_name, end="")
            extract_utils.calculate_ndwi(merged_file, output)
            print("...done")
    # Context manager so the log handle is closed even if printing raises.
    with open(logfile, 'a') as fout:
        print(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"), "\t", parcel_id, "\tbatch_utils.run_ndwi_creation:\t", "{0:.3f}".format(time.time() - start), file=fout)
    print(f"NDWI created in: {time.time() - start} seconds")
def calculate_ndvi_statistics(parcel_id, crop, out_tif_folder_base, tiles_to_download, parcel, vector_file_name, parcel_id_column, logfile):
    """Extract per-date NDVI statistics for one parcel into a CSV file.

    Writes one row (Field_ID, acq_date, mean, count, std) per merged NDVI
    tif of the parcel, then appends the elapsed time to *logfile*.
    Note: tiles_to_download, vector_file_name and parcel_id_column are
    kept for interface compatibility but are not used here.
    """
    start = time.time()
    acq_dates, merged_ndvi_files = get_merged_ndvi_files_and_acquisition_dates(parcel_id, crop, out_tif_folder_base)
    chip_folder = str(parcel_id) + '_' + crop
    output_ndvi_folder = out_tif_folder_base + "/ndvi"
    output_ndvi_csv_file = output_ndvi_folder + "/" + chip_folder + "_ndvi.csv"
    if not os.path.exists(output_ndvi_folder):
        os.makedirs(output_ndvi_folder)
    # Open the CSV once with a context manager instead of leaking a new
    # file handle on every print(file=open(...)) call.
    with open(output_ndvi_csv_file, "w") as fcsv:
        print("Field_ID,acq_date,ndvi_mean,ndvi_count,ndvi_std", file=fcsv)
        for merged_ndvi_file in merged_ndvi_files:
            tile_name = os.path.basename(merged_ndvi_file).split(".")[0]
            acq_date = download_utils.get_acquisition_date_from_tile_name(tile_name)
            ndvi_mean, ndvi_count, ndvi_std = extract_utils.extract_stats_for_one_parcel_geopandas_presel(merged_ndvi_file, parcel)
            print(parcel_id, acq_date, ndvi_mean, ndvi_count, ndvi_std, sep=',', file=fcsv)
    # Context manager so the log handle is closed even if printing raises.
    with open(logfile, 'a') as fout:
        print(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"), "\t", parcel_id, "\tbatch_utils.calculate_ndvi_statistics:\t", "{0:.3f}".format(time.time() - start), file=fout)
    print(f"NDVI stats read in: {time.time() - start} seconds")
def calculate_ndwi_statistics(parcel_id, crop, out_tif_folder_base, tiles_to_download, parcel, vector_file_name, parcel_id_column, logfile):
    """Extract NDWI zonal statistics for one parcel and write them to CSV.

    For every merged NDWI raster of the parcel, computes mean/count/std over
    the parcel geometry and writes one CSV row per acquisition date to
    <out_tif_folder_base>/ndwi/<parcel_id>_<crop>_ndwi.csv (overwritten each run).

    tiles_to_download, vector_file_name and parcel_id_column are unused here
    but kept for signature compatibility with the batch driver. A timing line
    is appended to logfile.
    """
    start = time.time()
    acq_dates, merged_ndwi_files = get_merged_ndwi_files_and_acquisition_dates(parcel_id, crop, out_tif_folder_base)
    chip_folder = str(parcel_id) + '_' + crop
    output_ndwi_folder = out_tif_folder_base + "/ndwi"
    output_ndwi_csv_file = output_ndwi_folder + "/" + chip_folder + "_ndwi.csv"
    if not os.path.exists(output_ndwi_folder):
        os.makedirs(output_ndwi_folder)
    # One managed handle for the whole CSV; the previous version leaked a new
    # file object on every row via open(...) inside print(...).
    with open(output_ndwi_csv_file, "w") as csv_file:
        print("Field_ID,acq_date,ndwi_mean,ndwi_count,ndwi_std", file=csv_file)
        for merged_ndwi_file in merged_ndwi_files:
            # file name stem is the tile name, which encodes the acquisition date
            tile_name = os.path.basename(merged_ndwi_file).split(".")[0]
            acq_date = download_utils.get_acquisition_date_from_tile_name(tile_name)
            ndwi_mean, ndwi_count, ndwi_std = extract_utils.extract_stats_for_one_parcel_geopandas_presel(merged_ndwi_file, parcel)
            print(parcel_id, acq_date, ndwi_mean, ndwi_count, ndwi_std, sep=',',
                  file=csv_file)
    with open(logfile, 'a') as fout:
        print(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"), "\t", parcel_id, "\tbatch_utils.calculate_ndwi_statistics:\t", "{0:.3f}".format(time.time() - start), file=fout)
    print(f"ndwi stats read in: {time.time() - start} seconds")
def calculate_bs_statistics(parcel_id, crop, out_tif_folder_base, parcel, logfile, polarisation, orbit_orientation):
    """Extract Sentinel-1 backscatter zonal statistics for one parcel.

    For every backscatter image of the given polarisation/orbit orientation,
    computes mean/count/std over the parcel geometry and writes one CSV row
    per acquisition date to
    <out_tif_folder_base>/s1_bs/<parcel_id>_<crop>_s1bs_<pol>_<orbit>.csv.
    Acquisitions with no valid pixels (mean is None) are skipped.
    A timing line is appended to logfile.
    """
    start = time.time()
    chip_folder = str(parcel_id) + '_' + crop
    output_s1_bs_folder = out_tif_folder_base + "/s1_bs"
    output_s1_bs_csv_file = output_s1_bs_folder + "/" + chip_folder + "_s1bs_" + polarisation + "_" + orbit_orientation + ".csv"
    acquisition_dates_and_s1_bs_files_dict = plot_utils.get_acquisition_dates_and_s1_bs_files_dict(out_tif_folder_base + "/" + chip_folder + "_s1_bs", polarisation, orbit_orientation)
    if not os.path.exists(output_s1_bs_folder):
        os.makedirs(output_s1_bs_folder)
    # One managed handle for the whole CSV; the previous version leaked a new
    # file object on every row via open(...) inside print(...).
    with open(output_s1_bs_csv_file, "w") as csv_file:
        print("Field_ID,acq_date,bs_mean,bs_count,bs_std", file=csv_file)
        for acq_date, s1_bs_file in acquisition_dates_and_s1_bs_files_dict.items():
            bs_mean, bs_count, bs_std = extract_utils.extract_stats_for_one_parcel_geopandas_presel_bs(s1_bs_file, parcel)
            if bs_mean is not None:  # skip acquisitions without valid pixels
                print(parcel_id, acq_date, bs_mean, bs_count, bs_std, sep=',',
                      file=csv_file)
    with open(logfile, 'a') as fout:
        print(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"), "\t", parcel_id, "\tbatch_utils.calculate_bs_statistics:\t", "{0:.3f}".format(time.time() - start), file=fout)
    print("S1 BS_" + polarisation + "_" + orbit_orientation + f" stats read in: {time.time() - start} seconds")
def get_all_parcel_ids_from_parcel_shape(parcel_shp, parcel_id_column, crop_name_column):
    """Return a sorted list of (parcel_id, crop_name) tuples from a shapefile.

    Reads every feature of the first layer of parcel_shp; crop names have
    spaces replaced by underscores and a missing crop name becomes "".
    The list is sorted by parcel id.
    """
    ds = ogr.Open(parcel_shp)
    lyr = ds.GetLayer()
    parcel_id_crop_list = []
    for feat in lyr:
        parcel_id = feat.GetField(parcel_id_column)
        crop_name = feat.GetField(crop_name_column)
        if crop_name is None:
            crop_name = ""
        parcel_id_crop_list.append((parcel_id, crop_name.replace(" ", "_")))
    ds = None  # release the OGR data source handle (OGR closes on unref)
    parcel_id_crop_list.sort(key=getKey)  # in-place sort avoids a second list
    return parcel_id_crop_list
def getKey(item):
    """Sort key for (parcel_id, crop_name) pairs: the parcel id."""
    parcel_id = item[0]
    return parcel_id
# l = [[2, 3], [6, 7], [3, 34], [24, 64], [1, 43]]
# sorted(l, key=getKey)
def does_ndvi_csv_exist(parcel_id, crop, out_tif_folder_base):
    """Return True if the NDVI statistics CSV for this parcel/crop exists.

    Checks <out_tif_folder_base>/ndvi/<parcel_id>_<crop>_ndvi.csv, the file
    written by calculate_ndvi_statistics.
    """
    chip_folder = str(parcel_id) + '_' + crop
    output_ndvi_csv_file = out_tif_folder_base + "/ndvi/" + chip_folder + "_ndvi.csv"
    # isfile already returns a bool; no need for an if/else on it
    return os.path.isfile(output_ndvi_csv_file)
def does_ndvi_graph_exist(parcel_id, out_tif_folder_base):
    """Return True if the NDVI profile graph JPEG for this parcel exists.

    Checks <out_tif_folder_base>/ndvi_graphs/parcel_id_<parcel_id>_NDVI.jpg.
    """
    output_ndvi_graph_file = out_tif_folder_base + "/ndvi_graphs/parcel_id_" + str(parcel_id) + "_NDVI.jpg"
    # isfile already returns a bool; no need for an if/else on it
    return os.path.isfile(output_ndvi_graph_file)
def run_get_and_download_s1_bs_imagettes(raw_chips_s1_batch_url, out_s1_bs_folder,
                                         search_window_start_date, search_window_end_date,
                                         lon, lat, username, password, chipsize, url_base, logfile):
    """List and download Sentinel-1 backscatter imagettes month by month.

    Walks the search window one calendar month at a time; for each month it
    queries the batch chip-extract service (POST with the month's date range)
    and downloads the returned imagettes into out_s1_bs_folder. Both the
    listing and the download are retried until they report no error.
    A timing line is appended to logfile.
    """
    start = time.time()
    dt_search_window_start_date = plot_utils.get_date_from_string(search_window_start_date)
    dt_search_window_end_date = plot_utils.get_date_from_string(search_window_end_date)
    act_start_date = dt_search_window_start_date
    while act_start_date < dt_search_window_end_date:
        # Cap every month (including the first) at the end of the search
        # window; the original code skipped the cap on the first month.
        act_end_date = min(last_day_of_month(act_start_date), dt_search_window_end_date)
        _get_and_download_s1_bs_month(raw_chips_s1_batch_url, url_base, out_s1_bs_folder,
                                      lon, lat, act_start_date, act_end_date,
                                      username, password, chipsize)
        act_start_date = add_one_month(act_start_date)
    with open(logfile, 'a') as fout:
        print(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"), "\t\tbatch_utils.run_get_and_download_s1_bs_imagettes:\t", "{0:.3f}".format(time.time() - start), file=fout)
    print(f"Got list of cloudfree bands and downloaded images: {time.time() - start} seconds")

def _get_and_download_s1_bs_month(raw_chips_s1_batch_url, url_base, out_s1_bs_folder,
                                  lon, lat, act_start_date, act_end_date,
                                  username, password, chipsize):
    """Retry the imagette listing, then the download, for one month.

    Factored out of run_get_and_download_s1_bs_imagettes, where the same
    retry code was duplicated verbatim in all three branches.
    """
    was_error_1 = True
    was_error_2 = True
    while was_error_1:
        list_of_s1_bs_imagettes, was_error_1 = download_utils.get_s1_bs_imagettes(raw_chips_s1_batch_url, lon, lat, str(act_start_date), str(act_end_date), username, password, chipsize)
    while was_error_2:
        was_error_2 = download_utils.download_s1_bs_imagettes(url_base, list_of_s1_bs_imagettes, out_s1_bs_folder, username, password)
def run_rescale_s1_bs_images(out_s1_bs_folder, out_s1_bs_folder_rescale):
    """Rescale every downloaded S1 backscatter GeoTIFF of the parcel to uint16.

    Each <date>.tif in out_s1_bs_folder is written as <date>.tif into
    out_s1_bs_folder_rescale (created if missing).
    """
    if not os.path.exists(out_s1_bs_folder_rescale):
        os.makedirs(out_s1_bs_folder_rescale)
    for raw_file in glob(out_s1_bs_folder + "/*.tif"):
        # the file name stem is the acquisition date
        acquisition_date = os.path.basename(raw_file).split(".")[0]
        rescaled_file = out_s1_bs_folder_rescale + "/" + acquisition_date + ".tif"
        download_utils.rescale_s1_bs_image(raw_file, rescaled_file)
def run_lut_stretch_one_band_s1_bs(out_s1_bs_folder_rescale, out_s1_bs_folder_rescale_lut, s1_bs_left_percent, s1_bs_right_percent):
    """Apply a percentile LUT stretch to every rescaled S1 backscatter image.

    Reads each <date>.tif in out_s1_bs_folder_rescale and writes the stretched
    single-band result under the same name into out_s1_bs_folder_rescale_lut
    (created if missing), clipping s1_bs_left_percent / s1_bs_right_percent at
    the ends of the histogram.

    Note: the previous comment here ("rescale them to uint16") was a
    copy-paste leftover from run_rescale_s1_bs_images.
    """
    if not os.path.exists(out_s1_bs_folder_rescale_lut):
        os.makedirs(out_s1_bs_folder_rescale_lut)
    rescaled_files = glob(out_s1_bs_folder_rescale + "/*.tif")
    for rescaled_file in rescaled_files:
        # the file name stem is the acquisition date
        actdate = os.path.basename(rescaled_file).split(".")[0]
        print(actdate)
        output = out_s1_bs_folder_rescale_lut + "/" + actdate + ".tif"
        lut.lut_stretch_one_band_s1_bs(rescaled_file, output, s1_bs_left_percent, s1_bs_right_percent)
def add_one_month(orig_date):
    """Return *orig_date* shifted one calendar month forward.

    The day is clamped to the length of the target month, so e.g.
    2020-01-31 -> 2020-02-29. Months run 1..12 as in datetime.date.
    """
    # carry a year when stepping past December: month 12 -> (1, 0) -> January
    years_carried, month_index = divmod(orig_date.month, 12)
    target_year = orig_date.year + years_carried
    target_month = month_index + 1
    days_in_target = calendar.monthrange(target_year, target_month)[1]
    return orig_date.replace(year=target_year, month=target_month,
                             day=min(orig_date.day, days_in_target))
def last_day_of_month(any_day):
    """Return the date of the last day of *any_day*'s month."""
    # monthrange gives (weekday of first day, number of days in month)
    month_length = calendar.monthrange(any_day.year, any_day.month)[1]
    return any_day.replace(day=month_length)
def run_lut_stretch_dynamic(parcel_id, crop, out_tif_folder_base, left_percent, right_percent, lut_txt_file, logfile):
    """LUT-stretch every merged band composite of the parcel with dynamic percentiles.

    For each <tile>.tif in <parcel_id>_<crop>_merged, writes the per-date
    min/max to lut_txt_file and the stretched composite to
    <parcel_id>_<crop>_merged_lut_dynamic/<tile>.tif. Tiles whose output
    already exists are skipped, so the function is safe to re-run.
    A timing line is appended to logfile.
    """
    start = time.time()
    chip_folder = str(parcel_id) + '_' + crop
    out_tif_folder = out_tif_folder_base + "/" + chip_folder
    lut_bands = [1, 2, 3]  # the three bands of the merged composite
    merge_folder = out_tif_folder + "_merged"
    merge_lut_folder = out_tif_folder + "_merged_lut_dynamic"
    if not os.path.exists(merge_lut_folder):
        os.makedirs(merge_lut_folder)
    for merged_file in glob(merge_folder + "/*.tif"):
        tile_name = os.path.basename(merged_file).split(".")[0]
        # the tile name encodes the acquisition date
        acq_date = download_utils.get_acquisition_date_from_tile_name(tile_name)
        output = merge_lut_folder + "/" + tile_name + ".tif"
        if os.path.isfile(output):
            # output already exists for this date -> skip (idempotent re-run)
            print(tile_name + " already created")
        else:
            print("LUT stretching tile: ", tile_name, end="")
            lut.writeMinMaxToFile(merged_file, acq_date, lut_bands, left_percent, right_percent, lut_txt_file, tile_name)
            lut.lutStretch(merged_file, output, left_percent, right_percent, lut_bands)
            print("...done")
    with open(logfile, 'a') as fout:
        print(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"), "\t", parcel_id, "\tbatch_utils.run_lut_stretch_dynamic:\t", "{0:.3f}".format(time.time() - start), file=fout)
    print(f"LUT stretch dynamic: {time.time() - start} seconds")
def get_merged_dynamically_lutstretched_files_and_acquisition_dates(parcel_id, crop, out_tif_folder_base, logfile):
    """Return (acq_dates, files) for the parcel's dynamically LUT-stretched tiles.

    Lists every .tif in <parcel_id>_<crop>_merged_lut_dynamic and derives each
    acquisition date from the tile name (the file name stem). The two lists
    are index-aligned. A timing line is appended to logfile.
    """
    start = time.time()
    chip_folder = str(parcel_id) + '_' + crop
    merge_lut_folder = out_tif_folder_base + "/" + chip_folder + "_merged_lut_dynamic"
    merged_lut_files = glob(merge_lut_folder + "/*.tif")
    acq_dates = [
        download_utils.get_acquisition_date_from_tile_name(os.path.basename(f).split(".")[0])
        for f in merged_lut_files
    ]
    with open(logfile, 'a') as fout:
        print(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"), "\t", parcel_id, "\tbatch_utils.get_merged_dynamically_lutstretched_files_and_acquisition_dates:\t", "{0:.3f}".format(time.time() - start), file=fout)
    return acq_dates, merged_lut_files
created we do not create it again # we already created the lut stretched image for this date for this parcel so we skip it # lut.lutStretch(merged_file, output, left_percent, right_percent, lut_bands ) # merge_lut_folder = out_tif_folder + "_merged_lut_dynamic" # merge_lut_folder = out_tif_folder + "_merged_lut_magic" # create ndvi image # print(merged_file) #get acquisition date from tile name # print(tile_name) # here again: if the ndvi image image is already created we do not create it again # we already created the ndvi image for this date for this parcel so we skip it # create ndwi image # print(merged_file) #get acquisition date from tile name # print(tile_name) # here again: if the ndwi image image is already created we do not create it again # we already created the ndwi image for this date for this parcel so we skip it # print(merged_ndvi_file) # print(parcel_id, acq_date, ndvi_mean, ndvi_count, ndvi_std, sep=',') # print(merged_ndwi_file) # print(parcel_id, acq_date, ndwi_mean, ndwi_count, ndwi_std, sep=',') # print(parcel_id, acq_date, bs_mean, bs_count, bs_std, sep=',') # l = [[2, 3], [6, 7], [3, 34], [24, 64], [1, 43]] # sorted(l, key=getKey) # list_of_s1_bs_imagettes, was_error_1 = download_utils.get_s1_bs_imagettes(raw_chips_s1_batch_url, lon, lat, start_date, end_date, username, password, chipsize) # download_utils.download_s1_bs_imagettes(url_base, list_of_s1_bs_imagettes, out_s1_bs_folder, username, password) # run the batch chip extract query with the JSON input as POST # and get the response which contains the download folder of the extracted chips # and download the s1 backscatter imagettes # we get and download the s1 bs images by month # search_window_start_date, search_window_end_date # search_window_start_date = "2019-11-15" # search_window_end_date = "2020-09-15" # print(last_day_of_month(dt_search_window_start_date)) # print(add_one_month(dt_search_window_start_date)) # we take all the downloaded s1 bs images for the given parcel and 
rescale them to uint16 # print(tile_name) # we take all the downloaded s1 bs images for the given parcel and rescale them to uint16 # advance year and month by one month # note: in datetime.date, months go from 1 to 12 # this will never fail # lut stretch # merge_lut_folder = out_tif_folder + "_merged_lut_magic" # print(merged_file) #get acquisition date from tile name # print(tile_name) # here again: if the lut stretched image is already created we do not create it again # we already created the lut stretched image for this date for this parcel so we skip it # lut.lutStretchMagicLut(merged_file, output, lut_bands ) # merge_lut_folder = out_tif_folder + "_merged_lut_magic" | 2.34955 | 2 |
2874.py | ErFer7/URI-Python | 1 | 6618231 | <filename>2874.py
# -*- coding: utf-8 -*-
# URI Online Judge problem 2874: read cases until EOF. Each case starts with
# a count n followed by n lines, each holding one byte written in binary;
# decode each byte as code page 437 and print the reconstructed string.
while True:
    try:
        n = int(input())  # number of encoded characters in this case
        res = ""
        for _ in range(n):
            # one line = one 8-bit value in base 2 -> one CP437 character
            res += bytes([int(input(), 2)]).decode("cp437")
        print(res)
    except EOFError:
        # no more input lines: all cases processed
        break
# -*- coding: utf-8 -*-
# URI Online Judge problem 2874: read cases until EOF. Each case starts with
# a count n followed by n lines, each holding one byte written in binary;
# decode each byte as code page 437 and print the reconstructed string.
while True:
    try:
        n = int(input())  # number of encoded characters in this case
        res = ""
        for _ in range(n):
            # one line = one 8-bit value in base 2 -> one CP437 character
            res += bytes([int(input(), 2)]).decode("cp437")
        print(res)
    except EOFError:
        # no more input lines: all cases processed
        break
catkin_ws/src/mrta/src/DCOP/function/TabularFunction.py | utkarshjp7/Multi_Robot_Task_Allocation | 8 | 6618232 | <reponame>utkarshjp7/Multi_Robot_Task_Allocation
# coding=utf-8
'''
Created on 09 mag 2017
@author: <NAME>
Tabular Function, implementation of abstract Function Evaluator.
Used for discrete functions.
This class manages the cost functions and the maximization/minimization
'''
import sys, os
sys.path.append(os.path.abspath('../function/'))
sys.path.append(os.path.abspath('../misc/'))
from FunctionEvaluator import FunctionEvaluator
from NodeArgumentArray import NodeArgumentArray
from itertools import product
class TabularFunction(FunctionEvaluator):
    '''
    Correspondence between parameters and function values.
    The parameters are nodeVariables and the values are costs [NodeVariable -> cost]
    '''
    # Class-level defaults only; __init__ rebinds fresh objects, so these are
    # not shared between instances.
    costTable = dict()
    '''
    list of parameters of cost function (NodeVariables)
    '''
    parameters = list()
    '''
    minimun cost of function
    '''
    minCost = None
    '''
    maximun cost of function
    '''
    maxCost = None
    # free-form report text, see setReport/getReport
    report = ""
    def __init__(self):
        self.parameters = list()
        self.costTable = dict()
        self.minCost = None
        self.maxCost = None
        self.report = ""
    def setReport(self, report):
        self.report = report
    def getReport(self):
        return self.report
    def searchKey(self, params):
        '''
        params: parameters list, key of the function cost
        looks for params in the function
        if it finds it returns the key else returns -1
        '''
        # Linear scan over all stored keys, comparing NodeArgument values
        # position by position (O(entries * arity)).
        for key in self.costTable.keys():
            count = 0
            '''
            the parameters -> key
            '''
            array = key.getArray()
            for i in range(len(array)):
                if ((array[i].getValue() == (params[i].getValue()))):
                    count = count + 1
            # every position matched -> this is the stored key for params
            if(count == len(params)):
                return key
        '''
        there isn't the params in the function
        '''
        return -1
    def addParametersCost(self, params, cost):
        '''
        params: key of cost function (list of NodeVariables)
        cost: cost function with params
        Saves the function value for NodeArgument[] of parameter.
        The params become the key of the cost table.
        '''
        '''
        if there isn't the association parameters - cost
        '''
        if self.searchKey(params) == -1:
            nodeargumentarray = NodeArgumentArray(params)
            self.costTable[nodeargumentarray] = cost
        else:
            '''
            update the cost
            '''
            # NOTE: searchKey is called a second time here; the result of the
            # first call above is not reused.
            key = self.searchKey(params)
            self.costTable[key] = cost
        '''
        update the minimun cost
        '''
        if (self.minCost == None):
            self.minCost = cost
        elif(cost < self.minCost):
            self.minCost = cost
        '''
        update the maximun cost
        '''
        if (self.maxCost == None):
            self.maxCost = cost
        elif(cost > self.maxCost):
            self.maxCost = cost
    def entryNumber(self):
        '''
        How much values does this function have?
        '''
        return len(self.costTable)
    def getCostValues(self):
        '''
        returns the costs of table
        '''
        return self.costTable.values()
    def clearCosts(self):
        '''
        clears the cost function
        '''
        self.costTable = dict()
    def evaluateMod(self, params, modifierTable):
        '''
        params: parameters to evalute
        modifierTable: cost function
        This method evaluates the function when a list of qmessages are given
        '''
        '''
        if modifierTable is empty
        '''
        if(len(modifierTable) == 0):
            return self.evaluate(params)
        cost = self.evaluate(params)
        # sentinel; overwritten on every loop iteration below
        indexOfModifier = -15
        for nodeVariable in modifierTable:
            # index 0/1 selects the q-message component by the sign of the
            # assignment at this variable's position — presumably
            # 0 = positive, 1 = negative; confirm against NodeArgument encoding.
            indexOfModifier = 0
            if params[self.getParameterPosition(nodeVariable)] < 0:
                indexOfModifier = 1
            cost = cost + modifierTable[nodeVariable].getValue(indexOfModifier)
        return cost
    def maximizeWRT(self, x, modifierTable, sender):
        '''
        x: variable respect to maximize
        modifierTable: cost function
        calls the maximization function
        '''
        return self.maxminWRT("max", x, modifierTable, sender)
    def minimizeWRT(self, x, modifierTable):
        '''
        x: variable respect to minimize
        modifierTable: cost function
        calls the minimization function
        '''
        # NOTE(review): maxminWRT takes a 4th argument `sender`; this call
        # passes only three — verify whether minimizeWRT is still used.
        return self.maxminWRT("min", x, modifierTable)
    def maxmin(self, op, maxes, functionArgument, x, xIndex, modifierTable, sender):
        '''
        op: max/min
        maxes: actual maxes about variable
        functionArgument: actual parameters
        x: variable to maximize
        xIndex: index of x in cost function
        modifierTable: cost function
        Calculates the maxes with functionArgument respect x
        '''
        if(op == "max"):
            cost = float("-Infinity")
        elif(op == "min"):
            cost = float("+Infinity")
        # The two candidate value-indexes of x for this sender.
        # NOTE(review): x.size()/2 assumes Python 2 integer division; under
        # Python 3 this yields a float — verify the target runtime.
        indexes = [x.getIndexOfValue(sender.function_id), x.getIndexOfValue(sender.function_id) + x.size()/2]
        for i, xParamIndex in enumerate(indexes):
            functionArgument[xIndex] = xParamIndex
            '''
            NOW it's pretty ready
            this is the part where it is maximized
            '''
            if(modifierTable == None):
                cost = self.evaluate(self.functionArgument(functionArgument))
            else:
                cost = (self.evaluateMod(self.functionArgument(functionArgument), modifierTable))
            # keep the best cost seen so far for this value-index of x
            if(op == "max"):
                if (maxes[i] < cost):
                    maxes[i] = (cost)
            elif(op == "min"):
                if (maxes[i] > cost):
                    maxes[i] = (cost)
        return maxes
    def maxminWRT(self, op, x, modifierTable, sender):
        '''
        op: max/min
        x: variable respect to maximize
        modifierTable: cost function
        Calculates the max value on function respect x
        '''
        '''
        index of x in function
        '''
        xIndex = self.getParameterPosition(x)
        '''
        number of parameters of f
        '''
        fzParametersNumber = self.parametersNumber()
        '''
        The i-th position of list will be the number of possible values of
        the i-th argument of f. At the position of x, there will be
        only one value available
        '''
        numberOfValues = list()
        '''
        the array filled with variable value positions that's gonna be evaluated
        '''
        functionArgument = list()
        '''
        set to zero functionArgument
        '''
        # Collect, for every parameter other than x, its positive/negative
        # value-index pair; product(...) below then enumerates every
        # combination of those assignments.
        for i in range(fzParametersNumber):
            if i != xIndex:
                pos_index = self.getParameter(i).getIndexOfValue(sender.function_id)
                neg_index = self.getParameter(i).getIndexOfValue(-sender.function_id)
                functionArgument.append([pos_index, neg_index])
        functionArguments = product(*functionArgument)
        '''
        maximization array, wrt x possible values
        '''
        maxes = list()
        for index in range(2):
            if(op == "max"):
                maxes.append(float("-Infinity"))
            elif(op == "min"):
                maxes.append(float("+Infinity"))
        # numberOfValues / imax / i look like leftovers of an older
        # odometer-style enumeration: they are written here but never read
        # after itertools.product took over the combination generation.
        for i in range(fzParametersNumber):
            numberOfValues.append(2)
        numberOfValues[xIndex] = 1
        imax = len(numberOfValues) - 1
        i = imax
        for argument in functionArguments:
            argument = list(argument)
            # reserve slot xIndex; maxmin overwrites it with each value of x
            argument.insert(xIndex, 0)
            maxes = self.maxmin(op, maxes, argument, x, xIndex, modifierTable, sender)
        return maxes
    def toString(self):
        # Human-readable dump: header, parameter list, then one line per
        # stored (arguments -> cost) entry.
        ris = "Function evaluator with " + str(self.entryNumber()) + " entries\n"
        ris = ris + "NodeVariable used: "
        for i in range(self.parameters.__len__()):
            ris = ris + str(self.parameters[i].toString()) + " "
        ris = ris + "\n"
        for entry in self.costTable:
            ris = ris + "[ "
            nodeArguments = entry.getArray()
            for i in range(len(nodeArguments)):
                ris = ris + str(nodeArguments[i].toString()) + " "
            ris = ris + "Value: " + str(self.costTable[entry]) + " ]\n"
        return ris
| # coding=utf-8
'''
Created on 09 mag 2017
@author: <NAME>
Tabular Function, implementation of abstract Function Evaluator.
Used for discrete functions.
This class manages the cost functions and the maximization/minimization
'''
import sys, os
sys.path.append(os.path.abspath('../function/'))
sys.path.append(os.path.abspath('../misc/'))
from FunctionEvaluator import FunctionEvaluator
from NodeArgumentArray import NodeArgumentArray
from itertools import product
class TabularFunction(FunctionEvaluator):
    '''
    Correspondence between parameters and function values.
    The parameters are nodeVariables and the values are costs [NodeVariable -> cost]
    '''
    # Class-level defaults only; __init__ rebinds fresh objects, so these are
    # not shared between instances.
    costTable = dict()
    '''
    list of parameters of cost function (NodeVariables)
    '''
    parameters = list()
    '''
    minimun cost of function
    '''
    minCost = None
    '''
    maximun cost of function
    '''
    maxCost = None
    # free-form report text, see setReport/getReport
    report = ""
    def __init__(self):
        self.parameters = list()
        self.costTable = dict()
        self.minCost = None
        self.maxCost = None
        self.report = ""
    def setReport(self, report):
        self.report = report
    def getReport(self):
        return self.report
    def searchKey(self, params):
        '''
        params: parameters list, key of the function cost
        looks for params in the function
        if it finds it returns the key else returns -1
        '''
        # Linear scan over all stored keys, comparing NodeArgument values
        # position by position (O(entries * arity)).
        for key in self.costTable.keys():
            count = 0
            '''
            the parameters -> key
            '''
            array = key.getArray()
            for i in range(len(array)):
                if ((array[i].getValue() == (params[i].getValue()))):
                    count = count + 1
            # every position matched -> this is the stored key for params
            if(count == len(params)):
                return key
        '''
        there isn't the params in the function
        '''
        return -1
    def addParametersCost(self, params, cost):
        '''
        params: key of cost function (list of NodeVariables)
        cost: cost function with params
        Saves the function value for NodeArgument[] of parameter.
        The params become the key of the cost table.
        '''
        '''
        if there isn't the association parameters - cost
        '''
        if self.searchKey(params) == -1:
            nodeargumentarray = NodeArgumentArray(params)
            self.costTable[nodeargumentarray] = cost
        else:
            '''
            update the cost
            '''
            # NOTE: searchKey is called a second time here; the result of the
            # first call above is not reused.
            key = self.searchKey(params)
            self.costTable[key] = cost
        '''
        update the minimun cost
        '''
        if (self.minCost == None):
            self.minCost = cost
        elif(cost < self.minCost):
            self.minCost = cost
        '''
        update the maximun cost
        '''
        if (self.maxCost == None):
            self.maxCost = cost
        elif(cost > self.maxCost):
            self.maxCost = cost
    def entryNumber(self):
        '''
        How much values does this function have?
        '''
        return len(self.costTable)
    def getCostValues(self):
        '''
        returns the costs of table
        '''
        return self.costTable.values()
    def clearCosts(self):
        '''
        clears the cost function
        '''
        self.costTable = dict()
    def evaluateMod(self, params, modifierTable):
        '''
        params: parameters to evalute
        modifierTable: cost function
        This method evaluates the function when a list of qmessages are given
        '''
        '''
        if modifierTable is empty
        '''
        if(len(modifierTable) == 0):
            return self.evaluate(params)
        cost = self.evaluate(params)
        # sentinel; overwritten on every loop iteration below
        indexOfModifier = -15
        for nodeVariable in modifierTable:
            # index 0/1 selects the q-message component by the sign of the
            # assignment at this variable's position — presumably
            # 0 = positive, 1 = negative; confirm against NodeArgument encoding.
            indexOfModifier = 0
            if params[self.getParameterPosition(nodeVariable)] < 0:
                indexOfModifier = 1
            cost = cost + modifierTable[nodeVariable].getValue(indexOfModifier)
        return cost
    def maximizeWRT(self, x, modifierTable, sender):
        '''
        x: variable respect to maximize
        modifierTable: cost function
        calls the maximization function
        '''
        return self.maxminWRT("max", x, modifierTable, sender)
    def minimizeWRT(self, x, modifierTable):
        '''
        x: variable respect to minimize
        modifierTable: cost function
        calls the minimization function
        '''
        # NOTE(review): maxminWRT takes a 4th argument `sender`; this call
        # passes only three — verify whether minimizeWRT is still used.
        return self.maxminWRT("min", x, modifierTable)
    def maxmin(self, op, maxes, functionArgument, x, xIndex, modifierTable, sender):
        '''
        op: max/min
        maxes: actual maxes about variable
        functionArgument: actual parameters
        x: variable to maximize
        xIndex: index of x in cost function
        modifierTable: cost function
        Calculates the maxes with functionArgument respect x
        '''
        if(op == "max"):
            cost = float("-Infinity")
        elif(op == "min"):
            cost = float("+Infinity")
        # The two candidate value-indexes of x for this sender.
        # NOTE(review): x.size()/2 assumes Python 2 integer division; under
        # Python 3 this yields a float — verify the target runtime.
        indexes = [x.getIndexOfValue(sender.function_id), x.getIndexOfValue(sender.function_id) + x.size()/2]
        for i, xParamIndex in enumerate(indexes):
            functionArgument[xIndex] = xParamIndex
            '''
            NOW it's pretty ready
            this is the part where it is maximized
            '''
            if(modifierTable == None):
                cost = self.evaluate(self.functionArgument(functionArgument))
            else:
                cost = (self.evaluateMod(self.functionArgument(functionArgument), modifierTable))
            # keep the best cost seen so far for this value-index of x
            if(op == "max"):
                if (maxes[i] < cost):
                    maxes[i] = (cost)
            elif(op == "min"):
                if (maxes[i] > cost):
                    maxes[i] = (cost)
        return maxes
    def maxminWRT(self, op, x, modifierTable, sender):
        '''
        op: max/min
        x: variable respect to maximize
        modifierTable: cost function
        Calculates the max value on function respect x
        '''
        '''
        index of x in function
        '''
        xIndex = self.getParameterPosition(x)
        '''
        number of parameters of f
        '''
        fzParametersNumber = self.parametersNumber()
        '''
        The i-th position of list will be the number of possible values of
        the i-th argument of f. At the position of x, there will be
        only one value available
        '''
        numberOfValues = list()
        '''
        the array filled with variable value positions that's gonna be evaluated
        '''
        functionArgument = list()
        '''
        set to zero functionArgument
        '''
        # Collect, for every parameter other than x, its positive/negative
        # value-index pair; product(...) below then enumerates every
        # combination of those assignments.
        for i in range(fzParametersNumber):
            if i != xIndex:
                pos_index = self.getParameter(i).getIndexOfValue(sender.function_id)
                neg_index = self.getParameter(i).getIndexOfValue(-sender.function_id)
                functionArgument.append([pos_index, neg_index])
        functionArguments = product(*functionArgument)
        '''
        maximization array, wrt x possible values
        '''
        maxes = list()
        for index in range(2):
            if(op == "max"):
                maxes.append(float("-Infinity"))
            elif(op == "min"):
                maxes.append(float("+Infinity"))
        # numberOfValues / imax / i look like leftovers of an older
        # odometer-style enumeration: they are written here but never read
        # after itertools.product took over the combination generation.
        for i in range(fzParametersNumber):
            numberOfValues.append(2)
        numberOfValues[xIndex] = 1
        imax = len(numberOfValues) - 1
        i = imax
        for argument in functionArguments:
            argument = list(argument)
            # reserve slot xIndex; maxmin overwrites it with each value of x
            argument.insert(xIndex, 0)
            maxes = self.maxmin(op, maxes, argument, x, xIndex, modifierTable, sender)
        return maxes
    def toString(self):
        # Human-readable dump: header, parameter list, then one line per
        # stored (arguments -> cost) entry.
        ris = "Function evaluator with " + str(self.entryNumber()) + " entries\n"
        ris = ris + "NodeVariable used: "
        for i in range(self.parameters.__len__()):
            ris = ris + str(self.parameters[i].toString()) + " "
        ris = ris + "\n"
        for entry in self.costTable:
            ris = ris + "[ "
            nodeArguments = entry.getArray()
            for i in range(len(nodeArguments)):
                ris = ris + str(nodeArguments[i].toString()) + " "
            ris = ris + "Value: " + str(self.costTable[entry]) + " ]\n"
        return ris
At the position of x, there will be only one value available the array filled with variable value positions that's gonna be evaluated set to zero functionArgument maximization array, wrt x possible values | 3.743526 | 4 |
Learn GIT/File_2.py | novdima1/TAU-intro-selenium-1 | 0 | 6618233 | "File 2 data some info"
# test br specific data
| "File 2 data some info"
# test br specific data
| en | 0.35377 | # test br specific data | 0.765555 | 1 |
ecg_balancing/migrations/0001_initial.py | sinnwerkstatt/ecg-balancing | 0 | 6618234 | <gh_stars>0
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'ECGMatrix'
db.create_table(u'ecg_balancing_ecgmatrix', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('version', self.gf('django.db.models.fields.CharField')(default=u'4.1', max_length=6)),
('contact', self.gf('django.db.models.fields.EmailField')(max_length=75)),
))
db.send_create_signal(u'ecg_balancing', ['ECGMatrix'])
# Adding model 'Indicator'
db.create_table(u'ecg_balancing_indicator', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('matrix', self.gf('django.db.models.fields.related.ForeignKey')(related_name=u'indicators', to=orm['ecg_balancing.ECGMatrix'])),
('title', self.gf('django.db.models.fields.CharField')(max_length=255)),
('stakeholder', self.gf('django.db.models.fields.CharField')(max_length=1)),
('ecg_value', self.gf('django.db.models.fields.CharField')(max_length=1)),
('max_evaluation', self.gf('django.db.models.fields.IntegerField')()),
('parent', self.gf('django.db.models.fields.related.ForeignKey')(related_name=u'parent_indicator', to=orm['ecg_balancing.Indicator'])),
('contact', self.gf('django.db.models.fields.EmailField')(max_length=75, null=True, blank=True)),
))
db.send_create_signal(u'ecg_balancing', ['Indicator'])
# Adding model 'Company'
db.create_table(u'ecg_balancing_company', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=255)),
('logo', self.gf('django.db.models.fields.files.ImageField')(max_length=100, null=True, blank=True)),
('street', self.gf('django.db.models.fields.CharField')(max_length=50)),
('zipcode', self.gf('django.db.models.fields.PositiveIntegerField')()),
('city', self.gf('django.db.models.fields.CharField')(max_length=50)),
('country', self.gf('django.db.models.fields.CharField')(max_length=50)),
('website', self.gf('django.db.models.fields.CharField')(max_length=255)),
('email', self.gf('django.db.models.fields.EmailField')(max_length=75)),
('phone', self.gf('django.db.models.fields.CharField')(max_length=50, null=True, blank=True)),
('fax', self.gf('django.db.models.fields.CharField')(max_length=50, null=True, blank=True)),
('industry', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
('activities', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
('employees_number', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
('revenue', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
('foundation_date', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
('owners', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
('managing_directors', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
('model_creation_date', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
))
db.send_create_signal(u'ecg_balancing', ['Company'])
# Adding model 'CompanyBalance'
db.create_table(u'ecg_balancing_companybalance', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('matrix', self.gf('django.db.models.fields.related.ForeignKey')(related_name=u'company_balances', to=orm['ecg_balancing.ECGMatrix'])),
('start_date', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
('end_date', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
('auditor', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
('common_good', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('prospect', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('process_description', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
))
db.send_create_signal(u'ecg_balancing', ['CompanyBalance'])
# Adding M2M table for field peer_companies on 'CompanyBalance'
m2m_table_name = db.shorten_name(u'ecg_balancing_companybalance_peer_companies')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('companybalance', models.ForeignKey(orm[u'ecg_balancing.companybalance'], null=False)),
('company', models.ForeignKey(orm[u'ecg_balancing.company'], null=False))
))
db.create_unique(m2m_table_name, ['companybalance_id', 'company_id'])
# Adding model 'CompanyBalanceIndicator'
db.create_table(u'ecg_balancing_companybalanceindicator', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('company_balance', self.gf('django.db.models.fields.related.ForeignKey')(related_name=u'company_balance', to=orm['ecg_balancing.CompanyBalance'])),
('indicator', self.gf('django.db.models.fields.related.ForeignKey')(related_name=u'company_balance', to=orm['ecg_balancing.Indicator'])),
('description', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
('evaluation', self.gf('django.db.models.fields.IntegerField')()),
))
db.send_create_signal(u'ecg_balancing', ['CompanyBalanceIndicator'])
# Adding model 'UserRole'
db.create_table(u'ecg_balancing_userrole', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('company', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['ecg_balancing.Company'])),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
('role', self.gf('django.db.models.fields.CharField')(max_length=5)),
))
db.send_create_signal(u'ecg_balancing', ['UserRole'])
def backwards(self, orm):
# Deleting model 'ECGMatrix'
db.delete_table(u'ecg_balancing_ecgmatrix')
# Deleting model 'Indicator'
db.delete_table(u'ecg_balancing_indicator')
# Deleting model 'Company'
db.delete_table(u'ecg_balancing_company')
# Deleting model 'CompanyBalance'
db.delete_table(u'ecg_balancing_companybalance')
# Removing M2M table for field peer_companies on 'CompanyBalance'
db.delete_table(db.shorten_name(u'ecg_balancing_companybalance_peer_companies'))
# Deleting model 'CompanyBalanceIndicator'
db.delete_table(u'ecg_balancing_companybalanceindicator')
# Deleting model 'UserRole'
db.delete_table(u'ecg_balancing_userrole')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('<PASSWORD>', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'ecg_balancing.company': {
'Meta': {'object_name': 'Company'},
'activities': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'country': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'employees_number': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'fax': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'foundation_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'industry': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'logo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'managing_directors': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'model_creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'owners': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'revenue': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'street': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'website': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'zipcode': ('django.db.models.fields.PositiveIntegerField', [], {})
},
u'ecg_balancing.companybalance': {
'Meta': {'object_name': 'CompanyBalance'},
'auditor': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'common_good': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'end_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'matrix': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'company_balances'", 'to': u"orm['ecg_balancing.ECGMatrix']"}),
'peer_companies': ('django.db.models.fields.related.ManyToManyField', [], {'max_length': '255', 'to': u"orm['ecg_balancing.Company']", 'null': 'True', 'symmetrical': 'False', 'blank': 'True'}),
'process_description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'prospect': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'start_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
u'ecg_balancing.companybalanceindicator': {
'Meta': {'object_name': 'CompanyBalanceIndicator'},
'company_balance': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'company_balance'", 'to': u"orm['ecg_balancing.CompanyBalance']"}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'evaluation': ('django.db.models.fields.IntegerField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'company_balance'", 'to': u"orm['ecg_balancing.Indicator']"})
},
u'ecg_balancing.ecgmatrix': {
'Meta': {'object_name': 'ECGMatrix'},
'contact': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'version': ('django.db.models.fields.CharField', [], {'default': "u'4.1'", 'max_length': '6'})
},
u'ecg_balancing.indicator': {
'Meta': {'object_name': 'Indicator'},
'contact': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'ecg_value': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'matrix': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'indicators'", 'to': u"orm['ecg_balancing.ECGMatrix']"}),
'max_evaluation': ('django.db.models.fields.IntegerField', [], {}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'parent_indicator'", 'to': u"orm['ecg_balancing.Indicator']"}),
'stakeholder': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'ecg_balancing.userrole': {
'Meta': {'object_name': 'UserRole'},
'company': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['ecg_balancing.Company']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'role': ('django.db.models.fields.CharField', [], {'max_length': '5'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
}
}
complete_apps = ['ecg_balancing'] | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'ECGMatrix'
db.create_table(u'ecg_balancing_ecgmatrix', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('version', self.gf('django.db.models.fields.CharField')(default=u'4.1', max_length=6)),
('contact', self.gf('django.db.models.fields.EmailField')(max_length=75)),
))
db.send_create_signal(u'ecg_balancing', ['ECGMatrix'])
# Adding model 'Indicator'
db.create_table(u'ecg_balancing_indicator', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('matrix', self.gf('django.db.models.fields.related.ForeignKey')(related_name=u'indicators', to=orm['ecg_balancing.ECGMatrix'])),
('title', self.gf('django.db.models.fields.CharField')(max_length=255)),
('stakeholder', self.gf('django.db.models.fields.CharField')(max_length=1)),
('ecg_value', self.gf('django.db.models.fields.CharField')(max_length=1)),
('max_evaluation', self.gf('django.db.models.fields.IntegerField')()),
('parent', self.gf('django.db.models.fields.related.ForeignKey')(related_name=u'parent_indicator', to=orm['ecg_balancing.Indicator'])),
('contact', self.gf('django.db.models.fields.EmailField')(max_length=75, null=True, blank=True)),
))
db.send_create_signal(u'ecg_balancing', ['Indicator'])
# Adding model 'Company'
db.create_table(u'ecg_balancing_company', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=255)),
('logo', self.gf('django.db.models.fields.files.ImageField')(max_length=100, null=True, blank=True)),
('street', self.gf('django.db.models.fields.CharField')(max_length=50)),
('zipcode', self.gf('django.db.models.fields.PositiveIntegerField')()),
('city', self.gf('django.db.models.fields.CharField')(max_length=50)),
('country', self.gf('django.db.models.fields.CharField')(max_length=50)),
('website', self.gf('django.db.models.fields.CharField')(max_length=255)),
('email', self.gf('django.db.models.fields.EmailField')(max_length=75)),
('phone', self.gf('django.db.models.fields.CharField')(max_length=50, null=True, blank=True)),
('fax', self.gf('django.db.models.fields.CharField')(max_length=50, null=True, blank=True)),
('industry', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
('activities', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
('employees_number', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
('revenue', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
('foundation_date', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
('owners', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
('managing_directors', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
('model_creation_date', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
))
db.send_create_signal(u'ecg_balancing', ['Company'])
# Adding model 'CompanyBalance'
db.create_table(u'ecg_balancing_companybalance', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('matrix', self.gf('django.db.models.fields.related.ForeignKey')(related_name=u'company_balances', to=orm['ecg_balancing.ECGMatrix'])),
('start_date', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
('end_date', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
('auditor', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
('common_good', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('prospect', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('process_description', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
))
db.send_create_signal(u'ecg_balancing', ['CompanyBalance'])
# Adding M2M table for field peer_companies on 'CompanyBalance'
m2m_table_name = db.shorten_name(u'ecg_balancing_companybalance_peer_companies')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('companybalance', models.ForeignKey(orm[u'ecg_balancing.companybalance'], null=False)),
('company', models.ForeignKey(orm[u'ecg_balancing.company'], null=False))
))
db.create_unique(m2m_table_name, ['companybalance_id', 'company_id'])
# Adding model 'CompanyBalanceIndicator'
db.create_table(u'ecg_balancing_companybalanceindicator', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('company_balance', self.gf('django.db.models.fields.related.ForeignKey')(related_name=u'company_balance', to=orm['ecg_balancing.CompanyBalance'])),
('indicator', self.gf('django.db.models.fields.related.ForeignKey')(related_name=u'company_balance', to=orm['ecg_balancing.Indicator'])),
('description', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
('evaluation', self.gf('django.db.models.fields.IntegerField')()),
))
db.send_create_signal(u'ecg_balancing', ['CompanyBalanceIndicator'])
# Adding model 'UserRole'
db.create_table(u'ecg_balancing_userrole', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('company', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['ecg_balancing.Company'])),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
('role', self.gf('django.db.models.fields.CharField')(max_length=5)),
))
db.send_create_signal(u'ecg_balancing', ['UserRole'])
def backwards(self, orm):
# Deleting model 'ECGMatrix'
db.delete_table(u'ecg_balancing_ecgmatrix')
# Deleting model 'Indicator'
db.delete_table(u'ecg_balancing_indicator')
# Deleting model 'Company'
db.delete_table(u'ecg_balancing_company')
# Deleting model 'CompanyBalance'
db.delete_table(u'ecg_balancing_companybalance')
# Removing M2M table for field peer_companies on 'CompanyBalance'
db.delete_table(db.shorten_name(u'ecg_balancing_companybalance_peer_companies'))
# Deleting model 'CompanyBalanceIndicator'
db.delete_table(u'ecg_balancing_companybalanceindicator')
# Deleting model 'UserRole'
db.delete_table(u'ecg_balancing_userrole')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('<PASSWORD>', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'ecg_balancing.company': {
'Meta': {'object_name': 'Company'},
'activities': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'country': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'employees_number': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'fax': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'foundation_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'industry': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'logo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'managing_directors': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'model_creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'owners': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'revenue': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'street': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'website': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'zipcode': ('django.db.models.fields.PositiveIntegerField', [], {})
},
u'ecg_balancing.companybalance': {
'Meta': {'object_name': 'CompanyBalance'},
'auditor': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'common_good': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'end_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'matrix': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'company_balances'", 'to': u"orm['ecg_balancing.ECGMatrix']"}),
'peer_companies': ('django.db.models.fields.related.ManyToManyField', [], {'max_length': '255', 'to': u"orm['ecg_balancing.Company']", 'null': 'True', 'symmetrical': 'False', 'blank': 'True'}),
'process_description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'prospect': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'start_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
u'ecg_balancing.companybalanceindicator': {
'Meta': {'object_name': 'CompanyBalanceIndicator'},
'company_balance': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'company_balance'", 'to': u"orm['ecg_balancing.CompanyBalance']"}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'evaluation': ('django.db.models.fields.IntegerField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'company_balance'", 'to': u"orm['ecg_balancing.Indicator']"})
},
u'ecg_balancing.ecgmatrix': {
'Meta': {'object_name': 'ECGMatrix'},
'contact': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'version': ('django.db.models.fields.CharField', [], {'default': "u'4.1'", 'max_length': '6'})
},
u'ecg_balancing.indicator': {
'Meta': {'object_name': 'Indicator'},
'contact': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'ecg_value': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'matrix': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'indicators'", 'to': u"orm['ecg_balancing.ECGMatrix']"}),
'max_evaluation': ('django.db.models.fields.IntegerField', [], {}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'parent_indicator'", 'to': u"orm['ecg_balancing.Indicator']"}),
'stakeholder': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'ecg_balancing.userrole': {
'Meta': {'object_name': 'UserRole'},
'company': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['ecg_balancing.Company']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'role': ('django.db.models.fields.CharField', [], {'max_length': '5'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
}
}
complete_apps = ['ecg_balancing'] | en | 0.750654 | # -*- coding: utf-8 -*- # Adding model 'ECGMatrix' # Adding model 'Indicator' # Adding model 'Company' # Adding model 'CompanyBalance' # Adding M2M table for field peer_companies on 'CompanyBalance' # Adding model 'CompanyBalanceIndicator' # Adding model 'UserRole' # Deleting model 'ECGMatrix' # Deleting model 'Indicator' # Deleting model 'Company' # Deleting model 'CompanyBalance' # Removing M2M table for field peer_companies on 'CompanyBalance' # Deleting model 'CompanyBalanceIndicator' # Deleting model 'UserRole' | 2.287059 | 2 |
ptv/client.py | lucky962/ptv-python-wrapper | 2 | 6618235 | from hashlib import sha1
import json
import hmac
import requests
import urllib
BASE_URL = 'timetableapi.ptv.vic.gov.au'
class PTVClient(object):
    """Client for the Public Transport Victoria (PTV) Timetable API v3.

    Every request carries the developer ID and an HMAC-SHA1 signature of
    the request path + query string, as the PTV API requires.
    """
    def __init__(self, dev_id, api_key, not_secure=None):
        """
        Initialize a PTVClient.

        Parameters
        ----------
        dev_id : str
            Developer ID from PTV.
        api_key : str
            API key from PTV.
        not_secure : bool, optional
            If true, use http instead of https (default = False).
        """
        self.dev_id = dev_id
        self.api_key = api_key
        # https unless the caller explicitly opts out.
        self.protoc = 'http://' if not_secure else 'https://'
    @staticmethod
    def _bool_str(value):
        """Render a boolean as the lowercase 'true'/'false' the API expects."""
        return str(value).lower()
    def _calculateSignature(self, path):
        """
        Calculate the request signature for a url path.

        Parameters
        ----------
        path : str
            Path plus query string to sign (e.g. '/v3/search/foo?devid=1').

        Returns
        -------
        str
            The uppercase hex HMAC-SHA1 signature.
        """
        key = bytes(self.api_key, 'UTF-8')
        raw = bytes(path, 'UTF-8')
        return hmac.new(key, raw, sha1).hexdigest().upper()
    def _getUrl(self, path, params=None):
        """
        Build the signed absolute URL for a request.

        Parameters
        ----------
        path : str
            The target path of the url (e.g. '/v3/search/').
        params : dict, optional
            Query parameters; the caller's dict is left unmodified.

        Returns
        -------
        str
            The url for the request, including devid and signature.
        """
        # Work on a copy: the previous implementation wrote 'devid' into a
        # shared mutable default argument (and into the caller's dict).
        query_params = dict(params) if params else {}
        query_params['devid'] = self.dev_id
        query = "?" + urllib.parse.urlencode(query_params, doseq=True)
        return (self.protoc + BASE_URL + path + query
                + "&signature=" + self._calculateSignature(path + query))
    def _callApi(self, path, params=None):
        """
        Perform a GET request against the API and decode the JSON response.

        Parameters
        ----------
        path : str
            The target path of the url (e.g. '/v3/search/').
        params : dict, optional
            Query parameters for the request.

        Returns
        -------
        dict
            Response of the api call.

        Raises
        ------
        requests.HTTPError
            If the API answers with an error status code.
        """
        response = requests.get(self._getUrl(path, params))
        response.raise_for_status()
        return response.json()
    def get_departures_from_stop(self, route_type, stop_id, route_id=None, platform_numbers=None, direction_id=None, look_backwards=None, gtfs=None, date_utc=None, max_results=None, include_cancelled=None, expand=None):
        """
        View departures from a stop.

        Parameters
        ----------
        route_type : int
            Transport mode identifier; values returned via RouteTypes API.
        stop_id : int
            Stop identifier; values returned by Stops API.
        route_id : str, optional
            Route identifier; values returned by Routes API - v3/routes.
        platform_numbers : list of int, optional
            Filter by platform number at the stop.
        direction_id : int, optional
            Filter by direction of travel; values returned by Directions
            API - /v3/directions/route/{route_id}.
        look_backwards : bool, optional
            Return runs arriving before date_utc (default = false).
            Requires max_results > 0.
        gtfs : bool, optional
            Indicates that stop_id will accept "GTFS stop_id" data.
        date_utc : str, optional
            ISO 8601 UTC date/time filter (default = current date and time).
        max_results : int, optional
            Maximum number of results returned.
        include_cancelled : bool, optional
            Include cancelled services (metropolitan train only).
        expand : list of str, optional
            Objects returned in full: all, stop, route, run, direction,
            disruption.

        Returns
        -------
        dict
            Dictionary of departures.
        """
        path = f"/v3/departures/route_type/{route_type}/stop/{stop_id}"
        if route_id:
            path += f"/route/{route_id}"
        params = {}
        if platform_numbers:
            params['platform_numbers'] = platform_numbers
        # 'is not None' so legitimate falsy values (0, False) are still sent.
        if direction_id is not None:
            params['direction_id'] = direction_id
        if look_backwards is not None:
            params['look_backwards'] = self._bool_str(look_backwards)
        if gtfs is not None:
            params['gtfs'] = self._bool_str(gtfs)
        if date_utc:
            params['date_utc'] = date_utc
        if max_results is not None:
            params['max_results'] = max_results
        if include_cancelled is not None:
            params['include_cancelled'] = self._bool_str(include_cancelled)
        if expand:
            # Pass the list through; urlencode(doseq=True) repeats the key.
            # (str(expand).lower() produced the literal "['all']" before.)
            params['expand'] = expand
        return self._callApi(path, params)
    def get_direction_for_route(self, route_id, route_type=None):
        """
        View directions for a route.

        Parameters
        ----------
        route_id : int
            Route identifier; values returned by Routes API - v3/routes.
        route_type : int, optional
            Transport mode identifier; values returned via RouteTypes API.

        Returns
        -------
        dict
            The directions that the specified route travels in.
        """
        path = f"/v3/directions/route/{route_id}"
        # route_type 0 (Train) is valid, so compare against None explicitly.
        if route_type is not None:
            path += f"/route_type/{route_type}"
        return self._callApi(path)
    def get_route_for_direction(self, direction_id):
        """
        View all routes for a direction of travel.

        Parameters
        ----------
        direction_id : int
            Direction identifier; values returned by Directions API -
            /v3/directions/route/{route_id}.

        Returns
        -------
        dict
            All routes that travel in the specified direction.
        """
        return self._callApi(f"/v3/directions/{direction_id}")
    def get_disruptions(self, route_id=None, stop_id=None, disruption_status=None):
        """
        View all disruptions.

        Parameters
        ----------
        route_id : int, optional
            Route identifier; values returned by Routes API - v3/routes.
        stop_id : int, optional
            Stop identifier; values returned by Stops API - v3/stops.
        disruption_status : str, optional
            Filter by status of disruption.

        Returns
        -------
        dict
            All disruption information (if any exists).
        """
        path = "/v3/disruptions"
        if route_id is not None:
            path += f"/route/{route_id}"
        if stop_id is not None:
            path += f"/stop/{stop_id}"
        params = {}
        if disruption_status:
            params['disruption_status'] = disruption_status
        return self._callApi(path, params)
    def get_disruption(self, disruption_id):
        """
        View a specific disruption.

        Parameters
        ----------
        disruption_id : int
            Disruption identifier; values returned by Disruptions API -
            /v3/disruptions OR /v3/disruptions/route/{route_id}.

        Returns
        -------
        dict
            Disruption information for the specified disruption ID.
        """
        return self._callApi(f"/v3/disruptions/{disruption_id}")
    def get_disruption_modes(self):
        """
        Get all disruption modes.

        Returns
        -------
        dict
            Disruption specific modes.
        """
        return self._callApi("/v3/disruptions/modes")
    def get_outlets(self, latitude=None, longitude=None, max_distance=None, max_results=None):
        """
        List all ticket outlets.

        Parameters
        ----------
        latitude : float, optional
            Geographic coordinate of latitude.
        longitude : float, optional
            Geographic coordinate of longitude.
        max_distance : int, optional
            Filter by maximum distance (in metres) from the location.
        max_results : int, optional
            Maximum number of results returned (default = 30).

        Returns
        -------
        dict
            Ticket outlets.
        """
        path = "/v3/outlets"
        # Coordinates of exactly 0.0 are valid, so test against None.
        if latitude is not None and longitude is not None:
            path += f"/location/{latitude},{longitude}"
        params = {}
        if max_distance is not None:
            params['max_distance'] = max_distance
        if max_results is not None:
            params['max_results'] = max_results
        return self._callApi(path, params)
    def get_pattern(self, run_id, route_type, expand, stop_id=None, date_utc=None):
        """
        View the stopping pattern for a specific trip/service run.

        Parameters
        ----------
        run_id : int
            Trip/service run identifier; values returned by Runs API -
            /v3/route/{route_id} and Departures API.
        route_type : int
            Transport mode identifier; values returned via RouteTypes API.
        expand : list of str
            Objects returned in full: all, stop, route, run, direction,
            disruption. By default disruptions are expanded.
        stop_id : int, optional
            Filter by stop_id; values returned by Stops API.
        date_utc : str, optional
            Filter by date and time of the request (ISO 8601 UTC format).

        Returns
        -------
        dict
            The stopping pattern of the specified run and route type.
        """
        path = f"/v3/pattern/run/{run_id}/route_type/{route_type}"
        params = {'expand': expand}
        if stop_id is not None:
            params['stop_id'] = stop_id
        if date_utc:
            params['date_utc'] = date_utc
        return self._callApi(path, params)
    def get_routes(self, route_types=None, route_name=None):
        """
        View route names and numbers for all routes.

        Parameters
        ----------
        route_types : list of int, optional
            Filter by route_type; values returned via RouteTypes API.
        route_name : str, optional
            Filter by route name (accepts partial matches).

        Returns
        -------
        dict
            Route names and numbers for all routes of all route types.
        """
        params = {}
        if route_types:
            params['route_types'] = route_types
        if route_name:
            params['route_name'] = route_name
        return self._callApi("/v3/routes", params)
    def get_route(self, route_id):
        """
        View route name and number for a specific route ID.

        Parameters
        ----------
        route_id : int
            Route identifier; values returned by Departures, Directions
            and Disruptions APIs.

        Returns
        -------
        dict
            The route name and number for the specified route ID.
        """
        return self._callApi(f"/v3/routes/{route_id}")
    def get_route_types(self):
        """
        View all route types and their names.

        Returns
        -------
        dict
            All route types (identifiers of transport modes) and names.
        """
        return self._callApi("/v3/route_types")
    def get_run(self, run_id, route_type=None):
        """
        View the trip/service for a specific run ID and route type.

        Parameters
        ----------
        run_id : int
            Trip/service run identifier; values returned by Runs API -
            /v3/route/{route_id} and Departures API.
        route_type : int, optional
            Transport mode identifier; values returned via RouteTypes API.

        Returns
        -------
        dict
            The trip/service run details for the specified run ID.
        """
        path = f"/v3/runs/{run_id}"
        # route_type 0 (Train) is valid, so compare against None explicitly.
        if route_type is not None:
            path += f"/route_type/{route_type}"
        return self._callApi(path)
    def get_runs_for_route(self, route_id, route_type=None):
        """
        View all trip/service runs for a specific route ID.

        Parameters
        ----------
        route_id : int
            Route identifier; values returned by Routes API - v3/routes.
        route_type : int, optional
            Transport mode identifier; values returned via RouteTypes API.

        Returns
        -------
        dict
            All trip/service run details for the specified route ID.
        """
        path = f"/v3/runs/route/{route_id}"
        if route_type is not None:
            path += f"/route_type/{route_type}"
        return self._callApi(path)
    def search(self, search_term, route_types=None, latitude=None, longitude=None, max_distance=None, include_addresses=None, include_outlets=None, match_stop_by_suburb=None, match_route_by_suburb=None, match_stop_by_gtfs_stop_id=None):
        """
        View stops, routes and myki outlets that match the search term.

        Parameters
        ----------
        search_term : str
            Search text (if numeric and/or under 3 characters, the API
            only returns routes).
        route_types : list of int, optional
            Filter by route_type; values returned via RouteTypes API.
        latitude : float, optional
            Filter by geographic coordinate of latitude.
        longitude : float, optional
            Filter by geographic coordinate of longitude.
        max_distance : float, optional
            Filter by maximum distance (in metres) from the location.
        include_addresses : bool, optional
            Placeholder for future development; currently unavailable.
        include_outlets : bool, optional
            Return outlets in the response (default = true).
        match_stop_by_suburb : bool, optional
            Find stops by suburbs in the search term (default = true).
        match_route_by_suburb : bool, optional
            Find routes by suburbs in the search term (default = true).
        match_stop_by_gtfs_stop_id : bool, optional
            Search stops by metlink stop ID (default = false).

        Returns
        -------
        dict
            Stops, routes and myki ticket outlets matching the term.
        """
        path = f"/v3/search/{urllib.parse.quote(search_term)}"
        params = {}
        if route_types:
            params['route_types'] = route_types
        if latitude is not None:
            params['latitude'] = latitude
        if longitude is not None:
            params['longitude'] = longitude
        if max_distance is not None:
            params['max_distance'] = max_distance
        # Serialize every explicitly-passed flag as lowercase true/false.
        for name, value in (
            ('include_addresses', include_addresses),
            ('include_outlets', include_outlets),
            ('match_stop_by_suburb', match_stop_by_suburb),
            ('match_route_by_suburb', match_route_by_suburb),
            ('match_stop_by_gtfs_stop_id', match_stop_by_gtfs_stop_id),
        ):
            if value is not None:
                params[name] = self._bool_str(value)
        return self._callApi(path, params)
    def get_stop(self, stop_id, route_type, stop_location=None, stop_amenities=None, stop_accessibility=None, stop_contact=None, stop_ticket=None, gtfs=None, stop_staffing=None, stop_disruptions=None):
        """
        View facilities at a specific stop (Metro and V/Line stations only).

        Parameters
        ----------
        stop_id : int
            Stop identifier; values returned by Stops API.
        route_type : int
            Transport mode identifier; values returned via RouteTypes API.
        stop_location : bool, optional
            Return stop location information (default = false).
        stop_amenities : bool, optional
            Return stop amenity information (default = false).
        stop_accessibility : bool, optional
            Return stop accessibility information (default = false).
        stop_contact : bool, optional
            Return stop contact information (default = false).
        stop_ticket : bool, optional
            Return stop ticket information (default = false).
        gtfs : bool, optional
            Indicates whether the stop_id is a GTFS ID or not.
        stop_staffing : bool, optional
            Return stop staffing information (default = false).
        stop_disruptions : bool, optional
            Return stop disruption information (default = false).

        Returns
        -------
        dict
            Stop location, amenity and accessibility information.
        """
        path = f"/v3/stops/{stop_id}/route_type/{route_type}"
        params = {}
        # Serialize every explicitly-passed flag as lowercase true/false.
        for name, value in (
            ('stop_location', stop_location),
            ('stop_amenities', stop_amenities),
            ('stop_accessibility', stop_accessibility),
            ('stop_contact', stop_contact),
            ('stop_ticket', stop_ticket),
            ('gtfs', gtfs),
            ('stop_staffing', stop_staffing),
            ('stop_disruptions', stop_disruptions),
        ):
            if value is not None:
                params[name] = self._bool_str(value)
        return self._callApi(path, params)
    def get_stops_for_route(self, route_id, route_type, direction_id=None, stop_disruptions=None):
        """
        View all stops on a specific route.

        Parameters
        ----------
        route_id : int
            Route identifier; values returned by Routes API - v3/routes.
        route_type : int
            Transport mode identifier; values returned via RouteTypes API.
        direction_id : int, optional
            Optional direction; values returned by Directions API. When
            set, stop sequence information is returned in the response.
        stop_disruptions : bool, optional
            Return stop disruption information (default = false).

        Returns
        -------
        dict
            All stops on the specified route.
        """
        path = f"/v3/stops/route/{route_id}/route_type/{route_type}"
        params = {}
        if direction_id is not None:
            params['direction_id'] = direction_id
        if stop_disruptions is not None:
            params['stop_disruptions'] = self._bool_str(stop_disruptions)
        return self._callApi(path, params)
    def get_stops_for_location(self, latitude, longitude, route_types=None, max_results=None, max_distance=None, stop_disruptions=None):
        """
        View all stops near a specific location.

        Parameters
        ----------
        latitude : float
            Geographic coordinate of latitude.
        longitude : float
            Geographic coordinate of longitude.
        route_types : list of int, optional
            Filter by route_type; values returned via RouteTypes API.
        max_results : int, optional
            Maximum number of results returned (default = 30).
        max_distance : float, optional
            Filter by maximum distance (in metres) from the location
            (default = 300).
        stop_disruptions : bool, optional
            Return stop disruption information (default = false).

        Returns
        -------
        dict
            All stops near the specified location.
        """
        path = f"/v3/stops/location/{latitude},{longitude}"
        params = {}
        if route_types:
            params['route_types'] = route_types
        if max_results is not None:
            params['max_results'] = max_results
        if max_distance is not None:
            params['max_distance'] = max_distance
        if stop_disruptions is not None:
            # Lowercase like every other boolean flag (was passed raw before).
            params['stop_disruptions'] = self._bool_str(stop_disruptions)
        return self._callApi(path, params)
| from hashlib import sha1
import json
import hmac
import requests
import urllib
BASE_URL = 'timetableapi.ptv.vic.gov.au'
class PTVClient(object):
    """Client for the Public Transport Victoria (PTV) Timetable API v3.

    Every request carries the developer ID and an HMAC-SHA1 signature of
    the request path + query string, as the PTV API requires.
    """
    def __init__(self, dev_id, api_key, not_secure=None):
        """
        Initialize a PTVClient.

        Parameters
        ----------
        dev_id : str
            Developer ID from PTV.
        api_key : str
            API key from PTV.
        not_secure : bool, optional
            If true, use http instead of https (default = False).
        """
        self.dev_id = dev_id
        self.api_key = api_key
        # https unless the caller explicitly opts out.
        self.protoc = 'http://' if not_secure else 'https://'
    @staticmethod
    def _bool_str(value):
        """Render a boolean as the lowercase 'true'/'false' the API expects."""
        return str(value).lower()
    def _calculateSignature(self, path):
        """
        Calculate the request signature for a url path.

        Parameters
        ----------
        path : str
            Path plus query string to sign (e.g. '/v3/search/foo?devid=1').

        Returns
        -------
        str
            The uppercase hex HMAC-SHA1 signature.
        """
        key = bytes(self.api_key, 'UTF-8')
        raw = bytes(path, 'UTF-8')
        return hmac.new(key, raw, sha1).hexdigest().upper()
    def _getUrl(self, path, params=None):
        """
        Build the signed absolute URL for a request.

        Parameters
        ----------
        path : str
            The target path of the url (e.g. '/v3/search/').
        params : dict, optional
            Query parameters; the caller's dict is left unmodified.

        Returns
        -------
        str
            The url for the request, including devid and signature.
        """
        # Work on a copy: the previous implementation wrote 'devid' into a
        # shared mutable default argument (and into the caller's dict).
        query_params = dict(params) if params else {}
        query_params['devid'] = self.dev_id
        query = "?" + urllib.parse.urlencode(query_params, doseq=True)
        return (self.protoc + BASE_URL + path + query
                + "&signature=" + self._calculateSignature(path + query))
    def _callApi(self, path, params=None):
        """
        Perform a GET request against the API and decode the JSON response.

        Parameters
        ----------
        path : str
            The target path of the url (e.g. '/v3/search/').
        params : dict, optional
            Query parameters for the request.

        Returns
        -------
        dict
            Response of the api call.

        Raises
        ------
        requests.HTTPError
            If the API answers with an error status code.
        """
        response = requests.get(self._getUrl(path, params))
        response.raise_for_status()
        return response.json()
    def get_departures_from_stop(self, route_type, stop_id, route_id=None, platform_numbers=None, direction_id=None, look_backwards=None, gtfs=None, date_utc=None, max_results=None, include_cancelled=None, expand=None):
        """
        View departures from a stop.

        Parameters
        ----------
        route_type : int
            Transport mode identifier; values returned via RouteTypes API.
        stop_id : int
            Stop identifier; values returned by Stops API.
        route_id : str, optional
            Route identifier; values returned by Routes API - v3/routes.
        platform_numbers : list of int, optional
            Filter by platform number at the stop.
        direction_id : int, optional
            Filter by direction of travel; values returned by Directions
            API - /v3/directions/route/{route_id}.
        look_backwards : bool, optional
            Return runs arriving before date_utc (default = false).
            Requires max_results > 0.
        gtfs : bool, optional
            Indicates that stop_id will accept "GTFS stop_id" data.
        date_utc : str, optional
            ISO 8601 UTC date/time filter (default = current date and time).
        max_results : int, optional
            Maximum number of results returned.
        include_cancelled : bool, optional
            Include cancelled services (metropolitan train only).
        expand : list of str, optional
            Objects returned in full: all, stop, route, run, direction,
            disruption.

        Returns
        -------
        dict
            Dictionary of departures.
        """
        path = f"/v3/departures/route_type/{route_type}/stop/{stop_id}"
        if route_id:
            path += f"/route/{route_id}"
        params = {}
        if platform_numbers:
            params['platform_numbers'] = platform_numbers
        # 'is not None' so legitimate falsy values (0, False) are still sent.
        if direction_id is not None:
            params['direction_id'] = direction_id
        if look_backwards is not None:
            params['look_backwards'] = self._bool_str(look_backwards)
        if gtfs is not None:
            params['gtfs'] = self._bool_str(gtfs)
        if date_utc:
            params['date_utc'] = date_utc
        if max_results is not None:
            params['max_results'] = max_results
        if include_cancelled is not None:
            params['include_cancelled'] = self._bool_str(include_cancelled)
        if expand:
            # Pass the list through; urlencode(doseq=True) repeats the key.
            # (str(expand).lower() produced the literal "['all']" before.)
            params['expand'] = expand
        return self._callApi(path, params)
    def get_direction_for_route(self, route_id, route_type=None):
        """
        View directions for a route.

        Parameters
        ----------
        route_id : int
            Route identifier; values returned by Routes API - v3/routes.
        route_type : int, optional
            Transport mode identifier; values returned via RouteTypes API.

        Returns
        -------
        dict
            The directions that the specified route travels in.
        """
        path = f"/v3/directions/route/{route_id}"
        # route_type 0 (Train) is valid, so compare against None explicitly.
        if route_type is not None:
            path += f"/route_type/{route_type}"
        return self._callApi(path)
    def get_route_for_direction(self, direction_id):
        """
        View all routes for a direction of travel.

        Parameters
        ----------
        direction_id : int
            Direction identifier; values returned by Directions API -
            /v3/directions/route/{route_id}.

        Returns
        -------
        dict
            All routes that travel in the specified direction.
        """
        return self._callApi(f"/v3/directions/{direction_id}")
    def get_disruptions(self, route_id=None, stop_id=None, disruption_status=None):
        """
        View all disruptions.

        Parameters
        ----------
        route_id : int, optional
            Route identifier; values returned by Routes API - v3/routes.
        stop_id : int, optional
            Stop identifier; values returned by Stops API - v3/stops.
        disruption_status : str, optional
            Filter by status of disruption.

        Returns
        -------
        dict
            All disruption information (if any exists).
        """
        path = "/v3/disruptions"
        if route_id is not None:
            path += f"/route/{route_id}"
        if stop_id is not None:
            path += f"/stop/{stop_id}"
        params = {}
        if disruption_status:
            params['disruption_status'] = disruption_status
        return self._callApi(path, params)
    def get_disruption(self, disruption_id):
        """
        View a specific disruption.

        Parameters
        ----------
        disruption_id : int
            Disruption identifier; values returned by Disruptions API -
            /v3/disruptions OR /v3/disruptions/route/{route_id}.

        Returns
        -------
        dict
            Disruption information for the specified disruption ID.
        """
        return self._callApi(f"/v3/disruptions/{disruption_id}")
    def get_disruption_modes(self):
        """
        Get all disruption modes.

        Returns
        -------
        dict
            Disruption specific modes.
        """
        return self._callApi("/v3/disruptions/modes")
    def get_outlets(self, latitude=None, longitude=None, max_distance=None, max_results=None):
        """
        List all ticket outlets.

        Parameters
        ----------
        latitude : float, optional
            Geographic coordinate of latitude.
        longitude : float, optional
            Geographic coordinate of longitude.
        max_distance : int, optional
            Filter by maximum distance (in metres) from the location.
        max_results : int, optional
            Maximum number of results returned (default = 30).

        Returns
        -------
        dict
            Ticket outlets.
        """
        path = "/v3/outlets"
        # Coordinates of exactly 0.0 are valid, so test against None.
        if latitude is not None and longitude is not None:
            path += f"/location/{latitude},{longitude}"
        params = {}
        if max_distance is not None:
            params['max_distance'] = max_distance
        if max_results is not None:
            params['max_results'] = max_results
        return self._callApi(path, params)
    def get_pattern(self, run_id, route_type, expand, stop_id=None, date_utc=None):
        """
        View the stopping pattern for a specific trip/service run.

        Parameters
        ----------
        run_id : int
            Trip/service run identifier; values returned by Runs API -
            /v3/route/{route_id} and Departures API.
        route_type : int
            Transport mode identifier; values returned via RouteTypes API.
        expand : list of str
            Objects returned in full: all, stop, route, run, direction,
            disruption. By default disruptions are expanded.
        stop_id : int, optional
            Filter by stop_id; values returned by Stops API.
        date_utc : str, optional
            Filter by date and time of the request (ISO 8601 UTC format).

        Returns
        -------
        dict
            The stopping pattern of the specified run and route type.
        """
        path = f"/v3/pattern/run/{run_id}/route_type/{route_type}"
        params = {'expand': expand}
        if stop_id is not None:
            params['stop_id'] = stop_id
        if date_utc:
            params['date_utc'] = date_utc
        return self._callApi(path, params)
    def get_routes(self, route_types=None, route_name=None):
        """
        View route names and numbers for all routes.

        Parameters
        ----------
        route_types : list of int, optional
            Filter by route_type; values returned via RouteTypes API.
        route_name : str, optional
            Filter by route name (accepts partial matches).

        Returns
        -------
        dict
            Route names and numbers for all routes of all route types.
        """
        params = {}
        if route_types:
            params['route_types'] = route_types
        if route_name:
            params['route_name'] = route_name
        return self._callApi("/v3/routes", params)
    def get_route(self, route_id):
        """
        View route name and number for a specific route ID.

        Parameters
        ----------
        route_id : int
            Route identifier; values returned by Departures, Directions
            and Disruptions APIs.

        Returns
        -------
        dict
            The route name and number for the specified route ID.
        """
        return self._callApi(f"/v3/routes/{route_id}")
    def get_route_types(self):
        """
        View all route types and their names.

        Returns
        -------
        dict
            All route types (identifiers of transport modes) and names.
        """
        return self._callApi("/v3/route_types")
    def get_run(self, run_id, route_type=None):
        """
        View the trip/service for a specific run ID and route type.

        Parameters
        ----------
        run_id : int
            Trip/service run identifier; values returned by Runs API -
            /v3/route/{route_id} and Departures API.
        route_type : int, optional
            Transport mode identifier; values returned via RouteTypes API.

        Returns
        -------
        dict
            The trip/service run details for the specified run ID.
        """
        path = f"/v3/runs/{run_id}"
        # route_type 0 (Train) is valid, so compare against None explicitly.
        if route_type is not None:
            path += f"/route_type/{route_type}"
        return self._callApi(path)
    def get_runs_for_route(self, route_id, route_type=None):
        """
        View all trip/service runs for a specific route ID.

        Parameters
        ----------
        route_id : int
            Route identifier; values returned by Routes API - v3/routes.
        route_type : int, optional
            Transport mode identifier; values returned via RouteTypes API.

        Returns
        -------
        dict
            All trip/service run details for the specified route ID.
        """
        path = f"/v3/runs/route/{route_id}"
        if route_type is not None:
            path += f"/route_type/{route_type}"
        return self._callApi(path)
    def search(self, search_term, route_types=None, latitude=None, longitude=None, max_distance=None, include_addresses=None, include_outlets=None, match_stop_by_suburb=None, match_route_by_suburb=None, match_stop_by_gtfs_stop_id=None):
        """
        View stops, routes and myki outlets that match the search term.

        Parameters
        ----------
        search_term : str
            Search text (if numeric and/or under 3 characters, the API
            only returns routes).
        route_types : list of int, optional
            Filter by route_type; values returned via RouteTypes API.
        latitude : float, optional
            Filter by geographic coordinate of latitude.
        longitude : float, optional
            Filter by geographic coordinate of longitude.
        max_distance : float, optional
            Filter by maximum distance (in metres) from the location.
        include_addresses : bool, optional
            Placeholder for future development; currently unavailable.
        include_outlets : bool, optional
            Return outlets in the response (default = true).
        match_stop_by_suburb : bool, optional
            Find stops by suburbs in the search term (default = true).
        match_route_by_suburb : bool, optional
            Find routes by suburbs in the search term (default = true).
        match_stop_by_gtfs_stop_id : bool, optional
            Search stops by metlink stop ID (default = false).

        Returns
        -------
        dict
            Stops, routes and myki ticket outlets matching the term.
        """
        path = f"/v3/search/{urllib.parse.quote(search_term)}"
        params = {}
        if route_types:
            params['route_types'] = route_types
        if latitude is not None:
            params['latitude'] = latitude
        if longitude is not None:
            params['longitude'] = longitude
        if max_distance is not None:
            params['max_distance'] = max_distance
        # Serialize every explicitly-passed flag as lowercase true/false.
        for name, value in (
            ('include_addresses', include_addresses),
            ('include_outlets', include_outlets),
            ('match_stop_by_suburb', match_stop_by_suburb),
            ('match_route_by_suburb', match_route_by_suburb),
            ('match_stop_by_gtfs_stop_id', match_stop_by_gtfs_stop_id),
        ):
            if value is not None:
                params[name] = self._bool_str(value)
        return self._callApi(path, params)
    def get_stop(self, stop_id, route_type, stop_location=None, stop_amenities=None, stop_accessibility=None, stop_contact=None, stop_ticket=None, gtfs=None, stop_staffing=None, stop_disruptions=None):
        """
        View facilities at a specific stop (Metro and V/Line stations only).

        Parameters
        ----------
        stop_id : int
            Stop identifier; values returned by Stops API.
        route_type : int
            Transport mode identifier; values returned via RouteTypes API.
        stop_location : bool, optional
            Return stop location information (default = false).
        stop_amenities : bool, optional
            Return stop amenity information (default = false).
        stop_accessibility : bool, optional
            Return stop accessibility information (default = false).
        stop_contact : bool, optional
            Return stop contact information (default = false).
        stop_ticket : bool, optional
            Return stop ticket information (default = false).
        gtfs : bool, optional
            Indicates whether the stop_id is a GTFS ID or not.
        stop_staffing : bool, optional
            Return stop staffing information (default = false).
        stop_disruptions : bool, optional
            Return stop disruption information (default = false).

        Returns
        -------
        dict
            Stop location, amenity and accessibility information.
        """
        path = f"/v3/stops/{stop_id}/route_type/{route_type}"
        params = {}
        # Serialize every explicitly-passed flag as lowercase true/false.
        for name, value in (
            ('stop_location', stop_location),
            ('stop_amenities', stop_amenities),
            ('stop_accessibility', stop_accessibility),
            ('stop_contact', stop_contact),
            ('stop_ticket', stop_ticket),
            ('gtfs', gtfs),
            ('stop_staffing', stop_staffing),
            ('stop_disruptions', stop_disruptions),
        ):
            if value is not None:
                params[name] = self._bool_str(value)
        return self._callApi(path, params)
    def get_stops_for_route(self, route_id, route_type, direction_id=None, stop_disruptions=None):
        """
        View all stops on a specific route.

        Parameters
        ----------
        route_id : int
            Route identifier; values returned by Routes API - v3/routes.
        route_type : int
            Transport mode identifier; values returned via RouteTypes API.
        direction_id : int, optional
            Optional direction; values returned by Directions API. When
            set, stop sequence information is returned in the response.
        stop_disruptions : bool, optional
            Return stop disruption information (default = false).

        Returns
        -------
        dict
            All stops on the specified route.
        """
        path = f"/v3/stops/route/{route_id}/route_type/{route_type}"
        params = {}
        if direction_id is not None:
            params['direction_id'] = direction_id
        if stop_disruptions is not None:
            params['stop_disruptions'] = self._bool_str(stop_disruptions)
        return self._callApi(path, params)
    def get_stops_for_location(self, latitude, longitude, route_types=None, max_results=None, max_distance=None, stop_disruptions=None):
        """
        View all stops near a specific location.

        Parameters
        ----------
        latitude : float
            Geographic coordinate of latitude.
        longitude : float
            Geographic coordinate of longitude.
        route_types : list of int, optional
            Filter by route_type; values returned via RouteTypes API.
        max_results : int, optional
            Maximum number of results returned (default = 30).
        max_distance : float, optional
            Filter by maximum distance (in metres) from the location
            (default = 300).
        stop_disruptions : bool, optional
            Return stop disruption information (default = false).

        Returns
        -------
        dict
            All stops near the specified location.
        """
        path = f"/v3/stops/location/{latitude},{longitude}"
        params = {}
        if route_types:
            params['route_types'] = route_types
        if max_results is not None:
            params['max_results'] = max_results
        if max_distance is not None:
            params['max_distance'] = max_distance
        if stop_disruptions is not None:
            # Lowercase like every other boolean flag (was passed raw before).
            params['stop_disruptions'] = self._bool_str(stop_disruptions)
        return self._callApi(path, params)
| en | 0.591041 | Class to make calls to PTV Api Initialize a PTVClient Parameters ---------- dev_id : str Developer ID from PTV api_key : str API key from PTV Optional Parameters ------------------- not_secure : bool Indicates whether or not to use http (default = false) Calculates a signature from url Parameters ---------- path : str The target path of the url (e.g '/v3/search/') Returns ------- signature : str The hex signature. Creates URL Parameters ---------- path : str The target path of the url (e.g '/v3/search/') params : dict Dictionary containing parameters for request Returns ------- url : str The url for the request Calls API Parameters ---------- path : str The target path of the url (e.g '/v3/search/') params : dict Dictionary containing parameters for request Returns ------- response : dict Response of api call as dict View departures from a stop Parameters ---------- route_type : integer Number identifying transport mode; values returned via RouteTypes API stop_id : integer Identifier of stop; values returned by Stops API Optional Parameters ------------------- route_id : string Identifier of route; values returned by RoutesAPI - v3/routes platform_numbers : Array[integer] Filter by platform number at stop direction_id : integer Filter by indentifier of direction of travel; values returned by Directions Api - /v3/directions/route/{route_id} look_backwards : boolean Indicates if filtering runs (and their departures) to those that arrive at destination before date_utc (default = false). Requires max_results > 0. 
gtfs : boolean Indicates that stop_id parameter will accept "GTFS stop_id" data date_utc : string Filter by the date and time of the request (ISO 8601 UTC format) (default = current date and time) max_results : integer Maximum number of results returned include_cancelled : boolean Indicates if cancelled services (if they exist) are returned (default = false) - metropolitan train only expand : Array[string] List objects to be returned in full (i.e. expanded) - options include: all, stop, route, run, direction, disruption Returns ------- Departures : dict Dictionary of departures View directions for route Parameters ---------- route_id : int Identifier of route; values returned by Routes API - v3/routes Optional Parameters ------------------- route_type : int Number identifying transport mode; values returned via RouteTypes API Returns ------- Directions : dict The directions that a specified route travels in. View all routes for direction. Parameters ---------- direction_id : int Identifier of direction of travel; values returned by Directions API - /v3/directions/route/{route_id} Returns ------- Routes : dict All routes that travel in the specified direction. View all disruptions Optional Parameters ------------------- route_id : int Identifier of route; values returned by Routes API - v3/routes stop_id : int Identifier of stop; values returned by Stops API - v3/stops disruption_status : str Filter by status of disruption Returns ------- disruptions : dict All disruption information (if any exists). View a specific disruption Parameters ---------- disruption_id : int Identifier of disruption; values returned by Disruptions API - /v3/disruptions OR /v3/disruptions/route/{route_id} Returns ------- disruptions : dict Disruption information for the specified disruption ID. 
Get all disruption modes Returns ------- modes : dict Disruption specific modes List all ticket outlets Optional Parameters ------------------- latitude : int Geographic coordinate of latitude longitude : int Geographic coordinate of longitude max_distance : int Maximum number of results returned max_results : int Maximum number of results returned (default = 30) Returns ------- outlets : dict Ticket outlets View the stopping pattern for a specific trip/service run Parameters ---------- run_id : int Identifier of a trip/service run; values returned by Runs API - /v3/route/{route_id} and Departures API route_type : int Number identifying transport mode; values returned via RouteTypes API expand : Array[str] Objects to be returned in full (i.e. expanded) - options include: all, stop, route, run, direction, disruption. By default disruptions are expanded. Optional Parameters ------------------- stop_id : int Filter by stop_id; values returned by Stops API date_utc : str Filter by the date and time of the request (ISO 8601 UTC format) Returns ------- pattern : dict The stopping pattern of the specified trip/service run and route type. View route names and numbers for all routes Optional Parameters ------------------- route_types : Array[int] Filter by route_type; values returned via RouteTypes API route_name : str Filter by name of route (accepts partial route name matches) Returns ------- routes : dict Route names and numbers for all routes of all route types. View route name and number for specific route ID Parameters ---------- route_id : int Identifier of route; values returned by Departures, Directions and Disruptions APIs Returns ------- route : dict The route name and number for the specified route ID. View all route types and their names Returns ------- RouteTypes : dict All route types (i.e. identifiers of transport modes) and their names. 
View the trip/service for a specific run ID and route type Parameters ---------- run_id : int Identifier of a trip/service run; values returned by Runs API - /v3/route/{route_id} and Departures API Optional Parameters ------------------- route_type : int Number identifying transport mode; values returned via RouteTypes API Returns ------- run : dict The trip/service run details for the run ID and route type specified. View all trip/service runs for a specific route ID Parameters ---------- route_id : int Identifier of route; values returned by Routes API - v3/routes. Optional Parameters ------------------- route_type : int Number identifying transport mode; values returned via RouteTypes API Returns ------- runs : dict All trip/service run details for the specified route ID. View stops, routes and myki outlets that match the search term Parameters ---------- search_term : str Search text (note: if search text is numeric and/or less than 3 characters, the API will only return routes) Optional Parameters ------------------- route_types : Array[int] Filter by route_type; values returned via RouteTypes API (note: stops and routes are ordered by route_types specified) latitude : float Filter by geographic coordinate of latitude longitude : float Filter by geographic coordinate of longitude max_distance : float Filter by maximum distance (in metres) from location specified via latitude and longitude parameters include_addresses : bool Placeholder for future development; currently unavailable include_outlets : bool Indicates if outlets will be returned in response (default = true) match_stop_by_suburb : bool Indicates whether to find stops by suburbs in the search term (default = true) match_route_by_suburb : bool Indicates whether to find routes by suburbs in the search term (default = true) match_stop_by_gtfs_stop_id : bool Indicates whether to search for stops according to a metlink stop ID (default = false) Returns ------- SearchResponse : dict Stops, routes and myki 
ticket outlets that contain the search term (note: stops and routes are ordered by route_type by default). View facilities at a specific stop (Metro and V/Line stations only) Parameters ---------- stop_id : int Identifier of stop; values returned by Stops API route_type : int Number identifying transport mode; values returned via RouteTypes API Optional Parameters ------------------- stop_location : bool Indicates if stop location information will be returned (default = false) stop_amenities : bool Indicates if stop amenity information will be returned (default = false) stop_accessibility : bool Indicates if stop accessibility information will be returned (default = false) stop_contact : bool Indicates if stop contact information will be returned (default = false) stop_ticket : bool Indicates if stop ticket information will be returned (default = false) gtfs : bool Incdicates whether the stop_id is a GTFS ID or not stop_staffing : bool Indicates if stop staffing information will be returned (default = false) stop_disruptions : bool Indicates if stop disruption information will be returned (default = false) Returns ------- Stop : dict Stop location, amenity and accessibility facility information for the specified stop (metropolitan and V/Line stations only). View all stops on a specific route Parameters ---------- route_id : int Identifier of route; values returned by Routes API - v3/routes route_type : int Number identifying transport mode; values returned via RouteTypes API Optional Parameters ------------------- direction_id : int An optional direction; values returned by Directions API. When this is set, stop sequence information is returned in the response. stop_disruptions : bool Indicates if stop disruption information will be returned (default = false) Returns ------- stops : dict All stops on the specified route. 
View all stops near a specific location Parameters ---------- latitude : float Geographic coordinate of latitude longitude : float Geographic coordinate of longitude Optional Parameters ------------------- route_types : Array[int] Filter by route_type; values returned via RouteTypes API max_results : int Maximum number of results returned (default = 30) max_distance : double Filter by maximum distance (in metres) from location specified via latitude and longitude parameters (default = 300) stop_disruptions : bool Indicates if stop disruption information will be returned (default = false) Returns ------- stops : dict All stops near the specified location. | 3.202261 | 3 |
src/apps/core/purpleserver/providers/urls.py | blueprin4/purplship-server | 0 | 6618236 | """
purplship server carriers module urls
"""
from django.urls import include, path
from purpleserver.providers.views import router
# URL namespace used by reverse()/{% url %} lookups, e.g. "purpleserver.carriers:<route>".
app_name = 'purpleserver.carriers'

# Expose every route registered on the providers `router` under the /v1/ prefix.
urlpatterns = [
    path('v1/', include(router.urls)),
]
| """
purplship server carriers module urls
"""
from django.urls import include, path
from purpleserver.providers.views import router
app_name = 'purpleserver.carriers'
urlpatterns = [
path('v1/', include(router.urls)),
]
| en | 0.592237 | purplship server carriers module urls | 1.774584 | 2 |
nz_django/day3/book_manager/front/views.py | gaohj/nzflask_bbs | 0 | 6618237 | from django.shortcuts import render,redirect,reverse
from django.db import connection
def get_corsor():
    """Return a fresh cursor on Django's default database connection.

    NOTE(review): the name is a typo for ``get_cursor``; kept as-is
    because every view in this module calls it by this name.
    """
    return connection.cursor()
# Create your views here.
#主要是用来展示所有的图书列表
def index(request):
    """Render the landing page listing every book (id, name, author)."""
    cursor = get_corsor()
    cursor.execute("select id,name,author from book")
    books = cursor.fetchall()
    print(books)  # debugging output left by the original author; rows print as tuples
    return render(request, 'index.html', context={'books': books})
def add_book(request):
    """Render the add-book form (GET) or insert a new book and redirect (POST)."""
    if request.method == 'GET':  # Django request-method dispatch
        error = ''
        return render(request, 'add_book.html', context={'error': error})
    else:
        name = request.POST.get('name')
        author = request.POST.get('author')
        cursor = get_corsor()
        # Parameterized query: never interpolate user input into SQL.
        # The original used %-string formatting, which is vulnerable to
        # SQL injection through the posted name/author fields.
        cursor.execute(
            "insert into book(id,name,author) values (null,%s,%s)",
            [name, author],
        )
        return redirect(reverse('index'))
def book_detail(request, book_id):
    """Show the detail page for the book identified by *book_id*."""
    cursor = get_corsor()
    # Parameterized query to avoid SQL injection via the URL-supplied id
    # (the original interpolated book_id directly into the statement).
    cursor.execute("select id,name,author from book where id=%s", [book_id])
    book = cursor.fetchone()
    return render(request, 'book_detail.html', context={"book": book})
def delete_book(request):
    """Delete the book whose id is POSTed; non-POST requests raise."""
    if request.method == 'POST':
        book_id = request.POST.get('book_id')
        cursor = get_corsor()
        # Parameterized delete — the original formatted POST data straight
        # into the SQL string (SQL injection).
        cursor.execute("delete from book where id=%s", [book_id])
        return redirect(reverse('index'))
    else:
        raise RuntimeError('删除图书的方法错误')
| from django.shortcuts import render,redirect,reverse
from django.db import connection
def get_corsor():
return connection.cursor()
# Create your views here.
#主要是用来展示所有的图书列表
def index(request):
cursor = get_corsor()
cursor.execute("select id,name,author from book")
books = cursor.fetchall()
print(books)
#(),()
context = {
'books':books
}
return render(request,'index.html',context=context)
def add_book(request):
    """Render the add-book form (GET) or insert a new book and redirect (POST)."""
    if request.method == 'GET':  # Django request-method dispatch
        error = ''
        return render(request, 'add_book.html', context={'error': error})
    else:
        name = request.POST.get('name')
        author = request.POST.get('author')
        cursor = get_corsor()
        # Parameterized query: never interpolate user input into SQL.
        # The original used %-string formatting, which is vulnerable to
        # SQL injection through the posted name/author fields.
        cursor.execute(
            "insert into book(id,name,author) values (null,%s,%s)",
            [name, author],
        )
        return redirect(reverse('index'))
def book_detail(request, book_id):
    """Show the detail page for the book identified by *book_id*."""
    cursor = get_corsor()
    # Parameterized query to avoid SQL injection via the URL-supplied id
    # (the original interpolated book_id directly into the statement).
    cursor.execute("select id,name,author from book where id=%s", [book_id])
    book = cursor.fetchone()
    return render(request, 'book_detail.html', context={"book": book})
def delete_book(request):
    """Delete the book whose id is POSTed; non-POST requests raise."""
    if request.method == 'POST':
        book_id = request.POST.get('book_id')
        cursor = get_corsor()
        # Parameterized delete — the original formatted POST data straight
        # into the SQL string (SQL injection).
        cursor.execute("delete from book where id=%s", [book_id])
        return redirect(reverse('index'))
    else:
        raise RuntimeError('删除图书的方法错误')
| zh | 0.870256 | # Create your views here. #主要是用来展示所有的图书列表 #(),() #django 判断请求方式 | 3.155178 | 3 |
TDPC-D/resolve.py | staguchi0703/ant_book_dp | 0 | 6618238 | <filename>TDPC-D/resolve.py
def resolve():
    '''
    Probability that the product of N six-sided dice is divisible by D.

    Reads "N D" from stdin and prints the probability. Only the primes
    2, 3, and 5 can come from a die face, so if D has any other prime
    factor the answer is 0. The DP state tracks how many factors of
    2, 3, and 5 (capped at D's exponents) have been collected so far.
    '''
    N, D = [int(item) for item in input().split()]

    def get_factor(num):
        # d_factor[p] holds the exponent of p in num for p in {2, 3, 5}.
        # Slots 0, 1, and 4 stay zero; divisors 4 and 6 never divide the
        # remainder because their prime factors were already removed.
        d_factor = [0, 0, 0, 0, 0, 0]
        divisor = 1
        while num >= 1 and divisor <= 5:
            divisor += 1
            while num % divisor == 0:
                num //= divisor
                d_factor[divisor] += 1
        if num > 1:
            # a prime factor larger than 5 remains -> no dice product works
            return False
        else:
            return d_factor

    d_fact = get_factor(D)
    # Bug fix: the original also required `D != 1` and printed 0.0 for
    # D == 1, but every product is divisible by 1; the DP below already
    # yields 1.0 in that case (all exponent caps are zero).
    if d_fact:
        # dp[i][j5][j3][j2] = probability that after i rolls we have
        # accumulated j2 factors of 2, j3 of 3, j5 of 5 (each capped at
        # D's exponent, so "capped" means "at least that many").
        dp = [[[[0.
            for _ in range(d_fact[2]+1)]
            for _ in range(d_fact[3]+1)]
            for _ in range(d_fact[5]+1)]
            for _ in range(N+1)]
        dp[0][0][0][0] = 1.
        for i in range(N):
            for j5 in range(d_fact[5]+1):
                for j3 in range(d_fact[3]+1):
                    for j2 in range(d_fact[2]+1):
                        # the six die faces, each with probability 1/6:
                        dp[i+1][j5][j3][j2] += dp[i][j5][j3][j2] * 1/6  # face 1
                        dp[i+1][j5][j3][min(j2+1, d_fact[2])] += dp[i][j5][j3][j2] * 1/6  # face 2
                        dp[i+1][j5][min(j3+1, d_fact[3])][j2] += dp[i][j5][j3][j2] * 1/6  # face 3
                        dp[i+1][j5][j3][min(j2+2, d_fact[2])] += dp[i][j5][j3][j2] * 1/6  # face 4
                        dp[i+1][min(j5+1, d_fact[5])][j3][j2] += dp[i][j5][j3][j2] * 1/6  # face 5
                        dp[i+1][j5][min(j3+1, d_fact[3])][min(j2+1, d_fact[2])] += dp[i][j5][j3][j2] * 1/6  # face 6
        print(dp[N][d_fact[5]][d_fact[3]][d_fact[2]])
    else:
        print(0.)
if __name__ == "__main__":
resolve()
| <filename>TDPC-D/resolve.py
def resolve():
    '''
    Probability that the product of N six-sided dice is divisible by D.

    Reads "N D" from stdin and prints the probability. Only the primes
    2, 3, and 5 can come from a die face, so if D has any other prime
    factor the answer is 0. The DP state tracks how many factors of
    2, 3, and 5 (capped at D's exponents) have been collected so far.
    '''
    N, D = [int(item) for item in input().split()]

    def get_factor(num):
        # d_factor[p] holds the exponent of p in num for p in {2, 3, 5}.
        # Slots 0, 1, and 4 stay zero; divisors 4 and 6 never divide the
        # remainder because their prime factors were already removed.
        d_factor = [0, 0, 0, 0, 0, 0]
        divisor = 1
        while num >= 1 and divisor <= 5:
            divisor += 1
            while num % divisor == 0:
                num //= divisor
                d_factor[divisor] += 1
        if num > 1:
            # a prime factor larger than 5 remains -> no dice product works
            return False
        else:
            return d_factor

    d_fact = get_factor(D)
    # Bug fix: the original also required `D != 1` and printed 0.0 for
    # D == 1, but every product is divisible by 1; the DP below already
    # yields 1.0 in that case (all exponent caps are zero).
    if d_fact:
        # dp[i][j5][j3][j2] = probability that after i rolls we have
        # accumulated j2 factors of 2, j3 of 3, j5 of 5 (each capped at
        # D's exponent, so "capped" means "at least that many").
        dp = [[[[0.
            for _ in range(d_fact[2]+1)]
            for _ in range(d_fact[3]+1)]
            for _ in range(d_fact[5]+1)]
            for _ in range(N+1)]
        dp[0][0][0][0] = 1.
        for i in range(N):
            for j5 in range(d_fact[5]+1):
                for j3 in range(d_fact[3]+1):
                    for j2 in range(d_fact[2]+1):
                        # the six die faces, each with probability 1/6:
                        dp[i+1][j5][j3][j2] += dp[i][j5][j3][j2] * 1/6  # face 1
                        dp[i+1][j5][j3][min(j2+1, d_fact[2])] += dp[i][j5][j3][j2] * 1/6  # face 2
                        dp[i+1][j5][min(j3+1, d_fact[3])][j2] += dp[i][j5][j3][j2] * 1/6  # face 3
                        dp[i+1][j5][j3][min(j2+2, d_fact[2])] += dp[i][j5][j3][j2] * 1/6  # face 4
                        dp[i+1][min(j5+1, d_fact[5])][j3][j2] += dp[i][j5][j3][j2] * 1/6  # face 5
                        dp[i+1][j5][min(j3+1, d_fact[3])][min(j2+1, d_fact[2])] += dp[i][j5][j3][j2] * 1/6  # face 6
        print(dp[N][d_fact[5]][d_fact[3]][d_fact[2]])
    else:
        print(0.)
if __name__ == "__main__":
resolve()
| none | 1 | 2.950999 | 3 | |
chiplabel/chip_label.py | Velko/chiplabel_py | 2 | 6618239 | <reponame>Velko/chiplabel_py
#!/usr/bin/env python3
import logging
import os
import sys
from PIL import Image
from .args import parse_args
from .chip import Chip
from .chip_list import ChipList
from .chip_printer import ChipPrinter
from .chip_grid_printer import ChipGridPrinter
from ._version import print_version_info
log = logging.getLogger()
def _to_chip_list(chip_list, chip_ids):
    """Resolve each requested id against *chip_list*, warning on misses."""
    found = []
    for requested_id in chip_ids:
        resolved = chip_list[requested_id]
        if resolved:
            found.append(resolved)
        else:
            # falsy lookup result means the catalog has no such chip
            log.warning('Chip not found: %s, skipping', requested_id)
    return found
def print_chips_text(chip_list, args):
    """Print an ASCII-art label for each chip to stdout.

    `args` is accepted (and unused) so this shares a signature with
    ``print_chips_image`` for the dispatch in ``main``.
    """
    log.info('Printing %s chips to text', len(chip_list))
    for chip in chip_list:
        print()  # blank separator line between labels
        chip.print_ASCII()
def print_chips_image(chip_list, args):
    """Render each chip label to a .png file under ``args.output``.

    Without ``args.page``, one image per chip is written; with it, a
    ChipGridPrinter lays multiple labels out per page instead.
    """
    if not os.path.isdir(args.output):
        log.error('Output directory not found [%s]', args.output)
        return

    log.info('Printing %s chips to .png', len(chip_list))

    # ensure a trailing separator so plain string concatenation below
    # produces a valid path
    output_dir = args.output
    if output_dir[-1] not in ('/', '\\'):
        output_dir = output_dir + '/'

    # printer options come straight from the parsed CLI namespace
    config = vars(args)
    log.debug('config: %s', config)
    if not args.page:
        chip_printer = ChipPrinter(**config)
        for chip in chip_list:
            log.info('Generating label for chip [%s]', chip.id)
            #TODO: Prefix lib name flag
            output_file = f"{output_dir}{chip.unscoped_id}.png"
            chip_printer.print_chip_to_file(chip, output_file)
    else:
        #TODO: Output directory/file pattern
        gridPrinter = ChipGridPrinter(**config)
        gridPrinter.print_chips(chip_list)
class LogFormatter(logging.Formatter):
    """Formatter that omits the level name for INFO-level records."""

    def format(self, record):
        # Pick the template first, then delegate to the base class:
        # plain message for INFO, "LEVEL: message" for everything else.
        if record.levelno == logging.INFO:
            template = "%(message)s"
        else:
            template = "%(levelname)s: %(message)s"
        self._style._fmt = template
        return super().format(record)
def main(argv):
    """CLI entry point: parse arguments, load the chip list, dispatch.

    *argv* is the full argument vector; argv[0] (the program name) is
    stripped before parsing. ``--version`` short-circuits everything.
    """
    args = parse_args(argv[1:])
    if args.version:
        print_version_info()
        return

    # Configure logging: route through our formatter at the requested level.
    old_loglevel = log.level
    handler = logging.StreamHandler()
    handler.setFormatter(LogFormatter())
    log.setLevel(args.loglevel)
    log.addHandler(handler)

    try:
        chip_list = ChipList()
        try:
            chip_list.load(args.input)
        except IOError as ex:
            # load failure is logged, not fatal; the empty-list check
            # below produces the user-facing error
            log.error('Error loading chip list [%s]: %s', args.input, ex)

        if not len(chip_list):
            log.error('No chip loaded')
            return

        # text vs. image output share a (chips, args) calling convention
        print_chips = print_chips_text if args.text else print_chips_image

        if args.list:
            # case-insensitive alphabetical listing of known chip names
            for chip in sorted(chip_list.names, key=str.casefold):
                print(chip)
        elif args.all:
            print_chips(chip_list, args)
        else:
            chips = _to_chip_list(chip_list, args.chip)
            if chips and len(chips):
                out_of = f'(out of {len(args.chip)})' if len(chips) != len(args.chip) else ''
                log.info('Found %d chips %s', len(chips), out_of)
                print_chips(chips, args)
            else:
                log.warning('Nothing to do')
    finally:
        # Reset the logger so callers importing this module are unaffected
        # when we're not running as a standalone app.
        log.removeHandler(handler)
        log.setLevel(old_loglevel)
if __name__ == '__main__':
    # Fail fast on interpreters older than 3.6 — this script uses
    # f-strings, which are 3.6+ syntax.
    MIN_PYTHON = (3, 6)
    if sys.version_info < MIN_PYTHON:
        sys.exit("Python %s.%s or later is required.\n" % MIN_PYTHON)
    main(sys.argv)
| #!/usr/bin/env python3
import logging
import os
import sys
from PIL import Image
from .args import parse_args
from .chip import Chip
from .chip_list import ChipList
from .chip_printer import ChipPrinter
from .chip_grid_printer import ChipGridPrinter
from ._version import print_version_info
log = logging.getLogger()
def _to_chip_list(chip_list, chip_ids):
chips = []
for chip_id in chip_ids:
chip = chip_list[chip_id]
if not chip:
log.warning('Chip not found: %s, skipping', chip_id)
else:
chips.append(chip)
return chips
def print_chips_text(chip_list, args):
log.info('Printing %s chips to text', len(chip_list))
for chip in chip_list:
print()
chip.print_ASCII()
def print_chips_image(chip_list, args):
if not os.path.isdir(args.output):
log.error('Output directory not found [%s]', args.output)
return
log.info('Printing %s chips to .png', len(chip_list))
output_dir = args.output
if output_dir[-1] not in ('/', '\\'):
output_dir = output_dir + '/'
config = vars(args)
log.debug('config: %s', config)
if not args.page:
chip_printer = ChipPrinter(**config)
for chip in chip_list:
log.info('Generating label for chip [%s]', chip.id)
#TODO: Prefix lib name flag
output_file = f"{output_dir}{chip.unscoped_id}.png"
chip_printer.print_chip_to_file(chip, output_file)
else:
#TODO: Output directory/file pattern
gridPrinter = ChipGridPrinter(**config)
gridPrinter.print_chips(chip_list)
class LogFormatter(logging.Formatter):
def format(self, record):
if record.levelno == logging.INFO:
self._style._fmt = "%(message)s"
else:
self._style._fmt = "%(levelname)s: %(message)s"
return super().format(record)
def main(argv):
args = parse_args(argv[1:])
if args.version:
print_version_info()
return
# Configure logging
old_loglevel = log.level
handler = logging.StreamHandler()
handler.setFormatter(LogFormatter())
log.setLevel(args.loglevel)
log.addHandler(handler)
try:
chip_list = ChipList()
try:
chip_list.load(args.input)
except IOError as ex:
log.error('Error loading chip list [%s]: %s', args.input, ex)
if not len(chip_list):
log.error('No chip loaded')
return
print_chips = print_chips_text if args.text else print_chips_image
if args.list:
for chip in sorted(chip_list.names, key=str.casefold):
print(chip)
elif args.all:
print_chips(chip_list, args)
else:
chips = _to_chip_list(chip_list, args.chip)
if chips and len(chips):
out_of = f'(out of {len(args.chip)})' if len(chips) != len(args.chip) else ''
log.info('Found %d chips %s', len(chips), out_of)
print_chips(chips, args)
else:
log.warning('Nothing to do')
finally:
# Reset log in case we're not running as a standalong app
log.removeHandler(handler)
log.setLevel(old_loglevel)
if __name__ == '__main__':
MIN_PYTHON = (3, 6)
if sys.version_info < MIN_PYTHON:
sys.exit("Python %s.%s or later is required.\n" % MIN_PYTHON)
main(sys.argv) | en | 0.532652 | #!/usr/bin/env python3 #TODO: Prefix lib name flag #TODO: Output directory/file pattern # Configure logging # Reset log in case we're not running as a standalong app | 2.623286 | 3 |
wqio/hydro.py | Geosyntec/wqio | 18 | 6618240 | <reponame>Geosyntec/wqio
import warnings
import numpy
from matplotlib import pyplot
from matplotlib import dates
from matplotlib import gridspec
import seaborn
import pandas
from wqio import utils
from wqio import viz
from wqio import validate
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
SEC_PER_MINUTE = 60.0
MIN_PER_HOUR = 60.0
HOUR_PER_DAY = 24.0
SEC_PER_HOUR = SEC_PER_MINUTE * MIN_PER_HOUR
SEC_PER_DAY = SEC_PER_HOUR * HOUR_PER_DAY
def _wet_first_row(df, wetcol, diffcol):
# make sure that if the first record is associated with the first
# storm if it's wet
firstrow = df.iloc[0]
if firstrow[wetcol]:
df.loc[firstrow.name, diffcol] = 1
return df
def _wet_window_diff(is_wet, ie_periods):
return (
is_wet.rolling(int(ie_periods), min_periods=1)
.apply(lambda window: window.any(), raw=False)
.diff()
)
def parse_storm_events(
data,
intereventHours,
outputfreqMinutes,
precipcol=None,
inflowcol=None,
outflowcol=None,
baseflowcol=None,
stormcol="storm",
debug=False,
):
"""Parses the hydrologic data into distinct storms.
In this context, a storm is defined as starting whenever the
hydrologic records shows non-zero precipitation or [in|out]flow
from the BMP after a minimum inter-event dry period duration
specified in the the function call. The storms ends the observation
*after* the last non-zero precipitation or flow value.
Parameters
----------
data : pandas.DataFrame
intereventHours : float
The Inter-Event dry duration (in hours) that classifies the
next hydrlogic activity as a new event.
precipcol : string, optional (default = None)
Name of column in `hydrodata` containing precipiation data.
inflowcol : string, optional (default = None)
Name of column in `hydrodata` containing influent flow data.
outflowcol : string, optional (default = None)
Name of column in `hydrodata` containing effluent flow data.
baseflowcol : string, optional (default = None)
Name of column in `hydrodata` containing boolean indicating
which records are considered baseflow.
stormcol : string (default = 'storm')
Name of column in `hydrodata` indentifying distinct storms.
debug : bool (default = False)
If True, diagnostic columns will not be dropped prior to
returning the dataframe of parsed_storms.
Writes
------
None
Returns
-------
parsed_storms : pandas.DataFrame
Copy of the origin `hydrodata` DataFrame, but resampled to a
fixed frequency, columns possibly renamed, and a `storm` column
added to denote the storm to which each record belongs. Records
where `storm` == 0 are not a part of any storm.
"""
# pull out the rain and flow data
if precipcol is None:
precipcol = "precip"
data.loc[:, precipcol] = numpy.nan
if inflowcol is None:
inflowcol = "inflow"
data.loc[:, inflowcol] = numpy.nan
if outflowcol is None:
outflowcol = "outflow"
data.loc[:, outflowcol] = numpy.nan
if baseflowcol is None:
baseflowcol = "baseflow"
data.loc[:, baseflowcol] = False
# bool column where True means there's rain or flow of some kind
water_columns = [inflowcol, outflowcol, precipcol]
cols_to_use = water_columns + [baseflowcol]
agg_dict = {
precipcol: numpy.sum,
inflowcol: numpy.mean,
outflowcol: numpy.mean,
baseflowcol: numpy.any,
}
freq = pandas.offsets.Minute(outputfreqMinutes)
ie_periods = int(MIN_PER_HOUR / freq.n * intereventHours)
# periods between storms are where the cumulative number
# of storms that have ended are equal to the cumulative
# number of storms that have started.
# Stack Overflow: http://tinyurl.com/lsjkr9x
res = (
data.resample(freq)
.agg(agg_dict)
.loc[:, lambda df: df.columns.isin(cols_to_use)]
.assign(
__wet=lambda df: numpy.any(df[water_columns] > 0, axis=1) & ~df[baseflowcol]
)
.assign(__windiff=lambda df: _wet_window_diff(df["__wet"], ie_periods))
.pipe(_wet_first_row, "__wet", "__windiff")
.assign(__event_start=lambda df: df["__windiff"] == 1)
.assign(__event_end=lambda df: df["__windiff"].shift(-1 * ie_periods) == -1)
.assign(__storm=lambda df: df["__event_start"].cumsum())
.assign(
storm=lambda df: numpy.where(
df["__storm"] == df["__event_end"].shift(2).cumsum(),
0, # inter-event periods marked as zero
df["__storm"], # actual events keep their number
)
)
)
if not debug:
res = res.loc[:, res.columns.map(lambda c: not c.startswith("__"))]
return res
class Storm(object):
""" Object representing a storm event
Parameters
----------
dataframe : pandas.DataFrame
A datetime-indexed Dataframe containing all of the hydrologic
data and am interger column indentifying distinct storms.
stormnumber : int
The storm we care about.
precipcol, inflowcol, outflow, tempcol, stormcol : string, optional
Names for columns representing each hydrologic quantity.
freqMinutes : float (default = 5)
The time period, in minutes, between observations.
volume_conversion : float, optional (default = 1)
Conversion factor to go from flow to volume for a single
observation.
"""
# TODO: rename freqMinutes to periodMinutes
    def __init__(
        self,
        dataframe,
        stormnumber,
        precipcol="precip",
        inflowcol="inflow",
        outflowcol="outflow",
        tempcol="temp",
        stormcol="storm",
        freqMinutes=5,
        volume_conversion=1,
    ):
        """Slice `dataframe` down to a single storm and set up metadata.

        See the class docstring for parameter details. All derived
        quantities (peaks, totals, centroids, lags, ...) are set to
        None here and computed lazily by the matching properties.
        """
        self.inflowcol = inflowcol
        self.outflowcol = outflowcol
        self.precipcol = precipcol
        self.tempcol = tempcol
        self.stormnumber = stormnumber
        self.freqMinutes = freqMinutes
        # converts one instantaneous flow reading into a per-record
        # volume: user factor * seconds per observation period
        self.volume_conversion = volume_conversion * SEC_PER_MINUTE * self.freqMinutes

        # basic data: only the records belonging to this storm
        self.data = dataframe[dataframe[stormcol] == self.stormnumber].copy()
        self.hydrofreq_label = "{0} min".format(self.freqMinutes)

        # tease out start/stop info
        self.start = self.data.index[0]
        self.end = self.data.index[-1]
        self._season = utils.getSeason(self.start)

        # storm duration (hours)
        duration = self.end - self.start
        self.duration_hours = duration.total_seconds() / SEC_PER_HOUR

        # antecedent dry period (days) since the end of the previous storm;
        # NaN for the first storm, which has no predecessor in the record
        if self.stormnumber > 1:
            prev_storm_mask = dataframe[stormcol] == self.stormnumber - 1
            previous_end = dataframe[prev_storm_mask].index[-1]
            antecedent_timedelta = self.start - previous_end
            self.antecedent_period_days = (
                antecedent_timedelta.total_seconds() / SEC_PER_DAY
            )
        else:
            self.antecedent_period_days = numpy.nan

        # lazily-computed quantities (see the corresponding properties)
        self._precip = None
        self._inflow = None
        self._outflow = None

        # starts and stops
        self._precip_start = None
        self._precip_end = None
        self._inflow_start = None
        self._inflow_end = None
        self._outflow_start = None
        self._outflow_end = None

        # peaks
        self._peak_precip_intensity = None
        self._peak_inflow = None
        self._peak_outflow = None

        # times of peaks
        self._peak_precip_intensity_time = None
        self._peak_inflow_time = None
        self._peak_outflow_time = None
        self._peak_lag_hours = None

        # centroids
        self._centroid_precip_time = None
        self._centroid_inflow_time = None
        self._centroid_outflow_time = None
        self._centroid_lag_hours = None

        # totals
        self._total_precip_depth = None
        self._total_inflow_volume = None
        self._total_outflow_volume = None

        # per-column plotting/labeling metadata keyed by column name
        self.meta = {
            self.outflowcol: {
                "name": "Flow (calculated, L/s)",
                "ylabel": "Effluent flow (L/s)",
                "color": "CornFlowerBlue",
                "linewidth": 1.5,
                "alpha": 0.5,
                "ymin": 0,
            },
            self.inflowcol: {
                "name": "Inflow (estimated, L/s)",
                "ylabel": "Estimated influent flow (L/s)",
                "color": "Maroon",
                "linewidth": 1.5,
                "alpha": 0.5,
                "ymin": 0,
            },
            self.precipcol: {
                "name": "Precip (mm)",
                "ylabel": "%s Precip.\nDepth (mm)" % self.hydrofreq_label,
                "color": "DarkGreen",
                "linewidth": 1.5,
                "alpha": 0.4,
                "ymin": 0,
            },
            self.tempcol: {
                "name": "Air Temp (deg C)",
                "ylabel": "Air Temperature (deg. C)",
                "color": "DarkGoldenRod",
                "linewidth": 1.5,
                "alpha": 0.5,
                "ymin": None,
            },
        }

        self._summary_dict = None
@property
def precip(self):
if self._precip is None:
if self.precipcol is not None:
self._precip = self.data[self.data[self.precipcol] > 0][self.precipcol]
else:
self._precip = numpy.array([])
return self._precip
@property
def inflow(self):
if self._inflow is None:
if self.inflowcol is not None:
self._inflow = self.data[self.data[self.inflowcol] > 0][self.inflowcol]
else:
self._inflow = numpy.array([])
return self._inflow
@property
def outflow(self):
if self._outflow is None:
if self.outflowcol is not None:
self._outflow = self.data[self.data[self.outflowcol] > 0][
self.outflowcol
]
else:
self._outflow = numpy.array([])
return self._outflow
@property
def has_precip(self):
return self.precip.shape[0] > 0
@property
def has_inflow(self):
return self.inflow.shape[0] > 0
@property
def has_outflow(self):
return self.outflow.shape[0] > 0
@property
def season(self):
return self._season
@season.setter
def season(self, value):
self._season = value
# starts and stops
@property
def precip_start(self):
if self._precip_start is None and self.has_precip:
self._precip_start = self._get_event_time(self.precipcol, "start")
return self._precip_start
@property
def precip_end(self):
if self._precip_end is None and self.has_precip:
self._precip_end = self._get_event_time(self.precipcol, "end")
return self._precip_end
@property
def inflow_start(self):
if self._inflow_start is None and self.has_inflow:
self._inflow_start = self._get_event_time(self.inflowcol, "start")
return self._inflow_start
@property
def inflow_end(self):
if self._inflow_end is None and self.has_inflow:
self._inflow_end = self._get_event_time(self.inflowcol, "end")
return self._inflow_end
@property
def outflow_start(self):
if self._outflow_start is None and self.has_outflow:
self._outflow_start = self._get_event_time(self.outflowcol, "start")
return self._outflow_start
@property
def outflow_end(self):
if self._outflow_end is None and self.has_outflow:
self._outflow_end = self._get_event_time(self.outflowcol, "end")
return self._outflow_end
@property
def _peak_depth(self):
if self.has_precip:
return self.precip.max()
@property
def peak_precip_intensity(self):
if self._peak_precip_intensity is None and self.has_precip:
self._peak_precip_intensity = (
self._peak_depth * MIN_PER_HOUR / self.freqMinutes
)
return self._peak_precip_intensity
@property
def peak_inflow(self):
if self._peak_inflow is None and self.has_inflow:
self._peak_inflow = self.inflow.max()
return self._peak_inflow
@property
def peak_outflow(self):
if self._peak_outflow is None and self.has_outflow:
self._peak_outflow = self.outflow.max()
return self._peak_outflow
@property
def total_precip_depth(self):
if self._total_precip_depth is None and self.has_precip:
self._total_precip_depth = self.data[self.precipcol].sum()
return self._total_precip_depth
@property
def total_inflow_volume(self):
if self._total_inflow_volume is None and self.has_inflow:
self._total_inflow_volume = (
self.data[self.inflowcol].sum() * self.volume_conversion
)
return self._total_inflow_volume
@property
def total_outflow_volume(self):
if self._total_outflow_volume is None and self.has_outflow:
self._total_outflow_volume = (
self.data[self.outflowcol].sum() * self.volume_conversion
)
return self._total_outflow_volume
@property
def centroid_precip_time(self):
if self._centroid_precip_time is None and self.has_precip:
self._centroid_precip_time = self._compute_centroid(self.precipcol)
return self._centroid_precip_time
@property
def centroid_inflow_time(self):
if self._centroid_inflow_time is None and self.has_inflow:
self._centroid_inflow_time = self._compute_centroid(self.inflowcol)
return self._centroid_inflow_time
@property
def centroid_outflow_time(self):
if self._centroid_outflow_time is None and self.has_outflow:
self._centroid_outflow_time = self._compute_centroid(self.outflowcol)
return self._centroid_outflow_time
@property
def centroid_lag_hours(self):
if (
self._centroid_lag_hours is None
and self.centroid_outflow_time is not None
and self.centroid_inflow_time is not None
):
self._centroid_lag_hours = (
self.centroid_outflow_time - self.centroid_inflow_time
).total_seconds() / SEC_PER_HOUR
return self._centroid_lag_hours
@property
def peak_precip_intensity_time(self):
if self._peak_precip_intensity_time is None and self.has_precip:
PI_selector = self.data[self.precipcol] == self._peak_depth
self._peak_precip_intensity_time = self.data[PI_selector].index[0]
return self._peak_precip_intensity_time
@property
def peak_inflow_time(self):
if self._peak_inflow_time is None and self.has_inflow:
PInf_selector = self.data[self.inflowcol] == self.peak_inflow
self._peak_inflow_time = self.data[PInf_selector].index[0]
return self._peak_inflow_time
@property
def peak_outflow_time(self):
    """Timestamp of the first record at the peak outflow (cached);
    ``None`` when the storm has no outflow or no matching record."""
    if self._peak_outflow_time is None and self.has_outflow:
        PEff_selector = self.data[self.outflowcol] == self.peak_outflow
        # guard against an empty match (unlike peak_inflow_time)
        if PEff_selector.sum() > 0:
            self._peak_outflow_time = self.data[PEff_selector].index[0]
    return self._peak_outflow_time
@property
def peak_lag_hours(self):
    """Hours between the peak inflow and peak outflow (cached);
    ``None`` unless both peak times exist."""
    if (
        self._peak_lag_hours is None
        and self.peak_outflow_time is not None
        and self.peak_inflow_time is not None
    ):
        time_delta = self.peak_outflow_time - self.peak_inflow_time
        self._peak_lag_hours = time_delta.total_seconds() / SEC_PER_HOUR
    return self._peak_lag_hours
@property
def summary_dict(self):
    """Cached dict of the storm's summary statistics, keyed by the
    human-readable column names used in ``HydroRecord.storm_stats``."""
    if self._summary_dict is None:
        self._summary_dict = {
            "Storm Number": self.stormnumber,
            "Antecedent Days": self.antecedent_period_days,
            "Start Date": self.start,
            "End Date": self.end,
            "Duration Hours": self.duration_hours,
            "Peak Precip Intensity": self.peak_precip_intensity,
            "Total Precip Depth": self.total_precip_depth,
            "Total Inflow Volume": self.total_inflow_volume,
            "Peak Inflow": self.peak_inflow,
            "Total Outflow Volume": self.total_outflow_volume,
            "Peak Outflow": self.peak_outflow,
            "Peak Lag Hours": self.peak_lag_hours,
            "Centroid Lag Hours": self.centroid_lag_hours,
            "Season": self.season,
        }
    return self._summary_dict
def is_small(self, minprecip=0.0, mininflow=0.0, minoutflow=0.0):
    """ Determines whether a storm can be considered "small".

    Parameters
    ----------
    minprecip, mininflow, minoutflow : float, optional (default = 0)
        Threshold below which the corresponding hydrologic quantity
        marks the storm as "small".

    Returns
    -------
    storm_is_small : bool
        True if the storm is considered small.
    """

    def _below(total, threshold):
        # a missing (None) total can never flag the storm as small
        return total is not None and total < threshold

    return (
        _below(self.total_precip_depth, minprecip)
        or _below(self.total_inflow_volume, mininflow)
        or _below(self.total_outflow_volume, minoutflow)
    )
def _get_event_time(self, column, bound):
    """Timestamp of the first ("start") or last ("end") record where
    *column* is positive; warns and returns ``None`` when there are none.
    """
    positions = {"start": 0, "end": -1}
    nonzero = self.data[self.data[column] > 0]
    if nonzero.shape[0] == 0:
        warnings.warn("Storm has no {}".format(column), UserWarning)
        return None
    return nonzero.index[positions[bound]]
def _get_max_quantity(self, column):
    """Largest value observed in *column* during the storm."""
    return self.data.loc[:, column].max()
def _compute_centroid(self, column):
    """Mass-weighted centroid timestamp of *column*, or ``None`` when
    the weights sum to zero (NaN centroid)."""
    # ordinal time index of storm
    time_idx = [
        dates.date2num(idx.to_pydatetime()) for idx in self.data.index.tolist()
    ]
    # weighted mean of the ordinal times, weights = the quantity itself
    centroid = numpy.sum(self.data[column] * time_idx) / numpy.sum(
        self.data[column]
    )
    if numpy.isnan(centroid):
        return None
    else:
        # num2date returns an aware datetime; strip the tz for consistency
        return pandas.Timestamp(dates.num2date(centroid)).tz_convert(None)
def _plot_centroids(self, ax, yfactor=0.5):
    """Draw precip/effluent centroid markers on *ax* (plus an arrow
    between them) and return proxy artists and labels for a legend.

    NOTE(review): this reads ``self.centroid_precip`` and
    ``self.centroid_flow``, but the visible properties on this class are
    ``centroid_precip_time`` / ``centroid_inflow_time`` /
    ``centroid_outflow_time``. Confirm these attributes are defined
    elsewhere; otherwise this method raises AttributeError.
    """
    artists = []
    labels = []
    # place the markers at a fixed fraction of the y-axis range
    y_val = yfactor * ax.get_ylim()[1]
    if self.centroid_precip is not None:
        ax.plot(
            [self.centroid_precip],
            [y_val],
            color="DarkGreen",
            marker="o",
            linestyle="none",
            zorder=20,
            markersize=6,
        )
        # proxy artist so the legend can show the marker
        artists.append(
            pyplot.Line2D(
                [0],
                [0],
                marker=".",
                markersize=6,
                linestyle="none",
                color="DarkGreen",
            )
        )
        labels.append("Precip. centroid")
    if self.centroid_flow is not None:
        ax.plot(
            [self.centroid_flow],
            [y_val],
            color="CornflowerBlue",
            marker="s",
            linestyle="none",
            zorder=20,
            markersize=6,
        )
        artists.append(
            pyplot.Line2D(
                [0],
                [0],
                marker="s",
                markersize=6,
                linestyle="none",
                color="CornflowerBlue",
            )
        )
        labels.append("Effluent centroid")
    if self.centroid_precip is not None and self.centroid_flow is not None:
        # arrow from the precip centroid to the flow centroid
        ax.annotate(
            "",
            (self.centroid_flow, y_val),
            arrowprops=dict(arrowstyle="-|>"),
            xytext=(self.centroid_precip, y_val),
        )
    return artists, labels
def plot_hydroquantity(
    self, quantity, ax=None, label=None, otherlabels=None, artists=None
):
    """ Draws a hydrologic quantity to a matplotlib axes.

    Parameters
    ----------
    quantity : string
        Column name of the quantity you want to plot.
    ax : matplotlib axes object, optional
        The axes on which the data will be plotted. If None, a new
        one will be created.
    label : string, optional
        How the series should be labeled in the figure legend.
    otherlabels : list of strings, optional
        A list of other legend labels that have already been plotted
        to ``ax``. If provided, ``label`` will be appended (in place).
        If not provided, no label list is built.
    artists : list of matplotlib artists, optional
        A list of other legend items that have already been plotted
        to ``ax``. If provided, the artist created will be appended
        (in place). If not provided, no artist list is built.

    Returns
    -------
    fig : matplotlib.Figure
        The figure containing the plot.
    labels : list of strings
        Labels to be included in a legend for the figure.
    artists : list of matplotlib artists
        Symbology for the figure legend.
    """
    # setup the figure
    fig, ax = validate.axes(ax)
    if label is None:
        label = quantity
    # select the plot props based on the column
    try:
        meta = self.meta[quantity]
    except KeyError:
        raise KeyError("{} not available".format(quantity))
    # plot the data as a filled area; NaNs are treated as zero flow/depth
    self.data[quantity].fillna(0).plot(
        ax=ax, kind="area", color=meta["color"], alpha=meta["alpha"], zorder=5
    )
    # a filled rectangle stands in for the area plot in the legend
    if artists is not None:
        proxy = pyplot.Rectangle(
            (0, 0), 1, 1, facecolor=meta["color"], linewidth=0, alpha=meta["alpha"]
        )
        artists.append(proxy)
    if otherlabels is not None:
        otherlabels.append(label)
    return fig, otherlabels, artists
def summaryPlot(
    self,
    axratio=2,
    filename=None,
    showLegend=True,
    precip=True,
    inflow=True,
    outflow=True,
    figopts={},
    serieslabels={},
):
    """
    Creates a figure showing the hydrlogic record (flow and
    precipitation) of the storm

    Input:
        axratio : optional float or int (default = 2)
            Relative height of the flow axis compared to the
            precipiation axis.
        filename : optional string (default = None)
            Filename to which the figure will be saved.
        showLegend : optional bool (default = True)
            Toggles the legend on the precipitation axis.
        precip, inflow, outflow : optional bool (default = True)
            Toggle each individual series.
        figopts : optional dict
            Passed on to `pyplot.figure`.
        serieslabels : optional dict
            Maps column names to custom legend labels.

    Writes:
        Figure of flow and precipitation for a storm

    Returns:
        fig, artists, labels
    """
    fig = pyplot.figure(**figopts)
    # rain on top (1 part), flow below (axratio parts)
    gs = gridspec.GridSpec(
        nrows=2, ncols=1, height_ratios=[1, axratio], hspace=0.12
    )
    rainax = fig.add_subplot(gs[0])
    rainax.yaxis.set_major_locator(pyplot.MaxNLocator(5))
    flowax = fig.add_subplot(gs[1], sharex=rainax)

    # create the legend proxy artists
    artists = []
    labels = []

    # in the label assignment: `serieslabels.pop(item, item)` might
    # seem odd. What it does is looks for a label (value) in the
    # dictionary with the key equal to `item`. If there is no valur
    # for that key in the dictionary the `item` itself is returned.
    # so if there's nothing called "test" in mydict,
    # `mydict.pop("test", "test")` returns `"test"`.
    if inflow:
        fig, labels, artists = self.plot_hydroquantity(
            self.inflowcol,
            ax=flowax,
            label=serieslabels.pop(self.inflowcol, self.inflowcol),
            otherlabels=labels,
            artists=artists,
        )
    if outflow:
        # NOTE: `arti` is unused, but plot_hydroquantity appends to the
        # `artists` list in place and returns the same object, so the
        # legend still picks up the new proxy.
        fig, labels, arti = self.plot_hydroquantity(
            self.outflowcol,
            ax=flowax,
            label=serieslabels.pop(self.outflowcol, self.outflowcol),
            otherlabels=labels,
            artists=artists,
        )
    if precip:
        fig, labels, arti = self.plot_hydroquantity(
            self.precipcol,
            ax=rainax,
            label=serieslabels.pop(self.precipcol, self.precipcol),
            otherlabels=labels,
            artists=artists,
        )
    # rain is conventionally drawn hanging down from the top axis
    rainax.invert_yaxis()
    if showLegend:
        leg = rainax.legend(
            artists,
            labels,
            fontsize=7,
            ncol=1,
            markerscale=0.75,
            frameon=False,
            loc="lower right",
        )
        leg.get_frame().set_zorder(25)
        _leg = [leg]
    else:
        _leg = None

    seaborn.despine(ax=rainax, bottom=True, top=False)
    seaborn.despine(ax=flowax)
    flowax.set_xlabel("")
    rainax.set_xlabel("")
    if filename is not None:
        fig.savefig(
            filename,
            dpi=300,
            transparent=True,
            bbox_inches="tight",
            bbox_extra_artists=_leg,
        )
    return fig, artists, labels
class HydroRecord(object):
    """ Class representing an entire hydrologic record.

    Parameters
    ----------
    hydrodata : pandas.DataFrame
        DataFrame of hydrologic data of the storm. Should contain
        a unique index of type pandas.DatetimeIndex.
    precipcol : string, optional (default = None)
        Name of column in `hydrodata` containing precipiation data.
    inflowcol : string, optional (default = None)
        Name of column in `hydrodata` containing influent flow data.
    outflowcol : string, optional (default = None)
        Name of column in `hydrodata` containing effluent flow data.
    baseflowcol : string, optional (default = None)
        Name of column in `hydrodata` containing boolean indicating
        which records are considered baseflow.
    stormcol : string (default = 'storm')
        Name of column in `hydrodata` indentifying distinct storms.
    minprecip, mininflow, minoutflow : float, optional (default = 0)
        The minimum amount of each hydrologic quantity below which a
        storm can be considered "small".
    outputfreqMinutes : int, optional (default = 10)
        The default frequency (minutes) to which all data will be
        resampled. Precipitation data will be summed up across '
        multiple timesteps during resampling, while flow will be
        averaged.
    intereventHours : int, optional (default = 6)
        The dry duration (no flow or rain) required to signal the end of
        a storm.
    volume_conversion : float, optional (default = 1)
        Conversion factor to go from flow to volume for a single
        observation.
    stormclass : object, optional
        Defaults to wqio.hydro.Storm. Can be a subclass of that in cases
        where custom functionality is needed.
    lowmem : bool (default = False)
        If True, all dry observations are removed from the dataframe.
    """

    # TODO: rename `outputfreqMinutes` to `outputPeriodMinutes`
    def __init__(
        self,
        hydrodata,
        precipcol=None,
        inflowcol=None,
        outflowcol=None,
        baseflowcol=None,
        tempcol=None,
        stormcol="storm",
        minprecip=0.0,
        mininflow=0.0,
        minoutflow=0.0,
        outputfreqMinutes=10,
        intereventHours=6,
        volume_conversion=1,
        stormclass=None,
        lowmem=False,
    ):
        # validate input
        if precipcol is None and inflowcol is None and outflowcol is None:
            msg = "`hydrodata` must have at least a precip or in/outflow column"
            raise ValueError(msg)

        self.stormclass = stormclass or Storm

        # static input
        self._raw_data = hydrodata
        self.precipcol = precipcol
        self.inflowcol = inflowcol
        self.outflowcol = outflowcol
        self.baseflowcol = baseflowcol
        self.stormcol = stormcol
        self.tempcol = tempcol
        self.outputfreq = pandas.offsets.Minute(outputfreqMinutes)
        self.intereventHours = intereventHours
        # number of resampled observations that make up the dry window
        self.intereventPeriods = MIN_PER_HOUR / self.outputfreq.n * self.intereventHours
        self.minprecip = minprecip
        self.mininflow = mininflow
        self.minoutflow = minoutflow
        self.volume_conversion = volume_conversion
        self.lowmem = lowmem

        # lazily-computed properties (see below)
        self._data = None
        self._all_storms = None
        self._storms = None
        self._storm_stats = None

    @property
    def data(self):
        """Resampled record with the `storm` column added (cached).
        With ``lowmem``, dry (storm == 0) rows are dropped."""
        if self._data is None:
            self._data = self._define_storms()
            if self.lowmem:
                self._data = self._data[self._data[self.stormcol] != 0]
        return self._data

    @property
    def all_storms(self):
        """Dict of {storm number: Storm} for every parsed storm (cached)."""
        if self._all_storms is None:
            self._all_storms = {}
            for storm_number in self.data[self.stormcol].unique():
                # storm number 0 means "not in any storm"
                if storm_number > 0:
                    this_storm = self.stormclass(
                        self.data,
                        storm_number,
                        precipcol=self.precipcol,
                        inflowcol=self.inflowcol,
                        outflowcol=self.outflowcol,
                        tempcol=self.tempcol,
                        stormcol=self.stormcol,
                        volume_conversion=self.volume_conversion,
                        freqMinutes=self.outputfreq.n,
                    )
                    self._all_storms[storm_number] = this_storm

        return self._all_storms

    @property
    def storms(self):
        """Subset of ``all_storms`` excluding "small" storms, per the
        min* thresholds given at construction (cached)."""
        if self._storms is None:
            self._storms = {}
            for snum, storm in self.all_storms.items():
                is_small = storm.is_small(
                    minprecip=self.minprecip,
                    mininflow=self.mininflow,
                    minoutflow=self.minoutflow,
                )
                if not is_small:
                    self._storms[snum] = storm

        return self._storms

    @property
    def storm_stats(self):
        """DataFrame of per-storm summary statistics (one row per
        non-small storm), sorted by storm number."""
        col_order = [
            "Storm Number",
            "Antecedent Days",
            "Season",
            "Start Date",
            "End Date",
            "Duration Hours",
            "Peak Precip Intensity",
            "Total Precip Depth",
            "Total Inflow Volume",
            "Peak Inflow",
            "Total Outflow Volume",
            "Peak Outflow",
            "Peak Lag Hours",
            "Centroid Lag Hours",
        ]
        if self._storm_stats is None:
            storm_stats = pandas.DataFrame(
                [self.storms[sn].summary_dict for sn in self.storms]
            )

            self._storm_stats = storm_stats[col_order]

        # a freshly-sorted copy is returned on every access
        return self._storm_stats.sort_values(by=["Storm Number"]).reset_index(drop=True)

    def _define_storms(self, debug=False):
        # delegate the storm-parsing heavy lifting to the module function
        parsed = parse_storm_events(
            self._raw_data,
            self.intereventHours,
            self.outputfreq.n,
            precipcol=self.precipcol,
            inflowcol=self.inflowcol,
            outflowcol=self.outflowcol,
            baseflowcol=self.baseflowcol,
            stormcol="storm",
            debug=debug,
        )

        return parsed

    def getStormFromTimestamp(self, timestamp, lookback_hours=0, smallstorms=False):
        """ Get the storm associdated with a give (sample) date

        Parameters
        ----------
        timestamp : pandas.Timestamp
            The date/time for which to search within the hydrologic
            record.
        lookback_hours : positive int or float, optional (default = 0)
            If no storm is actively occuring at the provided timestamp,
            we can optionally look backwards in the hydrologic record a
            fixed amount of time (specified in hours). Negative values
            are ignored.
        smallstorms : bool, optional (default = False)
            If True, small storms will be included in the search.

        Returns
        -------
        storm_number : int
        storm : wqio.Storm
        """

        # santize date input
        timestamp = validate.timestamp(timestamp)

        # check lookback hours
        if lookback_hours < 0:
            raise ValueError("`lookback_hours` must be greater than 0")

        # initial search for the storm
        storm_number = int(self.data.loc[:timestamp, self.stormcol].iloc[-1])

        # look backwards if we have too
        if (storm_number == 0 or pandas.isnull(storm_number)) and lookback_hours != 0:
            lookback_time = timestamp - pandas.offsets.Hour(lookback_hours)
            storms = self.data.loc[lookback_time:timestamp, [self.stormcol]]
            storms = storms[storms > 0].dropna()

            if storms.shape[0] == 0:
                # no storm
                storm_number = None
            else:
                # storm w/i the lookback period
                # NOTE(review): `storms` is a DataFrame here, so
                # `int(storms.iloc[-1])` converts a 1-element Series --
                # deprecated in newer pandas; confirm before upgrading.
                storm_number = int(storms.iloc[-1])

        # return storm_number and storms
        if smallstorms:
            return storm_number, self.all_storms.get(storm_number, None)
        else:
            return storm_number, self.storms.get(storm_number, None)

    def histogram(self, valuecol, bins, **factoropts):
        """ Plot a faceted, categorical histogram of storms.

        Parameters
        ----------
        valuecol : str, optional
            The name of the column that should be categorized and plotted.
        bins : array-like, optional
            The right-edges of the histogram bins.
        factoropts : keyword arguments, optional
            Options passed directly to seaborn.factorplot

        Returns
        -------
        fig : seaborn.FacetGrid

        See also
        --------
        viz.categorical_histogram
        seaborn.factorplot
        """

        fg = viz.categorical_histogram(self.storm_stats, valuecol, bins, **factoropts)
        fg.fig.tight_layout()
        return fg
class DrainageArea(object):
    """Simple representation of a BMP's drainage area.

    Units are not enforced, so keep them consistent yourself. The BMP
    area and the "total" catchment area are assumed to be mutually
    exclusive -- i.e., the watershed outlet is at the BMP inlet.

    Parameters
    ----------
    total_area : float, optional (default = 1.0)
        The total geometric area of the BMP's catchment.
    imp_area : float, optional (default = 1.0)
        The impervious area of the BMP's catchment.
    bmp_area : float, optional (default = 0.0)
        The geometric area of the BMP itself.
    """

    def __init__(self, total_area=1.0, imp_area=1.0, bmp_area=0.0):
        self.total_area = float(total_area)
        self.imp_area = float(imp_area)
        self.bmp_area = float(bmp_area)

    def simple_method(self, storm_depth, volume_conversion=1.0, annual_factor=1.0):
        """Estimate runoff volume via the Simple Method.

        Parameters
        ----------
        storm_depth : float
            Depth of the storm.
        volume_conversion : float, optional (default = 1.0)
            Factor converting [area units] x [depth units] into the
            desired [volume units]. If [area] = m^2, [depth] = mm, and
            [volume] = L, then `volume_conversion` = 1.
        annual_factor : float, optional (default = 1.0)
            The Simple Method's annual correction factor to account for
            small storms that do not produce runoff.

        Returns
        -------
        runoff_volume : float
            The volume of water entering the BMP immediately downstream
            of the drainage area.
        """
        # volumetric runoff coefficient from the impervious fraction
        runoff_coeff = 0.05 + 0.9 * (self.imp_area / self.total_area)

        # runoff produced per unit of storm depth: catchment runoff
        # (corrected annually) plus direct precipitation on the BMP
        per_depth_drainage = runoff_coeff * self.total_area * volume_conversion
        per_depth_bmp = self.bmp_area * volume_conversion

        return (per_depth_drainage * annual_factor + per_depth_bmp) * storm_depth
| import warnings
import numpy
from matplotlib import pyplot
from matplotlib import dates
from matplotlib import gridspec
import seaborn
import pandas
from wqio import utils
from wqio import viz
from wqio import validate
from pandas.plotting import register_matplotlib_converters
# let pandas Timestamps plot directly on matplotlib axes
register_matplotlib_converters()

# time-unit conversion constants used throughout this module
SEC_PER_MINUTE = 60.0
MIN_PER_HOUR = 60.0
HOUR_PER_DAY = 24.0
SEC_PER_HOUR = SEC_PER_MINUTE * MIN_PER_HOUR
SEC_PER_DAY = SEC_PER_HOUR * HOUR_PER_DAY
def _wet_first_row(df, wetcol, diffcol):
# make sure that if the first record is associated with the first
# storm if it's wet
firstrow = df.iloc[0]
if firstrow[wetcol]:
df.loc[firstrow.name, diffcol] = 1
return df
def _wet_window_diff(is_wet, ie_periods):
return (
is_wet.rolling(int(ie_periods), min_periods=1)
.apply(lambda window: window.any(), raw=False)
.diff()
)
def parse_storm_events(
    data,
    intereventHours,
    outputfreqMinutes,
    precipcol=None,
    inflowcol=None,
    outflowcol=None,
    baseflowcol=None,
    stormcol="storm",
    debug=False,
):
    """Parses the hydrologic data into distinct storms.

    In this context, a storm is defined as starting whenever the
    hydrologic records shows non-zero precipitation or [in|out]flow
    from the BMP after a minimum inter-event dry period duration
    specified in the the function call. The storms ends the observation
    *after* the last non-zero precipitation or flow value.

    Parameters
    ----------
    data : pandas.DataFrame
    intereventHours : float
        The Inter-Event dry duration (in hours) that classifies the
        next hydrlogic activity as a new event.
    precipcol : string, optional (default = None)
        Name of column in `hydrodata` containing precipiation data.
    inflowcol : string, optional (default = None)
        Name of column in `hydrodata` containing influent flow data.
    outflowcol : string, optional (default = None)
        Name of column in `hydrodata` containing effluent flow data.
    baseflowcol : string, optional (default = None)
        Name of column in `hydrodata` containing boolean indicating
        which records are considered baseflow.
    stormcol : string (default = 'storm')
        Name of column in `hydrodata` indentifying distinct storms.
    debug : bool (default = False)
        If True, diagnostic columns will not be dropped prior to
        returning the dataframe of parsed_storms.

    Writes
    ------
    None

    Returns
    -------
    parsed_storms : pandas.DataFrame
        Copy of the origin `hydrodata` DataFrame, but resampled to a
        fixed frequency, columns possibly renamed, and a `storm` column
        added to denote the storm to which each record belongs. Records
        where `storm` == 0 are not a part of any storm.
    """

    # pull out the rain and flow data; missing quantities are filled in
    # as all-NaN (or all-False for baseflow) placeholder columns.
    # NOTE(review): these assignments write into the caller's frame --
    # confirm that mutating `data` in place is intended.
    if precipcol is None:
        precipcol = "precip"
        data.loc[:, precipcol] = numpy.nan

    if inflowcol is None:
        inflowcol = "inflow"
        data.loc[:, inflowcol] = numpy.nan

    if outflowcol is None:
        outflowcol = "outflow"
        data.loc[:, outflowcol] = numpy.nan

    if baseflowcol is None:
        baseflowcol = "baseflow"
        data.loc[:, baseflowcol] = False

    # bool column where True means there's rain or flow of some kind
    water_columns = [inflowcol, outflowcol, precipcol]
    cols_to_use = water_columns + [baseflowcol]

    # depth is summed during resampling; flow rates are averaged
    agg_dict = {
        precipcol: numpy.sum,
        inflowcol: numpy.mean,
        outflowcol: numpy.mean,
        baseflowcol: numpy.any,
    }

    freq = pandas.offsets.Minute(outputfreqMinutes)
    # number of resampled observations in the inter-event dry window
    ie_periods = int(MIN_PER_HOUR / freq.n * intereventHours)

    # periods between storms are where the cumulative number
    # of storms that have ended are equal to the cumulative
    # number of storms that have started.
    # Stack Overflow: http://tinyurl.com/lsjkr9x
    res = (
        data.resample(freq)
        .agg(agg_dict)
        .loc[:, lambda df: df.columns.isin(cols_to_use)]
        # wet = any water quantity > 0 that is not flagged as baseflow
        .assign(
            __wet=lambda df: numpy.any(df[water_columns] > 0, axis=1) & ~df[baseflowcol]
        )
        # +1 where a wet window opens, -1 where it closes
        .assign(__windiff=lambda df: _wet_window_diff(df["__wet"], ie_periods))
        .pipe(_wet_first_row, "__wet", "__windiff")
        .assign(__event_start=lambda df: df["__windiff"] == 1)
        .assign(__event_end=lambda df: df["__windiff"].shift(-1 * ie_periods) == -1)
        # running count of starts gives each event a candidate number
        .assign(__storm=lambda df: df["__event_start"].cumsum())
        .assign(
            storm=lambda df: numpy.where(
                df["__storm"] == df["__event_end"].shift(2).cumsum(),
                0,  # inter-event periods marked as zero
                df["__storm"],  # actual events keep their number
            )
        )
    )

    if not debug:
        # drop the __-prefixed diagnostic columns
        res = res.loc[:, res.columns.map(lambda c: not c.startswith("__"))]

    return res
class Storm(object):
""" Object representing a storm event
Parameters
----------
dataframe : pandas.DataFrame
A datetime-indexed Dataframe containing all of the hydrologic
data and am interger column indentifying distinct storms.
stormnumber : int
The storm we care about.
precipcol, inflowcol, outflow, tempcol, stormcol : string, optional
Names for columns representing each hydrologic quantity.
freqMinutes : float (default = 5)
The time period, in minutes, between observations.
volume_conversion : float, optional (default = 1)
Conversion factor to go from flow to volume for a single
observation.
"""
# TODO: rename freqMinutes to periodMinutes
def __init__(
    self,
    dataframe,
    stormnumber,
    precipcol="precip",
    inflowcol="inflow",
    outflowcol="outflow",
    tempcol="temp",
    stormcol="storm",
    freqMinutes=5,
    volume_conversion=1,
):
    """Slice *dataframe* down to the rows belonging to *stormnumber*
    and precompute basic event metadata (duration, antecedent dry
    period, plot styling). All derived quantities are lazily cached
    in the ``_``-prefixed attributes initialized here.
    """
    self.inflowcol = inflowcol
    self.outflowcol = outflowcol
    self.precipcol = precipcol
    self.tempcol = tempcol
    self.stormnumber = stormnumber
    self.freqMinutes = freqMinutes
    # scale a per-observation flow rate into a per-observation volume
    self.volume_conversion = volume_conversion * SEC_PER_MINUTE * self.freqMinutes

    # basic data
    self.data = dataframe[dataframe[stormcol] == self.stormnumber].copy()
    self.hydrofreq_label = "{0} min".format(self.freqMinutes)

    # tease out start/stop info
    self.start = self.data.index[0]
    self.end = self.data.index[-1]
    self._season = utils.getSeason(self.start)

    # storm duration (hours)
    duration = self.end - self.start
    self.duration_hours = duration.total_seconds() / SEC_PER_HOUR

    # antecedent dry period (hours)
    if self.stormnumber > 1:
        # dry time since the end of the previous storm
        prev_storm_mask = dataframe[stormcol] == self.stormnumber - 1
        previous_end = dataframe[prev_storm_mask].index[-1]
        antecedent_timedelta = self.start - previous_end
        self.antecedent_period_days = (
            antecedent_timedelta.total_seconds() / SEC_PER_DAY
        )
    else:
        # first storm in the record: nothing to measure against
        self.antecedent_period_days = numpy.nan

    # quantities
    self._precip = None
    self._inflow = None
    self._outflow = None

    # starts and stop
    self._precip_start = None
    self._precip_end = None
    self._inflow_start = None
    self._inflow_end = None
    self._outflow_start = None
    self._outflow_end = None

    # peaks
    self._peak_precip_intensity = None
    self._peak_inflow = None
    self._peak_outflow = None

    # times of peaks
    self._peak_precip_intensity_time = None
    self._peak_inflow_time = None
    self._peak_outflow_time = None
    self._peak_lag_hours = None

    # centroids
    self._centroid_precip_time = None
    self._centroid_inflow_time = None
    self._centroid_outflow_time = None
    self._centroid_lag_hours = None

    # totals
    self._total_precip_depth = None
    self._total_inflow_volume = None
    self._total_outflow_volume = None

    # per-column plot styling used by the plotting methods
    self.meta = {
        self.outflowcol: {
            "name": "Flow (calculated, L/s)",
            "ylabel": "Effluent flow (L/s)",
            "color": "CornFlowerBlue",
            "linewidth": 1.5,
            "alpha": 0.5,
            "ymin": 0,
        },
        self.inflowcol: {
            "name": "Inflow (estimated, L/s)",
            "ylabel": "Estimated influent flow (L/s)",
            "color": "Maroon",
            "linewidth": 1.5,
            "alpha": 0.5,
            "ymin": 0,
        },
        self.precipcol: {
            "name": "Precip (mm)",
            "ylabel": "%s Precip.\nDepth (mm)" % self.hydrofreq_label,
            "color": "DarkGreen",
            "linewidth": 1.5,
            "alpha": 0.4,
            "ymin": 0,
        },
        self.tempcol: {
            "name": "Air Temp (deg C)",
            "ylabel": "Air Temperature (deg. C)",
            "color": "DarkGoldenRod",
            "linewidth": 1.5,
            "alpha": 0.5,
            "ymin": None,
        },
    }

    self._summary_dict = None
@property
def precip(self):
    """Non-zero precip records (cached); empty array when no precipcol."""
    if self._precip is None:
        if self.precipcol is not None:
            self._precip = self.data[self.data[self.precipcol] > 0][self.precipcol]
        else:
            self._precip = numpy.array([])
    return self._precip
@property
def inflow(self):
    """Non-zero inflow records (cached); empty array when no inflowcol."""
    if self._inflow is None:
        if self.inflowcol is not None:
            self._inflow = self.data[self.data[self.inflowcol] > 0][self.inflowcol]
        else:
            self._inflow = numpy.array([])
    return self._inflow
@property
def outflow(self):
    """Non-zero outflow records (cached); empty array when no outflowcol."""
    if self._outflow is None:
        if self.outflowcol is not None:
            self._outflow = self.data[self.data[self.outflowcol] > 0][
                self.outflowcol
            ]
        else:
            self._outflow = numpy.array([])
    return self._outflow
@property
def has_precip(self):
    """True when the storm has at least one non-zero precip record."""
    return self.precip.shape[0] > 0
@property
def has_inflow(self):
    """True when the storm has at least one non-zero inflow record."""
    return self.inflow.shape[0] > 0
@property
def has_outflow(self):
    """True when the storm has at least one non-zero outflow record."""
    return self.outflow.shape[0] > 0
@property
def season(self):
    """Season of the storm's start date (settable)."""
    return self._season
@season.setter
def season(self, value):
    # allow callers to override the auto-detected season
    self._season = value
# starts and stops
@property
def precip_start(self):
    """Timestamp of the first wet precip record (cached); None if dry."""
    if self._precip_start is None and self.has_precip:
        self._precip_start = self._get_event_time(self.precipcol, "start")
    return self._precip_start
@property
def precip_end(self):
    """Timestamp of the last wet precip record (cached); None if dry."""
    if self._precip_end is None and self.has_precip:
        self._precip_end = self._get_event_time(self.precipcol, "end")
    return self._precip_end
@property
def inflow_start(self):
    """Timestamp of the first non-zero inflow record (cached)."""
    if self._inflow_start is None and self.has_inflow:
        self._inflow_start = self._get_event_time(self.inflowcol, "start")
    return self._inflow_start
@property
def inflow_end(self):
    """Timestamp of the last non-zero inflow record (cached)."""
    if self._inflow_end is None and self.has_inflow:
        self._inflow_end = self._get_event_time(self.inflowcol, "end")
    return self._inflow_end
@property
def outflow_start(self):
    """Timestamp of the first non-zero outflow record (cached)."""
    if self._outflow_start is None and self.has_outflow:
        self._outflow_start = self._get_event_time(self.outflowcol, "start")
    return self._outflow_start
@property
def outflow_end(self):
    """Timestamp of the last non-zero outflow record (cached)."""
    if self._outflow_end is None and self.has_outflow:
        self._outflow_end = self._get_event_time(self.outflowcol, "end")
    return self._outflow_end
@property
def _peak_depth(self):
    """Maximum per-observation precip depth; None when the storm is dry."""
    if self.has_precip:
        return self.precip.max()
@property
def peak_precip_intensity(self):
    """Peak precip intensity in depth-per-hour (cached); converts the
    per-observation peak depth using the record frequency."""
    if self._peak_precip_intensity is None and self.has_precip:
        self._peak_precip_intensity = (
            self._peak_depth * MIN_PER_HOUR / self.freqMinutes
        )
    return self._peak_precip_intensity
@property
def peak_inflow(self):
    """Maximum influent flow rate (cached); None when there is no inflow."""
    if self._peak_inflow is None and self.has_inflow:
        self._peak_inflow = self.inflow.max()
    return self._peak_inflow
@property
def peak_outflow(self):
    """Maximum effluent flow rate (cached); None when there is no outflow."""
    if self._peak_outflow is None and self.has_outflow:
        self._peak_outflow = self.outflow.max()
    return self._peak_outflow
@property
def total_precip_depth(self):
    """Total precip depth over the storm (cached); None when dry."""
    if self._total_precip_depth is None and self.has_precip:
        self._total_precip_depth = self.data[self.precipcol].sum()
    return self._total_precip_depth
@property
def total_inflow_volume(self):
    """Total influent volume (flow sum x ``volume_conversion``), cached;
    None when the storm has no inflow."""
    if self._total_inflow_volume is None and self.has_inflow:
        self._total_inflow_volume = (
            self.data[self.inflowcol].sum() * self.volume_conversion
        )
    return self._total_inflow_volume
@property
def total_outflow_volume(self):
    """Total effluent volume (flow sum x ``volume_conversion``), cached;
    None when the storm has no outflow."""
    if self._total_outflow_volume is None and self.has_outflow:
        self._total_outflow_volume = (
            self.data[self.outflowcol].sum() * self.volume_conversion
        )
    return self._total_outflow_volume
@property
def centroid_precip_time(self):
    """Mass-weighted centroid timestamp of precipitation (cached)."""
    if self._centroid_precip_time is None and self.has_precip:
        self._centroid_precip_time = self._compute_centroid(self.precipcol)
    return self._centroid_precip_time
@property
def centroid_inflow_time(self):
    """Mass-weighted centroid timestamp of inflow (cached)."""
    if self._centroid_inflow_time is None and self.has_inflow:
        self._centroid_inflow_time = self._compute_centroid(self.inflowcol)
    return self._centroid_inflow_time
@property
def centroid_outflow_time(self):
    """Mass-weighted centroid timestamp of outflow (cached)."""
    if self._centroid_outflow_time is None and self.has_outflow:
        self._centroid_outflow_time = self._compute_centroid(self.outflowcol)
    return self._centroid_outflow_time
@property
def centroid_lag_hours(self):
    """Hours between the inflow and outflow centroids (cached);
    None unless both centroids exist."""
    if (
        self._centroid_lag_hours is None
        and self.centroid_outflow_time is not None
        and self.centroid_inflow_time is not None
    ):
        self._centroid_lag_hours = (
            self.centroid_outflow_time - self.centroid_inflow_time
        ).total_seconds() / SEC_PER_HOUR
    return self._centroid_lag_hours
@property
def peak_precip_intensity_time(self):
    """Timestamp of the first record at the maximum precip depth (cached)."""
    if self._peak_precip_intensity_time is None and self.has_precip:
        PI_selector = self.data[self.precipcol] == self._peak_depth
        self._peak_precip_intensity_time = self.data[PI_selector].index[0]
    return self._peak_precip_intensity_time
@property
def peak_inflow_time(self):
    """Timestamp of the first record at the peak inflow (cached)."""
    if self._peak_inflow_time is None and self.has_inflow:
        PInf_selector = self.data[self.inflowcol] == self.peak_inflow
        self._peak_inflow_time = self.data[PInf_selector].index[0]
    return self._peak_inflow_time
@property
def peak_outflow_time(self):
    """Timestamp of the first record at the peak outflow (cached);
    None when no record matches."""
    if self._peak_outflow_time is None and self.has_outflow:
        PEff_selector = self.data[self.outflowcol] == self.peak_outflow
        # guard against an empty match (unlike peak_inflow_time)
        if PEff_selector.sum() > 0:
            self._peak_outflow_time = self.data[PEff_selector].index[0]
    return self._peak_outflow_time
@property
def peak_lag_hours(self):
    """Hours between the peak inflow and peak outflow (cached);
    None unless both peak times exist."""
    if (
        self._peak_lag_hours is None
        and self.peak_outflow_time is not None
        and self.peak_inflow_time is not None
    ):
        time_delta = self.peak_outflow_time - self.peak_inflow_time
        self._peak_lag_hours = time_delta.total_seconds() / SEC_PER_HOUR
    return self._peak_lag_hours
@property
def summary_dict(self):
    """Cached dict of the storm's summary statistics, keyed by the
    human-readable column names used in ``HydroRecord.storm_stats``."""
    if self._summary_dict is None:
        self._summary_dict = {
            "Storm Number": self.stormnumber,
            "Antecedent Days": self.antecedent_period_days,
            "Start Date": self.start,
            "End Date": self.end,
            "Duration Hours": self.duration_hours,
            "Peak Precip Intensity": self.peak_precip_intensity,
            "Total Precip Depth": self.total_precip_depth,
            "Total Inflow Volume": self.total_inflow_volume,
            "Peak Inflow": self.peak_inflow,
            "Total Outflow Volume": self.total_outflow_volume,
            "Peak Outflow": self.peak_outflow,
            "Peak Lag Hours": self.peak_lag_hours,
            "Centroid Lag Hours": self.centroid_lag_hours,
            "Season": self.season,
        }
    return self._summary_dict
def is_small(self, minprecip=0.0, mininflow=0.0, minoutflow=0.0):
    """ Determines whether a storm can be considered "small".

    Parameters
    ----------
    minprecip, mininflow, minoutflow : float, optional (default = 0)
        The minimum amount of each hydrologic quantity below which a
        storm can be considered "small".

    Returns
    -------
    storm_is_small : bool
        True if the storm is considered small.
    """
    # a missing (None) total can never flag the storm as small
    storm_is_small = (
        (
            self.total_precip_depth is not None
            and self.total_precip_depth < minprecip
        )
        or (
            self.total_inflow_volume is not None
            and self.total_inflow_volume < mininflow
        )
        or (
            self.total_outflow_volume is not None
            and self.total_outflow_volume < minoutflow
        )
    )
    return storm_is_small
def _get_event_time(self, column, bound):
    """Timestamp of the first ("start") or last ("end") record where
    *column* is positive; warns and returns None when there are none."""
    index_map = {"start": 0, "end": -1}
    quantity = self.data[self.data[column] > 0]
    if quantity.shape[0] == 0:
        warnings.warn("Storm has no {}".format(column), UserWarning)
    else:
        return quantity.index[index_map[bound]]
def _get_max_quantity(self, column):
    """Largest value observed in *column* during the storm."""
    return self.data[column].max()
def _compute_centroid(self, column):
    """Mass-weighted centroid timestamp of *column*, or None when the
    weights sum to zero (NaN centroid)."""
    # ordinal time index of storm
    time_idx = [
        dates.date2num(idx.to_pydatetime()) for idx in self.data.index.tolist()
    ]
    # weighted mean of the ordinal times, weights = the quantity itself
    centroid = numpy.sum(self.data[column] * time_idx) / numpy.sum(
        self.data[column]
    )
    if numpy.isnan(centroid):
        return None
    else:
        # num2date returns an aware datetime; strip the tz for consistency
        return pandas.Timestamp(dates.num2date(centroid)).tz_convert(None)
def _plot_centroids(self, ax, yfactor=0.5):
    """Draw precip/effluent centroid markers on *ax* (plus an arrow
    between them) and return proxy artists and labels for a legend.

    NOTE(review): this reads ``self.centroid_precip`` and
    ``self.centroid_flow``, but the properties defined on this class are
    ``centroid_precip_time`` / ``centroid_inflow_time`` /
    ``centroid_outflow_time``. Confirm these attributes are defined
    elsewhere; otherwise this method raises AttributeError.
    """
    artists = []
    labels = []
    # place the markers at a fixed fraction of the y-axis range
    y_val = yfactor * ax.get_ylim()[1]
    if self.centroid_precip is not None:
        ax.plot(
            [self.centroid_precip],
            [y_val],
            color="DarkGreen",
            marker="o",
            linestyle="none",
            zorder=20,
            markersize=6,
        )
        # proxy artist so the legend can show the marker
        artists.append(
            pyplot.Line2D(
                [0],
                [0],
                marker=".",
                markersize=6,
                linestyle="none",
                color="DarkGreen",
            )
        )
        labels.append("Precip. centroid")
    if self.centroid_flow is not None:
        ax.plot(
            [self.centroid_flow],
            [y_val],
            color="CornflowerBlue",
            marker="s",
            linestyle="none",
            zorder=20,
            markersize=6,
        )
        artists.append(
            pyplot.Line2D(
                [0],
                [0],
                marker="s",
                markersize=6,
                linestyle="none",
                color="CornflowerBlue",
            )
        )
        labels.append("Effluent centroid")
    if self.centroid_precip is not None and self.centroid_flow is not None:
        # arrow from the precip centroid to the flow centroid
        ax.annotate(
            "",
            (self.centroid_flow, y_val),
            arrowprops=dict(arrowstyle="-|>"),
            xytext=(self.centroid_precip, y_val),
        )
    return artists, labels
def plot_hydroquantity(
    self, quantity, ax=None, label=None, otherlabels=None, artists=None
):
    """ Draws a hydrologic quantity to a matplotlib axes.

    Parameters
    ----------
    quantity : string
        Column name of the quantity you want to plot.
    ax : matplotlib axes object, optional
        The axes on which the data will be plotted. If None, a new
        one will be created.
    label : string, optional
        How the series should be labeled in the figure legend.
        Defaults to the column name.
    otherlabels : list of strings, optional
        A list of other legend labels that have already been plotted
        to ``ax``. If provided, ``label`` will be appended in place.
        If not provided, a new list will be created.
    artists : list of matplotlib artists, optional
        A list of other legend items that have already been plotted
        to ``ax``. If provided, the artist created will be appended
        in place. If not provided, a new list will be created.

    Returns
    -------
    fig : matplotlib.Figure
        The figure containing the plot.
    labels : list of strings
        Labels to be included in a legend for the figure.
    artists : list of matplotlib artists
        Symbology for the figure legend.
    """
    # setup the figure
    fig, ax = validate.axes(ax)
    if label is None:
        label = quantity
    # select the plot props based on the column
    try:
        meta = self.meta[quantity]
    except KeyError:
        raise KeyError("{} not available".format(quantity))
    # plot the data as a filled area; NaNs are treated as zero
    self.data[quantity].fillna(0).plot(
        ax=ax, kind="area", color=meta["color"], alpha=meta["alpha"], zorder=5
    )
    # a rectangle proxy stands in for the area plot in the legend
    if artists is not None:
        proxy = pyplot.Rectangle(
            (0, 0), 1, 1, facecolor=meta["color"], linewidth=0, alpha=meta["alpha"]
        )
        artists.append(proxy)
    if otherlabels is not None:
        otherlabels.append(label)
    return fig, otherlabels, artists
def summaryPlot(
    self,
    axratio=2,
    filename=None,
    showLegend=True,
    precip=True,
    inflow=True,
    outflow=True,
    figopts=None,
    serieslabels=None,
):
    """Create a figure showing the hydrologic record (flow and
    precipitation) of the storm.

    Parameters
    ----------
    axratio : float or int, optional (default = 2)
        Relative height of the flow axis compared to the
        precipitation axis.
    filename : string, optional (default = None)
        Filename to which the figure will be saved.
    showLegend : bool, optional (default = True)
        Whether to draw a legend on the precipitation axis.
    precip, inflow, outflow : bool, optional (default = True)
        Toggle plotting of each hydrologic quantity.
    figopts : dict, optional
        Keyword arguments passed on to ``pyplot.figure``.
    serieslabels : dict, optional
        Maps column names to custom legend labels.

    Returns
    -------
    fig : matplotlib.Figure
    artists : list of matplotlib artists
    labels : list of strings
    """
    # BUGFIX: the old defaults (`figopts={}`, `serieslabels={}`) were
    # shared, mutable default arguments, and `serieslabels.pop(...)`
    # below mutated them -- so custom labels silently disappeared on
    # later calls and caller-supplied dicts were destructively emptied.
    # Use None sentinels and work on a private copy instead.
    figopts = {} if figopts is None else figopts
    serieslabels = dict(serieslabels) if serieslabels is not None else {}
    fig = pyplot.figure(**figopts)
    gs = gridspec.GridSpec(
        nrows=2, ncols=1, height_ratios=[1, axratio], hspace=0.12
    )
    rainax = fig.add_subplot(gs[0])
    rainax.yaxis.set_major_locator(pyplot.MaxNLocator(5))
    flowax = fig.add_subplot(gs[1], sharex=rainax)
    # legend proxy artists accumulated by plot_hydroquantity (in place)
    artists = []
    labels = []
    # `serieslabels.pop(item, item)` returns the custom label when one
    # was provided and falls back to the column name itself otherwise.
    if inflow:
        fig, labels, artists = self.plot_hydroquantity(
            self.inflowcol,
            ax=flowax,
            label=serieslabels.pop(self.inflowcol, self.inflowcol),
            otherlabels=labels,
            artists=artists,
        )
    if outflow:
        fig, labels, artists = self.plot_hydroquantity(
            self.outflowcol,
            ax=flowax,
            label=serieslabels.pop(self.outflowcol, self.outflowcol),
            otherlabels=labels,
            artists=artists,
        )
    if precip:
        fig, labels, artists = self.plot_hydroquantity(
            self.precipcol,
            ax=rainax,
            label=serieslabels.pop(self.precipcol, self.precipcol),
            otherlabels=labels,
            artists=artists,
        )
    # rain is conventionally drawn hanging down from the top axis
    rainax.invert_yaxis()
    if showLegend:
        leg = rainax.legend(
            artists,
            labels,
            fontsize=7,
            ncol=1,
            markerscale=0.75,
            frameon=False,
            loc="lower right",
        )
        leg.get_frame().set_zorder(25)
        _leg = [leg]
    else:
        _leg = None
    seaborn.despine(ax=rainax, bottom=True, top=False)
    seaborn.despine(ax=flowax)
    flowax.set_xlabel("")
    rainax.set_xlabel("")
    if filename is not None:
        fig.savefig(
            filename,
            dpi=300,
            transparent=True,
            bbox_inches="tight",
            bbox_extra_artists=_leg,
        )
    return fig, artists, labels
class HydroRecord(object):
    """ Class representing an entire hydrologic record.

    Parameters
    ----------
    hydrodata : pandas.DataFrame
        DataFrame of hydrologic data of the storm. Should contain
        a unique index of type pandas.DatetimeIndex.
    precipcol : string, optional (default = None)
        Name of column in `hydrodata` containing precipitation data.
    inflowcol : string, optional (default = None)
        Name of column in `hydrodata` containing influent flow data.
    outflowcol : string, optional (default = None)
        Name of column in `hydrodata` containing effluent flow data.
    baseflowcol : string, optional (default = None)
        Name of column in `hydrodata` containing a boolean indicating
        which records are considered baseflow.
    stormcol : string (default = 'storm')
        Name of column in `hydrodata` identifying distinct storms.
    minprecip, mininflow, minoutflow : float, optional (default = 0)
        The minimum amount of each hydrologic quantity below which a
        storm can be considered "small".
    outputfreqMinutes : int, optional (default = 10)
        The default frequency (minutes) to which all data will be
        resampled. Precipitation data will be summed up across
        multiple timesteps during resampling, while flow will be
        averaged.
    intereventHours : int, optional (default = 6)
        The dry duration (no flow or rain) required to signal the end of
        a storm.
    volume_conversion : float, optional (default = 1)
        Conversion factor to go from flow to volume for a single
        observation.
    stormclass : object, optional
        Defaults to wqio.hydro.Storm. Can be a subclass of that in cases
        where custom functionality is needed.
    lowmem : bool (default = False)
        If True, all dry observations are removed from the dataframe.
    """

    # TODO: rename `outputfreqMinutes` to `outputPeriodMinutes`
    def __init__(
        self,
        hydrodata,
        precipcol=None,
        inflowcol=None,
        outflowcol=None,
        baseflowcol=None,
        tempcol=None,
        stormcol="storm",
        minprecip=0.0,
        mininflow=0.0,
        minoutflow=0.0,
        outputfreqMinutes=10,
        intereventHours=6,
        volume_conversion=1,
        stormclass=None,
        lowmem=False,
    ):
        # validate input: at least one hydrologic column must be given
        if precipcol is None and inflowcol is None and outflowcol is None:
            msg = "`hydrodata` must have at least a precip or in/outflow column"
            raise ValueError(msg)
        self.stormclass = stormclass or Storm
        # static input
        self._raw_data = hydrodata
        self.precipcol = precipcol
        self.inflowcol = inflowcol
        self.outflowcol = outflowcol
        self.baseflowcol = baseflowcol
        self.stormcol = stormcol
        self.tempcol = tempcol
        self.outputfreq = pandas.offsets.Minute(outputfreqMinutes)
        self.intereventHours = intereventHours
        # number of observation periods that make up the dry inter-event gap
        self.intereventPeriods = MIN_PER_HOUR / self.outputfreq.n * self.intereventHours
        self.minprecip = minprecip
        self.mininflow = mininflow
        self.minoutflow = minoutflow
        self.volume_conversion = volume_conversion
        self.lowmem = lowmem
        # lazily-computed caches backing the properties below
        self._data = None
        self._all_storms = None
        self._storms = None
        self._storm_stats = None

    @property
    def data(self):
        # Lazily parse the raw record into storms; with `lowmem`, drop all
        # records that do not belong to any storm (storm number 0).
        if self._data is None:
            self._data = self._define_storms()
            if self.lowmem:
                self._data = self._data[self._data[self.stormcol] != 0]
        return self._data

    @property
    def all_storms(self):
        # Dict of {storm number: Storm object} for every storm in the
        # record, including "small" ones. Storm number 0 (dry) is skipped.
        if self._all_storms is None:
            self._all_storms = {}
            for storm_number in self.data[self.stormcol].unique():
                if storm_number > 0:
                    this_storm = self.stormclass(
                        self.data,
                        storm_number,
                        precipcol=self.precipcol,
                        inflowcol=self.inflowcol,
                        outflowcol=self.outflowcol,
                        tempcol=self.tempcol,
                        stormcol=self.stormcol,
                        volume_conversion=self.volume_conversion,
                        freqMinutes=self.outputfreq.n,
                    )
                    self._all_storms[storm_number] = this_storm
        return self._all_storms

    @property
    def storms(self):
        # Like `all_storms`, but with storms below the min precip/in/outflow
        # thresholds filtered out.
        if self._storms is None:
            self._storms = {}
            for snum, storm in self.all_storms.items():
                is_small = storm.is_small(
                    minprecip=self.minprecip,
                    mininflow=self.mininflow,
                    minoutflow=self.minoutflow,
                )
                if not is_small:
                    self._storms[snum] = storm
        return self._storms

    @property
    def storm_stats(self):
        # Summary table (one row per non-small storm), always returned
        # sorted by storm number with a fresh integer index.
        col_order = [
            "Storm Number",
            "Antecedent Days",
            "Season",
            "Start Date",
            "End Date",
            "Duration Hours",
            "Peak Precip Intensity",
            "Total Precip Depth",
            "Total Inflow Volume",
            "Peak Inflow",
            "Total Outflow Volume",
            "Peak Outflow",
            "Peak Lag Hours",
            "Centroid Lag Hours",
        ]
        if self._storm_stats is None:
            storm_stats = pandas.DataFrame(
                [self.storms[sn].summary_dict for sn in self.storms]
            )
            self._storm_stats = storm_stats[col_order]
        return self._storm_stats.sort_values(by=["Storm Number"]).reset_index(drop=True)

    def _define_storms(self, debug=False):
        # Delegate to the module-level parser with this record's column
        # names and timing parameters; returns the resampled dataframe
        # with a "storm" column added.
        parsed = parse_storm_events(
            self._raw_data,
            self.intereventHours,
            self.outputfreq.n,
            precipcol=self.precipcol,
            inflowcol=self.inflowcol,
            outflowcol=self.outflowcol,
            baseflowcol=self.baseflowcol,
            stormcol="storm",
            debug=debug,
        )
        return parsed

    def getStormFromTimestamp(self, timestamp, lookback_hours=0, smallstorms=False):
        """ Get the storm associated with a given (sample) date.

        Parameters
        ----------
        timestamp : pandas.Timestamp
            The date/time for which to search within the hydrologic
            record.
        lookback_hours : positive int or float, optional (default = 0)
            If no storm is actively occurring at the provided timestamp,
            we can optionally look backwards in the hydrologic record a
            fixed amount of time (specified in hours). Negative values
            raise a ValueError.
        smallstorms : bool, optional (default = False)
            If True, small storms will be included in the search.

        Returns
        -------
        storm_number : int or None
        storm : wqio.Storm or None
        """
        # sanitize date input
        timestamp = validate.timestamp(timestamp)
        # check lookback hours (zero is allowed; only negatives rejected)
        if lookback_hours < 0:
            raise ValueError("`lookback_hours` must be greater than 0")
        # initial search: storm number of the last record at/before timestamp
        storm_number = int(self.data.loc[:timestamp, self.stormcol].iloc[-1])
        # look backwards if we have to
        if (storm_number == 0 or pandas.isnull(storm_number)) and lookback_hours != 0:
            lookback_time = timestamp - pandas.offsets.Hour(lookback_hours)
            storms = self.data.loc[lookback_time:timestamp, [self.stormcol]]
            storms = storms[storms > 0].dropna()
            if storms.shape[0] == 0:
                # no storm within the lookback window
                storm_number = None
            else:
                # most recent storm within the lookback window
                # NOTE(review): `storms.iloc[-1]` is a one-row selection;
                # int() relies on single-element coercion -- confirm this
                # still works on the pinned pandas version.
                storm_number = int(storms.iloc[-1])
        # return the number and the Storm object (None if filtered/absent)
        if smallstorms:
            return storm_number, self.all_storms.get(storm_number, None)
        else:
            return storm_number, self.storms.get(storm_number, None)

    def histogram(self, valuecol, bins, **factoropts):
        """ Plot a faceted, categorical histogram of storms.

        Parameters
        ----------
        valuecol : str
            The name of the column that should be categorized and plotted.
        bins : array-like
            The right-edges of the histogram bins.
        factoropts : keyword arguments, optional
            Options passed directly to seaborn.factorplot

        Returns
        -------
        fig : seaborn.FacetGrid

        See also
        --------
        viz.categorical_histogram
        seaborn.factorplot
        """
        fg = viz.categorical_histogram(self.storm_stats, valuecol, bins, **factoropts)
        fg.fig.tight_layout()
        return fg
class DrainageArea(object):
    """A simple object representing the drainage area of a BMP.

    Units are not enforced, so keep them consistent yourself. The
    calculations available assume that the area of the BMP and the
    "total" area are mutually exclusive. In other words, the watershed
    outlet is at the BMP inlet.

    Parameters
    ----------
    total_area : float, optional (default = 1.0)
        The total geometric area of the BMP's catchment.
    imp_area : float, optional (default = 1.0)
        The impervious area of the BMP's catchment.
    bmp_area : float, optional (default = 0.0)
        The geometric area of the BMP itself.
    """

    def __init__(self, total_area=1.0, imp_area=1.0, bmp_area=0.0):
        self.total_area = float(total_area)
        self.imp_area = float(imp_area)
        self.bmp_area = float(bmp_area)

    def simple_method(self, storm_depth, volume_conversion=1.0, annual_factor=1.0):
        """Estimate runoff volume via the Simple Method.

        Parameters
        ----------
        storm_depth : float
            Depth of the storm.
        volume_conversion : float, optional (default = 1.0)
            Conversion factor to go from [area units] * [depth units] to
            the desired [volume units]. If [area] = m^2, [depth] = mm,
            and [volume] = L, then `volume_conversion` = 1.
        annual_factor : float, optional (default = 1.0)
            The Simple Method's annual correction factor to account for
            small storms that do not produce runoff.

        Returns
        -------
        runoff_volume : float
            The volume of water entering the BMP immediately downstream
            of the drainage area.
        """
        # volumetric runoff coefficient (Simple Method)
        runoff_coeff = 0.05 + 0.9 * (self.imp_area / self.total_area)
        # volume produced per unit of storm depth by the catchment and
        # by direct rainfall on the BMP's own surface
        per_depth_catchment = runoff_coeff * self.total_area * volume_conversion
        per_depth_bmp = self.bmp_area * volume_conversion
        return (per_depth_catchment * annual_factor + per_depth_bmp) * storm_depth
# Stack Overflow: http://tinyurl.com/lsjkr9x # inter-event periods marked as zero # actual events keep their number Object representing a storm event Parameters ---------- dataframe : pandas.DataFrame A datetime-indexed Dataframe containing all of the hydrologic data and am interger column indentifying distinct storms. stormnumber : int The storm we care about. precipcol, inflowcol, outflow, tempcol, stormcol : string, optional Names for columns representing each hydrologic quantity. freqMinutes : float (default = 5) The time period, in minutes, between observations. volume_conversion : float, optional (default = 1) Conversion factor to go from flow to volume for a single observation. # TODO: rename freqMinutes to periodMinutes # basic data # tease out start/stop info # storm duration (hours) # antecedent dry period (hours) # quantities # starts and stop # peaks # times of peaks # centroids # totals # starts and stops Determines whether a storm can be considered "small". Parameters ---------- minprecip, mininflow, minoutflow : float, optional (default = 0) The minimum amount of each hydrologic quantity below which a storm can be considered "small". Returns ------- storm_is_small : bool True if the storm is considered small. # ordinal time index of storm Draws a hydrologic quantity to a matplotlib axes. Parameters ---------- quantity : string Column name of the quantity you want to plot. ax : matplotlib axes object, optional The axes on which the data will be plotted. If None, a new one will be created. label : string, optional How the series should be labeled in the figure legend. otherlabels : list of strings, optional A list of other legend labels that have already been plotted to ``ax``. If provided, ``label`` will be appended. If not provided, and new list will be created. artists : list of matplotlib artists, optional A list of other legend items that have already been plotted to ``ax``. If provided, the artist created will be appended. 
If not provided, and new list will be created. Returns ------- fig : matplotlib.Figure The figure containing the plot. labels : list of strings Labels to be included in a legend for the figure. artists : list of matplotlib artists Symbology for the figure legend. # setup the figure # select the plot props based on the column # plot the data Creates a figure showing the hydrlogic record (flow and precipitation) of the storm Input: axratio : optional float or int (default = 2) Relative height of the flow axis compared to the precipiation axis. filename : optional string (default = None) Filename to which the figure will be saved. **figwargs will be passed on to `pyplot.Figure` Writes: Figure of flow and precipitation for a storm Returns: None # create the legend proxy artists # in the label assignment: `serieslabels.pop(item, item)` might # seem odd. What it does is looks for a label (value) in the # dictionary with the key equal to `item`. If there is no valur # for that key in the dictionary the `item` itself is returned. # so if there's nothing called "test" in mydict, # `mydict.pop("test", "test")` returns `"test"`. Class representing an entire hydrologic record. Parameters ---------- hydrodata : pandas.DataFrame DataFrame of hydrologic data of the storm. Should contain a unique index of type pandas.DatetimeIndex. precipcol : string, optional (default = None) Name of column in `hydrodata` containing precipiation data. inflowcol : string, optional (default = None) Name of column in `hydrodata` containing influent flow data. outflowcol : string, optional (default = None) Name of column in `hydrodata` containing effluent flow data. baseflowcol : string, optional (default = None) Name of column in `hydrodata` containing boolean indicating which records are considered baseflow. stormcol : string (default = 'storm') Name of column in `hydrodata` indentifying distinct storms. 
minprecip, mininflow, minoutflow : float, optional (default = 0) The minimum amount of each hydrologic quantity below which a storm can be considered "small". outputfreqMinutes : int, optional (default = 10) The default frequency (minutes) to which all data will be resampled. Precipitation data will be summed up across ' multiple timesteps during resampling, while flow will be averaged. intereventHours : int, optional (default = 6) The dry duration (no flow or rain) required to signal the end of a storm. volume_conversion : float, optional (default = 1) Conversion factor to go from flow to volume for a single observation. stormclass : object, optional Defaults to wqio.hydro.Storm. Can be a subclass of that in cases where custom functionality is needed. lowmem : bool (default = False) If True, all dry observations are removed from the dataframe. # TODO: rename `outputfreqMinutes` to `outputPeriodMinutes` # validate input # static input # properties Get the storm associdated with a give (sample) date Parameters ---------- timestamp : pandas.Timestamp The date/time for which to search within the hydrologic record. lookback_hours : positive int or float, optional (default = 0) If no storm is actively occuring at the provided timestamp, we can optionally look backwards in the hydrologic record a fixed amount of time (specified in hours). Negative values are ignored. smallstorms : bool, optional (default = False) If True, small storms will be included in the search. Returns ------- storm_number : int storm : wqio.Storm # santize date input # check lookback hours # initial search for the storm # look backwards if we have too # no storm # storm w/i the lookback period # return storm_number and storms Plot a faceted, categorical histogram of storms. Parameters ---------- valuecol : str, optional The name of the column that should be categorized and plotted. bins : array-like, optional The right-edges of the histogram bins. 
factoropts : keyword arguments, optional Options passed directly to seaborn.factorplot Returns ------- fig : seaborn.FacetGrid See also -------- viz.categorical_histogram seaborn.factorplot A simple object representing the drainage area of a BMP. Units are not enforced, so keep them consistent yourself. The calculations available assume that the area of the BMP and the "total" area are mutually exclusive. In other words, the watershed outlet is at the BMP inlet. Parameters ---------- total_area : float, optional (default = 1.0) The total geometric area of the BMP's catchment imp_area : float, optional (default = 1.0) The impervious area of the BMP's catchment bmp_area : float, optional (default = 0.0) The geometric area of the BMP itself. Estimate runoff volume via <NAME>'s Simple Method. Parameters ---------- storm_depth : float Depth of the storm. volume_conversion : float, optional (default = 1.0) Conversion factor to go from [area units] * [depth units] to the desired [volume units]. If [area] = m^2, [depth] = mm, and [volume] = L, then `volume_conversion` = 1. annual_factor : float, optional (default = 1.0) The Simple Method's annual correction factor to account for small storms that do not produce runoff. Returns ------- runoff_volume : float The volume of water entering the BMP immediately downstream of the drainage area. # volumetric run off coneffiecient # run per unit storm depth # total runoff based on actual storm depth | 3.100556 | 3 |
training/lesson3/answer34/not_a_lucky_ticket_all_the_time.py | ndo1989/pythoncourse2020 | 0 | 6618241 | <gh_stars>0
def sum_the_first_three_digits_of_the_ticket_number(first_three_digits_of_the_ticket_number):
    """Return the sum of the three digits of the first half of the ticket."""
    number = first_three_digits_of_the_ticket_number
    hundreds = number // 100
    tens = (number % 100) // 10
    units = number % 10
    return hundreds + tens + units
def sum_the_second_three_digits_of_the_ticket_number(second_three_digits_of_the_ticket_number):
    """Return the sum of the three digits of the second half of the ticket."""
    number = second_three_digits_of_the_ticket_number
    hundreds = number // 100
    tens = (number % 100) // 10
    units = number % 10
    return hundreds + tens + units
def plus_one_to_the_original_number(second_three_digits_of_the_ticket_number):
    """Return the second ticket half advanced by one."""
    return second_three_digits_of_the_ticket_number + 1
def convert_999_to_000(second_three_digits_of_the_ticket_number):
    """Wrap an overflowing second half back to "000".

    The argument itself is ignored; the result is always the
    zero-padded string for 0.
    """
    return to_000(0)
# zero-pad a number to three characters, e.g. 0 -> "000"
def to_000(second_three_digits_of_the_ticket_number):
    """Return the value as a string left-padded with zeros to width 3."""
    padded = str(second_three_digits_of_the_ticket_number).zfill(3)
    return padded
if __name__ == "__main__":
num_1 = int(input("Введите первые три цифры номера билета(первая цифра не может быть равна 0): "))
num_2 = int(input("Введите вторые три цифры номера: "))
sum_1 = sum_the_first_three_digits_of_the_ticket_number(num_1)
sum_2 = sum_the_second_three_digits_of_the_ticket_number(num_2)
if sum_1 != sum_2:
print(str(num_1) + str(to_000(num_2)))
elif sum_1 == sum_2 and num_2 == 999:
print(str(num_1) + str(to_000(convert_999_to_000(num_2))))
else:
print(str(num_1) + str(to_000(plus_one_to_the_original_number(num_2))))
def sum_the_first_three_digits_of_the_ticket_number(first_three_digits_of_the_ticket_number):
    """Digit sum of the first (three-digit) half of the ticket number."""
    quotient, units = divmod(first_three_digits_of_the_ticket_number, 10)
    hundreds, tens = divmod(quotient, 10)
    return hundreds + tens + units
def sum_the_second_three_digits_of_the_ticket_number(second_three_digits_of_the_ticket_number):
    """Digit sum of the second (three-digit) half of the ticket number."""
    quotient, units = divmod(second_three_digits_of_the_ticket_number, 10)
    hundreds, tens = divmod(quotient, 10)
    return hundreds + tens + units
def plus_one_to_the_original_number(second_three_digits_of_the_ticket_number):
    """Advance the second ticket half to the next integer."""
    incremented = second_three_digits_of_the_ticket_number + 1
    return incremented
def convert_999_to_000(second_three_digits_of_the_ticket_number):
    """Wrap an overflowing second half back to "000" (argument ignored)."""
    return to_000(0)
# render a bare number such as 0 as the three-character string "000"
def to_000(second_three_digits_of_the_ticket_number):
    """Left-pad the value with zeros to exactly three characters."""
    text = str(second_three_digits_of_the_ticket_number)
    return text.zfill(3)
if __name__ == "__main__":
num_1 = int(input("Введите первые три цифры номера билета(первая цифра не может быть равна 0): "))
num_2 = int(input("Введите вторые три цифры номера: "))
sum_1 = sum_the_first_three_digits_of_the_ticket_number(num_1)
sum_2 = sum_the_second_three_digits_of_the_ticket_number(num_2)
if sum_1 != sum_2:
print(str(num_1) + str(to_000(num_2)))
elif sum_1 == sum_2 and num_2 == 999:
print(str(num_1) + str(to_000(convert_999_to_000(num_2))))
else:
print(str(num_1) + str(to_000(plus_one_to_the_original_number(num_2)))) | ru | 0.959734 | # вычисляем первое число # вычисляем второе число # вычисляем третье число # сумма 3-х чисел в биллете # вычисляем первое число # вычисляем второе число # вычисляем третье число # сумма 3-х чисел в биллете # преобразовываем число "0" в "000" | 4.024995 | 4 |
tests/test_sagemaker_studio.py | bilardi/aws-saving | 1 | 6618242 | import unittest
import json
import datetime
from botocore.exceptions import ClientError
import tests.helper as hlp
from aws_saving.sagemaker_studio import SagemakerStudio
class SagemakerClient():
    """In-memory stub of the boto3 SageMaker client, backed by local
    JSON fixture files, used to test SagemakerStudio without AWS."""
    # canned API responses, loaded once in __init__
    ld = None   # list_domains response
    lup = None  # list_user_profiles response
    la = None   # list_apps response
    lt = None   # list_tags response
    ddd = None  # describe_domain response with a "Deleting" status fixture
    ddi = None  # describe_domain response with an "InService" status fixture
    # simulation switches, toggled by the tests
    es = False   # exception simulation: delete_domain fails
    ne = False   # "not exists": describe_domain returns the Deleting fixture
    net = False  # "no tags": list_tags returns an empty tag list
    policy = None  # last HomeEfsFileSystem retention policy seen by delete_domain

    def __init__(self):
        # load every canned API response from the fixture files
        with open('tests/sagemaker-studio-list-domains.json') as json_file:
            self.ld = json.load(json_file)
        with open('tests/sagemaker-studio-list-user-profiles.json') as json_file:
            self.lup = json.load(json_file)
        with open('tests/sagemaker-studio-list-apps.json') as json_file:
            self.la = json.load(json_file)
        with open('tests/sagemaker-studio-list-tags.json') as json_file:
            self.lt = json.load(json_file)
        with open('tests/sagemaker-studio-describe-domain.Deleting.json') as json_file:
            self.ddd = json.load(json_file)
        with open('tests/sagemaker-studio-describe-domain.InService.json') as json_file:
            self.ddi = json.load(json_file)

    def list_domains(self):
        # mirrors sagemaker.list_domains()
        return self.ld

    def list_user_profiles(self, DomainIdEquals):
        # mirrors sagemaker.list_user_profiles(); non-string ids are rejected
        if isinstance(DomainIdEquals, str):
            return self.lup
        raise ValueError

    def list_apps(self, DomainIdEquals, UserProfileNameEquals):
        # mirrors sagemaker.list_apps(); non-string arguments are rejected
        if isinstance(DomainIdEquals, str) and isinstance(UserProfileNameEquals, str):
            return self.la
        raise ValueError

    def list_tags(self, ResourceArn):
        # mirrors sagemaker.list_tags(); the `net` flag simulates a
        # resource that carries no tags at all
        if isinstance(ResourceArn, str):
            if self.net is True:
                return {"Tags":[]}
            return self.lt
        raise ValueError

    def describe_domain(self, DomainId):
        # DomainId == 1 simulates a malformed/empty API response;
        # otherwise the `ne` flag picks the Deleting vs InService fixture
        if DomainId == 1:
            return {}
        if self.ne is False:
            return self.ddi
        return self.ddd

    def set_except_simulation(self, boolean):
        # toggle: make delete_domain raise ValueError
        self.es = boolean

    def set_not_exists_simulation(self, boolean):
        # toggle: describe_domain reports the domain as Deleting
        self.ne = boolean

    def set_not_exists_tag_simulation(self, boolean):
        # toggle: list_tags returns no tags
        self.net = boolean

    def set_policy_none(self):
        # reset the recorded retention policy between test cases
        self.policy = None

    def delete_app(self, DomainId, UserProfileName, AppType, AppName):
        # mirrors sagemaker.delete_app(); succeeds only with string args
        if isinstance(DomainId, str) and isinstance(UserProfileName, str) and isinstance(AppType, str) and isinstance(AppName, str):
            return
        raise ValueError

    def delete_user_profile(self, DomainId, UserProfileName):
        # mirrors sagemaker.delete_user_profile()
        if isinstance(DomainId, str) and isinstance(UserProfileName, str):
            return
        raise ValueError

    def delete_domain(self, DomainId, RetentionPolicy):
        # mirrors sagemaker.delete_domain(); records the EFS retention
        # policy so tests can assert on it, and raises when the
        # exception-simulation flag is set
        if isinstance(DomainId, str) and isinstance(RetentionPolicy, dict) and self.es is False:
            self.policy = RetentionPolicy['HomeEfsFileSystem']
            return
        raise ValueError
class TestService(unittest.TestCase, SagemakerStudio):
s = None
def __init__(self, *args, **kwargs):
    # Build the object under test and swap its boto3 SageMaker client
    # for the JSON-fixture stub defined above, so no AWS calls happen.
    self.s = SagemakerStudio({})
    self.s.sagemaker = SagemakerClient()
    unittest.TestCase.__init__(self, *args, **kwargs)
def get_output(self, event = {}):
    # Run the saving action and return everything it printed to stdout.
    # NOTE(review): `event = {}` is a shared mutable default; it appears
    # never to be mutated by run() in these tests, but worth confirming.
    with hlp.captured_output() as (out, err):
        self.s.run(event)
    return out.getvalue().strip()
def test_get_instances(self):
    """The first domain listed from the fixtures is named 'studio'."""
    domains = self.s.get_instances()
    self.assertEqual(domains[0]['DomainName'], 'studio')
def test_get_instances_exception(self):
    """When the domain carries no tags, get_instances yields an empty list."""
    self.s.sagemaker.set_not_exists_tag_simulation(True)
    result = self.s.get_instances()
    self.s.sagemaker.set_not_exists_tag_simulation(False)
    self.assertEqual(result, [])
def test_already_exists(self):
    """already_exists() is True for a live domain; otherwise False plus a message."""
    client = self.s.sagemaker
    client.set_not_exists_simulation(False)
    self.assertTrue(self.s.already_exists('id'))
    client.set_not_exists_simulation(True)
    with hlp.captured_output() as (out, err):
        self.assertFalse(self.s.already_exists('domain-id'))
    self.assertEqual(out.getvalue().strip(), "The domain domain-id not exists")
def test_empty_user_profile(self):
    """Apps are deleted; the default app only when explicitly requested."""
    without_default = "Deleting all objects of user|id\nDeleting app named datascience"
    with_default = "Deleting all objects of user|id\nDeleting app named default\nDeleting app named datascience"
    with hlp.captured_output() as (out, err):
        self.s.empty_user_profile('user|id')
    self.assertEqual(out.getvalue().strip(), without_default)
    with hlp.captured_output() as (out, err):
        self.s.empty_user_profile('user|id', False)
    self.assertEqual(out.getvalue().strip(), without_default)
    with hlp.captured_output() as (out, err):
        self.s.empty_user_profile('user|id', True)
    self.assertEqual(out.getvalue().strip(), with_default)
    # a malformed identifier (missing the "|" separator) must raise
    with self.assertRaises(ValueError):
        self.s.empty_user_profile('user')
def test_empty_domain(self):
    """A real domain is fully emptied; a missing one only logs a message."""
    expected_wipe = ("Deleting all objects of id\n"
                     "Deleting user profile named user\n"
                     "Deleting app named default\n"
                     "Deleting app named datascience")
    with hlp.captured_output() as (out, err):
        self.s.empty_domain('id')
    self.assertEqual(out.getvalue().strip(), expected_wipe)
    with hlp.captured_output() as (out, err):
        self.s.empty_domain(1)
    self.assertEqual(out.getvalue().strip(), "The domain 1 not exists")
def test_stop_apps(self):
    """stop_apps removes only the non-default apps of each user profile."""
    with hlp.captured_output() as (out, err):
        self.s.stop_apps('id')
    expected = "Deleting all objects of user\nDeleting app named datascience"
    self.assertEqual(out.getvalue().strip(), expected)
def test_run(self):
now = datetime.datetime.now()
self.s.sagemaker.lt['Tags'][0]['Key'] = 'Stop'
self.s.sagemaker.set_policy_none()
self.s.sagemaker.set_except_simulation(False)
test = now.replace(hour=8, minute=00, day=6)
self.s.date_tuple = (test.year, test.month, test.day, test.hour, test.minute)
self.assertEqual(self.get_output(), "studio")
self.assertEqual(self.s.sagemaker.policy, None)
self.assertEqual(self.get_output({"force":["id"]}), "studio")
self.assertEqual(self.s.sagemaker.policy, None)
test = now.replace(hour=18, minute=00, day=6)
self.s.date_tuple = (test.year, test.month, test.day, test.hour, test.minute)
self.assertEqual(self.get_output(), "studio\nDeleting all objects of user\nDeleting app named datascience")
self.assertEqual(self.s.sagemaker.policy, None)
self.assertEqual(self.get_output({"force":["id"]}), "studio\nDeleting all objects of user\nDeleting app named datascience")
self.assertEqual(self.s.sagemaker.policy, None)
self.s.sagemaker.set_policy_none()
self.s.sagemaker.set_except_simulation(True)
test = now.replace(hour=8, minute=00, day=6)
self.s.date_tuple = (test.year, test.month, test.day, test.hour, test.minute)
self.assertEqual(self.get_output(), "studio")
self.assertEqual(self.s.sagemaker.policy, None)
self.assertEqual(self.get_output({"force":["id"]}), "studio")
self.assertEqual(self.s.sagemaker.policy, None)
test = now.replace(hour=18, minute=00, day=6)
self.s.date_tuple = (test.year, test.month, test.day, test.hour, test.minute)
self.assertEqual(self.get_output(), "studio\nDeleting all objects of user\nDeleting app named datascience")
self.assertEqual(self.s.sagemaker.policy, None)
self.assertEqual(self.get_output({"force":["id"]}), "studio\nDeleting all objects of user\nDeleting app named datascience")
self.assertEqual(self.s.sagemaker.policy, None)
self.s.sagemaker.lt['Tags'][0]['Key'] = 'Delete'
self.s.sagemaker.set_policy_none()
self.s.sagemaker.set_except_simulation(False)
test = now.replace(hour=8, minute=00, day=6)
self.s.date_tuple = (test.year, test.month, test.day, test.hour, test.minute)
self.assertEqual(self.get_output(), "studio")
self.assertEqual(self.s.sagemaker.policy, None)
self.assertEqual(self.get_output({"force":["id"]}), "studio")
self.assertEqual(self.s.sagemaker.policy, None)
test = now.replace(hour=18, minute=00, day=6)
self.s.date_tuple = (test.year, test.month, test.day, test.hour, test.minute)
self.assertEqual(self.get_output(), "studio\nDeleting all objects of id\nDeleting user profile named user\nDeleting app named default\nDeleting app named datascience\nDeleting studio")
self.assertEqual(self.s.sagemaker.policy, 'Retain')
self.assertEqual(self.get_output({"force":["id"]}), "studio\nDeleting all objects of id\nDeleting user profile named user\nDeleting app named default\nDeleting app named datascience\nDeleting studio")
self.assertEqual(self.s.sagemaker.policy, 'Delete')
self.s.sagemaker.set_policy_none()
self.s.sagemaker.set_except_simulation(True)
test = now.replace(hour=8, minute=00, day=6)
self.s.date_tuple = (test.year, test.month, test.day, test.hour, test.minute)
self.assertEqual(self.get_output(), "studio")
self.assertEqual(self.s.sagemaker.policy, None)
self.assertEqual(self.get_output({"force":["id"]}), "studio")
self.assertEqual(self.s.sagemaker.policy, None)
test = now.replace(hour=18, minute=00, day=6)
self.s.date_tuple = (test.year, test.month, test.day, test.hour, test.minute)
self.assertEqual(self.get_output(), "studio\nDeleting all objects of id\nDeleting user profile named user\nDeleting app named default\nDeleting app named datascience\nDeleting studio\nWarning: domain named studio is not empty, you have to force for deleting it")
self.assertEqual(self.s.sagemaker.policy, None)
self.assertEqual(self.get_output({"force":["id"]}), "studio\nDeleting all objects of id\nDeleting user profile named user\nDeleting app named default\nDeleting app named datascience\nDeleting studio\nWarning: domain named studio is not empty, you have to force for deleting it")
self.assertEqual(self.s.sagemaker.policy, None)
# Apps, "Status": "Deleted"|"Deleting"|"Failed"|"InService"|"Pending"
# Domain and User, "Status": "Deleting"|"Failed"|"InService"|"Pending"|"Updating"|"Update_Failed"|"Delete_Failed",
if __name__ == '__main__':
unittest.main() | import unittest
import json
import datetime
from botocore.exceptions import ClientError
import tests.helper as hlp
from aws_saving.sagemaker_studio import SagemakerStudio
class SagemakerClient():
ld = None
lup = None
la = None
lt = None
ddd = None
ddi = None
es = False
ne = False
net = False
policy = None
def __init__(self):
with open('tests/sagemaker-studio-list-domains.json') as json_file:
self.ld = json.load(json_file)
with open('tests/sagemaker-studio-list-user-profiles.json') as json_file:
self.lup = json.load(json_file)
with open('tests/sagemaker-studio-list-apps.json') as json_file:
self.la = json.load(json_file)
with open('tests/sagemaker-studio-list-tags.json') as json_file:
self.lt = json.load(json_file)
with open('tests/sagemaker-studio-describe-domain.Deleting.json') as json_file:
self.ddd = json.load(json_file)
with open('tests/sagemaker-studio-describe-domain.InService.json') as json_file:
self.ddi = json.load(json_file)
def list_domains(self):
return self.ld
def list_user_profiles(self, DomainIdEquals):
if isinstance(DomainIdEquals, str):
return self.lup
raise ValueError
def list_apps(self, DomainIdEquals, UserProfileNameEquals):
if isinstance(DomainIdEquals, str) and isinstance(UserProfileNameEquals, str):
return self.la
raise ValueError
def list_tags(self, ResourceArn):
if isinstance(ResourceArn, str):
if self.net is True:
return {"Tags":[]}
return self.lt
raise ValueError
def describe_domain(self, DomainId):
if DomainId == 1:
return {}
if self.ne is False:
return self.ddi
return self.ddd
def set_except_simulation(self, boolean):
self.es = boolean
def set_not_exists_simulation(self, boolean):
self.ne = boolean
def set_not_exists_tag_simulation(self, boolean):
self.net = boolean
def set_policy_none(self):
self.policy = None
def delete_app(self, DomainId, UserProfileName, AppType, AppName):
if isinstance(DomainId, str) and isinstance(UserProfileName, str) and isinstance(AppType, str) and isinstance(AppName, str):
return
raise ValueError
def delete_user_profile(self, DomainId, UserProfileName):
if isinstance(DomainId, str) and isinstance(UserProfileName, str):
return
raise ValueError
def delete_domain(self, DomainId, RetentionPolicy):
if isinstance(DomainId, str) and isinstance(RetentionPolicy, dict) and self.es is False:
self.policy = RetentionPolicy['HomeEfsFileSystem']
return
raise ValueError
class TestService(unittest.TestCase, SagemakerStudio):
s = None
def __init__(self, *args, **kwargs):
self.s = SagemakerStudio({})
self.s.sagemaker = SagemakerClient()
unittest.TestCase.__init__(self, *args, **kwargs)
def get_output(self, event = {}):
with hlp.captured_output() as (out, err):
self.s.run(event)
return out.getvalue().strip()
def test_get_instances(self):
instances = self.s.get_instances()
self.assertEqual(instances[0]['DomainName'], 'studio')
def test_get_instances_exception(self):
self.s.sagemaker.set_not_exists_tag_simulation(True)
instances = self.s.get_instances()
self.s.sagemaker.set_not_exists_tag_simulation(False)
self.assertEqual(instances, [])
def test_already_exists(self):
self.s.sagemaker.set_not_exists_simulation(False)
self.assertTrue(self.s.already_exists('id'))
self.s.sagemaker.set_not_exists_simulation(True)
with hlp.captured_output() as (out, err):
self.assertFalse(self.s.already_exists('domain-id'))
self.assertEqual(out.getvalue().strip(), "The domain domain-id not exists")
def test_empty_user_profile(self):
with hlp.captured_output() as (out, err):
self.s.empty_user_profile('user|id')
self.assertEqual(out.getvalue().strip(), "Deleting all objects of user|id\nDeleting app named datascience")
with hlp.captured_output() as (out, err):
self.s.empty_user_profile('user|id', False)
self.assertEqual(out.getvalue().strip(), "Deleting all objects of user|id\nDeleting app named datascience")
with hlp.captured_output() as (out, err):
self.s.empty_user_profile('user|id', True)
self.assertEqual(out.getvalue().strip(), "Deleting all objects of user|id\nDeleting app named default\nDeleting app named datascience")
with self.assertRaises(ValueError):
self.s.empty_user_profile('user')
def test_empty_domain(self):
with hlp.captured_output() as (out, err):
self.s.empty_domain('id')
self.assertEqual(out.getvalue().strip(), "Deleting all objects of id\nDeleting user profile named user\nDeleting app named default\nDeleting app named datascience")
with hlp.captured_output() as (out, err):
self.s.empty_domain(1)
self.assertEqual(out.getvalue().strip(), "The domain 1 not exists")
def test_stop_apps(self):
with hlp.captured_output() as (out, err):
self.s.stop_apps('id')
self.assertEqual(out.getvalue().strip(), "Deleting all objects of user\nDeleting app named datascience")
def test_run(self):
now = datetime.datetime.now()
self.s.sagemaker.lt['Tags'][0]['Key'] = 'Stop'
self.s.sagemaker.set_policy_none()
self.s.sagemaker.set_except_simulation(False)
test = now.replace(hour=8, minute=00, day=6)
self.s.date_tuple = (test.year, test.month, test.day, test.hour, test.minute)
self.assertEqual(self.get_output(), "studio")
self.assertEqual(self.s.sagemaker.policy, None)
self.assertEqual(self.get_output({"force":["id"]}), "studio")
self.assertEqual(self.s.sagemaker.policy, None)
test = now.replace(hour=18, minute=00, day=6)
self.s.date_tuple = (test.year, test.month, test.day, test.hour, test.minute)
self.assertEqual(self.get_output(), "studio\nDeleting all objects of user\nDeleting app named datascience")
self.assertEqual(self.s.sagemaker.policy, None)
self.assertEqual(self.get_output({"force":["id"]}), "studio\nDeleting all objects of user\nDeleting app named datascience")
self.assertEqual(self.s.sagemaker.policy, None)
self.s.sagemaker.set_policy_none()
self.s.sagemaker.set_except_simulation(True)
test = now.replace(hour=8, minute=00, day=6)
self.s.date_tuple = (test.year, test.month, test.day, test.hour, test.minute)
self.assertEqual(self.get_output(), "studio")
self.assertEqual(self.s.sagemaker.policy, None)
self.assertEqual(self.get_output({"force":["id"]}), "studio")
self.assertEqual(self.s.sagemaker.policy, None)
test = now.replace(hour=18, minute=00, day=6)
self.s.date_tuple = (test.year, test.month, test.day, test.hour, test.minute)
self.assertEqual(self.get_output(), "studio\nDeleting all objects of user\nDeleting app named datascience")
self.assertEqual(self.s.sagemaker.policy, None)
self.assertEqual(self.get_output({"force":["id"]}), "studio\nDeleting all objects of user\nDeleting app named datascience")
self.assertEqual(self.s.sagemaker.policy, None)
self.s.sagemaker.lt['Tags'][0]['Key'] = 'Delete'
self.s.sagemaker.set_policy_none()
self.s.sagemaker.set_except_simulation(False)
test = now.replace(hour=8, minute=00, day=6)
self.s.date_tuple = (test.year, test.month, test.day, test.hour, test.minute)
self.assertEqual(self.get_output(), "studio")
self.assertEqual(self.s.sagemaker.policy, None)
self.assertEqual(self.get_output({"force":["id"]}), "studio")
self.assertEqual(self.s.sagemaker.policy, None)
test = now.replace(hour=18, minute=00, day=6)
self.s.date_tuple = (test.year, test.month, test.day, test.hour, test.minute)
self.assertEqual(self.get_output(), "studio\nDeleting all objects of id\nDeleting user profile named user\nDeleting app named default\nDeleting app named datascience\nDeleting studio")
self.assertEqual(self.s.sagemaker.policy, 'Retain')
self.assertEqual(self.get_output({"force":["id"]}), "studio\nDeleting all objects of id\nDeleting user profile named user\nDeleting app named default\nDeleting app named datascience\nDeleting studio")
self.assertEqual(self.s.sagemaker.policy, 'Delete')
self.s.sagemaker.set_policy_none()
self.s.sagemaker.set_except_simulation(True)
test = now.replace(hour=8, minute=00, day=6)
self.s.date_tuple = (test.year, test.month, test.day, test.hour, test.minute)
self.assertEqual(self.get_output(), "studio")
self.assertEqual(self.s.sagemaker.policy, None)
self.assertEqual(self.get_output({"force":["id"]}), "studio")
self.assertEqual(self.s.sagemaker.policy, None)
test = now.replace(hour=18, minute=00, day=6)
self.s.date_tuple = (test.year, test.month, test.day, test.hour, test.minute)
self.assertEqual(self.get_output(), "studio\nDeleting all objects of id\nDeleting user profile named user\nDeleting app named default\nDeleting app named datascience\nDeleting studio\nWarning: domain named studio is not empty, you have to force for deleting it")
self.assertEqual(self.s.sagemaker.policy, None)
self.assertEqual(self.get_output({"force":["id"]}), "studio\nDeleting all objects of id\nDeleting user profile named user\nDeleting app named default\nDeleting app named datascience\nDeleting studio\nWarning: domain named studio is not empty, you have to force for deleting it")
self.assertEqual(self.s.sagemaker.policy, None)
# Apps, "Status": "Deleted"|"Deleting"|"Failed"|"InService"|"Pending"
# Domain and User, "Status": "Deleting"|"Failed"|"InService"|"Pending"|"Updating"|"Update_Failed"|"Delete_Failed",
if __name__ == '__main__':
unittest.main() | en | 0.677973 | # Apps, "Status": "Deleted"|"Deleting"|"Failed"|"InService"|"Pending" # Domain and User, "Status": "Deleting"|"Failed"|"InService"|"Pending"|"Updating"|"Update_Failed"|"Delete_Failed", | 2.237849 | 2 |
rpython.py | juntalis/python-ctypes-sandbox | 3 | 6618243 | #/usr/bin/env python
# coding: utf-8
"""
This program is free software. It comes without any warranty, to
the extent permitted by applicable law. You can redistribute it
and/or modify it under the terms of the Do What The Fuck You Want
To Public License, Version 2, as published by Sam Hocevar. See
http://sam.zoy.org/wtfpl/COPYING for more details.
"""
import os, sys, pytab
from rdll import *
from ctypes import util
from _kernel32 import is_x64, is_pypy, GetModuleHandleW
if __name__ == '__main__':
mydir = os.path.abspath(os.path.dirname(__file__))
if mydir.lower() not in [p.lower() for p in sys.path]:
sys.path.insert(0, mydir)
if is_x64:
print 'This script requires an 32-bit version of Python.\n'
print 'If you have a 32-bit version of Python, but dont want to have to add it to your PATH, add a bat file to your PATH called, "x86env.bat" or "x86env.cmd" that temporarily adds it to your PATH.\n'
print 'Something like:'
print '@set PATH=%%PATH:\\mypythonx64folder\\=\\mypythonx86folder\\%%\n'
sys.exit(1)
if is_pypy:
handle = GetModuleHandleW('libpypy-c.dll')
else:
handle = sys.dllhandle
rkernel32 = RWinDLL(util.find_library('kernel32'))
if not is_pypy:
smsvcrt = util.find_library(util.find_msvcrt())
if smsvcrt is None:
smsvcrt = util.find_library('msvcr100')
msvcrt = RCDLL(smsvcrt)
pyhome = os.path.abspath(os.path.dirname(sys.executable))
pypath = ';'.join(sys.path)
path = 'PATH=%s' % (pyhome + ';' + os.environ['PATH']).replace(';;',';')
putenv = msvcrt._putenv
putenv.argtypes = [ c_char_p ]
putenv.restype = c_int
putenv(path)
putenv('PYTHONPATH=%s' % pypath)
python = RCDLL(handle=handle)
pytab.populate_exports(python)
python.Py_SetProgramName(sys.executable)
#python.Py_SetPythonHome(pyhome)
python.Py_Initialize()
python.Py_GetPath()
main = python.PyImport_AddModule("__main__")
PyRun_SimpleString = python.PyRun_SimpleString
PyRun_SimpleString.argtypes = [ c_char_p ]
PyRun_SimpleString.restype = c_int
PyRun_SimpleString("from comspec import *\ncheck = stub")
sys.exit(0)
# Gonna comment out the cleanup for now.
# python.Py_Finalize()
# FreeLib = rkernel32.FreeLibrary
# FreeLib.restype = None
# FreeLib.noalloc = True
# FreeLib.argtypes = [ HMODULE ]
# FreeLib(python._handle) | #/usr/bin/env python
# coding: utf-8
"""
This program is free software. It comes without any warranty, to
the extent permitted by applicable law. You can redistribute it
and/or modify it under the terms of the Do What The Fuck You Want
To Public License, Version 2, as published by Sam Hocevar. See
http://sam.zoy.org/wtfpl/COPYING for more details.
"""
import os, sys, pytab
from rdll import *
from ctypes import util
from _kernel32 import is_x64, is_pypy, GetModuleHandleW
if __name__ == '__main__':
mydir = os.path.abspath(os.path.dirname(__file__))
if mydir.lower() not in [p.lower() for p in sys.path]:
sys.path.insert(0, mydir)
if is_x64:
print 'This script requires an 32-bit version of Python.\n'
print 'If you have a 32-bit version of Python, but dont want to have to add it to your PATH, add a bat file to your PATH called, "x86env.bat" or "x86env.cmd" that temporarily adds it to your PATH.\n'
print 'Something like:'
print '@set PATH=%%PATH:\\mypythonx64folder\\=\\mypythonx86folder\\%%\n'
sys.exit(1)
if is_pypy:
handle = GetModuleHandleW('libpypy-c.dll')
else:
handle = sys.dllhandle
rkernel32 = RWinDLL(util.find_library('kernel32'))
if not is_pypy:
smsvcrt = util.find_library(util.find_msvcrt())
if smsvcrt is None:
smsvcrt = util.find_library('msvcr100')
msvcrt = RCDLL(smsvcrt)
pyhome = os.path.abspath(os.path.dirname(sys.executable))
pypath = ';'.join(sys.path)
path = 'PATH=%s' % (pyhome + ';' + os.environ['PATH']).replace(';;',';')
putenv = msvcrt._putenv
putenv.argtypes = [ c_char_p ]
putenv.restype = c_int
putenv(path)
putenv('PYTHONPATH=%s' % pypath)
python = RCDLL(handle=handle)
pytab.populate_exports(python)
python.Py_SetProgramName(sys.executable)
#python.Py_SetPythonHome(pyhome)
python.Py_Initialize()
python.Py_GetPath()
main = python.PyImport_AddModule("__main__")
PyRun_SimpleString = python.PyRun_SimpleString
PyRun_SimpleString.argtypes = [ c_char_p ]
PyRun_SimpleString.restype = c_int
PyRun_SimpleString("from comspec import *\ncheck = stub")
sys.exit(0)
# Gonna comment out the cleanup for now.
# python.Py_Finalize()
# FreeLib = rkernel32.FreeLibrary
# FreeLib.restype = None
# FreeLib.noalloc = True
# FreeLib.argtypes = [ HMODULE ]
# FreeLib(python._handle) | en | 0.721406 | #/usr/bin/env python # coding: utf-8 This program is free software. It comes without any warranty, to the extent permitted by applicable law. You can redistribute it and/or modify it under the terms of the Do What The Fuck You Want To Public License, Version 2, as published by Sam Hocevar. See http://sam.zoy.org/wtfpl/COPYING for more details. #python.Py_SetPythonHome(pyhome) # Gonna comment out the cleanup for now. # python.Py_Finalize() # FreeLib = rkernel32.FreeLibrary # FreeLib.restype = None # FreeLib.noalloc = True # FreeLib.argtypes = [ HMODULE ] # FreeLib(python._handle) | 2.185811 | 2 |
server.py | nycdidar/Content-AI | 3 | 6618244 | # -*- coding: utf-8 -*-
"""
Spell Checker
Next Word Prediction
Keyword Density Percentage
Classify Title
Classify Breakig News
Classify Content Type
Predict High Page View Content
Find NER (Named Entity Recognition)
"""
from flask import Flask, render_template, jsonify,request
from flask_cors import CORS
import json
import os
import modules.classifytitle as classifytitle
import modules.predictword as predictword
import modules.verticalclassify as verticalclassify
import modules.classifybreaking as classifybreaking
import modules.classifypopular as classifypopular
import modules.datasearch as datasearch
from modules.ner import Parser
# JSON display handler.
def display_msg(request_obj, input_obj, field_name):
post_content = request_obj.args.get(field_name)
if not request_obj.args.get(field_name):
post_content = request_obj.form[field_name]
if not post_content:
return jsonify({"Error": 'No URL entered'})
try:
return jsonify(input_obj(post_content))
except Exception as e:
return jsonify({"Error": 'There was an error while processing your request. ' + str(e)})
app = Flask(__name__, '/static')
CORS(app)
# Sample html form to test
@app.route("/test")
def output():
return render_template("sample_gui.html")
# Classify predict vertical
@app.route('/predict_vertical', methods=['POST'])
def start():
print(request.headers.get('Content-Type'))
post_content = request.form['content']
if not post_content:
return jsonify({"Error": 'No data entered'})
return verticalclassify.classify_vertical(post_content)
# Classify title
@app.route("/classify_title")
def classify_title():
title = request.args.get('title')
if not request.args.get('title'):
title = request.form['title']
if not title:
return jsonify({"Error": 'No URL entered'})
try:
return jsonify(classifytitle.classify_title(title))
except Exception as e:
return jsonify({"Error": 'There was an error while processing your request. ' + str(e)})
# Classify breaking news
@app.route("/classify_breaking")
def classify_breaking():
return display_msg(request, classifybreaking.classify_title, 'title')
# Classify popular news
@app.route("/classify_popular")
def classify_popular():
return display_msg(request, classifypopular.classify_title, 'title')
# Classify predict next word
@app.route('/output', methods=['GET'])
def worker():
string = request.args.get('string')
work = request.args.get('work')
return predictword.predict_word(work, string)
# Classify popular news
@app.route("/search")
def wiki_search():
return display_msg(request, datasearch.classify_title, 'content')
# Classify named entity recognition
@app.route("/ner", methods=['GET', 'POST'])
def ner():
content = request.args.get('content')
if not request.args.get('content'):
content = request.form['content']
if not content:
return jsonify({"Error": 'No data entered'})
try:
p = Parser()
p.load_models("models/")
return jsonify(p.predict(content))
del p
except Exception as e:
return jsonify({"Error": 'There was an error while processing your request. ' + str(e)})
# Web server initiate
if __name__=="__main__":
app.run(debug=False, host='0.0.0.0', port=5004)
| # -*- coding: utf-8 -*-
"""
Spell Checker
Next Word Prediction
Keyword Density Percentage
Classify Title
Classify Breakig News
Classify Content Type
Predict High Page View Content
Find NER (Named Entity Recognition)
"""
from flask import Flask, render_template, jsonify,request
from flask_cors import CORS
import json
import os
import modules.classifytitle as classifytitle
import modules.predictword as predictword
import modules.verticalclassify as verticalclassify
import modules.classifybreaking as classifybreaking
import modules.classifypopular as classifypopular
import modules.datasearch as datasearch
from modules.ner import Parser
# JSON display handler.
def display_msg(request_obj, input_obj, field_name):
post_content = request_obj.args.get(field_name)
if not request_obj.args.get(field_name):
post_content = request_obj.form[field_name]
if not post_content:
return jsonify({"Error": 'No URL entered'})
try:
return jsonify(input_obj(post_content))
except Exception as e:
return jsonify({"Error": 'There was an error while processing your request. ' + str(e)})
app = Flask(__name__, '/static')
CORS(app)
# Sample html form to test
@app.route("/test")
def output():
return render_template("sample_gui.html")
# Classify predict vertical
@app.route('/predict_vertical', methods=['POST'])
def start():
print(request.headers.get('Content-Type'))
post_content = request.form['content']
if not post_content:
return jsonify({"Error": 'No data entered'})
return verticalclassify.classify_vertical(post_content)
# Classify title
@app.route("/classify_title")
def classify_title():
title = request.args.get('title')
if not request.args.get('title'):
title = request.form['title']
if not title:
return jsonify({"Error": 'No URL entered'})
try:
return jsonify(classifytitle.classify_title(title))
except Exception as e:
return jsonify({"Error": 'There was an error while processing your request. ' + str(e)})
# Classify breaking news
@app.route("/classify_breaking")
def classify_breaking():
return display_msg(request, classifybreaking.classify_title, 'title')
# Classify popular news
@app.route("/classify_popular")
def classify_popular():
return display_msg(request, classifypopular.classify_title, 'title')
# Classify predict next word
@app.route('/output', methods=['GET'])
def worker():
string = request.args.get('string')
work = request.args.get('work')
return predictword.predict_word(work, string)
# Classify popular news
@app.route("/search")
def wiki_search():
return display_msg(request, datasearch.classify_title, 'content')
# Classify named entity recognition
@app.route("/ner", methods=['GET', 'POST'])
def ner():
content = request.args.get('content')
if not request.args.get('content'):
content = request.form['content']
if not content:
return jsonify({"Error": 'No data entered'})
try:
p = Parser()
p.load_models("models/")
return jsonify(p.predict(content))
del p
except Exception as e:
return jsonify({"Error": 'There was an error while processing your request. ' + str(e)})
# Web server initiate
if __name__=="__main__":
app.run(debug=False, host='0.0.0.0', port=5004)
| en | 0.547788 | # -*- coding: utf-8 -*- Spell Checker Next Word Prediction Keyword Density Percentage Classify Title Classify Breakig News Classify Content Type Predict High Page View Content Find NER (Named Entity Recognition) # JSON display handler. # Sample html form to test # Classify predict vertical # Classify title # Classify breaking news # Classify popular news # Classify predict next word # Classify popular news # Classify named entity recognition # Web server initiate | 2.340486 | 2 |
examples/apigateways/sources/process_api_request.py | DmitryBogomolov/aws-cloudformation-sample | 0 | 6618245 | import json
def handler(event, context):
print(event)
return {
'statusCode': 200,
'body': json.dumps({
'tag': 'Test'
})
}
| import json
def handler(event, context):
print(event)
return {
'statusCode': 200,
'body': json.dumps({
'tag': 'Test'
})
}
| none | 1 | 2.146129 | 2 | |
DeepSpeechPrior/train.py | oucxlw/SoundSourceSeparation | 83 | 6618246 | #!/usr/bin/env python3
import shutil
import os
import argparse
import pickle as pic
from progressbar import progressbar
import numpy as np
import chainer
from chainer import cuda, optimizers, serializers
from chainer.cuda import cupy as cp
import network_VAE
from configure_VAE import *
def train_VAE(gpu=GPU, dataset_fileName=f'{DATASET_SAVE_PATH}/wsj0_normalize_{N_FFT}_{HOP_LENGTH}.pic'):
file_suffix = f"normal-scale=gamma-D={N_LATENT}"
if os.path.isfile(MODEL_SAVE_PATH + '/model-best-{0}.npz'.format(file_suffix) ):
print(f"{MODEL_SAVE_PATH}model-best-{file_suffix}.npz already exist")
exit
cuda.get_device_from_id(gpu).use()
# Load dataset
with open(dataset_fileName, 'rb') as f:
dataset = pic.load(f)
n_data = dataset.shape[1]
# Prepare VAE model
model = network_VAE.VAE(n_freq=int(N_FFT/2+1), n_latent=N_LATENT)
model.to_gpu()
# Setup Optimizer
optimizer = optimizers.Adam(LEARNING_RATE)
optimizer.setup(model)
# Learning loop
min_loss = np.inf
loss_list = []
for epoch in range(N_EPOCH):
print('Epoch:', epoch+1)
sum_loss = 0
perm = np.random.permutation(n_data)
for ii in progressbar(range(0, n_data, BATCH_SIZE)):
minibatch = dataset[:, perm[ii:ii+BATCH_SIZE]].T
scales = np.random.gamma(2, 0.5, (len(minibatch)))
minibatch = minibatch * scales[:, None]
x = chainer.Variable(cp.asarray(minibatch, dtype=cp.float32))
optimizer.update(model.get_loss_func(), x)
sum_loss += float(model.loss.data) * BATCH_SIZE
loss_list.append(float(model.loss.data))
sum_loss /= n_data
print("Loss:", sum_loss)
print('save the model and optimizer')
serializers.save_npz(MODEL_SAVE_PATH + 'model-{0}.npz'.format(file_suffix), model)
with open(MODEL_SAVE_PATH + 'loss-{0}.pic'.format(file_suffix), 'wb') as f:
pic.dump(loss_list, f)
if sum_loss < min_loss:
shutil.copyfile(MODEL_SAVE_PATH + 'model-{0}.npz'.format(file_suffix), MODEL_SAVE_PATH + 'model-best-{0}.npz'.format(file_suffix))
min_loss = sum_loss
sum_loss = 0
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', type= int, default= GPU, help='GPU ID')
args = parser.parse_args()
from make_dataset_wsj0 import make_dataset
make_dataset(WSJ0_PATH, DATASET_SAVE_PATH)
train_VAE(args.gpu)
| #!/usr/bin/env python3
import shutil
import os
import argparse
import pickle as pic
from progressbar import progressbar
import numpy as np
import chainer
from chainer import cuda, optimizers, serializers
from chainer.cuda import cupy as cp
import network_VAE
from configure_VAE import *
def train_VAE(gpu=GPU, dataset_fileName=f'{DATASET_SAVE_PATH}/wsj0_normalize_{N_FFT}_{HOP_LENGTH}.pic'):
file_suffix = f"normal-scale=gamma-D={N_LATENT}"
if os.path.isfile(MODEL_SAVE_PATH + '/model-best-{0}.npz'.format(file_suffix) ):
print(f"{MODEL_SAVE_PATH}model-best-{file_suffix}.npz already exist")
exit
cuda.get_device_from_id(gpu).use()
# Load dataset
with open(dataset_fileName, 'rb') as f:
dataset = pic.load(f)
n_data = dataset.shape[1]
# Prepare VAE model
model = network_VAE.VAE(n_freq=int(N_FFT/2+1), n_latent=N_LATENT)
model.to_gpu()
# Setup Optimizer
optimizer = optimizers.Adam(LEARNING_RATE)
optimizer.setup(model)
# Learning loop
min_loss = np.inf
loss_list = []
for epoch in range(N_EPOCH):
print('Epoch:', epoch+1)
sum_loss = 0
perm = np.random.permutation(n_data)
for ii in progressbar(range(0, n_data, BATCH_SIZE)):
minibatch = dataset[:, perm[ii:ii+BATCH_SIZE]].T
scales = np.random.gamma(2, 0.5, (len(minibatch)))
minibatch = minibatch * scales[:, None]
x = chainer.Variable(cp.asarray(minibatch, dtype=cp.float32))
optimizer.update(model.get_loss_func(), x)
sum_loss += float(model.loss.data) * BATCH_SIZE
loss_list.append(float(model.loss.data))
sum_loss /= n_data
print("Loss:", sum_loss)
print('save the model and optimizer')
serializers.save_npz(MODEL_SAVE_PATH + 'model-{0}.npz'.format(file_suffix), model)
with open(MODEL_SAVE_PATH + 'loss-{0}.pic'.format(file_suffix), 'wb') as f:
pic.dump(loss_list, f)
if sum_loss < min_loss:
shutil.copyfile(MODEL_SAVE_PATH + 'model-{0}.npz'.format(file_suffix), MODEL_SAVE_PATH + 'model-best-{0}.npz'.format(file_suffix))
min_loss = sum_loss
sum_loss = 0
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', type= int, default= GPU, help='GPU ID')
args = parser.parse_args()
from make_dataset_wsj0 import make_dataset
make_dataset(WSJ0_PATH, DATASET_SAVE_PATH)
train_VAE(args.gpu)
| en | 0.342522 | #!/usr/bin/env python3 # Load dataset # Prepare VAE model # Setup Optimizer # Learning loop | 2.16915 | 2 |
main/models.py | abhishek9991/Club-Website | 1 | 6618247 | from django.db import models
from django.contrib.auth.models import User
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.utils import timezone
import datetime
class Profile(models.Model):
    """Extended per-user profile data (one-to-one with ``auth.User``).

    Instances are created/saved automatically by the ``post_save`` signal
    receivers defined later in this module, so application code should not
    need to create a Profile by hand.
    """
    user = models.OneToOneField(User, on_delete=models.CASCADE)
    email = models.EmailField(blank=True)
    fname = models.CharField(max_length=100, blank=True)  # first name
    lname = models.CharField(max_length=100, blank=True)  # last name
    github = models.CharField(max_length=100, blank=True)  # GitHub username/URL
    # dp: presumably a URL to the member's display picture -- TODO confirm
    dp = models.CharField(max_length=1000, blank=True)
    batch = models.IntegerField(default=2017)  # batch/graduation year
    facebook = models.CharField(max_length=100, blank=True)
    linkedin = models.CharField(max_length=100, blank=True)
    twitter = models.CharField(max_length=100, blank=True)
    bio = models.TextField(blank=True, null=True)
    # label: purpose unclear from this file (role/tagline?) -- TODO confirm
    label = models.CharField(max_length=100 , blank=True)
    company = models.CharField(max_length=100, blank=True)
    location = models.CharField(max_length=100,blank=True)
    frameworks = models.CharField(max_length=500,blank=True)
    languages = models.CharField(max_length=500,blank=True)
    # NOTE: field name is misspelled ("achivements"); renaming would require
    # a schema migration, so it is documented here instead of changed.
    achivements = models.CharField(max_length=1000,blank=True)
    # Competitive-programming profile handles and solved-question counters.
    he_profile = models.CharField(max_length=100,blank=True)  # HackerEarth
    spoj_profile = models.CharField(max_length=100,blank=True)
    he_ques = models.IntegerField(default=0)
    codechef_profile = models.CharField(max_length=100,blank=True)
    codechef_ques = models.IntegerField(default=0)
    spoj_ques = models.IntegerField(default=0)
    git_repos = models.IntegerField(default=0)  # public GitHub repo count
    my_website = models.CharField(max_length=100,blank=True)

    def __str__(self):
        # Display the first name; may be empty since fname allows blank.
        return self.fname
@receiver(post_save, sender=User)
def create_user_profile(sender, instance, created, **kwargs):
    """Create the companion Profile the first time a User row is saved.

    ``created`` is True only on the initial INSERT, so subsequent saves of
    the same User do not create duplicate profiles.
    """
    if not created:
        return
    Profile.objects.create(user=instance)
@receiver(post_save, sender=User)
def save_user_profile(sender, instance, **kwargs):
    """Persist the related Profile whenever its User is saved."""
    instance.profile.save()
def _current_time():
    """Return the current local time (callable default for TimeField)."""
    return datetime.datetime.now().time()


class Event(models.Model):
    """A club event with schedule, venue, fee and hosting details."""

    description = models.TextField(blank=True, null=True)
    title = models.CharField(max_length=500)
    host = models.ManyToManyField(User)
    venue = models.CharField(max_length=100)
    fee = models.IntegerField(default=0)  # entry fee; 0 means free
    rules = models.TextField(blank=True, null=True)
    prerequistes = models.TextField(blank=True, null=True)
    start_date = models.DateField(default=datetime.date.today,auto_now=False,auto_now_add=False)
    end_date = models.DateField(default=datetime.date.today,auto_now=False,auto_now_add=False)
    # BUG FIX: the defaults were previously `datetime.datetime.now().time()`,
    # which is evaluated ONCE at import time, freezing the default at whatever
    # time the server process started. Passing a callable defers evaluation to
    # row-creation time, mirroring how `datetime.date.today` is passed
    # uncalled above. (Requires a trivial migration.)
    start_time = models.TimeField(default=_current_time,auto_now=False,auto_now_add=False)
    end_time = models.TimeField(default=_current_time,auto_now=False,auto_now_add=False)

    def __str__(self):
        return self.title
class registration(models.Model):
    """A participant's registration for a single Event.

    Linked to the event by the plain integer ``eventid`` rather than a
    ForeignKey, so referential integrity is not enforced by the database.
    """

    eventid = models.IntegerField()
    mobile = models.CharField(max_length=20)
    fname = models.CharField(max_length=100)
    lname = models.CharField(max_length=100,default="")
    College = models.CharField(max_length=300)
    email = models.EmailField()
    query = models.CharField(max_length=1000,default="")

    def __str__(self):
        # e.g. "Alice3" -- first name followed by the registered event's id.
        return self.fname + str(self.eventid)
class feedback(models.Model):
    """Star-rated feedback left for an event (``eventid`` is a plain int)."""

    eventid = models.IntegerField()
    name = models.CharField(max_length=100,blank=True)
    comment = models.TextField()
    # Star rating; presumably constrained to a small range (e.g. 0-5) by the
    # submitting form -- TODO verify, the model does not enforce it.
    star = models.IntegerField(default=0)

    def __str__(self):
        return str(self.star) +" "+ self.name
class project(models.Model):
    """A showcased club project with its owners, links and tech stack."""

    description = models.TextField(blank=True, null=True)
    title = models.CharField(max_length=500)
    owner = models.ManyToManyField(User)
    demo_link = models.CharField(max_length=100,blank=True)
    source = models.CharField(max_length=100)  # link to the source repository
    technologies = models.CharField(max_length=1000)

    def __str__(self):
        return self.title
from django.contrib.auth.models import User
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.utils import timezone
import datetime
class Profile(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE)
email = models.EmailField(blank=True)
fname = models.CharField(max_length=100, blank=True)
lname = models.CharField(max_length=100, blank=True)
github = models.CharField(max_length=100, blank=True)
dp = models.CharField(max_length=1000, blank=True)
batch = models.IntegerField(default=2017)
facebook = models.CharField(max_length=100, blank=True)
linkedin = models.CharField(max_length=100, blank=True)
twitter = models.CharField(max_length=100, blank=True)
bio = models.TextField(blank=True, null=True)
label = models.CharField(max_length=100 , blank=True)
company = models.CharField(max_length=100, blank=True)
location = models.CharField(max_length=100,blank=True)
frameworks = models.CharField(max_length=500,blank=True)
languages = models.CharField(max_length=500,blank=True)
achivements = models.CharField(max_length=1000,blank=True)
he_profile = models.CharField(max_length=100,blank=True)
spoj_profile = models.CharField(max_length=100,blank=True)
he_ques = models.IntegerField(default=0)
codechef_profile = models.CharField(max_length=100,blank=True)
codechef_ques = models.IntegerField(default=0)
spoj_ques = models.IntegerField(default=0)
git_repos = models.IntegerField(default=0)
my_website = models.CharField(max_length=100,blank=True)
def __str__(self):
return self.fname
@receiver(post_save, sender=User)
def create_user_profile(sender, instance, created, **kwargs):
if created:
Profile.objects.create(user=instance)
@receiver(post_save, sender=User)
def save_user_profile(sender, instance, **kwargs):
instance.profile.save()
class Event(models.Model):
description = models.TextField(blank=True, null=True)
title = models.CharField(max_length=500)
host = models.ManyToManyField(User)
venue = models.CharField(max_length=100)
fee = models.IntegerField(default=0)
rules = models.TextField(blank=True, null=True)
prerequistes = models.TextField(blank=True, null=True)
start_date = models.DateField(default=datetime.date.today,auto_now=False,auto_now_add=False)
end_date = models.DateField(default=datetime.date.today,auto_now=False,auto_now_add=False)
start_time = models.TimeField(default=datetime.datetime.now().time(),auto_now=False,auto_now_add=False)
end_time = models.TimeField(default=datetime.datetime.now().time(),auto_now=False,auto_now_add=False)
def __str__(self):
return self.title
class registration(models.Model):
eventid = models.IntegerField()
mobile = models.CharField(max_length=20)
fname = models.CharField(max_length=100)
lname = models.CharField(max_length=100,default="")
College = models.CharField(max_length=300)
email = models.EmailField()
query = models.CharField(max_length=1000,default="")
def __str__(self):
return self.fname + str(self.eventid)
class feedback(models.Model):
eventid = models.IntegerField()
name = models.CharField(max_length=100,blank=True)
comment = models.TextField()
star = models.IntegerField(default=0)
def __str__(self):
return str(self.star) +" "+ self.name
class project(models.Model):
description = models.TextField(blank=True, null=True)
title = models.CharField(max_length=500)
owner = models.ManyToManyField(User)
demo_link = models.CharField(max_length=100,blank=True)
source = models.CharField(max_length=100)
technologies = models.CharField(max_length=1000)
def __str__(self):
return self.title | none | 1 | 2.151223 | 2 | |
src/envs/grid_2d_env/PositionAndRotationObservation.py | aidkilda/understanding-drl-navigation | 0 | 6618248 | import numpy as np
from .Observation import Observation
class PositionAndRotationObservation(Observation):
    """Observation made up of the agent's (x, y) position plus its rotation.

    Intended as a sanity-check observation before moving on to richer ones.
    Note that the raw values may need standardization/normalization to be
    usable for training.
    """

    def __init__(self):
        pass

    def get(self, agent_position_and_rotation, grid):
        # Pack position and heading into a flat length-3 vector.
        state = agent_position_and_rotation
        return np.asarray([state.x, state.y, state.rotation])

    def get_cells(self, agent_position_and_rotation, grid):
        # Only the cell the agent currently occupies is observed.
        return [agent_position_and_rotation.get_pos()]

    def size(self):
        # Dimensionality of the vector returned by get(): x, y, rotation.
        return 3

    @classmethod
    def create(cls, config):
        """Construct the observation generically via @classmethod polymorphism.

        :param config: configuration dictionary (unused by this observation).
        :return: a new PositionAndRotationObservation.
        """
        return cls()
| import numpy as np
from .Observation import Observation
class PositionAndRotationObservation(Observation):
"""Represents the observation of the agent, that consists of it's position and rotation in the grid.
This observation could be used as a sanity check, before trying more complicated observations.
Beware that observation might not work without standardization/normalization.
"""
def __init__(self):
pass
def get(self, agent_position_and_rotation, grid):
return np.asarray([agent_position_and_rotation.x,
agent_position_and_rotation.y,
agent_position_and_rotation.rotation])
def get_cells(self, agent_position_and_rotation, grid):
return [agent_position_and_rotation.get_pos()]
def size(self):
return 3
@classmethod
def create(cls, config):
"""Use @classmethod polymorphism to be able to construct Observation Objects Generically.
:param config: configuration dictionary for arguments used to set up the right observation.
:return: constructed PositionObservation Object.
"""
return cls()
| en | 0.873137 | Represents the observation of the agent, that consists of it's position and rotation in the grid. This observation could be used as a sanity check, before trying more complicated observations. Beware that observation might not work without standardization/normalization. Use @classmethod polymorphism to be able to construct Observation Objects Generically. :param config: configuration dictionary for arguments used to set up the right observation. :return: constructed PositionObservation Object. | 3.268692 | 3 |
inconnu/common.py | tiltowait/inconnu | 4 | 6618249 | <gh_stars>1-10
"""common.py - Commonly used functions."""
import asyncio
from types import SimpleNamespace
import discord
from discord_ui import Button, SelectMenu, SelectOption
from discord_ui.components import LinkButton
from .constants import SUPPORT_URL
from .settings import Settings
from .vchar import errors, VChar
def pluralize(value: int, noun: str) -> str:
    """Pluralize a noun."""
    if value == 1:
        return f"{value} {noun}"
    # Irregular plurals we care about; everything else just takes an "s".
    irregular = {"success": "successes", "die": "dice"}
    return f"{value} {irregular.get(noun, noun + 's')}"
async def present_error(
    ctx,
    error,
    *fields,
    author = None,
    character: str = None,
    footer: str = None,
    help_url: str = None,
    components = None,
    hidden=True
):
    """
    Display an error in a nice embed.
    Args:
        ctx: The Discord context for sending the response.
        error: The error message to display.
        fields (list): Fields to add to the embed. (fields.0 is name; fields.1 is value)
        author (discord.Member): The member the message is attributed to, if not the same as ctx
        character (str): The character the message is attributed to
        footer (str): Footer text to display.
        help_url (str): The documentation URL for the error.
        components (list): Buttons or selection menus to add to the message.
        hidden (bool): Whether the response should be ephemeral (default True).
    Returns the sent message.
    """
    # Users who opted into accessibility mode get plaintext instead of embeds.
    if Settings.accessible(ctx.author):
        return await __error_text(ctx, error, *fields,
            footer=footer,
            help_url=help_url,
            components=components,
            hidden=hidden
        )
    return await __error_embed(ctx, error, *fields,
        author=author,
        character=character,
        footer=footer,
        help_url=help_url,
        components=components,
        hidden=hidden
    )
async def __error_embed(
    ctx,
    error,
    *fields,
    author = None,
    character: str = None,
    footer: str = None,
    help_url: str = None,
    components = None,
    hidden: bool
):
    """Render an error as a red Discord embed (see present_error for args)."""
    # Figure out the author
    if author is None:
        avatar = ctx.author.display_avatar
        display_name = ctx.author.display_name
    else:
        avatar = author.display_avatar
        display_name = author.display_name

    # A character name, if given, supersedes the member's display name.
    if character is not None:
        if isinstance(character, str):
            display_name = character
        else:
            display_name = character.name

    embed = discord.Embed(
        title="Error",
        description=str(error),
        color=0xFF0000
    )
    embed.set_author(name=display_name, icon_url=avatar)

    for field in fields:
        embed.add_field(name=field[0], value=field[1], inline=False)

    if footer is not None:
        embed.set_footer(text=footer)

    if help_url is not None:
        # Always pair the documentation link with the support-server link.
        link = [
            LinkButton(help_url, "Documentation"),
            LinkButton(SUPPORT_URL, "Support")
        ]
        if components is None:
            components = link
        else:
            components = [components, link]

    return await ctx.respond(embed=embed, components=components, hidden=hidden)
async def __error_text(
    ctx,
    error,
    *fields,
    footer: str = None,
    help_url: str = None,
    components = None,
    hidden: bool
):
    """Display the error as plaintext (accessibility mode; see present_error)."""
    contents = ["Error", str(error) + "\n"]

    for field in fields:
        contents.append(f"{field[0]}: {field[1]}")

    if footer is not None:
        # Code-fence the footer so it stands apart from the message body.
        contents.append(f"```{footer}```")

    if help_url is not None:
        link = [LinkButton(
            help_url,
            label="Help"
        )]
        if components is None:
            components = link
        else:
            components = [components, link]

    return await ctx.respond("\n".join(contents), components=components, hidden=hidden)
async def select_character(ctx, err, help_url, tip, player=None):
    """
    A prompt for the user to select a character from a list.
    Args:
        ctx: Discord context
        err: An error message to display
        help_url: A URL pointing to the documentation
        tip (tuple): A name and value for an embed field
        player: (Optional) A Discord member to query instead
    Returns:
        The selected character, or None if the prompt timed out.
    """
    # Default to the invoking user. Previously a None player fell through to
    # `user = None` and crashed on `user.id` below.
    player = player or ctx.author

    if ctx.author != player:
        user = player
        # Reword second-person errors when the lookup targets someone else.
        err = str(err).replace("You have", f"{user.display_name} has")
    else:
        user = ctx.author

    options = character_options(ctx.guild.id, user.id)
    errmsg = await present_error(
        ctx,
        err,
        (tip[0], tip[1]),
        author=user,
        help_url=help_url,
        components=options.components
    )

    try:
        # Buttons and select menus emit different events; wait on the right one.
        if isinstance(options.components[0], Button):
            btn = await errmsg.wait_for("button", ctx.bot, timeout=60)
            character = options.characters[btn.custom_id]
        else:
            btn = await errmsg.wait_for("select", ctx.bot, timeout=60)
            character = options.characters[btn.selected_values[0]]

        await btn.respond()
        await errmsg.disable_components()

        return character

    except asyncio.exceptions.TimeoutError:
        # The user didn't pick anything within 60s; strip the dead components.
        await errmsg.edit(components=None)
        return None
def character_options(guild: int, user: int):
    """
    Generate a dictionary of characters keyed by ID plus components for selecting them.
    Under 6 characters: Buttons
    Six or more characters: Selections
    Returns a SimpleNamespace with `characters` (id -> character) and `components`.
    """
    characters = VChar.all_characters(guild, user)
    chardict = {str(char.id): char for char in characters}

    # Discord limits how many buttons fit comfortably in a message, so fall
    # back to a single select menu once there are six or more characters.
    if len(characters) < 6:
        components = [Button(char.name, str(char.id)) for char in characters]
    else:
        options = [SelectOption(str(char.id), char.name) for char in characters]
        menu = SelectMenu(options, "character_selector", placeholder="Select a character")
        components = [menu]

    return SimpleNamespace(characters=chardict, components=components)
async def player_lookup(ctx, player: discord.Member):
    """
    Look up a player.
    Returns the sought-after player OR the ctx author if player is None.
    Raises LookupError if a non-admin tries to look up another member.
    """
    if player is None:
        return ctx.author

    # Players are allowed to look up themselves
    if not ctx.author.guild_permissions.administrator and ctx.author != player:
        raise LookupError("You don't have lookup permissions.")

    return player
class FetchError(Exception):
    """An error for when we are unable to fetch a character.

    Raised by fetch_character() when no character could be resolved.
    """
async def fetch_character(ctx, character, tip, help_url, owner=None):
    """
    Attempt to fetch a character, presenting a selection dialogue if necessary.
    Args:
        ctx: The Discord context for displaying messages and retrieving guild info
        character: The name of the character to fetch, or an already-resolved
            VChar (returned unchanged). Optional.
        tip (str): The proper syntax for the command
        help_url (str): The URL of the button to display on any error messages
        owner (discord.Member): The member who owns the character, if different
            from the ctx author
    Returns:
        The resolved character.
    Raises:
        FetchError: If no character could be resolved; an error/selection
            message has already been shown to the user by this point.
    """
    if isinstance(character, VChar):
        return character  # Already resolved; nothing to fetch.

    try:
        owner = owner or ctx.author
        return VChar.fetch(ctx.guild.id, owner.id, character)

    except errors.UnspecifiedCharacterError as err:
        # The user has several characters and didn't name one; let them pick.
        character = await select_character(ctx, err, help_url, ("Proper syntax", tip), player=owner)

        if character is None:
            raise FetchError("No character was selected.") from err

        return character

    except errors.CharacterError as err:
        await present_error(ctx, err, help_url=help_url, author=owner)
        raise FetchError(str(err)) from err
def paginate(page_size: int, *contents) -> list:
    """Break the contents into pages to fit a Discord message.

    Args:
        page_size (int): Soft maximum size of a page; a page is closed once
            its accumulated length reaches or exceeds this value.
        contents: Either strings, or field-like objects exposing `name` and
            `value` attributes. All items should be of the same kind, which
            is detected from the first item.

    Returns:
        list: For strings, a list of newline-joined string pages; for
        field-like objects, a list of lists of those objects. Empty input
        yields an empty list (previously raised IndexError on contents[0]).
    """
    contents = list(contents)
    if not contents:
        return []
    pages = []

    if isinstance(contents[0], str):
        # String mode: join items with newlines until the page fills up.
        page = contents.pop(0)
        for item in contents:
            if len(page) >= page_size:
                pages.append(page)
                page = item
            else:
                page += "\n" + item
    else:
        # Field mode: [[(header, contents), ...]] -- group objects into
        # sub-lists, sizing each page by the combined name+value lengths.
        page = [contents.pop(0)]
        page_len = len(page[0].name) + len(page[0].value)
        for item in contents:
            if page_len >= page_size:
                pages.append(page)
                page = [item]
                page_len = len(item.name) + len(item.value)
            else:
                page_len += len(item.name) + len(item.value)
                page.append(item)

    pages.append(page)  # Flush the final, partially-filled page.
    return pages
| """common.py - Commonly used functions."""
import asyncio
from types import SimpleNamespace
import discord
from discord_ui import Button, SelectMenu, SelectOption
from discord_ui.components import LinkButton
from .constants import SUPPORT_URL
from .settings import Settings
from .vchar import errors, VChar
def pluralize(value: int, noun: str) -> str:
"""Pluralize a noun."""
nouns = {"success": "successes", "die": "dice"}
pluralized = f"{value} {noun}"
if value != 1:
if noun in nouns:
pluralized = f"{value} {nouns[noun]}"
else:
pluralized += "s"
return pluralized
async def present_error(
ctx,
error,
*fields,
author = None,
character: str = None,
footer: str = None,
help_url: str = None,
components = None,
hidden=True
):
"""
Display an error in a nice embed.
Args:
ctx: The Discord context for sending the response.
error: The error message to display.
fields (list): Fields to add to the embed. (fields.0 is name; fields.1 is value)
author (discord.Member): The member the message is attributed to, if not the same as ctx
character (str): The character the message is attributed to
footer (str): Footer text to display.
help_url (str): The documentation URL for the error.
components (list): Buttons or selection menus to add to the message.
"""
if Settings.accessible(ctx.author):
return await __error_text(ctx, error, *fields,
footer=footer,
help_url=help_url,
components=components,
hidden=hidden
)
return await __error_embed(ctx, error, *fields,
author=author,
character=character,
footer=footer,
help_url=help_url,
components=components,
hidden=hidden
)
async def __error_embed(
ctx,
error,
*fields,
author = None,
character: str = None,
footer: str = None,
help_url: str = None,
components = None,
hidden: bool
):
# Figure out the author
if author is None:
avatar = ctx.author.display_avatar
display_name = ctx.author.display_name
else:
avatar = author.display_avatar
display_name = author.display_name
if character is not None:
if isinstance(character, str):
display_name = character
else:
display_name = character.name
embed = discord.Embed(
title="Error",
description=str(error),
color=0xFF0000
)
embed.set_author(name=display_name, icon_url=avatar)
for field in fields:
embed.add_field(name=field[0], value=field[1], inline=False)
if footer is not None:
embed.set_footer(text=footer)
if help_url is not None:
link = [
LinkButton(help_url, "Documentation"),
LinkButton(SUPPORT_URL, "Support")
]
if components is None:
components = link
else:
components = [components, link]
return await ctx.respond(embed=embed, components=components, hidden=hidden)
async def __error_text(
ctx,
error,
*fields,
footer: str = None,
help_url: str = None,
components = None,
hidden: bool
):
"""Display the error as plaintext."""
contents = ["Error", str(error) + "\n"]
for field in fields:
contents.append(f"{field[0]}: {field[1]}")
if footer is not None:
contents.append(f"```{footer}```")
if help_url is not None:
link = [LinkButton(
help_url,
label="Help"
)]
if components is None:
components = link
else:
components = [components, link]
return await ctx.respond("\n".join(contents), components=components, hidden=hidden)
async def select_character(ctx, err, help_url, tip, player=None):
"""
A prompt for the user to select a character from a list.
Args:
ctx: Discord context
err: An error message to display
help_url: A URL pointing to the documentation
tip (tuple): A name and value for an embed field
player: (Optional) A Discord member to query instead
"""
if ctx.author != player:
user = player
err = str(err).replace("You have", f"{user.display_name} has")
else:
user = ctx.author
options = character_options(ctx.guild.id, user.id)
errmsg = await present_error(
ctx,
err,
(tip[0], tip[1]),
author=user,
help_url=help_url,
components=options.components
)
try:
if isinstance(options.components[0], Button):
btn = await errmsg.wait_for("button", ctx.bot, timeout=60)
character = options.characters[btn.custom_id]
else:
btn = await errmsg.wait_for("select", ctx.bot, timeout=60)
character = options.characters[btn.selected_values[0]]
await btn.respond()
await errmsg.disable_components()
return character
except asyncio.exceptions.TimeoutError:
await errmsg.edit(components=None)
return None
def character_options(guild: int, user: int):
"""
Generate a dictionary of characters keyed by ID plus components for selecting them.
Under 6 characters: Buttons
Six or more characters: Selections
"""
characters = VChar.all_characters(guild, user)
chardict = {str(char.id): char for char in characters}
if len(characters) < 6:
components = [Button(char.name, str(char.id)) for char in characters]
else:
options = [SelectOption(str(char.id), char.name) for char in characters]
menu = SelectMenu(options, "character_selector", placeholder="Select a character")
components = [menu]
return SimpleNamespace(characters=chardict, components=components)
async def player_lookup(ctx, player: discord.Member):
"""
Look up a player.
Returns the sought-after player OR the ctx author if player_str is None.
Raises PermissionError if the user doesn't have admin permissions.
Raises ValueError if player is not a valid player name.
"""
if player is None:
return ctx.author
# Players are allowed to look up themselves
if not ctx.author.guild_permissions.administrator and ctx.author != player:
raise LookupError("You don't have lookup permissions.")
return player
class FetchError(Exception):
"""An error for when we are unable to fetch a character."""
async def fetch_character(ctx, character, tip, help_url, owner=None):
"""
Attempt to fetch a character, presenting a selection dialogue if necessary.
Args:
ctx: The Discord context for displaying messages and retrieving guild info
character (str): The name of the character to fetch. Optional.
tip (str): The proper syntax for the command
help_url (str): The URL of the button to display on any error messages
userid (int): The ID of the user who owns the character, if different from the ctx author
"""
if isinstance(character, VChar):
return character
try:
owner = owner or ctx.author
return VChar.fetch(ctx.guild.id, owner.id, character)
except errors.UnspecifiedCharacterError as err:
character = await select_character(ctx, err, help_url, ("Proper syntax", tip), player=owner)
if character is None:
raise FetchError("No character was selected.") from err
return character
except errors.CharacterError as err:
await present_error(ctx, err, help_url=help_url, author=owner)
raise FetchError(str(err)) from err
def paginate(page_size: int, *contents) -> list:
"""Break the contents into pages to fit a Discord message."""
contents = list(contents)
pages = []
if isinstance(contents[0], str):
page = contents.pop(0)
for item in contents:
if len(page) >= page_size:
pages.append(page)
page = item
else:
page += "\n" + item
else:
# [[(header, contents), (header, contents), (header, contents)]]
page = [contents.pop(0)]
page_len = len(page[0].name) + len(page[0].value)
for item in contents:
if page_len >= page_size:
pages.append(page)
page = [item]
page_len = len(item.name) + len(item.value)
else:
page_len += len(item.name) + len(item.value)
page.append(item)
pages.append(page)
return pages | en | 0.769495 | common.py - Commonly used functions. Pluralize a noun. Display an error in a nice embed. Args: ctx: The Discord context for sending the response. error: The error message to display. fields (list): Fields to add to the embed. (fields.0 is name; fields.1 is value) author (discord.Member): The member the message is attributed to, if not the same as ctx character (str): The character the message is attributed to footer (str): Footer text to display. help_url (str): The documentation URL for the error. components (list): Buttons or selection menus to add to the message. # Figure out the author Display the error as plaintext. A prompt for the user to select a character from a list. Args: ctx: Discord context err: An error message to display help_url: A URL pointing to the documentation tip (tuple): A name and value for an embed field player: (Optional) A Discord member to query instead Generate a dictionary of characters keyed by ID plus components for selecting them. Under 6 characters: Buttons Six or more characters: Selections Look up a player. Returns the sought-after player OR the ctx author if player_str is None. Raises PermissionError if the user doesn't have admin permissions. Raises ValueError if player is not a valid player name. # Players are allowed to look up themselves An error for when we are unable to fetch a character. Attempt to fetch a character, presenting a selection dialogue if necessary. Args: ctx: The Discord context for displaying messages and retrieving guild info character (str): The name of the character to fetch. Optional. tip (str): The proper syntax for the command help_url (str): The URL of the button to display on any error messages userid (int): The ID of the user who owns the character, if different from the ctx author Break the contents into pages to fit a Discord message. # [[(header, contents), (header, contents), (header, contents)]] | 2.993036 | 3 |
py_solutions_91-100/Euler_92.py | tijko/Project-Euler | 0 | 6618250 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
A number chain is created by continuously adding the square of the digits in a
number to form a new number until it has been seen before.
For example,
44 → 32 → 13 → 10 → 1 → 1
85 → 89 → 145 → 42 → 20 → 4 → 16 → 37 → 58 → 89
Therefore any chain that arrives at 1 or 89 will become stuck in an endless
loop. What is most amazing is that EVERY starting number will eventually arrive
at 1 or 89.
How many starting numbers below ten million will arrive at 89?
'''
from __future__ import print_function
import timeit
# Python 2 compatibility: prefer the lazy xrange when it exists.
try:
    range = xrange
except NameError:
    pass

# Map each digit character to its square so chain() avoids repeated int()/pow.
digit_sqs = {str(i): i * i for i in range(10)}


def chain(n):
    """Return the sum of the squares of the decimal digits of n."""
    return sum(digit_sqs[d] for d in str(n))


def euler_92(limit=10000000):
    """Count starting numbers below `limit` whose digit-square chain hits 89.

    Every chain eventually reaches the fixed point 1 or the 89 cycle, so each
    intermediate sum is memoised together with its terminal value to avoid
    re-walking shared chain suffixes.

    Args:
        limit (int): Exclusive upper bound for starting numbers. Defaults to
            ten million, the bound asked by Project Euler problem 92.

    Returns:
        int: How many starts in [1, limit) terminate at 89.
    """
    eighty_nine = 0
    chains = {}
    for start in range(1, limit):
        sq_sum = chain(start)
        # Every sum seen while walking this chain shares one list; once the
        # terminal value is appended, all memoised entries resolve to it.
        sub_chain = []
        while (sq_sum != 89 and sq_sum != 1 and
               chains.get(sq_sum) is None):
            chains[sq_sum] = sub_chain
            sq_sum = chain(sq_sum)
        if sq_sum != 89 and sq_sum != 1:
            # Hit a previously-seen sum: reuse its recorded terminal value.
            sq_sum = chains.get(sq_sum)[0]
            chains[sq_sum] = sub_chain
        # Record this chain's terminal, resolving every sum memoised above.
        sub_chain.append(sq_sum)
        if sq_sum == 89:
            eighty_nine += 1
    return eighty_nine
if __name__ == "__main__":
    # Time the full ten-million-number run and print the puzzle answer.
    start = timeit.default_timer()
    print("Answer: {}".format(euler_92()))
    stop = timeit.default_timer()
    print("Time: {0:9.5f}".format(stop - start))
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
A number chain is created by continuously adding the square of the digits in a
number to form a new number until it has been seen before.
For example,
44 → 32 → 13 → 10 → 1 → 1
85 → 89 → 145 → 42 → 20 → 4 → 16 → 37 → 58 → 89
Therefore any chain that arrives at 1 or 89 will become stuck in an endless
loop. What is most amazing is that EVERY starting number will eventually arrive
at 1 or 89.
How many starting numbers below ten million will arrive at 89?
'''
from __future__ import print_function
import timeit
try:
range = xrange
except NameError:
pass
digit_sqs = {str(i):i*i for i in range(10)}
chain = lambda n: sum([digit_sqs[i] for i in str(n)])
def euler_92():
eighty_nine = 0
chains = {}
for start in range(1, 10000000):
sq_sum = chain(start)
# keep a reference to allow assigning all numbers to the same
# end sq_sum series number
sub_chain = []
while (sq_sum != 89 and sq_sum != 1 and
chains.get(sq_sum) is None):
chains[sq_sum] = sub_chain
sq_sum = chain(sq_sum)
if sq_sum != 89 and sq_sum != 1:
sq_sum = chains.get(sq_sum)[0]
chains[sq_sum] = sub_chain
# set the reference to the sq_sum, all integers in that series
# will no be set
sub_chain.append(sq_sum)
if sq_sum == 89:
eighty_nine += 1
return eighty_nine
if __name__ == "__main__":
start = timeit.default_timer()
print("Answer: {}".format(euler_92()))
stop = timeit.default_timer()
print("Time: {0:9.5f}".format(stop - start))
| en | 0.901103 | #!/usr/bin/env python # -*- coding: utf-8 -*- A number chain is created by continuously adding the square of the digits in a number to form a new number until it has been seen before. For example, 44 → 32 → 13 → 10 → 1 → 1 85 → 89 → 145 → 42 → 20 → 4 → 16 → 37 → 58 → 89 Therefore any chain that arrives at 1 or 89 will become stuck in an endless loop. What is most amazing is that EVERY starting number will eventually arrive at 1 or 89. How many starting numbers below ten million will arrive at 89? # keep a reference to allow assigning all numbers to the same # end sq_sum series number # set the reference to the sq_sum, all integers in that series # will no be set | 3.882367 | 4 |