index
int64 | repo_name
string | branch_name
string | path
string | content
string | import_graph
string |
|---|---|---|---|---|---|
7,589
|
MADRobotNO/Simple_NEAT_v0.2
|
refs/heads/master
|
/Layer.py
|
from Node import Node
from Connection import Connection
import numpy as np
class Layer:
    """One layer of a NEAT network: owns its nodes and their incoming connections.

    Node and connection objects are also registered in the model-wide lists
    passed in by the caller (shared-list bookkeeping, not copies).
    """

    INPUT_LAYER = "input"
    HIDDEN_LAYER = "hidden"
    OUTPUT_LAYER = "output"
    # fixed positions of the input/output layers in the model's layer list
    INPUT_LAYER_ID = 0
    OUTPUT_LAYER_ID = 1

    def __init__(self, layer_id, layer_type, number_of_nodes, list_of_all_nodes, list_of_all_connections,
                 list_of_all_layers, list_of_innovations, activation_function=Node.TANH_ACTIVATION_FUNCTION):
        """Create the layer and immediately build its nodes (and connections for non-input layers)."""
        self.layer_id = layer_id
        self.layer_type = layer_type
        self.number_of_nodes = number_of_nodes
        self.activation_function = activation_function
        self.list_of_layer_nodes = []
        self.list_of_layer_connections = []
        self.initialize_layer(list_of_all_nodes, list_of_all_connections, list_of_all_layers, list_of_innovations)

    def initialize_layer(self, list_of_all_nodes, list_of_all_connections, list_of_all_layers, list_of_innovations):
        """Create this layer's nodes and, for non-input layers, its incoming connections."""
        self.initialize_nodes(list_of_all_nodes)
        if self.layer_type != self.INPUT_LAYER:
            self.initialize_connections(list_of_all_connections, list_of_all_layers, list_of_innovations)

    def initialize_nodes(self, list_of_all_nodes):
        """Create number_of_nodes nodes, registering each globally and locally."""
        for _ in range(self.number_of_nodes):
            # node id is its position in the global node list
            node = Node(len(list_of_all_nodes), self.layer_type, self.layer_id, self.activation_function)
            list_of_all_nodes.append(node)
            self.list_of_layer_nodes.append(node)

    def initialize_connections(self, list_of_all_connections, list_of_all_layers, list_of_innovations):
        """Fully connect every node of this layer to every input-layer node."""
        # layer nearest to input layer, connecting to input
        for input_node in list_of_all_layers[self.INPUT_LAYER_ID].list_of_layer_nodes:
            for current_node in self.list_of_layer_nodes:
                innovation_id = self.get_innovation_id(list_of_innovations, input_node.node_id, current_node.node_id)
                connection = Connection(len(list_of_all_connections), input_node.node_id,
                                        current_node.node_id, innovation_id)
                list_of_all_connections.append(connection)
                self.list_of_layer_connections.append(connection)

    def get_innovation_id(self, list_of_innovations, from_node_id, to_node_id):
        """Return the innovation number for (from, to), registering the pair if unseen.

        A connection and its reverse share the same innovation number.
        """
        new_connection = (from_node_id, to_node_id)
        for index, innovation in enumerate(list_of_innovations):
            # BUG FIX: the original tested "reversed(innovation) == new_connection";
            # reversed() returns an iterator, which never compares equal to a tuple,
            # so the reverse-direction check silently never matched.
            if innovation == new_connection or innovation[::-1] == new_connection:
                return index
        list_of_innovations.append(new_connection)
        # the pair was just appended, so its index is the last position
        return len(list_of_innovations) - 1

    def generate_new_weights(self):
        """Re-randomize the weight of every connection in this layer."""
        for connection in self.list_of_layer_connections:
            connection.generate_random_weight()

    def generate_new_biases(self):
        """Re-randomize the bias of every node in this layer."""
        for node in self.list_of_layer_nodes:
            node.generate_random_bias()

    def add_connection(self, from_node, to_node, innovation_id, list_of_all_connections):
        """Create a connection from from_node to to_node, register it, and return it."""
        new_connection = Connection(len(list_of_all_connections), from_node.node_id,
                                    to_node.node_id, innovation_id)
        self.list_of_layer_connections.append(new_connection)
        list_of_all_connections.append(new_connection)
        return new_connection

    def add_new_node(self, input_layer, list_of_innovations, list_of_all_nodes, list_of_all_connections):
        """Add one node to this layer, fully connected from input_layer, and return it.

        Outgoing connections are the caller's responsibility.
        """
        node = Node(len(list_of_all_nodes), self.layer_type, self.layer_id, self.activation_function)
        for from_node in input_layer.list_of_layer_nodes:
            innovation_id = self.get_innovation_id(list_of_innovations, from_node.node_id, node.node_id)
            self.add_connection(from_node, node, innovation_id, list_of_all_connections)
        self.list_of_layer_nodes.append(node)
        list_of_all_nodes.append(node)
        return node

    def mutate(self, mutation_rate):
        """Give every node/connection a mutation_rate chance to adjust, else to reset.

        Note each element draws up to two independent random samples (adjust is
        tested first; reset only if adjust did not fire), preserving the original
        mutation probabilities.
        """
        for node in self.list_of_layer_nodes:
            # chance to nudge the bias ...
            if np.random.random() < mutation_rate:
                node.adjust_bias()
            # ... otherwise an independent chance to replace it entirely
            elif np.random.random() < mutation_rate:
                node.generate_random_bias()
        for connection in self.list_of_layer_connections:
            # chance to nudge the weight ...
            if np.random.random() < mutation_rate:
                connection.adjust_weight()
            # ... otherwise an independent chance to replace it entirely
            elif np.random.random() < mutation_rate:
                connection.generate_random_weight()

    def __str__(self):
        return_string = "Layer id: " + str(self.layer_id) + ", layer type: " + self.layer_type + ", number of connections: " \
            + str(len(self.list_of_layer_connections)) + ", number of nodes: " + str(len(self.list_of_layer_nodes)) + "\n"
        return_string += "Nodes:\n"
        if len(self.list_of_layer_nodes) > 0:
            for node in self.list_of_layer_nodes:
                return_string += node.__str__() + "\n"
        else:
            return_string += "No nodes in layer!\n"
        return_string += "Connections:\n"
        if len(self.list_of_layer_connections) > 0:
            for connection in self.list_of_layer_connections:
                return_string += connection.__str__() + "\n"
        else:
            return_string += "No connections!\n"
        return return_string
|
{"/Layer.py": ["/Node.py", "/Connection.py"], "/Connection.py": ["/Node.py"], "/Model.py": ["/Connection.py", "/Layer.py", "/Node.py"], "/example.py": ["/NEAT.py", "/RandomData.py"], "/NEAT.py": ["/Model.py"]}
|
7,590
|
MADRobotNO/Simple_NEAT_v0.2
|
refs/heads/master
|
/Connection.py
|
from Node import Node
import numpy as np
class Connection:
    """A weighted, directed link between two node ids, tagged with an innovation number."""

    def __init__(self, connection_id, from_node, to_node, innovation_id):
        # bookkeeping identifiers
        self.connection_id = connection_id
        self.innovation_id = innovation_id
        # endpoints are node ids, not Node objects
        self.from_node = from_node
        self.to_node = to_node
        # new connections start active, with a fresh random weight
        self.enabled = True
        self.weight = None
        self.generate_random_weight()

    def adjust_weight(self):
        """Nudge the current weight by a small uniform random amount."""
        delta = np.random.uniform(-0.1, 0.1)
        self.weight += delta

    def generate_random_weight(self):
        """Replace the weight with a fresh uniform sample from [-1, 1)."""
        self.weight = np.random.uniform(-1, 1)

    def __str__(self):
        fields = [
            "Connection id: " + str(self.connection_id),
            "innovation id: " + str(self.innovation_id),
            "from node: " + str(self.from_node),
            "to node: " + str(self.to_node),
            "enabled: " + str(self.enabled),
            "weight: " + str(self.weight),
        ]
        return ", ".join(fields)
|
{"/Layer.py": ["/Node.py", "/Connection.py"], "/Connection.py": ["/Node.py"], "/Model.py": ["/Connection.py", "/Layer.py", "/Node.py"], "/example.py": ["/NEAT.py", "/RandomData.py"], "/NEAT.py": ["/Model.py"]}
|
7,591
|
MADRobotNO/Simple_NEAT_v0.2
|
refs/heads/master
|
/Model.py
|
from Connection import Connection
from Layer import Layer
import numpy as np
from Node import Node
class Model:
    """A single NEAT network: layers of nodes wired by innovation-tracked connections."""

    def __init__(self, model_id, number_of_inputs, number_of_outputs, mutation_rate, list_of_innovations):
        self.model_id = model_id
        self.outputs = []
        self.score = 0.0
        self.number_of_inputs = number_of_inputs
        self.number_of_outputs = number_of_outputs
        self.mutation_rate = mutation_rate
        # global registries shared by all layers of this model
        self.list_of_all_nodes = []
        self.list_of_all_layers = []
        self.list_of_all_connections = []
        self.list_of_all_hidden_layers = []
        self.input_layer = None
        self.output_layer = None
        self.initialize_model(list_of_innovations)

    def initialize_model(self, list_of_innovations):
        """Create the input and output layers (the output layer uses sigmoid activation)."""
        input_layer = Layer(len(self.list_of_all_layers), Layer.INPUT_LAYER, self.number_of_inputs,
                            self.list_of_all_nodes, self.list_of_all_connections,
                            self.list_of_all_layers, list_of_innovations)
        self.list_of_all_layers.append(input_layer)
        self.input_layer = input_layer
        output_layer = Layer(len(self.list_of_all_layers), Layer.OUTPUT_LAYER, self.number_of_outputs,
                             self.list_of_all_nodes, self.list_of_all_connections,
                             self.list_of_all_layers, list_of_innovations,
                             Node.SIGMOID_ACTIVATION_FUNCTION)
        self.list_of_all_layers.append(output_layer)
        self.output_layer = output_layer

    def regenerate_random_weights_bias(self):
        """Re-randomize every weight and bias in the network."""
        for layer in self.list_of_all_layers:
            layer.generate_new_weights()
            layer.generate_new_biases()

    def mutate(self, list_of_innovations, parent_model=None, parent_model_two=None):
        """With probability mutation_rate mutate the structure, otherwise the weights/biases.

        parent_model and parent_model_two are accepted for interface compatibility
        but are currently unused.
        """
        if np.random.random() < self.mutation_rate:
            self.mutate_structure(list_of_innovations)
        else:
            for layer in self.list_of_all_layers:
                # each element of the layer has a mutation_rate chance to mutate
                layer.mutate(self.mutation_rate)

    def mutate_structure(self, list_of_innovations):
        """Either add/toggle a connection (70%) or add a node / hidden layer (30%)."""
        nodes_connection_ratio = 0.7
        if np.random.random() <= nodes_connection_ratio:
            self._mutate_connection(list_of_innovations)
        else:
            self._mutate_node(list_of_innovations)

    def _random_node(self):
        """Return a uniformly random node from the whole network."""
        return self.list_of_all_nodes[np.random.randint(0, len(self.list_of_all_nodes))]

    def _mutate_connection(self, list_of_innovations):
        """Pick two compatible nodes, then toggle an existing connection or create a new one."""
        node_one = self._random_node()
        # the source node cannot be an output node
        while node_one.node_type is Node.OUTPUT_NODE:
            node_one = self._random_node()
        node_two = self._random_node()
        # the target node has to be:
        # 1. in a lower layer to prevent reverse connections
        #    (exception: connection between input and output layers)
        # 2. different than the source node
        # 3. of type hidden, to prevent same-layer connections in input/output layers
        # 4. not an input node, as input nodes only have outgoing connections
        while (node_two.layer_id > node_one.layer_id) \
                or (node_two.node_id == node_one.node_id) \
                or ((node_two.layer_id == node_one.layer_id) and (node_two.node_type is not Node.HIDDEN_NODE)) \
                or (node_two.node_type is Node.INPUT_NODE):
            if node_two.layer_id > node_one.layer_id and (node_one.node_type == Node.INPUT_NODE
                                                          and node_two.node_type == Node.OUTPUT_NODE):
                # exception: input -> output is allowed even across increasing layer ids
                break
            if (node_one.node_type is Node.INPUT_NODE) and (node_two.node_type is not Node.INPUT_NODE):
                # exception: input -> anything-but-input is allowed
                break
            node_two = self._random_node()
        # if the connection already exists, flip its enabled flag instead of duplicating it
        for connection in self.list_of_all_layers[node_two.layer_id].list_of_layer_connections:
            if connection.from_node == node_one.node_id and connection.to_node == node_two.node_id:
                connection.enabled = not connection.enabled
                return
        # otherwise register a brand new connection
        innovation_id = self.get_innovation_id(node_one, node_two, list_of_innovations)
        self.list_of_all_layers[node_two.layer_id].add_connection(node_one, node_two, innovation_id,
                                                                 self.list_of_all_connections)

    def _mutate_node(self, list_of_innovations):
        """Add a node to a random existing hidden layer (70%) or create a new hidden layer (30%)."""
        if len(self.list_of_all_hidden_layers) == 0:
            self.create_new_hidden_layer_with_node(list_of_innovations)
            return
        if np.random.random() > 0.3:
            # add a node to a randomly chosen existing hidden layer
            hidden_layer = self.list_of_all_hidden_layers[np.random.randint(0, len(self.list_of_all_hidden_layers))]
            previous_layer = self.list_of_all_layers[hidden_layer.layer_id - 1]
            node = hidden_layer.add_new_node(self.input_layer, list_of_innovations,
                                             self.list_of_all_nodes, self.list_of_all_connections)
            # wire the new node into a random node of the previous layer
            to_node = previous_layer.list_of_layer_nodes[np.random.randint(0, len(previous_layer.list_of_layer_nodes))]
            previous_layer.add_connection(node, to_node,
                                          self.get_innovation_id(node, to_node, list_of_innovations),
                                          self.list_of_all_connections)
        else:
            self.create_new_hidden_layer_with_node(list_of_innovations)

    def create_new_hidden_layer_with_node(self, list_of_innovations):
        """Create a new hidden layer holding one node, wire it in, and disable the bypassed connection."""
        hidden_layer = Layer(len(self.list_of_all_layers), Layer.HIDDEN_LAYER, 1, self.list_of_all_nodes,
                             self.list_of_all_connections, self.list_of_all_layers, list_of_innovations)
        self.list_of_all_layers.append(hidden_layer)
        self.list_of_all_hidden_layers.append(hidden_layer)
        node = hidden_layer.list_of_layer_nodes[len(hidden_layer.list_of_layer_nodes) - 1]
        from_connections = hidden_layer.list_of_layer_connections
        # connect the new node forward into the layer created just before this one
        previous_layer = self.list_of_all_layers[hidden_layer.layer_id - 1]
        to_node = previous_layer.list_of_layer_nodes[np.random.randint(0, len(previous_layer.list_of_layer_nodes))]
        previous_layer_connections = previous_layer.list_of_layer_connections
        previous_layer.add_connection(node, to_node,
                                      self.get_innovation_id(node, to_node, list_of_innovations),
                                      self.list_of_all_connections)
        # disable the direct connection(s) the new node now bypasses
        for connection in previous_layer_connections:
            for from_connection in from_connections:
                if from_connection.from_node == connection.from_node and connection.to_node == to_node.node_id:
                    connection.enabled = False

    def get_innovation_id(self, node_from, node_to, list_of_innovations):
        """Return the innovation number for (from, to), registering the pair if unseen.

        NOTE(review): unlike Layer.get_innovation_id this does not match the
        reversed pair — confirm whether both should share that behavior.
        """
        new_connection = (node_from.node_id, node_to.node_id)
        if new_connection in list_of_innovations:
            return list_of_innovations.index(new_connection)
        list_of_innovations.append(new_connection)
        # BUG FIX: was "return len(list_of_innovations)", one past the index of the
        # entry just appended and inconsistent with Layer.get_innovation_id
        return len(list_of_innovations) - 1

    def get_node_by_id(self, node_id):
        """Return the node with the given id, or None if absent."""
        for node in self.list_of_all_nodes:
            if node_id == node.node_id:
                return node
        return None

    def feed_forward(self, input_data):
        """Propagate input_data through the network and return the list of outputs."""
        self.outputs = []
        # input layer: node outputs are the raw inputs
        for input_index, input_node in enumerate(self.input_layer.list_of_layer_nodes):
            input_node.output = input_data[input_index]
        # hidden layers, most recently created first
        # NOTE(review): evaluation order assumes newer hidden layers feed older ones — confirm
        for hidden_layer in reversed(self.list_of_all_hidden_layers):
            for node in hidden_layer.list_of_layer_nodes:
                node.input_data = 0.0
                for connection in hidden_layer.list_of_layer_connections:
                    if connection.to_node == node.node_id and connection.enabled:
                        source = self.get_node_by_id(connection.from_node)
                        node.input_data += source.output * connection.weight
                node.calculate_output()
        # output layer
        for node in self.output_layer.list_of_layer_nodes:
            node.input_data = 0.0
            for output_connection in self.output_layer.list_of_layer_connections:
                if output_connection.to_node == node.node_id and output_connection.enabled:
                    source = self.get_node_by_id(output_connection.from_node)
                    node.input_data += source.output * output_connection.weight
            node.calculate_output()
            self.outputs.append(node.output)
        return self.outputs

    def fit(self, input_data, target):
        """Run one forward pass and reward the model for each correct thresholded output."""
        outputs = self.feed_forward(input_data)
        for output_index, output in enumerate(outputs):
            # threshold the sigmoid output at 0.5
            temp_output = 1.0 if output > 0.5 else 0.0
            self.reward(output, temp_output, target[output_index])

    def reward(self, output, temp_output, target):
        """Increase the score by the confidence of a correct prediction.

        |2*output - 1| is 0 at output == 0.5 and 1 at output == 0 or 1.
        """
        if temp_output == target:
            self.score += abs((2 * output) - 1)

    def __str__(self):
        return_string = "Model id: " + str(self.model_id) + ", number of layers: " + str(len(self.list_of_all_layers)) \
            + ", score: " + str(self.score) + "\nLayers:\n"
        for layer in self.list_of_all_layers:
            return_string += layer.__str__() + "\n"
        return return_string
|
{"/Layer.py": ["/Node.py", "/Connection.py"], "/Connection.py": ["/Node.py"], "/Model.py": ["/Connection.py", "/Layer.py", "/Node.py"], "/example.py": ["/NEAT.py", "/RandomData.py"], "/NEAT.py": ["/Model.py"]}
|
7,592
|
MADRobotNO/Simple_NEAT_v0.2
|
refs/heads/master
|
/example.py
|
from NEAT import Neat
from RandomData import Xor
# Build the XOR dataset and a NEAT population of 10 models
# (2 inputs, 1 output, mutation rate 0.4).
xor = Xor()
neat = Neat(2, 1, 10, 0.4)

# Give every model an initial mutation before training.
for candidate in neat.list_of_all_models:
    candidate.mutate(neat.list_of_innovations)

# Train the whole population on XOR for 50 generations.
neat.fit(xor.data, xor.targets, 50)
print("done")
|
{"/Layer.py": ["/Node.py", "/Connection.py"], "/Connection.py": ["/Node.py"], "/Model.py": ["/Connection.py", "/Layer.py", "/Node.py"], "/example.py": ["/NEAT.py", "/RandomData.py"], "/NEAT.py": ["/Model.py"]}
|
7,593
|
MADRobotNO/Simple_NEAT_v0.2
|
refs/heads/master
|
/NEAT.py
|
from Model import Model
import numpy as np
import copy
class Neat:
    """Population manager for the simple NEAT implementation."""

    def __init__(self, number_of_inputs=2, number_of_outputs=1, number_of_models=1, mutation_rate=0.1):
        # innovation registry shared by every model in the population
        self.list_of_innovations = []
        self.list_of_all_models = []
        self.mutation_rate = mutation_rate
        self.number_of_inputs = number_of_inputs
        self.number_of_outputs = number_of_outputs
        self.number_of_models = number_of_models
        # the first model defines the base structure the population is cloned from
        parent_model = self.initialize_model()
        self.generate_population(parent_model)

    def generate_population(self, parent_model, parent_model_two=None):
        """Fill the population up to number_of_models with mutated clones of parent_model."""
        for _ in range(len(self.list_of_all_models), self.number_of_models):
            model = self.model_from_parent_structure(parent_model)
            model.mutate(self.list_of_innovations, parent_model, parent_model_two)
            self.list_of_all_models.append(model)

    def model_from_parent_structure(self, parent_model):
        """Deep-copy parent_model and reset its identity, outputs, score and weights."""
        model = copy.deepcopy(parent_model)
        model.model_id = len(self.list_of_all_models)
        # CONSISTENCY FIX: Model initializes outputs as a list, not None
        model.outputs = []
        model.score = 0.0
        model.regenerate_random_weights_bias()
        return model

    def initialize_model(self):
        """Create, register and return the first (parent) model."""
        model = Model(len(self.list_of_all_models), self.number_of_inputs, self.number_of_outputs,
                      self.mutation_rate, self.list_of_innovations)
        self.list_of_all_models.append(model)
        return model

    def fit(self, input_data, target_data, number_of_generations):
        """Train every model on the whole data set for number_of_generations, then report the winners."""
        for generation in range(1, number_of_generations + 1):
            print("Generation:", generation)
            # NOTE(review): scores accumulate across generations (never reset) — confirm intended
            for index, input_row in enumerate(input_data):
                # ... train each model on this data row
                for model in self.list_of_all_models:
                    model.fit(input_row, target_data[index])
        best_models = self.get_best_and_second_best_model()
        print("\nBest model:")
        print(best_models.get('first'))
        print("Second best:")
        print(best_models.get('second'))

    def get_best_and_second_best_model(self):
        """Return the two highest-scoring models as {"first": ..., "second": ...}.

        BUG FIX: the previous version initialized the running maxima to 0.0 with a
        strict comparison, so a population where every score is 0.0 returned None
        for both entries; now the first model always seeds the initial best.
        """
        best = None
        second = None
        for model in self.list_of_all_models:
            if best is None or model.score > best.score:
                second = best
                best = model
            elif second is None or model.score > second.score:
                second = model
        return {"first": best, "second": second}

    def __str__(self):
        return_string = "Current NEAT state:\n"
        for model in self.list_of_all_models:
            return_string += model.__str__() + "\n"
        return_string += "List of innovations:\n" + str(self.list_of_innovations)
        return return_string
|
{"/Layer.py": ["/Node.py", "/Connection.py"], "/Connection.py": ["/Node.py"], "/Model.py": ["/Connection.py", "/Layer.py", "/Node.py"], "/example.py": ["/NEAT.py", "/RandomData.py"], "/NEAT.py": ["/Model.py"]}
|
7,594
|
tchaly-bethmaure/ProjetGrid
|
refs/heads/master
|
/script/__init__.py
|
__author__ = 'tchaly'
|
{"/Tools/ScriptBrowser.py": ["/Model/Script.py", "/Tools/Prompter.py", "/Tools/CmdLineExplorer.py", "/Tools/JobExecutor.py"], "/launcher.py": ["/View/IHM.py"]}
|
7,595
|
tchaly-bethmaure/ProjetGrid
|
refs/heads/master
|
/Tools/ScriptBrowser.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# imports
from Model.Script import Script
from Tools.Prompter import Prompter
from Tools.CmdLineExplorer import CmdLineExplorer
from Tools.JobExecutor import JobExecutor
##
#
class ScriptBrowser(Prompter, object):
    ## available choices for menu
    choix_menu = {
        1: "Add a script to queue",
        2: "Delete a script from queue",
        3: "Next step"
    }
    ## available choices for folder mode selection
    choix_folder_mode = {
        1: "Add a script located in this folder",
        2: "Parent folder",
        3: "Go in a specific folder",
        4: "Previous menu"
    }
    ## available choices for file removing
    choix_deletion_mode = {
        1: "Delete file from stack",
        2: "Previous menu"
    }
    question = "Choose an action among those : "

    ## constructor
    def __init__(self, init_root):
        super(ScriptBrowser, self).__init__()
        self.initial_root = init_root
        # scripts queued for execution
        self.file_exec = []

    ## print the queued scripts on a single bracketed line
    def display_stack(self):
        self.print_msg("######## Current script stack : ")
        queued = ", ".join(str(script) for script in self.file_exec)
        self.print_msg("[ " + queued + " ]")

    ## main menu loop: queue scripts, remove them, or move on
    def prompt_menu(self):
        choice = "-1"
        while choice != "3":
            self.clear_screen()
            self.display_stack()
            self.print_msg("")  # blank line
            choice = self.prompt_choice(ScriptBrowser.question, ScriptBrowser.choix_menu.itervalues())
            self.print_option_choosen(choice, ScriptBrowser.choix_menu)
            self.press_a_key_continue()
            if choice == "1":
                # folder exploration and script queueing mode
                self.prompt_folder_mode()
            if choice == "2":
                # stack deletion mode
                self.prompt_stack_deletion_mode()

    ## folder browser: walk the tree and queue scripts
    def prompt_folder_mode(self):
        choice = "-1"
        explorer = CmdLineExplorer(self.initial_root)
        while choice != "4":
            self.clear_screen()
            self.display_stack()
            self.print_msg("\n######## Current folder : " + explorer.current_path)
            explorer.display_folder_information()
            self.print_msg("")
            choice = self.prompt_choice(ScriptBrowser.question, ScriptBrowser.choix_folder_mode.itervalues())
            self.print_option_choosen(choice, ScriptBrowser.choix_folder_mode)
            if choice == "1":
                # queue a script located in the current folder
                wanted = self.prompt_question("Which file to stack ?")
                wanted_path = explorer.current_path + "/" + wanted
                if explorer.is_in_folder(wanted) and explorer.is_a_file(wanted_path):
                    tool = self.prompt_question("With which tool do you want to execute this tool ?")
                    self.add_script_in_stack(wanted, tool, wanted_path)
            if choice == "3":
                # descend into a sub-folder
                target = self.prompt_question("In which folder would you like to go into ?")
                if explorer.is_in_folder(target) and explorer.is_a_folder(explorer.current_path + "/" + target):
                    explorer.go_forward(target)
                self.press_a_key_continue()
            if choice == "2" or choice == "cd ..":
                # climb to the parent folder
                explorer.go_backward()

    ## deletion prompter; hands control back to the main menu when done
    def prompt_stack_deletion_mode(self):
        choice = "-1"
        while choice != "2":
            self.clear_screen()
            self.display_stack()
            self.print_msg("")
            choice = self.prompt_choice(ScriptBrowser.question, ScriptBrowser.choix_deletion_mode.itervalues())
            self.print_option_choosen(choice, ScriptBrowser.choix_deletion_mode)
            if choice == "1":
                wanted = self.prompt_question("Which file to delete from stack ?")
                removed = False
                # NOTE(review): removes from self.file_exec while iterating it;
                # fine for one match, may skip entries when names repeat — confirm
                for script in self.file_exec:
                    if script.intitule == wanted:
                        self.delete_scrit_from_stack(script)
                        removed = True
                if not removed:
                    self.print_output_msg("File hasn't been found.")
                self.press_a_key_continue()
        self.prompt_menu()

    ## add the specified script into the stack
    def add_script_in_stack(self, file_name, tool, file_full_path):
        self.file_exec.append(Script(file_name, tool, file_full_path))
        self.print_output_msg("Script has been added.")

    ## delete the specified script from the stack (method name kept for compatibility)
    def delete_scrit_from_stack(self, element_object):
        self.file_exec.remove(element_object)
        self.print_output_msg("Script has been deleted.")
## test
def test():
    """Manual smoke test: browse scripts from the current directory, then execute them on node0."""
    # BUG FIX: ScriptBrowser.__init__ requires the initial root folder;
    # calling it without arguments raised a TypeError
    sbp = ScriptBrowser(".")
    # Options selection
    sbp.prompt_menu()
    exe = JobExecutor()
    exe.executeScripts(sbp.file_exec, "node0")

## launch test
if __name__ == "__main__":
    test()
|
{"/Tools/ScriptBrowser.py": ["/Model/Script.py", "/Tools/Prompter.py", "/Tools/CmdLineExplorer.py", "/Tools/JobExecutor.py"], "/launcher.py": ["/View/IHM.py"]}
|
7,596
|
tchaly-bethmaure/ProjetGrid
|
refs/heads/master
|
/Model/Nodes.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# imports
import sys
from ClusterShell.NodeSet import NodeSet
from ClusterShell.Task import task_self
from ClusterShell.NodeSet import NodeSetException
##
#
class Nodes:
    ## wraps a ClusterShell nodeset and splits it into reachable / unreachable nodes
    ## initialize the object
    def __init__(self, nodeset):
        # the node set as submitted by the caller
        self.ns = nodeset
        # active nodes
        self.ns_ok = NodeSet()
        # inactive or inexisting nodes
        self.ns_ko = NodeSet()
    ## add the specified node
    # NOTE(review): NodeSet.fromlist expects an iterable of node names; a bare
    # string would be iterated character by character — confirm callers pass lists
    def add(self, node):
        self.ns |= NodeSet.fromlist(node)
    ## delete the specified node
    def delete(self, node):
        self.ns -= NodeSet.fromlist(node)
    ## check if the submitted nodes are active
    def checkNodes(self):
        try:
            # print command info
            print '\n== Checking active nodes =='
            # run a trivial remote command on the specified nodes to probe them
            task_self().run('echo OK', nodes=self.ns)
            # retrieve and check return code
            for retcode, nodes in task_self().iter_retcodes():
                # NOTE(review): retcodes 1 and 2 are also counted as OK — confirm intended
                if retcode in (0, 1, 2):
                    # add nodes to OK set
                    self.ns_ok |= NodeSet.fromlist(nodes)
                    print '%s : OK' % nodes
                else:
                    # add nodes to KO set
                    self.ns_ko |= NodeSet.fromlist(nodes)
                    print '%s : KO' % nodes
        # syntax error in the submitted nodeset
        except NodeSetException:
            print >> sys.stderr, '(!) Error : the submitted nodeset [%s] is not valid' % self.ns
    ## get all submitted nodes (include invalid)
    def getAllNodes(self):
        return self.ns
    ## retrieve the active nodes
    def getActiveNodes(self):
        return self.ns_ok
## check and retrieve active nodes
## probe the given nodeset and return only the nodes that answered
def filter_disfunctional_nodes(ns):
    checker = Nodes(ns)
    checker.checkNodes()
    return checker.getActiveNodes()
|
{"/Tools/ScriptBrowser.py": ["/Model/Script.py", "/Tools/Prompter.py", "/Tools/CmdLineExplorer.py", "/Tools/JobExecutor.py"], "/launcher.py": ["/View/IHM.py"]}
|
7,597
|
tchaly-bethmaure/ProjetGrid
|
refs/heads/master
|
/View/IHM.py
|
#! /usr/bin/python
# imports
import os
import sys
from Model.Nodes import Nodes
from Model.ConfFileManager import ConfFileManager
from Tools.ScriptBrowser import ScriptBrowser
from Tools.NodeBrowser import NodeBrowser
from Tools.Prompter import Prompter
from Tools.JobExecutor import JobExecutor
##
#
class IHM(Prompter):
## constructor
def __init__(self):
# the task executor
self.exe = JobExecutor()
## a GUI for static mode execution
def staticMode(self, nodes, service, action):
# check and retrieve active nodes
ns = Nodes(nodes)
ns.checkNodes()
ns_ok = ns.getActiveNodes()
# execute the specified service
self.exe.executeService(ns_ok, service, action)
## a GUI for conf file processing and execution
def fileConfigMode(self, fileName):
# instantiate the conf file manager
s = ConfFileManager(None, None, None)
try:
# launch it
s.executeFromFile(fileName)
# catch Ctrl + C
except KeyboardInterrupt :
print '\n Bye bye !\n'
sys.exit(1)
## a GUI for nodes and service browsing and execution
def interactiveMode(self):
# clear screen
os.system('clear')
print '\n== Welcome to Clustershell ! =='
# browse node
nbp = NodeBrowser()
nbp.promptMenu()
# browse service and action
nbp.submitService()
nbp.submitAction()
# retrieve active nodes and execute
self.exe.executeService(nbp.ns, nbp.service, nbp.action)
## a GUI for scripts browsing and execution
def fullInteractiveMode(self):
# clear screen
os.system('clear')
print '\n== Welcome to Clustershell ! =='
# browse nodes
nbp = NodeBrowser()
nbp.promptMenu()
try:
# browse script
sbp = ScriptBrowser(self.prompt_question("Script root folder ?"))
sbp.prompt_menu()
# catch Ctrl + C
except KeyboardInterrupt :
print '\n Bye bye !\n'
sys.exit(1)
# execute the script to the specified nodes
self.exe.executeScripts(sbp.file_exec, nbp.ns)
## display error msg if invalid arguments
def displayErrorMsg(self):
print >> sys.stderr,'\n== Error : invalid arguments =='
print >> sys.stderr,'=> Right syntax for static mode : python Launcher.py -s <nodeset> <service> <action>'
print >> sys.stderr,'=> Right syntax for conf file mode : python Launcher.py -f <filename>'
print >> sys.stderr,'=> Right syntax for interactive mode : python Launcher.py -i'
print >> sys.stderr,'=> Right syntax for full interactive mode : python Launcher.py -fi'
exit(1)
|
{"/Tools/ScriptBrowser.py": ["/Model/Script.py", "/Tools/Prompter.py", "/Tools/CmdLineExplorer.py", "/Tools/JobExecutor.py"], "/launcher.py": ["/View/IHM.py"]}
|
7,598
|
tchaly-bethmaure/ProjetGrid
|
refs/heads/master
|
/Model/ConfFileManager.py
|
#! /usr/bin/python
# imports
import sys
from Model.Nodes import Nodes
from Tools.JobExecutor import JobExecutor
from ClusterShell.NodeSet import NodeSet
from ClusterShell.Task import task_self
##
#
class ConfFileManager:
## constructor (noeuds, service et action)
def __init__(self, ns, service, action):
# check params
if(ns is not None and service is not None and action is not None):
self.ns = NodeSet(ns)
self.service = service
self.action = action # <start|stop|restart|status>
else:
self.ns = None
self.service = ''
self.action = ''
# the service executor
self.executor = JobExecutor()
## parse the file and execute it
def executeFromFile(self,nom_fichier):
fichier = open(nom_fichier,'r')
line = ''
for ligne in fichier.readlines():
# remove blanks
line = ligne.replace(' ','')
# check if not empty => prevent parsing error
if str(line) > 0 and line[0] != '\n' and line[0] != '#':
# parse line
self.executeFromLine(line)
# close the file
fichier.close()
## parse the line and execute it
def executeFromLine(self,line):
# the error message to be printed
error_len_3 = 'Error : configuration file usage is <set of services>::<set of nodes>::<action>[-> <set of services>::<set of nodes>::<action>]\n'
# the node set
self.ns = None
commande = ''
# ignore comment or empty lines
if line == '' or line[0] == '#':
pass
else:
# retrieve each block
for bloc_dependance in line.split('->'):
# retrieve each item
split = bloc_dependance.split('::')
# parse error
if len(split) != 3:
print '%s' % error_len_3
exit(1)
# retrieve services, nodes and action
services = split[0]
nodes = split[1]
self.action = split[2].replace('\n','')
print '***************************************'
print '* Handling \'%s\' *' % bloc_dependance
print '***************************************'
# check nodes
nodeList = Nodes(nodes)
nodeList.checkNodes()
# update node list
self.ns = nodeList.getActiveNodes()
# execute each service for the specified nodes
for service in services.split(','):
self.service = service
self.executor.executeService(self.ns, self.service, self.action)
|
{"/Tools/ScriptBrowser.py": ["/Model/Script.py", "/Tools/Prompter.py", "/Tools/CmdLineExplorer.py", "/Tools/JobExecutor.py"], "/launcher.py": ["/View/IHM.py"]}
|
7,599
|
tchaly-bethmaure/ProjetGrid
|
refs/heads/master
|
/script/zip.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
# variables used by the commands below
name_of_file = "tail5authlog"
default_path = "/root/scpy"
storage_remote_name = "supervisor"

print("Installing 7zip if needed.")
os.system("apt-get install -yq p7zip")

print("Zipping this file.")
os.system("7z a %s/logfile.7z %s/%s" % (default_path, default_path, name_of_file))

print("Removing original file : " + name_of_file)
os.system("rm %s/%s" % (default_path, name_of_file))

# works, but to retry once the network configuration is stable:
#print("Fetch it on remote storage : "+storage_remote_name+".")
#os.system("clush -w "+ storage_remote_name +" --copy "+ default_path)
print("Done.")
|
{"/Tools/ScriptBrowser.py": ["/Model/Script.py", "/Tools/Prompter.py", "/Tools/CmdLineExplorer.py", "/Tools/JobExecutor.py"], "/launcher.py": ["/View/IHM.py"]}
|
7,600
|
tchaly-bethmaure/ProjetGrid
|
refs/heads/master
|
/launcher.py
|
#! /usr/bin/python
import sys
from View.IHM import IHM
if __name__ == '__main__':
    # instantiate the GUI
    view = IHM()
    argc = len(sys.argv)
    # dispatch to the mode matching the command-line arguments
    if argc == 5 and sys.argv[1] in ('-s', '--static'):
        # static mode: nodeset, service and action given on the command line
        view.staticMode(sys.argv[2], sys.argv[3], sys.argv[4])
    elif argc == 3 and sys.argv[1] == '-f':
        # configuration-file mode
        view.fileConfigMode(sys.argv[2])
    elif argc == 2 and sys.argv[1] == '-i':
        # interactive mode
        view.interactiveMode()
    elif argc == 2 and sys.argv[1] == '-fi':
        # full interactive mode
        view.fullInteractiveMode()
    else:
        view.displayErrorMsg()
|
{"/Tools/ScriptBrowser.py": ["/Model/Script.py", "/Tools/Prompter.py", "/Tools/CmdLineExplorer.py", "/Tools/JobExecutor.py"], "/launcher.py": ["/View/IHM.py"]}
|
7,601
|
tchaly-bethmaure/ProjetGrid
|
refs/heads/master
|
/Tools/NodeBrowser.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# imports
import sys
from ClusterShell.NodeSet import NodeSet
from ClusterShell.NodeSet import NodeSetException
from Model.Nodes import Nodes
from Tools.Prompter import Prompter
##
#
class NodeBrowser(Prompter):
## constructor
def __init__(self):
self.ns = NodeSet()
self.service = ''
self.action = ''
## launch the prompt menu
def promptMenu(self):
try:
# launch the prompter for node selection
self.submitNodeList()
# catch Ctrl + C
except KeyboardInterrupt :
print '\n Bye bye !\n'
sys.exit(1)
## ask the user to submit the node list
def submitNodeList(self):
# info msg
print '\n# Step 1 of 3 : Please enter nodes name below (using the clustershell syntax <azur1>, <azur[1-2]>) :'
# retrieve keyboard input
try:
self.ns = NodeSet(self.input_request(''))
repeat = True
# ask if the user wants to add another node/node group
while repeat :
# print added nodes
for node in self.ns:
print 'node : %s' % node
# user want to add nodes ?
print '\n### Add nodes ? (yes | no)'
# retrieve answer
ans = self.input_request('')
# check the ans
if ans == 'Yes' or ans == 'Y' or ans == 'y' or ans == 'yes':
print '### Please enter the node/group list below : '
# retrieve and append nodes
self.ns.add(self.input_request(''))
# the user don't want to add additionnal nodes
else:
# unset flag
repeat = False
# check submitted nodes
self.ns = self.checkSubmittedNodes(self.ns)
# invalid submitted node list / syntax error
except NodeSetException :
print >> sys.stderr, '\n(!) Error : the submitted node list is not valid\n' % self.ns
## retrieve the service to be performed
def submitService(self):
# specify the service
print '\n# Step 2 of 3: Please enter the service to be launched'
self.service = self.input_request('')
## retrieve the action to be performed
def submitAction(self):
# choose action to be executed
print '\n# Step 3 of 3 : Please choose the action to perform '
actionList = ['start','stop','restart','status']
self.print_choice(actionList)
# flag for the prompter
repeat = True
# retrieve user's choice
while repeat :
# show prompter
choice = self.input_request('')
if choice == '1' or choice == 'start':
self.action = 'start'
repeat = False
elif choice == '2' or choice == 'stop':
self.action = 'stop'
repeat = False
elif choice == '3' or choice == 'restart':
self.action = 'restart'
repeat = False
elif choice == '4' or choice == 'status':
self.action = 'status'
repeat = False
else:
print >> sys.stderr,'Error : invalid choice'
## check and retrieves ok nodes
def checkSubmittedNodes(self, nodes):
# create nodeset
ns = Nodes(nodes)
# ping nodes
ns.checkNodes()
# return active nodes
return ns.getActiveNodes()
|
{"/Tools/ScriptBrowser.py": ["/Model/Script.py", "/Tools/Prompter.py", "/Tools/CmdLineExplorer.py", "/Tools/JobExecutor.py"], "/launcher.py": ["/View/IHM.py"]}
|
7,602
|
tchaly-bethmaure/ProjetGrid
|
refs/heads/master
|
/Tools/Prompter.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# imports
import os
##
#
class Prompter:
    """Small console helper: prints menus/questions and reads keyboard input."""
    ## constructor
    def __init__(self):
        pass
    ## show a question plus its numbered options, return the raw answer
    def prompt_choice(self, question, iterable):
        self.print_question(question)
        self.print_choice(iterable)
        return self.input_request("choose an option (1, 2, 3, ...) ?")
    ## ask a free-form question, return the raw answer
    def prompt_question(self, sentence):
        self.print_msg("")
        self.print_question(sentence)
        return self.input_request(" type here : ")
    ## block until the user presses a key
    def press_a_key_continue(self):
        self.input_request("Press a key to continue...")
    ## echo which option the user picked (or complain if out of range)
    def print_option_choosen(self, rep, liste):
        try:
            self.print_output_msg("You've choosen : " + str(liste[int(rep)]))
        except:
            self.print_output_msg(str(rep) + " is not in the list.")
    ## print every option with its 1-based number
    def print_choice(self, iterable):
        for position, option in enumerate(iterable, 1):
            self.print_option(position, option)
    ## print one numbered option line
    def print_option(self, number, msg):
        self.print_msg(str(number) + ") " + msg)
    ## print a question line
    def print_question(self, msg):
        self.print_msg("? : " + str(msg))
    ## print a tagged output message and hand it back to the caller
    def print_output_msg(self, msg):
        self.print_msg("")
        self.print_msg("[Output] : " + msg)
        return msg
    ## lowest-level print primitive
    def print_msg(self, msg):
        print(msg)
    ## read one line from the keyboard, prefixed with an arrow prompt
    def input_request(self, msg):
        return raw_input("-> " + str(msg))
    ## clear the terminal
    def clear_screen(self):
        os.system("clear")
## manual smoke test for the Prompter (needs a keyboard)
def test():
    prompter = Prompter()
    # free-form question, echoed back
    prompter.print_output_msg(prompter.prompt_question("Is python a good language ?"))
    # multiple-choice question
    # BUGFIX: the original named this variable `list`, shadowing the builtin
    options = ["SUPAman doh", "Make me feel cool", "Back to the old scripting fashion time..."]
    index = prompter.print_output_msg(prompter.prompt_choice("What's developping in Python makes you feel like ?", options))
    prompter.print_option_choosen(index, options)
## launch test
if __name__ == "__main__":
    test()
|
{"/Tools/ScriptBrowser.py": ["/Model/Script.py", "/Tools/Prompter.py", "/Tools/CmdLineExplorer.py", "/Tools/JobExecutor.py"], "/launcher.py": ["/View/IHM.py"]}
|
7,603
|
tchaly-bethmaure/ProjetGrid
|
refs/heads/master
|
/Tools/CmdLineExplorer.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# imports
import os
from Prompter import Prompter
##
#
class CmdLineExplorer(Prompter, object):
## constructor
def __init__(self, root_folder):
super(CmdLineExplorer, self).__init__()
# set paths
self.initial_path = root_folder
self.current_path = root_folder
# set folder infos
self.folder_information = ""
self.set_folder_information()
## go to the specified folder
def go_in_folder(self, folder_name):
if folder_name != "":
if folder_name == "..":
self.go_backward()
else:
self.go_forward(folder_name)
## go the the folder
def go_forward(self, folder):
new_path = self.current_path
if new_path != "/":
new_path += "/"
new_path += folder
if self.is_a_folder(new_path):
self.current_path = new_path
self.set_folder_information()
## go back to the parent folder
def go_backward(self):
tab = str(self.current_path).split("/")
new_path = ""
tab.reverse()
while len(tab) != 1:
element = tab.pop()
if element != "":
new_path += "/" + element
if new_path != "":
self.current_path = new_path
else:
self.current_path = "/"
self.set_folder_information()
## set directory info
def set_folder_information(self):
try:
self.folder_information = os.listdir(self.current_path)
except:
self.print_output_msg("No can't do.")
finally:
pass
## display directory information
def display_folder_information(self):
self.print_msg("# List of files and folders : ")
self.print_msg("")
j = 0
to_print = ""
for entity in self.folder_information:
to_print += entity + " "
if j == 6:
j = 0
to_print += "\n"
j += 1
print to_print
## check if the specified file is in the current folder
def is_in_folder(self, file_or_folder):
if file_or_folder in self.folder_information:
return True
else:
self.print_output_msg(file_or_folder + " is not located in this folder.")
return False
## check if the specified file path is a directory
def is_a_folder(self, folder_full_path):
if os.path.isdir(folder_full_path):
return True
else:
self.print_output_msg("Not a folder.")
## check if the specified file path is a regular file
def is_a_file(self, file_full_path):
if os.path.isfile(file_full_path):
return True
else:
self.print_output_msg("Not a file.")
## test the current class
def test():
cle = CmdLineExplorer("/home/tchaly/EspaceDeTravail/Git")
#cle.display_folder_information()
#cle.go_forward("ProjetGrid")
#cle.display_folder_information()
#cle.go_backward()
#cle.display_folder_information()
while True:
cle.go_backward()
print cle.current_path
cle.press_a_key_continue()
## launch test
if __name__ == "__main__":
test()
|
{"/Tools/ScriptBrowser.py": ["/Model/Script.py", "/Tools/Prompter.py", "/Tools/CmdLineExplorer.py", "/Tools/JobExecutor.py"], "/launcher.py": ["/View/IHM.py"]}
|
7,604
|
tchaly-bethmaure/ProjetGrid
|
refs/heads/master
|
/Tools/JobExecutor.py
|
#! /usr/bin/python
# imports
from ClusterShell.NodeSet import NodeSet
from ClusterShell.Task import task_self
from Tools.Prompter import Prompter
## class for executing commands
#
class JobExecutor(Prompter):
## constructor
def __init__(self):
# task launcher
self.task = task_self()
## execute a service
def executeService(self, ns, service, action):
# set command
command = 'service ' + service + ' ' + action
# print info message
if action == 'start':
print '\n== Launching \'%s\' on [%s] ==' % (service, ns)
elif action == 'stop':
print '\n== Stopping \'%s\' on [%s] ==' % (service, ns)
elif action == 'restart':
print '\n== Restarting \'%s\' on [%s] ==' % (service, ns)
elif action == 'status':
print '\n== Get status of \'%s\' on [%s] ==' % (service, ns)
else:
print 'Error : <action> must be start | stop | restart | status'
exit(1)
# run command
self.task.run(command, nodes=ns)
# handling output
for output, nodes in self.task.iter_buffers():
# agregate and print output
print '%s : %s' % (nodes, output)
# print blank line
print ''
## execute the stack of jobs
def executeScripts(self, stack, ns):
for job in stack:
self.task.run(job.tool + " " + job.path, nodes=ns)
# handling output
for output, nodes in self.task.iter_buffers():
# agregate and print output
print '%s : %s' % (nodes, output)
print ""
|
{"/Tools/ScriptBrowser.py": ["/Model/Script.py", "/Tools/Prompter.py", "/Tools/CmdLineExplorer.py", "/Tools/JobExecutor.py"], "/launcher.py": ["/View/IHM.py"]}
|
7,605
|
tchaly-bethmaure/ProjetGrid
|
refs/heads/master
|
/Model/Script.py
|
## class for handling scripts
#
## value object describing a runnable script
#
class Script(object):
    ## constructor
    def __init__(self, nom_script, tool_script, chemin_fichier):
        super(Script, self).__init__()
        # script path on disk
        self.path = chemin_fichier
        # interpreter/tool used to run it
        self.tool = tool_script
        # display name
        self.intitule = nom_script
    ## the script's display name
    def __str__(self):
        return self.intitule
    ## full path of the script file
    def get_file_with_path(self):
        return self.path
|
{"/Tools/ScriptBrowser.py": ["/Model/Script.py", "/Tools/Prompter.py", "/Tools/CmdLineExplorer.py", "/Tools/JobExecutor.py"], "/launcher.py": ["/View/IHM.py"]}
|
7,614
|
eriknystrom/csc401-assign4
|
refs/heads/master
|
/Circle.py
|
from math import pi
# ClassCircle
# Write class called circle, with an instance variable, for the radius
# and methods to calculate the area and circumference of the circle.
class Circle:
    """A circle defined by its (mutable) radius."""
    def __init__(self, radius=0):
        # default of 0 keeps the original no-argument constructor working,
        # while letting callers pass the radius directly
        self.radius = radius
    def area(self):
        """Return the area: pi * r**2."""
        return (self.radius ** 2) * pi
    def circumference(self):
        """Return the circumference: 2 * pi * r."""
        return self.radius * 2 * pi
|
{"/assign4.py": ["/Circle.py"]}
|
7,615
|
eriknystrom/csc401-assign4
|
refs/heads/master
|
/assign4.py
|
# Assignment 4 for CSC401 Python
import functools
from Circle import Circle
# Function One(list of strings)
# Take a list of strings as a parameter and return all unique strings
def function_one(string_list):
    """Return, in input order, the strings occurring exactly once in string_list.

    Replaces the original O(n^2) flag-juggling double loop with a single
    counting pass; results are identical (order preserved).
    """
    from collections import Counter  # local import keeps this fix self-contained
    counts = Counter(string_list)
    return [s for s in string_list if counts[s] == 1]
# Function Two(integer)
# Take an integer and return whether or not the number is Perfect
# Perfect number = sum of divisors = number
def function_two(perfect_number):
    """Return True when perfect_number is a perfect number.

    A perfect number equals the sum of its proper divisors (6, 28, 496, ...).
    """
    # factors() yields every divisor including the number itself,
    # so subtract it to get the proper-divisor sum
    sum_of = sum(factors(perfect_number)) - perfect_number
    # 'sum_of > 1' rules out 1 (whose proper-divisor sum is 1)
    # was: if ...: return True / else: return False
    return sum_of == perfect_number and sum_of > 1
# Function Three(integer)
# Take an integer and return Perfect numbers <= number
def function_three(perfect_number_two):
    """Return the perfect numbers strictly below the given bound, ascending."""
    return [candidate
            for candidate in range(1, perfect_number_two)
            if function_two(candidate)]
# Function Four(list of mixed types)
# Take list of mixed type and count number of integers
def function_four(mixed_list):
    """Recursively count the int elements of a (possibly nested) list.

    NOTE(review): isinstance(x, int) also counts bools (bool subclasses int)
    — preserved from the original; confirm whether that is intended.
    """
    count = 0
    # iterate elements directly instead of indexing via range(len(...))
    for item in mixed_list:
        if isinstance(item, int):
            count += 1
        elif isinstance(item, list):
            # nested list: count its ints too
            count += function_four(item)
    return count
# Function Five(list of anything)
# Take list of anything and remove second item
def function_five(list_of_anything=None):
    """Remove the second item of the list in place and return the list.

    Lists with fewer than two items are returned unchanged; no argument
    yields an empty list.
    """
    # BUGFIX(idiom): the original used a mutable default argument ([]),
    # which is shared across calls; None + rebind is the safe equivalent
    if list_of_anything is None:
        list_of_anything = []
    if len(list_of_anything) > 1:
        del list_of_anything[1]
    return list_of_anything
def factors(n):
    """Return all divisors of n as flattened [small, n // small] pairs.

    Pairs appear in increasing order of the small divisor, so the result is
    e.g. [1, 12, 2, 6, 3, 4] for n = 12 (perfect squares list the root twice).
    Adapted from: http://stackoverflow.com/questions/6800193
    """
    pairs = [[low, n // low]
             for low in range(1, int(n ** 0.5) + 1)
             if n % low == 0]
    # reduce keeps the original behaviour exactly (including the
    # TypeError on an empty pair list, i.e. n == 0)
    return functools.reduce(list.__add__, pairs)
def main():
    """Exercise each assignment function with the sample inputs and print results."""
    print(function_one(['good', 'cat', 'bad', 'cat']))
    print(function_two(496))
    print(function_three(2048))
    print(function_four([1, ['A', 2], 'B', 3, 'C', 4, ['D', 5]]))
    print(function_five(['A', ['A', 'B'], 'C']))
    # demonstrate the Circle class as well
    demo_circle = Circle()
    demo_circle.radius = 5
    print(demo_circle.area())
    print(demo_circle.circumference())

if __name__ == "__main__":
    main()
|
{"/assign4.py": ["/Circle.py"]}
|
7,616
|
davidcerezal/django-oidc-provider
|
refs/heads/master
|
/oidc_provider/tests/test_token_endpoint.py
|
import json
from urllib import urlencode
import uuid
from django.test import RequestFactory
from django.test import TestCase
from oidc_provider.lib.utils.token import *
from oidc_provider.tests.utils import *
from oidc_provider.views import *
class TokenTestCase(TestCase):
    """
    To obtain an Access Token and an ID Token, the RP Client sends a
    Token Request to the Token Endpoint to obtain a Token Response
    when using the Authorization Code Flow.
    """
    def setUp(self):
        # Fresh request factory, user, code-flow client and state per test.
        self.factory = RequestFactory()
        self.user = create_fake_user()
        self.client = create_fake_client(response_type='code')
        self.state = uuid.uuid4().hex
    def _post_request(self, post_data):
        """
        Make a request to the token endpoint by sending the
        `post_data` parameters using the 'application/x-www-form-urlencoded'
        format. Returns the view's response.
        """
        url = reverse('oidc_provider:token')
        request = self.factory.post(url,
            data=urlencode(post_data),
            content_type='application/x-www-form-urlencoded')
        response = TokenView.as_view()(request)
        return response
    def _create_code(self):
        """
        Generate and persist a valid grant code for self.user/self.client.
        """
        code = create_code(
            user=self.user,
            client=self.client,
            scope=['openid', 'email'])
        code.save()
        return code
    def test_request_methods(self):
        """
        Client sends an HTTP POST request to the Token Endpoint. Other request
        methods MUST NOT be allowed.
        """
        url = reverse('oidc_provider:token')
        requests = [
            self.factory.get(url),
            self.factory.put(url),
            self.factory.delete(url),
        ]
        # Non-POST methods must be rejected with 405 Method Not Allowed.
        for request in requests:
            response = TokenView.as_view()(request)
            self.assertEqual(response.status_code == 405, True,
                msg=request.method+' request does not return a 405 status.')
        # A POST without the mandatory parameters yields 400 Bad Request.
        request = self.factory.post(url)
        response = TokenView.as_view()(request)
        self.assertEqual(response.status_code == 400, True,
            msg=request.method+' request does not return a 400 status.')
    def test_client_authentication(self):
        """
        The authorization server supports including the
        client credentials in the request body using the `client_id` and
        `client_secret` parameters.
        See: http://tools.ietf.org/html/rfc6749#section-2.3.1
        """
        code = self._create_code()
        # Test a valid request to the token endpoint.
        post_data = {
            'client_id': self.client.client_id,
            'client_secret': self.client.client_secret,
            'redirect_uri': self.client.default_redirect_uri,
            'grant_type': 'authorization_code',
            'code': code.code,
            'state': self.state,
        }
        response = self._post_request(post_data)
        response_dic = json.loads(response.content)
        self.assertEqual('access_token' in response_dic, True,
            msg='"access_token" key is missing in response.')
        self.assertEqual('error' in response_dic, False,
            msg='"error" key should not exists in response.')
        # Now, test with an invalid client_id.
        invalid_data = post_data.copy()
        invalid_data['client_id'] = self.client.client_id * 2 # Fake id.
        # Create another grant code (the first one was consumed above).
        code = self._create_code()
        invalid_data['code'] = code.code
        response = self._post_request(invalid_data)
        response_dic = json.loads(response.content)
        self.assertEqual('error' in response_dic, True,
            msg='"error" key should exists in response.')
        self.assertEqual(response_dic.get('error') == 'invalid_client', True,
            msg='"error" key value should be "invalid_client".')
|
{"/oidc_provider/tests/test_token_endpoint.py": ["/oidc_provider/lib/utils/token.py", "/oidc_provider/tests/utils.py", "/oidc_provider/views.py"], "/oidc_provider/tests/utils.py": ["/oidc_provider/models.py"], "/oidc_provider/admin.py": ["/oidc_provider/models.py"], "/oidc_provider/lib/claims.py": ["/oidc_provider/models.py"], "/oidc_provider/lib/endpoints/discovery.py": ["/oidc_provider/lib/utils/common.py", "/oidc_provider/models.py"], "/oidc_provider/lib/utils/token.py": ["/oidc_provider/models.py"], "/oidc_provider/views.py": ["/oidc_provider/lib/endpoints/authorize.py", "/oidc_provider/lib/endpoints/discovery.py", "/oidc_provider/lib/errors.py"], "/oidc_provider/lib/endpoints/authorize.py": ["/oidc_provider/lib/errors.py", "/oidc_provider/lib/utils/common.py", "/oidc_provider/lib/utils/token.py", "/oidc_provider/models.py"], "/oidc_provider/settings.py": ["/oidc_provider/lib/claims.py"], "/oidc_provider/tests/test_authorize_endpoint.py": ["/oidc_provider/models.py", "/oidc_provider/tests/utils.py", "/oidc_provider/views.py"]}
|
7,617
|
davidcerezal/django-oidc-provider
|
refs/heads/master
|
/oidc_provider/models.py
|
import json
from django.db import models
from django.utils import timezone
from django.contrib.auth.models import User
def scope_property():
    """
    Property factory: exposes the space-separated `_scope` text column as a
    list of scope names. Used as `scope = property(**scope_property())` by
    the Code and Token models below.
    """
    def fget(self):
        return self._scope.split()
    def fset(self, value):
        self._scope = ' '.join(value)
    # locals() here is exactly {'fget': ..., 'fset': ...}
    return locals()
class Client(models.Model):
    """A registered OpenID Connect relying party (client)."""
    # Supported flows for this client.
    RESPONSE_TYPE_CHOICES = [
        ('code', 'code (Authorization Code Flow)'),
        ('id_token', 'id_token (Implicit Flow)'),
        ('id_token token', 'id_token token (Implicit Flow)'),
    ]
    name = models.CharField(max_length=100, default='')
    client_id = models.CharField(max_length=255, unique=True)
    client_secret = models.CharField(max_length=255, unique=True)
    response_type = models.CharField(max_length=30,
        choices=RESPONSE_TYPE_CHOICES)
    # Backing storage: one redirect URI per line.
    _redirect_uris = models.TextField(default='')
    def redirect_uris():
        """Property factory exposing `_redirect_uris` as a list of lines."""
        def fget(self):
            return self._redirect_uris.splitlines()
        def fset(self, value):
            self._redirect_uris = '\n'.join(value)
        return locals()
    redirect_uris = property(**redirect_uris())
    @property
    def default_redirect_uri(self):
        # First registered URI, or '' when none are registered.
        return self.redirect_uris[0] if self.redirect_uris else ''
class Code(models.Model):
    """An authorization-code grant issued to a user for a client."""
    user = models.ForeignKey(User)
    client = models.ForeignKey(Client)
    code = models.CharField(max_length=255, unique=True)
    expires_at = models.DateTimeField()
    # Backing storage: space-separated scope names (see scope_property above).
    _scope = models.TextField(default='')
    scope = property(**scope_property())
    def has_expired(self):
        """Return True once the current time has reached `expires_at`."""
        return timezone.now() >= self.expires_at
class Token(models.Model):
    """An issued access token together with its associated ID token payload."""
    user = models.ForeignKey(User)
    client = models.ForeignKey(Client)
    access_token = models.CharField(max_length=255, unique=True)
    expires_at = models.DateTimeField()
    # Backing storage: space-separated scope names (see scope_property above).
    _scope = models.TextField(default='')
    scope = property(**scope_property())
    # Backing storage: the ID token payload serialized as JSON.
    _id_token = models.TextField()
    def id_token():
        """Property factory exposing `_id_token` as a parsed JSON object."""
        def fget(self):
            return json.loads(self._id_token)
        def fset(self, value):
            self._id_token = json.dumps(value)
        return locals()
    id_token = property(**id_token())
class UserInfo(models.Model):
GENDER_CHOICES = [
('F', 'Female'),
('M', 'Male'),
]
user = models.OneToOneField(User, primary_key=True)
given_name = models.CharField(max_length=255, blank=True, null=True)
family_name = models.CharField(max_length=255, blank=True, null=True)
middle_name = models.CharField(max_length=255, blank=True, null=True)
nickname = models.CharField(max_length=255, blank=True, null=True)
gender = models.CharField(max_length=100, choices=GENDER_CHOICES, null=True)
birthdate = models.DateField(null=True)
zoneinfo = models.CharField(max_length=100, default='', blank=True,
null=True)
locale = models.CharField(max_length=100, default='', blank=True, null=True)
preferred_username = models.CharField(max_length=255, blank=True, null=True)
profile = models.URLField(default='', null=True, blank=True)
picture = models.URLField(default='', null=True, blank=True)
website = models.URLField(default='', null=True, blank=True)
email_verified = models.NullBooleanField(default=False)
locale = models.CharField(max_length=100, blank=True, null=True)
phone_number = models.CharField(max_length=255, blank=True, null=True)
phone_number_verified = models.NullBooleanField(default=False)
address_street_address = models.CharField(max_length=255, blank=True,
null=True)
address_locality = models.CharField(max_length=255, blank=True, null=True)
address_region = models.CharField(max_length=255, blank=True, null=True)
address_postal_code = models.CharField(max_length=255, blank=True,
null=True)
address_country = models.CharField(max_length=255, blank=True, null=True)
updated_at = models.DateTimeField(auto_now=True, null=True)
@property
def name(self):
name = ''
if self.given_name:
name = self.given_name
if self.family_name:
name = name + ' ' + self.family_name
return name
@property
def address_formatted(self):
formatted = ', '.join([
self.address_street_address,
self.address_locality,
self.address_country])
if formatted.startswith(', '):
formatted = formatted[2:]
if formatted.endswith(', '):
formatted = formatted[:-2]
|
{"/oidc_provider/tests/test_token_endpoint.py": ["/oidc_provider/lib/utils/token.py", "/oidc_provider/tests/utils.py", "/oidc_provider/views.py"], "/oidc_provider/tests/utils.py": ["/oidc_provider/models.py"], "/oidc_provider/admin.py": ["/oidc_provider/models.py"], "/oidc_provider/lib/claims.py": ["/oidc_provider/models.py"], "/oidc_provider/lib/endpoints/discovery.py": ["/oidc_provider/lib/utils/common.py", "/oidc_provider/models.py"], "/oidc_provider/lib/utils/token.py": ["/oidc_provider/models.py"], "/oidc_provider/views.py": ["/oidc_provider/lib/endpoints/authorize.py", "/oidc_provider/lib/endpoints/discovery.py", "/oidc_provider/lib/errors.py"], "/oidc_provider/lib/endpoints/authorize.py": ["/oidc_provider/lib/errors.py", "/oidc_provider/lib/utils/common.py", "/oidc_provider/lib/utils/token.py", "/oidc_provider/models.py"], "/oidc_provider/settings.py": ["/oidc_provider/lib/claims.py"], "/oidc_provider/tests/test_authorize_endpoint.py": ["/oidc_provider/models.py", "/oidc_provider/tests/utils.py", "/oidc_provider/views.py"]}
|
7,618
|
davidcerezal/django-oidc-provider
|
refs/heads/master
|
/oidc_provider/lib/utils/common.py
|
from django.core.urlresolvers import reverse
from oidc_provider import settings
def get_issuer():
    """
    Build the issuer URL: the configured site URL plus the path under
    which the provider endpoints are mounted.
    """
    site_url = settings.get('SITE_URL')
    # strip the well-known suffix from the discovery route to get the mount path
    mount_path = reverse('oidc_provider:provider_info') \
        .split('/.well-known/openid-configuration/')[0]
    return site_url + mount_path
|
{"/oidc_provider/tests/test_token_endpoint.py": ["/oidc_provider/lib/utils/token.py", "/oidc_provider/tests/utils.py", "/oidc_provider/views.py"], "/oidc_provider/tests/utils.py": ["/oidc_provider/models.py"], "/oidc_provider/admin.py": ["/oidc_provider/models.py"], "/oidc_provider/lib/claims.py": ["/oidc_provider/models.py"], "/oidc_provider/lib/endpoints/discovery.py": ["/oidc_provider/lib/utils/common.py", "/oidc_provider/models.py"], "/oidc_provider/lib/utils/token.py": ["/oidc_provider/models.py"], "/oidc_provider/views.py": ["/oidc_provider/lib/endpoints/authorize.py", "/oidc_provider/lib/endpoints/discovery.py", "/oidc_provider/lib/errors.py"], "/oidc_provider/lib/endpoints/authorize.py": ["/oidc_provider/lib/errors.py", "/oidc_provider/lib/utils/common.py", "/oidc_provider/lib/utils/token.py", "/oidc_provider/models.py"], "/oidc_provider/settings.py": ["/oidc_provider/lib/claims.py"], "/oidc_provider/tests/test_authorize_endpoint.py": ["/oidc_provider/models.py", "/oidc_provider/tests/utils.py", "/oidc_provider/views.py"]}
|
7,619
|
davidcerezal/django-oidc-provider
|
refs/heads/master
|
/oidc_provider/tests/utils.py
|
from django.contrib.auth.models import User
from oidc_provider.models import *
def create_fake_user():
    """
    Create and persist a test user ('johndoe', password '1234').
    Return the saved User object.
    """
    fake_user = User()
    fake_user.username = 'johndoe'
    fake_user.email = 'johndoe@example.com'
    # hash the password properly rather than assigning it raw
    fake_user.set_password('1234')
    fake_user.save()
    return fake_user
def create_fake_client(response_type):
    """
    Create and persist a test client; response_type MUST be one of
    'code', 'id_token' or 'id_token token'.
    Return the saved Client object.
    """
    fake_client = Client()
    fake_client.name = 'Some Client'
    fake_client.client_id = '123'
    fake_client.client_secret = '456'
    fake_client.response_type = response_type
    # assigned through the redirect_uris property (stored one URI per line)
    fake_client.redirect_uris = ['http://example.com/']
    fake_client.save()
    return fake_client
|
{"/oidc_provider/tests/test_token_endpoint.py": ["/oidc_provider/lib/utils/token.py", "/oidc_provider/tests/utils.py", "/oidc_provider/views.py"], "/oidc_provider/tests/utils.py": ["/oidc_provider/models.py"], "/oidc_provider/admin.py": ["/oidc_provider/models.py"], "/oidc_provider/lib/claims.py": ["/oidc_provider/models.py"], "/oidc_provider/lib/endpoints/discovery.py": ["/oidc_provider/lib/utils/common.py", "/oidc_provider/models.py"], "/oidc_provider/lib/utils/token.py": ["/oidc_provider/models.py"], "/oidc_provider/views.py": ["/oidc_provider/lib/endpoints/authorize.py", "/oidc_provider/lib/endpoints/discovery.py", "/oidc_provider/lib/errors.py"], "/oidc_provider/lib/endpoints/authorize.py": ["/oidc_provider/lib/errors.py", "/oidc_provider/lib/utils/common.py", "/oidc_provider/lib/utils/token.py", "/oidc_provider/models.py"], "/oidc_provider/settings.py": ["/oidc_provider/lib/claims.py"], "/oidc_provider/tests/test_authorize_endpoint.py": ["/oidc_provider/models.py", "/oidc_provider/tests/utils.py", "/oidc_provider/views.py"]}
|
7,620
|
davidcerezal/django-oidc-provider
|
refs/heads/master
|
/oidc_provider/admin.py
|
from django.contrib import admin
from .models import Client, Code, Token, UserInfo
# Expose the OIDC provider models in the Django admin site.
admin.site.register(Client)
admin.site.register(Code)
admin.site.register(Token)
admin.site.register(UserInfo)
|
{"/oidc_provider/tests/test_token_endpoint.py": ["/oidc_provider/lib/utils/token.py", "/oidc_provider/tests/utils.py", "/oidc_provider/views.py"], "/oidc_provider/tests/utils.py": ["/oidc_provider/models.py"], "/oidc_provider/admin.py": ["/oidc_provider/models.py"], "/oidc_provider/lib/claims.py": ["/oidc_provider/models.py"], "/oidc_provider/lib/endpoints/discovery.py": ["/oidc_provider/lib/utils/common.py", "/oidc_provider/models.py"], "/oidc_provider/lib/utils/token.py": ["/oidc_provider/models.py"], "/oidc_provider/views.py": ["/oidc_provider/lib/endpoints/authorize.py", "/oidc_provider/lib/endpoints/discovery.py", "/oidc_provider/lib/errors.py"], "/oidc_provider/lib/endpoints/authorize.py": ["/oidc_provider/lib/errors.py", "/oidc_provider/lib/utils/common.py", "/oidc_provider/lib/utils/token.py", "/oidc_provider/models.py"], "/oidc_provider/settings.py": ["/oidc_provider/lib/claims.py"], "/oidc_provider/tests/test_authorize_endpoint.py": ["/oidc_provider/models.py", "/oidc_provider/tests/utils.py", "/oidc_provider/views.py"]}
|
7,621
|
davidcerezal/django-oidc-provider
|
refs/heads/master
|
/example_app/provider_app/urls.py
|
from django.conf.urls import patterns, include, url
from django.contrib import admin
# Mount the OIDC provider endpoints under /openid/ and the Django admin under /admin/.
urlpatterns = patterns('',
    url(r'^openid/', include('oidc_provider.urls', namespace='oidc_provider')),
    url(r'^admin/', include(admin.site.urls)),
)
|
{"/oidc_provider/tests/test_token_endpoint.py": ["/oidc_provider/lib/utils/token.py", "/oidc_provider/tests/utils.py", "/oidc_provider/views.py"], "/oidc_provider/tests/utils.py": ["/oidc_provider/models.py"], "/oidc_provider/admin.py": ["/oidc_provider/models.py"], "/oidc_provider/lib/claims.py": ["/oidc_provider/models.py"], "/oidc_provider/lib/endpoints/discovery.py": ["/oidc_provider/lib/utils/common.py", "/oidc_provider/models.py"], "/oidc_provider/lib/utils/token.py": ["/oidc_provider/models.py"], "/oidc_provider/views.py": ["/oidc_provider/lib/endpoints/authorize.py", "/oidc_provider/lib/endpoints/discovery.py", "/oidc_provider/lib/errors.py"], "/oidc_provider/lib/endpoints/authorize.py": ["/oidc_provider/lib/errors.py", "/oidc_provider/lib/utils/common.py", "/oidc_provider/lib/utils/token.py", "/oidc_provider/models.py"], "/oidc_provider/settings.py": ["/oidc_provider/lib/claims.py"], "/oidc_provider/tests/test_authorize_endpoint.py": ["/oidc_provider/models.py", "/oidc_provider/tests/utils.py", "/oidc_provider/views.py"]}
|
7,622
|
davidcerezal/django-oidc-provider
|
refs/heads/master
|
/oidc_provider/lib/claims.py
|
from django.utils.translation import ugettext as _
from oidc_provider.models import UserInfo
class AbstractScopeClaims(object):
    """
    Base class turning a user's granted scopes into a claims dictionary.
    Subclasses provide one scope_<name>(user) method per supported scope,
    each returning the claims dict for that scope.
    """
    def __init__(self, user, scopes):
        self.user = user
        self.scopes = scopes
        self.setup()

    def setup(self):
        """Subclass hook, invoked at the end of __init__."""
        pass

    def create_response_dic(self):
        """
        Build the dict that will be serialized to JSON: only scopes that are
        both granted and registered contribute, and empty values are dropped.
        """
        registered = self._scopes_registered()
        dic = {}
        for scope in self.scopes:
            if scope in registered:
                dic.update(getattr(self, 'scope_' + scope)(self.user))
        return self._clean_dic(dic)

    def _scopes_registered(self):
        """
        List every scope name registered on the class, i.e. the suffix of
        each scope_* method defined directly on it.
        """
        return [name.split('scope_')[1]
                for name in self.__class__.__dict__
                if name.startswith('scope_')]

    def _clean_dic(self, dic):
        """
        Recursively drop empty/None values from a dict (Python 2: iteritems).
        """
        aux_dic = dic.copy()
        for key, value in dic.iteritems():
            if not value:
                del aux_dic[key]
            elif type(value) is dict:
                aux_dic[key] = self._clean_dic(value)
        return aux_dic
class StandardScopeClaims(AbstractScopeClaims):
    """
    Based on OpenID Standard Claims.
    See: http://openid.net/specs/openid-connect-core-1_0.html#StandardClaims
    """
    def setup(self):
        # Load the user's profile record; fall back to an empty (unsaved)
        # UserInfo so the scope_* methods can read attributes safely.
        try:
            self.userinfo = UserInfo.objects.get(user=self.user)
        except UserInfo.DoesNotExist:
            # Create an empty model object.
            self.userinfo = UserInfo()
    def scope_profile(self, user):
        """Profile claims; the `user` parameter is unused (reads self.userinfo)."""
        dic = {
            'name': self.userinfo.name,
            'given_name': self.userinfo.given_name,
            'family_name': self.userinfo.family_name,
            'middle_name': self.userinfo.middle_name,
            'nickname': self.userinfo.nickname,
            'preferred_username': self.userinfo.preferred_username,
            'profile': self.userinfo.profile,
            'picture': self.userinfo.picture,
            'website': self.userinfo.website,
            'gender': self.userinfo.gender,
            'birthdate': self.userinfo.birthdate,
            'zoneinfo': self.userinfo.zoneinfo,
            'locale': self.userinfo.locale,
            'updated_at': self.userinfo.updated_at,
        }
        return dic
    def scope_email(self, user):
        """Email claims; address comes from the User, flag from UserInfo."""
        dic = {
            'email': self.user.email,
            'email_verified': self.userinfo.email_verified,
        }
        return dic
    def scope_phone(self, user):
        """Phone claims from UserInfo."""
        dic = {
            'phone_number': self.userinfo.phone_number,
            'phone_number_verified': self.userinfo.phone_number_verified,
        }
        return dic
    def scope_address(self, user):
        """Nested address claims from UserInfo."""
        dic = {
            'address': {
                'formatted': self.userinfo.address_formatted,
                'street_address': self.userinfo.address_street_address,
                'locality': self.userinfo.address_locality,
                'region': self.userinfo.address_region,
                'postal_code': self.userinfo.address_postal_code,
                'country': self.userinfo.address_country,
            }
        }
        return dic
|
{"/oidc_provider/tests/test_token_endpoint.py": ["/oidc_provider/lib/utils/token.py", "/oidc_provider/tests/utils.py", "/oidc_provider/views.py"], "/oidc_provider/tests/utils.py": ["/oidc_provider/models.py"], "/oidc_provider/admin.py": ["/oidc_provider/models.py"], "/oidc_provider/lib/claims.py": ["/oidc_provider/models.py"], "/oidc_provider/lib/endpoints/discovery.py": ["/oidc_provider/lib/utils/common.py", "/oidc_provider/models.py"], "/oidc_provider/lib/utils/token.py": ["/oidc_provider/models.py"], "/oidc_provider/views.py": ["/oidc_provider/lib/endpoints/authorize.py", "/oidc_provider/lib/endpoints/discovery.py", "/oidc_provider/lib/errors.py"], "/oidc_provider/lib/endpoints/authorize.py": ["/oidc_provider/lib/errors.py", "/oidc_provider/lib/utils/common.py", "/oidc_provider/lib/utils/token.py", "/oidc_provider/models.py"], "/oidc_provider/settings.py": ["/oidc_provider/lib/claims.py"], "/oidc_provider/tests/test_authorize_endpoint.py": ["/oidc_provider/models.py", "/oidc_provider/tests/utils.py", "/oidc_provider/views.py"]}
|
7,623
|
davidcerezal/django-oidc-provider
|
refs/heads/master
|
/oidc_provider/lib/errors.py
|
import urllib
class RedirectUriError(Exception):
    """Raised when the authorization request's redirect_uri is missing or invalid."""
    error = 'Redirect URI Error'
    description = 'The request fails due to a missing, invalid, or mismatching redirection URI (redirect_uri).'
class ClientIdError(Exception):
    """Raised when the authorization request's client_id is missing or invalid."""
    error = 'Client ID Error'
    description = 'The client identifier (client_id) is missing or invalid.'
class AuthorizeError(Exception):
    """
    Error for the authorize endpoint; carries enough context (redirect_uri,
    error code, grant type) to build the error-redirect URI per spec.
    """

    _errors = {
        # Oauth2 errors.
        # https://tools.ietf.org/html/rfc6749#section-4.1.2.1
        'invalid_request': 'The request is otherwise malformed',

        'unauthorized_client': 'The client is not authorized to request an authorization code using this method',

        'access_denied': 'The resource owner or authorization server denied the request',

        'unsupported_response_type': 'The authorization server does not support obtaining an authorization code using '
                                     'this method',

        'invalid_scope': 'The requested scope is invalid, unknown, or malformed',

        'server_error': 'The authorization server encountered an error',

        'temporarily_unavailable': 'The authorization server is currently unable to handle the request due to a '
                                   'temporary overloading or maintenance of the server',

        # OpenID errors.
        # http://openid.net/specs/openid-connect-core-1_0.html#AuthError
        'interaction_required': 'The Authorization Server requires End-User interaction of some form to proceed',

        'login_required': 'The Authorization Server requires End-User authentication',

        'account_selection_required': 'The End-User is required to select a session at the Authorization Server',

        'consent_required': 'The Authorization Server requires End-User consent',

        'invalid_request_uri': 'The request_uri in the Authorization Request returns an error or contains invalid data',

        'invalid_request_object': 'The request parameter contains an invalid Request Object',

        'request_not_supported': 'The provider does not support use of the request parameter',

        'request_uri_not_supported': 'The provider does not support use of the request_uri parameter',

        'registration_not_supported': 'The provider does not support use of the registration parameter',
    }

    def __init__(self, redirect_uri, error, grant_type):
        # `error` must be one of the _errors keys; otherwise description is
        # None and create_uri() will fail on urllib.quote(None).
        self.error = error
        self.description = self._errors.get(error)
        self.redirect_uri = redirect_uri
        self.grant_type = grant_type

    def create_uri(self, redirect_uri, state):
        """Build the error redirect URI (query for code flow, fragment for implicit)."""
        # NOTE(review): urllib.quote is Python 2 API (urllib.parse.quote in
        # Python 3) — this module targets Python 2.
        description = urllib.quote(self.description)

        # See: http://openid.net/specs/openid-connect-core-1_0.html#ImplicitAuthError
        hash_or_question = '#' if self.grant_type == 'implicit' else '?'

        uri = '{0}{1}error={2}&error_description={3}'.format(
            redirect_uri,
            hash_or_question,
            self.error,
            description)

        # Add state if present.
        uri = uri + ('&state={0}'.format(state) if state else '')

        return uri

    @property
    def response(self):
        # NOTE(review): placeholder — always returns None; confirm whether
        # callers rely on this.
        pass
class TokenError(Exception):
    """
    Error for the token endpoint; create_dict() yields the OAuth2 JSON
    error body returned with HTTP 400.
    """

    _errors = {
        # Oauth2 errors.
        # https://tools.ietf.org/html/rfc6749#section-5.2
        'invalid_request': 'The request is otherwise malformed',

        'invalid_client': 'Client authentication failed (e.g., unknown client, no client authentication included, '
                          'or unsupported authentication method)',

        'invalid_grant': 'The provided authorization grant or refresh token is invalid, expired, revoked, does not '
                         'match the redirection URI used in the authorization request, or was issued to another client',

        'unauthorized_client': 'The authenticated client is not authorized to use this authorization grant type',

        'unsupported_grant_type': 'The authorization grant type is not supported by the authorization server',

        'invalid_scope': 'The requested scope is invalid, unknown, malformed, or exceeds the scope granted by the '
                         'resource owner',
    }

    def __init__(self, error):
        self.error = error
        # Unknown codes leave description as None.
        self.description = self._errors.get(error)

    def create_dict(self):
        """Return the error as a JSON-serializable dict."""
        return {
            'error': self.error,
            'error_description': self.description,
        }
class UserInfoError(Exception):
    """
    Error for the userinfo endpoint; carries the message and the HTTP
    status code mandated by RFC 6750 for each bearer-token error.
    """

    _errors = {
        # Oauth2 errors.
        # https://tools.ietf.org/html/rfc6750#section-3.1
        'invalid_request': (
            'The request is otherwise malformed', 400
        ),
        'invalid_token': (
            'The access token provided is expired, revoked, malformed, or invalid for other reasons', 401
        ),
        'insufficient_scope': (
            'The request requires higher privileges than provided by the access token', 403
        ),
    }

    def __init__(self, code):
        self.code = code
        # Unknown codes fall back to empty description/status.
        description, status = self._errors.get(code, ('', ''))
        self.description = description
        self.status = status
|
{"/oidc_provider/tests/test_token_endpoint.py": ["/oidc_provider/lib/utils/token.py", "/oidc_provider/tests/utils.py", "/oidc_provider/views.py"], "/oidc_provider/tests/utils.py": ["/oidc_provider/models.py"], "/oidc_provider/admin.py": ["/oidc_provider/models.py"], "/oidc_provider/lib/claims.py": ["/oidc_provider/models.py"], "/oidc_provider/lib/endpoints/discovery.py": ["/oidc_provider/lib/utils/common.py", "/oidc_provider/models.py"], "/oidc_provider/lib/utils/token.py": ["/oidc_provider/models.py"], "/oidc_provider/views.py": ["/oidc_provider/lib/endpoints/authorize.py", "/oidc_provider/lib/endpoints/discovery.py", "/oidc_provider/lib/errors.py"], "/oidc_provider/lib/endpoints/authorize.py": ["/oidc_provider/lib/errors.py", "/oidc_provider/lib/utils/common.py", "/oidc_provider/lib/utils/token.py", "/oidc_provider/models.py"], "/oidc_provider/settings.py": ["/oidc_provider/lib/claims.py"], "/oidc_provider/tests/test_authorize_endpoint.py": ["/oidc_provider/models.py", "/oidc_provider/tests/utils.py", "/oidc_provider/views.py"]}
|
7,624
|
davidcerezal/django-oidc-provider
|
refs/heads/master
|
/oidc_provider/lib/endpoints/discovery.py
|
from django.core.urlresolvers import reverse
from oidc_provider import settings
from oidc_provider.lib.utils.common import get_issuer
class ProviderInfoEndpoint(object):
    """Builds the OIDC discovery (provider metadata) document."""

    @classmethod
    def create_response_dic(cls):
        """Return the provider metadata as a JSON-serializable dict."""
        dic = {}

        dic['issuer'] = get_issuer()

        SITE_URL = settings.get('SITE_URL')

        # Absolute endpoint URLs, built from the configured site root.
        dic['authorization_endpoint'] = SITE_URL + reverse('oidc_provider:authorize')
        dic['token_endpoint'] = SITE_URL + reverse('oidc_provider:token')
        dic['userinfo_endpoint'] = SITE_URL + reverse('oidc_provider:userinfo')

        # NOTE(review): local import — presumably to avoid a circular import
        # at module load time; confirm before hoisting to the top of the file.
        from oidc_provider.models import Client
        types_supported = [x[0] for x in Client.RESPONSE_TYPE_CHOICES]
        dic['response_types_supported'] = types_supported

        # TODO:
        #dic['jwks_uri'] = None

        # See: http://openid.net/specs/openid-connect-core-1_0.html#SubjectIDTypes
        dic['subject_types_supported'] = ['public']

        return dic
|
{"/oidc_provider/tests/test_token_endpoint.py": ["/oidc_provider/lib/utils/token.py", "/oidc_provider/tests/utils.py", "/oidc_provider/views.py"], "/oidc_provider/tests/utils.py": ["/oidc_provider/models.py"], "/oidc_provider/admin.py": ["/oidc_provider/models.py"], "/oidc_provider/lib/claims.py": ["/oidc_provider/models.py"], "/oidc_provider/lib/endpoints/discovery.py": ["/oidc_provider/lib/utils/common.py", "/oidc_provider/models.py"], "/oidc_provider/lib/utils/token.py": ["/oidc_provider/models.py"], "/oidc_provider/views.py": ["/oidc_provider/lib/endpoints/authorize.py", "/oidc_provider/lib/endpoints/discovery.py", "/oidc_provider/lib/errors.py"], "/oidc_provider/lib/endpoints/authorize.py": ["/oidc_provider/lib/errors.py", "/oidc_provider/lib/utils/common.py", "/oidc_provider/lib/utils/token.py", "/oidc_provider/models.py"], "/oidc_provider/settings.py": ["/oidc_provider/lib/claims.py"], "/oidc_provider/tests/test_authorize_endpoint.py": ["/oidc_provider/models.py", "/oidc_provider/tests/utils.py", "/oidc_provider/views.py"]}
|
7,625
|
davidcerezal/django-oidc-provider
|
refs/heads/master
|
/oidc_provider/lib/utils/token.py
|
from datetime import timedelta
import time
import uuid
from django.utils import timezone
import jwt
from oidc_provider.models import *
from oidc_provider import settings
def create_id_token(iss, sub, aud, auth_time):
    """
    Receives a user object, iss (issuer) and aud (audience).
    Then creates the id_token dic.

    See: http://openid.net/specs/openid-connect-core-1_0.html#IDToken

    Return a dic.
    """
    expires_in = settings.get('OIDC_IDTOKEN_EXPIRE')
    now = timezone.now()

    # JWT claims carry POSIX timestamps, so convert the datetimes.
    issued_at = time.mktime(now.timetuple())
    expires_at = time.mktime((now + timedelta(seconds=expires_in)).timetuple())
    authenticated_at = time.mktime(auth_time.timetuple())

    return {
        'iss': iss,
        'sub': sub,
        'aud': aud,
        'exp': expires_at,
        'iat': issued_at,
        'auth_time': authenticated_at,
    }
def encode_id_token(id_token_dic, client_secret):
    """
    Represent the ID Token as a JSON Web Token (JWT), signed with the
    client's secret.

    Return a hash.
    """
    return jwt.encode(id_token_dic, client_secret)
def create_token(user, client, id_token_dic, scope):
    """
    Create and populate a Token object (not yet saved).

    Return a Token object.
    """
    expiry = timezone.now() + timedelta(
        seconds=settings.get('OIDC_TOKEN_EXPIRE'))

    token = Token()
    token.user = user
    token.client = client
    # Opaque random identifiers for the access/refresh token pair.
    token.access_token = uuid.uuid4().hex
    token.refresh_token = uuid.uuid4().hex
    token.id_token = id_token_dic
    token.expires_at = expiry
    token.scope = scope

    return token
def create_code(user, client, scope):
    """
    Create and populate a Code object (not yet saved).

    Return a Code object.
    """
    expiry = timezone.now() + timedelta(
        seconds=settings.get('OIDC_CODE_EXPIRE'))

    code = Code()
    code.user = user
    code.client = client
    # Opaque random authorization code.
    code.code = uuid.uuid4().hex
    code.expires_at = expiry
    code.scope = scope

    return code
|
{"/oidc_provider/tests/test_token_endpoint.py": ["/oidc_provider/lib/utils/token.py", "/oidc_provider/tests/utils.py", "/oidc_provider/views.py"], "/oidc_provider/tests/utils.py": ["/oidc_provider/models.py"], "/oidc_provider/admin.py": ["/oidc_provider/models.py"], "/oidc_provider/lib/claims.py": ["/oidc_provider/models.py"], "/oidc_provider/lib/endpoints/discovery.py": ["/oidc_provider/lib/utils/common.py", "/oidc_provider/models.py"], "/oidc_provider/lib/utils/token.py": ["/oidc_provider/models.py"], "/oidc_provider/views.py": ["/oidc_provider/lib/endpoints/authorize.py", "/oidc_provider/lib/endpoints/discovery.py", "/oidc_provider/lib/errors.py"], "/oidc_provider/lib/endpoints/authorize.py": ["/oidc_provider/lib/errors.py", "/oidc_provider/lib/utils/common.py", "/oidc_provider/lib/utils/token.py", "/oidc_provider/models.py"], "/oidc_provider/settings.py": ["/oidc_provider/lib/claims.py"], "/oidc_provider/tests/test_authorize_endpoint.py": ["/oidc_provider/models.py", "/oidc_provider/tests/utils.py", "/oidc_provider/views.py"]}
|
7,626
|
davidcerezal/django-oidc-provider
|
refs/heads/master
|
/oidc_provider/views.py
|
from django.contrib.auth.views import redirect_to_login
from django.http import HttpResponse, HttpResponseRedirect, JsonResponse
from django.shortcuts import render
from django.template.loader import render_to_string
from django.views.decorators.http import require_http_methods
from django.views.generic import View
from oidc_provider.lib.endpoints.authorize import *
from oidc_provider.lib.endpoints.discovery import *
from oidc_provider.lib.endpoints.token import *
from oidc_provider.lib.endpoints.userinfo import *
from oidc_provider.lib.errors import *
class AuthorizeView(View):
    """
    Authorization endpoint: GET validates the request and shows the consent
    form (or redirects to login); POST processes the consent decision.
    """

    def get(self, request, *args, **kwargs):
        authorize = AuthorizeEndpoint(request)

        try:
            authorize.validate_params()

            if request.user.is_authenticated():
                # Check if there's a hook setted.
                hook_resp = settings.get('OIDC_AFTER_USERLOGIN_HOOK')(
                    request=request, user=request.user,
                    client=authorize.client)
                if hook_resp:
                    # The hook short-circuits the flow with its own response.
                    return hook_resp

                # Generate hidden inputs for the form.
                context = {
                    'params': authorize.params,
                }
                hidden_inputs = render_to_string(
                    'oidc_provider/hidden_inputs.html', context)

                # Remove `openid` from scope list
                # since we don't need to print it.
                authorize.params.scope.remove('openid')

                context = {
                    'client': authorize.client,
                    'hidden_inputs': hidden_inputs,
                    'params': authorize.params,
                }

                return render(request, 'oidc_provider/authorize.html', context)
            else:
                # Anonymous user: bounce through the login view, preserving
                # the full authorize URL as the "next" target.
                path = request.get_full_path()
                return redirect_to_login(path)

        except (ClientIdError, RedirectUriError) as error:
            # The redirect target itself is untrusted here, so render the
            # error locally instead of redirecting.
            context = {
                'error': error.error,
                'description': error.description,
            }
            return render(request, 'oidc_provider/error.html', context)

        except (AuthorizeError) as error:
            uri = error.create_uri(
                authorize.params.redirect_uri,
                authorize.params.state)
            return HttpResponseRedirect(uri)

    def post(self, request, *args, **kwargs):
        authorize = AuthorizeEndpoint(request)

        # The consent form only submits 'allow' when the user accepted.
        allow = True if request.POST.get('allow') else False

        try:
            uri = authorize.create_response_uri(allow)
            return HttpResponseRedirect(uri)

        except (AuthorizeError) as error:
            uri = error.create_uri(
                authorize.params.redirect_uri,
                authorize.params.state)
            return HttpResponseRedirect(uri)
class TokenView(View):
    """Token endpoint: exchanges a grant for tokens, or returns an OAuth2 error body."""

    def post(self, request, *args, **kwargs):
        endpoint = TokenEndpoint(request)

        try:
            endpoint.validate_params()
            return TokenEndpoint.response(endpoint.create_response_dic())
        except TokenError as error:
            # Spec-mandated JSON error body with HTTP 400.
            return TokenEndpoint.response(error.create_dict(), status=400)
@require_http_methods(['GET', 'POST'])
def userinfo(request):
    """Userinfo endpoint: returns the claims for the bearer token's user."""
    endpoint = UserInfoEndpoint(request)

    try:
        endpoint.validate_params()
        return UserInfoEndpoint.response(endpoint.create_response_dic())
    except UserInfoError as error:
        return UserInfoEndpoint.error_response(
            error.code,
            error.description,
            error.status)
class ProviderInfoView(View):
    """Discovery endpoint: serves the provider metadata document as JSON."""

    def get(self, request, *args, **kwargs):
        return JsonResponse(ProviderInfoEndpoint.create_response_dic())
|
{"/oidc_provider/tests/test_token_endpoint.py": ["/oidc_provider/lib/utils/token.py", "/oidc_provider/tests/utils.py", "/oidc_provider/views.py"], "/oidc_provider/tests/utils.py": ["/oidc_provider/models.py"], "/oidc_provider/admin.py": ["/oidc_provider/models.py"], "/oidc_provider/lib/claims.py": ["/oidc_provider/models.py"], "/oidc_provider/lib/endpoints/discovery.py": ["/oidc_provider/lib/utils/common.py", "/oidc_provider/models.py"], "/oidc_provider/lib/utils/token.py": ["/oidc_provider/models.py"], "/oidc_provider/views.py": ["/oidc_provider/lib/endpoints/authorize.py", "/oidc_provider/lib/endpoints/discovery.py", "/oidc_provider/lib/errors.py"], "/oidc_provider/lib/endpoints/authorize.py": ["/oidc_provider/lib/errors.py", "/oidc_provider/lib/utils/common.py", "/oidc_provider/lib/utils/token.py", "/oidc_provider/models.py"], "/oidc_provider/settings.py": ["/oidc_provider/lib/claims.py"], "/oidc_provider/tests/test_authorize_endpoint.py": ["/oidc_provider/models.py", "/oidc_provider/tests/utils.py", "/oidc_provider/views.py"]}
|
7,627
|
davidcerezal/django-oidc-provider
|
refs/heads/master
|
/oidc_provider/lib/endpoints/authorize.py
|
from datetime import timedelta
import uuid
from django.utils import timezone
from oidc_provider.lib.errors import *
from oidc_provider.lib.utils.common import get_issuer
from oidc_provider.lib.utils.params import *
from oidc_provider.lib.utils.token import *
from oidc_provider.models import *
from oidc_provider import settings
class AuthorizeEndpoint(object):
    """
    Implements the OIDC authorization endpoint: extracts and validates the
    request parameters, then builds the success or error redirect URI for
    both the Authorization Code and Implicit flows.
    """

    def __init__(self, request):
        self.request = request
        self.params = Params()
        # Because in this endpoint we handle both GET
        # and POST request.
        self.query_dict = (self.request.POST if self.request.method == 'POST'
                           else self.request.GET)
        self._extract_params()

        # Determine which flow to use.
        if self.params.response_type in ['code']:
            self.grant_type = 'authorization_code'
        elif self.params.response_type in ['id_token', 'id_token token']:
            self.grant_type = 'implicit'
            self._extract_implicit_params()
        else:
            # Unknown response_type; validate_params() rejects it later.
            self.grant_type = None

    def _extract_params(self):
        """
        Get all the params used by the Authorization Code Flow
        (and also for the Implicit).

        See: http://openid.net/specs/openid-connect-core-1_0.html#AuthRequest
        """
        self.params.client_id = self.query_dict.get('client_id', '')
        self.params.redirect_uri = self.query_dict.get('redirect_uri', '')
        self.params.response_type = self.query_dict.get('response_type', '')
        # 'scope' arrives space-separated; stored as a list.
        self.params.scope = self.query_dict.get('scope', '').split()
        self.params.state = self.query_dict.get('state', '')

    def _extract_implicit_params(self):
        """
        Get specific params used by the Implicit Flow.

        See: http://openid.net/specs/openid-connect-core-1_0.html#ImplicitAuthRequest
        """
        self.params.nonce = self.query_dict.get('nonce', '')

    def validate_params(self):
        """
        Validate redirect_uri, scope, client and response_type.

        Raises RedirectUriError, AuthorizeError or ClientIdError.
        On success, sets self.client as a side effect.
        """
        if not self.params.redirect_uri:
            raise RedirectUriError()

        if not ('openid' in self.params.scope):
            raise AuthorizeError(
                self.params.redirect_uri,
                'invalid_scope',
                self.grant_type)

        try:
            self.client = Client.objects.get(client_id=self.params.client_id)

            # redirect_uri must be one of the client's registered URIs.
            if not (self.params.redirect_uri in self.client.redirect_uris):
                raise RedirectUriError()

            # response_type must be recognized AND match the client's config.
            if not self.grant_type or not (self.params.response_type == self.client.response_type):
                raise AuthorizeError(
                    self.params.redirect_uri,
                    'unsupported_response_type',
                    self.grant_type)

        except Client.DoesNotExist:
            raise ClientIdError()

    def create_response_uri(self, allow):
        """
        Build the redirect URI for the user agent after the consent decision.

        'allow' is the user's consent. Raises AuthorizeError on denial or
        on any internal failure.
        """
        if not allow:
            raise AuthorizeError(
                self.params.redirect_uri,
                'access_denied',
                self.grant_type)

        try:
            self.validate_params()

            if self.grant_type == 'authorization_code':
                code = create_code(
                    user=self.request.user,
                    client=self.client,
                    scope=self.params.scope)
                code.save()
                # Create the response uri.
                uri = self.params.redirect_uri + '?code={0}'.format(code.code)
            else:  # Implicit Flow
                # TODO refactor since it's the same as the token endpoint
                sub = settings.get('OIDC_IDTOKEN_SUB_GENERATOR')(
                    user=self.request.user)
                id_token_dic = create_id_token(
                    iss=get_issuer(),
                    sub=sub,
                    aud=self.client.client_id,
                    auth_time=self.request.user.last_login)
                token = create_token(
                    user=self.request.user,
                    client=self.client,
                    id_token_dic=id_token_dic,
                    scope=self.params.scope)
                # Store the token.
                token.save()
                id_token = encode_id_token(
                    id_token_dic, self.client.client_secret)
                # Create the response uri. Implicit flow returns values in
                # the fragment, not the query.
                uri = self.params.redirect_uri + \
                    '#token_type={0}&id_token={1}&expires_in={2}'.format(
                        'bearer',
                        id_token,
                        60 * 10,
                    )
                # Check if response_type is 'id_token token' then
                # add access_token to the fragment.
                if self.params.response_type == 'id_token token':
                    uri += '&access_token={0}'.format(token.access_token)
        except:
            # NOTE(review): bare except — this also converts AuthorizeError
            # raised by validate_params() (e.g. invalid_scope) into a generic
            # 'server_error', and hides unexpected failures; confirm intended.
            raise AuthorizeError(
                self.params.redirect_uri,
                'server_error',
                self.grant_type)

        # Add state if present.
        uri += ('&state={0}'.format(self.params.state) if self.params.state else '')

        return uri
|
{"/oidc_provider/tests/test_token_endpoint.py": ["/oidc_provider/lib/utils/token.py", "/oidc_provider/tests/utils.py", "/oidc_provider/views.py"], "/oidc_provider/tests/utils.py": ["/oidc_provider/models.py"], "/oidc_provider/admin.py": ["/oidc_provider/models.py"], "/oidc_provider/lib/claims.py": ["/oidc_provider/models.py"], "/oidc_provider/lib/endpoints/discovery.py": ["/oidc_provider/lib/utils/common.py", "/oidc_provider/models.py"], "/oidc_provider/lib/utils/token.py": ["/oidc_provider/models.py"], "/oidc_provider/views.py": ["/oidc_provider/lib/endpoints/authorize.py", "/oidc_provider/lib/endpoints/discovery.py", "/oidc_provider/lib/errors.py"], "/oidc_provider/lib/endpoints/authorize.py": ["/oidc_provider/lib/errors.py", "/oidc_provider/lib/utils/common.py", "/oidc_provider/lib/utils/token.py", "/oidc_provider/models.py"], "/oidc_provider/settings.py": ["/oidc_provider/lib/claims.py"], "/oidc_provider/tests/test_authorize_endpoint.py": ["/oidc_provider/models.py", "/oidc_provider/tests/utils.py", "/oidc_provider/views.py"]}
|
7,628
|
davidcerezal/django-oidc-provider
|
refs/heads/master
|
/oidc_provider/settings.py
|
from django.conf import settings
class DefaultSettings(object):
    """
    Fallback values for every oidc_provider setting. get() consults this
    object first and then lets the project's Django settings override it;
    a default of None marks the setting as REQUIRED.
    """

    @property
    def LOGIN_URL(self):
        """
        REQUIRED.
        """
        return None

    @property
    def SITE_URL(self):
        """
        REQUIRED.
        """
        return None

    @property
    def OIDC_AFTER_USERLOGIN_HOOK(self):
        """
        OPTIONAL. Callable run after login; returning a response
        short-circuits the authorize view.
        """
        def default_hook_func(request, user, client):
            # Default hook: no-op, flow continues normally.
            return None

        return default_hook_func

    @property
    def OIDC_CODE_EXPIRE(self):
        """
        OPTIONAL. Authorization code lifetime in seconds (10 minutes).
        """
        return 60*10

    @property
    def OIDC_EXTRA_SCOPE_CLAIMS(self):
        """
        OPTIONAL.
        """
        # NOTE(review): local import — presumably avoids a circular import
        # (claims.py imports from this package); confirm before hoisting.
        from oidc_provider.lib.claims import AbstractScopeClaims
        return AbstractScopeClaims

    @property
    def OIDC_IDTOKEN_EXPIRE(self):
        """
        OPTIONAL. ID token lifetime in seconds (10 minutes).
        """
        return 60*10

    @property
    def OIDC_IDTOKEN_SUB_GENERATOR(self):
        """
        OPTIONAL. Callable mapping a user to the 'sub' claim value.
        """
        def default_sub_generator(user):
            return user.id

        return default_sub_generator

    @property
    def OIDC_TOKEN_EXPIRE(self):
        """
        OPTIONAL. Access token lifetime in seconds (1 hour).
        """
        return 60*60
default_settings = DefaultSettings()


def get(name):
    '''
    Helper function to use inside the package.

    Returns the value of setting `name`, preferring the project's Django
    settings and falling back to `default_settings`. Raises Exception when
    a REQUIRED setting (default is None) is not defined by the project.
    An unknown setting name raises AttributeError.
    '''
    # Fetch the fallback first, outside the try block: an unknown name now
    # fails fast with AttributeError instead of causing an UnboundLocalError
    # on `value` inside the except clause (the original bug).
    value = getattr(default_settings, name)
    try:
        value = getattr(settings, name)
    except AttributeError:
        # Project didn't define it; required settings must not stay None.
        if value is None:
            raise Exception('You must set ' + name + ' in your settings.')
    return value
|
{"/oidc_provider/tests/test_token_endpoint.py": ["/oidc_provider/lib/utils/token.py", "/oidc_provider/tests/utils.py", "/oidc_provider/views.py"], "/oidc_provider/tests/utils.py": ["/oidc_provider/models.py"], "/oidc_provider/admin.py": ["/oidc_provider/models.py"], "/oidc_provider/lib/claims.py": ["/oidc_provider/models.py"], "/oidc_provider/lib/endpoints/discovery.py": ["/oidc_provider/lib/utils/common.py", "/oidc_provider/models.py"], "/oidc_provider/lib/utils/token.py": ["/oidc_provider/models.py"], "/oidc_provider/views.py": ["/oidc_provider/lib/endpoints/authorize.py", "/oidc_provider/lib/endpoints/discovery.py", "/oidc_provider/lib/errors.py"], "/oidc_provider/lib/endpoints/authorize.py": ["/oidc_provider/lib/errors.py", "/oidc_provider/lib/utils/common.py", "/oidc_provider/lib/utils/token.py", "/oidc_provider/models.py"], "/oidc_provider/settings.py": ["/oidc_provider/lib/claims.py"], "/oidc_provider/tests/test_authorize_endpoint.py": ["/oidc_provider/models.py", "/oidc_provider/tests/utils.py", "/oidc_provider/views.py"]}
|
7,629
|
davidcerezal/django-oidc-provider
|
refs/heads/master
|
/oidc_provider/tests/test_authorize_endpoint.py
|
import urllib
import uuid
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.contrib.auth.models import AnonymousUser
from django.core.urlresolvers import reverse
from django.test import RequestFactory
from django.test import TestCase
from oidc_provider import settings
from oidc_provider.models import *
from oidc_provider.tests.utils import *
from oidc_provider.views import *
class AuthorizationCodeFlowTestCase(TestCase):
    """
    Test cases for Authorize Endpoint using Authorization Code Flow.

    NOTE(review): uses Python 2 APIs (urllib.urlencode, urllib.unquote,
    dict.iteritems) — this suite targets Python 2.
    """

    def setUp(self):
        self.factory = RequestFactory()
        self.user = create_fake_user()
        self.client = create_fake_client(response_type='code')
        self.state = uuid.uuid4().hex

    def test_missing_parameters(self):
        """
        If the request fails due to a missing, invalid, or mismatching
        redirection URI, or if the client identifier is missing or invalid,
        the authorization server SHOULD inform the resource owner of the error.

        See: https://tools.ietf.org/html/rfc6749#section-4.1.2.1
        """
        url = reverse('oidc_provider:authorize')

        request = self.factory.get(url)

        response = AuthorizeView.as_view()(request)

        # Error is rendered locally (no redirect), so expect 200 with a body.
        self.assertEqual(response.status_code, 200)
        self.assertEqual(bool(response.content), True)

    def test_invalid_response_type(self):
        """
        The OP informs the RP by using the Error Response parameters defined
        in Section 4.1.2.1 of OAuth 2.0.

        See: http://openid.net/specs/openid-connect-core-1_0.html#AuthError
        """
        # Create an authorize request with an unsupported response_type.
        # urlencode emits '+' for spaces; normalize to %20.
        query_str = urllib.urlencode({
            'client_id': self.client.client_id,
            'response_type': 'something_wrong',
            'redirect_uri': self.client.default_redirect_uri,
            'scope': 'openid email',
            'state': self.state,
        }).replace('+', '%20')

        url = reverse('oidc_provider:authorize') + '?' + query_str

        request = self.factory.get(url)

        response = AuthorizeView.as_view()(request)

        # This error is reported back to the RP via redirect.
        self.assertEqual(response.status_code, 302)
        self.assertEqual(response.has_header('Location'), True)

        # Should be an 'error' component in query.
        query_exists = 'error=' in response['Location']
        self.assertEqual(query_exists, True)

    def test_user_not_logged(self):
        """
        The Authorization Server attempts to Authenticate the End-User by
        redirecting to the login view.

        See: http://openid.net/specs/openid-connect-core-1_0.html#Authenticates
        """
        query_str = urllib.urlencode({
            'client_id': self.client.client_id,
            'response_type': 'code',
            'redirect_uri': self.client.default_redirect_uri,
            'scope': 'openid email',
            'state': self.state,
        }).replace('+', '%20')

        url = reverse('oidc_provider:authorize') + '?' + query_str

        request = self.factory.get(url)
        request.user = AnonymousUser()

        response = AuthorizeView.as_view()(request)

        # Check if user was redirected to the login view.
        login_url_exists = settings.get('LOGIN_URL') in response['Location']
        self.assertEqual(login_url_exists, True)

        # Check if the login will redirect to a valid url.
        try:
            next_value = response['Location'].split(REDIRECT_FIELD_NAME + '=')[1]
            next_url = urllib.unquote(next_value)
            is_next_ok = next_url == url
        except:
            is_next_ok = False
        self.assertEqual(is_next_ok, True)

    def test_user_consent_inputs(self):
        """
        Once the End-User is authenticated, the Authorization Server MUST
        obtain an authorization decision before releasing information to
        the Client.

        See: http://openid.net/specs/openid-connect-core-1_0.html#Consent
        """
        query_str = urllib.urlencode({
            'client_id': self.client.client_id,
            'response_type': 'code',
            'redirect_uri': self.client.default_redirect_uri,
            'scope': 'openid email',
            'state': self.state,
        }).replace('+', '%20')

        url = reverse('oidc_provider:authorize') + '?' + query_str

        request = self.factory.get(url)
        # Simulate that the user is logged.
        request.user = self.user

        # Remove the hook, because we want to test default behaviour.
        OIDC_AFTER_USERLOGIN_HOOK = settings.default_settings.OIDC_AFTER_USERLOGIN_HOOK
        with self.settings(
                OIDC_AFTER_USERLOGIN_HOOK=OIDC_AFTER_USERLOGIN_HOOK):
            response = AuthorizeView.as_view()(request)

        # Check if hidden inputs exists in the form,
        # also if their values are valid.
        input_html = '<input name="{0}" type="hidden" value="{1}" />'

        to_check = {
            'client_id': self.client.client_id,
            'redirect_uri': self.client.default_redirect_uri,
            'response_type': 'code',
        }

        for key, value in to_check.iteritems():
            is_input_ok = input_html.format(key, value) in response.content
            self.assertEqual(is_input_ok, True,
                             msg='Hidden input for "'+key+'" fails.')

    def test_user_consent_response(self):
        """
        First,
        if the user denied the consent we must ensure that
        the error response parameters are added to the query component
        of the Redirection URI.
        Second,
        if the user allow the RP then the server MUST return
        the parameters defined in Section 4.1.2 of OAuth 2.0 [RFC6749]
        by adding them as query parameters to the redirect_uri.
        """
        response_type = 'code'

        url = reverse('oidc_provider:authorize')

        post_data = {
            'client_id': self.client.client_id,
            'redirect_uri': self.client.default_redirect_uri,
            'response_type': response_type,
            'scope': 'openid email',
            'state': self.state,
        }

        request = self.factory.post(url, data=post_data)
        # Simulate that the user is logged.
        request.user = self.user

        response = AuthorizeView.as_view()(request)

        # Because user doesn't allow app, SHOULD exists an error parameter
        # in the query.
        self.assertEqual('error=' in response['Location'], True,
                         msg='error param is missing.')
        self.assertEqual('access_denied' in response['Location'], True,
                         msg='access_denied param is missing.')

        # Simulate user authorization.
        post_data['allow'] = 'Accept'  # Should be the value of the button.

        request = self.factory.post(url, data=post_data)
        # Simulate that the user is logged.
        request.user = self.user

        response = AuthorizeView.as_view()(request)

        # Validate the code returned by the OP.
        code = (response['Location'].split('code='))[1].split('&')[0]
        try:
            code = Code.objects.get(code=code)
            is_code_ok = (code.client == self.client) and \
                         (code.user == self.user)
        except:
            is_code_ok = False
        self.assertEqual(is_code_ok, True,
                         msg='Code returned is invalid.')

        # Check if the state is returned.
        state = (response['Location'].split('state='))[1].split('&')[0]
        self.assertEqual(state == self.state, True,
                         msg='State change or is missing.')
class AuthorizationImplicitFlowTestCase(TestCase):
    """
    Test cases for Authorize Endpoint using Implicit Flow.
    """

    def setUp(self):
        self.factory = RequestFactory()
        self.user = create_fake_user()
        self.client = create_fake_client(response_type='id_token token')
        self.state = uuid.uuid4().hex

    # TODO
    def test_something(self):
        query_str = urllib.urlencode({
            'client_id': self.client.client_id,
            'response_type': 'id_token token',
            'redirect_uri': self.client.default_redirect_uri,
            'scope': 'openid email',
            'state': self.state,
        }).replace('+', '%20')

        # NOTE(review): fragment ('#') components are never sent to the
        # server, so request.GET will be empty here — confirm the intent
        # of this (asserts nothing; still a TODO).
        url = reverse('oidc_provider:authorize') + '#' + query_str

        request = self.factory.get(url)
        # Simulate that the user is logged.
        request.user = self.user

        response = AuthorizeView.as_view()(request)
|
{"/oidc_provider/tests/test_token_endpoint.py": ["/oidc_provider/lib/utils/token.py", "/oidc_provider/tests/utils.py", "/oidc_provider/views.py"], "/oidc_provider/tests/utils.py": ["/oidc_provider/models.py"], "/oidc_provider/admin.py": ["/oidc_provider/models.py"], "/oidc_provider/lib/claims.py": ["/oidc_provider/models.py"], "/oidc_provider/lib/endpoints/discovery.py": ["/oidc_provider/lib/utils/common.py", "/oidc_provider/models.py"], "/oidc_provider/lib/utils/token.py": ["/oidc_provider/models.py"], "/oidc_provider/views.py": ["/oidc_provider/lib/endpoints/authorize.py", "/oidc_provider/lib/endpoints/discovery.py", "/oidc_provider/lib/errors.py"], "/oidc_provider/lib/endpoints/authorize.py": ["/oidc_provider/lib/errors.py", "/oidc_provider/lib/utils/common.py", "/oidc_provider/lib/utils/token.py", "/oidc_provider/models.py"], "/oidc_provider/settings.py": ["/oidc_provider/lib/claims.py"], "/oidc_provider/tests/test_authorize_endpoint.py": ["/oidc_provider/models.py", "/oidc_provider/tests/utils.py", "/oidc_provider/views.py"]}
|
7,669
|
jQwotos/anime_scrapers
|
refs/heads/master
|
/info_collectors/anidb.py
|
import requests
import os
from bs4 import BeautifulSoup
from difflib import SequenceMatcher
import sys
# Constants
BASE_URL = "http://api.anidb.net:9001/httpapi?request=anime"  # AniDB HTTP API entry point
BASE_PATH = os.path.dirname(os.path.realpath(__file__))
SEARCH_FILE = os.path.join(BASE_PATH, "anime-titles.xml")  # locally cached title dump, searched offline
IMAGE_URL = "http://img7.anidb.net/pics/anime/"
CLIENT = "fadedanimefinder"  # client name registered with AniDB
CLIENT_VERSION = 1
MIN_SIMILARITY_RATIO = 0.5  # minimum SequenceMatcher ratio for a title to count as a match

# Make the sibling helper module importable regardless of the CWD this
# collector is loaded from.
sys.path.append(BASE_PATH)
from _init_anidb import download_list
def _similar(original_txt, matched_txt):
return SequenceMatcher(None, original_txt, matched_txt).ratio()
def search(query, strict=False):
    '''
    Search for a particular anime among the DB.
    In this module, `strict` is a dummy parameter, and does not do anything.
    Returns a list which contains a dict, containing the show ID and different
    names. Use that ID to get detailed info via getDetailedInfo(ID).
    '''
    # Refresh the local title dump if needed, then parse it.
    download_list.get_file()
    with open(SEARCH_FILE, "rb") as xml_file:
        raw_xml = xml_file.read()
    title_index = BeautifulSoup(raw_xml, "xml").animetitles

    matches = []
    scores = []
    for anime in title_index.findAll("anime"):
        # Score each show by its best-matching title above the threshold.
        best_score = 0
        for title in anime.findAll("title"):
            score = _similar(query, title.string)
            if score > MIN_SIMILARITY_RATIO and score > best_score:
                best_score = score
        if not best_score:
            continue
        scores.append(best_score)
        anime_id = int(anime['aid'])
        names = [title.string for title in
                 anime.findAll("title", attrs={"type": ["main", "official"]})]
        matches.append({"id": anime_id, "titles": names})

    # Best matches first.
    ranked = sorted(zip(scores, matches),
                    key=lambda pair: pair[0], reverse=True)
    return [entry for (_, entry) in ranked]
def getDetailedInfo(id):
    '''
    Gets a detailed info from the ID provided. A dict is returned with
    the following keys. The type of the value is also mentioned.
    id: int, type: str, start_date: str, end_date: str, other_names: str,
    creators: [{str: str}], permanent_rating: float, image_url: str,
    description: str, recommendations: [{str: str}]
    '''
    response = requests.get(BASE_URL, params={
        "request": "anime",
        "aid": str(id),
        "protover": "1",
        "client": CLIENT,
        "clientver": str(CLIENT_VERSION)
    })
    response.raise_for_status()

    page = BeautifulSoup(response.text, "xml")

    # Pull each field out of the API's XML response.
    titles = page.find("titles").findAll("title")
    creators = page.find("creators").findAll("name")
    rating = page.find("ratings").find("permanent").string

    return {
        "id": id,
        "type": page.find("type").string,
        "episode_count": page.find("episodecount").string,
        "start_date": page.find("startdate").string,
        "end_date": page.find("enddate").string,
        "other_names": [title.string for title in titles],
        "creators": [{name['type']: name.string} for name in creators],
        "permanent_rating": float(rating),
        "image_url": IMAGE_URL + page.find("picture").string,
        "description": page.find("description").string
    }
# Dispatch registry consumed by the handler layer: each entry pairs the URL
# patterns a function can serve with the function itself. The pattern lists
# are empty — NOTE(review): presumably this collector is dispatched by name
# rather than by URL; verify against the handler's matching logic.
matching_urls = [
    {
        'urls': [],
        'function': search,
    },
    {
        'urls': [],
        'function': getDetailedInfo,
    },
]
|
{"/download_handler.py": ["/templates/module_search.py"], "/info_handler.py": ["/templates/module_search.py"]}
|
7,670
|
jQwotos/anime_scrapers
|
refs/heads/master
|
/download_handler.py
|
import logging
from .templates.module_search import ModuleSearch
class DownloadHandler(ModuleSearch):
    # Deals with resolving downloading of files
    def __init__(self):
        # Discover and load the downloader plugin modules from the
        # 'downloaders' directory (ModuleSearch._get_modules populates
        # self.modules).
        self._get_modules('downloaders')
    def single_download(self, link, abs_path):
        """
        Download a single episode.
        'link' is the full link of it (get it with scraper_handler).
        'abs_path' is full path + filename of downloaded file, example -
        "/home/User/MyDownloadedEpisode.mp4"

        Returns True on success, False otherwise.
        NOTE(review): returns False right after the FIRST module whose
        URL patterns match, even if that module's download fails --
        later modules are never tried.  Confirm this is intentional.
        """
        for module in self.modules:
            if self._try_match_module(link, module):
                if module.download(link, abs_path):
                    return True
                return False
        return False
    def resolve(self, data):
        # Try every (module, source) pair until one download succeeds.
        # 'data' is expected to carry 'epNum' and a list of 'sources',
        # each source having a 'link' key (shape inferred from usage
        # here -- confirm against scraper output).
        logging.info(
            "Trying to resolve '%s'"
            % (data['epNum'])
        )
        for module in self.modules:
            for source in data['sources']:
                logging.info(
                    "Trying to resolve '%s' source."
                    % (source['link'])
                )
                if self._try_match_module(source['link'], module):
                    logging.info(
                        "Found a matching module for '%s'."
                        % (source,)
                    )
                    # PEP8 Too long
                    # NOTE(review): when 'epNum' is missing, the whole
                    # source dict is used as the file name -- looks
                    # accidental; verify.
                    fileName = "%s.mp4" % (data['epNum'],) if 'epNum' in data else source
                    if module.download(source['link'], fileName):
                        break
# Module-level singleton used by importers of this module.
download_handler = DownloadHandler()
|
{"/download_handler.py": ["/templates/module_search.py"], "/info_handler.py": ["/templates/module_search.py"]}
|
7,671
|
jQwotos/anime_scrapers
|
refs/heads/master
|
/info_collectors/_init_anidb.py
|
from datetime import date
import requests
import os
# Directory containing this file; anchors the data-file paths below.
BASE_PATH = os.path.dirname(os.path.realpath(__file__))
# Stores the ordinal date of the last title-list download.
INFO_FILE = os.path.join(BASE_PATH, "last_download.txt")
# AniDB's full anime-title dump (refreshed at most every 7 days below).
DOWNLOAD_URL = "http://anidb.net/api/anime-titles.xml.gz"
# Local destination for the downloaded title list.
DOWNLOAD_FILE = os.path.join(BASE_PATH, "anime-titles.xml")
class DownloadList:
    """Maintain a local copy of the AniDB title list, refreshing it at
    most once every 7 days (tracked via an ordinal date in INFO_FILE)."""

    def __init__(self):
        # Staleness flag cached at construction time.
        self.need_download = self.need_to_download()

    def need_to_download(self):
        """Return True when the title list is missing or older than 7 days.

        Reads the ordinal date of the last download from INFO_FILE; a
        missing or empty file means a download is required.
        (The original had an unreachable trailing 'return False' and a
        nested if/else that this version collapses.)
        """
        try:
            with open(INFO_FILE, "r") as f:
                data = f.readline()
        except FileNotFoundError:
            # Never downloaded before.
            return True
        if not data:
            return True
        last_download = date.fromordinal(int(data))
        return (date.today() - last_download).days > 7

    def write_today_ordinal(self):
        """Record today's date (as an ordinal) as the last download time."""
        with open(INFO_FILE, "w") as f:
            f.write(str(date.today().toordinal()) + "\n")

    def download_list(self):
        """Stream the gzipped title list from AniDB into DOWNLOAD_FILE."""
        request = requests.get(DOWNLOAD_URL, stream=True)
        with open(DOWNLOAD_FILE, "wb") as f:
            for chunk in request.iter_content(chunk_size=1024):
                if chunk:
                    f.write(chunk)

    def get_file(self):
        """Download a fresh title list if the local copy is stale."""
        if self.need_to_download():
            # NOTE: timestamp written before the download (as in the
            # original) -- a failed download is not retried for 7 days.
            self.write_today_ordinal()
            self.download_list()
# Module-level singleton.
download_list = DownloadList()
|
{"/download_handler.py": ["/templates/module_search.py"], "/info_handler.py": ["/templates/module_search.py"]}
|
7,672
|
jQwotos/anime_scrapers
|
refs/heads/master
|
/scrapers/anime9.py
|
import logging
import requests
from bs4 import BeautifulSoup as bs
# Host identifier reported in every scrape result.
site_name = "9anime.is"
BASE_URL = 'https://9anime.is'
# Site search endpoint (GET with a 'keyword' query parameter).
SEARCH_URL = '%s/search' % (BASE_URL,)
# AJAX endpoint returning per-episode source info (GET with 'id').
INFO_API_URL = "%s/ajax/episode/info" % (BASE_URL,)
def _parse_search_single(data):
    """Convert one search-result <div class="item"> into a result dict."""
    poster = data.find("img")
    anchor = data.find("a", {"class": "name"})
    lang_div = data.find('div', {'class': 'lang'})
    # Entries without an explicit language badge default to subtitled.
    language = 'sub' if lang_div is None else lang_div.text
    return {
        'title': anchor.text,
        'link': anchor['href'],
        'language': language.lower(),
        'host': site_name,
        'poster': poster['src']
    }
def _parse_search_multi(data):
    """Parse every result item on a search page into a list of dicts."""
    results = []
    for item in data.findAll("div", {"class": "item"}):
        results.append(_parse_search_single(item))
    return results
def search(query):
    """Search 9anime for `query`; return a list of parsed result dicts."""
    params = {
        'keyword': query,
    }
    # Explicit parser for consistency with scrape_all_show_sources and to
    # silence bs4's "no parser was explicitly specified" warning.
    page = bs(requests.get(SEARCH_URL, params=params).content, 'html.parser')
    return _parse_search_multi(page)
def _scrape_episode_source(data):
return {
'link': data['file'],
'type': data['type'],
'quality': data['label'],
}
def _scrape_episode_sources(data):
    """Fetch the grabber URL and normalize every returned source entry."""
    payload = requests.get(data['grabber'], params=data['params']).json()
    return [_scrape_episode_source(entry) for entry in payload['data']]
def _scrape_episode_info(id):
    """Query the episode-info API for one episode id.

    Returns a list of normalized sources (direct case), a single
    {link, type} dict (embedded case), or None on a non-200 response.
    """
    logging.debug("'%s' is performing a info grab for '%s'" % (site_name, id,))
    response = requests.get(INFO_API_URL, params={'id': id})
    if response.status_code != 200:
        return None
    info = response.json()
    if info.get('type') == 'direct' or info.get('target') == '':
        return _scrape_episode_sources(info)
    return {
        'link': info.get('target'),
        'type': info.get('type'),
    }
def _parse_server_single_episode(data):
    """Build an episode dict from one <li>; None when no sources resolve."""
    anchor = data.find("a")
    episode_id = anchor['data-id']
    episode = {
        'data-id': episode_id,
        'epNum': anchor.text,
        'sources': _scrape_episode_info(episode_id),
    }
    if episode['sources'] is None:
        return None
    return episode
def _parse_server_episodes(data):
    """Parse every episode on one server tab, dropping unresolvable ones.

    Returns None (implicitly) when the tab contains no <li> entries.
    """
    parsed = [_parse_server_single_episode(item)
              for item in data.findAll("li")]
    if parsed:
        return [episode for episode in parsed if episode]
def _scrape_all_servers(data):
    """Collect episode lists from every active server tab on the page."""
    tabs = data.findAll("ul", {"class": "episodes range active"})
    parsed = (_parse_server_episodes(tab) for tab in tabs)
    return [server for server in parsed if server]
def format_combine_multi(unformatedOutput):
    """Flatten an {epNum: sources} mapping into a list of episode dicts,
    stripping leading zeros from episode numbers via int round-trip."""
    return [
        {'epNum': str(int(number)), 'sources': sources}
        for number, sources in unformatedOutput.items()
    ]
def combine_multi(servers):
    """Merge per-server episode lists into one list keyed by episode
    number, pooling each episode's sources from every server.

    (Removed a leftover debug ``print(servers)``.)
    """
    grouped = {}
    for server in servers:
        for episode in server:
            # Accumulate this server's sources under the episode number.
            grouped.setdefault(episode['epNum'], []).append(episode['sources'])
    return format_combine_multi(grouped)
def _scrape_title(data):
    """Return the show title from the page's <h1 class="title">."""
    heading = data.find('h1', {'class': 'title'})
    return heading.text
def scrape_all_show_sources(link):
    """Scrape a 9anime watch page.

    Returns {'episodes': <merged multi-server list from combine_multi>,
    'title': <show title>, 'host': <this scraper's site name>}.
    (Removed an unused ``body = data.find('body')`` lookup.)
    """
    logging.info(
        "A request for '%s' was made under %s scraper." %
        (link, site_name)
    )
    data = bs(requests.get(link).content, 'html.parser')
    servers = _scrape_all_servers(data)
    return {
        'episodes': combine_multi(servers),
        'title': _scrape_title(data),
        'host': site_name,
    }
# URL-dispatch table consumed by the handler framework: links matching
# one of the regexes are routed to scrape_all_show_sources.
# NOTE(review): the '.' between the two groups is an unescaped regex
# wildcard -- presumably meant as a literal dot; harmless in practice.
matching_urls = [
    {
        'urls': [
            r'https://9anime.to/watch/(.*).(.*)',
            r'https://9anime.is/watch/(.*).(.*)'
        ],
        'function': scrape_all_show_sources,
    },
]
|
{"/download_handler.py": ["/templates/module_search.py"], "/info_handler.py": ["/templates/module_search.py"]}
|
7,673
|
jQwotos/anime_scrapers
|
refs/heads/master
|
/info_handler.py
|
import glob
from .templates.module_search import ModuleSearch
class InfoHandler(ModuleSearch):
    """Fans search and detail queries out to every info-collector module."""

    def __init__(self):
        # Load every plugin found in the 'info_collectors' directory.
        self._get_modules('info_collectors')

    def _search_module(self, query, strict, module):
        # Delegate a single search to one collector module.
        return module.search(query, strict)

    def search(self, query, strict=False):
        """Run `query` against all collectors; one result list each."""
        results = []
        for collector in self.modules:
            results.append(self._search_module(query, strict, collector))
        return results

    def _details_module(self, id, module):
        # Delegate a detail lookup to one collector module.
        return module.getDetailedInfo(id)

    def getDetailedInfo(self, id):
        """Fetch detailed info for `id` from every collector."""
        return [self._details_module(id, collector)
                for collector in self.modules]
# Module-level singleton used by importers.
info_handler = InfoHandler()
|
{"/download_handler.py": ["/templates/module_search.py"], "/info_handler.py": ["/templates/module_search.py"]}
|
7,674
|
jQwotos/anime_scrapers
|
refs/heads/master
|
/test.py
|
import os
# Quick manual sanity check: print the parent of this script's directory
# (the same path ModuleSearch._get_modules resolves against).
fileLocation = os.path.realpath(__file__)
directory = os.path.dirname(fileLocation)
print(os.path.join(directory, os.pardir))
|
{"/download_handler.py": ["/templates/module_search.py"], "/info_handler.py": ["/templates/module_search.py"]}
|
7,675
|
jQwotos/anime_scrapers
|
refs/heads/master
|
/templates/module_search.py
|
import glob
import imp
import logging
import os
import re
class ModuleSearch(object):
def _load_single_module(self, f):
return imp.load_source(f[:-3], f)
def _load_modules(self):
return [self._load_single_module(x) for x in self.modules]
def _try_match_url(self, link, matchingURL):
return True if re.match(matchingURL, link) is not None else False
def _try_match_module_section(self, link, section):
urls = section['urls']
matches = [
section['function'] for x in urls
if self._try_match_url(link, x) is not False
]
return True if len(matches) > 0 else False
def _try_match_module(self, link, module):
sections = module.matching_urls
return [x['function'] for x in sections
if self._try_match_module_section(link, x) is not False]
def __is_underscore(self, f):
if f[f.rfind('/') + 1] == "_":
return True
return False
def _get_modules(self, location):
fileLocation = os.path.realpath(__file__)
directory = os.path.dirname(fileLocation)
self.module_location = os.path.join(directory, '..', location)
self.modules = glob.glob("%s/*.py" % (self.module_location))
self.modules = [
module for module in self.modules
if not self.__is_underscore(module)
]
'''
for i in range(len(self.modules)): # Delete modules beginning with '_'
module = self.modules[i]
if module[module.rfind("/") + 1] == "_":
del self.modules[i]
'''
self.modules = self._load_modules()
|
{"/download_handler.py": ["/templates/module_search.py"], "/info_handler.py": ["/templates/module_search.py"]}
|
7,677
|
dtmaciasca/abcBack
|
refs/heads/master
|
/abcBack_app/urls.py
|
from django.urls import path
from . import views
# URL routes for the events API: CRUD over Evento plus auth endpoints.
# Each path maps onto the correspondingly-named view in views.py.
urlpatterns = [
    path('create_evento/', views.postEvento, name='post_evento'),
    path('categorias/', views.getAllCategorias, name='get_categorias'),
    path('evento/<int:idEvento>', views.getDetailEvento, name='get_id_evento'),
    path('eventos/<int:idUser>', views.getAllEventos, name='get_eventos'),
    path('update_evento/<int:idEvento>', views.putEvento, name='put_evento'),
    path('delete_evento/<int:idEvento>', views.deleteEvento, name='delete_evento'),
    path('login/', views.login, name='login'),
    path('logout/', views.logout, name='logout'),
    path('getTokenVal/', views.getTokenVal, name='getTokenVal'),
    path('users/add/', views.postUser, name='addUser')
]
|
{"/abcBack_app/views.py": ["/abcBack_app/serializers.py", "/abcBack_app/models.py"], "/abcBack_app/serializers.py": ["/abcBack_app/models.py"]}
|
7,678
|
dtmaciasca/abcBack
|
refs/heads/master
|
/abcBack_app/views.py
|
from django.shortcuts import render
from django.views.decorators.csrf import csrf_exempt
from rest_framework.status import (HTTP_400_BAD_REQUEST, HTTP_404_NOT_FOUND, HTTP_200_OK)
from django.http import HttpResponse, HttpResponseBadRequest, JsonResponse
from django.core.serializers import *
from .serializers import EventoSerializer, CategoriaSerializer
from rest_framework.response import Response
from rest_framework.permissions import AllowAny
from django.contrib.auth import authenticate
from .models import *
import json, datetime
from rest_framework.authtoken.models import Token
from rest_framework.decorators import api_view, permission_classes
from django.db import IntegrityError
# Create your views here.
@csrf_exempt
def postEvento(request):
    """Create an Evento from a JSON body on POST.

    Expected keys: nombre, categoria (Categoria id), lugar, direccion,
    fecha_inicio / fecha_fin ('%Y-%m-%d'), es_presencial, user (User id).
    Returns the serialized event, or a 400 with the error text.
    NOTE(review): non-POST requests fall through and return None
    (-> HTTP 500); presumably only POST is routed here.
    """
    if request.method == 'POST':
        try:
            json_evento = json.loads(request.body.decode('utf-8'))
            user = User.objects.get(id=json_evento['user'])
            categoria = Categoria.objects.get(id=json_evento['categoria'])
            evento_model = Evento(
                nombre = json_evento['nombre'],
                categoria=categoria,
                lugar=json_evento['lugar'],
                direccion=json_evento['direccion'],
                fecha_inicio=datetime.datetime.strptime(json_evento['fecha_inicio'], '%Y-%m-%d'),
                fecha_fin=datetime.datetime.strptime(json_evento['fecha_fin'], '%Y-%m-%d'),
                es_presencial=json_evento['es_presencial'],
                user=user)
            evento_model.save()
            return HttpResponse(serialize("json", [evento_model]))
        except Exception as ex:
            # Any failure (bad JSON, missing key, unknown FK id, bad
            # date format) is reported as a 400 with the exception text.
            return HttpResponseBadRequest(
                content='BAD_REQUEST: ' + str(ex),
                status=HTTP_400_BAD_REQUEST
            )
@csrf_exempt
def getAllCategorias(request):
    """Return every Categoria as a JSON list on GET; 400 on any error.

    NOTE(review): non-GET requests return None (-> HTTP 500); confirm
    only GET is routed here.
    """
    try:
        data = Categoria.objects.all()
        if request.method == 'GET':
            serializer = CategoriaSerializer(data, many=True)
            return JsonResponse(serializer.data, safe=False)
    except Exception as ex:
        return HttpResponseBadRequest(
            content='BAD_REQUEST: ' + str(ex),
            status=HTTP_400_BAD_REQUEST
        )
@csrf_exempt
def putEvento(request, idEvento):
    """Partially update an Evento from a JSON body on PUT.

    Only the keys present (and non-null) in the body are applied:
    categoria (Categoria id), nombre, lugar, direccion, fecha_inicio,
    fecha_fin, es_presencial.  Returns 200 on success, 400 on error
    (including an unknown idEvento).
    """
    if request.method == 'PUT':
        try:
            # Fetching inside the try block turns an unknown id into a
            # 400 instead of an unhandled DoesNotExist (500).
            evento = Evento.objects.get(id=idEvento)
            json_evento = json.loads(request.body.decode('utf-8'))
            # .get() lets callers omit fields entirely; the original's
            # json_evento['x'] raised KeyError (-> 400) on any missing
            # key, defeating the per-field None checks below.
            if json_evento.get('categoria') is not None:
                evento.categoria = Categoria.objects.get(
                    id=json_evento['categoria'])
            if json_evento.get('nombre') is not None:
                evento.nombre = json_evento['nombre']
            if json_evento.get('lugar') is not None:
                evento.lugar = json_evento['lugar']
            if json_evento.get('direccion') is not None:
                evento.direccion = json_evento['direccion']
            if json_evento.get('fecha_inicio') is not None:
                evento.fecha_inicio = json_evento['fecha_inicio']
            if json_evento.get('fecha_fin') is not None:
                evento.fecha_fin = json_evento['fecha_fin']
            if json_evento.get('es_presencial') is not None:
                evento.es_presencial = json_evento['es_presencial']
            evento.save()
            return HttpResponse(status=HTTP_200_OK)
        except Exception as ex:
            return HttpResponseBadRequest(
                content='BAD_REQUEST: ' + str(ex),
                status=HTTP_400_BAD_REQUEST
            )
@csrf_exempt
def getAllEventos(request, idUser):
    """List one user's eventos (newest first) as JSON on GET; 400 on error.

    NOTE(review): non-GET requests return None (-> HTTP 500).
    """
    try:
        eventos = Evento.objects.filter(user=idUser).order_by('-fecha_creacion')
        if request.method == 'GET':
            serializer = EventoSerializer(eventos, many=True)
            return JsonResponse(serializer.data, safe=False)
    except Exception as ex:
        return HttpResponseBadRequest(
            content='BAD_REQUEST: ' + str(ex),
            status=HTTP_400_BAD_REQUEST
        )
@csrf_exempt
def deleteEvento(request, idEvento):
    """Delete an Evento by id on DELETE.  200 on success, 400 on error
    (including an unknown id).

    Removed the debug prints and the dead second delete: Django clears
    the instance's pk after delete(), so the old 'evento.id != None'
    branch never ran.
    """
    if request.method == 'DELETE':
        try:
            evento = Evento.objects.get(id=idEvento)
            evento.delete()
            return HttpResponse(status=HTTP_200_OK)
        except Exception as ex:
            return HttpResponseBadRequest(
                content='BAD_REQUEST: ' + str(ex),
                status=HTTP_400_BAD_REQUEST
            )
def getDetailEvento(request, idEvento):
    """Return the Evento with the given id, as a one-element JSON list,
    on GET.  An unknown id yields an empty list rather than a 404.

    Removed leftover debug prints.
    NOTE(review): non-GET requests return None (-> HTTP 500).
    """
    data = Evento.objects.filter(id=idEvento)
    if request.method == 'GET':
        serializer = EventoSerializer(data, many=True)
        return JsonResponse(serializer.data, safe=False)
@csrf_exempt
@api_view(["POST"])
@permission_classes((AllowAny,))
def login(request):
    """Authenticate a user and return (or create) a DRF token.

    Body params: username, password.  Responds 400 when either is
    missing/empty or the credentials are invalid; 200 with the token
    and basic user info on success.
    """
    username = request.data.get("username")
    password = request.data.get("password")
    # The original compared with `is ""` (identity, not equality), which
    # is unreliable and also missed None; a falsy check covers both.
    if not username or not password:
        return Response({'error': 'Debe ingresar usuario y contraseña'}, status=HTTP_400_BAD_REQUEST)
    user = authenticate(username=username, password=password)
    if user is None:
        return Response({'error': 'Credenciales inválidas'}, status=HTTP_400_BAD_REQUEST)
    token, _ = Token.objects.get_or_create(user=user)
    return Response({'token': token.key, 'username': user.username,
                     'firstName': user.first_name, 'lastName': user.last_name, 'id': user.id}, status=HTTP_200_OK)
@csrf_exempt
@api_view(["GET"])
@permission_classes((AllowAny,))
def getTokenVal(request):
    """Validate the token in the Authorization header.

    Expects 'Authorization: Token <key>'.  200 when the token exists
    and its user is active; 400 otherwise.
    """
    if request.method == 'GET':
        token = request.META['HTTP_AUTHORIZATION']
        token = token.replace('Token ', '')
        try:
            token_is_valid = Token.objects.get(key=token).user.is_active
        except Token.DoesNotExist:
            token_is_valid = False
        if token_is_valid:
            return Response({'mensaje': 'Token valido'}, status=HTTP_200_OK)
        return Response({'error': 'Token inválido'}, status=HTTP_400_BAD_REQUEST)
@csrf_exempt
@api_view(["GET"])
@permission_classes((AllowAny,))
def logout(request):
    """Delete the caller's token ('Authorization: Token <key>').

    200 when the token belonged to an active user and was removed;
    404 when it does not exist (or its user is inactive).
    """
    token = request.META['HTTP_AUTHORIZATION']
    token = token.replace('Token ', '')
    try:
        token_is_valid = Token.objects.get(key=token).user.is_active
    except Token.DoesNotExist:
        token_is_valid = False
    if token_is_valid:
        Token.objects.filter(key=token).delete()
        return Response({'mensaje': 'Sesión finalizada'}, status=HTTP_200_OK)
    return Response({'error': 'Token no existe'}, status=HTTP_404_NOT_FOUND)
@csrf_exempt
def postUser(request):
    """Create a User from a JSON body on POST.

    Required keys: username (also stored as the email), password,
    first_name, last_name.  Responds with the serialized user, or a 400
    describing the missing field / duplicate username / other error.
    Removed debug prints and an unused 'user_model = None' initializer.
    """
    if request.method == 'POST':
        try:
            json_user = json.loads(request.body.decode('utf-8'))
            username = json_user['username']
            password = json_user['password']
            first_name = json_user['first_name']
            last_name = json_user['last_name']
            user_model = User.objects.create_user(username=username, password=password)
            user_model.first_name = first_name
            user_model.last_name = last_name
            user_model.email = username
            user_model.save()
            return HttpResponse(serialize("json", [user_model]))
        except KeyError as e:
            return HttpResponseBadRequest(
                content='El campo ' + str(e) + ' es requerido.'
            )
        except IntegrityError as e:
            # The original returned None (-> 500) for integrity errors
            # other than a duplicate username; report those as 400 too.
            if 'UNIQUE constraint' in str(e):
                return HttpResponseBadRequest(
                    content='Ya existe un usuario con ese correo. '
                )
            return HttpResponseBadRequest(
                content='BAD_REQUEST: ' + str(e),
                status=HTTP_400_BAD_REQUEST
            )
        except Exception as ex:
            return HttpResponseBadRequest(
                content='BAD_REQUEST: ' + str(ex),
                status=HTTP_400_BAD_REQUEST
            )
|
{"/abcBack_app/views.py": ["/abcBack_app/serializers.py", "/abcBack_app/models.py"], "/abcBack_app/serializers.py": ["/abcBack_app/models.py"]}
|
7,679
|
dtmaciasca/abcBack
|
refs/heads/master
|
/abcBack_app/models.py
|
from django.db import models
from django.contrib.auth.models import User
import datetime
# Create your models here.
class Categoria(models.Model):
    """An event category; referenced by Evento.categoria."""
    # Display name of the category.
    nombre = models.CharField(max_length=100)
    def __str__(self):
        # Human-readable label (name + pk) for the admin / shell.
        return self.nombre + ' ' + str(self.id)
class Evento(models.Model):
    """An event created by a user, in a category, over a date range."""
    nombre = models.CharField(max_length=100)
    categoria = models.ForeignKey(Categoria, on_delete=models.CASCADE, related_name='categoria')
    lugar = models.CharField(null=False, max_length=500)
    direccion = models.CharField(null=False, max_length=500)
    fecha_inicio = models.DateField(null=False)
    fecha_fin = models.DateField(null=False)
    es_presencial = models.BooleanField()
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    # Pass the callable, not its result: 'datetime.datetime.now()' was
    # evaluated once at import time, stamping every row with the server
    # start-up time instead of its creation time.
    fecha_creacion = models.DateTimeField(default=datetime.datetime.now)
|
{"/abcBack_app/views.py": ["/abcBack_app/serializers.py", "/abcBack_app/models.py"], "/abcBack_app/serializers.py": ["/abcBack_app/models.py"]}
|
7,680
|
dtmaciasca/abcBack
|
refs/heads/master
|
/abcBack_app/serializers.py
|
from .models import Evento, Categoria
from rest_framework import serializers
class CategoriaSerializer(serializers.ModelSerializer):
    """Serializes Categoria as {id, nombre}."""
    class Meta:
        model=Categoria
        fields=('id','nombre')
class EventoSerializer(serializers.ModelSerializer):
    """Serializes Evento with its Categoria nested as a full object.

    Note: 'user' and 'fecha_creacion' are not included in the output.
    """
    # Nested representation of the related Categoria (an object, not a
    # bare pk).
    categoria = CategoriaSerializer()
    class Meta:
        model = Evento
        fields = ('id','nombre', 'categoria', 'lugar', 'direccion', 'fecha_inicio',
            'fecha_fin', 'es_presencial')
|
{"/abcBack_app/views.py": ["/abcBack_app/serializers.py", "/abcBack_app/models.py"], "/abcBack_app/serializers.py": ["/abcBack_app/models.py"]}
|
7,681
|
patrickdeyoreo/rhino-repo
|
refs/heads/master
|
/rhinoscraper/scrapers/test_file_scraper.py
|
#!/usr/bin/env python3
"""Module for TestFileScraper"""
import sys
class TestFileScraper:
    """TestFileScraper class
    Scrapes test files from any projects.
    Args:
        soup (obj): BeautifulSoup obj containing parsed link
    """
    def __init__(self, soup):
        """Initialize a test file scraper
        """
        self.soup = soup
        # Every <pre> block on the page; candidate test-file listings.
        self.pre = self.find_test_files()
    def find_test_files(self):
        """Find test files

        Returns every <pre> element of the parsed page.
        """
        return self.soup.select("pre")
    def write_test_files(self):
        """Write test files

        Scans each <pre> block for a shell transcript of the form
        'user$ cat <name>' followed by the file body, and writes that
        body to <name> in the current directory.  The parsing is
        heuristic; blocks that do not fit the pattern are reported to
        stderr and skipped.
        """
        print("> Writing test files...")
        for item in self.pre:
            # Marker indices used to classify the block; -1 = absent.
            find_c = item.text.find("main.c")
            find_html = item.text.find(".html")
            find_js = item.text.find(".js")
            find_py = item.text.find(".py")
            find_sql = item.text.find(".sql")
            find_test = item.text.find("cat")
            # find_main checks if there are main files on project page
            if find_test != -1 and any([
                i != -1 for i in
                [find_c, find_html, find_js, find_py, find_sql]
            ]):
                try:
                    # Text after 'cat ' carries the file name; text
                    # before the first '$' is the shell prompt (user).
                    name = item.text.split("cat ", 1)[1]
                    user = item.text.split("$", 1)[0]
                    if find_c != -1:
                        name = name.split(".c", 1)[0] + ".c"
                    elif find_sql != -1:
                        name = name.split(".sql", 1)[0] + ".sql"
                    elif find_js != -1:
                        name = name.split(".js", 1)[0] + ".js"
                    else:
                        name = name.split(".py", 1)[0] + ".py"
                    # html edge case test text creation
                    if find_html != -1:
                        text = item.text.split(".html")[1]
                        text = str(text.split(user, 1)[0])
                        text = text.split("\n", 1)[1]
                        name = name.split(".html", 1)[0] + ".html"
                    else:
                        # Body = text between the file name and the next
                        # shell prompt, minus the first line.
                        text = item.text.split(name, 1)[1]
                        text = text.split("\n", 1)[1]
                        text = text.split(user, 1)[0]
                    text = text.split("\n")
                    # NOTE(review): plain open/close rather than 'with';
                    # an exception mid-write leaks the handle.  Also,
                    # the .html branch below writes lines WITHOUT the
                    # newline separators -- looks buggy; confirm.
                    w_test_file = open(name, "w+")
                    for i in range(len(text) - 1):
                        if find_html != -1:
                            w_test_file.write(text[i])
                        else:
                            w_test_file.write(text[i] + '\n')
                    w_test_file.close()
                except (AttributeError, IndexError):
                    name = item.text
                    newlines = 0
                    # Checks if test file's name has more than 1 newline
                    for i in name:
                        if newlines > 1:
                            name = "[Not a test file]"
                            break
                        if i == "\n":
                            newlines += 1
                    print("* [ERROR] Could not create test file", name,
                          file=sys.stderr)
                    continue
                except IOError:
                    print("* [ERROR] Could not create a test file",
                          file=sys.stderr)
                    continue
            else:
                pass
|
{"/rhinoscraper/__init__.py": ["/rhinoscraper/rhinoproject.py", "/rhinoscraper/rhinoread.py"], "/api/v1/views/project.py": ["/rhinoscraper/__init__.py", "/api/v1/views/__init__.py"], "/api/v1/app.py": ["/api/v1/views/__init__.py"], "/api/v1/views/__init__.py": ["/api/v1/views/project.py"], "/rhinoscraper/rhinoproject.py": ["/rhinoscraper/scrapers/high_scraper.py", "/rhinoscraper/scrapers/low_scraper.py", "/rhinoscraper/scrapers/sys_scraper.py", "/rhinoscraper/scrapers/test_file_scraper.py"], "/rhinoscraper/rhinoread.py": ["/rhinoscraper/scrapers/read_scraper.py"]}
|
7,682
|
patrickdeyoreo/rhino-repo
|
refs/heads/master
|
/rhinoscraper/__init__.py
|
#!/usr/bin/env python3
"""
Provides the rhinoscraper module
"""
import requests
from bs4 import BeautifulSoup
from . rhinoproject import RhinoProject
from . rhinoread import RhinoRead
def rhinoscrape(soup, github_user, github_name):
    """Scrape the project files, then the README, from a parsed page.

    Runs RhinoProject followed by RhinoRead against the same soup.
    """
    project_scraper = RhinoProject(soup)
    project_scraper.run()
    readme_scraper = RhinoRead(soup, github_user, github_name)
    readme_scraper.run()
def get_soup(session, project):
    """Fetch a Holberton project page through `session` and parse it.

    Returns a BeautifulSoup document on a 2xx response, else None.
    """
    url = 'https://intranet.hbtn.io/projects/{}'.format(project)
    resp = session.get(url)
    if not 200 <= resp.status_code < 300:
        return None
    return BeautifulSoup(resp.content, features='html.parser')
def create_session(hbtn_user, hbtn_pass):
    """
    Log in to intranet.hbtn.io
    Return the login session, or None when authentication fails

    NOTE(review): the session is returned from inside its own 'with'
    block, so requests.Session.__exit__ closes its pooled connections
    before the caller sees it; new connections are re-created on use,
    but confirm this is intended.
    """
    auth_url = 'https://intranet.hbtn.io/auth/sign_in'
    with requests.Session() as session:
        # Fetch the sign-in form to harvest the CSRF token and the
        # submit-button ('commit') value required by the login POST.
        resp = session.get(auth_url)
        soup = BeautifulSoup(resp.content, features='html.parser')
        try:
            auth_data = {
                'user[login]': hbtn_user,
                'user[password]': hbtn_pass,
                'authenticity_token': soup.find(
                    'input', {'name': 'authenticity_token'}
                ).get('value'),
                'commit': soup.find(
                    'input', {'name': 'commit'}
                ).get('value')
            }
        except AttributeError:
            # A hidden input was missing -- fall through and return None.
            pass
        else:
            resp = session.post(auth_url, data=auth_data)
            if 200 <= resp.status_code < 300:
                return session
    return None
|
{"/rhinoscraper/__init__.py": ["/rhinoscraper/rhinoproject.py", "/rhinoscraper/rhinoread.py"], "/api/v1/views/project.py": ["/rhinoscraper/__init__.py", "/api/v1/views/__init__.py"], "/api/v1/app.py": ["/api/v1/views/__init__.py"], "/api/v1/views/__init__.py": ["/api/v1/views/project.py"], "/rhinoscraper/rhinoproject.py": ["/rhinoscraper/scrapers/high_scraper.py", "/rhinoscraper/scrapers/low_scraper.py", "/rhinoscraper/scrapers/sys_scraper.py", "/rhinoscraper/scrapers/test_file_scraper.py"], "/rhinoscraper/rhinoread.py": ["/rhinoscraper/scrapers/read_scraper.py"]}
|
7,683
|
patrickdeyoreo/rhino-repo
|
refs/heads/master
|
/api/v1/views/project.py
|
#!/usr/bin/env python3
"""
Provides RESTful API routes for Holberton
"""
import os
import shlex
import shutil
import subprocess
import tempfile
import flask
import requests
from rhinoscraper import create_session, get_soup, rhinoscrape
from . import app_views
AUTH_KEYS = {'hbtn_email', 'hbtn_password', 'hbtn_api_key', 'github_password'}
@app_views.route("/<project_id>", methods=['POST'])
def hbtn_project(project_id):
    """
    Log into holberton and retrieve a project given it's ID
    Params:
        hbtn_email
        hbtn_password
        hbtn_api_key
        github_password
    """
    auth = flask.request.get_json()
    # Non-empty set difference -> at least one required key is missing.
    if AUTH_KEYS - auth.keys():
        flask.abort(400)
    # Only the numeric student-id part of the email is accepted; the
    # holbertonschool.com domain is re-appended below.
    auth['hbtn_email'] = auth['hbtn_email'].split('@')[0]
    if not auth['hbtn_email'].isnumeric():
        flask.abort(400)
    auth['hbtn_email'] = '@'.join([
        auth['hbtn_email'], 'holbertonschool.com'
    ])
    auth['hbtn_token'] = hbtn_api_auth_token(
        auth['hbtn_email'], auth['hbtn_password'], auth['hbtn_api_key']
    )
    user = hbtn_api_user(auth['hbtn_token'])
    proj = hbtn_api_project(project_id, auth['hbtn_token'])
    # The first task's repo names the GitHub repository to push into.
    repo = proj['tasks'][0]['github_repo']
    # NOTE(review): create_session may return None on login failure,
    # which would raise here ('with None') -- confirm handling upstream.
    with create_session(auth['hbtn_email'], auth['hbtn_password']) as session:
        git_project(get_soup(session, project_id),
                    github_user=user['github_username'],
                    github_pass=auth['github_password'],
                    github_name=user['full_name'],
                    github_repo=repo)
    return (os.path.join(repo, proj['name']), 200)
def git_project(soup, github_user, github_pass, github_repo, github_name):
    """
    Scrape project files into a fresh clone of the user's repo, then
    commit and push.

    Clones into a temp dir, runs the scrapers there, commits and pushes.
    The try/finally guarantees the working directory is restored and the
    temp dir removed even when a step raises -- the original leaked both
    on failure.
    """
    giturl = 'https://{user}:{password}@github.com/{user}/{repo}.git'.format(
        user=github_user, password=github_pass, repo=github_repo
    )
    oldcwd = os.getcwd()
    tmpdir = tempfile.mkdtemp()
    gitdir = os.path.join(tmpdir, github_repo)
    try:
        cmd = 'git clone {} {}'.format(shlex.quote(giturl), shlex.quote(gitdir))
        subprocess.run(shlex.split(cmd), check=False)
        os.chdir(gitdir)
        rhinoscrape(soup, github_user, github_name)
        subprocess.run(shlex.split('git add .'), check=False)
        msg = 'Project committed by Rhino Repo'
        cmd = 'git commit -m {}'.format(shlex.quote(msg))
        subprocess.run(shlex.split(cmd), check=False)
        cmd = 'git push {}'.format(shlex.quote(giturl))
        subprocess.run(shlex.split(cmd), check=False)
    finally:
        # Restore cwd before deleting the tree we may be standing in.
        os.chdir(oldcwd)
        shutil.rmtree(tmpdir, ignore_errors=True)
def hbtn_api_auth_token(hbtn_email, hbtn_password, hbtn_api_key):
    """Request a Holberton auth token; None when the response lacks one."""
    resp = requests.post('https://intranet.hbtn.io/users/auth_token.json',
                         params={'email': hbtn_email,
                                 'password': hbtn_password,
                                 'api_key': hbtn_api_key,
                                 'scope': 'checker'})
    return resp.json().get('auth_token')
def hbtn_api_user(hbtn_auth_token):
    """Fetch the authenticated Holberton user's profile as a dict."""
    params = {'auth_token': hbtn_auth_token}
    return requests.get('https://intranet.hbtn.io/users/me.json',
                        params=params).json()
def hbtn_api_project(hbtn_project_id, hbtn_auth_token):
    """Fetch a Holberton project's metadata as a dict."""
    url = 'https://intranet.hbtn.io/projects/{}.json'.format(hbtn_project_id)
    return requests.get(url, params={'auth_token': hbtn_auth_token,
                                     'id': hbtn_project_id}).json()
|
{"/rhinoscraper/__init__.py": ["/rhinoscraper/rhinoproject.py", "/rhinoscraper/rhinoread.py"], "/api/v1/views/project.py": ["/rhinoscraper/__init__.py", "/api/v1/views/__init__.py"], "/api/v1/app.py": ["/api/v1/views/__init__.py"], "/api/v1/views/__init__.py": ["/api/v1/views/project.py"], "/rhinoscraper/rhinoproject.py": ["/rhinoscraper/scrapers/high_scraper.py", "/rhinoscraper/scrapers/low_scraper.py", "/rhinoscraper/scrapers/sys_scraper.py", "/rhinoscraper/scrapers/test_file_scraper.py"], "/rhinoscraper/rhinoread.py": ["/rhinoscraper/scrapers/read_scraper.py"]}
|
7,684
|
patrickdeyoreo/rhino-repo
|
refs/heads/master
|
/api/v1/app.py
|
#!/usr/bin/env python3
"""
Returns the status of the API
"""
from os import getenv
from flask import Flask, jsonify, make_response
from flask_cors import CORS
from api.v1.views import app_views
HOST = getenv('RHINO_API_HOST', '0.0.0.0')
# 'RHINE_API_PORT' looks like a typo for 'RHINO_API_PORT' (HOST uses the
# RHINO_ prefix).  Prefer the corrected name but keep the old one as a
# fallback so existing deployments don't break.
PORT = getenv('RHINO_API_PORT', getenv('RHINE_API_PORT', '5000'))
# Allow cross-origin requests to every blueprint route.
cors = CORS(app_views, resources={r"/*": {"origins": '*'}})
app = Flask(__name__)
app.url_map.strict_slashes = False
app.config['JSONIFY_PRETTYPRINT_REGULAR'] = True
app.register_blueprint(app_views)
@app.errorhandler(404)
def error_404(err):
    """Produce a 404 error message"""
    return make_response(jsonify(error="oops, I did it again..."), 404)
if __name__ == "__main__":
    app.run(host=HOST, port=PORT, threaded=True)
|
{"/rhinoscraper/__init__.py": ["/rhinoscraper/rhinoproject.py", "/rhinoscraper/rhinoread.py"], "/api/v1/views/project.py": ["/rhinoscraper/__init__.py", "/api/v1/views/__init__.py"], "/api/v1/app.py": ["/api/v1/views/__init__.py"], "/api/v1/views/__init__.py": ["/api/v1/views/project.py"], "/rhinoscraper/rhinoproject.py": ["/rhinoscraper/scrapers/high_scraper.py", "/rhinoscraper/scrapers/low_scraper.py", "/rhinoscraper/scrapers/sys_scraper.py", "/rhinoscraper/scrapers/test_file_scraper.py"], "/rhinoscraper/rhinoread.py": ["/rhinoscraper/scrapers/read_scraper.py"]}
|
7,685
|
patrickdeyoreo/rhino-repo
|
refs/heads/master
|
/rhinoscraper/scrapers/high_scraper.py
|
#!/usr/bin/env python3
"""Module for HighScraper"""
import os
import re
import sys
class HighScraper:
"""HighScraper class
High-Level_Programming project scraper.
Args:
soup (obj): BeautifulSoup obj containing parsed link
Attributes:
py_flag (int): For write_checker()
py_js (int): For write_checker()
"""
py_flag = 0
js_flag = 0
def __init__(self, soup):
self.soup = soup
self.file_names = self.find_files()
self.prototypes_list = self.find_prototypes()
def find_prototypes(self):
"""Method to scrape python prototypes
Has a failsafe incase there are non-python files in scraped data.
"""
res = []
find_protos = self.soup.find_all(string=re.compile("Prototype: "))
for item in find_protos:
py_proto = item.next_sibling.text
find_py = py_proto.find(":")
if find_py != 1:
res.append(py_proto)
else:
pass
return res
def find_files(self):
"""Method to scrape for python file names"""
return self.soup.find_all(string=re.compile("File: "))
def write_files(self):
"""Method to write/create python files
Has a function that creates directories if found in `file_name`.
Last function creates required files in additional directory.
"""
new_dir_files = []
file_idx = 0
one_dir_check = 0
folder_name = None
print("> Creating task files...")
for item in self.file_names:
text_file = item.next_sibling.text
try:
find_pyfile = text_file.find(".py")
find_comma = re.search('(.+?),', text_file)
# Creating sub directories if exists
find_folder = re.search(', (.+?)/', text_file)
find_dir_file = re.search('/(.+?)$', text_file)
if find_dir_file is not None:
new_dir_files.append(str(find_dir_file.group(1)))
if find_folder is not None and one_dir_check == 0:
folder_name = str(find_folder.group(1))
os.mkdir(folder_name)
one_dir_check += 1
# Handling multiple files
if "," in text_file:
create_name = str(find_comma.group(1))
make_comma = open(create_name, "w+")
make_comma.close()
elif "." not in text_file and one_dir_check != 1:
os.mkdir(text_file)
else:
w_file_name = open(text_file, "w+")
if ".py" in text_file:
self.py_flag = 1
w_file_name.write("#!/usr/bin/python3\n")
elif ".sh" in text_file:
w_file_name.write("#!/bin/bash\n")
elif ".js" in text_file:
self.js_flag = 1
w_file_name.write("#!/usr/bin/node\n")
else:
pass
# Creating prototypes in parallel with files
if find_pyfile != -1:
w_file_name.write(self.prototypes_list[file_idx])
file_idx += 1
else:
pass
w_file_name.close()
except AttributeError:
print("* [ERROR] Failed to write", text_file,
file=sys.stderr)
continue
except IOError:
print("* [ERROR] Failed to write file",
file=sys.stderr)
except IndexError:
pass
# Check if new dir created, insert files if there is
if folder_name is not None and one_dir_check == 1:
os.chdir(folder_name)
for item in new_dir_files:
if "," in item:
item_obj = re.search('/(.+?)$', text_file)
item = str(item_obj.group(1))
dir_file = open(item, "w+")
dir_file.close()
os.chdir("..")
def write_checker(self):
"""Write checker data
"""
with open("check.sh", "w") as ofile:
ofile.write("#!/usr/bin/env bash\n")
if self.js_flag == 1:
ofile.write("semistandard --fix ")
if self.py_flag == 1:
ofile.write("pep8 ")
if self.file_names:
for i in self.file_names:
ofile.write('"%s" ' % i.next_sibling.text)
|
{"/rhinoscraper/__init__.py": ["/rhinoscraper/rhinoproject.py", "/rhinoscraper/rhinoread.py"], "/api/v1/views/project.py": ["/rhinoscraper/__init__.py", "/api/v1/views/__init__.py"], "/api/v1/app.py": ["/api/v1/views/__init__.py"], "/api/v1/views/__init__.py": ["/api/v1/views/project.py"], "/rhinoscraper/rhinoproject.py": ["/rhinoscraper/scrapers/high_scraper.py", "/rhinoscraper/scrapers/low_scraper.py", "/rhinoscraper/scrapers/sys_scraper.py", "/rhinoscraper/scrapers/test_file_scraper.py"], "/rhinoscraper/rhinoread.py": ["/rhinoscraper/scrapers/read_scraper.py"]}
|
7,686
|
patrickdeyoreo/rhino-repo
|
refs/heads/master
|
/api/v1/views/__init__.py
|
#!/usr/bin/python3
"""
initializes the api views from a flask blueprint set up in api/v1/app.py
"""
from flask import Blueprint
# All API routes hang off this blueprint under the /api/v1 prefix.
app_views = Blueprint('app_views', __name__, url_prefix='/api/v1')
# Imported AFTER app_views exists: project.py registers its routes on
# the blueprint at import time (deliberate circular-import ordering).
from api.v1.views.project import *
|
{"/rhinoscraper/__init__.py": ["/rhinoscraper/rhinoproject.py", "/rhinoscraper/rhinoread.py"], "/api/v1/views/project.py": ["/rhinoscraper/__init__.py", "/api/v1/views/__init__.py"], "/api/v1/app.py": ["/api/v1/views/__init__.py"], "/api/v1/views/__init__.py": ["/api/v1/views/project.py"], "/rhinoscraper/rhinoproject.py": ["/rhinoscraper/scrapers/high_scraper.py", "/rhinoscraper/scrapers/low_scraper.py", "/rhinoscraper/scrapers/sys_scraper.py", "/rhinoscraper/scrapers/test_file_scraper.py"], "/rhinoscraper/rhinoread.py": ["/rhinoscraper/scrapers/read_scraper.py"]}
|
7,687
|
patrickdeyoreo/rhino-repo
|
refs/heads/master
|
/rhinoscraper/rhinoproject.py
|
#!/usr/bin/env python3
"""
Provides a class to scrape project data and create project files
"""
import os
import re
import shutil
import stat
import sys
from bs4 import BeautifulSoup
from . scrapers.high_scraper import HighScraper
from . scrapers.low_scraper import LowScraper
from . scrapers.sys_scraper import SysScraper
from . scrapers.test_file_scraper import TestFileScraper
class RhinoProject:
    """
    Definition of a class to scrape project data and create project files
    """
    def __init__(self, soup):
        """
        Instantiate a RhinoProject with a BeautifulSoup object

        Args:
            soup (BeautifulSoup): parsed HTML of a Holberton project page

        Raises:
            TypeError: if soup is not a BeautifulSoup instance
        """
        if not isinstance(soup, BeautifulSoup):
            raise TypeError("'soup' must be a 'BeautifulSoup'")
        self.soup = soup
        self.project_name = self.scrape_name()
        self.project_type = self.scrape_type()

    def scrape_name(self):
        """
        Scrape the project directory name by locating 'Directory:'
        Return the project directory name

        Raises:
            ValueError: if no 'Directory:' label is found in the page
        """
        pattern = re.compile(r'^directory:\s+', flags=re.I)
        element = self.soup.find(string=pattern)
        if element is None:
            raise ValueError('Unable to determine project name')
        return element.next_element.text

    def scrape_type(self):
        """
        Scrape the project type by locating 'GitHub repository:'
        Return the project type

        Raises:
            ValueError: if no 'GitHub repository:' label is found
        """
        pattern = re.compile(r'^github\s+repository:\s+', flags=re.I)
        element = self.soup.find(string=pattern)
        if element is None:
            raise ValueError('Unable to determine project type')
        return element.next_sibling.text

    def run(self):
        """
        Scrape project data based on the project type and write project files
        Return an absolute path to the project directory

        Raises:
            ValueError: if the project type matches none of the known kinds
        """
        olddir = os.getcwd()
        os.mkdir(self.project_name)
        os.chdir(self.project_name)
        newdir = os.getcwd()
        try:
            if re.search(r'-high', self.project_type):
                task_scraper = HighScraper(self.soup)
            elif re.search(r'-low', self.project_type):
                task_scraper = LowScraper(self.soup)
            elif re.search(r'-sys', self.project_type):
                task_scraper = SysScraper(self.soup)
            else:
                raise ValueError('Invalid project type')
            test_scraper = TestFileScraper(self.soup)
            task_scraper.write_files()
            test_scraper.write_test_files()
            # Make generated files rwx for the owner, read-only otherwise.
            for name in os.listdir():
                try:
                    os.chmod(name, stat.S_IRWXU | stat.S_IRGRP | stat.S_IROTH)
                except OSError:
                    pass
        except Exception:
            # Leave no half-written project directory behind on failure.
            shutil.rmtree(newdir, ignore_errors=True)
            raise
        finally:
            os.chdir(olddir)
        # Bug fix: the docstring promises an absolute path to the project
        # directory, but the original implementation returned None.
        return newdir
|
{"/rhinoscraper/__init__.py": ["/rhinoscraper/rhinoproject.py", "/rhinoscraper/rhinoread.py"], "/api/v1/views/project.py": ["/rhinoscraper/__init__.py", "/api/v1/views/__init__.py"], "/api/v1/app.py": ["/api/v1/views/__init__.py"], "/api/v1/views/__init__.py": ["/api/v1/views/project.py"], "/rhinoscraper/rhinoproject.py": ["/rhinoscraper/scrapers/high_scraper.py", "/rhinoscraper/scrapers/low_scraper.py", "/rhinoscraper/scrapers/sys_scraper.py", "/rhinoscraper/scrapers/test_file_scraper.py"], "/rhinoscraper/rhinoread.py": ["/rhinoscraper/scrapers/read_scraper.py"]}
|
7,688
|
patrickdeyoreo/rhino-repo
|
refs/heads/master
|
/rhinoscraper/scrapers/sys_scraper.py
|
#!/usr/bin/env python3
"""Module for SysScraper"""
import re
import sys
class SysScraper:
    """SysScraper class
    System-Engineering_Devops project scraper.
    Args:
        soup (obj): BeautifulSoup obj containing parsed link
    Attributes:
        ruby_check (str): if ruby exists, assign to 0. Else scrape empty list
        file_names (list): scraped file names from find_files()
    """
    def __init__(self, soup):
        self.soup = soup
        self.file_names = self.find_files()
        self.ruby_check = self.ruby_checker()
    def ruby_checker(self):
        """Check whether the project page mentions ruby files.
        Returns 0 when an 'env ruby' shebang is present, otherwise the
        (empty) result list itself.
        """
        matches = self.soup.find_all(string=re.compile("env ruby"))
        return 0 if matches != [] else matches
    def find_files(self):
        """Scrape the page for 'File: ' labels naming bash/ruby files."""
        return self.soup.find_all(string=re.compile("File: "))
    def write_files(self):
        """Create each scraped task file with the appropriate shebang."""
        print("> Creating task files...")
        for entry in self.file_names:
            try:
                handle = open(entry.next_sibling.text, "w")
                # Pick the shebang: ruby project > python file > bash.
                if self.ruby_check == 0:
                    shebang = "#!/usr/bin/env ruby\n"
                elif ".py" in entry.next_sibling.text:
                    shebang = "#!/usr/bin/python3\n"
                else:
                    shebang = "#!/usr/bin/env bash\n"
                handle.write(shebang)
                handle.close()
            except (AttributeError, IndexError):
                try:
                    print("* [ERROR] Failed to write", entry.next_sibling.text,
                          file=sys.stderr)
                except AttributeError:
                    print("* [ERROR] Failed to find task files",
                          file=sys.stderr)
                continue
|
{"/rhinoscraper/__init__.py": ["/rhinoscraper/rhinoproject.py", "/rhinoscraper/rhinoread.py"], "/api/v1/views/project.py": ["/rhinoscraper/__init__.py", "/api/v1/views/__init__.py"], "/api/v1/app.py": ["/api/v1/views/__init__.py"], "/api/v1/views/__init__.py": ["/api/v1/views/project.py"], "/rhinoscraper/rhinoproject.py": ["/rhinoscraper/scrapers/high_scraper.py", "/rhinoscraper/scrapers/low_scraper.py", "/rhinoscraper/scrapers/sys_scraper.py", "/rhinoscraper/scrapers/test_file_scraper.py"], "/rhinoscraper/rhinoread.py": ["/rhinoscraper/scrapers/read_scraper.py"]}
|
7,689
|
patrickdeyoreo/rhino-repo
|
refs/heads/master
|
/integrate.py
|
#!/usr/bin/python3
"""
Flask App that integrates with Rhino-Repo static HTML Template
"""
# NOTE(review): url_for is imported but unused in this module.
from flask import Flask, render_template, url_for
# flask setup
app = Flask(__name__)
app.url_map.strict_slashes = False
# Bind address/port for the template-serving app; port 5001 presumably
# avoids clashing with the API app -- TODO confirm against api/v1/app.py.
port = 5001
host = '0.0.0.0'
@app.route('/')
def index():
    """
    Render the landing-page template (index.html).
    """
    return render_template('index.html')
@app.route('/credent/')
def credent():
    """
    Render the credentials-entry template (credent.html).
    """
    return render_template('credent.html')
@app.route('/done/<repo>')
def done(repo):
    """
    Render the completion template, passing the repo name to done.html.
    """
    return render_template('done.html', repo=repo)
@app.route('/404/')
def error_404():
    """
    Render the not-found template (404.html).
    """
    return render_template('404.html')
if __name__ == "__main__":
    """
    MAIN Flask App"""
    app.run(host=host, port=port)
|
{"/rhinoscraper/__init__.py": ["/rhinoscraper/rhinoproject.py", "/rhinoscraper/rhinoread.py"], "/api/v1/views/project.py": ["/rhinoscraper/__init__.py", "/api/v1/views/__init__.py"], "/api/v1/app.py": ["/api/v1/views/__init__.py"], "/api/v1/views/__init__.py": ["/api/v1/views/project.py"], "/rhinoscraper/rhinoproject.py": ["/rhinoscraper/scrapers/high_scraper.py", "/rhinoscraper/scrapers/low_scraper.py", "/rhinoscraper/scrapers/sys_scraper.py", "/rhinoscraper/scrapers/test_file_scraper.py"], "/rhinoscraper/rhinoread.py": ["/rhinoscraper/scrapers/read_scraper.py"]}
|
7,690
|
patrickdeyoreo/rhino-repo
|
refs/heads/master
|
/rhinoscraper/rhinoread.py
|
#!/usr/bin/env python3
"""
Provides a class to scrape project data and produce a README
"""
from . scrapers.read_scraper import ReadScraper
class RhinoRead(ReadScraper):
    """
    Definition of a class to scrape project data and create project files
    """
    def __init__(self, soup, github_user, github_name):
        """
        Instantiate a RhinoRead object
        """
        super().__init__(soup)
        # Validate both credentials the same way, user first.
        for label, value in (('user', github_user), ('name', github_name)):
            if not isinstance(value, str):
                raise TypeError("'{}' must be a 'str'".format(label))
        self.user = github_user
        self.name = github_name
    def run(self):
        """
        Scrape project data and create a README
        """
        # Write README.md with scraped data
        self.open_readme()
        self.write_title()
        self.write_info()
        self.write_tasks()
        self.write_footer(self.name, self.user,
                          'https://github.com/{}'.format(self.user))
|
{"/rhinoscraper/__init__.py": ["/rhinoscraper/rhinoproject.py", "/rhinoscraper/rhinoread.py"], "/api/v1/views/project.py": ["/rhinoscraper/__init__.py", "/api/v1/views/__init__.py"], "/api/v1/app.py": ["/api/v1/views/__init__.py"], "/api/v1/views/__init__.py": ["/api/v1/views/project.py"], "/rhinoscraper/rhinoproject.py": ["/rhinoscraper/scrapers/high_scraper.py", "/rhinoscraper/scrapers/low_scraper.py", "/rhinoscraper/scrapers/sys_scraper.py", "/rhinoscraper/scrapers/test_file_scraper.py"], "/rhinoscraper/rhinoread.py": ["/rhinoscraper/scrapers/read_scraper.py"]}
|
7,691
|
patrickdeyoreo/rhino-repo
|
refs/heads/master
|
/rhinoscraper/scrapers/read_scraper.py
|
#!/usr/bin/env python3
"""Module for ReadScraper"""
import os
import re
import sys
import requests
from bs4 import BeautifulSoup, Comment
class ReadScraper:
    """ReadScraper class
    README.md scraper
    Args:
        soup (obj): BeautifulSoup obj containing parsed link
    Attributes:
        title (str): project title text taken from the first <h1>
        repo_name (obj): element following the 'GitHub repository: ' label
        dir_name (str): project directory name, or "" when not found
        prj_info: learning-objective lines (list), or "" on failure
        file_names (list): scraped task file names, or None on failure
        task_names (list): scraped task titles, or None on failure
        task_info (list): scraped task descriptions, or None on failure
    """
    # Flag flipped to 1 when the project directory cannot be scraped.
    big_project_type = 0
    # Class-level defaults; task_info is overwritten per instance in
    # __init__ and readme is assigned by open_readme().
    task_info = []
    readme = None
    def __init__(self, soup):
        # Eagerly scrape everything; the write_* methods only consume
        # the attributes populated here.
        self.soup = soup
        self.title = self.find_title()
        self.repo_name = self.find_repo_name()
        self.dir_name = self.check_big_project()
        self.prj_info = self.find_learning()
        self.file_names = self.find_files()
        self.task_names = self.find_tasks()
        self.task_info = self.find_task_de()
    def find_title(self):
        """Method that finds title of project (text of the first <h1>)"""
        prj_title = self.soup.find("h1")
        return prj_title.text
    def find_repo_name(self):
        """Method that finds the repository name
        Returns the element following the 'GitHub repository: ' label.
        """
        r_name = self.soup.find(string=re.compile("GitHub repository: "))
        return r_name.next_element
    def check_big_project(self):
        """Method that checks if project is a big one
        Returns the directory name, or "" (and sets big_project_type=1)
        when no hyphenated directory entry can be located.
        """
        try:
            tmp = self.repo_name.find_next("li").next_element.next_element.text
            # Directory names are expected to contain a '-'; anything
            # else is treated as a scrape failure.
            if "-" in tmp:
                return tmp
            raise AttributeError
        except AttributeError:
            print("* [ERROR] Failed to scrape project directory",
                  file=sys.stderr)
            self.big_project_type = 1
            return ""
    def find_learning(self):
        """Method that finds the learning objectives
        Returns the objective text split into lines, or "" on failure.
        """
        try:
            h2 = self.soup.find("h2", string=re.compile("Learning Objectives"))
            # Fragile chain: walks from the following <h3> into its text
            # block -- depends on the exact intranet page layout.
            h3 = h2.find_next("h3").next_element.next_element.next_element.text
            return h3.splitlines()
        except AttributeError:
            print("* [ERROR] Failed to scrape learning objectives",
                  file=sys.stderr)
            return ""
    def find_files(self):
        """Method that finds file names
        Returns a list of file names, or None on failure.
        """
        temp = []
        try:
            file_list = self.soup.find_all(string=re.compile("File: "))
            for idx in file_list:
                file_text = idx.next_sibling.text
                # Finding comma index for multiple files listed
                find_comma = file_text.find(",")
                if find_comma != -1:
                    # Keep only the first file of a comma-separated list.
                    temp.append(file_text[:find_comma])
                else:
                    temp.append(file_text)
            return temp
        except (IndexError, AttributeError):
            print("* [ERROR] Failed to scrape project filenames",
                  file=sys.stderr)
            return None
    def find_tasks(self):
        """Method that finds task names
        Returns a list of task titles, or None on failure.
        """
        temp = []
        try:
            task_list = self.soup.find_all("h4", class_="task")
            for idx in task_list:
                item = idx.next_element.strip("\n").strip()
                temp.append(item)
            return temp
        except (IndexError, AttributeError):
            print("* [ERROR] Failed to scrape task titles",
                  file=sys.stderr)
            return None
    def find_task_de(self):
        """Method that finds the task descriptions
        Scans HTML comments for ' Task Body ' markers and collects the
        text that follows each; returns None on failure.
        """
        temp = []
        try:
            info_list = self.soup.find_all(
                string=lambda text: isinstance(text, Comment))
            for comments in info_list:
                if comments == " Task Body ":
                    info_text = comments.next_element.next_element.text
                    temp.append(info_text)
            return temp
        except (IndexError, AttributeError):
            print("* [ERROR] Failed to scrape task descriptions",
                  file=sys.stderr)
            return None
    def open_readme(self):
        """Method that opens the README.md file
        Prefers <dir_name>/README.md; falls back to ./README.md when the
        directory is unknown or cannot be opened.
        """
        try:
            if self.big_project_type == 1:
                raise IOError
            filename = self.dir_name + "/README.md"
            self.readme = open(filename, "w+")
        except IOError:
            self.readme = open("README.md", "w")
    def write_title(self):
        """Method that writes the title to README.md"""
        print("> Writing project title...")
        self.readme.write("# {}\n".format(self.title))
        self.readme.write("\n")
    def write_info(self):
        """Method that writes project info to README.md"""
        print("> Writing learning objectives...")
        self.readme.write("## Description\n")
        self.readme.write("What you should learn from this project:\n")
        try:
            for item in self.prj_info:
                # Blank objective lines are kept as blank lines; the rest
                # become bullet points.
                if len(item) == 0:
                    self.readme.write("{}\n".format(item))
                    continue
                self.readme.write("* {}\n".format(item))
        except (AttributeError, IndexError, UnicodeEncodeError):
            print("* [ERROR] Failed to write learning objectives",
                  file=sys.stderr)
        self.readme.write("\n")
        self.readme.write("---\n")
    def write_tasks(self):
        """Method that writes the entire tasks to README.md
        Skipped entirely if any of the three scraped lists is None.
        """
        if (self.task_names is not None and self.file_names is not None and
                self.task_info is not None):
            print("> Writing task information...")
            count = 0
            while count < len(self.task_names):
                try:
                    self.readme.write("\n")
                    self.readme.write(
                        "### [{}](./{})\n".format(
                            self.task_names[count], self.file_names[count]))
                    self.readme.write("* {}\n".format(self.task_info[count]))
                    self.readme.write("\n")
                    count += 1
                except IndexError:
                    # file_names/task_info may be shorter than task_names;
                    # report and move on to the next task.
                    print("* [ERROR] Could not write", self.task_names[count],
                          file=sys.stderr)
                    count += 1
                    continue
    def write_footer(self, author, user, git_link):
        """Method that writes the footer to README.md"""
        print("> Writing author information...")
        self.readme.write("---\n")
        self.readme.write("\n")
        self.readme.write("## Author\n")
        self.readme.write("* **{}** - ".format(author))
        self.readme.write("[{}]".format(user))
        self.readme.write("({})".format(git_link))
|
{"/rhinoscraper/__init__.py": ["/rhinoscraper/rhinoproject.py", "/rhinoscraper/rhinoread.py"], "/api/v1/views/project.py": ["/rhinoscraper/__init__.py", "/api/v1/views/__init__.py"], "/api/v1/app.py": ["/api/v1/views/__init__.py"], "/api/v1/views/__init__.py": ["/api/v1/views/project.py"], "/rhinoscraper/rhinoproject.py": ["/rhinoscraper/scrapers/high_scraper.py", "/rhinoscraper/scrapers/low_scraper.py", "/rhinoscraper/scrapers/sys_scraper.py", "/rhinoscraper/scrapers/test_file_scraper.py"], "/rhinoscraper/rhinoread.py": ["/rhinoscraper/scrapers/read_scraper.py"]}
|
7,692
|
patrickdeyoreo/rhino-repo
|
refs/heads/master
|
/rhinoscraper/tests/test_base_parse.py
|
#!/usr/bin/env python3
"""Unittest for LowScraper"""
import unittest
from scrapers import *
class TestBaseParse(unittest.TestCase):
    """Tests for BaseParse.

    NOTE(review): setUp fetches a live intranet URL, so these tests
    presumably require network access and valid credentials -- confirm
    before running in CI.
    """
    def setUp(self):
        # Fresh parser per test; get_json() presumably performs the HTTP
        # fetch -- TODO confirm against BaseParse.
        self.parse = BaseParse("https://intranet.hbtn.io/projects/232")
        self.parse.get_json()
    def tearDown(self):
        del self.parse
    def test_base_object(self):
        """BaseParse instantiates and its repr names its class."""
        self.assertIsNotNone(self.parse)
        self.assertIsInstance(self.parse, object)
        self.assertIn("scrapers.base_parse.BaseParse", str(self.parse))
    def test_json_data(self):
        """The fetched payload is parsed into a dict."""
        self.assertIsInstance(self.parse.json_data, dict)
    def test_get_soup(self):
        """get_soup() produces a BeautifulSoup object."""
        self.parse.get_soup()
        self.assertIsNotNone(self.parse.soup)
        self.assertIsInstance(self.parse.soup, object)
        self.assertIn("bs4.BeautifulSoup", str(self.parse.soup.__class__))
|
{"/rhinoscraper/__init__.py": ["/rhinoscraper/rhinoproject.py", "/rhinoscraper/rhinoread.py"], "/api/v1/views/project.py": ["/rhinoscraper/__init__.py", "/api/v1/views/__init__.py"], "/api/v1/app.py": ["/api/v1/views/__init__.py"], "/api/v1/views/__init__.py": ["/api/v1/views/project.py"], "/rhinoscraper/rhinoproject.py": ["/rhinoscraper/scrapers/high_scraper.py", "/rhinoscraper/scrapers/low_scraper.py", "/rhinoscraper/scrapers/sys_scraper.py", "/rhinoscraper/scrapers/test_file_scraper.py"], "/rhinoscraper/rhinoread.py": ["/rhinoscraper/scrapers/read_scraper.py"]}
|
7,693
|
patrickdeyoreo/rhino-repo
|
refs/heads/master
|
/rhinoscraper/scrapers/low_scraper.py
|
#!/usr/bin/env python3
"""Module for LowScraper"""
import re
import sys
from bs4 import BeautifulSoup
PUTCHAR = """
#include <unistd.h>
/**
* _putchar - writes the character c to stdout
* @c: The character to print
*
* Return: On success 1.
* On error, -1 is returned, and errno is set appropriately.
*/
int _putchar(char c)
{
\treturn (write(1, &c, 1));
}
"""
class LowScraper:
    """
    Low-Level_Programming project scraper.
    Public instance methods:
        detect_putchar
        scrape_prototypes
        scrape_header
        write_putchar
        write_header
        write_files
    Public instance attributes:
        putchar_required: bool: is a custom putchar required
        header: str: name of C header file (None when absent)
        prototypes: list: function prototypes
        files: list: project files
    """
    def __init__(self, soup):
        """
        Instantiate a LowScraper with a BeautifulSoup object
        Args:
            soup: BeautifulSoup: Parsed HTML from a Holberton project
        Raises:
            TypeError: if soup is not a BeautifulSoup
        """
        if not isinstance(soup, BeautifulSoup):
            raise TypeError("'soup' must be a 'BeautifulSoup'")
        self.soup = soup
        self.detect_putchar()
        self.scrape_header()
        self.scrape_prototypes()
        self.scrape_files()
    def detect_putchar(self):
        """
        Check if custom '_putchar' is required
        Returns:
            bool: True when the page allows exactly '_putchar'
        """
        print("> Checking if a custom '_putchar' is required...")
        regex = re.compile(r'^you\s+are\s+allowed\s+to\s+use\b', flags=re.I)
        match = self.soup.find(string=regex)
        try:
            self.putchar_required = match.next_sibling.text == '_putchar'
        except (AttributeError, TypeError, ValueError):
            # Bug fix: when the label is absent, `match` is None and the
            # attribute access raises AttributeError, which the original
            # clause did not catch (scrape_header already handles the
            # analogous case with AttributeError).
            self.putchar_required = False
        return self.putchar_required
    def scrape_header(self):
        """
        Scrape C header file name
        Returns:
            The header name element, or None when not found.
        """
        print("> Scraping name of header file...")
        try:
            regex = re.compile(r'\bforget\s+to\s+push\s+your\s+header\s+file')
            match = self.soup.find(string=regex).previous_element
            self.header = match.previous_element.previous_element
        except AttributeError:
            self.header = None
        return self.header
    def scrape_prototypes(self):
        """
        Scrape C prototypes
        Returns:
            list: prototypes (with '_putchar' prepended when required)
        """
        print("> Scraping function prototypes...")
        if self.putchar_required:
            self.prototypes = ['int _putchar(char c)']
        else:
            self.prototypes = []
        regex = re.compile(r"\bprototype:\s", flags=re.I)
        self.prototypes.extend([element.next_sibling.text.replace(';', '') for
                                element in self.soup.find_all(string=regex)])
        return self.prototypes
    def scrape_files(self):
        """
        Scrape C file names
        Returns:
            list: 'File: ' label elements (empty list when none found)
        """
        print("> Scraping file names...")
        regex = re.compile(r'\bfile: ', flags=re.I)
        self.files = self.soup.find_all(string=regex) or []
        return self.files
    def write_putchar(self):
        """
        Write '_putchar' if required
        """
        if self.putchar_required:
            print("> Writing '_putchar.c'...")
            try:
                with open('_putchar.c', 'w') as ofile:
                    print(PUTCHAR.strip(), file=ofile)
            except OSError:
                pass
    def write_header(self):
        """
        Write C header file (if required), complete with include guards
        and one declaration per scraped prototype.
        """
        if self.header:
            print("> Writing header file... ")
            try:
                include_guard = self.header.replace('.', '_', 1).upper()
                prototypes = ['{};'.format(s) for s in self.prototypes]
                with open(self.header, 'w') as ofile:
                    print('#ifndef {}'.format(include_guard), file=ofile)
                    print('#define {}'.format(include_guard), file=ofile)
                    print('', file=ofile)
                    print('#include <stdio.h>', file=ofile)
                    print('#include <stdlib.h>', file=ofile)
                    print('', file=ofile)
                    print(*prototypes, sep='\n', file=ofile)
                    print('', file=ofile)
                    print('#endif /* {} */'.format(include_guard), file=ofile)
            except (AttributeError, OSError):
                print("* [ERROR] Failed to write header file", self.header,
                      file=sys.stderr)
    def write_files(self):
        """
        Write project files
        Handles multiple file names by searching for ','.
        NOTE(review): files are only written when a header was scraped --
        confirm that header-less C projects are intentionally skipped.
        """
        self.write_header()
        self.write_putchar()
        print("> Writing task files...")
        for element, prototype in zip(self.files, self.prototypes):
            # Bug fix: pre-bind filename so the error path below cannot
            # raise NameError when element.next_sibling itself fails.
            filename = None
            try:
                filename = element.next_sibling.text.split(",")[0]
                # Extract the bare function name from the prototype
                # (strip return type and any leading '*').
                funcname = prototype.split("(", maxsplit=1)[0].split(" ")
                funcname = funcname[len(funcname) - 1].split("*")
                funcname = funcname[len(funcname) - 1]
                if self.header is not None:
                    with open(filename, 'w') as ofile:
                        print('#include', self.header, file=ofile)
                        print('', file=ofile)
                        print('/**', file=ofile)
                        print(' *', funcname, '-', file=ofile)
                        print(' *', file=ofile)
                        print(' * Return:', file=ofile)
                        print(' */', file=ofile)
                        print(prototype, file=ofile)
                        print('{', file=ofile)
                        print('', file=ofile)
                        print('}', file=ofile)
            except (AttributeError, OSError):
                print("* [ERROR] Failed to write task file", filename,
                      file=sys.stderr)
    def write_checker(self):
        """
        Write betty style checker
        """
        try:
            line = ['betty']
            if self.header:
                line.append(self.header)
            if self.files:
                line.extend([item.next_sibling.text for item in self.files])
            with open('check.sh', 'w') as ofile:
                print('#!/usr/bin/env bash', file=ofile)
                print(*line, file=ofile)
        except (OSError, TypeError, ValueError):
            pass
|
{"/rhinoscraper/__init__.py": ["/rhinoscraper/rhinoproject.py", "/rhinoscraper/rhinoread.py"], "/api/v1/views/project.py": ["/rhinoscraper/__init__.py", "/api/v1/views/__init__.py"], "/api/v1/app.py": ["/api/v1/views/__init__.py"], "/api/v1/views/__init__.py": ["/api/v1/views/project.py"], "/rhinoscraper/rhinoproject.py": ["/rhinoscraper/scrapers/high_scraper.py", "/rhinoscraper/scrapers/low_scraper.py", "/rhinoscraper/scrapers/sys_scraper.py", "/rhinoscraper/scrapers/test_file_scraper.py"], "/rhinoscraper/rhinoread.py": ["/rhinoscraper/scrapers/read_scraper.py"]}
|
7,694
|
fbarbu15/-number_generator
|
refs/heads/master
|
/main.py
|
'''
Created on Dec 21, 2019
@author: Florin

Counts how many of 40 million generated pairs match on the tail of
their binary representations (Advent-of-Code-style generator duel).
'''
from time import time
from libs.number_generator import NumberGenerator
from resources.variables import START_A, MULTIPLY_FACTOR_A, START_B, \
    MULTIPLY_FACTOR_B
# Two independent generator streams seeded from the configured constants.
num_gen_a = NumberGenerator(START_A, MULTIPLY_FACTOR_A)
num_gen_b = NumberGenerator(START_B, MULTIPLY_FACTOR_B)
total_count = 0
start_time = time()
# NOTE(review): bin(x)[-16:] is only a low-16-bit comparison when both
# values are >= 2**15; smaller values yield shorter strings that still
# carry the '0b' prefix -- confirm this matches the intended semantics.
for _ in range(40000000):  # This runs in 80 seconds on my machine
    if bin(next(num_gen_a))[-16:] == bin(next(num_gen_b))[-16:]:
        total_count += 1
print("<{0}> matching pairs found for START A:{1} and B:{2} values. Execution time: {3} seconds".format(total_count, START_A, START_B, time() - start_time))
|
{"/main.py": ["/libs/number_generator.py", "/resources/variables.py"], "/tests/test_first_five_pairs.py": ["/libs/number_generator.py", "/resources/variables.py"], "/tests/test_binary.py": ["/libs/number_generator.py", "/resources/variables.py"], "/libs/number_generator.py": ["/resources/variables.py"]}
|
7,695
|
fbarbu15/-number_generator
|
refs/heads/master
|
/tests/test_first_five_pairs.py
|
'''
Created on Dec 21, 2019
@author: Florin
'''
import pytest
from libs.number_generator import NumberGenerator
from resources.variables import MULTIPLY_FACTOR_A, MULTIPLY_FACTOR_B
class TestsFirstFive():
    """Pin the first five values emitted by each generator stream."""
    @pytest.fixture(scope="function", autouse=True)
    def _setup_teardown_test(self):
        # Fresh accumulator per test.
        self.first_five_values = []
    def test_generator_a(self):
        """Generator A (seed 65) produces the expected first five values."""
        num_gen_a = NumberGenerator(65, MULTIPLY_FACTOR_A)
        for _ in range(5):
            self.first_five_values.append(next(num_gen_a))
        assert self.first_five_values[0] == 1092455, "First value not generated correctly. Got <{0}> but expected <{1}>".format(self.first_five_values[0], 1092455)
        assert self.first_five_values[1] == 1181022009, "Second value not generated correctly. Got <{0}> but expected <{1}>".format(self.first_five_values[1], 1181022009)
        assert self.first_five_values[2] == 245556042, "Third value not generated correctly. Got <{0}> but expected <{1}>".format(self.first_five_values[2], 245556042)
        assert self.first_five_values[3] == 1744312007, "Fourth value not generated correctly. Got <{0}> but expected <{1}>".format(self.first_five_values[3], 1744312007)
        assert self.first_five_values[4] == 1352636452, "Fifth value not generated correctly. Got <{0}> but expected <{1}>".format(self.first_five_values[4], 1352636452)
    def test_generator_b(self):
        """Generator B (seed 8921) produces the expected first five values."""
        num_gen_b = NumberGenerator(8921, MULTIPLY_FACTOR_B)
        for _ in range(5):
            self.first_five_values.append(next(num_gen_b))
        assert self.first_five_values[0] == 430625591, "First value not generated correctly. Got <{0}> but expected <{1}>".format(self.first_five_values[0], 430625591)
        assert self.first_five_values[1] == 1233683848, "Second value not generated correctly. Got <{0}> but expected <{1}>".format(self.first_five_values[1], 1233683848)
        assert self.first_five_values[2] == 1431495498, "Third value not generated correctly. Got <{0}> but expected <{1}>".format(self.first_five_values[2], 1431495498)
        assert self.first_five_values[3] == 137874439, "Fourth value not generated correctly. Got <{0}> but expected <{1}>".format(self.first_five_values[3], 137874439)
        assert self.first_five_values[4] == 285222916, "Fifth value not generated correctly. Got <{0}> but expected <{1}>".format(self.first_five_values[4], 285222916)
|
{"/main.py": ["/libs/number_generator.py", "/resources/variables.py"], "/tests/test_first_five_pairs.py": ["/libs/number_generator.py", "/resources/variables.py"], "/tests/test_binary.py": ["/libs/number_generator.py", "/resources/variables.py"], "/libs/number_generator.py": ["/resources/variables.py"]}
|
7,696
|
fbarbu15/-number_generator
|
refs/heads/master
|
/resources/variables.py
|
'''
Created on Dec 21, 2019
@author: Florin

Configuration constants for the number-generator experiment.
'''
import os
# Start A and B can be passed as environment variables (from CI build params for example)
# Bug fix: os.getenv returns a str when the variable IS set in the
# environment, which would make NumberGenerator perform string
# repetition instead of arithmetic -- cast to int unconditionally.
START_A = int(os.getenv("START_A", 65))
START_B = int(os.getenv("START_B", 8921))
# Multipliers and modulus of the multiplicative congruential generators.
MULTIPLY_FACTOR_A = 16807
MULTIPLY_FACTOR_B = 48271
DIVIDE_FACTOR = 2147483647
|
{"/main.py": ["/libs/number_generator.py", "/resources/variables.py"], "/tests/test_first_five_pairs.py": ["/libs/number_generator.py", "/resources/variables.py"], "/tests/test_binary.py": ["/libs/number_generator.py", "/resources/variables.py"], "/libs/number_generator.py": ["/resources/variables.py"]}
|
7,697
|
fbarbu15/-number_generator
|
refs/heads/master
|
/tests/test_binary.py
|
'''
Created on Dec 21, 2019
@author: Florin
'''
import pytest
from libs.number_generator import NumberGenerator
from resources.variables import MULTIPLY_FACTOR_A, MULTIPLY_FACTOR_B
class TestsBinary():
    """Tests of the binary-tail pair matching over both generators."""
    @pytest.fixture(scope="function", autouse=True)
    def _setup_teardown_test(self):
        # Fresh generator pair per test, seeded with the known values.
        self.num_gen_a = NumberGenerator(65, MULTIPLY_FACTOR_A)
        self.num_gen_b = NumberGenerator(8921, MULTIPLY_FACTOR_B)
    def test_first_five_pairs(self):
        """Only the 3rd of the first five pairs matches on its tail."""
        for i in range(5):
            if i == 2:
                assert bin(next(self.num_gen_a))[-16:] == bin(next(self.num_gen_b))[-16:], \
                    "Least significant 16 bits should've matched for the 3rd pair"
            else:
                assert bin(next(self.num_gen_a))[-16:] != bin(next(self.num_gen_b))[-16:], \
                    "Least significant 16 bits should match only for the 3rd pair, not here"
    def test_count_match_on_40_million_pairs(self):
        """Full 40-million-pair run yields exactly 588 matches.
        NOTE(review): long-running test (~80s) -- consider marking slow.
        """
        total_count = 0
        for _ in range(40000000):  # This runs in 80 seconds on my machine
            if bin(next(self.num_gen_a))[-16:] == bin(next(self.num_gen_b))[-16:]:
                total_count += 1
        assert total_count == 588, "Total matching pairs in 40 million rows is incorrect. Got <{0}> but expected <{1}>".format(total_count, 588)
|
{"/main.py": ["/libs/number_generator.py", "/resources/variables.py"], "/tests/test_first_five_pairs.py": ["/libs/number_generator.py", "/resources/variables.py"], "/tests/test_binary.py": ["/libs/number_generator.py", "/resources/variables.py"], "/libs/number_generator.py": ["/resources/variables.py"]}
|
7,698
|
fbarbu15/-number_generator
|
refs/heads/master
|
/libs/number_generator.py
|
'''
Created on Dec 21, 2019
@author: Florin
'''
from resources.variables import DIVIDE_FACTOR
class NumberGenerator():
    """Infinite multiplicative congruential number generator.

    Each call to next() advances the sequence:
        value = (value * multiply_factor) % DIVIDE_FACTOR
    """
    def __init__(self, start_value, multiply_factor):
        # current_value holds the seed until the first next() call.
        self.current_value = start_value
        self._multiply_factor = multiply_factor
    def __iter__(self):
        # Fix: pairing __iter__ with __next__ completes the iterator
        # protocol, so instances work in for-loops and with itertools;
        # next(gen) callers are unaffected.
        return self
    def __next__(self):
        self.current_value = (self.current_value * self._multiply_factor) % DIVIDE_FACTOR
        return self.current_value
|
{"/main.py": ["/libs/number_generator.py", "/resources/variables.py"], "/tests/test_first_five_pairs.py": ["/libs/number_generator.py", "/resources/variables.py"], "/tests/test_binary.py": ["/libs/number_generator.py", "/resources/variables.py"], "/libs/number_generator.py": ["/resources/variables.py"]}
|
7,699
|
michaelwolz/Mosel-Wein-Hack-2019
|
refs/heads/master
|
/Labeler.py
|
#-------------------------------------------------------------------------------
# Name: module1
# Purpose:
#
# Author: Philipp
#
# Created: 17.02.2019
# Copyright: (c) Philipp 2019
# Licence: <your licence>
#-------------------------------------------------------------------------------
import os
from tkinter import *
from tkinter import filedialog, messagebox
from PIL import Image, ImageTk
class MainWindow:
    """ Main Class. Create GUI and all functions.

    Simple image-labeling tool: pick a directory of .jpg frames, step
    through them, and rename each file with a '1_' or '0_' prefix
    according to the current label state.
    """
    def __init__(self, master):
        # master is the Tk root window.
        self.master = master
        # Current label: 0 = stop/negative, 1 = start/positive.
        self.label = 0
        self.master.title("Labeler")
        self.labelVar = StringVar()
        self.labelVar.set('0')
        # adding UI widgets
        self.next_button = Button(self.master, text="Next Image", command=self.next)
        self.next_button.grid(row = 1,column = 2,sticky = W+E)
        self.pickdir_button = Button(self.master, text="Pick Image Directory", command=self.pickImageDir)
        self.pickdir_button.grid(row = 1, column = 1, sticky = W+E)
        self.label_start = Button(self.master, text="Label: Start", command=self.labelStart)
        self.label_start.grid(row = 1, column = 5, sticky = W+E)
        self.label_stop = Button(self.master, text="Label: Stop", command=self.labelStop)
        self.label_stop.grid(row = 1, column = 6, sticky = W+E)
        self.label_entry = Entry(self.master, width = 2, textvariable = self.labelVar)
        self.label_entry.grid(row = 1, column = 4, sticky = W+E)
        self.close_button = Button(self.master, text="Exit", command=master.destroy)
        self.close_button.grid(row = 1, column = 7, sticky = W+E)
        # Key bind for mouse scroll-wheel 'down' to switch to next image
        self.master.bind('<Button-5>', self.next)
    def labelStart(self):
        """ Set label to 1
        """
        self.label = 1
        self.labelVar.set('1')
    def labelStop(self):
        """ Set label to 0
        """
        self.label = 0
        self.labelVar.set('0')
    def next(self, event = 0):
        """ Iterate one step through image list.

        Renames the current image with a '1_'/'0_' prefix, then loads
        the next one. Shows an error dialog at the end of the list.
        """
        # First rename last image according to label
        if self.label == 1:
            os.rename(self.img_now, os.path.split(self.img_now)[0] + '/1_' + os.path.split(self.img_now)[1])
        else:
            os.rename(self.img_now, os.path.split(self.img_now)[0] + '/0_' + os.path.split(self.img_now)[1])
        # Iterate to next image and update UI widget
        try:
            self.img_now = next(self.file)
            # NOTE(review): resized to 900x750 here but 800x632 in
            # pickImageDir -- confirm which size is intended.
            self.img = ImageTk.PhotoImage(Image.open(self.img_now).resize((900, 750)))
            self.panel.configure(image = self.img)
        except StopIteration:
            messagebox.showerror("Error!", "Letztes Bild erreicht!")
    def pickImageDir(self):
        """ Let user pick frame directory, build a file list and show the first
        image.
        """
        self.imageDir = filedialog.askdirectory()
        # Search for .jpg files in frame directory
        self.files = []
        for root, dirs, files in os.walk(self.imageDir):
            for file in files:
                if file.endswith('.jpg'):
                    self.files.append(os.path.join(root, file))
        # Start iteration through .jpg files
        self.file = iter(self.files)
        self.img_now = next(self.file)
        # Create image widget with first image
        self.img = ImageTk.PhotoImage(Image.open(self.img_now).resize((800, 632)))
        # Keep a reference on the widget so the image isn't garbage-collected.
        self.panel = Label(self.master, height = 632, width = 800, image = self.img)
        self.panel.photo = self.img
        self.panel.grid(row = 2, column = 1, columnspan = 7)
################################################################################
# main function
################################################################################
if __name__ == '__main__':
    # starting GUI: create the Tk root, attach the labeling UI, and
    # block in the event loop until the window is closed.
    root = Tk()
    gui = MainWindow(root)
    root.mainloop()
|
{"/main.py": ["/VideoAnalysis.py"]}
|
7,700
|
michaelwolz/Mosel-Wein-Hack-2019
|
refs/heads/master
|
/GrapeDetection.py
|
#-------------------------------------------------------------------------------
# Name: module1
# Purpose:
#
# Author: Philipp
#
# Created: 16.02.2019
# Copyright: (c) Philipp 2019
# Licence: <your licence>
#-------------------------------------------------------------------------------
import cv2
import numpy as np
def grapeDetection():
    """Experimental grape detection on a single hard-coded frame.

    Loads one image, blurs it, and runs a Hough circle transform,
    drawing accepted circles before displaying the result.
    NOTE(review): the loops over c_vec2 and c_vec3 reference variables
    whose assignments are commented out below, so those loops always
    raise NameError and print "Error!" -- confirm whether they should
    be removed or the assignments restored.
    """
    img = cv2.imread(r".\data\Auswahl\P63_R2_r___Data_2017_09_13_084114_ERO\063_002_036_1DC24E74_1505292356747.jpg")
    # Threshold parameters for the (currently bypassed) binarization step.
    thresh = 40
    thresh_max = 80
    maxval = 1
    img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    #img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    #img_gray, s, v = cv2.split(img_gray)
    #img_gray = cv2.resize(img_gray, (0, 0), fx = 0.4, fy = 0.4)
    img_gray = cv2.GaussianBlur(img_gray, (5, 5), 0)
    print(img_gray[0,200])
    cv2.imshow("Results", img_gray)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
    _, t_img = cv2.threshold(img_gray, thresh, maxval, cv2.THRESH_BINARY)
    _, t_img2 = cv2.threshold(img_gray, thresh_max, maxval, cv2.THRESH_TOZERO_INV)
    # NOTE(review): this overwrite discards both thresholded images and
    # feeds the blurred grayscale straight into HoughCircles.
    t_img = img_gray
    #t_img = t_img+t_img2-127
    c_vec = cv2.HoughCircles(t_img, method = cv2.HOUGH_GRADIENT, dp = 8, minDist = 1, maxRadius = 30)
    #c_vec2 = cv2.HoughCircles(t_img, method = cv2.HOUGH_GRADIENT, dp = 4, minDist = 15, maxRadius = 30)
    #c_vec3 = cv2.HoughCircles(t_img, method = cv2.HOUGH_GRADIENT, dp = 4.5, minDist = 15, maxRadius = 30)
    r_img = cv2.cvtColor(t_img, cv2.COLOR_GRAY2BGR)
    try:
        # Mask each candidate circle and draw it unless its mean
        # intensity fails the filter (threshold 0 never rejects here).
        for circle in c_vec[0]:
            mask = np.full((t_img.shape[0], t_img.shape[1]), 0, dtype = np.uint8)
            cir = cv2.circle(mask, (circle[0], circle[1]), circle[2], (255,255,255), -1)
            if cv2.mean(t_img, mask)[0] < 0:
                np.delete(c_vec[0], circle)
                continue
            cir = cv2.circle(r_img, (circle[0], circle[1]), circle[2], (255,0,255))
            r_img = cir
    except:
        print("Error!")
    try:
        # NOTE(review): c_vec2 is never defined (assignment commented
        # out above) -- this block always hits the bare except.
        for circle in c_vec2[0]:
            mask = np.full((t_img.shape[0], t_img.shape[1]), 0, dtype = np.uint8)
            cir = cv2.circle(mask, (circle[0], circle[1]), circle[2], (255,255,255), -1)
            if cv2.mean(t_img, mask)[0] < 0:
                np.delete(c_vec[0], circle)
                continue
            cir = cv2.circle(r_img, (circle[0], circle[1]), circle[2], (255,0,255))
            r_img = cir
    except:
        print("Error!")
    try:
        # NOTE(review): c_vec3 is likewise undefined -- always errors.
        for circle in c_vec3[0]:
            mask = np.full((t_img.shape[0], t_img.shape[1]), 0, dtype = np.uint8)
            cir = cv2.circle(mask, (circle[0], circle[1]), circle[2], (255,255,255), -1)
            if cv2.mean(t_img, mask)[0] < 50:
                np.delete(c_vec[0], circle)
                continue
            cir = cv2.circle(r_img, (circle[0], circle[1]), circle[2], (255,0,255))
            r_img = cir
    except:
        print("Error!")
    # Show the annotated result at 40% scale until a key is pressed.
    r_img = cv2.resize(r_img, (0, 0), fx = 0.4, fy = 0.4)
    cv2.imshow("Results", r_img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
def main():
    # Run the grape-detection demo on the bundled sample image.
    grapeDetection()
if __name__ == '__main__':
    main()
|
{"/main.py": ["/VideoAnalysis.py"]}
|
7,701
|
michaelwolz/Mosel-Wein-Hack-2019
|
refs/heads/master
|
/Stack.py
|
#-------------------------------------------------------------------------------
# Name: module1
# Purpose:
#
# Author: Philipp
#
# Created: 16.02.2019
# Copyright: (c) Philipp 2019
# Licence: <your licence>
#-------------------------------------------------------------------------------
import cv2
import numpy as np
def get_gradient(im):
    """Return the combined absolute Sobel gradient of image *im*.

    First-order X and Y derivatives (3x3 kernel, float32 output) are taken
    and their absolute values blended with equal 0.5 weights.
    """
    dx = cv2.Sobel(im, cv2.CV_32F, 1, 0, ksize=3)
    dy = cv2.Sobel(im, cv2.CV_32F, 0, 1, ksize=3)
    # Average |dx| and |dy| into a single gradient image.
    return cv2.addWeighted(np.absolute(dx), 0.5, np.absolute(dy), 0.5, 0)
# Align im2 onto im1 using OpenCV's ECC image registration and show the result.
# Read the images to be aligned
im1 = cv2.imread(r".\data\processed\063_002_014_1DC24E74_1505292225589_cropped.jpg");
im2 = cv2.imread(r".\data\processed\063_002_014_1DC24F98_1505292225589_cropped.jpg");
# Convert images to grayscale (ECC works on single-channel images)
im1_gray = cv2.cvtColor(im1,cv2.COLOR_BGR2GRAY)
im2_gray = cv2.cvtColor(im2,cv2.COLOR_BGR2GRAY)
# Find size of image1 (the warp target size)
sz = im1.shape
# Define the motion model; translation only, homography branch kept for reference
warp_mode = cv2.MOTION_TRANSLATION
# Define 2x3 or 3x3 matrices and initialize the matrix to identity
if warp_mode == cv2.MOTION_HOMOGRAPHY :
    warp_matrix = np.eye(3, 3, dtype=np.float32)
else :
    warp_matrix = np.eye(2, 3, dtype=np.float32)
# Specify the number of iterations.
number_of_iterations = 5000;
# Specify the threshold of the increment
# in the correlation coefficient between two iterations
termination_eps = 1e-10;
# Define termination criteria
criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, number_of_iterations, termination_eps)
# Run the ECC algorithm. The results are stored in warp_matrix.
(cc, warp_matrix) = cv2.findTransformECC (im1_gray,im2_gray,warp_matrix, warp_mode, criteria)
if warp_mode == cv2.MOTION_HOMOGRAPHY :
    # Use warpPerspective for Homography
    im2_aligned = cv2.warpPerspective (im2, warp_matrix, (sz[1],sz[0]), flags=cv2.INTER_LINEAR + cv2.WARP_INVERSE_MAP)
else :
    # Use warpAffine for Translation, Euclidean and Affine
    im2_aligned = cv2.warpAffine(im2, warp_matrix, (sz[1],sz[0]), flags=cv2.INTER_LINEAR + cv2.WARP_INVERSE_MAP);
# Show final results
cv2.imshow("Image 1", im1)
cv2.imshow("Image 2", im2)
cv2.imshow("Aligned Image 2", im2_aligned)
cv2.waitKey(0)
def main():
    # Placeholder entry point - the alignment above runs at import time.
    pass
if __name__ == '__main__':
    main()
|
{"/main.py": ["/VideoAnalysis.py"]}
|
7,702
|
michaelwolz/Mosel-Wein-Hack-2019
|
refs/heads/master
|
/VideoAnalysis.py
|
import cv2
from keras.models import load_model
import numpy as np
# Drawing colours (BGR order) and font settings for the status overlay.
BLUE = (255, 0, 0)
RED = (0, 0, 255)
GREEN = (0, 255, 0)
FONTSCALE = 1
LINETYPE = 2
FONT = cv2.FONT_HERSHEY_SIMPLEX
# Loading pre-trained AI model once at import time (shared by all instances)
model = load_model("data/model_2c.h5")
# model = load_model("data/grapes_model_adam.h5")
class VideoAnalysis:
    """Decides from a harvester video whether the machine is running.

    Three detection strategies are implemented:

    * ``run``            - black/white pixel ratio inside a fixed region
    * ``run_version_2``  - flickering-blue-light test, 4-frame majority vote
    * ``run_ai_version`` - pre-trained Keras classifier on the whole frame

    plus ``grape_recognition``, which reuses the classifier to flag grapes.
    Every method plays the annotated video in an OpenCV window until the
    video ends or 'q' is pressed.
    """
    # Detection state shared by the run* methods (class-level defaults).
    stopped = False                   # current machine state
    stop_counter = 0                  # consecutive frames voting "stopped"
    start_counter = 0                 # consecutive frames voting "running"
    bottomLeftCornerOfText = (0, 0)   # overlay text anchor, recomputed per frame
    frame_counter = 0
    bw_array = [0, 0, 0, 0]           # ring buffer of the last 4 frame verdicts

    # First solution using black and white pixel ratio for region of interest
    def run(self, path):
        """Pixel-ratio strategy: flag 'stopped' when the black/white pixel
        ratio in the region of interest stays below 5 for more than 15 frames.

        Raises IOError when the video at *path* cannot be opened.
        """
        print("Running video analysis...")
        cap = cv2.VideoCapture(path)
        if not cap.isOpened():
            raise IOError("Error opening video file")
        while cap.isOpened():
            ret, orig_frame = cap.read()
            if ret:
                # Resize frame to 70% of original size
                frame = cv2.resize(orig_frame, (0, 0), fx=0.70, fy=0.70)
                # Draw region of interest on the displayed (full-size) frame
                height, width, channels = orig_frame.shape
                cv2.rectangle(orig_frame, (int(width / 2), 0), (width, int(height / 3 * 2)), (255, 0, 0), 2)
                # Crop the analysis frame to the region of interest
                height, width, channels = frame.shape
                frame = frame[0:int(height / 3 * 2), int(width / 2):width]
                # Anchor for the status text overlay
                self.bottomLeftCornerOfText = (int(width / 2) + 320, int(height / 3 * 2) + 100)
                # Generate a binary black/white image from the grayscale frame
                _, bw = cv2.threshold(cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY), 60, 256, cv2.THRESH_BINARY)
                # Ratio of black to white pixels; a low ratio means mostly bright
                bw_ratio = np.sum(bw == 0) / np.sum(bw == 255)
                # Debounce state changes over 15 consecutive frames
                if not self.stopped and bw_ratio < 5:
                    self.stop_counter += 1
                    if self.stop_counter > 15:
                        self.stop_signal()
                elif self.stopped and bw_ratio >= 5:
                    self.start_counter += 1
                    if self.start_counter > 15:
                        self.start_signal()
                if self.stopped:
                    self.write_to_image(orig_frame, "Machine stopped " + str(np.round(bw_ratio)), RED)
                else:
                    self.write_to_image(orig_frame, "Machine running " + str(np.round(bw_ratio)), GREEN)
                # Show video
                orig_frame = cv2.resize(orig_frame, (0, 0), fx=0.70, fy=0.70)
                cv2.imshow('VideoAnalysis', orig_frame)
                if cv2.waitKey(25) & 0xFF == ord('q'):
                    break
            else:
                break
        # Cleanup
        cap.release()
        cv2.destroyAllWindows()

    # Trying to use flickering blue light to be more resistant against lightning changes
    def run_version_2(self, path):
        """Blue-light flicker strategy: the machine counts as stopped only
        when 4 consecutive frames all look 'dark' in the blue channel.

        Raises IOError when the video at *path* cannot be opened.
        """
        print("Running video analysis...")
        # Local counters deliberately shadow the class attributes here.
        frame_counter = 0
        bw_array = [0, 0, 0, 0]
        cap = cv2.VideoCapture(path)
        if not cap.isOpened():
            raise IOError("Error opening video file")
        while cap.isOpened():
            ret, orig_frame = cap.read()
            if ret:
                # Resize frame to 70% of original size
                frame = cv2.resize(orig_frame, (0, 0), fx=0.70, fy=0.70)
                # Draw region of interest on the displayed frame
                height, width, channels = orig_frame.shape
                cv2.rectangle(orig_frame, (int(width / 2), int(height / 3)),
                              (int(width / 2 * 1.3), int(height / 3 * 1.5)), (255, 0, 0), 2)
                # Crop the analysis frame to the region of interest
                height, width, channels = frame.shape
                frame = frame[int(height / 3):int(height / 3 * 1.5), int(width / 2):int(width / 2 * 1.3)]
                # Extract BGR channels; only the blue channel is analysed
                b, g, r = cv2.split(frame)
                # Anchor for the status text overlay
                self.bottomLeftCornerOfText = (int(width * 1.3 / 2 - 200), int(height * 1.3 - 20))
                # Generate a binary black/white image from the blue channel
                _, bw = cv2.threshold(b, 60, 256, cv2.THRESH_BINARY)
                # Ratio of black to white pixels
                bw_ratio = np.sum(bw == 0) / np.sum(bw == 255)
                # Record this frame's verdict in the 4-slot ring buffer
                if bw_ratio < 30:
                    bw_array[frame_counter % 4] = 1
                else:
                    bw_array[frame_counter % 4] = 0
                frame_check = np.count_nonzero(bw_array)
                if frame_check == 4:
                    # All 4 recent frames dark -> machine stopped
                    if not self.stopped:
                        self.stop_signal()
                else:
                    if self.stopped:
                        self.start_signal()
                if self.stopped:
                    self.write_to_image(orig_frame, "Machine stopped " + str(np.round(bw_ratio)), RED)
                else:
                    self.write_to_image(orig_frame, "Machine running " + str(np.round(bw_ratio)), GREEN)
                frame_counter += 1
                # Show video
                orig_frame = cv2.resize(orig_frame, (0, 0), fx=0.70, fy=0.70)
                cv2.imshow('VideoAnalysis', orig_frame)
                if cv2.waitKey(25) & 0xFF == ord('q'):
                    break
            else:
                break
        # Cleanup
        cap.release()
        cv2.destroyAllWindows()

    # Use ai model for finding "Stickels"
    def run_ai_version(self, path):
        """Classifier strategy: the Keras model votes per frame; the state
        flips after 3 consecutive agreeing frames.

        Raises IOError when the video at *path* cannot be opened.
        """
        print("Running video analysis...")
        cap = cv2.VideoCapture(path)
        if not cap.isOpened():
            raise IOError("Error opening video file")
        while cap.isOpened():
            ret, frame = cap.read()
            if ret:
                height, width, _ = frame.shape
                # Anchor for the status text overlay
                self.bottomLeftCornerOfText = (int(width / 2) - 200, height - 50)
                if self.check_image(frame):
                    self.start_counter += 1
                    self.stop_counter = 0
                    if self.stopped and self.start_counter > 2:
                        self.start_signal()
                else:
                    self.start_counter = 0
                    self.stop_counter += 1
                    if not self.stopped and self.stop_counter > 2:
                        self.stop_signal()
                if self.stopped:
                    self.write_to_image(frame, "Machine stopped", RED)
                else:
                    self.write_to_image(frame, "Machine running", GREEN)
                # Show video
                frame = cv2.resize(frame, (0, 0), fx=0.70, fy=0.70)
                cv2.imshow('VideoAnalysis', frame)
                if cv2.waitKey(25) & 0xFF == ord('q'):
                    break
            else:
                break
        # Cleanup
        cap.release()
        cv2.destroyAllWindows()

    # Using grape model to identify grapes (in blue light)
    def grape_recognition(self, path):
        """Label every frame 'Grape' / 'No Grape' using the loaded model.

        NOTE(review): a positive prediction is labelled "No Grape" here,
        inverted relative to run_ai_version - presumably the grape model's
        class ordering; verify against the training labels.

        Raises IOError when the video at *path* cannot be opened.
        """
        print("Running video analysis...")
        cap = cv2.VideoCapture(path)
        if not cap.isOpened():
            raise IOError("Error opening video file")
        while cap.isOpened():
            ret, frame = cap.read()
            if ret:
                height, width, _ = frame.shape
                # Anchor for the status text overlay
                self.bottomLeftCornerOfText = (int(width / 2) - 200, height - 50)
                if self.grape_in_blue_light(frame):
                    self.write_to_image(frame, "No Grape", RED)
                else:
                    self.write_to_image(frame, "Grape", GREEN)
                # Show video
                frame = cv2.resize(frame, (0, 0), fx=0.90, fy=0.90)
                cv2.imshow('VideoAnalysis', frame)
                if cv2.waitKey(50) & 0xFF == ord('q'):
                    break
            else:
                break
        # Cleanup
        cap.release()
        cv2.destroyAllWindows()

    def stop_signal(self):
        """Transition to the 'stopped' state and reset the start debounce."""
        self.stopped = True
        self.start_counter = 0

    def start_signal(self):
        """Transition to the 'running' state and reset the stop debounce."""
        self.stopped = False
        self.stop_counter = 0

    def write_to_image(self, image, text, color):
        """Overlay *text* in *color* at the current text anchor on *image*."""
        cv2.putText(image, text,
                    self.bottomLeftCornerOfText,
                    FONT,
                    FONTSCALE,
                    color,
                    LINETYPE)

    # For Stickel
    def check_image(self, image):
        """Return True when the model scores *image* above 0.5.

        The frame is downscaled to 32x32, flattened and normalised to [0, 1]
        to match the model's training input.
        """
        image = cv2.resize(image, (32, 32)).flatten()
        image = image.reshape((1, image.shape[0]))
        image = image.astype("float") / 255.0
        preds = model.predict(image)
        return preds[0][0] > 0.5

    # For finding grapes
    def grape_in_blue_light(self, image):
        """Return the model's verdict for *image* (grape detection variant).

        The preprocessing/prediction pipeline is identical to check_image,
        so delegate to it instead of keeping a duplicated copy in sync.
        """
        return self.check_image(image)
|
{"/main.py": ["/VideoAnalysis.py"]}
|
7,703
|
michaelwolz/Mosel-Wein-Hack-2019
|
refs/heads/master
|
/main.py
|
import VideoAnalysis
def main():
    # Entry point: currently just launches the video-analysis demo.
    run_video_analysis()
def run_video_analysis():
    # Analyse the sample RGB video with the AI ("Stickel" classifier) variant.
    va = VideoAnalysis.VideoAnalysis()
    va.run_ai_version("data/RGB/video_rgb-13_09_2017-10_02.avi")
if __name__ == '__main__':
    main()
|
{"/main.py": ["/VideoAnalysis.py"]}
|
7,704
|
MUSTMIIZ11/chameleon
|
refs/heads/main
|
/signup/views.py
|
from django.shortcuts import render
# Create your views here.
from django.shortcuts import render
from django.contrib import auth
from django.contrib.auth import authenticate, login
from django.contrib.auth.backends import ModelBackend
from django.db.models import Q
from .models import User
from django.http import HttpResponseRedirect
from django.contrib import redirects
# def index(request):
# return render(request, 'login.html')
from django.views.generic.base import View
# class CustomBackend(ModelBackend):
# def authenticate(self, username=None, password=None, **kwargs):
# print("<<< start authenticate >>>")
# try:
# user = User.objects.get(Q(username=username) | Q(email=username))
# if user.check_password(password):
# return user
# except Exception as e:
# return None
#
# def user_login(self, request):
# print("get in user login")
# if request.method == 'POST':
# print("get user name and password")
# user_name = request.POST.get('username', "")
# pass_word = request.POST.get('password', "")
# print(user_name)
# print(pass_word)
# user = authenticate(username=user_name, password=pass_word)
# if user is not None:
# login(request, user)
# return render(request, "tool.html")
# else:
# return render(request, 'login.html', {"msg": "用户名或密码错误!"})
# elif request.method == 'GET':
# return render(request, 'login.html', {})
def simple_login(request):
    """Log a user in by comparing the posted credentials against the DB.

    GET renders the login form; POST checks username/password and, on
    success, redirects to /welcome/ with identifying cookies set.

    SECURITY NOTE(review): passwords are stored and compared in plain text
    and the login state lives in client-editable cookies - both should be
    replaced with django.contrib.auth.
    """
    if request.method == 'POST':
        user_name = request.POST.get('username', "")
        pass_word = request.POST.get('password', "")
        try:
            usr = User.objects.get(username=user_name)
        except User.DoesNotExist:
            # Unknown user: show the same generic message as a bad password
            # instead of crashing with an unhandled DoesNotExist (HTTP 500).
            return render(request, 'login.html', {"msg": "Invalid user or password!"})
        if usr.password == pass_word:
            ret = HttpResponseRedirect('/welcome/')
            ret.set_cookie('is_login', True)
            ret.set_cookie('user_id', usr.id)
            ret.set_cookie('user_name', usr.username)
            return ret
        else:
            return render(request, 'login.html', {"msg": "Invalid user or password!"})
    return render(request, 'login.html')
def signup(request):
    """Register a new user.

    GET renders the form; POST validates that the username is free and that
    both password fields match, then creates the user and renders the home
    page.

    SECURITY NOTE(review): the password is persisted in plain text; use
    Django's password hashing (User.set_password) instead.
    """
    if request.method == 'POST':
        user_name = request.POST.get('username', "")
        pass_word1 = request.POST.get('password1', "")
        pass_word2 = request.POST.get('password2', "")
        try:
            usr = User.objects.get(username=user_name)
        except User.DoesNotExist:
            # Desired outcome: the username is still available.
            usr = None
        if usr is not None:
            return render(request, 'register.html', {"msg": "The user name already exist!"})
        else:
            if pass_word1 == pass_word2:
                # create new users
                print("create new user")
                new_user = User.objects.create(username=user_name, password=pass_word1)
                new_user.save()
                return render(request, 'index.html')
            else:
                return render(request, 'register.html', {"msg": "Two password not the same, Please try again."})
    return render(request, 'register.html')
|
{"/community/admin.py": ["/community/models.py"], "/community/views.py": ["/community/models.py"], "/tools/tests.py": ["/community/models.py"], "/tools/views.py": ["/community/models.py"]}
|
7,705
|
MUSTMIIZ11/chameleon
|
refs/heads/main
|
/signup/admin.py
|
from django.contrib import admin
from signup.models import User
# Expose the User model in the Django admin site.
admin.site.register([User])
|
{"/community/admin.py": ["/community/models.py"], "/community/views.py": ["/community/models.py"], "/tools/tests.py": ["/community/models.py"], "/tools/views.py": ["/community/models.py"]}
|
7,706
|
MUSTMIIZ11/chameleon
|
refs/heads/main
|
/tools/urls.py
|
from django.urls import path
from . import views
# Routes of the tools app: editor page, map-save endpoint and login check.
urlpatterns = [
    path('', views.index, name='index'),
    path('save', views.save_map, name='save'),
    path('check', views.check, name='check'),
]
|
{"/community/admin.py": ["/community/models.py"], "/community/views.py": ["/community/models.py"], "/tools/tests.py": ["/community/models.py"], "/tools/views.py": ["/community/models.py"]}
|
7,707
|
MUSTMIIZ11/chameleon
|
refs/heads/main
|
/community/admin.py
|
from django.contrib import admin
from community.models import Map
# Expose the Map model in the Django admin site.
admin.site.register([Map])
|
{"/community/admin.py": ["/community/models.py"], "/community/views.py": ["/community/models.py"], "/tools/tests.py": ["/community/models.py"], "/tools/views.py": ["/community/models.py"]}
|
7,708
|
MUSTMIIZ11/chameleon
|
refs/heads/main
|
/signup/urls.py
|
from django.urls import path
# from .views import CustomBackend
from . import views
# Routes of the signup app: login form and registration form.
urlpatterns = [
    path('', views.simple_login, name='login'),
    path('register/', views.signup, name='signup'),
]
|
{"/community/admin.py": ["/community/models.py"], "/community/views.py": ["/community/models.py"], "/tools/tests.py": ["/community/models.py"], "/tools/views.py": ["/community/models.py"]}
|
7,709
|
MUSTMIIZ11/chameleon
|
refs/heads/main
|
/community/models.py
|
from django.db import models
# Create your models here.
class Map(models.Model):
    """A user-created map image shared in the community gallery."""
    map_name = models.CharField(max_length=50)
    # Image path relative to the static dir, e.g. 'map_img/<name>.svg'
    map_url = models.TextField(max_length=100)
    # Plain integer reference to signup.User (not a ForeignKey in this schema)
    user_id = models.IntegerField()
    like = models.IntegerField(default=0)
    # Fixed creation date. auto_now_add sets it once on INSERT; the previous
    # auto_now=True silently rewrote the "creation" date on every save().
    # (Requires a migration.)
    create_time = models.DateField(auto_now_add=True)
    def __str__(self):
        return self.map_name
|
{"/community/admin.py": ["/community/models.py"], "/community/views.py": ["/community/models.py"], "/tools/tests.py": ["/community/models.py"], "/tools/views.py": ["/community/models.py"]}
|
7,710
|
MUSTMIIZ11/chameleon
|
refs/heads/main
|
/community/views.py
|
import base64
import os
import sys
from io import BytesIO
import qrcode
from django.http import HttpResponse
from django.shortcuts import render
from chameleon.settings import MAP_DIR, BASE_DIR
from .models import Map
from signup.models import User
import qrcode
from chameleon import settings
def index(request):
    """Render the community gallery ordered by like count.

    Each map whose URL points at a readable .svg file is inlined as a
    base64 data URI; maps whose file cannot be read are skipped entirely.
    Non-.svg entries are listed without a 'map_src' key.
    """
    # Most-liked maps first.
    ordered_map = Map.objects.order_by('-like').all()
    display_map_dict = dict()
    display_map_dict['map_user_all'] = list()
    map_class = [0, 1, 2]  # column index (0..2) cycled for the template layout
    print(display_map_dict)
    # 'map_item' instead of 'map' to avoid shadowing the builtin.
    for i, map_item in enumerate(ordered_map):
        temp = dict()
        if '.svg' in map_item.map_url:
            try:
                map_dir = os.path.join(BASE_DIR, 'static/')
                filename = os.path.join(map_dir, map_item.map_url)
                with open(filename, 'rb') as f:
                    map_data = f.read()
                map_data = base64.b64encode(map_data)
                temp['map_src'] = 'data:image/svg+xml;base64,' + map_data.decode()
            except Exception:
                # Missing/unreadable file: drop this entry from the gallery.
                # (Was a bare except, which also swallowed KeyboardInterrupt.)
                continue
        temp['map'] = map_item.map_url
        try:
            temp['map_user'] = User.objects.get(id=map_item.user_id).username
        except Exception:
            # Owner record missing: fall back to a placeholder name.
            temp['map_user'] = 'unknown'
        tmp = i % 3
        temp['map_count'] = map_class[tmp]
        display_map_dict['map_user_all'].append(temp)
    return render(request, 'community.html', display_map_dict)
from django.utils.http import urlencode
def download(request):
    """Render the download page for a single map image.

    Expects the map's relative URL in the 'url' query parameter; .svg files
    are additionally inlined as a base64 data URI for display.
    (The dead local 'count' counter - reset to 0 on every request - was
    removed along with its debug print.)
    """
    if request.method == "GET":
        img_url = request.GET.get('url')
        print('img_url:', img_url)
        data = {"img_url": img_url}
        if '.svg' in img_url:
            map_dir = os.path.join(BASE_DIR, 'static/')
            filename = os.path.join(map_dir, img_url)
            with open(filename, 'rb') as f:
                map_data = f.read()
            map_data = base64.b64encode(map_data)
            map_src = 'data:image/svg+xml;base64,' + map_data.decode()
            data = {"img_url": img_url, 'map_src': map_src}
        return render(request, 'download.html',data)
# def update_img_url(request):
# if request.method == "GET":
# img_url = request.GET.get('url')
# print("img_url:", img_url)
# return HttpResponse(request, {"img_url": img_url})
def makeqrcode(request, data):
    """Return a QR-code image pointing at the download page for *data*."""
    # Absolute URL of the download page that the QR code encodes.
    url = "http://" + settings.URL + '/' + "community/download?url=" + data
    qr_image = qrcode.make(url)
    # Rasterize the QR image into an in-memory buffer (no temp file).
    buffer = BytesIO()
    qr_image.save(buffer)
    png_bytes = buffer.getvalue()
    # Serve the raw image bytes straight back to the browser.
    return HttpResponse(png_bytes, content_type="image/jpg")
|
{"/community/admin.py": ["/community/models.py"], "/community/views.py": ["/community/models.py"], "/tools/tests.py": ["/community/models.py"], "/tools/views.py": ["/community/models.py"]}
|
7,711
|
MUSTMIIZ11/chameleon
|
refs/heads/main
|
/tools/tests.py
|
import binascii
import unittest
from unittest.mock import patch, mock_open
from django.http import HttpResponse
from django.test import Client
# Create your tests here.
from django.test import TestCase
from community.models import Map
from signup.models import User
class MapTestCase(TestCase):
    """
    Test Map Case
    """
    def setUp(self):
        # Every test gets a fresh fixture user to attach maps to.
        self.user = User.objects.create(username='nick', password='123')
    # test map is created
    def test_map_create(self):
        Map.objects.create(map_name='map', map_url='', user_id=self.user.id)
        maps = Map.objects.filter(user_id=self.user.id)
        self.assertEqual(len(maps), 1)
    # test map __str__ function is called and equal to map name
    def test_map_str(self):
        map = Map.objects.create(map_name='map', map_url='', user_id=self.user.id)
        self.assertEqual(str(map), map.map_name)
    # test map model fields limit length
    def test_map_field_maxlength(self):
        map = Map.objects.create(map_name='map', map_url='', user_id=self.user.id)
        max_length = map._meta.get_field('map_name').max_length
        self.assertEqual(max_length, 50)
        max_length = map._meta.get_field('map_url').max_length
        self.assertEqual(max_length, 100)
    # skip this test because local environment we test with sqlite3, sqlite3 does not have the limitation of field length, but mysql have the limitation
    @unittest.skip("sqlite does not require the field length of char field")
    def test_map_create_failed_with_exceeded_field(self):
        # NOTE(review): this builds a *list* of 100 'x' strings, not a
        # 100-char string - probably intended: 'x' * 100.
        map_name = ['x' for i in range(100)]
        print(map_name)
        map = Map.objects.create(map_name=map_name, map_url='', user_id=self.user.id)
        self.assertEqual(map.map_name, map_name)
        # self.assertRaises()
    def tearDown(self) -> None:
        # Remove the fixture user (Map rows are untouched - plain int user_id).
        self.user.delete()
class ToolsViewsTestCase(TestCase):
    """
    Test Tools
    """
    def setUp(self) -> None:
        pass
    def tearDown(self) -> None:
        pass
    """
    Test the tool/ router is correctly triggered.
    """
    def test_index(self):
        c = Client()
        response = c.get('/tool/')
        self.assertEqual(response.status_code, 200)
    """
    I mock the render and return a string instead, I only want to test the router is triggered correctly.
    """
    @patch('tools.views.render')
    def test_mock_index(self, mock_render):
        c = Client()
        mock_render.return_value = HttpResponse('tools_html')
        response = c.get('/tool/')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, b'tools_html')
    """
    Mock the open function because I do not want to produce new files and modify files in test.
    """
    # NOTE(review): new_callable=mock_open() passes an *instance*; the usual
    # form is new_callable=mock_open (uncalled) - confirm this patches open
    # the way the test intends.
    @patch('builtins.open', new_callable=mock_open())
    def test_save_map(self, mock_open_):
        c = Client()
        # 'map_data' is a base64-encoded sample SVG exported from diagrams.net.
        response = c.post('/tool/save', {
            'map_data': 'PD94bWwgdmVyc2lvbj0iMS4wIiBlbmNvZGluZz0iVVRGLTgiPz4KPCFET0NUWVBFIHN2ZyBQVUJMSUMgIi0vL1czQy8vRFREIFNWRyAxLjEvL0VOIiAiaHR0cDovL3d3dy53My5vcmcvR3JhcGhpY3MvU1ZHLzEuMS9EVEQvc3ZnMTEuZHRkIj4KPHN2ZyB4bWxucz0iaHR0cDovL3d3dy53My5vcmcvMjAwMC9zdmciIHhtbG5zOnhsaW5rPSJodHRwOi8vd3d3LnczLm9yZy8xOTk5L3hsaW5rIiB2ZXJzaW9uPSIxLjEiIHdpZHRoPSIxMjFweCIgaGVpZ2h0PSI2MXB4IiB2aWV3Qm94PSItMC41IC0wLjUgMTIxIDYxIiBjb250ZW50PSImbHQ7bXhmaWxlIGV0YWc9JnF1b3Q7VGdBR2JKbGNJaGw3a1JuRGFxSDQmcXVvdDsgYWdlbnQ9JnF1b3Q7TW96aWxsYS81LjAgKE1hY2ludG9zaDsgSW50ZWwgTWFjIE9TIFggMTBfMTRfNikgQXBwbGVXZWJLaXQvNTM3LjM2IChLSFRNTCwgbGlrZSBHZWNrbykgQ2hyb21lLzgwLjAuMzk4Ny4xMDYgU2FmYXJpLzUzNy4zNiZxdW90OyBtb2RpZmllZD0mcXVvdDsyMDIwLTAyLTE5VDEyOjQ0OjI3LjY1OVomcXVvdDsgaG9zdD0mcXVvdDt0ZXN0LmRyYXcuaW8mcXVvdDsgdmVyc2lvbj0mcXVvdDtARFJBV0lPLVZFUlNJT05AJnF1b3Q7Jmd0OyZsdDtkaWFncmFtIGlkPSZxdW90O3JVdXh2bWFtZE5aMXpyTFhPbF82JnF1b3Q7IG5hbWU9JnF1b3Q7UGFnZS0xJnF1b3Q7Jmd0O2xaUExis0l3RUVXL0prc0p4Nm9sMFlZcTBVM0pVNkdibmM2SXZUbXFrUllPdjM2aFF1N0xySzlYSTFUbjNKR21LREM2Snk5SldKdEhSQzhabE5CaXdCYlFSaW1FbEpmZWFlaXVaOFVZcUxWMVdxOEl1UDdRWWVMUXpnN3FsYmRabjA3MUJZbG5DMUJXdkZLUjRSbTJTQmJ4ZVZRdTB5YnFrOCtVRTFUbDVxeXhjVWw1NEcrNldXMjcvdUZtdUpVWmNhbHdYNGJwUy9wdmw2T2NqMXFQZXJaZFVpcTJ2ZmhONXlNS0lNQytOR2Y2cWxNMUhXNnNjN21ZaVpTUVZ4dkFlNDl4YTFzVDJqV05rV3ZqQWRzWnJoTi9jOFV4UHo2aGxsdFlqUjVqNUZVcHByNzVjMllhWXU5MVNQaTVXNjhLM0FiKzJOUFNvODYxOC9wdlorSFJUOD0mbHQ7L2RpYWdyYW0mZ3Q7Jmx0Oy9teGZpbGUmZ3Q7IiBzdHlsZT0iYmFja2dyb3VuZC1jb2xvcjogcmdiKDI1NSwgMjU1LCAyNTUpOyI+PGRlZnMvPjxnPjxyZWN0IHg9IjAiIHk9IjAiIHdpZHRoPSIxMjAiIGhlaWdodD0iNjAiIGZpbGw9IiNmZmZmZmYiIHN0cm9rZT0iIzAwMDAwMCIgcG9pbnRlci1ldmVudHM9ImFsbCIvPjxnIGZpbGw9IiMwMDAwMDAiIGZvbnQtZmFtaWx5PSJIZWx2ZXRpY2EiIHRleHQtYW5jaG9yPSJtaWRkbGUiIGZvbnQtc2l6ZT0iMTJweCI+PHRleHQgeD0iNTkuNSIgeT0iMzQuNSI+U3RhcnQ8L3RleHQ+PC9nPjwvZz48L3N2Zz4=',
            'map_name': 'test_map_name',
            'user_id': '2'
        })
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.json()['status'], 'ok')
        self.assertEqual(response.json()['user_id'], 2)
        self.assertEqual(response.json()['map_url'], 'map_img/' + 'test_map_name' + '.svg'
                         )
    """
    If a request to "/tool/save" carries non-base64 data in it's body, raise corresponding error here.
    """
    def test_save_map_with_non_base64_img_data(self):
        c = Client()
        # save_map deliberately lets binascii.Error propagate.
        with self.assertRaises(binascii.Error) as e:
            response = c.post('/tool/save', {
                'map_data': 'P',
                'map_name': 'test_map_name',
                'user_id': '2'
            })
|
{"/community/admin.py": ["/community/models.py"], "/community/views.py": ["/community/models.py"], "/tools/tests.py": ["/community/models.py"], "/tools/views.py": ["/community/models.py"]}
|
7,712
|
MUSTMIIZ11/chameleon
|
refs/heads/main
|
/chameleon/settings_dev.py
|
import os
# Project root (two levels above this settings module).
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# URL = "159.75.82.228:9090"
# Public host:port of this deployment, overridable via the DJANGO_HOST env var.
URL = os.environ.get('DJANGO_HOST') or "127.0.0.1:8080"
# Development database settings.
# SECURITY NOTE(review): DB credentials and host are hard-coded in source -
# move them to environment variables or a secrets store.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.mysql',
        'NAME': 'chameleon_db_dev',
        'USER': 'root',
        'PASSWORD': 'chameleon',
        'HOST': '159.75.82.228',
        'PORT': '3306',
    }
}
|
{"/community/admin.py": ["/community/models.py"], "/community/views.py": ["/community/models.py"], "/tools/tests.py": ["/community/models.py"], "/tools/views.py": ["/community/models.py"]}
|
7,713
|
MUSTMIIZ11/chameleon
|
refs/heads/main
|
/tools/views.py
|
# Create your views here.
from django.http import HttpResponse, JsonResponse
from django.shortcuts import render
import base64
from django.views.decorators.csrf import csrf_exempt
from community.models import Map
from chameleon.settings import BASE_DIR
import os
def index(request):
    """Render the map-editor (tool) page."""
    return render(request, 'tool.html')
@csrf_exempt
def check(request):
    """Echo the posted 'user_id' back as JSON (login-state probe for the editor)."""
    user_id = request.POST['user_id']
    print(f"the user id {user_id}")
    # if user_id == -1:
    #     return
    return JsonResponse({
        "user_id": user_id
    })
# if user_id != -1:
# print("testtttt")
# return render(request, 'login.html')
# else:
# print("toollllllll")
# # Not login
# return render(request, 'tool.html')
@csrf_exempt
def save_map(request):
    """Persist a map posted by the diagrams.net editor.

    Expects POST fields 'map_data' (base64-encoded SVG), 'map_name' and
    'user_id'. Decodes the SVG to static/map_img/<name>.svg, records the
    map in the Map table and returns the new row as JSON.

    Raises binascii.Error (deliberately uncaught - see tools/tests.py) when
    'map_data' is not valid base64.
    """
    imgstring = request.POST['map_data']
    map_name = request.POST['map_name']
    user_id = request.POST['user_id']
    try:
        user_id = int(user_id)
    except (TypeError, ValueError):
        # Anonymous / malformed id is stored as the sentinel -1.
        user_id = -1
    imgdata = base64.b64decode(imgstring)
    # map_url is stored relative to the static dir so templates can serve it.
    map_dir = os.path.join(BASE_DIR, 'static/')
    filename = 'map_img/' + map_name + '.svg'
    # 'map_record' instead of 'map' to avoid shadowing the builtin; the
    # unused 'jpg' path variable from the earlier JPEG experiment is removed.
    map_record = Map.objects.create(map_name=map_name, user_id=user_id, map_url=filename)
    with open(os.path.join(map_dir, filename), 'wb') as f:
        f.write(imgdata)
    return JsonResponse({
        'status': 'ok',
        'map_id': map_record.id,
        'map_name': map_record.map_name,
        'map_url': map_record.map_url,
        'user_id': map_record.user_id
    })
|
{"/community/admin.py": ["/community/models.py"], "/community/views.py": ["/community/models.py"], "/tools/tests.py": ["/community/models.py"], "/tools/views.py": ["/community/models.py"]}
|
7,714
|
MUSTMIIZ11/chameleon
|
refs/heads/main
|
/welcome/views.py
|
from django.shortcuts import render
# Create your views here.
from django.http import HttpResponse
# def index(request):
# return HttpResponse("Hello world, you are at poll index")
# def index(request):
# context = dict()
# context['hello'] = 'Hello World! You are now in the welcome app!'
# return render(request, 'navigation.html', context)
def index(request):
    """Render the landing page of the welcome app."""
    return render(request, 'index.html')
|
{"/community/admin.py": ["/community/models.py"], "/community/views.py": ["/community/models.py"], "/tools/tests.py": ["/community/models.py"], "/tools/views.py": ["/community/models.py"]}
|
7,718
|
SolaligueJoel/Proyecto-Integrador
|
refs/heads/main
|
/src/clases/localidad.py
|
from matplotlib.figure import Figure
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
import io
import base64
from flask_sqlalchemy import SQLAlchemy
import requests
import matplotlib.pyplot as plt
db = SQLAlchemy()
class Localidad(db.Model):
    """SQLAlchemy model: one rental-price query for a location.

    Stores the location name, the user-chosen min/max price bounds and a
    timestamp string for the query.
    """
    __tablename__ = "localidad"
    id = db.Column(db.Integer,primary_key=True)
    location = db.Column(db.String)
    price_min = db.Column(db.Integer)
    price_max = db.Column(db.Integer)
    time = db.Column(db.String)
    def __repr__(self):
        return f"Location:{self.location} price min: {self.price_min} precio max: {self.price_max} time: {self.time}"
def create_schema():
    # Recreate all tables from scratch - DESTROYS any existing data.
    db.drop_all()
    db.create_all()
def insert(location,price_min,price_max,time):
    # Persist one Localidad row. Note the 'location' parameter is rebound
    # to the model instance on the next line.
    location = Localidad(location = location, price_min = price_min, price_max = price_max,time=time)
    db.session.add(location)
    db.session.commit()
def fetch(location):
    """Query MercadoLibre for apartment rentals in *location*.

    Returns a list of {'price', 'condition'} dicts restricted to listings
    priced in Argentine pesos (ARS).
    """
    url = 'https://api.mercadolibre.com/sites/MLA/search?category=MLA1459&q=Departamentos%20Alquilers%20{}%20&limit=50'.format(location)
    payload = requests.get(url).json()
    listings = payload["results"]
    # Keep only peso-denominated listings, reduced to the two fields we use.
    return [
        {"price": item["price"], "condition": item["condition"]}
        for item in listings
        if item.get("currency_id") == "ARS"
    ]
def transform(new_data,min,max):
    """Bucket listing prices into [below-min, in-range, above-max] counts.

    Comparisons are strict, so prices exactly equal to *min* or *max* fall
    into no bucket (matches the original behaviour). Parameter names *min*
    and *max* are kept for caller compatibility despite shadowing builtins.
    """
    below = sum(1 for item in new_data if item.get("price") < min)
    between = sum(1 for item in new_data if min < item.get("price") < max)
    above = sum(1 for item in new_data if item.get("price") > max)
    return [below, between, above]
def report(limit=0, offset=0):
    """Return all stored Localidad rows as a list of JSON-ready dicts.

    Positive *limit* / *offset* values are applied to the query for simple
    pagination; zero means "no restriction".
    """
    query = db.session.query(Localidad)
    if limit > 0:
        query = query.limit(limit)
    if offset > 0:
        query = query.offset(offset)
    # Serialize each row into the dict shape the frontend expects.
    rows = []
    for row in query:
        rows.append({
            'Localidad': row.location,
            'Price_min': row.price_min,
            'Price_max': row.price_max,
            'Time': row.time,
        })
    return rows
def grafico(data,location):
    """Render a pie chart of the three price buckets for *location*.

    *data* is the [below, between, above] count list from transform();
    returns the chart as base64-encoded PNG bytes.
    """
    fig = Figure(figsize=(10,5))
    fig.tight_layout()
    axes = fig.add_subplot()
    axes.set_title('Cantidad Alquileres en {}'.format(location),fontsize=27)
    slice_labels = ['Valor minimo','Valor intermedio','Valor maximo']
    slice_colors = ['#90EE90','#FDD835','#B03A2E']
    legend_labels = [f'Alq,Dep por debajo del valor minimo: {data[0]}', f'Alq,Dep entre los precios min y max: {data[1]}',f'Alq,Dep que superan el valor maximo: {data[2]}']
    axes.set_xlim(0,10)
    axes.pie(data,labels= slice_labels, wedgeprops={'edgecolor':'black'}, autopct='%0.0f%%', colors=slice_colors)
    axes.legend(legend_labels,loc=1,fontsize=8)
    axes.axis('equal')
    # Rasterize into an in-memory PNG and return it base64-encoded.
    png_buffer = io.BytesIO()
    FigureCanvas(fig).print_png(png_buffer)
    encoded_img = base64.encodebytes(png_buffer.getvalue())
    plt.close(fig)
    return encoded_img
|
{"/app.py": ["/src/configuracion/config.py", "/src/clases/users.py"]}
|
7,719
|
SolaligueJoel/Proyecto-Integrador
|
refs/heads/main
|
/src/configuracion/config.py
|
from configparser import ConfigParser
def config(section, filename='config.ini'):
    """Read *section* of the INI file *filename* into a plain dict.

    Raises Exception when the section is missing from the file.
    """
    parser = ConfigParser()
    parser.read(filename)
    # Fail fast when the requested section does not exist.
    if not parser.has_section(section):
        raise Exception('Section {0} not found in the {1} file'.format(section, filename))
    # Flatten the section's (key, value) pairs into a dict.
    return {key: value for key, value in parser.items(section)}
|
{"/app.py": ["/src/configuracion/config.py", "/src/clases/users.py"]}
|
7,720
|
SolaligueJoel/Proyecto-Integrador
|
refs/heads/main
|
/app.py
|
from flask.helpers import flash, url_for
from werkzeug.utils import redirect
import traceback
from datetime import datetime
from flask_sqlalchemy import SQLAlchemy
from flask import Flask, request, jsonify, render_template, Response
from flask_login import LoginManager,login_required,login_user,logout_user,current_user
from src.configuracion.config import config
import os
import pytz
from src.clases.users import User
from src.clases import localidad
from src.clases import users
# Application and extension wiring (Flask, SQLAlchemy, Flask-Login).
app = Flask(__name__)
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(app)
# NOTE(review): hard-coded secret key — should come from config/environment.
app.config['SECRET_KEY']='thisisasecretkey'
login_manager = LoginManager()
login_manager.init_app(app)
# Unauthenticated requests to @login_required views are redirected here.
login_manager.login_view = 'login'
# Get the current execution path of the script
script_path = os.path.dirname(os.path.realpath(__file__))
# Read the db/server parameters from the configuration file
config_path_name = os.path.join(script_path ,'src/configuracion','config.ini')
db_config = config('db', config_path_name)
server_config = config('server', config_path_name)
# Point the app at the sqlite database named in config.ini
app.config["SQLALCHEMY_DATABASE_URI"] = f"sqlite:///{db_config['database']}"
# Bind both model modules' SQLAlchemy instances to this app.
localidad.db.init_app(app)
users.db.init_app(app)
#-----------------------------INDEX-----------------------------#
@app.route("/")
def index():
try:
if os.path.isfile(db_config['database']) == False:
localidad.create_schema()
users.create_schema()
return redirect(url_for('signup'))
except:
return jsonify({'trace': traceback.format_exc()})
#-----------------------------LOGIN-----------------------------#
@app.route("/login",methods=['GET','POST'])
def login():
if request.method == 'GET':
try:
return render_template('login.html')
except:
return jsonify({'trace': traceback.format_exc()})
if request.method == 'POST':
try:
user_name = str(request.form.get('user_name'))
check_user= users.validar_user(user_name)
password = request.form.get('password')
if check_user is not None and check_user.check_password(password):
login_user(check_user)
flash(f'Hola {user_name}!')
return render_template('home.html')
elif not check_user:
flash(' Usuario incorrecto!')
return render_template('login.html')
elif check_user.check_password(request.form.get('password')) is False:
flash(' La contraseña no coincide con el usuario')
return render_template('login.html')
else:
flash(' Los datos ingresados no son corretos.')
return render_template('login.html')
except:
return jsonify({'trace': traceback.format_exc()})
#-----------------------------SIGNUP-----------------------------#
@app.route("/signup",methods=['GET','POST'])
def signup():
if request.method == 'GET':
try:
return render_template('signup.html')
except:
return jsonify({'trace': traceback.format_exc()})
if request.method == 'POST':
try:
time1 = datetime.now(pytz.timezone('America/Argentina/Buenos_Aires'))
time = time1.strftime('%d/%m/%Y | %H:%M')
user_name = request.form.get('user_name')
email = request.form.get('email')
password = request.form.get('password')
check_user = users.validar_user(user_name)
check_email = users.validar_email(email)
if check_user:
flash(f'El Usuario "{user_name}" ya se encuentra registrado!')
return render_template('signup.html')
elif check_email:
flash(f'El email "{email}" ya se encuentra registrado!')
return render_template('signup.html')
new_user = User(user_name,email,password,time)
users.insert(new_user)
login_user(new_user)
flash(f'Hola {user_name}!')
return render_template('home.html')
except:
return jsonify({'trace': traceback.format_exc()})
@login_manager.user_loader
def load_user(user_id):
    # Flask-Login callback: reload the logged-in user from the session's id.
    return users.user_id(user_id)
@app.route('/logout')
@login_required
def logout():
    # End the current session and show the login form again.
    logout_user()
    return render_template('login.html')
#--RESET TABLE USER
@app.route("/resetear")
@login_required
def resetear():
users.create_schema()
return ("Base de datos regenerada")
#----------------------------- HOME -----------------------------#
@app.route("/home",methods=['GET','POST'])
@login_required
def meli():
if request.method == 'GET':
try:
return render_template('home.html')
except:
return jsonify({'trice': traceback.format_exc()})
if request.method == 'POST':
try:
time1 = datetime.now(pytz.timezone('America/Argentina/Buenos_Aires'))
time = time1.strftime('%d/%m/%Y, %H:%M')
location = str(request.form.get('location'))
price_min = str(request.form.get('price_min'))
price_max = str(request.form.get('price_max'))
if not location:
flash("Ingrese una localidad.")
return render_template('home.html')
elif price_min > price_max:
flash("Precios incorrectos.")
return render_template('home.html')
elif (price_min or price_max) == "":
flash("Precios incorrectos.")
return render_template('home.html')
localidad.insert(location,int(price_min),int(price_max),time)
min = int(price_min)
max = int(price_max)
dataset = localidad.fetch(location)
data = localidad.transform(dataset,min,max)
encoded_img = localidad.grafico(data,location)
return render_template('grafico.html',overview_graph=encoded_img)
except:
return jsonify({'trace': traceback.format_exc()})
#-----------------------------TABLA LOCALIDADES-----------------------------#
@app.route("/localidades")
@login_required
def localidades():
if request.method == 'GET':
try:
data = localidad.report()
return render_template('tabla.html',data = data)
except:
return jsonify({'trace': traceback.format_exc()})
if request.method == 'POST':
try:
time1 = datetime.now(pytz.timezone('America/Argentina/Buenos_Aires'))
time = time1.strftime('%d/%m/%Y, %H:%M')
location = str(request.form.get('location'))
price_min = str(request.form.get('price_min'))
price_max = str(request.form.get('price_max'))
if (location is None or price_min.isdigit() and price_max.isdigit() is False):
return Response(status=400)
localidad.insert(location,int(price_min),int(price_max),time)
except:
return jsonify({'trace': traceback.format_exc()})
#--RESET TABLE LOCALIDADES
@app.route("/reset")
@login_required
def reset():
try:
localidad.create_schema()
return render_template('tabla.html')
except:
return jsonify({'trade': traceback.format_exc()})
# Launch the Flask development server with host/port taken from config.ini.
# NOTE(review): debug=True enables the werkzeug debugger/reloader; disable
# it in production.
if __name__ == '__main__':
    app.run(host=server_config['host'],
            port=server_config['port'],
            debug=True)
|
{"/app.py": ["/src/configuracion/config.py", "/src/clases/users.py"]}
|
7,721
|
SolaligueJoel/Proyecto-Integrador
|
refs/heads/main
|
/src/clases/users.py
|
from flask_sqlalchemy import SQLAlchemy
from flask_login import UserMixin
from werkzeug.security import generate_password_hash,check_password_hash
db = SQLAlchemy()
class User(UserMixin, db.Model):
    """Registered application user (Flask-Login + SQLAlchemy model).

    The password is stored as a werkzeug hash, never in plain text.
    """
    id = db.Column(db.Integer, primary_key=True)
    # Login name; unique constraint enforces one account per name.
    user_name = db.Column(db.String(20), unique=True)
    email = db.Column(db.String(50))
    # NOTE(review): String(50) is shorter than a werkzeug "sha256$..." hash;
    # sqlite ignores the declared length, but other backends may truncate.
    password = db.Column(db.String(50))
    # Registration timestamp, stored as a pre-formatted string.
    time = db.Column(db.String)
    def __init__(self,user_name,email,password,time):
        self.user_name = user_name
        self.email = email
        # Store only the hash of the supplied password.
        self.password = self.create_password_hash(password)
        self.time = time
    def __repr__(self):
        return '<User %r>' % self.user_name
    def create_password_hash(self,password):
        """Return a salted hash of *password*."""
        # NOTE(review): method="sha256" was removed in Werkzeug >= 2.3 —
        # verify the pinned werkzeug version.
        return generate_password_hash(password,method="sha256")
    def check_password(self,password):
        """Return True when *password* matches the stored hash."""
        return check_password_hash(self.password,password)
def create_schema():
    # Destructive rebuild: drops every table bound to this db, then recreates.
    db.drop_all()
    db.create_all()
def insert(new_user):
    # Persist a new User row in its own transaction.
    db.session.add(new_user)
    db.session.commit()
def validar_user(user_name):
    """Return the User whose user_name matches, or None when unregistered."""
    return User.query.filter_by(user_name=user_name).first()
def validar_email(email):
    """Return the User registered under *email*, or None when unused."""
    return User.query.filter_by(email=email).first()
def user_id(user_id):
    # Look up a User by primary key (used by the Flask-Login user loader).
    return User.query.get(int(user_id))
|
{"/app.py": ["/src/configuracion/config.py", "/src/clases/users.py"]}
|
7,728
|
algorithmiaio/nautilus
|
refs/heads/master
|
/dataset.py
|
#!/usr/bin/env python3
# Author: A. Besir Kurtulmus
from Algorithmia import client
import pandas as pd
class Dataset(object):
    """Base wrapper around a dataset.

    Subclasses are expected to populate `data_set` with a pandas DataFrame.
    """
    def __init__(self, data_world_api_key=None):
        # The key is only consumed by subclasses that fetch remote data.
        self.data_world_api_key = data_world_api_key
        self.data_set = None

    def get_dataset(self):
        # Override method after inheritance
        return self.data_set
class SentimentDataset(Dataset):
    """Dataset specialized for sentiment analysis.

    Subclasses populate the positive/neutral/negative/compound views.
    """
    def __init__(self, data_world_api_key=None):
        super().__init__(data_world_api_key)
        self.positive_sentiment = None
        self.neutral_sentiment = None
        self.negative_sentiment = None
        self.compound_sentiment = None
    # BUG FIX: the three accessors below were defined without `self`, so any
    # instance call raised TypeError (and `self` inside was an undefined name).
    def get_positive_sentiment(self):
        # Override method after inheritance
        return self.positive_sentiment
    def get_neutral_sentiment(self):
        # Override method after inheritance
        return self.neutral_sentiment
    def get_negative_sentiment(self):
        # Override method after inheritance
        return self.negative_sentiment
class AppleComputersTwitterSentiment(SentimentDataset):
    """Apple-Computers Twitter sentiment dataset fetched from data.world.

    NOTE(review): the constructor downloads the CSV over the network every
    time it runs; there is no caching. Sentiment labels are the strings
    "1" (negative), "3" (neutral) and "5" (positive), per the filters below.
    """
    def __init__(self, data_world_api_key=None):
        super().__init__(data_world_api_key)
        self.data_world_api_key = data_world_api_key
        # Network fetch: loads the raw tweet CSV into a DataFrame.
        self.data_set = pd.read_csv('https://query.data.world/s/0cUqqvqhsPW532T5HZqFYuWuqWp2mS', encoding='ISO-8859-1')
        self.init_lambda_functions()
        self.remove_missing_sentiment()
        self.calculate_compound_sentiment()
        # Materialize the per-class views declared by SentimentDataset.
        self.positive_sentiment = self.get_positive_sentiment()
        self.neutral_sentiment = self.get_neutral_sentiment()
        self.negative_sentiment = self.get_negative_sentiment()
        self.compound_sentiment = self.get_compound_sentiment()
    def init_lambda_functions(self):
        # Row-wise mappings label -> signed confidence: +conf / 0 / -conf.
        self.lambda_positive_compound_sentiment_confidence = lambda x: x["sentiment:confidence"]
        self.lambda_neutral_compound_sentiment_confidence = lambda x: 0
        self.lambda_negative_compound_sentiment_confidence = lambda x: -1*x["sentiment:confidence"]
    def remove_missing_sentiment(self):
        # Remove any tweet that does not contain information about sentiment
        self.data_set = self.data_set[
            (self.data_set["sentiment"] == "1") |
            (self.data_set["sentiment"] == "3") |
            (self.data_set["sentiment"] == "5")
        ]
    def calculate_compound_sentiment(self):
        # The lambdas are applied over the WHOLE frame and then assigned
        # through a boolean .loc mask; pandas aligns on the index, so only
        # the masked rows receive values — presumably intentional, but the
        # full-frame apply is redundant work. TODO confirm.
        self.data_set.loc[(self.data_set["sentiment"] == "5"), "sentiment:compound_confidence"] = \
            self.data_set.apply(self.lambda_positive_compound_sentiment_confidence, axis=1)
        self.data_set.loc[(self.data_set["sentiment"] == "3"), "sentiment:compound_confidence"] = \
            self.data_set.apply(self.lambda_neutral_compound_sentiment_confidence, axis=1)
        self.data_set.loc[(self.data_set["sentiment"] == "1"), "sentiment:compound_confidence"] = \
            self.data_set.apply(self.lambda_negative_compound_sentiment_confidence, axis=1)
    def get_positive_sentiment(self):
        # Tweets labeled "5" with their confidence.
        return self.data_set[(self.data_set["sentiment"] == "5")][["text", "sentiment:confidence"]]
    def get_neutral_sentiment(self):
        # Tweets labeled "3" with their confidence.
        return self.data_set[(self.data_set["sentiment"] == "3")][["text", "sentiment:confidence"]]
    def get_negative_sentiment(self):
        # Tweets labeled "1" with their confidence.
        return self.data_set[(self.data_set["sentiment"] == "1")][["text", "sentiment:confidence"]]
    def get_compound_sentiment(self):
        # All retained tweets with the signed compound confidence.
        return self.data_set[["text", "sentiment:compound_confidence"]]
    def get_dataset(self):
        return self.data_set
|
{"/tests/test_sentiment_algorithmia.py": ["/algo.py", "/glue.py"], "/algo.py": ["/algo_type.py"], "/nau.py": ["/metrics.py"], "/tests/test_sentiment_dataset.py": ["/dataset.py"], "/glue.py": ["/algo_type.py"]}
|
7,729
|
algorithmiaio/nautilus
|
refs/heads/master
|
/tests/test_competition.py
|
#!/usr/bin/env python3
# Author: A. Besir Kurtulmus
# Comparing sentiment analysis algorithms unit-test
# Dataset from: http://ai.stanford.edu/~amaas/data/sentiment/
# To run this test, run the following in the project root directory:
# python -m pytest tests/test_sentiment_competition.py
|
{"/tests/test_sentiment_algorithmia.py": ["/algo.py", "/glue.py"], "/algo.py": ["/algo_type.py"], "/nau.py": ["/metrics.py"], "/tests/test_sentiment_dataset.py": ["/dataset.py"], "/glue.py": ["/algo_type.py"]}
|
7,730
|
algorithmiaio/nautilus
|
refs/heads/master
|
/tests/test_sentiment_algorithmia.py
|
#!/usr/bin/env python3
# Author: A. Besir Kurtulmus
# Comparing sentiment analysis algorithms unit-test
# Dataset from: http://ai.stanford.edu/~amaas/data/sentiment/
# To run this test, run the following in the project root directory:
# python -m pytest tests/test_sentiment_algorithmia.py --algorithmia_api_key=simXXXXXXXXXX
from algo import AlgorithmiaAlgorithm
from glue import AlgorithmiaNlpSentimentAnalysis, AlgorithmiaNlpSocialSentimentAnalysis, \
AlgorithmiaMtmanSentimentAnalysis
def test_define_algorithmia_algorithms1(algorithmia_api_key):
    """nlp/SentimentAnalysis returns the known compound score for a fixed sentence."""
    algo_name = "nlp/SentimentAnalysis/1.0.4"
    algo_type = "classification"
    glue = AlgorithmiaNlpSentimentAnalysis()
    sentence = "I like trains."
    expected_compound = 0.3612
    algorithm = AlgorithmiaAlgorithm(api_key=algorithmia_api_key,
        algo_name=algo_name, algo_type=algo_type, glue=glue)
    algorithm.call(sentence)
    assert algorithm.result["compound"] == expected_compound
def test_define_algorithmia_algorithms2(algorithmia_api_key):
    """nlp/SocialSentimentAnalysis returns all four known channel scores."""
    algo_name = "nlp/SocialSentimentAnalysis/0.1.4"
    algo_type = "classification"
    glue = AlgorithmiaNlpSocialSentimentAnalysis()
    sentence = "I like trains."
    expected = {
        "positive": 0.714,
        "neutral": 0.286,
        "negative": 0.0,
        "compound": 0.3612,
    }
    algorithm = AlgorithmiaAlgorithm(api_key=algorithmia_api_key,
        algo_name=algo_name, algo_type=algo_type, glue=glue)
    algorithm.call(sentence)
    for channel in ("positive", "neutral", "negative", "compound"):
        assert algorithm.result[channel] == expected[channel]
def test_define_algorithmia_algorithms3(algorithmia_api_key):
    """mtman/SentimentAnalysis returns the known compound score.

    BUG FIX: this function was named test_define_algorithmia_algorithms2,
    duplicating the previous test's name — the earlier definition was
    shadowed and pytest never collected it. Renamed to ...algorithms3.
    """
    algo1_name = "mtman/SentimentAnalysis/0.1.1"
    algo1_type = "classification"
    algo1_glue = AlgorithmiaMtmanSentimentAnalysis()
    algo1_input = "I like trains."
    algo1_expected_output = 0.0
    sentiment_algo1 = AlgorithmiaAlgorithm(api_key=algorithmia_api_key,
        algo_name=algo1_name, algo_type=algo1_type, glue=algo1_glue)
    sentiment_algo1.call(algo1_input)
    assert sentiment_algo1.result["compound"] == algo1_expected_output
|
{"/tests/test_sentiment_algorithmia.py": ["/algo.py", "/glue.py"], "/algo.py": ["/algo_type.py"], "/nau.py": ["/metrics.py"], "/tests/test_sentiment_dataset.py": ["/dataset.py"], "/glue.py": ["/algo_type.py"]}
|
7,731
|
algorithmiaio/nautilus
|
refs/heads/master
|
/conftest.py
|
import pytest
def pytest_addoption(parser):
    # Register a CLI option so tests receive the Algorithmia API key:
    #   pytest --algorithmia_api_key=simXXXXXXXXXX
    parser.addoption("--algorithmia_api_key", action="store", default="",
        help="algorithmia_api_key: simXXXXXXXXXX")
@pytest.fixture
def algorithmia_api_key(request):
    # Expose the --algorithmia_api_key CLI value to tests as a fixture.
    return request.config.getoption("--algorithmia_api_key")
|
{"/tests/test_sentiment_algorithmia.py": ["/algo.py", "/glue.py"], "/algo.py": ["/algo_type.py"], "/nau.py": ["/metrics.py"], "/tests/test_sentiment_dataset.py": ["/dataset.py"], "/glue.py": ["/algo_type.py"]}
|
7,732
|
algorithmiaio/nautilus
|
refs/heads/master
|
/metrics.py
|
#!/usr/bin/env python3
# Author: A. Besir Kurtulmus
import numpy as np
|
{"/tests/test_sentiment_algorithmia.py": ["/algo.py", "/glue.py"], "/algo.py": ["/algo_type.py"], "/nau.py": ["/metrics.py"], "/tests/test_sentiment_dataset.py": ["/dataset.py"], "/glue.py": ["/algo_type.py"]}
|
7,733
|
algorithmiaio/nautilus
|
refs/heads/master
|
/algo_type.py
|
#!/usr/bin/env python3
# Author: A. Besir Kurtulmus
class AlgoTypes(object):
    """Registry of the algorithm categories supported by the framework."""
    def __init__(self):
        # Closed set of supported algorithm type names.
        self.types = ["classification", "regression", "multilabel"]

    def valid_type(self, algo_type):
        """Return True when *algo_type* is a supported type.

        (Membership already yields a bool — replaces the if/else that
        returned True/False explicitly.)
        """
        return algo_type in self.types

    def get_types(self):
        """Return the list of supported type names."""
        return self.types
|
{"/tests/test_sentiment_algorithmia.py": ["/algo.py", "/glue.py"], "/algo.py": ["/algo_type.py"], "/nau.py": ["/metrics.py"], "/tests/test_sentiment_dataset.py": ["/dataset.py"], "/glue.py": ["/algo_type.py"]}
|
7,734
|
algorithmiaio/nautilus
|
refs/heads/master
|
/visualize.py
|
#!/usr/bin/env python3
# Author: A. Besir Kurtulmus
|
{"/tests/test_sentiment_algorithmia.py": ["/algo.py", "/glue.py"], "/algo.py": ["/algo_type.py"], "/nau.py": ["/metrics.py"], "/tests/test_sentiment_dataset.py": ["/dataset.py"], "/glue.py": ["/algo_type.py"]}
|
7,735
|
algorithmiaio/nautilus
|
refs/heads/master
|
/algo.py
|
#!/usr/bin/env python3
# Author: A. Besir Kurtulmus
from Algorithmia import client
from algo_type import AlgoTypes
class Algorithm(object):
    '''
    Algorithm is defined as an API endpoint that has a certain function.
    Does not only refer to an Algorithmia algorithm.
    '''
    def __init__(self, api_key, algo_name, algo_type, glue):
        """Validate the type and glue; subclasses build the actual client.

        Raises Exception when *algo_type* is unsupported or *glue* is None.
        """
        self.types = AlgoTypes()
        self.type = algo_type
        if not self.types.valid_type(self.type):
            raise Exception("Algorithm is not a valid type.")
        # `glue is None` replaces the roundabout isinstance(glue, type(None)).
        if glue is None:
            raise Exception("Please provide a glue for your algorithm.")
        self.glue = glue
        self.client = None
        self.algo = None
        self.metadata = None
        self.result = None

    def call(self, user_input):
        """No-op base implementation; subclasses run the algorithm.

        Parameter renamed from `input` (which shadowed the builtin) to match
        the AlgorithmiaAlgorithm override's signature.
        """
        return self.result
class AlgorithmiaAlgorithm(Algorithm):
    '''
    An Algorithmia algorithm.
    '''
    def __init__(self, api_key, algo_name, algo_type, glue):
        super().__init__(api_key, algo_name, algo_type, glue)
        # Build the Algorithmia client and resolve the remote algorithm handle.
        self.client = client(api_key)
        self.algo = self.client.algo(algo_name)

    def call(self, user_input):
        """Pipe *user_input* through the remote algorithm via the glue adaptor."""
        response = self.algo.pipe(self.glue.process_input(user_input))
        self.metadata = response.metadata
        self.result = self.glue.process_output(response.result)
        return self.result
|
{"/tests/test_sentiment_algorithmia.py": ["/algo.py", "/glue.py"], "/algo.py": ["/algo_type.py"], "/nau.py": ["/metrics.py"], "/tests/test_sentiment_dataset.py": ["/dataset.py"], "/glue.py": ["/algo_type.py"]}
|
7,736
|
algorithmiaio/nautilus
|
refs/heads/master
|
/nau.py
|
#!/usr/bin/env python3
# Author: A. Besir Kurtulmus
# Collects metrics data for getting algorithm performance
from metrics import *
class Competition(object):
    """Placeholder for an algorithm competition (not yet implemented).

    BUG FIX: the original __init__ body was `continue`, which is a
    SyntaxError outside a loop and made this module unimportable;
    `pass` is the correct no-op.
    """
    def __init__(self):
        pass
|
{"/tests/test_sentiment_algorithmia.py": ["/algo.py", "/glue.py"], "/algo.py": ["/algo_type.py"], "/nau.py": ["/metrics.py"], "/tests/test_sentiment_dataset.py": ["/dataset.py"], "/glue.py": ["/algo_type.py"]}
|
7,737
|
algorithmiaio/nautilus
|
refs/heads/master
|
/tests/test_sentiment_dataset.py
|
#!/usr/bin/env python3
# Author: A. Besir Kurtulmus
# Comparing sentiment analysis algorithms unit-test
# Dataset from: http://ai.stanford.edu/~amaas/data/sentiment/
# To run this test, run the following in the project root directory:
# python -m pytest tests/test_sentiment_dataset.py
from dataset import AppleComputersTwitterSentiment
# NOTE(review): the dataset is downloaded over the network at import time,
# so every pytest collection hits data.world — consider a fixture.
ds = AppleComputersTwitterSentiment()
def test_dataset_downloaded():
    # Constructor produced an object at all.
    assert not isinstance(ds, type(None))
def test_verify_positive_sentiment_values():
    # There should only be 423 positive tweets
    assert len(ds.get_positive_sentiment()) == 423
def test_verify_neutral_sentiment_values():
    # There should only be 2162 neutral tweets
    assert len(ds.get_neutral_sentiment()) == 2162
def test_verify_negative_sentiment_values():
    # There should only be 1219 negative tweets
    assert len(ds.get_negative_sentiment()) == 1219
def test_verify_compound_sentiment_values():
    # There should be 3804 compound tweets
    assert len(ds.get_compound_sentiment()) == 3804
def test_verify_compound_scores():
    # Make sure that compound scores are between -1 and 1
    assert min(ds.get_compound_sentiment()["sentiment:compound_confidence"]) >= -1
    assert max(ds.get_compound_sentiment()["sentiment:compound_confidence"]) <= 1
|
{"/tests/test_sentiment_algorithmia.py": ["/algo.py", "/glue.py"], "/algo.py": ["/algo_type.py"], "/nau.py": ["/metrics.py"], "/tests/test_sentiment_dataset.py": ["/dataset.py"], "/glue.py": ["/algo_type.py"]}
|
7,738
|
algorithmiaio/nautilus
|
refs/heads/master
|
/glue.py
|
#!/usr/bin/env python3
# Author: A. Besir Kurtulmus
from algo_type import AlgoTypes
class Glue(object):
    '''
    I/O adaptor between user-facing values and a concrete algorithm's
    request/response format, giving metrics a common calling convention.
    Three glue types are supported: classification, regression, multilabel.
    '''
    def __init__(self, algo_type):
        self.input_structure = None
        self.output_structure = None
        self.types = AlgoTypes()
        self.type = algo_type
        if not self.types.valid_type(self.type):
            raise Exception("Algorithm is not a valid type.")

    def process_input(self, user_input):
        # Identity mapping by default; subclasses cast user input to algo input.
        return user_input

    def process_output(self, algo_output):
        # Identity mapping by default; subclasses cast algo output to user output.
        return algo_output
class SentimentAnalysisGlue(Glue):
    '''
    Glue for sentiment analysis: holds a sentiment mapping with positive,
    neutral, negative and compound float channels (all None until a call).
    '''
    def __init__(self):
        super().__init__("classification")
        # Same keys, same insertion order as assigning each key to None.
        self.sentiment = dict.fromkeys(
            ("positive", "neutral", "negative", "compound"))
class AlgorithmiaNlpSentimentAnalysis(SentimentAnalysisGlue):
    """Adaptor for the nlp/SentimentAnalysis Algorithmia algorithm."""
    def __init__(self):
        super().__init__()
        # Declared request/response shapes of the remote algorithm.
        self.input_structure = {"document": str}
        self.output_structure = {"sentiment": float, "document": str}

    def process_input(self, user_input):
        """Wrap the user's text into the algorithm's request payload."""
        if not isinstance(user_input, str):
            raise Exception("Input must be a string.")
        return {"document": user_input}

    def process_output(self, algo_output):
        """Extract the compound score; the other channels stay None.

        (Removed an unused local `user_output` dict.)
        """
        self.sentiment["compound"] = algo_output[0]["sentiment"]
        return self.sentiment
class AlgorithmiaNlpSocialSentimentAnalysis(SentimentAnalysisGlue):
    """Adaptor for nlp/SocialSentimentAnalysis (all four sentiment channels)."""
    def __init__(self):
        super().__init__()
        # Declared request/response shapes of the remote algorithm.
        self.input_structure = {"sentence": str}
        self.output_structure = {
            "sentiment": {
                "positive": float,
                "neutral": float,
                "negative": float,
                "compound": float,
            },
            "sentence": str,
        }

    def process_input(self, user_input):
        """Wrap the sentence into the algorithm's request payload."""
        if not isinstance(user_input, str):
            raise Exception("Input must be a string.")
        return {"sentence": user_input}

    def process_output(self, algo_output):
        """Copy all four channels from the first result entry.

        (Removed an unused local `user_output` dict.)
        """
        for channel in ("positive", "neutral", "negative", "compound"):
            self.sentiment[channel] = algo_output[0][channel]
        return self.sentiment
class AlgorithmiaMtmanSentimentAnalysis(SentimentAnalysisGlue):
    """Adaptor for mtman/SentimentAnalysis: plain string in, float score out."""
    def __init__(self):
        super().__init__()
        self.input_structure = str
        self.output_structure = float

    def process_input(self, user_input):
        # The remote algorithm takes the raw sentence directly.
        if not isinstance(user_input, str):
            raise Exception("Input must be a string.")
        return user_input

    def process_output(self, algo_output):
        # Only the compound channel is populated by this algorithm.
        self.sentiment["compound"] = float(algo_output)
        return self.sentiment
|
{"/tests/test_sentiment_algorithmia.py": ["/algo.py", "/glue.py"], "/algo.py": ["/algo_type.py"], "/nau.py": ["/metrics.py"], "/tests/test_sentiment_dataset.py": ["/dataset.py"], "/glue.py": ["/algo_type.py"]}
|
7,753
|
phycomlab/Protein
|
refs/heads/main
|
/UGW_examples.py
|
import numpy as np
import math
import torch
import matplotlib.pyplot as plt
import ot
import ot.plot
from scipy.io import loadmat
from scipy.spatial import distance
from scipy.stats import wishart
from solver.unbalanced_sinkhorn import sinkhorn_log
from solver.tlb_kl_sinkhorn_solver import TLBSinkhornSolver
###############################################################################
# Function Declarations
###############################################################################
# Balanced Gromov-Wasserstein (Solomon et al.)
def GromovWasserstein(mu0, mu1, D0, D1, ent, eta, thresh=1e-7, max_iter=None):
    """Balanced entropic Gromov-Wasserstein fixed-point solver (Solomon et al.).

    Parameters
    ----------
    mu0, mu1 : 1-D arrays of marginal weights for the two metric spaces.
    D0, D1 : square pairwise-distance matrices of the two spaces.
    ent : entropic regularization strength.
    eta : relaxation rate mixing the new kernel with the previous iterate.
    thresh : convergence threshold on the Frobenius move of the iterate.
    max_iter : optional cap on outer iterations; None (default) reproduces the
        original unbounded loop, which could spin forever on non-convergence.

    Returns the converged (scaled) coupling `gamma`.
    """
    n0 = mu0.size
    n1 = mu1.size
    gamma = np.ones((n0, n1))
    iteration = 0
    while True:
        # Gibbs kernel of the current alignment cost.
        K = np.exp((D0*mu0).dot(gamma*mu1).dot(D1)/ent)
        gamma_new = SinkhornProjection(np.power(K, eta)*np.power(gamma, 1-eta), mu0, mu1, thresh)
        diff = np.linalg.norm(gamma - gamma_new)
        if diff < thresh:
            return gamma
        gamma = gamma_new
        iteration += 1
        if max_iter is not None and iteration >= max_iter:
            # Generalization: bail out after max_iter sweeps with the current
            # iterate instead of looping indefinitely.
            return gamma
# Sinkhorn subroutine used in balanced Gromov-Wasserstein
def SinkhornProjection(K, mu0, mu1, thresh=1e-7):
    """Sinkhorn scaling of kernel K onto the marginals mu0/mu1 (balanced case).

    Iterates the alternating scaling updates until both scaling vectors stop
    moving (Euclidean norm below *thresh*), then returns the scaled kernel
    built from the PRE-update scalings, matching the original behavior.
    """
    u = np.ones(mu0.size)
    v = np.ones(mu1.size)
    while True:
        u_next = np.reciprocal(K.dot(v * mu1))
        v_next = np.reciprocal(np.transpose(K).dot(u_next * mu0))
        delta = max(np.linalg.norm(u - u_next), np.linalg.norm(v - v_next))
        if delta < thresh:
            return ((K * v).T * u).T
        u = u_next
        v = v_next
# Unbalanced Gromov-Wasserstein (Solomon et al. with added KL regularization)
def UnbalancedGromovWasserstein(mu0, mu1, D0, D1, ent, lambda0, lambda1, eta, thresh=1e-7, max_iter=None):
    """Unbalanced entropic GW: Solomon et al. iteration with KL-relaxed marginals.

    lambda0/lambda1 control how strictly the marginals mu0/mu1 are enforced.
    max_iter (new, default None = unlimited) caps the outer loop so the solver
    cannot spin forever on non-convergence.
    Fix: removed a leftover debug `print(diff)` that spammed stdout each sweep.
    """
    n0 = mu0.size
    n1 = mu1.size
    gamma = np.ones((n0, n1))
    iteration = 0
    while True:
        # Gibbs kernel of the current alignment cost.
        K = np.exp((D0*mu0).dot(gamma*mu1).dot(D1)/ent)
        gamma_new = UnbalancedSinkhornProjection(np.power(K, eta)*np.power(gamma, 1-eta),
                                                 mu0, mu1, ent, lambda0, lambda1, thresh)
        diff = np.linalg.norm(gamma - gamma_new)
        if diff < thresh:
            return gamma
        gamma = gamma_new
        iteration += 1
        if max_iter is not None and iteration >= max_iter:
            return gamma
# Sinkhorn subroutine used in unbalanced Gromov-Wasserstein
def UnbalancedSinkhornProjection(K, mu0, mu1, ent, lambda0, lambda1, thresh=1e-7):
    """KL-relaxed Sinkhorn scaling used by the unbalanced GW solver.

    The usual Sinkhorn updates are damped by the exponents
    lambda / (lambda + ent * mu), which soften the marginal constraints.
    Returns the scaled kernel built from the PRE-update scalings, matching
    the original behavior.
    """
    u = np.ones(mu0.size)
    v = np.ones(mu1.size)
    # Damping exponents are loop-invariant; compute them once.
    exp0 = lambda0 * np.reciprocal(lambda0 + ent * mu0)
    exp1 = lambda1 * np.reciprocal(lambda1 + ent * mu1)
    while True:
        u_next = np.power(np.reciprocal(K.dot(v * mu1)), exp0)
        v_next = np.power(np.reciprocal(np.transpose(K).dot(u_next * mu0)), exp1)
        delta = max(np.linalg.norm(u - u_next), np.linalg.norm(v - v_next))
        if delta < thresh:
            return ((K * v).T * u).T
        u = u_next
        v = v_next
def readProteinCodes(file):
    """Parse an open file of `domain,cathcode` lines into [domain, code] pairs.

    The first line is assumed to be a header and is discarded.
    """
    next(file)  # skip header line
    pairs = []
    for record in file:
        fields = record.split(',')
        pairs.append([fields[0], fields[1].strip('\n')])
    return pairs
def makeCodeDicts(domains, cath_codes):
    """Build forward/backward lookup dicts between domains and CATH codes.

    *domains* and *cath_codes* are parallel sequences whose entries are
    1-element containers (e.g. rows of a numpy object array); element [0]
    holds the string value.

    Returns (domainToCath, cathToDomain), each mapping a key to a list of
    (row_index, other_key) tuples in input order.
    """
    n = len(domains)
    domainToCath = {}
    cathToDomain = {}
    for i in range(n):
        domain = domains[i][0]
        cath = cath_codes[i][0]
        # setdefault replaces the original if/else insert-or-append pattern.
        domainToCath.setdefault(domain, []).append((i, cath))
        cathToDomain.setdefault(cath, []).append((i, domain))
    return domainToCath, cathToDomain
###############################################################################
# Load in protein atom positions/distances
###############################################################################
#file = open('cathcodes.txt', 'r')
#codes = readProteinCodes(file)
# Get protein geodseic distances
mat0 = loadmat('ProteinData0.mat')
mat1 = loadmat('ProteinData1.mat')
mat2 = loadmat('ProteinData2.mat')
mat3 = loadmat('ProteinData3.mat')
mat4 = loadmat('ProteinData4.mat')
# Each .mat shard holds rows of (geodesic-distance matrix, domain, CATH code).
protein_data = np.concatenate([mat0['X_0'], mat1['X_1'], mat2['X_2'], mat3['X_3'], mat4['X_4']])
geodists = protein_data[:, 0]
domains = protein_data[:, 1]
cath_codes = protein_data[:, 2]
# make dictionary of CATH code to domain
domainToCath, cathToDomain = makeCodeDicts(protein_data[:, 1], protein_data[:, 2])
# choose two proteins to compare
# very similar
# NOTE(review): ind1 == ind2 compares a protein with itself — presumably a
# sanity check that the coupling is near-diagonal; confirm intent.
ind1 = 16
ind2 = 16
# very different
#ind1 = 2
#ind2 = 166
dom1 = domains[ind1][0]
dom2 = domains[ind2][0]
cath1 = cath_codes[ind1][0]
cath2 = cath_codes[ind2][0]
D1 = geodists[ind1]
D2 = geodists[ind2]
# normalize distance matrices
# Shared scale: geometric mean of the two RMS distances, so both matrices are
# divided by the same constant.
const = np.sqrt(np.sqrt(np.mean(D1**2)) * np.sqrt(np.mean(D2**2)))
D1 = D1 / const
D2 = D2 / const
plt.figure(1)
plt.title('Protein 1 Geodesic Distances \n (Domain: ' + dom1 + ', CATH: ' + cath1 + ')', fontweight='bold')
plt.pcolormesh(geodists[ind1])
plt.colorbar()
plt.figure(2)
plt.title('Protein 2 Geodesic Distances \n (Domain: ' + dom2 + ', CATH: ' + cath2 + ')', fontweight='bold')
plt.pcolormesh(geodists[ind2])
plt.colorbar()
# number of atoms in each protein
num_atoms1 = D1.shape[0]
num_atoms2 = D2.shape[0]
###############################################################################
# Run Unbalanced Gromov-Wasserstein
###############################################################################
# empirical distributions over atoms (uniform weights)
mu1 = np.ones(num_atoms1) / num_atoms1
mu2 = np.ones(num_atoms2) / num_atoms2
# put higher weight on pairings which you want to discourage (not a necessary step)
#mask = np.zeros((num_atoms1, num_atoms2))
#plt.figure(2)
#plt.title('Mask', fontweight='bold')
#plt.pcolormesh(np.log(mask))
#plt.colorbar()
# Parameters
rho = 4e-2 # Peyre et al. mass constraint parameter
ent = 8e-4 # entropic regularization, controls width of coupling band
#eta = 1e-1 # learning rate of Solomon et al. GW implementation above
# Apply one of the GW methods below
# POT GW
#coupling = ot.gromov.gromov_wasserstein(D1, D2, mu1, mu2, 'square_loss')
# POT entropic GW
# Active method: POT's entropic Gromov-Wasserstein (the alternatives below
# are kept, commented, for experimentation).
coupling = ot.gromov.entropic_gromov_wasserstein(D1, D2, mu1, mu2, 'square_loss', ent)
# POT partial GW (only transfer a fixed m amount of mass)
#coupling = ot.partial.partial_gromov_wasserstein(D1, D2, mu1, mu2, m=0.5)
# Solomon et al. GW
#gamma = GromovWasserstein(mu1, mu2, D1, D2, ent, eta, thresh=1e-4)
#coupling = gamma * np.outer(mu1, mu2)
# Solomon et al. unbalanced GW
#lambda1 = 0.1
#lambda2 = 0.1
#gamma = UnbalancedGromovWasserstein(mu1, mu2, D1, D2, ent, lambda1, lambda2, eta, thresh=1e-6)
#coupling = gamma * np.outer(mu1, mu2)
# Peyre et al. unbalanced GW
#print('UGW')
#solver = TLBSinkhornSolver(nits=50, nits_sinkhorn=1000, tol=1e-10, tol_sinkhorn=1e-7)
#coupling, gamma = solver.tlb_sinkhorn(torch.from_numpy(mu1), torch.from_numpy(D1),
#                                      torch.from_numpy(mu2), torch.from_numpy(D2), rho, ent)
#coupling = coupling.numpy()
# post-process the coupling
#thresh = np.max(coupling)
#coupling = coupling / np.sum(coupling) # normalize
# plot inferred coupling
plt.figure(3)
plt.title('Inferred Coupling Between \n Protein 1 (Domain: ' + dom1 + ', CATH: ' + cath1 + ') & \n Protein 2 (Domain: ' + dom2 + ', CATH: ' + cath2 + ')\n', fontweight='bold')
plt.pcolormesh(coupling)#, vmin = 0, vmax = thresh)
plt.xlabel('atoms in protein 2')
plt.ylabel('atoms in protein 1')
plt.colorbar()
#plt.savefig('inferred_coupling.png', dpi=300)
plt.show()
# mass contained in coupling
mass = np.sum(coupling)
print('Coupling Mass: ' + str(mass))
# compute GW cost for inferred coupling
constD, hD1, hD2 = ot.gromov.init_matrix(D1, D2, mu1, mu2, loss_fun='square_loss')
cost = ot.gromov.gwloss(constD, hD1, hD2, coupling)
# Normalize by squared mass so balanced and unbalanced runs are comparable.
cost = cost / (mass**2)
print('Adjusted GW Cost: ' + str(cost))
|
{"/UGW_examples.py": ["/solver/unbalanced_sinkhorn.py"]}
|
7,754
|
phycomlab/Protein
|
refs/heads/main
|
/UGW_protein_paramsearch.py
|
import numpy as np
import pandas as pd
import math
import torch
import matplotlib.pyplot as plt
import ot
import ot.plot
import os.path
from os import path
from scipy.io import loadmat
from scipy.special import comb
import itertools
from multiprocessing import Pool, RawArray
from functools import partial
from solver.tlb_kl_sinkhorn_solver import TLBSinkhornSolver
###############################################################################
# Function Declarations
###############################################################################
def readProteinCodes(file):
    """Read `domain,cathcode` rows from an open file, skipping the header.

    Returns a list of [domain, cathcode] string pairs.
    """
    next(file)  # drop the header line
    result = []
    for line in file:
        domain, code = line.split(',')[0], line.split(',')[1].strip('\n')
        result.append([domain, code])
    return result
###############################################################################
# Run Unbalanced Gromov-Wasserstein Process
###############################################################################
# Module-level dictionary shared with pool workers via the initializer.
var_dict = {}
def init_worker(rhos, ents, solver, geodists):
    """Pool initializer: stash parameters and distance buffers in var_dict.

    Each geodesic-distance buffer is stored under the key 'D<i>' so workers
    can look it up by protein index.
    """
    var_dict['rhos'] = rhos
    var_dict['ents'] = ents
    var_dict['solver'] = solver
    for idx, dist_buffer in enumerate(geodists):
        var_dict['D' + str(idx)] = dist_buffer
def runGW(idx):
    """Worker task: run unbalanced GW for one protein pair / parameter combo.

    *idx* is (protein_index_1, protein_index_2, rho_index, ent_index).
    Returns (idx, (gw_cost, coupling_mass)); (inf, inf) signals failure.

    Fix: the bare `except:` around the solver call is narrowed to
    `except Exception` so KeyboardInterrupt/SystemExit still propagate out
    of worker processes; dead commented-out solver calls removed.
    """
    ind1, ind2, i, j = idx
    # Pull shared state installed by init_worker.
    rhos = var_dict['rhos']
    ents = var_dict['ents']
    solver = var_dict['solver']
    rho = rhos[i]
    ent = ents[j]
    D1 = np.frombuffer(var_dict['D' + str(ind1)], dtype=np.float64)
    D2 = np.frombuffer(var_dict['D' + str(ind2)], dtype=np.float64)
    # Buffers hold flattened square matrices; recover the side lengths.
    num_atoms1 = int(math.sqrt(D1.size))
    num_atoms2 = int(math.sqrt(D2.size))
    D1 = np.reshape(D1, [num_atoms1, num_atoms1])
    D2 = np.reshape(D2, [num_atoms2, num_atoms2])
    # Uniform empirical distributions over atoms.
    mu1 = np.ones(num_atoms1) / num_atoms1
    mu2 = np.ones(num_atoms2) / num_atoms2
    # Peyre et al. unbalanced GW
    try:
        coupling, _ = solver.tlb_sinkhorn(torch.from_numpy(mu1), torch.from_numpy(D1),
                                          torch.from_numpy(mu2), torch.from_numpy(D2),
                                          rho, ent)
        coupling = coupling.numpy()
    except Exception:
        return idx, (math.inf, math.inf)
    mass = np.sum(coupling)
    # A (near-)zero-mass coupling means the solver collapsed; treat as failure.
    if mass <= 1e-15:
        return idx, (math.inf, math.inf)
    # compute GW cost for inferred coupling
    constD, hD1, hD2 = ot.gromov.init_matrix(D1, D2, mu1, mu2, loss_fun='square_loss')
    GWdist = ot.gromov.gwloss(constD, hD1, hD2, coupling)
    return idx, (GWdist, mass)
###############################################################################
# Create Many UGW Programs for Different Protein Pairs/Parameter Combinations
###############################################################################
if __name__ == '__main__':
    # Get protein geodesic distances (object arrays split across five .mat shards)
    mat0 = loadmat('ProteinData0.mat')
    mat1 = loadmat('ProteinData1.mat')
    mat2 = loadmat('ProteinData2.mat')
    mat3 = loadmat('ProteinData3.mat')
    mat4 = loadmat('ProteinData4.mat')
    protein_data = np.concatenate([mat0['X_0'], mat1['X_1'], mat2['X_2'], mat3['X_3'], mat4['X_4']])
    #protein_data = loadmat('ProteinData_CA.mat')['X1']
    # columns: 0 = geodesic distance matrix, 1 = domain name, 2 = CATH code
    geodists = protein_data[:, 0]
    domains = protein_data[:, 1]
    cath_codes = protein_data[:, 2]
    numproteins = domains.size
    # compute max value (diameter) and Frobenius norm of each distance matrix
    diams = np.zeros(numproteins)
    norms = np.zeros(numproteins)
    for i in range(numproteins):
        diams[i] = np.max(geodists[i])
        norms[i] = np.linalg.norm(geodists[i], 'fro')
    # Initialize the UGW solver
    nits = 50
    nits_sinkhorn = 1000
    tol = 1e-10
    tol_sinkhorn = 1e-7
    maxtime = 1200
    solver = TLBSinkhornSolver(nits, nits_sinkhorn, tol, tol_sinkhorn, maxtime)
    # parameter grids for the marginal penalty (rho) and entropy (ent)
    rhos = np.array([5e-4]) #np.array([1e-4, 2.5e-4, 5e-4, 7.5e-4, 1e-3, 2.5e-3, 5e-3])
    numrhos = rhos.size
    ents = np.array([4e-5]) #np.array([1e-5, 2.5e-5, 5e-5, 7.5e-5, 1e-4, 2.5e-4, 5e-4])
    numents = ents.size
    # result tensors indexed [protein1, protein2, rho, ent]; NaN = not computed yet
    distMats = np.empty((numproteins, numproteins, numrhos, numents))
    distMats[:] = np.nan
    masses = np.empty((numproteins, numproteins, numrhos, numents))
    masses[:] = np.nan
    expname = 'unbalanced_GW'
    if not path.exists(expname):
        os.makedirs(expname)
    np.savez(expname + '/params.npz', expname, nits=nits, nits_sinkhorn=nits_sinkhorn,
             tol=tol, tol_sinkhorn=tol_sinkhorn, maxtime=maxtime, rhos=rhos, ents=ents)
    outname = expname + '/results.npz'
    #outname = expname + '/results_ac.npz'
    if path.exists(outname):
        # put old file results into new array (lets the run resume after a crash)
        results = np.load(outname)
        distMats = results['distMats']
        masses = results['masses']
    # remove proteins that take too long from the analysis
    protein_inds = np.arange(numproteins)
    bad_inds = np.where(np.isinf(diams))[0]
    protein_inds = np.setdiff1d(protein_inds, bad_inds, True)
    # get all parameter combinations
    all_idxs = [(i, j, k, l) for i, j, k, l in
                itertools.product(protein_inds, protein_inds, range(numrhos), range(numents))]
    # keep only those parameter combinations which have not been tested
    idxs = []
    for idx in all_idxs:
        if np.isnan(distMats[idx]):
            idxs.append(idx)
    # create shared memory for all processes (copied once, read by every worker)
    shared_rhos = RawArray('d', rhos)
    shared_ents = RawArray('d', ents)
    shared_geodists = list()
    for i in range(numproteins):
        shared_geodists.append(RawArray('d', geodists[i].flatten()))
    # initialize pool of processes; maxtasksperchild=1 recycles workers per task
    p = Pool(processes=20, maxtasksperchild=1, initializer=init_worker, initargs=(shared_rhos, shared_ents, solver, shared_geodists))
    print('Num CPUs: ' + str(os.cpu_count()))
    # run the GW function for each process
    freq = 1  # checkpoint frequency (in completed tasks)
    count = 0
    for (idx, res) in p.imap_unordered(partial(runGW), idxs):
        ind1, ind2, i, j = idx
        print(str(count) + ': ' + str(idx))
        # save GW distances between protein pairs
        GWdist, mass = res
        distMats[ind1, ind2, i, j] = GWdist
        masses[ind1, ind2, i, j] = mass
        # save results with a certain frequency
        if count % freq == 0:
            print('saved')
            np.savez(outname, distMats=distMats, masses=masses)
        count += 1
    p.close()
    p.join()
    # save final results
    print('saved')
    np.savez(outname, distMats=distMats, masses=masses)
|
{"/UGW_examples.py": ["/solver/unbalanced_sinkhorn.py"]}
|
7,755
|
phycomlab/Protein
|
refs/heads/main
|
/UGW_pairs.py
|
import numpy as np
import pandas as pd
import math
import torch
import matplotlib.pyplot as plt
import ot
import ot.plot
import os.path
from os import path
from scipy.io import loadmat
from scipy.special import comb
from sklearn import metrics
import timeout_decorator
from solver.tlb_kl_sinkhorn_solver import TLBSinkhornSolver
###############################################################################
# Function Declarations
###############################################################################
# Balanced Gromov-Wasserstein (Solomon et al.)
def GromovWasserstein(mu0, mu1, D0, D1, ent, eta, thresh=1e-7):
    """Entropically regularized (balanced) Gromov-Wasserstein coupling.

    mu0, mu1: marginal weight vectors; D0, D1: intra-space distance
    matrices; ent: entropic regularization strength; eta: interpolation
    exponent between the old coupling and the new kernel; thresh:
    convergence tolerance on the change in the coupling.
    Returns the n0 x n1 coupling matrix.
    NOTE(review): iterates with no cap -- may not terminate for some
    parameter choices; confirm an iteration limit is not needed.
    """
    n0 = mu0.size
    n1 = mu1.size
    gamma = np.ones((n0, n1))
    while True:
        # local cost kernel induced by the current coupling
        K = np.exp((D0*mu0).dot(gamma*mu1).dot(D1)/ent)
        # interpolate old coupling with the kernel, then project onto the marginals
        gamma_new = SinkhornProjection(np.power(K, eta)*np.power(gamma, 1-eta), mu0, mu1, thresh)
        diff = np.linalg.norm(gamma - gamma_new)
        if diff < thresh:
            return gamma
        gamma = gamma_new
# Sinkhorn subroutine used in balanced Gromov-Wasserstein
def SinkhornProjection(K, mu0, mu1, thresh=1e-7):
    """Scale kernel K so its marginals match mu0 and mu1 (balanced Sinkhorn).

    Alternates updates of the two scaling vectors until both move by less
    than thresh (Euclidean norm), then returns the rescaled coupling built
    from the last accepted iterates.
    """
    u = np.ones(mu0.size)
    v = np.ones(mu1.size)
    while True:
        u_next = np.reciprocal(K.dot(v * mu1))
        v_next = np.reciprocal(K.T.dot(u_next * mu0))
        if max(np.linalg.norm(u - u_next), np.linalg.norm(v - v_next)) < thresh:
            return ((K * v).T * u).T
        u = u_next
        v = v_next
# Unbalanced Gromov-Wasserstein (Solomon et al. with added KL regularization)
def UnbalancedGromovWasserstein(mu0, mu1, D0, D1, ent, lambda0, lambda1, eta, thresh=1e-7):
    """Entropic unbalanced Gromov-Wasserstein coupling.

    Same mirror-descent scheme as GromovWasserstein, but the inner
    projection relaxes the marginal constraints via KL penalties
    lambda0 / lambda1, so mass creation and destruction are allowed.
    Returns the n0 x n1 coupling matrix.
    """
    n0 = mu0.size
    n1 = mu1.size
    gamma = np.ones((n0, n1))
    while True:
        K = np.exp((D0*mu0).dot(gamma*mu1).dot(D1)/ent)
        gamma_new = UnbalancedSinkhornProjection(np.power(K, eta)*np.power(gamma, 1-eta),
                                                 mu0, mu1, ent, lambda0, lambda1, thresh)
        # fix: removed a leftover per-iteration debug print of the residual
        diff = np.linalg.norm(gamma - gamma_new)
        if diff < thresh:
            return gamma
        gamma = gamma_new
# Sinkhorn subroutine used in unbalanced Gromov-Wasserstein
def UnbalancedSinkhornProjection(K, mu0, mu1, ent, lambda0, lambda1, thresh=1e-7):
    """KL-relaxed Sinkhorn scaling of K (unbalanced marginal constraints).

    The usual Sinkhorn updates are damped by exponents derived from the
    marginal penalties lambda0 / lambda1 and the entropy ent.  Iterates
    until both scaling vectors change by less than thresh.
    """
    # damping exponents are loop-invariant, so compute them once up front
    damp0 = lambda0 * np.reciprocal(lambda0 + ent * mu0)
    damp1 = lambda1 * np.reciprocal(lambda1 + ent * mu1)
    u = np.ones(mu0.size)
    v = np.ones(mu1.size)
    while True:
        u_next = np.power(np.reciprocal(K.dot(v * mu1)), damp0)
        v_next = np.power(np.reciprocal(K.T.dot(u_next * mu0)), damp1)
        if max(np.linalg.norm(u - u_next), np.linalg.norm(v - v_next)) < thresh:
            return ((K * v).T * u).T
        u = u_next
        v = v_next
def readProteinCodes(file):
    """Read (domain, CATH code) pairs from a comma-separated file.

    The first line is treated as a header and skipped; each remaining
    row contributes a [domain, cathcode] pair (newline stripped).
    """
    next(file)  # header
    return [[cols[0], cols[1].strip('\n')]
            for cols in (row.split(',') for row in file)]
def makeCodeDicts(domains, cath_codes):
    """Build lookup dicts between protein domains and CATH hierarchy levels.

    domains / cath_codes are parallel sequences whose i-th entries are
    1-element containers holding the domain name and the full CATH code
    ('C.A.T.H').  Returns five dicts:
      domainToCath: domain -> list of (index, full CATH code)
      cToDomain, caToDomain, catToDomain, cathToDomain:
          CATH prefix (C, C.A, C.A.T, C.A.T.H) -> list of (index, domain)
    """
    domainToCath = {}
    cToDomain = {}
    caToDomain = {}
    catToDomain = {}
    cathToDomain = {}
    n = len(domains)
    for i in range(n):
        domain = domains[i][0]
        cath = cath_codes[i][0]
        groups = cath.split('.')
        # setdefault replaces the original five repeated if/else append blocks
        cToDomain.setdefault(groups[0], []).append((i, domain))
        caToDomain.setdefault('.'.join(groups[:2]), []).append((i, domain))
        catToDomain.setdefault('.'.join(groups[:3]), []).append((i, domain))
        cathToDomain.setdefault(cath, []).append((i, domain))
        domainToCath.setdefault(domain, []).append((i, cath))
    return domainToCath, cToDomain, caToDomain, catToDomain, cathToDomain
# remove NaNs rows and columns from a symmetric matrix
def removeNans(mat):
    """Return a NaN-free principal submatrix of mat plus the kept indices.

    Columns are scored by their NaN counts; starting from the highest
    count, indices with fewer NaNs than the current threshold are kept and
    the first submatrix that contains no NaN at all is returned together
    with the surviving index array.
    """
    size = mat.shape[0]
    all_inds = np.arange(size)
    nan_per_col = np.sum(np.isnan(mat), axis=0)
    if np.all(nan_per_col == 0):
        return mat, all_inds
    # candidate thresholds in decreasing order of NaN count
    for threshold in np.flip(np.unique(nan_per_col)):
        keep = all_inds[nan_per_col < threshold]
        candidate = mat[keep][:, keep]
        if np.sum(np.isnan(candidate)) == 0:
            return candidate, keep
# from https://github.com/letiantian/kmedoids
def kMedoids(D, k, tmax=100):
    """Cluster items into k groups by k-medoids on distance matrix D.

    D: (n, n) pairwise distance matrix; k: number of clusters; tmax:
    maximum number of update rounds.  Returns (M, C) where M is the array
    of medoid indices and C maps cluster id -> array of member indices.
    Raises Exception when k exceeds the number of distinct points.
    Randomized: uses np.random.shuffle for initialization.
    """
    # determine dimensions of distance matrix D
    m, n = D.shape
    if k > n:
        raise Exception('too many medoids')
    # find a set of valid initial cluster medoid indices since we
    # can't seed different clusters with two points at the same location
    valid_medoid_inds = set(range(n))
    invalid_medoid_inds = set([])
    rs,cs = np.where(D==0)
    # the rows, cols must be shuffled because we will keep the first duplicate below
    index_shuf = list(range(len(rs)))
    np.random.shuffle(index_shuf)
    rs = rs[index_shuf]
    cs = cs[index_shuf]
    for r,c in zip(rs,cs):
        # if there are two points with a distance of 0...
        # keep the first one for cluster init
        if r < c and r not in invalid_medoid_inds:
            invalid_medoid_inds.add(c)
    valid_medoid_inds = list(valid_medoid_inds - invalid_medoid_inds)
    if k > len(valid_medoid_inds):
        raise Exception('too many medoids (after removing {} duplicate points)'.format(
            len(invalid_medoid_inds)))
    # randomly initialize an array of k medoid indices
    M = np.array(valid_medoid_inds)
    np.random.shuffle(M)
    M = np.sort(M[:k])
    # create a copy of the array of medoid indices
    Mnew = np.copy(M)
    # initialize a dictionary to represent clusters
    C = {}
    for t in range(tmax):
        # determine clusters, i.e. arrays of data indices
        J = np.argmin(D[:,M], axis=1)
        for kappa in range(k):
            C[kappa] = np.where(J==kappa)[0]
        # update cluster medoids
        for kappa in range(k):
            J = np.mean(D[np.ix_(C[kappa],C[kappa])],axis=1)
            j = np.argmin(J)
            Mnew[kappa] = C[kappa][j]
        # NOTE(review): np.sort returns a sorted copy which is discarded here,
        # so Mnew may stay unsorted and the convergence check below can miss
        # an identical medoid set in a different order -- likely intended
        # Mnew.sort(); confirm before changing (upstream repo has the same line).
        np.sort(Mnew)
        # check for convergence
        if np.array_equal(M, Mnew):
            break
        M = np.copy(Mnew)
    else:
        # final update of cluster memberships (only when the loop never broke)
        J = np.argmin(D[:,M], axis=1)
        for kappa in range(k):
            C[kappa] = np.where(J==kappa)[0]
    # return results
    return M, C
def rand_index(clusters, classes):
    """Compute the Rand index between two flat labelings of the same items.

    clusters / classes are integer label arrays of equal length.  Counts
    agreeing and disagreeing item pairs and returns
    (TP + TN) / (TP + FP + FN + TN) in [0, 1].
    """
    pairs = np.c_[(clusters, classes)]
    same_cluster_pairs = comb(np.bincount(clusters), 2).sum()
    same_class_pairs = comb(np.bincount(classes), 2).sum()
    # pairs placed together in both labelings
    tp = sum(comb(np.bincount(pairs[pairs[:, 0] == lbl, 1]), 2).sum()
             for lbl in set(clusters))
    fp = same_cluster_pairs - tp
    fn = same_class_pairs - tp
    tn = comb(len(pairs), 2) - tp - fp - fn
    return (tp + tn) / (tp + fp + fn + tn)
###############################################################################
# Load in protein atom positions/distances
###############################################################################
#file = open('cathcodes.txt', 'r')
#codes = readProteinCodes(file)
# Get protein geodesic distances
#mat0 = loadmat('ProteinData0.mat')
#mat1 = loadmat('ProteinData1.mat')
#mat2 = loadmat('ProteinData2.mat')
#mat3 = loadmat('ProteinData3.mat')
#mat4 = loadmat('ProteinData4.mat')
#protein_data = np.concatenate([mat0['X_0'], mat1['X_1'], mat2['X_2'], mat3['X_3'], mat4['X_4']])
protein_data = loadmat('ProteinData_CA.mat')['X1']
# columns: 0 = geodesic distance matrix, 1 = domain name, 2 = CATH code
geodists = protein_data[:, 0]
domains = protein_data[:, 1]
cath_codes = protein_data[:, 2]
numproteins = domains.size
# compute max value (diameter) and Frobenius norm of each distance matrix
diams = np.zeros(numproteins)
norms = np.zeros(numproteins)
for i in range(numproteins):
    diams[i] = np.max(geodists[i])
    norms[i] = np.linalg.norm(geodists[i], 'fro')
# make dictionary of CATH code to domain
domainToCath, cToDomain, caToDomain, catToDomain, cathToDomain = makeCodeDicts(protein_data[:, 1], protein_data[:, 2])
###############################################################################
# Run Unbalanced Gromov-Wasserstein
###############################################################################
# Parameters
#rho = 1000 # Peyre et al. mass constraint parameter
#ent = 10 # entropic regularization, controls width of coupling band
# choose subset of proteins to compare
inds = np.arange(255)
#solver = TLBSinkhornSolver(nits=500, nits_sinkhorn=1000, gradient=False, tol=1e-3, tol_sinkhorn=1e-5)
# pairwise result matrices; NaN marks pairs not yet computed
costs = np.zeros((numproteins, numproteins))
costs[:] = np.nan
masses = np.zeros((numproteins, numproteins))
masses[:] = np.nan
#outname = 'results/rho=' + str(rho) + '_ent=' + str(ent) + '.npz'
#outname = 'results/rho=' + str(rho) + '_ent=' + str(ent) + '_alphacarbons.npz'
#outname = 'results/' + 'ent=' + str(ent) + '_balanced_alphacarbons.npz'
outname = 'results/balanced_alphacarbons.npz'
if path.exists(outname):
    # put old file results into new array (lets the run resume after a crash)
    results = np.load(outname)
    costs = results['costs']
    masses = results['masses']
#bad_inds = []
bad_inds = np.where(diams == math.inf)[0]
#bad_inds = [25, 76, 89]
#bad_inds = [15, 25, 65, 76, 89]
#bad_inds = [15, 25, 38, 65, 76, 88, 89, 94, 99]
# remove proteins that take too long from the analysis
inds = np.setdiff1d(inds, bad_inds, True)
numinds = inds.size
freq = 10 # how often to save
count = 0
# iterate over the upper triangle only (j >= i); symmetrized after the loop
for i in range(numinds):
    ind1 = inds[i]
    for j in range(i, numinds):
        ind2 = inds[j]
        #if not costs[ind1, ind2] == 0:
        #    continue
        # skip pairs already computed in a previous (resumed) run
        if not np.isnan(costs[ind1, ind2]):
            continue
        print('Comparing proteins (' + str(ind1) + ', ' + str(ind2) + ')')
        dom1 = domains[ind1][0]
        dom2 = domains[ind2][0]
        cath1 = cath_codes[ind1][0]
        cath2 = cath_codes[ind2][0]
        D1 = geodists[ind1]
        D2 = geodists[ind2]
        # number of atoms in each protein
        num_atoms1 = D1.shape[0]
        num_atoms2 = D2.shape[0]
        # empirical distributions over atoms (uniform)
        mu1 = np.ones(num_atoms1) / num_atoms1
        mu2 = np.ones(num_atoms2) / num_atoms2
        # Peyre et al. unbalanced GW
        try:
            coupling = ot.gromov.gromov_wasserstein(D1, D2, mu1, mu2, 'square_loss')
            #coupling = ot.gromov.entropic_gromov_wasserstein(D1, D2, mu1, mu2, 'square_loss', ent)
            #coupling, _ = solver.tlb_sinkhorn(torch.from_numpy(mu1), torch.from_numpy(D1),
            #                                  torch.from_numpy(mu2), torch.from_numpy(D2),
            #                                  rho, ent)
            #coupling = coupling.numpy()
        except timeout_decorator.TimeoutError as e:
            # pair took too long: leave its entries NaN and move on
            print(e)
            continue
        mass = np.sum(coupling)
        # if mass of coupling is zero, GW analysis failed
        if mass <= 1e-15:
            continue
        # compute GW cost for inferred coupling
        constD, hD1, hD2 = ot.gromov.init_matrix(D1, D2, mu1, mu2, loss_fun='square_loss')
        cost = ot.gromov.gwloss(constD, hD1, hD2, coupling)
        #couplings[ind1, ind2] = coupling
        costs[ind1, ind2] = cost
        masses[ind1, ind2] = mass
        count += 1
        if count % freq == 0:
            print('saving')
            np.savez(outname, costs=costs, masses=masses)
# make cost and mass matrices symmetric
i_lower = np.tril_indices(numproteins, -1)
costs[i_lower] = costs.T[i_lower]
masses[i_lower] = masses.T[i_lower]
np.savez(outname, costs=costs, masses=masses)
# plot cost matrix
plt.figure(1)
plt.pcolormesh(costs)
plt.colorbar()
plt.show()
# plot mass matrix
plt.figure(2)
plt.pcolormesh(masses)
plt.colorbar()
plt.show()
###############################################################################
# Cluster GW Distances and Compute Rand Index of Classification
###############################################################################
# keep only indices with non-nan distances
filtered_GWdists, filtered_inds = removeNans(costs)
numfiltered = len(filtered_inds)
filtered_masses = masses[filtered_inds][:, filtered_inds]
#filtered_GWdists = filtered_GWdists / (np.outer(diams[filtered_inds], diams[filtered_inds]) * filtered_masses**2)
# normalize distances by the product of the two protein diameters
filtered_GWdists = filtered_GWdists / (np.outer(diams[filtered_inds], diams[filtered_inds]))
#filtered_GWdists = filtered_GWdists / np.outer(norms[filtered_inds], norms[filtered_inds])
np.fill_diagonal(filtered_GWdists, 0)
# plot nan-filtered GW cost matrix
plt.figure(3)
plt.title('GW filtered distance matrix', fontweight='bold')
plt.pcolormesh(filtered_GWdists)
plt.colorbar()
plt.show()
plt.figure(4)
plt.title('GW coupling masses', fontweight='bold')
plt.pcolormesh(filtered_masses)
plt.colorbar()
plt.show()
# get true labels of proteins based on their C, CA, CAT, or CATH codes
true_labels = np.zeros(numproteins, dtype=int)
# NOTE(review): assigning NaN into an int array raises ValueError in NumPy
# ("cannot convert float NaN to integer") -- likely intended a float array
# or an integer sentinel such as -1; confirm this line actually runs.
true_labels[:] = np.nan
codeDict = caToDomain
codecount = 0
for code in codeDict:
    # NOTE(review): indWithCodeExists is assigned but never read -- dead variable
    indWithCodeExists = False
    for tple in codeDict[code]:
        ind = tple[0]
        true_labels[ind] = codecount
    codecount += 1
true_k = len(np.unique(true_labels[filtered_inds]))
# get classification of proteins from k-medoids clustering
k = 25
medoid_inds, labelDict = kMedoids(filtered_GWdists, k)
GW_labels = np.zeros(numfiltered, dtype=int)
for i in range(k):
    GW_labels[labelDict[i]] = i
print('True Labels: ' + str(true_labels[filtered_inds]))
print('True Number of Classes: ' + str(true_k))
print('GW Labels: ' + str(GW_labels))
randInd = rand_index(true_labels[filtered_inds], GW_labels)
adjRandInd = metrics.adjusted_rand_score(true_labels[filtered_inds], GW_labels)
print('Rand Index: ' + str(randInd))
print('Adjusted Rand Index: ' + str(adjRandInd))
###############################################################################
# Cluster FATCAT Distances and Compute Rand Index of Classification
###############################################################################
# read in FATCAT p-values
fatcat_pvals = pd.read_csv('FATCAT-P-value.csv')
fatcat_dists = np.zeros((numproteins, numproteins))
fatcat_dists[:] = np.nan
# first CSV column holds the row domain names
rowdomains = fatcat_pvals['Unnamed: 0']
for coldomain in fatcat_pvals.keys():
    if coldomain in domainToCath:
        colind = domainToCath[coldomain][0][0]
        arr = fatcat_pvals[coldomain]
        arrlen = len(arr)
        for i in range(arrlen):
            rowdomain = rowdomains[i]
            rowind = domainToCath[rowdomain][0][0]
            fatcat_dists[rowind, colind] = arr[i]
# symmetrize the FATCAT p-value matrix
i_lower = np.tril_indices(numproteins, -1)
fatcat_dists[i_lower] = fatcat_dists.T[i_lower]
# plot FATCAT distance matrix
plt.figure(3)
plt.title('FATCAT distance matrix', fontweight='bold')
plt.pcolormesh(fatcat_dists)
plt.colorbar()
plt.show()
# filter out nan values from FATCAT p-values
filtered_fatcat_dists, filtered_inds = removeNans(fatcat_dists)
numfiltered = len(filtered_inds)
true_k = len(np.unique(true_labels[filtered_inds]))
# get classification of proteins from k-medoids clustering
k = 24
medoid_inds, labelDict = kMedoids(filtered_fatcat_dists, k)
fatcat_labels = np.zeros(numfiltered, dtype=int)
for i in range(k):
    fatcat_labels[labelDict[i]] = i
print('True Labels: ' + str(true_labels[filtered_inds]))
print('True Number of Classes: ' + str(true_k))
print('FATCAT Labels: ' + str(fatcat_labels))
randInd = rand_index(true_labels[filtered_inds], fatcat_labels)
adjRandInd = metrics.adjusted_rand_score(true_labels[filtered_inds], fatcat_labels)
print('Rand Index: ' + str(randInd))
print('Adjusted Rand Index: ' + str(adjRandInd))
|
{"/UGW_examples.py": ["/solver/unbalanced_sinkhorn.py"]}
|
7,756
|
phycomlab/Protein
|
refs/heads/main
|
/solver/unbalanced_sinkhorn.py
|
import numpy as np
import math
def sinkhorn_log(mu, nu, c, eps, niter=1000, tau=-0.5, rho=math.inf):
    # sinkhorn_log - stabilized sinkhorn over log domain with acceleration
    #
    # gamma, u, v, Wprimal, Wdual, err = sinkhorn_log(mu, nu, c, eps, niter, tau, rho)
    #
    # mu and nu are marginals.
    # c is cost
    # eps is regularization
    # coupling is
    #   gamma = exp( (-c+u*ones(1,N(2))+ones(N(1),1)*v')/epsilon );
    #
    # niter is the number of iterations.
    # tau is an averaging step.
    # - tau=0 is usual sinkhorn
    # - tau<0 produces extrapolation and can usually accelerate.
    #
    # rho controls the amount of mass variation. Large value of rho
    # impose strong constraint on mass conservation. rho=Inf (default)
    # corresponds to the usual OT (balanced setting).
    #
    # Copyright (c) 2016 Gabriel Peyre
    # (direct translation of the original Matlab code; trailing semicolons kept)
    lmbda = rho / (rho + eps)
    if rho == math.inf:
        lmbda = 1
    m = mu.size
    n = nu.size
    H1 = np.ones(m)
    H2 = np.ones(n)
    # averaging/extrapolation step between old and new dual variables
    ave = lambda tau, u, u1: tau * u + (1 - tau) * u1
    # NOTE(review): plain log-sum-exp without max-shift -- large |M| entries
    # can over/underflow; presumably acceptable for the eps used here.
    lse = lambda A: np.log(np.sum(np.exp(A), axis=1))
    # log-domain kernel built from the dual potentials u, v
    M = lambda u, v: (-c + np.outer(u, H2) + np.outer(H1, v)) / eps
    # kullback divergence
    H = lambda p: -np.sum(p * (np.log(p + 1e-20) - 1))
    KL = lambda h, p: np.sum(h * np.log(h / p) - h + p)
    KLd = lambda u, p: np.sum(p * (np.exp(-u)-1))
    dotp = lambda x, y: np.sum(x * y)
    # per-iteration primal/dual objectives and convergence residuals
    Wprimal = np.zeros(niter)
    Wdual = np.zeros(niter)
    err = np.zeros(niter)
    u = np.zeros(m);
    v = np.zeros(n);
    for i in range(niter):
        u1 = u;
        # alternating dual updates (v uses the freshly updated u)
        u = ave(tau, u, lmbda*eps*np.log(mu) - lmbda*eps*lse(M(u,v)) + lmbda*u);
        v = ave(tau, v, lmbda*eps*np.log(nu) - lmbda*eps*lse(M(u,v).transpose()) + lmbda*v);
        # coupling
        gamma = np.exp(M(u,v));
        if rho == math.inf: # balanced case: residual = marginal violation
            Wprimal[i] = dotp(c, gamma) - eps*H(gamma)
            Wdual[i] = dotp(u, mu) + dotp(v, nu) - eps*np.sum(gamma)
            err[i] = np.linalg.norm(np.sum(gamma, axis=1)-mu)
        else: # unbalanced case: residual = difference with previous iterate
            Wprimal[i] = dotp(c, gamma) - eps*H(gamma) + rho*KL(np.sum(gamma, axis=1), mu) + rho*KL(np.sum(gamma, axis=0), nu)
            Wdual[i] = -rho*KLd(u/rho, mu) - rho*KLd(v/rho, nu) - eps*np.sum(gamma)
            err[i] = np.linalg.norm(u - u1, ord=1)
    return gamma, u, v, Wprimal, Wdual, err
|
{"/UGW_examples.py": ["/solver/unbalanced_sinkhorn.py"]}
|
7,757
|
joanmadsen/newproject2
|
refs/heads/master
|
/newproject2/urls.py
|
"""newproject2 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from django.views.generic import TemplateView
from hellow_world_app.views import HomeView
from hellow_world_app.views import RockList
from hellow_world_app.views import RockDetail
from hellow_world_app.views import CreateRock
from hellow_world_app.views import UpdateRock
from hellow_world_app.views import DeleteRock
urlpatterns = [
    # admin site
    path('admin/', admin.site.urls),
    # static-ish pages
    path('', HomeView.as_view()),
    path('home/', HomeView.as_view(template_name='index.html'), name='home'),
    path('about/', TemplateView.as_view(template_name='about.html'), name='about'),
    path('contact/', TemplateView.as_view(template_name='contact.html'), name='contact'),
    # Rock CRUD
    path('rock_list/', RockList.as_view(), name='rocks'),
    path('rock_detail/<int:id>', RockDetail.as_view(), name='rock_detail'),
    path('rocks/CreateRock', CreateRock.as_view(), name='create_rock'),
    path('rock_update/<int:pk>', UpdateRock.as_view(), name='update_rock'),
    path('rocks/<int:pk>', DeleteRock.as_view(), name='delete_rock'),
]
|
{"/newproject2/urls.py": ["/hellow_world_app/views.py"], "/hellow_world_app/views.py": ["/hellow_world_app/forms.py"]}
|
7,758
|
joanmadsen/newproject2
|
refs/heads/master
|
/hellow_world_app/apps.py
|
from django.apps import AppConfig
class HellowWorldAppConfig(AppConfig):
    """Django AppConfig for the 'hellow_world_app' application."""
    # must match the app package name on disk (the 'hellow' spelling included)
    name = 'hellow_world_app'
|
{"/newproject2/urls.py": ["/hellow_world_app/views.py"], "/hellow_world_app/views.py": ["/hellow_world_app/forms.py"]}
|
7,759
|
joanmadsen/newproject2
|
refs/heads/master
|
/hellow_world_app/views.py
|
# Create your views here.
from __future__ import unicode_literals
from django.shortcuts import render
from django.views.generic import TemplateView
from django.views.generic.edit import CreateView, UpdateView, DeleteView
from django.http import HttpResponseRedirect
from hellow_world_app.models import Rock
from django.views.generic.list import ListView
from hellow_world_app.forms import CreateRockForm
from django.urls import reverse_lazy
class HomeView(TemplateView):
    """Landing page: renders index.html with all rocks in random order."""
    template_name = 'index.html'

    def get(self, request, *args, **kwargs):
        # 'number' is a literal passed to the template; order_by('?')
        # shuffles the Rock queryset on every request
        context = {
            'number': 6,
            'rock': Rock.objects.all().order_by('?'),
        }
        return self.render_to_response(context)
class RockList(ListView):
    """List view over Rock objects, shown in random order."""
    model = Rock

    def get_context_data(self, *, object_list=None, **kwargs):
        # NOTE(review): builds a fresh dict instead of calling
        # super().get_context_data(), so ListView's default context keys
        # (object_list, paginator, ...) are dropped -- confirm intentional.
        context = {
            'rock': Rock.objects.all().order_by('?'),
        }
        return context

    def get_template_names(self):
        # fixed template instead of the <app>/<model>_list.html default
        return ["rock_list.html"]
class RockDetail(TemplateView):
    """Detail page for a single Rock, looked up by the 'id' URL kwarg."""
    template_name = 'rocks/rock_detail.html'

    def get(self, request, *args, **kwargs):
        # Rock.objects.get raises for unknown ids; no 404 handling here
        context = {
            'rock': Rock.objects.get(id=kwargs['id'])
        }
        return self.render_to_response(context)
class CreateRock(CreateView):
    """Form view for creating a Rock.

    NOTE(review): no success_url is set, so Django falls back to the
    model's get_absolute_url() after saving -- confirm Rock defines it.
    """
    template_name = 'rocks/CreateRock.html'
    model = Rock
    fields = ['name', 'description', 'slug']
class UpdateRock(UpdateView):
    """Form view for editing an existing Rock (selected by pk in the URL).

    NOTE(review): like CreateRock, relies on Rock.get_absolute_url()
    for the post-save redirect -- confirm it exists.
    """
    template_name = 'rocks/UpdateRock.html'
    model = Rock
    fields = ['name', 'description', 'slug']
class DeleteRock(DeleteView):
    """Confirmation view for deleting a Rock; redirects to the rock list."""
    template_name = 'rocks/DeleteRock.html'
    model = Rock
    # named URL 'rocks' is the list view; resolved lazily at request time
    success_url = reverse_lazy('rocks')
|
{"/newproject2/urls.py": ["/hellow_world_app/views.py"], "/hellow_world_app/views.py": ["/hellow_world_app/forms.py"]}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.