hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | 
qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
74c3c58b7c52752273d5091bfab02b92f9e98a85 | 10,549 | py | Python | dgi/code2graph/class_graph_builder.py | konveyor/tackle-data-gravity-insights | 97a3eb6a04a2bca7f7e3422581a8fad055d90c04 | [
"Apache-2.0"
] | 3 | 2022-03-28T20:54:34.000Z | 2022-03-31T15:14:39.000Z | dgi/code2graph/class_graph_builder.py | rofrano/tackle-data-gravity-insights | f734f023dc46ca8e038b5ba8029e5c1177a1d34f | [
"Apache-2.0"
] | 9 | 2022-03-01T13:29:50.000Z | 2022-03-31T13:04:36.000Z | dgi/code2graph/class_graph_builder.py | rofrano/tackle-data-gravity-insights | f734f023dc46ca8e038b5ba8029e5c1177a1d34f | [
"Apache-2.0"
] | 3 | 2022-03-28T14:41:45.000Z | 2022-03-30T19:17:31.000Z | ################################################################################
# Copyright IBM Corporation 2021, 2022
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import os
import errno
import logging
import pandas as pd
from typing import Dict
from pathlib import Path
from tqdm import tqdm
from neomodel.exceptions import DoesNotExist
# Import our packages
from dgi.code2graph.process_facts import ConsumeFacts
from dgi.models import ClassNode
from dgi.code2graph.abstract_graph_builder import AbstractGraphBuilder
# Author information
__author__ = "Rahul Krishna"
__license__ = "Apache 2.0"
__version__ = "1.0"
__maintainer__ = "Rahul Krishna"
__email__ = "rkrsn@ibm.com"
__status__ = "Research Prototype"
class ClassGraphBuilder(AbstractGraphBuilder):
def __init__(self, opt):
super().__init__(opt)
@staticmethod
def _clear_all_nodes():
""" Delete all nodes
"""
for node in ClassNode.nodes.all():
node.delete()
def _process_entrypoints(self):
""" Annotate nodes with their entrypoint data
"""
facts_dir = Path(self.opt.GRAPH_FACTS_DIR)
# ----------------
# Process Servlets
# ----------------
# Make sure all Servlet data files are available
if not facts_dir.joinpath(self.opt.JEE.SERVLET.GenericServlet).exists():
raise FileNotFoundError(errno.ENOENT, os.strerror(
errno.ENOENT), self.opt.JEE.SERVLET.GenericServlet)
if not facts_dir.joinpath(self.opt.JEE.SERVLET.WebServlet).exists():
raise FileNotFoundError(errno.ENOENT, os.strerror(
errno.ENOENT), self.opt.JEE.SERVLET.WebServlet)
if not facts_dir.joinpath(self.opt.JEE.SERVLET.ServletFilter).exists():
raise FileNotFoundError(errno.ENOENT, os.strerror(
errno.ENOENT), self.opt.JEE.SERVLET.ServletFilter)
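# Each servlet fact file lists one fully-qualified class name per line; every
# class found in the graph below is flagged as a servlet entrypoint of the
# corresponding type.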
for key, fact_file in self.opt.JEE.SERVLET:
if not fact_file or not isinstance(fact_file, str):
continue
fact_file = facts_dir.joinpath(fact_file)
with open(fact_file, 'r') as facts:
classes = facts.readlines()
for class_name in classes:
class_name = class_name.rstrip()
try:
graph_node = ClassNode.nodes.get(node_class=class_name)
except DoesNotExist:
continue
graph_node.node_is_entrypoint = True
graph_node.node_is_servlet = True
graph_node.servlet_type = key
graph_node.save()
# --------------
# Process Beans
# --------------
# Make sure all Beans data files are available
if not facts_dir.joinpath(self.opt.JEE.BEANS.EJBTransactionBean).exists():
raise FileNotFoundError(errno.ENOENT, os.strerror(
errno.ENOENT), self.opt.JEE.BEANS.EJBTransactionBean)
if not facts_dir.joinpath(self.opt.JEE.BEANS.SessionBean).exists():
raise FileNotFoundError(errno.ENOENT, os.strerror(
errno.ENOENT), self.opt.JEE.BEANS.SessionBean)
if not facts_dir.joinpath(self.opt.JEE.BEANS.SingletonBean).exists():
raise FileNotFoundError(errno.ENOENT, os.strerror(
errno.ENOENT), self.opt.JEE.BEANS.SingletonBean)
if not facts_dir.joinpath(self.opt.JEE.BEANS.StatefulBean).exists():
raise FileNotFoundError(errno.ENOENT, os.strerror(
errno.ENOENT), self.opt.JEE.BEANS.StatefulBean)
if not facts_dir.joinpath(self.opt.JEE.BEANS.StatelessBean).exists():
raise FileNotFoundError(errno.ENOENT, os.strerror(
errno.ENOENT), self.opt.JEE.BEANS.StatelessBean)
for key, fact_file in self.opt.JEE.BEANS:
if not fact_file or not isinstance(fact_file, str):
continue
fact_file = facts_dir.joinpath(fact_file)
with open(fact_file, 'r') as facts:
classes = facts.readlines()
for class_name in classes:
class_name = class_name.rstrip()
try:
graph_node = ClassNode.nodes.get(node_class=class_name)
except DoesNotExist:
continue
graph_node.node_is_entrypoint = True
graph_node.node_is_bean = True
graph_node.bean_type = key
graph_node.save()
def _create_prev_and_next_nodes(self, prev_meth: Dict, next_meth: Dict):
prev_class_name = prev_meth["class"]
prev_class_short_name = prev_class_name.split('.')[-1]
try:
prev_graph_node = ClassNode.nodes.get(
node_short_name=prev_class_short_name)
except DoesNotExist:
# Method information
prev_graph_node = ClassNode(
node_class=prev_class_name,
node_short_name=prev_class_short_name).save()
next_class_name = next_meth["class"]
next_class_short_name = next_class_name.split('.')[-1]
try:
next_graph_node = ClassNode.nodes.get(
node_short_name=next_class_short_name)
except DoesNotExist:
# Method information
next_graph_node = ClassNode(
node_class=next_class_name,
node_short_name=next_class_short_name).save()
return prev_graph_node, next_graph_node
def _populate_heap_edges(self, heap_flows: pd.DataFrame) -> None:
""" Populate heap carried dependencies
Args:
heap_flows (pd.DataFrame): Heap flows as a pandas dataframe
"""
logging.info("Populating heap carried dependencies edges")
rel_id = 0
for _, row in tqdm(heap_flows.iterrows(), total=heap_flows.shape[0]):
prev_meth = row.prev
next_meth = row.next
prev_graph_node, next_graph_node = self._create_prev_and_next_nodes(
prev_meth, next_meth)
if prev_graph_node != next_graph_node:
rel = prev_graph_node.heap_flows.relationship(next_graph_node)
rel_id += 1
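# If an identical heap edge (same methods, context and heap object) already
# exists between these two classes, increment its weight; otherwise create a
# new relationship with weight 1.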
if rel and (rel.pmethod, rel.nmethod, rel.context, rel.heap_object) == (
prev_meth['name'], next_meth["name"], row.context, row.heap_obj):
rel.weight += 1
rel.rel_id = rel_id
rel.save()
else:
relationship_property = {
"weight": 1,
"rel_id": rel_id,
"pmethod": prev_meth['name'],
"nmethod": next_meth['name'],
"context": row.context,
"heap_object": row.heap_obj
}
prev_graph_node.heap_flows.connect(
next_graph_node, relationship_property)
def _populate_dataflow_edges(self, data_flows: pd.DataFrame) -> None:
""" Populate data flow dependencies
Args:
data_flows (pd.DataFrame): Data flows as a pandas dataframe
"""
logging.info("Populating dataflow edges")
rel_id = 0
for _, row in tqdm(data_flows.iterrows(), total=data_flows.shape[0]):
prev_meth = row.prev
next_meth = row.next
prev_graph_node, next_graph_node = self._create_prev_and_next_nodes(
prev_meth, next_meth)
if prev_graph_node != next_graph_node:
rel = prev_graph_node.data_flows.relationship(next_graph_node)
rel_id += 1
if rel and (rel.pmethod, rel.nmethod, rel.context) == (
prev_meth['name'], next_meth["name"], row.context):
rel.rel_id = rel_id
rel.weight += 1
rel.save()
else:
next_graph_node.data_flows.connect(
prev_graph_node, {
"weight": 1,
"rel_id": rel_id,
"pmethod": prev_meth['name'],
"nmethod": next_meth['name'],
"context": row.context
})
def _populate_callreturn_edges(self, call_ret_flows: pd.DataFrame) -> None:
""" Populate data flow dependencies
Args:
call_ret_flows (pd.DataFrame): Data flows as a pandas dataframe
"""
logging.info("Populating call-return dependencies edges")
rel_id = 0
for _, row in tqdm(call_ret_flows.iterrows(),
total=call_ret_flows.shape[0]):
prev_meth = row.prev
next_meth = row.next
prev_graph_node, next_graph_node = self._create_prev_and_next_nodes(
prev_meth, next_meth)
if prev_graph_node.node_class != next_graph_node.node_class:
rel = prev_graph_node.call_ret_flows.relationship(
next_graph_node)
rel_id += 1
if rel and (rel.pmethod, rel.nmethod, rel.pcontext, rel.ncontext) == (
prev_meth["name"], next_meth["name"], row.prev_context, row.next_context):
rel.rel_id = rel_id
rel.weight += 1
rel.save()
else:
next_graph_node.call_ret_flows.connect(
prev_graph_node, {
"weight": 1,
"rel_id": rel_id,
"pmethod": prev_meth['name'],
"nmethod": next_meth['name'],
"pcontext": row.prev_context,
"ncontext": row.next_context
})
| 40.263359 | 98 | 0.567921 | 1,168 | 10,549 | 4.864726 | 0.17637 | 0.063358 | 0.031679 | 0.029039 | 0.665611 | 0.583069 | 0.575678 | 0.541359 | 0.495952 | 0.424498 | 0 | 0.004943 | 0.328752 | 10,549 | 261 | 99 | 40.417625 | 0.797486 | 0.115177 | 0 | 0.472527 | 0 | 0 | 0.039695 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.038462 | false | 0 | 0.06044 | 0 | 0.10989 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
74c471d1d72ba31b703ea6a15dd25e610bf41cfa | 5,972 | py | Python | evernotecheck.py | scpedicini/evernotecheck | 32663a577d22f2bdb9c9bac9c5f741e40efd18a7 | [
"MIT"
] | null | null | null | evernotecheck.py | scpedicini/evernotecheck | 32663a577d22f2bdb9c9bac9c5f741e40efd18a7 | [
"MIT"
] | null | null | null | evernotecheck.py | scpedicini/evernotecheck | 32663a577d22f2bdb9c9bac9c5f741e40efd18a7 | [
"MIT"
] | null | null | null | import logging
from evernote.api.client import EvernoteClient
from evernote.edam.notestore.ttypes import NoteFilter, NotesMetadataResultSpec
from evernote.edam.error.ttypes import (EDAMSystemException, EDAMErrorCode)
import pickle
import sys
from datetime import datetime
from time import sleep
import os
logger = logging.getLogger(__name__)
class VirtualNote:
def __init__(self, guid, title, content_length, date_modified, largest_resource):
self.Guid = guid
self.Title = title
self.ContentLength = content_length
self.DateModified = date_modified
self.LargestResource = largest_resource
def safe_int(x):
return 0 if x is None else x
def evernote_wait_try_again(fptr):
"""
Wait until mandated wait and try again
http://dev.evernote.com/doc/articles/rate_limits.php
"""
def f2(*args, **kwargs):
try:
return fptr(*args, **kwargs)
except EDAMSystemException, e:
if e.errorCode == EDAMErrorCode.RATE_LIMIT_REACHED:
logger.info( "rate limit: {0} s. wait".format(e.rateLimitDuration))
sleep(e.rateLimitDuration)
logger("wait over")
return fptr(*args, **kwargs)
return f2
# Jetbrains throws TypeError: issubclass() arg 1 must be a class (because subclassing object and overwriting __getattribute__)
class RateLimitingEvernoteProxy(object):
# based on http://code.activestate.com/recipes/496741-object-proxying/
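# Every attribute looked up on this proxy is fetched from the wrapped client
# and returned wrapped in evernote_wait_try_again, so any API call that hits
# RATE_LIMIT_REACHED sleeps for the mandated duration and is retried once.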
__slots__ = ["_obj"]
def __init__(self, obj):
object.__setattr__(self, "_obj", obj)
def __getattribute__(self, name):
return evernote_wait_try_again(getattr(object.__getattribute__(self, "_obj"), name))
EVERNOTE_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'evernotes.p')
EVERNOTE_CREDENTIALS = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'evernote_credentials')
dictNotes = dict()
shrankNotes = list()
addedNotes = list()
removedNotes = list()
try:
with open(EVERNOTE_FILE, 'rb') as f:
dictNotes = pickle.load(f)
except Exception as e:
print("Unexpected error:", sys.exc_info()[0])
dictNotes = dict()
oldNoteCount = len(dictNotes)
Unmatched = set(dictNotes.keys())
with open(EVERNOTE_CREDENTIALS, 'r') as f:
developer_token = f.read()
# Set up the NoteStore client
# client = EvernoteClient(token=dev_token, sandbox = False)
_client = EvernoteClient(token=developer_token, sandbox=False)
client = RateLimitingEvernoteProxy(_client)
note_store = client.get_note_store()
# Make API calls
# notebooks = note_store.listNotebooks()
# for notebook in notebooks:
# print "Notebook: ", notebook.name
# '177e5d31-1868-408b-b0eb-860b5fbc34cb'
# notefilter
# notemetas = note_store.findNotesMetadata(filter=, maxNotes=250)
all_filter = NoteFilter()
result_spec = NotesMetadataResultSpec(includeContentLength=True, includeTitle=True, includeUpdated=True,
includeUpdateSequenceNum=True, includeLargestResourceMime=True,
includeLargestResourceSize=True, includeAttributes=True)
# authenticationToken, filter, offset, maxNotes, resultSpec
# findNotesMetadata(authenticationToken, filter, offset, maxNotes, resultSpec):
offset = 0
max_notes = 250
totalNotes = None
changesDetected = False
while totalNotes is None or offset < totalNotes:
result_list = note_store.findNotesMetadata(developer_token, all_filter, offset, max_notes, result_spec)
if totalNotes is None:
totalNotes = result_list.totalNotes
offset += len(result_list.notes)
for note in result_list.notes:
localtime = str(datetime.fromtimestamp(note.updated / 1000.0))
if note.guid in dictNotes:
matchedNote = dictNotes[note.guid]
if not hasattr(matchedNote, 'LargestResource'):
matchedNote.LargestResource = None
if note.guid in Unmatched:
Unmatched.remove(note.guid)
if note.contentLength < matchedNote.ContentLength:
print('Note: ' + note.title + ' reduced from ' + str(matchedNote.ContentLength) + ' to ' + str(note.contentLength) + ' : Reduced by ' + str(matchedNote.ContentLength - note.contentLength) + ' bytes')
shrankNotes.append(note.guid)
if note.largestResourceSize < matchedNote.LargestResource:
print('Note: ' + note.title + ' embedded attachment reduced from ' + str(matchedNote.LargestResource) + ' to ' + str(note.largestResourceSize) + ' : Change ' + str(safe_int(matchedNote.LargestResource) - safe_int(note.largestResourceSize)) + ' bytes')
shrankNotes.append(note.guid)
if note.title != matchedNote.Title:
print('Note: ' + matchedNote.Title + ' changed to ' + str(note.title))
matchedNote.Title = note.title
matchedNote.ContentLength = note.contentLength
matchedNote.DateModified = localtime
matchedNote.LargestResource = note.largestResourceSize
else:
dictNotes[note.guid] = VirtualNote(note.guid, note.title, note.contentLength, localtime, note.largestResourceSize)
addedNotes.append(note.guid)
print('New Note: ' + note.title)
for unmatched_guids in Unmatched:
removed_note = dictNotes[unmatched_guids]
print('Removed Note: ' + removed_note.Title)
del dictNotes[unmatched_guids]
# note.title, note.guid, note.contentLength
print('Old note count: ' + str(oldNoteCount) + ' New note count: ' + str(offset))
# Guid, Title, Size
print('Evernote verification complete')
if raw_input("To save changes, type (Y): ").lower() == "y":
try:
with open(EVERNOTE_FILE, 'wb') as f:
pickle.dump(dictNotes, f)
print('Local store updated')
except Exception as e:
print("Unexpected error:", sys.exc_info()[0])
print(e)
| 33.550562 | 267 | 0.682853 | 652 | 5,972 | 6.105828 | 0.334356 | 0.020095 | 0.010048 | 0.01055 | 0.09897 | 0.062798 | 0.062798 | 0.044712 | 0.044712 | 0.044712 | 0 | 0.009599 | 0.215003 | 5,972 | 177 | 268 | 33.740113 | 0.83959 | 0.122405 | 0 | 0.122642 | 0 | 0 | 0.075446 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.084906 | null | null | 0.103774 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
74c5c6595e58e5ebc6c9bd0923ccb47fb28a9fb2 | 3,025 | py | Python | tcfcli/cmds/local/common/options.py | dorasang/tcfcli | a24f14aa5a0281822de580479471bd3e83a8778b | [
"Apache-2.0"
] | 14 | 2019-03-01T09:47:36.000Z | 2019-11-28T01:58:54.000Z | tcfcli/cmds/local/common/options.py | dorasang/tcfcli | a24f14aa5a0281822de580479471bd3e83a8778b | [
"Apache-2.0"
] | 8 | 2019-03-12T10:22:53.000Z | 2020-08-20T08:15:51.000Z | tcfcli/cmds/local/common/options.py | dorasang/tcfcli | a24f14aa5a0281822de580479471bd3e83a8778b | [
"Apache-2.0"
] | 7 | 2019-03-01T09:47:52.000Z | 2020-06-13T12:14:48.000Z | import click
import os
_DEAFULT_TEMPLATE_FILE = 'template.[yaml|yml]'
def get_template_abspath(ctx, param, template_name):
if template_name == _DEAFULT_TEMPLATE_FILE:
template_name = 'template.yaml'
tmp = 'template.yml'
if os.path.exists(tmp):
template_name = tmp
return os.path.abspath(template_name)
def template_click_option():
"""
Click Option for template option
"""
return click.option('--template', '-t',
default=_DEAFULT_TEMPLATE_FILE,
type=click.Path(exists=True),
envvar="TCF_TEMPLATE_FILE",
callback=get_template_abspath,
show_default=True)
def invoke_common_options(f):
invoke_options = [
template_click_option(),
click.option('--env-vars', '-n',
help='JSON file contains function environment variables.',
type=click.Path(exists=True)),
click.option('--debug-port', '-d',
help='The port exposed for debugging. If specified, local container will start with debug mode.',
envvar="TCF_DEBUG_PORT"),
click.option('--debugger-path',
help='The debugger path in host. If specified, the debugger will mounted into the function container.'),
click.option('--debug-args',
help='Additional args to be passed the debugger.',
envvar="DEBUGGER_ARGS"),
click.option('--docker-volume-basedir', '-v',
help='The basedir where TCF template locate in.',
envvar="TCF_DOCKER_VOLUME_BASEDIR"),
click.option('--docker-network',
help='Specifies the name or id of an existing docker network which containers should connect to, '
'along with the default bridge network.',
envvar="TCF_DOCKER_NETWORK"),
click.option('--log-file', '-l',
help='Path of logfile where send runtime logs to'),
click.option('--skip-pull-image',
is_flag=True,
help='Specify whether CLI skip pulling or update docker images',
envvar="TCF_SKIP_PULL_IMAGE"),
click.option('--region'),
]
for option in reversed(invoke_options):
option(f)
return f
def service_common_options(port):
def construct_options(f):
service_options = [
click.option('--host',
default="127.0.0.1",
help="Local hostname or IP address bind to (default: '127.0.0.1')"),
click.option("--port", "-p",
default=port,
help="Local port number to listen on (default: '{}')".format(str(port)))
]
for option in reversed(service_options):
option(f)
return f
return construct_options | 32.880435 | 125 | 0.551736 | 326 | 3,025 | 4.97546 | 0.361963 | 0.101726 | 0.035142 | 0.033292 | 0.107275 | 0 | 0 | 0 | 0 | 0 | 0 | 0.006021 | 0.341157 | 3,025 | 92 | 126 | 32.880435 | 0.807827 | 0.010579 | 0 | 0.063492 | 0 | 0 | 0.324043 | 0.016118 | 0 | 0 | 0 | 0 | 0 | 1 | 0.079365 | false | 0.015873 | 0.031746 | 0 | 0.190476 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
74c605ad56148f8243d56a13d2c1c91bf87be5fb | 1,126 | py | Python | GoPiGo_J/src/server/daemons/server2.py | theplatypus/GoPiGo_Java | 85544c93146d898bf08f0898350a9ef80da6754f | [
"Unlicense",
"MIT"
] | 1 | 2016-01-22T20:35:24.000Z | 2016-01-22T20:35:24.000Z | GoPiGo_J/src/server/daemons/server2.py | theplatypus/GoPiGo_Java | 85544c93146d898bf08f0898350a9ef80da6754f | [
"Unlicense",
"MIT"
] | null | null | null | GoPiGo_J/src/server/daemons/server2.py | theplatypus/GoPiGo_Java | 85544c93146d898bf08f0898350a9ef80da6754f | [
"Unlicense",
"MIT"
] | null | null | null | #!/usr/bin/env python
# This is a basic example for a socket server for the GoPiGo.
# This allows the client to connects can be used to respond to the commands and run the GoPiGo
# the socket server is running on Port 5005 on localhost
# Send a single byte command to the server from the client:
#
# fwd #Move forward with PID
# motor_fwd #Move forward without PID
# bwd #Move back with PID
# motor_bwd #Move back without PID
# left #Turn Left by turning off one motor
# left_rot #Rotate left by running both motors is opposite direction
# right #Turn Right by turning off one motor
# right_rot #Rotate Right by running both motors is opposite direction
# stop #Stop the GoPiGo
# ispd #Increase the speed by 10
# dspd #Decrease the speed by 10
# m1 #Control motor1
# m2 #Control motor2
# led #Turn On/Off the LED's
#set_left_speed #Set the speed of the right motor
#set_right_speed #Set the speed of the left motor
#en_com_timeout #Enable communication timeout
#dis_com_timeout #Disable communication timeout
import gopigo
print "Python Dameon Started"
while True:
data = raw_input()
gopigo.right()
| 31.277778 | 94 | 0.753996 | 189 | 1,126 | 4.42328 | 0.470899 | 0.038278 | 0.033493 | 0.035885 | 0.188995 | 0.141148 | 0.090909 | 0 | 0 | 0 | 0 | 0.013216 | 0.193606 | 1,126 | 35 | 95 | 32.171429 | 0.907489 | 0.853464 | 0 | 0 | 0 | 0 | 0.170732 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.2 | null | null | 0.2 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 |
74c76ee9a4a603fbe963cba6d550995d33315088 | 302 | py | Python | python100/problems/003.py | zerlous/morning-python | 8ef8b5602ece9f74da870f3588ad7c2f734792b3 | [
"MIT"
] | null | null | null | python100/problems/003.py | zerlous/morning-python | 8ef8b5602ece9f74da870f3588ad7c2f734792b3 | [
"MIT"
] | null | null | null | python100/problems/003.py | zerlous/morning-python | 8ef8b5602ece9f74da870f3588ad7c2f734792b3 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author : zerlous
# @File : 003.py
# @Time : 2019-04-02 23:15
# An integer plus 100 is a perfect square; adding another 168 gives a perfect square again. What is the integer?
# x + 100 = m^2 , x + 268 = n^2
# => n^2 - m^2 = 168
# => (m+n)(m-n) = 168
# => let i = m+n, j = m - n, then i*j = 168
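# Illustrative brute-force check of the conditions above (an added sketch, not
# part of the original snippet): x + 100 and x + 100 + 168 must both be perfect squares.
for x in range(1, 10000):
    m = int(round((x + 100) ** 0.5))
    n = int(round((x + 268) ** 0.5))
    if m * m == x + 100 and n * n == x + 268:
        print(x)  # prints 21, 261 and 1581 within this range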
# => m=(i+j)/2, n =(i-j)/2 得i,j均偶数 | 25.166667 | 47 | 0.519868 | 59 | 302 | 2.661017 | 0.59322 | 0.050955 | 0.038217 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.179167 | 0.205298 | 302 | 12 | 48 | 25.166667 | 0.475 | 0.927152 | 0 | null | 0 | null | 0 | 0 | null | 0 | 0 | 0 | null | 1 | null | true | 0 | 0 | null | null | null | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 3 |
74c8da5877fedaee54944d0a1f1f838582ece639 | 2,049 | py | Python | apis/news/query_helper.py | lannguyen0910/SAB | 12c787cf592cb26c2a91829038ad1c2f9bac1f16 | [
"MIT"
] | 3 | 2021-11-03T14:36:53.000Z | 2021-11-18T17:21:28.000Z | apis/news/query_helper.py | lannguyen0910/SAB | 12c787cf592cb26c2a91829038ad1c2f9bac1f16 | [
"MIT"
] | null | null | null | apis/news/query_helper.py | lannguyen0910/SAB | 12c787cf592cb26c2a91829038ad1c2f9bac1f16 | [
"MIT"
] | null | null | null | ERROR_TEXT = "Sources can not be set if country or category is set."
class QueryHelper(object):
def __init__(self, query=None, category=None, country=None,
sources=None, language=None, slack_channel=None):
"""Constructs the query helper object.
Args:
query: string, The query to use. Advanced search is available:
Surround phrases with quotes (") for exact match.
Prepend words or phrases that must appear with a + symbol. Eg: +bitcoin
Prepend words that must not appear with a - symbol. Eg: -bitcoin
Alternatively you can use the AND / OR / NOT keywords,
and optionally group these with parenthesis.
Eg: crypto AND (ethereum OR litecoin) NOT bitcoin.
category: string, One of business, entertainment, general, health, science
sports, technology. Cannot be set if sources is set.
country: string, The 2-letter ISO 3166-1 code (lowercase) for the country.
Cannot be set if sources is set.
sources: list, String sources valid for the api. Obtainable from
https://newsapi.org/sources or by calling the sources endpoint.
Cannot be set if category or country is set.
language: string, The 2-letter ISO-639-1 code of the language
you want to get headlines for. Defaults to "en".
slack_channel: string, the #channel name where these results will be
published.
Raises:
ValueError if sources is set with country or category.
"""
if sources is not None and (country is not None or category is not None):
raise ValueError(ERROR_TEXT)
self.query = query
self.category = category
self.country = country
self.sources = sources
self.language = language
self.slack_channel = slack_channel
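# Hypothetical usage sketch (added for illustration; not part of the original
# module, and the argument values are made up):
if __name__ == '__main__':
    helper = QueryHelper(query='+bitcoin -scam', language='en',
                         slack_channel='#crypto-news')
    try:
        QueryHelper(sources=['bbc-news'], country='us')
    except ValueError as err:
        print(err)  # "Sources can not be set if country or category is set."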
| 49.97561 | 87 | 0.617862 | 260 | 2,049 | 4.830769 | 0.403846 | 0.019904 | 0.022293 | 0.031051 | 0.111465 | 0.08121 | 0.039809 | 0 | 0 | 0 | 0 | 0.007948 | 0.324549 | 2,049 | 40 | 88 | 51.225 | 0.899566 | 0.632992 | 0 | 0 | 0 | 0 | 0.094474 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0 | 0 | 0 | 0.166667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
74c973a8e5ee64795c61fcb8c64b1477242ab749 | 1,666 | py | Python | final_project/accounts/forms.py | W7SP/project_defense | 550152bd82998333444ace099c47feffffb6c3ab | [
"MIT"
] | null | null | null | final_project/accounts/forms.py | W7SP/project_defense | 550152bd82998333444ace099c47feffffb6c3ab | [
"MIT"
] | null | null | null | final_project/accounts/forms.py | W7SP/project_defense | 550152bd82998333444ace099c47feffffb6c3ab | [
"MIT"
] | null | null | null | from django.contrib.auth import forms as auth_forms, get_user_model
from django.core.validators import MinLengthValidator
from final_project.accounts.helpers import BootstrapFormMixin
from final_project.accounts.models import Profile
from django import forms
from final_project.main.validators import validate_only_letters
UserModel = get_user_model()
class UserRegistrationForm(BootstrapFormMixin, auth_forms.UserCreationForm):
first_name = forms.CharField(
max_length=Profile.FIRST_NAME_MAX_LENGTH,
validators=(
MinLengthValidator(Profile.FIRST_NAME_MIN_LENGTH),
validate_only_letters,
)
)
last_name = forms.CharField(
max_length=Profile.LAST_NAME_MAX_LENGTH,
)
picture = forms.URLField()
date_of_birth = forms.DateField()
gender = forms.ChoiceField(
choices=Profile.GENDERS,
)
account_balance = forms.IntegerField()
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._init_bootstrap_form_controls()
class Meta:
model = UserModel
fields = ('email',)
def save(self, commit=True):
user = super().save(commit=commit)
profile = Profile(
first_name=self.cleaned_data['first_name'],
last_name=self.cleaned_data['last_name'],
picture=self.cleaned_data['picture'],
date_of_birth=self.cleaned_data['date_of_birth'],
gender=self.cleaned_data['gender'],
account_balance=self.cleaned_data['account_balance'],
user=user,
)
if commit:
profile.save()
return user
| 29.75 | 76 | 0.67587 | 185 | 1,666 | 5.778378 | 0.362162 | 0.06174 | 0.084191 | 0.044902 | 0.063611 | 0.063611 | 0 | 0 | 0 | 0 | 0 | 0 | 0.233493 | 1,666 | 55 | 77 | 30.290909 | 0.837118 | 0 | 0 | 0 | 0 | 0 | 0.039016 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.045455 | false | 0 | 0.136364 | 0 | 0.386364 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
74c9ebcbbf685433e3347e934c9eb9cfdc882fb1 | 1,931 | py | Python | mobile-analytics/visualisations/funnel_plots.py | trangqdo/mobile-analytics | 097bb4596bb11ea186048ba5bc925e84c7fd37cc | [
"Apache-2.0"
] | null | null | null | mobile-analytics/visualisations/funnel_plots.py | trangqdo/mobile-analytics | 097bb4596bb11ea186048ba5bc925e84c7fd37cc | [
"Apache-2.0"
] | null | null | null | mobile-analytics/visualisations/funnel_plots.py | trangqdo/mobile-analytics | 097bb4596bb11ea186048ba5bc925e84c7fd37cc | [
"Apache-2.0"
] | null | null | null | from plotly import graph_objs as go
from stats.funnel import create_funnel_df, group_funnel_dfs
def plot_stacked_funnel(events, steps, col=None, from_date=None, to_date=None, step_interval=0):
"""
Function used for producing a funnel plot
:param events: (DataFrame)
events dataframe
:param steps: (list)
list containing funnel steps as strings
:param col: (str)
column to be used for grouping the funnel dataframes
:param from_date: start date passed through to create_funnel_df
:param to_date: end date passed through to create_funnel_df
:param step_interval: (int) step interval passed through to create_funnel_df
:return: (plt.figure) funnel plot
"""
# create list to append each trace to
# this will be passed to "go.Figure" at the end
data = []
# if col is provided, create a funnel_df for each entry in the "col"
if col:
# generate dict of funnel dataframes
dict_ = group_funnel_dfs(events, steps, col)
title = 'Funnel plot per {}'.format(col)
else:
funnel_df = create_funnel_df(events, steps, from_date=from_date, to_date=to_date, step_interval=step_interval)
dict_ = {'Total': funnel_df}
title = 'Funnel plot'
for t in dict_.keys():
trace = go.Funnel(
name=t,
y=dict_[t].step.values,
x=dict_[t].val.values,
textinfo="value+percent previous"
)
data.append(trace)
layout = go.Layout(margin={"l": 180, "r": 0, "t": 30, "b": 0, "pad": 0},
funnelmode="stack",
showlegend=True,
hovermode='closest',
title=title,
legend=dict(orientation="v",
bgcolor='#E2E2E2',
xanchor='left',
font=dict(
size=12)
)
)
return go.Figure(data, layout)
| 33.293103 | 118 | 0.520974 | 221 | 1,931 | 4.425339 | 0.457014 | 0.0409 | 0.046012 | 0.03681 | 0.055215 | 0.055215 | 0 | 0 | 0 | 0 | 0 | 0.011775 | 0.384257 | 1,931 | 57 | 119 | 33.877193 | 0.810765 | 0.252719 | 0 | 0 | 0 | 0 | 0.075215 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.03125 | false | 0 | 0.0625 | 0 | 0.125 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
74cb877c97af60c6e4b5a8e17a82bc5b28994b1e | 1,016 | py | Python | segregation/tests/test_local_relative_centralization.py | noahbouchier/segregation | 88bd9608251b8bc42eae9265adb7941279b9868c | [
"BSD-3-Clause"
] | 92 | 2019-02-17T02:36:29.000Z | 2022-01-22T04:29:10.000Z | segregation/tests/test_local_relative_centralization.py | noahbouchier/segregation | 88bd9608251b8bc42eae9265adb7941279b9868c | [
"BSD-3-Clause"
] | 128 | 2019-02-22T03:52:40.000Z | 2022-02-28T18:39:01.000Z | segregation/tests/test_local_relative_centralization.py | noahbouchier/segregation | 88bd9608251b8bc42eae9265adb7941279b9868c | [
"BSD-3-Clause"
] | 29 | 2019-02-17T02:36:50.000Z | 2022-03-17T04:15:49.000Z | import unittest
import geopandas as gpd
import numpy as np
from libpysal.examples import load_example
from segregation.local import LocalRelativeCentralization
class Local_Relative_Centralization_Tester(unittest.TestCase):
def test_Local_Relative_Centralization(self):
s_map = gpd.read_file(load_example("Sacramento1").get_path("sacramentot2.shp"))
df = s_map[["geometry", "BLACK", "TOT_POP"]]
index = LocalRelativeCentralization(df, "BLACK", "TOT_POP")
np.testing.assert_almost_equal(
index.statistics[0:10],
np.array(
[
0.03443055,
-0.29063264,
-0.19110976,
0.24978919,
0.01252249,
0.61152941,
0.78917647,
0.53129412,
0.04436346,
-0.20216325,
]
),
)
if __name__ == "__main__":
unittest.main()
| 29.028571 | 87 | 0.538386 | 95 | 1,016 | 5.505263 | 0.621053 | 0.042065 | 0.10325 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.148903 | 0.372047 | 1,016 | 34 | 88 | 29.882353 | 0.670846 | 0 | 0 | 0 | 0 | 0 | 0.065945 | 0 | 0 | 0 | 0 | 0 | 0.034483 | 1 | 0.034483 | false | 0 | 0.172414 | 0 | 0.241379 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
74ce8c8a200f28440567b9bb992acb489cd7d1a9 | 107 | py | Python | office365/sharepoint/utilities/wopi_web_app_properties.py | rikeshtailor/Office365-REST-Python-Client | ca7bfa1b22212137bb4e984c0457632163e89a43 | [
"MIT"
] | 544 | 2016-08-04T17:10:16.000Z | 2022-03-31T07:17:20.000Z | office365/sharepoint/utilities/wopi_web_app_properties.py | rikeshtailor/Office365-REST-Python-Client | ca7bfa1b22212137bb4e984c0457632163e89a43 | [
"MIT"
] | 438 | 2016-10-11T12:24:22.000Z | 2022-03-31T19:30:35.000Z | office365/sharepoint/utilities/wopi_web_app_properties.py | rikeshtailor/Office365-REST-Python-Client | ca7bfa1b22212137bb4e984c0457632163e89a43 | [
"MIT"
] | 202 | 2016-08-22T19:29:40.000Z | 2022-03-30T20:26:15.000Z | from office365.runtime.client_value import ClientValue
class WopiWebAppProperties(ClientValue):
pass
| 17.833333 | 54 | 0.831776 | 11 | 107 | 8 | 0.909091 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.031915 | 0.121495 | 107 | 5 | 55 | 21.4 | 0.904255 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0.333333 | 0.333333 | 0 | 0.666667 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 0 | 1 | 0 | 0 | 6 |
74cf3cdf2e8551d93b756d1f90473ad3386552cc | 819 | py | Python | python3-ardubus/setup.py | rambo/arDuBUS | d68ec05d75b3f463254039b31c31afec7e752f83 | [
"MIT"
] | 3 | 2016-01-31T21:48:37.000Z | 2021-01-17T00:39:22.000Z | python3-ardubus/setup.py | rambo/arDuBUS | d68ec05d75b3f463254039b31c31afec7e752f83 | [
"MIT"
] | null | null | null | python3-ardubus/setup.py | rambo/arDuBUS | d68ec05d75b3f463254039b31c31afec7e752f83 | [
"MIT"
] | null | null | null | """Packaging script for ardubus"""
import os
import subprocess
import setuptools
GIT_VERSION = 'UNKNOWN'
try:
GIT_VERSION = subprocess.check_output(['git', 'rev-parse', '--verify', '--short', 'HEAD']).decode('ascii').strip()
except subprocess.CalledProcessError:
pass
setuptools.setup(
name='ardubus',
version=os.getenv('PACKAGE_VERSION', '0.1.0+git.%s' % GIT_VERSION),
author='Eero "rambo" af Heurlin',
author_email='eero.afheurlin@iki.fi',
packages=setuptools.find_packages(),
license='MIT',
long_description=open('README.md', 'rt', encoding='utf-8').read(),
long_description_content_type='text/markdown',
description='ArDuBUS for python3',
install_requires=open('requirements.txt', 'rt', encoding='utf-8').readlines(),
url='https://github.com/rambo/ardubus',
)
| 31.5 | 118 | 0.695971 | 101 | 819 | 5.524752 | 0.683168 | 0.053763 | 0.046595 | 0.050179 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.008392 | 0.126984 | 819 | 25 | 119 | 32.76 | 0.772028 | 0.034188 | 0 | 0 | 0 | 0 | 0.289172 | 0.026752 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0.047619 | 0.142857 | 0 | 0.142857 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
74cf57d813aedb4d1919443807aec8fd07bf82ec | 6,247 | py | Python | utils/validation.py | gykovacs/ideal_binning_mv | 536ed9f897e5470568b2a2768eb4a119c7df1fff | [
"MIT"
] | null | null | null | utils/validation.py | gykovacs/ideal_binning_mv | 536ed9f897e5470568b2a2768eb4a119c7df1fff | [
"MIT"
] | null | null | null | utils/validation.py | gykovacs/ideal_binning_mv | 536ed9f897e5470568b2a2768eb4a119c7df1fff | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 9 19:49:52 2020
@author: gykovacs
"""
import numpy as np
# dimensionality
n_d = 8
# bins
n_bins = 3
def generate_template():
return np.round(np.random.rand(n_d)*10, decimals=0)
def generate_eqw_binning(t):
t_diff= (np.max(t) - np.min(t))/n_bins
t_bins= [np.min(t) + t_diff*i for i in range(1, n_bins)]
t_binning= np.digitize(t, t_bins)
return t_binning
def generate_S_from_binning(t_binning):
S= np.zeros(shape=(len(t_binning), len(np.unique(t_binning))))
for i, t_ in enumerate(t_binning):
S[i][t_]= 1
return S
def generate_S(t):
t_binning = generate_eqw_binning(t)
return generate_S_from_binning(t_binning)
def generate_unique_binning(t):
return np.digitize(t, np.unique(t) + 0.01)
def generate_S_unique(t):
t_binning = generate_unique_binning(t)
return generate_S_from_binning(t_binning)
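# Note on generate_A_from_S below: A = S (S^T S)^{-1} S^T is the orthogonal
# projection onto the column space of S, i.e. onto vectors that are constant
# within each bin. Applying A replaces every entry by the mean of its bin, and
# A is symmetric and idempotent.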
def generate_A_from_S(S):
return np.dot(np.dot(S, np.linalg.inv(np.dot(S.T, S))), S.T)
def generate_A(t):
return generate_A_from_S(generate_S(t))
def generate_m(t):
return np.random.rand(len(np.unique(t)))
t= generate_template()
print('t', t)
A= generate_A(t)
print('A', A)
S_u = generate_S_unique(t)
print('S_u', S_u)
m= generate_m(t)
print('m', m)
# <AS_u m, S_u m>
np.dot(np.dot(A, np.dot(S_u, m)), np.dot(S_u, m))
# <AS_u m, AS_u m>
np.dot(np.dot(A, np.dot(S_u, m)), np.dot(A, np.dot(S_u, m)))
np.dot(np.dot(A, np.dot(S_u, m)), t)
np.dot(np.dot(S_u, m), t)
np.var(t + np.dot(S_u, m))
np.var(t)
cov_m= np.outer(m, m)
t_binning = generate_unique_binning(t)
ns= []
for i in np.unique(t_binning):
ns.append(np.sum(t_binning == i))
ns= np.array(ns)
def var(x):
return np.mean((x - np.mean(x))**2)
var_Sm = np.var(np.dot(S_u, m))
var_Sm= np.sum(ns*(m**2))/n_d - np.dot(np.dot(ns, cov_m), ns)/(n_d**2)
var_total= np.var(t + np.dot(S_u, m))
var_t= np.var(t)
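# The check below is the usual variance decomposition
# Var(t + S_u m) = Var(t) + Var(S_u m) + 2 Cov(t, S_u m),
# where covar_minus + covar_plus together make up the 2 Cov(t, S_u m) term.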
total= 0.0
for i in range(len(t)):
for j in range(S_u.shape[0]):
for k in range(S_u.shape[1]):
total+= t[i]*S_u[j][k]*m[k]
total/= n_d**2
covar_minus= - 2*total
total= 0.0
for i in range(len(t)):
for j in range(S_u.shape[1]):
total+= t[i]*S_u[i][j]*m[j]
total/= n_d
covar_plus= 2*total
var_total
var_t + var_Sm + covar_minus + covar_plus
generate_S_from_binning(t_binning)
np.var(np.dot(S_u, m))
total= 0.0
for i in range(S_u.shape[0]):
for j in range(S_u.shape[1]):
for k in range(S_u.shape[0]):
for l in range(S_u.shape[1]):
total+=S_u[i][j]*S_u[k][l]*cov_m[j][l]
total
np.dot(np.dot(ns, cov_m), ns)
total= 0.0
for i in range(S_u.shape[0]):
for j in range(S_u.shape[1]):
total+= S_u[i][j]**2*m[j]**2
total
np.dot(ns, m**2)
total= 0.0
for i in range(S_u.shape[0]):
for j in range(S_u.shape[1]):
for k in range(S_u.shape[1]):
total+= S_u[i][j]*S_u[i][k]*m[j]*m[k]
total
np.mean((t + np.dot(S_u, m))**2)
covar_plus + np.mean(t*t) + total
w= np.sin(t) + np.random.rand(len(t))/2
n_bins= 3
t_diff= (np.max(t) - np.min(t))/n_bins
t_bins= [np.min(t) + t_diff*i for i in range(1, n_bins)]
w_diff= (np.max(w) - np.min(w))/n_bins
w_bins= [np.min(w) + w_diff*i for i in range(1, n_bins)]
t_binning= np.digitize(t, t_bins)
n_bins_full= len(np.unique(t))
t_bins_full= np.unique(t) + 0.01
t_binning_full= np.digitize(t, t_bins_full)
S= np.zeros(shape=(len(t_binning), 3))
for i, t_ in enumerate(t_binning):
S[i][t_]= 1
S_full= np.zeros(shape=(len(t_binning), n_bins_full))
for i, t_ in enumerate(t_binning_full):
S_full[i][t_]= 1
m_full= np.unique(t)
m_full_digitized= np.digitize(m_full, t_bins)
I= {}
for i in range(3):
I[i]= []
for j in range(len(m_full_digitized)):
if m_full_digitized[j] == i:
I[i].append(j)
A= np.dot(np.dot(S, np.linalg.inv(np.dot(S.T, S))), S.T)
m= np.array([1, 2, 3])
# term 1
# true value
np.dot(np.dot(A, np.dot(S, m)), np.dot(A, np.dot(S, m)))
# check
total= 0.0
for i in range(3):
total+= np.sum(t_binning == i)*m[i]**2
total
# term 2
# true value
np.dot(np.dot(S, m), np.dot(S, m))
# check
total= 0.0
for i in range(3):
total+= np.sum(t_binning == i)*m[i]**2
total
# term 3
# true value
np.dot(np.dot(A, np.dot(S, m)), np.dot(S, m))
# check
total= 0.0
for i in range(3):
total+= np.sum(t_binning == i)**2 * m[i]**2
total
# next check
means= []
for i in range(3):
means.append(np.sum(t[t_binning == i]))
means= np.array(means)
np.dot(means, m)
np.dot(np.dot(A, t), np.dot(A, np.dot(S, m)))
#########
np.dot(np.dot(A, t), np.dot(S, m))
np.dot(np.dot(A, np.dot(S, m)), t)
np.dot(t, np.dot(S, m))
np.dot(np.dot(A, np.dot(S_full, m_full)), np.dot(S_full, m_full))
np.dot(np.dot(S_full, m_full), np.dot(S_full, m_full))
np.dot(np.dot(A, np.dot(S_full, m_full)), np.dot(A, np.dot(S_full, m_full)))
np.sum(np.multiply(A, np.dot(np.dot(S_full, np.outer(m_full, m_full)), S_full.T)))
total= 0.0
for i in range(3):
tmp= 0.0
for j in I[i]:
tmp+= 1.0/np.sum(t_binning == i)*np.sum(t_binning_full == j)*m_full[j]**2
#tmp+= m_full[j]**2
#total+= np.sum(t_binning == i)*m_full[j]**2
total+= tmp
total
total= 0.0
for i in range(len(A)):
for j in range(len(A)):
for k in range(len(m_full)):
for l in range(len(m_full)):
total= total + A[i][j]*S_full[j][k]*m_full[k]*S_full[i][l]*m_full[l]
total
total= 0.0
for i in range(len(A)):
for j in range(len(A)):
# iterate over all (k, l) index pairs; reconstructed from an incomplete itertools.product() call
for k in range(len(m_full)):
for l in range(len(m_full)):
total= total + A[i][j]*m_full[k]**2*np.sum(t_binning_full == k)
total
total= 0.0
for i in range(n_bins_full):
for j in range(n_bins_full):
if (i in I[0] and j in I[0]):
total+= 1.0/np.sum(t_binning == 0)*m_full[i]*m_full[j]*np.sum(t_binning_full == i)*np.sum(t_binning_full == j)
if (i in I[1] and j in I[1]):
total+= 1.0/np.sum(t_binning == 1)*m_full[i]*m_full[j]*np.sum(t_binning_full == i)*np.sum(t_binning_full == j)
if (i in I[2] and j in I[2]):
total+= 1.0/np.sum(t_binning == 2)*m_full[i]*m_full[j]*np.sum(t_binning_full == i)*np.sum(t_binning_full == j)
total
S= np.array([[1.0, 0.0], [1.0, 0.0], [0.0, 1.0]])
A= np.dot(np.dot(S, np.linalg.inv(np.dot(S.T, S))), S.T)
| 19.045732 | 122 | 0.60317 | 1,371 | 6,247 | 2.585704 | 0.07221 | 0.101551 | 0.057546 | 0.053597 | 0.649083 | 0.569535 | 0.515092 | 0.454443 | 0.423977 | 0.385049 | 0 | 0.025935 | 0.191452 | 6,247 | 327 | 123 | 19.103976 | 0.675906 | 0.038418 | 0 | 0.340782 | 1 | 0 | 0.001014 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.005587 | null | null | 0.022346 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 |
74cf89eb0f06c514d58868658f9cede0db7a9aaa | 2,210 | py | Python | LeetCode/LC003_bruteForce.py | JeffreyAsuncion/CodingProblems_Python | db71cb46b2579c1c65767a644a0ea989da4fa559 | [
"MIT"
] | null | null | null | LeetCode/LC003_bruteForce.py | JeffreyAsuncion/CodingProblems_Python | db71cb46b2579c1c65767a644a0ea989da4fa559 | [
"MIT"
] | null | null | null | LeetCode/LC003_bruteForce.py | JeffreyAsuncion/CodingProblems_Python | db71cb46b2579c1c65767a644a0ea989da4fa559 | [
"MIT"
] | null | null | null | """
3. Longest Substring Without Repeating Characters
Given a string s,
find the length of the longest substring without repeating characters.
Example 1:
Input: s = "abcabcbb"
Output: 3
Explanation: The answer is "abc", with the length of 3.
Example 2:
Input: s = "bbbbb"
Output: 1
Explanation: The answer is "b", with the length of 1.
Example 3:
Input: s = "pwwkew"
Output: 3
Explanation: The answer is "wke", with the length of 3.
Notice that the answer must be a substring, "pwke" is a subsequence and not a substring.
Example 4:
Input: s = ""
Output: 0
Constraints:
0 <= s.length <= 5 * 104
s consists of English letters, digits, symbols and spaces.
"""
def allUnique(s, start, end):
seenStr = ''
for i in range(start, end):
char = s[i]
# check if char has been seen already in seenStr
if char in seenStr:
# return False - char is not unique
return False
else:
seenStr += char
# return True - char is unique
return True
def lengthOfLongestSubstring(s: str) -> int:
# base case where s is empty string
if s == "":
# return length of 0
return 0
longest = 0
for i in range(len(s)):
j = i + 1
# corrected the range to len(s) + 1; this handles the edge cases but times out on longer strings
# O(n^3); needs optimizing (an O(n) sliding-window sketch is added after this function)
for j in range(len(s)+1): # range == len(s) + 1 to correct for j = i + 1
if allUnique(s,i,j):
# ans is the max value of ans vs j -i
longest = max(longest, j-i)
return longest
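# Added for comparison (not part of the original solution): the same problem in
# O(n) with a sliding window that tracks the last seen index of each character.
def lengthOfLongestSubstringSlidingWindow(s: str) -> int:
    last_seen = {}      # character -> index of its most recent occurrence
    longest = 0
    window_start = 0
    for i, char in enumerate(s):
        # if char is inside the current window, move the window start past
        # its previous occurrence
        if char in last_seen and last_seen[char] >= window_start:
            window_start = last_seen[char] + 1
        last_seen[char] = i
        longest = max(longest, i - window_start + 1)
    return longest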
# Example 1:
s1 = "abcabcbb"
print(lengthOfLongestSubstring(s1)) # Output: 3
# Explanation: The answer is "abc", with the length of 3.
# Example 2:
s2 = "bbbbb"
print(lengthOfLongestSubstring(s2))#Output: 1
# Explanation: The answer is "b", with the length of 1.
# Example 3:
s3 = "pwwkew"
print(lengthOfLongestSubstring(s3))#Output: 3
# Explanation: The answer is "wke", with the length of 3.
# Example 4:
s4 = ""
print(lengthOfLongestSubstring(s4))# Output: 0
# Example 5:
s5 = "aab"
print(lengthOfLongestSubstring(s5))# Output: 2 | 25.402299 | 95 | 0.615385 | 320 | 2,210 | 4.25 | 0.325 | 0.047059 | 0.056618 | 0.097059 | 0.299265 | 0.2375 | 0.232353 | 0.232353 | 0.232353 | 0.232353 | 0 | 0.031131 | 0.287783 | 2,210 | 87 | 96 | 25.402299 | 0.83291 | 0.58371 | 0 | 0 | 0 | 0 | 0.024664 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.068966 | false | 0 | 0 | 0 | 0.206897 | 0.172414 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
74d068093d0ae6b5a5ba2737ff2398f7a073b853 | 10,913 | py | Python | pattoo/agent.py | palisadoes/pattoo-os | cccf0ddb50a8bb971c0c527b4ea5ef96c6819fac | [
"Apache-2.0"
] | null | null | null | pattoo/agent.py | palisadoes/pattoo-os | cccf0ddb50a8bb971c0c527b4ea5ef96c6819fac | [
"Apache-2.0"
] | null | null | null | pattoo/agent.py | palisadoes/pattoo-os | cccf0ddb50a8bb971c0c527b4ea5ef96c6819fac | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
"""Pattoo .Agent class.
Description:
This script:
1) Processes a variety of information from agents
2) Posts the data using HTTP to a server listed
in the configuration file
"""
# Standard libraries
import textwrap
import sys
import time
import argparse
import ipaddress
import multiprocessing
import os
from pprint import pprint
# PIP3 libraries
from gunicorn.app.base import BaseApplication
from gunicorn.six import iteritems
# Pattoo libraries
from pattoo import daemon
from pattoo.pattoo import CONFIG
from pattoo import log
from pattoo.api import API
class Agent(object):
"""Agent class for daemons."""
def __init__(self, parent, child=None):
"""Initialize the class.
Args:
parent: Name of parent daemon
child: Name of child daemon
Returns:
None
"""
# Initialize key variables (Parent)
self.parent = parent
self.pidfile_parent = daemon.pid_file(parent)
self.lockfile_parent = daemon.lock_file(parent)
# Initialize key variables (Child)
if child is None:
self._pidfile_child = None
else:
self._pidfile_child = daemon.pid_file(child)
def name(self):
"""Return agent name.
Args:
None
Returns:
value: Name of agent
"""
# Return
value = self.parent
return value
def query(self):
"""Placeholder method."""
# Do nothing
pass
class AgentDaemon(daemon.Daemon):
"""Class that manages agent deamonization."""
def __init__(self, agent):
"""Initialize the class.
Args:
agent: agent object
Returns:
None
"""
# Initialize variables to be used by daemon
self.agent = agent
# Call up the base daemon
daemon.Daemon.__init__(self, agent)
def run(self):
"""Start polling.
Args:
None
Returns:
None
"""
# Start polling. (Poller decides frequency)
while True:
self.agent.query()
class AgentCLI(object):
"""Class that manages the agent CLI.
Args:
None
Returns:
None
"""
def __init__(self):
"""Initialize the class.
Args:
None
Returns:
None
"""
# Initialize key variables
self.parser = None
def process(self, additional_help=None):
"""Return all the CLI options.
Args:
None
Returns:
args: Namespace() containing all of our CLI arguments as objects
- filename: Path to the configuration file
"""
# Header for the help menu of the application
parser = argparse.ArgumentParser(
description=additional_help,
formatter_class=argparse.RawTextHelpFormatter)
# CLI argument for starting
parser.add_argument(
'--start',
required=False,
default=False,
action='store_true',
help='Start the agent daemon.'
)
# CLI argument for stopping
parser.add_argument(
'--stop',
required=False,
default=False,
action='store_true',
help='Stop the agent daemon.'
)
# CLI argument for getting the status of the daemon
parser.add_argument(
'--status',
required=False,
default=False,
action='store_true',
help='Get the agent daemon status.'
)
# CLI argument for restarting
parser.add_argument(
'--restart',
required=False,
default=False,
action='store_true',
help='Restart the agent daemon.'
)
# CLI argument for stopping
parser.add_argument(
'--force',
required=False,
default=False,
action='store_true',
help=textwrap.fill(
'Stops or restarts the agent daemon ungracefully when '
'used with --stop or --restart.', width=80)
)
# Get the parser value
self.parser = parser
def control(self, agent):
"""Control the pattoo agent from the CLI.
Args:
agent: Agent object
Returns:
None
"""
# Get the CLI arguments
self.process()
parser = self.parser
args = parser.parse_args()
# Run daemon
_daemon = AgentDaemon(agent)
if args.start is True:
_daemon.start()
elif args.stop is True:
if args.force is True:
_daemon.force()
else:
_daemon.stop()
elif args.restart is True:
if args.force is True:
_daemon.force()
_daemon.start()
else:
_daemon.restart()
elif args.status is True:
_daemon.status()
else:
parser.print_help()
sys.exit(2)
class AgentAPI(Agent):
"""pattoo API agent that serves web pages.
Args:
None
Returns:
None
Functions:
__init__:
populate:
post:
"""
def __init__(self, parent, child):
"""Initialize the class.
Args:
parent: Name of parent daemon
child: Name of child daemon
Returns:
None
"""
# Initialize key variables
Agent.__init__(self, parent, child)
self.config = CONFIG
def query(self):
"""Query all remote devices for data.
Args:
None
Returns:
None
"""
# Initialize key variables
config = self.config
# Check for lock and pid files
if os.path.exists(self.lockfile_parent) is True:
log_message = (
'Lock file {} exists. Is another API daemon already running? The '
'API may have died '
'catastrophically in the past, in which case the lockfile '
'should be deleted. '
''.format(self.lockfile_parent))
log.log2see(1083, log_message)
if os.path.exists(self.pidfile_parent) is True:
log_message = (
'PID file: {} already exists. Daemon already running? '
'If not, it may have died catastrophically in the past '
'in which case you should use --stop --force to fix.'
''.format(self.pidfile_parent))
log.log2see(1084, log_message)
######################################################################
#
# Assign options in format that the Gunicorn WSGI will accept
#
# NOTE! to get a full set of valid options pprint(self.cfg.settings)
# in the instantiation of StandaloneApplication. The option names
# do not exactly match the CLI options found at
# http://docs.gunicorn.org/en/stable/settings.html
#
######################################################################
options = {
'bind': _ip_binding(),
'accesslog': config.log_file_api(),
'errorlog': config.log_file_api(),
'capture_output': True,
'pidfile': self._pidfile_child,
'loglevel': config.log_level(),
'workers': _number_of_workers(),
'umask': 0o0007,
}
# Log so that user running the script from the CLI knows that something
# is happening
log_message = (
'Pattoo API running on {}:{} and logging to file {}.'
''.format(
config.listen_address(),
config.bind_port(),
config.log_file_api()))
log.log2info(1022, log_message)
# Run
StandaloneApplication(API, options).run()
class StandaloneApplication(BaseApplication):
"""Class to integrate the Gunicorn WSGI with the Pattoo Flask application.
Modified from: http://docs.gunicorn.org/en/latest/custom.html
"""
def __init__(self, app, options=None):
"""Initialize the class.
args:
app: Flask application object of type Flask(__name__)
options: Gunicorn CLI options
"""
# Initialize key variables
self.options = options or {}
self.application = app
super(StandaloneApplication, self).__init__()
pprint(self.cfg.settings)
def load_config(self):
"""Load the configuration."""
# Initialize key variables
config = dict([(key, value) for key, value in iteritems(self.options)
if key in self.cfg.settings and value is not None])
# Assign configuration parameters
for key, value in iteritems(config):
self.cfg.set(key.lower(), value)
def load(self):
"""Run the Flask application throught the Gunicorn WSGI."""
return self.application
def _number_of_workers():
"""Get the number of CPU cores on this server."""
return (multiprocessing.cpu_count() * 2) + 1
def agent_sleep(agent_name, seconds=300):
"""Make agent sleep for a specified time, while updating PID every 300s.
Args:
agent_name: Name of agent
seconds: number of seconds to sleep
Returns:
None
"""
# Initialize key variables
interval = 300
remaining = seconds
# Start processing
while True:
# Update the PID file timestamp (important)
daemon.update_pid(agent_name)
# Sleep for at least "interval" number of seconds
if remaining < interval:
time.sleep(remaining)
break
else:
time.sleep(interval)
# Decrement remaining time
remaining = remaining - interval
def _ip_binding():
"""Create IPv4 / IPv6 binding for Gunicorn.
Args:
None
Returns:
result: bind
"""
# Initialize key variables
config = CONFIG
ipv4 = False
ip_address = config.listen_address()
# Check IP address type
try:
ip_object = ipaddress.ip_address(ip_address)
except ValueError:
log_message = (
'The {} IP address in the configuration file is incorrectly '
'formatted'.format(ip_address))
log.log2die(1234, log_message)
# Is this an IPv4 address?
ipv4 = isinstance(ip_object, ipaddress.IPv4Address)
if ipv4 is True:
result = '{}:{}'.format(ip_address, config.bind_port())
else:
result = '[{}]:{}'.format(ip_address, config.bind_port())
return result
| 24.802273 | 79 | 0.550078 | 1,142 | 10,913 | 5.147986 | 0.24606 | 0.01684 | 0.033679 | 0.018711 | 0.199354 | 0.15853 | 0.143222 | 0.117367 | 0.079946 | 0.068379 | 0 | 0.007104 | 0.355081 | 10,913 | 439 | 80 | 24.85877 | 0.828218 | 0.297077 | 0 | 0.216216 | 0 | 0 | 0.111437 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.086486 | false | 0.005405 | 0.075676 | 0 | 0.210811 | 0.016216 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
74d0de2d8da21d05590e9e49b38f27e37b7c316e | 5,359 | py | Python | utils/create_dmg_masks.py | deg4uss3r/xview2-baseline | ae3b63003efe5ffd712a32b0083e044f595f0d3e | [
"BSD-3-Clause"
] | null | null | null | utils/create_dmg_masks.py | deg4uss3r/xview2-baseline | ae3b63003efe5ffd712a32b0083e044f595f0d3e | [
"BSD-3-Clause"
] | null | null | null | utils/create_dmg_masks.py | deg4uss3r/xview2-baseline | ae3b63003efe5ffd712a32b0083e044f595f0d3e | [
"BSD-3-Clause"
] | 1 | 2020-02-13T14:02:26.000Z | 2020-02-13T14:02:26.000Z | #####################################################################################################################################################################
# xView2 #
# Copyright 2019 Carnegie Mellon University. #
# NO WARRANTY. THIS CARNEGIE MELLON UNIVERSITY AND SOFTWARE ENGINEERING INSTITUTE MATERIAL IS FURNISHED ON AN "AS-IS" BASIS. CARNEGIE MELLON UNIVERSITY MAKES NO #
# WARRANTIES OF ANY KIND, EITHER EXPRESSED OR IMPLIED, AS TO ANY MATTER INCLUDING, BUT NOT LIMITED TO, WARRANTY OF FITNESS FOR PURPOSE OR MERCHANTABILITY, #
# EXCLUSIVITY, OR RESULTS OBTAINED FROM USE OF THE MATERIAL. CARNEGIE MELLON UNIVERSITY DOES NOT MAKE ANY WARRANTY OF ANY KIND WITH RESPECT TO FREEDOM FROM PATENT, #
# TRADEMARK, OR COPYRIGHT INFRINGEMENT. #
# Released under a MIT (SEI)-style license, please see LICENSE.md or contact permission@sei.cmu.edu for full terms. #
# [DISTRIBUTION STATEMENT A] This material has been approved for public release and unlimited distribution. Please see Copyright notice for non-US Government use #
# and distribution. #
# This Software includes and/or makes use of the following Third-Party Software subject to its own license: #
# 1. SpaceNet (https://github.com/motokimura/spacenet_building_detection/blob/master/LICENSE) Copyright 2017 Motoki Kimura. #
# DM19-0988 #
#####################################################################################################################################################################
import json
from os import walk, path, makedirs
from shapely import wkt
from shapely.geometry import Polygon
import numpy as np
from cv2 import fillPoly, imwrite
def get_files(base_dir):
files = []
dis_pre_files = [f for f in next(walk(path.join(base_dir, "labels")))[2] if 'post' in f]
for f in dis_pre_files:
files.append(path.join(base_dir, "labels", f))
return files
def create_image(inference_data):
damage_key = {'un-classified': 0, 'no-damage': 1, 'minor-damage': 2, 'major-damage': 3, 'destroyed': 4}
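# e.g. a polygon labelled 'minor-damage' is rasterised with pixel value 2;
# note that 'un-classified' shares value 0 with the untouched background.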
# Creating a blank img 1024x1024x1 (the size of the original images, but greyscale not full RGB)
mask_img = np.zeros((1024,1024,1), np.uint8)
# For each polygon in the image (according to the json)
# Fill the polygon with the value from the damage key
for poly in inference_data['features']['xy']:
if 'subtype' in poly['properties']:
damage = poly['properties']['subtype']
else:
# If the subtype json field does not exist, do not write out the polygon
damage = 'un-classified'
coords = wkt.loads(poly['wkt'])
poly_np = np.array(coords.exterior.coords, np.int32)
fillPoly(mask_img, [poly_np], damage_key[damage])
# Return the image once we've gone over every polygon
return mask_img
def save_image(polygons, output_path):
# Output the filled in polygons to an image file
imwrite(output_path, polygons)
def write_gt(infile, output_dir):
with open(infile) as gt_file:
gt_json = json.load(gt_file)
# getting mask only if 'post' is in the title and writing out masks with damage value as the polygon pixel values
gt_masked_image = create_image(gt_json)
gt_masked_image_path = path.join(output_dir, path.basename(infile).split('.json')[0]+'_masked_dmg.png')
save_image(gt_masked_image, gt_masked_image_path)
if __name__ == "__main__":
import argparse
# Parse command line arguments
parser = argparse.ArgumentParser(
description="create_dmg_masks.py: Creates maskes with polygon filled by the damage value")
parser.add_argument('--base-dir',
required=True,
metavar='/path/to/xBD/train/',
help="Full path to the train directory; expects 'labels' under that directory")
parser.add_argument('--output-dir',
required=True,
metavar='/path/to/output/directory/',
help="Full path to the output directory you wish to store the output pngs")
args = parser.parse_args()
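# Example invocation (paths are hypothetical):
#   python create_dmg_masks.py --base-dir ~/Downloads/train --output-dir ~/Downloads/train/masks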
# Create output dir to save all masks if it doesn't exist already
if not path.isdir(args.output_dir):
makedirs(args.output_dir)
# We expect all label files to be under a base dir like:
# ~/Downloads/train/labels/<ALL_LABELS>.json
all_files = get_files(args.base_dir)
for infile in all_files:
write_gt(infile, args.output_dir)
| 54.131313 | 166 | 0.53816 | 597 | 5,359 | 4.720268 | 0.420436 | 0.022356 | 0.034067 | 0.004968 | 0.046842 | 0.019872 | 0 | 0 | 0 | 0 | 0 | 0.012591 | 0.333085 | 5,359 | 98 | 167 | 54.683673 | 0.775881 | 0.49095 | 0 | 0.04 | 0 | 0 | 0.185859 | 0.011008 | 0 | 0 | 0 | 0 | 0 | 1 | 0.08 | false | 0 | 0.14 | 0 | 0.26 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
74d1fba55e38668fb6474e6c5a72b31dae8639fa | 147 | py | Python | feersum_nlu_util/__init__.py | praekelt/feersum-nlu-api-wrappers | 6580e2bab2c8a764fe868a505330b3fee6029074 | [
"BSD-3-Clause"
] | 9 | 2017-10-10T12:24:23.000Z | 2021-08-18T14:07:51.000Z | feersum_nlu_util/__init__.py | praekelt/feersum-nlu-api-wrappers | 6580e2bab2c8a764fe868a505330b3fee6029074 | [
"BSD-3-Clause"
] | 1 | 2020-12-06T11:03:25.000Z | 2021-04-14T05:21:23.000Z | feersum_nlu_util/__init__.py | praekelt/feersum-nlu-api-wrappers | 6580e2bab2c8a764fe868a505330b3fee6029074 | [
"BSD-3-Clause"
] | 2 | 2019-02-12T08:26:06.000Z | 2022-02-01T09:39:47.000Z | # coding: utf-8
# flake8: noqa
"""
FeersumNLU API Utils
"""
from feersum_nlu_util import transfer
from feersum_nlu_util import image_utils
| 12.25 | 40 | 0.741497 | 21 | 147 | 4.952381 | 0.714286 | 0.211538 | 0.269231 | 0.346154 | 0.461538 | 0 | 0 | 0 | 0 | 0 | 0 | 0.016667 | 0.183673 | 147 | 11 | 41 | 13.363636 | 0.85 | 0.326531 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 7 |
74d2a0a4ac8360f6243b2b0575a3f4eccdc09c49 | 6,240 | py | Python | generate_playlist.py | renoc/python-rpifo | 8c9fa6eca8129836447f712eb74d660d900a1c84 | [
"0BSD"
] | null | null | null | generate_playlist.py | renoc/python-rpifo | 8c9fa6eca8129836447f712eb74d660d900a1c84 | [
"0BSD"
] | null | null | null | generate_playlist.py | renoc/python-rpifo | 8c9fa6eca8129836447f712eb74d660d900a1c84 | [
"0BSD"
] | null | null | null | from random import randint, shuffle
from time import time
import os
import re
def print_message(message):
print(message)
class Playlist(object):
MIN_FOLDER_SIZE = 2 # Minimum effective value
dirlist = []
evenly_spaced = True
exclusions = []
extensions = []
filedict = {}
fulldict = {}
feedback = print_message
fullspread = []
last_feedback = time()
rootdir = u'.'
def __init__(self, *args, **kwargs):
self.feedback = print_message
try:
import settings
except ImportError:
self.feedback('Settings NOT FOUND')
return False
def set_exclusions():
for exclude in settings.FILENAME_EXCLUSION:
len(exclude) and self.exclusions.append(
re.compile(exclude.strip(), re.I))
def set_extensions():
pattern = re.compile('[\W_]+')
for ext in settings.EXTENTIONS:
# strip . and \n
ext = pattern.sub('', ext)
len(ext) and self.extensions.append(ext)
def set_fullspread():
for folder in settings.FULLSPREAD_FOLDERS:
len(folder) and self.fullspread.append(
re.compile(folder.strip(), re.I))
self.evenly_spaced = settings.EVENLY_SPACED
self.feedback = settings.FEEDBACK
self.MIN_FOLDER_SIZE = settings.MIN_FOLDER_SIZE
self.rootdir = settings.ROOT_DIR
set_exclusions()
set_extensions()
set_fullspread()
self.feedback('Settings loaded')
def report_progress(self, operation='Processing'):
now = time()
if now - self.last_feedback > 3:
self.feedback('%s %s Files' %(operation, len(self.dirlist)))
self.last_feedback = now
def check_filetype(self, filename, dirpath):
for pattern in self.exclusions:
forbidden = pattern.search(filename) or pattern.search(dirpath)
if forbidden:
return False
if len(self.extensions):
ext = filename.split('.')[-1]
if not ext.lower() in self.extensions:
return False
return True
def process_list(self):
# sort files in folders alphabetically
keys = self.filedict.keys()
for key in keys:
self.filedict[key].sort(key=lambda x: x.lower())
try:
from pdabt import DABTree
except ImportError:
self.feedback('DABTree NOT FOUND')
return False
def place_season(folder, key, count, node):
# create seasons / normalize time between episodes
size = len(folder)
if size > self.MIN_FOLDER_SIZE:
node.add_value(value=count)
return
leaf = node.invoke_least()
for _ in range(size):
self.dirlist.append(key)
if node.west is leaf:
folder.insert(0, '')
else:
folder.append('')
place_season(folder, key, count, leaf)
dabtree = DABTree()
self.feedback('Calculating Season Sizes...')
shuffle(keys)
for key in keys:
folder = self.filedict[key]
assert len(folder) > 0
exempt = False
for pattern in self.fullspread:
exempt = exempt or pattern.search(key)
if exempt:
self.fulldict[key] = self.filedict.pop(key)
else:
place_season(folder, key, len(folder), dabtree)
self.report_progress()
def read_directories(playlist):
playlist.feedback('Reading Directories...')
for dirpath, dnames, fnames in os.walk(playlist.rootdir):
for filename in fnames:
# exclude self and previous playlist result
if len(dirpath) > 1 and playlist.check_filetype(filename, dirpath):
playlist.dirlist.append(dirpath)
q = playlist.filedict.get(dirpath, [])
q.append(filename)
# not worth optimizing
playlist.filedict[dirpath] = q
playlist.report_progress('Reading')
def write_entry(playlist, open_file, dirpath):
playlist.report_progress('Writing')
filename = playlist.filedict[dirpath].pop(0)
# remove season padding
if not filename:
return
open_file.write(('%s/%s\n' % (dirpath, filename)).encode('utf8'))
def spaceout(playlist, dictlist):
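# Spread each directory's entries evenly through the growing output list,
# smallest folders first. Worked example (numbers assumed): if output already
# holds 6 entries and the next directory has 2 files, varient = 6 / 3 = 2.0 and
# the directory is inserted at indices 4 and 2, i.e. roughly evenly interleaved
# with the existing entries.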
output = []
for directory in sorted(dictlist, key=lambda k: len(
dictlist[k]), reverse=False):
playlist.report_progress()
varient = len(output) / (len(dictlist[directory]) + 1.0)
for index in range(len(dictlist[directory]), 0, -1):
output.insert(int(index * varient + 0.5), directory)
return output
def output_espifo_m3u(playlist, output):
with open('playlist.m3u', 'w') as open_file:
for dirpath in output:
playlist.dirlist.pop(0)
write_entry(playlist, open_file, dirpath)
def output_rpifo_m3u(playlist):
# Reduce problem with 'programming blocks'
shuffle(playlist.dirlist)
with open('playlist.m3u', 'w') as open_file:
while len(playlist.dirlist):
dirpath = playlist.dirlist.pop(
randint(0, len(playlist.dirlist)) - 1)
write_entry(playlist, open_file, dirpath)
def gen_playlist():
playlist = Playlist()
read_directories(playlist)
playlist.process_list()
playlist.feedback('Outputting File playlist.m3u')
if playlist.evenly_spaced is True:
output = spaceout(playlist, playlist.filedict)
spaced = []
if len(playlist.fulldict):
spaced = spaceout(playlist, playlist.fulldict)
playlist.filedict.update(playlist.fulldict)
varient = len(output) / (len(spaced) + 1.0)
for index in range(len(spaced), 0, -1):
output.insert(int(index * varient + 0.5), spaced[index - 1])
output_espifo_m3u(playlist, output)
else:
output_rpifo_m3u(playlist)
playlist.feedback('...Done')
gen_playlist()
| 32.842105 | 79 | 0.584776 | 682 | 6,240 | 5.250733 | 0.243402 | 0.023457 | 0.014521 | 0.016755 | 0.125942 | 0.07456 | 0.065345 | 0.034069 | 0.017314 | 0 | 0 | 0.007253 | 0.315064 | 6,240 | 189 | 80 | 33.015873 | 0.830604 | 0.039904 | 0 | 0.124183 | 0 | 0 | 0.035774 | 0 | 0 | 0 | 0 | 0 | 0.006536 | 0 | null | null | 0 | 0.052288 | null | null | 0.026144 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
74d30db42e4e43fd40ce31aa9b1b2da29831eebb | 29,244 | py | Python | setup.py | Viech/cynetworkx | 01a37859c67b752392e9e783c949084964eef2cf | [
"BSD-3-Clause"
] | 12 | 2019-07-23T08:07:53.000Z | 2022-03-09T06:13:16.000Z | setup.py | Viech/cynetworkx | 01a37859c67b752392e9e783c949084964eef2cf | [
"BSD-3-Clause"
] | 7 | 2019-08-30T07:00:00.000Z | 2021-12-30T08:02:56.000Z | setup.py | Viech/cynetworkx | 01a37859c67b752392e9e783c949084964eef2cf | [
"BSD-3-Clause"
] | 5 | 2020-10-10T03:40:32.000Z | 2021-11-23T12:28:53.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Setup script for cynetworkx
You can install cynetworkx with
python setup.py install
"""
from glob import glob
import os
import sys
if os.path.exists('MANIFEST'):
os.remove('MANIFEST')
from setuptools import setup
from setuptools.extension import Extension
from Cython.Build import cythonize
if sys.argv[-1] == 'setup.py':
print("To install, run 'python setup.py install'")
print()
if sys.version_info[:2] < (2, 7):
print("NetworkX requires Python 2.7 or later (%d.%d detected)." %
sys.version_info[:2])
sys.exit(-1)
# Write the version information.
sys.path.insert(0, 'cynetworkx')
import cynetworkx.release as release
version = release.write_versionfile()
sys.path.pop(0)
extensions = [
Extension("cynetworkx.algorithms.approximation.__init__", ["cynetworkx/algorithms/approximation/__init__.py"]),
Extension("cynetworkx.algorithms.approximation.clique", ["cynetworkx/algorithms/approximation/clique.py"]),
Extension("cynetworkx.algorithms.approximation.clustering_coefficient", ["cynetworkx/algorithms/approximation/clustering_coefficient.py"]),
Extension("cynetworkx.algorithms.approximation.connectivity", ["cynetworkx/algorithms/approximation/connectivity.py"]),
Extension("cynetworkx.algorithms.approximation.dominating_set", ["cynetworkx/algorithms/approximation/dominating_set.py"]),
Extension("cynetworkx.algorithms.approximation.independent_set", ["cynetworkx/algorithms/approximation/independent_set.py"]),
Extension("cynetworkx.algorithms.approximation.kcomponents", ["cynetworkx/algorithms/approximation/kcomponents.py"]),
Extension("cynetworkx.algorithms.approximation.matching", ["cynetworkx/algorithms/approximation/matching.py"]),
Extension("cynetworkx.algorithms.approximation.ramsey", ["cynetworkx/algorithms/approximation/ramsey.py"]),
Extension("cynetworkx.algorithms.approximation.steinertree", ["cynetworkx/algorithms/approximation/steinertree.py"]),
Extension("cynetworkx.algorithms.approximation.vertex_cover", ["cynetworkx/algorithms/approximation/vertex_cover.py"]),
Extension("cynetworkx.algorithms.assortativity.__init__", ["cynetworkx/algorithms/assortativity/__init__.py"]),
Extension("cynetworkx.algorithms.assortativity.connectivity", ["cynetworkx/algorithms/assortativity/connectivity.py"]),
Extension("cynetworkx.algorithms.assortativity.correlation", ["cynetworkx/algorithms/assortativity/correlation.py"]),
Extension("cynetworkx.algorithms.assortativity.mixing", ["cynetworkx/algorithms/assortativity/mixing.py"]),
Extension("cynetworkx.algorithms.assortativity.neighbor_degree", ["cynetworkx/algorithms/assortativity/neighbor_degree.py"]),
Extension("cynetworkx.algorithms.assortativity.pairs", ["cynetworkx/algorithms/assortativity/pairs.py"]),
Extension("cynetworkx.algorithms.bipartite.__init__", ["cynetworkx/algorithms/bipartite/__init__.py"]),
Extension("cynetworkx.algorithms.bipartite.basic", ["cynetworkx/algorithms/bipartite/basic.py"]),
Extension("cynetworkx.algorithms.bipartite.centrality", ["cynetworkx/algorithms/bipartite/centrality.py"]),
Extension("cynetworkx.algorithms.bipartite.cluster", ["cynetworkx/algorithms/bipartite/cluster.py"]),
Extension("cynetworkx.algorithms.bipartite.covering", ["cynetworkx/algorithms/bipartite/covering.py"]),
Extension("cynetworkx.algorithms.bipartite.edgelist", ["cynetworkx/algorithms/bipartite/edgelist.py"]),
Extension("cynetworkx.algorithms.bipartite.generators", ["cynetworkx/algorithms/bipartite/generators.py"]),
Extension("cynetworkx.algorithms.bipartite.matching", ["cynetworkx/algorithms/bipartite/matching.py"]),
Extension("cynetworkx.algorithms.bipartite.matrix", ["cynetworkx/algorithms/bipartite/matrix.py"]),
Extension("cynetworkx.algorithms.bipartite.projection", ["cynetworkx/algorithms/bipartite/projection.py"]),
Extension("cynetworkx.algorithms.bipartite.redundancy", ["cynetworkx/algorithms/bipartite/redundancy.py"]),
Extension("cynetworkx.algorithms.bipartite.spectral", ["cynetworkx/algorithms/bipartite/spectral.py"]),
Extension("cynetworkx.algorithms.centrality.__init__", ["cynetworkx/algorithms/centrality/__init__.py"]),
Extension("cynetworkx.algorithms.centrality.betweenness", ["cynetworkx/algorithms/centrality/betweenness.py"]),
Extension("cynetworkx.algorithms.centrality.betweenness_subset", ["cynetworkx/algorithms/centrality/betweenness_subset.py"]),
Extension("cynetworkx.algorithms.centrality.closeness", ["cynetworkx/algorithms/centrality/closeness.py"]),
Extension("cynetworkx.algorithms.centrality.current_flow_betweenness", ["cynetworkx/algorithms/centrality/current_flow_betweenness.py"]),
Extension("cynetworkx.algorithms.centrality.current_flow_betweenness_subset", ["cynetworkx/algorithms/centrality/current_flow_betweenness_subset.py"]),
Extension("cynetworkx.algorithms.centrality.current_flow_closeness", ["cynetworkx/algorithms/centrality/current_flow_closeness.py"]),
Extension("cynetworkx.algorithms.centrality.degree_alg", ["cynetworkx/algorithms/centrality/degree_alg.py"]),
Extension("cynetworkx.algorithms.centrality.dispersion", ["cynetworkx/algorithms/centrality/dispersion.py"]),
Extension("cynetworkx.algorithms.centrality.eigenvector", ["cynetworkx/algorithms/centrality/eigenvector.py"]),
Extension("cynetworkx.algorithms.centrality.flow_matrix", ["cynetworkx/algorithms/centrality/flow_matrix.py"]),
Extension("cynetworkx.algorithms.centrality.harmonic", ["cynetworkx/algorithms/centrality/harmonic.py"]),
Extension("cynetworkx.algorithms.centrality.katz", ["cynetworkx/algorithms/centrality/katz.py"]),
Extension("cynetworkx.algorithms.centrality.load", ["cynetworkx/algorithms/centrality/load.py"]),
Extension("cynetworkx.algorithms.centrality.reaching", ["cynetworkx/algorithms/centrality/reaching.py"]),
Extension("cynetworkx.algorithms.centrality.subgraph_alg", ["cynetworkx/algorithms/centrality/subgraph_alg.py"]),
Extension("cynetworkx.algorithms.coloring.__init__", ["cynetworkx/algorithms/coloring/__init__.py"]),
Extension("cynetworkx.algorithms.coloring.greedy_coloring", ["cynetworkx/algorithms/coloring/greedy_coloring.py"]),
Extension("cynetworkx.algorithms.coloring.greedy_coloring_with_interchange", ["cynetworkx/algorithms/coloring/greedy_coloring_with_interchange.py"]),
Extension("cynetworkx.algorithms.community.__init__", ["cynetworkx/algorithms/community/__init__.py"]),
Extension("cynetworkx.algorithms.community.asyn_fluidc", ["cynetworkx/algorithms/community/asyn_fluidc.py"]),
Extension("cynetworkx.algorithms.community.centrality", ["cynetworkx/algorithms/community/centrality.py"]),
Extension("cynetworkx.algorithms.community.community_generators", ["cynetworkx/algorithms/community/community_generators.py"]),
Extension("cynetworkx.algorithms.community.community_utils", ["cynetworkx/algorithms/community/community_utils.py"]),
Extension("cynetworkx.algorithms.community.kclique", ["cynetworkx/algorithms/community/kclique.py"]),
Extension("cynetworkx.algorithms.community.kernighan_lin", ["cynetworkx/algorithms/community/kernighan_lin.py"]),
Extension("cynetworkx.algorithms.community.label_propagation", ["cynetworkx/algorithms/community/label_propagation.py"]),
Extension("cynetworkx.algorithms.community.quality", ["cynetworkx/algorithms/community/quality.py"]),
Extension("cynetworkx.algorithms.components.__init__", ["cynetworkx/algorithms/components/__init__.py"]),
Extension("cynetworkx.algorithms.components.attracting", ["cynetworkx/algorithms/components/attracting.py"]),
Extension("cynetworkx.algorithms.components.biconnected", ["cynetworkx/algorithms/components/biconnected.py"]),
Extension("cynetworkx.algorithms.components.connected", ["cynetworkx/algorithms/components/connected.py"]),
Extension("cynetworkx.algorithms.components.semiconnected", ["cynetworkx/algorithms/components/semiconnected.py"]),
Extension("cynetworkx.algorithms.components.strongly_connected", ["cynetworkx/algorithms/components/strongly_connected.py"]),
Extension("cynetworkx.algorithms.components.weakly_connected", ["cynetworkx/algorithms/components/weakly_connected.py"]),
Extension("cynetworkx.algorithms.connectivity.__init__", ["cynetworkx/algorithms/connectivity/__init__.py"]),
Extension("cynetworkx.algorithms.connectivity.connectivity", ["cynetworkx/algorithms/connectivity/connectivity.py"]),
Extension("cynetworkx.algorithms.connectivity.cuts", ["cynetworkx/algorithms/connectivity/cuts.py"]),
Extension("cynetworkx.algorithms.connectivity.disjoint_paths", ["cynetworkx/algorithms/connectivity/disjoint_paths.py"]),
Extension("cynetworkx.algorithms.connectivity.edge_augmentation", ["cynetworkx/algorithms/connectivity/edge_augmentation.py"]),
Extension("cynetworkx.algorithms.connectivity.edge_kcomponents", ["cynetworkx/algorithms/connectivity/edge_kcomponents.py"]),
Extension("cynetworkx.algorithms.connectivity.kcomponents", ["cynetworkx/algorithms/connectivity/kcomponents.py"]),
Extension("cynetworkx.algorithms.connectivity.kcutsets", ["cynetworkx/algorithms/connectivity/kcutsets.py"]),
Extension("cynetworkx.algorithms.connectivity.stoerwagner", ["cynetworkx/algorithms/connectivity/stoerwagner.py"]),
Extension("cynetworkx.algorithms.connectivity.utils", ["cynetworkx/algorithms/connectivity/utils.py"]),
Extension("cynetworkx.algorithms.flow.__init__", ["cynetworkx/algorithms/flow/__init__.py"]),
Extension("cynetworkx.algorithms.flow.boykovkolmogorov", ["cynetworkx/algorithms/flow/boykovkolmogorov.py"]),
Extension("cynetworkx.algorithms.flow.capacityscaling", ["cynetworkx/algorithms/flow/capacityscaling.py"]),
Extension("cynetworkx.algorithms.flow.dinitz_alg", ["cynetworkx/algorithms/flow/dinitz_alg.py"]),
Extension("cynetworkx.algorithms.flow.edmondskarp", ["cynetworkx/algorithms/flow/edmondskarp.py"]),
Extension("cynetworkx.algorithms.flow.gomory_hu", ["cynetworkx/algorithms/flow/gomory_hu.py"]),
Extension("cynetworkx.algorithms.flow.maxflow", ["cynetworkx/algorithms/flow/maxflow.py"]),
Extension("cynetworkx.algorithms.flow.mincost", ["cynetworkx/algorithms/flow/mincost.py"]),
Extension("cynetworkx.algorithms.flow.networksimplex", ["cynetworkx/algorithms/flow/networksimplex.py"]),
Extension("cynetworkx.algorithms.flow.preflowpush", ["cynetworkx/algorithms/flow/preflowpush.py"]),
Extension("cynetworkx.algorithms.flow.shortestaugmentingpath", ["cynetworkx/algorithms/flow/shortestaugmentingpath.py"]),
Extension("cynetworkx.algorithms.flow.utils", ["cynetworkx/algorithms/flow/utils.py"]),
Extension("cynetworkx.algorithms.isomorphism.__init__", ["cynetworkx/algorithms/isomorphism/__init__.py"]),
Extension("cynetworkx.algorithms.isomorphism.isomorph", ["cynetworkx/algorithms/isomorphism/isomorph.py"]),
Extension("cynetworkx.algorithms.isomorphism.isomorphvf2", ["cynetworkx/algorithms/isomorphism/isomorphvf2.py"]),
Extension("cynetworkx.algorithms.isomorphism.matchhelpers", ["cynetworkx/algorithms/isomorphism/matchhelpers.py"]),
Extension("cynetworkx.algorithms.isomorphism.temporalisomorphvf2", ["cynetworkx/algorithms/isomorphism/temporalisomorphvf2.py"]),
Extension("cynetworkx.algorithms.isomorphism.vf2userfunc", ["cynetworkx/algorithms/isomorphism/vf2userfunc.py"]),
Extension("cynetworkx.algorithms.link_analysis.__init__", ["cynetworkx/algorithms/link_analysis/__init__.py"]),
Extension("cynetworkx.algorithms.link_analysis.hits_alg", ["cynetworkx/algorithms/link_analysis/hits_alg.py"]),
Extension("cynetworkx.algorithms.link_analysis.pagerank_alg", ["cynetworkx/algorithms/link_analysis/pagerank_alg.py"]),
Extension("cynetworkx.algorithms.operators.__init__", ["cynetworkx/algorithms/operators/__init__.py"]),
Extension("cynetworkx.algorithms.operators.all", ["cynetworkx/algorithms/operators/all.py"]),
Extension("cynetworkx.algorithms.operators.binary", ["cynetworkx/algorithms/operators/binary.py"]),
Extension("cynetworkx.algorithms.operators.product", ["cynetworkx/algorithms/operators/product.py"]),
Extension("cynetworkx.algorithms.operators.unary", ["cynetworkx/algorithms/operators/unary.py"]),
Extension("cynetworkx.algorithms.shortest_paths.__init__", ["cynetworkx/algorithms/shortest_paths/__init__.py"]),
Extension("cynetworkx.algorithms.shortest_paths.astar", ["cynetworkx/algorithms/shortest_paths/astar.py"]),
Extension("cynetworkx.algorithms.shortest_paths.dense", ["cynetworkx/algorithms/shortest_paths/dense.py"]),
Extension("cynetworkx.algorithms.shortest_paths.generic", ["cynetworkx/algorithms/shortest_paths/generic.py"]),
Extension("cynetworkx.algorithms.shortest_paths.unweighted", ["cynetworkx/algorithms/shortest_paths/unweighted.py"]),
Extension("cynetworkx.algorithms.shortest_paths.weighted", ["cynetworkx/algorithms/shortest_paths/weighted.py"]),
Extension("cynetworkx.algorithms.traversal.__init__", ["cynetworkx/algorithms/traversal/__init__.py"]),
Extension("cynetworkx.algorithms.traversal.beamsearch", ["cynetworkx/algorithms/traversal/beamsearch.py"]),
Extension("cynetworkx.algorithms.traversal.breadth_first_search", ["cynetworkx/algorithms/traversal/breadth_first_search.py"]),
Extension("cynetworkx.algorithms.traversal.depth_first_search", ["cynetworkx/algorithms/traversal/depth_first_search.py"]),
Extension("cynetworkx.algorithms.traversal.edgedfs", ["cynetworkx/algorithms/traversal/edgedfs.py"]),
Extension("cynetworkx.algorithms.tree.__init__", ["cynetworkx/algorithms/tree/__init__.py"]),
Extension("cynetworkx.algorithms.tree.branchings", ["cynetworkx/algorithms/tree/branchings.py"]),
Extension("cynetworkx.algorithms.tree.coding", ["cynetworkx/algorithms/tree/coding.py"]),
Extension("cynetworkx.algorithms.tree.mst", ["cynetworkx/algorithms/tree/mst.py"]),
Extension("cynetworkx.algorithms.tree.operations", ["cynetworkx/algorithms/tree/operations.py"]),
Extension("cynetworkx.algorithms.tree.recognition", ["cynetworkx/algorithms/tree/recognition.py"]),
Extension("cynetworkx.algorithms.__init__", ["cynetworkx/algorithms/__init__.py"]),
Extension("cynetworkx.algorithms.boundary", ["cynetworkx/algorithms/boundary.py"]),
Extension("cynetworkx.algorithms.bridges", ["cynetworkx/algorithms/bridges.py"]),
Extension("cynetworkx.algorithms.chains", ["cynetworkx/algorithms/chains.py"]),
Extension("cynetworkx.algorithms.chordal", ["cynetworkx/algorithms/chordal.py"]),
Extension("cynetworkx.algorithms.clique", ["cynetworkx/algorithms/clique.py"]),
Extension("cynetworkx.algorithms.cluster", ["cynetworkx/algorithms/cluster.py"]),
Extension("cynetworkx.algorithms.communicability_alg", ["cynetworkx/algorithms/communicability_alg.py"]),
Extension("cynetworkx.algorithms.core", ["cynetworkx/algorithms/core.py"]),
Extension("cynetworkx.algorithms.covering", ["cynetworkx/algorithms/covering.py"]),
Extension("cynetworkx.algorithms.cuts", ["cynetworkx/algorithms/cuts.py"]),
Extension("cynetworkx.algorithms.cycles", ["cynetworkx/algorithms/cycles.py"]),
Extension("cynetworkx.algorithms.dag", ["cynetworkx/algorithms/dag.py"]),
Extension("cynetworkx.algorithms.distance_measures", ["cynetworkx/algorithms/distance_measures.py"]),
Extension("cynetworkx.algorithms.distance_regular", ["cynetworkx/algorithms/distance_regular.py"]),
Extension("cynetworkx.algorithms.dominance", ["cynetworkx/algorithms/dominance.py"]),
Extension("cynetworkx.algorithms.domninating", ["cynetworkx/algorithms/dominating.py"]),
Extension("cynetworkx.algorithms.efficiency", ["cynetworkx/algorithms/efficiency.py"]),
Extension("cynetworkx.algorithms.euler", ["cynetworkx/algorithms/euler.py"]),
Extension("cynetworkx.algorithms.graphical", ["cynetworkx/algorithms/graphical.py"]),
Extension("cynetworkx.algorithms.hierarchy", ["cynetworkx/algorithms/hierarchy.py"]),
Extension("cynetworkx.algorithms.hybrid", ["cynetworkx/algorithms/hybrid.py"]),
Extension("cynetworkx.algorithms.isolate", ["cynetworkx/algorithms/isolate.py"]),
Extension("cynetworkx.algorithms.link_prediction", ["cynetworkx/algorithms/link_prediction.py"]),
Extension("cynetworkx.algorithms.lowest_common_ancestors", ["cynetworkx/algorithms/lowest_common_ancestors.py"]),
Extension("cynetworkx.algorithms.matching", ["cynetworkx/algorithms/matching.py"]),
Extension("cynetworkx.algorithms.minors", ["cynetworkx/algorithms/minors.py"]),
Extension("cynetworkx.algorithms.mis", ["cynetworkx/algorithms/mis.py"]),
Extension("cynetworkx.algorithms.reciprocity", ["cynetworkx/algorithms/reciprocity.py"]),
Extension("cynetworkx.algorithms.richclub", ["cynetworkx/algorithms/richclub.py"]),
Extension("cynetworkx.algorithms.similarity", ["cynetworkx/algorithms/similarity.py"]),
Extension("cynetworkx.algorithms.simple_paths", ["cynetworkx/algorithms/simple_paths.py"]),
Extension("cynetworkx.algorithms.smetric", ["cynetworkx/algorithms/smetric.py"]),
Extension("cynetworkx.algorithms.structuralholes", ["cynetworkx/algorithms/structuralholes.py"]),
Extension("cynetworkx.algorithms.swap", ["cynetworkx/algorithms/swap.py"]),
Extension("cynetworkx.algorithms.threshold", ["cynetworkx/algorithms/threshold.py"]),
Extension("cynetworkx.algorithms.tournament", ["cynetworkx/algorithms/tournament.py"]),
Extension("cynetworkx.algorithms.triads", ["cynetworkx/algorithms/triads.py"]),
Extension("cynetworkx.algorithms.vitality", ["cynetworkx/algorithms/vitality.py"]),
Extension("cynetworkx.algorithms.voronoi", ["cynetworkx/algorithms/voronoi.py"]),
Extension("cynetworkx.algorithms.weiner", ["cynetworkx/algorithms/wiener.py"]),
Extension("cynetworkx.classes.__init__", ["cynetworkx/classes/__init__.py"]),
Extension("cynetworkx.classes.coreviews", ["cynetworkx/classes/coreviews.py"]),
Extension("cynetworkx.classes.digraph", ["cynetworkx/classes/digraph.py"]),
Extension("cynetworkx.classes.filters", ["cynetworkx/classes/filters.py"]),
Extension("cynetworkx.classes.function", ["cynetworkx/classes/function.py"]),
Extension("cynetworkx.classes.graph", ["cynetworkx/classes/graph.py"]),
Extension("cynetworkx.classes.graphviews", ["cynetworkx/classes/graphviews.py"]),
Extension("cynetworkx.classes.multidigraph", ["cynetworkx/classes/multidigraph.py"]),
Extension("cynetworkx.classes.multigraph", ["cynetworkx/classes/multigraph.py"]),
Extension("cynetworkx.classes.ordered", ["cynetworkx/classes/ordered.py"]),
Extension("cynetworkx.classes.reportviews", ["cynetworkx/classes/reportviews.py"]),
Extension("cynetworkx.utils.__init__", ["cynetworkx/utils/__init__.py"]),
Extension("cynetworkx.utils.contextmanagers", ["cynetworkx/utils/contextmanagers.py"]),
Extension("cynetworkx.utils.decorators", ["cynetworkx/utils/decorators.py"]),
Extension("cynetworkx.utils.heaps", ["cynetworkx/utils/heaps.py"]),
Extension("cynetworkx.utils.misc", ["cynetworkx/utils/misc.py"]),
Extension("cynetworkx.utils.random_sequence", ["cynetworkx/utils/random_sequence.py"]),
Extension("cynetworkx.utils.rcm", ["cynetworkx/utils/rcm.py"]),
Extension("cynetworkx.utils.union_find", ["cynetworkx/utils/union_find.py"]),
Extension("cynetworkx.drawing.__init__", ["cynetworkx/drawing/__init__.py"]),
Extension("cynetworkx.drawing.layout", ["cynetworkx/drawing/layout.py"]),
Extension("cynetworkx.drawing.nx_agraph", ["cynetworkx/drawing/nx_agraph.py"]),
Extension("cynetworkx.drawing.nx_pydot", ["cynetworkx/drawing/nx_pydot.py"]),
Extension("cynetworkx.drawing.nx_pylab", ["cynetworkx/drawing/nx_pylab.py"]),
Extension("cynetworkx.generators.__init__", ["cynetworkx/generators/__init__.py"]),
Extension("cynetworkx.generators.atlas", ["cynetworkx/generators/atlas.py"]),
Extension("cynetworkx.generators.classic", ["cynetworkx/generators/classic.py"]),
Extension("cynetworkx.generators.community", ["cynetworkx/generators/community.py"]),
Extension("cynetworkx.generators.degree_seq", ["cynetworkx/generators/degree_seq.py"]),
Extension("cynetworkx.generators.directed", ["cynetworkx/generators/directed.py"]),
Extension("cynetworkx.generators.duplication", ["cynetworkx/generators/duplication.py"]),
Extension("cynetworkx.generators.ego", ["cynetworkx/generators/ego.py"]),
Extension("cynetworkx.generators.expanders", ["cynetworkx/generators/expanders.py"]),
Extension("cynetworkx.generators.geometric", ["cynetworkx/generators/geometric.py"]),
Extension("cynetworkx.generators.intersection", ["cynetworkx/generators/intersection.py"]),
Extension("cynetworkx.generators.joint_degree_seq", ["cynetworkx/generators/joint_degree_seq.py"]),
Extension("cynetworkx.generators.lattice", ["cynetworkx/generators/lattice.py"]),
Extension("cynetworkx.generators.line", ["cynetworkx/generators/line.py"]),
Extension("cynetworkx.generators.mycielski", ["cynetworkx/generators/mycielski.py"]),
Extension("cynetworkx.generators.nonisomorphic_trees", ["cynetworkx/generators/nonisomorphic_trees.py"]),
Extension("cynetworkx.generators.random_clustered", ["cynetworkx/generators/random_clustered.py"]),
Extension("cynetworkx.generators.random_graphs", ["cynetworkx/generators/random_graphs.py"]),
Extension("cynetworkx.generators.small", ["cynetworkx/generators/small.py"]),
Extension("cynetworkx.generators.social", ["cynetworkx/generators/social.py"]),
Extension("cynetworkx.generators.stochastic", ["cynetworkx/generators/stochastic.py"]),
Extension("cynetworkx.generators.trees", ["cynetworkx/generators/trees.py"]),
Extension("cynetworkx.generators.triads", ["cynetworkx/generators/triads.py"]),
Extension("cynetworkx.linalg.__init__", ["cynetworkx/linalg/__init__.py"]),
Extension("cynetworkx.linalg.algebraicconnectivity", ["cynetworkx/linalg/algebraicconnectivity.py"]),
Extension("cynetworkx.linalg.attrmatrix", ["cynetworkx/linalg/attrmatrix.py"]),
Extension("cynetworkx.linalg.graphmatrix", ["cynetworkx/linalg/graphmatrix.py"]),
Extension("cynetworkx.linalg.laplacianmatrix", ["cynetworkx/linalg/laplacianmatrix.py"]),
Extension("cynetworkx.linalg.modularitymatrix", ["cynetworkx/linalg/modularitymatrix.py"]),
Extension("cynetworkx.linalg.spectrum", ["cynetworkx/linalg/spectrum.py"]),
Extension("cynetworkx.readwrite.json_graph.__init__", ["cynetworkx/readwrite/json_graph/__init__.py"]),
Extension("cynetworkx.readwrite.json_graph.adjacency", ["cynetworkx/readwrite/json_graph/adjacency.py"]),
Extension("cynetworkx.readwrite.json_graph.cytoscape", ["cynetworkx/readwrite/json_graph/cytoscape.py"]),
Extension("cynetworkx.readwrite.json_graph.jit", ["cynetworkx/readwrite/json_graph/jit.py"]),
Extension("cynetworkx.readwrite.json_graph.node_link", ["cynetworkx/readwrite/json_graph/node_link.py"]),
Extension("cynetworkx.readwrite.json_graph.tree", ["cynetworkx/readwrite/json_graph/tree.py"]),
Extension("cynetworkx.readwrite.__init__", ["cynetworkx/readwrite/__init__.py"]),
Extension("cynetworkx.readwrite.adjlist", ["cynetworkx/readwrite/adjlist.py"]),
Extension("cynetworkx.readwrite.edgelist", ["cynetworkx/readwrite/edgelist.py"]),
Extension("cynetworkx.readwrite.gexf", ["cynetworkx/readwrite/gexf.py"]),
Extension("cynetworkx.readwrite.gml", ["cynetworkx/readwrite/gml.py"]),
Extension("cynetworkx.readwrite.gpickle", ["cynetworkx/readwrite/gpickle.py"]),
Extension("cynetworkx.readwrite.graph6", ["cynetworkx/readwrite/graph6.py"]),
Extension("cynetworkx.readwrite.graphml", ["cynetworkx/readwrite/graphml.py"]),
Extension("cynetworkx.readwrite.leda", ["cynetworkx/readwrite/leda.py"]),
Extension("cynetworkx.readwrite.multiline_adjlist", ["cynetworkx/readwrite/multiline_adjlist.py"]),
Extension("cynetworkx.readwrite.nx_shp", ["cynetworkx/readwrite/nx_shp.py"]),
Extension("cynetworkx.readwrite.nx_yaml", ["cynetworkx/readwrite/nx_yaml.py"]),
Extension("cynetworkx.readwrite.p2g", ["cynetworkx/readwrite/p2g.py"]),
Extension("cynetworkx.readwrite.pajek", ["cynetworkx/readwrite/pajek.py"]),
Extension("cynetworkx.readwrite.sparse6", ["cynetworkx/readwrite/sparse6.py"]),
Extension("cynetworkx.__init__", ["cynetworkx/__init__.py"]),
Extension("cynetworkx.convert", ["cynetworkx/convert.py"]),
Extension("cynetworkx.convert_matrix", ["cynetworkx/convert_matrix.py"]),
Extension("cynetworkx.exception", ["cynetworkx/exception.py"]),
Extension("cynetworkx.relabel", ["cynetworkx/relabel.py"])
]
packages = ["cynetworkx",
"cynetworkx.algorithms",
"cynetworkx.algorithms.assortativity",
"cynetworkx.algorithms.bipartite",
"cynetworkx.algorithms.node_classification",
"cynetworkx.algorithms.centrality",
"cynetworkx.algorithms.community",
"cynetworkx.algorithms.components",
"cynetworkx.algorithms.connectivity",
"cynetworkx.algorithms.coloring",
"cynetworkx.algorithms.flow",
"cynetworkx.algorithms.traversal",
"cynetworkx.algorithms.isomorphism",
"cynetworkx.algorithms.shortest_paths",
"cynetworkx.algorithms.link_analysis",
"cynetworkx.algorithms.operators",
"cynetworkx.algorithms.approximation",
"cynetworkx.algorithms.tree",
"cynetworkx.classes",
"cynetworkx.generators",
"cynetworkx.drawing",
"cynetworkx.linalg",
"cynetworkx.readwrite",
"cynetworkx.readwrite.json_graph",
"cynetworkx.tests",
"cynetworkx.testing",
"cynetworkx.utils"]
docdirbase = 'share/doc/cynetworkx-%s' % version
# add basic documentation
data = [(docdirbase, glob("*.txt"))]
# add examples
for d in ['.',
'advanced',
'algorithms',
'basic',
'3d_drawing',
'drawing',
'graph',
'javascript',
'jit',
'pygraphviz',
'subclass']:
dd = os.path.join(docdirbase, 'examples', d)
pp = os.path.join('examples', d)
data.append((dd, glob(os.path.join(pp, "*.txt"))))
data.append((dd, glob(os.path.join(pp, "*.py"))))
data.append((dd, glob(os.path.join(pp, "*.bz2"))))
data.append((dd, glob(os.path.join(pp, "*.gz"))))
data.append((dd, glob(os.path.join(pp, "*.mbox"))))
data.append((dd, glob(os.path.join(pp, "*.edgelist"))))
# add the tests
package_data = {
'cynetworkx': ['tests/*.py'],
'cynetworkx.algorithms': ['tests/*.py'],
'cynetworkx.algorithms.assortativity': ['tests/*.py'],
'cynetworkx.algorithms.bipartite': ['tests/*.py'],
'cynetworkx.algorithms.node_classification': ['tests/*.py'],
'cynetworkx.algorithms.centrality': ['tests/*.py'],
'cynetworkx.algorithms.community': ['tests/*.py'],
'cynetworkx.algorithms.components': ['tests/*.py'],
'cynetworkx.algorithms.connectivity': ['tests/*.py'],
'cynetworkx.algorithms.coloring': ['tests/*.py'],
'cynetworkx.algorithms.flow': ['tests/*.py', 'tests/*.bz2'],
'cynetworkx.algorithms.isomorphism': ['tests/*.py', 'tests/*.*99'],
'cynetworkx.algorithms.link_analysis': ['tests/*.py'],
'cynetworkx.algorithms.approximation': ['tests/*.py'],
'cynetworkx.algorithms.operators': ['tests/*.py'],
'cynetworkx.algorithms.shortest_paths': ['tests/*.py'],
'cynetworkx.algorithms.traversal': ['tests/*.py'],
'cynetworkx.algorithms.tree': ['tests/*.py'],
'cynetworkx.classes': ['tests/*.py'],
'cynetworkx.generators': ['tests/*.py', 'atlas.dat.gz'],
'cynetworkx.drawing': ['tests/*.py'],
'cynetworkx.linalg': ['tests/*.py'],
'cynetworkx.readwrite': ['tests/*.py'],
'cynetworkx.readwrite.json_graph': ['tests/*.py'],
'cynetworkx.testing': ['tests/*.py'],
'cynetworkx.utils': ['tests/*.py']
}
install_requires = ['decorator>=4.1.0']
extras_require = {'all': ['numpy', 'scipy', 'pandas', 'matplotlib',
'pygraphviz', 'pydot', 'pyyaml', 'gdal', 'lxml', 'nose']}
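# With the 'all' extra defined above, the optional dependencies can typically
# be pulled in with e.g. `pip install .[all]` (standard setuptools/pip
# behaviour, assumed rather than documented here).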
if __name__ == "__main__":
setup(
name=release.name.lower(),
version=version,
maintainer=release.maintainer,
maintainer_email=release.maintainer_email,
author=release.authors['Pattern, Inc.'][0],
author_email=release.authors['Pattern, Inc.'][1],
description=release.description,
keywords=release.keywords,
long_description=release.long_description,
license=release.license,
platforms=release.platforms,
url=release.url,
download_url=release.download_url,
classifiers=release.classifiers,
packages=packages,
data_files=data,
package_data=package_data,
install_requires=install_requires,
extras_require=extras_require,
test_suite='nose.collector',
tests_require=['nose>=0.10.1'],
zip_safe=False,
ext_modules=cythonize(extensions)
)
| 70.980583 | 155 | 0.752086 | 2,824 | 29,244 | 7.651912 | 0.126062 | 0.323939 | 0.230321 | 0.22523 | 0.401684 | 0.115785 | 0.034615 | 0.013605 | 0 | 0 | 0 | 0.001386 | 0.087026 | 29,244 | 411 | 156 | 71.153285 | 0.80797 | 0.007181 | 0 | 0 | 0 | 0 | 0.69529 | 0.659488 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.019231 | 0 | 0.019231 | 0.008242 | 0 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5 |
74d31461bcce0238b0f60063f66c21a9b0d11bb5 | 4,914 | py | Python | tests/master/test_idle_slaves.py | WillChilds-Klein/mistress-mapreduce | c991a502545bd0d3ec4f914cdc63faf6a40e77ae | [
"Apache-2.0"
] | 2 | 2018-12-02T11:10:15.000Z | 2019-02-21T22:24:00.000Z | tests/master/test_idle_slaves.py | WillChilds-Klein/mistress-mapreduce | c991a502545bd0d3ec4f914cdc63faf6a40e77ae | [
"Apache-2.0"
] | 1 | 2019-02-21T22:23:36.000Z | 2019-02-21T22:23:36.000Z | tests/master/test_idle_slaves.py | WillChilds-Klein/mistress-mapreduce | c991a502545bd0d3ec4f914cdc63faf6a40e77ae | [
"Apache-2.0"
] | 3 | 2018-04-26T16:02:10.000Z | 2018-12-02T11:10:16.000Z | # Mrs
# Copyright 2008-2012 Brigham Young University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from mrs.master import IdleSlaves
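# Minimal stand-in for a worker slave: judging by the tests below, IdleSlaves
# only needs the .host attribute (and .id for readable names).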
class Slave(object):
def __init__(self, host, slave_id):
self.host = host
self.id = slave_id
def __str__(self):
return '%s-%s' % (self.host, self.id)
def test_one_host():
host1 = 'host1'
slave1 = Slave(host1, 'slave1')
slave2 = Slave(host1, 'slave2')
# Create a new slaves list.
slaves = IdleSlaves()
# Add some slaves.
slaves.add(slave1)
assert slaves._max_count == 1
slaves.add(slave2)
assert slaves._max_count == 2
# Remove some slaves.
slaves.remove(slave1)
assert slaves._max_count == 1
with pytest.raises(KeyError):
slaves.remove(slave1)
assert slaves._max_count == 1
slaves._consistency_check()
slaves.remove(slave2)
assert slaves._max_count == 0
slaves._consistency_check()
with pytest.raises(KeyError):
slaves.pop()
def test_nonzero():
host1 = 'host1'
slave1 = Slave(host1, 'slave1')
slave2 = Slave(host1, 'slave2')
# Create a new slaves list.
slaves = IdleSlaves()
# test __nonzero__
assert bool(slaves) == False
assert len(slaves) == 0
slaves.add(slave1)
assert bool(slaves) == True
assert len(slaves) == 1
def test_contains():
host1 = 'host1'
slave1 = Slave(host1, 'slave1')
slave2 = Slave(host1, 'slave2')
# Create a new slaves list.
slaves = IdleSlaves()
slaves.add(slave1)
assert slave1 in slaves
assert slave2 not in slaves
assert len(slaves) == 1
slaves._consistency_check()
def test_add_twice():
host1 = 'host1'
slave1 = Slave(host1, 'slave1')
slave2 = Slave(host1, 'slave2')
# Create a new slaves list.
slaves = IdleSlaves()
assert len(slaves) == 0
slaves._consistency_check()
# Add some slaves.
slaves.add(slave1)
assert slaves._max_count == 1
assert len(slaves) == 1
slaves._consistency_check()
slaves.add(slave2)
assert slaves._max_count == 2
assert len(slaves) == 2
slaves._consistency_check()
# Add the same slave a second time.
slaves.add(slave2)
assert slaves._max_count == 2
assert len(slaves) == 2
slaves._consistency_check()
def test_two_hosts():
host1 = 'host1'
slave1 = Slave(host1, 'slave1')
slave2 = Slave(host1, 'slave2')
host2 = 'host2'
slave3 = Slave(host2, 'slave3')
slave4 = Slave(host2, 'slave4')
slave5 = Slave(host2, 'slave5')
# Create a new slaves list.
slaves = IdleSlaves()
# Add some slaves.
slaves.add(slave1)
assert slaves._max_count == 1
slaves._consistency_check()
slaves.add(slave2)
assert slaves._max_count == 2
slaves._consistency_check()
slaves.add(slave3)
assert slaves._max_count == 2
slaves._consistency_check()
slaves.add(slave4)
assert slaves._max_count == 2
slaves._consistency_check()
slaves.add(slave5)
assert slaves._max_count == 3
slaves._consistency_check()
# Pop a slave.
popped_slave = slaves.pop()
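# host2 has three idle slaves versus host1's two, so the pop is expected to
# come from the host with the most idle slaves (assumed balancing behaviour).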
assert popped_slave.host == host2
assert slaves._max_count == 2
slaves._consistency_check()
# Make sure that additional slaves are popped for alternating hosts.
popped2 = slaves.pop()
assert slaves._max_count == 2
slaves._consistency_check()
popped3 = slaves.pop()
assert slaves._max_count == 1
assert popped2.host != popped3.host
slaves._consistency_check()
def test_add_to_smaller_host():
host1 = 'host1'
slave1 = Slave(host1, 'slave1')
slave2 = Slave(host1, 'slave2')
host2 = 'host2'
slave3 = Slave(host2, 'slave3')
slave4 = Slave(host2, 'slave4')
slave5 = Slave(host2, 'slave5')
# Create a new slaves list.
slaves = IdleSlaves()
assert len(slaves) == 0
# Add some slaves.
slaves.add(slave1)
assert slaves._max_count == 1
slaves._consistency_check()
slaves.add(slave3)
assert slaves._max_count == 1
slaves._consistency_check()
slaves.add(slave4)
assert slaves._max_count == 2
slaves._consistency_check()
slaves.add(slave5)
assert slaves._max_count == 3
slaves._consistency_check()
# Add a slave to the smaller host.
slaves.add(slave2)
assert slaves._max_count == 3
slaves._consistency_check()
# vim: et sw=4 sts=4
| 26.138298 | 74 | 0.664225 | 634 | 4,914 | 4.974763 | 0.212934 | 0.079899 | 0.099873 | 0.133164 | 0.662651 | 0.626189 | 0.598605 | 0.577045 | 0.510463 | 0.510463 | 0 | 0.036978 | 0.229548 | 4,914 | 187 | 75 | 26.278075 | 0.796091 | 0.20289 | 0 | 0.795276 | 0 | 0 | 0.039382 | 0 | 0 | 0 | 0 | 0 | 0.275591 | 1 | 0.062992 | false | 0 | 0.015748 | 0.007874 | 0.094488 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
74d4070cc857186b6a593db78097011098c36c45 | 2,577 | py | Python | examples/source_TestLibraBot.py | kensoi/libragram | a0119244dceb09edca36b23c95f3e97a28ddae9a | [
"Apache-2.0"
] | null | null | null | examples/source_TestLibraBot.py | kensoi/libragram | a0119244dceb09edca36b23c95f3e97a28ddae9a | [
"Apache-2.0"
] | null | null | null | examples/source_TestLibraBot.py | kensoi/libragram | a0119244dceb09edca36b23c95f3e97a28ddae9a | [
"Apache-2.0"
] | null | null | null | import typing
from libragram import librabot
from libragram.objects.decorators import callback
from libragram.objects.filters import *
bot = librabot(token = "123456:ABC-DEF1234ghIkl-zyx57W2v1u123ew11", trust_env=True)
class NotCommand(Filter):
def __init__(self, commands: typing.Union[list, set]):
self.cash = isCommand(commands)
self.update_type = whichUpdate({'message'})
self.priority = 0
async def check(self, package):
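# Passes only for plain 'message' updates that are not one of the listed
# commands, so the catch-all handler at the bottom of this file sees
# everything the command handlers do not.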
response_command = await self.cash.check(package)
response_update = await self.update_type.check(package)
return not response_command and response_update
@callback(isCommand({'start'}), bot = bot)
async def start_message(package):
await package.sdk.api.sendMessage(
chat_id = package.chat.id,
text = """Welcome to TestLibraBot!
Command list - /help
Copyright 2021 Kensoi""")
@callback(isCommand({'help'}), bot = bot)
async def help_message(package):
await package.sdk.api.sendMessage(
chat_id = package.chat.id,
text = """Cat pics - /cats
Check Bot work - /ping
Author - /credits
Contributors - /contributors
Source - /source""")
@callback(isCommand({'cats'}), bot = bot)
async def cat_pics(package):
await package.sdk.api.sendMessage(
chat_id = package.chat.id,
text = "meow 🐱")
@callback(isCommand({'ping'}), bot = bot)
async def ping_message(package):
await package.sdk.api.sendMessage(
chat_id = package.chat.id,
text = "pong :>")
await package.sdk.wait(1)
await package.sdk.api.sendMessage(
chat_id = package.chat.id,
text = "Yeah, I am here, do not worry")
@callback(isCommand({'credits'}), bot = bot)
async def author_info(package):
await package.sdk.api.sendMessage(
chat_id = package.chat.id,
text = "Author's site: kensoi.github.io")
@callback(isCommand({'contributors'}), bot = bot)
async def contributors(package):
await package.sdk.api.sendMessage(
chat_id = package.chat.id,
text = "There's no any contributors :/")
@callback(isCommand({'source'}), bot = bot)
async def source_link(package):
await package.sdk.api.sendMessage(
chat_id = package.chat.id,
text = "To see source check this link: github.com/kensoi/libragram")
@callback(NotCommand({'start', 'help', 'cats', 'ping', 'credits', 'contributors', 'source'}), bot = bot)
async def void(package):
await package.sdk.api.sendMessage(
chat_id = package.chat.id,
text = "Mur Mur Mur")
bot.run(bot.start_polling()) | 28.318681 | 104 | 0.668607 | 326 | 2,577 | 5.205521 | 0.291411 | 0.063642 | 0.088391 | 0.095463 | 0.365351 | 0.34178 | 0.34178 | 0.34178 | 0.34178 | 0.34178 | 0 | 0.012112 | 0.199069 | 2,577 | 91 | 105 | 28.318681 | 0.809593 | 0 | 0 | 0.307692 | 0 | 0 | 0.185027 | 0.026377 | 0 | 0 | 0 | 0 | 0 | 1 | 0.015385 | false | 0 | 0.061538 | 0 | 0.107692 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
74d43131a7fc8558fde515471cde4f6a43cbcff0 | 4,542 | py | Python | utils.py | brain-bzh/videoannotation | d75d3261967a134854a16956fea602cad51949a2 | [
"MIT"
] | 3 | 2020-02-19T09:54:27.000Z | 2020-10-13T14:02:28.000Z | utils.py | courtois-neuromod/videoannotation | d75d3261967a134854a16956fea602cad51949a2 | [
"MIT"
] | null | null | null | utils.py | courtois-neuromod/videoannotation | d75d3261967a134854a16956fea602cad51949a2 | [
"MIT"
] | 2 | 2020-03-13T12:23:13.000Z | 2021-02-01T16:14:04.000Z | ## Author : Nicolas Farrugia, February 2020
from torchvision.models.detection import fasterrcnn_resnet50_fpn
import torch
from torchvision.io import read_video,read_video_timestamps
import matplotlib.patches as patches
from matplotlib import pyplot as plt
import datetime
import os
def convert_Audio(mediaFile, outFile):
cmd = 'ffmpeg -i '+mediaFile+' '+outFile
os.system(cmd)
return outFile
#### imagenet categories
def cat_file():
# load classes file
categories = []
try:
f = open('categories.txt', 'r')
for line in f:
cat = line.split(',')[0].split('\n')[0]
if cat != 'classes':
categories.append(cat)
f.close()
#print('Number of categories:', len(categories))
except:
print('Error opening file ' + ' categories.txt')
quit()
return categories
categories = cat_file() # load category file
COCO_INSTANCE_CATEGORY_NAMES = [
'__background__', 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus',
'train', 'truck', 'boat', 'traffic light', 'fire hydrant', 'N/A', 'stop sign',
'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
'elephant', 'bear', 'zebra', 'giraffe', 'N/A', 'backpack', 'umbrella', 'N/A', 'N/A',
'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball',
'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard', 'tennis racket',
'bottle', 'N/A', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl',
'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza',
'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed', 'N/A', 'dining table',
'N/A', 'N/A', 'toilet', 'N/A', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone',
'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'N/A', 'book',
'clock', 'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush'
]
def annotate_img(preds,vframes,n_obj=5):
global COCO_INSTANCE_CATEGORY_NAMES
### vframes : last three dims input tensor to faster_rcnn
### preds : dictionary of outputs of faster_rcnn
predlabels = [COCO_INSTANCE_CATEGORY_NAMES[i] for i in preds['labels'].numpy()]
scores = [i for i in preds['scores'].detach().numpy()]
bboxes = [i for i in preds['boxes'].detach().numpy()]
test_im = vframes.permute(1,2,0).numpy()
# Create figure and axes
fig,ax = plt.subplots(1,figsize=(20,25))
# Display the image
ax.imshow(test_im)
#### add the annotations
for curbbox,curlab in zip(bboxes[:n_obj],predlabels[:n_obj]):
topleftx = curbbox[0]
toplefty = curbbox[1]
bottomrightx = curbbox[2]
bottomrighty = curbbox[3]
# Create a Rectangle patch
rect = patches.Rectangle((topleftx,toplefty),abs(bottomrightx-topleftx),abs(bottomrighty-toplefty),linewidth=1,edgecolor='r',facecolor='none')
# Add the patch to the Axes
ax.add_patch(rect)
ax.text(topleftx,toplefty,curlab,c='white',fontsize=16)
plt.show()
return fig
def gen_srt(strlabel,onset,srtfile,duration=2,num=1):
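# Appends one SRT-style cue. For example (call values assumed),
# gen_srt('person', 65, 'out.srt', num=1) writes a block like:
#   2
#   00:01:05 --> 00:01:07
#   person
# Strict SRT timestamps also carry milliseconds (00:01:05,000); most players
# tolerate the shorter form produced here.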
starttime = onset
endtime = starttime + duration
# include the hour field so onsets past one hour do not overflow datetime.time
string_start = datetime.time(starttime//3600, (starttime//60) % 60, starttime%60).strftime("%H:%M:%S")
string_end = datetime.time(endtime//3600, (endtime//60) % 60, endtime%60).strftime("%H:%M:%S")
with open(srtfile,'a') as f:
f.write("{}\n".format(num+1))
f.write("{starttime} --> {endtime}\n".format(starttime=string_start,endtime=string_end))
f.write("{}\n".format(strlabel))
f.write("\n")
def gen_srt_coco_multiple(allpreds,onsets,srtfile,n_obj=5):
global COCO_INSTANCE_CATEGORY_NAMES
## check that both lists have the same size
if len(allpreds) != len(onsets):
raise(ValueError('List of predictions and onsets have different sizes'))
for num,(curpred,curonset) in enumerate(zip(allpreds,onsets)):
predlabels = [COCO_INSTANCE_CATEGORY_NAMES[i] for i in curpred['labels'].numpy()[:n_obj]]
starttime = curonset
endtime = curonset + 2
# include the hour field so onsets past one hour do not overflow datetime.time
string_start = datetime.time(starttime//3600, (starttime//60) % 60, starttime%60).strftime("%H:%M:%S")
string_end = datetime.time(endtime//3600, (endtime//60) % 60, endtime%60).strftime("%H:%M:%S")
with open(srtfile,'a') as f:
f.write("{}\n".format(num+1))
f.write("{starttime} --> {endtime}\n".format(starttime=string_start,endtime=string_end))
f.write("{}\n".format(predlabels))
f.write("\n")
| 31.985915 | 150 | 0.627257 | 584 | 4,542 | 4.792808 | 0.458904 | 0.007145 | 0.015005 | 0.044659 | 0.224723 | 0.214362 | 0.214362 | 0.214362 | 0.188639 | 0.158628 | 0 | 0.013789 | 0.201673 | 4,542 | 141 | 151 | 32.212766 | 0.758136 | 0.087406 | 0 | 0.170732 | 0 | 0 | 0.205682 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.060976 | false | 0 | 0.085366 | 0 | 0.182927 | 0.012195 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
74d545645521efaf64601d79b9a3ec56f365eb34 | 922 | py | Python | cutterdrcov_plugin/extras.py | Semnodime/CutterDRcov | 7e2efd041662128aaba54dfa6230bedfca36e6fd | [
"MIT"
] | 52 | 2019-03-24T20:33:46.000Z | 2021-11-22T00:21:08.000Z | cutterdrcov_plugin/extras.py | Semnodime/CutterDRcov | 7e2efd041662128aaba54dfa6230bedfca36e6fd | [
"MIT"
] | 8 | 2019-03-24T13:38:08.000Z | 2021-12-13T21:19:59.000Z | cutterdrcov_plugin/extras.py | Semnodime/CutterDRcov | 7e2efd041662128aaba54dfa6230bedfca36e6fd | [
"MIT"
] | 10 | 2019-03-24T14:07:43.000Z | 2021-12-07T08:24:30.000Z | import ntpath
def hex_pad(num, pad):
return "{0:#0{1}x}".format(num, pad + 2)
# https://stackoverflow.com/questions/8384737/extract-file-name-from-path-no-matter-what-the-os-path-format
# cuz windows sucks :( .. hard
def file_name(path):
# There's one caveat: Linux filenames may contain backslashes. So on linux,
# r'a/b\c' always refers to the file b\c in the a folder, while on Windows,
# it always refers to the c file in the b subfolder of the a folder. So when
# both forward and backward slashes are used in a path, you need to know the
# associated platform to be able to interpret it correctly. In practice it's
# usually safe to assume it's a windows path since backslashes are seldom
# used in Linux filenames, but keep this in mind when you code so you don't
# create accidental security holes.
head, tail = ntpath.split(path)
return tail or ntpath.basename(head)
| 46.1 | 107 | 0.715835 | 160 | 922 | 4.1125 | 0.5875 | 0.018237 | 0.042553 | 0.051672 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.014885 | 0.198482 | 922 | 19 | 108 | 48.526316 | 0.875507 | 0.745119 | 0 | 0 | 0 | 0 | 0.044643 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.333333 | false | 0 | 0.166667 | 0.166667 | 0.833333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 3 |
74d59e48d760c2bdd824f63462307dcc9124fac9 | 131 | py | Python | tfidf_matcher/__init__.py | stungkit/tfidf_matcher | 24182504d21f1eb978839b700f1c402c6288df2f | [
"MIT"
] | 13 | 2020-02-24T18:29:15.000Z | 2021-12-28T09:41:35.000Z | tfidf_matcher/__init__.py | stungkit/tfidf_matcher | 24182504d21f1eb978839b700f1c402c6288df2f | [
"MIT"
] | null | null | null | tfidf_matcher/__init__.py | stungkit/tfidf_matcher | 24182504d21f1eb978839b700f1c402c6288df2f | [
"MIT"
] | 3 | 2020-07-21T04:32:45.000Z | 2021-10-21T11:00:56.000Z | # AUTHOR: Louis Tsiattalou
# DESCRIPTION: Init for tfidf_matcher package.
from .ngrams import ngrams
from .matcher import matcher
| 21.833333 | 46 | 0.801527 | 17 | 131 | 6.117647 | 0.705882 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.145038 | 131 | 5 | 47 | 26.2 | 0.928571 | 0.526718 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 4 |
74d5aa0d303e7bb713fa63255c91d0a79ff9c4cc | 17,910 | py | Python | era5dataset/FrontDataset.py | stnie/FrontDetection | 742ebf9619dcde40d42891073739945a05631ea3 | [
"MIT"
] | null | null | null | era5dataset/FrontDataset.py | stnie/FrontDetection | 742ebf9619dcde40d42891073739945a05631ea3 | [
"MIT"
] | null | null | null | era5dataset/FrontDataset.py | stnie/FrontDetection | 742ebf9619dcde40d42891073739945a05631ea3 | [
"MIT"
] | 1 | 2022-01-17T04:58:10.000Z | 2022-01-17T04:58:10.000Z | from typing import final
import numpy as np
import torch
import os
import time
from datetime import datetime
import random
import numbers
from torch.utils.data import Dataset
from .ERA5Reader.readNetCDF import LatTokmPerLon
from .EraExtractors import DefaultEraExtractor
def labelnameToDataname(filename):
return os.path.splitext(filename)[0]+".nc"
def datanameToLabelname(filename, mapTypes, removePrefix):
return {key: os.path.join(str(x[0]), os.path.splitext(filename)[0][removePrefix:]+".txt") for key, x in mapTypes.items()}
# Dataset Class
class WeatherFrontDataset(Dataset):
"""Front dataset."""
def __init__(self, data_dir, label_dir = None, mapTypes = {"NA": ("", (35,70), (-40,35), (0.25,0.25), (1,1), None) }, levelRange = None, transform=None, outSize = None, printFileName = False, labelThickness = 2, label_extractor = None, asCoords = False, era_extractor = DefaultEraExtractor, has_subfolds = (False, False), removePrefix = 0, halfResEval = False):
"""
Args:
data_dir (string): Directory with all the images.
            label_dir (string): Directory with all the labels (fronts)
            validLats (int,int): Lowest and highest latitude (-90 to 90) from which the data shall be sampled
            validLons (int,int): Lowest and highest longitude (0 to 360-resolution[1]) from which the data shall be sampled
resolution (float, float): Step Resolution in Latitudinal and Longitudinal direction
transform (callable, optional): Optional transform to be applied on a sample.
"""
self.data_dir = data_dir
self.label_dir = label_dir
# Cropsize (used before reading from ERA!)
self.cropsize = outSize
        # Augmentation tuple (data-augmentation, label-augmentation)
self.transform = transform
# Function that extracts label data from a given range
self.label_extractor = label_extractor
self.asCoords = asCoords
# Function that extracts era data from a given range
self.era_extractor = era_extractor
# Dictionary describing folder, latitudes, longitudes and resolution (signed) for different labels
self.mapTypes = mapTypes
# Should labels be randomly drawn if multiple are available for the same data
self.randomizeMapTypes = True
# Levelrange of era to extract
self.levelrange = levelRange
        # Latrange of era to extract for each mapType (used for crop)
self.latrange = {key: np.arange(int((90-x[1][0])*(1/np.abs(x[3][0]))),int((90-x[1][1])*(1/np.abs(x[3][0]))), 1) for key,x in self.mapTypes.items()}
# lonrange of era to extract for each mapType (used for crop)
self.lonrange = {key: np.arange(int(x[2][0]*(1/x[3][1])), int(x[2][1]*(1/x[3][1])), 1) for key,x in self.mapTypes.items()}
# Print file information
self.printFileName = printFileName
# Extract in a km grid instead of lat lon
self.extractRegularGrid = False
        # is the evaluation to be done at half resolution
self.halfRes = halfResEval
# Are labels provided? Else do not return labels
self.has_label = (not label_dir is None and not label_extractor is None)
if label_extractor is None:
print("No label extractor given, proceed without extracting labels")
if label_dir is None:
print("No label directory given, Labels need to be generated by the extractor")
# Check if an era_extractor exists
if era_extractor is None:
print("No Era-Extractor given, abort execution!")
exit(1)
self.removePrefix = removePrefix
self.hasSubfolds = has_subfolds
# ERA Data is organized in subfolders (2017->03->20170201_00.nc)
if(self.hasSubfolds[0]):
self.fileList = []
for fold in os.listdir(self.data_dir):
for filen in os.listdir(os.path.join(self.data_dir, fold)):
# if the dataset extracts labels, check if the corresponding labels exist
if(self.has_label):
potLabel = datanameToLabelname(filen, self.mapTypes, self.removePrefix)
labelExists = False
for key, val in potLabel.items():
foldna, filena = val.split("/")
if filena in os.listdir(os.path.join(self.label_dir,foldna)):
labelExists=True
if(labelExists):
self.fileList.append(os.path.join(fold,filen))
# if no labels are to be extracted simply append the data
else:
self.fileList.append(os.path.join(fold,filen))
# ERA Data is organized without subfolders (2017 -> 20170101_00.nc)
else:
self.fileList = []
for filen in os.listdir(self.data_dir):
if(self.has_label):
potLabel = datanameToLabelname(filen, self.mapTypes, self.removePrefix)
labelExists = False
for key, val in potLabel.items():
foldna, filena = val.split("/")
if filena in os.listdir(os.path.join(self.label_dir, foldna)):
labelExists = True
if(labelExists):
self.fileList.append(filen)
else:
self.fileList.append(filen)
# Sort file list
self.fileList = sorted(self.fileList)
def __len__(self):
# Length of all available Data (regardless of the existence of label!)
return len(self.fileList)
# Allow for slices or idx
def __getitem__(self, idx):
if not isinstance(idx, numbers.Number):
print("Currently not working")
exit(1)
return self.getBatch(idx)
filepath = self.fileList[idx]
filename = ""
if(self.hasSubfolds[0]):
filename = filepath.split("/")[-1]
else:
filename = filepath
if(filename == ""):
print("fileNotFound")
print(idx)
img_name = os.path.join(self.data_dir, filepath)
#Initialize projection type and seeds for possible transformations
projection_type = 0
extract_seed = datetime.now()
transform_seed = datetime.now()
mapType = list(self.mapTypes.keys())[0]
fronts = None
if(self.has_label):
            # all corresponding front names (take the first of them if multiple are available)
if(self.hasSubfolds[1]):
front_name = datanameToLabelname(filepath, self.mapTypes, self.removePrefix)
else:
if(self.hasSubfolds[0]):
front_name = datanameToLabelname(filename, self.mapTypes, self.removePrefix)
else:
front_name = datanameToLabelname(filename, self.mapTypes, self.removePrefix)
mapType, front_name = self.getProjectionTypeAndFilePath(front_name)
# To distinguish the output name
#filename = os.path.splitext(filename)[0]+mapType+os.path.splitext(filename)[1]
# Read Label Data
#print("label:", filename)
#print(front_name, mapType, filename)
try:
if(self.extractRegularGrid):
fronts = self.getRegularGridLabel(front_name, self.mapTypes[mapType][1], self.mapTypes[mapType][2], self.mapTypes[mapType][3], mapType, extract_seed )
else:
fronts = self.getLabel(front_name, self.mapTypes[mapType][1], self.mapTypes[mapType][2], self.mapTypes[mapType][3], mapType, extract_seed )
except:
print("filename is", front_name)
if(self.printFileName):
print(idx)
print(img_name)
print(front_name)
print()
if(self.has_label and fronts is None):
print("Did not extract a Front even though it should")
print(idx, filename)
# Read Image Data
        #print("image:", filename)
image = None
try:
if(self.extractRegularGrid):
image = self.getRegularGridImage(img_name, self.mapTypes[mapType][1], self.mapTypes[mapType][2], self.mapTypes[mapType][3], extract_seed, transform_seed)
else:
image = self.getImage(img_name, self.mapTypes[mapType][1], self.mapTypes[mapType][2], self.mapTypes[mapType][3], extract_seed, transform_seed)
except Exception as e:
print(e)
print("filename is", filename)
raise Exception(e,"\nfailed to extract image data {}".format(filename))
if(image is None):
print("failed to extract image data")
print(filename, img_name, front_name)
print(idx)
raise Exception("failed to extract image data {}".format(filename))
mask = None
if(len(self.mapTypes[mapType]) == 5 and (not self.mapTypes[mapType][4] is None)):
mask = self.getMask(self.mapTypes[mapType][-1], self.mapTypes[mapType][1], self.mapTypes[mapType][2], self.mapTypes[mapType][3], extract_seed)
        # Perform transformation on the data (affine transformation + random crop) => Crop enables equally sized images
if self.transform:
finalImage = self.transformImage(image, transform_seed)
if(mask is None):
finalMask = None
else:
finalMask = torch.from_numpy(self.transformImage(mask.reshape((1,*mask.shape)), transform_seed).reshape(*mask.shape)).detach()
if(self.has_label):
finalFronts = self.transformLabel(fronts, transform_seed)
if(self.asCoords):
return [torch.from_numpy(finalImage), finalFronts, filename, finalMask]
else:
return [torch.from_numpy(finalImage), torch.from_numpy(finalFronts), filename, finalMask]
else:
return [torch.from_numpy(finalImage), None, filename, finalMask]
else:
if(mask is None):
pass
else:
mask = torch.from_numpy(mask)
if(self.has_label):
if(self.asCoords):
return [torch.from_numpy(image), fronts, filename, mask]
else:
return [torch.from_numpy(image), torch.from_numpy(fronts), filename, mask]
else:
return [torch.from_numpy(image), None, filename, mask]
def getCropRange(self, latrange, lonrange, res, seed):
if(self.cropsize is None):
return latrange, lonrange
else:
# perform crop before reading data, to reduce memory usage
common_seed= seed
h,w = int(np.abs((latrange[1]-latrange[0]+res[0]-0.001)/res[0])), int(np.abs((lonrange[1]-lonrange[0])/res[1]))
th,tw = self.cropsize
random.seed(common_seed)
i = random.randint(0, h-th)*res[0]
j = random.randint(0, w-tw)*res[1]
th *= res[0]
tw *= res[1]
return (latrange[0]+i, latrange[0]+i+th), (lonrange[0]+j, lonrange[0]+j+tw)
def getImage(self, filename, latrange, lonrange, res, seed, tseed = 0):
tgt_latrange, tgt_lonrange = self.getCropRange(latrange, lonrange, res, seed)
return self.era_extractor(filename, tgt_latrange, tgt_lonrange, self.levelrange, tseed)
def getLabel(self, filename, latrange, lonrange, res, types, seed):
tgt_latrange, tgt_lonrange = self.getCropRange(latrange, lonrange, res, seed)
if(self.halfRes):
return self.label_extractor(filename, (tgt_latrange[0], tgt_latrange[1]), (tgt_lonrange[0], tgt_lonrange[1]), (res[0]*2, res[1]*2), types)
else:
return self.label_extractor(filename, (tgt_latrange[0], tgt_latrange[1]), (tgt_lonrange[0], tgt_lonrange[1]), res, types)
def getMask(self, mask, latrange, lonrange, res, seed):
tgt_latrange, tgt_lonrange = self.getCropRange(latrange, lonrange, res, seed)
return mask[int((90-tgt_latrange[0])/np.abs(res[0])):int((90-tgt_latrange[1])/np.abs(res[0])), int((180+tgt_lonrange[0])/res[1]):int((180+tgt_lonrange[1])/res[1])]
def transformImage(self, image, seed):
if(self.transform[0] is None):
return image
finalImage = np.zeros_like(image)
for channel in range(image.shape[0]):
#for level in range(image.shape[1]):
random.seed(seed)
finalImage[channel, :,:] = self.transform[0](image[channel,:,:])
return finalImage
def transformLabel(self, label, seed):
if(self.transform[1] is None):
return label
if(self.asCoords):
finalLabel = label
for group in range(len(label)):
random.seed(seed)
finalLabel[group] = self.transform[1](finalLabel[group])
else:
finalLabel = np.zeros((label.shape))
for channel in range(label.shape[2]):
random.seed(seed)
finalLabel[:,:,channel] = self.transform[1](label[:,:,channel])
return finalLabel
def getProjectionTypeAndFilePath(self, front_name):
projection_type = ""
keys, names = [], []
for key, fname in front_name.items():
currFold = os.path.join(self.label_dir, key)
# get filename without path
filename = fname.split("/")[-1]
#print(filename, currFold, fname)
#print(os.listdir(currFold))
if(filename in os.listdir(currFold)):
keys.append(key), names.append(os.path.join(self.label_dir, fname))
idx = 0
if(len(keys)>0):
if(self.randomizeMapTypes):
idx = random.randint(0,len(keys)-1)
return keys[idx], names[idx]
# No Label found
print(front_name)
print(os.listdir(self.label_dir))
print("Invalid label data pair, no label found!")
return projection_type, front_name
def __repr__(self):
myString = "WeatherFrontDataset\n"
myString += str(self.__dict__)
return myString
def getInfo(self):
myString = "WeatherFrontDataset\n"
myString += "data_dir :: "+ "str :: " +str(self.data_dir)+" :: end\n"
myString += "label_dir :: "+ "str :: " +str(self.label_dir)+" :: end\n"
myString += "map_types :: "+ "dict(str: tuple(str, tuple(float,float), tuple(float,float), tuple(float,float))) :: " +str(self.mapTypes)+" :: end\n"
myString += "levelrange :: "+ "list(int) :: " +str(list(self.levelrange))+" :: end\n"
myString += "transforms :: "+ "obj :: " +str(self.transform)+" :: end\n"
myString += "outsize :: "+ "tuple(int,int) :: " +str(self.cropsize)+" :: end\n"
myString += "translat :: "+ "tuple(int,int) :: " +str(self.label_extractor.imageCreator.maxOff)+" :: end\n"
myString += "printFileName :: "+ "bool :: " +str(self.printFileName)+" :: end\n"
myString += "labelThickness :: "+ "int :: " +str(self.label_extractor.imageCreator.thickness)+" :: end\n"
myString += "labelGrouping :: "+ "str :: " +str(self.label_extractor.imageCreator.labelGrouping)+" :: end\n"
myString += "Variables :: "+ "list(str) :: " +str(self.era_extractor.variables)+" :: end\n"
myString += "NormType :: "+ "int :: " +str(self.era_extractor.reader.normalize_type)+" :: end\n"
return myString
class WeatherFrontBatch:
def __init__ (self, data, label_as_float = True, transpose_rate = 0.5, swap_indices = None):
transposed_data = (list(zip(*data)))
self.data = torch.stack(transposed_data[0],0).float()
if(transposed_data[1][0] is None):
self.labels = None
else:
if(label_as_float):
self.labels = torch.stack(transposed_data[1],0).float()
else:
self.labels = torch.stack(transposed_data[1],0).long()
self.filenames = transposed_data[2]
def pin_memory(self):
self.data = self.data.pin_memory()
return [self.data, self.labels, self.filenames]
class WeatherFrontsAsCoordinatesBatch:
def __init__ (self, data, label_as_float = True, transpose_rate = 0.5, swap_indices = None):
transposed_data = (list(zip(*data)))
self.data = torch.stack(transposed_data[0],0).float()
if(transposed_data[1][0] is None):
self.labels = None
else:
self.labels = transposed_data[1]
if(transposed_data[3][0] is None):
self.masks = None
else:
self.masks = torch.stack(transposed_data[3],0).float()
self.filenames = transposed_data[2]
def pin_memory(self):
self.data = self.data.pin_memory()
return [self.data, self.labels, self.filenames, self.masks]
class collate_wrapper:
def __init__(self, binary = True, asCoordinates=False, transpose_rate = 0.5, swap_indices = None):
self.label_as_float = binary
self.transpose_rate = transpose_rate
self.swap_indices = swap_indices
self.asCoords = asCoordinates
def __call__(self, batch):
if(self.asCoords):
return WeatherFrontsAsCoordinatesBatch(batch, label_as_float=self.label_as_float, transpose_rate=self.transpose_rate, swap_indices = self.swap_indices)
else:
return WeatherFrontBatch(batch, label_as_float=self.label_as_float, transpose_rate=self.transpose_rate, swap_indices = self.swap_indices)
| 46.884817 | 365 | 0.599107 | 2,101 | 17,910 | 5.007139 | 0.161352 | 0.031939 | 0.03251 | 0.007985 | 0.355418 | 0.305228 | 0.262262 | 0.237833 | 0.208365 | 0.187643 | 0 | 0.016155 | 0.28459 | 17,910 | 381 | 366 | 47.007874 | 0.804886 | 0.128085 | 0 | 0.334495 | 0 | 0.003484 | 0.059803 | 0.00271 | 0.041812 | 0 | 0 | 0 | 0 | 1 | 0.069686 | false | 0.003484 | 0.038328 | 0.010453 | 0.219512 | 0.087108 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
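A small sketch of how the collate_wrapper / WeatherFrontBatch pair above assembles a batch from the (image, label, filename, mask) tuples returned by __getitem__ (assuming the module above is importable); the tensors and file names below are fabricated stand-ins for real ERA5 samples, and in a full setup the wrapper would be passed as collate_fn to a torch DataLoader wrapping a WeatherFrontDataset.

import torch

samples = [
    (torch.zeros(3, 8, 8), torch.zeros(8, 8, 3), "20160101_00.nc", None),
    (torch.ones(3, 8, 8),  torch.ones(8, 8, 3),  "20160101_06.nc", None),
]
collate = collate_wrapper(binary=True)           # stack labels as float tensors
batch = collate(samples)
print(batch.data.shape, batch.labels.shape)      # torch.Size([2, 3, 8, 8]) torch.Size([2, 8, 8, 3])
print(batch.filenames)                           # ('20160101_00.nc', '20160101_06.nc')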
74d7b0958d8d2379fabfb2fe33a6017490b1f91e | 2,865 | py | Python | packages/mccomponents/tests/mccomponents/sample/kernel-orientation/kernelorientation_TestCase.py | mcvine/mcvine | 42232534b0c6af729628009bed165cd7d833789d | [
"BSD-3-Clause"
] | 5 | 2017-01-16T03:59:47.000Z | 2020-06-23T02:54:19.000Z | packages/mccomponents/tests/mccomponents/sample/kernel-orientation/kernelorientation_TestCase.py | mcvine/mcvine | 42232534b0c6af729628009bed165cd7d833789d | [
"BSD-3-Clause"
] | 293 | 2015-10-29T17:45:52.000Z | 2022-01-07T16:31:09.000Z | packages/mccomponents/tests/mccomponents/sample/kernel-orientation/kernelorientation_TestCase.py | mcvine/mcvine | 42232534b0c6af729628009bed165cd7d833789d | [
"BSD-3-Clause"
] | 1 | 2019-05-25T00:53:31.000Z | 2019-05-25T00:53:31.000Z | #!/usr/bin/env python
#
# Jiao Lin <jiao.lin@gmail.com
"""
This test checks the "orientation" parameter of kernels.
* Sub-kernels in a "KernelContainer" have the parameter "orientation"
  to specify their orientation relative to their parent kernel.
* The root level KernelContainer always has the same coordinate system
as the scatterer.
In this test the coordinate system of the kernel
is rotated 30 deg around the y axis (vertical up)
with respect to the scatterer.
Roughly it is illustrated below:
x' ^ x
|\ |
\ |
\ | > z'
\ | . '
\ | . '
\|. ' ) 30 deg
-------------------> z
So the transformation matrix is
sqrt(3)/2 0 1/2
0 1 0
-1/2 0 sqrt(3)/2
This is specified in cyl/X-scatterer.xml.
In the kernel's coordinate system, we set the momentum transfer
of the kernel to be [2,0,0], which is along the x' axis.
The incident neutron is along the z axis with an energy of 100 meV.
With this information, we can compute the momentum transfer
in the instrument coordinate system, and then the final energy
and the energy transfer E.
It turns out that E = -37.07822 meV, and this is set in cyl/X-scatterer.xml.
In the following test, we make sure the final velocities of
the scattered neutrons are as expected and that the neutrons
have valid probabilities.
"""
import unittest, numpy as np
class TestCase(unittest.TestCase):
def test1(self):
'kernel orientation'
# source
from mcni.components.MonochromaticSource import MonochromaticSource
import mcni, numpy as np
Ei = 100
from mcni.utils import conversion as Conv
ki = Conv.e2k(Ei)
vi = Conv.e2v(Ei)
Qdir = np.array([np.sqrt(3)/2, 0, -1./2])
Q = Qdir * 2
kf = np.array([0,0,ki]) - Q
Ef = Conv.k2e(np.linalg.norm(kf))
E = Ei-Ef
dv = Qdir * Conv.k2v(Q)
vf = np.array([0,0,vi]) - dv
# print ki, Q, kf
# print Ei, Ef, E
neutron = mcni.neutron(r=(0,0,-1), v=(0,0,vi), prob=1)
source = MonochromaticSource('s', neutron, dx=0.001, dy=0.001, dE=0)
# sample
from mccomponents.sample import samplecomponent
scatterer = samplecomponent('sa', 'cyl/sampleassembly.xml' )
# incident
N = 1000
neutrons = mcni.neutron_buffer(N)
neutrons = source.process(neutrons)
# print neutrons
# scatter
scatterer.process(neutrons)
# print neutrons
self.assertEqual(len(neutrons), N)
for neutron in neutrons:
np.allclose(neutron.state.velocity, vf)
self.assertTrue(neutron.probability > 0)
continue
return
pass # end of scattererxml_TestCase
def main(): unittest.main()
if __name__ == "__main__": main()
# End of file
| 27.285714 | 76 | 0.610471 | 392 | 2,865 | 4.436224 | 0.431122 | 0.00575 | 0.010351 | 0.008051 | 0.033353 | 0.033353 | 0 | 0 | 0 | 0 | 0 | 0.033399 | 0.289354 | 2,865 | 104 | 77 | 27.548077 | 0.820727 | 0.520768 | 0 | 0 | 0 | 0 | 0.037199 | 0.016047 | 0 | 0 | 0 | 0 | 0.058824 | 1 | 0.058824 | false | 0.029412 | 0.147059 | 0 | 0.264706 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
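The expected energy transfer quoted in the docstring can be reproduced without the mcni conversion helpers; the sketch below assumes the standard neutron conversion constant hbar^2/(2*m_n) ≈ 2.0721 meV*Angstrom^2 and wavevectors in 1/Angstrom.

import numpy as np

MEV_PER_ANG2 = 2.0721                            # E [meV] = MEV_PER_ANG2 * k**2 [1/Angstrom**2]
Ei = 100.0
ki = np.sqrt(Ei / MEV_PER_ANG2)                  # incident wavevector, along z
Qdir = np.array([np.sqrt(3) / 2, 0.0, -0.5])     # kernel x' axis expressed in the instrument frame
Q = 2.0 * Qdir                                   # momentum transfer of 2 1/Angstrom along x'
kf = np.array([0.0, 0.0, ki]) - Q
Ef = MEV_PER_ANG2 * np.dot(kf, kf)
print(Ei - Ef)                                   # ~ -37.08, the value set in cyl/X-scatterer.xml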
74d7f5258c12f0959a81cebaa0a5b91827535d65 | 27,239 | py | Python | python-modules/robcoewminterface/robcoewminterface/ewm.py | yschiebelhut/ewm-cloud-robotics | bdf3a6c13850d266b70168912494300c32d4d803 | [
"Apache-2.0"
] | 25 | 2019-07-31T12:50:33.000Z | 2022-01-11T15:53:40.000Z | python-modules/robcoewminterface/robcoewminterface/ewm.py | yschiebelhut/ewm-cloud-robotics | bdf3a6c13850d266b70168912494300c32d4d803 | [
"Apache-2.0"
] | 10 | 2019-07-11T13:12:12.000Z | 2022-03-15T15:46:58.000Z | python-modules/robcoewminterface/robcoewminterface/ewm.py | isabella232/ewm-cloud-robotics | 8210843df323379ded92ec14ec73b1f3ef6b2f41 | [
"Apache-2.0"
] | 23 | 2019-08-07T21:23:38.000Z | 2022-03-08T00:16:10.000Z | #!/usr/bin/env python3
# encoding: utf-8
#
# Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved.
#
# This file is part of ewm-cloud-robotics
# (see https://github.com/SAP/ewm-cloud-robotics).
#
# This file is licensed under the Apache Software License, v. 2 except as noted
# otherwise in the LICENSE file (https://github.com/SAP/ewm-cloud-robotics/blob/master/LICENSE)
#
"""EWM OData provider for robcoewminterface."""
import logging
from typing import Any, Dict, List, Optional
from requests import Response
from robcoewmtypes.warehouse import Warehouse, WarehouseDescription, StorageBin
from robcoewmtypes.warehouseorder import (
WarehouseOrder, WarehouseTask, WarehouseTaskConfirmation, ConfirmWarehouseTask)
from robcoewmtypes.robot import (
Robot, RobotResourceType, ResourceGroup, ResourceTypeDescription, ResourceGroupDescription)
from .conversion import odata_to_attr
from .exceptions import ODataAPIException, get_exception_class
from .odata import ODataHandler
_LOGGER = logging.getLogger(__name__)
HTTP_SUCCESS = [200, 201, 202, 203, 204, 205, 206, 207, 208, 226]
HTTP_BUS_EXCEPTION = [404, 500]
STATE_SUCCEEDED = 'SUCCEEDED'
class EWMOdata:
"""Base class for EWM OData interface."""
def __init__(self, odata: ODataHandler) -> None:
"""Construct."""
self._odata = odata
def handle_http_response(self, endpoint: str, http_resp: Response) -> Any:
"""
Handle an OData HTTP request response.
Returns attrs data class in case of success and raises exception on error.
        For PATCH requests the body of the OData response is empty on success; True is returned in that case.
"""
# Return code handling
if http_resp.status_code in HTTP_SUCCESS:
self._odata.odata_counter.labels( # pylint: disable=no-member
endpoint=endpoint, result=STATE_SUCCEEDED).inc()
if http_resp.text:
return odata_to_attr(http_resp.json())
else:
return True
# Determine error code
if http_resp.status_code == 403:
error_code = '403'
else:
# Get error code from HTTP response
try:
error_code = http_resp.json()['error']['code']
except KeyError:
error_code = ''
if http_resp.status_code == 404 and not error_code:
error_code = '404'
# Error handling for business exceptions raised in EWM backend
if http_resp.status_code in HTTP_BUS_EXCEPTION:
exception_class = get_exception_class(error_code)
self._odata.odata_counter.labels( # pylint: disable=no-member
endpoint=endpoint, result=exception_class.ERROR_CODE).inc()
raise exception_class()
# For any other error use generic exception
self._odata.odata_counter.labels( # pylint: disable=no-member
endpoint=endpoint, result=error_code).inc()
raise ODataAPIException(error_code=error_code)
class WarehouseOData(EWMOdata):
"""Interaction with EWM warehouse APIs."""
def get_warehouse(
self, lgnum: str, descriptions: bool = False, storagebins: bool = False) -> Warehouse:
"""
Get data of one warehouse.
Optionally expand descriptions and storage bins.
"""
# define endpoint
endpoint = '/WarehouseNumberSet'
# create URL parameter
params = {}
if descriptions or storagebins:
exvalues = []
if descriptions:
exvalues.append('WarehouseDescriptions')
if storagebins:
exvalues.append('StorageBins')
params['$expand'] = ','.join(exvalues)
# create IDs
ids = {'Lgnum': lgnum}
# HTTP OData GET request
http_resp = self._odata.http_get(endpoint, urlparams=params, ids=ids)
return self.handle_http_response(endpoint, http_resp)
def get_warehouses(self, descriptions: bool = False,
storagebins: bool = False) -> Optional[List[Warehouse]]:
"""
Get data of all warehouses.
Optionally expand descriptions and storage bins.
"""
# define endpoint
endpoint = '/WarehouseNumberSet'
# create URL parameter
params = {}
if descriptions or storagebins:
exvalues = []
if descriptions:
exvalues.append('WarehouseDescriptions')
if storagebins:
exvalues.append('StorageBins')
params['$expand'] = ','.join(exvalues)
# HTTP OData GET request
http_resp = self._odata.http_get(endpoint, urlparams=params)
return self.handle_http_response(endpoint, http_resp)
def get_whdescription(self, lgnum: str, spras: str) -> WarehouseDescription:
"""Get description of one warehouse in a language."""
# define endpoint
endpoint = '/WarehouseDescriptionSet'
# create IDs
ids = {'Lgnum': lgnum, 'Spras': spras}
# HTTP OData GET request
http_resp = self._odata.http_get(endpoint, ids=ids)
return self.handle_http_response(endpoint, http_resp)
def get_whdescriptions(self, lgnum: Optional[str] = None) -> List[WarehouseDescription]:
"""
Get descriptions of warehouses in all languages.
Optionally filter by warehouse.
"""
ids: Optional[Dict]
nav: Optional[str]
if lgnum:
# define endpoint
endpoint = '/WarehouseNumberSet'
# create IDs
ids = {'Lgnum': lgnum}
# create navigation
nav = '/WarehouseDescriptions'
else:
# define endpoint
endpoint = '/WarehouseDescriptionSet'
# create IDs
ids = None
# create navigation
nav = None
# HTTP OData GET request
http_resp = self._odata.http_get(endpoint, ids=ids, navigation=nav)
return self.handle_http_response(endpoint, http_resp)
def get_storagebin(self, lgnum: str, lgpla: str) -> StorageBin:
"""Get one specific storage bin."""
# define endpoint
endpoint = '/StorageBinSet'
# create IDs
ids = {'Lgnum': lgnum, 'Lgpla': lgpla}
# HTTP OData GET request
http_resp = self._odata.http_get(endpoint, ids=ids)
return self.handle_http_response(endpoint, http_resp)
def get_storagebins(self, lgnum: Optional[str] = None) -> List[WarehouseDescription]:
"""
Get all storage bins from the system.
Optionally filter by warehouse.
"""
ids: Optional[Dict]
nav: Optional[str]
if lgnum:
# define endpoint
endpoint = '/WarehouseNumberSet'
# create IDs
ids = {'Lgnum': lgnum}
# create navigation
nav = '/StorageBins'
else:
# define endpoint
endpoint = '/StorageBinSet'
# create IDs
ids = None
# create navigation
nav = None
# HTTP OData GET request
http_resp = self._odata.http_get(endpoint, ids=ids, navigation=nav)
return self.handle_http_response(endpoint, http_resp)
class WarehouseOrderOData(EWMOdata):
"""Interaction with EWM warehouse order APIs."""
def get_warehouseorder(
self, lgnum: str, who: str, openwarehousetasks: bool = False) -> WarehouseOrder:
"""
Get data of one warehouse order.
Optionally expand warehouse tasks.
"""
# define endpoint
endpoint = '/WarehouseOrderSet'
# create URL parameter
params = {}
if openwarehousetasks:
exvalues = []
exvalues.append('OpenWarehouseTasks')
params['$expand'] = ','.join(exvalues)
# create IDs
ids = {'Lgnum': lgnum, 'Who': who}
# HTTP OData GET request
http_resp = self._odata.http_get(endpoint, ids=ids, urlparams=params)
return self.handle_http_response(endpoint, http_resp)
def get_warehouseorders(
self, lgnum: Optional[str] = None, topwhoid: Optional[str] = None,
openwarehousetasks: bool = False) -> List[WarehouseOrder]:
"""
Get data of all warehouse orders.
Optionally filter by warehouse expand warehouse tasks.
"""
# create URL parameter
params = {}
if openwarehousetasks:
exvalues = []
exvalues.append('OpenWarehouseTasks')
params['$expand'] = ','.join(exvalues)
# Define endpoint IDs and navigation based on parameter selection
if lgnum and topwhoid:
# define endpoint
endpoint = '/WarehouseOrderSet'
# create IDs
ids = None
# create navigation
nav = None
# add filter URL param
params['$filter'] = "Lgnum eq '{}' and Topwhoid eq '{}'".format(
lgnum, topwhoid)
elif lgnum:
# define endpoint
endpoint = '/WarehouseNumberSet'
# create IDs
ids = {'Lgnum': lgnum}
# create navigation
nav = '/WarehouseOrders'
elif topwhoid:
# define endpoint
endpoint = '/WarehouseOrderSet'
# create IDs
ids = None
# create navigation
nav = None
# add filter URL param
params['$filter'] = "Topwhoid eq '{}'".format(topwhoid)
else:
# define endpoint
endpoint = '/WarehouseOrderSet'
# create IDs
ids = None
# create navigation
nav = None
# HTTP OData GET request
http_resp = self._odata.http_get(
endpoint, urlparams=params, ids=ids, navigation=nav)
return self.handle_http_response(endpoint, http_resp)
def get_robot_warehouseorders(self, lgnum: str, rsrc: str) -> List[WarehouseOrder]:
"""Get warehouse orders assigned to the robot resource."""
# define endpoint
endpoint = '/GetRobotWarehouseOrders'
# create URL parameter
params = {'Lgnum': "'{}'".format(lgnum), 'Rsrc': "'{}'".format(rsrc)}
# HTTP OData GET request
http_resp = self._odata.http_get(endpoint, urlparams=params)
return self.handle_http_response(endpoint, http_resp)
def getnew_robot_warehouseorder(self, lgnum: str, rsrc: str) -> WarehouseOrder:
"""
Get a new warehouse order for a robot resource.
The warehouse order will be immediately assigned to the robot
resource in EWM.
"""
# define endpoint
endpoint = '/GetNewRobotWarehouseOrder'
# create URL parameter
params = {'Lgnum': "'{}'".format(lgnum), 'Rsrc': "'{}'".format(rsrc)}
# HTTP OData GET request
http_resp = self._odata.http_patch_post('post', endpoint,
urlparams=params)
return self.handle_http_response(endpoint, http_resp)
def getnew_rtype_warehouseorders(
self, lgnum: str, rsrcgrp: str, rsrctype: str, nowho: int) -> List[WarehouseOrder]:
"""
Get #nowho new warehouse orders for a robot type.
The warehouse order is marked as 'in process', but not assigned to a
robot resource yet. This needs to be done by calling the method:
assign_robot_warehouseorder.
"""
# define endpoint
endpoint = '/GetNewRobotTypeWarehouseOrders'
# create URL parameter
params = {'Lgnum': "'{}'".format(lgnum),
'RsrcGrp': "'{}'".format(rsrcgrp),
'RsrcType': "'{}'".format(rsrctype),
'NoWho': int(nowho)}
# HTTP OData GET request
http_resp = self._odata.http_patch_post('post', endpoint, urlparams=params)
return self.handle_http_response(endpoint, http_resp)
def get_in_process_warehouseorders(
self, lgnum: str, rsrcgrp: str, rsrctype: str) -> List[WarehouseOrder]:
"""Get warehouse orders in process but not assigned to a robot resource."""
# define endpoint
endpoint = '/GetInProcessWarehouseOrders'
# create URL parameter
params = {'Lgnum': "'{}'".format(lgnum),
'RsrcGrp': "'{}'".format(rsrcgrp),
'RsrcType': "'{}'".format(rsrctype)}
# HTTP OData GET request
http_resp = self._odata.http_get(endpoint, urlparams=params)
return self.handle_http_response(endpoint, http_resp)
def assign_robot_warehouseorder(self, lgnum: str, rsrc: str, who: str) -> WarehouseOrder:
"""Assign a robot resource to a warehouse order."""
# define endpoint
endpoint = '/AssignRobotToWarehouseOrder'
# create URL parameter
params = {'Lgnum': "'{}'".format(lgnum), 'Rsrc': "'{}'".format(rsrc),
'Who': "'{}'".format(who)}
# HTTP OData GET request
http_resp = self._odata.http_patch_post('post', endpoint, urlparams=params)
return self.handle_http_response(endpoint, http_resp)
def get_openwarehousetask(self, lgnum: str, tanum: str) -> WarehouseTask:
"""Get data from one warehouse task."""
# define endpoint
endpoint = '/OpenWarehouseTaskSet'
# create IDs
ids = {'Lgnum': lgnum, 'Tanum': tanum}
# HTTP OData GET request
http_resp = self._odata.http_get(endpoint, ids=ids)
return self.handle_http_response(endpoint, http_resp)
def get_openwarehousetasks(
self, lgnum: Optional[str] = None, who: Optional[str] = None) -> List[WarehouseTask]:
"""
Get data of all open warehouse tasks.
Optionally filter by warehouse and warehouse order.
"""
# Define endpoint IDs and navigation based on parameter selection
ids: Optional[Dict]
nav: Optional[str]
if lgnum and who:
# define endpoint
endpoint = '/WarehouseOrderSet'
# create IDs
ids = {'Lgnum': lgnum, 'Who': who}
# create navigation
nav = '/OpenWarehouseTasks'
elif lgnum or who:
raise AttributeError(
'Either filter "lgnum" AND "who" or none of them ')
else:
# define endpoint
endpoint = '/OpenWarehouseTaskSet'
# create IDs
ids = None
# create navigation
nav = None
# HTTP OData GET request
http_resp = self._odata.http_get(endpoint, ids=ids, navigation=nav)
return self.handle_http_response(endpoint, http_resp)
def confirm_warehousetask(
self, lgnum: str, tanum: str, rsrc: str) -> WarehouseTaskConfirmation:
"""
Confirm a warehouse task - putaway.
        TODO: Implement exceptions: partial confirmations, bin change, etc.
"""
# define endpoint
endpoint = '/ConfirmWarehouseTask'
# create URL parameter
params = {'Lgnum': "'{}'".format(lgnum), 'Tanum': "'{}'".format(tanum),
'Rsrc': "'{}'".format(rsrc)}
# HTTP OData POST request
http_resp = self._odata.http_patch_post('post', endpoint, urlparams=params)
return self.handle_http_response(endpoint, http_resp)
def confirm_warehousetask_firststep(
self, lgnum: str, tanum: str, rsrc: str) -> WarehouseTaskConfirmation:
"""
Confirm a warehouse task - first step.
First confirmation of a warehouse task.
This also assigns the warehouse task to the resource.
"""
# define endpoint
endpoint = '/ConfirmWarehouseTaskFirstStep'
# create URL parameter
params = {'Lgnum': "'{}'".format(lgnum), 'Tanum': "'{}'".format(tanum),
'Rsrc': "'{}'".format(rsrc)}
# HTTP OData POST request
http_resp = self._odata.http_patch_post('post', endpoint, urlparams=params)
return self.handle_http_response(endpoint, http_resp)
def send_confirmation_error(
self, lgnum: str, rsrc: str, who: str, tanum: str, confnumber: str) -> WarehouseOrder:
"""Send error before confirmation of a warehouse task."""
# define endpoint
if confnumber == ConfirmWarehouseTask.FIRST_CONF:
endpoint = '/SendFirstConfirmationError'
elif confnumber == ConfirmWarehouseTask.SECOND_CONF:
endpoint = '/SendSecondConfirmationError'
else:
raise ValueError('Could be used only for FIRST and SECOND confirmation')
# create URL parameter
params = {'Lgnum': "'{}'".format(lgnum), 'Rsrc': "'{}'".format(rsrc),
'Who': "'{}'".format(who), 'Tanum': "'{}'".format(tanum)}
# HTTP OData GET request
http_resp = self._odata.http_patch_post('post', endpoint, urlparams=params)
return self.handle_http_response(endpoint, http_resp)
def unassign_robot_warehouseorder(self, lgnum: str, rsrc: str, who: str) -> WarehouseOrder:
"""Unassign a robot resource from a warehouse order."""
# define endpoint
endpoint = '/UnassignRobotFromWarehouseOrder'
# create URL parameter
params = {'Lgnum': "'{}'".format(lgnum), 'Rsrc': "'{}'".format(rsrc),
'Who': "'{}'".format(who)}
# HTTP OData GET request
http_resp = self._odata.http_patch_post('post', endpoint, urlparams=params)
return self.handle_http_response(endpoint, http_resp)
def unset_warehouseorder_in_process(self, lgnum: str, who: str) -> WarehouseOrder:
"""Unset in process status of a warehouse order."""
# define endpoint
endpoint = '/UnsetWarehouseorderInProcessStatus'
# create URL parameter
params = {'Lgnum': "'{}'".format(lgnum), 'Who': "'{}'".format(who)}
# HTTP OData GET request
http_resp = self._odata.http_patch_post('post', endpoint, urlparams=params)
return self.handle_http_response(endpoint, http_resp)
class RobotOData(EWMOdata):
"""Interaction with EWM warehouse robot APIs."""
def get_robot(self, lgnum: str, rsrc: str) -> Robot:
"""Get data of one robot."""
# define endpoint
endpoint = '/RobotSet'
# create IDs
ids = {'Lgnum': lgnum, 'Rsrc': rsrc}
# HTTP OData GET request
http_resp = self._odata.http_get(endpoint, ids=ids)
return self.handle_http_response(endpoint, http_resp)
def get_robots(self, lgnum: Optional[str] = None) -> List[Robot]:
"""
Get data of all robots.
Optionally filter by warehouse.
"""
# Define endpoint IDs and navigation based on parameter selection
ids: Optional[Dict]
nav: Optional[str]
if lgnum:
# define endpoint
endpoint = '/WarehouseNumberSet'
# create IDs
ids = {'Lgnum': lgnum}
# create navigation
nav = '/Robots'
else:
# define endpoint
endpoint = '/RobotSet'
# create IDs
ids = None
# create navigation
nav = None
# HTTP OData GET request
http_resp = self._odata.http_get(endpoint, ids=ids, navigation=nav)
return self.handle_http_response(endpoint, http_resp)
def create_robot(self, lgnum: str, rsrc: str, rsrctype: str, rsrcgrp: str) -> Robot:
"""Create a new robot resource in EWM."""
# define endpoint
endpoint = '/RobotSet'
# create body
jsonbody = {'Lgnum': lgnum, 'Rsrc': rsrc, 'RsrcType': rsrctype, 'RsrcGrp': rsrcgrp}
# HTTP OData POST request
http_resp = self._odata.http_patch_post('post', endpoint, jsonbody=jsonbody)
return self.handle_http_response(endpoint, http_resp)
def change_robot(
self, lgnum: str, rsrc: str, rsrctype: Optional[str] = None,
rsrcgrp: Optional[str] = None) -> bool:
"""Change an existing robot resource in EWM."""
# define endpoint
endpoint = '/RobotSet'
# create IDs
ids = {'Lgnum': lgnum, 'Rsrc': rsrc}
# create body
jsonbody = {}
if rsrctype is not None:
jsonbody['RsrcType'] = rsrctype
if rsrcgrp is not None:
jsonbody['RsrcGrp'] = rsrcgrp
# HTTP OData PATCH request
http_resp = self._odata.http_patch_post('patch', endpoint, ids=ids, jsonbody=jsonbody)
        # No HTTP body on successful PATCH requests
# Body only exists in case of exceptions
return self.handle_http_response(endpoint, http_resp)
def set_robot_status(self, lgnum: str, rsrc: str, exccode: str) -> Robot:
"""Set exception codes for robot resources in EWM."""
# define endpoint
endpoint = '/SetRobotStatus'
# create URL parameter
params = {'Lgnum': "'{}'".format(lgnum), 'Rsrc': "'{}'".format(rsrc),
'Exccode': "'{}'".format(exccode)}
# HTTP OData POST request
http_resp = self._odata.http_patch_post('post', endpoint, urlparams=params)
return self.handle_http_response(endpoint, http_resp)
def get_robot_resource_type(self, lgnum: str, rsrctype: str) -> RobotResourceType:
"""Get data of one robot resource type."""
# define endpoint
endpoint = '/RobotResourceTypeSet'
# create IDs
ids = {'Lgnum': lgnum, 'RsrcType': rsrctype}
# HTTP OData GET request
http_resp = self._odata.http_get(endpoint, ids=ids)
return self.handle_http_response(endpoint, http_resp)
def get_robot_resource_types(self, lgnum: Optional[str] = None) -> List[RobotResourceType]:
"""
Get data of all robot resource types.
Optionally filter by warehouse.
"""
# Define endpoint IDs and navigation based on parameter selection
ids: Optional[Dict]
nav: Optional[str]
if lgnum:
# define endpoint
endpoint = '/WarehouseNumberSet'
# create IDs
ids = {'Lgnum': lgnum}
# create navigation
nav = '/RobotResourceTypes'
else:
# define endpoint
endpoint = '/RobotResourceTypeSet'
# create IDs
ids = None
# create navigation
nav = None
# HTTP OData GET request
http_resp = self._odata.http_get(endpoint, ids=ids, navigation=nav)
return self.handle_http_response(endpoint, http_resp)
def get_resource_type_description(
self, lgnum: str, rsrctype: str, langu: str) -> ResourceTypeDescription:
"""Get description of one resource type in a language."""
# define endpoint
endpoint = '/ResourceTypeDescriptionSet'
# create IDs
ids = {'Lgnum': lgnum, 'RsrcType': rsrctype, 'Langu': langu}
# HTTP OData GET request
http_resp = self._odata.http_get(endpoint, ids=ids)
return self.handle_http_response(endpoint, http_resp)
def get_resource_type_descriptions(
self, lgnum: Optional[str] = None,
rsrctype: Optional[str] = None) -> List[ResourceTypeDescription]:
"""
Get descriptions of resource types in all languages.
Optionally filter by warehouse and resource type.
"""
ids: Optional[Dict]
nav: Optional[str]
if lgnum or rsrctype:
# define endpoint
endpoint = '/RobotResourceTypeSet'
# create IDs
ids = {'Lgnum': lgnum, 'RsrcType': rsrctype}
# create navigation
nav = '/ResourceTypeDescriptions'
else:
# define endpoint
endpoint = '/ResourceTypeDescriptionSet'
# create IDs
ids = None
# create navigation
nav = None
# HTTP OData GET request
http_resp = self._odata.http_get(endpoint, ids=ids, navigation=nav)
return self.handle_http_response(endpoint, http_resp)
def get_resource_group(self, lgnum: str, rsrcgrp: str) -> ResourceGroup:
"""Get data of one robot resource group."""
# define endpoint
endpoint = '/ResourceGroupSet'
# create IDs
ids = {'Lgnum': lgnum, 'RsrcGrp': rsrcgrp}
# HTTP OData GET request
http_resp = self._odata.http_get(endpoint, ids=ids)
return self.handle_http_response(endpoint, http_resp)
def get_resource_groups(self, lgnum: Optional[str] = None) -> List[ResourceGroup]:
"""
Get data of all resource groups.
Optionally filter by warehouse.
"""
# Define endpoint IDs and navigation based on parameter selection
ids: Optional[Dict]
nav: Optional[str]
if lgnum:
# define endpoint
endpoint = '/WarehouseNumberSet'
# create IDs
ids = {'Lgnum': lgnum}
# create navigation
nav = '/ResourceGroups'
else:
# define endpoint
endpoint = '/ResourceGroupSet'
# create IDs
ids = None
# create navigation
nav = None
# HTTP OData GET request
http_resp = self._odata.http_get(endpoint, ids=ids, navigation=nav)
return self.handle_http_response(endpoint, http_resp)
def get_resource_group_description(
self, lgnum: str, rsrcgrp: str, langu: str) -> ResourceGroupDescription:
"""Get description of one resource group in a language."""
# define endpoint
endpoint = '/ResourceGroupDescriptionSet'
# create IDs
ids = {'Lgnum': lgnum, 'RsrcGrp': rsrcgrp, 'Langu': langu}
# HTTP OData GET request
http_resp = self._odata.http_get(endpoint, ids=ids)
return self.handle_http_response(endpoint, http_resp)
def get_resource_group_descriptions(
self, lgnum: Optional[str] = None,
rsrcgrp: Optional[str] = None) -> List[ResourceGroupDescription]:
"""
Get descriptions of resource groups in all languages.
Optionally filter by warehouse and resource group.
"""
ids: Optional[Dict]
nav: Optional[str]
if lgnum or rsrcgrp:
# define endpoint
endpoint = '/ResourceGroupSet'
# create IDs
ids = {'Lgnum': lgnum, 'RsrcGrp': rsrcgrp}
# create navigation
nav = '/ResourceGroupDescriptions'
else:
# define endpoint
endpoint = '/ResourceGroupDescriptionSet'
# create IDs
ids = None
# create navigation
nav = None
# HTTP OData GET request
http_resp = self._odata.http_get(endpoint, ids=ids, navigation=nav)
return self.handle_http_response(endpoint, http_resp)
| 33.921544 | 98 | 0.598003 | 2,786 | 27,239 | 5.727925 | 0.110194 | 0.037097 | 0.059281 | 0.039291 | 0.692881 | 0.648264 | 0.615929 | 0.540795 | 0.526068 | 0.492668 | 0 | 0.002906 | 0.305224 | 27,239 | 802 | 99 | 33.96384 | 0.840317 | 0.231286 | 0 | 0.585635 | 0 | 0 | 0.098287 | 0.034256 | 0 | 0 | 0 | 0.001247 | 0 | 1 | 0.096685 | false | 0 | 0.024862 | 0 | 0.229282 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
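A hedged usage sketch of the warehouse-order API above: the ODataHandler construction is only a placeholder (its real configuration lives in robcoewminterface.odata and the surrounding project), and the warehouse number, resource and task number are invented values.

odata = ODataHandler(...)                        # placeholder: host, auth and service root omitted
who_api = WarehouseOrderOData(odata)

# Request a new warehouse order for the robot resource; EWM assigns it immediately.
order = who_api.getnew_robot_warehouseorder(lgnum='1710', rsrc='ROBOT-01')

# ... the robot executes the open warehouse task belonging to that order ...
who_api.confirm_warehousetask(lgnum='1710', tanum='0000012345', rsrc='ROBOT-01')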
74d9d4320921729eea09ee6fc647c058abfb15da | 71 | py | Python | pm4pymdl/util/parquet_importer/parameters.py | dorian1000/pm4py-mdl | 71e0c2425abb183da293a58d31e25e50137c774f | [
"MIT"
] | 5 | 2021-01-31T22:45:29.000Z | 2022-02-22T14:26:06.000Z | pm4pymdl/util/parquet_importer/parameters.py | Javert899/pm4py-mdl | 4cc875999100f3f1ad60b925a20e40cf52337757 | [
"MIT"
] | 3 | 2021-07-07T15:32:55.000Z | 2021-07-07T16:15:36.000Z | pm4pymdl/util/parquet_importer/parameters.py | dorian1000/pm4py-mdl | 71e0c2425abb183da293a58d31e25e50137c774f | [
"MIT"
] | 9 | 2020-09-23T15:34:11.000Z | 2022-03-17T09:15:40.000Z | from enum import Enum
class Parameters(Enum):
COLUMNS = "columns"
| 14.2 | 23 | 0.71831 | 9 | 71 | 5.666667 | 0.666667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.197183 | 71 | 4 | 24 | 17.75 | 0.894737 | 0 | 0 | 0 | 0 | 0 | 0.098592 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.333333 | 0 | 1 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 3 |
74dbc6e6353172f4c10b84fea774baef4a531397 | 95 | py | Python | src/sementeira/iii_controllers/pesquisar_pessoa.py | torraodocerrado/sementeira | 962d15bef63a73493b8cf29a22b656f19aa161ff | [
"Apache-2.0"
] | 2 | 2021-02-25T23:52:40.000Z | 2021-02-25T23:52:42.000Z | src/sementeira/iii_controllers/pesquisar_pessoa.py | torraodocerrado/sementeira | 962d15bef63a73493b8cf29a22b656f19aa161ff | [
"Apache-2.0"
] | null | null | null | src/sementeira/iii_controllers/pesquisar_pessoa.py | torraodocerrado/sementeira | 962d15bef63a73493b8cf29a22b656f19aa161ff | [
"Apache-2.0"
] | null | null | null | from .abstract_query import AbstractQuery
class PesquisarPessoa(AbstractQuery):
pass
| 15.833333 | 41 | 0.778947 | 9 | 95 | 8.111111 | 0.888889 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.178947 | 95 | 5 | 42 | 19 | 0.935897 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0.333333 | 0.333333 | 0 | 0.666667 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 0 | 1 | 0 | 0 | 6 |
74dbf3662176f3c18f639a38531d9a01acbedb79 | 632 | py | Python | time_struct.py | Kalpavrikshika/python_modules | 9f338ab006dd5653fd7f65ff253bc50e0fd61fc6 | [
"Apache-2.0"
] | 1 | 2018-07-02T03:37:03.000Z | 2018-07-02T03:37:03.000Z | time_struct.py | Kalpavrikshika/python_modules | 9f338ab006dd5653fd7f65ff253bc50e0fd61fc6 | [
"Apache-2.0"
] | null | null | null | time_struct.py | Kalpavrikshika/python_modules | 9f338ab006dd5653fd7f65ff253bc50e0fd61fc6 | [
"Apache-2.0"
] | null | null | null | #gmtime() returns the current time in UTC
#localtime() returns the current time in the local time zone
#mktime() converts a struct_time to its floating-point (seconds since the epoch) representation
import time
def show_struct(s):
print(' tm_year:', s.tm_year)
print(' tm_mon:', s.tm_mon)
print(' tm_mday:', s.tm_mday)
print(' tm_hour:', s.tm_hour)
print(' tm_min:', s.tm_min)
print(' tm_sec:' , s.tm_sec)
print(' tm_wday:', s.tm_wday)
print(' tm_yday:', s.tm_yday)
print(' tm_isdst:', s.tm_isdst)
print('gmtime:')
show_struct(time.gmtime())
print('\nlocaltime')
show_struct(time.localtime())
print('\nmktime:', time.mktime(time.localtime())) | 30.095238 | 64 | 0.677215 | 102 | 632 | 3.980392 | 0.333333 | 0.155172 | 0.103448 | 0.103448 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.153481 | 632 | 21 | 65 | 30.095238 | 0.758879 | 0.22943 | 0 | 0 | 0 | 0 | 0.218557 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.0625 | false | 0 | 0.0625 | 0 | 0.125 | 0.75 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 |
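A small companion sketch of the conversion hinted at in the comments above: mktime() turns a struct_time into the floating-point epoch representation, and localtime() turns it back.

import time

now = time.localtime()
epoch = time.mktime(now)                         # struct_time -> float seconds since the epoch
again = time.localtime(epoch)                    # float -> struct_time in the local time zone
assert (now.tm_hour, now.tm_min, now.tm_sec) == (again.tm_hour, again.tm_min, again.tm_sec)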
74dd569b79e52382362c820740fae840ef4bfce6 | 4,121 | py | Python | pyflowline/mesh/tin/create_tin_mesh.py | changliao1025/pyflowline | fb8677c5ebb3d0db8638f7fcc495ffb97376e00f | [
"Unlicense"
] | 4 | 2022-03-23T12:10:20.000Z | 2022-03-29T13:41:16.000Z | pyflowline/mesh/tin/create_tin_mesh.py | changliao1025/pyflowline | fb8677c5ebb3d0db8638f7fcc495ffb97376e00f | [
"Unlicense"
] | 1 | 2022-03-24T16:08:35.000Z | 2022-03-24T16:08:35.000Z | pyflowline/mesh/tin/create_tin_mesh.py | changliao1025/pyflowline | fb8677c5ebb3d0db8638f7fcc495ffb97376e00f | [
"Unlicense"
] | null | null | null | import os, sys
import numpy as np
from osgeo import ogr
from pyflowline.classes.tin import pytin
from pyflowline.formats.convert_coordinates import convert_pcs_coordinates_to_cell
def create_tin_mesh(dX_left_in, dY_bot_in, dResolution_meter_in, ncolumn_in, nrow_in,
sFilename_output_in, sFilename_spatial_reference_in):
if os.path.exists(sFilename_output_in):
#delete it if it exists
os.remove(sFilename_output_in)
pDriver_shapefile = ogr.GetDriverByName('Esri Shapefile')
pDataset = pDriver_shapefile.CreateDataSource(sFilename_output_in)
pDataset_shapefile = pDriver_shapefile.Open(sFilename_spatial_reference_in, 0)
pLayer_shapefile = pDataset_shapefile.GetLayer(0)
pSrs = pLayer_shapefile.GetSpatialRef()
#pSrs = osr.SpatialReference()
#pSrs.ImportFromEPSG(4326) # WGS84 lat/lon
pLayer = pDataset.CreateLayer('cell', pSrs, ogr.wkbPolygon)
# Add one attribute
pLayer.CreateField(ogr.FieldDefn('id', ogr.OFTInteger64)) #long type for high resolution
pLayerDefn = pLayer.GetLayerDefn()
pFeature = ogr.Feature(pLayerDefn)
xleft = dX_left_in
ybottom = dY_bot_in
dArea = np.power(dResolution_meter_in,2.0)
#tin edge
dLength_edge = np.sqrt( 4.0 * dArea / np.sqrt(3.0) )
dX_shift = 0.5 * dLength_edge
dY_shift = 0.5 * dLength_edge * np.sqrt(3.0)
dX_spacing = dX_shift * 2
dY_spacing = dY_shift
lID =0
#geojson
aTin=list()
#.........
#(x2,y2)-----(x3,y3)
# | |
#(x1,y1)-----(x4,y4)
#...............
for column in range(0, ncolumn_in):
for row in range(0, nrow_in):
if column % 2 == 0 :
if row % 2 == 0:
#define a polygon here
x1 = xleft + (column * dX_shift)
y1 = ybottom + (row * dY_spacing)
x2 = x1 + dX_spacing
y2 = y1
x3 = x1 + dX_shift
y3 = y1 + dY_spacing
else:
x1 = xleft + (column * dX_shift)
y1 = ybottom + (row +1)* dY_spacing
x2 = x1 + dX_shift
y2 = y1 - dY_shift
x3 = x1 + dX_spacing
y3 = y1
else:
if row % 2 == 0:
x1 = xleft + column * dX_shift
y1 = ybottom + (row + 1)* dY_spacing
x2 = x1 + dX_shift
y2 = y1 - dY_shift
x3 = x1 + dX_spacing
y3 = y1
else:
x1 = xleft + column * dX_shift
y1 = ybottom + (row )* dY_spacing
x2 = x1 + dX_spacing
y2 = y1
x3 = x1 + dX_shift
y3 = y1 + dY_spacing
aCoords = np.full((4,2), -9999.0, dtype=float)
ring = ogr.Geometry(ogr.wkbLinearRing)
ring.AddPoint(x1, y1)
ring.AddPoint(x2, y2)
ring.AddPoint(x3, y3)
ring.AddPoint(x1, y1)
pPolygon = ogr.Geometry(ogr.wkbPolygon)
pPolygon.AddGeometry(ring)
pFeature.SetGeometry(pPolygon)
pFeature.SetField("id", lID)
pLayer.CreateFeature(pFeature)
lID = lID + 1
#dummy = loads( ring.ExportToWkt() )
#aCoords = dummy.exterior.coords
aCoords[0,0] = x1
aCoords[0,1] = y1
aCoords[1,0] = x2
aCoords[1,1] = y2
aCoords[2,0] = x3
aCoords[2,1] = y3
aCoords[3,0] = x1
aCoords[3,1] = y1
dummy1= np.array(aCoords)
pHexagon = convert_pcs_coordinates_to_cell(1, dummy1)
aTin.append(pHexagon)
pass
pDataset = pLayer = pFeature = None
return aTin
| 29.435714 | 92 | 0.499151 | 451 | 4,121 | 4.390244 | 0.305987 | 0.035354 | 0.034343 | 0.030303 | 0.226263 | 0.170707 | 0.170707 | 0.170707 | 0.170707 | 0.168687 | 0 | 0.051293 | 0.408639 | 4,121 | 139 | 93 | 29.647482 | 0.761182 | 0.078379 | 0 | 0.337209 | 0 | 0 | 0.005817 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.011628 | false | 0.011628 | 0.05814 | 0 | 0.081395 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
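A standalone sanity check of the edge-length formula used above: an equilateral triangle with edge a has area sqrt(3)/4 * a**2, so a = sqrt(4*A/sqrt(3)) recovers the target cell area; the 5 km resolution is an arbitrary example value.

import numpy as np

dResolution_meter = 5000.0
dArea = np.power(dResolution_meter, 2.0)             # target cell area, as in the function above
dLength_edge = np.sqrt(4.0 * dArea / np.sqrt(3.0))   # same edge-length formula as above
print(np.sqrt(3.0) / 4.0 * dLength_edge ** 2)        # ~25,000,000 m**2, i.e. dArea again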
74ddd9005216a50508d6244b7fa925e4745d61c6 | 1,046 | py | Python | lecturePractice/coordinate.py | serook/mit_edx_i2_cs_python | 34cb08c6f4c5fb0a951d91cbd782f24a76e2479c | [
"Apache-2.0"
] | 1 | 2021-02-17T02:17:29.000Z | 2021-02-17T02:17:29.000Z | lecturePractice/coordinate.py | serook/mit_edx_i2_cs_python | 34cb08c6f4c5fb0a951d91cbd782f24a76e2479c | [
"Apache-2.0"
] | null | null | null | lecturePractice/coordinate.py | serook/mit_edx_i2_cs_python | 34cb08c6f4c5fb0a951d91cbd782f24a76e2479c | [
"Apache-2.0"
] | 1 | 2021-02-17T02:17:31.000Z | 2021-02-17T02:17:31.000Z | class Coordinate(object):
def __init__(self, x, y):
"""
:rtype: object
"""
self.x = x
self.y = y
def getX(self):
# Getter method for a Coordinate object's x coordinate.
# Getter methods are better practice than just accessing an attribute directly
return self.x
def getY(self):
# Getter method for a Coordinate object's y coordinate
return self.y
def __str__(self):
        return '<{0},{1}>'.format(str(self.getX()), str(self.getY()))
def __eq__(self, other):
# First make sure `other` is of the same type
"""
:type other: object
"""
assert type(self) == type(other)
# Since `other` is the same type, test if coordinates are equal
        return self.getX() == other.getX() and self.getY() == other.getY()
    def __repr__(self):
return 'Coordinate({0}, {1})'.format(str(self.getX()), str(self.getY))
c1 = Coordinate(3, 4)
c2 = Coordinate(4, 3)
print c1,c2
print c1==c2
| 24.325581 | 86 | 0.577438 | 141 | 1,046 | 4.170213 | 0.390071 | 0.059524 | 0.054422 | 0.064626 | 0.227891 | 0.227891 | 0.227891 | 0.227891 | 0.102041 | 0 | 0 | 0.019048 | 0.297323 | 1,046 | 42 | 87 | 24.904762 | 0.780952 | 0.276291 | 0 | 0 | 0 | 0 | 0.043413 | 0 | 0 | 0 | 0 | 0 | 0.05 | 0 | null | null | 0 | 0 | null | null | 0.1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
74dfa0968b1a584e0d2faaacfbe4171a4e652fc1 | 1,415 | py | Python | 450.Delete-Node-in-a-BST.py | mickey0524/leetcode | 6bedeb6ff29b02a97178cca464c5fd639951801f | [
"MIT"
] | 18 | 2018-07-14T12:45:37.000Z | 2022-03-26T14:51:04.000Z | 450.Delete-Node-in-a-BST.py | mickey0524/leetcode | 6bedeb6ff29b02a97178cca464c5fd639951801f | [
"MIT"
] | null | null | null | 450.Delete-Node-in-a-BST.py | mickey0524/leetcode | 6bedeb6ff29b02a97178cca464c5fd639951801f | [
"MIT"
] | 3 | 2019-05-29T04:09:22.000Z | 2021-06-07T23:37:46.000Z | # https://leetcode.com/problems/delete-node-in-a-bst/
#
# algorithms
# Medium (38.78%)
# Total Accepted: 52,907
# Total Submissions: 136,417
# beats 93.27% of python submissions
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def deleteNode(self, root, key):
"""
:type root: TreeNode
:type key: int
:rtype: TreeNode
"""
node = root
parent = root
while node and node.val != key:
parent = node
if node.val > key:
node = node.left
else:
node = node.right
if not node:
return root
def delete_node(node):
if not node.left and not node.right:
return None
if not node.left:
return node.right
if not node.right:
return node.left
tmp = node.right
while tmp.left:
tmp = tmp.left
tmp.left = node.left
return node.right
new_node = delete_node(node)
if parent.val > key:
parent.left = new_node
return root
if parent.val < key:
parent.right = new_node
return root
return new_node
| 23.983051 | 53 | 0.504594 | 165 | 1,415 | 4.266667 | 0.339394 | 0.076705 | 0.051136 | 0.039773 | 0.160511 | 0 | 0 | 0 | 0 | 0 | 0 | 0.022809 | 0.411307 | 1,415 | 58 | 54 | 24.396552 | 0.822329 | 0.267138 | 0 | 0.15625 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.0625 | false | 0 | 0 | 0 | 0.34375 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
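An illustrative driver for the solution above; the TreeNode definition is restated from the commented-out stub, and the insert helper and the values are invented for the example.

class TreeNode(object):
    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None

def insert(root, val):
    # plain BST insertion, used only to build the example tree
    if not root:
        return TreeNode(val)
    if val < root.val:
        root.left = insert(root.left, val)
    else:
        root.right = insert(root.right, val)
    return root

root = None
for v in [5, 3, 6, 2, 4, 7]:
    root = insert(root, v)
root = Solution().deleteNode(root, 3)
print(root.left.val, root.left.left.val)             # 4 2 -> node 3 replaced by 4, with 2 re-attached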
74e04283616bdcecdb6e7f34d9657947a999aff3 | 8,407 | py | Python | tests/components/test_tasks.py | jbenden/pipeline | 43c5196e466324007cf6e2e173d4610102d6a838 | [
"MIT"
] | null | null | null | tests/components/test_tasks.py | jbenden/pipeline | 43c5196e466324007cf6e2e173d4610102d6a838 | [
"MIT"
] | null | null | null | tests/components/test_tasks.py | jbenden/pipeline | 43c5196e466324007cf6e2e173d4610102d6a838 | [
"MIT"
] | null | null | null | """Testing of class Tasks."""
# pylint: disable=no-self-use, invalid-name
import unittest
from hamcrest import assert_that, equal_to
from spline.components.tasks import Tasks, worker
from spline.components.hooks import Hooks
from spline.components.config import ApplicationOptions
from spline.pipeline import PipelineData
class FakePipeline(object):
"""Fake pipeline class for tests."""
def __init__(self, hooks=None):
"""Initialization of fake pipeline."""
self.data = PipelineData(hooks)
self.model = {}
self.options = ApplicationOptions(definition='fake.yaml')
self.variables = {}
class TestTasks(unittest.TestCase):
"""Testing of class Tasks."""
def test_tasks_ordered(self):
"""Testing with two task only (ordered)."""
pipeline = FakePipeline()
tasks = Tasks(pipeline, parallel=False)
document = [{'shell': {'script': '''echo hello1''', 'when': ''}},
{'shell': {'script': '''echo hello2''', 'when': ''}},
{'python': {'script': '''print("hello3")''', 'when': ''}}]
result = tasks.process(document)
output = [line for line in result['output'] if line.find("hello") >= 0]
assert_that(result['success'], equal_to(True))
assert_that(len(output), equal_to(3))
assert_that(output[0], equal_to('hello1'))
assert_that(output[1], equal_to('hello2'))
assert_that(output[2], equal_to('hello3'))
def test_two_tasks_parallel(self):
"""Testing with two task only (parallel)."""
pipeline = FakePipeline()
tasks = Tasks(pipeline, parallel=True)
definition = [{'shell': {'script': '''echo hello1''', 'when': ''}},
{'shell': {'script': '''echo hello2''', 'when': ''}}]
result = tasks.process(definition)
output = sorted([line for line in result['output'] if line.find("hello") >= 0])
assert_that(result['success'], equal_to(True))
assert_that(len(output), equal_to(2))
assert_that(output[0], equal_to('hello1'))
assert_that(output[1], equal_to('hello2'))
def test_failed_ordered(self):
"""Testing cleanup when a task has failed (ordered)."""
hooks = Hooks()
hooks.cleanup = '''echo cleanup hello'''
pipeline = FakePipeline(hooks=hooks)
tasks = Tasks(pipeline, parallel=False)
definition = [{'shell': {'script': '''exit 123''', 'when': ''}},
{'shell': {'script': '''echo hello''', 'when': ''}}]
result = tasks.process(definition)
output = [line for line in result['output'] if line.find("hello") >= 0]
assert_that(result['success'], equal_to(False))
assert_that(len(output), equal_to(1))
assert_that(output[0], equal_to('cleanup hello'))
def test_failed_parallel(self):
"""Testing cleanup when a task has failed (parallel)."""
hooks = Hooks()
hooks.cleanup = '''echo cleanup 123'''
pipeline = FakePipeline(hooks=hooks)
tasks = Tasks(pipeline, parallel=True)
definition = [{'shell': {'script': '''exit 123''', 'when': ''}},
{'shell': {'script': '''echo hello''', 'when': ''}}]
result = tasks.process(definition)
output = sorted([line for line in result['output']
if line.find("hello") >= 0 or line.find("cleanup") >= 0])
assert_that(result['success'], equal_to(False))
assert_that(len(output), equal_to(2))
assert_that(output[0], equal_to('cleanup 123'))
assert_that(output[1], equal_to('hello'))
def test_failed_two_blocks(self):
"""Testing cleanup when a task has failed (ordered with two blocks)."""
hooks = Hooks()
hooks.cleanup = '''echo cleanup hello'''
pipeline = FakePipeline(hooks=hooks)
tasks = Tasks(pipeline, parallel=False)
definition = [{'shell': {'script': '''exit 123''', 'when': ''}},
{'shell': {'script': '''echo hello1''', 'when': ''}},
{'env': {'block': 'two'}},
{'shell': {'script': '''echo hello2''', 'when': ''}}]
result = tasks.process(definition)
output = [line for line in result['output'] if line.find("hello") >= 0]
assert_that(result['success'], equal_to(False))
assert_that(len(output), equal_to(1))
assert_that(output[0], equal_to('cleanup hello'))
def test_tags_ordered(self):
"""Testing for filtering of tags."""
pipeline = FakePipeline()
tasks = Tasks(pipeline, parallel=False)
definition = [{'shell': {'script': '''echo hello1''', 'when': '', 'tags': ['first']}},
{'shell': {'script': '''echo hello2''', 'when': '', 'tags': ['second']}}]
pipeline.options.tags = ['first']
result = tasks.process(definition)
output = [line for line in result['output'] if line.find("hello") >= 0]
assert_that(len(output), equal_to(1))
assert_that(output[0], equal_to('hello1'))
pipeline.options.tags = ['second']
result = tasks.process(definition)
output = [line for line in result['output'] if line.find("hello") >= 0]
assert_that(len(output), equal_to(1))
assert_that(output[0], equal_to('hello2'))
def test_env_ordered(self):
"""Testing environment variables (ordered)."""
pipeline = FakePipeline()
tasks = Tasks(pipeline, parallel=False)
definition = [{'env': {'message': 'hello'}},
{'shell': {'script': '''echo "1:{{env.message}}"''', 'when': ''}},
{'shell': {'script': '''echo "2:$message"''', 'when': ''}}]
result = tasks.process(definition)
output = [line for line in result['output'] if line.find("hello") >= 0]
assert_that(result['success'], equal_to(True))
assert_that(len(output), equal_to(2))
assert_that(output[0], equal_to('1:hello'))
assert_that(output[1], equal_to('2:hello'))
def test_worker(self):
"""Testing worker used by class Tasks for parallel execution."""
data = {'id': 1, 'creator': 'shell',
'entry': {'script': '''echo "{{model.mode}}:{{env.message}} {{ variables.message }}"''',
'when': ''},
'env': {'message': 'hello'}, 'model': {'mode': 'test'}, 'item': None,
'dry_run': False, 'debug': False, 'variables': {'message': 'world'}, 'strict': False,
'temporary_scripts_path': ''}
result = worker(data)
output = [line for line in result['output'] if line.find("hello") >= 0]
assert_that(result['success'], equal_to(True))
assert_that(len(output), equal_to(1))
assert_that(output[0], equal_to('test:hello world'))
def test_dry_run(self):
"""Testing dry run mode."""
pipeline = FakePipeline()
pipeline.options.dry_run = True
tasks = Tasks(pipeline, parallel=True)
definition = [{'shell': {'script': '''echo hello1''', 'when': ''}},
{'shell': {'script': '''echo hello2''', 'when': ''}}]
result = tasks.process(definition)
output = [line for line in result['output'] if len(line.strip()) > 0]
assert_that(result['success'], equal_to(True))
assert_that(len(output), equal_to(4))
assert_that(tasks.parallel, equal_to(False))
assert_that(output[0], equal_to('''#!/bin/bash'''))
assert_that(output[1], equal_to('''echo hello1'''))
assert_that(output[2], equal_to('''#!/bin/bash'''))
assert_that(output[3], equal_to('''echo hello2'''))
def test_variables(self):
"""Testing variables."""
pipeline = FakePipeline()
tasks = Tasks(pipeline, parallel=False)
document = [{'shell': {'script': '''echo hello1''', 'variable': 'hello1', 'when': ''}},
{'shell': {'script': '''echo {{ variables.hello1 }}''', 'when': ''}}]
result = tasks.process(document)
output = [line for line in result['output'] if line.find("hello") >= 0]
assert_that(result['success'], equal_to(True))
assert_that(len(output), equal_to(2))
assert_that(output[0], equal_to('hello1'))
assert_that(output[1], equal_to('hello1'))
| 43.559585 | 104 | 0.569406 | 940 | 8,407 | 4.973404 | 0.12234 | 0.08984 | 0.068449 | 0.030588 | 0.698824 | 0.679144 | 0.629305 | 0.61369 | 0.561711 | 0.543316 | 0 | 0.0139 | 0.246937 | 8,407 | 192 | 105 | 43.786458 | 0.72453 | 0.067801 | 0 | 0.542254 | 0 | 0 | 0.156966 | 0.006825 | 0 | 0 | 0 | 0 | 0.295775 | 1 | 0.077465 | false | 0 | 0.042254 | 0 | 0.133803 | 0.007042 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
74e0e926c6c1b0dd7463e03c1ecc0a04002c96d2 | 9,119 | py | Python | src/utils/stalkMarketGraphs.py | amadea-system/StalkMarketBot | dc43e496e49361fe75ce9b94486981e134edc39e | [
"Apache-2.0"
] | null | null | null | src/utils/stalkMarketGraphs.py | amadea-system/StalkMarketBot | dc43e496e49361fe75ce9b94486981e134edc39e | [
"Apache-2.0"
] | null | null | null | src/utils/stalkMarketGraphs.py | amadea-system/StalkMarketBot | dc43e496e49361fe75ce9b94486981e134edc39e | [
"Apache-2.0"
] | null | null | null | """
Graphing code for Stalk Market Predictions
Part of Stalk Market Bot.
"""
import logging
from io import BytesIO
from typing import TYPE_CHECKING, Optional, Dict, List, Union, Tuple, NamedTuple, Any
import matplotlib.pyplot as plt
from scipy.interpolate import Akima1DInterpolator, pchip_interpolate
import numpy as np
import discord
from utils.stalkMarketPredictions import day_segment_names, Pattern, fix_sell_prices_length, analyze_possibilities, max_guild_predictions
if TYPE_CHECKING:
from utils.stalkMarketHelpers import UserPredictions
log = logging.getLogger(__name__)
def smooth_plot(x_data: List[Any], y_data: List[float]):
# return old_smooth_plot(x_data, y_data)
x = np.arange(len(y_data))
xnew = np.linspace(x[0], x[-1], 300)
# ynew = Akima1DInterpolator(x, y_data)(xnew)
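    # pchip interpolation is shape-preserving, so the smoothed curve does not overshoot the data points.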
ynew = pchip_interpolate(x, y_data, xnew)
return xnew, ynew
def format_plot(ax: plt.Axes):
"""Apply formatting to a plot"""
# Add the legend
legend = ax.legend(shadow=True, fontsize='medium')
ax.grid(linewidth="0.5", color="#283442") # Add gridlines. #283442
ax.set_axisbelow(True) # Make sure the gridlines are behind the graphs
# Remove the border
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.spines['left'].set_visible(False)
ax.tick_params(color="#000000")
# Make room for the x axis labels
plt.gcf().subplots_adjust(bottom=0.15)#, right=0.1)
plt.tight_layout()
def matplotgraph_predictions(user: Optional[discord.Member], predictions: List[Pattern], min_max_pattern: Pattern, average_prices: List[float], testing=False) -> Optional[BytesIO]:
"""Graph the predictions"""
x_axis = day_segment_names[2:]
abs_min_points = [price.min for price in min_max_pattern.prices][2:]
abs_max_points = [price.max for price in min_max_pattern.prices][2:]
# avg_points = [0 for i in abs_max_points]
if min_max_pattern.prices[0].min is not None:
buy_price_points = [min_max_pattern.prices[0].min for i in abs_max_points]
else:
buy_price_points = None
actual_price_points = [price.actual if price.is_actual_price() else None for price in min_max_pattern.prices][2:]
# for pred in predictions:
# for i, price in enumerate(pred.prices[2:]):
# avg_points[i] += price.min + price.max
# avg_points = [i/(len(predictions)*2) for i in avg_points]
avg_points = average_prices
title = f"{user.display_name}'s Stalk Market Predictions" if user is not None else f"Stalk Market Predictions"
# Set up the plots
plt.style.use('dark_background')
fig: plt.Figure
ax: plt.Axes
fig, ax = plt.subplots()
ax.plot(*smooth_plot(x_axis, avg_points), color="#1f77b4", label="Potential Price")
ax.plot(x_axis, abs_min_points, color="#000000", alpha=0)
ax.plot(x_axis, abs_max_points, color="#000000", alpha=0)
smooth_x, smooth_min_points = smooth_plot(x_axis, abs_min_points)
smooth_x, smooth_msx_points = smooth_plot(x_axis, abs_max_points)
ax.fill_between(smooth_x, smooth_min_points, smooth_msx_points, alpha=0.5, color="#1f77b4")
# ax.plot(x_axis, avg_points)
# ax.plot(x_axis, abs_min_points)
# ax.plot(x_axis, abs_max_points)
if buy_price_points is not None:
ax.plot(x_axis, buy_price_points, color="#FF7F0E", alpha=0.7, marker=0, linestyle='None', label="Buy Price")
ax.plot(x_axis, actual_price_points, 'o', color="#C5FFFF", label="Actual Price")#color="#BD9467")
plt.xticks(np.arange(12), x_axis, rotation=-50) # Set the x ticks to the day names
format_plot(ax)
if testing:
# plt.show()
plt.savefig("test_plot.png", format="png", dpi=150) # , bbox_inches='tight')
plt.close()
return None
imgBuffer = BytesIO()
plt.savefig(imgBuffer, format="png", dpi=150) #, bbox_inches='tight')
plt.close()
return imgBuffer
"""
fig: go.Figure = go.Figure(layout_title_text=title,
layout_template="plotly_dark",
layout_xaxis_title="Day of the Week",
layout_yaxis_title="Bells",
)
plot = get_filled_scatter_plot("Potential Turnip Prices", x_axis, abs_min_points, abs_max_points, avgs=avg_points, )
plot.set_color(DEFAULT_PLOTLY_COLORS[0])
ht = '<b>%{x}</b><br><br>' + \
'%{text}' + \
'<extra></extra>'
custom_text = []
for i in range(len(abs_min_points)):
txt = f"<i>Avg Price</i>: {avg_points[i]:.2f}<br>" +\
f"Max Price: {abs_max_points[i]}<br>" + \
f"Min Price: {abs_min_points[i]}<br>"
if actual_price_points[i] is not None:
txt += f"Actual Price: {actual_price_points[i]}<br>"
if buy_price_points is not None:
txt += f"Buy Price: {buy_price_points[i]}<br>"
custom_text.append(txt)
plot.set_hover_template(ht, custom_text)
plot.add_to_fig(fig)
if buy_price_points is not None:
# Add plot indicating the buy price.
fig.add_trace(go.Scatter(x=x_axis, y=buy_price_points,
mode='lines',
name=f"Buy Price",
line_dash='dash',
hoverinfo="none",
# hovertemplate=ht,
# text=custom_text,
# line_width=0,
# line_shape='spline',
# showlegend=False,
# legendgroup=name,
)
)
# Add plot indicating the actual price.
fig.add_trace(go.Scatter(x=x_axis, y=actual_price_points,
mode='lines',
name=f"Actual Sell Price",
line_dash='dash',
hoverinfo="none",
line_shape='spline',
# hovertemplate=ht,
# text=custom_text,
)
)
fig.show()
"""
def matplotgraph_guild_predictions(users_predictions: List['UserPredictions']) -> BytesIO:
"""Graph the predictions"""
max_graphs = max_guild_predictions
x_axis = day_segment_names[2:]
plt.style.use('dark_background')
fig: plt.Figure
ax: plt.Axes
fig, ax = plt.subplots()
for i, pred in enumerate(users_predictions):
if i >= max_graphs:
break
best_price_points = [price.actual if price.is_actual_price() else price.max for price in pred.best().prices][2:]
ax.plot(*smooth_plot(x_axis, best_price_points), label=f"{pred.user_name} - Best")
# avg_price_points = pred.average
# ax.plot(*smooth_plot(x_axis, avg_price_points), label=f"{pred.user_name} - Average")
plt.xticks(np.arange(12), x_axis, rotation=-50) # Set the x ticks to the day names
format_plot(ax)
imgBuffer = BytesIO()
plt.savefig(imgBuffer, format="png", dpi=150) #, bbox_inches='tight')
plt.close()
return imgBuffer
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO, format="[%(asctime)s] [%(name)s] [%(levelname)s] %(message)s")
# test_graph()
# buy_price = 90
# sell_price = [buy_price, buy_price]
#
# sell_price.append(78)
# sell_price.append(74)
#
# sell_price.append(70)
# sell_price.append(104)
#
# sell_price.append(167)
# sell_price.append(518)
# #
# sell_price.append(160)
# sell_price.append(98)
buy_price = 93
sell_price = [buy_price, buy_price]
sell_price.append(100)
sell_price.append(100)
sell_price.append(98)
sell_price = fix_sell_prices_length(sell_price)
possibilities, min_max_pattern, avg_prices = analyze_possibilities(sell_price)
print(avg_prices)
for prediction in possibilities:
# desc.append(prediction.description)
log.info(f"\nDesc: {prediction.description}\n\n"
f"Sunday Sell: {prediction.prices[0]}\n"
f"Monday AM: {prediction.prices[2]}\n"
f"Monday PM: {prediction.prices[3]}\n"
f"Tuesday AM: {prediction.prices[4]}\n"
f"Tuesday PM: {prediction.prices[5]}\n"
f"Wednesday AM: {prediction.prices[6]}\n"
f"Wednesday AM: {prediction.prices[7]}\n"
f"Thursday AM: {prediction.prices[8]}\n"
f"Thursday AM: {prediction.prices[9]}\n"
f"Friday AM: {prediction.prices[10]}\n"
f"Friday AM: {prediction.prices[11]}\n"
f"Saturday AM: {prediction.prices[12]}\n"
f"Saturday AM: {prediction.prices[13]}"
f"\n")
matplotgraph_predictions(None, possibilities, min_max_pattern, avg_prices, testing=True)
print("Done")
| 33.40293 | 160 | 0.605768 | 1,198 | 9,119 | 4.394825 | 0.222037 | 0.018993 | 0.020513 | 0.014625 | 0.392403 | 0.34302 | 0.225071 | 0.154226 | 0.137132 | 0.121937 | 0 | 0.022034 | 0.273385 | 9,119 | 272 | 161 | 33.525735 | 0.772563 | 0.137515 | 0 | 0.233645 | 0 | 0 | 0.161243 | 0.066362 | 0 | 0 | 0 | 0 | 0 | 1 | 0.037383 | false | 0 | 0.084112 | 0 | 0.158879 | 0.018692 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
74e246baa4ba88b3c44d649fb3735c42f268b166 | 2,719 | py | Python | addr/parser.py | euske/osmtools | 581da9129f489cb57763578127ead42fa43b5c1f | [
"MIT"
] | null | null | null | addr/parser.py | euske/osmtools | 581da9129f489cb57763578127ead42fa43b5c1f | [
"MIT"
] | null | null | null | addr/parser.py | euske/osmtools | 581da9129f489cb57763578127ead42fa43b5c1f | [
"MIT"
] | 3 | 2015-12-27T22:13:40.000Z | 2019-12-23T14:34:54.000Z | #!/usr/bin/env python
import sys
import xml.parsers.expat
## Parser
##
class Parser(object):
def __init__(self):
self.pos = {}
self.name = {}
self._state = 0
self._expat = xml.parsers.expat.ParserCreate()
self._expat.StartElementHandler = self._start_element
self._expat.EndElementHandler = self._end_element
self._expat.CharacterDataHandler = self._char_data
return
def feed(self, data):
self._expat.Parse(data)
return
def get(self):
for (k,(x,y)) in self.pos.iteritems():
name = self.name[k]
yield (name,(x,y))
return
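    # Expat state machine: 0 = idle, 1 = inside a GM_Point, 2 = reading its coordinates,
    # 3 = inside an FB01 feature, 4 = reading its name, 5 = reading its AAC code.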
def _start_element(self, name, attrs):
#print 'start', name, attrs
if name == 'jps:GM_Point':
self.id = attrs['id']
self._state = 1
elif self._state == 1 and name == 'DirectPosition.coordinate':
self._state = 2
elif name == 'ksj:FB01':
self._state = 3
elif self._state == 3 and name == 'ksj:POS':
self.idref = attrs['idref']
elif self._state == 3 and name in ('ksj:NA0','ksj:NA8'):
self._state = 4
elif self._state == 3 and name == 'ksj:AAC':
self._state = 5
return
def _end_element(self, name):
if name == 'ksj:FB01':
self.name[self.idref] = (self._name1, self._cid)
elif self._state == 2:
self._state = 0
elif self._state == 4:
self._state = 3
elif self._state == 5:
self._state = 3
return
def _char_data(self, data):
#print 'char', len(data)
if self._state == 2:
(lat,lng) = data.split(' ')
self.pos[self.id] = (lat,lng)
#print (float(x), float(y))
elif self._state == 4:
self._name1 = data
#print (data,)
elif self._state == 5:
self._cid = int(data)
#print (data,)
return
# main
def main(argv):
import re
import os.path
import zipfile
import csv
pat = re.compile(r'P\d\d-\d\d_\d\d.xml')
args = argv[1:]
out = csv.writer(sys.stdout)
for path in args:
zf = zipfile.ZipFile(path)
for name in zf.namelist():
if not pat.match(os.path.basename(name)): continue
print >>sys.stderr, name
data = zf.read(name)
p = Parser()
p.feed(data)
for ((name,cid),(lat,lng)) in p.get():
row = (cid,name.encode('utf-8'),lat,lng)
out.writerow(row)
zf.close()
return
if __name__ == '__main__': sys.exit(main(sys.argv))
| 28.030928 | 70 | 0.515631 | 341 | 2,719 | 3.953079 | 0.293255 | 0.126855 | 0.086795 | 0.031157 | 0.12092 | 0.075668 | 0.035608 | 0 | 0 | 0 | 0 | 0.01644 | 0.351232 | 2,719 | 96 | 71 | 28.322917 | 0.747732 | 0.049283 | 0 | 0.210526 | 0 | 0 | 0.047009 | 0.009713 | 0 | 0 | 0 | 0 | 0 | 1 | 0.092105 | false | 0 | 0.078947 | 0 | 0.276316 | 0.013158 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
74e275ec0fee421cce4e54186df885cf8877867b | 83 | py | Python | graphene_django/forms/types.py | mebel-akvareli/graphene-django | 23008ad22094f3e7b8fb26b73811ce49b20cca25 | [
"MIT"
] | 4,038 | 2016-09-18T01:45:22.000Z | 2022-03-31T01:06:57.000Z | graphene_django/forms/types.py | mebel-akvareli/graphene-django | 23008ad22094f3e7b8fb26b73811ce49b20cca25 | [
"MIT"
] | 1,104 | 2016-09-19T20:10:22.000Z | 2022-03-30T17:37:46.000Z | graphene_django/forms/types.py | mebel-akvareli/graphene-django | 23008ad22094f3e7b8fb26b73811ce49b20cca25 | [
"MIT"
] | 791 | 2016-09-18T13:48:11.000Z | 2022-03-29T08:32:06.000Z | from ..types import ErrorType # noqa Import ErrorType for backwards compatability
| 41.5 | 82 | 0.819277 | 10 | 83 | 6.8 | 0.8 | 0.441176 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.144578 | 83 | 1 | 83 | 83 | 0.957746 | 0.590361 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 5 |
74e37ad87a05efcc33dc8d935bf247558d91a19e | 434 | py | Python | django_project/stocks/models.py | justinekang/Athena_Stocks | 024826b7cf1bac78c570824b884a3310e5a8120e | [
"MIT"
] | 1 | 2021-09-18T19:49:46.000Z | 2021-09-18T19:49:46.000Z | django_project/stocks/models.py | webclinic017/Athena_Stocks | 548a7ee40e41542b436753ff79f9a46c48312234 | [
"MIT"
] | 23 | 2021-07-06T22:27:47.000Z | 2021-08-13T21:34:55.000Z | django_project/stocks/models.py | webclinic017/Athena_Stocks | 548a7ee40e41542b436753ff79f9a46c48312234 | [
"MIT"
] | 11 | 2021-07-11T05:04:22.000Z | 2021-09-18T19:49:43.000Z | from django.db import models
from django.utils import timezone
from django.contrib.auth.models import User
# Create your models here.
#inspired by https://docs.djangoproject.com/en/3.2/topics/db/models/
class Fav_Stocks(models.Model):
user = models.CharField(max_length=100)
stocks = models.TextField()
date_made = models.DateTimeField(default=timezone.now)
author = models.ForeignKey(User, on_delete=models.CASCADE)
| 36.166667 | 68 | 0.774194 | 62 | 434 | 5.354839 | 0.677419 | 0.090361 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.013089 | 0.119816 | 434 | 11 | 69 | 39.454545 | 0.856021 | 0.209677 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.375 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 2 |
74e3c847b45abe7dd310f46f2b20094db76be087 | 9,934 | py | Python | tests/test_functions.py | Qfabiolous/QuanGuru | 285ca44ae857cc61337f73ea2eb600f485a09e32 | [
"BSD-3-Clause"
] | 9 | 2021-05-23T06:30:45.000Z | 2021-12-27T13:33:54.000Z | tests/test_functions.py | cahitkargi/QuanGuru | 9b5c94465cd58bc32f6ff845f29dfdec7e0f9075 | [
"BSD-3-Clause"
] | 26 | 2022-03-18T02:40:54.000Z | 2022-03-25T07:00:25.000Z | tests/test_functions.py | cahitkargi/QuanGuru | 9b5c94465cd58bc32f6ff845f29dfdec7e0f9075 | [
"BSD-3-Clause"
] | 5 | 2021-05-23T06:30:24.000Z | 2022-02-04T02:40:08.000Z | import numpy as np
import pytest
from quanguru.QuantumToolbox import linearAlgebra as la #pylint: disable=import-error
from quanguru.QuantumToolbox import operators as ops #pylint: disable=import-error
from quanguru.QuantumToolbox import functions as fns #pylint: disable=import-error
#testCase = collections.namedtuple('testCase', ['operator', 'state', 'expected'])
def test_expectationWithNumber(helpers):
# using randomly generated ket states of random dimension, and also by converting them into density matrix
# test expectation function by using number operator, whose expectation should be sum of photon_number*populations
for _ in range(3):
state, dim, excs = helpers.generateRndPureState()
calcva = fns.expectation(ops.number(dim), state)
expect = sum([k*v for (k, v) in excs.items()])
assert round(calcva, 12) == round(expect, 12)
denMat = la.outerProd(state)
assert round(fns.expectation(ops.number(dim), denMat), 12) == round(expect, 12)
def test_expectationWithJz(helpers):
# using randomly generated ket states of random j value, and also by converting them into density matrix
# test expectation function by using Jz operator, whose expectation is jValue*populations
for _ in range(3):
state, dim, excs = helpers.generateRndPureState()
calcva = fns.expectation(ops.Jz((dim-1)/2), state)
expect = sum([((dim-1)/2-k)*v for (k, v) in excs.items()])
assert round(calcva, 12) == round(expect, 12)
denMat = la.outerProd(state)
assert round(fns.expectation(ops.Jz((dim-1)/2), denMat), 12) == round(expect, 12)
def test_expectationWithSigmaz(helpers, singleQubitOperators):
# using randomly generated ket states, and also by converting them into density matrix
# test expectation function by using sigmaz operator, whose expectation is +-1*populations
op = singleQubitOperators['sz']
for _ in range(3):
state, _, excs = helpers.generateRndPureState(dim=2)
calcva = fns.expectation(op, state)
expect = sum([((not bool(k))-k)*v for (k, v) in excs.items()])
assert round(calcva, 12) == round(expect, 12)
denMat = la.outerProd(state)
assert round(fns.expectation(op, denMat), 12) == round(expect, 12)
@pytest.mark.parametrize("op, ex", [
['sz', [1, -1, 0, 0, 0, 0]], ['sy', [0, 0, 0, 0, 1, -1]], ['sx', [0, 0, 1, -1, 0, 0]]
])
def test_expectationWithSigmaOps(op, ex, specialQubitStates, singleQubitOperators):
# test expectation of Pauli operators against eigenvectors
op = singleQubitOperators[op]
zp = fns.expectation(op, specialQubitStates['1'])
zm = fns.expectation(op, specialQubitStates['0'])
xp = fns.expectation(op, specialQubitStates['x+'])
xm = fns.expectation(op, specialQubitStates['x-'])
yp = fns.expectation(op, specialQubitStates['y+'])
ym = fns.expectation(op, specialQubitStates['y-'])
assert [round(a, 12) for a in [zp, zm, xp, xm, yp, ym]] == ex
zpdm = fns.expectation(op, la.outerProd(specialQubitStates['1']))
zmdm = fns.expectation(op, la.outerProd(specialQubitStates['0']))
xpdm = fns.expectation(op, la.outerProd(specialQubitStates['x+']))
xmdm = fns.expectation(op, la.outerProd(specialQubitStates['x-']))
ypdm = fns.expectation(op, la.outerProd(specialQubitStates['y+']))
ymdm = fns.expectation(op, la.outerProd(specialQubitStates['y-']))
assert [round(a, 12) for a in [zpdm, zmdm, xpdm, xmdm, ypdm, ymdm]] == ex
def test_fidelityPure(helpers):
# using randomly generated states, and also by converting them into density matrix
    # test fidelity (which uses linearAlgebra.py) against a hard-coded calculation of fidelity from populations
for _ in range(3):
state1, dim1, excs1 = helpers.generateRndPureState()
state2, _, excs2 = helpers.generateRndPureState(dim=dim1)
fid = fns.fidelityPure(state1, state2)
fin = abs(sum([np.sqrt(excs2[k2]*excs1[k1]) for k1 in excs1 for k2 in excs2 if k1 == k2]))**2
assert round(fid, 12) == round(fin, 12)
state1 = la.outerProd(state1)
fid = fns.fidelityPure(state1, state2)
assert round(fid, 12) == round(fin, 12)
state2 = la.outerProd(state2)
fid = fns.fidelityPure(state1, state2)
assert round(fid, 12) == round(fin, 12)
stateNames = ['0', '1', 'x+', 'x-', 'y+', 'y-']
bellStateN = ['BellPhi+', 'BellPhi-', 'BellPsi+', 'BellPsi-']
productNames = ['product1', 'product2', 'product3', 'product4']
@pytest.mark.parametrize("state1, state2, fid", [
*[(stateNames[0], name, f) for name, f in zip(stateNames, [1, 0, 0.5, 0.5, 0.5, 0.5])],
*[(stateNames[1], name, f) for name, f in zip(stateNames, [0, 1, 0.5, 0.5, 0.5, 0.5])],
*[(stateNames[2], name, f) for name, f in zip(stateNames, [0.5, 0.5, 1, 0, 0.5, 0.5])],
*[(stateNames[3], name, f) for name, f in zip(stateNames, [0.5, 0.5, 0, 1, 0.5, 0.5])],
*[(stateNames[4], name, f) for name, f in zip(stateNames, [0.5, 0.5, 0.5, 0.5, 1, 0])],
*[(stateNames[5], name, f) for name, f in zip(stateNames, [0.5, 0.5, 0.5, 0.5, 0, 1])],
*[(bellStateN[0], name, f) for name, f in zip(bellStateN, [1, 0, 0, 0])],
*[(bellStateN[1], name, f) for name, f in zip(bellStateN, [0, 1, 0, 0])],
*[(bellStateN[2], name, f) for name, f in zip(bellStateN, [0, 0, 1, 0])],
*[(bellStateN[3], name, f) for name, f in zip(bellStateN, [0, 0, 0, 1])]
])
def test_fidelityPureWithSpecialQubitStates(state1, state2, fid, specialQubitStates):
# test fidelity with some known ket states (and their density matrices) and expected fidelities between them
state1 = specialQubitStates[state1]
state2 = specialQubitStates[state2]
fidCalc = fns.fidelityPure(state1, state2)
assert round(fidCalc, 12) == fid
state1 = la.outerProd(state1)
fidCalc = fns.fidelityPure(state1, state2)
assert round(fidCalc, 12) == fid
state2 = la.outerProd(state2)
fidCalc = fns.fidelityPure(state1, state2)
assert round(fidCalc, 12) == fid
@pytest.mark.parametrize("mat1, mat2, fid", [
*[(stateNames[0]+'dm', name+'dm', f) for name, f in zip(stateNames, [1, 0, 0.5, 0.5, 0.5, 0.5])],
*[(stateNames[1]+'dm', name+'dm', f) for name, f in zip(stateNames, [0, 1, 0.5, 0.5, 0.5, 0.5])],
*[(stateNames[2]+'dm', name+'dm', f) for name, f in zip(stateNames, [0.5, 0.5, 1, 0, 0.5, 0.5])],
*[(stateNames[3]+'dm', name+'dm', f) for name, f in zip(stateNames, [0.5, 0.5, 0, 1, 0.5, 0.5])],
*[(stateNames[4]+'dm', name+'dm', f) for name, f in zip(stateNames, [0.5, 0.5, 0.5, 0.5, 1, 0])],
*[(stateNames[5]+'dm', name+'dm', f) for name, f in zip(stateNames, [0.5, 0.5, 0.5, 0.5, 0, 1])],
*[(bellStateN[0]+'dm', name+'dm', f) for name, f in zip(bellStateN, [1, 0, 0, 0])],
*[(bellStateN[1]+'dm', name+'dm', f) for name, f in zip(bellStateN, [0, 1, 0, 0])],
*[(bellStateN[2]+'dm', name+'dm', f) for name, f in zip(bellStateN, [0, 0, 1, 0])],
*[(bellStateN[3]+'dm', name+'dm', f) for name, f in zip(bellStateN, [0, 0, 0, 1])]
])
def test_fidelityWithPureDensityMatrices(mat1, mat2, fid, specialQubitStates):
# test fidelity with some known density matrices
fidCalc = fns.fidelityPure(specialQubitStates[mat1], specialQubitStates[mat2])
assert round(fidCalc, 12) == fid
def test_entropyPureState(specialQubitStates):
# should give zero for a pure state (uses known states), tests both ket and density matrix inputs
for v in specialQubitStates.values():
assert round(fns.entropy(v), 12) == 0
assert round(fns.entropy(la.outerProd(v)), 12) == 0
@pytest.mark.parametrize('name', bellStateN)
def test_entropyReducedBell(name, specialQubitStates):
# test entropy of reduced Bell states, tests both ket and density matrix inputs
qs1 = la.partialTrace(0, [2, 2], specialQubitStates[name])
qs2 = la.partialTrace(1, [2, 2], specialQubitStates[name])
e1 = fns.entropy(qs1)
e2 = fns.entropy(qs2)
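    # Tracing out either qubit of a Bell state leaves the maximally mixed state, so the entropy is ln(2).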
expe = round(np.log(2), 12)
assert e1 == e2
assert round(e1, 12) == expe
assert round(fns.entropy(la.outerProd(qs1)), 12) == expe
assert round(fns.entropy(la.outerProd(qs2)), 12) == expe
@pytest.mark.parametrize('name, val', [*[(b, 1) for b in bellStateN], *[(p, 0) for p in productNames]])
def test_concurrenceBellAndProduct(name, val, specialQubitStates):
# test concurrence of Bell states, tests both ket and density matrix inputs
state = specialQubitStates[name]
cKet = fns.concurrence(state)
cDm = fns.concurrence(la.outerProd(state))
assert round(cKet, 12) == val
assert round(cDm, 12) == val
sq2 = 1/np.sqrt(2)
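# Trace distance between two pure states is sqrt(1 - |<a|b>|^2): 1 for orthogonal states and 1/sqrt(2) when the overlap probability is 1/2.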
@pytest.mark.parametrize("mat1, mat2, dis", [
*[(stateNames[0]+'dm', name+'dm', f) for name, f in zip(stateNames, [0, 1, sq2, sq2, sq2, sq2])],
*[(stateNames[1]+'dm', name+'dm', f) for name, f in zip(stateNames, [1, 0, sq2, sq2, sq2, sq2])],
*[(stateNames[2]+'dm', name+'dm', f) for name, f in zip(stateNames, [sq2, sq2, 0, 1, sq2, sq2])],
*[(stateNames[3]+'dm', name+'dm', f) for name, f in zip(stateNames, [sq2, sq2, 1, 0, sq2, sq2])],
*[(stateNames[4]+'dm', name+'dm', f) for name, f in zip(stateNames, [sq2, sq2, sq2, sq2, 0, 1])],
*[(stateNames[5]+'dm', name+'dm', f) for name, f in zip(stateNames, [sq2, sq2, sq2, sq2, 1, 0])],
*[(bellStateN[0]+'dm', name+'dm', f) for name, f in zip(bellStateN, [0, 1, 1, 1])],
*[(bellStateN[1]+'dm', name+'dm', f) for name, f in zip(bellStateN, [1, 0, 1, 1])],
*[(bellStateN[2]+'dm', name+'dm', f) for name, f in zip(bellStateN, [1, 1, 0, 1])],
*[(bellStateN[3]+'dm', name+'dm', f) for name, f in zip(bellStateN, [1, 1, 1, 0])]
])
def test_traceDistanceWithPureDensityMatrices(mat1, mat2, dis, specialQubitStates):
# uses density matrices of known states and compare the output with known values
disCalc = fns.traceDistance(specialQubitStates[mat1], specialQubitStates[mat2])
assert round(disCalc, 12) == round(dis, 12)
| 58.781065 | 118 | 0.649487 | 1,457 | 9,934 | 4.415923 | 0.131778 | 0.014921 | 0.016786 | 0.019894 | 0.61004 | 0.551912 | 0.515853 | 0.460989 | 0.409854 | 0.38281 | 0 | 0.052037 | 0.179787 | 9,934 | 168 | 119 | 59.130952 | 0.737604 | 0.147876 | 0 | 0.234043 | 0 | 0 | 0.0296 | 0 | 0 | 0 | 0 | 0 | 0.170213 | 1 | 0.078014 | false | 0 | 0.035461 | 0 | 0.113475 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
74e527a7d122c6ecc18ce4d006746f0a16e5cb57 | 702 | py | Python | src/big_torch/models/shared.py | Denchidlo/big-torch | f5a65e6216e46e6d4fe98670c52618e4cccc8163 | [
"MIT"
] | null | null | null | src/big_torch/models/shared.py | Denchidlo/big-torch | f5a65e6216e46e6d4fe98670c52618e4cccc8163 | [
"MIT"
] | 1 | 2021-11-21T13:11:31.000Z | 2021-11-22T00:18:29.000Z | src/big_torch/models/shared.py | Denchidlo/big-torch | f5a65e6216e46e6d4fe98670c52618e4cccc8163 | [
"MIT"
] | null | null | null | import multiprocessing as mp
POOL = None
def open_pool_session(n_jobs):
global POOL
POOL = mp.Pool(n_jobs).__enter__()
def close_pool_session():
global POOL
POOL.__exit__(None, None, None)
POOL = None
class BasicModelParams:
def __init__(self, layers) -> None:
self.layers = layers
def transform(self, gradients, eta):
for idx, layer in enumerate(reversed(self.layers)):
layer.change(gradients[idx], eta)
def _avg_grads(self, gradients_list):
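        # Average each layer's gradients over all workers; gradients are stored in reverse layer order, matching transform().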
resulting = []
for idx, layer in enumerate(reversed(self.layers)):
resulting.append(layer.average([el[idx] for el in gradients_list]))
return resulting
| 21.9375 | 79 | 0.656695 | 89 | 702 | 4.932584 | 0.426966 | 0.091116 | 0.063781 | 0.059226 | 0.182232 | 0.182232 | 0.182232 | 0.182232 | 0 | 0 | 0 | 0 | 0.242165 | 702 | 31 | 80 | 22.645161 | 0.825188 | 0 | 0 | 0.3 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.25 | false | 0 | 0.05 | 0 | 0.4 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
74e8211474a71b34bfa3a7500bf5a3ef7c8f9bf0 | 700 | py | Python | 2020/day09/two.py | geberl/advent-of-code | 152ac94676830ac920bf06a1a3f1aa88377cd775 | [
"MIT"
] | null | null | null | 2020/day09/two.py | geberl/advent-of-code | 152ac94676830ac920bf06a1a3f1aa88377cd775 | [
"MIT"
] | null | null | null | 2020/day09/two.py | geberl/advent-of-code | 152ac94676830ac920bf06a1a3f1aa88377cd775 | [
"MIT"
] | null | null | null | TARGET = 776203571
data = []
with open("input.txt") as file_handler:
for n, line in enumerate(file_handler):
data.append(int(line.strip()))
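# Grow a window from the given start index: report whether the running sum hits TARGET exactly and where the scan stopped.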
def contiguous_sum(index):
c_sum = 0
for i in range(index, len(data)):
c_sum += data[i]
if c_sum == TARGET:
return True, i
elif c_sum < TARGET:
pass
        elif c_sum > TARGET:
            return False, i
    # Ran out of values without the running sum ever reaching TARGET.
    return False, len(data) - 1
for start_index in range(len(data)):
match, end_index = contiguous_sum(start_index)
if match:
print("match %d - %d" % (start_index, end_index))
result_range = data[start_index:end_index+1]
print(min(result_range) + max(result_range))
break
| 24.137931 | 57 | 0.594286 | 99 | 700 | 4.010101 | 0.434343 | 0.050378 | 0.075567 | 0.080605 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.022267 | 0.294286 | 700 | 28 | 58 | 25 | 0.781377 | 0 | 0 | 0 | 0 | 0 | 0.031429 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.045455 | false | 0.045455 | 0 | 0 | 0.136364 | 0.090909 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
74e8c675fdd045baa869a7b1fb7f3e6afa25b115 | 4,641 | py | Python | week05/hw5p1.py | taoyichen/CS110-Assignments-Python | f2e4e485c820b835981e2e4b8bd0a26cc31cfe73 | [
"MIT"
] | null | null | null | week05/hw5p1.py | taoyichen/CS110-Assignments-Python | f2e4e485c820b835981e2e4b8bd0a26cc31cfe73 | [
"MIT"
] | null | null | null | week05/hw5p1.py | taoyichen/CS110-Assignments-Python | f2e4e485c820b835981e2e4b8bd0a26cc31cfe73 | [
"MIT"
] | 1 | 2020-06-06T08:21:18.000Z | 2020-06-06T08:21:18.000Z | import package_test
from package_test import hw5p2
#import package_test.hw5p2
#package_test.hw5p2.main()
hw5p2.main()
"""
'''
TAO YICHEN
ytao15@binghamton.edu
Lab section: B56
CA name: Paul Maino
Assignment #5 Part 1
Phone: 6079532749
'''
'''
RESTATEMENT:
Display tax for single and married filers given set of incomes
OUTPUT to monitor:
marital_status[status] (str)
total_income[status][income] (float)
tax (float)
GIVEN:
marital_status (str) - ['single', 'married']
total_income[status][income] (float):
[[0,9075, 9076, 36900, 36901, 89350, 89351,
186350, 186351, 405100, 405101, 406750, 406751],
[0, 18150, 18151, 73800, 73801, 148850, 148851,
226850, 226851, 405100, 405101, 457600, 457601]]
Define constants below
FORMULA:
tax = base tax amount for bracket
+ (tax rate for bracket * (total_income[status][income]
- base total_income[status][income] level for bracket))
'''
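# Worked example: a single filer earning $50,000 falls in the 36,901 - 89,350 bracket,
# so tax = 5081.25 + 0.25 * (50000 - 36900) = 8356.25.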
# No MAGIC numbers!
# CONSTANTS
# base total_income[status][income] levels
# for single and married tax brackets
SINGLE_BRACKET0 = 0
SINGLE_BRACKET1 = 9075
SINGLE_BRACKET2 = 36900
SINGLE_BRACKET3 = 89350
SINGLE_BRACKET4 = 186350
SINGLE_BRACKET5 = 405100
SINGLE_BRACKET6 = 406750
MARRIED_BRACKET0 = 0
MARRIED_BRACKET1 = 18150
MARRIED_BRACKET2 = 73800
MARRIED_BRACKET3 = 148850
MARRIED_BRACKET4 = 226850
MARRIED_BRACKET5 = 405100
MARRIED_BRACKET6 = 457600
# Define base tax amounts for single and married tax brackets
SINGLE_BASE_TAX0 = 0
SINGLE_BASE_TAX1 = 907.50
SINGLE_BASE_TAX2 = 5081.25
SINGLE_BASE_TAX3 = 18193.75
SINGLE_BASE_TAX4 = 45353.75
SINGLE_BASE_TAX5 = 117541.25
SINGLE_BASE_TAX6 = 118118.75
MARRIED_BASE_TAX0 = 0
MARRIED_BASE_TAX1 = 1815.0
MARRIED_BASE_TAX2 = 10162.5
MARRIED_BASE_TAX3 = 28925.0
MARRIED_BASE_TAX4 = 50765.0
MARRIED_BASE_TAX5 = 109587.5
MARRIED_BASE_TAX6 = 127962.5
# Define tax rate applied to total_income[status][income] over
# base total_income[status][income] of given tax bracket
TAX_RATE0 = 0.1
TAX_RATE1 = 0.15
TAX_RATE2 = 0.25
TAX_RATE3 = 0.28
TAX_RATE4 = 0.33
TAX_RATE5 = 0.35
TAX_RATE6 = 0.396
single_answer = ("single","Single","SINGLE")
married_answer = ("married","Married","MARRIED")
def main(status,income):
if status in single_answer:
if income >= SINGLE_BRACKET6:
tax = SINGLE_BASE_TAX6 + TAX_RATE6 * (income - SINGLE_BRACKET6)
elif income >= SINGLE_BRACKET5:
tax = SINGLE_BASE_TAX5 + TAX_RATE5* (income - SINGLE_BRACKET5)
elif income >= SINGLE_BRACKET4:
tax = SINGLE_BASE_TAX4 + TAX_RATE4 * (income - SINGLE_BRACKET4)
elif income >= SINGLE_BRACKET3:
tax = SINGLE_BASE_TAX3 + TAX_RATE3 * (income - SINGLE_BRACKET3)
elif income >= SINGLE_BRACKET2:
tax = SINGLE_BASE_TAX2 + TAX_RATE2 * (income - SINGLE_BRACKET2)
elif income >= SINGLE_BRACKET1:
tax = SINGLE_BASE_TAX1 + TAX_RATE1 * (income - SINGLE_BRACKET1)
elif income >= SINGLE_BRACKET0:
tax = SINGLE_BASE_TAX0 + TAX_RATE0 * (income - SINGLE_BRACKET0)
elif status in married_answer:
if income >= MARRIED_BRACKET6:
tax = MARRIED_BASE_TAX6 + TAX_RATE6 * (income - MARRIED_BRACKET6)
elif income >= MARRIED_BRACKET5:
tax = MARRIED_BASE_TAX5 + TAX_RATE5 * (income -MARRIED_BRACKET5)
elif income >= MARRIED_BRACKET4:
tax = MARRIED_BASE_TAX4 + TAX_RATE4 * (income -MARRIED_BRACKET4)
elif income >= MARRIED_BRACKET3:
tax = MARRIED_BASE_TAX3 + TAX_RATE3 * (income -MARRIED_BRACKET3)
elif income >= MARRIED_BRACKET2:
tax = MARRIED_BASE_TAX2 + TAX_RATE2 * (income -MARRIED_BRACKET2)
elif income >= MARRIED_BRACKET1:
tax = MARRIED_BASE_TAX1 + TAX_RATE1 * (income -MARRIED_BRACKET1)
elif income >= MARRIED_BRACKET0:
tax = MARRIED_BASE_TAX0 + TAX_RATE0 * (income -MARRIED_BRACKET0)
print("Tax for %7s filer, with income $%9.2f = $%9.2f" %
(status, income, tax))
status = input("What is your marital status? Press <Enter> to quite.\n")
while status:
while not ((status in single_answer) or (status in married_answer)):
status = input("Wrong entry!\nWhat is your status?\n")
income_str = input("What is your income? (Round to whole numbers please)\n")
    while not (income_str.isdigit() and int(income_str) > 0):
        income_str = input("Wrong entry!\nWhat is your income?\n")
income = int(income_str)
main(status,income)
status = input("What is your marital status? Press <Enter> to quite.\n")
"""
| 31.571429 | 78 | 0.716009 | 645 | 4,641 | 4.924031 | 0.243411 | 0.044081 | 0.037469 | 0.050693 | 0.269521 | 0.129408 | 0.108312 | 0.085642 | 0.085642 | 0.085642 | 0 | 0.112642 | 0.187029 | 4,641 | 146 | 79 | 31.787671 | 0.729128 | 0.010774 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.666667 | 0 | 0.666667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 3 |
74e8fba391102d86316280011dd925cbf3994183 | 736 | py | Python | lists.py | RocqJones/python-starter-reference | 0fa4d4ed81aea8cbad1a3f19ff0bbae99ace01fd | [
"MIT"
] | null | null | null | lists.py | RocqJones/python-starter-reference | 0fa4d4ed81aea8cbad1a3f19ff0bbae99ace01fd | [
"MIT"
] | null | null | null | lists.py | RocqJones/python-starter-reference | 0fa4d4ed81aea8cbad1a3f19ff0bbae99ace01fd | [
"MIT"
] | 1 | 2020-07-08T08:26:19.000Z | 2020-07-08T08:26:19.000Z | # A List is a collection which is ordered and changeable. Allows duplicate members.
# Create a list
numbers = [1,2,3,4,5]
fruits = ['Apples', 'Oranges', 'Grapes', 'Peas']
# use a constructor
#numbers2 = list((5,6,7,8,9,10))
print(numbers)
# Get a value
print(fruits[2])
# Change a value
fruits[0] = 'Blueberries'
print(fruits)
# Get length
print(len(fruits))
# Append to list
fruits.append('Mangos')
print(fruits)
# Remove from list
fruits.remove('Grapes')
print(fruits)
# Insert into position
fruits.insert(2, 'Strawberry')
print(fruits)
# Remove with pop
fruits.pop(2)
print(fruits)
# Reverse list
fruits.reverse()
print(fruits)
# sort list
fruits.sort()
print(fruits)
# Reverse sort
fruits.sort(reverse=True)
print(fruits) | 16 | 83 | 0.716033 | 112 | 736 | 4.705357 | 0.473214 | 0.187856 | 0.064516 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.026814 | 0.138587 | 736 | 46 | 84 | 16 | 0.804416 | 0.389946 | 0 | 0.380952 | 0 | 0 | 0.128736 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.52381 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 |
74ebb2dd053188f1723fc6d317273465d1b9d65c | 8,096 | py | Python | visualization_and_test/evaluate_prototypes.py | jodaiber/semantic_compound_splitting | 6b6b8aea9c320ef3b26dca4d8345fb9a08950a42 | [
"Apache-2.0"
] | 17 | 2015-10-14T09:44:38.000Z | 2021-02-19T16:45:32.000Z | visualization_and_test/evaluate_prototypes.py | jodaiber/semantic_compound_splitting | 6b6b8aea9c320ef3b26dca4d8345fb9a08950a42 | [
"Apache-2.0"
] | null | null | null | visualization_and_test/evaluate_prototypes.py | jodaiber/semantic_compound_splitting | 6b6b8aea9c320ef3b26dca4d8345fb9a08950a42 | [
"Apache-2.0"
] | 8 | 2015-09-07T16:29:37.000Z | 2020-08-08T05:43:12.000Z | __author__ = 'rwechsler'
import datetime
import time
import cPickle as pickle
from annoy import AnnoyIndex
import gensim
import argparse
import numpy as np
import sys
import random
from scipy import spatial
import multiprocessing as mp
from collections import defaultdict
import codecs
def timestamp():
return datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')
def load_candidate_dump(file_name):
return pickle.load(open(file_name, "rb"))
def load_annoy_tree(model_file_name, vector_dims):
tree = AnnoyIndex(vector_dims)
tree.load(model_file_name)
return tree
def load_prototype_dump(file_name):
return pickle.load(open(file_name, "rb"))
def load_word2vecmodel(file_name):
return gensim.models.Word2Vec.load_word2vec_format(file_name, binary=True)
def get_rank_annoy_knn(annoy_tree, vector, true_index, k=100):
neighbours = annoy_tree.get_nns_by_vector(list(vector), k)
try:
return neighbours.index(true_index) + 1
except ValueError:
return 0
def get_rank_word2vec_knn(word2vec_model, vector, true_index, k=100):
neighbours, _ = zip(*word2vec_model.most_similar(positive=[vector], topn=k))
try:
return neighbours.index(word2vec_model.index2word[true_index]) + 1
except ValueError:
return 0
def candidate_generator(evaluation_set, rank_threshold, sim_threshold):
for prefix_prototype_pair in evaluation_set:
yield (prefix_prototype_pair, evaluation_set[prefix_prototype_pair], rank_threshold, sim_threshold)
def mp_wrapper_evaluate_set(argument):
return evaluate_set(*argument)
def get_nn_hitrate(ranks):
return (len(ranks) - ranks.count(0)) / float(len(ranks))
def get_sim_hitrate(similarities, threshold):
return np.sum([1 for s in similarities if s >= threshold]) / float(len(similarities))
def get_average_rank(ranks):
return np.mean([r for r in ranks if r > 0] or 0)
def get_average_similarity(similarities):
return np.mean(similarities)
def get_hitrate(ranks, similarities, threshold):
count = 0
for i, r in enumerate(ranks):
if r > 0 and similarities[i] >= threshold:
count += 1
return count / float(len(ranks))
def get_word_representation(prefix, comp_index, tail_index, word2vec_model):
comp = word2vec_model.index2word[comp_index]
tail = word2vec_model.index2word[tail_index]
fl = comp[len(prefix):-len(tail)]
if fl:
fl = "[" + fl + "]"
return fl + tail
if __name__ == "__main__":
#### Default Parameters-------------------------------------------####
rank_threshold = 30
vector_dims = 500
sim_threshold = 0.5
sample_set_size = np.inf
n_processes = 2
####End-Parametes-------------------------------------------------####
parser = argparse.ArgumentParser(description='Evaluate candidates')
parser.add_argument('-w', action='store', dest="word2vec_file", required=True)
parser.add_argument('-v', action="store", dest="prototypes_file", required=True)
parser.add_argument('-d', action="store", dest="vector_dims", type=int, default=vector_dims)
parser.add_argument('-t', action="store", dest="annoy_tree_file")
parser.add_argument('-c', action="store", dest="candidates_index_file")
parser.add_argument('-o', action="store", dest="result_output_file", required=True)
parser.add_argument('-p', action="store", dest="n_processes", type=int, default=n_processes)
parser.add_argument('-s', action="store", dest="sample_set_size", type=int, default=sample_set_size)
parser.add_argument('-r', action="store", dest="rank_threshold", type=int, default=rank_threshold)
parser.add_argument('-z', action="store", dest="sim_threshold", type=float, default=sim_threshold)
arguments = parser.parse_args(sys.argv[1:])
print timestamp(), "loading word2vec model"
word2vec_model = load_word2vecmodel(arguments.word2vec_file)
print timestamp(), "loading prototypes"
prototypes = load_prototype_dump(arguments.prototypes_file)
if arguments.candidates_index_file:
print timestamp(), "loading candidates"
candidates = load_candidate_dump(arguments.candidates_index_file)
evaluation_set = dict()
# keys are (prefix, prototype_pair)
for prefix in prototypes:
for prototype, evidence_set in prototypes[prefix]:
if arguments.candidates_index_file:
evaluation_set[(prefix, prototype)] = candidates[prefix]
else:
evaluation_set[(prefix, prototype)] = evidence_set
print timestamp(), "preprocess candidates"
# only store vectors that we need. And sample already.
word2vec_vectors = dict()
for prototype_tup in evaluation_set:
if len(evaluation_set[prototype_tup]) > arguments.sample_set_size:
evaluation_set[prototype_tup] = set(random.sample(evaluation_set[prototype_tup], arguments.sample_set_size))
for (i,j) in evaluation_set[prototype_tup]:
word2vec_vectors[i] = np.array(word2vec_model.syn0[i])
word2vec_vectors[j] = np.array(word2vec_model.syn0[j])
word2vec_vectors[prototype_tup[1][0]] = np.array(word2vec_model.syn0[prototype_tup[1][0]])
word2vec_vectors[prototype_tup[1][1]] = np.array(word2vec_model.syn0[prototype_tup[1][1]])
print timestamp(), "number of vectors: ", len(word2vec_vectors)
if arguments.annoy_tree_file and arguments.vector_dims:
del word2vec_model
print timestamp(), "loading annoy tree"
# global annoy_tree
model = load_annoy_tree(arguments.annoy_tree_file, arguments.vector_dims)
knn_method = get_rank_annoy_knn
else:
print timestamp(), "using word2vec model"
model = word2vec_model
knn_method = get_rank_word2vec_knn
def evaluate_set(prefix_prototype_pair, evidence_set, rank_threshold=100, sim_threshold=0.5):
global model
global word2vec_vectors
ranks = []
similarities = []
prefix, vector_pair = prefix_prototype_pair
diff = word2vec_vectors[vector_pair[0]]- word2vec_vectors[vector_pair[1]]
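        # Prototype offset (compound minus tail): adding it to another tail vector should approximate that tail's compound.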
for comp, tail in evidence_set:
predicted = word2vec_vectors[tail] + diff
true_vector = word2vec_vectors[comp]
rank = knn_method(model, predicted, comp, rank_threshold)
ranks.append(rank)
            sim = 1.0 - spatial.distance.cosine(predicted, true_vector)  # scipy's cosine() is a distance, so convert to similarity
similarities.append(sim)
# returns hitrate, hitrate_nn, hitrate_sim, average_rank_if_found, average_similarity_if_found
results = get_hitrate(ranks, similarities, threshold=sim_threshold), get_nn_hitrate(ranks), get_sim_hitrate(similarities, threshold=sim_threshold), get_average_rank(ranks), get_average_similarity(similarities)
return (prefix_prototype_pair,results)
print timestamp(), "evaluating candidates"
pool = mp.Pool(processes=arguments.n_processes)
params = candidate_generator(evaluation_set, arguments.rank_threshold, arguments.sim_threshold)
results = pool.map(mp_wrapper_evaluate_set, params)
pool.close()
pool.join()
del pool
print timestamp(), "pickling"
pickle.dump(results, open(arguments.result_output_file, "wb"))
if arguments.annoy_tree_file:
print timestamp(), "loading word2vec model"
word2vec_model = load_word2vecmodel(arguments.word2vec_file)
else:
word2vec_model = model
print timestamp(), "mapping indices to word"
scores = defaultdict(dict)
for ((prefix, vector), eval_scores) in results:
vector_repr = get_word_representation(prefix, vector[0], vector[1], word2vec_model)
scores[prefix][vector_repr] = eval_scores
print timestamp(), "writing result file"
outfile = codecs.open(arguments.result_output_file, "w", "utf-8")
for prefix in scores:
for vector in scores[prefix]:
outfile.write("\t".join([prefix, vector] + map(str, scores[prefix][vector])) + "\n")
outfile.close()
print timestamp(), "done" | 37.137615 | 217 | 0.700099 | 1,020 | 8,096 | 5.301961 | 0.196078 | 0.045673 | 0.031435 | 0.018491 | 0.244083 | 0.138129 | 0.093935 | 0.093935 | 0.049556 | 0.049556 | 0 | 0.013759 | 0.183053 | 8,096 | 218 | 218 | 37.137615 | 0.803901 | 0.039649 | 0 | 0.107595 | 0 | 0 | 0.067054 | 0.002708 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.082278 | null | null | 0.082278 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
74ede2de89dc2c3fd6b6dea5649ebb972c3b175a | 2,722 | py | Python | generated-libraries/python/netapp/disk/storage_ssd_info.py | radekg/netapp-ontap-lib-get | 6445ebb071ec147ea82a486fbe9f094c56c5c40d | [
"MIT"
] | 2 | 2017-03-28T15:31:26.000Z | 2018-08-16T22:15:18.000Z | generated-libraries/python/netapp/disk/storage_ssd_info.py | radekg/netapp-ontap-lib-get | 6445ebb071ec147ea82a486fbe9f094c56c5c40d | [
"MIT"
] | null | null | null | generated-libraries/python/netapp/disk/storage_ssd_info.py | radekg/netapp-ontap-lib-get | 6445ebb071ec147ea82a486fbe9f094c56c5c40d | [
"MIT"
] | null | null | null | from netapp.netapp_object import NetAppObject
class StorageSsdInfo(NetAppObject):
"""
Storage info block for solid-state storage devices.
"""
_percent_spares_consumed = None
@property
def percent_spares_consumed(self):
"""
Percentage of device spare blocks that have been used.
Each device has a number of spare blocks that will be
used when a data block can no longer be used to store
data. This value reports what percentage of the spares
have already been consumed. Omitted if value is unknown.
"""
return self._percent_spares_consumed
@percent_spares_consumed.setter
def percent_spares_consumed(self, val):
if val != None:
self.validate('percent_spares_consumed', val)
self._percent_spares_consumed = val
_percent_spares_consumed_limit = None
@property
def percent_spares_consumed_limit(self):
"""
Spares consumed percentage limit reported by the
device. Omitted if value is unknown.
"""
return self._percent_spares_consumed_limit
@percent_spares_consumed_limit.setter
def percent_spares_consumed_limit(self, val):
if val != None:
self.validate('percent_spares_consumed_limit', val)
self._percent_spares_consumed_limit = val
_percent_rated_life_used = None
@property
def percent_rated_life_used(self):
"""
An estimate of the percentage of device life that has
been used, based on the actual device usage and the
manufacturer's prediction of device life. A value
greater than 99 indicates that the estimated endurance
has been consumed, but may not indicate a device failure.
Omitted if value is unknown.
"""
return self._percent_rated_life_used
@percent_rated_life_used.setter
def percent_rated_life_used(self, val):
if val != None:
self.validate('percent_rated_life_used', val)
self._percent_rated_life_used = val
@staticmethod
def get_api_name():
return "storage-ssd-info"
@staticmethod
def get_desired_attrs():
return [
'percent-spares-consumed',
'percent-spares-consumed-limit',
'percent-rated-life-used',
]
def describe_properties(self):
return {
'percent_spares_consumed': { 'class': int, 'is_list': False, 'required': 'optional' },
'percent_spares_consumed_limit': { 'class': int, 'is_list': False, 'required': 'optional' },
'percent_rated_life_used': { 'class': int, 'is_list': False, 'required': 'optional' },
}
| 36.293333 | 104 | 0.653196 | 325 | 2,722 | 5.218462 | 0.298462 | 0.15684 | 0.222877 | 0.137972 | 0.478774 | 0.367335 | 0.235849 | 0.215212 | 0.121462 | 0.121462 | 0 | 0.001009 | 0.271859 | 2,722 | 74 | 105 | 36.783784 | 0.854692 | 0.260838 | 0 | 0.177778 | 0 | 0 | 0.175961 | 0.121819 | 0 | 0 | 0 | 0 | 0 | 1 | 0.2 | false | 0 | 0.022222 | 0.066667 | 0.444444 | 0 | 0 | 0 | 0 | null | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
74f0d0e289d375ba55597c4169366e6d54f28cd5 | 1,544 | py | Python | team_builder/accounts/models.py | taylorculver/Django_Team_Builder | 62a9e6a37c435876206697f982f66089e2e82b35 | [
"Unlicense"
] | null | null | null | team_builder/accounts/models.py | taylorculver/Django_Team_Builder | 62a9e6a37c435876206697f982f66089e2e82b35 | [
"Unlicense"
] | 7 | 2018-08-08T18:42:36.000Z | 2018-10-01T18:46:40.000Z | team_builder/accounts/models.py | taylorculver/Django_Team_Builder | 62a9e6a37c435876206697f982f66089e2e82b35 | [
"Unlicense"
] | null | null | null | from django.contrib.auth.models import User
from django.db import models
from django.db.models.signals import post_save
from django.dispatch import receiver
class Profile(models.Model):
"""
Model for user-created Profile
https://simpleisbetterthancomplex.com/tutorial/2016/07/22/how-to-extend-django-user-model.html
"""
username = models.OneToOneField(
User,
on_delete=models.CASCADE,
unique=True
)
full_name = models.CharField(max_length=200)
description = models.TextField()
avatar = models.ImageField(
upload_to='avatars/',
default='avatars/default.png')
def __str__(self):
return str(self.username)
class Skill(models.Model):
"""Model for listing user skills"""
profile = models.ForeignKey(
Profile,
on_delete=models.CASCADE
)
skill = models.CharField(max_length=200)
def __str__(self):
return self.skill
class GitHub(models.Model):
"""Model for listing user GitHub Projects"""
profile = models.ForeignKey(
Profile,
on_delete=models.CASCADE
)
github_project = models.CharField(max_length=200)
github_url = models.URLField()
def __str__(self):
return self.github_project
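# Signal handlers: automatically create and save a Profile whenever a User is created or saved.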
@receiver(post_save, sender=User)
def create_user_profile(sender, instance, created, **kwargs):
if created:
Profile.objects.create(username=instance)
@receiver(post_save, sender=User)
def save_user_profile(sender, instance, **kwargs):
instance.profile.save()
| 24.507937 | 98 | 0.687824 | 185 | 1,544 | 5.578378 | 0.372973 | 0.03876 | 0.046512 | 0.055233 | 0.330426 | 0.213178 | 0.098837 | 0.098837 | 0 | 0 | 0 | 0.013878 | 0.206606 | 1,544 | 62 | 99 | 24.903226 | 0.828571 | 0.126295 | 0 | 0.268293 | 0 | 0 | 0.020486 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.121951 | false | 0 | 0.097561 | 0.073171 | 0.585366 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
74f235f44770ebb4082a9f693309b40e2e0bc8f1 | 3,907 | py | Python | PIDKiller.py | godoyp/PIDKiller | 94ae8b77b5e5bca0552dee4ecaa1c1da16d3b39e | [
"MIT"
] | null | null | null | PIDKiller.py | godoyp/PIDKiller | 94ae8b77b5e5bca0552dee4ecaa1c1da16d3b39e | [
"MIT"
] | null | null | null | PIDKiller.py | godoyp/PIDKiller | 94ae8b77b5e5bca0552dee4ecaa1c1da16d3b39e | [
"MIT"
] | null | null | null | # Modulos
import PySimpleGUI as Sg
import wmi
from smb.SMBConnection import SMBConnection
from configparser import ConfigParser
from time import sleep
import sys
from multiprocessing import Process, freeze_support
# Control variable for the while loop (sair = 1 ends the program)
sair = 0
# Load the configuration file
cfg = ConfigParser()
cfg.read('config.ini')
comp = cfg.get('Server', 'IP')
user = cfg.get('Server', 'user')
passwd = cfg.get('Server', 'passwd')
# Screen layout
Sg.theme('Reddit')
layout = [[Sg.Text('PID Killer'), Sg.Text('Servidor:'), Sg.Text(comp)],
[Sg.Output(size=(90, 30), key='-OUTPUT-')],
[Sg.Button('Carregar Pool'), Sg.Button('Carregar Task'), Sg.Button('Sair')],
[Sg.Text('Qual PID deseja finalizar?'), Sg.Input(key='input'), Sg.Button('Finalizar Processo')]]
window = Sg.Window('PIDKiller', layout, icon='icon.ico')
# Loading-animation function
def _splash():
for i in range(500000):
Sg.PopupAnimated('load.gif', background_color='white', time_between_frames=60)
# noinspection PyTypeChecker
Sg.PopupAnimated(None)
# Main function
def _program():
    # Create the WMI connection with the configured credentials to run remote commands
try:
remoto = wmi.WMI(comp, user=user, password=passwd)
local = wmi.WMI()
except wmi.x_wmi:
sleep(3)
Sg.popup("Atenção, não foi possível conectar ao servidor! Verifique as configurações!", title="Atenção!")
sys.exit(1)
    # Run the defined commands, writing TXT files at the root of the remote disk
remoto.Win32_Process.Create(CommandLine='cmd.exe /c C:/Windows/System32/inetsrv/appcmd.exe list wp >> '
'"C:/InfoPool.txt"')
remoto.Win32_Process.Create(CommandLine='cmd.exe /c tasklist >> "C:/InfoList.txt"')
    # Open an SMB connection to the remote machine to copy the files to the local machine
conn = SMBConnection(user, passwd, 'client', comp)
conn.connect(comp, 139, timeout=10000)
global sair
    while sair == 0:
with open('C:/InfoOutPool.txt', 'wb') as fp1:
sleep(1)
conn.retrieveFile('C$', '/InfoPool.txt', fp1)
arquivo1 = open('C:/InfoOutPool.txt', 'r')
listapool = arquivo1.read()
arquivo1.close()
with open('C:/InfoOutList.txt', 'wb') as fp2:
sleep(1)
conn.retrieveFile('C$', '/InfoList.txt', fp2)
arquivo2 = open('C:/InfoOutList.txt', 'r')
listatask = arquivo2.read()
arquivo2.close()
sleep(1)
    # GUI event loop
while True:
(event, values) = window.read(timeout=100)
if event == 'Carregar Task':
window['-OUTPUT-'].update(listatask)
if event == 'Carregar Pool':
window['-OUTPUT-'].update(listapool)
if event == Sg.WIN_CLOSED or event == 'Sair':
remoto.Win32_Process.Create(CommandLine='cmd.exe /c DEL "C:/Info*.txt"')
local.Win32_Process.Create(CommandLine='cmd.exe /c DEL "C:/Info*.txt"')
Sg.popup_auto_close('Saindo...', auto_close_duration=2, button_type=5, no_titlebar=True)
sair = 1
break
if event == 'Finalizar Processo':
processo = values['input']
killer = str(processo)
remoto.Win32_Process.Create(CommandLine="cmd.exe /b /c taskkill -pid " + killer + " /f")
Sg.popup_ok('Processo Finalizado com Sucesso!')
break
conn.close()
window.close()
if __name__ == '__main__':
freeze_support()
load = Process(target=_splash)
exe = Process(target=_program)
jobs = [load, exe]
for job in jobs:
job.start()
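# Note (added): the splash and main processes are started but never joined here;
# a join keeps the parent process alive until the GUI loop finishes, e.g.:
#   for job in jobs:
#       job.join()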
| 33.681034 | 114 | 0.594574 | 466 | 3,907 | 4.918455 | 0.446352 | 0.026178 | 0.039267 | 0.063264 | 0.118237 | 0.098168 | 0.098168 | 0.080279 | 0.041012 | 0.041012 | 0 | 0.019788 | 0.275659 | 3,907 | 115 | 115 | 33.973913 | 0.790106 | 0.113386 | 0 | 0.064103 | 0 | 0 | 0.210968 | 0.011387 | 0 | 0 | 0 | 0 | 0 | 1 | 0.025641 | false | 0.038462 | 0.089744 | 0 | 0.115385 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
74f2722b6fd0d9275b8a2fdd984c9a1ced8700d9 | 21,298 | py | Python | eventdata/parameter_sources/randomevent.py | ywelsch/rally-eventdata-track | 148fe2ffc90f192a1d3d68c614031e40ecc67eae | [
"Apache-2.0"
] | 33 | 2017-02-22T17:59:46.000Z | 2021-11-02T07:07:40.000Z | eventdata/parameter_sources/randomevent.py | ywelsch/rally-eventdata-track | 148fe2ffc90f192a1d3d68c614031e40ecc67eae | [
"Apache-2.0"
] | 68 | 2017-03-10T12:57:36.000Z | 2021-07-14T14:26:03.000Z | eventdata/parameter_sources/randomevent.py | isabella232/rally-eventdata-track | d7f25419ba3ef554998d89caa3fdb5a2d2100d41 | [
"Apache-2.0"
] | 45 | 2017-02-22T18:03:58.000Z | 2022-01-01T02:18:41.000Z | # Licensed to Elasticsearch B.V. under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch B.V. licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import datetime
import gzip
import itertools
import json
import os
import random
import re
from eventdata.parameter_sources.timeutils import TimestampStructGenerator
from eventdata.parameter_sources.weightedarray import WeightedArray
from eventdata.utils import elasticlogs_bulk_source as ebs
cwd = os.path.dirname(__file__)
class Agent:
def __init__(self):
if '_agents' in ebs.global_lookups.keys():
self._agents = ebs.global_lookups['_agents']
else:
self._agents = WeightedArray('%s/data/agents.json.gz' % cwd)
ebs.global_lookups['_agents'] = self._agents
if '_agents_name_lookup' in ebs.global_lookups.keys():
self._agents_name_lookup = ebs.global_lookups['_agents_name_lookup']
else:
with gzip.open('%s/data/agents_name_lookup.json.gz' % cwd, 'rt') as data_file:
self._agents_name_lookup = json.load(data_file)
ebs.global_lookups['_agents_name_lookup'] = self._agents_name_lookup
if '_agents_os_lookup' in ebs.global_lookups.keys():
self._agents_os_lookup = ebs.global_lookups['_agents_os_lookup']
else:
with gzip.open('%s/data/agents_os_lookup.json.gz' % cwd, 'rt') as data_file:
self._agents_os_lookup = json.load(data_file)
ebs.global_lookups['_agents_os_lookup'] = self._agents_os_lookup
if '_agents_os_name_lookup' in ebs.global_lookups.keys():
self._agents_os_name_lookup = ebs.global_lookups['_agents_os_name_lookup']
else:
with gzip.open('%s/data/agents_os_name_lookup.json.gz' % cwd, 'rt') as data_file:
self._agents_os_name_lookup = json.load(data_file)
ebs.global_lookups['_agents_os_name_lookup'] = self._agents_os_name_lookup
if '_agents_os_major_lookup' in ebs.global_lookups.keys():
self._agents_os_major_lookup = ebs.global_lookups['_agents_os_major_lookup']
else:
with gzip.open('%s/data/agents_os_major_lookup.json.gz' % cwd, 'rt') as data_file:
self._agents_os_major_lookup = json.load(data_file)
ebs.global_lookups['_agents_os_major_lookup'] = self._agents_os_major_lookup
if '_agents_major_lookup' in ebs.global_lookups.keys():
self._agents_major_lookup = ebs.global_lookups['_agents_major_lookup']
else:
with gzip.open('%s/data/agents_major_lookup.json.gz' % cwd, 'rt') as data_file:
self._agents_major_lookup = json.load(data_file)
ebs.global_lookups['_agents_major_lookup'] = self._agents_major_lookup
if '_agents_device_lookup' in ebs.global_lookups.keys():
self._agents_device_lookup = ebs.global_lookups['_agents_device_lookup']
else:
with gzip.open('%s/data/agents_device_lookup.json.gz' % cwd, 'rt') as data_file:
self._agents_device_lookup = json.load(data_file)
ebs.global_lookups['_agents_device_lookup'] = self._agents_device_lookup
if '_agent_lookup' in ebs.global_lookups.keys():
self._agent_lookup = ebs.global_lookups['_agent_lookup']
else:
with gzip.open('%s/data/agent_lookup.json.gz' % cwd, 'rt') as data_file:
self._agent_lookup = json.load(data_file)
ebs.global_lookups['_agent_lookup'] = self._agent_lookup
def add_fields(self, event):
agent = self._agents.get_random()
event['useragent_name'] = self.__get_lookup_value(self._agents_name_lookup, agent[0])
event['useragent_os'] = self.__get_lookup_value(self._agents_os_lookup, agent[1])
event['useragent_os_name'] = self.__get_lookup_value(self._agents_os_name_lookup, agent[2])
event['useragent_device'] = self.__get_lookup_value(self._agents_device_lookup, agent[3])
event['useragent_os_major'] = self.__get_lookup_value(self._agents_os_major_lookup, agent[4])
event['useragent_major'] = self.__get_lookup_value(self._agents_major_lookup, agent[5])
event['agent'] = self.__get_lookup_value(self._agent_lookup, agent[6])
def __get_lookup_value(self, lookup, key):
if key == "":
return key
else:
return lookup[key]
class ClientIp:
def __init__(self):
self._rare_clientip_probability = 0.269736965199
if '_clientips' in ebs.global_lookups.keys():
self._clientips = ebs.global_lookups['_clientips']
else:
self._clientips = WeightedArray('%s/data/clientips.json.gz' % cwd)
ebs.global_lookups['_clientips'] = self._clientips
if '_rare_clientips' in ebs.global_lookups.keys():
self._rare_clientips = ebs.global_lookups['_rare_clientips']
else:
self._rare_clientips = WeightedArray('%s/data/rare_clientips.json.gz' % cwd)
ebs.global_lookups['_rare_clientips'] = self._rare_clientips
if '_clientips_country_name_lookup' in ebs.global_lookups.keys():
self._clientips_country_name_lookup = ebs.global_lookups['_clientips_country_name_lookup']
else:
with gzip.open('%s/data/clientips_country_name_lookup.json.gz' % cwd, 'rt') as data_file:
self._clientips_country_name_lookup = json.load(data_file)
ebs.global_lookups['_clientips_country_name_lookup'] = self._clientips_country_name_lookup
if '_clientips_country_iso_code_lookup' in ebs.global_lookups.keys():
self._clientips_country_iso_code_lookup = ebs.global_lookups['_clientips_country_iso_code_lookup']
else:
with gzip.open('%s/data/clientips_country_iso_code_lookup.json.gz' % cwd, 'rt') as data_file:
self._clientips_country_iso_code_lookup = json.load(data_file)
ebs.global_lookups['_clientips_country_iso_code_lookup'] = self._clientips_country_iso_code_lookup
if '_clientips_continent_name_lookup' in ebs.global_lookups.keys():
self._clientips_continent_name_lookup = ebs.global_lookups['_clientips_continent_name_lookup']
else:
with gzip.open('%s/data/clientips_continent_name_lookup.json.gz' % cwd, 'rt') as data_file:
self._clientips_continent_name_lookup = json.load(data_file)
ebs.global_lookups['_clientips_continent_name_lookup'] = self._clientips_continent_name_lookup
if '_clientips_continent_code_lookup' in ebs.global_lookups.keys():
self._clientips_continent_code_lookup = ebs.global_lookups['_clientips_continent_code_lookup']
else:
with gzip.open('%s/data/clientips_continent_code_lookup.json.gz' % cwd, 'rt') as data_file:
self._clientips_continent_code_lookup = json.load(data_file)
ebs.global_lookups['_clientips_continent_code_lookup'] = self._clientips_continent_code_lookup
if '_clientips_city_name_lookup' in ebs.global_lookups.keys():
self._clientips_city_name_lookup = ebs.global_lookups['_clientips_city_name_lookup']
else:
with gzip.open('%s/data/clientips_city_name_lookup.json.gz' % cwd, 'rt') as data_file:
self._clientips_city_name_lookup = json.load(data_file)
ebs.global_lookups['_clientips_city_name_lookup'] = self._clientips_city_name_lookup
def add_fields(self, event):
p = random.random()
if p < self._rare_clientip_probability:
data = self._rare_clientips.get_random()
event['clientip'] = self.__fill_out_ip_prefix(data[0])
else:
data = self._clientips.get_random()
event['clientip'] = data[0]
event['geoip_location_lat'] = data[1][0]
event['geoip_location_lon'] = data[1][1]
event['geoip_city_name'] = self.__get_lookup_value(self._clientips_city_name_lookup, data[2])
event['geoip_country_name'] = self.__get_lookup_value(self._clientips_country_name_lookup, data[3])
event['geoip_country_iso_code'] = self.__get_lookup_value(self._clientips_country_iso_code_lookup, data[4])
event['geoip_continent_name'] = self.__get_lookup_value(self._clientips_continent_name_lookup, data[5])
event['geoip_continent_code'] = self.__get_lookup_value(self._clientips_continent_code_lookup, data[5])
def __fill_out_ip_prefix(self, ip_prefix):
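# Note (added): rnd * (1 - rnd) peaks at 0.25 when rnd == 0.5, so v1/v2 span
# 0..255 with mid-range octets being the most likely; the two generated octets
# are appended to the stored prefix to form a full address.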
rnd1 = random.random()
v1 = rnd1 * (1 - rnd1) * 255 * 4
k1 = int(v1)
rnd2 = random.random()
v2 = rnd2 * (1 - rnd2) * 255 * 4
k2 = int(v2)
return "{}.{}.{}".format(ip_prefix, k1, k2)
def __get_lookup_value(self, lookup, key):
if key == "":
return key
else:
return lookup[key]
class Referrer:
def __init__(self):
if '_referrers' in ebs.global_lookups.keys():
self._referrers = ebs.global_lookups['_referrers']
else:
self._referrers = WeightedArray('%s/data/referrers.json.gz' % cwd)
ebs.global_lookups['_referrers'] = self._referrers
if '_referrers_url_base_lookup' in ebs.global_lookups.keys():
self._referrers_url_base_lookup = ebs.global_lookups['_referrers_url_base_lookup']
else:
with gzip.open('%s/data/referrers_url_base_lookup.json.gz' % cwd, 'rt') as data_file:
self._referrers_url_base_lookup = json.load(data_file)
ebs.global_lookups['_referrers_url_base_lookup'] = self._referrers_url_base_lookup
def add_fields(self, event):
data = self._referrers.get_random()
event['referrer'] = "%s%s" % (self._referrers_url_base_lookup[data[0]], data[1])
class Request:
def __init__(self):
if '_requests' in ebs.global_lookups.keys():
self._requests = ebs.global_lookups['_requests']
else:
self._requests = WeightedArray('%s/data/requests.json.gz' % cwd)
ebs.global_lookups['_requests'] = self._requests
if '_requests_url_base_lookup' in ebs.global_lookups.keys():
self._requests_url_base_lookup = ebs.global_lookups['_requests_url_base_lookup']
else:
with gzip.open('%s/data/requests_url_base_lookup.json.gz' % cwd, 'rt') as data_file:
self._requests_url_base_lookup = json.load(data_file)
ebs.global_lookups['_requests_url_base_lookup'] = self._requests_url_base_lookup
def add_fields(self, event):
data = self._requests.get_random()
event['request'] = "{}{}".format(self._requests_url_base_lookup[data[0]], data[1])
event['bytes'] = data[2]
event['verb'] = data[3]
event['response'] = data[4]
event['httpversion'] = data[5]
def convert_to_bytes(size):
matched_size = re.match(r"^(\d+)\s?(kB|MB|GB)?$", size)
if matched_size:
value = int(matched_size.group(1))
unit = matched_size.group(2)
if unit == "kB":
return value << 10
elif unit == "MB":
return value << 20
elif unit == "GB":
return value << 30
elif unit is None:
return value
else:
# unreachable unless the regex above accepts a unit that the branches here do not handle
raise ValueError("Unrecognized unit [{}] for byte size value [{}]".format(unit, size))
else:
raise ValueError("Invalid byte size value [{}]".format(size))
class RandomEvent:
def __init__(self, params, agent=Agent, client_ip=ClientIp, referrer=Referrer, request=Request):
self._agent = agent()
self._clientip = client_ip()
self._referrer = referrer()
self._request = request()
# We will reuse the event dictionary. This assumes that each field will be present (and thus overwritten) in each event.
# This reduces object churn and improves peak indexing throughput.
self._event = {}
if "index" in params:
index = re.sub(r"<\s*yyyy\s*>", "{ts[yyyy]}", params["index"], flags=re.IGNORECASE)
index = re.sub(r"<\s*yy\s*>", "{ts[yy]}", index, flags=re.IGNORECASE)
index = re.sub(r"<\s*mm\s*>", "{ts[mm]}", index, flags=re.IGNORECASE)
index = re.sub(r"<\s*dd\s*>", "{ts[dd]}", index, flags=re.IGNORECASE)
index = re.sub(r"<\s*hh\s*>", "{ts[hh]}", index, flags=re.IGNORECASE)
self._index = index
self._index_pattern = True
else:
self._index = "elasticlogs"
self._index_pattern = False
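# Illustrative example (added): when an "index" parameter is given, a value such as
# "elasticlogs-<yyyy>.<mm>.<dd>" becomes "elasticlogs-{ts[yyyy]}.{ts[mm]}.{ts[dd]}"
# above and is later filled from the timestamp struct in __generate_index_pattern().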
self._type = "doc"
self._timestamp_generator = TimestampStructGenerator(
params.get("starting_point", "now"),
params.get("offset"),
float(params.get("acceleration_factor", "1.0")),
# this is only expected to be used in tests
params.get("__utc_now")
)
if "daily_logging_volume" in params and "client_count" in params:
# in bytes
self.daily_logging_volume = convert_to_bytes(params["daily_logging_volume"]) // int(params["client_count"])
else:
self.daily_logging_volume = None
self.current_logging_volume = 0
self.total_days = params.get("number_of_days")
self.remaining_days = self.total_days
self.record_raw_event_size = params.get("record_raw_event_size", False)
self._offset = 0
self._web_host = itertools.cycle([1, 2, 3])
self._timestruct = None
self._index_name = None
self._time_interval_current_bulk = 0
@property
def percent_completed(self):
if self.daily_logging_volume is None or self.total_days is None:
return None
else:
full_days = self.total_days - self.remaining_days
already_generated = self.daily_logging_volume * full_days + self.current_logging_volume
total = self.total_days * self.daily_logging_volume
return already_generated / total
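# Worked example (added): with total_days=10, remaining_days=7,
# daily_logging_volume=1_000_000 and current_logging_volume=250_000 this
# property returns (3 * 1_000_000 + 250_000) / 10_000_000 = 0.325.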
def start_bulk(self, bulk_size):
self._time_interval_current_bulk = 1 / bulk_size
self._timestruct = self._timestamp_generator.next_timestamp()
self._index_name = self.__generate_index_pattern(self._timestruct)
def generate_event(self):
if self.remaining_days == 0:
raise StopIteration()
# advance time by a few micros
self._timestruct = self._timestamp_generator.simulate_tick(self._time_interval_current_bulk)
# index for the current line - we may cross a date boundary later if we're above the daily logging volume
index = self._index_name
event = self._event
event["@timestamp"] = self._timestruct["iso"]
# assume a typical event size of 263 bytes but limit the file size to 4GB
event["offset"] = (self._offset + 263) % (4 * 1024 * 1024 * 1024)
self._agent.add_fields(event)
self._clientip.add_fields(event)
self._referrer.add_fields(event)
self._request.add_fields(event)
event["hostname"] = "web-%s-%s.elastic.co" % (event["geoip_continent_code"], next(self._web_host))
if self.record_raw_event_size or self.daily_logging_volume:
# determine the raw event size (as if this were contained in nginx log file). We do not bother to
# reformat the timestamp as this is not worth the overhead.
raw_event = '%s - - [%s] "%s %s HTTP/%s" %s %s "%s" "%s"' % (event["clientip"], event["@timestamp"],
event["verb"], event["request"],
event["httpversion"], event["response"],
event["bytes"], event["referrer"],
event["agent"])
if self.daily_logging_volume:
self.current_logging_volume += len(raw_event)
if self.current_logging_volume > self.daily_logging_volume:
if self.remaining_days is not None:
self.remaining_days -= 1
self._timestamp_generator.skip(datetime.timedelta(days=1))
# advance time for real now (we usually use #simulate_tick(), which keeps
# everything except the microseconds constant)
self._timestruct = self._timestamp_generator.next_timestamp()
self._index_name = self.__generate_index_pattern(self._timestruct)
self.current_logging_volume = 0
if self.record_raw_event_size:
# we are on the hot code path here and thus we want to avoid conditionally creating strings, so we
# duplicate the template string
line = '{"@timestamp": "%s", ' \
'"_raw_event_size":%d, ' \
'"offset":%s, ' \
'"source":"/usr/local/var/log/nginx/access.log","fileset":{"module":"nginx","name":"access"},"input":{"type":"log"},' \
'"beat":{"version":"6.3.0","hostname":"%s","name":"%s"},' \
'"prospector":{"type":"log"},' \
'"nginx":{"access":{"user_name": "-",' \
'"agent":"%s","user_agent": {"major": "%s","os": "%s","os_major": "%s","name": "%s","os_name": "%s","device": "%s"},' \
'"remote_ip": "%s","remote_ip_list":["%s"],' \
'"geoip":{"continent_name": "%s","city_name": "%s","country_name": "%s","country_iso_code": "%s","location":{"lat": %s,"lon": %s} },' \
'"referrer":"%s",' \
'"url": "%s","body_sent":{"bytes": %s},"method":"%s","response_code":%s,"http_version":"%s"} } }' % \
(event["@timestamp"],
len(raw_event),
event["offset"],
event["hostname"],event["hostname"],
event["agent"], event["useragent_major"], event["useragent_os"], event["useragent_os_major"], event["useragent_name"], event["useragent_os_name"], event["useragent_device"],
event["clientip"], event["clientip"],
event["geoip_continent_name"], event["geoip_city_name"], event["geoip_country_name"], event["geoip_country_iso_code"], event["geoip_location_lat"], event["geoip_location_lon"],
event["referrer"],
event["request"], event["bytes"], event["verb"], event["response"], event["httpversion"])
else:
line = '{"@timestamp": "%s", ' \
'"offset":%s, ' \
'"source":"/usr/local/var/log/nginx/access.log","fileset":{"module":"nginx","name":"access"},"input":{"type":"log"},' \
'"beat":{"version":"6.3.0","hostname":"%s","name":"%s"},' \
'"prospector":{"type":"log"},' \
'"nginx":{"access":{"user_name": "-",' \
'"agent":"%s","user_agent": {"major": "%s","os": "%s","os_major": "%s","name": "%s","os_name": "%s","device": "%s"},' \
'"remote_ip": "%s","remote_ip_list":["%s"],' \
'"geoip":{"continent_name": "%s","city_name": "%s","country_name": "%s","country_iso_code": "%s","location":{"lat": %s,"lon": %s} },' \
'"referrer":"%s",' \
'"url": "%s","body_sent":{"bytes": %s},"method":"%s","response_code":%s,"http_version":"%s"} } }' % \
(event["@timestamp"],
event["offset"],
event["hostname"],event["hostname"],
event["agent"], event["useragent_major"], event["useragent_os"], event["useragent_os_major"], event["useragent_name"], event["useragent_os_name"], event["useragent_device"],
event["clientip"], event["clientip"],
event["geoip_continent_name"], event["geoip_city_name"], event["geoip_country_name"], event["geoip_country_iso_code"], event["geoip_location_lat"], event["geoip_location_lon"],
event["referrer"],
event["request"], event["bytes"], event["verb"], event["response"], event["httpversion"])
return line, index, self._type
def __generate_index_pattern(self, timestruct):
if self._index_pattern:
return self._index.format(ts=timestruct)
else:
return self._index
| 51.569007 | 196 | 0.621326 | 2,612 | 21,298 | 4.721286 | 0.12902 | 0.041599 | 0.073954 | 0.027733 | 0.566818 | 0.495864 | 0.453779 | 0.366607 | 0.353876 | 0.252595 | 0 | 0.007143 | 0.250681 | 21,298 | 412 | 197 | 51.694175 | 0.765587 | 0.076909 | 0 | 0.273846 | 0 | 0.027692 | 0.238792 | 0.127471 | 0 | 0 | 0 | 0 | 0 | 1 | 0.052308 | false | 0 | 0.030769 | 0 | 0.141538 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
74f27c4a87e0d0092113e2de9c038d951d050bbb | 12,791 | py | Python | modules/zivid/_settings_converter.py | knzivid/zivid-python | 5f05d5c17a6f649e89d58a69d0744c525e02b5eb | [
"BSD-3-Clause"
] | null | null | null | modules/zivid/_settings_converter.py | knzivid/zivid-python | 5f05d5c17a6f649e89d58a69d0744c525e02b5eb | [
"BSD-3-Clause"
] | null | null | null | modules/zivid/_settings_converter.py | knzivid/zivid-python | 5f05d5c17a6f649e89d58a69d0744c525e02b5eb | [
"BSD-3-Clause"
] | null | null | null | """Auto generated, do not edit."""
import zivid
import _zivid
def to_settings_acquisition(internal_acquisition):
return zivid.Settings.Acquisition(
aperture=internal_acquisition.aperture.value,
brightness=internal_acquisition.brightness.value,
exposure_time=internal_acquisition.exposure_time.value,
gain=internal_acquisition.gain.value,
)
def to_settings_processing_color_balance(internal_balance):
return zivid.Settings.Processing.Color.Balance(
blue=internal_balance.blue.value,
green=internal_balance.green.value,
red=internal_balance.red.value,
)
def to_settings_processing_color(internal_color):
return zivid.Settings.Processing.Color(
balance=to_settings_processing_color_balance(internal_color.balance),
gamma=internal_color.gamma.value,
)
def to_settings_processing_filters_experimental_contrast_distortion_correction(
internal_correction,
):
return zivid.Settings.Processing.Filters.Experimental.ContrastDistortion.Correction(
enabled=internal_correction.enabled.value,
strength=internal_correction.strength.value,
)
def to_settings_processing_filters_experimental_contrast_distortion_removal(
internal_removal,
):
return zivid.Settings.Processing.Filters.Experimental.ContrastDistortion.Removal(
enabled=internal_removal.enabled.value,
threshold=internal_removal.threshold.value,
)
def to_settings_processing_filters_experimental_contrast_distortion(
internal_contrast_distortion,
):
return zivid.Settings.Processing.Filters.Experimental.ContrastDistortion(
correction=to_settings_processing_filters_experimental_contrast_distortion_correction(
internal_contrast_distortion.correction
),
removal=to_settings_processing_filters_experimental_contrast_distortion_removal(
internal_contrast_distortion.removal
),
)
def to_settings_processing_filters_experimental(internal_experimental):
return zivid.Settings.Processing.Filters.Experimental(
contrast_distortion=to_settings_processing_filters_experimental_contrast_distortion(
internal_experimental.contrast_distortion
),
)
def to_settings_processing_filters_noise_removal(internal_removal):
return zivid.Settings.Processing.Filters.Noise.Removal(
enabled=internal_removal.enabled.value,
threshold=internal_removal.threshold.value,
)
def to_settings_processing_filters_noise(internal_noise):
return zivid.Settings.Processing.Filters.Noise(
removal=to_settings_processing_filters_noise_removal(internal_noise.removal),
)
def to_settings_processing_filters_outlier_removal(internal_removal):
return zivid.Settings.Processing.Filters.Outlier.Removal(
enabled=internal_removal.enabled.value,
threshold=internal_removal.threshold.value,
)
def to_settings_processing_filters_outlier(internal_outlier):
return zivid.Settings.Processing.Filters.Outlier(
removal=to_settings_processing_filters_outlier_removal(
internal_outlier.removal
),
)
def to_settings_processing_filters_reflection_removal(internal_removal):
return zivid.Settings.Processing.Filters.Reflection.Removal(
enabled=internal_removal.enabled.value,
)
def to_settings_processing_filters_reflection(internal_reflection):
return zivid.Settings.Processing.Filters.Reflection(
removal=to_settings_processing_filters_reflection_removal(
internal_reflection.removal
),
)
def to_settings_processing_filters_smoothing_gaussian(internal_gaussian):
return zivid.Settings.Processing.Filters.Smoothing.Gaussian(
enabled=internal_gaussian.enabled.value, sigma=internal_gaussian.sigma.value,
)
def to_settings_processing_filters_smoothing(internal_smoothing):
return zivid.Settings.Processing.Filters.Smoothing(
gaussian=to_settings_processing_filters_smoothing_gaussian(
internal_smoothing.gaussian
),
)
def to_settings_processing_filters(internal_filters):
return zivid.Settings.Processing.Filters(
experimental=to_settings_processing_filters_experimental(
internal_filters.experimental
),
noise=to_settings_processing_filters_noise(internal_filters.noise),
outlier=to_settings_processing_filters_outlier(internal_filters.outlier),
reflection=to_settings_processing_filters_reflection(
internal_filters.reflection
),
smoothing=to_settings_processing_filters_smoothing(internal_filters.smoothing),
)
def to_settings_processing(internal_processing):
return zivid.Settings.Processing(
color=to_settings_processing_color(internal_processing.color),
filters=to_settings_processing_filters(internal_processing.filters),
)
def to_settings(internal_settings):
return zivid.Settings(
processing=to_settings_processing(internal_settings.processing),
acquisitions=[
to_settings_acquisition(element)
for element in internal_settings.acquisitions.value
],
)
def to_internal_settings_acquisition(acquisition):
internal_acquisition = _zivid.Settings.Acquisition()
internal_acquisition.aperture = _zivid.Settings.Acquisition.Aperture(
acquisition.aperture
)
internal_acquisition.brightness = _zivid.Settings.Acquisition.Brightness(
acquisition.brightness
)
internal_acquisition.exposure_time = _zivid.Settings.Acquisition.ExposureTime(
acquisition.exposure_time
)
internal_acquisition.gain = _zivid.Settings.Acquisition.Gain(acquisition.gain)
return internal_acquisition
def to_internal_settings_processing_color_balance(balance):
internal_balance = _zivid.Settings.Processing.Color.Balance()
internal_balance.blue = _zivid.Settings.Processing.Color.Balance.Blue(balance.blue)
internal_balance.green = _zivid.Settings.Processing.Color.Balance.Green(
balance.green
)
internal_balance.red = _zivid.Settings.Processing.Color.Balance.Red(balance.red)
return internal_balance
def to_internal_settings_processing_color(color):
internal_color = _zivid.Settings.Processing.Color()
internal_color.gamma = _zivid.Settings.Processing.Color.Gamma(color.gamma)
internal_color.balance = to_internal_settings_processing_color_balance(
color.balance
)
return internal_color
def to_internal_settings_processing_filters_experimental_contrast_distortion_correction(
correction,
):
internal_correction = (
_zivid.Settings.Processing.Filters.Experimental.ContrastDistortion.Correction()
)
internal_correction.enabled = _zivid.Settings.Processing.Filters.Experimental.ContrastDistortion.Correction.Enabled(
correction.enabled
)
internal_correction.strength = _zivid.Settings.Processing.Filters.Experimental.ContrastDistortion.Correction.Strength(
correction.strength
)
return internal_correction
def to_internal_settings_processing_filters_experimental_contrast_distortion_removal(
removal,
):
internal_removal = (
_zivid.Settings.Processing.Filters.Experimental.ContrastDistortion.Removal()
)
internal_removal.enabled = _zivid.Settings.Processing.Filters.Experimental.ContrastDistortion.Removal.Enabled(
removal.enabled
)
internal_removal.threshold = _zivid.Settings.Processing.Filters.Experimental.ContrastDistortion.Removal.Threshold(
removal.threshold
)
return internal_removal
def to_internal_settings_processing_filters_experimental_contrast_distortion(
contrast_distortion,
):
internal_contrast_distortion = (
_zivid.Settings.Processing.Filters.Experimental.ContrastDistortion()
)
internal_contrast_distortion.correction = to_internal_settings_processing_filters_experimental_contrast_distortion_correction(
contrast_distortion.correction
)
internal_contrast_distortion.removal = to_internal_settings_processing_filters_experimental_contrast_distortion_removal(
contrast_distortion.removal
)
return internal_contrast_distortion
def to_internal_settings_processing_filters_experimental(experimental):
internal_experimental = _zivid.Settings.Processing.Filters.Experimental()
internal_experimental.contrast_distortion = to_internal_settings_processing_filters_experimental_contrast_distortion(
experimental.contrast_distortion
)
return internal_experimental
def to_internal_settings_processing_filters_noise_removal(removal):
internal_removal = _zivid.Settings.Processing.Filters.Noise.Removal()
internal_removal.enabled = _zivid.Settings.Processing.Filters.Noise.Removal.Enabled(
removal.enabled
)
internal_removal.threshold = _zivid.Settings.Processing.Filters.Noise.Removal.Threshold(
removal.threshold
)
return internal_removal
def to_internal_settings_processing_filters_noise(noise):
internal_noise = _zivid.Settings.Processing.Filters.Noise()
internal_noise.removal = to_internal_settings_processing_filters_noise_removal(
noise.removal
)
return internal_noise
def to_internal_settings_processing_filters_outlier_removal(removal):
internal_removal = _zivid.Settings.Processing.Filters.Outlier.Removal()
internal_removal.enabled = _zivid.Settings.Processing.Filters.Outlier.Removal.Enabled(
removal.enabled
)
internal_removal.threshold = _zivid.Settings.Processing.Filters.Outlier.Removal.Threshold(
removal.threshold
)
return internal_removal
def to_internal_settings_processing_filters_outlier(outlier):
internal_outlier = _zivid.Settings.Processing.Filters.Outlier()
internal_outlier.removal = to_internal_settings_processing_filters_outlier_removal(
outlier.removal
)
return internal_outlier
def to_internal_settings_processing_filters_reflection_removal(removal):
internal_removal = _zivid.Settings.Processing.Filters.Reflection.Removal()
internal_removal.enabled = _zivid.Settings.Processing.Filters.Reflection.Removal.Enabled(
removal.enabled
)
return internal_removal
def to_internal_settings_processing_filters_reflection(reflection):
internal_reflection = _zivid.Settings.Processing.Filters.Reflection()
internal_reflection.removal = to_internal_settings_processing_filters_reflection_removal(
reflection.removal
)
return internal_reflection
def to_internal_settings_processing_filters_smoothing_gaussian(gaussian):
internal_gaussian = _zivid.Settings.Processing.Filters.Smoothing.Gaussian()
internal_gaussian.enabled = _zivid.Settings.Processing.Filters.Smoothing.Gaussian.Enabled(
gaussian.enabled
)
internal_gaussian.sigma = _zivid.Settings.Processing.Filters.Smoothing.Gaussian.Sigma(
gaussian.sigma
)
return internal_gaussian
def to_internal_settings_processing_filters_smoothing(smoothing):
internal_smoothing = _zivid.Settings.Processing.Filters.Smoothing()
internal_smoothing.gaussian = to_internal_settings_processing_filters_smoothing_gaussian(
smoothing.gaussian
)
return internal_smoothing
def to_internal_settings_processing_filters(filters):
internal_filters = _zivid.Settings.Processing.Filters()
internal_filters.experimental = to_internal_settings_processing_filters_experimental(
filters.experimental
)
internal_filters.noise = to_internal_settings_processing_filters_noise(
filters.noise
)
internal_filters.outlier = to_internal_settings_processing_filters_outlier(
filters.outlier
)
internal_filters.reflection = to_internal_settings_processing_filters_reflection(
filters.reflection
)
internal_filters.smoothing = to_internal_settings_processing_filters_smoothing(
filters.smoothing
)
return internal_filters
def to_internal_settings_processing(processing):
internal_processing = _zivid.Settings.Processing()
internal_processing.color = to_internal_settings_processing_color(processing.color)
internal_processing.filters = to_internal_settings_processing_filters(
processing.filters
)
return internal_processing
def to_internal_settings(settings):
internal_settings = _zivid.Settings()
internal_settings.processing = to_internal_settings_processing(settings.processing)
temp = _zivid.Settings().Acquisitions()
for acq in settings.acquisitions:
temp.append(to_internal_settings_acquisition(acq))
internal_settings.acquisitions = temp
return internal_settings
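
# Hedged usage sketch (not part of the auto-generated module): round-trip a settings
# object through the converters above. The exact keyword arguments accepted by
# zivid.Settings and zivid.Settings.Acquisition depend on the installed zivid
# version, so treat this as illustrative only.
def _example_round_trip():
    settings = zivid.Settings(acquisitions=[zivid.Settings.Acquisition(aperture=5.6)])
    internal = to_internal_settings(settings)
    return to_settings(internal)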
| 33.572178 | 130 | 0.787272 | 1,287 | 12,791 | 7.423465 | 0.03885 | 0.216663 | 0.232887 | 0.116182 | 0.682437 | 0.628951 | 0.443898 | 0.281662 | 0.164852 | 0.088549 | 0 | 0 | 0.148776 | 12,791 | 380 | 131 | 33.660526 | 0.87748 | 0.002189 | 0 | 0.114695 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.129032 | false | 0 | 0.007168 | 0.064516 | 0.265233 | 0 | 0 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3 |
74f423b852ccc3707d9d4c3d66b423e8f820b294 | 11,608 | py | Python | scripts/tracking/main_tracking.py | MaaaasayaK/Self-Supervised-Small-Soccer-Player-Detection-Tracking | 96d87367afdf4cca8aeca3f32c313e8632c70fe4 | [
"MIT"
] | 1 | 2021-08-17T18:22:12.000Z | 2021-08-17T18:22:12.000Z | scripts/tracking/main_tracking.py | cballester/Self-Supervised-Small-Soccer-Player-Detection-Tracking | a5d2d0c31a992919a270bd0e02379844196271f0 | [
"MIT"
] | null | null | null | scripts/tracking/main_tracking.py | cballester/Self-Supervised-Small-Soccer-Player-Detection-Tracking | a5d2d0c31a992919a270bd0e02379844196271f0 | [
"MIT"
] | 1 | 2021-08-19T14:21:52.000Z | 2021-08-19T14:21:52.000Z | import sys
import torchvision
import os
import torch
from tracking_utils import light_track
from natsort import natsorted, ns
import numpy as np
from argparse import ArgumentParser
if __name__ == '__main__':
parser = ArgumentParser()
parser.add_argument('--data_name', type=str, default='issia')
parser.add_argument('--use_GT_position', dest='use_GT_position', action='store_true')
parser.set_defaults(use_GT_position=False)
parser.add_argument('--rescale_img_factor', type=float, default=1.0)
parser.add_argument('--model_name', type=str, default='frcnn_fpn')
parser.add_argument('--backbone', type=str, default='resnet18')
parser.add_argument('--checkpoint', type=str, default='../../checkpoints_runs/player_det_resnet18_student.pth')
parser.add_argument('--detection_score_thres', type=float, default=0.8)
parser.add_argument('--no_use_context', dest='use_context', action='store_false')
parser.set_defaults(use_context=True)
parser.add_argument('--no_use_soft_nms', dest='use_soft_nms', action='store_false')
parser.set_defaults(use_soft_nms=True)
parser.add_argument('--nms_thres', type=float, default=0.4)
parser.add_argument('--anchor_sizes', type=int, nargs='+', default=[32, 64, 128, 256, 512])
parser.add_argument('--use_track_branch_model', dest='use_track_branch_model', action='store_true')
parser.set_defaults(use_track_branch_model=False)
parser.add_argument('--use_track_branch_embed', dest='use_track_branch_embed', action='store_true')
parser.set_defaults(use_track_branch_embed=False)
parser.add_argument('--pose_model', type=str, default='mobile-deconv')
parser.add_argument('--keyframe_interval', type=int, default=1)
parser.add_argument('--frame_interval', type=int, default=1)
parser.add_argument('--init_frame', type=int, default=100)
parser.add_argument('--n_img_max', type=int, default=50)
parser.add_argument('--no_use_IOU', dest='use_IOU', action='store_false')
parser.set_defaults(use_IOU=True)
parser.add_argument('--spacial_iou_thresh', type=float, default=0.5)
parser.add_argument('--no_use_features', dest='use_features', action='store_false')
parser.set_defaults(use_features=True)
parser.add_argument('--no_use_visual_feat', dest='use_visual_feat', action='store_false')
parser.set_defaults(use_visual_feat=True)
parser.add_argument('--visual_feat_model_name', type=str, default='faster-rcnn')
parser.add_argument('--imagenet_model', dest='imagenet_model', action='store_false')
parser.set_defaults(imagenet_model=True)
parser.add_argument('--use_pose', dest='use_pose', action='store_true')
parser.set_defaults(use_pose=False)
parser.add_argument('--weight_loss', dest='weight_loss', action='store_true')
parser.set_defaults(weight_loss=False)
parser.add_argument('--w_spacial', type=float, default=0.97)
parser.add_argument('--w_visual', type=float, default=0.03)
parser.add_argument('--w_pose', type=float, default=0.0)
parser.add_argument('--visual_metric', type=str, default='l2')
parser.add_argument('--use_filter_tracks', dest='use_filter_tracks', action='store_true')
parser.set_defaults(use_filter_tracks=False)
parser.add_argument('--thres_count_ids', type=int, default=2)
parser.add_argument('--use_ReID_module', dest='use_ReID_module', action='store_true')
parser.set_defaults(use_ReID_module=False)
parser.add_argument('--max_vis_reID', type=int, default=4)
parser.add_argument('--max_vis_feat', type=int, default=4)
parser.add_argument('--N_past_to_keep_reID', type=int, default=3)
parser.add_argument('--N_past_to_keep', type=int, default=1)
parser.add_argument('--N_frame_lost_keep', type=int, default=10)
parser.add_argument('--display_pose', dest='display_pose', action='store_true')
parser.set_defaults(display_pose=False)
parser.add_argument('--write_csv', dest='write_csv', action='store_true')
parser.set_defaults(write_csv=False)
parser.add_argument('--write_video', dest='write_video', action='store_true')
parser.set_defaults(write_video=False)
parser.add_argument('--visualize', dest='visualize', action='store_true')
parser.set_defaults(visualize=False)
parser.add_argument('--output_path', type=str, default='../../data/intermediate/tracking')
hparams = parser.parse_args()
hparams.current_model_detection = None
hparams.flag_method = True
if not hparams.use_visual_feat:
hparams.w_visual = 0
if not hparams.use_pose:
hparams.w_pose = 0
if hparams.visual_feat_model_name == 'faster-rcnn':
hparams.imagenet_model = False
max_dist_factor_feat = 32 * (1 / hparams.rescale_img_factor)
max_dist_factor_reID = max_dist_factor_feat / 4
if not hparams.use_GT_position:
if hparams.current_model_detection is None:
from train_tracker import get_model_detection
model_detection = get_model_detection(hparams.model_name, hparams.weight_loss, hparams.backbone, False,
False, False, hparams.detection_score_thres, False,
hparams.use_soft_nms, anchor_sizes=hparams.anchor_sizes, use_context=hparams.use_context,
nms_thres=hparams.nms_thres, use_track_branch=hparams.use_track_branch_model)
model_detection.load_state_dict(torch.load(hparams.checkpoint))
model_detection.to(torch.device('cuda'))
model_detection.eval()
else:
model_detection = hparams.current_model_detection
else:
model_detection = None
if hparams.use_visual_feat:
if hparams.visual_feat_model_name == 'faster-rcnn':
if hparams.current_model_detection is None:
from train_tracker import get_model_detection
visual_feat_model = get_model_detection(hparams.model_name, hparams.weight_loss, hparams.backbone, False,
False, False, hparams.detection_score_thres, False,
hparams.use_soft_nms, anchor_sizes=hparams.anchor_sizes,
use_context=hparams.use_context, nms_thres=hparams.nms_thres,
use_track_branch=hparams.use_track_branch_model)
visual_feat_model.load_state_dict(torch.load(hparams.checkpoint))
visual_feat_model.to(torch.device('cuda'))
else:
visual_feat_model = hparams.current_model_detection
visual_feat_model.eval()
layer = visual_feat_model._modules.get('fc7')
elif hparams.visual_feat_model_name == 'resnet50':
visual_feat_model = torchvision.models.resnet50(pretrained=True)
visual_feat_model.to(torch.device('cuda'))
visual_feat_model.eval()
layer = visual_feat_model._modules.get('avgpool')
elif hparams.visual_feat_model_name == 'vgg19':
visual_feat_model = torchvision.models.vgg19(pretrained=True)
visual_feat_model.to(torch.device('cuda'))
visual_feat_model.eval()
layer = visual_feat_model._modules.get('avgpool')
else:
print('visual feature model does not exist')
hparams.use_visual_feat = False
hparams.w_visual = 0
visual_feat_model = None
layer = None
else:
visual_feat_model = None
layer = None
if hparams.use_pose:
if hparams.pose_model == 'mobile-deconv':
from network_mobile_deconv import Network
pose_model_path = "../other_utils/lighttrack/weights/mobile-deconv/snapshot_296.ckpt"
elif hparams.pose_model == 'MSRA152':
from network_MSRA152 import Network
pose_model_path = "../other_utils/lighttrack/weights/MSRA152/MSRA_snapshot_285.ckpt"
elif hparams.pose_model == 'CPN101':
from network_CPN101 import Network
pose_model_path = '../other_utils/lighttrack/weights/CPN101/CPN_snapshot_293.ckpt'
else:
sys.exit('pose model not available')
# initialize pose estimator
pose_estimator = Tester(Network(), cfg)
pose_estimator.load_weights(pose_model_path)
else:
pose_estimator = None
if hparams.data_name == 'issia':
base_image_folder = '../../data/issia/frames/'
base_annotation_folder = '../../data/issia/annotations/'
rescale_bbox = [0., 0.]
if hparams.data_name == 'SoccerNet':
base_image_folder = '../../data/SoccerNet/sequences/'
base_annotation_folder = None
rescale_bbox = [0., 0.]
if hparams.data_name == 'panorama':
base_image_folder = '../../data/panorama/frames/'
base_annotation_folder = None
rescale_bbox = [0., 0.]
if hparams.data_name == 'SPD':
base_image_folder = '../../data/SPD/frames/'
base_annotation_folder = None
rescale_bbox = [0., 0.]
for s in natsorted(os.listdir(base_image_folder), alg=ns.PATH | ns.IGNORECASE):
print('eval tracking on seq', s)
image_folder = base_image_folder + str(s) + '/'
if base_annotation_folder is not None:
annotation_folder = base_annotation_folder + str(s) + '/'
else:
annotation_folder = None
base_dir = hparams.output_path + '/output_tracking'
if not os.path.exists(base_dir):
os.mkdir(base_dir)
base_dir = os.path.join(base_dir, hparams.data_name)
if not os.path.exists(base_dir):
os.mkdir(base_dir)
base_dir = os.path.join(base_dir, str(s))
if not os.path.exists(base_dir):
os.mkdir(base_dir)
visualize_folder = os.path.join(base_dir, 'visualize_tracking')
if not os.path.exists(visualize_folder):
os.mkdir(visualize_folder)
output_folder = os.path.join(base_dir, 'output_tracking')
if not os.path.exists(output_folder):
os.mkdir(output_folder)
output_video_path = os.path.join(output_folder, "out.mp4")
output_csv_path = os.path.join(output_folder, "out.csv")
if hparams.write_csv and os.path.exists(output_csv_path):
continue
out = light_track(pose_estimator, model_detection, visual_feat_model, layer,
image_folder, annotation_folder, rescale_bbox, hparams.rescale_img_factor,
visualize_folder, output_video_path, output_csv_path, hparams.use_features,
hparams.w_spacial, hparams.w_visual, hparams.w_pose, hparams.use_IOU, hparams.spacial_iou_thresh,
hparams.detection_score_thres, hparams.use_pose, hparams.use_visual_feat, hparams.imagenet_model,
hparams.display_pose, hparams.use_GT_position, hparams.flag_method,hparams.n_img_max, hparams.init_frame,
hparams.frame_interval, hparams.write_csv, hparams.write_video, hparams.keyframe_interval, hparams.visualize,
hparams.use_filter_tracks, hparams.thres_count_ids, hparams.visual_metric,
hparams.N_frame_lost_keep, hparams.N_past_to_keep, hparams.use_ReID_module,
hparams.N_past_to_keep_reID,hparams.max_vis_feat, max_dist_factor_feat, hparams.max_vis_reID,
max_dist_factor_reID,
hparams.use_track_branch_embed)
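# Example invocation (illustrative; paths and values are placeholders):
#   python main_tracking.py --data_name issia \
#       --checkpoint ../../checkpoints_runs/player_det_resnet18_student.pth \
#       --n_img_max 50 --write_csv --write_video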
| 50.912281 | 139 | 0.67419 | 1,475 | 11,608 | 4.959322 | 0.139661 | 0.052905 | 0.099932 | 0.031579 | 0.453042 | 0.379631 | 0.332878 | 0.225974 | 0.200547 | 0.159535 | 0 | 0.01086 | 0.21468 | 11,608 | 227 | 140 | 51.136564 | 0.791575 | 0.002154 | 0 | 0.188776 | 0 | 0 | 0.154238 | 0.049253 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.066327 | 0 | 0.066327 | 0.010204 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
74f5824cf904c5800c6d4ef10dc07a58fe417b71 | 7,245 | py | Python | src/analyse/ternary.py | timtroendle/money-land | fe3ed6e531cfe91156886d4fa685a14840749f36 | [
"MIT"
] | null | null | null | src/analyse/ternary.py | timtroendle/money-land | fe3ed6e531cfe91156886d4fa685a14840749f36 | [
"MIT"
] | null | null | null | src/analyse/ternary.py | timtroendle/money-land | fe3ed6e531cfe91156886d4fa685a14840749f36 | [
"MIT"
] | null | null | null | from dataclasses import dataclass
import ternary
import numpy as np
import pandas as pd
import xarray as xr
import matplotlib
import matplotlib.pyplot as plt
from matplotlib import gridspec
import seaborn as sns
TICK_FONT_SIZE = 9
RED = "#A01914"
BLUE = "#4F6DB8"
SEQUENTIAL_PALETTE = sns.light_palette(RED, as_cmap=True)
RED_TO_BLUE = [ # from https://gka.github.io using lightness correction
'#002d6e', '#375aa2', '#6f8ad1', '#a7bffa',
'#f5f5f5', '#fdad97', '#e36b55', '#b23125', '#720000'
]
DIVERGING_PALETTE = matplotlib.colors.LinearSegmentedColormap.from_list("signature-BlRd", RED_TO_BLUE)
idx = pd.IndexSlice
@dataclass
class PlotData:
data: pd.Series
norm: matplotlib.colors.Normalize
left_axis_label: str
panel_name: str
bottom_axis_label: str = "Utility-scale PV (%) →"
right_axis_label: str = "← Onshore wind (%)"
def plot_both_ternary(path_to_data, path_to_plot):
plot_datas = read_data(path_to_data)
fig = plt.figure(figsize=(7.5, 7.5))
gs = gridspec.GridSpec(3, 2, width_ratios=[5, 5], height_ratios=[25, 25, 1])
ax_1 = fig.add_subplot(gs[0, 0])
ax_2 = fig.add_subplot(gs[0, 1])
ax_3 = fig.add_subplot(gs[1, 0])
ax_4 = fig.add_subplot(gs[1, 1])
cbar_ax_1 = fig.add_subplot(gs[2, 0])
cbar_ax_2 = fig.add_subplot(gs[2, 1])
plot_ternary(plot_datas[0], ax=ax_1, cmap=SEQUENTIAL_PALETTE)
plot_ternary(plot_datas[1], ax=ax_2, cmap=DIVERGING_PALETTE)
plot_ternary(plot_datas[2], ax=ax_3, cmap=SEQUENTIAL_PALETTE)
plot_ternary(plot_datas[3], ax=ax_4, cmap=DIVERGING_PALETTE)
plot_sequential_colorbar(fig, cbar_ax_1, plot_datas[0].norm, cmap=SEQUENTIAL_PALETTE,
label="Cost relative to cost minimal case")
plot_diverging_colorbar(fig, cbar_ax_2, plot_datas[1].norm, cmap=DIVERGING_PALETTE,
label="Land requirements relative to cost minimal case",
land_use_data=plot_datas[1].data)
plt.subplots_adjust(
left=0.05,
bottom=0.07,
right=0.95,
top=0.98,
wspace=0.2,
hspace=0.05
)
fig.savefig(path_to_plot, pil_kwargs={"compression": "tiff_lzw"})
def read_data(path_to_data):
data = xr.open_dataset(path_to_data)
data.coords["roof"] = data.coords["roof"] // 10
data.coords["util"] = data.coords["util"] // 10
data.coords["wind"] = data.coords["wind"] // 10
data.coords["offshore"] = data.coords["offshore"] // 10
data = (
data
.mean("sample_id")
.sel(scenario=(data.roof == 0) | (data.offshore == 0))
.to_dataframe()
.set_index(["util", "wind", "roof", "offshore"])
)
data = data / data.loc[data.cost.idxmin()]
return [
PlotData(
data=filter_three_dimensions(data.cost, "roof"),
left_axis_label="← Rooftop PV (%)",
norm=matplotlib.colors.Normalize(vmin=data.cost.min(), vmax=data.cost.max()),
panel_name="a"
),
PlotData(
data=filter_three_dimensions(data.land_use, "roof"),
left_axis_label="← Rooftop PV (%)",
norm=matplotlib.colors.Normalize(vmin=data.land_use.min(), vmax=1 + (1 - data.land_use.min())),
panel_name="b"
),
PlotData(
data=filter_three_dimensions(data.cost, "offshore"),
left_axis_label="← Offshore wind (%)",
norm=matplotlib.colors.Normalize(vmin=data.cost.min(), vmax=data.cost.max()),
panel_name="c"
),
PlotData(
data=filter_three_dimensions(data.land_use, "offshore"),
left_axis_label="← Offshore wind (%)",
norm=matplotlib.colors.Normalize(vmin=data.land_use.min(), vmax=1 + (1 - data.land_use.min())),
panel_name="d"
)
]
def filter_three_dimensions(data, case):
if case == "roof":
column = "offshore"
else:
column = "roof"
return (
data
.reset_index()[data.reset_index()[column] == 0]
.drop(columns=[column])
.set_index(["util", "wind", case])
.iloc[:, 0]
)
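# Worked example (added): with case="roof" the rows with any offshore share are
# dropped, the "offshore" column is removed, and the remaining cost or land-use
# series is re-indexed by (util, wind, roof) so it fits a single ternary diagram.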
def plot_ternary(plot_data, ax, cmap):
scale = 10
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.spines['left'].set_visible(False)
figure, tax = ternary.figure(ax=ax, scale=scale)
tax.boundary(linewidth=1.0)
tax.heatmap(
plot_data.data.to_dict(),
scale=10,
style="triangular",
colorbar=False,
cmap=cmap,
vmin=plot_data.norm.vmin,
vmax=plot_data.norm.vmax
)
tax.bottom_axis_label(plot_data.bottom_axis_label, ha="center")
tax.right_axis_label(plot_data.right_axis_label, offset=0.16)
tax.left_axis_label(plot_data.left_axis_label, ha="center", offset=0.14)
tax.ticks(ticks=range(0, 110, 20), axis='b', linewidth=1, multiple=1, offset=0.02, fontsize=TICK_FONT_SIZE)
tax.ticks(ticks=range(0, 110, 20), axis='l', linewidth=1, multiple=1, offset=0.03, fontsize=TICK_FONT_SIZE)
tax.ticks(ticks=range(0, 110, 20), axis='r', linewidth=1, multiple=1, offset=0.04, fontsize=TICK_FONT_SIZE)
tax.clear_matplotlib_ticks()
tax._redraw_labels()
ax.set_title(plot_data.panel_name, loc="left")
ax.set_aspect(1)
def plot_sequential_colorbar(fig, ax, norm, cmap, label):
s_m = matplotlib.cm.ScalarMappable(cmap=cmap, norm=norm)
s_m.set_array([])
cbar = fig.colorbar(s_m, ax=ax, fraction=1, aspect=35, shrink=1.0, orientation="horizontal")
cbar_ticks = np.linspace(
start=norm.vmin,
stop=norm.vmax,
num=4
)
cbar.set_ticks(cbar_ticks)
cbar.set_ticklabels(["{:.1f}".format(tick)
for tick in cbar.get_ticks()])
cbar.outline.set_linewidth(0)
cbar.set_label(label)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.spines['left'].set_visible(False)
ax.axis('off')
def plot_diverging_colorbar(fig, ax, norm, cmap, label, land_use_data):
s_m = matplotlib.cm.ScalarMappable(cmap=cmap, norm=norm)
cmap = s_m.get_cmap()
rel_max = (land_use_data.max() - land_use_data.min()) / (norm.vmax - norm.vmin)
colors = cmap(np.linspace(0, rel_max, cmap.N))
cmap = matplotlib.colors.LinearSegmentedColormap.from_list('cut_jet', colors)
s_m = matplotlib.cm.ScalarMappable(cmap=cmap, norm=matplotlib.colors.Normalize(vmin=0, vmax=land_use_data.max()))
s_m.set_array([])
cbar = fig.colorbar(s_m, ax=ax, fraction=1, aspect=35, shrink=1.0, orientation="horizontal")
cbar_ticks = [0, 0.5, 1.0, land_use_data.max()]
cbar.set_ticks(cbar_ticks)
cbar.set_ticklabels(["{:.1f}".format(tick)
for tick in cbar.get_ticks()])
cbar.outline.set_linewidth(0)
cbar.set_label(label)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.spines['left'].set_visible(False)
ax.axis('off')
if __name__ == "__main__":
plot_both_ternary(
path_to_data=snakemake.input.results,
path_to_plot=snakemake.output[0]
)
| 35.866337 | 117 | 0.642926 | 1,029 | 7,245 | 4.314869 | 0.217687 | 0.026351 | 0.040541 | 0.042117 | 0.496622 | 0.43018 | 0.373423 | 0.336486 | 0.301351 | 0.281532 | 0 | 0.03263 | 0.208972 | 7,245 | 201 | 118 | 36.044776 | 0.741057 | 0.007315 | 0 | 0.264045 | 0 | 0 | 0.077608 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.033708 | false | 0 | 0.050562 | 0 | 0.134831 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
74f5eeb590efc75b4298bcbbba1e165b62b754b6 | 20,491 | py | Python | cogs/game/minigames/black_box/game.py | FellowHashbrown/omega-psi-py | 4ea33cdbef15ffaa537f2c9e382de508c58093fc | [
"MIT"
] | 4 | 2018-12-23T08:49:40.000Z | 2021-03-25T16:51:43.000Z | cogs/game/minigames/black_box/game.py | FellowHashbrown/omega-psi-py | 4ea33cdbef15ffaa537f2c9e382de508c58093fc | [
"MIT"
] | 23 | 2020-11-03T17:40:40.000Z | 2022-02-01T17:12:59.000Z | cogs/game/minigames/black_box/game.py | FellowHashbrown/omega-psi-py | 4ea33cdbef15ffaa537f2c9e382de508c58093fc | [
"MIT"
] | 1 | 2019-07-11T23:40:13.000Z | 2019-07-11T23:40:13.000Z | from discord import Embed
from random import randint
from cogs.errors import get_error_message
from cogs.game.minigames.base_game.game import Game
from cogs.game.minigames.black_box.variables import NUMBERS, SYMBOLS, LEFT, RIGHT, UP, DOWN, GUESS, DIRECT, FINALIZE, HIT, MISS
from cogs.game.minigames.functions import wait_for_reaction
from util.database.database import database
from util.functions import get_embed_color
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
dir_to_initial = {
LEFT: "right",
RIGHT: "left",
UP: "bottom",
DOWN: "top"
}
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
class BlackBoxGame(Game):
"""A BlackBoxGame contains information about a game of
Black Box being played
"""
def __init__(self, bot, ctx, challenger):
super().__init__(
bot, ctx,
challenger = challenger,
opponent = challenger
)
self.current_player = 0
self.locations = []
self.message = None
self.guesses = {
"left": [ None ] * 8,
"right": [ None ] * 8,
"top": [ None ] * 8,
"bottom": [ None ] * 8
}
self.amt_guesses = 0
self.location_guesses = []
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
def get_black_box(self, *, show_atoms=False) -> str:
"""Turns the black box into emojis to present inside
a Discord Embed object
:param show_atoms: Whether or not to actually show the atoms in the black box string
"""
# Add the top layer of column emojis
grid = ":white_large_square: "
for column in range(8):
if self.guesses["top"][column] is not None:
grid += self.guesses["top"][column] + " "
else:
grid += NUMBERS[column] + " "
grid += ":white_large_square:\n"
# Add each row of the black box
for row in range(8):
# Add the left column of row emojis
if self.guesses["left"][row] is not None:
grid += self.guesses["left"][row] + " "
else:
grid += NUMBERS[row] + " "
for col in range(8):
# If we want to show the atoms (at the end of the game)
# we choose specific circles for the following:
# actual location of atoms: blue circle
# correct location of atom: green circle
# incorrect location of atom: red circle
if show_atoms:
if (col, row) in self.locations and (col, row) in self.location_guesses:
grid += ":green_circle: "
elif (col, row) in self.locations:
grid += ":blue_circle: "
elif (col, row) in self.location_guesses:
grid += ":red_circle: "
else:
grid += ":black_large_square: "
# However, if we are not showing the atoms, just show white squares
# for where the guesses are and black squares for other squares
else:
if (col, row) in self.location_guesses:
grid += ":white_large_square: "
else:
grid += ":black_large_square: "
# Add the right column of row emojis
if self.guesses["right"][row] is not None:
grid += self.guesses["right"][row] + "\n"
else:
grid += NUMBERS[row] + "\n"
# Add the bottom layer of column emojis
grid += ":white_large_square: "
for column in range(8):
if self.guesses["bottom"][column] is not None:
grid += self.guesses["bottom"][column] + " "
else:
grid += NUMBERS[column] + " "
grid += ":white_large_square: \n"
return grid
def direct_laser(self, direction, offset):
"""Directs a laser through the black box
:param direction: The direction to move the laser in
:param offset: The row or column to move the laser through
"""
# Start off at the block on the proper side
# and create the movement tuple
if direction == "left":
movement = [-1, 0]
current = initial = (7, offset)
initial_side = "right"
elif direction == "right":
movement = [1, 0]
current = initial = (0, offset)
initial_side = "left"
elif direction == "up":
movement = [0, -1]
current = initial = (offset, 7)
initial_side = "bottom"
elif direction == "down":
movement = [0, 1]
current = initial = (offset, 0)
initial_side = "top"
# Continue looping until the laser either hits an atom
# or leaves the black box
blocks_processed = 0
while True:
# Check if the current location is an atom
if current in self.locations:
self.guesses[initial_side][offset] = HIT
break
# Test all the boxes in the corners of the current block:
# upper left, upper right, lower left, lower right
# tuple is (column, row)
ul_block = (current[0] - 1, current[1] - 1)
ur_block = (current[0] + 1, current[1] - 1)
ll_block = (current[0] - 1, current[1] + 1)
lr_block = (current[0] + 1, current[1] + 1)
if blocks_processed == 0:
if direction in ["left", "right"]:
up_block = (current[0], current[1] - 1)
lo_block = (current[0], current[1] + 1)
else:
ri_block = (current[0] + 1, current[1])
le_block = (current[0] - 1, current[1])
# Check the first blocks depending on if this is the first block processed
# if so, the laser bounces back to the input
if blocks_processed == 0:
if direction in ["left", "right"] and (up_block in self.locations or lo_block in self.locations):
movement[0] = -movement[0]
elif direction in ["up", "down"] and (ri_block in self.locations or le_block in self.locations):
movement[1] = -movement[1]
# Check the corner blocks even if on the first block
if movement[0] != 0:
u_block = ul_block if movement[0] == -1 else ur_block
l_block = ll_block if movement[0] == -1 else lr_block
if u_block in self.locations and l_block in self.locations:
movement[0] = -movement[0]
elif u_block in self.locations:
movement = [0, 1]
elif l_block in self.locations:
movement = [0, -1]
else:
l_block = ul_block if movement[1] == -1 else ll_block
r_block = ur_block if movement[1] == -1 else lr_block
if l_block in self.locations and r_block in self.locations:
movement[1] = -movement[1]
elif l_block in self.locations:
movement = [1, 0]
elif r_block in self.locations:
movement = [-1, 0]
# Check if the next movement will leave the black box
if ((current[0] + movement[0]) >= 8 or (current[0] + movement[0]) < 0 or
(current[1] + movement[1]) >= 8 or (current[1] + movement[1]) < 0):
if current == initial:
self.guesses[initial_side][offset] = MISS
else:
self.guesses[initial_side][offset] = SYMBOLS[self.amt_guesses]
if current[0] == 0 and movement[0] == -1:
self.guesses["left"][current[1]] = SYMBOLS[self.amt_guesses]
elif current[0] == 7 and movement[0] == 1:
self.guesses["right"][current[1]] = SYMBOLS[self.amt_guesses]
elif current[1] == 0 and movement[1] == -1:
self.guesses["top"][current[0]] = SYMBOLS[self.amt_guesses]
elif current[1] == 7 and movement[1] == 1:
self.guesses["bottom"][current[0]] = SYMBOLS[self.amt_guesses]
self.amt_guesses += 1
break
current = (current[0] + movement[0], current[1] + movement[1])
blocks_processed += 1
async def setup(self):
"""Sets up the game by asking the player how many atoms they want"""
message = await self.ctx.send(embed = Embed(
title = "Configuration",
description = "How many atoms do you want to exist in the black box?",
colour = await get_embed_color(self.challenger)
))
for reaction in NUMBERS[2:5]:
await message.add_reaction(reaction)
num_atoms = await wait_for_reaction(
self.bot, message,
self.challenger, NUMBERS[2:5])
num_atoms = NUMBERS.index(num_atoms) + 1
# Add the locations of the "atoms"
invalid_locations = []
for locations in range(num_atoms):
location = (randint(0, 7), randint(0, 7))
while location in invalid_locations:
location = (randint(0, 7), randint(0, 7))
self.locations.append(location)
# Add invalid locations that exist around the created "atom"
for r_off in range(-1, 2):
for c_off in range(-1, 2):
if (location[0] + c_off >= 0 and location[1] + r_off >= 0 and
location[0] + c_off < 8 and location[1] + r_off < 8):
invalid_locations.append((location[0] + c_off, location[1] + r_off))
async def play(self):
"""Allows the player to play a game of Black Box"""
await self.setup()
# Continue looping until the player finishes their game
self.message = await self.ctx.send("_ _")
while True:
found = False
valid_options = [GUESS]
# If there can still be lasers pushed through, add the DIRECT
# reaction
for direction in self.guesses:
for item in self.guesses[direction]:
if item is None:
valid_options.append(DIRECT)
found = True
break
if found:
break
            # If the number of location guesses equals the number of atom
            # locations, give the option to finalize the guesses
if len(self.location_guesses) == len(self.locations):
valid_options.append(FINALIZE)
await self.message.edit(
embed = Embed(
title = "Black Box - {} Atoms".format(len(self.locations)),
description = "{}\n\n{}\n{}\n{}".format(
self.get_black_box(),
"To make a guess, react with {}".format(GUESS),
"To direct a \"laser\", react with {}".format(DIRECT) if DIRECT in valid_options else "",
"To finalize your guesses, react with {}".format(FINALIZE) if FINALIZE in valid_options else ""
),
color = await get_embed_color(self.challenger)
).add_field(
name = "Symbol Meanings",
value = (
"""
{} This symbol means that you hit an atom
{} This symbol means that the laser you directed came back to the same spot
Any other symbol means that the directed laser went in through one spot
and came out at the matching symbol's spot
"""
).format(HIT, MISS)
))
for reaction in valid_options:
await self.message.add_reaction(reaction)
# Ask the player if they want to make a guess or direct a "laser" (or finalize their guesses)
option = await wait_for_reaction(
self.bot, self.message,
self.challenger, valid_options)
await self.message.clear_reactions()
if option == GUESS:
await self.make_location_guess()
elif option == DIRECT:
await self.make_input_guess()
else:
if await self.finalize_guesses():
break
async def make_location_guess(self):
"""Allows the player to make a guess on where an atom may be"""
# Check if all guesses have been made
# if so, don't try asking for any more guesses
if len(self.location_guesses) == len(self.locations):
await self.ctx.send(embed = get_error_message(
"You have already made {} guesses. Remove one to make another!".format(
len(self.locations)
)
))
await self.message.edit(
embed = Embed(
title = "Black Box - {} Atoms".format(len(self.locations)),
description = "{0}\n\n{1} {2}\n{1} {3}".format(
self.get_black_box(), GUESS,
"To place a guess, react with the column first and then the row",
"To remove a guess, react with the same column and row as it is in"
),
colour = await get_embed_color(self.challenger)
).add_field(
name = "Symbol Meanings",
value = (
"""
{} This symbol means that you hit an atom
{} This symbol means that the laser you directed came back to the same spot
Any other symbol means that the directed laser went in through one spot
and came out at the matching symbol's spot
"""
).format(HIT, MISS)
))
for number in NUMBERS:
await self.message.add_reaction(number)
column = await wait_for_reaction(
self.bot, self.message,
self.challenger, NUMBERS)
row = await wait_for_reaction(
self.bot, self.message,
self.challenger, NUMBERS)
await self.message.clear_reactions()
column = NUMBERS.index(column)
row = NUMBERS.index(row)
if (column, row) in self.location_guesses:
self.location_guesses.remove((column, row))
elif len(self.location_guesses) < len(self.locations):
self.location_guesses.append((column, row))
async def make_input_guess(self):
"""Allows the player to make a guess on the sides of the black box"""
# Get the direction the user wants to move input through
# and which row or column they want to move input through
await self.message.edit(
embed = Embed(
title = "Black Box - {} Atoms".format(len(self.locations)),
description = "{}\n\n{}{}".format(
self.get_black_box(), DIRECT,
"Choose a direction to push a laser through using the directional arrows"
),
colour = await get_embed_color(self.challenger)
).add_field(
name = "Symbol Meanings",
value = (
"""
{} This symbol means that you hit an atom
{} This symbol means that the laser you directed came back to the same spot
Any other symbol means that the directed laser went in through one spot
and came out at the matching symbol's spot
"""
).format(HIT, MISS)
))
# Create a new list of valid direction reactions the user
# can react with
# Then add the reactions to the message and have the user
# select which direction they want to move in
directions = []
for direction in [LEFT, RIGHT, UP, DOWN]:
if not all(self.guesses[dir_to_initial[direction]]):
directions.append(direction)
for direction in directions:
await self.message.add_reaction(direction)
direction = await wait_for_reaction(
self.bot, self.message,
self.challenger, directions)
direction = {LEFT: "left", RIGHT: "right", UP: "up", DOWN: "down"}[direction]
# Ask the user which row or column to push a laser through
await self.message.clear_reactions()
await self.message.edit(
embed = Embed(
title = "Black Box - {} Atoms".format(len(self.locations)),
description = "{}\n\n{}{}".format(
self.get_black_box(), DIRECT,
"Choose which {} to push the laser through".format(
"row" if direction in ["left", "right"] else "column"
)
),
colour = await get_embed_color(self.challenger)
).add_field(
name = "Symbol Meanings",
value = (
"""
{} This symbol means that you hit an atom
{} This symbol means that the laser you directed came back to the same spot
Any other symbol means that the directed laser went in through one spot
and came out at the matching symbol's spot
"""
).format(HIT, MISS)
))
# Create a new list of valid number reactions the user
# can react with
# Then add the reactions to the message and have the user
# select which row or column they want to push a laser through
numbers = []
for i in range(len(NUMBERS)):
if ((direction == "left" and self.guesses["right"][i] is None) or
(direction == "right" and self.guesses["left"][i] is None) or
(direction == "up" and self.guesses["bottom"][i] is None) or
(direction == "down" and self.guesses["top"][i] is None)):
numbers.append(NUMBERS[i])
for number in numbers:
await self.message.add_reaction(number)
offset = await wait_for_reaction(
self.bot, self.message,
self.challenger, numbers)
offset = NUMBERS.index(offset)
await self.message.clear_reactions()
self.direct_laser(direction, offset) # Direct the laser through the black box
async def finalize_guesses(self):
"""Finalizes the guesses of the player and determines if they won or lost
        To win, the player must guess at least all but one of the atom
        locations correctly (i.e. at most one location guess may be wrong)
"""
if len(self.location_guesses) != len(self.locations):
await self.ctx.send(embed = get_error_message(
"You need to place at least {} more guess{}!".format(
len(self.locations) - len(self.location_guesses),
"" if len(self.location_guesses) == (len(self.locations) - 1) else "es"
)
))
return False
else:
correct = 0
for location in self.locations:
if location in self.location_guesses:
correct += 1
won = correct >= (len(self.locations) - 1)
embed = Embed(
title = "You Won!" if won else "You Lost :(",
description = self.get_black_box(show_atoms = True),
colour = await get_embed_color(self.challenger))
await self.message.edit(embed = embed)
await database.users.update_black_box(self.challenger, won)
return True
| 43.229958 | 127 | 0.516373 | 2,346 | 20,491 | 4.426257 | 0.127451 | 0.037558 | 0.023112 | 0.023112 | 0.499037 | 0.39975 | 0.356029 | 0.289099 | 0.267623 | 0.242103 | 0 | 0.012264 | 0.391147 | 20,491 | 473 | 128 | 43.321353 | 0.820055 | 0.128886 | 0 | 0.343558 | 0 | 0 | 0.0724 | 0.001392 | 0 | 0 | 0 | 0 | 0 | 1 | 0.009202 | false | 0 | 0.02454 | 0 | 0.046012 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
74f9d1119f5a828ec576c313747eb837e48217fb | 3,437 | py | Python | rewrite_example.py | Giangblackk/hacksterio-smart-nids | 283166e8880aaeb280520053b4dcd431d30b3ed3 | [
"MIT"
] | null | null | null | rewrite_example.py | Giangblackk/hacksterio-smart-nids | 283166e8880aaeb280520053b4dcd431d30b3ed3 | [
"MIT"
] | null | null | null | rewrite_example.py | Giangblackk/hacksterio-smart-nids | 283166e8880aaeb280520053b4dcd431d30b3ed3 | [
"MIT"
] | null | null | null | # Steps for offline training:
# 1. load benign pcap file
# 2. extract features
# 3. train feature mapper model and save model
import numpy as np
from kitsune.FeatureExtractor import FE
from kitsune.KitNET import corClust as CC
from kitsune.KitNET import dA as AE
from scipy.stats import norm
from matplotlib import pyplot as plt
if __name__ == "__main__":
# load benign pcap file
packet_file = "capEC2AMAZ-O4EL3NG-172.31.69.26a.pcap.tsv"
packet_limit = np.Inf
max_AE = 10
FM_grace = 10000
AD_grace = 20000
threshold_grace = 30000
learning_rate = 0.1
hidden_ratio = 0.75
# create feature extractor to get next input vector
fe = FE(packet_file, limit=packet_limit)
fm = CC.corClust(fe.get_num_features())
# get next input vector
print("Feature Mapper training")
curIndex = 0
while True:
x = fe.get_next_vector()
if len(x) == 0:
break
# train feature mapper
fm.update(x)
curIndex += 1
if curIndex == FM_grace:
break
# get trained feature mapper
feature_map = fm.cluster(max_AE)
print(feature_map)
# intialize ensemble layers and output layer
ensembleLayers = []
for m in feature_map:
params = AE.dA_params(
n_visible=len(m),
n_hidden=0,
lr=learning_rate,
corruption_level=0,
gracePeriod=0,
hiddenRatio=hidden_ratio,
)
ensembleLayers.append(AE.dA(params))
params = AE.dA_params(
len(feature_map),
n_hidden=0,
lr=learning_rate,
corruption_level=0,
gracePeriod=0,
hiddenRatio=hidden_ratio,
)
outputLayer = AE.dA(params)
print("Anomaly Detector training")
# put input vector into feature mapper to train it
while True:
x = fe.get_next_vector()
if len(x) == 0:
break
# train
S_l1 = np.zeros(len(ensembleLayers))
for a in range(len(ensembleLayers)):
xi = x[feature_map[a]]
S_l1[a] = ensembleLayers[a].train(xi)
outputLayer.train(S_l1)
curIndex += 1
if curIndex == AD_grace:
break
print("Prediction")
# execute trained model on benign part of dataset
RMSEs = []
while True:
x = fe.get_next_vector()
if len(x) == 0:
break
# execute
S_l1 = np.zeros(len(ensembleLayers))
for a in range(len(ensembleLayers)):
xi = x[feature_map[a]]
S_l1[a] = ensembleLayers[a].execute(xi)
pred = outputLayer.execute(S_l1)
RMSEs.append(pred)
curIndex += 1
if curIndex == threshold_grace:
break
# calculate threshold
benignSample = np.log(RMSEs)
logProbs = norm.logsf(np.log(RMSEs), np.mean(benignSample), np.std(benignSample))
print(np.min(logProbs), np.max(logProbs))
print(np.min(RMSEs), np.max(RMSEs))
# plot the RMSE anomaly scores
plt.figure(figsize=(10, 5))
fig = plt.scatter(range(len(RMSEs)), RMSEs, s=1.1, c=logProbs, cmap="RdYlGn")
plt.yscale("log")
plt.title("Anomaly Scores from Kitsune's Execution Phase")
plt.ylabel("RMSE (log scaled")
plt.xlabel("Time elapsed [min]")
figbar = plt.colorbar()
figbar.ax.set_ylabel("Log Probability\n ", rotation=270)
plt.show()
# save trained mapper to file
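    # Hedged sketch (not part of the original script): one way to persist the
    # trained components, assuming the kitsune objects are plain-picklable and
    # that "kitsune_model.pkl" is an acceptable output path.
    import pickle

    with open("kitsune_model.pkl", "wb") as model_file:
        pickle.dump(
            {
                "feature_map": feature_map,
                "ensemble_layers": ensembleLayers,
                "output_layer": outputLayer,
            },
            model_file,
        )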
| 26.037879 | 85 | 0.609252 | 450 | 3,437 | 4.533333 | 0.353333 | 0.029412 | 0.019608 | 0.017647 | 0.216176 | 0.216176 | 0.216176 | 0.216176 | 0.216176 | 0.216176 | 0 | 0.026283 | 0.291533 | 3,437 | 131 | 86 | 26.236641 | 0.811499 | 0.142566 | 0 | 0.4 | 0 | 0 | 0.072721 | 0.013998 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.066667 | 0 | 0.066667 | 0.066667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
74f9eecab8ce2f291f5fec2748dfb9c29a2b9af0 | 2,003 | py | Python | szz/Commit.py | fastluca/szz-mpi | 3ebc266cb98080f2c7d34ca6cdcc03b6ac0902ae | [
"MIT"
] | 1 | 2019-03-21T23:27:31.000Z | 2019-03-21T23:27:31.000Z | szz/Commit.py | fastluca/szz-mpi | 3ebc266cb98080f2c7d34ca6cdcc03b6ac0902ae | [
"MIT"
] | null | null | null | szz/Commit.py | fastluca/szz-mpi | 3ebc266cb98080f2c7d34ca6cdcc03b6ac0902ae | [
"MIT"
] | 1 | 2019-02-17T12:10:20.000Z | 2019-02-17T12:10:20.000Z | class Commit:
def __init__(self, sha: str, timestamp, author_id: str, committer_id: str, message: str, num_parents: int, num_additions: int, num_deletions: int, num_files_changed: int, files: int, src_loc_added: int, src_loc_deleted: int, num_src_files_touched: int, src_files: str):
self.__sha = sha
self.__timestamp = timestamp
self.__author_id = author_id
self.__committer_id = committer_id
self.__message = message
self.__num_parents = num_parents
self.__num_additions = num_additions
self.__num_deletions = num_deletions
self.__num_files_changed = num_files_changed
self.__files = files # semi-colon list of file names
self.__src_loc_added = src_loc_added
self.__src_loc_deleted = src_loc_deleted
self.__num_src_files_touched = num_src_files_touched
self.__src_files = src_files # semi-colon list of file names
@property
def sha(self):
return self.__sha
@property
def timestamp(self):
return self.__timestamp
@property
def author_id(self):
return self.__author_id
@property
def committer_id(self):
return self.__committer_id
@property
def message(self):
return self.__message
@property
def num_parents(self):
return self.__num_parents
@property
def num_additions(self):
return self.__num_additions
@property
def num_deletions(self):
return self.__num_deletions
@property
def num_files_changed(self):
return self.__num_files_changed
@property
def src_loc_added(self):
return self.__src_loc_added
@property
def src_loc_deleted(self):
return self.__src_loc_deleted
@property
def num_src_files_touched(self):
return self.__num_src_files_touched
@property
def src_files(self):
return self.__src_files
@property
def files(self):
return self.__files
| 27.067568 | 273 | 0.678982 | 259 | 2,003 | 4.72973 | 0.123552 | 0.125714 | 0.16 | 0.073469 | 0.151837 | 0.047347 | 0.047347 | 0 | 0 | 0 | 0 | 0 | 0.254618 | 2,003 | 73 | 274 | 27.438356 | 0.820496 | 0.029456 | 0 | 0.241379 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.258621 | false | 0 | 0 | 0.241379 | 0.517241 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 3 |
74fb274c512334729a04267dec3df32bc39cd9ae | 452 | py | Python | position/urls.py | drowolath/position | 2d27a56732d195003d35762931fd2484ac270501 | [
"BSD-2-Clause"
] | null | null | null | position/urls.py | drowolath/position | 2d27a56732d195003d35762931fd2484ac270501 | [
"BSD-2-Clause"
] | null | null | null | position/urls.py | drowolath/position | 2d27a56732d195003d35762931fd2484ac270501 | [
"BSD-2-Clause"
] | null | null | null | import views
from django.conf.urls import url
urlpatterns = [
url(
r'(?P<latitude>[\d.@+-]+)/(?P<longitude>[\d.@+-]+)',
views.mapit,
name='mapit'),
url(
        r'(?P<name>[a-zA-Z]+)',
views.trackers,
name='named_liveposition'),
url(
r'(?P<imei>\d{15})',
views.trackers,
name='liveposition'),
url(
r'^$',
views.index,
name='index'),
]
| 19.652174 | 60 | 0.462389 | 47 | 452 | 4.425532 | 0.468085 | 0.076923 | 0.072115 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.006579 | 0.327434 | 452 | 22 | 61 | 20.545455 | 0.677632 | 0 | 0 | 0.3 | 0 | 0 | 0.283186 | 0.154867 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.1 | 0 | 0.1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
74fb6a3fa3da896167c7fb6828069fd1cdde4840 | 249 | py | Python | pure_func.py | TomHam2021/Python2a_week5 | 49678f4e57a5ccdfb9b8312e163ed45b61b8eba1 | [
"MIT"
] | null | null | null | pure_func.py | TomHam2021/Python2a_week5 | 49678f4e57a5ccdfb9b8312e163ed45b61b8eba1 | [
"MIT"
] | null | null | null | pure_func.py | TomHam2021/Python2a_week5 | 49678f4e57a5ccdfb9b8312e163ed45b61b8eba1 | [
"MIT"
] | null | null | null | '''
# funtional programming undviker for-loopar men måste inte inehålla rekursiva anrop
def fib(n, a=0, b=1):
return a if n < 1 else \
b if n < 2 else \
fib(n - 1, b, a + b)
print(fib(100))
# Output: 354224848179261915075
'''
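# Hedged addition (not part of the original file): the same sequence computed
# without recursion or an explicit for-loop, folding with functools.reduce.
# fib_reduce is an illustrative name, not from the original snippet.
from functools import reduce

def fib_reduce(n):
    # fold the step (a, b) -> (b, a + b) n times, starting from (0, 1)
    a, _ = reduce(lambda acc, _: (acc[1], acc[0] + acc[1]), range(n), (0, 1))
    return a

# print(fib_reduce(100))
# Output: 354224848179261915075 (matches the recursive fib above)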
| 19.153846 | 83 | 0.610442 | 40 | 249 | 3.8 | 0.65 | 0.052632 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.15847 | 0.26506 | 249 | 12 | 84 | 20.75 | 0.672131 | 0.947791 | 0 | null | 0 | null | 0 | 0 | null | 0 | 0 | 0 | null | 1 | null | true | 0 | 0 | null | null | null | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 2 |
74fb9b012c42b23d4eedebb4c1b383a5c6b105c8 | 1,268 | py | Python | Codewars/5kyu/buddyPairs.py | Ry4nW/python-wars | 76e3fb24b7ae2abf35db592f1ad59cf8d5f9e508 | [
"MIT"
] | 1 | 2021-06-06T19:55:22.000Z | 2021-06-06T19:55:22.000Z | Codewars/5kyu/buddyPairs.py | Ry4nW/python-wars | 76e3fb24b7ae2abf35db592f1ad59cf8d5f9e508 | [
"MIT"
] | 1 | 2022-01-20T19:20:33.000Z | 2022-01-20T23:51:46.000Z | Codewars/5kyu/buddyPairs.py | Ry4nW/python-wars | 76e3fb24b7ae2abf35db592f1ad59cf8d5f9e508 | [
"MIT"
] | null | null | null | # Attempt
def buddy(start, limit):
add = 0
mDivisorSum = 0
nDivisorSum = 0
if (limit > start):
for n in range(start, limit + 1):
nDivisorSum = 0
# Gets divisors for i
for j in range(1, n // 2 + 1):
if type(n / j) != float:
nDivisorSum += 1
# Loops while divisor sum is less than i
while (mDivisorSum <= start + 1):
mDivisorSum = 0
m = n + add
# Iterates through half of the number
# to obtain it's proper divisors
for j in range(1, ((m) // 2) + 1):
if type(n / j) != float:
mDivisorSum += j
if mDivisorSum == n + 1 and nDivisorSum == m + 1:
return [n, m]
add += 1
return 'Nothing'
# Solution
def buddy(start, limit):
for n in range(start, limit + 1):
m = s(n)
if m > n and n == s(m):
return [n, m]
return "Nothing"
def s(n):
s = 0
    # iterate up to sqrt(n) and add each divisor pair, excluding 1 and n
    for i in range(2, int(n ** 0.5) + 1):
        if n % i == 0:
            s += i
            if i != n // i:  # don't double-count the root of a perfect square
                s += n // i
return s
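# Example from the kata (sanity check): the smallest buddy pair is (48, 75),
# because s(48) = 75 and s(75) = 48.
# print(buddy(10, 50))  # -> [48, 75]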
| 19.507692 | 65 | 0.395899 | 153 | 1,268 | 3.281046 | 0.30719 | 0.069721 | 0.051793 | 0.071713 | 0.195219 | 0.14741 | 0.14741 | 0 | 0 | 0 | 0 | 0.036566 | 0.503943 | 1,268 | 65 | 66 | 19.507692 | 0.761526 | 0.112776 | 0 | 0.363636 | 0 | 0 | 0.012511 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0 | 0 | 0 | 0.242424 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
74fbcda21347132ff292b55b6df302641ca59260 | 408 | py | Python | uitestcore/custom_assertion.py | talawson05/ui-test-core | 6578398d6cfad97cee552f676a027b8b37755a73 | [
"MIT"
] | 8 | 2019-09-16T14:31:38.000Z | 2022-02-03T21:26:04.000Z | uitestcore/custom_assertion.py | talawson05/ui-test-core | 6578398d6cfad97cee552f676a027b8b37755a73 | [
"MIT"
] | 12 | 2019-09-13T14:47:26.000Z | 2022-01-10T11:24:52.000Z | uitestcore/custom_assertion.py | talawson05/ui-test-core | 6578398d6cfad97cee552f676a027b8b37755a73 | [
"MIT"
] | 4 | 2019-09-16T14:49:53.000Z | 2022-02-02T15:42:01.000Z | """
Create any custom assertion in here
"""
from hamcrest import assert_that, is_
def assert_no_failures(failure_description):
"""
Assert that the string passed is empty representing no failures - to be used in test steps
:param failure_description: a string describing failures in a test step, or empty if no failures
"""
assert_that(failure_description, is_(""), failure_description)
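# Minimal usage sketch (hypothetical test step, not part of this module):
# a step function collects failure text and asserts that none was produced.
#
# def check_homepage_banner(page):
#     failures = ""
#     if not page.banner_visible:
#         failures += "banner is not visible; "
#     if page.banner_text != "Welcome":
#         failures += "unexpected banner text; "
#     assert_no_failures(failures)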
| 31.384615 | 100 | 0.75 | 57 | 408 | 5.192982 | 0.578947 | 0.243243 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.181373 | 408 | 12 | 101 | 34 | 0.886228 | 0.546569 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0.333333 | false | 0 | 0.333333 | 0 | 0.666667 | 0 | 0 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 5 |
74fc030b30d76aec5a1c6845ceaf50cdeded83c1 | 297 | py | Python | blog/app/__init__.py | kiza054/woodhall-scout-blog-mongodb | 380f181cbe987eda46ed64a774d3188344f4de55 | [
"MIT"
] | null | null | null | blog/app/__init__.py | kiza054/woodhall-scout-blog-mongodb | 380f181cbe987eda46ed64a774d3188344f4de55 | [
"MIT"
] | null | null | null | blog/app/__init__.py | kiza054/woodhall-scout-blog-mongodb | 380f181cbe987eda46ed64a774d3188344f4de55 | [
"MIT"
] | null | null | null | from flask import Flask
from flask_admin import Admin
from flask_login import LoginManager
app = Flask(__name__)
app.config.from_object('config')
admin = Admin(app, name='microblog', template_mode='bootstrap3')
lm = LoginManager()
lm.init_app(app)
lm.login_view = 'login'
from app import views
| 21.214286 | 64 | 0.781145 | 44 | 297 | 5.045455 | 0.409091 | 0.121622 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.003817 | 0.117845 | 297 | 13 | 65 | 22.846154 | 0.843511 | 0 | 0 | 0 | 0 | 0 | 0.10101 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.4 | 0 | 0.4 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
74fd94e4feeda1bcbc1a8631162221222bc2c165 | 2,010 | py | Python | bin/run_all_benchmarks.py | finiteautomata/finetune_vs_scratch | 444c9f9f2e1086f833c674e5d819b7a16ff8345a | [
"MIT"
] | 12 | 2021-11-19T18:40:17.000Z | 2022-03-07T10:56:54.000Z | bin/run_all_benchmarks.py | finiteautomata/finetune_vs_scratch | 444c9f9f2e1086f833c674e5d819b7a16ff8345a | [
"MIT"
] | 2 | 2022-02-20T17:28:00.000Z | 2022-03-06T21:34:21.000Z | bin/run_all_benchmarks.py | finiteautomata/finetune_vs_scratch | 444c9f9f2e1086f833c674e5d819b7a16ff8345a | [
"MIT"
] | null | null | null | import os
import re
import fire
import json
import logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def run_all(times=10):
models = [
("finiteautomata/robertuito-base-uncased", "robertuito-uncased.json"),
("finiteautomata/robertuito-base-cased", "robertuito-cased.json"),
("finiteautomata/robertuito-base-deacc", "robertuito-deacc.json"),
("bertin-project/bertin-roberta-base-spanish", "bertin.json"),
("BSC-TeMU/roberta-base-bne", "roberta-bne.json"),
("dccuchile/bert-base-spanish-wwm-uncased", "beto-uncased.json"),
("models/beto-uncased-2500", "beto-uncased-2500.json"),
("models/beto-uncased-5000", "beto-uncased-5000.json"),
("models/beto-uncased-10000", "beto-uncased-10000.json"),
("models/beto-uncased-20000", "beto-uncased-20000.json"),
("dccuchile/bert-base-spanish-wwm-cased", "beto-cased.json"),
("models/beto-cased-2500", "beto-cased-2500.json"),
("models/beto-cased-5000", "beto-cased-5000.json"),
("models/beto-cased-10000", "beto-cased-10000.json"),
("models/beto-cased-20000", "beto-cased-20000.json"),
]
logger.info("Running benchmarks")
for model_name, output_path in models:
logger.info(f"Running model: {model_name}")
output_path=f"output/{output_path}"
if os.path.exists(output_path):
with open(output_path) as f:
report = json.load(f)
run_times = len(report["hate"])
if run_times >= times:
logger.info(f"Skipping model: {model_name}")
continue
else:
logger.info(f"Found {run_times}")
effective_times = times - run_times
else:
effective_times = times
cmd = f"python bin/run_benchmark.py {model_name} {effective_times} {output_path} --max_length 128"
os.system(cmd)
if __name__ == "__main__":
fire.Fire(run_all)
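# Usage note (assuming fire exposes keyword arguments as CLI flags):
#   python bin/run_all_benchmarks.py             # 10 runs per model (default)
#   python bin/run_all_benchmarks.py --times=20  # resume and extend to 20 runs per model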
| 34.655172 | 106 | 0.621891 | 243 | 2,010 | 5.00823 | 0.292181 | 0.081348 | 0.09203 | 0.069022 | 0.050945 | 0.050945 | 0 | 0 | 0 | 0 | 0 | 0.049296 | 0.222886 | 2,010 | 57 | 107 | 35.263158 | 0.729834 | 0 | 0 | 0.044444 | 0 | 0 | 0.471877 | 0.317571 | 0 | 0 | 0 | 0 | 0 | 1 | 0.022222 | false | 0 | 0.111111 | 0 | 0.133333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
74fdb498bb4874db8d8ec3451adbf242259fd94c | 1,110 | py | Python | Python/Backspace_String_Compare.py | treethree/LeetCode | 4c6d6e1ee92d87424fe5b9f20b8eef8d34e74761 | [
"Unlicense"
] | null | null | null | Python/Backspace_String_Compare.py | treethree/LeetCode | 4c6d6e1ee92d87424fe5b9f20b8eef8d34e74761 | [
"Unlicense"
] | null | null | null | Python/Backspace_String_Compare.py | treethree/LeetCode | 4c6d6e1ee92d87424fe5b9f20b8eef8d34e74761 | [
"Unlicense"
] | null | null | null | #Approach #1: Build String(Stack)
#Time Complexity: O(M + N), where M, N are the lengths of S and T respectively.
#Space Complexity: O(M + N).
class Solution(object):
def backspaceCompare(self, S, T):
def build(S):
ans = []
for c in S:
if c != '#':
ans.append(c)
elif ans:
ans.pop()
return "".join(ans)
return build(S) == build(T)
#Approach #2: Two Pointer
#Time Complexity: O(M + N), where M, N are the lengths of S and T respectively.
#Space Complexity: O(1).
class Solution2():
def backspaceCompare(self, S, T):
i, j = len(S) - 1, len(T) - 1
backS = backT = 0
while True:
while i >= 0 and (backS or S[i] == '#'):
backS += 1 if S[i] == '#' else -1
i -= 1
while j >= 0 and (backT or T[j] == '#'):
backT += 1 if T[j] == '#' else -1
j -= 1
if not (i >= 0 and j >= 0 and S[i] == T[j]):
return i == j == -1
i, j = i - 1, j - 1
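# Quick sanity check (hypothetical usage, not part of the original solutions):
# both approaches should agree on the standard examples for this problem.
if __name__ == "__main__":
    cases = [("ab#c", "ad#c", True), ("ab##", "c#d#", True), ("a#c", "b", False)]
    for S, T, expected in cases:
        assert Solution().backspaceCompare(S, T) == expected
        assert Solution2().backspaceCompare(S, T) == expected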
| 32.647059 | 79 | 0.43964 | 159 | 1,110 | 3.069182 | 0.320755 | 0.020492 | 0.07377 | 0.079918 | 0.397541 | 0.295082 | 0.295082 | 0.295082 | 0.295082 | 0.295082 | 0 | 0.030628 | 0.411712 | 1,110 | 33 | 80 | 33.636364 | 0.716692 | 0.234234 | 0 | 0.08 | 0 | 0 | 0.005938 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.12 | false | 0 | 0 | 0 | 0.32 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
74fe0f956d636c3650d5cdff79ce6ca98d344de7 | 7,008 | py | Python | hyperion/pdfs/jfa/jfa_total.py | hyperion-ml/hyperion | c4c9eee0acab1ba572843373245da12d00dfffaa | [
"Apache-2.0"
] | 14 | 2021-12-19T04:24:15.000Z | 2022-03-18T03:24:04.000Z | hyperion/pdfs/jfa/jfa_total.py | hyperion-ml/hyperion | c4c9eee0acab1ba572843373245da12d00dfffaa | [
"Apache-2.0"
] | null | null | null | hyperion/pdfs/jfa/jfa_total.py | hyperion-ml/hyperion | c4c9eee0acab1ba572843373245da12d00dfffaa | [
"Apache-2.0"
] | 5 | 2021-12-14T20:41:27.000Z | 2022-02-24T14:18:11.000Z | """
Copyright 2018 Johns Hopkins University (Author: Jesus Villalba)
Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""
import numpy as np
from scipy import linalg as sla
from ...hyp_defs import float_cpu
from ...utils.math import (
invert_pdmat,
invert_trimat,
logdet_pdmat,
vec2symmat,
symmat2vec,
)
from ..core.pdf import PDF
class JFATotal(PDF):
def __init__(self, K, y_dim=None, T=None, **kwargs):
super(JFATotal, self).__init__(**kwargs)
if T is not None:
y_dim = T.shape[0]
self.K = K
self.y_dim = y_dim
self.T = T
# aux
self._TT = None
self.__upptr = None
def reset_aux(self):
self._TT = None
@property
    def is_init(self):
if self._is_init:
return True
if self.T is not None:
self._is_init = True
return self._is_init
def initialize(self, N, F):
assert N.shape[0] == self.K
self.T = np.random.randn(self.y_dim, F.shape[1]).astype(float_cpu(), copy=False)
def compute_py_g_x(
self, N, F, G=None, return_cov=False, return_elbo=False, return_acc=False
):
assert self.is_init
x_dim = int(F.shape[1] / self.K)
M = F.shape[0]
y_dim = self.y_dim
compute_inv = return_cov or return_acc
return_tuple = compute_inv or return_elbo
TF = np.dot(F, self.T.T)
L = self.compute_L(self.TT, N, self._upptr)
y = np.zeros((M, y_dim), dtype=float_cpu())
if return_cov:
            Sy = np.zeros((M, y_dim * (y_dim + 1) // 2), dtype=float_cpu())
else:
Sy = None
if return_elbo:
elbo = np.zeros((M,), dtype=float_cpu())
if return_acc:
Py = np.zeros((y_dim, y_dim), dtype=float_cpu())
            Ry = np.zeros((self.K, y_dim * (y_dim + 1) // 2), dtype=float_cpu())
Li = np.zeros((self.y_dim, self.y_dim), dtype=float_cpu())
for i in range(N.shape[0]):
Li[self._upptr] = L[i]
r = invert_pdmat(
Li, right_inv=True, return_logdet=return_elbo, return_inv=compute_inv
)
mult_iL = r[0]
if return_elbo:
elbo[i] = -r[2] / 2
if compute_inv:
iL = r[-1]
y[i] = mult_iL(TF[i])
if return_cov:
Sy[i] = iL[self.__upptr]
if return_acc:
iL += np.outer(y[i], y[i])
Py += iL
                Ry += iL[self._upptr] * N[i][:, None]
if not return_tuple:
return y
r = [y]
if return_cov:
r += [Sy]
if return_elbo:
if G is not None:
elbo += G
            elbo += 0.5 * np.sum(TF * y, axis=-1)
r += [elbo]
if return_acc:
r += [Ry, Py]
return tuple(r)
def Estep(self, N, F, G=None):
y, elbo, Ry, Py = self.compute_py_g_x(
N, F, G, return_elbo=True, return_acc=True
)
M = y.shape[0]
y_acc = np.sum(y, axis=0)
        Cy = np.dot(F.T, y)
elbo = np.sum(elbo)
stats = (elbo, M, y_acc, Ry, Cy, Py)
return stats
def MstepML(self, stats):
_, M, y_acc, Ry, Cy, _ = stats
T = np.zeros_like(self.T)
Ryk = np.zeros((self.y_dim, self.y_dim), dtype=float_cpu())
        x_dim = T.shape[1] // self.K
for k in range(self.K):
idx = k * x_dim
Ryk[self._upptr] = Ry[k]
iRyk_mult = invert_pdmat(Ryk, right_inv=False)[0]
T[:, idx : idx + x_dim] = iRyk_mult(Cy[idx : idx + x_dim].T)
self.T = T
self.reset_aux()
def MstepMD(self, stats):
_, M, y_acc, Ry, Cy, Py = stats
mu_y = y_acc / M
        Cy = Py / M - np.outer(mu_y, mu_y)
        chol_Cy = sla.cholesky(Cy, lower=False, overwrite_a=True)
self.T = np.dot(chol_Cy, self.T)
self.reset_aux()
def fit(
self,
N,
F,
G=None,
N_val=None,
F_val=None,
epochs=20,
ml_md="ml+md",
md_epochs=None,
):
use_ml = False if ml_md == "md" else True
use_md = False if ml_md == "ml" else True
if not self.is_init:
self.initialize(N, F)
elbo = np.zeros((epochs,), dtype=float_cpu())
elbo_val = np.zeros((epochs,), dtype=float_cpu())
for epoch in range(epochs):
stats = self.Estep(N, F, G)
elbo[epoch] = stats[0]
if N_val is not None and F_val is not None:
                _, elbo_val_e = self.compute_py_g_x(N_val, F_val, return_elbo=True)
elbo_val[epoch] = np.sum(elbo_val_e)
if use_ml:
self.MstepML(stats)
if use_md and (md_epochs is None or epoch in md_epochs):
self.MstepMD(stats)
elbo_norm = elbo / np.sum(N)
        if N_val is None or F_val is None:
return elbo, elbo_norm
else:
elbo_val_norm = elbo_val / np.sum(N_val)
return elbo, elbo_norm, elbo_val, elbo_val_norm
@property
def TT(self):
if self._TT is None:
            self._TT = self.compute_TT(self.T, self.K, self._upptr)
return self._TT
@property
def _upptr(self):
if self.__upptr is None:
            # boolean mask selecting the upper triangle of a (y_dim, y_dim)
            # matrix, used to pack symmetric matrices into vectors
            ones = np.ones((self.y_dim, self.y_dim), dtype=bool)
            self.__upptr = np.triu(ones)
return self.__upptr
@staticmethod
    def compute_TT(T, K, upptr):
        x_dim = T.shape[1] // K
        y_dim = T.shape[0]
        TT = np.zeros((K, y_dim * (y_dim + 1) // 2), dtype=float_cpu())
        for k in range(K):
            idx = k * x_dim
            T_k = T[:, idx : idx + x_dim]
            TT_k = np.dot(T_k, T_k.T)
            TT[k] = TT_k[upptr]
return TT
@staticmethod
def compute_L(TT, N, upptr):
        y_dim = upptr.shape[0]
        I = np.eye(y_dim, dtype=float_cpu())[upptr]
return I + np.dot(N, TT)
@staticmethod
def normalize_T(T, chol_prec):
Tnorm = np.zeros_like(T)
K = chol_prec.shape[0]
x_dim = int(T.shape[1] / K)
for k in range(K):
idx = k * x_dim
Tnorm[:, idx : idx + x_dim] = np.dot(
T[:, idx : idx + x_dim], chol_prec[k].T
)
return Tnorm
def get_config(self):
config = {"K": self.K}
base_config = super(JFATotal, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def save_params(self, f):
params = {"T": self.T}
self._save_params_from_dict(f, params)
@classmethod
def load_params(cls, f, config):
param_list = ["T"]
params = cls._load_params_to_dict(f, config["name"], param_list)
kwargs = dict(list(config.items()) + list(params.items()))
return cls(**kwargs)
def sample(self, num_samples):
pass
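
# Hypothetical usage sketch (shapes and variable names are illustrative only):
# model = JFATotal(K=num_gaussians, y_dim=400)
# elbo, elbo_norm = model.fit(N, F, epochs=20)  # N: (M, K) zero-order stats, F: (M, K*x_dim) first-order stats
# y = model.compute_py_g_x(N, F)                # posterior means of the total-variability factors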
| 27.057915 | 88 | 0.516124 | 1,043 | 7,008 | 3.25791 | 0.152445 | 0.028252 | 0.045909 | 0.02472 | 0.187758 | 0.119482 | 0.099176 | 0.053855 | 0.053855 | 0.035315 | 0 | 0.009097 | 0.356878 | 7,008 | 258 | 89 | 27.162791 | 0.744841 | 0.018122 | 0 | 0.17 | 0 | 0 | 0.002329 | 0 | 0 | 0 | 0 | 0 | 0.01 | 1 | 0.09 | false | 0.005 | 0.025 | 0 | 0.19 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
74fee5c75b2916c60cd68cc9f257e9cc4d35e86d | 19,063 | py | Python | dockercask.py | dockercask/dockercask | 75103d683f12c2428783d6c729164f0727157e51 | [
"MIT"
] | 13 | 2016-01-11T05:39:34.000Z | 2020-02-26T03:50:17.000Z | dockercask.py | dockercask/dockercask | 75103d683f12c2428783d6c729164f0727157e51 | [
"MIT"
] | null | null | null | dockercask.py | dockercask/dockercask | 75103d683f12c2428783d6c729164f0727157e51 | [
"MIT"
] | 2 | 2017-04-21T18:44:02.000Z | 2018-04-15T23:38:25.000Z | import os
import shutil
import subprocess
import sys
import random
import threading
import json
import time
import traceback
import signal
BASE_IMAGE = 'archlinux'
USER_HOME_DIR = '~'
HOME_DIR = '~/Docker'
APP_DIR = '~/.local/share/applications'
PULSE_COOKIE_PATH = '~/.config/pulse/cookie'
LOCALTIME_PATH = '/etc/localtime'
TMP_DIR = '/tmp'
ROOT_DIR = os.path.dirname(os.path.realpath(__file__))
APPS_DIR = os.path.join(ROOT_DIR, 'apps', BASE_IMAGE)
SCRIPT_PATH = os.path.join(ROOT_DIR, os.path.basename( __file__))
PULSE_SERVER = 'unix:/var/run/pulse/native'
DESKTOP_ENTRY = '''[Desktop Entry]
Version=1.0
Type=Application
Terminal=false
Name=Docker - %s
Comment=Docker - %s
Exec=%s
Icon=%s
Categories=Other;
'''
USER_HOME_DIR = os.path.expanduser(USER_HOME_DIR)
HOME_DIR = os.path.expanduser(HOME_DIR)
DESKTOP_DIR = os.path.expanduser(APP_DIR)
PULSE_COOKIE_PATH = os.path.expanduser(PULSE_COOKIE_PATH)
TMP_DIR = os.path.expanduser(TMP_DIR)
CONF_DIR = os.path.join(HOME_DIR, '.config')
BASE_CONF_PATH = os.path.join(CONF_DIR, 'base.json')
interrupt = False
def mkdirs(path):
if not os.path.exists(path):
os.makedirs(path)
mkdirs(CONF_DIR)
if not os.path.exists(BASE_CONF_PATH):
shutil.copyfile(
os.path.join(APPS_DIR, 'base', 'settings.json'),
BASE_CONF_PATH,
)
with open(BASE_CONF_PATH, 'r') as conf_file:
conf_data = json.loads(conf_file.read())
INCREASE_SHM = conf_data.get('increase_shm', True)
SHARE_CLIPBOARD = conf_data.get('share_clipboard', True)
SHARE_FONTS = conf_data.get('share_fonts', True)
SHARE_THEMES = conf_data.get('share_themes', False)
SHARE_ICONS = conf_data.get('share_icons', False)
SHARE_USER_FONTS = conf_data.get('share_user_fonts', True)
SHARE_USER_THEMES = conf_data.get('share_user_themes', True)
DEFAULT_WIN_SIZE = conf_data.get('default_win_size', '1024x768')
DEFAULT_VOLUMES = conf_data.get('default_volumes', [])
DPI = conf_data.get('dpi', '96')
DEBUG = False
try:
subprocess.check_call(
['docker', 'ps'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
SUDO_DOCKER = False
except:
SUDO_DOCKER = True
GPU = conf_data.get('gpu', 'auto')
if GPU == 'auto':
try:
subprocess.check_output(['which', 'nvidia-settings'],
stderr=subprocess.PIPE)
GPU = 'nvidia'
except:
GPU = 'intel'
def kill_process(process):
# Attempt to interrupt process then kill
terminated = False
for _ in xrange(200):
try:
process.send_signal(signal.SIGINT)
except OSError as error:
if error.errno != 3:
raise
for _ in xrange(4):
if process.poll() is not None:
terminated = True
break
time.sleep(0.0025)
if terminated:
break
if not terminated:
for _ in xrange(10):
if process.poll() is not None:
break
try:
process.send_signal(signal.SIGKILL)
except OSError as error:
if error.errno != 3:
raise
time.sleep(0.01)
def image_exists(image):
image_id = subprocess.check_output((['sudo'] if SUDO_DOCKER else []) + [
'docker',
'images',
'-q',
image,
]).strip()
return bool(image_id)
def pull():
if BASE_IMAGE == 'ubuntu':
image = 'ubuntu'
else:
image = 'pritunl/archlinux'
subprocess.check_call((['sudo'] if SUDO_DOCKER else []) + [
'docker',
'pull',
image,
])
def exists_pull():
if BASE_IMAGE == 'ubuntu':
image = 'ubuntu'
else:
image = 'pritunl/archlinux'
if not image_exists(image):
pull()
def build(app):
app = app.split('#')[0]
if app == 'base-intel' or app == 'base-nvidia':
image_name = 'base-xorg'
else:
image_name = app
app_dir = os.path.join(APPS_DIR, app)
subprocess.check_call((['sudo'] if SUDO_DOCKER else []) + [
'docker',
'build',
'--rm',
'-t', 'dockercask/' + image_name,
'.',
], cwd=app_dir)
if app == 'base':
build('base-' + GPU)
def exists_build(app):
app = app.split('#')[0]
if not image_exists('dockercask/base'):
build('base')
if not image_exists('dockercask/base-xorg'):
build('base-' + GPU)
if not image_exists('dockercask/' + app):
build(app)
def build_all():
build('base')
for app in os.listdir(HOME_DIR):
if not os.path.isdir(os.path.join(HOME_DIR, app)) or \
app.startswith('.'):
continue
build(app)
def add(app):
app_dir = os.path.join(HOME_DIR, app)
icon_path = os.path.join(APPS_DIR, app.split('#')[0], 'icon.png')
app_conf_path = os.path.join(CONF_DIR, app + '.json')
desktop_entry_path = os.path.join(DESKTOP_DIR,
'docker-%s.desktop' % app.replace('#', '-'))
mkdirs(app_dir)
if not os.path.exists(app_conf_path):
with open(app_conf_path, 'w') as app_conf_file:
app_conf_file.write('{}\n')
if DEBUG:
cmd = 'xfce4-terminal --command="python2 %s %s --debug"' % (
SCRIPT_PATH, app)
else:
cmd = 'python2 %s %s' % (SCRIPT_PATH, app)
formated_app_name = app.replace('#', ' ').replace('-', ' ').split()
formated_app_name = ' '.join([x.capitalize() for x in formated_app_name])
if os.path.exists(icon_path):
with open(desktop_entry_path, 'w') as desktop_file:
desktop_file.write(DESKTOP_ENTRY % (
formated_app_name,
formated_app_name,
cmd,
icon_path,
))
def remove(app):
app_dir = os.path.join(HOME_DIR, app)
app_conf_path = os.path.join(CONF_DIR, app + '.json')
desktop_entry_path = os.path.join(DESKTOP_DIR,
'docker-%s.desktop' % app.replace('#', '-'))
for path in (app_dir, app_conf_path, desktop_entry_path):
subprocess.check_call([
'rm',
'-rf',
path,
])
def app_exists(app):
if os.path.exists(os.path.join(HOME_DIR, app)):
return True
return False
def focus_app(app):
try:
subprocess.check_call([
'wmctrl',
'-F',
'-R',
'dockercask:' + app,
])
return True
except subprocess.CalledProcessError:
pass
return False
def run(app):
app_dir = os.path.join(HOME_DIR, app)
app_conf_path = os.path.join(CONF_DIR, app + '.json')
app_default_conf_path = os.path.join(
APPS_DIR, app.split('#')[0], 'settings.json')
fonts_dir = os.path.join(USER_HOME_DIR, '.fonts')
themes_dir = os.path.join(USER_HOME_DIR, '.themes')
cmd = []
docker_args = []
volume_args = []
env_args = []
with open(app_conf_path, 'r') as app_conf_file:
app_conf_data = json.loads(app_conf_file.read())
with open(app_default_conf_path, 'r') as app_default_conf_file:
app_default_conf_data = json.loads(app_default_conf_file.read())
mount_path = app_conf_data.get('mount_path',
app_default_conf_data.get('mount_path', '/home/docker/Docker'))
admin = app_conf_data.get('admin',
app_default_conf_data.get('admin'))
host_x11 = app_conf_data.get('host_x11',
app_default_conf_data.get('host_x11'))
headless = app_conf_data.get('headless',
app_default_conf_data.get('headless'))
cli = app_conf_data.get('cli',
app_default_conf_data.get('cli'))
increase_shm = app_conf_data.get('increase_shm',
app_default_conf_data.get('increase_shm', INCREASE_SHM))
dpi = app_conf_data.get('dpi',
        app_default_conf_data.get('dpi', DPI))
if DEBUG or cli:
docker_args.append('-it')
cmd.append('/bin/bash')
if admin:
docker_args += ['--cap-add', 'SYS_ADMIN']
if host_x11:
volume_args += ['-v', '/etc/machine-id:/etc/machine-id:ro']
docker_args += ['--device', '/dev/dri:/dev/dri']
docker_args += ['--device', '/dev/nvidia0:/dev/nvidia0']
docker_args += ['--device', '/dev/nvidiactl:/dev/nvidiactl']
docker_args += ['--device', '/dev/nvidia-modeset:/dev/nvidia-modeset']
if increase_shm:
if isinstance(increase_shm, basestring):
shm_size = increase_shm
else:
shm_size = '1g'
docker_args += ['--shm-size', shm_size]
for src, dest in DEFAULT_VOLUMES:
volume_args += [
'-v', '%s:%s' % (os.path.expanduser(src), dest),
]
if SHARE_FONTS:
volume_args += [
'-v', '%s:%s' % ('/usr/share/fonts', '/usr/share/fonts:ro'),
]
if SHARE_ICONS:
volume_args += [
'-v', '%s:%s' % ('/usr/share/icons', '/usr/share/icons:ro'),
]
if SHARE_THEMES:
volume_args += [
'-v', '%s:%s' % ('/usr/share/themes', '/usr/share/themes:ro'),
]
if SHARE_USER_FONTS:
volume_args += [
'-v', '%s:%s' % (fonts_dir, '/home/docker/.fonts:ro'),
]
if SHARE_USER_THEMES:
volume_args += [
'-v', '%s:%s' % (themes_dir, '/home/docker/.themes:ro'),
]
mkdirs(fonts_dir)
mkdirs(themes_dir)
if host_x11:
# Get the cookie for the host display
x_cookie = subprocess.check_output(['xauth', 'list', ':0']).split()[-1]
x_num = '0'
elif not headless:
# Create a cookie for the new Xephyr window
x_cookie = subprocess.check_output(['mcookie'])
x_num = str(random.randint(1000, 32000))
x_auth_path = os.path.join(TMP_DIR, '.X11-docker-' + x_num)
with open(x_auth_path, 'w') as _:
pass
# Store the cookie in a file for Xephyr to read
subprocess.check_call([
'xauth',
'-f', x_auth_path,
'add',
':' + x_num,
'MIT-MAGIC-COOKIE-1',
x_cookie,
])
        # Add the cookie to the host's xauth to allow xsel and pulseaudio to
# access the Xephyr window
subprocess.check_call([
'xauth',
'add',
':' + x_num,
'MIT-MAGIC-COOKIE-1',
x_cookie,
])
if not headless:
x_screen_path = os.path.join(TMP_DIR, '.X11-unix', 'X' + x_num)
volume_args += [
'-v', '%s:%s:ro' % (x_screen_path, x_screen_path),
'-v', '%s:%s:ro' % (PULSE_COOKIE_PATH, '/tmp/.pulse-cookie'),
'-v', '/var/run/user/%s/pulse/native:/var/run/pulse/native' % (
os.getuid()),
]
env_args += [
'-e', 'DISPLAY=:' + x_num,
'-e', 'XAUTHORITY=/tmp/.Xauth',
'-e', 'XCOOKIE=' + x_cookie,
'-e', 'PULSE_SERVER=' + PULSE_SERVER,
]
x_proc = None
docker_id = None
clean_lock = threading.Lock()
def clean_up():
if not clean_lock.acquire(False):
return
global interrupt
interrupt = True
if docker_id:
try:
subprocess.check_output((['sudo'] if SUDO_DOCKER else []) + [
'docker',
'rm',
'-f',
docker_id,
], stderr=subprocess.PIPE)
except:
pass
if x_proc:
kill_process(x_proc)
if not host_x11:
try:
# Remove the Xephyr display from xauth
subprocess.check_output([
'xauth',
'remove',
':' + x_num,
], stderr=subprocess.PIPE)
except:
pass
try:
os.remove(x_auth_path)
except:
pass
try:
os.remove(x_screen_path)
except:
pass
if not host_x11 and not headless:
args = [
'Xephyr',
'-auth', x_auth_path,
'-screen', DEFAULT_WIN_SIZE,
'-title', 'dockercask:' + app,
'-br',
'-resizeable',
'-no-host-grab',
'-nolisten', 'tcp',
]
if dpi:
args += ['-dpi', dpi]
# Create Xephyr window secured with cookie
x_proc = subprocess.Popen(args + [':' + x_num])
def x_thread_func():
try:
x_proc.wait()
finally:
clean_up()
thread = threading.Thread(target=x_thread_func)
thread.start()
# The module-x11-publish for the Xephyr display does not appear to be
# needed and will crash the pulseaudio server if the Xephyr window is
    # closed while the module is loaded. The module is loaded by xfce4-session
def pacmd_thread_func():
if DEBUG:
while True:
time.sleep(1)
unload_pulseaudio(x_num)
else:
for _ in xrange(20):
time.sleep(0.5)
unload_pulseaudio(x_num)
if not headless:
thread = threading.Thread(target=pacmd_thread_func)
thread.daemon = True
thread.start()
args = (['sudo'] if SUDO_DOCKER else []) + [
'docker',
'run',
'-i',
'--rm' if (DEBUG or cli) else '--detach',
] + docker_args + [
'-v', '%s:%s:ro' % (LOCALTIME_PATH, LOCALTIME_PATH),
'-v', '%s:%s:ro' % (BASE_CONF_PATH, '/base.json'),
'-v', '%s:%s:ro' % (app_conf_path, '/app.json'),
'-v', '%s:%s' % (app_dir, mount_path),
] + volume_args + [
'-u', 'docker',
'-e', 'HOME=/home/docker',
] + env_args + [
'dockercask/' + app.split('#')[0],
] + cmd
print ' '.join(args)
if not host_x11 and not headless and SHARE_CLIPBOARD:
thread = threading.Thread(target=share_clipboard, args=(x_num,))
thread.daemon = True
thread.start()
if DEBUG or cli:
try:
subprocess.check_call(args)
finally:
clean_up()
else:
docker_id = subprocess.check_output(args).strip()
try:
subprocess.check_call((['sudo'] if SUDO_DOCKER else []) +
['docker', 'wait', docker_id])
finally:
clean_up()
def set_clipboard(num, val):
if not val:
return
process = subprocess.Popen(
['xsel', '--display', ':' + num, '-b', '-i'],
stdin=subprocess.PIPE,
)
process.stdin.write(val)
process.stdin.close()
for _ in xrange(75):
time.sleep(0.005)
exit_code = process.poll()
if exit_code is not None:
if exit_code != 0:
raise Exception('Error from xsel process')
return
process.kill()
raise Exception('Timeout setting clipboard')
def get_clipboard(num):
process = subprocess.Popen(
['xsel', '--display', ':' + num, '-b', '-o', '-t', '250'],
stdout=subprocess.PIPE,
)
for _ in xrange(75):
time.sleep(0.005)
exit_code = process.poll()
if exit_code is not None:
if exit_code != 0:
raise Exception('Error from xsel process')
output, _ = process.communicate()
return output[:3072]
process.kill()
raise Exception('Timeout getting clipboard')
def share_clipboard(app_num):
time.sleep(1)
try:
val = get_clipboard('0')
set_clipboard(app_num, val)
clipboards = [val, get_clipboard(app_num)]
except:
traceback.print_exc()
time.sleep(3)
share_clipboard(app_num)
return
while not interrupt:
try:
for num in ('0', app_num):
val = get_clipboard(num)
i = 0 if num == '0' else 1
if val != clipboards[i]:
set_num = app_num if num == '0' else '0'
set_i = 1 if num == '0' else 0
set_clipboard(app_num if num == '0' else '0', val)
clipboards[i] = val
clipboards[set_i] = get_clipboard(set_num)
time.sleep(0.2)
except:
if not interrupt:
traceback.print_exc()
time.sleep(3)
def unload_pulseaudio(x_num, count=0):
# Unload the pulse audio module specific to the Xephyr window. Pacmd will
# sometimes return an error when busy.
if count > 2:
return
try:
modules = subprocess.check_output(['pacmd', 'list-modules'])
except:
traceback.print_exc()
time.sleep(0.1)
unload_pulseaudio(x_num, count + 1)
return
index = None
for line in modules.splitlines():
line = line.strip()
if line.startswith('index:'):
index = line.split()[-1]
if 'display=:' + x_num in line and index:
for _ in xrange(3):
try:
subprocess.check_call(['pacmd', 'unload-module', index])
break
except:
traceback.print_exc()
time.sleep(0.1)
def kill_pulseaudio(x_num, count=0):
# Kill the pulse audio client specific to the Xephyr window. Pacmd will
# sometimes return an error when busy.
if count > 2:
return
try:
clients = subprocess.check_output(['pacmd', 'list-clients'])
except:
traceback.print_exc()
time.sleep(0.1)
kill_pulseaudio(x_num, count + 1)
return
index = None
for line in clients.splitlines():
line = line.strip()
if line.startswith('index:'):
index = line.split()[-1]
if 'window.x11.display' in line and ':' + x_num in line and index:
for _ in xrange(3):
try:
subprocess.check_call(['pacmd', 'kill-client', index])
break
except:
traceback.print_exc()
time.sleep(0.1)
command = sys.argv[1]
if sys.argv[-1] == '--debug':
DEBUG = True
if command == 'build':
app = sys.argv[2]
exists_pull()
build(app)
elif command == 'build-all':
build_all()
elif command == 'update':
if len(sys.argv) > 2:
app = sys.argv[2]
else:
app = None
pull()
if app:
build('base')
build(app)
else:
build_all()
elif command == 'add':
app = sys.argv[2]
exists_pull()
exists_build(app)
add(app)
elif command == 'remove':
app = sys.argv[2]
remove(app)
else:
if command == 'run':
app = sys.argv[2]
else:
app = sys.argv[1]
if not app_exists(app):
print 'App must be added before running'
exit(1)
if focus_app(app):
exit(0)
exists_pull()
exists_build(app)
run(app)
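
# Usage sketch inferred from the command dispatch above (app names are examples):
#   python2 dockercask.py add firefox       # build images if needed and create a desktop entry
#   python2 dockercask.py run firefox       # launch the app in its own Xephyr window
#   python2 dockercask.py remove firefox    # delete the app dir, config and desktop entry
#   python2 dockercask.py update            # pull the base image and rebuild all apps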
| 27.116643 | 79 | 0.544458 | 2,340 | 19,063 | 4.237179 | 0.137607 | 0.02239 | 0.027736 | 0.015532 | 0.38467 | 0.272617 | 0.217751 | 0.172365 | 0.165507 | 0.119919 | 0 | 0.011513 | 0.321093 | 19,063 | 702 | 80 | 27.155271 | 0.754597 | 0.039396 | 0 | 0.386364 | 0 | 0 | 0.122431 | 0.01749 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0.01049 | 0.017483 | null | null | 0.013986 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
74ff1cb180abc2244375933d5f39c89666c57b20 | 314 | py | Python | Fun Excercise/decorator3.py | NirmalSilwal/Python- | 6d23112db8366360f0b79bdbf21252575e8eab3e | [
"MIT"
] | 32 | 2020-04-05T08:29:40.000Z | 2022-01-08T03:10:00.000Z | Fun Excercise/decorator3.py | NirmalSilwal/Python- | 6d23112db8366360f0b79bdbf21252575e8eab3e | [
"MIT"
] | 3 | 2021-06-02T04:09:11.000Z | 2022-03-02T14:55:03.000Z | Fun Excercise/decorator3.py | NirmalSilwal/Python- | 6d23112db8366360f0b79bdbf21252575e8eab3e | [
"MIT"
] | 3 | 2020-07-13T05:44:04.000Z | 2021-03-03T07:07:58.000Z | def say_hello(hello_var):
print(hello_var)
def say_hi(hi_var):
print(hello_var + " " + hi_var)
return say_hi
say_hi_func = say_hello("Hello")  # prints "Hello" and returns the say_hi function, which is stored in say_hi_func
say_hi_func("Hi")  # calls say_hi, which prints "Hello Hi"
2d0005654c5252f8fe1a1bf0048e42c68ed61045 | 16,082 | py | Python | ski_conditions/apps/app_scraping/management/commands/do_scraping.py | JKProjects-Org/ski-conditions | e7f9350bb3c290853f49f65e30d495ee0aa3b737 | [
"MIT"
] | 2 | 2019-11-03T16:37:33.000Z | 2020-01-08T19:05:20.000Z | ski_conditions/apps/app_scraping/management/commands/do_scraping.py | JKProjects-Org/ski-conditions | e7f9350bb3c290853f49f65e30d495ee0aa3b737 | [
"MIT"
] | 8 | 2019-11-04T02:49:30.000Z | 2022-02-10T12:22:15.000Z | ski_conditions/apps/app_scraping/management/commands/do_scraping.py | JKProjects-Org/ski-conditions | e7f9350bb3c290853f49f65e30d495ee0aa3b737 | [
"MIT"
] | null | null | null | import json
import re
import requests
from bs4 import BeautifulSoup
from django.core.management.base import BaseCommand
from ski_conditions.apps.app_scraping.models import SkiResort
class AbstractScraper:
def scrape(self):
pass
class AbstractScriptScraper(AbstractScraper):
def _common_scrape(self):
page = requests.get(self.url)
soup = BeautifulSoup(page.text, 'html.parser')
return soup
class AbstractVailScraper(AbstractScraper):
def _common_scrape(self):
page = requests.get(self.url)
# create a BeautifulSoup object
soup = BeautifulSoup(page.text, 'html.parser')
# search for class c118__number1--v1
trails_summary = soup.find(class_='terrain_summary row')
# look for stuff in <span> tags
trails_summary_items = trails_summary.find_all(class_='c118__number1--v1')
# look for trail and lift totals
trail_totals = trails_summary.find_all(class_='c118__number2--v1')
return (trail_totals, trails_summary_items)
def terrain_status(self):
# gets info on individual lifts and trails, such as status
page = requests.get(self.url)
soup = BeautifulSoup(page.text, 'html.parser')
# need to look through script to get rest of values
pattern = re.compile("FR.TerrainStatusFeed = ({.*})")
regex_find = soup.find_all('script', text=pattern)
# has numbers for Status. ex. Status = 0 or 1
regex_find_numbers = regex_find[0].text
# has words for Status, Type. ex. Status = Open, Type = Black
regex_find_words = regex_find[1].text
# need to apply regex again to get just the json part
status_numbers = re.findall(pattern, regex_find_numbers)[0]
json_data_numbers = json.loads(status_numbers)
json_lifts_numbers = json_data_numbers['Lifts']
status_words = re.findall(pattern, regex_find_words)[0]
json_data_words = json.loads(status_words)
json_trails_words = json_data_words['GroomingAreas']
# fields: Id, Name, Type (Green, Blue, Black, DoubleBlack), IsOpen (True, False)
json_lifts_words = json_data_words['Lifts']
# fields: Name, Status (Open, Closed, OnHold), Type, SortOrder, Mountain
return json_trails_words, json_lifts_words
def trail_specifics(self, json_trails_words):
black_diamonds_open = 0
double_black_diamonds_open = 0
# go through each section of mountain, ex. frontside, backside (defined by vail)
for area in json_trails_words:
# tally runs in this area, ex. frontside
area_runs = area['Runs']
for run in area_runs:
if run['IsOpen']:
# tally number of black diamond runs open
if run['Type'] == 'Black':
black_diamonds_open += 1
elif run['Type'] == 'DoubleBlack':
double_black_diamonds_open += 1
return black_diamonds_open, double_black_diamonds_open
class KeystoneScraper(AbstractVailScraper):
name = 'Keystone'
url = 'https://www.keystoneresort.com/the-mountain/mountain-conditions/terrain-and-lift-status.aspx'
def scrape(self):
trail_totals, trails_summary_items = self._common_scrape()
new_total_trails = int(trail_totals[2].get_text()[2:])
new_total_lifts = int(trail_totals[3].get_text()[2:])
new_acres_open = int(trails_summary_items[0].get_text())
new_terrain_percent = int(trails_summary_items[1].get_text())
new_trails_open = int(trails_summary_items[2].get_text())
new_lifts_open = int(trails_summary_items[3].get_text())
# TODO Use a struct or other data structure
return {
'total_trails': new_total_trails,
'total_lifts': new_total_lifts,
'acres_open': new_acres_open,
'terrain_percent': new_terrain_percent,
'trails_open': new_trails_open,
'lifts_open': new_lifts_open,
}
class NorthstarScraper(AbstractVailScraper):
name = 'Northstar'
url = 'https://www.northstarcalifornia.com/the-mountain/mountain-conditions/terrain-and-lift-status.aspx'
def scrape(self):
trail_totals, trails_summary_items = self._common_scrape()
new_total_trails = int(trail_totals[2].get_text()[2:])
new_total_lifts = int(trail_totals[1].get_text()[2:])
# remove comma from new_acres_open if present)
new_acres_open = int(trails_summary_items[0].get_text().replace(',', ''))
new_terrain_percent = int(trails_summary_items[3].get_text())
new_trails_open = int(trails_summary_items[2].get_text())
new_lifts_open = int(trails_summary_items[1].get_text())
# get json from site script containing trail, lift specifics
json_trails_words, json_lifts_words = self.terrain_status()
# get number of black diamond, double black diamonds open
black_diamonds_open, double_black_diamonds_open = self.trail_specifics(json_trails_words)
# get number of lifts on hold
lifts_on_hold = 0
for lift in json_lifts_words:
if lift['Status'] == 'OnHold':
lifts_on_hold += 1
# TODO Use a struct or other data structure
return {
'total_trails': new_total_trails,
'total_lifts': new_total_lifts,
'acres_open': new_acres_open,
'terrain_percent': new_terrain_percent,
'trails_open': new_trails_open,
'lifts_open': new_lifts_open,
'lifts_on_hold': lifts_on_hold,
'black_diamonds_open': black_diamonds_open,
'double_black_diamonds_open': double_black_diamonds_open,
}
class KirkwoodScraper(AbstractVailScraper):
    name = 'Kirkwood'
    url = 'https://www.kirkwood.com/the-mountain/mountain-conditions/terrain-and-lift-status.aspx'
    def scrape(self):
        trail_totals, trails_summary_items = self._common_scrape()
        # only acres open and terrain percent are shown on site
        new_acres_open = int(trails_summary_items[1].get_text().replace(',', ''))
        new_terrain_percent = int(trails_summary_items[0].get_text())
        # TODO: put the following in some function
        json_trails_words, json_lifts_words = self.terrain_status()
        # GroomingAreas/trails = [{frontside,runs[]}, {backside,runs[]}]
        # to make applicable to all resorts, go through each element in GroomingAreas list
        new_trails_open = 0
        new_total_trails = 0
        new_lifts_open = 0
        new_total_lifts = 0
        # trail and lift specifics
        black_diamonds_open = 0
        double_black_diamonds_open = 0
        lifts_on_hold = 0
        # go through each section of mountain, ex. frontside, backside (defined by vail)
        for area in json_trails_words:
            # tally runs in this area, ex. frontside
            area_runs = area['Runs']
            for run in area_runs:
                new_total_trails += 1
                if run['IsOpen']:
                    new_trails_open += 1
                    # tally number of black diamond runs open
                    if run['Type'] == 'Black':
                        black_diamonds_open += 1
                    elif run['Type'] == 'DoubleBlack':
                        double_black_diamonds_open += 1
        # tally number of lifts open
        for lift in json_lifts_words:
            new_total_lifts += 1
            if lift['Status'] == 'Open':
                new_lifts_open += 1
            elif lift['Status'] == 'OnHold':
                lifts_on_hold += 1
        return {
            'total_trails': new_total_trails,
            'total_lifts': new_total_lifts,
            'acres_open': new_acres_open,
            'terrain_percent': new_terrain_percent,
            'trails_open': new_trails_open,
            'lifts_open': new_lifts_open,
            'lifts_on_hold': lifts_on_hold,
            'black_diamonds_open': black_diamonds_open,
            'double_black_diamonds_open': double_black_diamonds_open,
        }
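# A possible shape for the "TODO: put the following in some function" above: a module-level
# helper that tallies run and lift counts from the terrain-status JSON. This is a sketch based on
# the JSON layout assumed by the code above; the helper name is not part of the original project.
def tally_terrain(json_trails_words, json_lifts_words):
    """Return open/total trail and lift counts plus black-diamond and on-hold tallies."""
    trails_open = total_trails = blacks = double_blacks = 0
    lifts_open = total_lifts = lifts_on_hold = 0
    for area in json_trails_words:
        for run in area['Runs']:
            total_trails += 1
            if run['IsOpen']:
                trails_open += 1
                if run['Type'] == 'Black':
                    blacks += 1
                elif run['Type'] == 'DoubleBlack':
                    double_blacks += 1
    for lift in json_lifts_words:
        total_lifts += 1
        if lift['Status'] == 'Open':
            lifts_open += 1
        elif lift['Status'] == 'OnHold':
            lifts_on_hold += 1
    return trails_open, total_trails, lifts_open, total_lifts, lifts_on_hold, blacks, double_blacks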
class HeavenlyScraper(AbstractVailScraper):
    name = 'Heavenly'
    url = 'https://www.skiheavenly.com/the-mountain/mountain-conditions/terrain-and-lift-status.aspx'
    def scrape(self):
        trail_totals, trails_summary_items = self._common_scrape()
        # totals parsed from the trail totals text
        new_total_trails = int(trail_totals[3].get_text()[2:])
        new_total_lifts = int(trail_totals[1].get_text()[2:])
        # open counts parsed from the summary items
        new_acres_open = int(trails_summary_items[0].get_text().replace(',', ''))
        new_terrain_percent = int(trails_summary_items[2].get_text())
        new_trails_open = int(trails_summary_items[3].get_text())
        new_lifts_open = int(trails_summary_items[1].get_text())
        # get json from site script containing trail, lift specifics
        json_trails_words, json_lifts_words = self.terrain_status()
        # get number of black diamond, double black diamonds open
        black_diamonds_open, double_black_diamonds_open = self.trail_specifics(json_trails_words)
        # get number of lifts on hold
        lifts_on_hold = 0
        for lift in json_lifts_words:
            if lift['Status'] == 'OnHold':
                lifts_on_hold += 1
        return {
            'total_trails': new_total_trails,
            'total_lifts': new_total_lifts,
            'acres_open': new_acres_open,
            'terrain_percent': new_terrain_percent,
            'trails_open': new_trails_open,
            'lifts_open': new_lifts_open,
            'lifts_on_hold': lifts_on_hold,
            'black_diamonds_open': black_diamonds_open,
            'double_black_diamonds_open': double_black_diamonds_open,
        }
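# The "lifts on hold" tally above also appears verbatim in NorthstarScraper.scrape(); a tiny
# shared helper like this (a sketch, not part of the original code) would remove the duplication:
def count_lifts_on_hold(json_lifts_words):
    return sum(1 for lift in json_lifts_words if lift['Status'] == 'OnHold')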
class KirkwoodSnowReport(AbstractScriptScraper):
    name = 'Kirkwood'
    url = 'https://www.kirkwood.com/the-mountain/mountain-conditions/snow-and-weather-report.aspx'
    def scrape(self):
        soup = self._common_scrape()
        # create regex pattern to find snowReportData json
        # only grabs stuff in parens
        pattern = re.compile("snowReportData = ({.*})")
        # find html that contains pattern, will contain script tags
        script_items = soup.find_all('script', text=pattern)
        # get script body that contains snow report numbers
        script_snow_report = script_items[0].text
        # use regex pattern to grab only json part
        # returns a list, grab first and only element
        snow_data = re.findall(pattern, script_snow_report)[0]
        # use json module to read json snow_data
        json_snow_data = json.loads(snow_data)
        return json_snow_data
    def unpack_json(self, json_data):
        return {
            'overnight': json_data['OvernightSnowfall']['Inches'],
            '24hr': json_data['TwentyFourHourSnowfall']['Inches'],
            '48hr': json_data['FortyEightHourSnowfall']['Inches'],
            '7day': json_data['SevenDaySnowfall']['Inches'],
            'base_depth': json_data['BaseDepth']['Inches'],
            'current_season': json_data['CurrentSeason']['Inches'],
        }
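# Note: KirkwoodSnowReport and the HeavenlySnowReport/NorthstarSnowReport classes that follow are
# identical except for `name` and `url`, so the scrape()/unpack_json() bodies could be hoisted
# into one shared AbstractScriptScraper subclass that the three resorts only parameterise.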
class HeavenlySnowReport(AbstractScriptScraper):
    name = 'Heavenly'
    url = 'https://www.skiheavenly.com/the-mountain/mountain-conditions/snow-and-weather-report.aspx'
    def scrape(self):
        soup = self._common_scrape()
        # create regex pattern to find snowReportData json
        # only grabs stuff in parens
        pattern = re.compile("snowReportData = ({.*})")
        # find html that contains pattern, will contain script tags
        script_items = soup.find_all('script', text=pattern)
        # get script body that contains snow report numbers
        script_snow_report = script_items[0].text
        # use regex pattern to grab only json part
        # returns a list, grab first and only element
        snow_data = re.findall(pattern, script_snow_report)[0]
        # use json module to read json snow_data
        json_snow_data = json.loads(snow_data)
        return json_snow_data
    def unpack_json(self, json_data):
        return {
            'overnight': json_data['OvernightSnowfall']['Inches'],
            '24hr': json_data['TwentyFourHourSnowfall']['Inches'],
            '48hr': json_data['FortyEightHourSnowfall']['Inches'],
            '7day': json_data['SevenDaySnowfall']['Inches'],
            'base_depth': json_data['BaseDepth']['Inches'],
            'current_season': json_data['CurrentSeason']['Inches'],
        }
class NorthstarSnowReport(AbstractScriptScraper):
    name = 'Northstar'
    url = 'https://www.northstarcalifornia.com/the-mountain/mountain-conditions/snow-and-weather-report.aspx'
    def scrape(self):
        soup = self._common_scrape()
        # create regex pattern to find snowReportData json
        # only grabs stuff in parens
        pattern = re.compile("snowReportData = ({.*})")
        # find html that contains pattern, will contain script tags
        script_items = soup.find_all('script', text=pattern)
        # get script body that contains snow report numbers
        script_snow_report = script_items[0].text
        # use regex pattern to grab only json part
        # returns a list, grab first and only element
        snow_data = re.findall(pattern, script_snow_report)[0]
        # use json module to read json snow_data
        json_snow_data = json.loads(snow_data)
        return json_snow_data
    def unpack_json(self, json_data):
        return {
            'overnight': json_data['OvernightSnowfall']['Inches'],
            '24hr': json_data['TwentyFourHourSnowfall']['Inches'],
            '48hr': json_data['FortyEightHourSnowfall']['Inches'],
            '7day': json_data['SevenDaySnowfall']['Inches'],
            'base_depth': json_data['BaseDepth']['Inches'],
            'current_season': json_data['CurrentSeason']['Inches'],
        }
class Command(BaseCommand):
help = "Scrapes ski resort website and updates database"
def handle(self, *args, **options):
# Trail and Lift Conditions
scrapers = [
HeavenlyScraper(),
NorthstarScraper(),
KirkwoodScraper(),
]
for scraper in scrapers:
name = scraper.name
scraped = scraper.scrape()
SkiResort.objects.update_or_create(
resort_name=name,
defaults={
'total_trails': scraped['total_trails'],
'acres_open': scraped['acres_open'],
'terrain_percent': scraped['terrain_percent'],
'trails_open': scraped['trails_open'],
'lifts_open': scraped['lifts_open'],
'total_lifts': scraped['total_lifts'],
'lifts_on_hold': scraped['lifts_on_hold'],
'black_diamonds_open': scraped['black_diamonds_open'],
'double_black_diamonds_open': scraped['double_black_diamonds_open'],
}
)
# Snow Conditions
snow_reports = [
KirkwoodSnowReport(),
HeavenlySnowReport(),
NorthstarSnowReport(),
]
for snow in snow_reports:
name = snow.name
snow_json_data = snow.scrape()
snow_data = snow.unpack_json(snow_json_data)
SkiResort.objects.update_or_create(
resort_name=name,
defaults={
'overnight_snowfall': snow_data['overnight'],
'twenty_four_hour_snowfall': snow_data['24hr'],
'forty_eight_hour_snowfall': snow_data['48hr'],
'seven_day_snowfall': snow_data['7day'],
'base_depth': snow_data['base_depth'],
'current_season': snow_data['current_season'],
}
)
self.stdout.write('SkiResort model updated')
| 37.751174 | 109 | 0.628778 | 1,876 | 16,082 | 5.10661 | 0.130597 | 0.043424 | 0.056785 | 0.040814 | 0.714718 | 0.704593 | 0.68215 | 0.672234 | 0.661169 | 0.632568 | 0 | 0.008227 | 0.274406 | 16,082 | 425 | 110 | 37.84 | 0.812752 | 0.160179 | 0 | 0.588235 | 0 | 0.025735 | 0.186719 | 0.0232 | 0 | 0 | 0 | 0.002353 | 0 | 1 | 0.058824 | false | 0.003676 | 0.022059 | 0.011029 | 0.227941 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 |
2d00069adda8a7efef667916f453029310a82aea | 515 | py | Python | CodeChef/Python/arrange.py | dfm066/Programming | 53d28460cd40b966cca1d4695d9dc6792ced4c6f | [
"MIT"
] | null | null | null | CodeChef/Python/arrange.py | dfm066/Programming | 53d28460cd40b966cca1d4695d9dc6792ced4c6f | [
"MIT"
] | null | null | null | CodeChef/Python/arrange.py | dfm066/Programming | 53d28460cd40b966cca1d4695d9dc6792ced4c6f | [
"MIT"
] | null | null | null | facts = [1]
fact = 1
for i in range(1,100001):
    fact *= i
    fact %= 1000000007
    facts.append(fact)
T = int(input())
letters = []
for i in range(0,26):
    letters.append(0)
while T > 0:
    T -= 1
    s = input()
    cnt = 0
    ans = 1
    for i in s:
        letters[ord(i)-97] += 1
    for i in letters:
        if i != 0:
            cnt += 1
            ans = ans*facts[i]
    ans = ans*facts[cnt]%1000000007
    for i in range(0, 26):
        letters[i] = 0
    print(ans)
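# Worked example: for s = "aab" the letter counts are a:2 and b:1, so cnt = 2 distinct letters and
# ans = 2! * 1! * 2! = 4, i.e. the product of the per-letter count factorials times the factorial
# of the number of distinct letters, reduced modulo 1000000007.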
| 19.074074 | 36 | 0.464078 | 79 | 515 | 3.025316 | 0.303797 | 0.083682 | 0.125523 | 0.087866 | 0.175732 | 0.175732 | 0.175732 | 0 | 0 | 0 | 0 | 0.14791 | 0.396117 | 515 | 26 | 37 | 19.807692 | 0.620579 | 0 | 0 | 0.08 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.04 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2d00f7d06ce5d59e251acaa9db5fafba0b34215b | 356 | py | Python | prettyqt/paths.py | phil65/PrettyQt | 26327670c46caa039c9bd15cb17a35ef5ad72e6c | [
"MIT"
] | 7 | 2019-05-01T01:34:36.000Z | 2022-03-08T02:24:14.000Z | prettyqt/paths.py | phil65/PrettyQt | 26327670c46caa039c9bd15cb17a35ef5ad72e6c | [
"MIT"
] | 141 | 2019-04-16T11:22:01.000Z | 2021-04-14T15:12:36.000Z | prettyqt/paths.py | phil65/PrettyQt | 26327670c46caa039c9bd15cb17a35ef5ad72e6c | [
"MIT"
] | 5 | 2019-04-17T11:48:19.000Z | 2021-11-21T10:30:19.000Z | from importlib import resources
import pathlib
ROOT_PATH = pathlib.Path(resources.files("prettyqt")) # type: ignore
LOCALIZATION_PATH = ROOT_PATH / "localization"
THEMES_PATH = ROOT_PATH / "themes"
RE_LEXER_PATH = (
ROOT_PATH / "syntaxhighlighters" / "pygments" / "regularexpressionlexer.py"
)
ICON_FONT_PATH = ROOT_PATH / "iconprovider" / "fonts"
| 27.384615 | 79 | 0.755618 | 41 | 356 | 6.292683 | 0.560976 | 0.155039 | 0.186047 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.134831 | 356 | 12 | 80 | 29.666667 | 0.837662 | 0.033708 | 0 | 0 | 0 | 0 | 0.274854 | 0.073099 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.222222 | 0 | 0.222222 | 0 | 0 | 0 | 0 | null | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
2d0218e45fe129209799296000566bde73d084af | 4,380 | py | Python | poptimizer/shared/app.py | poliyev/poptimizer | 71935c4365b0572e65b6d3172f925701dda283db | [
"Unlicense"
] | null | null | null | poptimizer/shared/app.py | poliyev/poptimizer | 71935c4365b0572e65b6d3172f925701dda283db | [
"Unlicense"
] | null | null | null | poptimizer/shared/app.py | poliyev/poptimizer | 71935c4365b0572e65b6d3172f925701dda283db | [
"Unlicense"
] | 1 | 2021-12-02T13:32:44.000Z | 2021-12-02T13:32:44.000Z | """Unit of Work and EventBus."""
import asyncio
import contextlib
from types import TracebackType
from typing import Callable, Generic, Optional, TypeVar
from poptimizer import config
from poptimizer.shared import adapters, domain
EntityType = TypeVar("EntityType", bound=domain.BaseEntity)
class UoW(
    contextlib.AbstractAsyncContextManager[domain.AbstractRepo[EntityType]],
    domain.AbstractRepo[EntityType],
):
    """Transaction context manager.
    Exposes the repo interface, keeps the domain entities that were loaded and saves them
    when the context is exited.
    """
    def __init__(self, mapper: adapters.Mapper[EntityType]) -> None:
        """Stores the mapper and acts as a thin wrapper around it."""
        self._mapper = mapper
        self._seen: set[EntityType] = set()
    async def __call__(self, id_: domain.ID) -> EntityType:
        """Loads a domain entity from the database."""
        entity = await self._mapper(id_)
        self._seen.add(entity)
        return entity
    async def __aenter__(self) -> domain.AbstractRepo[EntityType]:
        """Returns a repo with tables."""
        return self
    async def __aexit__(
        self,
        exc_type: Optional[type[BaseException]],
        exc_value: Optional[BaseException],
        traceback: Optional[TracebackType],
    ) -> None:
        """Saves the modified domain entities to MongoDB."""
        commit = self._mapper.commit
        await asyncio.gather(*[commit(entity) for entity in self._seen])
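# Usage sketch (not part of the original module; `mapper` and `id_` are placeholders): entities
# loaded through the repo interface inside the block are committed by __aexit__ on exit.
async def _example_uow_usage(mapper, id_):
    async with UoW(mapper) as repo:
        entity = await repo(id_)
        return entity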
FutureEvent = asyncio.Future[list[domain.AbstractEvent]]
PendingTasks = set[FutureEvent]
class EventBus(Generic[EntityType]):
"""Шина для обработки событий."""
_logger = adapters.AsyncLogger()
def __init__(
self,
uow_factory: Callable[[], UoW[EntityType]],
event_handler: domain.AbstractHandler[EntityType],
):
"""Для работы нужна фабрика транзакций и обработчик событий."""
self._uow_factory = uow_factory
self._event_handler = event_handler
def handle_event(
self,
event: domain.AbstractEvent,
) -> None:
"""Обработка события."""
loop = asyncio.get_event_loop()
try:
loop.run_until_complete(self._handle_event(event))
except config.POptimizerError:
_shutdown_tasks(loop)
raise
async def _handle_event(
self,
event: domain.AbstractEvent,
) -> None:
"""Асинхронная обработка события и следующих за ним."""
pending: PendingTasks = self._create_tasks([event])
while pending:
done, pending = await asyncio.wait(pending, return_when=asyncio.FIRST_COMPLETED)
for task in done:
pending |= self._create_tasks(task.result())
def _create_tasks(self, events: list[domain.AbstractEvent]) -> set[FutureEvent]:
"""Создает задания для событий."""
return {asyncio.create_task(self._handle_one_command(event)) for event in events}
async def _handle_one_command(self, event: domain.AbstractEvent) -> list[domain.AbstractEvent]:
"""Обрабатывает одно событие и помечает его сделанным."""
self._logger(str(event))
async with self._uow_factory() as repo:
return await self._event_handler.handle_event(event, repo)
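# Wiring sketch in comments (the mapper, handler and seed_event names are hypothetical, not part
# of this module): the bus is built from a zero-argument UoW factory plus a domain event handler
# and is then driven with a single seed event:
#
#     bus = EventBus(uow_factory=lambda: UoW(mapper), event_handler=handler)
#     bus.handle_event(seed_event)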
def _shutdown_tasks(loop: asyncio.AbstractEventLoop) -> None:
"""Завершение в случае ошибки.
После ошибки происходит отмена всех заданий, чтобы не захламлять сообщение об ошибке множеством
сообщений, о том, что результат выполнения задания не был awaited.
Идея кода позаимствована из реализации asyncio.app.
"""
to_cancel = asyncio.all_tasks(loop)
if not to_cancel:
return
for task in to_cancel:
task.cancel()
loop.run_until_complete(asyncio.gather(*to_cancel, loop=loop, return_exceptions=True))
for canceled_task in to_cancel:
if canceled_task.cancelled():
continue
if canceled_task.exception() is not None:
loop.call_exception_handler(
{
"message": "unhandled EventBus exception",
"exception": canceled_task.exception(),
"task": canceled_task,
},
)
loop.run_until_complete(loop.shutdown_asyncgens())
loop.run_until_complete(loop.shutdown_default_executor())
| 32.686567 | 99 | 0.663242 | 473 | 4,380 | 5.938689 | 0.40592 | 0.040584 | 0.017088 | 0.02848 | 0.055536 | 0.055536 | 0.032752 | 0.032752 | 0 | 0 | 0 | 0 | 0.241781 | 4,380 | 133 | 100 | 32.932331 | 0.84583 | 0.136301 | 0 | 0.130952 | 0 | 0 | 0.016695 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.059524 | false | 0 | 0.071429 | 0 | 0.22619 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2d04272c0dbb7d2f3b68496a3df2f8724ac5e827 | 1,372 | py | Python | osh/builtin_misc_test.py | Schweinepriester/oil | 8b0e5c58a825223341896064d63a95c8b57a9c05 | [
"Apache-2.0"
] | 2,209 | 2016-11-20T10:32:58.000Z | 2022-03-31T20:51:27.000Z | osh/builtin_misc_test.py | Schweinepriester/oil | 8b0e5c58a825223341896064d63a95c8b57a9c05 | [
"Apache-2.0"
] | 1,074 | 2016-12-07T05:02:48.000Z | 2022-03-22T02:09:11.000Z | osh/builtin_misc_test.py | Schweinepriester/oil | 8b0e5c58a825223341896064d63a95c8b57a9c05 | [
"Apache-2.0"
] | 147 | 2016-12-11T04:13:28.000Z | 2022-03-27T14:50:00.000Z | #!/usr/bin/env python2
"""
builtin_misc_test.py: Tests for builtin_misc.py
"""
from __future__ import print_function
import unittest
from core import pyutil
from frontend import flag_def # side effect: flags are defined!
_ = flag_def
from osh import split
from osh import builtin_misc # module under test
class BuiltinTest(unittest.TestCase):
def testAppendParts(self):
# allow_escape is True by default, but False when the user passes -r.
CASES = [
(['Aa', 'b', ' a b'], 100, 'Aa b \\ a\\ b'),
(['a', 'b', 'c'], 3, 'a b c '),
]
for expected_parts, max_results, line in CASES:
sp = split.IfsSplitter(split.DEFAULT_IFS, '')
spans = sp.Split(line, True)
print('--- %r' % line)
for span in spans:
print(' %s %s' % span)
parts = []
builtin_misc._AppendParts(line, spans, max_results, False, parts)
strs = [buf.getvalue() for buf in parts]
self.assertEqual(expected_parts, strs)
print('---')
def testPrintHelp(self):
# Localization: Optionally use GNU gettext()? For help only. Might be
# useful in parser error messages too. Good thing both kinds of code are
# generated? Because I don't want to deal with a C toolchain for it.
loader = pyutil.GetResourceLoader()
builtin_misc.Help([], loader)
if __name__ == '__main__':
unittest.main()
| 28 | 77 | 0.648688 | 190 | 1,372 | 4.531579 | 0.573684 | 0.063879 | 0.010453 | 0.011614 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.004744 | 0.231778 | 1,372 | 48 | 78 | 28.583333 | 0.812144 | 0.290087 | 0 | 0 | 0 | 0 | 0.055208 | 0 | 0 | 0 | 0 | 0 | 0.034483 | 1 | 0.068966 | false | 0 | 0.206897 | 0 | 0.310345 | 0.137931 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2d060e9092d4d4c7bf4bc0ec5921fae018518af1 | 3,670 | py | Python | src/python/dart/service/message.py | RetailMeNotSandbox/dart | 58a05f56c04fadd6741501262d92aeb143cd2f2e | [
"MIT"
] | 18 | 2016-03-03T19:10:21.000Z | 2021-07-14T22:37:35.000Z | src/python/dart/service/message.py | RetailMeNotSandbox/dart | 58a05f56c04fadd6741501262d92aeb143cd2f2e | [
"MIT"
] | 62 | 2016-04-11T15:17:23.000Z | 2017-09-08T17:18:53.000Z | src/python/dart/service/message.py | RetailMeNotSandbox/dart | 58a05f56c04fadd6741501262d92aeb143cd2f2e | [
"MIT"
] | 15 | 2016-03-03T15:38:34.000Z | 2019-03-27T19:33:08.000Z | import os
import boto3
from boto.regioninfo import RegionInfo
from sqlalchemy import text
from dart.model.orm import MessageDao
from dart.context.database import db
from dart.service.patcher import patch_difference
class MessageService(object):
    def __init__(self, ecs_task_status_override=None, region='us-east-1'):
        self._ecs_task_status_override = ecs_task_status_override
        self._region = RegionInfo(self, region, 'ecs.%s.amazonaws.com' % region) if region else None
        self._conn = None
    @staticmethod
    def save_message(message_id, message_body, state):
        message_dao = MessageDao()
        message_dao.id = message_id
        message_dao.message_body = message_body
        message_dao.instance_id = os.environ['DART_INSTANCE_ID']
        message_dao.container_id = os.environ['DART_CONTAINER_ID']
        message_dao.ecs_cluster = os.environ['DART_ECS_CLUSTER']
        message_dao.ecs_container_instance_arn = os.environ['DART_ECS_CONTAINER_INSTANCE_ARN']
        message_dao.ecs_family = os.environ['DART_ECS_FAMILY']
        message_dao.ecs_task_arn = os.environ['DART_ECS_TASK_ARN']
        message_dao.state = state
        db.session.add(message_dao)
        db.session.commit()
        return message_dao.to_model()
    def get_batch_job_status(self, message):
        """ :type message: dart.model.message.Message """
        if self._ecs_task_status_override:
            if self._ecs_task_status_override == 'passthrough':
                return 'RUNNING' if message.state == 'RUNNING' else 'STOPPED'
            return self._ecs_task_status_override
        return self.get_batch_job_status_direct(message.batch_job_id)
    # http://boto3.readthedocs.io/en/latest/reference/services/batch.html#Batch.Client.describe_jobs
    def get_batch_job_status_direct(self, job_id):
        if not job_id:
            return None  # we commented out the call to this flow from broker.py:receive_message().
        result = self.conn.describe_jobs(jobs=[job_id])
        jobs = result['jobs']
        if len(jobs) == 0:
            return None
        # batch possible statuses: 'SUBMITTED'|'PENDING'|'RUNNABLE'|'STARTING'|'RUNNING'|'SUCCEEDED'|'FAILED'
        batch_status = jobs[0]['status']
        # we translate the batch status to RUNNING|COMPLETED|FAILED
        # see dart.model.message.MessageState and dart.message.broker
        if batch_status == 'SUBMITTED':
            return 'QUEUED'
        elif batch_status in ('PENDING', 'RUNNABLE', 'STARTING'):
            return 'PENDING'
        elif batch_status == 'RUNNING':
            return 'RUNNING'
        elif batch_status == 'SUCCEEDED':
            return 'COMPLETED'
        else:
            return 'FAILED'
        return None
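    # The if/elif chain above is equivalent to a lookup table; a sketch of that alternative
    # (not part of the original code):
    #
    #     _BATCH_TO_DART_STATE = {'SUBMITTED': 'QUEUED', 'PENDING': 'PENDING', 'RUNNABLE': 'PENDING',
    #                             'STARTING': 'PENDING', 'RUNNING': 'RUNNING', 'SUCCEEDED': 'COMPLETED'}
    #     return _BATCH_TO_DART_STATE.get(batch_status, 'FAILED')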
    @staticmethod
    def get_message(message_id, raise_when_missing=True):
        message_dao = MessageDao.query.get(message_id)
        if not message_dao and raise_when_missing:
            raise Exception('message with id=%s not found' % message_id)
        return message_dao.to_model() if message_dao else None
    @staticmethod
    def update_message_state(message, state):
        """ :type message: dart.model.message.Message """
        source_message = message.copy()
        message.state = state
        return patch_difference(MessageDao, source_message, message)
    @staticmethod
    def purge_old_messages():
        db.session.execute(text(""" DELETE FROM message WHERE created < (NOW() - INTERVAL '5 days') """))
        db.session.commit()
    @property
    def conn(self):
        if self._conn:
            return self._conn
        self._conn = boto3.client('batch')
        return self._conn
| 39.462366 | 109 | 0.672207 | 459 | 3,670 | 5.11329 | 0.283224 | 0.068172 | 0.033234 | 0.053686 | 0.149127 | 0.051981 | 0 | 0 | 0 | 0 | 0 | 0.002482 | 0.231608 | 3,670 | 92 | 110 | 39.891304 | 0.829787 | 0.128338 | 0 | 0.150685 | 0 | 0 | 0.114744 | 0.009745 | 0 | 0 | 0 | 0 | 0 | 1 | 0.109589 | false | 0.013699 | 0.09589 | 0 | 0.438356 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2d06a010a220a8f3363a231e8addbcd8ff9894e9 | 770 | py | Python | setPixmap.py | EvaGalois/LinsImgPro | daed9bffcf5bea6bf41f36d21f773be18374f7bc | [
"MIT"
] | 1 | 2020-05-19T08:58:58.000Z | 2020-05-19T08:58:58.000Z | setPixmap.py | EvaGalois/LinsImgPro | daed9bffcf5bea6bf41f36d21f773be18374f7bc | [
"MIT"
] | null | null | null | setPixmap.py | EvaGalois/LinsImgPro | daed9bffcf5bea6bf41f36d21f773be18374f7bc | [
"MIT"
] | null | null | null | import sys
from PyQt5.QtWidgets import QApplication, QWidget, QHBoxLayout, QLabel
from PyQt5.QtGui import QPixmap
class Example(QWidget):
    def __init__(self):
        super().__init__()
        self.initUI()
    def initUI(self):
        hbox = QHBoxLayout(self)  # create the layout
        lb1 = QLabel(self)  # instantiate a QLabel
        lb1.setPixmap(QPixmap('./inputImgs/test2.jpg'))  # embed the image in the QLabel instance
        hbox.addWidget(lb1)  # add the QLabel instance to the layout
        self.setLayout(hbox)  # set this layout on self
        self.move(300, 300)
        self.setWindowTitle('像素图控件')  # window title text: "pixmap widget"
        self.show()
    # def showDate(self, date):
    #     self.lb1.setText(date.toString())
if __name__ == '__main__':
    app = QApplication(sys.argv)
    ex = Example()
sys.exit(app.exec_()) | 24.83871 | 74 | 0.618182 | 91 | 770 | 5.043956 | 0.571429 | 0.039216 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.022847 | 0.261039 | 770 | 31 | 75 | 24.83871 | 0.783831 | 0.171429 | 0 | 0 | 0 | 0 | 0.053883 | 0.033281 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0.15 | 0 | 0.3 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2d075408ad95df9320ec62cc02811a6bf1787742 | 3,292 | py | Python | tests/test_exporter.py | coderanger/celery-local-exporter | 3db869b7e0ec09309834b8835c619edbe8898504 | [
"Apache-2.0"
] | 3 | 2020-06-30T22:26:18.000Z | 2021-09-27T23:52:11.000Z | tests/test_exporter.py | coderanger/celery-local-exporter | 3db869b7e0ec09309834b8835c619edbe8898504 | [
"Apache-2.0"
] | null | null | null | tests/test_exporter.py | coderanger/celery-local-exporter | 3db869b7e0ec09309834b8835c619edbe8898504 | [
"Apache-2.0"
] | null | null | null | import os
import os.path
import subprocess
import sys
import time
import pytest
import requests
@pytest.fixture
def launch_worker(tmp_path_factory):
    procs = []
    def _inner(pool="threads", *args):
        if procs:
            raise ValueError("already started")
        os.environ["DATA_FOLDER_IN"] = str(tmp_path_factory.mktemp("data_in"))
        os.environ["DATA_FOLDER_OUT"] = str(
            tmp_path_factory.mktemp("data_out")
        )
        os.environ["RESULTS"] = str(tmp_path_factory.mktemp("results"))
        proc = subprocess.Popen(
            [
                sys.executable,
                "-m",
                "celery",
                "-A",
                "app1",
                "worker",
                "-l",
                "debug",
                "-P",
                pool,
            ]
            + list(args),
            cwd=os.path.dirname(os.path.abspath(__file__)),
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
        procs.append(proc)
        # Wait a second to let it start up.
        time.sleep(1)
        # For future calls to run(), set them up to deliver to the inbox.
        os.environ["DATA_FOLDER_OUT"] = os.environ["DATA_FOLDER_IN"]
        return proc
    yield _inner
    for proc in procs:
        proc.terminate()
        proc.wait()
        out, err = proc.communicate()
        print(out.decode())
        print(err.decode())
        proc.stdout.close()
        proc.stderr.close()
@pytest.fixture
def run():
    def _inner(code):
        read, write = os.pipe()
        os.write(write, code.encode())
        os.close(write)
        return subprocess.check_call(
            [sys.executable],
            cwd=os.path.dirname(os.path.abspath(__file__)),
            stdin=read,
        )
    return _inner
def test_starting(launch_worker):
    w = launch_worker()
    assert w.poll() is None
    r = requests.get("http://localhost:9000/")
    assert "celery_task_execution_time" in r.text
def test_run_task_add(launch_worker, run):
    w = launch_worker()
    assert w.poll() is None
    run(
        """
import app1
app1.add.delay(1, 1).wait(60)
"""
    )
    r = requests.get("http://localhost:9000/")
    assert (
        'celery_task_postrun_count_total{state="SUCCESS",task="app1.add"} 1.0'
        in r.text
    )
def test_run_task_add_twice(launch_worker, run):
    w = launch_worker()
    assert w.poll() is None
    run(
        """
import app1
x = app1.add.delay(1, 1)
y = app1.add.delay(1, 2)
x.wait(60)
y.wait(60)
"""
    )
    r = requests.get("http://localhost:9000/")
    assert (
        'celery_task_postrun_count_total{state="SUCCESS",task="app1.add"} 2.0'
        in r.text
    )
def test_run_task_sleep(launch_worker, run):
    w = launch_worker()
    assert w.poll() is None
    run(
        """
import app1
app1.sleep.delay(5).wait(60)
"""
    )
    r = requests.get("http://localhost:9000/")
    assert (
        'celery_task_postrun_count_total{state="SUCCESS",task="app1.sleep"} 1.0'
        in r.text
    )
    assert (
        'celery_task_execution_time_bucket{le="5.0",task="app1.sleep"} 0.0'
        in r.text
    )
    assert (
        'celery_task_execution_time_bucket{le="7.5",task="app1.sleep"} 1.0'
        in r.text
    )
| 23.683453 | 80 | 0.559235 | 416 | 3,292 | 4.247596 | 0.278846 | 0.061121 | 0.054329 | 0.022637 | 0.55631 | 0.473118 | 0.442558 | 0.442558 | 0.332201 | 0.306735 | 0 | 0.025877 | 0.307412 | 3,292 | 138 | 81 | 23.855072 | 0.749123 | 0.029465 | 0 | 0.278846 | 0 | 0 | 0.196524 | 0.114305 | 0 | 0 | 0 | 0 | 0.096154 | 1 | 0.076923 | false | 0 | 0.067308 | 0 | 0.173077 | 0.019231 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2d07733d95d25265a8538f87085014c15240fde9 | 5,787 | py | Python | third_party/ros_aarch64/lib/python2.7/dist-packages/novatel_msgs/msg/_Ack.py | silverland79/apollo1.0 | 6e725e8dd5013b769efa18f43e5ae675f4847fbd | [
"Apache-2.0"
] | 2 | 2018-01-29T03:10:39.000Z | 2020-12-08T09:08:41.000Z | third_party/ros_aarch64/lib/python2.7/dist-packages/novatel_msgs/msg/_Ack.py | silverland79/apollo1.0 | 6e725e8dd5013b769efa18f43e5ae675f4847fbd | [
"Apache-2.0"
] | null | null | null | third_party/ros_aarch64/lib/python2.7/dist-packages/novatel_msgs/msg/_Ack.py | silverland79/apollo1.0 | 6e725e8dd5013b769efa18f43e5ae675f4847fbd | [
"Apache-2.0"
] | 3 | 2018-01-29T12:22:56.000Z | 2020-12-08T09:08:46.000Z | # This Python file uses the following encoding: utf-8
"""autogenerated by genpy from novatel_msgs/Ack.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class Ack(genpy.Message):
_md5sum = "f84607dc6beaf1cb6772d23af7221bdf"
_type = "novatel_msgs/Ack"
_has_header = False #flag to mark the presence of a Header object
_full_text = """uint16 transaction
uint16 id
uint16 RESPONSE_NOT_APPLICABLE=0
uint16 RESPONSE_ACCEPTED=1
uint16 RESPONSE_ACCEPTED_TOO_LONG=2
uint16 RESPONSE_ACCEPTED_TOO_SHORT=3
uint16 RESPONSE_PARAM_ERROR=4
uint16 RESPONSE_NOT_APPLICABLE_IN_CURRENT_STATE=5
uint16 RESPONSE_DATA_NOT_AVAILABLE=6
uint16 RESPONSE_MESSAGE_START_ERROR=7
uint16 RESPONSE_MESSAGE_END_ERROR=8
uint16 RESPONSE_BYTE_COUNT_ERROR=9
uint16 RESPONSE_CHECKSUM_ERROR=10
uint16 response_code
uint8 PARAMS_NO_CHANGE=0
uint8 PARAMS_CHANGE=1
uint8 params_status
uint8[32] error_parameter_name
"""
# Pseudo-constants
RESPONSE_NOT_APPLICABLE = 0
RESPONSE_ACCEPTED = 1
RESPONSE_ACCEPTED_TOO_LONG = 2
RESPONSE_ACCEPTED_TOO_SHORT = 3
RESPONSE_PARAM_ERROR = 4
RESPONSE_NOT_APPLICABLE_IN_CURRENT_STATE = 5
RESPONSE_DATA_NOT_AVAILABLE = 6
RESPONSE_MESSAGE_START_ERROR = 7
RESPONSE_MESSAGE_END_ERROR = 8
RESPONSE_BYTE_COUNT_ERROR = 9
RESPONSE_CHECKSUM_ERROR = 10
PARAMS_NO_CHANGE = 0
PARAMS_CHANGE = 1
__slots__ = ['transaction','id','response_code','params_status','error_parameter_name']
_slot_types = ['uint16','uint16','uint16','uint8','uint8[32]']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
transaction,id,response_code,params_status,error_parameter_name
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(Ack, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.transaction is None:
self.transaction = 0
if self.id is None:
self.id = 0
if self.response_code is None:
self.response_code = 0
if self.params_status is None:
self.params_status = 0
if self.error_parameter_name is None:
self.error_parameter_name = chr(0)*32
else:
self.transaction = 0
self.id = 0
self.response_code = 0
self.params_status = 0
self.error_parameter_name = chr(0)*32
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self
buff.write(_struct_3HB.pack(_x.transaction, _x.id, _x.response_code, _x.params_status))
_x = self.error_parameter_name
# - if encoded as a list instead, serialize as bytes instead of string
if type(_x) in [list, tuple]:
buff.write(_struct_32B.pack(*_x))
else:
buff.write(_struct_32s.pack(_x))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
end = 0
_x = self
start = end
end += 7
(_x.transaction, _x.id, _x.response_code, _x.params_status,) = _struct_3HB.unpack(str[start:end])
start = end
end += 32
self.error_parameter_name = str[start:end]
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self
buff.write(_struct_3HB.pack(_x.transaction, _x.id, _x.response_code, _x.params_status))
_x = self.error_parameter_name
# - if encoded as a list instead, serialize as bytes instead of string
if type(_x) in [list, tuple]:
buff.write(_struct_32B.pack(*_x))
else:
buff.write(_struct_32s.pack(_x))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
end = 0
_x = self
start = end
end += 7
(_x.transaction, _x.id, _x.response_code, _x.params_status,) = _struct_3HB.unpack(str[start:end])
start = end
end += 32
self.error_parameter_name = str[start:end]
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
_struct_32B = struct.Struct("<32B")
_struct_32s = struct.Struct("<32s")
_struct_3HB = struct.Struct("<3HB")
| 34.041176 | 145 | 0.693624 | 823 | 5,787 | 4.63548 | 0.227218 | 0.044037 | 0.047182 | 0.040367 | 0.546265 | 0.469201 | 0.437746 | 0.404194 | 0.383748 | 0.328178 | 0 | 0.031501 | 0.204597 | 5,787 | 169 | 146 | 34.242604 | 0.797306 | 0.248834 | 0 | 0.478261 | 1 | 0 | 0.192693 | 0.075931 | 0 | 0 | 0.002388 | 0 | 0 | 1 | 0.052174 | false | 0 | 0.026087 | 0 | 0.278261 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
2d079a19d871b67d7ba2ce209923cccc01b9ff8d | 242 | py | Python | ecart/serializer.py | micael-grilo/E-Cart | 76e86b4c7ea5bd2becda23ef8c69470c86630c5e | [
"MIT"
] | 5 | 2016-09-20T21:33:29.000Z | 2018-10-10T06:07:45.000Z | ecart/serializer.py | micael-grilo/E-Cart | 76e86b4c7ea5bd2becda23ef8c69470c86630c5e | [
"MIT"
] | 1 | 2016-05-03T07:54:54.000Z | 2016-05-03T13:16:48.000Z | ecart/serializer.py | micael-grilo/E-Cart | 76e86b4c7ea5bd2becda23ef8c69470c86630c5e | [
"MIT"
] | 3 | 2016-09-18T14:54:49.000Z | 2020-01-08T18:19:51.000Z | import json
class Serializer(object):
"""docstring for Serializer"""
@staticmethod
def dumps(data_obj):
return json.dumps(data_obj)
@staticmethod
def loads(data_string):
return json.loads(data_string)
| 16.133333 | 38 | 0.665289 | 28 | 242 | 5.607143 | 0.535714 | 0.191083 | 0.152866 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.239669 | 242 | 14 | 39 | 17.285714 | 0.853261 | 0.099174 | 0 | 0.25 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.25 | false | 0 | 0.125 | 0.25 | 0.75 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 4 |
2d092a4e4e1af0db3f5776c271d8aede971fce4f | 17,330 | py | Python | tests/webservice/test_client.py | 3D-e-Chem/python-modified-tanimoto | 618cc4ae3cb55d9cba2cc297e9c05212353b218e | [
"Apache-2.0"
] | 8 | 2017-05-25T19:40:37.000Z | 2021-06-12T06:59:26.000Z | tests/webservice/test_client.py | 3D-e-Chem/kripodb | 618cc4ae3cb55d9cba2cc297e9c05212353b218e | [
"Apache-2.0"
] | 44 | 2016-02-05T14:02:57.000Z | 2019-07-29T07:58:20.000Z | tests/webservice/test_client.py | 3D-e-Chem/python-modified-tanimoto | 618cc4ae3cb55d9cba2cc297e9c05212353b218e | [
"Apache-2.0"
] | 1 | 2016-05-05T08:47:49.000Z | 2016-05-05T08:47:49.000Z | # Copyright 2016 Netherlands eScience Center
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import pytest
import requests_mock
from rdkit.Chem.AllChem import Mol
from requests import HTTPError
from kripodb.webservice.client import WebserviceClient, IncompleteFragments, IncompletePharmacophores
from .test_server import expected_fragments_info, expected_fragments_info_with_mol
from ..test_pharmacophores import example1_phar, example3_phar
@pytest.fixture
def base_url():
return 'http://localhost:8084/kripo'
@pytest.fixture
def client(base_url):
return WebserviceClient(base_url)
def test_similar_fragments(base_url, client):
with requests_mock.mock() as m:
expected = [
{'query_frag_id': '3j7u_NDP_frag24', 'hit_frag_id': '3j7u_NDP_frag23', 'score': 0.8991},
]
url = base_url + '/fragments/3j7u_NDP_frag24/similar?cutoff=0.75&limit=1'
m.get(url, json=expected)
response = client.similar_fragments(fragment_id='3j7u_NDP_frag24', cutoff=0.75, limit=1)
assert response == expected
def test_fragments_by_id(base_url, client):
with requests_mock.mock() as m:
expected = [
{'smiles': '[*]C1OC(COP(=O)([O-])OP(=O)([O-])OCC2OC(N3C=CCC(C(N)=O)=C3)C(O)C2O)C(O)C1[*]',
'pdb_code': '3j7u',
'pdb_title': 'Catalase structure determined by electron crystallography of thin 3D crystals',
'atom_codes': 'PA,O1A,O2A,O5B,C5B,C4B,O4B,C3B,O3B,C2B,C1B,O3,PN,O1N,O2N,O5D,C5D,C4D,O4D,C3D,O3D,C2D,O2D,C1D,N1N,C2N,C3N,C7N,O7N,N7N,C4N,C5N,C6N',
'uniprot_acc': 'P00432',
'mol': '3j7u_NDP_frag24\n RDKit 3D\n\n 35 37 0 0 0 0 0 0 0 0999 V2000\n -15.1410 -11.1250 -79.4200 P 0 0 0 0 0 0 0 0 0 0 0 0\n -14.6900 -10.9960 -80.8600 O 0 0 0 0 0 0 0 0 0 0 0 0\n -16.5040 -11.6890 -79.0770 O 0 0 0 0 0 0 0 0 0 0 0 0\n -14.9990 -9.6870 -78.7060 O 0 0 0 0 0 0 0 0 0 0 0 0\n -15.1870 -8.4550 -79.4050 C 0 0 0 0 0 0 0 0 0 0 0 0\n -14.6700 -7.3160 -78.5260 C 0 0 0 0 0 0 0 0 0 0 0 0\n -13.2400 -7.2390 -78.5880 O 0 0 0 0 0 0 0 0 0 0 0 0\n -15.2130 -5.9510 -78.9460 C 0 0 0 0 0 0 0 0 0 0 0 0\n -16.1600 -5.4570 -77.9880 O 0 0 0 0 0 0 0 0 0 0 0 0\n -14.0000 -5.0420 -79.0650 C 0 0 0 0 0 0 0 0 0 0 0 0\n -14.1790 -3.8250 -78.3260 R 0 0 0 0 0 1 0 0 0 0 0 0\n -12.8370 -5.8690 -78.5180 C 0 0 0 0 0 0 0 0 0 0 0 0\n -11.5470 -5.6210 -79.2410 R 0 0 0 0 0 1 0 0 0 0 0 0\n -14.0270 -11.9960 -78.6490 O 0 0 0 0 0 0 0 0 0 0 0 0\n -14.1810 -13.5930 -78.4870 P 0 0 0 0 0 0 0 0 0 0 0 0\n -14.5480 -14.2030 -79.8230 O 0 0 0 0 0 0 0 0 0 0 0 0\n -15.0330 -13.8500 -77.2690 O 0 0 0 0 0 0 0 0 0 0 0 0\n -12.6800 -14.0730 -78.1770 O 0 0 0 0 0 0 0 0 0 0 0 0\n -12.1840 -14.2350 -76.8490 C 0 0 0 0 0 0 0 0 0 0 0 0\n -11.1340 -13.1670 -76.6050 C 0 0 0 0 0 0 0 0 0 0 0 0\n -11.6880 -11.8550 -76.6770 O 0 0 0 0 0 0 0 0 0 0 0 0\n -10.5070 -13.2750 -75.2350 C 0 0 0 0 0 0 0 0 0 0 0 0\n -9.4070 -14.1780 -75.3000 O 0 0 0 0 0 0 0 0 0 0 0 0\n -10.0970 -11.8400 -74.9280 C 0 0 0 0 0 0 0 0 0 0 0 0\n -8.6920 -11.6460 -75.1050 O 0 0 0 0 0 0 0 0 0 0 0 0\n -10.8280 -10.9760 -75.9460 C 0 0 0 0 0 0 0 0 0 0 0 0\n -11.5890 -9.8540 -75.3660 N 0 0 0 0 0 0 0 0 0 0 0 0\n -12.7860 -10.0630 -74.7850 C 0 0 0 0 0 0 0 0 0 0 0 0\n -13.5340 -9.0090 -74.2510 C 0 0 0 0 0 0 0 0 0 0 0 0\n -14.8620 -9.2740 -73.5990 C 0 0 0 0 0 0 0 0 0 0 0 0\n -15.1890 -10.4300 -73.3940 O 0 0 0 0 0 0 0 0 0 0 0 0\n -15.6600 -8.2650 -73.2400 N 0 0 0 0 0 0 0 0 0 0 0 0\n -13.0230 -7.5870 -74.3390 C 0 0 0 0 0 0 0 0 0 0 0 0\n -11.7130 -7.4960 -74.9740 C 0 0 0 0 0 0 0 0 0 0 0 0\n -11.0640 -8.6200 -75.4710 C 0 0 0 0 0 0 0 0 0 0 0 0\n 1 2 2 0\n 1 3 1 0\n 1 4 1 0\n 1 14 1 0\n 4 5 1 0\n 5 6 1 0\n 6 7 1 0\n 6 8 1 0\n 7 12 1 0\n 8 9 1 0\n 8 10 1 0\n 10 11 1 0\n 10 12 1 0\n 12 13 1 0\n 14 15 1 0\n 15 16 2 0\n 15 17 1 0\n 15 18 1 0\n 18 19 1 0\n 19 20 1 0\n 20 21 1 0\n 20 22 1 0\n 21 26 1 0\n 22 23 1 0\n 22 24 1 0\n 24 25 1 0\n 24 26 1 0\n 26 27 1 0\n 27 28 1 0\n 27 35 1 0\n 28 29 2 0\n 29 30 1 0\n 29 33 1 0\n 30 31 2 0\n 30 32 1 0\n 33 34 1 0\n 34 35 2 0\nM CHG 2 3 -1 17 -1\nM END\n',
'prot_chain': 'A', 'het_seq_nr': 602, 'het_code': 'NDP', 'prot_name': 'Catalase',
'ec_number': '1.11.1.6', 'frag_nr': 24, 'frag_id': '3j7u_NDP_frag24', 'rowid': 7059,
'uniprot_name': 'Catalase', 'nr_r_groups': 2, 'het_chain': 'A', 'hash_code': '6ef5a609fb192dba'}
]
url = base_url + '/fragments?fragment_ids=3j7u_NDP_frag24,3j7u_NDP_frag23'
m.get(url, json=expected)
response = client.fragments_by_id(fragment_ids=['3j7u_NDP_frag24', '3j7u_NDP_frag23'])
assert isinstance(response[0]['mol'], Mol)
del response[0]['mol']
del expected[0]['mol']
assert response == expected
def test_fragments_by_pdb_codes(base_url, client):
with requests_mock.mock() as m:
molblock = '3j7u_NDP_frag24\n RDKit 3D\n\n 35 37 0 0 0 0 0 0 0 0999 V2000\n -15.1410 -11.1250 -79.4200 P 0 0 0 0 0 0 0 0 0 0 0 0\n -14.6900 -10.9960 -80.8600 O 0 0 0 0 0 0 0 0 0 0 0 0\n -16.5040 -11.6890 -79.0770 O 0 0 0 0 0 0 0 0 0 0 0 0\n -14.9990 -9.6870 -78.7060 O 0 0 0 0 0 0 0 0 0 0 0 0\n -15.1870 -8.4550 -79.4050 C 0 0 0 0 0 0 0 0 0 0 0 0\n -14.6700 -7.3160 -78.5260 C 0 0 0 0 0 0 0 0 0 0 0 0\n -13.2400 -7.2390 -78.5880 O 0 0 0 0 0 0 0 0 0 0 0 0\n -15.2130 -5.9510 -78.9460 C 0 0 0 0 0 0 0 0 0 0 0 0\n -16.1600 -5.4570 -77.9880 O 0 0 0 0 0 0 0 0 0 0 0 0\n -14.0000 -5.0420 -79.0650 C 0 0 0 0 0 0 0 0 0 0 0 0\n -14.1790 -3.8250 -78.3260 R 0 0 0 0 0 1 0 0 0 0 0 0\n -12.8370 -5.8690 -78.5180 C 0 0 0 0 0 0 0 0 0 0 0 0\n -11.5470 -5.6210 -79.2410 R 0 0 0 0 0 1 0 0 0 0 0 0\n -14.0270 -11.9960 -78.6490 O 0 0 0 0 0 0 0 0 0 0 0 0\n -14.1810 -13.5930 -78.4870 P 0 0 0 0 0 0 0 0 0 0 0 0\n -14.5480 -14.2030 -79.8230 O 0 0 0 0 0 0 0 0 0 0 0 0\n -15.0330 -13.8500 -77.2690 O 0 0 0 0 0 0 0 0 0 0 0 0\n -12.6800 -14.0730 -78.1770 O 0 0 0 0 0 0 0 0 0 0 0 0\n -12.1840 -14.2350 -76.8490 C 0 0 0 0 0 0 0 0 0 0 0 0\n -11.1340 -13.1670 -76.6050 C 0 0 0 0 0 0 0 0 0 0 0 0\n -11.6880 -11.8550 -76.6770 O 0 0 0 0 0 0 0 0 0 0 0 0\n -10.5070 -13.2750 -75.2350 C 0 0 0 0 0 0 0 0 0 0 0 0\n -9.4070 -14.1780 -75.3000 O 0 0 0 0 0 0 0 0 0 0 0 0\n -10.0970 -11.8400 -74.9280 C 0 0 0 0 0 0 0 0 0 0 0 0\n -8.6920 -11.6460 -75.1050 O 0 0 0 0 0 0 0 0 0 0 0 0\n -10.8280 -10.9760 -75.9460 C 0 0 0 0 0 0 0 0 0 0 0 0\n -11.5890 -9.8540 -75.3660 N 0 0 0 0 0 0 0 0 0 0 0 0\n -12.7860 -10.0630 -74.7850 C 0 0 0 0 0 0 0 0 0 0 0 0\n -13.5340 -9.0090 -74.2510 C 0 0 0 0 0 0 0 0 0 0 0 0\n -14.8620 -9.2740 -73.5990 C 0 0 0 0 0 0 0 0 0 0 0 0\n -15.1890 -10.4300 -73.3940 O 0 0 0 0 0 0 0 0 0 0 0 0\n -15.6600 -8.2650 -73.2400 N 0 0 0 0 0 0 0 0 0 0 0 0\n -13.0230 -7.5870 -74.3390 C 0 0 0 0 0 0 0 0 0 0 0 0\n -11.7130 -7.4960 -74.9740 C 0 0 0 0 0 0 0 0 0 0 0 0\n -11.0640 -8.6200 -75.4710 C 0 0 0 0 0 0 0 0 0 0 0 0\n 1 2 2 0\n 1 3 1 0\n 1 4 1 0\n 1 14 1 0\n 4 5 1 0\n 5 6 1 0\n 6 7 1 0\n 6 8 1 0\n 7 12 1 0\n 8 9 1 0\n 8 10 1 0\n 10 11 1 0\n 10 12 1 0\n 12 13 1 0\n 14 15 1 0\n 15 16 2 0\n 15 17 1 0\n 15 18 1 0\n 18 19 1 0\n 19 20 1 0\n 20 21 1 0\n 20 22 1 0\n 21 26 1 0\n 22 23 1 0\n 22 24 1 0\n 24 25 1 0\n 24 26 1 0\n 26 27 1 0\n 27 28 1 0\n 27 35 1 0\n 28 29 2 0\n 29 30 1 0\n 29 33 1 0\n 30 31 2 0\n 30 32 1 0\n 33 34 1 0\n 34 35 2 0\nM CHG 2 3 -1 17 -1\nM END\n'
m.get(base_url + '/fragments?pdb_codes=3j7u', json=[{'pdb_code': '3j7u', 'mol': molblock}])
m.get(base_url + '/fragments?pdb_codes=3wxm', json=[{'pdb_code': '3wxm', 'mol': molblock}])
response = client.fragments_by_pdb_codes(pdb_codes=['3j7u', '3wxm'], chunk_size=1)
assert isinstance(response[0]['mol'], Mol)
assert isinstance(response[1]['mol'], Mol)
del response[0]['mol']
del response[1]['mol']
expected = [{'pdb_code': '3j7u'}, {'pdb_code': '3wxm'}]
assert response == expected
def test_fragments_by_id_withmolisnone(base_url, client):
with requests_mock.mock() as m:
expected = [
{'smiles': None,
'pdb_code': '3j7u',
'pdb_title': 'Catalase structure determined by electron crystallography of thin 3D crystals',
'atom_codes': 'PA,O1A,O2A,O5B,C5B,C4B,O4B,C3B,O3B,C2B,C1B,O3,PN,O1N,O2N,O5D,C5D,C4D,O4D,C3D,O3D,C2D,O2D,C1D,N1N,C2N,C3N,C7N,O7N,N7N,C4N,C5N,C6N',
'uniprot_acc': 'P00432',
'mol': None,
'prot_chain': 'A', 'het_seq_nr': 602, 'het_code': 'NDP', 'prot_name': 'Catalase',
'ec_number': '1.11.1.6', 'frag_nr': 24, 'frag_id': '3j7u_NDP_frag24', 'rowid': 7059,
'uniprot_name': 'Catalase', 'nr_r_groups': 2, 'het_chain': 'A', 'hash_code': '6ef5a609fb192dba'}
]
url = base_url + '/fragments?fragment_ids=3j7u_NDP_frag24,3j7u_NDP_frag23'
m.get(url, json=expected)
response = client.fragments_by_id(fragment_ids=['3j7u_NDP_frag24', '3j7u_NDP_frag23'])
assert response == expected
def test_fragments_by_id___withsinglechunk_withsomenotfound(base_url, client, expected_fragments_info_with_mol):
with requests_mock.mock() as m:
url = base_url + '/fragments?fragment_ids=3j7u_NDP_frag24,foo'
molblock = '3j7u_NDP_frag24\n RDKit 3D\n\n 35 37 0 0 0 0 0 0 0 0999 V2000\n -15.1410 -11.1250 -79.4200 P 0 0 0 0 0 0 0 0 0 0 0 0\n -14.6900 -10.9960 -80.8600 O 0 0 0 0 0 0 0 0 0 0 0 0\n -16.5040 -11.6890 -79.0770 O 0 0 0 0 0 0 0 0 0 0 0 0\n -14.9990 -9.6870 -78.7060 O 0 0 0 0 0 0 0 0 0 0 0 0\n -15.1870 -8.4550 -79.4050 C 0 0 0 0 0 0 0 0 0 0 0 0\n -14.6700 -7.3160 -78.5260 C 0 0 0 0 0 0 0 0 0 0 0 0\n -13.2400 -7.2390 -78.5880 O 0 0 0 0 0 0 0 0 0 0 0 0\n -15.2130 -5.9510 -78.9460 C 0 0 0 0 0 0 0 0 0 0 0 0\n -16.1600 -5.4570 -77.9880 O 0 0 0 0 0 0 0 0 0 0 0 0\n -14.0000 -5.0420 -79.0650 C 0 0 0 0 0 0 0 0 0 0 0 0\n -14.1790 -3.8250 -78.3260 R 0 0 0 0 0 1 0 0 0 0 0 0\n -12.8370 -5.8690 -78.5180 C 0 0 0 0 0 0 0 0 0 0 0 0\n -11.5470 -5.6210 -79.2410 R 0 0 0 0 0 1 0 0 0 0 0 0\n -14.0270 -11.9960 -78.6490 O 0 0 0 0 0 0 0 0 0 0 0 0\n -14.1810 -13.5930 -78.4870 P 0 0 0 0 0 0 0 0 0 0 0 0\n -14.5480 -14.2030 -79.8230 O 0 0 0 0 0 0 0 0 0 0 0 0\n -15.0330 -13.8500 -77.2690 O 0 0 0 0 0 0 0 0 0 0 0 0\n -12.6800 -14.0730 -78.1770 O 0 0 0 0 0 0 0 0 0 0 0 0\n -12.1840 -14.2350 -76.8490 C 0 0 0 0 0 0 0 0 0 0 0 0\n -11.1340 -13.1670 -76.6050 C 0 0 0 0 0 0 0 0 0 0 0 0\n -11.6880 -11.8550 -76.6770 O 0 0 0 0 0 0 0 0 0 0 0 0\n -10.5070 -13.2750 -75.2350 C 0 0 0 0 0 0 0 0 0 0 0 0\n -9.4070 -14.1780 -75.3000 O 0 0 0 0 0 0 0 0 0 0 0 0\n -10.0970 -11.8400 -74.9280 C 0 0 0 0 0 0 0 0 0 0 0 0\n -8.6920 -11.6460 -75.1050 O 0 0 0 0 0 0 0 0 0 0 0 0\n -10.8280 -10.9760 -75.9460 C 0 0 0 0 0 0 0 0 0 0 0 0\n -11.5890 -9.8540 -75.3660 N 0 0 0 0 0 0 0 0 0 0 0 0\n -12.7860 -10.0630 -74.7850 C 0 0 0 0 0 0 0 0 0 0 0 0\n -13.5340 -9.0090 -74.2510 C 0 0 0 0 0 0 0 0 0 0 0 0\n -14.8620 -9.2740 -73.5990 C 0 0 0 0 0 0 0 0 0 0 0 0\n -15.1890 -10.4300 -73.3940 O 0 0 0 0 0 0 0 0 0 0 0 0\n -15.6600 -8.2650 -73.2400 N 0 0 0 0 0 0 0 0 0 0 0 0\n -13.0230 -7.5870 -74.3390 C 0 0 0 0 0 0 0 0 0 0 0 0\n -11.7130 -7.4960 -74.9740 C 0 0 0 0 0 0 0 0 0 0 0 0\n -11.0640 -8.6200 -75.4710 C 0 0 0 0 0 0 0 0 0 0 0 0\n 1 2 2 0\n 1 3 1 0\n 1 4 1 0\n 1 14 1 0\n 4 5 1 0\n 5 6 1 0\n 6 7 1 0\n 6 8 1 0\n 7 12 1 0\n 8 9 1 0\n 8 10 1 0\n 10 11 1 0\n 10 12 1 0\n 12 13 1 0\n 14 15 1 0\n 15 16 2 0\n 15 17 1 0\n 15 18 1 0\n 18 19 1 0\n 19 20 1 0\n 20 21 1 0\n 20 22 1 0\n 21 26 1 0\n 22 23 1 0\n 22 24 1 0\n 24 25 1 0\n 24 26 1 0\n 26 27 1 0\n 27 28 1 0\n 27 35 1 0\n 28 29 2 0\n 29 30 1 0\n 29 33 1 0\n 30 31 2 0\n 30 32 1 0\n 33 34 1 0\n 34 35 2 0\nM CHG 2 3 -1 17 -1\nM END\n'
mocked_body = {
'detail': "Fragment with identifier 'foo,bar' not found",
'absent_identifiers': ['foo'],
'fragments': [{
'smiles': '[*]C1OC(COP(=O)([O-])OP(=O)([O-])OCC2OC(N3C=CCC(C(N)=O)=C3)C(O)C2O)C(O)C1[*]',
'pdb_code': '3j7u',
'pdb_title': 'Catalase structure determined by electron crystallography of thin 3D crystals',
'atom_codes': 'PA,O1A,O2A,O5B,C5B,C4B,O4B,C3B,O3B,C2B,C1B,O3,PN,O1N,O2N,O5D,C5D,C4D,O4D,C3D,O3D,C2D,O2D,C1D,N1N,C2N,C3N,C7N,O7N,N7N,C4N,C5N,C6N',
'uniprot_acc': 'P00432',
'prot_chain': 'A', 'het_seq_nr': 602, 'het_code': 'NDP', 'prot_name': 'Catalase',
'ec_number': '1.11.1.6', 'frag_nr': 24, 'frag_id': '3j7u_NDP_frag24', 'rowid': 7059,
'uniprot_name': 'Catalase', 'nr_r_groups': 2, 'het_chain': 'A', 'hash_code': '6ef5a609fb192dba',
'mol': molblock
}],
'status': 404,
'title': 'Not Found',
'type': 'about:blank'
}
m.get(url, json=mocked_body, status_code=404, headers={'Content-Type': 'application/problem+json'})
with pytest.raises(IncompleteFragments) as e:
client.fragments_by_id(fragment_ids=['3j7u_NDP_frag24', 'foo'])
assert len(e.value.fragments) == 1
assert e.value.fragments[0]['frag_id'] == '3j7u_NDP_frag24'
assert e.value.absent_identifiers == ['foo']
def test_pharmacophores(base_url, client, example1_phar, example3_phar):
with requests_mock.mock() as m:
m.get(base_url + '/fragments/3j7u_NDP_frag24.phar', text=example1_phar)
m.get(base_url + '/fragments/3j7u_NDP_frag23.phar', text=example3_phar)
response = client.pharmacophores(['3j7u_NDP_frag24', '3j7u_NDP_frag23'])
assert response == [example1_phar, example3_phar]
def test_pharmacophores_somenotfound_incomplete(base_url, client, example1_phar):
with requests_mock.mock() as m:
m.get(base_url + '/fragments/3j7u_NDP_frag24.phar', text=example1_phar)
notfound = {
'detail': "Fragment with identifier '3j7u_NDP_frag23' not found",
'identifier': '3j7u_NDP_frag23',
'status': 404,
'title': 'Not Found',
'type': 'about:blank'
}
m.get(base_url + '/fragments/3j7u_NDP_frag23.phar', status_code=404, json=notfound, headers={'Content-Type': 'application/problem+json'})
with pytest.raises(IncompletePharmacophores) as excinfo:
client.pharmacophores(['3j7u_NDP_frag24', '3j7u_NDP_frag23'])
assert excinfo.value.absent_identifiers == ['3j7u_NDP_frag23']
assert excinfo.value.pharmacophores == [example1_phar, None]
def test_pharmacophores_server500(base_url, client):
with requests_mock.mock() as m:
m.get(base_url + '/fragments/3j7u_NDP_frag24.phar', text='Internal server error', status_code=500)
with pytest.raises(HTTPError) as excinfo:
client.pharmacophores(['3j7u_NDP_frag24'])
assert excinfo.value.response.status_code == 500
| 97.359551 | 3,145 | 0.540969 | 3,805 | 17,330 | 2.4 | 0.101708 | 0.254271 | 0.343955 | 0.408673 | 0.793145 | 0.774529 | 0.757994 | 0.733027 | 0.71901 | 0.672908 | 0 | 0.368292 | 0.343047 | 17,330 | 177 | 3,146 | 97.909605 | 0.433816 | 0.032429 | 0 | 0.456693 | 0 | 0.062992 | 0.711293 | 0.059448 | 0 | 0 | 0 | 0 | 0.110236 | 1 | 0.07874 | false | 0 | 0.062992 | 0.015748 | 0.15748 | 0 | 0 | 0 | 0 | null | 1 | 1 | 1 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 12 |
2d0a2f1569fadbc34d20318046924ba2aa98f716 | 2,906 | py | Python | examples/Structural/main.py | HerminioTH/GeoFlow1D | 44a5c11e3297827b265c1ea44bb18256b074fa66 | [
"MIT"
] | 2 | 2020-02-10T11:23:16.000Z | 2020-07-01T20:28:57.000Z | examples/Structural/main.py | HerminioTH/GeoFlow1D | 44a5c11e3297827b265c1ea44bb18256b074fa66 | [
"MIT"
] | null | null | null | examples/Structural/main.py | HerminioTH/GeoFlow1D | 44a5c11e3297827b265c1ea44bb18256b074fa66 | [
"MIT"
] | null | null | null | import geoflow1D
from geoflow1D.GridModule import *
from geoflow1D.FieldsModule import *
from geoflow1D.LinearSystemModule import *
from geoflow1D.GeoModule import *
from geoflow1D.SolverModule import *
import numpy as np
from matplotlib import pyplot as plt
# -------------- PROBLEM ILLUSTRATION -----------------
# | sigma
# |
# +---V---+ ---
# | | |
# | | |
# | | |
# | | |
# | | |
# | | | H
# | | |
# | | |
# | | |
# x ^ | | |
# | | | |
# _|_ |_______| _|_
# -----------------------------------------------------
class SolidProps(object):
    def __init__(self, grid, M, rho):
        self.M = ScalarField(grid.getNumberOfRegions())
        self.M.setValue(grid.getRegions()[0], M)
        self.rho = ScalarField(grid.getNumberOfRegions())
        self.rho.setValue(grid.getRegions()[0], rho)
mm = 1000.
# -------------- GRID DATA ----------------------------
H = 10
nVertices = 15
nodesCoord, elemConn = createGridData(H, nVertices)
gridData = GridData()
gridData.setElementConnectivity(elemConn)
gridData.setNodeCoordinates(nodesCoord)
grid = Grid_1D(gridData)
grid.buildStencil()
# -----------------------------------------------------
# -------------- PROPERTIES ----------------------------
M = 1.3e8 # Constrained modulus
rho = 2300. # Solid density
props = SolidProps(grid, M, rho)
g = -9.81
# -----------------------------------------------------
# ------------- CREATE LINEAR SYSTEM ------------------
nDOF = 1
ls = LinearSystemCOO(grid.stencil, nDOF)
ls.initialize()
# -----------------------------------------------------
# -------------- NUMERICAL SOLUTION -------------------
AssemblyStiffnessMatrix(ls, grid, props, 0)
AssemblyGravityToVector(ls, grid, props, g, 0)
# -----------------------------------------------------
# ------------- BOUNDARY CONDITIONS -------------------
ls.applyDirichlet(0, 0)
sigma = -5e4
ls.applyNeumann(-1, sigma)
# -----------------------------------------------------
# ----------------- DEFINE SOLVER ---------------------
solver = Solver(tol=1e-8, maxiter=500)
solver.solve(ls.matrix, ls.rhs)
# -----------------------------------------------------
# ------------- ANALYTICAL SOLUTION -------------------
def analyticalSolution(M, stress, L, x, gravity, rho):
    x = np.array(x)
    # use the gravity argument rather than the module-level g, so the function is self-contained
    return x*(-stress + rho*gravity*L)/M - rho*gravity*x*x/(2*M)
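# Consistency check of the closed form above (a sketch of the algebra, assuming the sign
# conventions used in this script): M*u'(x) = -stress + rho*gravity*(L - x), so M*u''(x) =
# -rho*gravity (equilibrium under self-weight), u(0) = 0 at the fixed bottom and
# M*u'(L) = -stress at the loaded top.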
x_a = np.linspace(0, H, 100)
u_a = analyticalSolution(M, sigma, H, x_a, g, rho)
# -----------------------------------------------------
# -------------- PLOT SOLUTION ------------------------
x_n = [v.getCoordinate() for v in grid.getVertices()]
u_n = solver.solution
plt.plot(u_n*mm, x_n, 'o', label='Numeric')
plt.plot(u_a*mm, x_a, '-', label='Analytic')
plt.grid(True)
plt.xlabel('Displacement (mm)')
plt.ylabel('Coordinate X (m)')
plt.show()
# -----------------------------------------------------
| 29.653061 | 56 | 0.456986 | 260 | 2,906 | 5.011538 | 0.442308 | 0.049885 | 0.058327 | 0.056792 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.018428 | 0.15967 | 2,906 | 97 | 57 | 29.958763 | 0.515152 | 0.433586 | 0 | 0 | 0 | 0 | 0.031056 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.04 | false | 0 | 0.16 | 0 | 0.24 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2d0e5939c50882dfd177fbde933852e0ecf02d4f | 1,024 | py | Python | torch2trt/converters/matmul.py | grimoire/torch2trt | bf65d573f69879442d542e16c6280de4a1354d72 | [
"MIT"
] | null | null | null | torch2trt/converters/matmul.py | grimoire/torch2trt | bf65d573f69879442d542e16c6280de4a1354d72 | [
"MIT"
] | null | null | null | torch2trt/converters/matmul.py | grimoire/torch2trt | bf65d573f69879442d542e16c6280de4a1354d72 | [
"MIT"
] | null | null | null | from torch2trt.torch2trt import *
from torch2trt.module_test import add_module_test
import tensorrt as trt
@tensorrt_converter('torch.matmul')
def convert_matmul(ctx):
    input_a = ctx.method_args[0]
    input_b = ctx.method_args[1]
    input_a_trt, input_b_trt = trt_(ctx.network, input_a, input_b)
    output = ctx.method_return
    mm_op = trt.MatrixOperation.NONE
    layer = ctx.network.add_matrix_multiply(input_a_trt, mm_op, input_b_trt, mm_op)
    output._trt = layer.get_output(0)
class MatmulTest(torch.nn.Module):
    def __init__(self):
        super(MatmulTest, self).__init__()
    def forward(self, x, y):
        return torch.matmul(x, y)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 4, 6), (1, 2, 6, 7)])
@add_module_test(torch.float32, torch.device('cuda'), [(1, 2, 4, 6), (1, 2, 6, 7)])
@add_module_test(torch.float32, torch.device('cuda'), [(1, 4, 6), (1, 6, 7)])
# @add_module_test(torch.float32, torch.device('cuda'), [(4, 6), (6, 7)])
def test_matmul():
return MatmulTest() | 32 | 83 | 0.6875 | 164 | 1,024 | 4.018293 | 0.29878 | 0.091047 | 0.098634 | 0.109256 | 0.273141 | 0.273141 | 0.273141 | 0.273141 | 0.273141 | 0.273141 | 0 | 0.044983 | 0.15332 | 1,024 | 32 | 84 | 32 | 0.71511 | 0.069336 | 0 | 0 | 0 | 0 | 0.02521 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.181818 | false | 0 | 0.136364 | 0.090909 | 0.454545 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2d0f65ea12ba88c4f09486a14c66b101fd4846c7 | 148 | py | Python | src/020-factorial-digit-sum/python/solver.py | xfbs/ProjectEulerRust | e26768c56ff87b029cb2a02f56dc5cd32e1f7c87 | [
"MIT"
] | 1 | 2018-01-26T21:18:12.000Z | 2018-01-26T21:18:12.000Z | src/020-factorial-digit-sum/python/solver.py | xfbs/ProjectEulerRust | e26768c56ff87b029cb2a02f56dc5cd32e1f7c87 | [
"MIT"
] | 3 | 2017-12-09T14:49:30.000Z | 2017-12-09T14:59:39.000Z | src/020-factorial-digit-sum/python/solver.py | xfbs/ProjectEulerRust | e26768c56ff87b029cb2a02f56dc5cd32e1f7c87 | [
"MIT"
] | null | null | null | import math
def solve(m):
    f = math.factorial(m)
    digit_sum = 0
    while f > 0:
        digit_sum += f % 10
        f //= 10
    return digit_sum
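# Worked example: solve(10) -> 10! = 3628800 and 3+6+2+8+8+0+0 = 27.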
| 12.333333 | 25 | 0.547297 | 24 | 148 | 3.25 | 0.541667 | 0.307692 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.061856 | 0.344595 | 148 | 11 | 26 | 13.454545 | 0.742268 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0.125 | 0 | 0.375 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 |
2d1184aa5ea419982c4d302f3708a26aca0b8cd0 | 1,454 | py | Python | ngpy/vector2d.py | liuyxpp/ngpy | 24f4c07e336d255302618ea113ba2e02f60e01b4 | [
"BSD-3-Clause"
] | 1 | 2021-09-06T10:19:55.000Z | 2021-09-06T10:19:55.000Z | ngpy/vector2d.py | liuyxpp/ngpy | 24f4c07e336d255302618ea113ba2e02f60e01b4 | [
"BSD-3-Clause"
] | null | null | null | ngpy/vector2d.py | liuyxpp/ngpy | 24f4c07e336d255302618ea113ba2e02f60e01b4 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
import math
"""
2D math vector
Since 2011.8.19
AUTHOUR:
Yi-Xin Liu <liuyxpp@gmail.com>
Fudan University
REVISION:
2011.8.22
"""
from persistent import Persistent
class Vector2D(Persistent):
def __init__(self,x=0.0,y=0.0):
self.x=float(x)
self.y=float(y)
def __sub__(self,other):
return Vector2D(self.x-other.x,self.y-other.y)
    def __isub__(self,other):
        self.x -= other.x
        self.y -= other.y
        return self
def __str__(self):
return "("+str(self.x)+","+str(self.y)+")"
def __eq__(self,other):
return ((self.x==other.x) and (self.y==other.y))
def length2(self):
dx=self.x
dy=self.y
return dx*dx + dy*dy
def length(self):
return math.sqrt(self.length2())
def distance2(self,other):
return (self-other).length2()
def distance(self,other):
return (self-other).length()
def test():
x1=1.1
y1=-3
x2=-2.3
y2=0
point0=Vector2D()
point1=Vector2D(x1,y1)
point2=Vector2D(x2,y2)
    print(point0, '= (0,0)?')
    print(point1, '= (', x1, ',', y1, ')?')
    print(point2, '= (', x2, ',', y2, ')?')
    print(point1.length(), '= ', math.sqrt(x1*x1 + y1*y1), '?')
    print(point2.length2(), '= ', x2*x2 + y2*y2, '?')
    print(point0.distance2(point2), '= ', (0-x2)**2 + (0-y2)**2, '?')
    print(point1.distance(point2), '= ', math.sqrt((x1-x2)*(x1-x2) + (y1-y2)*(y1-y2)), '?')
if __name__=='__main__':
test()
| 23.836066 | 85 | 0.570151 | 216 | 1,454 | 3.708333 | 0.291667 | 0.043695 | 0.074906 | 0.041199 | 0.139825 | 0.062422 | 0.062422 | 0.062422 | 0.062422 | 0 | 0 | 0.074336 | 0.222834 | 1,454 | 60 | 86 | 24.233333 | 0.634513 | 0.013755 | 0 | 0 | 0 | 0 | 0.033001 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.047619 | null | null | 0.166667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
2d11b724ff940a49feced129c545ac4d65ca924d | 24 | py | Python | pyranges/version.py | biocore-ntnu/pyranges | 5dd7cda7e42051c4b4a75eb6f8650464fb416f7a | [
"MIT"
] | 299 | 2019-03-22T18:28:01.000Z | 2022-03-11T16:14:19.000Z | pyranges/version.py | biocore-ntnu/pyranges | 5dd7cda7e42051c4b4a75eb6f8650464fb416f7a | [
"MIT"
] | 157 | 2019-04-06T18:05:27.000Z | 2022-03-07T14:50:10.000Z | pyranges/version.py | biocore-ntnu/pyranges | 5dd7cda7e42051c4b4a75eb6f8650464fb416f7a | [
"MIT"
] | 33 | 2019-04-12T14:44:53.000Z | 2022-03-16T16:58:06.000Z | __version__ = "0.0.111"
| 12 | 23 | 0.666667 | 4 | 24 | 3 | 0.75 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.238095 | 0.125 | 24 | 1 | 24 | 24 | 0.333333 | 0 | 0 | 0 | 0 | 0 | 0.291667 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5 |
2d1213c410c2a6b8aac4888a4f6bc94463fa2640 | 4,538 | py | Python | HayStack_Client/IOTA_Module.py | ConsensusGroup/Haystack | c2d0b8fb7b2064b05a5d256bb949dda9a0ef569d | [
"MIT"
] | 1 | 2019-11-28T08:50:26.000Z | 2019-11-28T08:50:26.000Z | HayStack_Client/IOTA_Module.py | ConsensusGroup/Haystack | c2d0b8fb7b2064b05a5d256bb949dda9a0ef569d | [
"MIT"
] | 3 | 2019-11-22T04:23:47.000Z | 2019-11-30T07:11:24.000Z | HayStack_Client/IOTA_Module.py | ConsensusGroup/Haystack | c2d0b8fb7b2064b05a5d256bb949dda9a0ef569d | [
"MIT"
] | 3 | 2018-03-19T05:20:44.000Z | 2019-11-22T00:56:31.000Z | ####################################################################################
############# The purpose of the module is to handle IOTA interactions #############
####################################################################################
#IOTA library
from iota import TryteString, Address, ProposedBundle, ProposedTransaction, Bundle
from iota.crypto.addresses import AddressGenerator
from iota.adapter.wrappers import RoutingWrapper
from iota.adapter import HttpAdapter
from iota import *
#Other libraries
from random import SystemRandom
from Configuration_Module import Configuration
from Tools_Module import Tools
import config
######## Base IOTA classes ########
def Seed_Generator():
random_trytes = [i for i in map(chr, range(65,91))]
random_trytes.append('9')
seed = [random_trytes[SystemRandom().randrange(len(random_trytes))] for x in range(81)]
return ''.join(seed)
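# Added note (illustration only, not part of the original module): the generated
# seed is an 81-character string over the IOTA tryte alphabet A-Z plus '9', e.g.
#   s = Seed_Generator()
#   assert len(s) == 81 and set(s) <= set("ABCDEFGHIJKLMNOPQRSTUVWXYZ9")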
def Return_Fastest_Node():
x = Configuration()
Node_Dictionary = Tools().Read_From_Json(directory = x.UserFolder+"/"+x.NodeFolder+"/"+x.NodeFile)
Send_initial = 999.0
Receive_initial = 999.0
Fastest_Combination = {}
for Node, Stats in Node_Dictionary.items():
try:
Send = Stats["Send"]
Receive = Stats["Receive"]
except TypeError:
Send = 999.0
Receive = 999.0
if Send_initial > Send:
Send_initial = Send
Fastest_Combination["Send"] = Node
if Receive_initial > Receive:
Receive_initial = Receive
Fastest_Combination["Receive"] = Node
return Fastest_Combination
class IOTA_Module(Configuration):
def __init__(self, Seed, IOTA_Instance = ""):
Configuration.__init__(self)
try:
Optimal_Node = Return_Fastest_Node()["Send"]
if Optimal_Node == 999.0:
Optimal_Node = Return_Fastest_Node()["Receive"]
config.Node = Optimal_Node
except:
config.Node = "http://localhost:14265"
if config.Node == "http://localhost:14265":
self.IOTA_Api = Iota(RoutingWrapper(str(config.Node)).add_route('attachToTangle', 'http://localhost:14265'), seed = Seed)
else:
self.IOTA_Api = Iota(config.Node, seed = Seed)
if IOTA_Instance != "":
self.IOTA_Api = IOTA_Instance
self.Seed_Copy = Seed
def Generate_Address(self, Index = 0):
generate = self.IOTA_Api.get_new_addresses(index = int(Index))
Address = str(generate.get('addresses')).strip("[Address(").strip(")]").strip("'")
return Address
def Send(self, ReceiverAddress, Message, Test_Node = False):
def Bundle_Generation(Recepient, ToSend):
text_transfer = TryteString.from_string(str(ToSend))
txn_2 = ProposedTransaction(address = Address(Recepient), message = text_transfer, value = 0)
bundle.add_transaction(txn_2)
bundle = ProposedBundle()
if type(ReceiverAddress) == list and type(Message) == list and (len(ReceiverAddress) == len(Message)):
for i in range(len(ReceiverAddress)):
Bundle_Generation(ReceiverAddress[i], Message[i])
elif type(ReceiverAddress) == str and type(Message) == str:
Bundle_Generation(ReceiverAddress, Message)
bundle.finalize()
coded = bundle.as_tryte_strings()
hashed = bundle.hash
#Return the fastest sender node from the DB if localhost is not present.
if str(self.Node) != "http://localhost:14265":
if Test_Node == False:
self.Node = Return_Fastest_Node()["Send"]
self.IOTA_Api = Iota(self.Node, seed = self.Seed_Copy)
send = self.IOTA_Api.send_trytes(trytes = coded, depth = 4)
return hashed
def Receive(self, Start = 0, Stop = "", JSON = False, Test_Node = False):
#Return the fastest sender node from the DB if localhost is not present.
if self.Node != "http://localhost:14265":
if Test_Node == False:
self.Node = Return_Fastest_Node()["Receive"]
self.IOTA_Api = Iota(self.Node, seed = self.Seed_Copy)
        # This chunk of code chooses which segment of the Tx history to retrieve
if Stop == "":
mess = self.IOTA_Api.get_account_data(start = Start)
else:
mess = self.IOTA_Api.get_account_data(start = Start, stop = Stop)
#Decompose the Bundle into components
bundle = mess.get('bundles')
Message = []
self.Message = []
for i in bundle:
message = str(i.get_messages()).strip("[u'").strip("']")
if JSON == True:
Json = i.as_json_compatible()[0]
message = [Json,message]
self.Message.append(message)
return self
def LatestTangleTime(self):
Node = self.IOTA_Api.get_node_info()
self.TangleTime = Node.get("time")
return self
| 33.865672 | 124 | 0.666373 | 568 | 4,538 | 5.167254 | 0.272887 | 0.027257 | 0.037479 | 0.025554 | 0.185349 | 0.139012 | 0.139012 | 0.139012 | 0.139012 | 0.112436 | 0 | 0.015763 | 0.175187 | 4,538 | 133 | 125 | 34.120301 | 0.768368 | 0.078889 | 0 | 0.10101 | 0 | 0 | 0.052485 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.080808 | false | 0 | 0.090909 | 0 | 0.242424 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2d15578f96b8143429650898e1882b35ef941ed3 | 2,412 | py | Python | demo.py | TiagoFilipeSousaGoncalves/code2model-codesprinters | 9f38a994a3a1036916ea82f9523baa8a5eed8226 | [
"MIT"
] | null | null | null | demo.py | TiagoFilipeSousaGoncalves/code2model-codesprinters | 9f38a994a3a1036916ea82f9523baa8a5eed8226 | [
"MIT"
] | null | null | null | demo.py | TiagoFilipeSousaGoncalves/code2model-codesprinters | 9f38a994a3a1036916ea82f9523baa8a5eed8226 | [
"MIT"
] | null | null | null | # Imports
import streamlit as st
import pandas as pd
import difflib
# Load results .CSV
code_simil_results = pd.read_csv('results/resultados.csv')
prog_lang_results = pd.read_csv('results/resultados_language.csv')
# Create a select box to choose the demo
add_selectbox = st.sidebar.selectbox(
"What demo would you like to see?",
("Code similarity", "Language identification")
)
# The input index of our data
input_number = st.number_input('Select an index', min_value=0, max_value=len(code_simil_results))
# Code similarity
if add_selectbox == 'Code similarity':
st.write("Code similarity")
col1, col2, col3 = st.columns(3)
original = code_simil_results.iloc[input_number][['corpo']].values[0]
most_similar = code_simil_results.iloc[input_number][['most_similar']].values[0]
similarity = code_simil_results.iloc[input_number][['most_similar']].values[0]
with col1:
st.text("Original Code")
st.text(original)
with col2:
st.text("Most similar code")
st.text(most_similar)
with col3:
st.text("Diff between code")
init_text = ''
for text in difflib.unified_diff(original.split("\n"), most_similar.split("\n")):
if text[:3] not in ('+++', '---', '@@ '):
if '+' in text[0]:
text = f"<p style='color: green'> {text} </p>"
elif '-' in text[0]:
text = f"<p style='color: red'> {text} </p>"
init_text = init_text + '\n' + text
st.markdown(init_text, unsafe_allow_html=True)
# st.text(init_text)
# Language identification
elif add_selectbox == 'Language identification':
st.write("Language identification")
col1, col2, col3 = st.columns(3)
original_code = prog_lang_results.iloc[input_number][['corpo']].values[0]
original_prog_lang = prog_lang_results.iloc[input_number][['platafor']].values[0]
predicted_prog_lan = prog_lang_results.iloc[input_number][['platafor_predict']].values[0]
with col1:
st.text("Original Code")
st.text(original_code)
with col2:
st.text("Predicted Programming Language")
st.text(predicted_prog_lan)
with col3:
st.text("Original Programming Language")
st.text(original_prog_lang)
| 28.714286 | 98 | 0.620232 | 306 | 2,412 | 4.699346 | 0.29085 | 0.05007 | 0.066759 | 0.091794 | 0.363004 | 0.363004 | 0.308067 | 0.207928 | 0.128651 | 0.128651 | 0 | 0.013296 | 0.251658 | 2,412 | 83 | 99 | 29.060241 | 0.78338 | 0.062604 | 0 | 0.208333 | 0 | 0 | 0.220277 | 0.024424 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.0625 | 0 | 0.0625 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2d1815bfd6d647756ab866fe6efa2fc1a8f472f8 | 15,759 | py | Python | fn_portal/filters/FishAttr_filters.py | AdamCottrill/FishNetPortal | 4e58e05f52346ac1ab46698a03d4229c74828406 | [
"MIT"
] | null | null | null | fn_portal/filters/FishAttr_filters.py | AdamCottrill/FishNetPortal | 4e58e05f52346ac1ab46698a03d4229c74828406 | [
"MIT"
] | null | null | null | fn_portal/filters/FishAttr_filters.py | AdamCottrill/FishNetPortal | 4e58e05f52346ac1ab46698a03d4229c74828406 | [
"MIT"
] | null | null | null | import django_filters
from .common_filters import NumberInFilter, ValueInFilter, GeomFilter, GeoFilterSet
class FishAttrFilters(GeoFilterSet):
"""A filter set that contains filters that are common to all the FN125 child
tables - FN125Lamprey, FN125Tag, Fn126, and FN127. Filtersets for those class
inherit from this one, and add their own models and model specific filters.
Filters in this class include filters from FN011 to FN125 Tables.
"""
roi = GeomFilter(
field_name="fish__catch__effort__sample__geom__within", method="filter_roi"
)
buffered_point = GeomFilter(
field_name="fish__catch__effort__sample__geom__within", method="filter_point"
)
management_unit__in = ValueInFilter(
field_name="fish__catch__effort__sample__management_units__slug"
)
management_unit__not__in = ValueInFilter(
field_name="fish__catch__effort__sample__management_units__slug", exclude=True
)
# FN011 (PROJECT) ATTRIBUTES
year = django_filters.CharFilter(
field_name="fish__catch__effort__sample__project__year", lookup_expr="exact"
)
year__gte = django_filters.NumberFilter(
field_name="fish__catch__effort__sample__project__year", lookup_expr="gte"
)
year__lte = django_filters.NumberFilter(
field_name="fish__catch__effort__sample__project__year", lookup_expr="lte"
)
year__gt = django_filters.NumberFilter(
field_name="fish__catch__effort__sample__project__year", lookup_expr="gt"
)
year__lt = django_filters.NumberFilter(
field_name="fish__catch__effort__sample__project__year", lookup_expr="lt"
)
prj_date0 = django_filters.DateFilter(
field_name="fish__catch__effort__sample__project__prj_date0",
help_text="format: yyyy-mm-dd",
)
prj_date0__gte = django_filters.DateFilter(
field_name="fish__catch__effort__sample__project__prj_date0",
lookup_expr="gte",
help_text="format: yyyy-mm-dd",
)
prj_date0__lte = django_filters.DateFilter(
field_name="fish__catch__effort__sample__project__prj_date0",
lookup_expr="lte",
help_text="format: yyyy-mm-dd",
)
prj_date1 = django_filters.DateFilter(
field_name="fish__catch__effort__sample__project__prj_date1",
help_text="format: yyyy-mm-dd",
)
prj_date1__gte = django_filters.DateFilter(
field_name="fish__catch__effort__sample__project__prj_date1",
lookup_expr="gte",
help_text="format: yyyy-mm-dd",
)
prj_date1__lte = django_filters.DateFilter(
field_name="fish__catch__effort__sample__project__prj_date1",
lookup_expr="lte",
help_text="format: yyyy-mm-dd",
)
prj_cd = ValueInFilter(field_name="fish__catch__effort__sample__project__prj_cd")
prj_cd__not = ValueInFilter(
field_name="fish__catch__effort__sample__project__prj_cd", exclude=True
)
prj_cd__like = django_filters.CharFilter(
field_name="fish__catch__effort__sample__project__prj_cd",
lookup_expr="icontains",
)
prj_cd__not_like = django_filters.CharFilter(
field_name="fish__catch__effort__sample__project__prj_cd",
lookup_expr="icontains",
exclude=True,
)
prj_cd__endswith = django_filters.CharFilter(
field_name="fish__catch__effort__sample__project__prj_cd",
lookup_expr="endswith",
)
prj_cd__not_endswith = django_filters.CharFilter(
field_name="fish__catch__effort__sample__project__prj_cd",
lookup_expr="endswith",
exclude=True,
)
prj_nm__like = django_filters.CharFilter(
field_name="fish__catch__effort__sample__project__prj_nm",
lookup_expr="icontains",
)
prj_nm__not_like = django_filters.CharFilter(
field_name="fish__catch__effort__sample__project__prj_nm",
lookup_expr="icontains",
exclude=True,
)
prj_ldr = django_filters.CharFilter(
field_name="fish__catch__effort__sample__project__prj_ldr__username",
lookup_expr="iexact",
)
protocol = ValueInFilter(
field_name="fish__catch__effort__sample__project__protocol__abbrev"
)
protocol__not = ValueInFilter(
field_name="fish__catch__effort__sample__project__protocol__abbrev",
exclude=True,
)
lake = ValueInFilter(
field_name="fish__catch__effort__sample__project__lake__abbrev",
)
lake__not = ValueInFilter(
field_name="fish__catch__effort__sample__project__lake__abbrev", exclude=True
)
# FN121 (NET SET) ATTRIBUTES:
sam = ValueInFilter(field_name="fish__catch__effort__sample__sam")
sam__not = ValueInFilter(
field_name="fish__catch__effort__sample__sam", exclude=True
)
sidep__gte = django_filters.NumberFilter(
field_name="fish__catch__effort__sample__sidep", lookup_expr="gte"
)
sidep__lte = django_filters.NumberFilter(
field_name="fish__catch__effort__sample__sidep", lookup_expr="lte"
)
grtp = ValueInFilter(field_name="fish__catch__effort__sample__grtp")
grtp__not = ValueInFilter(
field_name="fish__catch__effort__sample__grtp", exclude=True
)
gr = ValueInFilter(field_name="fish__catch__effort__sample__gr")
gr__not = ValueInFilter(field_name="fish__catch__effort__sample__gr", exclude=True)
    # grid is a little tricky - it requires filtering by lake too - user beware!
grid = NumberInFilter(field_name="fish__catch__effort__sample__grid__grid")
grid__not = NumberInFilter(
field_name="fish__catch__effort__sample__grid__grid", exclude=True
)
effdur__gte = django_filters.NumberFilter(
field_name="fish__catch__effort__sample__effdur", lookup_expr="gte"
)
effdur__lte = django_filters.NumberFilter(
field_name="fish__catch__effort__sample__effdur", lookup_expr="lte"
)
set_date = django_filters.DateFilter(
field_name="fish__catch__effort__sample__effdt0", help_text="format: yyyy-mm-dd"
)
set_date__gte = django_filters.DateFilter(
field_name="fish__catch__effort__sample__effdt0",
lookup_expr="gte",
help_text="format: yyyy-mm-dd",
)
set_date__lte = django_filters.DateFilter(
field_name="fish__catch__effort__sample__effdt0",
lookup_expr="lte",
help_text="format: yyyy-mm-dd",
)
lift_date = django_filters.DateFilter(
field_name="fish__catch__effort__sample__effdt1", help_text="format: yyyy-mm-dd"
)
lift_date__gte = django_filters.DateFilter(
field_name="fish__catch__effort__sample__effdt1",
lookup_expr="gte",
help_text="format: yyyy-mm-dd",
)
lift_date__lte = django_filters.DateFilter(
field_name="fish__catch__effort__sample__effdt1",
lookup_expr="lte",
help_text="format: yyyy-mm-dd",
)
set_time = django_filters.TimeFilter(
field_name="fish__catch__effort__sample__efftm0", help_text="format: HH:MM"
)
set_time__gte = django_filters.TimeFilter(
field_name="fish__catch__effort__sample__efftm0",
lookup_expr="gte",
help_text="format: HH:MM",
)
set_time__lte = django_filters.TimeFilter(
field_name="fish__catch__effort__sample__efftm0",
lookup_expr="lte",
help_text="format: HH:MM",
)
lift_time = django_filters.TimeFilter(
field_name="fish__catch__effort__sample__efftm1", help_text="format: HH:MM"
)
lift_time__gte = django_filters.TimeFilter(
field_name="fish__catch__effort__sample__efftm1",
lookup_expr="gte",
help_text="format: HH:MM",
)
lift_time__lte = django_filters.TimeFilter(
field_name="fish__catch__effort__sample__efftm1",
lookup_expr="lte",
help_text="format: HH:MM",
)
# FN122 (EFFORT) ATTRIBUTES
eff = ValueInFilter(field_name="fish__catch__effort__eff")
eff__not = ValueInFilter(field_name="fish__catch__effort__eff", exclude=True)
effdst = django_filters.NumberFilter(
field_name="fish__catch__effort__effdst", lookup_expr="exact"
)
effdst__gte = django_filters.NumberFilter(
field_name="fish__catch__effort__effdst", lookup_expr="gte"
)
effdst__lte = django_filters.NumberFilter(
field_name="fish__catch__effort__effdst", lookup_expr="lte"
)
effdst__gt = django_filters.NumberFilter(
field_name="fish__catch__effort__effdst", lookup_expr="gt"
)
effdst__lt = django_filters.NumberFilter(
field_name="fish__catch__effort__effdst", lookup_expr="lt"
)
grdep = django_filters.NumberFilter(
field_name="fish__catch__effort__grdep", lookup_expr="exact"
)
grdep__gte = django_filters.NumberFilter(
field_name="fish__catch__effort__grdep", lookup_expr="gte"
)
grdep__lte = django_filters.NumberFilter(
field_name="fish__catch__effort__grdep", lookup_expr="lte"
)
grdep__gt = django_filters.NumberFilter(
field_name="fish__catch__effort__grdep", lookup_expr="gt"
)
grdep__lt = django_filters.NumberFilter(
field_name="fish__catch__effort__grdep", lookup_expr="lt"
)
grtem0 = django_filters.NumberFilter(
field_name="fish__catch__effort__grtem0", lookup_expr="exact"
)
grtem0__gte = django_filters.NumberFilter(
field_name="fish__catch__effort__grtem0", lookup_expr="gte"
)
grtem0__lte = django_filters.NumberFilter(
field_name="fish__catch__effort__grtem0", lookup_expr="lte"
)
grtem0__gt = django_filters.NumberFilter(
field_name="fish__catch__effort__grtem0", lookup_expr="gt"
)
grtem0__lt = django_filters.NumberFilter(
field_name="fish__catch__effort__grtem0", lookup_expr="lt"
)
grtem1 = django_filters.NumberFilter(
field_name="fish__catch__effort__grtem1", lookup_expr="exact"
)
grtem1__gte = django_filters.NumberFilter(
field_name="fish__catch__effort__grtem1", lookup_expr="gte"
)
grtem1__lte = django_filters.NumberFilter(
field_name="fish__catch__effort__grtem1", lookup_expr="lte"
)
grtem1__gt = django_filters.NumberFilter(
field_name="fish__catch__effort__grtem1", lookup_expr="gt"
)
grtem1__lt = django_filters.NumberFilter(
field_name="fish__catch__effort__grtem1", lookup_expr="lt"
)
# FN123 (CATCH) ATTRIBUTES:
grp = ValueInFilter(field_name="fish__catch__grp")
grp__not = ValueInFilter(field_name="fish__catch__grp", exclude=True)
spc = ValueInFilter(field_name="fish__catch__species__spc")
spc__not = ValueInFilter(field_name="fish__catch__species__spc", exclude=True)
catcnt = django_filters.NumberFilter(
field_name="fish__catch__catcnt", lookup_expr="exact"
)
catcnt__gte = django_filters.NumberFilter(
field_name="fish__catch__catcnt", lookup_expr="gte"
)
catcnt__lte = django_filters.NumberFilter(
field_name="fish__catch__catcnt", lookup_expr="lte"
)
catcnt__gt = django_filters.NumberFilter(
field_name="fish__catch__catcnt", lookup_expr="gt"
)
catcnt__lt = django_filters.NumberFilter(
field_name="fish__catch__catcnt", lookup_expr="lt"
)
biocnt = django_filters.NumberFilter(
field_name="fish__catch__biocnt", lookup_expr="exact"
)
biocnt__gte = django_filters.NumberFilter(
field_name="fish__catch__biocnt", lookup_expr="gte"
)
biocnt__lte = django_filters.NumberFilter(
field_name="fish__catch__biocnt", lookup_expr="lte"
)
biocnt__gt = django_filters.NumberFilter(
field_name="fish__catch__biocnt", lookup_expr="gt"
)
biocnt__lt = django_filters.NumberFilter(
field_name="fish__catch__biocnt", lookup_expr="lt"
)
# FN125 (FISH) ATTRIBUTES:
tlen = django_filters.NumberFilter(field_name="fish__tlen")
tlen__gte = django_filters.NumberFilter(field_name="fish__tlen", lookup_expr="gte")
tlen__lte = django_filters.NumberFilter(field_name="fish__tlen", lookup_expr="lte")
tlen__gt = django_filters.NumberFilter(field_name="fish__tlen", lookup_expr="gt")
tlen__lt = django_filters.NumberFilter(field_name="fish__tlen", lookup_expr="lt")
flen = django_filters.NumberFilter(field_name="fish__flen")
flen__gte = django_filters.NumberFilter(field_name="fish__flen", lookup_expr="gte")
flen__lte = django_filters.NumberFilter(field_name="fish__flen", lookup_expr="lte")
flen__gt = django_filters.NumberFilter(field_name="fish__flen", lookup_expr="gt")
flen__lt = django_filters.NumberFilter(field_name="fish__flen", lookup_expr="lt")
rwt = django_filters.NumberFilter(field_name="fish__rwt")
rwt__null = django_filters.BooleanFilter(
field_name="fish__rwt", lookup_expr="isnull"
)
rwt__gte = django_filters.NumberFilter(field_name="fish__rwt", lookup_expr="gte")
rwt__lte = django_filters.NumberFilter(field_name="fish__rwt", lookup_expr="lte")
rwt__gt = django_filters.NumberFilter(field_name="fish__rwt", lookup_expr="gt")
rwt__lt = django_filters.NumberFilter(field_name="fish__rwt", lookup_expr="lt")
mat = ValueInFilter(field_name="fish__mat")
mat__not = ValueInFilter(field_name="fish__mat", exclude=True)
mat__null = django_filters.BooleanFilter(
field_name="fish__mat", lookup_expr="isnull"
)
gon = ValueInFilter(field_name="fish__gon")
gon__not = ValueInFilter(field_name="fish__gon", exclude=True)
gon__null = django_filters.BooleanFilter(
field_name="fish__gon", lookup_expr="isnull"
)
sex = ValueInFilter(field_name="fish__sex")
sex__not = ValueInFilter(field_name="fish__sex", exclude=True)
sex__null = django_filters.BooleanFilter(
field_name="fish__sex", lookup_expr="isnull"
)
clipc = ValueInFilter(field_name="fish__clipc")
clipc__not = ValueInFilter(field_name="fish__clipc", exclude=True)
clipc__null = django_filters.BooleanFilter(
field_name="fish__clipc", lookup_expr="isnull"
)
clipc__like = django_filters.CharFilter(
field_name="fish__clipc", lookup_expr="icontains"
)
clipc__not_like = django_filters.CharFilter(
field_name="fish__clipc", lookup_expr="icontains", exclude=True
)
clipa = ValueInFilter(field_name="fish__clipa")
clipa__not = ValueInFilter(field_name="fish__clipa", exclude=True)
clipa__null = django_filters.BooleanFilter(
field_name="fish__clipa", lookup_expr="isnull"
)
clipa__like = django_filters.CharFilter(
field_name="fish__clipa", lookup_expr="icontains"
)
clipa__not_like = django_filters.CharFilter(
field_name="fish__clipa", lookup_expr="icontains", exclude=True
)
nodc = ValueInFilter(field_name="fish__nodc")
nodc__not = ValueInFilter(field_name="fish__nodc", exclude=True)
nodc__null = django_filters.BooleanFilter(
field_name="fish__nodc", lookup_expr="isnull"
)
nodc__like = django_filters.CharFilter(
field_name="fish__nodc", lookup_expr="icontains"
)
nodc__not_like = django_filters.CharFilter(
field_name="fish__nodc", lookup_expr="icontains", exclude=True
)
noda = ValueInFilter(field_name="fish__noda")
noda__not = ValueInFilter(field_name="fish__noda", exclude=True)
noda__null = django_filters.BooleanFilter(
field_name="fish__noda", lookup_expr="isnull"
)
noda__like = django_filters.CharFilter(
field_name="fish__noda", lookup_expr="icontains"
)
noda__not_like = django_filters.CharFilter(
field_name="fish__noda", lookup_expr="icontains", exclude=True
)
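# Added usage sketch (illustration only; the concrete subclass and queryset names
# are assumptions, not part of this module). django-filter FilterSets are bound
# to request data and a queryset, and expose the filtered result via `.qs`:
#   filterset = SomeFN125ChildFilters(
#       data={"year__gte": "2015", "tlen__gte": "350"},
#       queryset=SomeFN125ChildModel.objects.all(),
#   )
#   results = filterset.qs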
| 37.255319 | 88 | 0.724475 | 1,898 | 15,759 | 5.301897 | 0.076396 | 0.118951 | 0.171818 | 0.157408 | 0.845474 | 0.797476 | 0.780284 | 0.708636 | 0.655172 | 0.568021 | 0 | 0.006203 | 0.181547 | 15,759 | 422 | 89 | 37.343602 | 0.773996 | 0.031664 | 0 | 0.156863 | 0 | 0 | 0.268925 | 0.180684 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.005602 | 0 | 0.380952 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4 |
2d18e8169c2fc367046cbb04843a2cec05fd504a | 7,162 | py | Python | pcrond/sched.py | luca-vercelli/pcrond | ac5e9b987ae175521144bcf634fcd8316b4b334d | [
"MIT"
] | null | null | null | pcrond/sched.py | luca-vercelli/pcrond | ac5e9b987ae175521144bcf634fcd8316b4b334d | [
"MIT"
] | null | null | null | pcrond/sched.py | luca-vercelli/pcrond | ac5e9b987ae175521144bcf634fcd8316b4b334d | [
"MIT"
] | null | null | null | # most of the code here comes from https://github.com/dbader/schedule
from .job import Job, ALIASES
import logging
import time
logger = logging.getLogger('pcrond')
def std_launch_func(cmd_splitted, stdin=None):
"""
    The default way of executing a command is to spawn it with subprocess.Popen().
"""
if stdin is None:
def f():
logger.info("Now running: " + str(cmd_splitted))
from subprocess import Popen
Popen(cmd_splitted, stdin=None, stdout=None, stderr=None)
# not returning anything here
else:
def f():
logger.info("Now running: " + str(cmd_splitted))
from subprocess import Popen, PIPE
p = Popen(cmd_splitted, stdin=PIPE, stdout=None, stderr=None)
p.communicate(input=stdin)
# not returning anything here
return f
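# Added illustration (not part of the original module): std_launch_func builds a
# 0-ary callable that a Job can fire later, e.g.
#   job_body = std_launch_func(["echo", "hello"])
#   job_body()  # spawns the process with Popen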
class Scheduler(object):
"""
Objects instantiated by the :class:`Scheduler <Scheduler>` are
factories to create jobs, keep record of scheduled jobs and
handle their execution.
"""
def __init__(self):
self.delay = 60 # in seconds
self.jobs = []
self.ask_for_stop = False
def run_pending(self):
"""
Run all jobs that are scheduled to run.
Please note that it is *intended behavior that run_pending()
does not run missed jobs*. For example, if you've registered a job
that should run every minute and you only call run_pending()
in one hour increments then your job won't be run 60 times in
between but only once.
"""
logger.debug("available jobs: " + str(self.jobs))
runnable_jobs = (job for job in self.jobs if job.should_run())
logger.debug("runnable jobs: " + str(self.jobs))
for job in runnable_jobs:
job.run()
def run_all(self, delay_seconds=0):
"""
Run all jobs regardless if they are scheduled to run or not.
A delay of `delay` seconds is added between each job. This helps
distribute system load generated by the jobs more evenly
over time.
:param delay_seconds: A delay added between every executed job
"""
        logger.info('Running *all* %i jobs with %is delay in between',
len(self.jobs), delay_seconds)
for job in self.jobs[:]:
job.run()
time.sleep(delay_seconds)
def clear(self):
"""
Deletes scheduled jobs
"""
del self.jobs[:]
logger.info("jobs cleared")
def cancel_job(self, job):
"""
Delete a scheduled job.
If the job is running it won't be stopped.
:param job: The job to be unscheduled
"""
try:
self.jobs.remove(job)
except ValueError:
pass
def cron(self, crontab, job_func):
"""
Create a job and add it to this Scheduler
:param crontab:
string containing crontab pattern
Its tokens may be either: 1 (if alias), 5 (without year token),
6 (with year token)
:param job_func:
the job 0-ary function to run
:return: a Job
"""
job = Job(crontab, job_func, self)
self.jobs.append(job)
return job
def _load_crontab_line(self, rownum, crontab_line, job_func_func=std_launch_func, stdin=None):
"""
create a Job from a single crontab entry, and add it to this Scheduler
:param crontab_line:
a line from crontab
PRE: not empty and it not a comment
:param job_func_func:
function to be executed, @see load_crontab_file
:return: a Job
"""
pieces = crontab_line.split()
if pieces[0] in ALIASES.keys():
try:
# CASE 1 - pattern using alias
job = self.cron(pieces[0], job_func_func(pieces[1:]))
return job
except ValueError as e:
# shouldn't happen
logger.error(("Error at line %d, cannot parse pattern, the line will be ignored.\r\n" +
"Inner Exception: %s") % (rownum, str(e)))
return None
if len(pieces) < 6:
logger.error("Error at line %d, expected at least 6 tokens" % rownum)
return None
if len(pieces) >= 7:
try:
# CASE 2 - pattern including year
job = self.cron(" ".join(pieces[0:6]), job_func_func(pieces[6:]))
return job
except ValueError:
pass
try:
# CASE 3 - pattern not including year
job = self.cron(" ".join(pieces[0:5]), job_func_func(pieces[5:]))
return job
except ValueError as e:
logger.error(("Error at line %d, cannot parse pattern, the line will be ignored.\r\n" +
"Inner Exception: %s") % (rownum, str(e)))
return None
def _split_input_line(self, s):
"""
Command is split in command and stdin using %, not %%
:return: two strings, command and stdin
"""
# s == aaaa%%bbbbbb%cccc%dd%%ee
pieces = [x.split('%') for x in s.split('%%')]
# pieces == [[aaaa],[bbbbbb,cccc,dd],[ee]]
rejoin = "%".join(["\n".join(x) for x in pieces])
# rejoin == aaaa%bbbbbb\ncccc\ndd%ee
return rejoin.split('\n', 1)
# lines == [aaaa%bbbbbb,ccc\ndd%ee]
def load_crontab_file(self, crontab_file, clear=True, job_func_func=std_launch_func):
"""
Read crontab file, create corresponding jobs in this scheduler
:param crontab_file:
crontab file path
:param job_func_func:
a function that takes a list of tokens (from crontab file) and
returns a 0-args function
:param clear:
should the new schedule override the previous ones?
"""
if clear:
self.clear()
with open(crontab_file) as fp:
for rownum, line in enumerate(fp):
if line is not None: # not sure if this can happen
line = line.strip()
if line != "" and line[0] != "#":
# skip empty lines and comments
pieces = self._split_input_line(line)
stdin = pieces[1] if len(pieces) > 1 else None
self._load_crontab_line(rownum, pieces[0], job_func_func, stdin)
# TODO support % sign inside command, should consider pieces[1] if any
logger.info(str(len(self.jobs)) + " jobs loaded from configuration file")
def main_loop(self):
"""
Perform main run-and-wait loop.
"""
import time
while not self.ask_for_stop:
self.run_pending()
time.sleep(self.delay)
# FIXME this will look at self.ask_for_stop only every self.delay seconds
# see https://stackoverflow.com/questions/5114292/break-interrupt-a-time-sleep-in-python
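# Added usage sketch (illustration only; the crontab path is an assumption):
#   s = Scheduler()
#   s.load_crontab_file("/etc/pcrond/crontab")
#   s.main_loop()  # checks for runnable jobs every s.delay seconds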
| 37.302083 | 103 | 0.562692 | 912 | 7,162 | 4.335526 | 0.289474 | 0.019474 | 0.022256 | 0.010622 | 0.183359 | 0.148963 | 0.116844 | 0.116844 | 0.081437 | 0.081437 | 0 | 0.008099 | 0.344876 | 7,162 | 191 | 104 | 37.497382 | 0.834612 | 0.349902 | 0 | 0.311828 | 0 | 0 | 0.093765 | 0 | 0 | 0 | 0 | 0.010471 | 0 | 1 | 0.139785 | false | 0.021505 | 0.064516 | 0 | 0.311828 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
2d1939ff6d648f3379f25ae86317a8339f7cfcc1 | 469 | py | Python | simu/UserAgent.py | TheodoreKrypton/simu-data-catcher | 2625f6d4e33859cf0a7e0df4d190e3219e759228 | [
"MIT"
] | 3 | 2017-09-11T03:13:06.000Z | 2020-08-11T15:15:09.000Z | simu/UserAgent.py | TheodoreKrypton/simu-data-catcher | 2625f6d4e33859cf0a7e0df4d190e3219e759228 | [
"MIT"
] | null | null | null | simu/UserAgent.py | TheodoreKrypton/simu-data-catcher | 2625f6d4e33859cf0a7e0df4d190e3219e759228 | [
"MIT"
] | 6 | 2017-06-08T13:19:50.000Z | 2021-04-20T15:11:28.000Z | import urllib.request
import re
ptrn = re.compile("<textarea name='uas' id='uas_textfeld' rows='4' cols='30'>(.+?)</textarea>")
with open("user_agent.txt", "w") as fp:
for i in range(12381, 15480):
try:
url = "http://www.useragentstring.com/index.php?id=" + str(i)
            html = urllib.request.urlopen(url).read().decode("utf-8", errors="ignore")
user_agent = re.search(ptrn, html).group(1)
fp.write(user_agent + "\n")
except Exception:
pass
| 29.3125 | 95 | 0.567164 | 64 | 469 | 4.09375 | 0.75 | 0.103053 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.040462 | 0.26226 | 469 | 15 | 96 | 31.266667 | 0.716763 | 0 | 0 | 0 | 0 | 0 | 0.287846 | 0.055437 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0.083333 | 0.166667 | 0 | 0.166667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
2d1ab381951528b1b2a476a21ada75863d82ce29 | 2,436 | py | Python | Bot.py | Grohiik/BotMauData2020 | 7b7eecca1b5fdc73a0e9b3593cea62b2fb1333f8 | [
"MIT"
] | null | null | null | Bot.py | Grohiik/BotMauData2020 | 7b7eecca1b5fdc73a0e9b3593cea62b2fb1333f8 | [
"MIT"
] | null | null | null | Bot.py | Grohiik/BotMauData2020 | 7b7eecca1b5fdc73a0e9b3593cea62b2fb1333f8 | [
"MIT"
] | null | null | null | import discord
from discord.ext import commands
from secrets import Token as Token
bot = commands.Bot(command_prefix="!")
role_add_channel_id = 750036407757832242
@bot.event
async def on_message(message):
if bot.user == message.author:
return
if message.channel.id == role_add_channel_id:
await bot.process_commands(message)
await message.delete(delay=10)
@bot.command(name="färg", help="lägg till en färg")
async def color(payload):
if payload.channel.id == role_add_channel_id:
member = payload.author
guild = member.guild
check = True
color = discord.Colour(int(payload.message.content[6:12], base=16))
for role in member.roles:
if str(member.id) == role.name:
await role.edit(colour=(color))
await payload.channel.send(
content=f"Ändrade {member.display_name} färg till {color.value}",
delete_after=10,
)
check = False
break
if check:
await guild.create_role(
name=str(member.id), color=color, reason="färg roll"
)
for role in guild.roles:
if str(member.id) == role.name:
await member.add_roles(role)
await payload.channel.send(
content=f"Ändrade {member.display_name} färg till {color.value}",
delete_after=10,
)
break
@bot.event
async def on_command_error(ctx, error):
if isinstance(error, discord.ext.commands.errors.CommandNotFound):
await ctx.send("detta kommandet finns inte", delete_after=15)
await ctx.delete(delay=10)
@bot.event
async def on_command_error2(ctx, error):
await ctx.send("kommand error", delete_after=15)
await ctx.delete(delay=10)
# command to test if the bot is running
@bot.command(name="test", help="test")
async def test(ctx):
response = "Jag är online!"
await ctx.send(response)
# command to test if the bot is running
@bot.command(name="ping", help="test")
async def test2(ctx):
response = "pong 🏓"
await ctx.send(response)
# print a message if the bot is online
@bot.event
async def on_ready():
print("bot connected")
# change status to online
await bot.change_presence(activity=discord.Game("FÄRG"))
bot.run(Token)
| 29 | 89 | 0.614122 | 316 | 2,436 | 4.655063 | 0.316456 | 0.038069 | 0.03535 | 0.043508 | 0.35758 | 0.333107 | 0.265126 | 0.265126 | 0.176751 | 0.176751 | 0 | 0.022311 | 0.28243 | 2,436 | 83 | 90 | 29.349398 | 0.81865 | 0.055829 | 0 | 0.290323 | 0 | 0 | 0.099782 | 0.018301 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.048387 | 0 | 0.064516 | 0.016129 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2d1abf839257188564b4d1db171a9a9e2fcac08f | 1,572 | py | Python | cms/blocks.py | mitodl/micromasters | 2b1df8ac7c4395cc0a0227d936b3f021f0ae3019 | [
"BSD-3-Clause"
] | 32 | 2016-03-25T01:03:13.000Z | 2022-01-15T19:35:42.000Z | cms/blocks.py | mitodl/micromasters | 2b1df8ac7c4395cc0a0227d936b3f021f0ae3019 | [
"BSD-3-Clause"
] | 4,858 | 2016-03-03T13:48:30.000Z | 2022-03-29T22:09:51.000Z | cms/blocks.py | mitodl/micromasters | 2b1df8ac7c4395cc0a0227d936b3f021f0ae3019 | [
"BSD-3-Clause"
] | 20 | 2016-08-18T22:07:44.000Z | 2021-11-15T13:35:35.000Z | """Page blocks"""
from wagtail.core import blocks
from wagtail.images.blocks import ImageChooserBlock
class CourseTeamBlock(blocks.StructBlock):
"""
Block class that defines a course team member
"""
name = blocks.CharBlock(max_length=100, help_text="Name of the course team member.")
title = blocks.RichTextBlock(
required=False,
features=["bold", "italic"],
help_text="Title of the course team member."
)
bio = blocks.TextBlock(help_text="Short bio of course team member.")
image = ImageChooserBlock(
help_text='Image for the faculty member. Should be 385px by 385px.'
)
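# Added illustration (the field name below is an assumption, not part of this
# module): Wagtail StructBlocks like these are typically exposed through a
# StreamField on a page model, e.g.
#   faculty_members = StreamField([("member", CourseTeamBlock())], blank=True)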
class ImageWithLinkBlock(blocks.StructBlock):
""" Image with a clickable link on it """
image = ImageChooserBlock(label="Image", required=True, help_text="The image to display.")
link = blocks.URLBlock(
label="Link",
required=True,
help_text="Absolute URL to the image, like https://example.com/some_image.jpg"
)
align = blocks.ChoiceBlock(
choices=[('center', 'Center'), ('right', 'Right'), ('left', 'Left')],
default='left',
max_length=10,
)
width = blocks.IntegerBlock(required=False)
height = blocks.IntegerBlock(required=False)
class Meta:
template = 'cms/imagewithlink.html'
form_classname = 'ImageWithLinkBlock'
icon = 'picture'
class ResourceBlock(blocks.StructBlock):
"""
A custom block for resource pages.
"""
heading = blocks.CharBlock(max_length=100)
detail = blocks.RichTextBlock()
| 30.230769 | 94 | 0.660305 | 179 | 1,572 | 5.73743 | 0.497207 | 0.046738 | 0.062317 | 0.046738 | 0.093476 | 0 | 0 | 0 | 0 | 0 | 0 | 0.011438 | 0.221374 | 1,572 | 51 | 95 | 30.823529 | 0.827614 | 0.080789 | 0 | 0 | 0 | 0 | 0.2402 | 0.015681 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.058824 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2d1d61b3fcb8f382f84576e100b3996770495f89 | 25 | py | Python | env/Lib/site-packages/win32/mapi/__init__.py | Daniel-Key/HearStone-Python | 981584d2b9502319393bd92b48f0ec8d906b4d44 | [
"MIT"
] | null | null | null | env/Lib/site-packages/win32/mapi/__init__.py | Daniel-Key/HearStone-Python | 981584d2b9502319393bd92b48f0ec8d906b4d44 | [
"MIT"
] | 1 | 2020-10-27T14:44:08.000Z | 2020-10-27T14:44:08.000Z | env/Lib/site-packages/win32/mapi/__init__.py | Daniel-Key/HearStone-Python | 981584d2b9502319393bd92b48f0ec8d906b4d44 | [
"MIT"
] | null | null | null | from win32._mapi import * | 25 | 25 | 0.8 | 4 | 25 | 4.75 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.090909 | 0.12 | 25 | 1 | 25 | 25 | 0.772727 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
2d1e81bfe87388671566d46153dbbdae8f99b502 | 15,698 | py | Python | zygrader/grader.py | natecraddock/zygrader | 3a1d5c1dbe76c8f76c2a99f271a26b2ec873006a | [
"MIT"
] | 5 | 2019-11-15T17:42:42.000Z | 2021-04-20T19:35:25.000Z | zygrader/grader.py | natecraddock/zygrader | 3a1d5c1dbe76c8f76c2a99f271a26b2ec873006a | [
"MIT"
] | 76 | 2020-02-22T01:42:16.000Z | 2021-04-28T18:47:20.000Z | zygrader/grader.py | natecraddock/zygrader | 3a1d5c1dbe76c8f76c2a99f271a26b2ec873006a | [
"MIT"
] | 2 | 2020-02-21T04:39:38.000Z | 2021-04-20T19:35:20.000Z | """Grader: Menus and popups for grading and pair programming"""
import curses
import getpass
from zygrader import data, ui, utils
from zygrader.config import preferences
from zygrader.config.shared import SharedData
from zygrader.data import model
from zygrader.zybooks import Zybooks
from zygrader.ui import colors
def get_student_row_color_sort_index(lab, student):
"""Color the student names in the grader based on locked, flagged, or normal status"""
if data.lock.is_locked(student, lab) and not isinstance(student, str):
return curses.color_pair(colors.COLOR_PAIR_LOCKED), 0
if data.flags.is_submission_flagged(student,
lab) and not isinstance(student, str):
return curses.color_pair(colors.COLOR_PAIR_FLAGGED), 1
return curses.color_pair(colors.COLOR_PAIR_DEFAULT), 2
def fill_student_list(student_list: ui.layers.ListLayer,
students,
lab,
use_locks,
callback_fn=None):
student_list.clear_rows()
for student in students:
row = student_list.add_row_text(str(student), callback_fn, student, lab,
use_locks)
color, sort_index = get_student_row_color_sort_index(lab, student)
row.set_row_color(color)
row.set_row_sort_index(sort_index)
student_list.rebuild = True
def set_submission_message(popup: ui.layers.OptionsPopup,
submission: data.model.Submission):
popup.set_message(list(submission))
def get_submission(lab, student, use_locks=True):
"""Get a submission from zyBooks given the lab and student"""
window = ui.get_window()
zy_api = Zybooks()
# Lock student
if use_locks:
data.lock.lock(student, lab)
submission_response = zy_api.download_assignment(student, lab)
submission = data.model.Submission(student, lab, submission_response)
# Report missing files
if submission.flag & data.model.SubmissionFlag.BAD_ZIP_URL:
msg = [
f"One or more URLs for {student.full_name}'s code submission are bad.",
"Some files could not be downloaded. Please",
"View the most recent submission on zyBooks.",
]
        popup = ui.layers.Popup("Warning", msg)
window.run_layer(popup)
    # A student may have submitted past the due date or been granted an exception.
    # In case that happens, always allow a normal grade, but show a message.
if submission.flag == data.model.SubmissionFlag.NO_SUBMISSION:
pass
return submission
def pick_submission(submission_popup: ui.layers.OptionsPopup,
lab: data.model.Lab, student: data.model.Student,
submission: data.model.Submission):
"""Allow the user to pick a submission to view"""
window = ui.get_window()
zy_api = Zybooks()
# If the lab has multiple parts, prompt to pick a part
part_index = 0
if len(lab.parts) > 1:
part_index = submission.pick_part(pick_all=True)
if part_index is None:
return
if part_index == -1:
def wait_fn():
for i, part in enumerate(lab.parts):
part_submissions = zy_api.get_submissions_list(
part["id"], student.id)
if len(part_submissions) > 0:
part_response = zy_api.download_assignment_part(
lab, student.id, part,
len(part_submissions) - 1)
submission.update_part(part_response,
lab.parts.index(part))
set_submission_message(submission_popup, submission)
popup = ui.layers.WaitPopup("Downloading")
popup.set_message([f"Downloading latest submissions..."])
popup.set_wait_fn(wait_fn)
window.run_layer(popup)
return
# Get list of all submissions for that part
part = lab.parts[part_index]
all_submissions = zy_api.get_submissions_list(part["id"], student.id)
if not all_submissions:
popup = ui.layers.Popup("No Submissions",
["The student did not submit this part"])
window.run_layer(popup)
return
# Reverse to display most recent submission first
all_submissions.reverse()
popup = ui.layers.ListLayer("Select Submission", popup=True)
popup.set_exit_text("Cancel")
for sub in all_submissions:
popup.add_row_text(sub)
window.run_layer(popup)
if popup.canceled:
return
submission_index = popup.selected_index()
# Modify submission index to un-reverse the index
submission_index = abs(submission_index - (len(all_submissions) - 1))
# Fetch that submission
part_response = zy_api.download_assignment_part(lab, student.id, part,
submission_index)
submission.update_part(part_response, lab.parts.index(part))
set_submission_message(submission_popup, submission)
def view_diff(first: model.Submission, second: model.Submission):
"""View a diff of the two submissions"""
if (first.flag & model.SubmissionFlag.NO_SUBMISSION
or second.flag & model.SubmissionFlag.NO_SUBMISSION):
window = ui.get_window()
popup = ui.layers.Popup("No Submissions", [
"Cannot diff submissions because at least one student has not submitted."
])
window.run_layer(popup)
return
use_browser = preferences.get("browser_diff")
paths_a = utils.get_source_file_paths(first.files_directory)
paths_b = utils.get_source_file_paths(second.files_directory)
paths_a.sort()
paths_b.sort()
diff = utils.make_diff_string(paths_a, paths_b, first.student.full_name,
second.student.full_name, use_browser)
utils.view_string(diff, "submissions.diff", use_browser)
def run_code_fn(window, submission):
"""Callback to compile and run a submission's code"""
use_gdb = False
if not submission.compile_and_run_code(use_gdb):
popup = ui.layers.OptionsPopup("Error", ["Could not compile code"])
popup.add_option("View Log", submission.view_stderr)
window.run_layer(popup)
def pair_programming_submission_callback(lab, submission):
"""Show both pair programming students for viewing a diff"""
window = ui.get_window()
popup = ui.layers.OptionsPopup("Pair Programming Submission")
popup.set_message(submission)
popup.add_option(
"Pick Submission",
lambda: pick_submission(popup, lab, submission.student, submission))
popup.add_option("Run", lambda: run_code_fn(window, submission))
popup.add_option("View", lambda: submission.show_files())
window.run_layer(popup)
SharedData.running_process = None
def flag_submission(lab, student, flag_text="", flagtag=""):
"""Flag a submission with a note"""
window = ui.get_window()
if not flagtag:
flagtags = ["Needs Head TA", "Student Action Required", "Other"]
tag_input = ui.layers.ListLayer("Flag Tag", popup=True)
for tag in flagtags:
tag_input.add_row_text(tag)
window.run_layer(tag_input)
if tag_input.canceled:
return
flagtag = flagtags[tag_input.selected_index()]
text_input = ui.layers.TextInputLayer("Flag Note")
text_input.set_prompt(["Enter a flag note"])
text_input.set_text(flag_text)
window.run_layer(text_input)
if text_input.canceled:
return
flag_note = text_input.get_text()
full_message = f"{flagtag}: {flag_note}"
data.flags.flag_submission(student, lab, full_message)
def edit_flag(flag_string: str, student: model.Student, lab: model.Lab):
"""Edit the text in a flagged submission"""
# The note might contain `:` characters, so we handle that case
parts = flag_string.split(":")
tag_type = parts[0].strip()
tag_text = ":".join(parts[1:]).strip()
flag_submission(lab, student, tag_text, tag_type)
def can_get_through_locks(use_locks, student, lab):
if not use_locks:
return True
window = ui.get_window()
if data.lock.is_locked(student, lab):
netid = data.lock.get_locked_netid(student, lab)
        # Allow grading only if the current user is the one who locked it
if netid != getpass.getuser():
name = data.netid_to_name(netid)
msg = [f"This student is already being graded by {name}"]
popup = ui.layers.Popup("Student Locked", msg)
window.run_layer(popup)
return False
if data.flags.is_submission_flagged(student, lab):
flag_message = data.flags.get_flag_message(student, lab)
msg = [
"This submission has been flagged",
"",
flag_message,
]
popup = ui.layers.OptionsPopup("Submission Flagged", msg)
popup.add_option("Edit")
popup.add_option("Unflag")
popup.add_option("View")
window.run_layer(popup)
choice = popup.get_selected()
if choice == "Edit":
edit_flag(flag_message, student, lab)
return False
elif choice == "Unflag":
data.flags.unflag_submission(student, lab)
elif choice == "View":
return True
else:
return False
return True
def pair_programming_message(first, second) -> list:
"""To support dynamic updates on the pair programming popup"""
return [
f"{first.student.full_name} {first.latest_submission}",
f"{second.student.full_name} {second.latest_submission}",
"",
"Pick a student's submission to view or view the diff",
]
def grade_pair_programming(first_submission, use_locks):
"""Pick a second student to grade pair programming with"""
# Get second student
window = ui.get_window()
students = data.get_students()
lab = first_submission.lab
student_list = ui.layers.ListLayer()
student_list.set_searchable("Student")
student_list.set_sortable()
fill_student_list(student_list, students, lab, use_locks)
window.run_layer(student_list)
if student_list.canceled:
return
# Get student
student_index = student_list.selected_index()
student = students[student_index]
if not can_get_through_locks(use_locks, student, lab):
return
try:
second_submission = get_submission(lab, student, use_locks)
if second_submission is None:
return
if second_submission == first_submission:
popup = ui.layers.Popup(
"Invalid Student",
["The first and second students are the same"])
window.run_layer(popup)
return
first_submission_fn = lambda: pair_programming_submission_callback(
lab, first_submission)
second_submission_fn = lambda: pair_programming_submission_callback(
lab, second_submission)
msg = lambda: pair_programming_message(first_submission,
second_submission)
popup = ui.layers.OptionsPopup("Pair Programming")
popup.set_message(msg)
popup.add_option(first_submission.student.full_name,
first_submission_fn)
popup.add_option(second_submission.student.full_name,
second_submission_fn)
popup.add_option("View Diff",
lambda: view_diff(first_submission, second_submission))
window.run_layer(popup)
finally:
if use_locks:
data.lock.unlock(student, lab)
def diff_parts_fn(window, submission):
"""Callback for text diffing parts of a submission"""
error = submission.diff_parts()
if error:
        popup = ui.layers.Popup("Error", [error])
window.run_layer(popup)
def student_select_fn(student, lab, use_locks):
"""Show the submission for the selected lab and student"""
window = ui.get_window()
# Wait for student's assignment to be available
if not can_get_through_locks(use_locks, student, lab):
return
try:
# Get the student's submission
submission = get_submission(lab, student, use_locks)
# Exit if student has not submitted
if submission is None:
return
def flag_submission_fn():
flag_submission(lab, student)
# Return to the list of students
events = ui.get_events()
events.push_layer_close_event()
popup = ui.layers.OptionsPopup("Submission")
set_submission_message(popup, submission)
popup.add_option("Flag", flag_submission_fn)
popup.add_option(
"Pick Submission",
lambda: pick_submission(popup, lab, student, submission))
popup.add_option("Pair Programming",
lambda: grade_pair_programming(submission, use_locks))
if submission.flag & data.model.SubmissionFlag.DIFF_PARTS:
popup.add_option("Diff Parts",
lambda: diff_parts_fn(window, submission))
popup.add_option("Run", lambda: run_code_fn(window, submission))
popup.add_option("View", lambda: submission.show_files())
window.run_layer(popup)
SharedData.running_process = None
finally:
# Always unlock the lab when no longer grading
if use_locks:
data.lock.unlock(student, lab)
def watch_students(student_list, students, lab, use_locks):
"""Register paths when the filtered list is created"""
paths = [SharedData.get_locks_directory(), SharedData.get_flags_directory()]
data.fs_watch.fs_watch_register(paths, "student_list_watch",
fill_student_list, student_list, students,
lab, use_locks, student_select_fn)
def lab_select_fn(selected_index, use_locks, student: model.Student = None):
"""Callback function that executes after selecting a lab"""
lab = data.get_labs()[selected_index]
# Skip selecting a student and go immediately to the grader
if student:
student_select_fn(student, lab, use_locks)
return
window = ui.get_window()
students = data.get_students()
student_list = ui.layers.ListLayer()
student_list.set_searchable("Student")
student_list.set_sortable()
fill_student_list(student_list, students, lab, use_locks, student_select_fn)
# Register a watch function to watch the students
watch_students(student_list, students, lab, use_locks)
    # Remove the file watch handler when done choosing students
student_list.set_destroy_fn(
lambda: data.fs_watch.fs_watch_unregister("student_list_watch"))
window.register_layer(student_list, lab.name)
def grade(use_locks=True, student: model.Student = None):
"""Create the list of labs to pick one to grade"""
window = ui.get_window()
labs = data.get_labs()
if not labs:
popup = ui.layers.Popup("Error")
popup.set_message(["No labs have been created yet"])
window.run_layer(popup)
return
title = "Grader"
if not use_locks:
title = "Run for Fun"
lab_list = ui.layers.ListLayer()
lab_list.set_searchable("Lab")
for index, lab in enumerate(labs):
lab_list.add_row_text(str(lab), lab_select_fn, index, use_locks,
student)
window.register_layer(lab_list, title)
| 35.118568 | 90 | 0.647407 | 1,940 | 15,698 | 5.022165 | 0.140722 | 0.029354 | 0.024428 | 0.027302 | 0.362619 | 0.272401 | 0.235862 | 0.19573 | 0.149133 | 0.141538 | 0 | 0.000951 | 0.263409 | 15,698 | 446 | 91 | 35.197309 | 0.84165 | 0.107657 | 0 | 0.27476 | 0 | 0 | 0.080757 | 0.008845 | 0 | 0 | 0 | 0 | 0 | 1 | 0.063898 | false | 0.009585 | 0.025559 | 0 | 0.172524 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2d205b0c022a8abc4d1eb512a97ffb4ce79cbfb0 | 1,212 | py | Python | jupiter/utils/EmailUtil.py | gaott/jupiter | 29fb266b080e9c8ca921a39e57a5e6a803375746 | [
"Apache-2.0"
] | null | null | null | jupiter/utils/EmailUtil.py | gaott/jupiter | 29fb266b080e9c8ca921a39e57a5e6a803375746 | [
"Apache-2.0"
] | null | null | null | jupiter/utils/EmailUtil.py | gaott/jupiter | 29fb266b080e9c8ca921a39e57a5e6a803375746 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
'''
Created on July 1, 2013
@author: gaott
'''
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.utils import COMMASPACE
import smtplib
def sendEmail(body):
strFrom = 'example@gmail.com'
strTo = ['abc@xxx.com','efg@xxx.com']
subject = 'Warning Message'
server = 'smtp.gmail.com'
port = 25
user = "example@gmail.com"
passwd = "******"
msgRoot = MIMEMultipart('related')
msgRoot['Subject'] = subject
msgRoot['From'] = strFrom
msgRoot['To'] = COMMASPACE.join(strTo)
msgRoot.preamble = 'This is a multi-part message in MIME format.'
# Encapsulate the plain and HTML versions of the message body in an
# 'alternative' part, so message agents can decide which they want to display.
msgAlternative = MIMEMultipart('alternative')
msgRoot.attach(msgAlternative)
msgText = MIMEText(body, 'plain', 'utf-8')
msgAlternative.attach(msgText)
smtp = smtplib.SMTP(server, port)
smtp.ehlo()
smtp.starttls()
smtp.login(user, passwd)
smtp.sendmail(strFrom, strTo, msgRoot.as_string())
smtp.quit()
return
if __name__ == '__main__':
sendEmail("hello")
| 26.347826 | 82 | 0.671617 | 145 | 1,212 | 5.551724 | 0.558621 | 0.03354 | 0.037267 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.010331 | 0.20132 | 1,212 | 45 | 83 | 26.933333 | 0.821281 | 0.168317 | 0 | 0 | 0 | 0 | 0.189379 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.033333 | false | 0.066667 | 0.133333 | 0 | 0.2 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
2d220d13853eb72792eb10d11e5500d16a8130fd | 3,857 | py | Python | physics/n-body/n-body.py | ludius0/simulations | 3f8992edfef89d0450479647a96b889c6f0f43a3 | [
"MIT"
] | null | null | null | physics/n-body/n-body.py | ludius0/simulations | 3f8992edfef89d0450479647a96b889c6f0f43a3 | [
"MIT"
] | null | null | null | physics/n-body/n-body.py | ludius0/simulations | 3f8992edfef89d0450479647a96b889c6f0f43a3 | [
"MIT"
] | null | null | null | # libs
import pygame
import math
import sys
from random import randrange, seed
from numbers import Real
# simulation settings
G = 1
sum_mass = 50.0
softening = 100 # also functions as a speed component
seed(1)
# additional settings
COLLIS_MERGE = True
ACTIVE_BORDERS = False
# world objects
NBODIES = 150
BODIES = []
# pygame settings
COLOR = (255, 192, 64)
min_size = 0
wsize = (700, 700)
# init pygame
pygame.init()
screen = pygame.display.set_mode(wsize)
pygame.display.set_caption("N-body")
# support functions
def check_borders(body, min=min_size, max=wsize[0]):
    for index, (p, v) in enumerate(zip(body.pos, body.vel)):
        if p+v <= 0:
            body.vel[index] = -v
        elif p+v >= wsize[1]:
            body.vel[index] = -v
def create_rand_vec3(min=min_size, max=500, regulate=1):
return [randrange(min, max) / regulate, randrange(min, max) / regulate, randrange(min, max) / regulate]
# physic object
class Body:
def __init__(self, mass: float, position: list, velocity: list):
assert isinstance(mass, Real) and isinstance(position, list) and isinstance(velocity, list)
assert len(position) == 3 and len(velocity) == 3
self.mass = mass
self.pos = position
self.vel = velocity
self.dvel = [0., 0., 0.]
self.collision = False
self.volume = 5
self.radius = 1.06
def fg(self, other):
assert isinstance(other, Body)
# distance between two bodies
x_ = other.pos[0] - self.pos[0]
y_ = other.pos[1] - self.pos[1]
z_ = other.pos[2] - self.pos[2]
distance = [x_, y_, z_]
r = math.sqrt(x_**2 + y_**2 + z_**2)
# collision
error = abs(x_)+abs(y_)+abs(z_)
if error <= 1:
print("Collision!")
other.collision = True
# compute Newton's law of gravitation, F = G*(m1*m2)/r**2, scaled per axis by the distance component
for index in range(3):
f = (G * self.mass * other.mass / r**2) * distance[index] #/ r * softening
self.dvel[index] = self.dvel[index] + f / self.mass
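# Quick sanity check of the per-axis term (hypothetical values, not part of the
# original): with G=1, m1=m2=1 and the bodies 2 units apart along x only,
# r = 2 and f_x = (1*1*1/2**2) * 2 = 0.5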
def comp_radius(self, other):
self.volume += other.volume
# radius of a sphere with the merged volume: V = 4/3*pi*r**3  =>  r = (3V / (4*pi))**(1/3)
self.radius = (self.volume * 3 / (4 * math.pi))**(1/3)
def update(self):
# Velocity and delta velocity
self.vel = [self.vel[0]+self.dvel[0], self.vel[1]+self.dvel[1], self.vel[2]+self.dvel[2]]
self.dvel = [0., 0., 0.]
x = self.pos[0] + self.vel[0]
y = self.pos[1] + self.vel[1]
z = self.pos[2] + self.vel[2]
self.pos = [x, y, z]
# generate bodies
mass = sum_mass / NBODIES
for n in range(NBODIES):
BODIES.append(Body(mass, create_rand_vec3(min=min_size+100, max=wsize[0]-100), create_rand_vec3(min=-1, max=1, regulate=10)))
# Event loop
while True:
screen.fill((0, 0, 0))
for event in pygame.event.get():
if event.type == pygame.QUIT:
sys.exit()
# update simulation
for b1 in BODIES:
# iterate over a snapshot so merged bodies can be removed from BODIES safely
for b2 in list(BODIES):
# skip self-interaction and bodies that have already been merged away
if b1 is b2 or b1 not in BODIES or b2 not in BODIES:
continue
# calculate Newton's law
b1.fg(b2)
# check for collision
if b2.collision and COLLIS_MERGE:
# delete one body and update velocity & mass of the surviving one
b1.mass += b2.mass
b1.vel = [b1.vel[0]+b2.vel[0], b1.vel[1]+b2.vel[1], b1.vel[2]+b2.vel[2]]
b1.comp_radius(b2)
BODIES.remove(b2)
# update every pos
for b in BODIES:
if ACTIVE_BORDERS:
check_borders(b)
b.update()
# draw with pygame
for b in BODIES:
pygame.draw.circle(screen, COLOR, (b.pos[0], b.pos[1]), b.radius)
#pygame.display.update()
pygame.display.flip()
pygame.quit()
| 29 | 129 | 0.577132 | 560 | 3,857 | 3.907143 | 0.280357 | 0.025594 | 0.013711 | 0.023309 | 0.063528 | 0.053473 | 0.031536 | 0.031536 | 0 | 0 | 0 | 0.044542 | 0.289863 | 3,857 | 132 | 130 | 29.219697 | 0.75429 | 0.143635 | 0 | 0.068182 | 0 | 0 | 0.004881 | 0 | 0 | 0 | 0 | 0 | 0.034091 | 1 | 0.068182 | false | 0 | 0.056818 | 0.011364 | 0.147727 | 0.011364 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2d2277cdda5d2b5e047ce9495c3c9bd5671495c6 | 136 | py | Python | core/handlers/main_db_handler.py | Tampan793/Watermark-Bot | e872f85675e7cdaeeae5efcb1a0af59625d554f5 | [
"MIT"
] | null | null | null | core/handlers/main_db_handler.py | Tampan793/Watermark-Bot | e872f85675e7cdaeeae5efcb1a0af59625d554f5 | [
"MIT"
] | null | null | null | core/handlers/main_db_handler.py | Tampan793/Watermark-Bot | e872f85675e7cdaeeae5efcb1a0af59625d554f5 | [
"MIT"
] | null | null | null | # (c) @M4SK3R1N
from configs import Config
from core.database import Database
db = Database(Config.DATABASE_URL, Config.BOT_USERNAME)
| 19.428571 | 55 | 0.794118 | 19 | 136 | 5.578947 | 0.631579 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.02521 | 0.125 | 136 | 6 | 56 | 22.666667 | 0.865546 | 0.095588 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.666667 | 0 | 0.666667 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 4 |
2d23358acadeaa1dff525002691c0097420904a8 | 4,026 | py | Python | src/item/consumables.py | roozhou/botty | a67a87845687cdf6900af10a13dc7170684faa9a | [
"MIT"
] | null | null | null | src/item/consumables.py | roozhou/botty | a67a87845687cdf6900af10a13dc7170684faa9a | [
"MIT"
] | null | null | null | src/item/consumables.py | roozhou/botty | a67a87845687cdf6900af10a13dc7170684faa9a | [
"MIT"
] | null | null | null | from config import Config
from dataclasses import dataclass
from logger import Logger
from d2r_image.data_models import HoveredItem
@dataclass
class Consumables:
tp: int = 0
id: int = 0
rejuv: int = 0
health: int = 0
mana: int = 0
key: int = 0
def __getitem__(self, key):
return super().__getattribute__(key)
def __setitem__(self, key, value):
setattr(self, key, value)
def any_needs(self):
return sum([self.tp, self.id, self.rejuv, self.health, self.mana, self.key])
def as_dict(self):
return {
"tp": self.tp,
"id": self.id,
"rejuv": self.rejuv,
"health": self.health,
"mana": self.mana,
"key": self.key
}
consumable_needs = Consumables()
ITEM_CONSUMABLES_MAP = {
"rejuvenation potion": "rejuv",
"full rejuvenation potion": "rejuv",
"rejuvpotion": "rejuv",
"super healing potion": "health",
"greater healing potion": "health",
"healing potion": "health",
"healingpotion": "health",
"light healing potion": "health",
"minor healing potion": "health",
"super mana potion": "mana",
"greater mana potion": "mana",
"mana potion": "mana",
"manapotion": "mana",
"light mana potion": "mana",
"minor mana potion": "mana",
"scroll of town portal": "tp",
"scroll of identify": "id",
"key": "key"
}
pot_cols = {
"rejuv": Config().char["belt_rejuv_columns"],
"health": Config().char["belt_hp_columns"],
"mana": Config().char["belt_mp_columns"],
}
def get_needs(consumable_type: str = None):
if consumable_type:
consumable = reduce_name(consumable_type)
return consumable_needs[consumable]
return consumable_needs
def set_needs(consumable_type: str, quantity: int):
global consumable_needs
consumable = reduce_name(consumable_type)
consumable_needs[consumable] = quantity
def increment_need(consumable_type: str = None, quantity: int = 1):
"""
Adjust the consumable_needs of a specific consumable
:param consumable_type: Name of item in pickit or in consumable_map
:param quantity: Increase the need (+int) or decrease the need (-int)
"""
global consumable_needs
consumable = reduce_name(consumable_type)
consumable_needs[consumable] = max(0, consumable_needs[consumable] + quantity)
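# Hypothetical usage (item names follow ITEM_CONSUMABLES_MAP above):
#   increment_need("super healing potion", 2)   # two more health potions needed
#   increment_need("health", -1)                # one health potion restocked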
def reduce_name(consumable_type: str):
if consumable_type.lower() in ITEM_CONSUMABLES_MAP:
consumable_type = ITEM_CONSUMABLES_MAP[consumable_type]
elif consumable_type.lower() in ITEM_CONSUMABLES_MAP.values():
pass
else:
Logger.warning(f"adjust_consumable_need: unknown item: {consumable_type}")
return consumable_type
def get_remaining(item_name: str = None) -> int:
if item_name is None:
Logger.error("get_remaining: param item_name is required")
return -1
if item_name.lower() in ["health", "mana", "rejuv"]:
return pot_cols[item_name] * Config().char["belt_rows"] - consumable_needs[item_name]
elif item_name.lower() in ['tp', 'id']:
return 20 - consumable_needs[item_name]
elif item_name.lower() == "key":
return 12 - consumable_needs[item_name]
else:
Logger.error(f"get_remaining: error with item_name={item_name}")
return -1
def should_buy(item_name: str = None, min_remaining: int = None, min_needed: int = None) -> bool:
if item_name is None:
Logger.error("should_buy: param item_name is required")
return False
if min_needed:
return consumable_needs[item_name] >= min_needed
elif min_remaining:
return get_remaining(item_name) <= min_remaining
else:
Logger.error("should_buy: need to specify min_remaining or min_needed")
return False
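# Hypothetical usage: e.g. should_buy("tp", min_remaining=5) would trigger a restock
# of town portal scrolls once 5 or fewer remain (threshold chosen for illustration only)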
def is_consumable(item: HoveredItem) -> str | bool:
for consumable_type in ITEM_CONSUMABLES_MAP.keys():
if item.Name.lower() == consumable_type:
return consumable_type
return False | 34.410256 | 97 | 0.664183 | 509 | 4,026 | 5.037328 | 0.21611 | 0.098284 | 0.035101 | 0.037442 | 0.227769 | 0.170047 | 0.147426 | 0.095944 | 0.064743 | 0.064743 | 0 | 0.004792 | 0.222553 | 4,026 | 117 | 98 | 34.410256 | 0.814377 | 0.047193 | 0 | 0.203884 | 0 | 0 | 0.191975 | 0.011539 | 0 | 0 | 0 | 0 | 0 | 1 | 0.106796 | false | 0.009709 | 0.038835 | 0.029126 | 0.378641 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2d24021470465eba916fd57e573635e8e812a4a2 | 2,612 | py | Python | gorden_crawler/spiders/shopbop.py | Enmming/gorden_cralwer | 3c279e4f80eaf90f3f03acd31b75cf991952adee | [
"Apache-2.0"
] | 2 | 2019-02-22T13:51:08.000Z | 2020-08-03T14:01:30.000Z | gorden_crawler/spiders/shopbop.py | Enmming/gorden_cralwer | 3c279e4f80eaf90f3f03acd31b75cf991952adee | [
"Apache-2.0"
] | null | null | null | gorden_crawler/spiders/shopbop.py | Enmming/gorden_cralwer | 3c279e4f80eaf90f3f03acd31b75cf991952adee | [
"Apache-2.0"
] | 1 | 2020-08-03T14:01:32.000Z | 2020-08-03T14:01:32.000Z | # -*- coding: utf-8 -*-
from scrapy.spiders import Spider
from scrapy.selector import Selector
import re
from scrapy import Request
from gorden_crawler.spiders.shopbop_eastdane_common import ShopbopEastdaneCommon
class ShopBopSpider(ShopbopEastdaneCommon):
#class ShopBopSpider(Spider):
name = "shopbop"
allowed_domains = ["shopbop.com"]
shopbop_base_url = 'https://www.shopbop.com'
custom_settings = {
# 'USER_AGENT': 'search_crawler (+http://www.shijisearch.com)',
'COOKIES_ENABLED' : True,
'DOWNLOAD_TIMEOUT': 60,
'RETRY_TIMES': 20,
}
start_urls = [
'https://www.shopbop.com',
]
#
gender_start_urls_map = {
'https://cn.shopbop.com/clothing/br/v=1/2534374302155112.htm' : {'product_type' : 'clothing'},
'https://cn.shopbop.com/shoes/br/v=1/2534374302024643.htm' : {'product_type' : 'shoes'},
'https://cn.shopbop.com/bags/br/v=1/2534374302024667.htm' : {'product_type' : 'bags'},
'https://cn.shopbop.com/accessories/br/v=1/2534374302024641.htm' : {'product_type' : 'accessories'},
}
def parse(self, response):
url_suffixs = [
# shopbop
'https://www.shopbop.com/clothing/br/v=1/2534374302155112.htm',
'https://www.shopbop.com/shoes/br/v=1/2534374302024643.htm',
'https://www.shopbop.com/bags/br/v=1/2534374302024667.htm',
'https://www.shopbop.com/accessories/br/v=1/2534374302024641.htm'
]
avoid_302_redirect_tail_str = '?switchToCurrency=USD&switchToLocation=US&switchToLanguage=zh'
for url_suffix in url_suffixs:
url = url_suffix + avoid_302_redirect_tail_str
yield Request(url, callback=self.parse_product_type)
def parse_product_type(self, response):
response_link = response.url
product_type = self.gender_start_urls_map[response_link]['product_type']
gender = 'women'
sel = Selector(response)
category_links = sel.xpath('//li[@class="leftNavCategoryLi nav-item"]/a')[1:]
category_url = {}
for category_link in category_links:
url = self.shopbop_base_url + category_link.xpath('./@href').extract()[0]
category = category_link.xpath('./text()').extract()[0]
if not re.search(r'Boutique', category):
category_url[category] = url
yield Request(url, callback=self.parse_pages, meta={'category' : category, 'product_type' : product_type, 'gender' : gender, 'category_url' : category_url}) | 45.824561 | 172 | 0.637825 | 302 | 2,612 | 5.327815 | 0.344371 | 0.068365 | 0.019888 | 0.067122 | 0.282163 | 0.238658 | 0.198881 | 0.198881 | 0 | 0 | 0 | 0.074111 | 0.225115 | 2,612 | 57 | 172 | 45.824561 | 0.72085 | 0.048622 | 0 | 0 | 0 | 0 | 0.335484 | 0.036694 | 0 | 0 | 0 | 0 | 0 | 1 | 0.042553 | false | 0 | 0.106383 | 0 | 0.297872 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2d24304d5ecf67d3329a74e6bd41bb16295b32dd | 67 | py | Python | vnpy/api/oanda/workers/__init__.py | WongLynn/vnpy_Amerlin-1.1.20 | d701d8f12c29cc33f58ea025920b0c7240f74f82 | [
"MIT"
] | 11 | 2019-10-28T13:01:48.000Z | 2021-06-20T03:38:09.000Z | vnpy/api/oanda/workers/__init__.py | Rayshawn8/vnpy_Amerlin-1.1.20 | d701d8f12c29cc33f58ea025920b0c7240f74f82 | [
"MIT"
] | null | null | null | vnpy/api/oanda/workers/__init__.py | Rayshawn8/vnpy_Amerlin-1.1.20 | d701d8f12c29cc33f58ea025920b0c7240f74f82 | [
"MIT"
] | 6 | 2019-10-28T13:16:13.000Z | 2020-09-08T08:03:41.000Z | from .transaction import *
from .tick import *
from .order import * | 22.333333 | 26 | 0.746269 | 9 | 67 | 5.555556 | 0.555556 | 0.4 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.164179 | 67 | 3 | 27 | 22.333333 | 0.892857 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 5 |
2d25dfe193d9f222df81203587f5dc472c35b589 | 1,104 | py | Python | features/steps/login_steps.py | adrisalas/flask-appointmentsDoctor-TDD-BDD | 1f7c14f83c85144e844f3db9a4fb27eebcf678a2 | [
"MIT"
] | null | null | null | features/steps/login_steps.py | adrisalas/flask-appointmentsDoctor-TDD-BDD | 1f7c14f83c85144e844f3db9a4fb27eebcf678a2 | [
"MIT"
] | null | null | null | features/steps/login_steps.py | adrisalas/flask-appointmentsDoctor-TDD-BDD | 1f7c14f83c85144e844f3db9a4fb27eebcf678a2 | [
"MIT"
] | null | null | null | from behave import given, when, then
@given(u'estoy en la pagina de login')
def flask_setup(context):
context.client.get('/logout') # log out first; a lingering authenticated session would break the login assertions below
context.page = context.client.get('/login')
assert "Iniciar Sesión".encode("utf-8") in context.page.data
@given(u'inicio sesion con "{email}" y "{password}"')
@when(u'inicio sesion con "{email}" y "{password}"')
def login(context, email, password):
context.page = context.client.post('/login', data=dict(
email=email,
password=password
), follow_redirects=True)
assert context.page
@then(u'debo ver el mensaje de error "{alert}"')
def logged_in_error(context, alert):
assert alert.encode("utf-8") in context.page.data
@then(u'debo ver el mensaje de exito "{alert}" y el menu de "{menu}"')
def logged_in_success(context, alert, menu):
assert alert.encode("utf-8") in context.page.data \
and menu.encode("utf-8") in context.page.data
with context.session.session_transaction() as sess:
assert 'patient' in sess.keys() or 'doctor' in sess.keys() | 40.888889 | 95 | 0.692935 | 169 | 1,104 | 4.485207 | 0.414201 | 0.101583 | 0.05277 | 0.063325 | 0.311346 | 0.311346 | 0.311346 | 0.100264 | 0.100264 | 0 | 0 | 0.004324 | 0.162138 | 1,104 | 27 | 96 | 40.888889 | 0.815135 | 0.053442 | 0 | 0 | 0 | 0 | 0.26341 | 0 | 0 | 0 | 0 | 0 | 0.217391 | 1 | 0.173913 | false | 0.173913 | 0.043478 | 0 | 0.217391 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
2d2653b77757ecdc03fee246ac217f3cdcfedd54 | 3,679 | py | Python | app/main/views.py | arondasamuel123/FlaskBlog | 33fa1bbc288a831b29a95bfff0b514a8d6f93f4e | [
"MIT"
] | null | null | null | app/main/views.py | arondasamuel123/FlaskBlog | 33fa1bbc288a831b29a95bfff0b514a8d6f93f4e | [
"MIT"
] | null | null | null | app/main/views.py | arondasamuel123/FlaskBlog | 33fa1bbc288a831b29a95bfff0b514a8d6f93f4e | [
"MIT"
] | null | null | null | from . import main
from flask import render_template, url_for,redirect
from app.models import Posts, User, Comment
from flask_login import current_user, login_required
from .forms import PostForm, CommentForm, UpdateBlogForm
from .. import db
from sqlalchemy import desc
from ..requests import get_quotes
from ..email import mail_message
@main.route('/')
def home():
posts = Posts.query.order_by(Posts.blog_created.desc()).all()
quote = get_quotes()
user = current_user
if user.user_type == 'User':
return render_template('home.html', posts=posts, quote=quote)
else:
return render_template('notuser.html')
@main.route('/writer')
def writer():
posts = Posts.query.order_by(Posts.blog_created.desc()).all()
user = current_user
if user.user_type == 'Writer':
return render_template('writer.html',posts=posts)
@main.route('/create', methods=['GET','POST'])
@login_required
def create_post():
post_form = PostForm()
user = current_user
if user.user_type == 'Writer':
if post_form.validate_on_submit():
post = Posts(title=post_form.title.data, category=post_form.category.data, blog=post_form.post.data,user=current_user)
post.save_post()
users = User.query.filter_by(user_type='User').all()
for user in users:
mail_message("New Post has arrived", "email/new_post", user.email, user=user)
return redirect(url_for('main.writer'))
else:
return "This page is for only writers"
return render_template('createpost.html', post_form=post_form)
@main.route('/post/<int:id>')
def get_post(id):
post = Posts.query.filter_by(id=id).all()
return render_template('viewpost.html', post=post)
@main.route('/createcomment/<int:id>', methods=['GET', 'POST'])
@login_required
def create_comment(id):
comment_post = Posts.query.get(id)
user = current_user
comment_form = CommentForm()
if user.user_type == 'User':
if comment_form.validate_on_submit():
new_comment = Comment(comment=comment_form.comment.data, user=current_user, post=comment_post)
new_comment.save_comment()
return "Comment added"
else:
return "This page is for only users"
return render_template('addcomment.html', comment_form=comment_form)
@main.route('/viewcomments/<int:id>')
def get_comments(id):
comments = Comment.query.filter_by(post_id=id).all()
return render_template('viewcomment.html', comments=comments)
@main.route('/dblog/<int:id>', methods=['GET', 'POST'])
def delete_blog(id):
delete_post = Posts.query.filter_by(id=id).first()
db.session.delete(delete_post)
db.session.commit()
return redirect(url_for('main.writer'))
# return "Post Deleted"
@main.route('/ublog/<int:id>', methods=['GET', 'POST'])
def update_blog(id):
blog_update = Posts.query.filter_by(id=id).first()
update_form = UpdateBlogForm()
if update_form.validate_on_submit():
blog_update.title = update_form.title.data
blog_update.blog = update_form.post.data
blog_update.category = update_form.category.data
db.session.add(blog_update)
db.session.commit()
return "Blog updated"
return render_template("update.html", update_form=update_form)
@main.route('/dcomment/<int:id>', methods=['GET', 'POST'])
def delete_comment(id):
delete_comm = Comment.query.filter_by(id=id).first()
db.session.delete(delete_comm)
db.session.commit()
return redirect(url_for('main.writer'))
| 31.991304 | 130 | 0.668116 | 487 | 3,679 | 4.858316 | 0.186858 | 0.053254 | 0.067625 | 0.023669 | 0.331361 | 0.300507 | 0.257396 | 0.142012 | 0.112426 | 0.074387 | 0 | 0 | 0.199783 | 3,679 | 115 | 131 | 31.991304 | 0.803668 | 0.005708 | 0 | 0.244186 | 0 | 0 | 0.117856 | 0.012305 | 0 | 0 | 0 | 0 | 0 | 1 | 0.104651 | false | 0 | 0.104651 | 0 | 0.383721 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2d26eedc95197a2362633c769d7c96b7f86fab9b | 2,117 | py | Python | examples/tree_view_example.py | cgpipline/dayu_widgets | 040a09fb9a20ce72997a3fba60e381e3944bff59 | [
"MIT"
] | 157 | 2019-03-10T05:55:21.000Z | 2022-03-31T09:07:00.000Z | examples/tree_view_example.py | cgpipline/dayu_widgets | 040a09fb9a20ce72997a3fba60e381e3944bff59 | [
"MIT"
] | 16 | 2019-07-15T11:30:53.000Z | 2021-12-16T14:17:59.000Z | examples/tree_view_example.py | phenom-films/dayu_widgets | 1eb8fbf2847f9de95af2cd62d5eaec392f1c1e22 | [
"MIT"
] | 56 | 2019-06-19T03:35:27.000Z | 2022-03-22T08:07:32.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
###################################################################
# Author: Mu yanru
# Date : 2019.2
# Email : muyanru345@163.com
###################################################################
import examples._mock_data as mock
from dayu_widgets import dayu_theme
from dayu_widgets.field_mixin import MFieldMixin
from dayu_widgets.item_model import MTableModel, MSortFilterModel
from dayu_widgets.item_view import MTreeView
from dayu_widgets.line_edit import MLineEdit
from dayu_widgets.push_button import MPushButton
from dayu_widgets.qt import *
class TreeViewExample(QWidget, MFieldMixin):
def __init__(self, parent=None):
super(TreeViewExample, self).__init__(parent)
self._init_ui()
def _init_ui(self):
model_1 = MTableModel()
model_1.set_header_list(mock.header_list)
model_sort = MSortFilterModel()
model_sort.setSourceModel(model_1)
tree_view = MTreeView()
tree_view.setModel(model_sort)
model_sort.set_header_list(mock.header_list)
tree_view.set_header_list(mock.header_list)
model_1.set_data_list(mock.tree_data_list)
line_edit = MLineEdit().search().small()
line_edit.textChanged.connect(model_sort.set_search_pattern)
expand_all_button = MPushButton('Expand All').small()
expand_all_button.clicked.connect(tree_view.expandAll)
collapse_all_button = MPushButton('Collapse All').small()
collapse_all_button.clicked.connect(tree_view.collapseAll)
button_lay = QHBoxLayout()
button_lay.addWidget(expand_all_button)
button_lay.addWidget(collapse_all_button)
button_lay.addWidget(line_edit)
button_lay.addStretch()
main_lay = QVBoxLayout()
main_lay.addLayout(button_lay)
main_lay.addWidget(tree_view)
main_lay.addStretch()
self.setLayout(main_lay)
if __name__ == '__main__':
import sys
app = QApplication(sys.argv)
test = TreeViewExample()
dayu_theme.apply(test)
test.show()
sys.exit(app.exec_())
| 32.569231 | 68 | 0.671233 | 251 | 2,117 | 5.290837 | 0.358566 | 0.042169 | 0.079066 | 0.038404 | 0.155873 | 0.115211 | 0.048193 | 0 | 0 | 0 | 0 | 0.009233 | 0.181389 | 2,117 | 64 | 69 | 33.078125 | 0.757069 | 0.047709 | 0 | 0 | 0 | 0 | 0.015983 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.044444 | false | 0 | 0.2 | 0 | 0.266667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2d281bc3b188a3528fb07c6f83aec0f80d042987 | 385 | py | Python | app/http/pili_service.py | sy264115809/techshow | 7d9c8d003b6a652684b140b601061ac59dc44892 | [
"MIT"
] | null | null | null | app/http/pili_service.py | sy264115809/techshow | 7d9c8d003b6a652684b140b601061ac59dc44892 | [
"MIT"
] | null | null | null | app/http/pili_service.py | sy264115809/techshow | 7d9c8d003b6a652684b140b601061ac59dc44892 | [
"MIT"
] | 1 | 2021-09-14T18:01:39.000Z | 2021-09-14T18:01:39.000Z | # coding=utf-8
from pili import *
from flask import current_app
def _hub():
credentials = Credentials(current_app.config['PILI_ACCESS_KEY'], current_app.config['PILI_SECRET_KEY'])
return Hub(credentials, current_app.config['PILI_HUB_NAME'])
def get_stream(stream_id):
return _hub().get_stream(stream_id)
def create_dynamic_stream():
return _hub().create_stream()
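# Hypothetical usage inside a Flask application context (assumes the PILI_* keys
# referenced above are present in the app config):
#   stream = get_stream('example-stream-id')
#   new_stream = create_dynamic_stream()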
| 22.647059 | 107 | 0.755844 | 55 | 385 | 4.927273 | 0.418182 | 0.147601 | 0.177122 | 0.221402 | 0.228782 | 0 | 0 | 0 | 0 | 0 | 0 | 0.002976 | 0.127273 | 385 | 16 | 108 | 24.0625 | 0.803571 | 0.031169 | 0 | 0 | 0 | 0 | 0.115903 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.333333 | false | 0 | 0.222222 | 0.222222 | 0.888889 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 4 |
2d28a277601e69e469768302b3fb700a887a42db | 1,131 | py | Python | test/use_case_examples/startUp.py | sbanik1/sheetTrap | 287746bf33b41e7f1066e80ee12bd08f75b155bc | [
"MIT"
] | null | null | null | test/use_case_examples/startUp.py | sbanik1/sheetTrap | 287746bf33b41e7f1066e80ee12bd08f75b155bc | [
"MIT"
] | null | null | null | test/use_case_examples/startUp.py | sbanik1/sheetTrap | 287746bf33b41e7f1066e80ee12bd08f75b155bc | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
This code sets the directories for running the test codes
Created on Sat Dec 19 12:51:22 2020
@author: Swarnav Banik
sbanik1@umd.edu
"""
# %% Import all ###############################################################
import sys
import matplotlib.pyplot as plt
# %% Add necessary paths ######################################################
sys.path.insert(1, '/Users/swarnav/Google Drive/Work/Projects/Imaging/sheetTrap/src')
# %% Define the output directory ##############################################
saveDir = '/Users/swarnav/Google Drive/Work/Projects/Imaging/sheetTrap/test/out'
# %% Set some default values ##################################################
params = {
'image.origin': 'lower',
'image.interpolation': 'nearest',
'image.cmap': 'gray',
'axes.grid': True,
'axes.labelsize': 14, # fontsize for x and y labels (was 10)
'axes.titlesize': 12,
'font.size': 8,
'legend.fontsize': 6, # was 10
'xtick.labelsize': 12,
'ytick.labelsize': 12,
'text.usetex': False,
'font.family': 'serif',
}
plt.rcParams.update(params) | 34.272727 | 85 | 0.542882 | 127 | 1,131 | 4.834646 | 0.748032 | 0.039088 | 0.058632 | 0.074919 | 0.166124 | 0.166124 | 0.166124 | 0.166124 | 0 | 0 | 0 | 0.030864 | 0.140584 | 1,131 | 33 | 86 | 34.272727 | 0.600823 | 0.282051 | 0 | 0 | 0 | 0 | 0.523077 | 0.220513 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.105263 | 0 | 0.105263 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2d28c3e225ac0e71c89f1c665bbce426470ea70d | 8,038 | py | Python | tests/basic.py | srobo-legacy/comp-webdis | 5cef63976700ca262a87ed24f10abb908fa434de | [
"BSD-2-Clause"
] | null | null | null | tests/basic.py | srobo-legacy/comp-webdis | 5cef63976700ca262a87ed24f10abb908fa434de | [
"BSD-2-Clause"
] | null | null | null | tests/basic.py | srobo-legacy/comp-webdis | 5cef63976700ca262a87ed24f10abb908fa434de | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/python
import urllib2, unittest, json, hashlib
from functools import wraps
try:
import bson
except:
bson = None
try:
import msgpack
except:
msgpack = None
host = '127.0.0.1'
port = 7379
class TestWebdis(unittest.TestCase):
def wrap(self,url):
return 'http://%s:%d/%s' % (host, port, url)
def query(self, url, data = None, headers={}):
r = urllib2.Request(self.wrap(url), data, headers)
return urllib2.urlopen(r)
class TestBasics(TestWebdis):
def test_crossdomain(self):
f = self.query('crossdomain.xml')
self.assertTrue(f.headers.getheader('Content-Type') == 'application/xml')
self.assertTrue("allow-access-from domain" in f.read())
def test_options(self):
pass
# not sure if OPTIONS is supported by urllib2...
# f = self.query('') # TODO: call with OPTIONS.
# self.assertTrue(f.headers.getheader('Content-Type') == 'text/html')
# self.assertTrue(f.headers.getheader('Allow') == 'GET,POST,PUT,OPTIONS')
# self.assertTrue(f.headers.getheader('Content-Length') == '0')
# self.assertTrue(f.headers.getheader('Access-Control-Allow-Origin') == '*')
class TestJSON(TestWebdis):
def test_set(self):
"success type (+OK)"
self.query('DEL/hello')
f = self.query('SET/hello/world')
self.assertTrue(f.headers.getheader('Content-Type') == 'application/json')
self.assertTrue(f.headers.getheader('ETag') == '"0db1124cf79ffeb80aff6d199d5822f8"')
self.assertTrue(f.read() == '{"SET":[true,"OK"]}')
def test_get(self):
"string type"
self.query('SET/hello/world')
f = self.query('GET/hello')
self.assertTrue(f.headers.getheader('Content-Type') == 'application/json')
self.assertTrue(f.headers.getheader('ETag') == '"8cf38afc245b7a6a88696566483d1390"')
self.assertTrue(f.read() == '{"GET":"world"}')
def test_incr(self):
"integer type"
self.query('DEL/hello')
f = self.query('INCR/hello')
self.assertTrue(f.headers.getheader('Content-Type') == 'application/json')
self.assertTrue(f.headers.getheader('ETag') == '"500e9bcdcbb1e98f25c1fbb880a96c99"')
self.assertTrue(f.read() == '{"INCR":1}')
def test_list(self):
"list type"
self.query('DEL/hello')
self.query('RPUSH/hello/abc')
self.query('RPUSH/hello/def')
f = self.query('LRANGE/hello/0/-1')
self.assertTrue(f.headers.getheader('Content-Type') == 'application/json')
self.assertTrue(f.headers.getheader('ETag') == '"622e51f547a480bef7cf5452fb7782db"')
self.assertTrue(f.read() == '{"LRANGE":["abc","def"]}')
def test_error(self):
"error return type"
f = self.query('UNKNOWN/COMMAND')
self.assertTrue(f.headers.getheader('Content-Type') == 'application/json')
try:
obj = json.loads(f.read())
except:
self.assertTrue(False)
return
self.assertTrue(len(obj) == 1)
self.assertTrue('UNKNOWN' in obj)
self.assertTrue(isinstance(obj['UNKNOWN'], list))
self.assertTrue(obj['UNKNOWN'][0] == False)
self.assertTrue(isinstance(obj['UNKNOWN'][1], unicode))
class TestCustom(TestWebdis):
def test_list(self):
"List responses with custom format"
self.query('DEL/hello')
self.query('RPUSH/hello/a/b/c')
f = self.query('LRANGE/hello/0/-1.txt')
self.assertTrue(f.headers.getheader('Content-Type') == 'text/plain')
self.assertTrue(f.read() == "abc")
def test_separator(self):
"Separator in list responses with custom format"
self.query('DEL/hello')
self.query('RPUSH/hello/a/b/c')
f = self.query('LRANGE/hello/0/-1.txt?sep=--')
self.assertTrue(f.headers.getheader('Content-Type') == 'text/plain')
self.assertTrue(f.read() == "a--b--c")
class TestRaw(TestWebdis):
def test_set(self):
"success type (+OK)"
self.query('DEL/hello')
f = self.query('SET/hello/world.raw')
self.assertTrue(f.headers.getheader('Content-Type') == 'binary/octet-stream')
self.assertTrue(f.read() == "+OK\r\n")
def test_get(self):
"string type"
self.query('SET/hello/world')
f = self.query('GET/hello.raw')
self.assertTrue(f.read() == '$5\r\nworld\r\n')
def test_incr(self):
"integer type"
self.query('DEL/hello')
f = self.query('INCR/hello.raw')
self.assertTrue(f.read() == ':1\r\n')
def test_list(self):
"list type"
self.query('DEL/hello')
self.query('RPUSH/hello/abc')
self.query('RPUSH/hello/def')
f = self.query('LRANGE/hello/0/-1.raw')
self.assertTrue(f.read() == "*2\r\n$3\r\nabc\r\n$3\r\ndef\r\n")
def test_error(self):
"error return type"
f = self.query('UNKNOWN/COMMAND.raw')
self.assertTrue(f.read().startswith("-ERR "))
def need_bson(fn):
# skip the wrapped test body when the bson module is unavailable
@wraps(fn)
def wrapper(self):
if bson:
fn(self)
return wrapper
class TestBSon(TestWebdis):
@need_bson
def test_set(self):
"success type (+OK)"
self.query('DEL/hello')
f = self.query('SET/hello/world.bson')
self.assertTrue(f.headers.getheader('Content-Type') == 'application/bson')
obj = bson.decode_all(f.read())
self.assertTrue(obj == [{u'SET': [True, bson.Binary('OK', 0)]}])
@need_bson
def test_get(self):
"string type"
self.query('SET/hello/world')
f = self.query('GET/hello.bson')
obj = bson.decode_all(f.read())
self.assertTrue(obj == [{u'GET': bson.Binary('world', 0)}])
@need_bson
def test_incr(self):
"integer type"
self.query('DEL/hello')
f = self.query('INCR/hello.bson')
obj = bson.decode_all(f.read())
self.assertTrue(obj == [{u'INCR': 1L}])
@need_bson
def test_list(self):
"list type"
self.query('DEL/hello')
self.query('RPUSH/hello/abc')
self.query('RPUSH/hello/def')
f = self.query('LRANGE/hello/0/-1.bson')
obj = bson.decode_all(f.read())
self.assertTrue(obj == [{u'LRANGE': [bson.Binary('abc', 0), bson.Binary('def', 0)]}])
@need_bson
def test_error(self):
"error return type"
f = self.query('UNKNOWN/COMMAND.bson')
obj = bson.decode_all(f.read())
self.assertTrue(len(obj) == 1)
self.assertTrue(u'UNKNOWN' in obj[0])
self.assertTrue(isinstance(obj[0], dict))
self.assertTrue(isinstance(obj[0][u'UNKNOWN'], list))
self.assertTrue(obj[0]['UNKNOWN'][0] == False)
self.assertTrue(isinstance(obj[0]['UNKNOWN'][1], bson.Binary))
def need_msgpack(fn):
# skip the wrapped test body when the msgpack module is unavailable
@wraps(fn)
def wrapper(self):
if msgpack:
fn(self)
return wrapper
class TestMsgPack(TestWebdis):
@need_msgpack
def test_set(self):
"success type (+OK)"
self.query('DEL/hello')
f = self.query('SET/hello/world.msg')
self.assertTrue(f.headers.getheader('Content-Type') == 'application/x-msgpack')
obj = msgpack.loads(f.read())
self.assertTrue(obj == {'SET': (True, 'OK')})
@need_msgpack
def test_get(self):
"string type"
self.query('SET/hello/world')
f = self.query('GET/hello.msg')
obj = msgpack.loads(f.read())
self.assertTrue(obj == {'GET': 'world'})
@need_msgpack
def test_incr(self):
"integer type"
self.query('DEL/hello')
f = self.query('INCR/hello.msg')
obj = msgpack.loads(f.read())
self.assertTrue(obj == {'INCR': 1})
@need_msgpack
def test_list(self):
"list type"
self.query('DEL/hello')
self.query('RPUSH/hello/abc')
self.query('RPUSH/hello/def')
f = self.query('LRANGE/hello/0/-1.msg')
obj = msgpack.loads(f.read())
self.assertTrue(obj == {'LRANGE': ('abc', 'def')})
@need_msgpack
def test_error(self):
"error return type"
f = self.query('UNKNOWN/COMMAND.msg')
obj = msgpack.loads(f.read())
self.assertTrue('UNKNOWN' in obj)
self.assertTrue(isinstance(obj, dict))
self.assertTrue(isinstance(obj['UNKNOWN'], tuple))
self.assertTrue(obj['UNKNOWN'][0] == False)
self.assertTrue(isinstance(obj['UNKNOWN'][1], str))
class TestETag(TestWebdis):
def test_etag_match(self):
self.query('SET/hello/world')
h = hashlib.md5("world").hexdigest() # match Etag
try:
f = self.query('GET/hello.txt', None, {'If-None-Match': '"'+ h +'"'})
except urllib2.HTTPError as e:
self.assertTrue(e.code == 304)
return
self.assertTrue(False) # we should have received a 304.
def test_etag_fail(self):
self.query('SET/hello/world')
h = hashlib.md5("nonsense").hexdigest() # non-matching Etag
f = self.query('GET/hello.txt', None, {'If-None-Match': '"'+ h +'"'})
self.assertTrue(f.read() == 'world')
if __name__ == '__main__':
unittest.main()
| 29.229091 | 87 | 0.672555 | 1,156 | 8,038 | 4.630623 | 0.139273 | 0.154306 | 0.086867 | 0.078087 | 0.70297 | 0.63385 | 0.62563 | 0.592752 | 0.543247 | 0.484214 | 0 | 0.019806 | 0.126897 | 8,038 | 274 | 88 | 29.335766 | 0.742947 | 0.055362 | 0 | 0.546667 | 0 | 0.004444 | 0.261145 | 0.042997 | 0 | 0 | 0 | 0.00365 | 0.244444 | 0 | null | null | 0.004444 | 0.017778 | null | null | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |